-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/DMA-API.txt2
-rw-r--r--Documentation/DocBook/kernel-api.tmpl5
-rw-r--r--Documentation/RCU/checklist.txt2
-rw-r--r--Documentation/RCU/rcuref.txt16
-rw-r--r--Documentation/RCU/whatisRCU.txt2
-rw-r--r--Documentation/SELinux.txt27
-rw-r--r--Documentation/block/deadline-iosched.txt14
-rw-r--r--Documentation/cdrom/ide-cd3
-rw-r--r--Documentation/cpu-freq/index.txt10
-rw-r--r--Documentation/hwmon/adt747318
-rw-r--r--Documentation/hwmon/sysfs-interface12
-rw-r--r--Documentation/kernel-parameters.txt12
-rw-r--r--Documentation/scsi/scsi_fc_transport.txt36
-rw-r--r--Documentation/x86/00-INDEX4
-rw-r--r--Documentation/x86/boot.txt (renamed from Documentation/x86/i386/boot.txt)2
-rw-r--r--Documentation/x86/mtrr.txt (renamed from Documentation/mtrr.txt)4
-rw-r--r--Documentation/x86/pat.txt54
-rw-r--r--Documentation/x86/usb-legacy-support.txt (renamed from Documentation/x86/i386/usb-legacy-support.txt)0
-rw-r--r--Documentation/x86/x86_64/boot-options.txt4
-rw-r--r--Documentation/x86/zero-page.txt (renamed from Documentation/x86/i386/zero-page.txt)0
-rw-r--r--MAINTAINERS5
-rw-r--r--Makefile2
-rw-r--r--arch/mips/sibyte/swarm/platform.c4
-rw-r--r--arch/x86/Kconfig75
-rw-r--r--arch/x86/Kconfig.cpu18
-rw-r--r--arch/x86/boot/compressed/head_32.S5
-rw-r--r--arch/x86/boot/compressed/misc.c12
-rw-r--r--arch/x86/boot/header.S1
-rw-r--r--arch/x86/configs/i386_defconfig19
-rw-r--r--arch/x86/configs/x86_64_defconfig29
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/crc32c-intel.c197
-rw-r--r--arch/x86/ia32/ia32_aout.c11
-rw-r--r--arch/x86/ia32/ia32_signal.c21
-rw-r--r--arch/x86/ia32/sys_ia32.c9
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/alternative.c8
-rw-r--r--arch/x86/kernel/aperture_64.c6
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/asm-offsets_64.c2
-rw-r--r--arch/x86/kernel/bios_uv.c10
-rw-r--r--arch/x86/kernel/cpu/common_64.c51
-rw-r--r--arch/x86/kernel/cpu/cpufreq/p4-clockmod.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c2
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c7
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c4
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c274
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c86
-rw-r--r--arch/x86/kernel/cpuid.c1
-rw-r--r--arch/x86/kernel/crash_dump_64.c13
-rw-r--r--arch/x86/kernel/ds.c954
-rw-r--r--arch/x86/kernel/efi.c6
-rw-r--r--arch/x86/kernel/entry_64.S4
-rw-r--r--arch/x86/kernel/head64.c5
-rw-r--r--arch/x86/kernel/ioport.c1
-rw-r--r--arch/x86/kernel/ipi.c3
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kernel/irq_64.c2
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/ldt.c1
-rw-r--r--arch/x86/kernel/nmi.c11
-rw-r--r--arch/x86/kernel/olpc.c6
-rw-r--r--arch/x86/kernel/paravirt.c1
-rw-r--r--arch/x86/kernel/paravirt_patch_32.c2
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/pci-gart_64.c27
-rw-r--r--arch/x86/kernel/pcspeaker.c13
-rw-r--r--arch/x86/kernel/process.c3
-rw-r--r--arch/x86/kernel/process_32.c62
-rw-r--r--arch/x86/kernel/process_64.c170
-rw-r--r--arch/x86/kernel/ptrace.c480
-rw-r--r--arch/x86/kernel/reboot.c6
-rw-r--r--arch/x86/kernel/setup.c16
-rw-r--r--arch/x86/kernel/setup_percpu.c9
-rw-r--r--arch/x86/kernel/sigframe.h5
-rw-r--r--arch/x86/kernel/signal_32.c12
-rw-r--r--arch/x86/kernel/signal_64.c112
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/sys_i386_32.c2
-rw-r--r--arch/x86/kernel/sys_x86_64.c44
-rw-r--r--arch/x86/kernel/syscall_64.c4
-rw-r--r--arch/x86/kernel/time_32.c1
-rw-r--r--arch/x86/kernel/tls.c1
-rw-r--r--arch/x86/kernel/traps_64.c66
-rw-r--r--arch/x86/kernel/tsc.c290
-rw-r--r--arch/x86/kernel/visws_quirks.c16
-rw-r--r--arch/x86/kernel/vm86_32.c1
-rw-r--r--arch/x86/kernel/vmi_32.c10
-rw-r--r--arch/x86/lib/msr-on-cpu.c78
-rw-r--r--arch/x86/lib/string_32.c42
-rw-r--r--arch/x86/lib/strstr_32.c6
-rw-r--r--arch/x86/mach-default/setup.c4
-rw-r--r--arch/x86/mm/discontig_32.c2
-rw-r--r--arch/x86/mm/dump_pagetables.c4
-rw-r--r--arch/x86/mm/fault.c3
-rw-r--r--arch/x86/mm/init_32.c1
-rw-r--r--arch/x86/mm/init_64.c8
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/numa_64.c10
-rw-r--r--arch/x86/mm/pageattr.c4
-rw-r--r--arch/x86/mm/pgtable.c6
-rw-r--r--arch/x86/mm/pgtable_32.c3
-rw-r--r--arch/x86/oprofile/op_model_p4.c175
-rw-r--r--arch/x86/pci/amd_bus.c2
-rw-r--r--arch/x86/pci/irq.c67
-rw-r--r--arch/x86/power/hibernate_asm_32.S14
-rw-r--r--arch/x86/xen/enlighten.c20
-rw-r--r--block/Makefile4
-rw-r--r--block/as-iosched.c14
-rw-r--r--block/blk-barrier.c72
-rw-r--r--block/blk-core.c605
-rw-r--r--block/blk-exec.c6
-rw-r--r--block/blk-integrity.c33
-rw-r--r--block/blk-map.c68
-rw-r--r--block/blk-merge.c129
-rw-r--r--block/blk-settings.c43
-rw-r--r--block/blk-softirq.c175
-rw-r--r--block/blk-sysfs.c35
-rw-r--r--block/blk-tag.c22
-rw-r--r--block/blk-timeout.c238
-rw-r--r--block/blk.h48
-rw-r--r--block/blktrace.c32
-rw-r--r--block/bsg.c6
-rw-r--r--block/cfq-iosched.c57
-rw-r--r--block/cmd-filter.c9
-rw-r--r--block/compat_ioctl.c1
-rw-r--r--block/deadline-iosched.c40
-rw-r--r--block/elevator.c40
-rw-r--r--block/genhd.c965
-rw-r--r--block/ioctl.c124
-rw-r--r--block/scsi_ioctl.c8
-rw-r--r--crypto/Kconfig127
-rw-r--r--crypto/Makefile12
-rw-r--r--crypto/algapi.c147
-rw-r--r--crypto/algboss.c (renamed from crypto/cryptomgr.c)92
-rw-r--r--crypto/ansi_cprng.c417
-rw-r--r--crypto/api.c81
-rw-r--r--crypto/blkcipher.c29
-rw-r--r--crypto/chainiv.c44
-rw-r--r--crypto/eseqiv.c35
-rw-r--r--crypto/fips.c27
-rw-r--r--crypto/internal.h18
-rw-r--r--crypto/krng.c66
-rw-r--r--crypto/proc.c57
-rw-r--r--crypto/rng.c126
-rw-r--r--crypto/seqiv.c27
-rw-r--r--crypto/tcrypt.c1347
-rw-r--r--crypto/tcrypt.h8709
-rw-r--r--crypto/testmgr.c1868
-rw-r--r--crypto/testmgr.h8738
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c25
-rw-r--r--drivers/ata/ata_piix.c184
-rw-r--r--drivers/ata/libata-core.c252
-rw-r--r--drivers/ata/libata-eh.c375
-rw-r--r--drivers/ata/libata-scsi.c112
-rw-r--r--drivers/ata/libata.h5
-rw-r--r--drivers/ata/pata_bf54x.c34
-rw-r--r--drivers/ata/pata_sil680.c2
-rw-r--r--drivers/ata/sata_fsl.c26
-rw-r--r--drivers/ata/sata_inic162x.c8
-rw-r--r--drivers/ata/sata_mv.c28
-rw-r--r--drivers/ata/sata_nv.c16
-rw-r--r--drivers/ata/sata_promise.c16
-rw-r--r--drivers/ata/sata_qstor.c12
-rw-r--r--drivers/ata/sata_sil.c16
-rw-r--r--drivers/ata/sata_sil24.c12
-rw-r--r--drivers/ata/sata_sis.c28
-rw-r--r--drivers/ata/sata_svw.c10
-rw-r--r--drivers/ata/sata_uli.c24
-rw-r--r--drivers/ata/sata_via.c24
-rw-r--r--drivers/ata/sata_vsc.c10
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/class.c136
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/block/aoe/aoeblk.c6
-rw-r--r--drivers/block/aoe/aoecmd.c19
-rw-r--r--drivers/block/aoe/aoedev.c2
-rw-r--r--drivers/block/cciss.c8
-rw-r--r--drivers/block/cciss_scsi.c151
-rw-r--r--drivers/block/cciss_scsi.h4
-rw-r--r--drivers/block/cpqarray.c2
-rw-r--r--drivers/block/floppy.c31
-rw-r--r--drivers/block/nbd.c4
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/block/ps3disk.c11
-rw-r--r--drivers/block/virtio_blk.c14
-rw-r--r--drivers/block/xen-blkfront.c76
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/btusb.c8
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/cdrom/gdrom.c4
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/hwmon/abituguru3.c3
-rw-r--r--drivers/hwmon/it87.c70
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-disk.c15
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/mad.c5
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h14
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c3
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c225
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c211
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c7
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c51
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c59
-rw-r--r--drivers/infiniband/hw/nes/nes.c95
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c41
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c205
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h6
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c122
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c88
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c30
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c68
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c31
-rw-r--r--drivers/md/dm-crypt.c109
-rw-r--r--drivers/md/dm-exception-store.c29
-rw-r--r--drivers/md/dm-ioctl.c10
-rw-r--r--drivers/md/dm-mpath.c50
-rw-r--r--drivers/md/dm-mpath.h2
-rw-r--r--drivers/md/dm-raid1.c4
-rw-r--r--drivers/md/dm-stripe.c4
-rw-r--r--drivers/md/dm-table.c97
-rw-r--r--drivers/md/dm.c40
-rw-r--r--drivers/md/dm.h10
-rw-r--r--drivers/md/linear.c10
-rw-r--r--drivers/md/md.c15
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c10
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c14
-rw-r--r--drivers/md/raid5.c75
-rw-r--r--drivers/memstick/core/mspro_block.c4
-rw-r--r--drivers/misc/eeepc-laptop.c16
-rw-r--r--drivers/mmc/card/block.c12
-rw-r--r--drivers/mtd/ftl.c24
-rw-r--r--drivers/mtd/mtd_blkdevs.c16
-rw-r--r--drivers/pnp/Makefile5
-rw-r--r--drivers/pnp/pnpacpi/core.c2
-rw-r--r--drivers/pnp/pnpbios/core.c2
-rw-r--r--drivers/s390/block/dasd_proc.c3
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/scsi/zfcp_aux.c150
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c45
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c75
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h1
-rw-r--r--drivers/s390/scsi/zfcp_def.h179
-rw-r--r--drivers/s390/scsi/zfcp_erp.c229
-rw-r--r--drivers/s390/scsi/zfcp_ext.h27
-rw-r--r--drivers/s390/scsi/zfcp_fc.c227
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c584
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h75
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c67
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c28
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c60
-rw-r--r--drivers/scsi/Kconfig8
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c4
-rw-r--r--drivers/scsi/gdth.c60
-rw-r--r--drivers/scsi/gdth.h2
-rw-r--r--drivers/scsi/gdth_proc.c66
-rw-r--r--drivers/scsi/gdth_proc.h3
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ide-scsi.c2
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/libiscsi.c19
-rw-r--r--drivers/scsi/libsas/sas_ata.c10
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c30
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c6
-rw-r--r--drivers/scsi/ncr53c8xx.c4
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h71
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c338
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/scsi.c105
-rw-r--r--drivers/scsi/scsi_error.c90
-rw-r--r--drivers/scsi/scsi_lib.c56
-rw-r--r--drivers/scsi/scsi_netlink.c523
-rw-r--r--drivers/scsi/scsi_priv.h7
-rw-r--r--drivers/scsi/scsi_proc.c8
-rw-r--r--drivers/scsi/scsi_scan.c20
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/scsi_tgt_lib.c8
-rw-r--r--drivers/scsi/scsi_transport_fc.c60
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/sd.c122
-rw-r--r--drivers/scsi/sg.c667
-rw-r--r--drivers/scsi/sr.c7
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/scsi/tmscsim.c4
-rw-r--r--fs/bio-integrity.c29
-rw-r--r--fs/bio.c297
-rw-r--r--fs/block_dev.c182
-rw-r--r--fs/dlm/config.c77
-rw-r--r--fs/dlm/dlm_internal.h7
-rw-r--r--fs/dlm/lockspace.c158
-rw-r--r--fs/dlm/lockspace.h1
-rw-r--r--fs/dlm/user.c124
-rw-r--r--fs/dlm/user.h4
-rw-r--r--fs/fat/fatent.c14
-rw-r--r--fs/gfs2/glock.c15
-rw-r--r--fs/gfs2/glock.h1
-rw-r--r--fs/gfs2/incore.h38
-rw-r--r--fs/gfs2/inode.c159
-rw-r--r--fs/gfs2/inode.h2
-rw-r--r--fs/gfs2/locking/dlm/mount.c3
-rw-r--r--fs/gfs2/log.c21
-rw-r--r--fs/gfs2/mount.c7
-rw-r--r--fs/gfs2/ops_address.c18
-rw-r--r--fs/gfs2/ops_file.c16
-rw-r--r--fs/gfs2/ops_fstype.c578
-rw-r--r--fs/gfs2/ops_inode.c127
-rw-r--r--fs/gfs2/ops_super.c108
-rw-r--r--fs/gfs2/super.c340
-rw-r--r--fs/gfs2/super.h6
-rw-r--r--fs/gfs2/sys.c11
-rw-r--r--fs/partitions/check.c268
-rw-r--r--fs/partitions/check.h4
-rw-r--r--fs/splice.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h8
-rw-r--r--fs/xfs/xfs_log.c7
-rw-r--r--include/asm-x86/a.out-core.h6
-rw-r--r--include/asm-x86/a.out.h6
-rw-r--r--include/asm-x86/acpi.h6
-rw-r--r--include/asm-x86/agp.h6
-rw-r--r--include/asm-x86/alternative.h6
-rw-r--r--include/asm-x86/amd_iommu.h6
-rw-r--r--include/asm-x86/amd_iommu_types.h6
-rw-r--r--include/asm-x86/apic.h15
-rw-r--r--include/asm-x86/apicdef.h6
-rw-r--r--include/asm-x86/arch_hooks.h6
-rw-r--r--include/asm-x86/asm.h13
-rw-r--r--include/asm-x86/atomic_32.h6
-rw-r--r--include/asm-x86/atomic_64.h6
-rw-r--r--include/asm-x86/auxvec.h6
-rw-r--r--include/asm-x86/bios_ebda.h6
-rw-r--r--include/asm-x86/bitops.h6
-rw-r--r--include/asm-x86/boot.h6
-rw-r--r--include/asm-x86/bootparam.h6
-rw-r--r--include/asm-x86/bug.h6
-rw-r--r--include/asm-x86/bugs.h6
-rw-r--r--include/asm-x86/byteorder.h6
-rw-r--r--include/asm-x86/cache.h6
-rw-r--r--include/asm-x86/cacheflush.h6
-rw-r--r--include/asm-x86/calgary.h6
-rw-r--r--include/asm-x86/checksum_32.h6
-rw-r--r--include/asm-x86/checksum_64.h6
-rw-r--r--include/asm-x86/cmpxchg_32.h6
-rw-r--r--include/asm-x86/cmpxchg_64.h6
-rw-r--r--include/asm-x86/compat.h6
-rw-r--r--include/asm-x86/cpu.h6
-rw-r--r--include/asm-x86/cpufeature.h6
-rw-r--r--include/asm-x86/current.h6
-rw-r--r--include/asm-x86/debugreg.h6
-rw-r--r--include/asm-x86/delay.h6
-rw-r--r--include/asm-x86/desc.h6
-rw-r--r--include/asm-x86/desc_defs.h6
-rw-r--r--include/asm-x86/device.h6
-rw-r--r--include/asm-x86/div64.h6
-rw-r--r--include/asm-x86/dma-mapping.h6
-rw-r--r--include/asm-x86/dma.h6
-rw-r--r--include/asm-x86/dmi.h6
-rw-r--r--include/asm-x86/ds.h266
-rw-r--r--include/asm-x86/dwarf2.h6
-rw-r--r--include/asm-x86/e820.h7
-rw-r--r--include/asm-x86/edac.h6
-rw-r--r--include/asm-x86/efi.h6
-rw-r--r--include/asm-x86/elf.h11
-rw-r--r--include/asm-x86/emergency-restart.h6
-rw-r--r--include/asm-x86/fb.h6
-rw-r--r--include/asm-x86/fixmap.h6
-rw-r--r--include/asm-x86/fixmap_32.h6
-rw-r--r--include/asm-x86/fixmap_64.h6
-rw-r--r--include/asm-x86/floppy.h6
-rw-r--r--include/asm-x86/ftrace.h6
-rw-r--r--include/asm-x86/futex.h12
-rw-r--r--include/asm-x86/gart.h12
-rw-r--r--include/asm-x86/genapic_32.h6
-rw-r--r--include/asm-x86/genapic_64.h6
-rw-r--r--include/asm-x86/geode.h6
-rw-r--r--include/asm-x86/gpio.h2
-rw-r--r--include/asm-x86/hardirq_32.h6
-rw-r--r--include/asm-x86/hardirq_64.h6
-rw-r--r--include/asm-x86/highmem.h6
-rw-r--r--include/asm-x86/hpet.h6
-rw-r--r--include/asm-x86/hugetlb.h6
-rw-r--r--include/asm-x86/hw_irq.h26
-rw-r--r--include/asm-x86/hypertransport.h6
-rw-r--r--include/asm-x86/i387.h7
-rw-r--r--include/asm-x86/i8253.h6
-rw-r--r--include/asm-x86/i8259.h6
-rw-r--r--include/asm-x86/ia32.h6
-rw-r--r--include/asm-x86/ia32_unistd.h6
-rw-r--r--include/asm-x86/idle.h6
-rw-r--r--include/asm-x86/intel_arch_perfmon.h6
-rw-r--r--include/asm-x86/io.h8
-rw-r--r--include/asm-x86/io_32.h6
-rw-r--r--include/asm-x86/io_64.h7
-rw-r--r--include/asm-x86/io_apic.h6
-rw-r--r--include/asm-x86/ioctls.h6
-rw-r--r--include/asm-x86/iommu.h6
-rw-r--r--include/asm-x86/ipcbuf.h6
-rw-r--r--include/asm-x86/ipi.h6
-rw-r--r--include/asm-x86/irq.h6
-rw-r--r--include/asm-x86/irq_regs_32.h6
-rw-r--r--include/asm-x86/irq_vectors.h6
-rw-r--r--include/asm-x86/ist.h6
-rw-r--r--include/asm-x86/k8.h6
-rw-r--r--include/asm-x86/kdebug.h6
-rw-r--r--include/asm-x86/kexec.h6
-rw-r--r--include/asm-x86/kgdb.h6
-rw-r--r--include/asm-x86/kmap_types.h6
-rw-r--r--include/asm-x86/kprobes.h6
-rw-r--r--include/asm-x86/kvm.h6
-rw-r--r--include/asm-x86/kvm_host.h8
-rw-r--r--include/asm-x86/kvm_para.h6
-rw-r--r--include/asm-x86/kvm_x86_emulate.h6
-rw-r--r--include/asm-x86/ldt.h6
-rw-r--r--include/asm-x86/lguest.h6
-rw-r--r--include/asm-x86/lguest_hcall.h6
-rw-r--r--include/asm-x86/linkage.h6
-rw-r--r--include/asm-x86/local.h6
-rw-r--r--include/asm-x86/mach-bigsmp/mach_apic.h6
-rw-r--r--include/asm-x86/mach-bigsmp/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-bigsmp/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-default/apm.h6
-rw-r--r--include/asm-x86/mach-default/mach_apic.h6
-rw-r--r--include/asm-x86/mach-default/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-default/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-default/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-default/mach_mpspec.h6
-rw-r--r--include/asm-x86/mach-default/mach_timer.h6
-rw-r--r--include/asm-x86/mach-default/mach_traps.h6
-rw-r--r--include/asm-x86/mach-default/mach_wakecpu.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_apic.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-es7000/mach_wakecpu.h6
-rw-r--r--include/asm-x86/mach-generic/gpio.h6
-rw-r--r--include/asm-x86/mach-generic/irq_vectors_limits.h6
-rw-r--r--include/asm-x86/mach-generic/mach_apic.h6
-rw-r--r--include/asm-x86/mach-generic/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-generic/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-generic/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-generic/mach_mpspec.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_apic.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_mpparse.h6
-rw-r--r--include/asm-x86/mach-numaq/mach_wakecpu.h6
-rw-r--r--include/asm-x86/mach-rdc321x/gpio.h9
-rw-r--r--include/asm-x86/mach-summit/irq_vectors_limits.h6
-rw-r--r--include/asm-x86/mach-summit/mach_apic.h6
-rw-r--r--include/asm-x86/mach-summit/mach_apicdef.h6
-rw-r--r--include/asm-x86/mach-summit/mach_ipi.h6
-rw-r--r--include/asm-x86/mach-summit/mach_mpparse.h6
-rw-r--r--include/asm-x86/math_emu.h6
-rw-r--r--include/asm-x86/mc146818rtc.h6
-rw-r--r--include/asm-x86/mca.h6
-rw-r--r--include/asm-x86/mca_dma.h6
-rw-r--r--include/asm-x86/mce.h6
-rw-r--r--include/asm-x86/mman.h6
-rw-r--r--include/asm-x86/mmconfig.h6
-rw-r--r--include/asm-x86/mmu.h11
-rw-r--r--include/asm-x86/mmu_context.h6
-rw-r--r--include/asm-x86/mmu_context_32.h6
-rw-r--r--include/asm-x86/mmu_context_64.h6
-rw-r--r--include/asm-x86/mmx.h6
-rw-r--r--include/asm-x86/mmzone_32.h6
-rw-r--r--include/asm-x86/mmzone_64.h6
-rw-r--r--include/asm-x86/module.h6
-rw-r--r--include/asm-x86/mpspec.h6
-rw-r--r--include/asm-x86/mpspec_def.h6
-rw-r--r--include/asm-x86/msgbuf.h6
-rw-r--r--include/asm-x86/msidef.h6
-rw-r--r--include/asm-x86/msr-index.h6
-rw-r--r--include/asm-x86/msr.h29
-rw-r--r--include/asm-x86/mtrr.h6
-rw-r--r--include/asm-x86/mutex_32.h6
-rw-r--r--include/asm-x86/mutex_64.h6
-rw-r--r--include/asm-x86/nmi.h7
-rw-r--r--include/asm-x86/nops.h6
-rw-r--r--include/asm-x86/numa_32.h6
-rw-r--r--include/asm-x86/numa_64.h6
-rw-r--r--include/asm-x86/numaq.h6
-rw-r--r--include/asm-x86/olpc.h6
-rw-r--r--include/asm-x86/page.h6
-rw-r--r--include/asm-x86/page_32.h10
-rw-r--r--include/asm-x86/page_64.h7
-rw-r--r--include/asm-x86/param.h6
-rw-r--r--include/asm-x86/paravirt.h48
-rw-r--r--include/asm-x86/parport.h6
-rw-r--r--include/asm-x86/pat.h6
-rw-r--r--include/asm-x86/pci-direct.h6
-rw-r--r--include/asm-x86/pci.h6
-rw-r--r--include/asm-x86/pci_32.h6
-rw-r--r--include/asm-x86/pci_64.h6
-rw-r--r--include/asm-x86/pda.h6
-rw-r--r--include/asm-x86/percpu.h6
-rw-r--r--include/asm-x86/pgalloc.h6
-rw-r--r--include/asm-x86/pgtable-2level-defs.h6
-rw-r--r--include/asm-x86/pgtable-2level.h8
-rw-r--r--include/asm-x86/pgtable-3level-defs.h6
-rw-r--r--include/asm-x86/pgtable-3level.h13
-rw-r--r--include/asm-x86/pgtable.h15
-rw-r--r--include/asm-x86/pgtable_32.h12
-rw-r--r--include/asm-x86/pgtable_64.h8
-rw-r--r--include/asm-x86/posix_types_32.h6
-rw-r--r--include/asm-x86/posix_types_64.h6
-rw-r--r--include/asm-x86/prctl.h6
-rw-r--r--include/asm-x86/processor-flags.h6
-rw-r--r--include/asm-x86/processor.h22
-rw-r--r--include/asm-x86/proto.h6
-rw-r--r--include/asm-x86/ptrace-abi.h20
-rw-r--r--include/asm-x86/ptrace.h52
-rw-r--r--include/asm-x86/pvclock-abi.h6
-rw-r--r--include/asm-x86/pvclock.h6
-rw-r--r--include/asm-x86/reboot.h6
-rw-r--r--include/asm-x86/reboot_fixups.h6
-rw-r--r--include/asm-x86/required-features.h6
-rw-r--r--include/asm-x86/resume-trace.h8
-rw-r--r--include/asm-x86/rio.h6
-rw-r--r--include/asm-x86/rwlock.h6
-rw-r--r--include/asm-x86/rwsem.h6
-rw-r--r--include/asm-x86/scatterlist.h6
-rw-r--r--include/asm-x86/seccomp_32.h6
-rw-r--r--include/asm-x86/seccomp_64.h6
-rw-r--r--include/asm-x86/segment.h6
-rw-r--r--include/asm-x86/sembuf.h6
-rw-r--r--include/asm-x86/serial.h6
-rw-r--r--include/asm-x86/setup.h7
-rw-r--r--include/asm-x86/shmbuf.h6
-rw-r--r--include/asm-x86/shmparam.h6
-rw-r--r--include/asm-x86/sigcontext.h6
-rw-r--r--include/asm-x86/sigcontext32.h6
-rw-r--r--include/asm-x86/siginfo.h6
-rw-r--r--include/asm-x86/signal.h9
-rw-r--r--include/asm-x86/smp.h10
-rw-r--r--include/asm-x86/socket.h6
-rw-r--r--include/asm-x86/sockios.h6
-rw-r--r--include/asm-x86/sparsemem.h6
-rw-r--r--include/asm-x86/spinlock.h12
-rw-r--r--include/asm-x86/spinlock_types.h6
-rw-r--r--include/asm-x86/srat.h6
-rw-r--r--include/asm-x86/stacktrace.h6
-rw-r--r--include/asm-x86/stat.h6
-rw-r--r--include/asm-x86/statfs.h6
-rw-r--r--include/asm-x86/string_32.h6
-rw-r--r--include/asm-x86/string_64.h6
-rw-r--r--include/asm-x86/suspend_32.h6
-rw-r--r--include/asm-x86/suspend_64.h6
-rw-r--r--include/asm-x86/swiotlb.h6
-rw-r--r--include/asm-x86/sync_bitops.h6
-rw-r--r--include/asm-x86/syscall.h211
-rw-r--r--include/asm-x86/syscalls.h93
-rw-r--r--include/asm-x86/system.h6
-rw-r--r--include/asm-x86/system_64.h6
-rw-r--r--include/asm-x86/tce.h6
-rw-r--r--include/asm-x86/termbits.h6
-rw-r--r--include/asm-x86/termios.h6
-rw-r--r--include/asm-x86/therm_throt.h6
-rw-r--r--include/asm-x86/thread_info.h10
-rw-r--r--include/asm-x86/time.h8
-rw-r--r--include/asm-x86/timer.h11
-rw-r--r--include/asm-x86/timex.h6
-rw-r--r--include/asm-x86/tlb.h6
-rw-r--r--include/asm-x86/tlbflush.h6
-rw-r--r--include/asm-x86/topology.h6
-rw-r--r--include/asm-x86/trampoline.h6
-rw-r--r--include/asm-x86/traps.h10
-rw-r--r--include/asm-x86/tsc.h6
-rw-r--r--include/asm-x86/types.h6
-rw-r--r--include/asm-x86/uaccess.h6
-rw-r--r--include/asm-x86/uaccess_32.h6
-rw-r--r--include/asm-x86/uaccess_64.h6
-rw-r--r--include/asm-x86/ucontext.h6
-rw-r--r--include/asm-x86/unaligned.h6
-rw-r--r--include/asm-x86/unistd_32.h6
-rw-r--r--include/asm-x86/unistd_64.h6
-rw-r--r--include/asm-x86/unwind.h6
-rw-r--r--include/asm-x86/user32.h6
-rw-r--r--include/asm-x86/user_32.h6
-rw-r--r--include/asm-x86/user_64.h6
-rw-r--r--include/asm-x86/uv/bios.h6
-rw-r--r--include/asm-x86/uv/uv_bau.h6
-rw-r--r--include/asm-x86/uv/uv_hub.h6
-rw-r--r--include/asm-x86/uv/uv_mmrs.h6
-rw-r--r--include/asm-x86/vdso.h6
-rw-r--r--include/asm-x86/vga.h6
-rw-r--r--include/asm-x86/vgtod.h6
-rw-r--r--include/asm-x86/visws/cobalt.h6
-rw-r--r--include/asm-x86/visws/lithium.h6
-rw-r--r--include/asm-x86/visws/piix4.h6
-rw-r--r--include/asm-x86/vm86.h6
-rw-r--r--include/asm-x86/vmi_time.h6
-rw-r--r--include/asm-x86/vsyscall.h6
-rw-r--r--include/asm-x86/xen/events.h6
-rw-r--r--include/asm-x86/xen/grant_table.h6
-rw-r--r--include/asm-x86/xen/hypercall.h6
-rw-r--r--include/asm-x86/xen/hypervisor.h6
-rw-r--r--include/asm-x86/xen/interface.h6
-rw-r--r--include/asm-x86/xen/interface_32.h6
-rw-r--r--include/asm-x86/xen/interface_64.h6
-rw-r--r--include/asm-x86/xen/page.h6
-rw-r--r--include/crypto/internal/rng.h26
-rw-r--r--include/crypto/internal/skcipher.h6
-rw-r--r--include/crypto/rng.h75
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/ata.h15
-rw-r--r--include/linux/bio.h108
-rw-r--r--include/linux/blkdev.h151
-rw-r--r--include/linux/blktrace_api.h62
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/crypto.h35
-rw-r--r--include/linux/device-mapper.h18
-rw-r--r--include/linux/device.h14
-rw-r--r--include/linux/dlm.h5
-rw-r--r--include/linux/dlm_device.h2
-rw-r--r--include/linux/elevator.h9
-rw-r--r--include/linux/fd.h8
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/genhd.h363
-rw-r--r--include/linux/gfs2_ondisk.h6
-rw-r--r--include/linux/klist.h3
-rw-r--r--include/linux/libata.h66
-rw-r--r--include/linux/major.h2
-rw-r--r--include/linux/mtd/blktrans.h2
-rw-r--r--include/linux/rcuclassic.h37
-rw-r--r--include/linux/rculist.h14
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcupreempt.h11
-rw-r--r--include/linux/security.h54
-rw-r--r--include/linux/string_helpers.h16
-rw-r--r--include/scsi/scsi_cmnd.h3
-rw-r--r--include/scsi/scsi_device.h21
-rw-r--r--include/scsi/scsi_host.h9
-rw-r--r--include/scsi/scsi_netlink.h62
-rw-r--r--include/scsi/scsi_transport.h3
-rw-r--r--include/scsi/scsi_transport_fc.h23
-rw-r--r--init/do_mounts.c4
-rw-r--r--kernel/rcuclassic.c337
-rw-r--r--kernel/rcupreempt.c8
-rw-r--r--kernel/rcupreempt_trace.c7
-rw-r--r--lib/Kconfig.debug48
-rw-r--r--lib/Makefile3
-rw-r--r--lib/klist.c96
-rw-r--r--lib/string_helpers.c64
-rw-r--r--mm/bounce.c2
-rw-r--r--mm/slob.c8
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/ax25/ax25_std_timer.c8
-rw-r--r--net/core/dev.c43
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/ipv4/tcp_hybla.c6
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--scripts/Makefile3
-rw-r--r--scripts/selinux/Makefile2
-rw-r--r--scripts/selinux/README2
-rw-r--r--scripts/selinux/install_policy.sh69
-rw-r--r--scripts/selinux/mdp/.gitignore2
-rw-r--r--scripts/selinux/mdp/Makefile5
-rw-r--r--scripts/selinux/mdp/dbus_contexts6
-rw-r--r--scripts/selinux/mdp/mdp.c242
-rw-r--r--security/Kconfig8
-rw-r--r--security/Makefile3
-rw-r--r--security/commoncap.c2
-rw-r--r--security/inode.c33
-rw-r--r--security/security.c8
-rw-r--r--security/selinux/Kconfig3
-rw-r--r--security/selinux/avc.c2
-rw-r--r--security/selinux/hooks.c62
-rw-r--r--security/selinux/include/avc.h4
-rw-r--r--security/selinux/include/security.h15
-rw-r--r--security/selinux/ss/avtab.c8
-rw-r--r--security/selinux/ss/conditional.c18
-rw-r--r--security/selinux/ss/conditional.h2
-rw-r--r--security/selinux/ss/ebitmap.c4
-rw-r--r--security/selinux/ss/hashtab.c6
-rw-r--r--security/selinux/ss/mls.c14
-rw-r--r--security/selinux/ss/policydb.c225
-rw-r--r--security/selinux/ss/policydb.h5
-rw-r--r--security/selinux/ss/services.c180
-rw-r--r--security/selinux/ss/sidtab.c12
-rw-r--r--security/smack/smack.h1
-rw-r--r--security/smack/smack_access.c10
-rw-r--r--security/smack/smackfs.c92
718 files changed, 26561 insertions, 18132 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 5b5aba404aac..73060819ed99 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -251,8 +251,6 @@ mono.txt
251 - how to execute Mono-based .NET binaries with the help of BINFMT_MISC. 251 - how to execute Mono-based .NET binaries with the help of BINFMT_MISC.
252moxa-smartio 252moxa-smartio
253 - file with info on installing/using Moxa multiport serial driver. 253 - file with info on installing/using Moxa multiport serial driver.
254mtrr.txt
255 - how to use PPro Memory Type Range Registers to increase performance.
256mutex-design.txt 254mutex-design.txt
257 - info on the generic mutex subsystem. 255 - info on the generic mutex subsystem.
258namespaces/ 256namespaces/
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index d8b63d164e41..b8e86460046e 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -337,7 +337,7 @@ With scatterlists, you use the resulting mapping like this:
337 int i, count = dma_map_sg(dev, sglist, nents, direction); 337 int i, count = dma_map_sg(dev, sglist, nents, direction);
338 struct scatterlist *sg; 338 struct scatterlist *sg;
339 339
340 for (i = 0, sg = sglist; i < count; i++, sg++) { 340 for_each_sg(sglist, sg, count, i) {
341 hw_address[i] = sg_dma_address(sg); 341 hw_address[i] = sg_dma_address(sg);
342 hw_len[i] = sg_dma_len(sg); 342 hw_len[i] = sg_dma_len(sg);
343 } 343 }
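
For reference, the for_each_sg() form that the example now uses can be put into a full (if minimal) helper like the sketch below; the function name, the DMA direction and the hw_address[]/hw_len[] arrays are assumed driver-local details, not something the patch specifies.

/* Sketch only: map a scatterlist and record the DMA addresses/lengths. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int map_and_record(struct device *dev, struct scatterlist *sglist,
			  int nents, dma_addr_t *hw_address,
			  unsigned int *hw_len)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -EIO;

	/* for_each_sg() also walks chained scatterlists; plain sg++ does not. */
	for_each_sg(sglist, sg, count, i) {
		hw_address[i] = sg_dma_address(sg);
		hw_len[i] = sg_dma_len(sg);
	}

	return count;
}
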
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index b7b1482f6e04..9d0058e788e5 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -283,6 +283,7 @@ X!Earch/x86/kernel/mca_32.c
283 <chapter id="security"> 283 <chapter id="security">
284 <title>Security Framework</title> 284 <title>Security Framework</title>
285!Isecurity/security.c 285!Isecurity/security.c
286!Esecurity/inode.c
286 </chapter> 287 </chapter>
287 288
288 <chapter id="audit"> 289 <chapter id="audit">
@@ -364,6 +365,10 @@ X!Edrivers/pnp/system.c
364!Eblock/blk-barrier.c 365!Eblock/blk-barrier.c
365!Eblock/blk-tag.c 366!Eblock/blk-tag.c
366!Iblock/blk-tag.c 367!Iblock/blk-tag.c
368!Eblock/blk-integrity.c
369!Iblock/blktrace.c
370!Iblock/genhd.c
371!Eblock/genhd.c
367 </chapter> 372 </chapter>
368 373
369 <chapter id="chrdev"> 374 <chapter id="chrdev">
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index cf5562cbe356..6e253407b3dc 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -210,7 +210,7 @@ over a rather long period of time, but improvements are always welcome!
210 number of updates per grace period. 210 number of updates per grace period.
211 211
2129. All RCU list-traversal primitives, which include 2129. All RCU list-traversal primitives, which include
213 rcu_dereference(), list_for_each_rcu(), list_for_each_entry_rcu(), 213 rcu_dereference(), list_for_each_entry_rcu(),
214 list_for_each_continue_rcu(), and list_for_each_safe_rcu(), 214 list_for_each_continue_rcu(), and list_for_each_safe_rcu(),
215 must be either within an RCU read-side critical section or 215 must be either within an RCU read-side critical section or
216 must be protected by appropriate update-side locks. RCU 216 must be protected by appropriate update-side locks. RCU
diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index 451de2ad8329..4202ad093130 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
@@ -29,9 +29,9 @@ release_referenced() delete()
29 } 29 }
30 30
31If this list/array is made lock free using RCU as in changing the 31If this list/array is made lock free using RCU as in changing the
32write_lock() in add() and delete() to spin_lock and changing read_lock 32write_lock() in add() and delete() to spin_lock() and changing read_lock()
33in search_and_reference to rcu_read_lock(), the atomic_get in 33in search_and_reference() to rcu_read_lock(), the atomic_inc() in
34search_and_reference could potentially hold reference to an element which 34search_and_reference() could potentially hold reference to an element which
35has already been deleted from the list/array. Use atomic_inc_not_zero() 35has already been deleted from the list/array. Use atomic_inc_not_zero()
36in this scenario as follows: 36in this scenario as follows:
37 37
@@ -40,20 +40,20 @@ add() search_and_reference()
40{ { 40{ {
41 alloc_object rcu_read_lock(); 41 alloc_object rcu_read_lock();
42 ... search_for_element 42 ... search_for_element
43 atomic_set(&el->rc, 1); if (atomic_inc_not_zero(&el->rc)) { 43 atomic_set(&el->rc, 1); if (!atomic_inc_not_zero(&el->rc)) {
44 write_lock(&list_lock); rcu_read_unlock(); 44 spin_lock(&list_lock); rcu_read_unlock();
45 return FAIL; 45 return FAIL;
46 add_element } 46 add_element }
47 ... ... 47 ... ...
48 write_unlock(&list_lock); rcu_read_unlock(); 48 spin_unlock(&list_lock); rcu_read_unlock();
49} } 49} }
503. 4. 503. 4.
51release_referenced() delete() 51release_referenced() delete()
52{ { 52{ {
53 ... write_lock(&list_lock); 53 ... spin_lock(&list_lock);
54 if (atomic_dec_and_test(&el->rc)) ... 54 if (atomic_dec_and_test(&el->rc)) ...
55 call_rcu(&el->head, el_free); delete_element 55 call_rcu(&el->head, el_free); delete_element
56 ... write_unlock(&list_lock); 56 ... spin_unlock(&list_lock);
57} ... 57} ...
58 if (atomic_dec_and_test(&el->rc)) 58 if (atomic_dec_and_test(&el->rc))
59 call_rcu(&el->head, el_free); 59 call_rcu(&el->head, el_free);
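
Written out as ordinary C, the corrected pattern reads roughly like the sketch below. The element type, list head and key-based search are assumptions made for illustration; the rcu_read_lock()/atomic_inc_not_zero()/call_rcu() usage follows the text above.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct el {
	struct list_head list;
	atomic_t rc;
	struct rcu_head head;
	int key;
};

static LIST_HEAD(el_list);
static DEFINE_SPINLOCK(list_lock);

static void el_free(struct rcu_head *head)
{
	kfree(container_of(head, struct el, head));
}

/* Reader: take a reference only if the element is still live. */
static struct el *search_and_reference(int key)
{
	struct el *p, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &el_list, list) {
		if (p->key == key && atomic_inc_not_zero(&p->rc)) {
			found = p;		/* reference now held */
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Drop a reference taken by search_and_reference(). */
static void release_referenced(struct el *p)
{
	if (atomic_dec_and_test(&p->rc))
		call_rcu(&p->head, el_free);
}

/* Updater: unlink under the list lock, then drop the list's own reference. */
static void delete_element(struct el *p)
{
	spin_lock(&list_lock);
	list_del_rcu(&p->list);
	spin_unlock(&list_lock);
	if (atomic_dec_and_test(&p->rc))
		call_rcu(&p->head, el_free);
}
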
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index e04d643a9f57..96170824a717 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -786,8 +786,6 @@ RCU pointer/list traversal:
786 list_for_each_entry_rcu 786 list_for_each_entry_rcu
787 hlist_for_each_entry_rcu 787 hlist_for_each_entry_rcu
788 788
789 list_for_each_rcu (to be deprecated in favor of
790 list_for_each_entry_rcu)
791 list_for_each_continue_rcu (to be deprecated in favor of new 789 list_for_each_continue_rcu (to be deprecated in favor of new
792 list_for_each_entry_continue_rcu) 790 list_for_each_entry_continue_rcu)
793 791
diff --git a/Documentation/SELinux.txt b/Documentation/SELinux.txt
new file mode 100644
index 000000000000..07eae00f3314
--- /dev/null
+++ b/Documentation/SELinux.txt
@@ -0,0 +1,27 @@
1If you want to use SELinux, chances are you will want
2to use the distro-provided policies, or install the
3latest reference policy release from
4 http://oss.tresys.com/projects/refpolicy
5
6However, if you want to install a dummy policy for
7testing, you can do using 'mdp' provided under
8scripts/selinux. Note that this requires the selinux
9userspace to be installed - in particular you will
10need checkpolicy to compile a kernel, and setfiles and
11fixfiles to label the filesystem.
12
13 1. Compile the kernel with selinux enabled.
14 2. Type 'make' to compile mdp.
15 3. Make sure that you are not running with
16 SELinux enabled and a real policy. If
17 you are, reboot with selinux disabled
18 before continuing.
19 4. Run install_policy.sh:
20 cd scripts/selinux
21 sh install_policy.sh
22
23Step 4 will create a new dummy policy valid for your
24kernel, with a single selinux user, role, and type.
25It will compile the policy, will set your SELINUXTYPE to
26dummy in /etc/selinux/config, install the compiled policy
27as 'dummy', and relabel your filesystem.
diff --git a/Documentation/block/deadline-iosched.txt b/Documentation/block/deadline-iosched.txt
index c23cab13c3d1..72576769e0f4 100644
--- a/Documentation/block/deadline-iosched.txt
+++ b/Documentation/block/deadline-iosched.txt
@@ -30,12 +30,18 @@ write_expire (in ms)
30Similar to read_expire mentioned above, but for writes. 30Similar to read_expire mentioned above, but for writes.
31 31
32 32
33fifo_batch 33fifo_batch (number of requests)
34---------- 34----------
35 35
36When a read request expires its deadline, we must move some requests from 36Requests are grouped into ``batches'' of a particular data direction (read or
37the sorted io scheduler list to the block device dispatch queue. fifo_batch 37write) which are serviced in increasing sector order. To limit extra seeking,
38controls how many requests we move. 38deadline expiries are only checked between batches. fifo_batch controls the
39maximum number of requests per batch.
40
41This parameter tunes the balance between per-request latency and aggregate
42throughput. When low latency is the primary concern, smaller is better (where
43a value of 1 yields first-come first-served behaviour). Increasing fifo_batch
44generally improves throughput, at the cost of latency variation.
39 45
40 46
41writes_starved (number of dispatches) 47writes_starved (number of dispatches)
diff --git a/Documentation/cdrom/ide-cd b/Documentation/cdrom/ide-cd
index 91c0dcc6fa5c..2c558cd6c1ef 100644
--- a/Documentation/cdrom/ide-cd
+++ b/Documentation/cdrom/ide-cd
@@ -145,8 +145,7 @@ useful for reading photocds.
145 145
146To play an audio CD, you should first unmount and remove any data 146To play an audio CD, you should first unmount and remove any data
147CDROM. Any of the CDROM player programs should then work (workman, 147CDROM. Any of the CDROM player programs should then work (workman,
148workbone, cdplayer, etc.). Lacking anything else, you could use the 148workbone, cdplayer, etc.).
149cdtester program in Documentation/cdrom/sbpcd.
150 149
151On a few drives, you can read digital audio directly using a program 150On a few drives, you can read digital audio directly using a program
152such as cdda2wav. The only types of drive which I've heard support 151such as cdda2wav. The only types of drive which I've heard support
diff --git a/Documentation/cpu-freq/index.txt b/Documentation/cpu-freq/index.txt
index ffdb5323df37..3d0b915035b9 100644
--- a/Documentation/cpu-freq/index.txt
+++ b/Documentation/cpu-freq/index.txt
@@ -35,11 +35,9 @@ Mailing List
35------------ 35------------
36There is a CPU frequency changing CVS commit and general list where 36There is a CPU frequency changing CVS commit and general list where
37you can report bugs, problems or submit patches. To post a message, 37you can report bugs, problems or submit patches. To post a message,
38send an email to cpufreq@lists.linux.org.uk, to subscribe go to 38send an email to cpufreq@vger.kernel.org, to subscribe go to
39http://lists.linux.org.uk/mailman/listinfo/cpufreq. Previous post to the 39http://vger.kernel.org/vger-lists.html#cpufreq and follow the
40mailing list are available to subscribers at 40instructions there.
41http://lists.linux.org.uk/mailman/private/cpufreq/.
42
43 41
44Links 42Links
45----- 43-----
@@ -50,7 +48,7 @@ how to access the CVS repository:
50* http://cvs.arm.linux.org.uk/ 48* http://cvs.arm.linux.org.uk/
51 49
52the CPUFreq Mailing list: 50the CPUFreq Mailing list:
53* http://lists.linux.org.uk/mailman/listinfo/cpufreq 51* http://vger.kernel.org/vger-lists.html#cpufreq
54 52
55Clock and voltage scaling for the SA-1100: 53Clock and voltage scaling for the SA-1100:
56* http://www.lartmaker.nl/projects/scaling 54* http://www.lartmaker.nl/projects/scaling
diff --git a/Documentation/hwmon/adt7473 b/Documentation/hwmon/adt7473
index 2126de34c711..1cbf671822e2 100644
--- a/Documentation/hwmon/adt7473
+++ b/Documentation/hwmon/adt7473
@@ -14,14 +14,14 @@ Description
14 14
15This driver implements support for the Analog Devices ADT7473 chip family. 15This driver implements support for the Analog Devices ADT7473 chip family.
16 16
17The LM85 uses the 2-wire interface compatible with the SMBUS 2.0 17The ADT7473 uses the 2-wire interface compatible with the SMBUS 2.0
18specification. Using an analog to digital converter it measures three (3) 18specification. Using an analog to digital converter it measures three (3)
19temperatures and two (2) voltages. It has three (3) 16-bit counters for 19temperatures and two (2) voltages. It has four (4) 16-bit counters for
20measuring fan speed. There are three (3) PWM outputs that can be used 20measuring fan speed. There are three (3) PWM outputs that can be used
21to control fan speed. 21to control fan speed.
22 22
23A sophisticated control system for the PWM outputs is designed into the 23A sophisticated control system for the PWM outputs is designed into the
24LM85 that allows fan speed to be adjusted automatically based on any of the 24ADT7473 that allows fan speed to be adjusted automatically based on any of the
25three temperature sensors. Each PWM output is individually adjustable and 25three temperature sensors. Each PWM output is individually adjustable and
26programmable. Once configured, the ADT7473 will adjust the PWM outputs in 26programmable. Once configured, the ADT7473 will adjust the PWM outputs in
27response to the measured temperatures without further host intervention. 27response to the measured temperatures without further host intervention.
@@ -46,14 +46,6 @@ from the raw value to get the temperature value.
46The Analog Devices datasheet is very detailed and describes a procedure for 46The Analog Devices datasheet is very detailed and describes a procedure for
47determining an optimal configuration for the automatic PWM control. 47determining an optimal configuration for the automatic PWM control.
48 48
49Hardware Configurations
50-----------------------
51
52The ADT7473 chips have an optional SMBALERT output that can be used to
53signal the chipset in case a limit is exceeded or the temperature sensors
54fail. Individual sensor interrupts can be masked so they won't trigger
55SMBALERT. The SMBALERT output if configured replaces the PWM2 function.
56
57Configuration Notes 49Configuration Notes
58------------------- 50-------------------
59 51
@@ -61,8 +53,8 @@ Besides standard interfaces driver adds the following:
61 53
62* PWM Control 54* PWM Control
63 55
64* pwm#_auto_point1_pwm and pwm#_auto_point1_temp and 56* pwm#_auto_point1_pwm and temp#_auto_point1_temp and
65* pwm#_auto_point2_pwm and pwm#_auto_point2_temp - 57* pwm#_auto_point2_pwm and temp#_auto_point2_temp -
66 58
67point1: Set the pwm speed at a lower temperature bound. 59point1: Set the pwm speed at a lower temperature bound.
68point2: Set the pwm speed at a higher temperature bound. 60point2: Set the pwm speed at a higher temperature bound.
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 2d845730d4e0..6dbfd5efd991 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -329,6 +329,10 @@ power[1-*]_average Average power use
329 Unit: microWatt 329 Unit: microWatt
330 RO 330 RO
331 331
332power[1-*]_average_interval Power use averaging interval
333 Unit: milliseconds
334 RW
335
332power[1-*]_average_highest Historical average maximum power use 336power[1-*]_average_highest Historical average maximum power use
333 Unit: microWatt 337 Unit: microWatt
334 RO 338 RO
@@ -354,6 +358,14 @@ power[1-*]_reset_history Reset input_highest, input_lowest,
354 WO 358 WO
355 359
356********** 360**********
361* Energy *
362**********
363
364energy[1-*]_input Cumulative energy use
365 Unit: microJoule
366 RO
367
368**********
357* Alarms * 369* Alarms *
358********** 370**********
359 371
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1150444a21ab..329dcabe4c5e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -463,12 +463,6 @@ and is between 256 and 4096 characters. It is defined in the file
463 Range: 0 - 8192 463 Range: 0 - 8192
464 Default: 64 464 Default: 64
465 465
466 disable_8254_timer
467 enable_8254_timer
468 [IA32/X86_64] Disable/Enable interrupt 0 timer routing
469 over the 8254 in addition to over the IO-APIC. The
470 kernel tries to set a sensible default.
471
472 hpet= [X86-32,HPET] option to control HPET usage 466 hpet= [X86-32,HPET] option to control HPET usage
473 Format: { enable (default) | disable | force } 467 Format: { enable (default) | disable | force }
474 disable: disable HPET and use PIT instead 468 disable: disable HPET and use PIT instead
@@ -1882,6 +1876,12 @@ and is between 256 and 4096 characters. It is defined in the file
1882 shapers= [NET] 1876 shapers= [NET]
1883 Maximal number of shapers. 1877 Maximal number of shapers.
1884 1878
1879 show_msr= [x86] show boot-time MSR settings
1880 Format: { <integer> }
1881 Show boot-time (BIOS-initialized) MSR settings.
1882 The parameter means the number of CPUs to show,
1883 for example 1 means boot CPU only.
1884
1885 sim710= [SCSI,HW] 1885 sim710= [SCSI,HW]
1886 See header of drivers/scsi/sim710.c. 1886 See header of drivers/scsi/sim710.c.
1887 1887
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
index 75143f0c23b6..38d324d62b25 100644
--- a/Documentation/scsi/scsi_fc_transport.txt
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -436,6 +436,42 @@ Other:
436 was updated to remove all vports for the fc_host as well. 436 was updated to remove all vports for the fc_host as well.
437 437
438 438
439Transport supplied functions
440----------------------------
441
442The following functions are supplied by the FC-transport for use by LLDs.
443
444 fc_vport_create - create a vport
445 fc_vport_terminate - detach and remove a vport
446
447Details:
448
449/**
450 * fc_vport_create - Admin App or LLDD requests creation of a vport
451 * @shost: scsi host the virtual port is connected to.
452 * @ids: The world wide names, FC4 port roles, etc for
453 * the virtual port.
454 *
455 * Notes:
456 * This routine assumes no locks are held on entry.
457 */
458struct fc_vport *
459fc_vport_create(struct Scsi_Host *shost, struct fc_vport_identifiers *ids)
460
461/**
462 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
463 * @vport: fc_vport to be terminated
464 *
465 * Calls the LLDD vport_delete() function, then deallocates and removes
466 * the vport from the shost and object tree.
467 *
468 * Notes:
469 * This routine assumes no locks are held on entry.
470 */
471int
472fc_vport_terminate(struct fc_vport *vport)
473
474
439Credits 475Credits
440======= 476=======
441The following people have contributed to this document: 477The following people have contributed to this document:
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
new file mode 100644
index 000000000000..dbe3377754af
--- /dev/null
+++ b/Documentation/x86/00-INDEX
@@ -0,0 +1,4 @@
100-INDEX
2 - this file
3mtrr.txt
4 - how to use x86 Memory Type Range Registers to increase performance
diff --git a/Documentation/x86/i386/boot.txt b/Documentation/x86/boot.txt
index 147bfe511cdd..83c0033ee9e0 100644
--- a/Documentation/x86/i386/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -308,7 +308,7 @@ Protocol: 2.00+
308 308
309Field name: start_sys 309Field name: start_sys
310Type: read 310Type: read
311Offset/size: 0x20c/4 311Offset/size: 0x20c/2
312Protocol: 2.00+ 312Protocol: 2.00+
313 313
314 The load low segment (0x1000). Obsolete. 314 The load low segment (0x1000). Obsolete.
diff --git a/Documentation/mtrr.txt b/Documentation/x86/mtrr.txt
index c39ac395970e..cc071dc333c2 100644
--- a/Documentation/mtrr.txt
+++ b/Documentation/x86/mtrr.txt
@@ -18,7 +18,7 @@ Richard Gooch
18 The AMD K6-2 (stepping 8 and above) and K6-3 processors have two 18 The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
19 MTRRs. These are supported. The AMD Athlon family provide 8 Intel 19 MTRRs. These are supported. The AMD Athlon family provide 8 Intel
20 style MTRRs. 20 style MTRRs.
21 21
22 The Centaur C6 (WinChip) has 8 MCRs, allowing write-combining. These 22 The Centaur C6 (WinChip) has 8 MCRs, allowing write-combining. These
23 are supported. 23 are supported.
24 24
@@ -87,7 +87,7 @@ reg00: base=0x00000000 ( 0MB), size= 64MB: write-back, count=1
87reg01: base=0xfb000000 (4016MB), size= 16MB: write-combining, count=1 87reg01: base=0xfb000000 (4016MB), size= 16MB: write-combining, count=1
88reg02: base=0xfb000000 (4016MB), size= 4kB: uncachable, count=1 88reg02: base=0xfb000000 (4016MB), size= 4kB: uncachable, count=1
89 89
90Some cards (especially Voodoo Graphics boards) need this 4 kB area 90Some cards (especially Voodoo Graphics boards) need this 4 kB area
91excluded from the beginning of the region because it is used for 91excluded from the beginning of the region because it is used for
92registers. 92registers.
93 93
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
index 17965f927c15..c93ff5f4c0dd 100644
--- a/Documentation/x86/pat.txt
+++ b/Documentation/x86/pat.txt
@@ -14,6 +14,10 @@ PAT allows for different types of memory attributes. The most commonly used
14ones that will be supported at this time are Write-back, Uncached, 14ones that will be supported at this time are Write-back, Uncached,
15Write-combined and Uncached Minus. 15Write-combined and Uncached Minus.
16 16
17
18PAT APIs
19--------
20
17There are many different APIs in the kernel that allows setting of memory 21There are many different APIs in the kernel that allows setting of memory
18attributes at the page level. In order to avoid aliasing, these interfaces 22attributes at the page level. In order to avoid aliasing, these interfaces
19should be used thoughtfully. Below is a table of interfaces available, 23should be used thoughtfully. Below is a table of interfaces available,
@@ -26,38 +30,38 @@ address range to avoid any aliasing.
26API | RAM | ACPI,... | Reserved/Holes | 30API | RAM | ACPI,... | Reserved/Holes |
27-----------------------|----------|------------|------------------| 31-----------------------|----------|------------|------------------|
28 | | | | 32 | | | |
29ioremap | -- | UC | UC | 33ioremap | -- | UC- | UC- |
30 | | | | 34 | | | |
31ioremap_cache | -- | WB | WB | 35ioremap_cache | -- | WB | WB |
32 | | | | 36 | | | |
33ioremap_nocache | -- | UC | UC | 37ioremap_nocache | -- | UC- | UC- |
34 | | | | 38 | | | |
35ioremap_wc | -- | -- | WC | 39ioremap_wc | -- | -- | WC |
36 | | | | 40 | | | |
37set_memory_uc | UC | -- | -- | 41set_memory_uc | UC- | -- | -- |
38 set_memory_wb | | | | 42 set_memory_wb | | | |
39 | | | | 43 | | | |
40set_memory_wc | WC | -- | -- | 44set_memory_wc | WC | -- | -- |
41 set_memory_wb | | | | 45 set_memory_wb | | | |
42 | | | | 46 | | | |
43pci sysfs resource | -- | -- | UC | 47pci sysfs resource | -- | -- | UC- |
44 | | | | 48 | | | |
45pci sysfs resource_wc | -- | -- | WC | 49pci sysfs resource_wc | -- | -- | WC |
46 is IORESOURCE_PREFETCH| | | | 50 is IORESOURCE_PREFETCH| | | |
47 | | | | 51 | | | |
48pci proc | -- | -- | UC | 52pci proc | -- | -- | UC- |
49 !PCIIOC_WRITE_COMBINE | | | | 53 !PCIIOC_WRITE_COMBINE | | | |
50 | | | | 54 | | | |
51pci proc | -- | -- | WC | 55pci proc | -- | -- | WC |
52 PCIIOC_WRITE_COMBINE | | | | 56 PCIIOC_WRITE_COMBINE | | | |
53 | | | | 57 | | | |
54/dev/mem | -- | UC | UC | 58/dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
55 read-write | | | | 59 read-write | | | |
56 | | | | 60 | | | |
57/dev/mem | -- | UC | UC | 61/dev/mem | -- | UC- | UC- |
58 mmap SYNC flag | | | | 62 mmap SYNC flag | | | |
59 | | | | 63 | | | |
60/dev/mem | -- | WB/WC/UC | WB/WC/UC | 64/dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
61 mmap !SYNC flag | |(from exist-| (from exist- | 65 mmap !SYNC flag | |(from exist-| (from exist- |
62 and | | ing alias)| ing alias) | 66 and | | ing alias)| ing alias) |
63 any alias to this area| | | | 67 any alias to this area| | | |
@@ -68,7 +72,7 @@ pci proc | -- | -- | WC |
68 and | | | | 72 and | | | |
69 MTRR says WB | | | | 73 MTRR says WB | | | |
70 | | | | 74 | | | |
71/dev/mem | -- | -- | UC_MINUS | 75/dev/mem | -- | -- | UC- |
72 mmap !SYNC flag | | | | 76 mmap !SYNC flag | | | |
73 no alias to this area | | | | 77 no alias to this area | | | |
74 and | | | | 78 and | | | |
@@ -98,3 +102,35 @@ types.
98 102
99Drivers should use set_memory_[uc|wc] to set access type for RAM ranges. 103Drivers should use set_memory_[uc|wc] to set access type for RAM ranges.
100 104
105
106PAT debugging
107-------------
108
109With CONFIG_DEBUG_FS enabled, PAT memtype list can be examined by
110
111# mount -t debugfs debugfs /sys/kernel/debug
112# cat /sys/kernel/debug/x86/pat_memtype_list
113PAT memtype list:
114uncached-minus @ 0x7fadf000-0x7fae0000
115uncached-minus @ 0x7fb19000-0x7fb1a000
116uncached-minus @ 0x7fb1a000-0x7fb1b000
117uncached-minus @ 0x7fb1b000-0x7fb1c000
118uncached-minus @ 0x7fb1c000-0x7fb1d000
119uncached-minus @ 0x7fb1d000-0x7fb1e000
120uncached-minus @ 0x7fb1e000-0x7fb25000
121uncached-minus @ 0x7fb25000-0x7fb26000
122uncached-minus @ 0x7fb26000-0x7fb27000
123uncached-minus @ 0x7fb27000-0x7fb28000
124uncached-minus @ 0x7fb28000-0x7fb2e000
125uncached-minus @ 0x7fb2e000-0x7fb2f000
126uncached-minus @ 0x7fb2f000-0x7fb30000
127uncached-minus @ 0x7fb31000-0x7fb32000
128uncached-minus @ 0x80000000-0x90000000
129
130This list shows physical address ranges and various PAT settings used to
131access those physical address ranges.
132
133Another, more verbose way of getting PAT related debug messages is with
134"debugpat" boot parameter. With this parameter, various debug messages are
135printed to dmesg log.
136
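
As a rough illustration of the ioremap_wc row above, a driver mapping a prefetchable region (say, a framebuffer BAR) write-combined might look like this sketch; the PCI plumbing and the uncached fallback policy are assumptions, not something the patch prescribes.

#include <linux/pci.h>
#include <linux/io.h>

/* Sketch only: map a prefetchable BAR write-combined, uncached as fallback. */
static void __iomem *map_framebuffer_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);
	void __iomem *fb;

	fb = ioremap_wc(start, len);			/* WC per the table above */
	if (!fb)
		fb = ioremap_nocache(start, len);	/* fall back to UC- */
	return fb;
}
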
diff --git a/Documentation/x86/i386/usb-legacy-support.txt b/Documentation/x86/usb-legacy-support.txt
index 1894cdfc69d9..1894cdfc69d9 100644
--- a/Documentation/x86/i386/usb-legacy-support.txt
+++ b/Documentation/x86/usb-legacy-support.txt
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index b0c7b6c4abda..72ffb5373ec7 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -54,10 +54,6 @@ APICs
54 apicmaintimer. Useful when your PIT timer is totally 54 apicmaintimer. Useful when your PIT timer is totally
55 broken. 55 broken.
56 56
57 disable_8254_timer / enable_8254_timer
58 Enable interrupt 0 timer routing over the 8254 in addition to over
59 the IO-APIC. The kernel tries to set a sensible default.
60
61Early Console 57Early Console
62 58
63 syntax: earlyprintk=vga 59 syntax: earlyprintk=vga
diff --git a/Documentation/x86/i386/zero-page.txt b/Documentation/x86/zero-page.txt
index 169ad423a3d1..169ad423a3d1 100644
--- a/Documentation/x86/i386/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index 8dae4555f10e..7a03bd5a91a3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3649,8 +3649,9 @@ M: jmorris@namei.org
3649P: Eric Paris 3649P: Eric Paris
3650M: eparis@parisplace.org 3650M: eparis@parisplace.org
3651L: linux-kernel@vger.kernel.org (kernel issues) 3651L: linux-kernel@vger.kernel.org (kernel issues)
3652L: selinux@tycho.nsa.gov (subscribers-only, general discussion) 3652L: selinux@tycho.nsa.gov (subscribers-only, general discussion)
3653W: http://www.nsa.gov/selinux 3653W: http://selinuxproject.org
3654T: git kernel.org:pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
3654S: Supported 3655S: Supported
3655 3656
3656SENSABLE PHANTOM 3657SENSABLE PHANTOM
diff --git a/Makefile b/Makefile
index ce9eceb2538e..16e3fbb968a8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 27 3SUBLEVEL = 27
4EXTRAVERSION = -rc9 4EXTRAVERSION =
5NAME = Rotary Wombat 5NAME = Rotary Wombat
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index dd0e5b9b64e8..54847fe1e564 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -9,6 +9,8 @@
9#include <asm/sibyte/sb1250_genbus.h> 9#include <asm/sibyte/sb1250_genbus.h>
10#include <asm/sibyte/sb1250_regs.h> 10#include <asm/sibyte/sb1250_regs.h>
11 11
12#if defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR)
13
12#define DRV_NAME "pata-swarm" 14#define DRV_NAME "pata-swarm"
13 15
14#define SWARM_IDE_SHIFT 5 16#define SWARM_IDE_SHIFT 5
@@ -79,3 +81,5 @@ static int __init swarm_pata_init(void)
79} 81}
80 82
81device_initcall(swarm_pata_init); 83device_initcall(swarm_pata_init);
84
85#endif /* defined(CONFIG_SIBYTE_SWARM) || defined(CONFIG_SIBYTE_LITTLESUR) */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ed92864d1325..97f0d2b6dc0c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,7 @@ config X86
29 select HAVE_FTRACE 29 select HAVE_FTRACE
30 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 30 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
31 select HAVE_ARCH_KGDB if !X86_VOYAGER 31 select HAVE_ARCH_KGDB if !X86_VOYAGER
32 select HAVE_ARCH_TRACEHOOK
32 select HAVE_GENERIC_DMA_COHERENT if X86_32 33 select HAVE_GENERIC_DMA_COHERENT if X86_32
33 select HAVE_EFFICIENT_UNALIGNED_ACCESS 34 select HAVE_EFFICIENT_UNALIGNED_ACCESS
34 35
@@ -1020,7 +1021,7 @@ config HAVE_ARCH_ALLOC_REMAP
1020 1021
1021config ARCH_FLATMEM_ENABLE 1022config ARCH_FLATMEM_ENABLE
1022 def_bool y 1023 def_bool y
1023 depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && X86_PC && !NUMA 1024 depends on X86_32 && ARCH_SELECT_MEMORY_MODEL && !NUMA
1024 1025
1025config ARCH_DISCONTIGMEM_ENABLE 1026config ARCH_DISCONTIGMEM_ENABLE
1026 def_bool y 1027 def_bool y
@@ -1036,7 +1037,7 @@ config ARCH_SPARSEMEM_DEFAULT
1036 1037
1037config ARCH_SPARSEMEM_ENABLE 1038config ARCH_SPARSEMEM_ENABLE
1038 def_bool y 1039 def_bool y
1039 depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) 1040 depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
1040 select SPARSEMEM_STATIC if X86_32 1041 select SPARSEMEM_STATIC if X86_32
1041 select SPARSEMEM_VMEMMAP_ENABLE if X86_64 1042 select SPARSEMEM_VMEMMAP_ENABLE if X86_64
1042 1043
@@ -1117,10 +1118,10 @@ config MTRR
1117 You can safely say Y even if your machine doesn't have MTRRs, you'll 1118 You can safely say Y even if your machine doesn't have MTRRs, you'll
1118 just add about 9 KB to your kernel. 1119 just add about 9 KB to your kernel.
1119 1120
1120 See <file:Documentation/mtrr.txt> for more information. 1121 See <file:Documentation/x86/mtrr.txt> for more information.
1121 1122
1122config MTRR_SANITIZER 1123config MTRR_SANITIZER
1123 bool 1124 def_bool y
1124 prompt "MTRR cleanup support" 1125 prompt "MTRR cleanup support"
1125 depends on MTRR 1126 depends on MTRR
1126 help 1127 help
@@ -1131,7 +1132,7 @@ config MTRR_SANITIZER
1131 The largest mtrr entry size for a continuous block can be set with 1132 The largest mtrr entry size for a continuous block can be set with
1132 mtrr_chunk_size. 1133 mtrr_chunk_size.
1133 1134
1134 If unsure, say N. 1135 If unsure, say Y.
1135 1136
1136config MTRR_SANITIZER_ENABLE_DEFAULT 1137config MTRR_SANITIZER_ENABLE_DEFAULT
1137 int "MTRR cleanup enable value (0-1)" 1138 int "MTRR cleanup enable value (0-1)"
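
With the sanitizer now defaulting to Y, the cleanup can still be tuned from the kernel command line; for example, booting with something like mtrr_chunk_size=128M mtrr_gran_size=64M constrains the layout search implemented in arch/x86/kernel/cpu/mtrr/main.c below, and the new mtrr_cleanup_debug parameter added in that file makes the search verbose. The example values are illustrative only, not a recommendation from this series.
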
@@ -1191,7 +1192,6 @@ config IRQBALANCE
1191config SECCOMP 1192config SECCOMP
1192 def_bool y 1193 def_bool y
1193 prompt "Enable seccomp to safely compute untrusted bytecode" 1194 prompt "Enable seccomp to safely compute untrusted bytecode"
1194 depends on PROC_FS
1195 help 1195 help
1196 This kernel feature is useful for number crunching applications 1196 This kernel feature is useful for number crunching applications
1197 that may need to compute untrusted bytecode during their 1197 that may need to compute untrusted bytecode during their
@@ -1199,7 +1199,7 @@ config SECCOMP
1199 the process as file descriptors supporting the read/write 1199 the process as file descriptors supporting the read/write
1200 syscalls, it's possible to isolate those applications in 1200 syscalls, it's possible to isolate those applications in
1201 their own address space using seccomp. Once seccomp is 1201 their own address space using seccomp. Once seccomp is
1202 enabled via /proc/<pid>/seccomp, it cannot be disabled 1202 enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
1203 and the task is only allowed to execute a few safe syscalls 1203 and the task is only allowed to execute a few safe syscalls
1204 defined by each seccomp mode. 1204 defined by each seccomp mode.
1205 1205
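
The seccomp help text now points at prctl(PR_SET_SECCOMP) instead of /proc/<pid>/seccomp. As a minimal user-space sketch (not part of this patch), strict-mode seccomp in this kernel generation is entered like this; afterwards only read, write, exit and sigreturn are permitted:

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

int main(void)
{
	/* Mode 1 is "strict"; the BPF filter mode does not exist yet here. */
	if (prctl(PR_SET_SECCOMP, 1) != 0) {
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}

	/* read()/write() on already-open descriptors keep working ... */
	write(1, "sandboxed\n", 10);

	/* ... anything else is fatal, so bypass glibc's exit_group(). */
	syscall(SYS_exit, 0);
	return 0;	/* not reached */
}
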
@@ -1356,14 +1356,14 @@ config PHYSICAL_ALIGN
1356 Don't change this unless you know what you are doing. 1356 Don't change this unless you know what you are doing.
1357 1357
1358config HOTPLUG_CPU 1358config HOTPLUG_CPU
1359 bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)" 1359 bool "Support for hot-pluggable CPUs"
1360 depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER 1360 depends on SMP && HOTPLUG && !X86_VOYAGER
1361 ---help--- 1361 ---help---
1362 Say Y here to experiment with turning CPUs off and on, and to 1362 Say Y here to allow turning CPUs off and on. CPUs can be
1363 enable suspend on SMP systems. CPUs can be controlled through 1363 controlled through /sys/devices/system/cpu.
1364 /sys/devices/system/cpu. 1364 ( Note: power management support will enable this option
1365 Say N if you want to disable CPU hotplug and don't need to 1365 automatically on SMP systems. )
1366 suspend. 1366 Say N if you want to disable CPU hotplug.
1367 1367
1368config COMPAT_VDSO 1368config COMPAT_VDSO
1369 def_bool y 1369 def_bool y
@@ -1378,6 +1378,51 @@ config COMPAT_VDSO
1378 1378
1379 If unsure, say Y. 1379 If unsure, say Y.
1380 1380
1381config CMDLINE_BOOL
1382 bool "Built-in kernel command line"
1383 default n
1384 help
1385 Allow for specifying boot arguments to the kernel at
1386 build time. On some systems (e.g. embedded ones), it is
1387 necessary or convenient to provide some or all of the
1388 kernel boot arguments with the kernel itself (that is,
1389 not to rely on the boot loader to provide them).
1390
1391 To compile command line arguments into the kernel,
1392 set this option to 'Y', then fill in the
1393 the boot arguments in CONFIG_CMDLINE.
1394
1395 Systems with fully functional boot loaders (i.e. non-embedded)
1396 should leave this option set to 'N'.
1397
1398config CMDLINE
1399 string "Built-in kernel command string"
1400 depends on CMDLINE_BOOL
1401 default ""
1402 help
1403 Enter arguments here that should be compiled into the kernel
1404 image and used at boot time. If the boot loader provides a
1405 command line at boot time, it is appended to this string to
1406 form the full kernel command line, when the system boots.
1407
1408 However, you can use the CONFIG_CMDLINE_OVERRIDE option to
1409 change this behavior.
1410
1411 In most cases, the command line (whether built-in or provided
1412 by the boot loader) should specify the device for the root
1413 file system.
1414
1415config CMDLINE_OVERRIDE
1416 bool "Built-in command line overrides boot loader arguments"
1417 default n
1418 depends on CMDLINE_BOOL
1419 help
1420 Set this option to 'Y' to have the kernel ignore the boot loader
1421 command line, and use ONLY the built-in command line.
1422
1423 This is used to work around broken boot loaders. This should
1424 be set to 'N' under normal conditions.
1425
1381endmenu 1426endmenu
1382 1427
1383config ARCH_ENABLE_MEMORY_HOTPLUG 1428config ARCH_ENABLE_MEMORY_HOTPLUG
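
For orientation, the semantics spelled out in the CMDLINE/CMDLINE_OVERRIDE help amount to a small merge step early in setup. The sketch below is illustrative only; the helper name and its placement are assumptions, not code lifted from this series:

#include <linux/string.h>
#include <asm/setup.h>		/* COMMAND_LINE_SIZE */

#ifdef CONFIG_CMDLINE_BOOL
static char builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/* Hypothetical helper showing how the two command lines combine. */
static void merge_cmdline(char *boot_command_line)
{
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	/* Ignore whatever the boot loader handed over. */
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* Boot-loader arguments are appended to the built-in ones. */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
}
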
@@ -1773,7 +1818,7 @@ config COMPAT_FOR_U64_ALIGNMENT
1773 1818
1774config SYSVIPC_COMPAT 1819config SYSVIPC_COMPAT
1775 def_bool y 1820 def_bool y
1776 depends on X86_64 && COMPAT && SYSVIPC 1821 depends on COMPAT && SYSVIPC
1777 1822
1778endmenu 1823endmenu
1779 1824
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index b225219c448c..60a85768cfcb 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -418,3 +418,21 @@ config X86_MINIMUM_CPU_FAMILY
418config X86_DEBUGCTLMSR 418config X86_DEBUGCTLMSR
419 def_bool y 419 def_bool y
420 depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) 420 depends on !(MK6 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386)
421
422config X86_DS
423 bool "Debug Store support"
424 default y
425 help
426 Add support for Debug Store.
427 This allows the kernel to provide a memory buffer to the hardware
428 to store various profiling and tracing events.
429
430config X86_PTRACE_BTS
431 bool "ptrace interface to Branch Trace Store"
432 default y
433 depends on (X86_DS && X86_DEBUGCTLMSR)
434 help
435 Add a ptrace interface to allow collecting an execution trace
436 of the traced task.
437 This collects control flow changes in a (cyclic) buffer and allows
438 debuggers to fill in the gaps and show an execution trace of the debuggee.
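
As the X86_PTRACE_BTS help says, Branch Trace Store makes the CPU append one record per control-flow change into a memory buffer the kernel hands to the hardware. Roughly, each record carries a from/to address pair plus a word of extra information; the struct below is only an illustration (field names and the 64-bit layout are assumptions based on the architectural description, not on this patch):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* One 64-bit BTS record (illustrative): where the branch came from,
 * where it went, and a word of additional information.
 */
struct bts_record {
	uint64_t from;
	uint64_t to;
	uint64_t flags;
};

/* Walk an already copied-out BTS buffer and print the branch trace. */
static void print_branch_trace(const struct bts_record *buf, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		printf("%3zu: %#018llx -> %#018llx\n", i,
		       (unsigned long long)buf[i].from,
		       (unsigned long long)buf[i].to);
}

int main(void)
{
	struct bts_record sample[] = {
		{ 0x400100, 0x400200, 0 },
		{ 0x400230, 0x400100, 0 },
	};

	print_branch_trace(sample, sizeof(sample) / sizeof(sample[0]));
	return 0;
}
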
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index ba7736cf2ec7..29c5fbf08392 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -137,14 +137,15 @@ relocated:
137 */ 137 */
138 movl output_len(%ebx), %eax 138 movl output_len(%ebx), %eax
139 pushl %eax 139 pushl %eax
140 # push arguments for decompress_kernel:
140 pushl %ebp # output address 141 pushl %ebp # output address
141 movl input_len(%ebx), %eax 142 movl input_len(%ebx), %eax
142 pushl %eax # input_len 143 pushl %eax # input_len
143 leal input_data(%ebx), %eax 144 leal input_data(%ebx), %eax
144 pushl %eax # input_data 145 pushl %eax # input_data
145 leal boot_heap(%ebx), %eax 146 leal boot_heap(%ebx), %eax
146 pushl %eax # heap area as third argument 147 pushl %eax # heap area
147 pushl %esi # real mode pointer as second arg 148 pushl %esi # real mode pointer
148 call decompress_kernel 149 call decompress_kernel
149 addl $20, %esp 150 addl $20, %esp
150 popl %ecx 151 popl %ecx
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 9fea73706479..5780d361105b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -16,7 +16,7 @@
16 */ 16 */
17#undef CONFIG_PARAVIRT 17#undef CONFIG_PARAVIRT
18#ifdef CONFIG_X86_32 18#ifdef CONFIG_X86_32
19#define _ASM_DESC_H_ 1 19#define ASM_X86__DESC_H 1
20#endif 20#endif
21 21
22#ifdef CONFIG_X86_64 22#ifdef CONFIG_X86_64
@@ -27,7 +27,7 @@
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <linux/screen_info.h> 28#include <linux/screen_info.h>
29#include <linux/elf.h> 29#include <linux/elf.h>
30#include <asm/io.h> 30#include <linux/io.h>
31#include <asm/page.h> 31#include <asm/page.h>
32#include <asm/boot.h> 32#include <asm/boot.h>
33#include <asm/bootparam.h> 33#include <asm/bootparam.h>
@@ -251,7 +251,7 @@ static void __putstr(int error, const char *s)
251 y--; 251 y--;
252 } 252 }
253 } else { 253 } else {
254 vidmem [(x + cols * y) * 2] = c; 254 vidmem[(x + cols * y) * 2] = c;
255 if (++x >= cols) { 255 if (++x >= cols) {
256 x = 0; 256 x = 0;
257 if (++y >= lines) { 257 if (++y >= lines) {
@@ -277,7 +277,8 @@ static void *memset(void *s, int c, unsigned n)
277 int i; 277 int i;
278 char *ss = s; 278 char *ss = s;
279 279
280 for (i = 0; i < n; i++) ss[i] = c; 280 for (i = 0; i < n; i++)
281 ss[i] = c;
281 return s; 282 return s;
282} 283}
283 284
@@ -287,7 +288,8 @@ static void *memcpy(void *dest, const void *src, unsigned n)
287 const char *s = src; 288 const char *s = src;
288 char *d = dest; 289 char *d = dest;
289 290
290 for (i = 0; i < n; i++) d[i] = s[i]; 291 for (i = 0; i < n; i++)
292 d[i] = s[i];
291 return dest; 293 return dest;
292} 294}
293 295
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index af86e431acfa..b993062e9a5f 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -30,7 +30,6 @@ SYSSEG = DEF_SYSSEG /* system loaded at 0x10000 (65536) */
30SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */ 30SYSSIZE = DEF_SYSSIZE /* system size: # of 16-byte clicks */
31 /* to be loaded */ 31 /* to be loaded */
32ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */ 32ROOT_DEV = 0 /* ROOT_DEV is now written by "build" */
33SWAP_DEV = 0 /* SWAP_DEV is now written by "build" */
34 33
35#ifndef SVGA_MODE 34#ifndef SVGA_MODE
36#define SVGA_MODE ASK_VGA 35#define SVGA_MODE ASK_VGA
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 104275e191a8..ef9a52005ec9 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27-rc4 3# Linux kernel version: 2.6.27-rc5
4# Mon Aug 25 15:04:00 2008 4# Wed Sep 3 17:23:09 2008
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
@@ -202,7 +202,7 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
202# CONFIG_M586 is not set 202# CONFIG_M586 is not set
203# CONFIG_M586TSC is not set 203# CONFIG_M586TSC is not set
204# CONFIG_M586MMX is not set 204# CONFIG_M586MMX is not set
205# CONFIG_M686 is not set 205CONFIG_M686=y
206# CONFIG_MPENTIUMII is not set 206# CONFIG_MPENTIUMII is not set
207# CONFIG_MPENTIUMIII is not set 207# CONFIG_MPENTIUMIII is not set
208# CONFIG_MPENTIUMM is not set 208# CONFIG_MPENTIUMM is not set
@@ -221,13 +221,14 @@ CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
221# CONFIG_MVIAC3_2 is not set 221# CONFIG_MVIAC3_2 is not set
222# CONFIG_MVIAC7 is not set 222# CONFIG_MVIAC7 is not set
223# CONFIG_MPSC is not set 223# CONFIG_MPSC is not set
224CONFIG_MCORE2=y 224# CONFIG_MCORE2 is not set
225# CONFIG_GENERIC_CPU is not set 225# CONFIG_GENERIC_CPU is not set
226CONFIG_X86_GENERIC=y 226CONFIG_X86_GENERIC=y
227CONFIG_X86_CPU=y 227CONFIG_X86_CPU=y
228CONFIG_X86_CMPXCHG=y 228CONFIG_X86_CMPXCHG=y
229CONFIG_X86_L1_CACHE_SHIFT=7 229CONFIG_X86_L1_CACHE_SHIFT=7
230CONFIG_X86_XADD=y 230CONFIG_X86_XADD=y
231# CONFIG_X86_PPRO_FENCE is not set
231CONFIG_X86_WP_WORKS_OK=y 232CONFIG_X86_WP_WORKS_OK=y
232CONFIG_X86_INVLPG=y 233CONFIG_X86_INVLPG=y
233CONFIG_X86_BSWAP=y 234CONFIG_X86_BSWAP=y
@@ -235,14 +236,15 @@ CONFIG_X86_POPAD_OK=y
235CONFIG_X86_INTEL_USERCOPY=y 236CONFIG_X86_INTEL_USERCOPY=y
236CONFIG_X86_USE_PPRO_CHECKSUM=y 237CONFIG_X86_USE_PPRO_CHECKSUM=y
237CONFIG_X86_TSC=y 238CONFIG_X86_TSC=y
239CONFIG_X86_CMOV=y
238CONFIG_X86_MINIMUM_CPU_FAMILY=4 240CONFIG_X86_MINIMUM_CPU_FAMILY=4
239CONFIG_X86_DEBUGCTLMSR=y 241CONFIG_X86_DEBUGCTLMSR=y
240CONFIG_HPET_TIMER=y 242CONFIG_HPET_TIMER=y
241CONFIG_HPET_EMULATE_RTC=y 243CONFIG_HPET_EMULATE_RTC=y
242CONFIG_DMI=y 244CONFIG_DMI=y
243# CONFIG_IOMMU_HELPER is not set 245# CONFIG_IOMMU_HELPER is not set
244CONFIG_NR_CPUS=4 246CONFIG_NR_CPUS=64
245# CONFIG_SCHED_SMT is not set 247CONFIG_SCHED_SMT=y
246CONFIG_SCHED_MC=y 248CONFIG_SCHED_MC=y
247# CONFIG_PREEMPT_NONE is not set 249# CONFIG_PREEMPT_NONE is not set
248CONFIG_PREEMPT_VOLUNTARY=y 250CONFIG_PREEMPT_VOLUNTARY=y
@@ -254,7 +256,8 @@ CONFIG_VM86=y
254# CONFIG_TOSHIBA is not set 256# CONFIG_TOSHIBA is not set
255# CONFIG_I8K is not set 257# CONFIG_I8K is not set
256CONFIG_X86_REBOOTFIXUPS=y 258CONFIG_X86_REBOOTFIXUPS=y
257# CONFIG_MICROCODE is not set 259CONFIG_MICROCODE=y
260CONFIG_MICROCODE_OLD_INTERFACE=y
258CONFIG_X86_MSR=y 261CONFIG_X86_MSR=y
259CONFIG_X86_CPUID=y 262CONFIG_X86_CPUID=y
260# CONFIG_NOHIGHMEM is not set 263# CONFIG_NOHIGHMEM is not set
@@ -2115,7 +2118,7 @@ CONFIG_IO_DELAY_0X80=y
2115CONFIG_DEFAULT_IO_DELAY_TYPE=0 2118CONFIG_DEFAULT_IO_DELAY_TYPE=0
2116CONFIG_DEBUG_BOOT_PARAMS=y 2119CONFIG_DEBUG_BOOT_PARAMS=y
2117# CONFIG_CPA_DEBUG is not set 2120# CONFIG_CPA_DEBUG is not set
2118# CONFIG_OPTIMIZE_INLINING is not set 2121CONFIG_OPTIMIZE_INLINING=y
2119 2122
2120# 2123#
2121# Security options 2124# Security options
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 678c8acefe04..e620ea6e2a7a 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27-rc4 3# Linux kernel version: 2.6.27-rc5
4# Mon Aug 25 14:40:46 2008 4# Wed Sep 3 17:13:39 2008
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7# CONFIG_X86_32 is not set 7# CONFIG_X86_32 is not set
@@ -218,17 +218,14 @@ CONFIG_X86_PC=y
218# CONFIG_MVIAC3_2 is not set 218# CONFIG_MVIAC3_2 is not set
219# CONFIG_MVIAC7 is not set 219# CONFIG_MVIAC7 is not set
220# CONFIG_MPSC is not set 220# CONFIG_MPSC is not set
221CONFIG_MCORE2=y 221# CONFIG_MCORE2 is not set
222# CONFIG_GENERIC_CPU is not set 222CONFIG_GENERIC_CPU=y
223CONFIG_X86_CPU=y 223CONFIG_X86_CPU=y
224CONFIG_X86_L1_CACHE_BYTES=64 224CONFIG_X86_L1_CACHE_BYTES=128
225CONFIG_X86_INTERNODE_CACHE_BYTES=64 225CONFIG_X86_INTERNODE_CACHE_BYTES=128
226CONFIG_X86_CMPXCHG=y 226CONFIG_X86_CMPXCHG=y
227CONFIG_X86_L1_CACHE_SHIFT=6 227CONFIG_X86_L1_CACHE_SHIFT=7
228CONFIG_X86_WP_WORKS_OK=y 228CONFIG_X86_WP_WORKS_OK=y
229CONFIG_X86_INTEL_USERCOPY=y
230CONFIG_X86_USE_PPRO_CHECKSUM=y
231CONFIG_X86_P6_NOP=y
232CONFIG_X86_TSC=y 229CONFIG_X86_TSC=y
233CONFIG_X86_CMPXCHG64=y 230CONFIG_X86_CMPXCHG64=y
234CONFIG_X86_CMOV=y 231CONFIG_X86_CMOV=y
@@ -243,9 +240,8 @@ CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
243CONFIG_AMD_IOMMU=y 240CONFIG_AMD_IOMMU=y
244CONFIG_SWIOTLB=y 241CONFIG_SWIOTLB=y
245CONFIG_IOMMU_HELPER=y 242CONFIG_IOMMU_HELPER=y
246# CONFIG_MAXSMP is not set 243CONFIG_NR_CPUS=64
247CONFIG_NR_CPUS=4 244CONFIG_SCHED_SMT=y
248# CONFIG_SCHED_SMT is not set
249CONFIG_SCHED_MC=y 245CONFIG_SCHED_MC=y
250# CONFIG_PREEMPT_NONE is not set 246# CONFIG_PREEMPT_NONE is not set
251CONFIG_PREEMPT_VOLUNTARY=y 247CONFIG_PREEMPT_VOLUNTARY=y
@@ -254,7 +250,8 @@ CONFIG_X86_LOCAL_APIC=y
254CONFIG_X86_IO_APIC=y 250CONFIG_X86_IO_APIC=y
255# CONFIG_X86_MCE is not set 251# CONFIG_X86_MCE is not set
256# CONFIG_I8K is not set 252# CONFIG_I8K is not set
257# CONFIG_MICROCODE is not set 253CONFIG_MICROCODE=y
254CONFIG_MICROCODE_OLD_INTERFACE=y
258CONFIG_X86_MSR=y 255CONFIG_X86_MSR=y
259CONFIG_X86_CPUID=y 256CONFIG_X86_CPUID=y
260CONFIG_NUMA=y 257CONFIG_NUMA=y
@@ -290,7 +287,7 @@ CONFIG_BOUNCE=y
290CONFIG_VIRT_TO_BUS=y 287CONFIG_VIRT_TO_BUS=y
291CONFIG_MTRR=y 288CONFIG_MTRR=y
292# CONFIG_MTRR_SANITIZER is not set 289# CONFIG_MTRR_SANITIZER is not set
293# CONFIG_X86_PAT is not set 290CONFIG_X86_PAT=y
294CONFIG_EFI=y 291CONFIG_EFI=y
295CONFIG_SECCOMP=y 292CONFIG_SECCOMP=y
296# CONFIG_HZ_100 is not set 293# CONFIG_HZ_100 is not set
@@ -2089,7 +2086,7 @@ CONFIG_IO_DELAY_0X80=y
2089CONFIG_DEFAULT_IO_DELAY_TYPE=0 2086CONFIG_DEFAULT_IO_DELAY_TYPE=0
2090CONFIG_DEBUG_BOOT_PARAMS=y 2087CONFIG_DEBUG_BOOT_PARAMS=y
2091# CONFIG_CPA_DEBUG is not set 2088# CONFIG_CPA_DEBUG is not set
2092# CONFIG_OPTIMIZE_INLINING is not set 2089CONFIG_OPTIMIZE_INLINING=y
2093 2090
2094# 2091#
2095# Security options 2092# Security options
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 3874c2de5403..903de4aa5094 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
10obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o 10obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
11obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o 11obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
12 12
13obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
14
13aes-i586-y := aes-i586-asm_32.o aes_glue.o 15aes-i586-y := aes-i586-asm_32.o aes_glue.o
14twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o 16twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
15salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o 17salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
diff --git a/arch/x86/crypto/crc32c-intel.c b/arch/x86/crypto/crc32c-intel.c
new file mode 100644
index 000000000000..070afc5b6c94
--- /dev/null
+++ b/arch/x86/crypto/crc32c-intel.c
@@ -0,0 +1,197 @@
1/*
2 * Using the hardware-provided CRC32 instruction to accelerate CRC32C computation.
3 * CRC32C polynomial: 0x1EDC6F41 (BE) / 0x82F63B78 (LE)
4 * CRC32 is a new instruction in Intel SSE4.2, the reference can be found at:
5 * http://www.intel.com/products/processor/manuals/
6 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
7 * Volume 2A: Instruction Set Reference, A-M
8 *
9 * Copyright (c) 2008 Austin Zhang <austin_zhang@linux.intel.com>
10 * Copyright (c) 2008 Kent Liu <kent.liu@intel.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 2 of the License, or (at your option)
15 * any later version.
16 *
17 */
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/string.h>
21#include <linux/kernel.h>
22#include <crypto/internal/hash.h>
23
24#include <asm/cpufeature.h>
25
26#define CHKSUM_BLOCK_SIZE 1
27#define CHKSUM_DIGEST_SIZE 4
28
29#define SCALE_F sizeof(unsigned long)
30
31#ifdef CONFIG_X86_64
32#define REX_PRE "0x48, "
33#else
34#define REX_PRE
35#endif
36
37static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
38{
39 while (length--) {
40 __asm__ __volatile__(
41 ".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1"
42 :"=S"(crc)
43 :"0"(crc), "c"(*data)
44 );
45 data++;
46 }
47
48 return crc;
49}
50
51static u32 __pure crc32c_intel_le_hw(u32 crc, unsigned char const *p, size_t len)
52{
53 unsigned int iquotient = len / SCALE_F;
54 unsigned int iremainder = len % SCALE_F;
55 unsigned long *ptmp = (unsigned long *)p;
56
57 while (iquotient--) {
58 __asm__ __volatile__(
59 ".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;"
60 :"=S"(crc)
61 :"0"(crc), "c"(*ptmp)
62 );
63 ptmp++;
64 }
65
66 if (iremainder)
67 crc = crc32c_intel_le_hw_byte(crc, (unsigned char *)ptmp,
68 iremainder);
69
70 return crc;
71}
72
73/*
74 * Setting the seed allows arbitrary accumulators and flexible XOR policy.
75 * If your algorithm starts with ~0, then XOR with ~0 before you set
76 * the seed.
77 */
78static int crc32c_intel_setkey(struct crypto_ahash *hash, const u8 *key,
79 unsigned int keylen)
80{
81 u32 *mctx = crypto_ahash_ctx(hash);
82
83 if (keylen != sizeof(u32)) {
84 crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
85 return -EINVAL;
86 }
87 *mctx = le32_to_cpup((__le32 *)key);
88 return 0;
89}
90
91static int crc32c_intel_init(struct ahash_request *req)
92{
93 u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
94 u32 *crcp = ahash_request_ctx(req);
95
96 *crcp = *mctx;
97
98 return 0;
99}
100
101static int crc32c_intel_update(struct ahash_request *req)
102{
103 struct crypto_hash_walk walk;
104 u32 *crcp = ahash_request_ctx(req);
105 u32 crc = *crcp;
106 int nbytes;
107
108 for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
109 nbytes = crypto_hash_walk_done(&walk, 0))
110 crc = crc32c_intel_le_hw(crc, walk.data, nbytes);
111
112 *crcp = crc;
113 return 0;
114}
115
116static int crc32c_intel_final(struct ahash_request *req)
117{
118 u32 *crcp = ahash_request_ctx(req);
119
120 *(__le32 *)req->result = ~cpu_to_le32p(crcp);
121 return 0;
122}
123
124static int crc32c_intel_digest(struct ahash_request *req)
125{
126 struct crypto_hash_walk walk;
127 u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
128 u32 crc = *mctx;
129 int nbytes;
130
131 for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
132 nbytes = crypto_hash_walk_done(&walk, 0))
133 crc = crc32c_intel_le_hw(crc, walk.data, nbytes);
134
135 *(__le32 *)req->result = ~cpu_to_le32(crc);
136 return 0;
137}
138
139static int crc32c_intel_cra_init(struct crypto_tfm *tfm)
140{
141 u32 *key = crypto_tfm_ctx(tfm);
142
143 *key = ~0;
144
145 tfm->crt_ahash.reqsize = sizeof(u32);
146
147 return 0;
148}
149
150static struct crypto_alg alg = {
151 .cra_name = "crc32c",
152 .cra_driver_name = "crc32c-intel",
153 .cra_priority = 200,
154 .cra_flags = CRYPTO_ALG_TYPE_AHASH,
155 .cra_blocksize = CHKSUM_BLOCK_SIZE,
156 .cra_alignmask = 3,
157 .cra_ctxsize = sizeof(u32),
158 .cra_module = THIS_MODULE,
159 .cra_list = LIST_HEAD_INIT(alg.cra_list),
160 .cra_init = crc32c_intel_cra_init,
161 .cra_type = &crypto_ahash_type,
162 .cra_u = {
163 .ahash = {
164 .digestsize = CHKSUM_DIGEST_SIZE,
165 .setkey = crc32c_intel_setkey,
166 .init = crc32c_intel_init,
167 .update = crc32c_intel_update,
168 .final = crc32c_intel_final,
169 .digest = crc32c_intel_digest,
170 }
171 }
172};
173
174
175static int __init crc32c_intel_mod_init(void)
176{
177 if (cpu_has_xmm4_2)
178 return crypto_register_alg(&alg);
179 else
180 return -ENODEV;
181}
182
183static void __exit crc32c_intel_mod_fini(void)
184{
185 crypto_unregister_alg(&alg);
186}
187
188module_init(crc32c_intel_mod_init);
189module_exit(crc32c_intel_mod_fini);
190
191MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.com>");
192MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
193MODULE_LICENSE("GPL");
194
195MODULE_ALIAS("crc32c");
196MODULE_ALIAS("crc32c-intel");
197
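
The new driver above reaches the SSE4.2 CRC32 instruction through hand-encoded .byte sequences, presumably so it assembles with toolchains that do not yet know the opcode. For comparison, the same instruction is reachable from user space through compiler intrinsics; the sketch below is illustrative only (needs an SSE4.2-capable CPU and compilation with -msse4.2):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <nmmintrin.h>		/* _mm_crc32_u8, requires -msse4.2 */

/* CRC32C (Castagnoli, reflected polynomial 0x82F63B78), using the usual
 * crc32c convention: seed with ~0 and invert the final value.
 */
static uint32_t crc32c_sse42(const void *data, size_t len)
{
	const unsigned char *p = data;
	uint32_t crc = ~0u;

	while (len--)
		crc = _mm_crc32_u8(crc, *p++);

	return ~crc;
}

int main(void)
{
	/* Standard CRC-32C check value for "123456789" is e3069283. */
	printf("%08x\n", crc32c_sse42("123456789", 9));
	return 0;
}

The kernel driver does the same work a machine word at a time (the REX_PRE/0xf1 form) and only falls back to the byte form for the tail, which is exactly the iquotient/iremainder split in crc32c_intel_le_hw() above.
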
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a0e1dbe67dc1..127ec3f07214 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -85,8 +85,10 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
85 dump->regs.ax = regs->ax; 85 dump->regs.ax = regs->ax;
86 dump->regs.ds = current->thread.ds; 86 dump->regs.ds = current->thread.ds;
87 dump->regs.es = current->thread.es; 87 dump->regs.es = current->thread.es;
88 asm("movl %%fs,%0" : "=r" (fs)); dump->regs.fs = fs; 88 savesegment(fs, fs);
89 asm("movl %%gs,%0" : "=r" (gs)); dump->regs.gs = gs; 89 dump->regs.fs = fs;
90 savesegment(gs, gs);
91 dump->regs.gs = gs;
90 dump->regs.orig_ax = regs->orig_ax; 92 dump->regs.orig_ax = regs->orig_ax;
91 dump->regs.ip = regs->ip; 93 dump->regs.ip = regs->ip;
92 dump->regs.cs = regs->cs; 94 dump->regs.cs = regs->cs;
@@ -430,8 +432,9 @@ beyond_if:
430 current->mm->start_stack = 432 current->mm->start_stack =
431 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm); 433 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
432 /* start thread */ 434 /* start thread */
433 asm volatile("movl %0,%%fs" :: "r" (0)); \ 435 loadsegment(fs, 0);
434 asm volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS)); 436 loadsegment(ds, __USER32_DS);
437 loadsegment(es, __USER32_DS);
435 load_gs_index(0); 438 load_gs_index(0);
436 (regs)->ip = ex.a_entry; 439 (regs)->ip = ex.a_entry;
437 (regs)->sp = current->mm->start_stack; 440 (regs)->sp = current->mm->start_stack;
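
This file (and ia32_signal.c below) replaces open-coded segment-register asm with the savesegment()/loadsegment() helpers. The kernel's loadsegment() is more involved because it also has to cope with an invalid selector, so only the read side is sketched here; the macro below is a simplified stand-in for illustration, not the kernel's definition:

#include <stdio.h>

/* Simplified stand-in for the kernel's savesegment(seg, value). */
#define my_savesegment(seg, value) \
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

int main(void)
{
	unsigned int fs, gs;

	my_savesegment(fs, fs);		/* reading a segment register is unprivileged */
	my_savesegment(gs, gs);
	printf("fs=%#x gs=%#x\n", fs, gs);
	return 0;
}
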
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 20af4c79579a..f1a2ac777faf 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -206,7 +206,7 @@ struct rt_sigframe
206 { unsigned int cur; \ 206 { unsigned int cur; \
207 unsigned short pre; \ 207 unsigned short pre; \
208 err |= __get_user(pre, &sc->seg); \ 208 err |= __get_user(pre, &sc->seg); \
209 asm volatile("movl %%" #seg ",%0" : "=r" (cur)); \ 209 savesegment(seg, cur); \
210 pre |= mask; \ 210 pre |= mask; \
211 if (pre != cur) loadsegment(seg, pre); } 211 if (pre != cur) loadsegment(seg, pre); }
212 212
@@ -235,7 +235,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
235 */ 235 */
236 err |= __get_user(gs, &sc->gs); 236 err |= __get_user(gs, &sc->gs);
237 gs |= 3; 237 gs |= 3;
238 asm("movl %%gs,%0" : "=r" (oldgs)); 238 savesegment(gs, oldgs);
239 if (gs != oldgs) 239 if (gs != oldgs)
240 load_gs_index(gs); 240 load_gs_index(gs);
241 241
@@ -355,14 +355,13 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
355{ 355{
356 int tmp, err = 0; 356 int tmp, err = 0;
357 357
358 tmp = 0; 358 savesegment(gs, tmp);
359 __asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
360 err |= __put_user(tmp, (unsigned int __user *)&sc->gs); 359 err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
361 __asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp)); 360 savesegment(fs, tmp);
362 err |= __put_user(tmp, (unsigned int __user *)&sc->fs); 361 err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
363 __asm__("movl %%ds,%0" : "=r"(tmp): "0"(tmp)); 362 savesegment(ds, tmp);
364 err |= __put_user(tmp, (unsigned int __user *)&sc->ds); 363 err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
365 __asm__("movl %%es,%0" : "=r"(tmp): "0"(tmp)); 364 savesegment(es, tmp);
366 err |= __put_user(tmp, (unsigned int __user *)&sc->es); 365 err |= __put_user(tmp, (unsigned int __user *)&sc->es);
367 366
368 err |= __put_user((u32)regs->di, &sc->di); 367 err |= __put_user((u32)regs->di, &sc->di);
@@ -498,8 +497,8 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
498 regs->dx = 0; 497 regs->dx = 0;
499 regs->cx = 0; 498 regs->cx = 0;
500 499
501 asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 500 loadsegment(ds, __USER32_DS);
502 asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 501 loadsegment(es, __USER32_DS);
503 502
504 regs->cs = __USER32_CS; 503 regs->cs = __USER32_CS;
505 regs->ss = __USER32_DS; 504 regs->ss = __USER32_DS;
@@ -591,8 +590,8 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
591 regs->dx = (unsigned long) &frame->info; 590 regs->dx = (unsigned long) &frame->info;
592 regs->cx = (unsigned long) &frame->uc; 591 regs->cx = (unsigned long) &frame->uc;
593 592
594 asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 593 loadsegment(ds, __USER32_DS);
595 asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 594 loadsegment(es, __USER32_DS);
596 595
597 regs->cs = __USER32_CS; 596 regs->cs = __USER32_CS;
598 regs->ss = __USER32_DS; 597 regs->ss = __USER32_DS;
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index d3c64088b981..beda4232ce69 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -556,15 +556,6 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
556 return ret; 556 return ret;
557} 557}
558 558
559/* These are here just in case some old ia32 binary calls it. */
560asmlinkage long sys32_pause(void)
561{
562 current->state = TASK_INTERRUPTIBLE;
563 schedule();
564 return -ERESTARTNOHAND;
565}
566
567
568#ifdef CONFIG_SYSCTL_SYSCALL 559#ifdef CONFIG_SYSCTL_SYSCALL
569struct sysctl_ia32 { 560struct sysctl_ia32 {
570 unsigned int name; 561 unsigned int name;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index c102af85df9c..7d40ef7b36e3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -58,7 +58,6 @@ EXPORT_SYMBOL(acpi_disabled);
58#ifdef CONFIG_X86_64 58#ifdef CONFIG_X86_64
59 59
60#include <asm/proto.h> 60#include <asm/proto.h>
61#include <asm/genapic.h>
62 61
63#else /* X86 */ 62#else /* X86 */
64 63
@@ -97,8 +96,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
97#warning ACPI uses CMPXCHG, i486 and later hardware 96#warning ACPI uses CMPXCHG, i486 and later hardware
98#endif 97#endif
99 98
100static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
101
102/* -------------------------------------------------------------------------- 99/* --------------------------------------------------------------------------
103 Boot-time Configuration 100 Boot-time Configuration
104 -------------------------------------------------------------------------- */ 101 -------------------------------------------------------------------------- */
@@ -160,6 +157,8 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
160struct acpi_mcfg_allocation *pci_mmcfg_config; 157struct acpi_mcfg_allocation *pci_mmcfg_config;
161int pci_mmcfg_config_num; 158int pci_mmcfg_config_num;
162 159
160static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
161
163static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg) 162static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
164{ 163{
165 if (!strcmp(mcfg->header.oem_id, "SGI")) 164 if (!strcmp(mcfg->header.oem_id, "SGI"))
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 65a0c1b48696..fb04e49776ba 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -231,25 +231,25 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
231 continue; 231 continue;
232 if (*ptr > text_end) 232 if (*ptr > text_end)
233 continue; 233 continue;
234 text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */ 234 /* turn DS segment override prefix into lock prefix */
235 text_poke(*ptr, ((unsigned char []){0xf0}), 1);
235 }; 236 };
236} 237}
237 238
238static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) 239static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
239{ 240{
240 u8 **ptr; 241 u8 **ptr;
241 char insn[1];
242 242
243 if (noreplace_smp) 243 if (noreplace_smp)
244 return; 244 return;
245 245
246 add_nops(insn, 1);
247 for (ptr = start; ptr < end; ptr++) { 246 for (ptr = start; ptr < end; ptr++) {
248 if (*ptr < text) 247 if (*ptr < text)
249 continue; 248 continue;
250 if (*ptr > text_end) 249 if (*ptr > text_end)
251 continue; 250 continue;
252 text_poke(*ptr, insn, 1); 251 /* turn lock prefix into DS segment override prefix */
252 text_poke(*ptr, ((unsigned char []){0x3E}), 1);
253 }; 253 };
254} 254}
255 255
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 44e21826db11..9a32b37ee2ee 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -455,11 +455,11 @@ out:
455 force_iommu || 455 force_iommu ||
456 valid_agp || 456 valid_agp ||
457 fallback_aper_force) { 457 fallback_aper_force) {
458 printk(KERN_ERR 458 printk(KERN_INFO
459 "Your BIOS doesn't leave an aperture memory hole\n"); 459 "Your BIOS doesn't leave an aperture memory hole\n");
460 printk(KERN_ERR 460 printk(KERN_INFO
461 "Please enable the IOMMU option in the BIOS setup\n"); 461 "Please enable the IOMMU option in the BIOS setup\n");
462 printk(KERN_ERR 462 printk(KERN_INFO
463 "This costs you %d MB of RAM\n", 463 "This costs you %d MB of RAM\n",
464 32 << fallback_aper_order); 464 32 << fallback_aper_order);
465 465
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 732d1f4e10ee..5145a6e72bbb 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,7 +228,6 @@
228#include <linux/suspend.h> 228#include <linux/suspend.h>
229#include <linux/kthread.h> 229#include <linux/kthread.h>
230#include <linux/jiffies.h> 230#include <linux/jiffies.h>
231#include <linux/smp_lock.h>
232 231
233#include <asm/system.h> 232#include <asm/system.h>
234#include <asm/uaccess.h> 233#include <asm/uaccess.h>
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index aa89387006fe..505543a75a56 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -22,7 +22,7 @@
22 22
23#define __NO_STUBS 1 23#define __NO_STUBS 1
24#undef __SYSCALL 24#undef __SYSCALL
25#undef _ASM_X86_64_UNISTD_H_ 25#undef ASM_X86__UNISTD_64_H
26#define __SYSCALL(nr, sym) [nr] = 1, 26#define __SYSCALL(nr, sym) [nr] = 1,
27static char syscalls[] = { 27static char syscalls[] = {
28#include <asm/unistd.h> 28#include <asm/unistd.h>
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index c639bd55391c..fdd585f9c53d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -25,11 +25,11 @@ x86_bios_strerror(long status)
25{ 25{
26 const char *str; 26 const char *str;
27 switch (status) { 27 switch (status) {
28 case 0: str = "Call completed without error"; break; 28 case 0: str = "Call completed without error"; break;
29 case -1: str = "Not implemented"; break; 29 case -1: str = "Not implemented"; break;
30 case -2: str = "Invalid argument"; break; 30 case -2: str = "Invalid argument"; break;
31 case -3: str = "Call completed with error"; break; 31 case -3: str = "Call completed with error"; break;
32 default: str = "Unknown BIOS status code"; break; 32 default: str = "Unknown BIOS status code"; break;
33 } 33 }
34 return str; 34 return str;
35} 35}
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index a11f5d4477cd..305b465889b0 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -430,6 +430,49 @@ static __init int setup_noclflush(char *arg)
430} 430}
431__setup("noclflush", setup_noclflush); 431__setup("noclflush", setup_noclflush);
432 432
433struct msr_range {
434 unsigned min;
435 unsigned max;
436};
437
438static struct msr_range msr_range_array[] __cpuinitdata = {
439 { 0x00000000, 0x00000418},
440 { 0xc0000000, 0xc000040b},
441 { 0xc0010000, 0xc0010142},
442 { 0xc0011000, 0xc001103b},
443};
444
445static void __cpuinit print_cpu_msr(void)
446{
447 unsigned index;
448 u64 val;
449 int i;
450 unsigned index_min, index_max;
451
452 for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
453 index_min = msr_range_array[i].min;
454 index_max = msr_range_array[i].max;
455 for (index = index_min; index < index_max; index++) {
456 if (rdmsrl_amd_safe(index, &val))
457 continue;
458 printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
459 }
460 }
461}
462
463static int show_msr __cpuinitdata;
464static __init int setup_show_msr(char *arg)
465{
466 int num;
467
468 get_option(&arg, &num);
469
470 if (num > 0)
471 show_msr = num;
472 return 1;
473}
474__setup("show_msr=", setup_show_msr);
475
433void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) 476void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
434{ 477{
435 if (c->x86_model_id[0]) 478 if (c->x86_model_id[0])
@@ -439,6 +482,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
439 printk(KERN_CONT " stepping %02x\n", c->x86_mask); 482 printk(KERN_CONT " stepping %02x\n", c->x86_mask);
440 else 483 else
441 printk(KERN_CONT "\n"); 484 printk(KERN_CONT "\n");
485
486#ifdef CONFIG_SMP
487 if (c->cpu_index < show_msr)
488 print_cpu_msr();
489#else
490 if (show_msr)
491 print_cpu_msr();
492#endif
442} 493}
443 494
444static __init int setup_disablecpuid(char *arg) 495static __init int setup_disablecpuid(char *arg)
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index f1685fb91fbd..b8e05ee4f736 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -171,7 +171,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
171 } 171 }
172 172
173 if (c->x86 != 0xF) { 173 if (c->x86 != 0xF) {
174 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n"); 174 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@vger.kernel.org>\n");
175 return 0; 175 return 0;
176 } 176 }
177 177
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 15e13c01cc36..3b5f06423e77 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -26,7 +26,7 @@
26#include <asm/cpufeature.h> 26#include <asm/cpufeature.h>
27 27
28#define PFX "speedstep-centrino: " 28#define PFX "speedstep-centrino: "
29#define MAINTAINER "cpufreq@lists.linux.org.uk" 29#define MAINTAINER "cpufreq@vger.kernel.org"
30 30
31#define dprintk(msg...) \ 31#define dprintk(msg...) \
32 cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) 32 cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b75f2569b8f8..f113ef4595f6 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -222,10 +222,11 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
222 set_cpu_cap(c, X86_FEATURE_BTS); 222 set_cpu_cap(c, X86_FEATURE_BTS);
223 if (!(l1 & (1<<12))) 223 if (!(l1 & (1<<12)))
224 set_cpu_cap(c, X86_FEATURE_PEBS); 224 set_cpu_cap(c, X86_FEATURE_PEBS);
225 ds_init_intel(c);
225 } 226 }
226 227
227 if (cpu_has_bts) 228 if (cpu_has_bts)
228 ds_init_intel(c); 229 ptrace_bts_init_intel(c);
229 230
230 /* 231 /*
231 * See if we have a good local APIC by checking for buggy Pentia, 232 * See if we have a good local APIC by checking for buggy Pentia,
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cb7d3b6a80eb..4e8d77f01eeb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -401,12 +401,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
401 tmp |= ~((1<<(hi - 1)) - 1); 401 tmp |= ~((1<<(hi - 1)) - 1);
402 402
403 if (tmp != mask_lo) { 403 if (tmp != mask_lo) {
404 static int once = 1; 404 WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
405
406 if (once) {
407 printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
408 once = 0;
409 }
410 mask_lo = tmp; 405 mask_lo = tmp;
411 } 406 }
412 } 407 }
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 84c480bb3715..4c4214690dd1 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -405,9 +405,9 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
405 } 405 }
406 /* RED-PEN: base can be > 32bit */ 406 /* RED-PEN: base can be > 32bit */
407 len += seq_printf(seq, 407 len += seq_printf(seq,
408 "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n", 408 "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
409 i, base, base >> (20 - PAGE_SHIFT), size, factor, 409 i, base, base >> (20 - PAGE_SHIFT), size, factor,
410 mtrr_attrib_to_str(type), mtrr_usage_table[i]); 410 mtrr_usage_table[i], mtrr_attrib_to_str(type));
411 } 411 }
412 } 412 }
413 return 0; 413 return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 885c8265e6b5..c78c04821ea1 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -729,7 +729,7 @@ struct var_mtrr_range_state {
729 mtrr_type type; 729 mtrr_type type;
730}; 730};
731 731
732struct var_mtrr_range_state __initdata range_state[RANGE_NUM]; 732static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
733static int __initdata debug_print; 733static int __initdata debug_print;
734 734
735static int __init 735static int __init
@@ -759,7 +759,8 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
759 /* take out UC ranges */ 759 /* take out UC ranges */
760 for (i = 0; i < num_var_ranges; i++) { 760 for (i = 0; i < num_var_ranges; i++) {
761 type = range_state[i].type; 761 type = range_state[i].type;
762 if (type != MTRR_TYPE_UNCACHABLE) 762 if (type != MTRR_TYPE_UNCACHABLE &&
763 type != MTRR_TYPE_WRPROT)
763 continue; 764 continue;
764 size = range_state[i].size_pfn; 765 size = range_state[i].size_pfn;
765 if (!size) 766 if (!size)
@@ -836,6 +837,13 @@ static int __init enable_mtrr_cleanup_setup(char *str)
836} 837}
837early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); 838early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
838 839
840static int __init mtrr_cleanup_debug_setup(char *str)
841{
842 debug_print = 1;
843 return 0;
844}
845early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
846
839struct var_mtrr_state { 847struct var_mtrr_state {
840 unsigned long range_startk; 848 unsigned long range_startk;
841 unsigned long range_sizek; 849 unsigned long range_sizek;
@@ -898,6 +906,27 @@ set_var_mtrr_all(unsigned int address_bits)
898 } 906 }
899} 907}
900 908
909static unsigned long to_size_factor(unsigned long sizek, char *factorp)
910{
911 char factor;
912 unsigned long base = sizek;
913
914 if (base & ((1<<10) - 1)) {
915 /* not MB alignment */
916 factor = 'K';
917 } else if (base & ((1<<20) - 1)){
918 factor = 'M';
919 base >>= 10;
920 } else {
921 factor = 'G';
922 base >>= 20;
923 }
924
925 *factorp = factor;
926
927 return base;
928}
929
901static unsigned int __init 930static unsigned int __init
902range_to_mtrr(unsigned int reg, unsigned long range_startk, 931range_to_mtrr(unsigned int reg, unsigned long range_startk,
903 unsigned long range_sizek, unsigned char type) 932 unsigned long range_sizek, unsigned char type)
@@ -919,13 +948,21 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
919 align = max_align; 948 align = max_align;
920 949
921 sizek = 1 << align; 950 sizek = 1 << align;
922 if (debug_print) 951 if (debug_print) {
952 char start_factor = 'K', size_factor = 'K';
953 unsigned long start_base, size_base;
954
955 start_base = to_size_factor(range_startk, &start_factor),
956 size_base = to_size_factor(sizek, &size_factor),
957
923 printk(KERN_DEBUG "Setting variable MTRR %d, " 958 printk(KERN_DEBUG "Setting variable MTRR %d, "
924 "base: %ldMB, range: %ldMB, type %s\n", 959 "base: %ld%cB, range: %ld%cB, type %s\n",
925 reg, range_startk >> 10, sizek >> 10, 960 reg, start_base, start_factor,
961 size_base, size_factor,
926 (type == MTRR_TYPE_UNCACHABLE)?"UC": 962 (type == MTRR_TYPE_UNCACHABLE)?"UC":
927 ((type == MTRR_TYPE_WRBACK)?"WB":"Other") 963 ((type == MTRR_TYPE_WRBACK)?"WB":"Other")
928 ); 964 );
965 }
929 save_var_mtrr(reg++, range_startk, sizek, type); 966 save_var_mtrr(reg++, range_startk, sizek, type);
930 range_startk += sizek; 967 range_startk += sizek;
931 range_sizek -= sizek; 968 range_sizek -= sizek;
@@ -970,6 +1007,8 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
970 /* try to append some small hole */ 1007 /* try to append some small hole */
971 range0_basek = state->range_startk; 1008 range0_basek = state->range_startk;
972 range0_sizek = ALIGN(state->range_sizek, chunk_sizek); 1009 range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
1010
1011 /* no increase */
973 if (range0_sizek == state->range_sizek) { 1012 if (range0_sizek == state->range_sizek) {
974 if (debug_print) 1013 if (debug_print)
975 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", 1014 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
@@ -980,13 +1019,40 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
980 return 0; 1019 return 0;
981 } 1020 }
982 1021
983 range0_sizek -= chunk_sizek; 1022 /* only cut back when it is not the last range */
984 if (range0_sizek && sizek) { 1023 if (sizek) {
985 while (range0_basek + range0_sizek > (basek + sizek)) { 1024 while (range0_basek + range0_sizek > (basek + sizek)) {
986 range0_sizek -= chunk_sizek; 1025 if (range0_sizek >= chunk_sizek)
987 if (!range0_sizek) 1026 range0_sizek -= chunk_sizek;
988 break; 1027 else
989 } 1028 range0_sizek = 0;
1029
1030 if (!range0_sizek)
1031 break;
1032 }
1033 }
1034
1035second_try:
1036 range_basek = range0_basek + range0_sizek;
1037
1038 /* one hole in the middle */
1039 if (range_basek > basek && range_basek <= (basek + sizek))
1040 second_sizek = range_basek - basek;
1041
1042 if (range0_sizek > state->range_sizek) {
1043
1044 /* one hole in middle or at end */
1045 hole_sizek = range0_sizek - state->range_sizek - second_sizek;
1046
1047 /* hole size should be less than half of range0 size */
1048 if (hole_sizek >= (range0_sizek >> 1) &&
1049 range0_sizek >= chunk_sizek) {
1050 range0_sizek -= chunk_sizek;
1051 second_sizek = 0;
1052 hole_sizek = 0;
1053
1054 goto second_try;
1055 }
990 } 1056 }
991 1057
992 if (range0_sizek) { 1058 if (range0_sizek) {
@@ -996,50 +1062,28 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
996 (range0_basek + range0_sizek)<<10); 1062 (range0_basek + range0_sizek)<<10);
997 state->reg = range_to_mtrr(state->reg, range0_basek, 1063 state->reg = range_to_mtrr(state->reg, range0_basek,
998 range0_sizek, MTRR_TYPE_WRBACK); 1064 range0_sizek, MTRR_TYPE_WRBACK);
999
1000 }
1001
1002 range_basek = range0_basek + range0_sizek;
1003 range_sizek = chunk_sizek;
1004
1005 if (range_basek + range_sizek > basek &&
1006 range_basek + range_sizek <= (basek + sizek)) {
1007 /* one hole */
1008 second_basek = basek;
1009 second_sizek = range_basek + range_sizek - basek;
1010 } 1065 }
1011 1066
1012 /* if last piece, only could one hole near end */ 1067 if (range0_sizek < state->range_sizek) {
1013 if ((second_basek || !basek) && 1068 /* need to handle left over */
1014 range_sizek - (state->range_sizek - range0_sizek) - second_sizek <
1015 (chunk_sizek >> 1)) {
1016 /*
1017 * one hole in middle (second_sizek is 0) or at end
1018 * (second_sizek is 0 )
1019 */
1020 hole_sizek = range_sizek - (state->range_sizek - range0_sizek)
1021 - second_sizek;
1022 hole_basek = range_basek + range_sizek - hole_sizek
1023 - second_sizek;
1024 } else {
1025 /* fallback for big hole, or several holes */
1026 range_sizek = state->range_sizek - range0_sizek; 1069 range_sizek = state->range_sizek - range0_sizek;
1027 second_basek = 0; 1070
1028 second_sizek = 0; 1071 if (debug_print)
1072 printk(KERN_DEBUG "range: %016lx - %016lx\n",
1073 range_basek<<10,
1074 (range_basek + range_sizek)<<10);
1075 state->reg = range_to_mtrr(state->reg, range_basek,
1076 range_sizek, MTRR_TYPE_WRBACK);
1029 } 1077 }
1030 1078
1031 if (debug_print)
1032 printk(KERN_DEBUG "range: %016lx - %016lx\n", range_basek<<10,
1033 (range_basek + range_sizek)<<10);
1034 state->reg = range_to_mtrr(state->reg, range_basek, range_sizek,
1035 MTRR_TYPE_WRBACK);
1036 if (hole_sizek) { 1079 if (hole_sizek) {
1080 hole_basek = range_basek - hole_sizek - second_sizek;
1037 if (debug_print) 1081 if (debug_print)
1038 printk(KERN_DEBUG "hole: %016lx - %016lx\n", 1082 printk(KERN_DEBUG "hole: %016lx - %016lx\n",
1039 hole_basek<<10, (hole_basek + hole_sizek)<<10); 1083 hole_basek<<10,
1040 state->reg = range_to_mtrr(state->reg, hole_basek, hole_sizek, 1084 (hole_basek + hole_sizek)<<10);
1041 MTRR_TYPE_UNCACHABLE); 1085 state->reg = range_to_mtrr(state->reg, hole_basek,
1042 1086 hole_sizek, MTRR_TYPE_UNCACHABLE);
1043 } 1087 }
1044 1088
1045 return second_sizek; 1089 return second_sizek;
@@ -1154,11 +1198,11 @@ struct mtrr_cleanup_result {
1154}; 1198};
1155 1199
1156/* 1200/*
1157 * gran_size: 1M, 2M, ..., 2G 1201 * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
1158 * chunk size: gran_size, ..., 4G 1202 * chunk size: gran_size, ..., 2G
1159 * so we need (2+13)*6 1203 * so we need (1+16)*8
1160 */ 1204 */
1161#define NUM_RESULT 90 1205#define NUM_RESULT 136
1162#define PSHIFT (PAGE_SHIFT - 10) 1206#define PSHIFT (PAGE_SHIFT - 10)
1163 1207
1164static struct mtrr_cleanup_result __initdata result[NUM_RESULT]; 1208static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
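
A quick check of the new bound: gran_size now walks the 16 powers of two from 64K up to 2G, and for each of them chunk_size walks from gran_size up to 2G, so the search table needs 16 + 15 + ... + 1 = 16*17/2 = 136 slots, matching both the (1+16)*8 in the comment and the new NUM_RESULT value.
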
@@ -1168,13 +1212,14 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
1168static int __init mtrr_cleanup(unsigned address_bits) 1212static int __init mtrr_cleanup(unsigned address_bits)
1169{ 1213{
1170 unsigned long extra_remove_base, extra_remove_size; 1214 unsigned long extra_remove_base, extra_remove_size;
1171 unsigned long i, base, size, def, dummy; 1215 unsigned long base, size, def, dummy;
1172 mtrr_type type; 1216 mtrr_type type;
1173 int nr_range, nr_range_new; 1217 int nr_range, nr_range_new;
1174 u64 chunk_size, gran_size; 1218 u64 chunk_size, gran_size;
1175 unsigned long range_sums, range_sums_new; 1219 unsigned long range_sums, range_sums_new;
1176 int index_good; 1220 int index_good;
1177 int num_reg_good; 1221 int num_reg_good;
1222 int i;
1178 1223
1179 /* extra one for all 0 */ 1224 /* extra one for all 0 */
1180 int num[MTRR_NUM_TYPES + 1]; 1225 int num[MTRR_NUM_TYPES + 1];
@@ -1204,6 +1249,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
1204 continue; 1249 continue;
1205 if (!size) 1250 if (!size)
1206 type = MTRR_NUM_TYPES; 1251 type = MTRR_NUM_TYPES;
1252 if (type == MTRR_TYPE_WRPROT)
1253 type = MTRR_TYPE_UNCACHABLE;
1207 num[type]++; 1254 num[type]++;
1208 } 1255 }
1209 1256
@@ -1216,23 +1263,57 @@ static int __init mtrr_cleanup(unsigned address_bits)
1216 num_var_ranges - num[MTRR_NUM_TYPES]) 1263 num_var_ranges - num[MTRR_NUM_TYPES])
1217 return 0; 1264 return 0;
1218 1265
1266 /* print the original var MTRRs first, for debugging: */
1267 printk(KERN_DEBUG "original variable MTRRs\n");
1268 for (i = 0; i < num_var_ranges; i++) {
1269 char start_factor = 'K', size_factor = 'K';
1270 unsigned long start_base, size_base;
1271
1272 size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
1273 if (!size_base)
1274 continue;
1275
1276 size_base = to_size_factor(size_base, &size_factor),
1277 start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
1278 start_base = to_size_factor(start_base, &start_factor),
1279 type = range_state[i].type;
1280
1281 printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
1282 i, start_base, start_factor,
1283 size_base, size_factor,
1284 (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
1285 ((type == MTRR_TYPE_WRPROT) ? "WP" :
1286 ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
1287 );
1288 }
1289
1219 memset(range, 0, sizeof(range)); 1290 memset(range, 0, sizeof(range));
1220 extra_remove_size = 0; 1291 extra_remove_size = 0;
1221 if (mtrr_tom2) { 1292 extra_remove_base = 1 << (32 - PAGE_SHIFT);
1222 extra_remove_base = 1 << (32 - PAGE_SHIFT); 1293 if (mtrr_tom2)
1223 extra_remove_size = 1294 extra_remove_size =
1224 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; 1295 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
1225 }
1226 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, 1296 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
1227 extra_remove_size); 1297 extra_remove_size);
1298 /*
1299 * [0, 1M) should always be covered by var mtrr with WB,
1300 * and fixed mtrrs should take effect before var mtrr for it
1301 */
1302 nr_range = add_range_with_merge(range, nr_range, 0,
1303 (1ULL<<(20 - PAGE_SHIFT)) - 1);
1304 /* sort the ranges */
1305 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
1306
1228 range_sums = sum_ranges(range, nr_range); 1307 range_sums = sum_ranges(range, nr_range);
1229 printk(KERN_INFO "total RAM covered: %ldM\n", 1308 printk(KERN_INFO "total RAM covered: %ldM\n",
1230 range_sums >> (20 - PAGE_SHIFT)); 1309 range_sums >> (20 - PAGE_SHIFT));
1231 1310
1232 if (mtrr_chunk_size && mtrr_gran_size) { 1311 if (mtrr_chunk_size && mtrr_gran_size) {
1233 int num_reg; 1312 int num_reg;
1313 char gran_factor, chunk_factor, lose_factor;
1314 unsigned long gran_base, chunk_base, lose_base;
1234 1315
1235 debug_print = 1; 1316 debug_print++;
1236 /* convert ranges to var ranges state */ 1317 /* convert ranges to var ranges state */
1237 num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size, 1318 num_reg = x86_setup_var_mtrrs(range, nr_range, mtrr_chunk_size,
1238 mtrr_gran_size); 1319 mtrr_gran_size);
@@ -1256,34 +1337,48 @@ static int __init mtrr_cleanup(unsigned address_bits)
1256 result[i].lose_cover_sizek = 1337 result[i].lose_cover_sizek =
1257 (range_sums - range_sums_new) << PSHIFT; 1338 (range_sums - range_sums_new) << PSHIFT;
1258 1339
1259 printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", 1340 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1260 result[i].bad?"*BAD*":" ", result[i].gran_sizek >> 10, 1341 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1261 result[i].chunk_sizek >> 10); 1342 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1262 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ldM \n", 1343 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1344 result[i].bad?"*BAD*":" ",
1345 gran_base, gran_factor, chunk_base, chunk_factor);
1346 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1263 result[i].num_reg, result[i].bad?"-":"", 1347 result[i].num_reg, result[i].bad?"-":"",
1264 result[i].lose_cover_sizek >> 10); 1348 lose_base, lose_factor);
1265 if (!result[i].bad) { 1349 if (!result[i].bad) {
1266 set_var_mtrr_all(address_bits); 1350 set_var_mtrr_all(address_bits);
1267 return 1; 1351 return 1;
1268 } 1352 }
1269 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, " 1353 printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
1270 "will find optimal one\n"); 1354 "will find optimal one\n");
1271 debug_print = 0; 1355 debug_print--;
1272 memset(result, 0, sizeof(result[0])); 1356 memset(result, 0, sizeof(result[0]));
1273 } 1357 }
1274 1358
1275 i = 0; 1359 i = 0;
1276 memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn)); 1360 memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
1277 memset(result, 0, sizeof(result)); 1361 memset(result, 0, sizeof(result));
1278 for (gran_size = (1ULL<<20); gran_size < (1ULL<<32); gran_size <<= 1) { 1362 for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {
1279 for (chunk_size = gran_size; chunk_size < (1ULL<<33); 1363 char gran_factor;
1364 unsigned long gran_base;
1365
1366 if (debug_print)
1367 gran_base = to_size_factor(gran_size >> 10, &gran_factor);
1368
1369 for (chunk_size = gran_size; chunk_size < (1ULL<<32);
1280 chunk_size <<= 1) { 1370 chunk_size <<= 1) {
1281 int num_reg; 1371 int num_reg;
1282 1372
1283 if (debug_print) 1373 if (debug_print) {
1284 printk(KERN_INFO 1374 char chunk_factor;
1285 "\ngran_size: %lldM chunk_size_size: %lldM\n", 1375 unsigned long chunk_base;
1286 gran_size >> 20, chunk_size >> 20); 1376
1377 chunk_base = to_size_factor(chunk_size>>10, &chunk_factor),
1378 printk(KERN_INFO "\n");
1379 printk(KERN_INFO "gran_size: %ld%c chunk_size: %ld%c \n",
1380 gran_base, gran_factor, chunk_base, chunk_factor);
1381 }
1287 if (i >= NUM_RESULT) 1382 if (i >= NUM_RESULT)
1288 continue; 1383 continue;
1289 1384
@@ -1326,12 +1421,18 @@ static int __init mtrr_cleanup(unsigned address_bits)
1326 1421
1327 /* print out all */ 1422 /* print out all */
1328 for (i = 0; i < NUM_RESULT; i++) { 1423 for (i = 0; i < NUM_RESULT; i++) {
1329 printk(KERN_INFO "%sgran_size: %ldM \tchunk_size: %ldM \t", 1424 char gran_factor, chunk_factor, lose_factor;
1330 result[i].bad?"*BAD* ":" ", result[i].gran_sizek >> 10, 1425 unsigned long gran_base, chunk_base, lose_base;
1331 result[i].chunk_sizek >> 10); 1426
1332 printk(KERN_CONT "num_reg: %d \tlose RAM: %s%ldM\n", 1427 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1333 result[i].num_reg, result[i].bad?"-":"", 1428 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1334 result[i].lose_cover_sizek >> 10); 1429 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1430 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
1431 result[i].bad?"*BAD*":" ",
1432 gran_base, gran_factor, chunk_base, chunk_factor);
1433 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
1434 result[i].num_reg, result[i].bad?"-":"",
1435 lose_base, lose_factor);
1335 } 1436 }
1336 1437
1337 /* try to find the optimal index */ 1438 /* try to find the optimal index */
@@ -1339,10 +1440,8 @@ static int __init mtrr_cleanup(unsigned address_bits)
1339 nr_mtrr_spare_reg = num_var_ranges - 1; 1440 nr_mtrr_spare_reg = num_var_ranges - 1;
1340 num_reg_good = -1; 1441 num_reg_good = -1;
1341 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { 1442 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
1342 if (!min_loss_pfn[i]) { 1443 if (!min_loss_pfn[i])
1343 num_reg_good = i; 1444 num_reg_good = i;
1344 break;
1345 }
1346 } 1445 }
1347 1446
1348 index_good = -1; 1447 index_good = -1;
@@ -1358,21 +1457,26 @@ static int __init mtrr_cleanup(unsigned address_bits)
1358 } 1457 }
1359 1458
1360 if (index_good != -1) { 1459 if (index_good != -1) {
1460 char gran_factor, chunk_factor, lose_factor;
1461 unsigned long gran_base, chunk_base, lose_base;
1462
1361 printk(KERN_INFO "Found optimal setting for mtrr clean up\n"); 1463 printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
1362 i = index_good; 1464 i = index_good;
1363 printk(KERN_INFO "gran_size: %ldM \tchunk_size: %ldM \t", 1465 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
1364 result[i].gran_sizek >> 10, 1466 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
1365 result[i].chunk_sizek >> 10); 1467 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
1366 printk(KERN_CONT "num_reg: %d \tlose RAM: %ldM\n", 1468 printk(KERN_INFO "gran_size: %ld%c \tchunk_size: %ld%c \t",
1367 result[i].num_reg, 1469 gran_base, gran_factor, chunk_base, chunk_factor);
1368 result[i].lose_cover_sizek >> 10); 1470 printk(KERN_CONT "num_reg: %d \tlose RAM: %ld%c\n",
1471 result[i].num_reg, lose_base, lose_factor);
1369 /* convert ranges to var ranges state */ 1472 /* convert ranges to var ranges state */
1370 chunk_size = result[i].chunk_sizek; 1473 chunk_size = result[i].chunk_sizek;
1371 chunk_size <<= 10; 1474 chunk_size <<= 10;
1372 gran_size = result[i].gran_sizek; 1475 gran_size = result[i].gran_sizek;
1373 gran_size <<= 10; 1476 gran_size <<= 10;
1374 debug_print = 1; 1477 debug_print++;
1375 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size); 1478 x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
1479 debug_print--;
1376 set_var_mtrr_all(address_bits); 1480 set_var_mtrr_all(address_bits);
1377 return 1; 1481 return 1;
1378 } 1482 }
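
Note on the hunks above: the fixed "%ldM" printouts are replaced by (base, factor) pairs so that sub-megabyte granule and chunk sizes print with a sensible unit. The helper implied by those call sites would have roughly the shape below; this is an illustrative reconstruction from the hunk, not a quote of the in-tree to_size_factor().

/* Illustrative sketch: reduce a size given in KB to a printable value
 * plus a unit suffix, matching the (base, factor) pairs used above. */
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
	unsigned long base = sizek;
	char factor;

	if (base & ((1 << 10) - 1)) {
		factor = 'K';		/* not a whole number of MB: keep KB */
	} else if (base & ((1 << 20) - 1)) {
		factor = 'M';
		base >>= 10;		/* KB -> MB */
	} else {
		factor = 'G';
		base >>= 20;		/* KB -> GB */
	}

	*factorp = factor;
	return base;
}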
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 05cc22dbd4ff..6bff382094f5 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -295,13 +295,19 @@ static int setup_k7_watchdog(unsigned nmi_hz)
295 /* setup the timer */ 295 /* setup the timer */
296 wrmsr(evntsel_msr, evntsel, 0); 296 wrmsr(evntsel_msr, evntsel, 0);
297 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); 297 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
298 apic_write(APIC_LVTPC, APIC_DM_NMI);
299 evntsel |= K7_EVNTSEL_ENABLE;
300 wrmsr(evntsel_msr, evntsel, 0);
301 298
299 /* initialize the wd struct before enabling */
302 wd->perfctr_msr = perfctr_msr; 300 wd->perfctr_msr = perfctr_msr;
303 wd->evntsel_msr = evntsel_msr; 301 wd->evntsel_msr = evntsel_msr;
304 wd->cccr_msr = 0; /* unused */ 302 wd->cccr_msr = 0; /* unused */
303
304 /* ok, everything is initialized, announce that we're set */
305 cpu_nmi_set_wd_enabled();
306
307 apic_write(APIC_LVTPC, APIC_DM_NMI);
308 evntsel |= K7_EVNTSEL_ENABLE;
309 wrmsr(evntsel_msr, evntsel, 0);
310
305 return 1; 311 return 1;
306} 312}
307 313
@@ -379,13 +385,19 @@ static int setup_p6_watchdog(unsigned nmi_hz)
379 wrmsr(evntsel_msr, evntsel, 0); 385 wrmsr(evntsel_msr, evntsel, 0);
380 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 386 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
381 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); 387 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
382 apic_write(APIC_LVTPC, APIC_DM_NMI);
383 evntsel |= P6_EVNTSEL0_ENABLE;
384 wrmsr(evntsel_msr, evntsel, 0);
385 388
389 /* initialize the wd struct before enabling */
386 wd->perfctr_msr = perfctr_msr; 390 wd->perfctr_msr = perfctr_msr;
387 wd->evntsel_msr = evntsel_msr; 391 wd->evntsel_msr = evntsel_msr;
388 wd->cccr_msr = 0; /* unused */ 392 wd->cccr_msr = 0; /* unused */
393
394 /* ok, everything is initialized, announce that we're set */
395 cpu_nmi_set_wd_enabled();
396
397 apic_write(APIC_LVTPC, APIC_DM_NMI);
398 evntsel |= P6_EVNTSEL0_ENABLE;
399 wrmsr(evntsel_msr, evntsel, 0);
400
389 return 1; 401 return 1;
390} 402}
391 403
@@ -432,6 +444,27 @@ static const struct wd_ops p6_wd_ops = {
432#define P4_CCCR_ENABLE (1 << 12) 444#define P4_CCCR_ENABLE (1 << 12)
433#define P4_CCCR_OVF (1 << 31) 445#define P4_CCCR_OVF (1 << 31)
434 446
447#define P4_CONTROLS 18
448static unsigned int p4_controls[18] = {
449 MSR_P4_BPU_CCCR0,
450 MSR_P4_BPU_CCCR1,
451 MSR_P4_BPU_CCCR2,
452 MSR_P4_BPU_CCCR3,
453 MSR_P4_MS_CCCR0,
454 MSR_P4_MS_CCCR1,
455 MSR_P4_MS_CCCR2,
456 MSR_P4_MS_CCCR3,
457 MSR_P4_FLAME_CCCR0,
458 MSR_P4_FLAME_CCCR1,
459 MSR_P4_FLAME_CCCR2,
460 MSR_P4_FLAME_CCCR3,
461 MSR_P4_IQ_CCCR0,
462 MSR_P4_IQ_CCCR1,
463 MSR_P4_IQ_CCCR2,
464 MSR_P4_IQ_CCCR3,
465 MSR_P4_IQ_CCCR4,
466 MSR_P4_IQ_CCCR5,
467};
435/* 468/*
436 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter 469 * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
437 * CRU_ESCR0 (with any non-null event selector) through a complemented 470 * CRU_ESCR0 (with any non-null event selector) through a complemented
@@ -473,6 +506,26 @@ static int setup_p4_watchdog(unsigned nmi_hz)
473 evntsel_msr = MSR_P4_CRU_ESCR0; 506 evntsel_msr = MSR_P4_CRU_ESCR0;
474 cccr_msr = MSR_P4_IQ_CCCR0; 507 cccr_msr = MSR_P4_IQ_CCCR0;
475 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); 508 cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
509
510 /*
511 * If we're on the kdump kernel or other situation, we may
512 * still have other performance counter registers set to
513 * interrupt and they'll keep interrupting forever because
514 * of the P4_CCCR_OVF quirk. So we need to ACK all the
515 * pending interrupts and disable all the registers here,
516 * before reenabling the NMI delivery. Refer to p4_rearm()
517 * about the P4_CCCR_OVF quirk.
518 */
519 if (reset_devices) {
520 unsigned int low, high;
521 int i;
522
523 for (i = 0; i < P4_CONTROLS; i++) {
524 rdmsr(p4_controls[i], low, high);
525 low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
526 wrmsr(p4_controls[i], low, high);
527 }
528 }
476 } else { 529 } else {
477 /* logical cpu 1 */ 530 /* logical cpu 1 */
478 perfctr_msr = MSR_P4_IQ_PERFCTR1; 531 perfctr_msr = MSR_P4_IQ_PERFCTR1;
@@ -499,12 +552,17 @@ static int setup_p4_watchdog(unsigned nmi_hz)
499 wrmsr(evntsel_msr, evntsel, 0); 552 wrmsr(evntsel_msr, evntsel, 0);
500 wrmsr(cccr_msr, cccr_val, 0); 553 wrmsr(cccr_msr, cccr_val, 0);
501 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); 554 write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
502 apic_write(APIC_LVTPC, APIC_DM_NMI); 555
503 cccr_val |= P4_CCCR_ENABLE;
504 wrmsr(cccr_msr, cccr_val, 0);
505 wd->perfctr_msr = perfctr_msr; 556 wd->perfctr_msr = perfctr_msr;
506 wd->evntsel_msr = evntsel_msr; 557 wd->evntsel_msr = evntsel_msr;
507 wd->cccr_msr = cccr_msr; 558 wd->cccr_msr = cccr_msr;
559
560 /* ok, everything is initialized, announce that we're set */
561 cpu_nmi_set_wd_enabled();
562
563 apic_write(APIC_LVTPC, APIC_DM_NMI);
564 cccr_val |= P4_CCCR_ENABLE;
565 wrmsr(cccr_msr, cccr_val, 0);
508 return 1; 566 return 1;
509} 567}
510 568
@@ -620,13 +678,17 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
620 wrmsr(evntsel_msr, evntsel, 0); 678 wrmsr(evntsel_msr, evntsel, 0);
621 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 679 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
622 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); 680 write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
623 apic_write(APIC_LVTPC, APIC_DM_NMI);
624 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
625 wrmsr(evntsel_msr, evntsel, 0);
626 681
627 wd->perfctr_msr = perfctr_msr; 682 wd->perfctr_msr = perfctr_msr;
628 wd->evntsel_msr = evntsel_msr; 683 wd->evntsel_msr = evntsel_msr;
629 wd->cccr_msr = 0; /* unused */ 684 wd->cccr_msr = 0; /* unused */
685
686 /* ok, everything is initialized, announce that we're set */
687 cpu_nmi_set_wd_enabled();
688
689 apic_write(APIC_LVTPC, APIC_DM_NMI);
690 evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
691 wrmsr(evntsel_msr, evntsel, 0);
630 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); 692 intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
631 return 1; 693 return 1;
632} 694}
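
All four watchdog setup paths in this file are changed the same way: fill in the per-cpu bookkeeping and announce readiness before the performance-counter interrupt is routed as an NMI. Condensed into one hedged sketch, the shared ordering looks as follows; 'struct wd_state' and the 'enable_bit' parameter are placeholders for illustration, not kernel API.

/* Sketch of the shared enable ordering after this change. */
static int setup_watchdog_tail(struct wd_state *wd,
			       unsigned int perfctr_msr,
			       unsigned int evntsel_msr,
			       unsigned int evntsel,
			       unsigned int enable_bit)
{
	/* the counter and event select were programmed above, still disabled */

	/* 1) initialize the wd struct before enabling */
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;			/* unused outside P4 */

	/* 2) ok, everything is initialized, announce that we're set */
	cpu_nmi_set_wd_enabled();

	/* 3) only now unmask the PMI as an NMI and turn the counter on,
	 *    so an early overflow cannot hit a half-initialized handler */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= enable_bit;
	wrmsr(evntsel_msr, evntsel, 0);

	return 1;
}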
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8e9cd6a8ec12..6a44d6465991 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -36,7 +36,6 @@
36#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
37#include <linux/major.h> 37#include <linux/major.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/smp_lock.h>
40#include <linux/device.h> 39#include <linux/device.h>
41#include <linux/cpu.h> 40#include <linux/cpu.h>
42#include <linux/notifier.h> 41#include <linux/notifier.h>
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index 15e6c6bc4a46..e90a60ef10c2 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -7,9 +7,8 @@
7 7
8#include <linux/errno.h> 8#include <linux/errno.h>
9#include <linux/crash_dump.h> 9#include <linux/crash_dump.h>
10 10#include <linux/uaccess.h>
11#include <asm/uaccess.h> 11#include <linux/io.h>
12#include <asm/io.h>
13 12
14/** 13/**
15 * copy_oldmem_page - copy one page from "oldmem" 14 * copy_oldmem_page - copy one page from "oldmem"
@@ -25,7 +24,7 @@
25 * in the current kernel. We stitch up a pte, similar to kmap_atomic. 24 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
26 */ 25 */
27ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 26ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf) 27 size_t csize, unsigned long offset, int userbuf)
29{ 28{
30 void *vaddr; 29 void *vaddr;
31 30
@@ -33,14 +32,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
33 return 0; 32 return 0;
34 33
35 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); 34 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
35 if (!vaddr)
36 return -ENOMEM;
36 37
37 if (userbuf) { 38 if (userbuf) {
38 if (copy_to_user(buf, (vaddr + offset), csize)) { 39 if (copy_to_user(buf, vaddr + offset, csize)) {
39 iounmap(vaddr); 40 iounmap(vaddr);
40 return -EFAULT; 41 return -EFAULT;
41 } 42 }
42 } else 43 } else
43 memcpy(buf, (vaddr + offset), csize); 44 memcpy(buf, vaddr + offset, csize);
44 45
45 iounmap(vaddr); 46 iounmap(vaddr);
46 return csize; 47 return csize;
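
The only functional change in this file is the NULL check on the ioremap() return. A minimal sketch of the same pattern; copy_one_oldmem_byte() is a hypothetical reduced example, not kernel API.

/* Hypothetical reduced example of the guarded-mapping pattern. */
static ssize_t copy_one_oldmem_byte(unsigned long pfn, char *out)
{
	void *vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);

	if (!vaddr)
		return -ENOMEM;		/* mapping failed: report, don't oops */

	*out = *(char *)vaddr;
	iounmap(vaddr);
	return 1;
}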
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 11c11b8ec48d..2b69994fd3a8 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -2,26 +2,49 @@
2 * Debug Store support 2 * Debug Store support
3 * 3 *
4 * This provides a low-level interface to the hardware's Debug Store 4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and 5 * feature that is used for branch trace store (BTS) and
6 * precise-event based sampling (PEBS). 6 * precise-event based sampling (PEBS).
7 * 7 *
8 * Different architectures use a different DS layout/pointer size. 8 * It manages:
9 * The below functions therefore work on a void*. 9 * - per-thread and per-cpu allocation of BTS and PEBS
10 * - buffer memory allocation (optional)
11 * - buffer overflow handling
12 * - buffer access
10 * 13 *
14 * It assumes:
15 * - get_task_struct on all parameter tasks
16 * - current is allowed to trace parameter tasks
11 * 17 *
12 * Since there is no user for PEBS, yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 * 18 *
15 * 19 * Copyright (C) 2007-2008 Intel Corporation.
16 * Copyright (C) 2007 Intel Corporation. 20 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */ 21 */
19 22
23
24#ifdef CONFIG_X86_DS
25
20#include <asm/ds.h> 26#include <asm/ds.h>
21 27
22#include <linux/errno.h> 28#include <linux/errno.h>
23#include <linux/string.h> 29#include <linux/string.h>
24#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/sched.h>
32#include <linux/mm.h>
33
34
35/*
36 * The configuration for a particular DS hardware implementation.
37 */
38struct ds_configuration {
39 /* the size of the DS structure in bytes */
40 unsigned char sizeof_ds;
41 /* the size of one pointer-typed field in the DS structure in bytes;
42 this covers the first 8 fields related to buffer management. */
43 unsigned char sizeof_field;
44 /* the size of a BTS/PEBS record in bytes */
45 unsigned char sizeof_rec[2];
46};
47static struct ds_configuration ds_cfg;
25 48
26 49
27/* 50/*
@@ -44,378 +67,747 @@
44 * (interrupt occurs when write pointer passes interrupt pointer) 67 * (interrupt occurs when write pointer passes interrupt pointer)
45 * - value to which counter is reset following counter overflow 68 * - value to which counter is reset following counter overflow
46 * 69 *
47 * On later architectures, the last branch recording hardware uses 70 * Later architectures use 64bit pointers throughout, whereas earlier
48 * 64bit pointers even in 32bit mode. 71 * architectures use 32bit pointers in 32bit mode.
49 *
50 *
51 * Branch Trace Store (BTS) records store information about control
52 * flow changes. They at least provide the following information:
53 * - source linear address
54 * - destination linear address
55 * 72 *
56 * Netburst supported a predicated bit that had been dropped in later
57 * architectures. We do not suppor it.
58 * 73 *
74 * We compute the base address for the first 8 fields based on:
75 * - the field size stored in the DS configuration
76 * - the relative field position
77 * - an offset giving the start of the respective region
59 * 78 *
60 * In order to abstract from the actual DS and BTS layout, we describe 79 * This offset is further used to index various arrays holding
61 * the access to the relevant fields. 80 * information for BTS and PEBS at the respective index.
62 * Thanks to Andi Kleen for proposing this design.
63 * 81 *
64 * The implementation, however, is not as general as it might seem. In 82 * On later 32bit processors, we only access the lower 32bit of the
65 * order to stay somewhat simple and efficient, we assume an 83 * 64bit pointer fields. The upper halves will be zeroed out.
66 * underlying unsigned type (mostly a pointer type) and we expect the
67 * field to be at least as big as that type.
68 */ 84 */
69 85
70/* 86enum ds_field {
71 * A special from_ip address to indicate that the BTS record is an 87 ds_buffer_base = 0,
72 * info record that needs to be interpreted or skipped. 88 ds_index,
73 */ 89 ds_absolute_maximum,
74#define BTS_ESCAPE_ADDRESS (-1) 90 ds_interrupt_threshold,
91};
75 92
76/* 93enum ds_qualifier {
77 * A field access descriptor 94 ds_bts = 0,
78 */ 95 ds_pebs
79struct access_desc {
80 unsigned char offset;
81 unsigned char size;
82}; 96};
83 97
98static inline unsigned long ds_get(const unsigned char *base,
99 enum ds_qualifier qual, enum ds_field field)
100{
101 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
102 return *(unsigned long *)base;
103}
104
105static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
106 enum ds_field field, unsigned long value)
107{
108 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
109 (*(unsigned long *)base) = value;
110}
111
112
84/* 113/*
85 * The configuration for a particular DS/BTS hardware implementation. 114 * Locking is done only for allocating BTS or PEBS resources and for
115 * guarding context and buffer memory allocation.
116 *
117 * Most functions require the current task to own the ds context part
118 * they are going to access. All the locking is done when validating
119 * access to the context.
86 */ 120 */
87struct ds_configuration { 121static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
88 /* the DS configuration */
89 unsigned char sizeof_ds;
90 struct access_desc bts_buffer_base;
91 struct access_desc bts_index;
92 struct access_desc bts_absolute_maximum;
93 struct access_desc bts_interrupt_threshold;
94 /* the BTS configuration */
95 unsigned char sizeof_bts;
96 struct access_desc from_ip;
97 struct access_desc to_ip;
98 /* BTS variants used to store additional information like
99 timestamps */
100 struct access_desc info_type;
101 struct access_desc info_data;
102 unsigned long debugctl_mask;
103};
104 122
105/* 123/*
106 * The global configuration used by the below accessor functions 124 * Validate that the current task is allowed to access the BTS/PEBS
125 * buffer of the parameter task.
126 *
127 * Returns 0, if access is granted; -Eerrno, otherwise.
107 */ 128 */
108static struct ds_configuration ds_cfg; 129static inline int ds_validate_access(struct ds_context *context,
130 enum ds_qualifier qual)
131{
132 if (!context)
133 return -EPERM;
134
135 if (context->owner[qual] == current)
136 return 0;
137
138 return -EPERM;
139}
140
109 141
110/* 142/*
111 * Accessor functions for some DS and BTS fields using the above 143 * We either support (system-wide) per-cpu or per-thread allocation.
112 * global ptrace_bts_cfg. 144 * We distinguish the two based on the task_struct pointer, where a
145 * NULL pointer indicates per-cpu allocation for the current cpu.
146 *
147 * Allocations are use-counted. As soon as resources are allocated,
148 * further allocations must be of the same type (per-cpu or
149 * per-thread). We model this by counting allocations (i.e. the number
150 * of tracers of a certain type) for one type negatively:
151 * =0 no tracers
152 * >0 number of per-thread tracers
153 * <0 number of per-cpu tracers
154 *
155 * The below functions to get and put tracers and to check the
156 * allocation type require the ds_lock to be held by the caller.
157 *
 158 * Tracers essentially give the number of ds contexts for a certain
159 * type of allocation.
113 */ 160 */
114static inline unsigned long get_bts_buffer_base(char *base) 161static long tracers;
162
163static inline void get_tracer(struct task_struct *task)
115{ 164{
116 return *(unsigned long *)(base + ds_cfg.bts_buffer_base.offset); 165 tracers += (task ? 1 : -1);
117} 166}
118static inline void set_bts_buffer_base(char *base, unsigned long value) 167
168static inline void put_tracer(struct task_struct *task)
119{ 169{
120 (*(unsigned long *)(base + ds_cfg.bts_buffer_base.offset)) = value; 170 tracers -= (task ? 1 : -1);
121} 171}
122static inline unsigned long get_bts_index(char *base) 172
173static inline int check_tracer(struct task_struct *task)
123{ 174{
124 return *(unsigned long *)(base + ds_cfg.bts_index.offset); 175 return (task ? (tracers >= 0) : (tracers <= 0));
125} 176}
126static inline void set_bts_index(char *base, unsigned long value) 177
178
179/*
180 * The DS context is either attached to a thread or to a cpu:
181 * - in the former case, the thread_struct contains a pointer to the
182 * attached context.
183 * - in the latter case, we use a static array of per-cpu context
184 * pointers.
185 *
186 * Contexts are use-counted. They are allocated on first access and
187 * deallocated when the last user puts the context.
188 *
189 * We distinguish between an allocating and a non-allocating get of a
190 * context:
191 * - the allocating get is used for requesting BTS/PEBS resources. It
192 * requires the caller to hold the global ds_lock.
193 * - the non-allocating get is used for all other cases. A
194 * non-existing context indicates an error. It acquires and releases
195 * the ds_lock itself for obtaining the context.
196 *
197 * A context and its DS configuration are allocated and deallocated
198 * together. A context always has a DS configuration of the
199 * appropriate size.
200 */
201static DEFINE_PER_CPU(struct ds_context *, system_context);
202
203#define this_system_context per_cpu(system_context, smp_processor_id())
204
205/*
206 * Returns the pointer to the parameter task's context or to the
207 * system-wide context, if task is NULL.
208 *
209 * Increases the use count of the returned context, if not NULL.
210 */
211static inline struct ds_context *ds_get_context(struct task_struct *task)
127{ 212{
128 (*(unsigned long *)(base + ds_cfg.bts_index.offset)) = value; 213 struct ds_context *context;
214
215 spin_lock(&ds_lock);
216
217 context = (task ? task->thread.ds_ctx : this_system_context);
218 if (context)
219 context->count++;
220
221 spin_unlock(&ds_lock);
222
223 return context;
129} 224}
130static inline unsigned long get_bts_absolute_maximum(char *base) 225
226/*
 227 * Same as ds_get_context, but allocates the context and its DS
 228 * structure, if necessary; returns NULL if out of memory.
229 *
230 * pre: requires ds_lock to be held
231 */
232static inline struct ds_context *ds_alloc_context(struct task_struct *task)
131{ 233{
132 return *(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset); 234 struct ds_context **p_context =
235 (task ? &task->thread.ds_ctx : &this_system_context);
236 struct ds_context *context = *p_context;
237
238 if (!context) {
239 context = kzalloc(sizeof(*context), GFP_KERNEL);
240
241 if (!context)
242 return NULL;
243
244 context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
245 if (!context->ds) {
246 kfree(context);
247 return NULL;
248 }
249
250 *p_context = context;
251
252 context->this = p_context;
253 context->task = task;
254
255 if (task)
256 set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
257
258 if (!task || (task == current))
259 wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
260
261 get_tracer(task);
262 }
263
264 context->count++;
265
266 return context;
133} 267}
134static inline void set_bts_absolute_maximum(char *base, unsigned long value) 268
269/*
270 * Decreases the use count of the parameter context, if not NULL.
271 * Deallocates the context, if the use count reaches zero.
272 */
273static inline void ds_put_context(struct ds_context *context)
135{ 274{
136 (*(unsigned long *)(base + ds_cfg.bts_absolute_maximum.offset)) = value; 275 if (!context)
276 return;
277
278 spin_lock(&ds_lock);
279
280 if (--context->count)
281 goto out;
282
283 *(context->this) = NULL;
284
285 if (context->task)
286 clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
287
288 if (!context->task || (context->task == current))
289 wrmsrl(MSR_IA32_DS_AREA, 0);
290
291 put_tracer(context->task);
292
293 /* free any leftover buffers from tracers that did not
294 * deallocate them properly. */
295 kfree(context->buffer[ds_bts]);
296 kfree(context->buffer[ds_pebs]);
297 kfree(context->ds);
298 kfree(context);
299 out:
300 spin_unlock(&ds_lock);
137} 301}
138static inline unsigned long get_bts_interrupt_threshold(char *base) 302
303
304/*
305 * Handle a buffer overflow
306 *
307 * task: the task whose buffers are overflowing;
308 * NULL for a buffer overflow on the current cpu
309 * context: the ds context
310 * qual: the buffer type
311 */
312static void ds_overflow(struct task_struct *task, struct ds_context *context,
313 enum ds_qualifier qual)
139{ 314{
140 return *(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset); 315 if (!context)
316 return;
317
318 if (context->callback[qual])
319 (*context->callback[qual])(task);
320
321 /* todo: do some more overflow handling */
141} 322}
142static inline void set_bts_interrupt_threshold(char *base, unsigned long value) 323
324
325/*
326 * Allocate a non-pageable buffer of the parameter size.
327 * Checks the memory and the locked memory rlimit.
328 *
329 * Returns the buffer, if successful;
330 * NULL, if out of memory or rlimit exceeded.
331 *
332 * size: the requested buffer size in bytes
333 * pages (out): if not NULL, contains the number of pages reserved
334 */
335static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
143{ 336{
144 (*(unsigned long *)(base + ds_cfg.bts_interrupt_threshold.offset)) = value; 337 unsigned long rlim, vm, pgsz;
338 void *buffer;
339
340 pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
341
342 rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
343 vm = current->mm->total_vm + pgsz;
344 if (rlim < vm)
345 return NULL;
346
347 rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
348 vm = current->mm->locked_vm + pgsz;
349 if (rlim < vm)
350 return NULL;
351
352 buffer = kzalloc(size, GFP_KERNEL);
353 if (!buffer)
354 return NULL;
355
356 current->mm->total_vm += pgsz;
357 current->mm->locked_vm += pgsz;
358
359 if (pages)
360 *pages = pgsz;
361
362 return buffer;
145} 363}
146static inline unsigned long get_from_ip(char *base) 364
365static int ds_request(struct task_struct *task, void *base, size_t size,
366 ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
147{ 367{
148 return *(unsigned long *)(base + ds_cfg.from_ip.offset); 368 struct ds_context *context;
369 unsigned long buffer, adj;
370 const unsigned long alignment = (1 << 3);
371 int error = 0;
372
373 if (!ds_cfg.sizeof_ds)
374 return -EOPNOTSUPP;
375
376 /* we require some space to do alignment adjustments below */
377 if (size < (alignment + ds_cfg.sizeof_rec[qual]))
378 return -EINVAL;
379
380 /* buffer overflow notification is not yet implemented */
381 if (ovfl)
382 return -EOPNOTSUPP;
383
384
385 spin_lock(&ds_lock);
386
387 if (!check_tracer(task))
388 return -EPERM;
389
390 error = -ENOMEM;
391 context = ds_alloc_context(task);
392 if (!context)
393 goto out_unlock;
394
395 error = -EALREADY;
396 if (context->owner[qual] == current)
397 goto out_unlock;
398 error = -EPERM;
399 if (context->owner[qual] != NULL)
400 goto out_unlock;
401 context->owner[qual] = current;
402
403 spin_unlock(&ds_lock);
404
405
406 error = -ENOMEM;
407 if (!base) {
408 base = ds_allocate_buffer(size, &context->pages[qual]);
409 if (!base)
410 goto out_release;
411
412 context->buffer[qual] = base;
413 }
414 error = 0;
415
416 context->callback[qual] = ovfl;
417
418 /* adjust the buffer address and size to meet alignment
419 * constraints:
420 * - buffer is double-word aligned
421 * - size is multiple of record size
422 *
423 * We checked the size at the very beginning; we have enough
424 * space to do the adjustment.
425 */
426 buffer = (unsigned long)base;
427
428 adj = ALIGN(buffer, alignment) - buffer;
429 buffer += adj;
430 size -= adj;
431
432 size /= ds_cfg.sizeof_rec[qual];
433 size *= ds_cfg.sizeof_rec[qual];
434
435 ds_set(context->ds, qual, ds_buffer_base, buffer);
436 ds_set(context->ds, qual, ds_index, buffer);
437 ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);
438
439 if (ovfl) {
440 /* todo: select a suitable interrupt threshold */
441 } else
442 ds_set(context->ds, qual,
443 ds_interrupt_threshold, buffer + size + 1);
444
445 /* we keep the context until ds_release */
446 return error;
447
448 out_release:
449 context->owner[qual] = NULL;
450 ds_put_context(context);
451 return error;
452
453 out_unlock:
454 spin_unlock(&ds_lock);
455 ds_put_context(context);
456 return error;
149} 457}
150static inline void set_from_ip(char *base, unsigned long value) 458
459int ds_request_bts(struct task_struct *task, void *base, size_t size,
460 ds_ovfl_callback_t ovfl)
151{ 461{
152 (*(unsigned long *)(base + ds_cfg.from_ip.offset)) = value; 462 return ds_request(task, base, size, ovfl, ds_bts);
153} 463}
154static inline unsigned long get_to_ip(char *base) 464
465int ds_request_pebs(struct task_struct *task, void *base, size_t size,
466 ds_ovfl_callback_t ovfl)
155{ 467{
156 return *(unsigned long *)(base + ds_cfg.to_ip.offset); 468 return ds_request(task, base, size, ovfl, ds_pebs);
157} 469}
158static inline void set_to_ip(char *base, unsigned long value) 470
471static int ds_release(struct task_struct *task, enum ds_qualifier qual)
159{ 472{
160 (*(unsigned long *)(base + ds_cfg.to_ip.offset)) = value; 473 struct ds_context *context;
474 int error;
475
476 context = ds_get_context(task);
477 error = ds_validate_access(context, qual);
478 if (error < 0)
479 goto out;
480
481 kfree(context->buffer[qual]);
482 context->buffer[qual] = NULL;
483
484 current->mm->total_vm -= context->pages[qual];
485 current->mm->locked_vm -= context->pages[qual];
486 context->pages[qual] = 0;
487 context->owner[qual] = NULL;
488
489 /*
490 * we put the context twice:
491 * once for the ds_get_context
492 * once for the corresponding ds_request
493 */
494 ds_put_context(context);
495 out:
496 ds_put_context(context);
497 return error;
161} 498}
162static inline unsigned char get_info_type(char *base) 499
500int ds_release_bts(struct task_struct *task)
163{ 501{
164 return *(unsigned char *)(base + ds_cfg.info_type.offset); 502 return ds_release(task, ds_bts);
165} 503}
166static inline void set_info_type(char *base, unsigned char value) 504
505int ds_release_pebs(struct task_struct *task)
167{ 506{
168 (*(unsigned char *)(base + ds_cfg.info_type.offset)) = value; 507 return ds_release(task, ds_pebs);
169} 508}
170static inline unsigned long get_info_data(char *base) 509
510static int ds_get_index(struct task_struct *task, size_t *pos,
511 enum ds_qualifier qual)
171{ 512{
172 return *(unsigned long *)(base + ds_cfg.info_data.offset); 513 struct ds_context *context;
514 unsigned long base, index;
515 int error;
516
517 context = ds_get_context(task);
518 error = ds_validate_access(context, qual);
519 if (error < 0)
520 goto out;
521
522 base = ds_get(context->ds, qual, ds_buffer_base);
523 index = ds_get(context->ds, qual, ds_index);
524
525 error = ((index - base) / ds_cfg.sizeof_rec[qual]);
526 if (pos)
527 *pos = error;
528 out:
529 ds_put_context(context);
530 return error;
173} 531}
174static inline void set_info_data(char *base, unsigned long value) 532
533int ds_get_bts_index(struct task_struct *task, size_t *pos)
175{ 534{
176 (*(unsigned long *)(base + ds_cfg.info_data.offset)) = value; 535 return ds_get_index(task, pos, ds_bts);
177} 536}
178 537
538int ds_get_pebs_index(struct task_struct *task, size_t *pos)
539{
540 return ds_get_index(task, pos, ds_pebs);
541}
179 542
180int ds_allocate(void **dsp, size_t bts_size_in_bytes) 543static int ds_get_end(struct task_struct *task, size_t *pos,
544 enum ds_qualifier qual)
181{ 545{
182 size_t bts_size_in_records; 546 struct ds_context *context;
183 unsigned long bts; 547 unsigned long base, end;
184 void *ds; 548 int error;
549
550 context = ds_get_context(task);
551 error = ds_validate_access(context, qual);
552 if (error < 0)
553 goto out;
554
555 base = ds_get(context->ds, qual, ds_buffer_base);
556 end = ds_get(context->ds, qual, ds_absolute_maximum);
557
558 error = ((end - base) / ds_cfg.sizeof_rec[qual]);
559 if (pos)
560 *pos = error;
561 out:
562 ds_put_context(context);
563 return error;
564}
185 565
186 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 566int ds_get_bts_end(struct task_struct *task, size_t *pos)
187 return -EOPNOTSUPP; 567{
568 return ds_get_end(task, pos, ds_bts);
569}
188 570
189 if (bts_size_in_bytes < 0) 571int ds_get_pebs_end(struct task_struct *task, size_t *pos)
190 return -EINVAL; 572{
573 return ds_get_end(task, pos, ds_pebs);
574}
191 575
192 bts_size_in_records = 576static int ds_access(struct task_struct *task, size_t index,
193 bts_size_in_bytes / ds_cfg.sizeof_bts; 577 const void **record, enum ds_qualifier qual)
194 bts_size_in_bytes = 578{
195 bts_size_in_records * ds_cfg.sizeof_bts; 579 struct ds_context *context;
580 unsigned long base, idx;
581 int error;
196 582
197 if (bts_size_in_bytes <= 0) 583 if (!record)
198 return -EINVAL; 584 return -EINVAL;
199 585
200 bts = (unsigned long)kzalloc(bts_size_in_bytes, GFP_KERNEL); 586 context = ds_get_context(task);
201 587 error = ds_validate_access(context, qual);
202 if (!bts) 588 if (error < 0)
203 return -ENOMEM; 589 goto out;
204 590
205 ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); 591 base = ds_get(context->ds, qual, ds_buffer_base);
592 idx = base + (index * ds_cfg.sizeof_rec[qual]);
206 593
207 if (!ds) { 594 error = -EINVAL;
208 kfree((void *)bts); 595 if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
209 return -ENOMEM; 596 goto out;
210 }
211
212 set_bts_buffer_base(ds, bts);
213 set_bts_index(ds, bts);
214 set_bts_absolute_maximum(ds, bts + bts_size_in_bytes);
215 set_bts_interrupt_threshold(ds, bts + bts_size_in_bytes + 1);
216 597
217 *dsp = ds; 598 *record = (const void *)idx;
218 return 0; 599 error = ds_cfg.sizeof_rec[qual];
600 out:
601 ds_put_context(context);
602 return error;
219} 603}
220 604
221int ds_free(void **dsp) 605int ds_access_bts(struct task_struct *task, size_t index, const void **record)
222{ 606{
223 if (*dsp) { 607 return ds_access(task, index, record, ds_bts);
224 kfree((void *)get_bts_buffer_base(*dsp));
225 kfree(*dsp);
226 *dsp = NULL;
227 }
228 return 0;
229} 608}
230 609
231int ds_get_bts_size(void *ds) 610int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
232{ 611{
233 int size_in_bytes; 612 return ds_access(task, index, record, ds_pebs);
234
235 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
236 return -EOPNOTSUPP;
237
238 if (!ds)
239 return 0;
240
241 size_in_bytes =
242 get_bts_absolute_maximum(ds) -
243 get_bts_buffer_base(ds);
244 return size_in_bytes;
245} 613}
246 614
247int ds_get_bts_end(void *ds) 615static int ds_write(struct task_struct *task, const void *record, size_t size,
616 enum ds_qualifier qual, int force)
248{ 617{
249 int size_in_bytes = ds_get_bts_size(ds); 618 struct ds_context *context;
250 619 int error;
251 if (size_in_bytes <= 0)
252 return size_in_bytes;
253 620
254 return size_in_bytes / ds_cfg.sizeof_bts; 621 if (!record)
255} 622 return -EINVAL;
256 623
257int ds_get_bts_index(void *ds) 624 error = -EPERM;
258{ 625 context = ds_get_context(task);
259 int index_offset_in_bytes; 626 if (!context)
627 goto out;
260 628
261 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 629 if (!force) {
262 return -EOPNOTSUPP; 630 error = ds_validate_access(context, qual);
631 if (error < 0)
632 goto out;
633 }
263 634
264 index_offset_in_bytes = 635 error = 0;
265 get_bts_index(ds) - 636 while (size) {
266 get_bts_buffer_base(ds); 637 unsigned long base, index, end, write_end, int_th;
638 unsigned long write_size, adj_write_size;
639
640 /*
641 * write as much as possible without producing an
642 * overflow interrupt.
643 *
644 * interrupt_threshold must either be
645 * - bigger than absolute_maximum or
646 * - point to a record between buffer_base and absolute_maximum
647 *
648 * index points to a valid record.
649 */
650 base = ds_get(context->ds, qual, ds_buffer_base);
651 index = ds_get(context->ds, qual, ds_index);
652 end = ds_get(context->ds, qual, ds_absolute_maximum);
653 int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
654
655 write_end = min(end, int_th);
656
657 /* if we are already beyond the interrupt threshold,
658 * we fill the entire buffer */
659 if (write_end <= index)
660 write_end = end;
661
662 if (write_end <= index)
663 goto out;
664
665 write_size = min((unsigned long) size, write_end - index);
666 memcpy((void *)index, record, write_size);
667
668 record = (const char *)record + write_size;
669 size -= write_size;
670 error += write_size;
671
672 adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
673 adj_write_size *= ds_cfg.sizeof_rec[qual];
674
675 /* zero out trailing bytes */
676 memset((char *)index + write_size, 0,
677 adj_write_size - write_size);
678 index += adj_write_size;
679
680 if (index >= end)
681 index = base;
682 ds_set(context->ds, qual, ds_index, index);
683
684 if (index >= int_th)
685 ds_overflow(task, context, qual);
686 }
267 687
268 return index_offset_in_bytes / ds_cfg.sizeof_bts; 688 out:
689 ds_put_context(context);
690 return error;
269} 691}
270 692
271int ds_set_overflow(void *ds, int method) 693int ds_write_bts(struct task_struct *task, const void *record, size_t size)
272{ 694{
273 switch (method) { 695 return ds_write(task, record, size, ds_bts, /* force = */ 0);
274 case DS_O_SIGNAL:
275 return -EOPNOTSUPP;
276 case DS_O_WRAP:
277 return 0;
278 default:
279 return -EINVAL;
280 }
281} 696}
282 697
283int ds_get_overflow(void *ds) 698int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
284{ 699{
285 return DS_O_WRAP; 700 return ds_write(task, record, size, ds_pebs, /* force = */ 0);
286} 701}
287 702
288int ds_clear(void *ds) 703int ds_unchecked_write_bts(struct task_struct *task,
704 const void *record, size_t size)
289{ 705{
290 int bts_size = ds_get_bts_size(ds); 706 return ds_write(task, record, size, ds_bts, /* force = */ 1);
291 unsigned long bts_base;
292
293 if (bts_size <= 0)
294 return bts_size;
295
296 bts_base = get_bts_buffer_base(ds);
297 memset((void *)bts_base, 0, bts_size);
298
299 set_bts_index(ds, bts_base);
300 return 0;
301} 707}
302 708
303int ds_read_bts(void *ds, int index, struct bts_struct *out) 709int ds_unchecked_write_pebs(struct task_struct *task,
710 const void *record, size_t size)
304{ 711{
305 void *bts; 712 return ds_write(task, record, size, ds_pebs, /* force = */ 1);
713}
306 714
307 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts) 715static int ds_reset_or_clear(struct task_struct *task,
308 return -EOPNOTSUPP; 716 enum ds_qualifier qual, int clear)
717{
718 struct ds_context *context;
719 unsigned long base, end;
720 int error;
309 721
310 if (index < 0) 722 context = ds_get_context(task);
311 return -EINVAL; 723 error = ds_validate_access(context, qual);
724 if (error < 0)
725 goto out;
312 726
313 if (index >= ds_get_bts_size(ds)) 727 base = ds_get(context->ds, qual, ds_buffer_base);
314 return -EINVAL; 728 end = ds_get(context->ds, qual, ds_absolute_maximum);
315 729
316 bts = (void *)(get_bts_buffer_base(ds) + (index * ds_cfg.sizeof_bts)); 730 if (clear)
731 memset((void *)base, 0, end - base);
317 732
318 memset(out, 0, sizeof(*out)); 733 ds_set(context->ds, qual, ds_index, base);
319 if (get_from_ip(bts) == BTS_ESCAPE_ADDRESS) {
320 out->qualifier = get_info_type(bts);
321 out->variant.jiffies = get_info_data(bts);
322 } else {
323 out->qualifier = BTS_BRANCH;
324 out->variant.lbr.from_ip = get_from_ip(bts);
325 out->variant.lbr.to_ip = get_to_ip(bts);
326 }
327 734
328 return sizeof(*out);; 735 error = 0;
736 out:
737 ds_put_context(context);
738 return error;
329} 739}
330 740
331int ds_write_bts(void *ds, const struct bts_struct *in) 741int ds_reset_bts(struct task_struct *task)
332{ 742{
333 unsigned long bts; 743 return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
334 744}
335 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
336 return -EOPNOTSUPP;
337
338 if (ds_get_bts_size(ds) <= 0)
339 return -ENXIO;
340 745
341 bts = get_bts_index(ds); 746int ds_reset_pebs(struct task_struct *task)
747{
748 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
749}
342 750
343 memset((void *)bts, 0, ds_cfg.sizeof_bts); 751int ds_clear_bts(struct task_struct *task)
344 switch (in->qualifier) { 752{
345 case BTS_INVALID: 753 return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
346 break; 754}
347 755
348 case BTS_BRANCH: 756int ds_clear_pebs(struct task_struct *task)
349 set_from_ip((void *)bts, in->variant.lbr.from_ip); 757{
350 set_to_ip((void *)bts, in->variant.lbr.to_ip); 758 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
351 break; 759}
352 760
353 case BTS_TASK_ARRIVES: 761int ds_get_pebs_reset(struct task_struct *task, u64 *value)
354 case BTS_TASK_DEPARTS: 762{
355 set_from_ip((void *)bts, BTS_ESCAPE_ADDRESS); 763 struct ds_context *context;
356 set_info_type((void *)bts, in->qualifier); 764 int error;
357 set_info_data((void *)bts, in->variant.jiffies);
358 break;
359 765
360 default: 766 if (!value)
361 return -EINVAL; 767 return -EINVAL;
362 }
363 768
364 bts = bts + ds_cfg.sizeof_bts; 769 context = ds_get_context(task);
365 if (bts >= get_bts_absolute_maximum(ds)) 770 error = ds_validate_access(context, ds_pebs);
366 bts = get_bts_buffer_base(ds); 771 if (error < 0)
367 set_bts_index(ds, bts); 772 goto out;
368 773
369 return ds_cfg.sizeof_bts; 774 *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));
775
776 error = 0;
777 out:
778 ds_put_context(context);
779 return error;
370} 780}
371 781
372unsigned long ds_debugctl_mask(void) 782int ds_set_pebs_reset(struct task_struct *task, u64 value)
373{ 783{
374 return ds_cfg.debugctl_mask; 784 struct ds_context *context;
375} 785 int error;
376 786
377#ifdef __i386__ 787 context = ds_get_context(task);
378static const struct ds_configuration ds_cfg_netburst = { 788 error = ds_validate_access(context, ds_pebs);
379 .sizeof_ds = 9 * 4, 789 if (error < 0)
380 .bts_buffer_base = { 0, 4 }, 790 goto out;
381 .bts_index = { 4, 4 },
382 .bts_absolute_maximum = { 8, 4 },
383 .bts_interrupt_threshold = { 12, 4 },
384 .sizeof_bts = 3 * 4,
385 .from_ip = { 0, 4 },
386 .to_ip = { 4, 4 },
387 .info_type = { 4, 1 },
388 .info_data = { 8, 4 },
389 .debugctl_mask = (1<<2)|(1<<3)
390};
391 791
392static const struct ds_configuration ds_cfg_pentium_m = { 792 *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;
393 .sizeof_ds = 9 * 4, 793
394 .bts_buffer_base = { 0, 4 }, 794 error = 0;
395 .bts_index = { 4, 4 }, 795 out:
396 .bts_absolute_maximum = { 8, 4 }, 796 ds_put_context(context);
397 .bts_interrupt_threshold = { 12, 4 }, 797 return error;
398 .sizeof_bts = 3 * 4, 798}
399 .from_ip = { 0, 4 }, 799
400 .to_ip = { 4, 4 }, 800static const struct ds_configuration ds_cfg_var = {
401 .info_type = { 4, 1 }, 801 .sizeof_ds = sizeof(long) * 12,
402 .info_data = { 8, 4 }, 802 .sizeof_field = sizeof(long),
403 .debugctl_mask = (1<<6)|(1<<7) 803 .sizeof_rec[ds_bts] = sizeof(long) * 3,
804 .sizeof_rec[ds_pebs] = sizeof(long) * 10
404}; 805};
405#endif /* _i386_ */ 806static const struct ds_configuration ds_cfg_64 = {
406 807 .sizeof_ds = 8 * 12,
407static const struct ds_configuration ds_cfg_core2 = { 808 .sizeof_field = 8,
408 .sizeof_ds = 9 * 8, 809 .sizeof_rec[ds_bts] = 8 * 3,
409 .bts_buffer_base = { 0, 8 }, 810 .sizeof_rec[ds_pebs] = 8 * 10
410 .bts_index = { 8, 8 },
411 .bts_absolute_maximum = { 16, 8 },
412 .bts_interrupt_threshold = { 24, 8 },
413 .sizeof_bts = 3 * 8,
414 .from_ip = { 0, 8 },
415 .to_ip = { 8, 8 },
416 .info_type = { 8, 1 },
417 .info_data = { 16, 8 },
418 .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
419}; 811};
420 812
421static inline void 813static inline void
@@ -429,14 +821,13 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
429 switch (c->x86) { 821 switch (c->x86) {
430 case 0x6: 822 case 0x6:
431 switch (c->x86_model) { 823 switch (c->x86_model) {
432#ifdef __i386__
433 case 0xD: 824 case 0xD:
434 case 0xE: /* Pentium M */ 825 case 0xE: /* Pentium M */
435 ds_configure(&ds_cfg_pentium_m); 826 ds_configure(&ds_cfg_var);
436 break; 827 break;
437#endif /* _i386_ */
438 case 0xF: /* Core2 */ 828 case 0xF: /* Core2 */
439 ds_configure(&ds_cfg_core2); 829 case 0x1C: /* Atom */
830 ds_configure(&ds_cfg_64);
440 break; 831 break;
441 default: 832 default:
442 /* sorry, don't know about them */ 833 /* sorry, don't know about them */
@@ -445,13 +836,11 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
445 break; 836 break;
446 case 0xF: 837 case 0xF:
447 switch (c->x86_model) { 838 switch (c->x86_model) {
448#ifdef __i386__
449 case 0x0: 839 case 0x0:
450 case 0x1: 840 case 0x1:
451 case 0x2: /* Netburst */ 841 case 0x2: /* Netburst */
452 ds_configure(&ds_cfg_netburst); 842 ds_configure(&ds_cfg_var);
453 break; 843 break;
454#endif /* _i386_ */
455 default: 844 default:
456 /* sorry, don't know about them */ 845 /* sorry, don't know about them */
457 break; 846 break;
@@ -462,3 +851,14 @@ void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
462 break; 851 break;
463 } 852 }
464} 853}
854
855void ds_free(struct ds_context *context)
856{
857 /* This is called when the task owning the parameter context
858 * is dying. There should not be any user of that context left
859 * to disturb us, anymore. */
860 unsigned long leftovers = context->count;
861 while (leftovers--)
862 ds_put_context(context);
863}
864#endif /* CONFIG_X86_DS */
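
The rewritten ds.c addresses every DS management field through ds_get()/ds_set(), which treat the DS save area as an array of pointer-sized slots: four buffer-management fields per qualifier, BTS first, then PEBS. The stand-alone sketch below only reproduces that offset arithmetic, assuming the 64-bit layout (sizeof_field == 8); it is not part of the patch.

#include <stdio.h>
#include <stddef.h>

enum ds_field { ds_buffer_base, ds_index, ds_absolute_maximum,
		ds_interrupt_threshold };
enum ds_qualifier { ds_bts, ds_pebs };

/* same arithmetic as ds_get()/ds_set(): field + 4 slots per qualifier */
static size_t ds_field_offset(size_t sizeof_field,
			      enum ds_qualifier qual, enum ds_field field)
{
	return sizeof_field * (field + 4 * qual);
}

int main(void)
{
	printf("BTS index:             %zu\n",
	       ds_field_offset(8, ds_bts, ds_index));			/* 8 */
	printf("PEBS absolute maximum: %zu\n",
	       ds_field_offset(8, ds_pebs, ds_absolute_maximum));	/* 48 */
	printf("PEBS reset value:      %zu\n", (size_t)(8 * 8));	/* 64, cf. ds_get_pebs_reset() */
	return 0;
}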
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 06cc8d4254b1..945a31cdd81f 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -414,9 +414,11 @@ void __init efi_init(void)
414 if (memmap.map == NULL) 414 if (memmap.map == NULL)
415 printk(KERN_ERR "Could not map the EFI memory map!\n"); 415 printk(KERN_ERR "Could not map the EFI memory map!\n");
416 memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size); 416 memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
417
417 if (memmap.desc_size != sizeof(efi_memory_desc_t)) 418 if (memmap.desc_size != sizeof(efi_memory_desc_t))
418 printk(KERN_WARNING "Kernel-defined memdesc" 419 printk(KERN_WARNING
419 "doesn't match the one from EFI!\n"); 420 "Kernel-defined memdesc doesn't match the one from EFI!\n");
421
420 if (add_efi_memmap) 422 if (add_efi_memmap)
421 do_add_efi_memmap(); 423 do_add_efi_memmap();
422 424
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 89434d439605..cf3a0b2d0059 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -275,9 +275,9 @@ ENTRY(native_usergs_sysret64)
275ENTRY(ret_from_fork) 275ENTRY(ret_from_fork)
276 CFI_DEFAULT_STACK 276 CFI_DEFAULT_STACK
277 push kernel_eflags(%rip) 277 push kernel_eflags(%rip)
278 CFI_ADJUST_CFA_OFFSET 4 278 CFI_ADJUST_CFA_OFFSET 8
279 popf # reset kernel eflags 279 popf # reset kernel eflags
280 CFI_ADJUST_CFA_OFFSET -4 280 CFI_ADJUST_CFA_OFFSET -8
281 call schedule_tail 281 call schedule_tail
282 GET_THREAD_INFO(%rcx) 282 GET_THREAD_INFO(%rcx)
283 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) 283 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 9bfc4d72fb2e..d16084f90649 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -108,12 +108,11 @@ void __init x86_64_start_kernel(char * real_mode_data)
108 } 108 }
109 load_idt((const struct desc_ptr *)&idt_descr); 109 load_idt((const struct desc_ptr *)&idt_descr);
110 110
111 early_printk("Kernel alive\n"); 111 if (console_loglevel == 10)
112 early_printk("Kernel alive\n");
112 113
113 x86_64_init_pda(); 114 x86_64_init_pda();
114 115
115 early_printk("Kernel really alive\n");
116
117 x86_64_start_reservations(real_mode_data); 116 x86_64_start_reservations(real_mode_data);
118} 117}
119 118
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 50e5e4a31c85..191914302744 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/thread_info.h> 15#include <linux/thread_info.h>
16#include <linux/syscalls.h> 16#include <linux/syscalls.h>
17#include <asm/syscalls.h>
17 18
18/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ 19/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
19static void set_bitmap(unsigned long *bitmap, unsigned int base, 20static void set_bitmap(unsigned long *bitmap, unsigned int base,
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 3f7537b669d3..f1c688e46f35 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
@@ -20,6 +20,8 @@
20 20
21#ifdef CONFIG_X86_32 21#ifdef CONFIG_X86_32
22#include <mach_apic.h> 22#include <mach_apic.h>
23#include <mach_ipi.h>
24
23/* 25/*
24 * the following functions deal with sending IPIs between CPUs. 26 * the following functions deal with sending IPIs between CPUs.
25 * 27 *
@@ -147,7 +149,6 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
147} 149}
148 150
149/* must come after the send_IPI functions above for inlining */ 151/* must come after the send_IPI functions above for inlining */
150#include <mach_ipi.h>
151static int convert_apicid_to_cpu(int apic_id) 152static int convert_apicid_to_cpu(int apic_id)
152{ 153{
153 int i; 154 int i;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1cf8c1fcc088..b71e02d42f4f 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -325,7 +325,7 @@ skip:
325 for_each_online_cpu(j) 325 for_each_online_cpu(j)
326 seq_printf(p, "%10u ", 326 seq_printf(p, "%10u ",
327 per_cpu(irq_stat,j).irq_call_count); 327 per_cpu(irq_stat,j).irq_call_count);
328 seq_printf(p, " function call interrupts\n"); 328 seq_printf(p, " Function call interrupts\n");
329 seq_printf(p, "TLB: "); 329 seq_printf(p, "TLB: ");
330 for_each_online_cpu(j) 330 for_each_online_cpu(j)
331 seq_printf(p, "%10u ", 331 seq_printf(p, "%10u ",
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 1f78b238d8d2..f065fe9071b9 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -129,7 +129,7 @@ skip:
129 seq_printf(p, "CAL: "); 129 seq_printf(p, "CAL: ");
130 for_each_online_cpu(j) 130 for_each_online_cpu(j)
131 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count); 131 seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
132 seq_printf(p, " function call interrupts\n"); 132 seq_printf(p, " Function call interrupts\n");
133 seq_printf(p, "TLB: "); 133 seq_printf(p, "TLB: ");
134 for_each_online_cpu(j) 134 for_each_online_cpu(j)
135 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count); 135 seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8b7a3cf37d2b..478bca986eca 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -178,7 +178,7 @@ static void kvm_flush_tlb(void)
178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb); 178 kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
179} 179}
180 180
181static void kvm_release_pt(u32 pfn) 181static void kvm_release_pt(unsigned long pfn)
182{ 182{
183 struct kvm_mmu_op_release_pt rpt = { 183 struct kvm_mmu_op_release_pt rpt = {
184 .header.op = KVM_MMU_OP_RELEASE_PT, 184 .header.op = KVM_MMU_OP_RELEASE_PT,
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b68e21f06f4f..0ed5f939b905 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -18,6 +18,7 @@
18#include <asm/ldt.h> 18#include <asm/ldt.h>
19#include <asm/desc.h> 19#include <asm/desc.h>
20#include <asm/mmu_context.h> 20#include <asm/mmu_context.h>
21#include <asm/syscalls.h>
21 22
22#ifdef CONFIG_SMP 23#ifdef CONFIG_SMP
23static void flush_ldt(void *current_mm) 24static void flush_ldt(void *current_mm)
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index abb78a2cc4ad..2c97f07f1c2c 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -299,6 +299,15 @@ void acpi_nmi_disable(void)
299 on_each_cpu(__acpi_nmi_disable, NULL, 1); 299 on_each_cpu(__acpi_nmi_disable, NULL, 1);
300} 300}
301 301
302/*
303 * This function is called as soon the LAPIC NMI watchdog driver has everything
304 * in place and it's ready to check if the NMIs belong to the NMI watchdog
305 */
306void cpu_nmi_set_wd_enabled(void)
307{
308 __get_cpu_var(wd_enabled) = 1;
309}
310
302void setup_apic_nmi_watchdog(void *unused) 311void setup_apic_nmi_watchdog(void *unused)
303{ 312{
304 if (__get_cpu_var(wd_enabled)) 313 if (__get_cpu_var(wd_enabled))
@@ -311,8 +320,6 @@ void setup_apic_nmi_watchdog(void *unused)
311 320
312 switch (nmi_watchdog) { 321 switch (nmi_watchdog) {
313 case NMI_LOCAL_APIC: 322 case NMI_LOCAL_APIC:
314 /* enable it before to avoid race with handler */
315 __get_cpu_var(wd_enabled) = 1;
316 if (lapic_watchdog_init(nmi_hz) < 0) { 323 if (lapic_watchdog_init(nmi_hz) < 0) {
317 __get_cpu_var(wd_enabled) = 0; 324 __get_cpu_var(wd_enabled) = 0;
318 return; 325 return;
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 3e6672274807..7a13fac63a1f 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -190,12 +190,12 @@ EXPORT_SYMBOL_GPL(olpc_ec_cmd);
190static void __init platform_detect(void) 190static void __init platform_detect(void)
191{ 191{
192 size_t propsize; 192 size_t propsize;
193 u32 rev; 193 __be32 rev;
194 194
195 if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4, 195 if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4,
196 &propsize) || propsize != 4) { 196 &propsize) || propsize != 4) {
197 printk(KERN_ERR "ofw: getprop call failed!\n"); 197 printk(KERN_ERR "ofw: getprop call failed!\n");
198 rev = 0; 198 rev = cpu_to_be32(0);
199 } 199 }
200 olpc_platform_info.boardrev = be32_to_cpu(rev); 200 olpc_platform_info.boardrev = be32_to_cpu(rev);
201} 201}
@@ -203,7 +203,7 @@ static void __init platform_detect(void)
203static void __init platform_detect(void) 203static void __init platform_detect(void)
204{ 204{
205 /* stopgap until OFW support is added to the kernel */ 205 /* stopgap until OFW support is added to the kernel */
206 olpc_platform_info.boardrev = be32_to_cpu(0xc2); 206 olpc_platform_info.boardrev = 0xc2;
207} 207}
208#endif 208#endif
209 209
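
The olpc.c hunk is purely an endianness-annotation fix: the firmware value stays in a __be32 and is converted exactly once with be32_to_cpu() at the point of use. A hedged sketch of that pattern; ofw_getprop_board_revision() is a made-up wrapper for illustration only.

/* Sketch only; ofw_getprop_board_revision() is a hypothetical helper. */
static u32 read_board_rev(void)
{
	__be32 rev;

	if (ofw_getprop_board_revision(&rev))
		rev = cpu_to_be32(0);	/* keep the fallback big-endian too */

	return be32_to_cpu(rev);	/* single, explicit conversion point */
}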
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 300da17e61cb..e2f43768723a 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
330#endif 330#endif
331 .wbinvd = native_wbinvd, 331 .wbinvd = native_wbinvd,
332 .read_msr = native_read_msr_safe, 332 .read_msr = native_read_msr_safe,
333 .read_msr_amd = native_read_msr_amd_safe,
333 .write_msr = native_write_msr_safe, 334 .write_msr = native_write_msr_safe,
334 .read_tsc = native_read_tsc, 335 .read_tsc = native_read_tsc,
335 .read_pmc = native_read_pmc, 336 .read_pmc = native_read_pmc,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 58262218781b..9fe644f4861d 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -23,7 +23,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
23 start = start_##ops##_##x; \ 23 start = start_##ops##_##x; \
24 end = end_##ops##_##x; \ 24 end = end_##ops##_##x; \
25 goto patch_site 25 goto patch_site
26 switch(type) { 26 switch (type) {
27 PATCH_SITE(pv_irq_ops, irq_disable); 27 PATCH_SITE(pv_irq_ops, irq_disable);
28 PATCH_SITE(pv_irq_ops, irq_enable); 28 PATCH_SITE(pv_irq_ops, irq_enable);
29 PATCH_SITE(pv_irq_ops, restore_fl); 29 PATCH_SITE(pv_irq_ops, restore_fl);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 87d4d6964ec2..f704cb51ff82 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -82,7 +82,7 @@ void __init dma32_reserve_bootmem(void)
82 * using 512M as goal 82 * using 512M as goal
83 */ 83 */
84 align = 64ULL<<20; 84 align = 64ULL<<20;
85 size = round_up(dma32_bootmem_size, align); 85 size = roundup(dma32_bootmem_size, align);
86 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, 86 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
87 512ULL<<20); 87 512ULL<<20);
88 if (dma32_bootmem_ptr) 88 if (dma32_bootmem_ptr)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index be33a5442d82..1a895a582534 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -82,7 +82,8 @@ AGPEXTERN __u32 *agp_gatt_table;
82static unsigned long next_bit; /* protected by iommu_bitmap_lock */ 82static unsigned long next_bit; /* protected by iommu_bitmap_lock */
83static int need_flush; /* global flush state. set for each gart wrap */ 83static int need_flush; /* global flush state. set for each gart wrap */
84 84
85static unsigned long alloc_iommu(struct device *dev, int size) 85static unsigned long alloc_iommu(struct device *dev, int size,
86 unsigned long align_mask)
86{ 87{
87 unsigned long offset, flags; 88 unsigned long offset, flags;
88 unsigned long boundary_size; 89 unsigned long boundary_size;
@@ -90,16 +91,17 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
 			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-				  size, base_index, boundary_size, 0);
+				  size, base_index, boundary_size, align_mask);
 	if (offset == -1) {
 		need_flush = 1;
 		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-					  size, base_index, boundary_size, 0);
+					  size, base_index, boundary_size,
+					  align_mask);
 	}
 	if (offset != -1) {
 		next_bit = offset+size;
@@ -236,10 +238,10 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-				size_t size, int dir)
+				size_t size, int dir, unsigned long align_mask)
 {
 	unsigned long npages = iommu_num_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(dev, npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
 	int i;
 
 	if (iommu_page == -1) {
@@ -262,7 +264,11 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 static dma_addr_t
 gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 {
-	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
+	dma_addr_t map;
+	unsigned long align_mask;
+
+	align_mask = (1UL << get_order(size)) - 1;
+	map = dma_map_area(dev, paddr, size, dir, align_mask);
 
 	flush_gart();
 
@@ -281,7 +287,8 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
 
-	bus = gart_map_simple(dev, paddr, size, dir);
+	bus = dma_map_area(dev, paddr, size, dir, 0);
+	flush_gart();
 
 	return bus;
 }
@@ -340,7 +347,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		unsigned long addr = sg_phys(s);
 
 		if (nonforced_iommu(dev, addr, s->length)) {
-			addr = dma_map_area(dev, addr, s->length, dir);
+			addr = dma_map_area(dev, addr, s->length, dir, 0);
 			if (addr == bad_dma_address) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +369,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 		  int nelems, struct scatterlist *sout,
 		  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(dev, pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
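
gart_map_simple() now asks alloc_iommu() for a naturally aligned range: for a mapping that spans 2^n pages, the low n bits of the starting IOMMU page index must be clear, which is what align_mask = (1UL << get_order(size)) - 1 expresses. A user-space sketch of that mask computation; get_order() is reimplemented here only for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Rough stand-in for the kernel's get_order(): smallest n such that
 * 2^n pages cover 'size' bytes. */
static int get_order_model(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE;	/* 5 pages */
	unsigned long align_mask = (1UL << get_order_model(size)) - 1;

	/* 5 pages need order 3 (8 pages), so the allocator must return an
	 * offset whose low 3 bits are zero: mask = 0x7. */
	printf("order=%d align_mask=%#lx\n", get_order_model(size), align_mask);
	return 0;
}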
diff --git a/arch/x86/kernel/pcspeaker.c b/arch/x86/kernel/pcspeaker.c
index bc1f2d3ea277..a311ffcaad16 100644
--- a/arch/x86/kernel/pcspeaker.c
+++ b/arch/x86/kernel/pcspeaker.c
@@ -1,20 +1,13 @@
 #include <linux/platform_device.h>
-#include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/init.h>
 
 static __init int add_pcspkr(void)
 {
 	struct platform_device *pd;
-	int ret;
 
-	pd = platform_device_alloc("pcspkr", -1);
-	if (!pd)
-		return -ENOMEM;
+	pd = platform_device_register_simple("pcspkr", -1, NULL, 0);
 
-	ret = platform_device_add(pd);
-	if (ret)
-		platform_device_put(pd);
-
-	return ret;
+	return IS_ERR(pd) ? PTR_ERR(pd) : 0;
 }
 device_initcall(add_pcspkr);
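
The rewrite relies on platform_device_register_simple(), which folds the allocate/add/cleanup sequence into one call and signals failure with an ERR_PTR() value rather than NULL, so a single IS_ERR()/PTR_ERR() check is enough. A hedged sketch of the same registration pattern for a made-up device (the name "demo-dev" and the absence of resources are assumptions, not anything from this patch):

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	/* Allocates, names and registers the device in one step;
	 * returns an ERR_PTR() on failure, never NULL. */
	demo_pdev = platform_device_register_simple("demo-dev", -1, NULL, 0);
	return IS_ERR(demo_pdev) ? PTR_ERR(demo_pdev) : 0;
}

static void __exit demo_exit(void)
{
	platform_device_unregister(demo_pdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");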
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 876e91890777..ec7a2ba9bce8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -185,7 +185,8 @@ static void mwait_idle(void)
 static void poll_idle(void)
 {
 	local_irq_enable();
-	cpu_relax();
+	while (!need_resched())
+		cpu_relax();
 }
 
 /*
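
The poll_idle() fix keeps the CPU spinning in a PAUSE loop until the scheduler actually needs it, instead of testing once and falling straight back out of the idle loop. A user-space model of the same polling pattern (the atomic flag stands in for need_resched(), _mm_pause() for cpu_relax(); this is an illustration, not kernel code):

#include <stdatomic.h>
#include <immintrin.h>	/* _mm_pause() */

/* Spin politely until another thread signals pending work. */
static void poll_until_work(atomic_int *work_pending)
{
	while (!atomic_load_explicit(work_pending, memory_order_acquire))
		_mm_pause();	/* same hint cpu_relax() emits on x86 */
}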
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 31f40b24bf5d..205188db9626 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -37,6 +37,7 @@
 #include <linux/tick.h>
 #include <linux/percpu.h>
 #include <linux/prctl.h>
+#include <linux/dmi.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -56,6 +57,8 @@
 #include <asm/cpu.h>
 #include <asm/kdebug.h>
 #include <asm/idle.h>
+#include <asm/syscalls.h>
+#include <asm/smp.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -161,6 +164,7 @@ void __show_registers(struct pt_regs *regs, int all)
 	unsigned long d0, d1, d2, d3, d6, d7;
 	unsigned long sp;
 	unsigned short ss, gs;
+	const char *board;
 
 	if (user_mode_vm(regs)) {
 		sp = regs->sp;
@@ -173,11 +177,15 @@ void __show_registers(struct pt_regs *regs, int all)
 	}
 
 	printk("\n");
-	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
+
+	board = dmi_get_system_info(DMI_PRODUCT_NAME);
+	if (!board)
+		board = "";
+	printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
 		task_pid_nr(current), current->comm,
 		print_tainted(), init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version);
+		init_utsname()->version, board);
 
 	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
 		(u16)regs->cs, regs->ip, regs->flags,
@@ -277,6 +285,14 @@ void exit_thread(void)
 		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		put_cpu();
 	}
+#ifdef CONFIG_X86_DS
+	/* Free any DS contexts that have not been properly released. */
+	if (unlikely(current->thread.ds_ctx)) {
+		/* we clear debugctl to make sure DS is not used. */
+		update_debugctlmsr(0);
+		ds_free(current->thread.ds_ctx);
+	}
+#endif /* CONFIG_X86_DS */
 }
 
 void flush_thread(void)
@@ -438,6 +454,35 @@ int set_tsc_mode(unsigned int val)
 	return 0;
 }
 
+#ifdef CONFIG_X86_DS
+static int update_debugctl(struct thread_struct *prev,
+			struct thread_struct *next, unsigned long debugctl)
+{
+	unsigned long ds_prev = 0;
+	unsigned long ds_next = 0;
+
+	if (prev->ds_ctx)
+		ds_prev = (unsigned long)prev->ds_ctx->ds;
+	if (next->ds_ctx)
+		ds_next = (unsigned long)next->ds_ctx->ds;
+
+	if (ds_next != ds_prev) {
+		/* we clear debugctl to make sure DS
+		 * is not in use when we change it */
+		debugctl = 0;
+		update_debugctlmsr(0);
+		wrmsr(MSR_IA32_DS_AREA, ds_next, 0);
+	}
+	return debugctl;
+}
+#else
+static int update_debugctl(struct thread_struct *prev,
+			struct thread_struct *next, unsigned long debugctl)
+{
+	return debugctl;
+}
+#endif /* CONFIG_X86_DS */
+
 static noinline void
 __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		 struct tss_struct *tss)
@@ -448,14 +493,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	prev = &prev_p->thread;
 	next = &next_p->thread;
 
-	debugctl = prev->debugctlmsr;
-	if (next->ds_area_msr != prev->ds_area_msr) {
-		/* we clear debugctl to make sure DS
-		 * is not in use when we change it */
-		debugctl = 0;
-		update_debugctlmsr(0);
-		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
-	}
+	debugctl = update_debugctl(prev, next, prev->debugctlmsr);
 
 	if (next->debugctlmsr != debugctl)
 		update_debugctlmsr(next->debugctlmsr);
@@ -479,13 +517,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 			hard_enable_TSC();
 	}
 
-#ifdef X86_BTS
+#ifdef CONFIG_X86_PTRACE_BTS
 	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
 	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
-#endif
+#endif /* CONFIG_X86_PTRACE_BTS */
 
 
 	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
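
Both the 32-bit and 64-bit context-switch paths now follow the same rule: the DS area MSR is rewritten only when the outgoing and incoming tasks actually use different DS contexts, and branch tracing is quiesced (DEBUGCTL cleared) before the buffer pointer changes, so the CPU never logs into a half-switched buffer. A compact model of that rule with stub types and stub MSR writers (everything here is illustrative, not the kernel's API):

#include <stdio.h>

struct ds_context_model { void *ds; };
struct thread_model {
	struct ds_context_model *ds_ctx;
	unsigned long debugctlmsr;
};

/* Stubs standing in for update_debugctlmsr() and the DS-area wrmsr. */
static void write_debugctl(unsigned long v) { printf("DEBUGCTL <- %#lx\n", v); }
static void write_ds_area(unsigned long v)  { printf("DS_AREA  <- %#lx\n", v); }

static void switch_ds(struct thread_model *prev, struct thread_model *next)
{
	unsigned long ds_prev = prev->ds_ctx ? (unsigned long)prev->ds_ctx->ds : 0;
	unsigned long ds_next = next->ds_ctx ? (unsigned long)next->ds_ctx->ds : 0;
	unsigned long debugctl = prev->debugctlmsr;

	if (ds_next != ds_prev) {
		debugctl = 0;
		write_debugctl(0);		/* stop BTS/PEBS first */
		write_ds_area(ds_next);		/* then repoint the DS buffer */
	}
	if (next->debugctlmsr != debugctl)
		write_debugctl(next->debugctlmsr);
}

int main(void)
{
	struct ds_context_model a = { (void *)0x1000 }, b = { (void *)0x2000 };
	struct thread_model prev = { &a, 0x40 }, next = { &b, 0x40 };

	switch_ds(&prev, &next);
	return 0;
}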
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e12e0e4dd256..2a8ccb9238b4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -37,11 +37,11 @@
 #include <linux/kdebug.h>
 #include <linux/tick.h>
 #include <linux/prctl.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
@@ -51,6 +51,7 @@
 #include <asm/proto.h>
 #include <asm/ia32.h>
 #include <asm/idle.h>
+#include <asm/syscalls.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -88,7 +89,7 @@ void exit_idle(void)
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
-#include <asm/nmi.h>
+#include <linux/nmi.h>
 /* We halt the CPU with physical CPU hotplug */
 static inline void play_dead(void)
 {
@@ -153,7 +154,7 @@ void cpu_idle(void)
 }
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs *regs)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
 	unsigned long d0, d1, d2, d3, d6, d7;
@@ -162,59 +163,61 @@ void __show_regs(struct pt_regs * regs)
162 163
163 printk("\n"); 164 printk("\n");
164 print_modules(); 165 print_modules();
165 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 166 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s\n",
166 current->pid, current->comm, print_tainted(), 167 current->pid, current->comm, print_tainted(),
167 init_utsname()->release, 168 init_utsname()->release,
168 (int)strcspn(init_utsname()->version, " "), 169 (int)strcspn(init_utsname()->version, " "),
169 init_utsname()->version); 170 init_utsname()->version);
170 printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); 171 printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
171 printk_address(regs->ip, 1); 172 printk_address(regs->ip, 1);
172 printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, 173 printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
173 regs->flags); 174 regs->sp, regs->flags);
174 printk("RAX: %016lx RBX: %016lx RCX: %016lx\n", 175 printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
175 regs->ax, regs->bx, regs->cx); 176 regs->ax, regs->bx, regs->cx);
176 printk("RDX: %016lx RSI: %016lx RDI: %016lx\n", 177 printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
177 regs->dx, regs->si, regs->di); 178 regs->dx, regs->si, regs->di);
178 printk("RBP: %016lx R08: %016lx R09: %016lx\n", 179 printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
179 regs->bp, regs->r8, regs->r9); 180 regs->bp, regs->r8, regs->r9);
180 printk("R10: %016lx R11: %016lx R12: %016lx\n", 181 printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
181 regs->r10, regs->r11, regs->r12); 182 regs->r10, regs->r11, regs->r12);
182 printk("R13: %016lx R14: %016lx R15: %016lx\n", 183 printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
183 regs->r13, regs->r14, regs->r15); 184 regs->r13, regs->r14, regs->r15);
184 185
185 asm("movl %%ds,%0" : "=r" (ds)); 186 asm("movl %%ds,%0" : "=r" (ds));
186 asm("movl %%cs,%0" : "=r" (cs)); 187 asm("movl %%cs,%0" : "=r" (cs));
187 asm("movl %%es,%0" : "=r" (es)); 188 asm("movl %%es,%0" : "=r" (es));
188 asm("movl %%fs,%0" : "=r" (fsindex)); 189 asm("movl %%fs,%0" : "=r" (fsindex));
189 asm("movl %%gs,%0" : "=r" (gsindex)); 190 asm("movl %%gs,%0" : "=r" (gsindex));
190 191
191 rdmsrl(MSR_FS_BASE, fs); 192 rdmsrl(MSR_FS_BASE, fs);
192 rdmsrl(MSR_GS_BASE, gs); 193 rdmsrl(MSR_GS_BASE, gs);
193 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 194 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
194 195
195 cr0 = read_cr0(); 196 cr0 = read_cr0();
196 cr2 = read_cr2(); 197 cr2 = read_cr2();
197 cr3 = read_cr3(); 198 cr3 = read_cr3();
198 cr4 = read_cr4(); 199 cr4 = read_cr4();
199 200
200 printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 201 printk(KERN_INFO "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
201 fs,fsindex,gs,gsindex,shadowgs); 202 fs, fsindex, gs, gsindex, shadowgs);
202 printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 203 printk(KERN_INFO "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
203 printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); 204 es, cr0);
205 printk(KERN_INFO "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
206 cr4);
204 207
205 get_debugreg(d0, 0); 208 get_debugreg(d0, 0);
206 get_debugreg(d1, 1); 209 get_debugreg(d1, 1);
207 get_debugreg(d2, 2); 210 get_debugreg(d2, 2);
208 printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); 211 printk(KERN_INFO "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
209 get_debugreg(d3, 3); 212 get_debugreg(d3, 3);
210 get_debugreg(d6, 6); 213 get_debugreg(d6, 6);
211 get_debugreg(d7, 7); 214 get_debugreg(d7, 7);
212 printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); 215 printk(KERN_INFO "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
213} 216}
214 217
215void show_regs(struct pt_regs *regs) 218void show_regs(struct pt_regs *regs)
216{ 219{
217 printk("CPU %d:", smp_processor_id()); 220 printk(KERN_INFO "CPU %d:", smp_processor_id());
218 __show_regs(regs); 221 __show_regs(regs);
219 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 222 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
220} 223}
@@ -240,6 +243,14 @@ void exit_thread(void)
 		t->io_bitmap_max = 0;
 		put_cpu();
 	}
+#ifdef CONFIG_X86_DS
+	/* Free any DS contexts that have not been properly released. */
+	if (unlikely(t->ds_ctx)) {
+		/* we clear debugctl to make sure DS is not used. */
+		update_debugctlmsr(0);
+		ds_free(t->ds_ctx);
+	}
+#endif /* CONFIG_X86_DS */
 }
 
 void flush_thread(void)
@@ -315,10 +326,10 @@ void prepare_to_copy(struct task_struct *tsk)
 
 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 		unsigned long unused,
-	struct task_struct * p, struct pt_regs * regs)
+	struct task_struct *p, struct pt_regs *regs)
 {
 	int err;
-	struct pt_regs * childregs;
+	struct pt_regs *childregs;
 	struct task_struct *me = current;
 
 	childregs = ((struct pt_regs *)
@@ -363,10 +374,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
363 if (test_thread_flag(TIF_IA32)) 374 if (test_thread_flag(TIF_IA32))
364 err = do_set_thread_area(p, -1, 375 err = do_set_thread_area(p, -1,
365 (struct user_desc __user *)childregs->si, 0); 376 (struct user_desc __user *)childregs->si, 0);
366 else 377 else
367#endif 378#endif
368 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 379 err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
369 if (err) 380 if (err)
370 goto out; 381 goto out;
371 } 382 }
372 err = 0; 383 err = 0;
@@ -473,13 +484,27 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 	next = &next_p->thread;
 
 	debugctl = prev->debugctlmsr;
-	if (next->ds_area_msr != prev->ds_area_msr) {
-		/* we clear debugctl to make sure DS
-		 * is not in use when we change it */
-		debugctl = 0;
-		update_debugctlmsr(0);
-		wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
+
+#ifdef CONFIG_X86_DS
+	{
+		unsigned long ds_prev = 0, ds_next = 0;
+
+		if (prev->ds_ctx)
+			ds_prev = (unsigned long)prev->ds_ctx->ds;
+		if (next->ds_ctx)
+			ds_next = (unsigned long)next->ds_ctx->ds;
+
+		if (ds_next != ds_prev) {
+			/*
+			 * We clear debugctl to make sure DS
+			 * is not in use when we change it:
+			 */
+			debugctl = 0;
+			update_debugctlmsr(0);
+			wrmsrl(MSR_IA32_DS_AREA, ds_next);
+		}
 	}
+#endif /* CONFIG_X86_DS */
 
 	if (next->debugctlmsr != debugctl)
 		update_debugctlmsr(next->debugctlmsr);
@@ -517,13 +542,13 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
 
-#ifdef X86_BTS
+#ifdef CONFIG_X86_PTRACE_BTS
 	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
 	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
 		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
-#endif
+#endif /* CONFIG_X86_PTRACE_BTS */
 }
 
 /*
@@ -545,7 +570,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter>5)
+	if (next_p->fpu_counter > 5)
 		prefetch(next->xstate);
 
 	/*
@@ -553,13 +578,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
553 */ 578 */
554 load_sp0(tss, next); 579 load_sp0(tss, next);
555 580
556 /* 581 /*
557 * Switch DS and ES. 582 * Switch DS and ES.
558 * This won't pick up thread selector changes, but I guess that is ok. 583 * This won't pick up thread selector changes, but I guess that is ok.
559 */ 584 */
560 savesegment(es, prev->es); 585 savesegment(es, prev->es);
561 if (unlikely(next->es | prev->es)) 586 if (unlikely(next->es | prev->es))
562 loadsegment(es, next->es); 587 loadsegment(es, next->es);
563 588
564 savesegment(ds, prev->ds); 589 savesegment(ds, prev->ds);
565 if (unlikely(next->ds | prev->ds)) 590 if (unlikely(next->ds | prev->ds))
@@ -585,7 +610,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
585 */ 610 */
586 arch_leave_lazy_cpu_mode(); 611 arch_leave_lazy_cpu_mode();
587 612
588 /* 613 /*
589 * Switch FS and GS. 614 * Switch FS and GS.
590 * 615 *
591 * Segment register != 0 always requires a reload. Also 616 * Segment register != 0 always requires a reload. Also
@@ -594,13 +619,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
594 */ 619 */
595 if (unlikely(fsindex | next->fsindex | prev->fs)) { 620 if (unlikely(fsindex | next->fsindex | prev->fs)) {
596 loadsegment(fs, next->fsindex); 621 loadsegment(fs, next->fsindex);
597 /* 622 /*
598 * Check if the user used a selector != 0; if yes 623 * Check if the user used a selector != 0; if yes
599 * clear 64bit base, since overloaded base is always 624 * clear 64bit base, since overloaded base is always
600 * mapped to the Null selector 625 * mapped to the Null selector
601 */ 626 */
602 if (fsindex) 627 if (fsindex)
603 prev->fs = 0; 628 prev->fs = 0;
604 } 629 }
605 /* when next process has a 64bit base use it */ 630 /* when next process has a 64bit base use it */
606 if (next->fs) 631 if (next->fs)
@@ -610,7 +635,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
610 if (unlikely(gsindex | next->gsindex | prev->gs)) { 635 if (unlikely(gsindex | next->gsindex | prev->gs)) {
611 load_gs_index(next->gsindex); 636 load_gs_index(next->gsindex);
612 if (gsindex) 637 if (gsindex)
613 prev->gs = 0; 638 prev->gs = 0;
614 } 639 }
615 if (next->gs) 640 if (next->gs)
616 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 641 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
@@ -619,12 +644,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
619 /* Must be after DS reload */ 644 /* Must be after DS reload */
620 unlazy_fpu(prev_p); 645 unlazy_fpu(prev_p);
621 646
622 /* 647 /*
623 * Switch the PDA and FPU contexts. 648 * Switch the PDA and FPU contexts.
624 */ 649 */
625 prev->usersp = read_pda(oldrsp); 650 prev->usersp = read_pda(oldrsp);
626 write_pda(oldrsp, next->usersp); 651 write_pda(oldrsp, next->usersp);
627 write_pda(pcurrent, next_p); 652 write_pda(pcurrent, next_p);
628 653
629 write_pda(kernelstack, 654 write_pda(kernelstack,
630 (unsigned long)task_stack_page(next_p) + 655 (unsigned long)task_stack_page(next_p) +
@@ -665,7 +690,7 @@ long sys_execve(char __user *name, char __user * __user *argv,
 		char __user * __user *envp, struct pt_regs *regs)
 {
 	long error;
-	char * filename;
+	char *filename;
 
 	filename = getname(name);
 	error = PTR_ERR(filename);
@@ -723,55 +748,55 @@ asmlinkage long sys_vfork(struct pt_regs *regs)
723unsigned long get_wchan(struct task_struct *p) 748unsigned long get_wchan(struct task_struct *p)
724{ 749{
725 unsigned long stack; 750 unsigned long stack;
726 u64 fp,ip; 751 u64 fp, ip;
727 int count = 0; 752 int count = 0;
728 753
729 if (!p || p == current || p->state==TASK_RUNNING) 754 if (!p || p == current || p->state == TASK_RUNNING)
730 return 0; 755 return 0;
731 stack = (unsigned long)task_stack_page(p); 756 stack = (unsigned long)task_stack_page(p);
732 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE) 757 if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
733 return 0; 758 return 0;
734 fp = *(u64 *)(p->thread.sp); 759 fp = *(u64 *)(p->thread.sp);
735 do { 760 do {
736 if (fp < (unsigned long)stack || 761 if (fp < (unsigned long)stack ||
737 fp > (unsigned long)stack+THREAD_SIZE) 762 fp > (unsigned long)stack+THREAD_SIZE)
738 return 0; 763 return 0;
739 ip = *(u64 *)(fp+8); 764 ip = *(u64 *)(fp+8);
740 if (!in_sched_functions(ip)) 765 if (!in_sched_functions(ip))
741 return ip; 766 return ip;
742 fp = *(u64 *)fp; 767 fp = *(u64 *)fp;
743 } while (count++ < 16); 768 } while (count++ < 16);
744 return 0; 769 return 0;
745} 770}
746 771
747long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) 772long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
748{ 773{
749 int ret = 0; 774 int ret = 0;
750 int doit = task == current; 775 int doit = task == current;
751 int cpu; 776 int cpu;
752 777
753 switch (code) { 778 switch (code) {
754 case ARCH_SET_GS: 779 case ARCH_SET_GS:
755 if (addr >= TASK_SIZE_OF(task)) 780 if (addr >= TASK_SIZE_OF(task))
756 return -EPERM; 781 return -EPERM;
757 cpu = get_cpu(); 782 cpu = get_cpu();
758 /* handle small bases via the GDT because that's faster to 783 /* handle small bases via the GDT because that's faster to
759 switch. */ 784 switch. */
760 if (addr <= 0xffffffff) { 785 if (addr <= 0xffffffff) {
761 set_32bit_tls(task, GS_TLS, addr); 786 set_32bit_tls(task, GS_TLS, addr);
762 if (doit) { 787 if (doit) {
763 load_TLS(&task->thread, cpu); 788 load_TLS(&task->thread, cpu);
764 load_gs_index(GS_TLS_SEL); 789 load_gs_index(GS_TLS_SEL);
765 } 790 }
766 task->thread.gsindex = GS_TLS_SEL; 791 task->thread.gsindex = GS_TLS_SEL;
767 task->thread.gs = 0; 792 task->thread.gs = 0;
768 } else { 793 } else {
769 task->thread.gsindex = 0; 794 task->thread.gsindex = 0;
770 task->thread.gs = addr; 795 task->thread.gs = addr;
771 if (doit) { 796 if (doit) {
772 load_gs_index(0); 797 load_gs_index(0);
773 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); 798 ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
774 } 799 }
775 } 800 }
776 put_cpu(); 801 put_cpu();
777 break; 802 break;
@@ -825,8 +850,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			rdmsrl(MSR_KERNEL_GS_BASE, base);
 		else
 			base = task->thread.gs;
-	}
-	else
+	} else
 		base = task->thread.gs;
 	ret = put_user(base, (unsigned long __user *)addr);
 	break;
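
Aside from the DS/BTS rework, most of the process_64.c hunks are checkpatch-style cleanups; the one piece of logic worth spelling out is do_arch_prctl()'s ARCH_SET_GS split: a base that fits in 32 bits goes into a TLS descriptor in the GDT (cheap to reload on every switch), while a full 64-bit base has to be written to MSR_KERNEL_GS_BASE with a null selector. A tiny illustrative decision model (constants and strings are mine, not the kernel's):

#include <stdint.h>
#include <stdio.h>

static const char *gs_base_strategy(uint64_t addr)
{
	if (addr <= 0xffffffffULL)
		return "32-bit base: install a GDT/TLS descriptor and reload GS";
	return "64-bit base: load selector 0 and write MSR_KERNEL_GS_BASE";
}

int main(void)
{
	printf("%s\n", gs_base_strategy(0x10000));
	printf("%s\n", gs_base_strategy(0x7f1234560000ULL));
	return 0;
}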
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e37dccce85db..e375b658efc3 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -14,6 +14,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/regset.h>
+#include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/elf.h>
 #include <linux/security.h>
@@ -69,7 +70,7 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK		FLAG_MASK_32
 
-static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
+static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
 	regno >>= 2;
@@ -554,45 +555,115 @@ static int ptrace_set_debugreg(struct task_struct *child,
554 return 0; 555 return 0;
555} 556}
556 557
557#ifdef X86_BTS 558#ifdef CONFIG_X86_PTRACE_BTS
559/*
560 * The configuration for a particular BTS hardware implementation.
561 */
562struct bts_configuration {
563 /* the size of a BTS record in bytes; at most BTS_MAX_RECORD_SIZE */
564 unsigned char sizeof_bts;
565 /* the size of a field in the BTS record in bytes */
566 unsigned char sizeof_field;
567 /* a bitmask to enable/disable BTS in DEBUGCTL MSR */
568 unsigned long debugctl_mask;
569};
570static struct bts_configuration bts_cfg;
571
572#define BTS_MAX_RECORD_SIZE (8 * 3)
573
574
575/*
576 * Branch Trace Store (BTS) uses the following format. Different
577 * architectures vary in the size of those fields.
578 * - source linear address
579 * - destination linear address
580 * - flags
581 *
582 * Later architectures use 64bit pointers throughout, whereas earlier
583 * architectures use 32bit pointers in 32bit mode.
584 *
585 * We compute the base address for the first 8 fields based on:
586 * - the field size stored in the DS configuration
587 * - the relative field position
588 *
589 * In order to store additional information in the BTS buffer, we use
590 * a special source address to indicate that the record requires
591 * special interpretation.
592 *
593 * Netburst indicated via a bit in the flags field whether the branch
594 * was predicted; this is ignored.
595 */
596
597enum bts_field {
598 bts_from = 0,
599 bts_to,
600 bts_flags,
601
602 bts_escape = (unsigned long)-1,
603 bts_qual = bts_to,
604 bts_jiffies = bts_flags
605};
606
607static inline unsigned long bts_get(const char *base, enum bts_field field)
608{
609 base += (bts_cfg.sizeof_field * field);
610 return *(unsigned long *)base;
611}
558 612
559static int ptrace_bts_get_size(struct task_struct *child) 613static inline void bts_set(char *base, enum bts_field field, unsigned long val)
560{ 614{
 561 if (!child->thread.ds_area_msr) 615 base += (bts_cfg.sizeof_field * field);
562 return -ENXIO; 616 (*(unsigned long *)base) = val;
617}
563 618
564 return ds_get_bts_index((void *)child->thread.ds_area_msr); 619/*
620 * Translate a BTS record from the raw format into the bts_struct format
621 *
622 * out (out): bts_struct interpretation
623 * raw: raw BTS record
624 */
625static void ptrace_bts_translate_record(struct bts_struct *out, const void *raw)
626{
627 memset(out, 0, sizeof(*out));
628 if (bts_get(raw, bts_from) == bts_escape) {
629 out->qualifier = bts_get(raw, bts_qual);
630 out->variant.jiffies = bts_get(raw, bts_jiffies);
631 } else {
632 out->qualifier = BTS_BRANCH;
633 out->variant.lbr.from_ip = bts_get(raw, bts_from);
634 out->variant.lbr.to_ip = bts_get(raw, bts_to);
635 }
565} 636}
566 637
567static int ptrace_bts_read_record(struct task_struct *child, 638static int ptrace_bts_read_record(struct task_struct *child, size_t index,
568 long index,
569 struct bts_struct __user *out) 639 struct bts_struct __user *out)
570{ 640{
571 struct bts_struct ret; 641 struct bts_struct ret;
572 int retval; 642 const void *bts_record;
573 int bts_end; 643 size_t bts_index, bts_end;
574 int bts_index; 644 int error;
575
576 if (!child->thread.ds_area_msr)
577 return -ENXIO;
578 645
579 if (index < 0) 646 error = ds_get_bts_end(child, &bts_end);
580 return -EINVAL; 647 if (error < 0)
648 return error;
581 649
582 bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
583 if (bts_end <= index) 650 if (bts_end <= index)
584 return -EINVAL; 651 return -EINVAL;
585 652
653 error = ds_get_bts_index(child, &bts_index);
654 if (error < 0)
655 return error;
656
586 /* translate the ptrace bts index into the ds bts index */ 657 /* translate the ptrace bts index into the ds bts index */
587 bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr); 658 bts_index += bts_end - (index + 1);
588 bts_index -= (index + 1); 659 if (bts_end <= bts_index)
589 if (bts_index < 0) 660 bts_index -= bts_end;
590 bts_index += bts_end;
591 661
592 retval = ds_read_bts((void *)child->thread.ds_area_msr, 662 error = ds_access_bts(child, bts_index, &bts_record);
593 bts_index, &ret); 663 if (error < 0)
594 if (retval < 0) 664 return error;
595 return retval; 665
666 ptrace_bts_translate_record(&ret, bts_record);
596 667
597 if (copy_to_user(out, &ret, sizeof(ret))) 668 if (copy_to_user(out, &ret, sizeof(ret)))
598 return -EFAULT; 669 return -EFAULT;
@@ -600,101 +671,106 @@ static int ptrace_bts_read_record(struct task_struct *child,
600 return sizeof(ret); 671 return sizeof(ret);
601} 672}
602 673
603static int ptrace_bts_clear(struct task_struct *child)
604{
605 if (!child->thread.ds_area_msr)
606 return -ENXIO;
607
608 return ds_clear((void *)child->thread.ds_area_msr);
609}
610
611static int ptrace_bts_drain(struct task_struct *child, 674static int ptrace_bts_drain(struct task_struct *child,
612 long size, 675 long size,
613 struct bts_struct __user *out) 676 struct bts_struct __user *out)
614{ 677{
615 int end, i; 678 struct bts_struct ret;
616 void *ds = (void *)child->thread.ds_area_msr; 679 const unsigned char *raw;
617 680 size_t end, i;
618 if (!ds) 681 int error;
619 return -ENXIO;
620 682
621 end = ds_get_bts_index(ds); 683 error = ds_get_bts_index(child, &end);
622 if (end <= 0) 684 if (error < 0)
623 return end; 685 return error;
624 686
625 if (size < (end * sizeof(struct bts_struct))) 687 if (size < (end * sizeof(struct bts_struct)))
626 return -EIO; 688 return -EIO;
627 689
628 for (i = 0; i < end; i++, out++) { 690 error = ds_access_bts(child, 0, (const void **)&raw);
629 struct bts_struct ret; 691 if (error < 0)
630 int retval; 692 return error;
631 693
632 retval = ds_read_bts(ds, i, &ret); 694 for (i = 0; i < end; i++, out++, raw += bts_cfg.sizeof_bts) {
633 if (retval < 0) 695 ptrace_bts_translate_record(&ret, raw);
634 return retval;
635 696
636 if (copy_to_user(out, &ret, sizeof(ret))) 697 if (copy_to_user(out, &ret, sizeof(ret)))
637 return -EFAULT; 698 return -EFAULT;
638 } 699 }
639 700
640 ds_clear(ds); 701 error = ds_clear_bts(child);
702 if (error < 0)
703 return error;
641 704
642 return end; 705 return end;
643} 706}
644 707
708static void ptrace_bts_ovfl(struct task_struct *child)
709{
710 send_sig(child->thread.bts_ovfl_signal, child, 0);
711}
712
645static int ptrace_bts_config(struct task_struct *child, 713static int ptrace_bts_config(struct task_struct *child,
646 long cfg_size, 714 long cfg_size,
647 const struct ptrace_bts_config __user *ucfg) 715 const struct ptrace_bts_config __user *ucfg)
648{ 716{
649 struct ptrace_bts_config cfg; 717 struct ptrace_bts_config cfg;
650 int bts_size, ret = 0; 718 int error = 0;
651 void *ds; 719
720 error = -EOPNOTSUPP;
721 if (!bts_cfg.sizeof_bts)
722 goto errout;
652 723
724 error = -EIO;
653 if (cfg_size < sizeof(cfg)) 725 if (cfg_size < sizeof(cfg))
654 return -EIO; 726 goto errout;
655 727
728 error = -EFAULT;
656 if (copy_from_user(&cfg, ucfg, sizeof(cfg))) 729 if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
657 return -EFAULT; 730 goto errout;
658 731
659 if ((int)cfg.size < 0) 732 error = -EINVAL;
660 return -EINVAL; 733 if ((cfg.flags & PTRACE_BTS_O_SIGNAL) &&
734 !(cfg.flags & PTRACE_BTS_O_ALLOC))
735 goto errout;
661 736
662 bts_size = 0; 737 if (cfg.flags & PTRACE_BTS_O_ALLOC) {
663 ds = (void *)child->thread.ds_area_msr; 738 ds_ovfl_callback_t ovfl = NULL;
664 if (ds) { 739 unsigned int sig = 0;
665 bts_size = ds_get_bts_size(ds); 740
666 if (bts_size < 0) 741 /* we ignore the error in case we were not tracing child */
667 return bts_size; 742 (void)ds_release_bts(child);
668 }
669 cfg.size = PAGE_ALIGN(cfg.size);
670 743
671 if (bts_size != cfg.size) { 744 if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
672 ret = ptrace_bts_realloc(child, cfg.size, 745 if (!cfg.signal)
673 cfg.flags & PTRACE_BTS_O_CUT_SIZE); 746 goto errout;
674 if (ret < 0) 747
748 sig = cfg.signal;
749 ovfl = ptrace_bts_ovfl;
750 }
751
752 error = ds_request_bts(child, /* base = */ NULL, cfg.size, ovfl);
753 if (error < 0)
675 goto errout; 754 goto errout;
676 755
677 ds = (void *)child->thread.ds_area_msr; 756 child->thread.bts_ovfl_signal = sig;
678 } 757 }
679 758
680 if (cfg.flags & PTRACE_BTS_O_SIGNAL) 759 error = -EINVAL;
681 ret = ds_set_overflow(ds, DS_O_SIGNAL); 760 if (!child->thread.ds_ctx && cfg.flags)
682 else
683 ret = ds_set_overflow(ds, DS_O_WRAP);
684 if (ret < 0)
685 goto errout; 761 goto errout;
686 762
687 if (cfg.flags & PTRACE_BTS_O_TRACE) 763 if (cfg.flags & PTRACE_BTS_O_TRACE)
688 child->thread.debugctlmsr |= ds_debugctl_mask(); 764 child->thread.debugctlmsr |= bts_cfg.debugctl_mask;
689 else 765 else
690 child->thread.debugctlmsr &= ~ds_debugctl_mask(); 766 child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
691 767
692 if (cfg.flags & PTRACE_BTS_O_SCHED) 768 if (cfg.flags & PTRACE_BTS_O_SCHED)
693 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 769 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
694 else 770 else
695 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 771 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
696 772
697 ret = sizeof(cfg); 773 error = sizeof(cfg);
698 774
699out: 775out:
700 if (child->thread.debugctlmsr) 776 if (child->thread.debugctlmsr)
@@ -702,10 +778,10 @@ out:
702 else 778 else
703 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR); 779 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
704 780
705 return ret; 781 return error;
706 782
707errout: 783errout:
708 child->thread.debugctlmsr &= ~ds_debugctl_mask(); 784 child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
709 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS); 785 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
710 goto out; 786 goto out;
711} 787}
@@ -714,29 +790,40 @@ static int ptrace_bts_status(struct task_struct *child,
714 long cfg_size, 790 long cfg_size,
715 struct ptrace_bts_config __user *ucfg) 791 struct ptrace_bts_config __user *ucfg)
716{ 792{
717 void *ds = (void *)child->thread.ds_area_msr;
718 struct ptrace_bts_config cfg; 793 struct ptrace_bts_config cfg;
794 size_t end;
795 const void *base, *max;
796 int error;
719 797
720 if (cfg_size < sizeof(cfg)) 798 if (cfg_size < sizeof(cfg))
721 return -EIO; 799 return -EIO;
722 800
723 memset(&cfg, 0, sizeof(cfg)); 801 error = ds_get_bts_end(child, &end);
802 if (error < 0)
803 return error;
724 804
725 if (ds) { 805 error = ds_access_bts(child, /* index = */ 0, &base);
726 cfg.size = ds_get_bts_size(ds); 806 if (error < 0)
807 return error;
727 808
728 if (ds_get_overflow(ds) == DS_O_SIGNAL) 809 error = ds_access_bts(child, /* index = */ end, &max);
729 cfg.flags |= PTRACE_BTS_O_SIGNAL; 810 if (error < 0)
811 return error;
730 812
731 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) && 813 memset(&cfg, 0, sizeof(cfg));
732 child->thread.debugctlmsr & ds_debugctl_mask()) 814 cfg.size = (max - base);
733 cfg.flags |= PTRACE_BTS_O_TRACE; 815 cfg.signal = child->thread.bts_ovfl_signal;
816 cfg.bts_size = sizeof(struct bts_struct);
734 817
735 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS)) 818 if (cfg.signal)
736 cfg.flags |= PTRACE_BTS_O_SCHED; 819 cfg.flags |= PTRACE_BTS_O_SIGNAL;
737 }
738 820
739 cfg.bts_size = sizeof(struct bts_struct); 821 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
822 child->thread.debugctlmsr & bts_cfg.debugctl_mask)
823 cfg.flags |= PTRACE_BTS_O_TRACE;
824
825 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
826 cfg.flags |= PTRACE_BTS_O_SCHED;
740 827
741 if (copy_to_user(ucfg, &cfg, sizeof(cfg))) 828 if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
742 return -EFAULT; 829 return -EFAULT;
@@ -744,89 +831,38 @@ static int ptrace_bts_status(struct task_struct *child,
744 return sizeof(cfg); 831 return sizeof(cfg);
745} 832}
746 833
747
748static int ptrace_bts_write_record(struct task_struct *child, 834static int ptrace_bts_write_record(struct task_struct *child,
749 const struct bts_struct *in) 835 const struct bts_struct *in)
750{ 836{
751 int retval; 837 unsigned char bts_record[BTS_MAX_RECORD_SIZE];
752 838
753 if (!child->thread.ds_area_msr) 839 BUG_ON(BTS_MAX_RECORD_SIZE < bts_cfg.sizeof_bts);
754 return -ENXIO;
755 840
756 retval = ds_write_bts((void *)child->thread.ds_area_msr, in); 841 memset(bts_record, 0, bts_cfg.sizeof_bts);
757 if (retval) 842 switch (in->qualifier) {
758 return retval; 843 case BTS_INVALID:
844 break;
759 845
760 return sizeof(*in); 846 case BTS_BRANCH:
761} 847 bts_set(bts_record, bts_from, in->variant.lbr.from_ip);
848 bts_set(bts_record, bts_to, in->variant.lbr.to_ip);
849 break;
762 850
763static int ptrace_bts_realloc(struct task_struct *child, 851 case BTS_TASK_ARRIVES:
764 int size, int reduce_size) 852 case BTS_TASK_DEPARTS:
765{ 853 bts_set(bts_record, bts_from, bts_escape);
766 unsigned long rlim, vm; 854 bts_set(bts_record, bts_qual, in->qualifier);
767 int ret, old_size; 855 bts_set(bts_record, bts_jiffies, in->variant.jiffies);
856 break;
768 857
769 if (size < 0) 858 default:
770 return -EINVAL; 859 return -EINVAL;
771
772 old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
773 if (old_size < 0)
774 return old_size;
775
776 ret = ds_free((void **)&child->thread.ds_area_msr);
777 if (ret < 0)
778 goto out;
779
780 size >>= PAGE_SHIFT;
781 old_size >>= PAGE_SHIFT;
782
783 current->mm->total_vm -= old_size;
784 current->mm->locked_vm -= old_size;
785
786 if (size == 0)
787 goto out;
788
789 rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
790 vm = current->mm->total_vm + size;
791 if (rlim < vm) {
792 ret = -ENOMEM;
793
794 if (!reduce_size)
795 goto out;
796
797 size = rlim - current->mm->total_vm;
798 if (size <= 0)
799 goto out;
800 }
801
802 rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
803 vm = current->mm->locked_vm + size;
804 if (rlim < vm) {
805 ret = -ENOMEM;
806
807 if (!reduce_size)
808 goto out;
809
810 size = rlim - current->mm->locked_vm;
811 if (size <= 0)
812 goto out;
813 } 860 }
814 861
815 ret = ds_allocate((void **)&child->thread.ds_area_msr, 862 /* The writing task will be the switched-to task on a context
816 size << PAGE_SHIFT); 863 * switch. It needs to write into the switched-from task's BTS
817 if (ret < 0) 864 * buffer. */
818 goto out; 865 return ds_unchecked_write_bts(child, bts_record, bts_cfg.sizeof_bts);
819
820 current->mm->total_vm += size;
821 current->mm->locked_vm += size;
822
823out:
824 if (child->thread.ds_area_msr)
825 set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
826 else
827 clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
828
829 return ret;
830} 866}
831 867
832void ptrace_bts_take_timestamp(struct task_struct *tsk, 868void ptrace_bts_take_timestamp(struct task_struct *tsk,
@@ -839,7 +875,66 @@ void ptrace_bts_take_timestamp(struct task_struct *tsk,
839 875
840 ptrace_bts_write_record(tsk, &rec); 876 ptrace_bts_write_record(tsk, &rec);
841} 877}
842#endif /* X86_BTS */ 878
879static const struct bts_configuration bts_cfg_netburst = {
880 .sizeof_bts = sizeof(long) * 3,
881 .sizeof_field = sizeof(long),
882 .debugctl_mask = (1<<2)|(1<<3)|(1<<5)
883};
884
885static const struct bts_configuration bts_cfg_pentium_m = {
886 .sizeof_bts = sizeof(long) * 3,
887 .sizeof_field = sizeof(long),
888 .debugctl_mask = (1<<6)|(1<<7)
889};
890
891static const struct bts_configuration bts_cfg_core2 = {
892 .sizeof_bts = 8 * 3,
893 .sizeof_field = 8,
894 .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
895};
896
897static inline void bts_configure(const struct bts_configuration *cfg)
898{
899 bts_cfg = *cfg;
900}
901
902void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *c)
903{
904 switch (c->x86) {
905 case 0x6:
906 switch (c->x86_model) {
907 case 0xD:
908 case 0xE: /* Pentium M */
909 bts_configure(&bts_cfg_pentium_m);
910 break;
911 case 0xF: /* Core2 */
912 case 0x1C: /* Atom */
913 bts_configure(&bts_cfg_core2);
914 break;
915 default:
916 /* sorry, don't know about them */
917 break;
918 }
919 break;
920 case 0xF:
921 switch (c->x86_model) {
922 case 0x0:
923 case 0x1:
924 case 0x2: /* Netburst */
925 bts_configure(&bts_cfg_netburst);
926 break;
927 default:
928 /* sorry, don't know about them */
929 break;
930 }
931 break;
932 default:
933 /* sorry, don't know about them */
934 break;
935 }
936}
937#endif /* CONFIG_X86_PTRACE_BTS */
843 938
844/* 939/*
845 * Called by kernel/ptrace.c when detaching.. 940 * Called by kernel/ptrace.c when detaching..
@@ -852,15 +947,15 @@ void ptrace_disable(struct task_struct *child)
 #ifdef TIF_SYSCALL_EMU
 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 #endif
-	if (child->thread.ds_area_msr) {
-#ifdef X86_BTS
-		ptrace_bts_realloc(child, 0, 0);
-#endif
-		child->thread.debugctlmsr &= ~ds_debugctl_mask();
-		if (!child->thread.debugctlmsr)
-			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
-	}
+#ifdef CONFIG_X86_PTRACE_BTS
+	(void)ds_release_bts(child);
+
+	child->thread.debugctlmsr &= ~bts_cfg.debugctl_mask;
+	if (!child->thread.debugctlmsr)
+		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+
+	clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
+#endif /* CONFIG_X86_PTRACE_BTS */
 }
865 960
866#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION 961#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
@@ -980,7 +1075,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	/*
 	 * These bits need more cooking - not enabled yet:
 	 */
-#ifdef X86_BTS
+#ifdef CONFIG_X86_PTRACE_BTS
 	case PTRACE_BTS_CONFIG:
 		ret = ptrace_bts_config
 			(child, data, (struct ptrace_bts_config __user *)addr);
@@ -992,7 +1087,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 
 	case PTRACE_BTS_SIZE:
-		ret = ptrace_bts_get_size(child);
+		ret = ds_get_bts_index(child, /* pos = */ NULL);
 		break;
 
 	case PTRACE_BTS_GET:
@@ -1001,14 +1096,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 
 	case PTRACE_BTS_CLEAR:
-		ret = ptrace_bts_clear(child);
+		ret = ds_clear_bts(child);
 		break;
 
 	case PTRACE_BTS_DRAIN:
 		ret = ptrace_bts_drain
 			(child, data, (struct bts_struct __user *) addr);
 		break;
-#endif
+#endif /* CONFIG_X86_PTRACE_BTS */
 
 	default:
 		ret = ptrace_request(child, request, addr, data);
@@ -1375,30 +1470,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
 	force_sig_info(SIGTRAP, &info, tsk);
 }
 
-static void syscall_trace(struct pt_regs *regs)
-{
-	if (!(current->ptrace & PT_PTRACED))
-		return;
-
-#if 0
-	printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
-	       current->comm,
-	       regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
-	       current_thread_info()->flags, current->ptrace);
-#endif
-
-	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-				? 0x80 : 0));
-	/*
-	 * this isn't the same as continuing with a signal, but it will do
-	 * for normal use.  strace only continues with a signal if the
-	 * stopping signal is not SIGTRAP.  -brl
-	 */
-	if (current->exit_code) {
-		send_sig(current->exit_code, current, 1);
-		current->exit_code = 0;
-	}
-}
 
 #ifdef CONFIG_X86_32
 # define IS_IA32	1
@@ -1432,8 +1503,9 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
 	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
 		ret = -1L;
 
-	if (ret || test_thread_flag(TIF_SYSCALL_TRACE))
-		syscall_trace(regs);
+	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
+	    tracehook_report_syscall_entry(regs))
+		ret = -1L;
 
 	if (unlikely(current->audit_context)) {
 		if (IS_IA32)
@@ -1459,7 +1531,7 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
 		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
-		syscall_trace(regs);
+		tracehook_report_syscall_exit(regs, 0);
 
 	/*
 	 * If TIF_SYSCALL_EMU is set, we only get here because of
@@ -1475,6 +1547,6 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
 	 * system call instruction.
 	 */
 	if (test_thread_flag(TIF_SINGLESTEP) &&
-	    (current->ptrace & PT_PTRACED))
+	    tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
 		send_sigtrap(current, regs, 0);
 }
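
The ptrace BTS rework stops treating the DS area as a raw MSR value and instead describes the hardware record format in bts_cfg: a record is three equally sized fields (from, to, flags), so field i lives at offset sizeof_field * i. A small user-space model of the bts_get()/bts_set() accessors, using the Core2 sizes from bts_cfg_core2 (the memcpy is only to keep the illustration free of alignment assumptions):

#include <stdio.h>
#include <string.h>

enum bts_field_model { BTS_FROM = 0, BTS_TO, BTS_FLAGS };

static const unsigned int sizeof_field = 8;	/* per-field size on Core2 */
static const unsigned int sizeof_bts = 8 * 3;	/* whole record */

static unsigned long long bts_get_model(const unsigned char *rec,
					enum bts_field_model f)
{
	unsigned long long v;

	memcpy(&v, rec + sizeof_field * f, sizeof(v));
	return v;
}

static void bts_set_model(unsigned char *rec, enum bts_field_model f,
			  unsigned long long v)
{
	memcpy(rec + sizeof_field * f, &v, sizeof(v));
}

int main(void)
{
	unsigned char rec[8 * 3] = { 0 };

	bts_set_model(rec, BTS_FROM, 0x400123);
	bts_set_model(rec, BTS_TO, 0x400456);
	printf("branch %#llx -> %#llx (record is %u bytes)\n",
	       bts_get_model(rec, BTS_FROM), bts_get_model(rec, BTS_TO),
	       sizeof_bts);
	return 0;
}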
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..f4c93f1cfc19 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(pm_power_off);
 
 static const struct desc_ptr no_idt = {};
 static int reboot_mode;
-enum reboot_type reboot_type = BOOT_KBD;
+/*
+ * Keyboard reset and triple fault may result in INIT, not RESET, which
+ * doesn't work when we're in vmx root mode.  Try ACPI first.
+ */
+enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9838f2539dfc..141efab52400 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -223,6 +223,9 @@ unsigned long saved_video_mode;
 #define RAMDISK_LOAD_FLAG	0x4000
 
 static char __initdata command_line[COMMAND_LINE_SIZE];
+#ifdef CONFIG_CMDLINE_BOOL
+static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+#endif
 
 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
 struct edd edd;
@@ -665,6 +668,19 @@ void __init setup_arch(char **cmdline_p)
 	bss_resource.start = virt_to_phys(&__bss_start);
 	bss_resource.end = virt_to_phys(&__bss_stop)-1;
 
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+	if (builtin_cmdline[0]) {
+		/* append boot loader cmdline to builtin */
+		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+	}
+#endif
+#endif
+
 	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = command_line;
 
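
With CONFIG_CMDLINE_BOOL, the built-in command line is normally prepended to whatever the boot loader handed over, and CONFIG_CMDLINE_OVERRIDE discards the boot loader string entirely. A user-space model of the append path (snprintf stands in for the kernel's bounded strlcat/strlcpy; buffer size and example strings are made up):

#include <stdio.h>

#define COMMAND_LINE_SIZE 256

int main(void)
{
	char builtin[COMMAND_LINE_SIZE] = "console=ttyS0,115200";	/* CONFIG_CMDLINE */
	const char *bootloader = "root=/dev/sda1 quiet";
	char boot_command_line[COMMAND_LINE_SIZE];

	if (builtin[0])
		/* append boot loader cmdline to builtin; builtin comes first */
		snprintf(boot_command_line, sizeof(boot_command_line),
			 "%s %s", builtin, bootloader);
	else
		snprintf(boot_command_line, sizeof(boot_command_line),
			 "%s", bootloader);

	printf("%s\n", boot_command_line);
	return 0;
}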
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 76e305e064f9..0e67f72d9316 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -162,9 +162,16 @@ void __init setup_per_cpu_areas(void)
 			printk(KERN_INFO
 			       "cpu %d has no node %d or node-local memory\n",
 				cpu, node);
+			if (ptr)
+				printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
+					 cpu, __pa(ptr));
 		}
-		else
+		else {
 			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+			if (ptr)
+				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
+					 cpu, node, __pa(ptr));
+		}
 #endif
 	per_cpu_offset(cpu) = ptr - __per_cpu_start;
 	memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
diff --git a/arch/x86/kernel/sigframe.h b/arch/x86/kernel/sigframe.h
index 72bbb519d2dc..8b4956e800ac 100644
--- a/arch/x86/kernel/sigframe.h
+++ b/arch/x86/kernel/sigframe.h
@@ -24,4 +24,9 @@ struct rt_sigframe {
 	struct ucontext uc;
 	struct siginfo info;
 };
+
+int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+		sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct k_sigaction *ka,
+		sigset_t *set, struct pt_regs *regs);
 #endif
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 6fb5bcdd8933..2a2435d3037d 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/tracehook.h>
 #include <linux/elf.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -26,6 +27,7 @@
 #include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/vdso.h>
+#include <asm/syscalls.h>
 
 #include "sigframe.h"
 
@@ -558,8 +560,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 	 * handler too.
 	 */
 	regs->flags &= ~X86_EFLAGS_TF;
-	if (test_thread_flag(TIF_SINGLESTEP))
-		ptrace_notify(SIGTRAP);
 
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
@@ -568,6 +568,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
+	tracehook_signal_handler(sig, info, ka, regs,
+				 test_thread_flag(TIF_SINGLESTEP));
+
 	return 0;
 }
 
@@ -661,5 +664,10 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
+	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+	}
+
 	clear_thread_flag(TIF_IRET);
 }
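
do_notify_resume() now handles two kinds of pending work on the way back to user mode: signal delivery and the new TIF_NOTIFY_RESUME callback, each gated by its own flag bit. A compact model of that dispatch (flag values and helpers are illustrative, not the kernel's TIF_* constants):

#include <stdio.h>

#define FLAG_SIGPENDING    (1u << 0)
#define FLAG_NOTIFY_RESUME (1u << 1)

static void deliver_signals(void)    { puts("deliver pending signals"); }
static void notify_resume_hook(void) { puts("run tracehook resume callback"); }

static void notify_resume_model(unsigned int *flags)
{
	if (*flags & FLAG_SIGPENDING)
		deliver_signals();

	if (*flags & FLAG_NOTIFY_RESUME) {
		*flags &= ~FLAG_NOTIFY_RESUME;	/* clear before running the hook */
		notify_resume_hook();
	}
}

int main(void)
{
	unsigned int flags = FLAG_SIGPENDING | FLAG_NOTIFY_RESUME;

	notify_resume_model(&flags);
	return 0;
}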
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index ca316b5b742c..694aa888bb19 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -15,17 +15,21 @@
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/ptrace.h>
+#include <linux/tracehook.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/personality.h>
 #include <linux/compiler.h>
+#include <linux/uaccess.h>
+
 #include <asm/processor.h>
 #include <asm/ucontext.h>
-#include <asm/uaccess.h>
 #include <asm/i387.h>
 #include <asm/proto.h>
 #include <asm/ia32_unistd.h>
 #include <asm/mce.h>
+#include <asm/syscall.h>
+#include <asm/syscalls.h>
 #include "sigframe.h"
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -41,11 +45,6 @@
 # define FIX_EFLAGS	__FIX_EFLAGS
 #endif
 
-int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-		sigset_t *set, struct pt_regs * regs);
-int ia32_setup_frame(int sig, struct k_sigaction *ka,
-		sigset_t *set, struct pt_regs * regs);
-
 asmlinkage long
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		struct pt_regs *regs)
@@ -128,7 +127,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
128 /* Always make any pending restarted system calls return -EINTR */ 127 /* Always make any pending restarted system calls return -EINTR */
129 current_thread_info()->restart_block.fn = do_no_restart_syscall; 128 current_thread_info()->restart_block.fn = do_no_restart_syscall;
130 129
131#define COPY(x) err |= __get_user(regs->x, &sc->x) 130#define COPY(x) (err |= __get_user(regs->x, &sc->x))
132 131
133 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); 132 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
134 COPY(dx); COPY(cx); COPY(ip); 133 COPY(dx); COPY(cx); COPY(ip);
@@ -158,7 +157,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
158 } 157 }
159 158
160 { 159 {
161 struct _fpstate __user * buf; 160 struct _fpstate __user *buf;
162 err |= __get_user(buf, &sc->fpstate); 161 err |= __get_user(buf, &sc->fpstate);
163 162
164 if (buf) { 163 if (buf) {
@@ -198,7 +197,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
198 current->blocked = set; 197 current->blocked = set;
199 recalc_sigpending(); 198 recalc_sigpending();
200 spin_unlock_irq(&current->sighand->siglock); 199 spin_unlock_irq(&current->sighand->siglock);
201 200
202 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 201 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
203 goto badframe; 202 goto badframe;
204 203
@@ -208,16 +207,17 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
208 return ax; 207 return ax;
209 208
210badframe: 209badframe:
211 signal_fault(regs,frame,"sigreturn"); 210 signal_fault(regs, frame, "sigreturn");
212 return 0; 211 return 0;
213} 212}
214 213
215/* 214/*
216 * Set up a signal frame. 215 * Set up a signal frame.
217 */ 216 */
218 217
219static inline int 218static inline int
220setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, struct task_struct *me) 219setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
220 unsigned long mask, struct task_struct *me)
221{ 221{
222 int err = 0; 222 int err = 0;
223 223
@@ -273,35 +273,35 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
273} 273}
274 274
275static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 275static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
276 sigset_t *set, struct pt_regs * regs) 276 sigset_t *set, struct pt_regs *regs)
277{ 277{
278 struct rt_sigframe __user *frame; 278 struct rt_sigframe __user *frame;
279 struct _fpstate __user *fp = NULL; 279 struct _fpstate __user *fp = NULL;
280 int err = 0; 280 int err = 0;
281 struct task_struct *me = current; 281 struct task_struct *me = current;
282 282
283 if (used_math()) { 283 if (used_math()) {
284 fp = get_stack(ka, regs, sizeof(struct _fpstate)); 284 fp = get_stack(ka, regs, sizeof(struct _fpstate));
285 frame = (void __user *)round_down( 285 frame = (void __user *)round_down(
286 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; 286 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
287 287
288 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) 288 if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
289 goto give_sigsegv; 289 goto give_sigsegv;
290 290
291 if (save_i387(fp) < 0) 291 if (save_i387(fp) < 0)
292 err |= -1; 292 err |= -1;
293 } else 293 } else
294 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; 294 frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
295 295
296 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 296 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
297 goto give_sigsegv; 297 goto give_sigsegv;
298 298
299 if (ka->sa.sa_flags & SA_SIGINFO) { 299 if (ka->sa.sa_flags & SA_SIGINFO) {
300 err |= copy_siginfo_to_user(&frame->info, info); 300 err |= copy_siginfo_to_user(&frame->info, info);
301 if (err) 301 if (err)
302 goto give_sigsegv; 302 goto give_sigsegv;
303 } 303 }
304 304
305 /* Create the ucontext. */ 305 /* Create the ucontext. */
306 err |= __put_user(0, &frame->uc.uc_flags); 306 err |= __put_user(0, &frame->uc.uc_flags);
307 err |= __put_user(0, &frame->uc.uc_link); 307 err |= __put_user(0, &frame->uc.uc_link);
@@ -311,9 +311,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
311 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); 311 err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
312 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); 312 err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
313 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); 313 err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
314 if (sizeof(*set) == 16) { 314 if (sizeof(*set) == 16) {
315 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 315 __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
316 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 316 __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
317 } else 317 } else
318 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 318 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
319 319
@@ -324,7 +324,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
324 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); 324 err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
325 } else { 325 } else {
326 /* could use a vstub here */ 326 /* could use a vstub here */
327 goto give_sigsegv; 327 goto give_sigsegv;
328 } 328 }
329 329
330 if (err) 330 if (err)
@@ -332,7 +332,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
332 332
333 /* Set up registers for signal handler */ 333 /* Set up registers for signal handler */
334 regs->di = sig; 334 regs->di = sig;
335 /* In case the signal handler was declared without prototypes */ 335 /* In case the signal handler was declared without prototypes */
336 regs->ax = 0; 336 regs->ax = 0;
337 337
338 /* This also works for non SA_SIGINFO handlers because they expect the 338 /* This also works for non SA_SIGINFO handlers because they expect the
@@ -355,37 +355,8 @@ give_sigsegv:
355} 355}
356 356
357/* 357/*
358 * Return -1L or the syscall number that @regs is executing.
359 */
360static long current_syscall(struct pt_regs *regs)
361{
362 /*
363 * We always sign-extend a -1 value being set here,
364 * so this is always either -1L or a syscall number.
365 */
366 return regs->orig_ax;
367}
368
369/*
370 * Return a value that is -EFOO if the system call in @regs->orig_ax
371 * returned an error. This only works for @regs from @current.
372 */
373static long current_syscall_ret(struct pt_regs *regs)
374{
375#ifdef CONFIG_IA32_EMULATION
376 if (test_thread_flag(TIF_IA32))
377 /*
378 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
379 * and will match correctly in comparisons.
380 */
381 return (int) regs->ax;
382#endif
383 return regs->ax;
384}
385
386/*
387 * OK, we're invoking a handler 358 * OK, we're invoking a handler
388 */ 359 */
389 360
390static int 361static int
391handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 362handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
@@ -394,9 +365,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
394 int ret; 365 int ret;
395 366
396 /* Are we from a system call? */ 367 /* Are we from a system call? */
397 if (current_syscall(regs) >= 0) { 368 if (syscall_get_nr(current, regs) >= 0) {
398 /* If so, check system call restarting.. */ 369 /* If so, check system call restarting.. */
399 switch (current_syscall_ret(regs)) { 370 switch (syscall_get_error(current, regs)) {
400 case -ERESTART_RESTARTBLOCK: 371 case -ERESTART_RESTARTBLOCK:
401 case -ERESTARTNOHAND: 372 case -ERESTARTNOHAND:
402 regs->ax = -EINTR; 373 regs->ax = -EINTR;
@@ -429,7 +400,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
429 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs); 400 ret = ia32_setup_rt_frame(sig, ka, info, oldset, regs);
430 else 401 else
431 ret = ia32_setup_frame(sig, ka, oldset, regs); 402 ret = ia32_setup_frame(sig, ka, oldset, regs);
432 } else 403 } else
433#endif 404#endif
434 ret = setup_rt_frame(sig, ka, info, oldset, regs); 405 ret = setup_rt_frame(sig, ka, info, oldset, regs);
435 406
@@ -453,15 +424,16 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
453 * handler too. 424 * handler too.
454 */ 425 */
455 regs->flags &= ~X86_EFLAGS_TF; 426 regs->flags &= ~X86_EFLAGS_TF;
456 if (test_thread_flag(TIF_SINGLESTEP))
457 ptrace_notify(SIGTRAP);
458 427
459 spin_lock_irq(&current->sighand->siglock); 428 spin_lock_irq(&current->sighand->siglock);
460 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 429 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
461 if (!(ka->sa.sa_flags & SA_NODEFER)) 430 if (!(ka->sa.sa_flags & SA_NODEFER))
462 sigaddset(&current->blocked,sig); 431 sigaddset(&current->blocked, sig);
463 recalc_sigpending(); 432 recalc_sigpending();
464 spin_unlock_irq(&current->sighand->siglock); 433 spin_unlock_irq(&current->sighand->siglock);
434
435 tracehook_signal_handler(sig, info, ka, regs,
436 test_thread_flag(TIF_SINGLESTEP));
465 } 437 }
466 438
467 return ret; 439 return ret;
@@ -518,9 +490,9 @@ static void do_signal(struct pt_regs *regs)
518 } 490 }
519 491
520 /* Did we come from a system call? */ 492 /* Did we come from a system call? */
521 if (current_syscall(regs) >= 0) { 493 if (syscall_get_nr(current, regs) >= 0) {
522 /* Restart the system call - no handlers present */ 494 /* Restart the system call - no handlers present */
523 switch (current_syscall_ret(regs)) { 495 switch (syscall_get_error(current, regs)) {
524 case -ERESTARTNOHAND: 496 case -ERESTARTNOHAND:
525 case -ERESTARTSYS: 497 case -ERESTARTSYS:
526 case -ERESTARTNOINTR: 498 case -ERESTARTNOINTR:
@@ -558,17 +530,23 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
558 /* deal with pending signal delivery */ 530 /* deal with pending signal delivery */
559 if (thread_info_flags & _TIF_SIGPENDING) 531 if (thread_info_flags & _TIF_SIGPENDING)
560 do_signal(regs); 532 do_signal(regs);
533
534 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
535 clear_thread_flag(TIF_NOTIFY_RESUME);
536 tracehook_notify_resume(regs);
537 }
561} 538}
562 539
563void signal_fault(struct pt_regs *regs, void __user *frame, char *where) 540void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
564{ 541{
565 struct task_struct *me = current; 542 struct task_struct *me = current;
566 if (show_unhandled_signals && printk_ratelimit()) { 543 if (show_unhandled_signals && printk_ratelimit()) {
567 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx", 544 printk("%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
568 me->comm,me->pid,where,frame,regs->ip,regs->sp,regs->orig_ax); 545 me->comm, me->pid, where, frame, regs->ip,
546 regs->sp, regs->orig_ax);
569 print_vma_addr(" in ", regs->ip); 547 print_vma_addr(" in ", regs->ip);
570 printk("\n"); 548 printk("\n");
571 } 549 }
572 550
573 force_sig(SIGSEGV, me); 551 force_sig(SIGSEGV, me);
574} 552}
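signal_64.c drops its private current_syscall()/current_syscall_ret() helpers in favour of the generic accessors from <asm/syscall.h>. A rough paraphrase of what those accessors do on x86-64 (treat the details as an assumption and check the header):

    /* Paraphrased, not the authoritative header. */
    static inline long syscall_get_nr(struct task_struct *task,
                                      struct pt_regs *regs)
    {
            return regs->orig_ax;             /* -1L when not inside a syscall */
    }

    static inline long syscall_get_error(struct task_struct *task,
                                         struct pt_regs *regs)
    {
            unsigned long error = regs->ax;

            /* For compat (ia32) tasks the value is sign-extended first, so
             * (int)-ERESTARTSYS compares equal to (long)-ERESTARTSYS. */
            return IS_ERR_VALUE(error) ? error : 0;
    }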
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0b8261c3cac2..4e7ccb0e2a9b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -88,7 +88,7 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
88#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 88#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
89#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 89#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
90#else 90#else
91struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 91static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
92#define get_idle_for_cpu(x) (idle_thread_array[(x)]) 92#define get_idle_for_cpu(x) (idle_thread_array[(x)])
93#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) 93#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
94#endif 94#endif
@@ -129,7 +129,7 @@ static int boot_cpu_logical_apicid;
129static cpumask_t cpu_sibling_setup_map; 129static cpumask_t cpu_sibling_setup_map;
130 130
131/* Set if we find a B stepping CPU */ 131/* Set if we find a B stepping CPU */
132int __cpuinitdata smp_b_stepping; 132static int __cpuinitdata smp_b_stepping;
133 133
134#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32) 134#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
135 135
@@ -1314,16 +1314,13 @@ __init void prefill_possible_map(void)
1314 if (!num_processors) 1314 if (!num_processors)
1315 num_processors = 1; 1315 num_processors = 1;
1316 1316
1317#ifdef CONFIG_HOTPLUG_CPU
1318 if (additional_cpus == -1) { 1317 if (additional_cpus == -1) {
1319 if (disabled_cpus > 0) 1318 if (disabled_cpus > 0)
1320 additional_cpus = disabled_cpus; 1319 additional_cpus = disabled_cpus;
1321 else 1320 else
1322 additional_cpus = 0; 1321 additional_cpus = 0;
1323 } 1322 }
1324#else 1323
1325 additional_cpus = 0;
1326#endif
1327 possible = num_processors + additional_cpus; 1324 possible = num_processors + additional_cpus;
1328 if (possible > NR_CPUS) 1325 if (possible > NR_CPUS)
1329 possible = NR_CPUS; 1326 possible = NR_CPUS;
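Dropping the CONFIG_HOTPLUG_CPU guard means disabled-but-present CPUs are always counted into the possible map. A worked example with assumed firmware numbers:

    /* Assumed numbers, for illustration only:                               */
    /*   num_processors  = 4    CPUs that booted                             */
    /*   disabled_cpus   = 4    listed in the MADT but disabled              */
    /*   additional_cpus = -1   no additional_cpus= on the command line      */
    /* additional_cpus == -1  =>  additional_cpus = disabled_cpus = 4        */
    /* possible = 4 + 4 = 8, clamped to NR_CPUS; previously, without         */
    /* CONFIG_HOTPLUG_CPU, additional_cpus was forced to 0 and possible = 4. */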
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 7066cb855a60..1884a8d12bfa 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -22,6 +22,8 @@
22#include <linux/uaccess.h> 22#include <linux/uaccess.h>
23#include <linux/unistd.h> 23#include <linux/unistd.h>
24 24
25#include <asm/syscalls.h>
26
25asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 27asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
26 unsigned long prot, unsigned long flags, 28 unsigned long prot, unsigned long flags,
27 unsigned long fd, unsigned long pgoff) 29 unsigned long fd, unsigned long pgoff)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 3b360ef33817..6bc211accf08 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -13,15 +13,17 @@
13#include <linux/utsname.h> 13#include <linux/utsname.h>
14#include <linux/personality.h> 14#include <linux/personality.h>
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/uaccess.h>
16 17
17#include <asm/uaccess.h>
18#include <asm/ia32.h> 18#include <asm/ia32.h>
19#include <asm/syscalls.h>
19 20
20asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, 21asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
21 unsigned long fd, unsigned long off) 22 unsigned long prot, unsigned long flags,
23 unsigned long fd, unsigned long off)
22{ 24{
23 long error; 25 long error;
24 struct file * file; 26 struct file *file;
25 27
26 error = -EINVAL; 28 error = -EINVAL;
27 if (off & ~PAGE_MASK) 29 if (off & ~PAGE_MASK)
@@ -56,9 +58,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
56 unmapped base down for this case. This can give 58 unmapped base down for this case. This can give
57 conflicts with the heap, but we assume that glibc 59 conflicts with the heap, but we assume that glibc
58 malloc knows how to fall back to mmap. Give it 1GB 60 malloc knows how to fall back to mmap. Give it 1GB
59 of playground for now. -AK */ 61 of playground for now. -AK */
60 *begin = 0x40000000; 62 *begin = 0x40000000;
61 *end = 0x80000000; 63 *end = 0x80000000;
62 if (current->flags & PF_RANDOMIZE) { 64 if (current->flags & PF_RANDOMIZE) {
63 new_begin = randomize_range(*begin, *begin + 0x02000000, 0); 65 new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
64 if (new_begin) 66 if (new_begin)
@@ -66,9 +68,9 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
66 } 68 }
67 } else { 69 } else {
68 *begin = TASK_UNMAPPED_BASE; 70 *begin = TASK_UNMAPPED_BASE;
69 *end = TASK_SIZE; 71 *end = TASK_SIZE;
70 } 72 }
71} 73}
72 74
73unsigned long 75unsigned long
74arch_get_unmapped_area(struct file *filp, unsigned long addr, 76arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -78,11 +80,11 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78 struct vm_area_struct *vma; 80 struct vm_area_struct *vma;
79 unsigned long start_addr; 81 unsigned long start_addr;
80 unsigned long begin, end; 82 unsigned long begin, end;
81 83
82 if (flags & MAP_FIXED) 84 if (flags & MAP_FIXED)
83 return addr; 85 return addr;
84 86
85 find_start_end(flags, &begin, &end); 87 find_start_end(flags, &begin, &end);
86 88
87 if (len > end) 89 if (len > end)
88 return -ENOMEM; 90 return -ENOMEM;
@@ -96,12 +98,12 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
96 } 98 }
97 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 99 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
98 && len <= mm->cached_hole_size) { 100 && len <= mm->cached_hole_size) {
99 mm->cached_hole_size = 0; 101 mm->cached_hole_size = 0;
100 mm->free_area_cache = begin; 102 mm->free_area_cache = begin;
101 } 103 }
102 addr = mm->free_area_cache; 104 addr = mm->free_area_cache;
103 if (addr < begin) 105 if (addr < begin)
104 addr = begin; 106 addr = begin;
105 start_addr = addr; 107 start_addr = addr;
106 108
107full_search: 109full_search:
@@ -127,7 +129,7 @@ full_search:
127 return addr; 129 return addr;
128 } 130 }
129 if (addr + mm->cached_hole_size < vma->vm_start) 131 if (addr + mm->cached_hole_size < vma->vm_start)
130 mm->cached_hole_size = vma->vm_start - addr; 132 mm->cached_hole_size = vma->vm_start - addr;
131 133
132 addr = vma->vm_end; 134 addr = vma->vm_end;
133 } 135 }
@@ -177,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
177 vma = find_vma(mm, addr-len); 179 vma = find_vma(mm, addr-len);
178 if (!vma || addr <= vma->vm_start) 180 if (!vma || addr <= vma->vm_start)
179 /* remember the address as a hint for next time */ 181 /* remember the address as a hint for next time */
180 return (mm->free_area_cache = addr-len); 182 return mm->free_area_cache = addr-len;
181 } 183 }
182 184
183 if (mm->mmap_base < len) 185 if (mm->mmap_base < len)
@@ -194,7 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
194 vma = find_vma(mm, addr); 196 vma = find_vma(mm, addr);
195 if (!vma || addr+len <= vma->vm_start) 197 if (!vma || addr+len <= vma->vm_start)
196 /* remember the address as a hint for next time */ 198 /* remember the address as a hint for next time */
197 return (mm->free_area_cache = addr); 199 return mm->free_area_cache = addr;
198 200
199 /* remember the largest hole we saw so far */ 201 /* remember the largest hole we saw so far */
200 if (addr + mm->cached_hole_size < vma->vm_start) 202 if (addr + mm->cached_hole_size < vma->vm_start)
@@ -224,13 +226,13 @@ bottomup:
224} 226}
225 227
226 228
227asmlinkage long sys_uname(struct new_utsname __user * name) 229asmlinkage long sys_uname(struct new_utsname __user *name)
228{ 230{
229 int err; 231 int err;
230 down_read(&uts_sem); 232 down_read(&uts_sem);
231 err = copy_to_user(name, utsname(), sizeof (*name)); 233 err = copy_to_user(name, utsname(), sizeof(*name));
232 up_read(&uts_sem); 234 up_read(&uts_sem);
233 if (personality(current->personality) == PER_LINUX32) 235 if (personality(current->personality) == PER_LINUX32)
234 err |= copy_to_user(&name->machine, "i686", 5); 236 err |= copy_to_user(&name->machine, "i686", 5);
235 return err ? -EFAULT : 0; 237 return err ? -EFAULT : 0;
236} 238}
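find_start_end() above is what gives MAP_32BIT (and 32-bit tasks) their legacy sub-2GB allocation window. A small userspace illustration of the behaviour, not part of the patch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* MAP_32BIT asks the legacy allocator for an address below 2GB. */
            void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);

            if (p != MAP_FAILED)
                    /* Typically lands in 0x40000000..0x7fffffff on this kernel. */
                    printf("MAP_32BIT mapping at %p\n", p);
            return 0;
    }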
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 170d43c17487..3d1be4f0fac5 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -8,12 +8,12 @@
8#define __NO_STUBS 8#define __NO_STUBS
9 9
10#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; 10#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
11#undef _ASM_X86_64_UNISTD_H_ 11#undef ASM_X86__UNISTD_64_H
12#include <asm/unistd_64.h> 12#include <asm/unistd_64.h>
13 13
14#undef __SYSCALL 14#undef __SYSCALL
15#define __SYSCALL(nr, sym) [nr] = sym, 15#define __SYSCALL(nr, sym) [nr] = sym,
16#undef _ASM_X86_64_UNISTD_H_ 16#undef ASM_X86__UNISTD_64_H
17 17
18typedef void (*sys_call_ptr_t)(void); 18typedef void (*sys_call_ptr_t)(void);
19 19
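Only the include-guard name changes here (the asm-x86 guards moved to the ASM_X86__* scheme), but the guard is #undef'd for a reason: unistd_64.h is deliberately included twice with different __SYSCALL() definitions. A condensed sketch of the trick, with __SYSCALL(__NR_read, sys_read) standing in for any table entry:

    extern void sys_ni_syscall(void);

    /* Pass 1: turn every entry into a prototype. */
    #define __SYSCALL(nr, sym) extern asmlinkage void sym(void);
    #undef ASM_X86__UNISTD_64_H          /* defeat the include guard          */
    #include <asm/unistd_64.h>           /* -> extern asmlinkage void sys_read(void); ... */

    /* Pass 2: turn every entry into a designated initializer. */
    #undef __SYSCALL
    #define __SYSCALL(nr, sym) [nr] = sym,
    #undef ASM_X86__UNISTD_64_H          /* defeat it again for the re-include */

    const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
            [0 ... __NR_syscall_max] = &sys_ni_syscall,   /* default: -ENOSYS */
    #include <asm/unistd_64.h>           /* -> [__NR_read] = sys_read, ...    */
    };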
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index ffe3c664afc0..bbecf8b6bf96 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -36,6 +36,7 @@
36#include <asm/arch_hooks.h> 36#include <asm/arch_hooks.h>
37#include <asm/hpet.h> 37#include <asm/hpet.h>
38#include <asm/time.h> 38#include <asm/time.h>
39#include <asm/timer.h>
39 40
40#include "do_timer.h" 41#include "do_timer.h"
41 42
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index ab6bf375a307..6bb7b8579e70 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -10,6 +10,7 @@
10#include <asm/ldt.h> 10#include <asm/ldt.h>
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/proto.h> 12#include <asm/proto.h>
13#include <asm/syscalls.h>
13 14
14#include "tls.h" 15#include "tls.h"
15 16
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 513caaca7115..7a31f104bef9 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -32,6 +32,8 @@
32#include <linux/bug.h> 32#include <linux/bug.h>
33#include <linux/nmi.h> 33#include <linux/nmi.h>
34#include <linux/mm.h> 34#include <linux/mm.h>
35#include <linux/smp.h>
36#include <linux/io.h>
35 37
36#if defined(CONFIG_EDAC) 38#if defined(CONFIG_EDAC)
37#include <linux/edac.h> 39#include <linux/edac.h>
@@ -45,9 +47,6 @@
45#include <asm/unwind.h> 47#include <asm/unwind.h>
46#include <asm/desc.h> 48#include <asm/desc.h>
47#include <asm/i387.h> 49#include <asm/i387.h>
48#include <asm/nmi.h>
49#include <asm/smp.h>
50#include <asm/io.h>
51#include <asm/pgalloc.h> 50#include <asm/pgalloc.h>
52#include <asm/proto.h> 51#include <asm/proto.h>
53#include <asm/pda.h> 52#include <asm/pda.h>
@@ -85,7 +84,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
85 84
86void printk_address(unsigned long address, int reliable) 85void printk_address(unsigned long address, int reliable)
87{ 86{
88 printk(" [<%016lx>] %s%pS\n", address, reliable ? "": "? ", (void *) address); 87 printk(" [<%016lx>] %s%pS\n",
88 address, reliable ? "" : "? ", (void *) address);
89} 89}
90 90
91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack, 91static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
@@ -98,7 +98,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
98 [STACKFAULT_STACK - 1] = "#SS", 98 [STACKFAULT_STACK - 1] = "#SS",
99 [MCE_STACK - 1] = "#MC", 99 [MCE_STACK - 1] = "#MC",
100#if DEBUG_STKSZ > EXCEPTION_STKSZ 100#if DEBUG_STKSZ > EXCEPTION_STKSZ
101 [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]" 101 [N_EXCEPTION_STACKS ...
102 N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
102#endif 103#endif
103 }; 104 };
104 unsigned k; 105 unsigned k;
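The wrapped line above is a GNU C range designator, which initialises a whole span of array indices at once; the reflow does not change its meaning. A standalone illustration with a hypothetical table:

    /* GNU C range designator: indices 4..6 all get the same initializer. */
    static const char *ids[8] = {
            [1]       = "#DF",
            [4 ... 6] = "#DB[?]",
    };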
@@ -163,7 +164,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
163} 164}
164 165
165/* 166/*
166 * x86-64 can have up to three kernel stacks: 167 * x86-64 can have up to three kernel stacks:
167 * process stack 168 * process stack
168 * interrupt stack 169 * interrupt stack
169 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack 170 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
219 const struct stacktrace_ops *ops, void *data) 220 const struct stacktrace_ops *ops, void *data)
220{ 221{
221 const unsigned cpu = get_cpu(); 222 const unsigned cpu = get_cpu();
222 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr; 223 unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
223 unsigned used = 0; 224 unsigned used = 0;
224 struct thread_info *tinfo; 225 struct thread_info *tinfo;
225 226
@@ -237,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
237 if (!bp) { 238 if (!bp) {
238 if (task == current) { 239 if (task == current) {
239 /* Grab bp right from our regs */ 240 /* Grab bp right from our regs */
240 asm("movq %%rbp, %0" : "=r" (bp) :); 241 asm("movq %%rbp, %0" : "=r" (bp) : );
241 } else { 242 } else {
242 /* bp is the last reg pushed by switch_to */ 243 /* bp is the last reg pushed by switch_to */
243 bp = *(unsigned long *) task->thread.sp; 244 bp = *(unsigned long *) task->thread.sp;
@@ -339,9 +340,8 @@ static void
339show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, 340show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
340 unsigned long *stack, unsigned long bp, char *log_lvl) 341 unsigned long *stack, unsigned long bp, char *log_lvl)
341{ 342{
342 printk("\nCall Trace:\n"); 343 printk("Call Trace:\n");
343 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl); 344 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
344 printk("\n");
345} 345}
346 346
347void show_trace(struct task_struct *task, struct pt_regs *regs, 347void show_trace(struct task_struct *task, struct pt_regs *regs,
@@ -357,11 +357,15 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
357 unsigned long *stack; 357 unsigned long *stack;
358 int i; 358 int i;
359 const int cpu = smp_processor_id(); 359 const int cpu = smp_processor_id();
360 unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr); 360 unsigned long *irqstack_end =
361 unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE); 361 (unsigned long *) (cpu_pda(cpu)->irqstackptr);
362 unsigned long *irqstack =
363 (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
362 364
363 // debugging aid: "show_stack(NULL, NULL);" prints the 365 /*
364 // back trace for this cpu. 366 * debugging aid: "show_stack(NULL, NULL);" prints the
367 * back trace for this cpu.
368 */
365 369
366 if (sp == NULL) { 370 if (sp == NULL) {
367 if (task) 371 if (task)
@@ -386,6 +390,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
386 printk(" %016lx", *stack++); 390 printk(" %016lx", *stack++);
387 touch_nmi_watchdog(); 391 touch_nmi_watchdog();
388 } 392 }
393 printk("\n");
389 show_trace_log_lvl(task, regs, sp, bp, log_lvl); 394 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
390} 395}
391 396
@@ -404,7 +409,7 @@ void dump_stack(void)
404 409
405#ifdef CONFIG_FRAME_POINTER 410#ifdef CONFIG_FRAME_POINTER
406 if (!bp) 411 if (!bp)
407 asm("movq %%rbp, %0" : "=r" (bp):); 412 asm("movq %%rbp, %0" : "=r" (bp) : );
408#endif 413#endif
409 414
410 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 415 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
@@ -414,7 +419,6 @@ void dump_stack(void)
414 init_utsname()->version); 419 init_utsname()->version);
415 show_trace(NULL, NULL, &stack, bp); 420 show_trace(NULL, NULL, &stack, bp);
416} 421}
417
418EXPORT_SYMBOL(dump_stack); 422EXPORT_SYMBOL(dump_stack);
419 423
420void show_registers(struct pt_regs *regs) 424void show_registers(struct pt_regs *regs)
@@ -443,7 +447,6 @@ void show_registers(struct pt_regs *regs)
443 printk("Stack: "); 447 printk("Stack: ");
444 show_stack_log_lvl(NULL, regs, (unsigned long *)sp, 448 show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
445 regs->bp, ""); 449 regs->bp, "");
446 printk("\n");
447 450
448 printk(KERN_EMERG "Code: "); 451 printk(KERN_EMERG "Code: ");
449 452
@@ -493,7 +496,7 @@ unsigned __kprobes long oops_begin(void)
493 raw_local_irq_save(flags); 496 raw_local_irq_save(flags);
494 cpu = smp_processor_id(); 497 cpu = smp_processor_id();
495 if (!__raw_spin_trylock(&die_lock)) { 498 if (!__raw_spin_trylock(&die_lock)) {
496 if (cpu == die_owner) 499 if (cpu == die_owner)
497 /* nested oops. should stop eventually */; 500 /* nested oops. should stop eventually */;
498 else 501 else
499 __raw_spin_lock(&die_lock); 502 __raw_spin_lock(&die_lock);
@@ -638,7 +641,7 @@ kernel_trap:
638} 641}
639 642
640#define DO_ERROR(trapnr, signr, str, name) \ 643#define DO_ERROR(trapnr, signr, str, name) \
641asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 644asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
642{ \ 645{ \
643 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ 646 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
644 == NOTIFY_STOP) \ 647 == NOTIFY_STOP) \
@@ -648,7 +651,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
648} 651}
649 652
650#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ 653#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
651asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ 654asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
652{ \ 655{ \
653 siginfo_t info; \ 656 siginfo_t info; \
654 info.si_signo = signr; \ 657 info.si_signo = signr; \
@@ -683,7 +686,7 @@ asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
683 preempt_conditional_cli(regs); 686 preempt_conditional_cli(regs);
684} 687}
685 688
686asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) 689asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
687{ 690{
688 static const char str[] = "double fault"; 691 static const char str[] = "double fault";
689 struct task_struct *tsk = current; 692 struct task_struct *tsk = current;
@@ -778,9 +781,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
778} 781}
779 782
780static notrace __kprobes void 783static notrace __kprobes void
781unknown_nmi_error(unsigned char reason, struct pt_regs * regs) 784unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
782{ 785{
783 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) 786 if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
787 NOTIFY_STOP)
784 return; 788 return;
785 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n", 789 printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
786 reason); 790 reason);
@@ -882,7 +886,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
882 else if (user_mode(eregs)) 886 else if (user_mode(eregs))
883 regs = task_pt_regs(current); 887 regs = task_pt_regs(current);
884 /* Exception from kernel and interrupts are enabled. Move to 888 /* Exception from kernel and interrupts are enabled. Move to
885 kernel process stack. */ 889 kernel process stack. */
886 else if (eregs->flags & X86_EFLAGS_IF) 890 else if (eregs->flags & X86_EFLAGS_IF)
887 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); 891 regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
888 if (eregs != regs) 892 if (eregs != regs)
@@ -891,7 +895,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
891} 895}
892 896
893/* runs on IST stack. */ 897/* runs on IST stack. */
894asmlinkage void __kprobes do_debug(struct pt_regs * regs, 898asmlinkage void __kprobes do_debug(struct pt_regs *regs,
895 unsigned long error_code) 899 unsigned long error_code)
896{ 900{
897 struct task_struct *tsk = current; 901 struct task_struct *tsk = current;
@@ -1035,7 +1039,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
1035 1039
1036asmlinkage void bad_intr(void) 1040asmlinkage void bad_intr(void)
1037{ 1041{
1038 printk("bad interrupt"); 1042 printk("bad interrupt");
1039} 1043}
1040 1044
1041asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs) 1045asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
@@ -1047,7 +1051,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1047 1051
1048 conditional_sti(regs); 1052 conditional_sti(regs);
1049 if (!user_mode(regs) && 1053 if (!user_mode(regs) &&
1050 kernel_math_error(regs, "kernel simd math error", 19)) 1054 kernel_math_error(regs, "kernel simd math error", 19))
1051 return; 1055 return;
1052 1056
1053 /* 1057 /*
@@ -1092,7 +1096,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
1092 force_sig_info(SIGFPE, &info, task); 1096 force_sig_info(SIGFPE, &info, task);
1093} 1097}
1094 1098
1095asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs) 1099asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
1096{ 1100{
1097} 1101}
1098 1102
@@ -1149,8 +1153,10 @@ void __init trap_init(void)
1149 set_intr_gate(0, &divide_error); 1153 set_intr_gate(0, &divide_error);
1150 set_intr_gate_ist(1, &debug, DEBUG_STACK); 1154 set_intr_gate_ist(1, &debug, DEBUG_STACK);
1151 set_intr_gate_ist(2, &nmi, NMI_STACK); 1155 set_intr_gate_ist(2, &nmi, NMI_STACK);
1152 set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */ 1156 /* int3 can be called from all */
1153 set_system_gate(4, &overflow); /* int4 can be called from all */ 1157 set_system_gate_ist(3, &int3, DEBUG_STACK);
1158 /* int4 can be called from all */
1159 set_system_gate(4, &overflow);
1154 set_intr_gate(5, &bounds); 1160 set_intr_gate(5, &bounds);
1155 set_intr_gate(6, &invalid_op); 1161 set_intr_gate(6, &invalid_op);
1156 set_intr_gate(7, &device_not_available); 1162 set_intr_gate(7, &device_not_available);
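printk_address(), reformatted above, relies on the %pS printk extension to render a kernel text address as symbol+offset. A minimal illustration (the address and output are made up):

    /* %pS resolves a kernel text pointer to "symbol+off/len"; the "? "
     * prefix marks frames the stack walker could not verify. */
    unsigned long address = (unsigned long)dump_stack;   /* any text symbol */
    int reliable = 0;

    printk(" [<%016lx>] %s%pS\n",
           address, reliable ? "" : "? ", (void *)address);
    /* prints something like:  [<ffffffff8020c3f0>] ? dump_stack+0x0/0x70 */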
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8f98e9de1b82..161bb850fc47 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,7 +104,7 @@ __setup("notsc", notsc_setup);
104/* 104/*
105 * Read TSC and the reference counters. Take care of SMI disturbance 105 * Read TSC and the reference counters. Take care of SMI disturbance
106 */ 106 */
107static u64 tsc_read_refs(u64 *pm, u64 *hpet) 107static u64 tsc_read_refs(u64 *p, int hpet)
108{ 108{
109 u64 t1, t2; 109 u64 t1, t2;
110 int i; 110 int i;
@@ -112,9 +112,9 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
112 for (i = 0; i < MAX_RETRIES; i++) { 112 for (i = 0; i < MAX_RETRIES; i++) {
113 t1 = get_cycles(); 113 t1 = get_cycles();
114 if (hpet) 114 if (hpet)
115 *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF; 115 *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
116 else 116 else
117 *pm = acpi_pm_read_early(); 117 *p = acpi_pm_read_early();
118 t2 = get_cycles(); 118 t2 = get_cycles();
119 if ((t2 - t1) < SMI_TRESHOLD) 119 if ((t2 - t1) < SMI_TRESHOLD)
120 return t2; 120 return t2;
@@ -123,13 +123,59 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
123} 123}
124 124
125/* 125/*
126 * Calculate the TSC frequency from HPET reference
127 */
128static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
129{
130 u64 tmp;
131
132 if (hpet2 < hpet1)
133 hpet2 += 0x100000000ULL;
134 hpet2 -= hpet1;
135 tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
136 do_div(tmp, 1000000);
137 do_div(deltatsc, tmp);
138
139 return (unsigned long) deltatsc;
140}
141
142/*
143 * Calculate the TSC frequency from PMTimer reference
144 */
145static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
146{
147 u64 tmp;
148
149 if (!pm1 && !pm2)
150 return ULONG_MAX;
151
152 if (pm2 < pm1)
153 pm2 += (u64)ACPI_PM_OVRRUN;
154 pm2 -= pm1;
155 tmp = pm2 * 1000000000LL;
156 do_div(tmp, PMTMR_TICKS_PER_SEC);
157 do_div(deltatsc, tmp);
158
159 return (unsigned long) deltatsc;
160}
161
162#define CAL_MS 10
163#define CAL_LATCH (CLOCK_TICK_RATE / (1000 / CAL_MS))
164#define CAL_PIT_LOOPS 1000
165
166#define CAL2_MS 50
167#define CAL2_LATCH (CLOCK_TICK_RATE / (1000 / CAL2_MS))
168#define CAL2_PIT_LOOPS 5000
169
170
171/*
126 * Try to calibrate the TSC against the Programmable 172 * Try to calibrate the TSC against the Programmable
127 * Interrupt Timer and return the frequency of the TSC 173 * Interrupt Timer and return the frequency of the TSC
128 * in kHz. 174 * in kHz.
129 * 175 *
130 * Return ULONG_MAX on failure to calibrate. 176 * Return ULONG_MAX on failure to calibrate.
131 */ 177 */
132static unsigned long pit_calibrate_tsc(void) 178static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
133{ 179{
134 u64 tsc, t1, t2, delta; 180 u64 tsc, t1, t2, delta;
135 unsigned long tscmin, tscmax; 181 unsigned long tscmin, tscmax;
@@ -144,8 +190,8 @@ static unsigned long pit_calibrate_tsc(void)
144 * (LSB then MSB) to begin countdown. 190 * (LSB then MSB) to begin countdown.
145 */ 191 */
146 outb(0xb0, 0x43); 192 outb(0xb0, 0x43);
147 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); 193 outb(latch & 0xff, 0x42);
148 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); 194 outb(latch >> 8, 0x42);
149 195
150 tsc = t1 = t2 = get_cycles(); 196 tsc = t1 = t2 = get_cycles();
151 197
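A note on units in the new calc_hpet_ref()/calc_pmtimer_ref() helpers: the caller pre-multiplies the TSC delta by 1,000,000 and both helpers reduce their reference delta to elapsed nanoseconds (HPET_PERIOD is in femtoseconds, the ACPI PM timer ticks at PMTMR_TICKS_PER_SEC = 3,579,545 Hz), so the final quotient is directly in kHz. A worked example with assumed numbers:

    /* Assumed sample: a 2.4 GHz TSC observed over roughly 10 ms.            */
    /*   deltatsc = 24,000,000 cycles * 1,000,000 (caller's pre-scale)       */
    /*   HPET: 143,182 ticks * 69,841,279 fs/tick ~= 1.0e13 fs               */
    /*         do_div(tmp, 1000000)   ->  tmp ~= 1.0e7 ns  (10 ms)           */
    /*   do_div(deltatsc, tmp)        ->  ~2,400,000, i.e. 2.4 GHz in kHz    */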
@@ -166,31 +212,154 @@ static unsigned long pit_calibrate_tsc(void)
166 /* 212 /*
167 * Sanity checks: 213 * Sanity checks:
168 * 214 *
169 * If we were not able to read the PIT more than 5000 215 * If we were not able to read the PIT more than loopmin
170 * times, then we have been hit by a massive SMI 216 * times, then we have been hit by a massive SMI
171 * 217 *
172 * If the maximum is 10 times larger than the minimum, 218 * If the maximum is 10 times larger than the minimum,
173 * then we got hit by an SMI as well. 219 * then we got hit by an SMI as well.
174 */ 220 */
175 if (pitcnt < 5000 || tscmax > 10 * tscmin) 221 if (pitcnt < loopmin || tscmax > 10 * tscmin)
176 return ULONG_MAX; 222 return ULONG_MAX;
177 223
178 /* Calculate the PIT value */ 224 /* Calculate the PIT value */
179 delta = t2 - t1; 225 delta = t2 - t1;
180 do_div(delta, 50); 226 do_div(delta, ms);
181 return delta; 227 return delta;
182} 228}
183 229
230/*
231 * This reads the current MSB of the PIT counter, and
232 * checks if we are running on sufficiently fast and
233 * non-virtualized hardware.
234 *
235 * Our expectations are:
236 *
237 * - the PIT is running at roughly 1.19MHz
238 *
239 * - each IO is going to take about 1us on real hardware,
240 * but we allow it to be much faster (by a factor of 10) or
241 * _slightly_ slower (ie we allow up to a 2us read+counter
242 * update - anything else implies a unacceptably slow CPU
243 * or PIT for the fast calibration to work.
244 *
245 * - with 256 PIT ticks to read the value, we have 214us to
246 * see the same MSB (and overhead like doing a single TSC
247 * read per MSB value etc).
248 *
249 * - We're doing 2 reads per loop (LSB, MSB), and we expect
250 * them each to take about a microsecond on real hardware.
251 * So we expect a count value of around 100. But we'll be
252 * generous, and accept anything over 50.
253 *
254 * - if the PIT is stuck, and we see *many* more reads, we
255 * return early (and the next caller of pit_expect_msb()
256 * then consider it a failure when they don't see the
257 * next expected value).
258 *
259 * These expectations mean that we know that we have seen the
260 * transition from one expected value to another with a fairly
261 * high accuracy, and we didn't miss any events. We can thus
262 * use the TSC value at the transitions to calculate a pretty
263 * good value for the TSC frequency.
264 */
265static inline int pit_expect_msb(unsigned char val)
266{
267 int count = 0;
268
269 for (count = 0; count < 50000; count++) {
270 /* Ignore LSB */
271 inb(0x42);
272 if (inb(0x42) != val)
273 break;
274 }
275 return count > 50;
276}
277
278/*
279 * How many MSB values do we want to see? We aim for a
280 * 15ms calibration, which assuming a 2us counter read
281 * error should give us roughly 150 ppm precision for
282 * the calibration.
283 */
284#define QUICK_PIT_MS 15
285#define QUICK_PIT_ITERATIONS (QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
286
287static unsigned long quick_pit_calibrate(void)
288{
289 /* Set the Gate high, disable speaker */
290 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
291
292 /*
293 * Counter 2, mode 0 (one-shot), binary count
294 *
295 * NOTE! Mode 2 decrements by two (and then the
296 * output is flipped each time, giving the same
297 * final output frequency as a decrement-by-one),
298 * so mode 0 is much better when looking at the
299 * individual counts.
300 */
301 outb(0xb0, 0x43);
302
303 /* Start at 0xffff */
304 outb(0xff, 0x42);
305 outb(0xff, 0x42);
306
307 if (pit_expect_msb(0xff)) {
308 int i;
309 u64 t1, t2, delta;
310 unsigned char expect = 0xfe;
311
312 t1 = get_cycles();
313 for (i = 0; i < QUICK_PIT_ITERATIONS; i++, expect--) {
314 if (!pit_expect_msb(expect))
315 goto failed;
316 }
317 t2 = get_cycles();
318
319 /*
320 * Make sure we can rely on the second TSC timestamp:
321 */
322 if (!pit_expect_msb(expect))
323 goto failed;
324
325 /*
326 * Ok, if we get here, then we've seen the
327 * MSB of the PIT decrement QUICK_PIT_ITERATIONS
328 * times, and each MSB had many hits, so we never
329 * had any sudden jumps.
330 *
331 * As a result, we can depend on there not being
332 * any odd delays anywhere, and the TSC reads are
333 * reliable.
334 *
335 * kHz = ticks / time-in-seconds / 1000;
336 * kHz = (t2 - t1) / (QPI * 256 / PIT_TICK_RATE) / 1000
337 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (QPI * 256 * 1000)
338 */
339 delta = (t2 - t1)*PIT_TICK_RATE;
340 do_div(delta, QUICK_PIT_ITERATIONS*256*1000);
341 printk("Fast TSC calibration using PIT\n");
342 return delta;
343 }
344failed:
345 return 0;
346}
184 347
185/** 348/**
186 * native_calibrate_tsc - calibrate the tsc on boot 349 * native_calibrate_tsc - calibrate the tsc on boot
187 */ 350 */
188unsigned long native_calibrate_tsc(void) 351unsigned long native_calibrate_tsc(void)
189{ 352{
190 u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2; 353 u64 tsc1, tsc2, delta, ref1, ref2;
191 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX; 354 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
192 unsigned long flags; 355 unsigned long flags, latch, ms, fast_calibrate;
193 int hpet = is_hpet_enabled(), i; 356 int hpet = is_hpet_enabled(), i, loopmin;
357
358 local_irq_save(flags);
359 fast_calibrate = quick_pit_calibrate();
360 local_irq_restore(flags);
361 if (fast_calibrate)
362 return fast_calibrate;
194 363
195 /* 364 /*
196 * Run 5 calibration loops to get the lowest frequency value 365 * Run 5 calibration loops to get the lowest frequency value
@@ -216,7 +385,13 @@ unsigned long native_calibrate_tsc(void)
216 * calibration delay loop as we have to wait for a certain 385 * calibration delay loop as we have to wait for a certain
217 * amount of time anyway. 386 * amount of time anyway.
218 */ 387 */
219 for (i = 0; i < 5; i++) { 388
389 /* Preset PIT loop values */
390 latch = CAL_LATCH;
391 ms = CAL_MS;
392 loopmin = CAL_PIT_LOOPS;
393
394 for (i = 0; i < 3; i++) {
220 unsigned long tsc_pit_khz; 395 unsigned long tsc_pit_khz;
221 396
222 /* 397 /*
@@ -226,16 +401,16 @@ unsigned long native_calibrate_tsc(void)
226 * read the end value. 401 * read the end value.
227 */ 402 */
228 local_irq_save(flags); 403 local_irq_save(flags);
229 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL); 404 tsc1 = tsc_read_refs(&ref1, hpet);
230 tsc_pit_khz = pit_calibrate_tsc(); 405 tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
231 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); 406 tsc2 = tsc_read_refs(&ref2, hpet);
232 local_irq_restore(flags); 407 local_irq_restore(flags);
233 408
234 /* Pick the lowest PIT TSC calibration so far */ 409 /* Pick the lowest PIT TSC calibration so far */
235 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz); 410 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
236 411
237 /* hpet or pmtimer available ? */ 412 /* hpet or pmtimer available ? */
238 if (!hpet && !pm1 && !pm2) 413 if (!hpet && !ref1 && !ref2)
239 continue; 414 continue;
240 415
241 /* Check, whether the sampling was disturbed by an SMI */ 416 /* Check, whether the sampling was disturbed by an SMI */
@@ -243,23 +418,41 @@ unsigned long native_calibrate_tsc(void)
243 continue; 418 continue;
244 419
245 tsc2 = (tsc2 - tsc1) * 1000000LL; 420 tsc2 = (tsc2 - tsc1) * 1000000LL;
421 if (hpet)
422 tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
423 else
424 tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);
246 425
247 if (hpet) { 426 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
248 if (hpet2 < hpet1) 427
249 hpet2 += 0x100000000ULL; 428 /* Check the reference deviation */
250 hpet2 -= hpet1; 429 delta = ((u64) tsc_pit_min) * 100;
251 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 430 do_div(delta, tsc_ref_min);
252 do_div(tsc1, 1000000); 431
253 } else { 432 /*
254 if (pm2 < pm1) 433 * If both calibration results are inside a 10% window
255 pm2 += (u64)ACPI_PM_OVRRUN; 434 * then we can be sure, that the calibration
256 pm2 -= pm1; 435 * succeeded. We break out of the loop right away. We
257 tsc1 = pm2 * 1000000000LL; 436 * use the reference value, as it is more precise.
258 do_div(tsc1, PMTMR_TICKS_PER_SEC); 437 */
438 if (delta >= 90 && delta <= 110) {
439 printk(KERN_INFO
440 "TSC: PIT calibration matches %s. %d loops\n",
441 hpet ? "HPET" : "PMTIMER", i + 1);
442 return tsc_ref_min;
259 } 443 }
260 444
261 do_div(tsc2, tsc1); 445 /*
262 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2); 446 * Check whether PIT failed more than once. This
447 * happens in virtualized environments. We need to
448 * give the virtual PC a slightly longer timeframe for
449 * the HPET/PMTIMER to make the result precise.
450 */
451 if (i == 1 && tsc_pit_min == ULONG_MAX) {
452 latch = CAL2_LATCH;
453 ms = CAL2_MS;
454 loopmin = CAL2_PIT_LOOPS;
455 }
263 } 456 }
264 457
265 /* 458 /*
@@ -270,7 +463,7 @@ unsigned long native_calibrate_tsc(void)
270 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n"); 463 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
271 464
272 /* We don't have an alternative source, disable TSC */ 465 /* We don't have an alternative source, disable TSC */
273 if (!hpet && !pm1 && !pm2) { 466 if (!hpet && !ref1 && !ref2) {
274 printk("TSC: No reference (HPET/PMTIMER) available\n"); 467 printk("TSC: No reference (HPET/PMTIMER) available\n");
275 return 0; 468 return 0;
276 } 469 }
@@ -278,7 +471,7 @@ unsigned long native_calibrate_tsc(void)
278 /* The alternative source failed as well, disable TSC */ 471 /* The alternative source failed as well, disable TSC */
279 if (tsc_ref_min == ULONG_MAX) { 472 if (tsc_ref_min == ULONG_MAX) {
280 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration " 473 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
281 "failed due to SMI disturbance.\n"); 474 "failed.\n");
282 return 0; 475 return 0;
283 } 476 }
284 477
@@ -290,44 +483,25 @@ unsigned long native_calibrate_tsc(void)
290 } 483 }
291 484
292 /* We don't have an alternative source, use the PIT calibration value */ 485 /* We don't have an alternative source, use the PIT calibration value */
293 if (!hpet && !pm1 && !pm2) { 486 if (!hpet && !ref1 && !ref2) {
294 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 487 printk(KERN_INFO "TSC: Using PIT calibration value\n");
295 return tsc_pit_min; 488 return tsc_pit_min;
296 } 489 }
297 490
298 /* The alternative source failed, use the PIT calibration value */ 491 /* The alternative source failed, use the PIT calibration value */
299 if (tsc_ref_min == ULONG_MAX) { 492 if (tsc_ref_min == ULONG_MAX) {
300 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due " 493 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
301 "to SMI disturbance. Using PIT calibration\n"); 494 "Using PIT calibration\n");
302 return tsc_pit_min; 495 return tsc_pit_min;
303 } 496 }
304 497
305 /* Check the reference deviation */
306 delta = ((u64) tsc_pit_min) * 100;
307 do_div(delta, tsc_ref_min);
308
309 /*
310 * If both calibration results are inside a 5% window, the we
311 * use the lower frequency of those as it is probably the
312 * closest estimate.
313 */
314 if (delta >= 95 && delta <= 105) {
315 printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
316 hpet ? "HPET" : "PMTIMER");
317 printk(KERN_INFO "TSC: using %s calibration value\n",
318 tsc_pit_min <= tsc_ref_min ? "PIT" :
319 hpet ? "HPET" : "PMTIMER");
320 return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
321 }
322
323 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
324 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
325
326 /* 498 /*
327 * The calibration values differ too much. In doubt, we use 499 * The calibration values differ too much. In doubt, we use
328 * the PIT value as we know that there are PMTIMERs around 500 * the PIT value as we know that there are PMTIMERs around
329 * running at double speed. 501 * running at double speed. At least we let the user know:
330 */ 502 */
503 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
504 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
331 printk(KERN_INFO "TSC: Using PIT calibration value\n"); 505 printk(KERN_INFO "TSC: Using PIT calibration value\n");
332 return tsc_pit_min; 506 return tsc_pit_min;
333} 507}
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 594ef47f0a63..61a97e616f70 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -25,45 +25,31 @@
25#include <asm/visws/cobalt.h> 25#include <asm/visws/cobalt.h>
26#include <asm/visws/piix4.h> 26#include <asm/visws/piix4.h>
27#include <asm/arch_hooks.h> 27#include <asm/arch_hooks.h>
28#include <asm/io_apic.h>
28#include <asm/fixmap.h> 29#include <asm/fixmap.h>
29#include <asm/reboot.h> 30#include <asm/reboot.h>
30#include <asm/setup.h> 31#include <asm/setup.h>
31#include <asm/e820.h> 32#include <asm/e820.h>
32#include <asm/smp.h>
33#include <asm/io.h> 33#include <asm/io.h>
34 34
35#include <mach_ipi.h> 35#include <mach_ipi.h>
36 36
37#include "mach_apic.h" 37#include "mach_apic.h"
38 38
39#include <linux/init.h>
40#include <linux/smp.h>
41
42#include <linux/kernel_stat.h> 39#include <linux/kernel_stat.h>
43#include <linux/interrupt.h>
44#include <linux/init.h>
45 40
46#include <asm/io.h>
47#include <asm/apic.h>
48#include <asm/i8259.h> 41#include <asm/i8259.h>
49#include <asm/irq_vectors.h> 42#include <asm/irq_vectors.h>
50#include <asm/visws/cobalt.h>
51#include <asm/visws/lithium.h> 43#include <asm/visws/lithium.h>
52#include <asm/visws/piix4.h>
53 44
54#include <linux/sched.h> 45#include <linux/sched.h>
55#include <linux/kernel.h> 46#include <linux/kernel.h>
56#include <linux/init.h>
57#include <linux/pci.h> 47#include <linux/pci.h>
58#include <linux/pci_ids.h> 48#include <linux/pci_ids.h>
59 49
60extern int no_broadcast; 50extern int no_broadcast;
61 51
62#include <asm/io.h>
63#include <asm/apic.h> 52#include <asm/apic.h>
64#include <asm/arch_hooks.h>
65#include <asm/visws/cobalt.h>
66#include <asm/visws/lithium.h>
67 53
68char visws_board_type = -1; 54char visws_board_type = -1;
69char visws_board_rev = -1; 55char visws_board_rev = -1;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 38f566fa27d2..4eeb5cf9720d 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -46,6 +46,7 @@
46#include <asm/io.h> 46#include <asm/io.h>
47#include <asm/tlbflush.h> 47#include <asm/tlbflush.h>
48#include <asm/irq.h> 48#include <asm/irq.h>
49#include <asm/syscalls.h>
49 50
50/* 51/*
51 * Known problems: 52 * Known problems:
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index edfb09f30479..8c9ad02af5a2 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -393,13 +393,13 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
393} 393}
394#endif 394#endif
395 395
396static void vmi_allocate_pte(struct mm_struct *mm, u32 pfn) 396static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
397{ 397{
398 vmi_set_page_type(pfn, VMI_PAGE_L1); 398 vmi_set_page_type(pfn, VMI_PAGE_L1);
399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0); 399 vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
400} 400}
401 401
402static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn) 402static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
403{ 403{
404 /* 404 /*
405 * This call comes in very early, before mem_map is setup. 405 * This call comes in very early, before mem_map is setup.
@@ -410,20 +410,20 @@ static void vmi_allocate_pmd(struct mm_struct *mm, u32 pfn)
410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0); 410 vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
411} 411}
412 412
413static void vmi_allocate_pmd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count) 413static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
414{ 414{
415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE); 415 vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
416 vmi_check_page_type(clonepfn, VMI_PAGE_L2); 416 vmi_check_page_type(clonepfn, VMI_PAGE_L2);
417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count); 417 vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
418} 418}
419 419
420static void vmi_release_pte(u32 pfn) 420static void vmi_release_pte(unsigned long pfn)
421{ 421{
422 vmi_ops.release_page(pfn, VMI_PAGE_L1); 422 vmi_ops.release_page(pfn, VMI_PAGE_L1);
423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 423 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
424} 424}
425 425
426static void vmi_release_pmd(u32 pfn) 426static void vmi_release_pmd(unsigned long pfn)
427{ 427{
428 vmi_ops.release_page(pfn, VMI_PAGE_L2); 428 vmi_ops.release_page(pfn, VMI_PAGE_L2);
429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL); 429 vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
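The pfn arguments widen from u32 to unsigned long so these callbacks keep matching the paravirt page-table hooks they are assigned to once a PFN no longer fits in 32 bits. Roughly the hook shapes involved; the field names are paraphrased from this era's <asm/paravirt.h> and should be treated as an assumption:

    /* Paraphrased, not the authoritative header. */
    struct pv_mmu_ops {
            /* ... */
            void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
            void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
            void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn,
                                    unsigned long start, unsigned long count);
            void (*release_pte)(unsigned long pfn);
            void (*release_pmd)(unsigned long pfn);
            /* ... */
    };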
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index 01b868ba82f8..321cf720dbb6 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -16,37 +16,46 @@ static void __rdmsr_on_cpu(void *info)
16 rdmsr(rv->msr_no, rv->l, rv->h); 16 rdmsr(rv->msr_no, rv->l, rv->h);
17} 17}
18 18
19static void __rdmsr_safe_on_cpu(void *info) 19static void __wrmsr_on_cpu(void *info)
20{ 20{
21 struct msr_info *rv = info; 21 struct msr_info *rv = info;
22 22
23 rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); 23 wrmsr(rv->msr_no, rv->l, rv->h);
24} 24}
25 25
26static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) 26int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
27{ 27{
28 int err = 0; 28 int err;
29 struct msr_info rv; 29 struct msr_info rv;
30 30
31 rv.msr_no = msr_no; 31 rv.msr_no = msr_no;
32 if (safe) { 32 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
33 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
34 &rv, 1);
35 err = err ? err : rv.err;
36 } else {
37 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
38 }
39 *l = rv.l; 33 *l = rv.l;
40 *h = rv.h; 34 *h = rv.h;
41 35
42 return err; 36 return err;
43} 37}
44 38
45static void __wrmsr_on_cpu(void *info) 39int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
40{
41 int err;
42 struct msr_info rv;
43
44 rv.msr_no = msr_no;
45 rv.l = l;
46 rv.h = h;
47 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
48
49 return err;
50}
51
52/* These "safe" variants are slower and should be used when the target MSR
53 may not actually exist. */
54static void __rdmsr_safe_on_cpu(void *info)
46{ 55{
47 struct msr_info *rv = info; 56 struct msr_info *rv = info;
48 57
49 wrmsr(rv->msr_no, rv->l, rv->h); 58 rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
50} 59}
51 60
52static void __wrmsr_safe_on_cpu(void *info) 61static void __wrmsr_safe_on_cpu(void *info)
@@ -56,45 +65,30 @@ static void __wrmsr_safe_on_cpu(void *info)
56 rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); 65 rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
57} 66}
58 67
59static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) 68int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
60{ 69{
61 int err = 0; 70 int err;
62 struct msr_info rv; 71 struct msr_info rv;
63 72
64 rv.msr_no = msr_no; 73 rv.msr_no = msr_no;
65 rv.l = l; 74 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
66 rv.h = h; 75 *l = rv.l;
67 if (safe) { 76 *h = rv.h;
68 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
69 &rv, 1);
70 err = err ? err : rv.err;
71 } else {
72 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
73 }
74
75 return err;
76}
77 77
78int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 78 return err ? err : rv.err;
79{
80 return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
81} 79}
82 80
83int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
84{
85 return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
86}
87
88/* These "safe" variants are slower and should be used when the target MSR
89 may not actually exist. */
90int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 81int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
91{ 82{
92 return _wrmsr_on_cpu(cpu, msr_no, l, h, 1); 83 int err;
93} 84 struct msr_info rv;
94 85
95int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 86 rv.msr_no = msr_no;
96{ 87 rv.l = l;
97 return _rdmsr_on_cpu(cpu, msr_no, l, h, 1); 88 rv.h = h;
89 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
90
91 return err ? err : rv.err;
98} 92}
99 93
100EXPORT_SYMBOL(rdmsr_on_cpu); 94EXPORT_SYMBOL(rdmsr_on_cpu);
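For reference, a minimal caller sketch for the reworked helpers above (not part of the patch; MSR_IA32_MISC_ENABLE is only an illustrative register choice):

    #include <asm/msr.h>

    static int example_read_misc_enable(unsigned int cpu)
    {
            u32 lo, hi;
            int err;

            /* Plain variant: use when the MSR is known to exist on that CPU. */
            err = rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
            if (err)
                    return err;

            /*
             * Safe variant: returns an error instead of faulting if the MSR
             * does not exist, at the cost of the slower *_safe() path.
             */
            return rdmsr_safe_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
    }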
diff --git a/arch/x86/lib/string_32.c b/arch/x86/lib/string_32.c
index 94972e7c094d..82004d2bf05e 100644
--- a/arch/x86/lib/string_32.c
+++ b/arch/x86/lib/string_32.c
@@ -22,7 +22,7 @@ char *strcpy(char *dest, const char *src)
22 "testb %%al,%%al\n\t" 22 "testb %%al,%%al\n\t"
23 "jne 1b" 23 "jne 1b"
24 : "=&S" (d0), "=&D" (d1), "=&a" (d2) 24 : "=&S" (d0), "=&D" (d1), "=&a" (d2)
25 :"0" (src), "1" (dest) : "memory"); 25 : "0" (src), "1" (dest) : "memory");
26 return dest; 26 return dest;
27} 27}
28EXPORT_SYMBOL(strcpy); 28EXPORT_SYMBOL(strcpy);
@@ -42,7 +42,7 @@ char *strncpy(char *dest, const char *src, size_t count)
42 "stosb\n" 42 "stosb\n"
43 "2:" 43 "2:"
44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3) 44 : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
45 :"0" (src), "1" (dest), "2" (count) : "memory"); 45 : "0" (src), "1" (dest), "2" (count) : "memory");
46 return dest; 46 return dest;
47} 47}
48EXPORT_SYMBOL(strncpy); 48EXPORT_SYMBOL(strncpy);
@@ -60,7 +60,7 @@ char *strcat(char *dest, const char *src)
60 "testb %%al,%%al\n\t" 60 "testb %%al,%%al\n\t"
61 "jne 1b" 61 "jne 1b"
62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3) 62 : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu): "memory"); 63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory");
64 return dest; 64 return dest;
65} 65}
66EXPORT_SYMBOL(strcat); 66EXPORT_SYMBOL(strcat);
@@ -105,9 +105,9 @@ int strcmp(const char *cs, const char *ct)
105 "2:\tsbbl %%eax,%%eax\n\t" 105 "2:\tsbbl %%eax,%%eax\n\t"
106 "orb $1,%%al\n" 106 "orb $1,%%al\n"
107 "3:" 107 "3:"
108 :"=a" (res), "=&S" (d0), "=&D" (d1) 108 : "=a" (res), "=&S" (d0), "=&D" (d1)
109 :"1" (cs), "2" (ct) 109 : "1" (cs), "2" (ct)
110 :"memory"); 110 : "memory");
111 return res; 111 return res;
112} 112}
113EXPORT_SYMBOL(strcmp); 113EXPORT_SYMBOL(strcmp);
@@ -130,9 +130,9 @@ int strncmp(const char *cs, const char *ct, size_t count)
130 "3:\tsbbl %%eax,%%eax\n\t" 130 "3:\tsbbl %%eax,%%eax\n\t"
131 "orb $1,%%al\n" 131 "orb $1,%%al\n"
132 "4:" 132 "4:"
133 :"=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2) 133 : "=a" (res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
134 :"1" (cs), "2" (ct), "3" (count) 134 : "1" (cs), "2" (ct), "3" (count)
135 :"memory"); 135 : "memory");
136 return res; 136 return res;
137} 137}
138EXPORT_SYMBOL(strncmp); 138EXPORT_SYMBOL(strncmp);
@@ -152,9 +152,9 @@ char *strchr(const char *s, int c)
152 "movl $1,%1\n" 152 "movl $1,%1\n"
153 "2:\tmovl %1,%0\n\t" 153 "2:\tmovl %1,%0\n\t"
154 "decl %0" 154 "decl %0"
155 :"=a" (res), "=&S" (d0) 155 : "=a" (res), "=&S" (d0)
156 :"1" (s), "0" (c) 156 : "1" (s), "0" (c)
157 :"memory"); 157 : "memory");
158 return res; 158 return res;
159} 159}
160EXPORT_SYMBOL(strchr); 160EXPORT_SYMBOL(strchr);
@@ -169,9 +169,9 @@ size_t strlen(const char *s)
169 "scasb\n\t" 169 "scasb\n\t"
170 "notl %0\n\t" 170 "notl %0\n\t"
171 "decl %0" 171 "decl %0"
172 :"=c" (res), "=&D" (d0) 172 : "=c" (res), "=&D" (d0)
173 :"1" (s), "a" (0), "0" (0xffffffffu) 173 : "1" (s), "a" (0), "0" (0xffffffffu)
174 :"memory"); 174 : "memory");
175 return res; 175 return res;
176} 176}
177EXPORT_SYMBOL(strlen); 177EXPORT_SYMBOL(strlen);
@@ -189,9 +189,9 @@ void *memchr(const void *cs, int c, size_t count)
189 "je 1f\n\t" 189 "je 1f\n\t"
190 "movl $1,%0\n" 190 "movl $1,%0\n"
191 "1:\tdecl %0" 191 "1:\tdecl %0"
192 :"=D" (res), "=&c" (d0) 192 : "=D" (res), "=&c" (d0)
193 :"a" (c), "0" (cs), "1" (count) 193 : "a" (c), "0" (cs), "1" (count)
194 :"memory"); 194 : "memory");
195 return res; 195 return res;
196} 196}
197EXPORT_SYMBOL(memchr); 197EXPORT_SYMBOL(memchr);
@@ -228,9 +228,9 @@ size_t strnlen(const char *s, size_t count)
228 "cmpl $-1,%1\n\t" 228 "cmpl $-1,%1\n\t"
229 "jne 1b\n" 229 "jne 1b\n"
230 "3:\tsubl %2,%0" 230 "3:\tsubl %2,%0"
231 :"=a" (res), "=&d" (d0) 231 : "=a" (res), "=&d" (d0)
232 :"c" (s), "1" (count) 232 : "c" (s), "1" (count)
233 :"memory"); 233 : "memory");
234 return res; 234 return res;
235} 235}
236EXPORT_SYMBOL(strnlen); 236EXPORT_SYMBOL(strnlen);
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 42e8a50303f3..8e2d55f754bf 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -23,9 +23,9 @@ __asm__ __volatile__(
23 "jne 1b\n\t" 23 "jne 1b\n\t"
24 "xorl %%eax,%%eax\n\t" 24 "xorl %%eax,%%eax\n\t"
25 "2:" 25 "2:"
26 :"=a" (__res), "=&c" (d0), "=&S" (d1) 26 : "=a" (__res), "=&c" (d0), "=&S" (d1)
27 :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct) 27 : "0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
28 :"dx", "di"); 28 : "dx", "di");
29return __res; 29return __res;
30} 30}
31 31
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
index 3d317836be9e..3f2cf11f201a 100644
--- a/arch/x86/mach-default/setup.c
+++ b/arch/x86/mach-default/setup.c
@@ -10,13 +10,15 @@
10#include <asm/e820.h> 10#include <asm/e820.h>
11#include <asm/setup.h> 11#include <asm/setup.h>
12 12
13#include <mach_ipi.h>
14
13#ifdef CONFIG_HOTPLUG_CPU 15#ifdef CONFIG_HOTPLUG_CPU
14#define DEFAULT_SEND_IPI (1) 16#define DEFAULT_SEND_IPI (1)
15#else 17#else
16#define DEFAULT_SEND_IPI (0) 18#define DEFAULT_SEND_IPI (0)
17#endif 19#endif
18 20
19int no_broadcast=DEFAULT_SEND_IPI; 21int no_broadcast = DEFAULT_SEND_IPI;
20 22
21/** 23/**
22 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors 24 * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 62fa440678d8..847c164725f4 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -328,7 +328,7 @@ void __init initmem_init(unsigned long start_pfn,
328 328
329 get_memcfg_numa(); 329 get_memcfg_numa();
330 330
331 kva_pages = round_up(calculate_numa_remap_pages(), PTRS_PER_PTE); 331 kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
332 332
333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE); 333 kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
334 do { 334 do {
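The round_up() -> roundup() conversions here and in the mm patches that follow switch to the generic helper from <linux/kernel.h>, which divides rather than masks and so accepts any positive step, not only power-of-two alignments. A small sketch (hypothetical helper, not from the patch):

    #include <linux/kernel.h>       /* roundup() */
    #include <asm/pgtable.h>        /* PTRS_PER_PTE */

    /* Hypothetical helper: pad a page count to a whole PTE table. */
    static unsigned long pad_to_pte_table(unsigned long pages)
    {
            /* roundup(x, y) == ((x + y - 1) / y) * y, e.g. roundup(5, 3) == 6 */
            return roundup(pages, PTRS_PER_PTE);
    }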
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a20d1fa64b4e..e7277cbcfb40 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -148,8 +148,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
148 * we have now. "break" is either changing perms, levels or 148 * we have now. "break" is either changing perms, levels or
149 * address space marker. 149 * address space marker.
150 */ 150 */
151 prot = pgprot_val(new_prot) & ~(PTE_PFN_MASK); 151 prot = pgprot_val(new_prot) & PTE_FLAGS_MASK;
152 cur = pgprot_val(st->current_prot) & ~(PTE_PFN_MASK); 152 cur = pgprot_val(st->current_prot) & PTE_FLAGS_MASK;
153 153
154 if (!st->level) { 154 if (!st->level) {
155 /* First entry */ 155 /* First entry */
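A short sketch of what the hunk above expresses, assuming PTE_FLAGS_MASK is defined as the complement of PTE_PFN_MASK on x86 (hypothetical helper, not from the patch):

    #include <asm/pgtable.h>

    /* Hypothetical helper: extract only the flag bits of a pte value. */
    static pteval_t pte_flag_bits(pteval_t val)
    {
            /* Equivalent to the old "val & ~(PTE_PFN_MASK)", but spelled
             * as intent rather than as an open-coded negation. */
            return val & PTE_FLAGS_MASK;
    }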
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 455f3fe67b42..8f92cac4e6db 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -35,6 +35,7 @@
35#include <asm/tlbflush.h> 35#include <asm/tlbflush.h>
36#include <asm/proto.h> 36#include <asm/proto.h>
37#include <asm-generic/sections.h> 37#include <asm-generic/sections.h>
38#include <asm/traps.h>
38 39
39/* 40/*
40 * Page fault error code bits 41 * Page fault error code bits
@@ -357,8 +358,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
357 return 0; 358 return 0;
358} 359}
359 360
360void do_invalid_op(struct pt_regs *, unsigned long);
361
362static int is_f00f_bug(struct pt_regs *regs, unsigned long address) 361static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
363{ 362{
364#ifdef CONFIG_X86_F00F_BUG 363#ifdef CONFIG_X86_F00F_BUG
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 60ec1d08ff24..6b9a9358b330 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -47,6 +47,7 @@
47#include <asm/paravirt.h> 47#include <asm/paravirt.h>
48#include <asm/setup.h> 48#include <asm/setup.h>
49#include <asm/cacheflush.h> 49#include <asm/cacheflush.h>
50#include <asm/smp.h>
50 51
51unsigned int __VMALLOC_RESERVE = 128 << 20; 52unsigned int __VMALLOC_RESERVE = 128 << 20;
52 53
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d3746efb060d..770536ebf7e9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -225,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
225void __init cleanup_highmap(void) 225void __init cleanup_highmap(void)
226{ 226{
227 unsigned long vaddr = __START_KERNEL_map; 227 unsigned long vaddr = __START_KERNEL_map;
228 unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1; 228 unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
229 pmd_t *pmd = level2_kernel_pgt; 229 pmd_t *pmd = level2_kernel_pgt;
230 pmd_t *last_pmd = pmd + PTRS_PER_PMD; 230 pmd_t *last_pmd = pmd + PTRS_PER_PMD;
231 231
@@ -451,14 +451,14 @@ static void __init find_early_table_space(unsigned long end)
451 unsigned long puds, pmds, ptes, tables, start; 451 unsigned long puds, pmds, ptes, tables, start;
452 452
453 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; 453 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
454 tables = round_up(puds * sizeof(pud_t), PAGE_SIZE); 454 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
455 if (direct_gbpages) { 455 if (direct_gbpages) {
456 unsigned long extra; 456 unsigned long extra;
457 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); 457 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
458 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; 458 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
459 } else 459 } else
460 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; 460 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
461 tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE); 461 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
462 462
463 if (cpu_has_pse) { 463 if (cpu_has_pse) {
464 unsigned long extra; 464 unsigned long extra;
@@ -466,7 +466,7 @@ static void __init find_early_table_space(unsigned long end)
466 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 466 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
467 } else 467 } else
468 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; 468 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
469 tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE); 469 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
470 470
471 /* 471 /*
472 * RED-PEN putting page tables only on node 0 could 472 * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d4b6e6a29ae3..cac6da54203b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -421,7 +421,7 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
421 return; 421 return;
422} 422}
423 423
424int __initdata early_ioremap_debug; 424static int __initdata early_ioremap_debug;
425 425
426static int __init early_ioremap_debug_setup(char *str) 426static int __init early_ioremap_debug_setup(char *str)
427{ 427{
@@ -547,7 +547,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
547} 547}
548 548
549 549
550int __initdata early_ioremap_nested; 550static int __initdata early_ioremap_nested;
551 551
552static int __init check_early_ioremap_leak(void) 552static int __init check_early_ioremap_leak(void)
553{ 553{
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a4dd793d6003..cebcbf152d46 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -79,7 +79,7 @@ static int __init allocate_cachealigned_memnodemap(void)
79 return 0; 79 return 0;
80 80
81 addr = 0x8000; 81 addr = 0x8000;
82 nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES); 82 nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT, 83 nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
84 nodemap_size, L1_CACHE_BYTES); 84 nodemap_size, L1_CACHE_BYTES);
85 if (nodemap_addr == -1UL) { 85 if (nodemap_addr == -1UL) {
@@ -176,10 +176,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size; 176 unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
177 unsigned long bootmap_start, nodedata_phys; 177 unsigned long bootmap_start, nodedata_phys;
178 void *bootmap; 178 void *bootmap;
179 const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE); 179 const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
180 int nid; 180 int nid;
181 181
182 start = round_up(start, ZONE_ALIGN); 182 start = roundup(start, ZONE_ALIGN);
183 183
184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, 184 printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
185 start, end); 185 start, end);
@@ -210,9 +210,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); 210 bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
211 nid = phys_to_nid(nodedata_phys); 211 nid = phys_to_nid(nodedata_phys);
212 if (nid == nodeid) 212 if (nid == nodeid)
213 bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE); 213 bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
214 else 214 else
215 bootmap_start = round_up(start, PAGE_SIZE); 215 bootmap_start = roundup(start, PAGE_SIZE);
216 /* 216 /*
217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like 217 * SMP_CACHE_BYTES could be enough, but init_bootmem_node like
218 * to use that to align to PAGE_SIZE 218 * to use that to align to PAGE_SIZE
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 43e2f8483e4f..898fad617abe 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -84,7 +84,7 @@ static inline unsigned long highmap_start_pfn(void)
84 84
85static inline unsigned long highmap_end_pfn(void) 85static inline unsigned long highmap_end_pfn(void)
86{ 86{
87 return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT; 87 return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
88} 88}
89 89
90#endif 90#endif
@@ -906,11 +906,13 @@ int set_memory_ro(unsigned long addr, int numpages)
906{ 906{
907 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW)); 907 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
908} 908}
909EXPORT_SYMBOL_GPL(set_memory_ro);
909 910
910int set_memory_rw(unsigned long addr, int numpages) 911int set_memory_rw(unsigned long addr, int numpages)
911{ 912{
912 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW)); 913 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
913} 914}
915EXPORT_SYMBOL_GPL(set_memory_rw);
914 916
915int set_memory_np(unsigned long addr, int numpages) 917int set_memory_np(unsigned long addr, int numpages)
916{ 918{
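With set_memory_ro()/set_memory_rw() now exported, modules can toggle write protection on ranges of kernel pages; a minimal sketch (the address and page count are placeholders, and the <asm/cacheflush.h> header is an assumption about where the prototypes live):

    #include <asm/cacheflush.h>     /* set_memory_ro(), set_memory_rw() */

    static void write_protect_window(unsigned long addr, int numpages)
    {
            set_memory_ro(addr, numpages);  /* make the pages read-only */
            /* ... expose the data to code that must not modify it ... */
            set_memory_rw(addr, numpages);  /* restore write access */
    }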
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d50302774fe2..86f2ffc43c3d 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -63,10 +63,8 @@ static inline void pgd_list_del(pgd_t *pgd)
63#define UNSHARED_PTRS_PER_PGD \ 63#define UNSHARED_PTRS_PER_PGD \
64 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) 64 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
65 65
66static void pgd_ctor(void *p) 66static void pgd_ctor(pgd_t *pgd)
67{ 67{
68 pgd_t *pgd = p;
69
70 /* If the pgd points to a shared pagetable level (either the 68 /* If the pgd points to a shared pagetable level (either the
71 ptes in non-PAE, or shared PMD in PAE), then just copy the 69 ptes in non-PAE, or shared PMD in PAE), then just copy the
72 references from swapper_pg_dir. */ 70 references from swapper_pg_dir. */
@@ -87,7 +85,7 @@ static void pgd_ctor(void *p)
87 pgd_list_add(pgd); 85 pgd_list_add(pgd);
88} 86}
89 87
90static void pgd_dtor(void *pgd) 88static void pgd_dtor(pgd_t *pgd)
91{ 89{
92 unsigned long flags; /* can be called from interrupt context */ 90 unsigned long flags; /* can be called from interrupt context */
93 91
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index cab0abbd1ebe..0951db9ee519 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -123,7 +123,8 @@ static int __init parse_vmalloc(char *arg)
123 if (!arg) 123 if (!arg)
124 return -EINVAL; 124 return -EINVAL;
125 125
 126 __VMALLOC_RESERVE = memparse(arg, &arg); 126 /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole */
 127 __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
127 return 0; 128 return 0;
128} 129}
129early_param("vmalloc", parse_vmalloc); 130early_param("vmalloc", parse_vmalloc);
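The change above keeps the user-visible meaning of "vmalloc=<size>": the guard hole between the direct map and the vmalloc area is carved out of __VMALLOC_RESERVE, so adding VMALLOC_OFFSET back makes the requested size roughly what remains usable. Sketch of the arithmetic (hypothetical helper; the 8MB guard size is the usual 32-bit value and depends on configuration):

    #include <asm/pgtable.h>        /* VMALLOC_OFFSET */

    static unsigned long vmalloc_reserve_for(unsigned long requested)
    {
            /* e.g. "vmalloc=128M": reserve 128M plus the ~8M guard hole,
             * leaving about 128M of usable vmalloc space instead of ~120M. */
            return requested + VMALLOC_OFFSET;
    }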
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 56b4757a1f47..43ac5af338d8 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -10,11 +10,12 @@
10 10
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/ptrace.h>
14#include <linux/nmi.h>
13#include <asm/msr.h> 15#include <asm/msr.h>
14#include <asm/ptrace.h>
15#include <asm/fixmap.h> 16#include <asm/fixmap.h>
16#include <asm/apic.h> 17#include <asm/apic.h>
17#include <asm/nmi.h> 18
18 19
19#include "op_x86_model.h" 20#include "op_x86_model.h"
20#include "op_counter.h" 21#include "op_counter.h"
@@ -40,7 +41,7 @@ static unsigned int num_controls = NUM_CONTROLS_NON_HT;
40static inline void setup_num_counters(void) 41static inline void setup_num_counters(void)
41{ 42{
42#ifdef CONFIG_SMP 43#ifdef CONFIG_SMP
43 if (smp_num_siblings == 2){ 44 if (smp_num_siblings == 2) {
44 num_counters = NUM_COUNTERS_HT2; 45 num_counters = NUM_COUNTERS_HT2;
45 num_controls = NUM_CONTROLS_HT2; 46 num_controls = NUM_CONTROLS_HT2;
46 } 47 }
@@ -86,7 +87,7 @@ struct p4_event_binding {
86#define CTR_FLAME_2 (1 << 6) 87#define CTR_FLAME_2 (1 << 6)
87#define CTR_IQ_5 (1 << 7) 88#define CTR_IQ_5 (1 << 7)
88 89
89static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = { 90static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
90 { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 }, 91 { CTR_BPU_0, MSR_P4_BPU_PERFCTR0, MSR_P4_BPU_CCCR0 },
91 { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 }, 92 { CTR_MS_0, MSR_P4_MS_PERFCTR0, MSR_P4_MS_CCCR0 },
92 { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 }, 93 { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
@@ -97,32 +98,32 @@ static struct p4_counter_binding p4_counters [NUM_COUNTERS_NON_HT] = {
97 { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 } 98 { CTR_IQ_5, MSR_P4_IQ_PERFCTR5, MSR_P4_IQ_CCCR5 }
98}; 99};
99 100
100#define NUM_UNUSED_CCCRS NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT 101#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
101 102
102/* p4 event codes in libop/op_event.h are indices into this table. */ 103/* p4 event codes in libop/op_event.h are indices into this table. */
103 104
104static struct p4_event_binding p4_events[NUM_EVENTS] = { 105static struct p4_event_binding p4_events[NUM_EVENTS] = {
105 106
106 { /* BRANCH_RETIRED */ 107 { /* BRANCH_RETIRED */
107 0x05, 0x06, 108 0x05, 0x06,
108 { {CTR_IQ_4, MSR_P4_CRU_ESCR2}, 109 { {CTR_IQ_4, MSR_P4_CRU_ESCR2},
109 {CTR_IQ_5, MSR_P4_CRU_ESCR3} } 110 {CTR_IQ_5, MSR_P4_CRU_ESCR3} }
110 }, 111 },
111 112
112 { /* MISPRED_BRANCH_RETIRED */ 113 { /* MISPRED_BRANCH_RETIRED */
113 0x04, 0x03, 114 0x04, 0x03,
114 { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, 115 { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
115 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 116 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
116 }, 117 },
117 118
118 { /* TC_DELIVER_MODE */ 119 { /* TC_DELIVER_MODE */
119 0x01, 0x01, 120 0x01, 0x01,
120 { { CTR_MS_0, MSR_P4_TC_ESCR0}, 121 { { CTR_MS_0, MSR_P4_TC_ESCR0},
121 { CTR_MS_2, MSR_P4_TC_ESCR1} } 122 { CTR_MS_2, MSR_P4_TC_ESCR1} }
122 }, 123 },
123 124
124 { /* BPU_FETCH_REQUEST */ 125 { /* BPU_FETCH_REQUEST */
125 0x00, 0x03, 126 0x00, 0x03,
126 { { CTR_BPU_0, MSR_P4_BPU_ESCR0}, 127 { { CTR_BPU_0, MSR_P4_BPU_ESCR0},
127 { CTR_BPU_2, MSR_P4_BPU_ESCR1} } 128 { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
128 }, 129 },
@@ -146,7 +147,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
146 }, 147 },
147 148
148 { /* LOAD_PORT_REPLAY */ 149 { /* LOAD_PORT_REPLAY */
149 0x02, 0x04, 150 0x02, 0x04,
150 { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0}, 151 { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
151 { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} } 152 { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
152 }, 153 },
@@ -170,43 +171,43 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
170 }, 171 },
171 172
172 { /* BSQ_CACHE_REFERENCE */ 173 { /* BSQ_CACHE_REFERENCE */
173 0x07, 0x0c, 174 0x07, 0x0c,
174 { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, 175 { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
175 { CTR_BPU_2, MSR_P4_BSU_ESCR1} } 176 { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
176 }, 177 },
177 178
178 { /* IOQ_ALLOCATION */ 179 { /* IOQ_ALLOCATION */
179 0x06, 0x03, 180 0x06, 0x03,
180 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 181 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
181 { 0, 0 } } 182 { 0, 0 } }
182 }, 183 },
183 184
184 { /* IOQ_ACTIVE_ENTRIES */ 185 { /* IOQ_ACTIVE_ENTRIES */
185 0x06, 0x1a, 186 0x06, 0x1a,
186 { { CTR_BPU_2, MSR_P4_FSB_ESCR1}, 187 { { CTR_BPU_2, MSR_P4_FSB_ESCR1},
187 { 0, 0 } } 188 { 0, 0 } }
188 }, 189 },
189 190
190 { /* FSB_DATA_ACTIVITY */ 191 { /* FSB_DATA_ACTIVITY */
191 0x06, 0x17, 192 0x06, 0x17,
192 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 193 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
193 { CTR_BPU_2, MSR_P4_FSB_ESCR1} } 194 { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
194 }, 195 },
195 196
196 { /* BSQ_ALLOCATION */ 197 { /* BSQ_ALLOCATION */
197 0x07, 0x05, 198 0x07, 0x05,
198 { { CTR_BPU_0, MSR_P4_BSU_ESCR0}, 199 { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
199 { 0, 0 } } 200 { 0, 0 } }
200 }, 201 },
201 202
202 { /* BSQ_ACTIVE_ENTRIES */ 203 { /* BSQ_ACTIVE_ENTRIES */
203 0x07, 0x06, 204 0x07, 0x06,
204 { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */}, 205 { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
205 { 0, 0 } } 206 { 0, 0 } }
206 }, 207 },
207 208
208 { /* X87_ASSIST */ 209 { /* X87_ASSIST */
209 0x05, 0x03, 210 0x05, 0x03,
210 { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, 211 { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
211 { CTR_IQ_5, MSR_P4_CRU_ESCR3} } 212 { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
212 }, 213 },
@@ -216,21 +217,21 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
216 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 217 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
217 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 218 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
218 }, 219 },
219 220
220 { /* PACKED_SP_UOP */ 221 { /* PACKED_SP_UOP */
221 0x01, 0x08, 222 0x01, 0x08,
222 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 223 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
223 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 224 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
224 }, 225 },
225 226
226 { /* PACKED_DP_UOP */ 227 { /* PACKED_DP_UOP */
227 0x01, 0x0c, 228 0x01, 0x0c,
228 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 229 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
229 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 230 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
230 }, 231 },
231 232
232 { /* SCALAR_SP_UOP */ 233 { /* SCALAR_SP_UOP */
233 0x01, 0x0a, 234 0x01, 0x0a,
234 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 235 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
235 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 236 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
236 }, 237 },
@@ -242,31 +243,31 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
242 }, 243 },
243 244
244 { /* 64BIT_MMX_UOP */ 245 { /* 64BIT_MMX_UOP */
245 0x01, 0x02, 246 0x01, 0x02,
246 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 247 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
247 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 248 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
248 }, 249 },
249 250
250 { /* 128BIT_MMX_UOP */ 251 { /* 128BIT_MMX_UOP */
251 0x01, 0x1a, 252 0x01, 0x1a,
252 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 253 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
253 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 254 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
254 }, 255 },
255 256
256 { /* X87_FP_UOP */ 257 { /* X87_FP_UOP */
257 0x01, 0x04, 258 0x01, 0x04,
258 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 259 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
259 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 260 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
260 }, 261 },
261 262
262 { /* X87_SIMD_MOVES_UOP */ 263 { /* X87_SIMD_MOVES_UOP */
263 0x01, 0x2e, 264 0x01, 0x2e,
264 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0}, 265 { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
265 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} } 266 { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
266 }, 267 },
267 268
268 { /* MACHINE_CLEAR */ 269 { /* MACHINE_CLEAR */
269 0x05, 0x02, 270 0x05, 0x02,
270 { { CTR_IQ_4, MSR_P4_CRU_ESCR2}, 271 { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
271 { CTR_IQ_5, MSR_P4_CRU_ESCR3} } 272 { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
272 }, 273 },
@@ -276,9 +277,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
276 { { CTR_BPU_0, MSR_P4_FSB_ESCR0}, 277 { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
277 { CTR_BPU_2, MSR_P4_FSB_ESCR1} } 278 { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
278 }, 279 },
279 280
280 { /* TC_MS_XFER */ 281 { /* TC_MS_XFER */
281 0x00, 0x05, 282 0x00, 0x05,
282 { { CTR_MS_0, MSR_P4_MS_ESCR0}, 283 { { CTR_MS_0, MSR_P4_MS_ESCR0},
283 { CTR_MS_2, MSR_P4_MS_ESCR1} } 284 { CTR_MS_2, MSR_P4_MS_ESCR1} }
284 }, 285 },
@@ -308,7 +309,7 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
308 }, 309 },
309 310
310 { /* INSTR_RETIRED */ 311 { /* INSTR_RETIRED */
311 0x04, 0x02, 312 0x04, 0x02,
312 { { CTR_IQ_4, MSR_P4_CRU_ESCR0}, 313 { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
313 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 314 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
314 }, 315 },
@@ -319,14 +320,14 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
319 { CTR_IQ_5, MSR_P4_CRU_ESCR1} } 320 { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
320 }, 321 },
321 322
322 { /* UOP_TYPE */ 323 { /* UOP_TYPE */
323 0x02, 0x02, 324 0x02, 0x02,
324 { { CTR_IQ_4, MSR_P4_RAT_ESCR0}, 325 { { CTR_IQ_4, MSR_P4_RAT_ESCR0},
325 { CTR_IQ_5, MSR_P4_RAT_ESCR1} } 326 { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
326 }, 327 },
327 328
328 { /* RETIRED_MISPRED_BRANCH_TYPE */ 329 { /* RETIRED_MISPRED_BRANCH_TYPE */
329 0x02, 0x05, 330 0x02, 0x05,
330 { { CTR_MS_0, MSR_P4_TBPU_ESCR0}, 331 { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
331 { CTR_MS_2, MSR_P4_TBPU_ESCR1} } 332 { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
332 }, 333 },
@@ -349,8 +350,8 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
349#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) 350#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
350#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) 351#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
351#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) 352#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
352#define ESCR_READ(escr,high,ev,i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) 353#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
353#define ESCR_WRITE(escr,high,ev,i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high));} while (0) 354#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
354 355
355#define CCCR_RESERVED_BITS 0x38030FFF 356#define CCCR_RESERVED_BITS 0x38030FFF
356#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) 357#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
@@ -360,15 +361,15 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
360#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) 361#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
361#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) 362#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
362#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) 363#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
363#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) 364#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
364#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high));} while (0) 365#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
365#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) 366#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
366#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) 367#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
367 368
368#define CTRL_IS_RESERVED(msrs,c) (msrs->controls[(c)].addr ? 1 : 0) 369#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
369#define CTR_IS_RESERVED(msrs,c) (msrs->counters[(c)].addr ? 1 : 0) 370#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
370#define CTR_READ(l,h,i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h));} while (0) 371#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
371#define CTR_WRITE(l,i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1);} while (0) 372#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
372#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000)) 373#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
373 374
374 375
@@ -380,7 +381,7 @@ static unsigned int get_stagger(void)
380#ifdef CONFIG_SMP 381#ifdef CONFIG_SMP
381 int cpu = smp_processor_id(); 382 int cpu = smp_processor_id();
382 return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu))); 383 return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
383#endif 384#endif
384 return 0; 385 return 0;
385} 386}
386 387
@@ -395,25 +396,23 @@ static unsigned long reset_value[NUM_COUNTERS_NON_HT];
395 396
396static void p4_fill_in_addresses(struct op_msrs * const msrs) 397static void p4_fill_in_addresses(struct op_msrs * const msrs)
397{ 398{
398 unsigned int i; 399 unsigned int i;
399 unsigned int addr, cccraddr, stag; 400 unsigned int addr, cccraddr, stag;
400 401
401 setup_num_counters(); 402 setup_num_counters();
402 stag = get_stagger(); 403 stag = get_stagger();
403 404
404 /* initialize some registers */ 405 /* initialize some registers */
405 for (i = 0; i < num_counters; ++i) { 406 for (i = 0; i < num_counters; ++i)
406 msrs->counters[i].addr = 0; 407 msrs->counters[i].addr = 0;
407 } 408 for (i = 0; i < num_controls; ++i)
408 for (i = 0; i < num_controls; ++i) {
409 msrs->controls[i].addr = 0; 409 msrs->controls[i].addr = 0;
410 } 410
411
412 /* the counter & cccr registers we pay attention to */ 411 /* the counter & cccr registers we pay attention to */
413 for (i = 0; i < num_counters; ++i) { 412 for (i = 0; i < num_counters; ++i) {
414 addr = p4_counters[VIRT_CTR(stag, i)].counter_address; 413 addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
415 cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address; 414 cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
416 if (reserve_perfctr_nmi(addr)){ 415 if (reserve_perfctr_nmi(addr)) {
417 msrs->counters[i].addr = addr; 416 msrs->counters[i].addr = addr;
418 msrs->controls[i].addr = cccraddr; 417 msrs->controls[i].addr = cccraddr;
419 } 418 }
@@ -447,22 +446,22 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
447 if (reserve_evntsel_nmi(addr)) 446 if (reserve_evntsel_nmi(addr))
448 msrs->controls[i].addr = addr; 447 msrs->controls[i].addr = addr;
449 } 448 }
450 449
451 for (addr = MSR_P4_MS_ESCR0 + stag; 450 for (addr = MSR_P4_MS_ESCR0 + stag;
452 addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) { 451 addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
453 if (reserve_evntsel_nmi(addr)) 452 if (reserve_evntsel_nmi(addr))
454 msrs->controls[i].addr = addr; 453 msrs->controls[i].addr = addr;
455 } 454 }
456 455
457 for (addr = MSR_P4_IX_ESCR0 + stag; 456 for (addr = MSR_P4_IX_ESCR0 + stag;
458 addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) { 457 addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
459 if (reserve_evntsel_nmi(addr)) 458 if (reserve_evntsel_nmi(addr))
460 msrs->controls[i].addr = addr; 459 msrs->controls[i].addr = addr;
461 } 460 }
462 461
463 /* there are 2 remaining non-contiguously located ESCRs */ 462 /* there are 2 remaining non-contiguously located ESCRs */
464 463
465 if (num_counters == NUM_COUNTERS_NON_HT) { 464 if (num_counters == NUM_COUNTERS_NON_HT) {
466 /* standard non-HT CPUs handle both remaining ESCRs*/ 465 /* standard non-HT CPUs handle both remaining ESCRs*/
467 if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) 466 if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
468 msrs->controls[i++].addr = MSR_P4_CRU_ESCR5; 467 msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
@@ -498,20 +497,20 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
498 unsigned int stag; 497 unsigned int stag;
499 498
500 stag = get_stagger(); 499 stag = get_stagger();
501 500
502 /* convert from counter *number* to counter *bit* */ 501 /* convert from counter *number* to counter *bit* */
503 counter_bit = 1 << VIRT_CTR(stag, ctr); 502 counter_bit = 1 << VIRT_CTR(stag, ctr);
504 503
505 /* find our event binding structure. */ 504 /* find our event binding structure. */
506 if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) { 505 if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
507 printk(KERN_ERR 506 printk(KERN_ERR
508 "oprofile: P4 event code 0x%lx out of range\n", 507 "oprofile: P4 event code 0x%lx out of range\n",
509 counter_config[ctr].event); 508 counter_config[ctr].event);
510 return; 509 return;
511 } 510 }
512 511
513 ev = &(p4_events[counter_config[ctr].event - 1]); 512 ev = &(p4_events[counter_config[ctr].event - 1]);
514 513
515 for (i = 0; i < maxbind; i++) { 514 for (i = 0; i < maxbind; i++) {
516 if (ev->bindings[i].virt_counter & counter_bit) { 515 if (ev->bindings[i].virt_counter & counter_bit) {
517 516
@@ -526,25 +525,24 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
526 ESCR_SET_OS_1(escr, counter_config[ctr].kernel); 525 ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
527 } 526 }
528 ESCR_SET_EVENT_SELECT(escr, ev->event_select); 527 ESCR_SET_EVENT_SELECT(escr, ev->event_select);
529 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); 528 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
530 ESCR_WRITE(escr, high, ev, i); 529 ESCR_WRITE(escr, high, ev, i);
531 530
532 /* modify CCCR */ 531 /* modify CCCR */
533 CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); 532 CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
534 CCCR_CLEAR(cccr); 533 CCCR_CLEAR(cccr);
535 CCCR_SET_REQUIRED_BITS(cccr); 534 CCCR_SET_REQUIRED_BITS(cccr);
536 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); 535 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
537 if (stag == 0) { 536 if (stag == 0)
538 CCCR_SET_PMI_OVF_0(cccr); 537 CCCR_SET_PMI_OVF_0(cccr);
539 } else { 538 else
540 CCCR_SET_PMI_OVF_1(cccr); 539 CCCR_SET_PMI_OVF_1(cccr);
541 }
542 CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); 540 CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
543 return; 541 return;
544 } 542 }
545 } 543 }
546 544
547 printk(KERN_ERR 545 printk(KERN_ERR
548 "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n", 546 "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
549 counter_config[ctr].event, stag, ctr); 547 counter_config[ctr].event, stag, ctr);
550} 548}
@@ -559,14 +557,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
559 stag = get_stagger(); 557 stag = get_stagger();
560 558
561 rdmsr(MSR_IA32_MISC_ENABLE, low, high); 559 rdmsr(MSR_IA32_MISC_ENABLE, low, high);
562 if (! MISC_PMC_ENABLED_P(low)) { 560 if (!MISC_PMC_ENABLED_P(low)) {
563 printk(KERN_ERR "oprofile: P4 PMC not available\n"); 561 printk(KERN_ERR "oprofile: P4 PMC not available\n");
564 return; 562 return;
565 } 563 }
566 564
567 /* clear the cccrs we will use */ 565 /* clear the cccrs we will use */
568 for (i = 0 ; i < num_counters ; i++) { 566 for (i = 0 ; i < num_counters ; i++) {
569 if (unlikely(!CTRL_IS_RESERVED(msrs,i))) 567 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
570 continue; 568 continue;
571 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); 569 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
572 CCCR_CLEAR(low); 570 CCCR_CLEAR(low);
@@ -576,14 +574,14 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
576 574
577 /* clear all escrs (including those outside our concern) */ 575 /* clear all escrs (including those outside our concern) */
578 for (i = num_counters; i < num_controls; i++) { 576 for (i = num_counters; i < num_controls; i++) {
579 if (unlikely(!CTRL_IS_RESERVED(msrs,i))) 577 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
580 continue; 578 continue;
581 wrmsr(msrs->controls[i].addr, 0, 0); 579 wrmsr(msrs->controls[i].addr, 0, 0);
582 } 580 }
583 581
584 /* setup all counters */ 582 /* setup all counters */
585 for (i = 0 ; i < num_counters ; ++i) { 583 for (i = 0 ; i < num_counters ; ++i) {
586 if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs,i))) { 584 if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
587 reset_value[i] = counter_config[i].count; 585 reset_value[i] = counter_config[i].count;
588 pmc_setup_one_p4_counter(i); 586 pmc_setup_one_p4_counter(i);
589 CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); 587 CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
@@ -603,11 +601,11 @@ static int p4_check_ctrs(struct pt_regs * const regs,
603 stag = get_stagger(); 601 stag = get_stagger();
604 602
605 for (i = 0; i < num_counters; ++i) { 603 for (i = 0; i < num_counters; ++i) {
606 604
607 if (!reset_value[i]) 605 if (!reset_value[i])
608 continue; 606 continue;
609 607
610 /* 608 /*
611 * there is some eccentricity in the hardware which 609 * there is some eccentricity in the hardware which
612 * requires that we perform 2 extra corrections: 610 * requires that we perform 2 extra corrections:
613 * 611 *
@@ -616,24 +614,24 @@ static int p4_check_ctrs(struct pt_regs * const regs,
616 * 614 *
617 * - write the counter back twice to ensure it gets 615 * - write the counter back twice to ensure it gets
618 * updated properly. 616 * updated properly.
619 * 617 *
620 * the former seems to be related to extra NMIs happening 618 * the former seems to be related to extra NMIs happening
621 * during the current NMI; the latter is reported as errata 619 * during the current NMI; the latter is reported as errata
622 * N15 in intel doc 249199-029, pentium 4 specification 620 * N15 in intel doc 249199-029, pentium 4 specification
623 * update, though their suggested work-around does not 621 * update, though their suggested work-around does not
624 * appear to solve the problem. 622 * appear to solve the problem.
625 */ 623 */
626 624
627 real = VIRT_CTR(stag, i); 625 real = VIRT_CTR(stag, i);
628 626
629 CCCR_READ(low, high, real); 627 CCCR_READ(low, high, real);
630 CTR_READ(ctr, high, real); 628 CTR_READ(ctr, high, real);
631 if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { 629 if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
632 oprofile_add_sample(regs, i); 630 oprofile_add_sample(regs, i);
633 CTR_WRITE(reset_value[i], real); 631 CTR_WRITE(reset_value[i], real);
634 CCCR_CLEAR_OVF(low); 632 CCCR_CLEAR_OVF(low);
635 CCCR_WRITE(low, high, real); 633 CCCR_WRITE(low, high, real);
636 CTR_WRITE(reset_value[i], real); 634 CTR_WRITE(reset_value[i], real);
637 } 635 }
638 } 636 }
639 637
@@ -683,15 +681,16 @@ static void p4_shutdown(struct op_msrs const * const msrs)
683 int i; 681 int i;
684 682
685 for (i = 0 ; i < num_counters ; ++i) { 683 for (i = 0 ; i < num_counters ; ++i) {
686 if (CTR_IS_RESERVED(msrs,i)) 684 if (CTR_IS_RESERVED(msrs, i))
687 release_perfctr_nmi(msrs->counters[i].addr); 685 release_perfctr_nmi(msrs->counters[i].addr);
688 } 686 }
689 /* some of the control registers are specially reserved in 687 /*
688 * some of the control registers are specially reserved in
690 * conjunction with the counter registers (hence the starting offset). 689 * conjunction with the counter registers (hence the starting offset).
691 * This saves a few bits. 690 * This saves a few bits.
692 */ 691 */
693 for (i = num_counters ; i < num_controls ; ++i) { 692 for (i = num_counters ; i < num_controls ; ++i) {
694 if (CTRL_IS_RESERVED(msrs,i)) 693 if (CTRL_IS_RESERVED(msrs, i))
695 release_evntsel_nmi(msrs->controls[i].addr); 694 release_evntsel_nmi(msrs->controls[i].addr);
696 } 695 }
697} 696}
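The macro cleanups in this file follow the usual checkpatch rules: parenthesise macro bodies that are expressions, and keep multi-statement macro bodies inside do { ... } while (0) so they behave under an unbraced if/else. A generic illustration with made-up macros (not from the patch):

    #define TOTAL_SLOTS     18
    #define USED_SLOTS      8
    #define UNUSED_BAD      TOTAL_SLOTS - USED_SLOTS        /* unparenthesised */
    #define UNUSED_GOOD     (TOTAL_SLOTS - USED_SLOTS)

    /* 2 * UNUSED_BAD expands to 2 * 18 - 8 == 28 (surprise),
     * 2 * UNUSED_GOOD expands to 2 * (18 - 8) == 20 (intended). */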
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 6a0fca78c362..22e057665e55 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -580,7 +580,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
580 unsigned long action, void *hcpu) 580 unsigned long action, void *hcpu)
581{ 581{
582 int cpu = (long)hcpu; 582 int cpu = (long)hcpu;
583 switch(action) { 583 switch (action) {
584 case CPU_ONLINE: 584 case CPU_ONLINE:
585 case CPU_ONLINE_FROZEN: 585 case CPU_ONLINE_FROZEN:
586 smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0); 586 smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 8e077185e185..006599db0dc7 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1043,35 +1043,44 @@ static void __init pcibios_fixup_irqs(void)
1043 if (io_apic_assign_pci_irqs) { 1043 if (io_apic_assign_pci_irqs) {
1044 int irq; 1044 int irq;
1045 1045
1046 if (pin) { 1046 if (!pin)
1047 /* 1047 continue;
1048 * interrupt pins are numbered starting 1048
1049 * from 1 1049 /*
1050 */ 1050 * interrupt pins are numbered starting from 1
1051 pin--; 1051 */
1052 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, 1052 pin--;
1053 PCI_SLOT(dev->devfn), pin); 1053 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
1054 /* 1054 PCI_SLOT(dev->devfn), pin);
1055 * Busses behind bridges are typically not listed in the MP-table. 1055 /*
1056 * In this case we have to look up the IRQ based on the parent bus, 1056 * Busses behind bridges are typically not listed in the
1057 * parent slot, and pin number. The SMP code detects such bridged 1057 * MP-table. In this case we have to look up the IRQ
1058 * busses itself so we should get into this branch reliably. 1058 * based on the parent bus, parent slot, and pin number.
1059 */ 1059 * The SMP code detects such bridged busses itself so we
1060 if (irq < 0 && dev->bus->parent) { /* go back to the bridge */ 1060 * should get into this branch reliably.
1061 struct pci_dev *bridge = dev->bus->self; 1061 */
1062 1062 if (irq < 0 && dev->bus->parent) {
1063 pin = (pin + PCI_SLOT(dev->devfn)) % 4; 1063 /* go back to the bridge */
1064 irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, 1064 struct pci_dev *bridge = dev->bus->self;
1065 PCI_SLOT(bridge->devfn), pin); 1065 int bus;
1066 if (irq >= 0) 1066
1067 dev_warn(&dev->dev, "using bridge %s INT %c to get IRQ %d\n", 1067 pin = (pin + PCI_SLOT(dev->devfn)) % 4;
1068 pci_name(bridge), 1068 bus = bridge->bus->number;
1069 'A' + pin, irq); 1069 irq = IO_APIC_get_PCI_irq_vector(bus,
1070 } 1070 PCI_SLOT(bridge->devfn), pin);
1071 if (irq >= 0) { 1071 if (irq >= 0)
1072 dev_info(&dev->dev, "PCI->APIC IRQ transform: INT %c -> IRQ %d\n", 'A' + pin, irq); 1072 dev_warn(&dev->dev,
1073 dev->irq = irq; 1073 "using bridge %s INT %c to "
1074 } 1074 "get IRQ %d\n",
1075 pci_name(bridge),
1076 'A' + pin, irq);
1077 }
1078 if (irq >= 0) {
1079 dev_info(&dev->dev,
1080 "PCI->APIC IRQ transform: INT %c "
1081 "-> IRQ %d\n",
1082 'A' + pin, irq);
1083 dev->irq = irq;
1075 } 1084 }
1076 } 1085 }
1077#endif 1086#endif
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 4fc7e872c85e..d1e9b53f9d33 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -1,5 +1,3 @@
1.text
2
3/* 1/*
4 * This may not use any stack, nor any variable that is not "NoSave": 2 * This may not use any stack, nor any variable that is not "NoSave":
5 * 3 *
@@ -12,17 +10,18 @@
12#include <asm/segment.h> 10#include <asm/segment.h>
13#include <asm/page.h> 11#include <asm/page.h>
14#include <asm/asm-offsets.h> 12#include <asm/asm-offsets.h>
13#include <asm/processor-flags.h>
15 14
16 .text 15.text
17 16
18ENTRY(swsusp_arch_suspend) 17ENTRY(swsusp_arch_suspend)
19
20 movl %esp, saved_context_esp 18 movl %esp, saved_context_esp
21 movl %ebx, saved_context_ebx 19 movl %ebx, saved_context_ebx
22 movl %ebp, saved_context_ebp 20 movl %ebp, saved_context_ebp
23 movl %esi, saved_context_esi 21 movl %esi, saved_context_esi
24 movl %edi, saved_context_edi 22 movl %edi, saved_context_edi
25 pushfl ; popl saved_context_eflags 23 pushfl
24 popl saved_context_eflags
26 25
27 call swsusp_save 26 call swsusp_save
28 ret 27 ret
@@ -59,7 +58,7 @@ done:
59 movl mmu_cr4_features, %ecx 58 movl mmu_cr4_features, %ecx
60 jecxz 1f # cr4 Pentium and higher, skip if zero 59 jecxz 1f # cr4 Pentium and higher, skip if zero
61 movl %ecx, %edx 60 movl %ecx, %edx
62 andl $~(1<<7), %edx; # PGE 61 andl $~(X86_CR4_PGE), %edx
63 movl %edx, %cr4; # turn off PGE 62 movl %edx, %cr4; # turn off PGE
641: 631:
65 movl %cr3, %eax; # flush TLB 64 movl %cr3, %eax; # flush TLB
@@ -74,7 +73,8 @@ done:
74 movl saved_context_esi, %esi 73 movl saved_context_esi, %esi
75 movl saved_context_edi, %edi 74 movl saved_context_edi, %edi
76 75
77 pushl saved_context_eflags ; popfl 76 pushl saved_context_eflags
77 popfl
78 78
79 xorl %eax, %eax 79 xorl %eax, %eax
80 80
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4e201b47f64..7dcd321a0508 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -812,7 +812,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
812 812
813/* Early in boot, while setting up the initial pagetable, assume 813/* Early in boot, while setting up the initial pagetable, assume
814 everything is pinned. */ 814 everything is pinned. */
815static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn) 815static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
816{ 816{
817#ifdef CONFIG_FLATMEM 817#ifdef CONFIG_FLATMEM
818 BUG_ON(mem_map); /* should only be used early */ 818 BUG_ON(mem_map); /* should only be used early */
@@ -822,7 +822,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
822 822
823/* Early release_pte assumes that all pts are pinned, since there's 823/* Early release_pte assumes that all pts are pinned, since there's
824 only init_mm and anything attached to that is pinned. */ 824 only init_mm and anything attached to that is pinned. */
825static void xen_release_pte_init(u32 pfn) 825static void xen_release_pte_init(unsigned long pfn)
826{ 826{
827 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 827 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
828} 828}
@@ -838,7 +838,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
838 838
839/* This needs to make sure the new pte page is pinned iff its being 839/* This needs to make sure the new pte page is pinned iff its being
840 attached to a pinned pagetable. */ 840 attached to a pinned pagetable. */
841static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level) 841static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
842{ 842{
843 struct page *page = pfn_to_page(pfn); 843 struct page *page = pfn_to_page(pfn);
844 844
@@ -856,12 +856,12 @@ static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
856 } 856 }
857} 857}
858 858
859static void xen_alloc_pte(struct mm_struct *mm, u32 pfn) 859static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
860{ 860{
861 xen_alloc_ptpage(mm, pfn, PT_PTE); 861 xen_alloc_ptpage(mm, pfn, PT_PTE);
862} 862}
863 863
864static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn) 864static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
865{ 865{
866 xen_alloc_ptpage(mm, pfn, PT_PMD); 866 xen_alloc_ptpage(mm, pfn, PT_PMD);
867} 867}
@@ -909,7 +909,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
909} 909}
910 910
911/* This should never happen until we're OK to use struct page */ 911/* This should never happen until we're OK to use struct page */
912static void xen_release_ptpage(u32 pfn, unsigned level) 912static void xen_release_ptpage(unsigned long pfn, unsigned level)
913{ 913{
914 struct page *page = pfn_to_page(pfn); 914 struct page *page = pfn_to_page(pfn);
915 915
@@ -923,23 +923,23 @@ static void xen_release_ptpage(u32 pfn, unsigned level)
923 } 923 }
924} 924}
925 925
926static void xen_release_pte(u32 pfn) 926static void xen_release_pte(unsigned long pfn)
927{ 927{
928 xen_release_ptpage(pfn, PT_PTE); 928 xen_release_ptpage(pfn, PT_PTE);
929} 929}
930 930
931static void xen_release_pmd(u32 pfn) 931static void xen_release_pmd(unsigned long pfn)
932{ 932{
933 xen_release_ptpage(pfn, PT_PMD); 933 xen_release_ptpage(pfn, PT_PMD);
934} 934}
935 935
936#if PAGETABLE_LEVELS == 4 936#if PAGETABLE_LEVELS == 4
937static void xen_alloc_pud(struct mm_struct *mm, u32 pfn) 937static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
938{ 938{
939 xen_alloc_ptpage(mm, pfn, PT_PUD); 939 xen_alloc_ptpage(mm, pfn, PT_PUD);
940} 940}
941 941
942static void xen_release_pud(u32 pfn) 942static void xen_release_pud(unsigned long pfn)
943{ 943{
944 xen_release_ptpage(pfn, PT_PUD); 944 xen_release_ptpage(pfn, PT_PUD);
945} 945}
diff --git a/block/Makefile b/block/Makefile
index 208000b0750d..bfe73049f939 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -4,8 +4,8 @@
4 4
5obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ 5obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
6 blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ 6 blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
7 blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o \ 7 blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
8 cmd-filter.o 8 ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
9 9
10obj-$(CONFIG_BLK_DEV_BSG) += bsg.o 10obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
11obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o 11obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
diff --git a/block/as-iosched.c b/block/as-iosched.c
index cf4eb0eefbbf..71f0abb219ee 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -462,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
462 del_timer(&ad->antic_timer); 462 del_timer(&ad->antic_timer);
463 ad->antic_status = ANTIC_FINISHED; 463 ad->antic_status = ANTIC_FINISHED;
464 /* see as_work_handler */ 464 /* see as_work_handler */
465 kblockd_schedule_work(&ad->antic_work); 465 kblockd_schedule_work(ad->q, &ad->antic_work);
466 } 466 }
467} 467}
468 468
@@ -483,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
483 aic = ad->io_context->aic; 483 aic = ad->io_context->aic;
484 484
485 ad->antic_status = ANTIC_FINISHED; 485 ad->antic_status = ANTIC_FINISHED;
486 kblockd_schedule_work(&ad->antic_work); 486 kblockd_schedule_work(q, &ad->antic_work);
487 487
488 if (aic->ttime_samples == 0) { 488 if (aic->ttime_samples == 0) {
489 /* process anticipated on has exited or timed out*/ 489 /* process anticipated on has exited or timed out*/
@@ -745,6 +745,14 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
745 */ 745 */
746static int as_can_anticipate(struct as_data *ad, struct request *rq) 746static int as_can_anticipate(struct as_data *ad, struct request *rq)
747{ 747{
748#if 0 /* disable for now, we need to check tag level as well */
749 /*
750 * SSD device without seek penalty, disable idling
751 */
 752 if (blk_queue_nonrot(ad->q))
753 return 0;
754#endif
755
748 if (!ad->io_context) 756 if (!ad->io_context)
749 /* 757 /*
750 * Last request submitted was a write 758 * Last request submitted was a write
@@ -844,7 +852,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
844 if (ad->changed_batch && ad->nr_dispatched == 1) { 852 if (ad->changed_batch && ad->nr_dispatched == 1) {
845 ad->current_batch_expires = jiffies + 853 ad->current_batch_expires = jiffies +
846 ad->batch_expire[ad->batch_data_dir]; 854 ad->batch_expire[ad->batch_data_dir];
847 kblockd_schedule_work(&ad->antic_work); 855 kblockd_schedule_work(q, &ad->antic_work);
848 ad->changed_batch = 0; 856 ad->changed_batch = 0;
849 857
850 if (ad->batch_data_dir == REQ_SYNC) 858 if (ad->batch_data_dir == REQ_SYNC)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index a09ead19f9c5..5c99ff8d2db8 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -293,7 +293,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
293 bio->bi_end_io = bio_end_empty_barrier; 293 bio->bi_end_io = bio_end_empty_barrier;
294 bio->bi_private = &wait; 294 bio->bi_private = &wait;
295 bio->bi_bdev = bdev; 295 bio->bi_bdev = bdev;
296 submit_bio(1 << BIO_RW_BARRIER, bio); 296 submit_bio(WRITE_BARRIER, bio);
297 297
298 wait_for_completion(&wait); 298 wait_for_completion(&wait);
299 299
@@ -315,3 +315,73 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
315 return ret; 315 return ret;
316} 316}
317EXPORT_SYMBOL(blkdev_issue_flush); 317EXPORT_SYMBOL(blkdev_issue_flush);
318
319static void blkdev_discard_end_io(struct bio *bio, int err)
320{
321 if (err) {
322 if (err == -EOPNOTSUPP)
323 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
324 clear_bit(BIO_UPTODATE, &bio->bi_flags);
325 }
326
327 bio_put(bio);
328}
329
330/**
331 * blkdev_issue_discard - queue a discard
332 * @bdev: blockdev to issue discard for
333 * @sector: start sector
334 * @nr_sects: number of sectors to discard
335 * @gfp_mask: memory allocation flags (for bio_alloc)
336 *
337 * Description:
338 * Issue a discard request for the sectors in question. Does not wait.
339 */
340int blkdev_issue_discard(struct block_device *bdev,
341 sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
342{
343 struct request_queue *q;
344 struct bio *bio;
345 int ret = 0;
346
347 if (bdev->bd_disk == NULL)
348 return -ENXIO;
349
350 q = bdev_get_queue(bdev);
351 if (!q)
352 return -ENXIO;
353
354 if (!q->prepare_discard_fn)
355 return -EOPNOTSUPP;
356
357 while (nr_sects && !ret) {
358 bio = bio_alloc(gfp_mask, 0);
359 if (!bio)
360 return -ENOMEM;
361
362 bio->bi_end_io = blkdev_discard_end_io;
363 bio->bi_bdev = bdev;
364
365 bio->bi_sector = sector;
366
367 if (nr_sects > q->max_hw_sectors) {
368 bio->bi_size = q->max_hw_sectors << 9;
369 nr_sects -= q->max_hw_sectors;
370 sector += q->max_hw_sectors;
371 } else {
372 bio->bi_size = nr_sects << 9;
373 nr_sects = 0;
374 }
375 bio_get(bio);
376 submit_bio(DISCARD_BARRIER, bio);
377
378 /* Check if it failed immediately */
379 if (bio_flagged(bio, BIO_EOPNOTSUPP))
380 ret = -EOPNOTSUPP;
381 else if (!bio_flagged(bio, BIO_UPTODATE))
382 ret = -EIO;
383 bio_put(bio);
384 }
385 return ret;
386}
387EXPORT_SYMBOL(blkdev_issue_discard);
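A minimal caller sketch for the new helper (not part of the patch; the device and range are placeholders for whatever extent a filesystem is freeing):

    #include <linux/blkdev.h>

    static int discard_extent(struct block_device *bdev,
                              sector_t start, sector_t nr_sects)
    {
            int ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);

            /* Devices without a prepare_discard_fn report -EOPNOTSUPP;
             * callers typically treat that as a no-op rather than an error. */
            if (ret == -EOPNOTSUPP)
                    ret = 0;

            return ret;
    }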
diff --git a/block/blk-core.c b/block/blk-core.c
index 2cba5ef97b2b..2d053b584410 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -26,8 +26,6 @@
26#include <linux/swap.h> 26#include <linux/swap.h>
27#include <linux/writeback.h> 27#include <linux/writeback.h>
28#include <linux/task_io_accounting_ops.h> 28#include <linux/task_io_accounting_ops.h>
29#include <linux/interrupt.h>
30#include <linux/cpu.h>
31#include <linux/blktrace_api.h> 29#include <linux/blktrace_api.h>
32#include <linux/fault-inject.h> 30#include <linux/fault-inject.h>
33 31
@@ -50,27 +48,26 @@ struct kmem_cache *blk_requestq_cachep;
50 */ 48 */
51static struct workqueue_struct *kblockd_workqueue; 49static struct workqueue_struct *kblockd_workqueue;
52 50
53static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
54
55static void drive_stat_acct(struct request *rq, int new_io) 51static void drive_stat_acct(struct request *rq, int new_io)
56{ 52{
57 struct hd_struct *part; 53 struct hd_struct *part;
58 int rw = rq_data_dir(rq); 54 int rw = rq_data_dir(rq);
55 int cpu;
59 56
60 if (!blk_fs_request(rq) || !rq->rq_disk) 57 if (!blk_fs_request(rq) || !rq->rq_disk)
61 return; 58 return;
62 59
63 part = get_part(rq->rq_disk, rq->sector); 60 cpu = part_stat_lock();
61 part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
62
64 if (!new_io) 63 if (!new_io)
65 __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector); 64 part_stat_inc(cpu, part, merges[rw]);
66 else { 65 else {
67 disk_round_stats(rq->rq_disk); 66 part_round_stats(cpu, part);
68 rq->rq_disk->in_flight++; 67 part_inc_in_flight(part);
69 if (part) {
70 part_round_stats(part);
71 part->in_flight++;
72 }
73 } 68 }
69
70 part_stat_unlock();
74} 71}
75 72
76void blk_queue_congestion_threshold(struct request_queue *q) 73void blk_queue_congestion_threshold(struct request_queue *q)
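The rewritten drive_stat_acct() above shows the new per-CPU, per-partition accounting pattern: look the partition up under part_stat_lock(), update the counters for that CPU, then unlock. A hedged sketch of the same pattern outside the diff (the helper name is invented for illustration):

#include <linux/blkdev.h>
#include <linux/genhd.h>

static void example_account_io(struct gendisk *disk, sector_t sector,
                               int rw, unsigned int bytes)
{
        struct hd_struct *part;
        int cpu;

        cpu = part_stat_lock();                   /* CPU to charge the stats to */
        part = disk_map_sector_rcu(disk, sector); /* valid until part_stat_unlock() */
        part_stat_inc(cpu, part, ios[rw]);
        part_stat_add(cpu, part, sectors[rw], bytes >> 9);
        part_stat_unlock();
}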
@@ -113,7 +110,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
113 memset(rq, 0, sizeof(*rq)); 110 memset(rq, 0, sizeof(*rq));
114 111
115 INIT_LIST_HEAD(&rq->queuelist); 112 INIT_LIST_HEAD(&rq->queuelist);
116 INIT_LIST_HEAD(&rq->donelist); 113 INIT_LIST_HEAD(&rq->timeout_list);
114 rq->cpu = -1;
117 rq->q = q; 115 rq->q = q;
118 rq->sector = rq->hard_sector = (sector_t) -1; 116 rq->sector = rq->hard_sector = (sector_t) -1;
119 INIT_HLIST_NODE(&rq->hash); 117 INIT_HLIST_NODE(&rq->hash);
@@ -308,7 +306,7 @@ void blk_unplug_timeout(unsigned long data)
308 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, 306 blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
309 q->rq.count[READ] + q->rq.count[WRITE]); 307 q->rq.count[READ] + q->rq.count[WRITE]);
310 308
311 kblockd_schedule_work(&q->unplug_work); 309 kblockd_schedule_work(q, &q->unplug_work);
312} 310}
313 311
314void blk_unplug(struct request_queue *q) 312void blk_unplug(struct request_queue *q)
@@ -325,6 +323,21 @@ void blk_unplug(struct request_queue *q)
325} 323}
326EXPORT_SYMBOL(blk_unplug); 324EXPORT_SYMBOL(blk_unplug);
327 325
326static void blk_invoke_request_fn(struct request_queue *q)
327{
328 /*
329 * one level of recursion is ok and is much faster than kicking
330 * the unplug handling
331 */
332 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
333 q->request_fn(q);
334 queue_flag_clear(QUEUE_FLAG_REENTER, q);
335 } else {
336 queue_flag_set(QUEUE_FLAG_PLUGGED, q);
337 kblockd_schedule_work(q, &q->unplug_work);
338 }
339}
340
328/** 341/**
329 * blk_start_queue - restart a previously stopped queue 342 * blk_start_queue - restart a previously stopped queue
330 * @q: The &struct request_queue in question 343 * @q: The &struct request_queue in question
@@ -339,18 +352,7 @@ void blk_start_queue(struct request_queue *q)
339 WARN_ON(!irqs_disabled()); 352 WARN_ON(!irqs_disabled());
340 353
341 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 354 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
342 355 blk_invoke_request_fn(q);
343 /*
344 * one level of recursion is ok and is much faster than kicking
345 * the unplug handling
346 */
347 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
348 q->request_fn(q);
349 queue_flag_clear(QUEUE_FLAG_REENTER, q);
350 } else {
351 blk_plug_device(q);
352 kblockd_schedule_work(&q->unplug_work);
353 }
354} 356}
355EXPORT_SYMBOL(blk_start_queue); 357EXPORT_SYMBOL(blk_start_queue);
356 358
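For reference, a sketch of the usual caller pattern for blk_start_queue(): it must run with the queue lock held and interrupts disabled (note the WARN_ON above), so a driver restarting a stopped queue from, say, an error-recovery path would typically look like this (names are illustrative):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void example_resume_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);     /* clears QUEUE_FLAG_STOPPED, reruns request_fn */
        spin_unlock_irqrestore(q->queue_lock, flags);
}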
@@ -408,15 +410,8 @@ void __blk_run_queue(struct request_queue *q)
408 * Only recurse once to avoid overrunning the stack, let the unplug 410 * Only recurse once to avoid overrunning the stack, let the unplug
409 * handling reinvoke the handler shortly if we already got there. 411 * handling reinvoke the handler shortly if we already got there.
410 */ 412 */
411 if (!elv_queue_empty(q)) { 413 if (!elv_queue_empty(q))
412 if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 414 blk_invoke_request_fn(q);
413 q->request_fn(q);
414 queue_flag_clear(QUEUE_FLAG_REENTER, q);
415 } else {
416 blk_plug_device(q);
417 kblockd_schedule_work(&q->unplug_work);
418 }
419 }
420} 415}
421EXPORT_SYMBOL(__blk_run_queue); 416EXPORT_SYMBOL(__blk_run_queue);
422 417
@@ -441,6 +436,14 @@ void blk_put_queue(struct request_queue *q)
441 436
442void blk_cleanup_queue(struct request_queue *q) 437void blk_cleanup_queue(struct request_queue *q)
443{ 438{
439 /*
440 * We know we have process context here, so we can be a little
441 * cautious and ensure that pending block actions on this device
442 * are done before moving on. Going into this function, we should
443 * not have processes doing IO to this device.
444 */
445 blk_sync_queue(q);
446
444 mutex_lock(&q->sysfs_lock); 447 mutex_lock(&q->sysfs_lock);
445 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 448 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
446 mutex_unlock(&q->sysfs_lock); 449 mutex_unlock(&q->sysfs_lock);
@@ -496,6 +499,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
496 } 499 }
497 500
498 init_timer(&q->unplug_timer); 501 init_timer(&q->unplug_timer);
502 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
503 INIT_LIST_HEAD(&q->timeout_list);
499 504
500 kobject_init(&q->kobj, &blk_queue_ktype); 505 kobject_init(&q->kobj, &blk_queue_ktype);
501 506
@@ -531,7 +536,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
531 * request queue; this lock will be taken also from interrupt context, so irq 536 * request queue; this lock will be taken also from interrupt context, so irq
532 * disabling is needed for it. 537 * disabling is needed for it.
533 * 538 *
534 * Function returns a pointer to the initialized request queue, or NULL if 539 * Function returns a pointer to the initialized request queue, or %NULL if
535 * it didn't succeed. 540 * it didn't succeed.
536 * 541 *
537 * Note: 542 * Note:
@@ -569,7 +574,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
569 q->request_fn = rfn; 574 q->request_fn = rfn;
570 q->prep_rq_fn = NULL; 575 q->prep_rq_fn = NULL;
571 q->unplug_fn = generic_unplug_device; 576 q->unplug_fn = generic_unplug_device;
572 q->queue_flags = (1 << QUEUE_FLAG_CLUSTER); 577 q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
578 1 << QUEUE_FLAG_STACKABLE);
573 q->queue_lock = lock; 579 q->queue_lock = lock;
574 580
575 blk_queue_segment_boundary(q, 0xffffffff); 581 blk_queue_segment_boundary(q, 0xffffffff);
@@ -624,10 +630,6 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
624 630
625 blk_rq_init(q, rq); 631 blk_rq_init(q, rq);
626 632
627 /*
628 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
629 * see bio.h and blkdev.h
630 */
631 rq->cmd_flags = rw | REQ_ALLOCED; 633 rq->cmd_flags = rw | REQ_ALLOCED;
632 634
633 if (priv) { 635 if (priv) {
@@ -888,9 +890,11 @@ EXPORT_SYMBOL(blk_get_request);
888 */ 890 */
889void blk_start_queueing(struct request_queue *q) 891void blk_start_queueing(struct request_queue *q)
890{ 892{
891 if (!blk_queue_plugged(q)) 893 if (!blk_queue_plugged(q)) {
894 if (unlikely(blk_queue_stopped(q)))
895 return;
892 q->request_fn(q); 896 q->request_fn(q);
893 else 897 } else
894 __generic_unplug_device(q); 898 __generic_unplug_device(q);
895} 899}
896EXPORT_SYMBOL(blk_start_queueing); 900EXPORT_SYMBOL(blk_start_queueing);
@@ -907,6 +911,8 @@ EXPORT_SYMBOL(blk_start_queueing);
907 */ 911 */
908void blk_requeue_request(struct request_queue *q, struct request *rq) 912void blk_requeue_request(struct request_queue *q, struct request *rq)
909{ 913{
914 blk_delete_timer(rq);
915 blk_clear_rq_complete(rq);
910 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); 916 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
911 917
912 if (blk_rq_tagged(rq)) 918 if (blk_rq_tagged(rq))
@@ -917,7 +923,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
917EXPORT_SYMBOL(blk_requeue_request); 923EXPORT_SYMBOL(blk_requeue_request);
918 924
919/** 925/**
920 * blk_insert_request - insert a special request in to a request queue 926 * blk_insert_request - insert a special request into a request queue
921 * @q: request queue where request should be inserted 927 * @q: request queue where request should be inserted
922 * @rq: request to be inserted 928 * @rq: request to be inserted
923 * @at_head: insert request at head or tail of queue 929 * @at_head: insert request at head or tail of queue
@@ -927,8 +933,8 @@ EXPORT_SYMBOL(blk_requeue_request);
927 * Many block devices need to execute commands asynchronously, so they don't 933 * Many block devices need to execute commands asynchronously, so they don't
928 * block the whole kernel from preemption during request execution. This is 934 * block the whole kernel from preemption during request execution. This is
 929 * accomplished normally by inserting artificial requests tagged as 935
 930 * REQ_SPECIAL into the corresponding request queue, and letting them be 936 * REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
931 * scheduled for actual execution by the request queue. 937 * be scheduled for actual execution by the request queue.
932 * 938 *
933 * We have the option of inserting the head or the tail of the queue. 939 * We have the option of inserting the head or the tail of the queue.
934 * Typically we use the tail for new ioctls and so forth. We use the head 940 * Typically we use the tail for new ioctls and so forth. We use the head
@@ -982,8 +988,22 @@ static inline void add_request(struct request_queue *q, struct request *req)
982 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); 988 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
983} 989}
984 990
985/* 991static void part_round_stats_single(int cpu, struct hd_struct *part,
986 * disk_round_stats() - Round off the performance stats on a struct 992 unsigned long now)
993{
994 if (now == part->stamp)
995 return;
996
997 if (part->in_flight) {
998 __part_stat_add(cpu, part, time_in_queue,
999 part->in_flight * (now - part->stamp));
1000 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1001 }
1002 part->stamp = now;
1003}
1004
1005/**
1006 * part_round_stats() - Round off the performance stats on a struct
987 * disk_stats. 1007 * disk_stats.
988 * 1008 *
989 * The average IO queue length and utilisation statistics are maintained 1009 * The average IO queue length and utilisation statistics are maintained
@@ -997,36 +1017,15 @@ static inline void add_request(struct request_queue *q, struct request *req)
997 * /proc/diskstats. This accounts immediately for all queue usage up to 1017 * /proc/diskstats. This accounts immediately for all queue usage up to
998 * the current jiffies and restarts the counters again. 1018 * the current jiffies and restarts the counters again.
999 */ 1019 */
1000void disk_round_stats(struct gendisk *disk) 1020void part_round_stats(int cpu, struct hd_struct *part)
1001{ 1021{
1002 unsigned long now = jiffies; 1022 unsigned long now = jiffies;
1003 1023
1004 if (now == disk->stamp) 1024 if (part->partno)
1005 return; 1025 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1006 1026 part_round_stats_single(cpu, part, now);
1007 if (disk->in_flight) {
1008 __disk_stat_add(disk, time_in_queue,
1009 disk->in_flight * (now - disk->stamp));
1010 __disk_stat_add(disk, io_ticks, (now - disk->stamp));
1011 }
1012 disk->stamp = now;
1013}
1014EXPORT_SYMBOL_GPL(disk_round_stats);
1015
1016void part_round_stats(struct hd_struct *part)
1017{
1018 unsigned long now = jiffies;
1019
1020 if (now == part->stamp)
1021 return;
1022
1023 if (part->in_flight) {
1024 __part_stat_add(part, time_in_queue,
1025 part->in_flight * (now - part->stamp));
1026 __part_stat_add(part, io_ticks, (now - part->stamp));
1027 }
1028 part->stamp = now;
1029} 1027}
1028EXPORT_SYMBOL_GPL(part_round_stats);
1030 1029
1031/* 1030/*
1032 * queue lock must be held 1031 * queue lock must be held
@@ -1070,6 +1069,7 @@ EXPORT_SYMBOL(blk_put_request);
1070 1069
1071void init_request_from_bio(struct request *req, struct bio *bio) 1070void init_request_from_bio(struct request *req, struct bio *bio)
1072{ 1071{
1072 req->cpu = bio->bi_comp_cpu;
1073 req->cmd_type = REQ_TYPE_FS; 1073 req->cmd_type = REQ_TYPE_FS;
1074 1074
1075 /* 1075 /*
@@ -1081,7 +1081,12 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1081 /* 1081 /*
1082 * REQ_BARRIER implies no merging, but lets make it explicit 1082 * REQ_BARRIER implies no merging, but lets make it explicit
1083 */ 1083 */
1084 if (unlikely(bio_barrier(bio))) 1084 if (unlikely(bio_discard(bio))) {
1085 req->cmd_flags |= REQ_DISCARD;
1086 if (bio_barrier(bio))
1087 req->cmd_flags |= REQ_SOFTBARRIER;
1088 req->q->prepare_discard_fn(req->q, req);
1089 } else if (unlikely(bio_barrier(bio)))
1085 req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); 1090 req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
1086 1091
1087 if (bio_sync(bio)) 1092 if (bio_sync(bio))
@@ -1099,7 +1104,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1099static int __make_request(struct request_queue *q, struct bio *bio) 1104static int __make_request(struct request_queue *q, struct bio *bio)
1100{ 1105{
1101 struct request *req; 1106 struct request *req;
1102 int el_ret, nr_sectors, barrier, err; 1107 int el_ret, nr_sectors, barrier, discard, err;
1103 const unsigned short prio = bio_prio(bio); 1108 const unsigned short prio = bio_prio(bio);
1104 const int sync = bio_sync(bio); 1109 const int sync = bio_sync(bio);
1105 int rw_flags; 1110 int rw_flags;
@@ -1114,7 +1119,14 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1114 blk_queue_bounce(q, &bio); 1119 blk_queue_bounce(q, &bio);
1115 1120
1116 barrier = bio_barrier(bio); 1121 barrier = bio_barrier(bio);
1117 if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) { 1122 if (unlikely(barrier) && bio_has_data(bio) &&
1123 (q->next_ordered == QUEUE_ORDERED_NONE)) {
1124 err = -EOPNOTSUPP;
1125 goto end_io;
1126 }
1127
1128 discard = bio_discard(bio);
1129 if (unlikely(discard) && !q->prepare_discard_fn) {
1118 err = -EOPNOTSUPP; 1130 err = -EOPNOTSUPP;
1119 goto end_io; 1131 goto end_io;
1120 } 1132 }
@@ -1138,6 +1150,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1138 req->biotail = bio; 1150 req->biotail = bio;
1139 req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1151 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1140 req->ioprio = ioprio_best(req->ioprio, prio); 1152 req->ioprio = ioprio_best(req->ioprio, prio);
1153 if (!blk_rq_cpu_valid(req))
1154 req->cpu = bio->bi_comp_cpu;
1141 drive_stat_acct(req, 0); 1155 drive_stat_acct(req, 0);
1142 if (!attempt_back_merge(q, req)) 1156 if (!attempt_back_merge(q, req))
1143 elv_merged_request(q, req, el_ret); 1157 elv_merged_request(q, req, el_ret);
@@ -1165,6 +1179,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1165 req->sector = req->hard_sector = bio->bi_sector; 1179 req->sector = req->hard_sector = bio->bi_sector;
1166 req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1180 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1167 req->ioprio = ioprio_best(req->ioprio, prio); 1181 req->ioprio = ioprio_best(req->ioprio, prio);
1182 if (!blk_rq_cpu_valid(req))
1183 req->cpu = bio->bi_comp_cpu;
1168 drive_stat_acct(req, 0); 1184 drive_stat_acct(req, 0);
1169 if (!attempt_front_merge(q, req)) 1185 if (!attempt_front_merge(q, req))
1170 elv_merged_request(q, req, el_ret); 1186 elv_merged_request(q, req, el_ret);
@@ -1200,13 +1216,15 @@ get_rq:
1200 init_request_from_bio(req, bio); 1216 init_request_from_bio(req, bio);
1201 1217
1202 spin_lock_irq(q->queue_lock); 1218 spin_lock_irq(q->queue_lock);
1219 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
1220 bio_flagged(bio, BIO_CPU_AFFINE))
1221 req->cpu = blk_cpu_to_group(smp_processor_id());
1203 if (elv_queue_empty(q)) 1222 if (elv_queue_empty(q))
1204 blk_plug_device(q); 1223 blk_plug_device(q);
1205 add_request(q, req); 1224 add_request(q, req);
1206out: 1225out:
1207 if (sync) 1226 if (sync)
1208 __generic_unplug_device(q); 1227 __generic_unplug_device(q);
1209
1210 spin_unlock_irq(q->queue_lock); 1228 spin_unlock_irq(q->queue_lock);
1211 return 0; 1229 return 0;
1212 1230
@@ -1260,8 +1278,9 @@ __setup("fail_make_request=", setup_fail_make_request);
1260 1278
1261static int should_fail_request(struct bio *bio) 1279static int should_fail_request(struct bio *bio)
1262{ 1280{
1263 if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) || 1281 struct hd_struct *part = bio->bi_bdev->bd_part;
1264 (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail)) 1282
1283 if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
1265 return should_fail(&fail_make_request, bio->bi_size); 1284 return should_fail(&fail_make_request, bio->bi_size);
1266 1285
1267 return 0; 1286 return 0;
@@ -1314,7 +1333,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1314} 1333}
1315 1334
1316/** 1335/**
1317 * generic_make_request: hand a buffer to its device driver for I/O 1336 * generic_make_request - hand a buffer to its device driver for I/O
1318 * @bio: The bio describing the location in memory and on the device. 1337 * @bio: The bio describing the location in memory and on the device.
1319 * 1338 *
1320 * generic_make_request() is used to make I/O requests of block 1339 * generic_make_request() is used to make I/O requests of block
@@ -1409,7 +1428,8 @@ end_io:
1409 1428
1410 if (bio_check_eod(bio, nr_sectors)) 1429 if (bio_check_eod(bio, nr_sectors))
1411 goto end_io; 1430 goto end_io;
1412 if (bio_empty_barrier(bio) && !q->prepare_flush_fn) { 1431 if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
1432 (bio_discard(bio) && !q->prepare_discard_fn)) {
1413 err = -EOPNOTSUPP; 1433 err = -EOPNOTSUPP;
1414 goto end_io; 1434 goto end_io;
1415 } 1435 }
@@ -1471,13 +1491,13 @@ void generic_make_request(struct bio *bio)
1471EXPORT_SYMBOL(generic_make_request); 1491EXPORT_SYMBOL(generic_make_request);
1472 1492
1473/** 1493/**
1474 * submit_bio: submit a bio to the block device layer for I/O 1494 * submit_bio - submit a bio to the block device layer for I/O
1475 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) 1495 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1476 * @bio: The &struct bio which describes the I/O 1496 * @bio: The &struct bio which describes the I/O
1477 * 1497 *
1478 * submit_bio() is very similar in purpose to generic_make_request(), and 1498 * submit_bio() is very similar in purpose to generic_make_request(), and
1479 * uses that function to do most of the work. Both are fairly rough 1499 * uses that function to do most of the work. Both are fairly rough
1480 * interfaces, @bio must be presetup and ready for I/O. 1500 * interfaces; @bio must be presetup and ready for I/O.
1481 * 1501 *
1482 */ 1502 */
1483void submit_bio(int rw, struct bio *bio) 1503void submit_bio(int rw, struct bio *bio)
@@ -1490,11 +1510,7 @@ void submit_bio(int rw, struct bio *bio)
1490 * If it's a regular read/write or a barrier with data attached, 1510 * If it's a regular read/write or a barrier with data attached,
1491 * go through the normal accounting stuff before submission. 1511 * go through the normal accounting stuff before submission.
1492 */ 1512 */
1493 if (!bio_empty_barrier(bio)) { 1513 if (bio_has_data(bio)) {
1494
1495 BIO_BUG_ON(!bio->bi_size);
1496 BIO_BUG_ON(!bio->bi_io_vec);
1497
1498 if (rw & WRITE) { 1514 if (rw & WRITE) {
1499 count_vm_events(PGPGOUT, count); 1515 count_vm_events(PGPGOUT, count);
1500 } else { 1516 } else {
@@ -1517,9 +1533,90 @@ void submit_bio(int rw, struct bio *bio)
1517EXPORT_SYMBOL(submit_bio); 1533EXPORT_SYMBOL(submit_bio);
1518 1534
1519/** 1535/**
1536 * blk_rq_check_limits - Helper function to check a request for the queue limit
1537 * @q: the queue
1538 * @rq: the request being checked
1539 *
1540 * Description:
1541 * @rq may have been made based on weaker limitations of upper-level queues
1542 * in request stacking drivers, and it may violate the limitation of @q.
1543 * Since the block layer and the underlying device driver trust @rq
 1544 * after it is inserted into @q, it should be checked against @q before
1545 * the insertion using this generic function.
1546 *
1547 * This function should also be useful for request stacking drivers
 1548 * in some cases below, so export this function.
1549 * Request stacking drivers like request-based dm may change the queue
1550 * limits while requests are in the queue (e.g. dm's table swapping).
 1551 * Such request stacking drivers should check those requests against
 1552 * the new queue limits again when they dispatch those requests,
 1553 * although such checks are also done against the old queue limits
1554 * when submitting requests.
1555 */
1556int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1557{
1558 if (rq->nr_sectors > q->max_sectors ||
1559 rq->data_len > q->max_hw_sectors << 9) {
1560 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1561 return -EIO;
1562 }
1563
1564 /*
1565 * queue's settings related to segment counting like q->bounce_pfn
1566 * may differ from that of other stacking queues.
1567 * Recalculate it to check the request correctly on this queue's
1568 * limitation.
1569 */
1570 blk_recalc_rq_segments(rq);
1571 if (rq->nr_phys_segments > q->max_phys_segments ||
1572 rq->nr_phys_segments > q->max_hw_segments) {
1573 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1574 return -EIO;
1575 }
1576
1577 return 0;
1578}
1579EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1580
1581/**
1582 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1583 * @q: the queue to submit the request
1584 * @rq: the request being queued
1585 */
1586int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1587{
1588 unsigned long flags;
1589
1590 if (blk_rq_check_limits(q, rq))
1591 return -EIO;
1592
1593#ifdef CONFIG_FAIL_MAKE_REQUEST
1594 if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
1595 should_fail(&fail_make_request, blk_rq_bytes(rq)))
1596 return -EIO;
1597#endif
1598
1599 spin_lock_irqsave(q->queue_lock, flags);
1600
1601 /*
1602 * Submitting request must be dequeued before calling this function
1603 * because it will be linked to another request_queue
1604 */
1605 BUG_ON(blk_queued_rq(rq));
1606
1607 drive_stat_acct(rq, 1);
1608 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1609
1610 spin_unlock_irqrestore(q->queue_lock, flags);
1611
1612 return 0;
1613}
1614EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1615
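A hedged sketch of how a request stacking driver (request-based dm is the example the comments use) might hand an already-prepared clone to a lower device's queue. blk_insert_cloned_request() re-runs blk_rq_check_limits() internally, so the explicit check below only mirrors the documentation; "clone" is assumed to be set up and already dequeued elsewhere.

#include <linux/blkdev.h>

static int example_dispatch_clone(struct request_queue *lower_q,
                                  struct request *clone)
{
        /* Re-check against the lower queue's (possibly newer) limits. */
        if (blk_rq_check_limits(lower_q, clone))
                return -EIO;

        /* Accounts the clone and queues it at the back of lower_q. */
        return blk_insert_cloned_request(lower_q, clone);
}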
1616/**
1520 * __end_that_request_first - end I/O on a request 1617 * __end_that_request_first - end I/O on a request
1521 * @req: the request being processed 1618 * @req: the request being processed
1522 * @error: 0 for success, < 0 for error 1619 * @error: %0 for success, < %0 for error
1523 * @nr_bytes: number of bytes to complete 1620 * @nr_bytes: number of bytes to complete
1524 * 1621 *
1525 * Description: 1622 * Description:
@@ -1527,8 +1624,8 @@ EXPORT_SYMBOL(submit_bio);
1527 * for the next range of segments (if any) in the cluster. 1624 * for the next range of segments (if any) in the cluster.
1528 * 1625 *
1529 * Return: 1626 * Return:
1530 * 0 - we are done with this request, call end_that_request_last() 1627 * %0 - we are done with this request, call end_that_request_last()
1531 * 1 - still buffers pending for this request 1628 * %1 - still buffers pending for this request
1532 **/ 1629 **/
1533static int __end_that_request_first(struct request *req, int error, 1630static int __end_that_request_first(struct request *req, int error,
1534 int nr_bytes) 1631 int nr_bytes)
@@ -1539,7 +1636,7 @@ static int __end_that_request_first(struct request *req, int error,
1539 blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE); 1636 blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
1540 1637
1541 /* 1638 /*
1542 * for a REQ_BLOCK_PC request, we want to carry any eventual 1639 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
1543 * sense key with us all the way through 1640 * sense key with us all the way through
1544 */ 1641 */
1545 if (!blk_pc_request(req)) 1642 if (!blk_pc_request(req))
@@ -1552,11 +1649,14 @@ static int __end_that_request_first(struct request *req, int error,
1552 } 1649 }
1553 1650
1554 if (blk_fs_request(req) && req->rq_disk) { 1651 if (blk_fs_request(req) && req->rq_disk) {
1555 struct hd_struct *part = get_part(req->rq_disk, req->sector);
1556 const int rw = rq_data_dir(req); 1652 const int rw = rq_data_dir(req);
1653 struct hd_struct *part;
1654 int cpu;
1557 1655
1558 all_stat_add(req->rq_disk, part, sectors[rw], 1656 cpu = part_stat_lock();
1559 nr_bytes >> 9, req->sector); 1657 part = disk_map_sector_rcu(req->rq_disk, req->sector);
1658 part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
1659 part_stat_unlock();
1560 } 1660 }
1561 1661
1562 total_bytes = bio_nbytes = 0; 1662 total_bytes = bio_nbytes = 0;
@@ -1641,88 +1741,14 @@ static int __end_that_request_first(struct request *req, int error,
1641} 1741}
1642 1742
1643/* 1743/*
1644 * splice the completion data to a local structure and hand off to
1645 * process_completion_queue() to complete the requests
1646 */
1647static void blk_done_softirq(struct softirq_action *h)
1648{
1649 struct list_head *cpu_list, local_list;
1650
1651 local_irq_disable();
1652 cpu_list = &__get_cpu_var(blk_cpu_done);
1653 list_replace_init(cpu_list, &local_list);
1654 local_irq_enable();
1655
1656 while (!list_empty(&local_list)) {
1657 struct request *rq;
1658
1659 rq = list_entry(local_list.next, struct request, donelist);
1660 list_del_init(&rq->donelist);
1661 rq->q->softirq_done_fn(rq);
1662 }
1663}
1664
1665static int __cpuinit blk_cpu_notify(struct notifier_block *self,
1666 unsigned long action, void *hcpu)
1667{
1668 /*
1669 * If a CPU goes away, splice its entries to the current CPU
1670 * and trigger a run of the softirq
1671 */
1672 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1673 int cpu = (unsigned long) hcpu;
1674
1675 local_irq_disable();
1676 list_splice_init(&per_cpu(blk_cpu_done, cpu),
1677 &__get_cpu_var(blk_cpu_done));
1678 raise_softirq_irqoff(BLOCK_SOFTIRQ);
1679 local_irq_enable();
1680 }
1681
1682 return NOTIFY_OK;
1683}
1684
1685
1686static struct notifier_block blk_cpu_notifier __cpuinitdata = {
1687 .notifier_call = blk_cpu_notify,
1688};
1689
1690/**
1691 * blk_complete_request - end I/O on a request
1692 * @req: the request being processed
1693 *
1694 * Description:
1695 * Ends all I/O on a request. It does not handle partial completions,
1696 * unless the driver actually implements this in its completion callback
1697 * through requeueing. The actual completion happens out-of-order,
1698 * through a softirq handler. The user must have registered a completion
1699 * callback through blk_queue_softirq_done().
1700 **/
1701
1702void blk_complete_request(struct request *req)
1703{
1704 struct list_head *cpu_list;
1705 unsigned long flags;
1706
1707 BUG_ON(!req->q->softirq_done_fn);
1708
1709 local_irq_save(flags);
1710
1711 cpu_list = &__get_cpu_var(blk_cpu_done);
1712 list_add_tail(&req->donelist, cpu_list);
1713 raise_softirq_irqoff(BLOCK_SOFTIRQ);
1714
1715 local_irq_restore(flags);
1716}
1717EXPORT_SYMBOL(blk_complete_request);
1718
1719/*
1720 * queue lock must be held 1744 * queue lock must be held
1721 */ 1745 */
1722static void end_that_request_last(struct request *req, int error) 1746static void end_that_request_last(struct request *req, int error)
1723{ 1747{
1724 struct gendisk *disk = req->rq_disk; 1748 struct gendisk *disk = req->rq_disk;
1725 1749
1750 blk_delete_timer(req);
1751
1726 if (blk_rq_tagged(req)) 1752 if (blk_rq_tagged(req))
1727 blk_queue_end_tag(req->q, req); 1753 blk_queue_end_tag(req->q, req);
1728 1754
@@ -1740,16 +1766,18 @@ static void end_that_request_last(struct request *req, int error)
1740 if (disk && blk_fs_request(req) && req != &req->q->bar_rq) { 1766 if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1741 unsigned long duration = jiffies - req->start_time; 1767 unsigned long duration = jiffies - req->start_time;
1742 const int rw = rq_data_dir(req); 1768 const int rw = rq_data_dir(req);
1743 struct hd_struct *part = get_part(disk, req->sector); 1769 struct hd_struct *part;
1744 1770 int cpu;
1745 __all_stat_inc(disk, part, ios[rw], req->sector); 1771
1746 __all_stat_add(disk, part, ticks[rw], duration, req->sector); 1772 cpu = part_stat_lock();
1747 disk_round_stats(disk); 1773 part = disk_map_sector_rcu(disk, req->sector);
1748 disk->in_flight--; 1774
1749 if (part) { 1775 part_stat_inc(cpu, part, ios[rw]);
1750 part_round_stats(part); 1776 part_stat_add(cpu, part, ticks[rw], duration);
1751 part->in_flight--; 1777 part_round_stats(cpu, part);
1752 } 1778 part_dec_in_flight(part);
1779
1780 part_stat_unlock();
1753 } 1781 }
1754 1782
1755 if (req->end_io) 1783 if (req->end_io)
@@ -1762,17 +1790,6 @@ static void end_that_request_last(struct request *req, int error)
1762 } 1790 }
1763} 1791}
1764 1792
1765static inline void __end_request(struct request *rq, int uptodate,
1766 unsigned int nr_bytes)
1767{
1768 int error = 0;
1769
1770 if (uptodate <= 0)
1771 error = uptodate ? uptodate : -EIO;
1772
1773 __blk_end_request(rq, error, nr_bytes);
1774}
1775
1776/** 1793/**
1777 * blk_rq_bytes - Returns bytes left to complete in the entire request 1794 * blk_rq_bytes - Returns bytes left to complete in the entire request
1778 * @rq: the request being processed 1795 * @rq: the request being processed
@@ -1803,74 +1820,57 @@ unsigned int blk_rq_cur_bytes(struct request *rq)
1803EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); 1820EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
1804 1821
1805/** 1822/**
1806 * end_queued_request - end all I/O on a queued request
1807 * @rq: the request being processed
1808 * @uptodate: error value or 0/1 uptodate flag
1809 *
1810 * Description:
1811 * Ends all I/O on a request, and removes it from the block layer queues.
1812 * Not suitable for normal IO completion, unless the driver still has
1813 * the request attached to the block layer.
1814 *
1815 **/
1816void end_queued_request(struct request *rq, int uptodate)
1817{
1818 __end_request(rq, uptodate, blk_rq_bytes(rq));
1819}
1820EXPORT_SYMBOL(end_queued_request);
1821
1822/**
1823 * end_dequeued_request - end all I/O on a dequeued request
1824 * @rq: the request being processed
1825 * @uptodate: error value or 0/1 uptodate flag
1826 *
1827 * Description:
1828 * Ends all I/O on a request. The request must already have been
1829 * dequeued using blkdev_dequeue_request(), as is normally the case
1830 * for most drivers.
1831 *
1832 **/
1833void end_dequeued_request(struct request *rq, int uptodate)
1834{
1835 __end_request(rq, uptodate, blk_rq_bytes(rq));
1836}
1837EXPORT_SYMBOL(end_dequeued_request);
1838
1839
1840/**
1841 * end_request - end I/O on the current segment of the request 1823 * end_request - end I/O on the current segment of the request
1842 * @req: the request being processed 1824 * @req: the request being processed
1843 * @uptodate: error value or 0/1 uptodate flag 1825 * @uptodate: error value or %0/%1 uptodate flag
1844 * 1826 *
1845 * Description: 1827 * Description:
1846 * Ends I/O on the current segment of a request. If that is the only 1828 * Ends I/O on the current segment of a request. If that is the only
1847 * remaining segment, the request is also completed and freed. 1829 * remaining segment, the request is also completed and freed.
1848 * 1830 *
1849 * This is a remnant of how older block drivers handled IO completions. 1831 * This is a remnant of how older block drivers handled I/O completions.
1850 * Modern drivers typically end IO on the full request in one go, unless 1832 * Modern drivers typically end I/O on the full request in one go, unless
1851 * they have a residual value to account for. For that case this function 1833 * they have a residual value to account for. For that case this function
1852 * isn't really useful, unless the residual just happens to be the 1834 * isn't really useful, unless the residual just happens to be the
1853 * full current segment. In other words, don't use this function in new 1835 * full current segment. In other words, don't use this function in new
1854 * code. Either use end_request_completely(), or the 1836 * code. Use blk_end_request() or __blk_end_request() to end a request.
1855 * end_that_request_chunk() (along with end_that_request_last()) for
1856 * partial completions.
1857 *
1858 **/ 1837 **/
1859void end_request(struct request *req, int uptodate) 1838void end_request(struct request *req, int uptodate)
1860{ 1839{
1861 __end_request(req, uptodate, req->hard_cur_sectors << 9); 1840 int error = 0;
1841
1842 if (uptodate <= 0)
1843 error = uptodate ? uptodate : -EIO;
1844
1845 __blk_end_request(req, error, req->hard_cur_sectors << 9);
1862} 1846}
1863EXPORT_SYMBOL(end_request); 1847EXPORT_SYMBOL(end_request);
1864 1848
1849static int end_that_request_data(struct request *rq, int error,
1850 unsigned int nr_bytes, unsigned int bidi_bytes)
1851{
1852 if (rq->bio) {
1853 if (__end_that_request_first(rq, error, nr_bytes))
1854 return 1;
1855
1856 /* Bidi request must be completed as a whole */
1857 if (blk_bidi_rq(rq) &&
1858 __end_that_request_first(rq->next_rq, error, bidi_bytes))
1859 return 1;
1860 }
1861
1862 return 0;
1863}
1864
1865/** 1865/**
1866 * blk_end_io - Generic end_io function to complete a request. 1866 * blk_end_io - Generic end_io function to complete a request.
1867 * @rq: the request being processed 1867 * @rq: the request being processed
1868 * @error: 0 for success, < 0 for error 1868 * @error: %0 for success, < %0 for error
1869 * @nr_bytes: number of bytes to complete @rq 1869 * @nr_bytes: number of bytes to complete @rq
1870 * @bidi_bytes: number of bytes to complete @rq->next_rq 1870 * @bidi_bytes: number of bytes to complete @rq->next_rq
1871 * @drv_callback: function called between completion of bios in the request 1871 * @drv_callback: function called between completion of bios in the request
1872 * and completion of the request. 1872 * and completion of the request.
1873 * If the callback returns non 0, this helper returns without 1873 * If the callback returns non %0, this helper returns without
1874 * completion of the request. 1874 * completion of the request.
1875 * 1875 *
1876 * Description: 1876 * Description:
@@ -1878,8 +1878,8 @@ EXPORT_SYMBOL(end_request);
1878 * If @rq has leftover, sets it up for the next range of segments. 1878 * If @rq has leftover, sets it up for the next range of segments.
1879 * 1879 *
1880 * Return: 1880 * Return:
1881 * 0 - we are done with this request 1881 * %0 - we are done with this request
1882 * 1 - this request is not freed yet, it still has pending buffers. 1882 * %1 - this request is not freed yet, it still has pending buffers.
1883 **/ 1883 **/
1884static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, 1884static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1885 unsigned int bidi_bytes, 1885 unsigned int bidi_bytes,
@@ -1888,15 +1888,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1888 struct request_queue *q = rq->q; 1888 struct request_queue *q = rq->q;
1889 unsigned long flags = 0UL; 1889 unsigned long flags = 0UL;
1890 1890
1891 if (blk_fs_request(rq) || blk_pc_request(rq)) { 1891 if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
1892 if (__end_that_request_first(rq, error, nr_bytes)) 1892 return 1;
1893 return 1;
1894
1895 /* Bidi request must be completed as a whole */
1896 if (blk_bidi_rq(rq) &&
1897 __end_that_request_first(rq->next_rq, error, bidi_bytes))
1898 return 1;
1899 }
1900 1893
1901 /* Special feature for tricky drivers */ 1894 /* Special feature for tricky drivers */
1902 if (drv_callback && drv_callback(rq)) 1895 if (drv_callback && drv_callback(rq))
@@ -1914,7 +1907,7 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1914/** 1907/**
1915 * blk_end_request - Helper function for drivers to complete the request. 1908 * blk_end_request - Helper function for drivers to complete the request.
1916 * @rq: the request being processed 1909 * @rq: the request being processed
1917 * @error: 0 for success, < 0 for error 1910 * @error: %0 for success, < %0 for error
1918 * @nr_bytes: number of bytes to complete 1911 * @nr_bytes: number of bytes to complete
1919 * 1912 *
1920 * Description: 1913 * Description:
@@ -1922,8 +1915,8 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1922 * If @rq has leftover, sets it up for the next range of segments. 1915 * If @rq has leftover, sets it up for the next range of segments.
1923 * 1916 *
1924 * Return: 1917 * Return:
1925 * 0 - we are done with this request 1918 * %0 - we are done with this request
1926 * 1 - still buffers pending for this request 1919 * %1 - still buffers pending for this request
1927 **/ 1920 **/
1928int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 1921int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1929{ 1922{
@@ -1934,22 +1927,20 @@ EXPORT_SYMBOL_GPL(blk_end_request);
1934/** 1927/**
1935 * __blk_end_request - Helper function for drivers to complete the request. 1928 * __blk_end_request - Helper function for drivers to complete the request.
1936 * @rq: the request being processed 1929 * @rq: the request being processed
1937 * @error: 0 for success, < 0 for error 1930 * @error: %0 for success, < %0 for error
1938 * @nr_bytes: number of bytes to complete 1931 * @nr_bytes: number of bytes to complete
1939 * 1932 *
1940 * Description: 1933 * Description:
1941 * Must be called with queue lock held unlike blk_end_request(). 1934 * Must be called with queue lock held unlike blk_end_request().
1942 * 1935 *
1943 * Return: 1936 * Return:
1944 * 0 - we are done with this request 1937 * %0 - we are done with this request
1945 * 1 - still buffers pending for this request 1938 * %1 - still buffers pending for this request
1946 **/ 1939 **/
1947int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 1940int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1948{ 1941{
1949 if (blk_fs_request(rq) || blk_pc_request(rq)) { 1942 if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
1950 if (__end_that_request_first(rq, error, nr_bytes)) 1943 return 1;
1951 return 1;
1952 }
1953 1944
1954 add_disk_randomness(rq->rq_disk); 1945 add_disk_randomness(rq->rq_disk);
1955 1946
@@ -1962,7 +1953,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
1962/** 1953/**
1963 * blk_end_bidi_request - Helper function for drivers to complete bidi request. 1954 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
1964 * @rq: the bidi request being processed 1955 * @rq: the bidi request being processed
1965 * @error: 0 for success, < 0 for error 1956 * @error: %0 for success, < %0 for error
1966 * @nr_bytes: number of bytes to complete @rq 1957 * @nr_bytes: number of bytes to complete @rq
1967 * @bidi_bytes: number of bytes to complete @rq->next_rq 1958 * @bidi_bytes: number of bytes to complete @rq->next_rq
1968 * 1959 *
@@ -1970,8 +1961,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
1970 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 1961 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1971 * 1962 *
1972 * Return: 1963 * Return:
1973 * 0 - we are done with this request 1964 * %0 - we are done with this request
1974 * 1 - still buffers pending for this request 1965 * %1 - still buffers pending for this request
1975 **/ 1966 **/
1976int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, 1967int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1977 unsigned int bidi_bytes) 1968 unsigned int bidi_bytes)
@@ -1981,13 +1972,43 @@ int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1981EXPORT_SYMBOL_GPL(blk_end_bidi_request); 1972EXPORT_SYMBOL_GPL(blk_end_bidi_request);
1982 1973
1983/** 1974/**
1975 * blk_update_request - Special helper function for request stacking drivers
1976 * @rq: the request being processed
1977 * @error: %0 for success, < %0 for error
1978 * @nr_bytes: number of bytes to complete @rq
1979 *
1980 * Description:
1981 * Ends I/O on a number of bytes attached to @rq, but doesn't complete
1982 * the request structure even if @rq doesn't have leftover.
1983 * If @rq has leftover, sets it up for the next range of segments.
1984 *
1985 * This special helper function is only for request stacking drivers
1986 * (e.g. request-based dm) so that they can handle partial completion.
1987 * Actual device drivers should use blk_end_request instead.
1988 */
1989void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
1990{
1991 if (!end_that_request_data(rq, error, nr_bytes, 0)) {
1992 /*
1993 * These members are not updated in end_that_request_data()
1994 * when all bios are completed.
1995 * Update them so that the request stacking driver can find
1996 * how many bytes remain in the request later.
1997 */
1998 rq->nr_sectors = rq->hard_nr_sectors = 0;
1999 rq->current_nr_sectors = rq->hard_cur_sectors = 0;
2000 }
2001}
2002EXPORT_SYMBOL_GPL(blk_update_request);
2003
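A hedged sketch of the partial-completion flow blk_update_request() is added for: a request stacking driver advances both the clone it submitted and the original request by the number of bytes the lower device just finished, without completing or freeing either request. How the driver pairs clone and original is left out, and the function name is invented.

#include <linux/blkdev.h>

static void example_partial_done(struct request *orig, struct request *clone,
                                 int error, unsigned int nr_bytes)
{
        /* Neither request is ended here; leftovers stay set up for later. */
        blk_update_request(clone, error, nr_bytes);
        blk_update_request(orig, error, nr_bytes);
}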
2004/**
1984 * blk_end_request_callback - Special helper function for tricky drivers 2005 * blk_end_request_callback - Special helper function for tricky drivers
1985 * @rq: the request being processed 2006 * @rq: the request being processed
1986 * @error: 0 for success, < 0 for error 2007 * @error: %0 for success, < %0 for error
1987 * @nr_bytes: number of bytes to complete 2008 * @nr_bytes: number of bytes to complete
1988 * @drv_callback: function called between completion of bios in the request 2009 * @drv_callback: function called between completion of bios in the request
1989 * and completion of the request. 2010 * and completion of the request.
1990 * If the callback returns non 0, this helper returns without 2011 * If the callback returns non %0, this helper returns without
1991 * completion of the request. 2012 * completion of the request.
1992 * 2013 *
1993 * Description: 2014 * Description:
@@ -2000,10 +2021,10 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request);
2000 * Don't use this interface in other places anymore. 2021 * Don't use this interface in other places anymore.
2001 * 2022 *
2002 * Return: 2023 * Return:
2003 * 0 - we are done with this request 2024 * %0 - we are done with this request
2004 * 1 - this request is not freed yet. 2025 * %1 - this request is not freed yet.
2005 * this request still has pending buffers or 2026 * this request still has pending buffers or
2006 * the driver doesn't want to finish this request yet. 2027 * the driver doesn't want to finish this request yet.
2007 **/ 2028 **/
2008int blk_end_request_callback(struct request *rq, int error, 2029int blk_end_request_callback(struct request *rq, int error,
2009 unsigned int nr_bytes, 2030 unsigned int nr_bytes,
@@ -2016,15 +2037,17 @@ EXPORT_SYMBOL_GPL(blk_end_request_callback);
2016void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2037void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2017 struct bio *bio) 2038 struct bio *bio)
2018{ 2039{
2019 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ 2040 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw, and
2041 we want BIO_RW_AHEAD (bit 1) to imply REQ_FAILFAST (bit 1). */
2020 rq->cmd_flags |= (bio->bi_rw & 3); 2042 rq->cmd_flags |= (bio->bi_rw & 3);
2021 2043
2022 rq->nr_phys_segments = bio_phys_segments(q, bio); 2044 if (bio_has_data(bio)) {
2023 rq->nr_hw_segments = bio_hw_segments(q, bio); 2045 rq->nr_phys_segments = bio_phys_segments(q, bio);
2046 rq->buffer = bio_data(bio);
2047 }
2024 rq->current_nr_sectors = bio_cur_sectors(bio); 2048 rq->current_nr_sectors = bio_cur_sectors(bio);
2025 rq->hard_cur_sectors = rq->current_nr_sectors; 2049 rq->hard_cur_sectors = rq->current_nr_sectors;
2026 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); 2050 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2027 rq->buffer = bio_data(bio);
2028 rq->data_len = bio->bi_size; 2051 rq->data_len = bio->bi_size;
2029 2052
2030 rq->bio = rq->biotail = bio; 2053 rq->bio = rq->biotail = bio;
@@ -2033,7 +2056,35 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2033 rq->rq_disk = bio->bi_bdev->bd_disk; 2056 rq->rq_disk = bio->bi_bdev->bd_disk;
2034} 2057}
2035 2058
2036int kblockd_schedule_work(struct work_struct *work) 2059/**
2060 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2061 * @q : the queue of the device being checked
2062 *
2063 * Description:
2064 * Check if underlying low-level drivers of a device are busy.
 2065 * If the drivers want to export their busy state, they must set their own
2066 * exporting function using blk_queue_lld_busy() first.
2067 *
2068 * Basically, this function is used only by request stacking drivers
2069 * to stop dispatching requests to underlying devices when underlying
 2070 * devices are busy. This behavior improves I/O merging on the queue
 2071 * of the request stacking driver and prevents I/O throughput regression
 2072 * under bursty I/O load.
2073 *
2074 * Return:
2075 * 0 - Not busy (The request stacking driver should dispatch request)
2076 * 1 - Busy (The request stacking driver should stop dispatching request)
2077 */
2078int blk_lld_busy(struct request_queue *q)
2079{
2080 if (q->lld_busy_fn)
2081 return q->lld_busy_fn(q);
2082
2083 return 0;
2084}
2085EXPORT_SYMBOL_GPL(blk_lld_busy);
2086
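A sketch of both halves of the busy-state interface described above: a low-level driver registers its own lld_busy_fn via blk_queue_lld_busy() (the registration helper named in the comment, assumed to take the queue and the function), and a stacking driver polls blk_lld_busy() before dispatching. The example_hw structure and the queue-depth heuristic are inventions for illustration.

#include <linux/blkdev.h>

struct example_hw {
        unsigned int inflight;
        unsigned int queue_depth;
};

/* Low-level driver side: report "busy" while the hardware queue is full. */
static int example_lld_busy(struct request_queue *q)
{
        struct example_hw *hw = q->queuedata;

        return hw->inflight >= hw->queue_depth;
}

static void example_register_busy_fn(struct request_queue *q,
                                     struct example_hw *hw)
{
        q->queuedata = hw;
        blk_queue_lld_busy(q, example_lld_busy);
}

/* Stacking driver side: hold back dispatch while the lower device is busy. */
static int example_should_dispatch(struct request_queue *lower_q)
{
        return !blk_lld_busy(lower_q);
}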
2087int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2037{ 2088{
2038 return queue_work(kblockd_workqueue, work); 2089 return queue_work(kblockd_workqueue, work);
2039} 2090}
@@ -2047,8 +2098,6 @@ EXPORT_SYMBOL(kblockd_flush_work);
2047 2098
2048int __init blk_dev_init(void) 2099int __init blk_dev_init(void)
2049{ 2100{
2050 int i;
2051
2052 kblockd_workqueue = create_workqueue("kblockd"); 2101 kblockd_workqueue = create_workqueue("kblockd");
2053 if (!kblockd_workqueue) 2102 if (!kblockd_workqueue)
2054 panic("Failed to create kblockd\n"); 2103 panic("Failed to create kblockd\n");
@@ -2059,12 +2108,6 @@ int __init blk_dev_init(void)
2059 blk_requestq_cachep = kmem_cache_create("blkdev_queue", 2108 blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2060 sizeof(struct request_queue), 0, SLAB_PANIC, NULL); 2109 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2061 2110
2062 for_each_possible_cpu(i)
2063 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
2064
2065 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
2066 register_hotcpu_notifier(&blk_cpu_notifier);
2067
2068 return 0; 2111 return 0;
2069} 2112}
2070 2113
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 9bceff7674f2..6af716d1e54e 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -16,7 +16,7 @@
16/** 16/**
17 * blk_end_sync_rq - executes a completion event on a request 17 * blk_end_sync_rq - executes a completion event on a request
18 * @rq: request to complete 18 * @rq: request to complete
19 * @error: end io status of the request 19 * @error: end I/O status of the request
20 */ 20 */
21static void blk_end_sync_rq(struct request *rq, int error) 21static void blk_end_sync_rq(struct request *rq, int error)
22{ 22{
@@ -41,7 +41,7 @@ static void blk_end_sync_rq(struct request *rq, int error)
41 * @done: I/O completion handler 41 * @done: I/O completion handler
42 * 42 *
43 * Description: 43 * Description:
44 * Insert a fully prepared request at the back of the io scheduler queue 44 * Insert a fully prepared request at the back of the I/O scheduler queue
45 * for execution. Don't wait for completion. 45 * for execution. Don't wait for completion.
46 */ 46 */
47void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, 47void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
72 * @at_head: insert request at head or tail of queue 72 * @at_head: insert request at head or tail of queue
73 * 73 *
74 * Description: 74 * Description:
75 * Insert a fully prepared request at the back of the io scheduler queue 75 * Insert a fully prepared request at the back of the I/O scheduler queue
76 * for execution and wait for completion. 76 * for execution and wait for completion.
77 */ 77 */
78int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, 78int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 3f1a8478cc38..61a8e2f8fdd0 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -108,51 +108,51 @@ new_segment:
108EXPORT_SYMBOL(blk_rq_map_integrity_sg); 108EXPORT_SYMBOL(blk_rq_map_integrity_sg);
109 109
110/** 110/**
111 * blk_integrity_compare - Compare integrity profile of two block devices 111 * blk_integrity_compare - Compare integrity profile of two disks
112 * @b1: Device to compare 112 * @gd1: Disk to compare
113 * @b2: Device to compare 113 * @gd2: Disk to compare
114 * 114 *
115 * Description: Meta-devices like DM and MD need to verify that all 115 * Description: Meta-devices like DM and MD need to verify that all
116 * sub-devices use the same integrity format before advertising to 116 * sub-devices use the same integrity format before advertising to
117 * upper layers that they can send/receive integrity metadata. This 117 * upper layers that they can send/receive integrity metadata. This
118 * function can be used to check whether two block devices have 118 * function can be used to check whether two gendisk devices have
119 * compatible integrity formats. 119 * compatible integrity formats.
120 */ 120 */
121int blk_integrity_compare(struct block_device *bd1, struct block_device *bd2) 121int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
122{ 122{
123 struct blk_integrity *b1 = bd1->bd_disk->integrity; 123 struct blk_integrity *b1 = gd1->integrity;
124 struct blk_integrity *b2 = bd2->bd_disk->integrity; 124 struct blk_integrity *b2 = gd2->integrity;
125 125
126 BUG_ON(bd1->bd_disk == NULL); 126 if (!b1 && !b2)
127 BUG_ON(bd2->bd_disk == NULL); 127 return 0;
128 128
129 if (!b1 || !b2) 129 if (!b1 || !b2)
130 return 0; 130 return -1;
131 131
132 if (b1->sector_size != b2->sector_size) { 132 if (b1->sector_size != b2->sector_size) {
133 printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__, 133 printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__,
134 bd1->bd_disk->disk_name, bd2->bd_disk->disk_name, 134 gd1->disk_name, gd2->disk_name,
135 b1->sector_size, b2->sector_size); 135 b1->sector_size, b2->sector_size);
136 return -1; 136 return -1;
137 } 137 }
138 138
139 if (b1->tuple_size != b2->tuple_size) { 139 if (b1->tuple_size != b2->tuple_size) {
140 printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__, 140 printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
141 bd1->bd_disk->disk_name, bd2->bd_disk->disk_name, 141 gd1->disk_name, gd2->disk_name,
142 b1->tuple_size, b2->tuple_size); 142 b1->tuple_size, b2->tuple_size);
143 return -1; 143 return -1;
144 } 144 }
145 145
146 if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) { 146 if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
147 printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__, 147 printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
148 bd1->bd_disk->disk_name, bd2->bd_disk->disk_name, 148 gd1->disk_name, gd2->disk_name,
149 b1->tag_size, b2->tag_size); 149 b1->tag_size, b2->tag_size);
150 return -1; 150 return -1;
151 } 151 }
152 152
153 if (strcmp(b1->name, b2->name)) { 153 if (strcmp(b1->name, b2->name)) {
154 printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__, 154 printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
155 bd1->bd_disk->disk_name, bd2->bd_disk->disk_name, 155 gd1->disk_name, gd2->disk_name,
156 b1->name, b2->name); 156 b1->name, b2->name);
157 return -1; 157 return -1;
158 } 158 }
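A sketch of the caller this interface change targets: a meta-device (MD/DM style) checking that two member disks carry compatible integrity profiles before advertising integrity support upward. With the new prototype the caller passes gendisks directly; the wrapper name is illustrative.

#include <linux/genhd.h>
#include <linux/blkdev.h>

static int example_members_compatible(struct gendisk *gd1, struct gendisk *gd2)
{
        /* 0 means matching profiles (or neither disk registered one). */
        return blk_integrity_compare(gd1, gd2) == 0;
}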
@@ -331,7 +331,8 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
331 return -1; 331 return -1;
332 332
333 if (kobject_init_and_add(&bi->kobj, &integrity_ktype, 333 if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
334 &disk->dev.kobj, "%s", "integrity")) { 334 &disk_to_dev(disk)->kobj,
335 "%s", "integrity")) {
335 kmem_cache_free(integrity_cachep, bi); 336 kmem_cache_free(integrity_cachep, bi);
336 return -1; 337 return -1;
337 } 338 }
@@ -375,7 +376,7 @@ void blk_integrity_unregister(struct gendisk *disk)
375 376
376 kobject_uevent(&bi->kobj, KOBJ_REMOVE); 377 kobject_uevent(&bi->kobj, KOBJ_REMOVE);
377 kobject_del(&bi->kobj); 378 kobject_del(&bi->kobj);
378 kobject_put(&disk->dev.kobj);
379 kmem_cache_free(integrity_cachep, bi); 379 kmem_cache_free(integrity_cachep, bi);
380 disk->integrity = NULL;
380} 381}
381EXPORT_SYMBOL(blk_integrity_unregister); 382EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-map.c b/block/blk-map.c
index af37e4ae62f5..4849fa36161e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -41,10 +41,10 @@ static int __blk_rq_unmap_user(struct bio *bio)
41} 41}
42 42
43static int __blk_rq_map_user(struct request_queue *q, struct request *rq, 43static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
44 void __user *ubuf, unsigned int len) 44 struct rq_map_data *map_data, void __user *ubuf,
45 unsigned int len, int null_mapped, gfp_t gfp_mask)
45{ 46{
46 unsigned long uaddr; 47 unsigned long uaddr;
47 unsigned int alignment;
48 struct bio *bio, *orig_bio; 48 struct bio *bio, *orig_bio;
49 int reading, ret; 49 int reading, ret;
50 50
@@ -55,15 +55,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
55 * direct dma. else, set up kernel bounce buffers 55 * direct dma. else, set up kernel bounce buffers
56 */ 56 */
57 uaddr = (unsigned long) ubuf; 57 uaddr = (unsigned long) ubuf;
58 alignment = queue_dma_alignment(q) | q->dma_pad_mask; 58 if (blk_rq_aligned(q, ubuf, len) && !map_data)
59 if (!(uaddr & alignment) && !(len & alignment)) 59 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
60 bio = bio_map_user(q, NULL, uaddr, len, reading);
61 else 60 else
62 bio = bio_copy_user(q, uaddr, len, reading); 61 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
63 62
64 if (IS_ERR(bio)) 63 if (IS_ERR(bio))
65 return PTR_ERR(bio); 64 return PTR_ERR(bio);
66 65
66 if (null_mapped)
67 bio->bi_flags |= (1 << BIO_NULL_MAPPED);
68
67 orig_bio = bio; 69 orig_bio = bio;
68 blk_queue_bounce(q, &bio); 70 blk_queue_bounce(q, &bio);
69 71
@@ -85,17 +87,19 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
85} 87}
86 88
87/** 89/**
88 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage 90 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
89 * @q: request queue where request should be inserted 91 * @q: request queue where request should be inserted
90 * @rq: request structure to fill 92 * @rq: request structure to fill
93 * @map_data: pointer to the rq_map_data holding pages (if necessary)
91 * @ubuf: the user buffer 94 * @ubuf: the user buffer
92 * @len: length of user data 95 * @len: length of user data
96 * @gfp_mask: memory allocation flags
93 * 97 *
94 * Description: 98 * Description:
95 * Data will be mapped directly for zero copy io, if possible. Otherwise 99 * Data will be mapped directly for zero copy I/O, if possible. Otherwise
96 * a kernel bounce buffer is used. 100 * a kernel bounce buffer is used.
97 * 101 *
98 * A matching blk_rq_unmap_user() must be issued at the end of io, while 102 * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
99 * still in process context. 103 * still in process context.
100 * 104 *
101 * Note: The mapped bio may need to be bounced through blk_queue_bounce() 105 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -105,16 +109,22 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
105 * unmapping. 109 * unmapping.
106 */ 110 */
107int blk_rq_map_user(struct request_queue *q, struct request *rq, 111int blk_rq_map_user(struct request_queue *q, struct request *rq,
108 void __user *ubuf, unsigned long len) 112 struct rq_map_data *map_data, void __user *ubuf,
113 unsigned long len, gfp_t gfp_mask)
109{ 114{
110 unsigned long bytes_read = 0; 115 unsigned long bytes_read = 0;
111 struct bio *bio = NULL; 116 struct bio *bio = NULL;
112 int ret; 117 int ret, null_mapped = 0;
113 118
114 if (len > (q->max_hw_sectors << 9)) 119 if (len > (q->max_hw_sectors << 9))
115 return -EINVAL; 120 return -EINVAL;
116 if (!len || !ubuf) 121 if (!len)
117 return -EINVAL; 122 return -EINVAL;
123 if (!ubuf) {
124 if (!map_data || rq_data_dir(rq) != READ)
125 return -EINVAL;
126 null_mapped = 1;
127 }
118 128
119 while (bytes_read != len) { 129 while (bytes_read != len) {
120 unsigned long map_len, end, start; 130 unsigned long map_len, end, start;
@@ -132,7 +142,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
132 if (end - start > BIO_MAX_PAGES) 142 if (end - start > BIO_MAX_PAGES)
133 map_len -= PAGE_SIZE; 143 map_len -= PAGE_SIZE;
134 144
135 ret = __blk_rq_map_user(q, rq, ubuf, map_len); 145 ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
146 null_mapped, gfp_mask);
136 if (ret < 0) 147 if (ret < 0)
137 goto unmap_rq; 148 goto unmap_rq;
138 if (!bio) 149 if (!bio)
@@ -154,18 +165,20 @@ unmap_rq:
154EXPORT_SYMBOL(blk_rq_map_user); 165EXPORT_SYMBOL(blk_rq_map_user);
155 166
156/** 167/**
157 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage 168 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
158 * @q: request queue where request should be inserted 169 * @q: request queue where request should be inserted
159 * @rq: request to map data to 170 * @rq: request to map data to
171 * @map_data: pointer to the rq_map_data holding pages (if necessary)
160 * @iov: pointer to the iovec 172 * @iov: pointer to the iovec
161 * @iov_count: number of elements in the iovec 173 * @iov_count: number of elements in the iovec
162 * @len: I/O byte count 174 * @len: I/O byte count
175 * @gfp_mask: memory allocation flags
163 * 176 *
164 * Description: 177 * Description:
165 * Data will be mapped directly for zero copy io, if possible. Otherwise 178 * Data will be mapped directly for zero copy I/O, if possible. Otherwise
166 * a kernel bounce buffer is used. 179 * a kernel bounce buffer is used.
167 * 180 *
168 * A matching blk_rq_unmap_user() must be issued at the end of io, while 181 * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
169 * still in process context. 182 * still in process context.
170 * 183 *
171 * Note: The mapped bio may need to be bounced through blk_queue_bounce() 184 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -175,7 +188,8 @@ EXPORT_SYMBOL(blk_rq_map_user);
175 * unmapping. 188 * unmapping.
176 */ 189 */
177int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, 190int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
178 struct sg_iovec *iov, int iov_count, unsigned int len) 191 struct rq_map_data *map_data, struct sg_iovec *iov,
192 int iov_count, unsigned int len, gfp_t gfp_mask)
179{ 193{
180 struct bio *bio; 194 struct bio *bio;
181 int i, read = rq_data_dir(rq) == READ; 195 int i, read = rq_data_dir(rq) == READ;
@@ -193,10 +207,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
193 } 207 }
194 } 208 }
195 209
196 if (unaligned || (q->dma_pad_mask & len)) 210 if (unaligned || (q->dma_pad_mask & len) || map_data)
197 bio = bio_copy_user_iov(q, iov, iov_count, read); 211 bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
212 gfp_mask);
198 else 213 else
199 bio = bio_map_user_iov(q, NULL, iov, iov_count, read); 214 bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
200 215
201 if (IS_ERR(bio)) 216 if (IS_ERR(bio))
202 return PTR_ERR(bio); 217 return PTR_ERR(bio);
@@ -216,6 +231,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
216 rq->buffer = rq->data = NULL; 231 rq->buffer = rq->data = NULL;
217 return 0; 232 return 0;
218} 233}
234EXPORT_SYMBOL(blk_rq_map_user_iov);
219 235
220/** 236/**
221 * blk_rq_unmap_user - unmap a request with user data 237 * blk_rq_unmap_user - unmap a request with user data
@@ -224,7 +240,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
224 * Description: 240 * Description:
225 * Unmap a rq previously mapped by blk_rq_map_user(). The caller must 241 * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
226 * supply the original rq->bio from the blk_rq_map_user() return, since 242 * supply the original rq->bio from the blk_rq_map_user() return, since
227 * the io completion may have changed rq->bio. 243 * the I/O completion may have changed rq->bio.
228 */ 244 */
229int blk_rq_unmap_user(struct bio *bio) 245int blk_rq_unmap_user(struct bio *bio)
230{ 246{
@@ -250,7 +266,7 @@ int blk_rq_unmap_user(struct bio *bio)
250EXPORT_SYMBOL(blk_rq_unmap_user); 266EXPORT_SYMBOL(blk_rq_unmap_user);
251 267
252/** 268/**
253 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage 269 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
254 * @q: request queue where request should be inserted 270 * @q: request queue where request should be inserted
255 * @rq: request to fill 271 * @rq: request to fill
256 * @kbuf: the kernel buffer 272 * @kbuf: the kernel buffer
@@ -264,8 +280,6 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
264int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 280int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
265 unsigned int len, gfp_t gfp_mask) 281 unsigned int len, gfp_t gfp_mask)
266{ 282{
267 unsigned long kaddr;
268 unsigned int alignment;
269 int reading = rq_data_dir(rq) == READ; 283 int reading = rq_data_dir(rq) == READ;
270 int do_copy = 0; 284 int do_copy = 0;
271 struct bio *bio; 285 struct bio *bio;
@@ -275,11 +289,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
275 if (!len || !kbuf) 289 if (!len || !kbuf)
276 return -EINVAL; 290 return -EINVAL;
277 291
278 kaddr = (unsigned long)kbuf; 292 do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
279 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
280 do_copy = ((kaddr & alignment) || (len & alignment) ||
281 object_is_on_stack(kbuf));
282
283 if (do_copy) 293 if (do_copy)
284 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); 294 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
285 else 295 else
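For reference, a caller of the reworked blk_rq_map_user() now passes an rq_map_data pointer and a gfp mask; a minimal sketch of the common NULL-map_data case (function and variable names hypothetical, mirroring the bsg.c update further down):

#include <linux/blkdev.h>

/*
 * Illustrative caller only. With no preallocated pages, map_data is
 * simply NULL and the allocation mask is passed through.
 */
static int example_map_payload(struct request_queue *q, struct request *rq,
                               void __user *ubuf, unsigned long len)
{
        struct bio *bio;
        int ret;

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                return ret;

        /*
         * Remember the original bio: I/O completion may change rq->bio,
         * and blk_rq_unmap_user() wants the bio set up by the mapping.
         */
        bio = rq->bio;

        /* ... issue rq and wait for completion, still in process context ... */

        return blk_rq_unmap_user(bio);
}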
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5efc9e7a68b7..908d3e11ac52 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -11,7 +11,7 @@
11 11
12void blk_recalc_rq_sectors(struct request *rq, int nsect) 12void blk_recalc_rq_sectors(struct request *rq, int nsect)
13{ 13{
14 if (blk_fs_request(rq)) { 14 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
15 rq->hard_sector += nsect; 15 rq->hard_sector += nsect;
16 rq->hard_nr_sectors -= nsect; 16 rq->hard_nr_sectors -= nsect;
17 17
@@ -41,12 +41,9 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
41void blk_recalc_rq_segments(struct request *rq) 41void blk_recalc_rq_segments(struct request *rq)
42{ 42{
43 int nr_phys_segs; 43 int nr_phys_segs;
44 int nr_hw_segs;
45 unsigned int phys_size; 44 unsigned int phys_size;
46 unsigned int hw_size;
47 struct bio_vec *bv, *bvprv = NULL; 45 struct bio_vec *bv, *bvprv = NULL;
48 int seg_size; 46 int seg_size;
49 int hw_seg_size;
50 int cluster; 47 int cluster;
51 struct req_iterator iter; 48 struct req_iterator iter;
52 int high, highprv = 1; 49 int high, highprv = 1;
@@ -56,8 +53,8 @@ void blk_recalc_rq_segments(struct request *rq)
56 return; 53 return;
57 54
58 cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 55 cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
59 hw_seg_size = seg_size = 0; 56 seg_size = 0;
60 phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0; 57 phys_size = nr_phys_segs = 0;
61 rq_for_each_segment(bv, rq, iter) { 58 rq_for_each_segment(bv, rq, iter) {
62 /* 59 /*
63 * the trick here is making sure that a high page is never 60 * the trick here is making sure that a high page is never
@@ -66,7 +63,7 @@ void blk_recalc_rq_segments(struct request *rq)
66 */ 63 */
67 high = page_to_pfn(bv->bv_page) > q->bounce_pfn; 64 high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
68 if (high || highprv) 65 if (high || highprv)
69 goto new_hw_segment; 66 goto new_segment;
70 if (cluster) { 67 if (cluster) {
71 if (seg_size + bv->bv_len > q->max_segment_size) 68 if (seg_size + bv->bv_len > q->max_segment_size)
72 goto new_segment; 69 goto new_segment;
@@ -74,40 +71,19 @@ void blk_recalc_rq_segments(struct request *rq)
74 goto new_segment; 71 goto new_segment;
75 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) 72 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
76 goto new_segment; 73 goto new_segment;
77 if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
78 goto new_hw_segment;
79 74
80 seg_size += bv->bv_len; 75 seg_size += bv->bv_len;
81 hw_seg_size += bv->bv_len;
82 bvprv = bv; 76 bvprv = bv;
83 continue; 77 continue;
84 } 78 }
85new_segment: 79new_segment:
86 if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
87 !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
88 hw_seg_size += bv->bv_len;
89 else {
90new_hw_segment:
91 if (nr_hw_segs == 1 &&
92 hw_seg_size > rq->bio->bi_hw_front_size)
93 rq->bio->bi_hw_front_size = hw_seg_size;
94 hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
95 nr_hw_segs++;
96 }
97
98 nr_phys_segs++; 80 nr_phys_segs++;
99 bvprv = bv; 81 bvprv = bv;
100 seg_size = bv->bv_len; 82 seg_size = bv->bv_len;
101 highprv = high; 83 highprv = high;
102 } 84 }
103 85
104 if (nr_hw_segs == 1 &&
105 hw_seg_size > rq->bio->bi_hw_front_size)
106 rq->bio->bi_hw_front_size = hw_seg_size;
107 if (hw_seg_size > rq->biotail->bi_hw_back_size)
108 rq->biotail->bi_hw_back_size = hw_seg_size;
109 rq->nr_phys_segments = nr_phys_segs; 86 rq->nr_phys_segments = nr_phys_segs;
110 rq->nr_hw_segments = nr_hw_segs;
111} 87}
112 88
113void blk_recount_segments(struct request_queue *q, struct bio *bio) 89void blk_recount_segments(struct request_queue *q, struct bio *bio)
@@ -120,7 +96,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
120 blk_recalc_rq_segments(&rq); 96 blk_recalc_rq_segments(&rq);
121 bio->bi_next = nxt; 97 bio->bi_next = nxt;
122 bio->bi_phys_segments = rq.nr_phys_segments; 98 bio->bi_phys_segments = rq.nr_phys_segments;
123 bio->bi_hw_segments = rq.nr_hw_segments;
124 bio->bi_flags |= (1 << BIO_SEG_VALID); 99 bio->bi_flags |= (1 << BIO_SEG_VALID);
125} 100}
126EXPORT_SYMBOL(blk_recount_segments); 101EXPORT_SYMBOL(blk_recount_segments);
@@ -131,13 +106,17 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
131 if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) 106 if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
132 return 0; 107 return 0;
133 108
134 if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
135 return 0;
136 if (bio->bi_size + nxt->bi_size > q->max_segment_size) 109 if (bio->bi_size + nxt->bi_size > q->max_segment_size)
137 return 0; 110 return 0;
138 111
112 if (!bio_has_data(bio))
113 return 1;
114
115 if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
116 return 0;
117
139 /* 118 /*
140 * bio and nxt are contigous in memory, check if the queue allows 119 * bio and nxt are contiguous in memory; check if the queue allows
141 * these two to be merged into one 120 * these two to be merged into one
142 */ 121 */
143 if (BIO_SEG_BOUNDARY(q, bio, nxt)) 122 if (BIO_SEG_BOUNDARY(q, bio, nxt))
@@ -146,22 +125,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
146 return 0; 125 return 0;
147} 126}
148 127
149static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
150 struct bio *nxt)
151{
152 if (!bio_flagged(bio, BIO_SEG_VALID))
153 blk_recount_segments(q, bio);
154 if (!bio_flagged(nxt, BIO_SEG_VALID))
155 blk_recount_segments(q, nxt);
156 if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
157 BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
158 return 0;
159 if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
160 return 0;
161
162 return 1;
163}
164
165/* 128/*
166 * map a request to scatterlist, return number of sg entries setup. Caller 129 * map a request to scatterlist, return number of sg entries setup. Caller
167 * must make sure sg can hold rq->nr_phys_segments entries 130 * must make sure sg can hold rq->nr_phys_segments entries
@@ -275,10 +238,9 @@ static inline int ll_new_hw_segment(struct request_queue *q,
275 struct request *req, 238 struct request *req,
276 struct bio *bio) 239 struct bio *bio)
277{ 240{
278 int nr_hw_segs = bio_hw_segments(q, bio);
279 int nr_phys_segs = bio_phys_segments(q, bio); 241 int nr_phys_segs = bio_phys_segments(q, bio);
280 242
281 if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments 243 if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
282 || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { 244 || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
283 req->cmd_flags |= REQ_NOMERGE; 245 req->cmd_flags |= REQ_NOMERGE;
284 if (req == q->last_merge) 246 if (req == q->last_merge)
@@ -290,7 +252,6 @@ static inline int ll_new_hw_segment(struct request_queue *q,
290 * This will form the start of a new hw segment. Bump both 252 * This will form the start of a new hw segment. Bump both
291 * counters. 253 * counters.
292 */ 254 */
293 req->nr_hw_segments += nr_hw_segs;
294 req->nr_phys_segments += nr_phys_segs; 255 req->nr_phys_segments += nr_phys_segs;
295 return 1; 256 return 1;
296} 257}
@@ -299,7 +260,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
299 struct bio *bio) 260 struct bio *bio)
300{ 261{
301 unsigned short max_sectors; 262 unsigned short max_sectors;
302 int len;
303 263
304 if (unlikely(blk_pc_request(req))) 264 if (unlikely(blk_pc_request(req)))
305 max_sectors = q->max_hw_sectors; 265 max_sectors = q->max_hw_sectors;
@@ -316,19 +276,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
316 blk_recount_segments(q, req->biotail); 276 blk_recount_segments(q, req->biotail);
317 if (!bio_flagged(bio, BIO_SEG_VALID)) 277 if (!bio_flagged(bio, BIO_SEG_VALID))
318 blk_recount_segments(q, bio); 278 blk_recount_segments(q, bio);
319 len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
320 if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
321 && !BIOVEC_VIRT_OVERSIZE(len)) {
322 int mergeable = ll_new_mergeable(q, req, bio);
323
324 if (mergeable) {
325 if (req->nr_hw_segments == 1)
326 req->bio->bi_hw_front_size = len;
327 if (bio->bi_hw_segments == 1)
328 bio->bi_hw_back_size = len;
329 }
330 return mergeable;
331 }
332 279
333 return ll_new_hw_segment(q, req, bio); 280 return ll_new_hw_segment(q, req, bio);
334} 281}
@@ -337,7 +284,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
337 struct bio *bio) 284 struct bio *bio)
338{ 285{
339 unsigned short max_sectors; 286 unsigned short max_sectors;
340 int len;
341 287
342 if (unlikely(blk_pc_request(req))) 288 if (unlikely(blk_pc_request(req)))
343 max_sectors = q->max_hw_sectors; 289 max_sectors = q->max_hw_sectors;
@@ -351,23 +297,10 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
351 q->last_merge = NULL; 297 q->last_merge = NULL;
352 return 0; 298 return 0;
353 } 299 }
354 len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
355 if (!bio_flagged(bio, BIO_SEG_VALID)) 300 if (!bio_flagged(bio, BIO_SEG_VALID))
356 blk_recount_segments(q, bio); 301 blk_recount_segments(q, bio);
357 if (!bio_flagged(req->bio, BIO_SEG_VALID)) 302 if (!bio_flagged(req->bio, BIO_SEG_VALID))
358 blk_recount_segments(q, req->bio); 303 blk_recount_segments(q, req->bio);
359 if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
360 !BIOVEC_VIRT_OVERSIZE(len)) {
361 int mergeable = ll_new_mergeable(q, req, bio);
362
363 if (mergeable) {
364 if (bio->bi_hw_segments == 1)
365 bio->bi_hw_front_size = len;
366 if (req->nr_hw_segments == 1)
367 req->biotail->bi_hw_back_size = len;
368 }
369 return mergeable;
370 }
371 304
372 return ll_new_hw_segment(q, req, bio); 305 return ll_new_hw_segment(q, req, bio);
373} 306}
@@ -376,7 +309,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
376 struct request *next) 309 struct request *next)
377{ 310{
378 int total_phys_segments; 311 int total_phys_segments;
379 int total_hw_segments;
380 312
 381 /* 313 /*
 382 * First check if either of the requests is re-queued 314 * First check if either of the requests is re-queued
@@ -398,26 +330,11 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
398 if (total_phys_segments > q->max_phys_segments) 330 if (total_phys_segments > q->max_phys_segments)
399 return 0; 331 return 0;
400 332
401 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; 333 if (total_phys_segments > q->max_hw_segments)
402 if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
403 int len = req->biotail->bi_hw_back_size +
404 next->bio->bi_hw_front_size;
405 /*
406 * propagate the combined length to the end of the requests
407 */
408 if (req->nr_hw_segments == 1)
409 req->bio->bi_hw_front_size = len;
410 if (next->nr_hw_segments == 1)
411 next->biotail->bi_hw_back_size = len;
412 total_hw_segments--;
413 }
414
415 if (total_hw_segments > q->max_hw_segments)
416 return 0; 334 return 0;
417 335
418 /* Merge is OK... */ 336 /* Merge is OK... */
419 req->nr_phys_segments = total_phys_segments; 337 req->nr_phys_segments = total_phys_segments;
420 req->nr_hw_segments = total_hw_segments;
421 return 1; 338 return 1;
422} 339}
423 340
@@ -470,17 +387,21 @@ static int attempt_merge(struct request_queue *q, struct request *req,
470 elv_merge_requests(q, req, next); 387 elv_merge_requests(q, req, next);
471 388
472 if (req->rq_disk) { 389 if (req->rq_disk) {
473 struct hd_struct *part 390 struct hd_struct *part;
474 = get_part(req->rq_disk, req->sector); 391 int cpu;
475 disk_round_stats(req->rq_disk); 392
476 req->rq_disk->in_flight--; 393 cpu = part_stat_lock();
477 if (part) { 394 part = disk_map_sector_rcu(req->rq_disk, req->sector);
478 part_round_stats(part); 395
479 part->in_flight--; 396 part_round_stats(cpu, part);
480 } 397 part_dec_in_flight(part);
398
399 part_stat_unlock();
481 } 400 }
482 401
483 req->ioprio = ioprio_best(req->ioprio, next->ioprio); 402 req->ioprio = ioprio_best(req->ioprio, next->ioprio);
403 if (blk_rq_cpu_valid(next))
404 req->cpu = next->cpu;
484 405
485 __blk_put_request(q, next); 406 __blk_put_request(q, next);
486 return 1; 407 return 1;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dfc77012843f..b21dcdb64151 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -33,6 +33,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
33EXPORT_SYMBOL(blk_queue_prep_rq); 33EXPORT_SYMBOL(blk_queue_prep_rq);
34 34
35/** 35/**
 36 * blk_queue_set_discard - set a prepare_discard function for queue
37 * @q: queue
38 * @dfn: prepare_discard function
39 *
40 * It's possible for a queue to register a discard callback which is used
41 * to transform a discard request into the appropriate type for the
42 * hardware. If none is registered, then discard requests are failed
43 * with %EOPNOTSUPP.
44 *
45 */
46void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
47{
48 q->prepare_discard_fn = dfn;
49}
50EXPORT_SYMBOL(blk_queue_set_discard);
51
52/**
36 * blk_queue_merge_bvec - set a merge_bvec function for queue 53 * blk_queue_merge_bvec - set a merge_bvec function for queue
37 * @q: queue 54 * @q: queue
38 * @mbfn: merge_bvec_fn 55 * @mbfn: merge_bvec_fn
@@ -60,6 +77,24 @@ void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
60} 77}
61EXPORT_SYMBOL(blk_queue_softirq_done); 78EXPORT_SYMBOL(blk_queue_softirq_done);
62 79
80void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
81{
82 q->rq_timeout = timeout;
83}
84EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
85
86void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
87{
88 q->rq_timed_out_fn = fn;
89}
90EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
91
92void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
93{
94 q->lld_busy_fn = fn;
95}
96EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
97
63/** 98/**
64 * blk_queue_make_request - define an alternate make_request function for a device 99 * blk_queue_make_request - define an alternate make_request function for a device
65 * @q: the request queue for the device to be affected 100 * @q: the request queue for the device to be affected
@@ -127,7 +162,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
127 * Different hardware can have different requirements as to what pages 162 * Different hardware can have different requirements as to what pages
128 * it can do I/O directly to. A low level driver can call 163 * it can do I/O directly to. A low level driver can call
129 * blk_queue_bounce_limit to have lower memory pages allocated as bounce 164 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
130 * buffers for doing I/O to pages residing above @page. 165 * buffers for doing I/O to pages residing above @dma_addr.
131 **/ 166 **/
132void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) 167void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
133{ 168{
@@ -212,7 +247,7 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
212 * Description: 247 * Description:
213 * Enables a low level driver to set an upper limit on the number of 248 * Enables a low level driver to set an upper limit on the number of
214 * hw data segments in a request. This would be the largest number of 249 * hw data segments in a request. This would be the largest number of
215 * address/length pairs the host adapter can actually give as once 250 * address/length pairs the host adapter can actually give at once
216 * to the device. 251 * to the device.
217 **/ 252 **/
218void blk_queue_max_hw_segments(struct request_queue *q, 253void blk_queue_max_hw_segments(struct request_queue *q,
@@ -393,7 +428,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
393 * @mask: alignment mask 428 * @mask: alignment mask
394 * 429 *
395 * description: 430 * description:
396 * set required memory and length aligment for direct dma transactions. 431 * set required memory and length alignment for direct dma transactions.
 397 * this is used when building direct io requests for the queue. 432 * this is used when building direct io requests for the queue.
398 * 433 *
399 **/ 434 **/
@@ -409,7 +444,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
409 * @mask: alignment mask 444 * @mask: alignment mask
410 * 445 *
411 * description: 446 * description:
412 * update required memory and length aligment for direct dma transactions. 447 * update required memory and length alignment for direct dma transactions.
413 * If the requested alignment is larger than the current alignment, then 448 * If the requested alignment is larger than the current alignment, then
414 * the current queue alignment is updated to the new value, otherwise it 449 * the current queue alignment is updated to the new value, otherwise it
415 * is left alone. The design of this is to allow multiple objects 450 * is left alone. The design of this is to allow multiple objects
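blk_queue_set_discard() gives a driver the hook described in the comment above; a hedged registration sketch, assuming the prepare_discard_fn prototype takes the queue and the request and returns 0 on success (driver names hypothetical):

#include <linux/blkdev.h>

/*
 * Hypothetical hook: translate the discard range carried by rq into
 * whatever command the hardware actually understands.
 */
static int mydrv_prepare_discard(struct request_queue *q, struct request *rq)
{
        /* driver-specific translation of the discard goes here */
        return 0;
}

static void mydrv_setup_queue(struct request_queue *q)
{
        /* Without a registered callback, discards fail with -EOPNOTSUPP. */
        blk_queue_set_discard(q, mydrv_prepare_discard);
}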
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
new file mode 100644
index 000000000000..e660d26ca656
--- /dev/null
+++ b/block/blk-softirq.c
@@ -0,0 +1,175 @@
1/*
2 * Functions related to softirq rq completions
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/init.h>
7#include <linux/bio.h>
8#include <linux/blkdev.h>
9#include <linux/interrupt.h>
10#include <linux/cpu.h>
11
12#include "blk.h"
13
14static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
15
16/*
17 * Softirq action handler - move entries to local list and loop over them
18 * while passing them to the queue registered handler.
19 */
20static void blk_done_softirq(struct softirq_action *h)
21{
22 struct list_head *cpu_list, local_list;
23
24 local_irq_disable();
25 cpu_list = &__get_cpu_var(blk_cpu_done);
26 list_replace_init(cpu_list, &local_list);
27 local_irq_enable();
28
29 while (!list_empty(&local_list)) {
30 struct request *rq;
31
32 rq = list_entry(local_list.next, struct request, csd.list);
33 list_del_init(&rq->csd.list);
34 rq->q->softirq_done_fn(rq);
35 }
36}
37
38#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
39static void trigger_softirq(void *data)
40{
41 struct request *rq = data;
42 unsigned long flags;
43 struct list_head *list;
44
45 local_irq_save(flags);
46 list = &__get_cpu_var(blk_cpu_done);
47 list_add_tail(&rq->csd.list, list);
48
49 if (list->next == &rq->csd.list)
50 raise_softirq_irqoff(BLOCK_SOFTIRQ);
51
52 local_irq_restore(flags);
53}
54
55/*
 56 * Set up and invoke a run of 'trigger_softirq' on the given cpu.
57 */
58static int raise_blk_irq(int cpu, struct request *rq)
59{
60 if (cpu_online(cpu)) {
61 struct call_single_data *data = &rq->csd;
62
63 data->func = trigger_softirq;
64 data->info = rq;
65 data->flags = 0;
66
67 __smp_call_function_single(cpu, data);
68 return 0;
69 }
70
71 return 1;
72}
73#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
74static int raise_blk_irq(int cpu, struct request *rq)
75{
76 return 1;
77}
78#endif
79
80static int __cpuinit blk_cpu_notify(struct notifier_block *self,
81 unsigned long action, void *hcpu)
82{
83 /*
84 * If a CPU goes away, splice its entries to the current CPU
85 * and trigger a run of the softirq
86 */
87 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
88 int cpu = (unsigned long) hcpu;
89
90 local_irq_disable();
91 list_splice_init(&per_cpu(blk_cpu_done, cpu),
92 &__get_cpu_var(blk_cpu_done));
93 raise_softirq_irqoff(BLOCK_SOFTIRQ);
94 local_irq_enable();
95 }
96
97 return NOTIFY_OK;
98}
99
100static struct notifier_block __cpuinitdata blk_cpu_notifier = {
101 .notifier_call = blk_cpu_notify,
102};
103
104void __blk_complete_request(struct request *req)
105{
106 struct request_queue *q = req->q;
107 unsigned long flags;
108 int ccpu, cpu, group_cpu;
109
110 BUG_ON(!q->softirq_done_fn);
111
112 local_irq_save(flags);
113 cpu = smp_processor_id();
114 group_cpu = blk_cpu_to_group(cpu);
115
116 /*
117 * Select completion CPU
118 */
119 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
120 ccpu = req->cpu;
121 else
122 ccpu = cpu;
123
124 if (ccpu == cpu || ccpu == group_cpu) {
125 struct list_head *list;
126do_local:
127 list = &__get_cpu_var(blk_cpu_done);
128 list_add_tail(&req->csd.list, list);
129
130 /*
131 * if the list only contains our just added request,
132 * signal a raise of the softirq. If there are already
133 * entries there, someone already raised the irq but it
134 * hasn't run yet.
135 */
136 if (list->next == &req->csd.list)
137 raise_softirq_irqoff(BLOCK_SOFTIRQ);
138 } else if (raise_blk_irq(ccpu, req))
139 goto do_local;
140
141 local_irq_restore(flags);
142}
143
144/**
145 * blk_complete_request - end I/O on a request
146 * @req: the request being processed
147 *
148 * Description:
149 * Ends all I/O on a request. It does not handle partial completions,
150 * unless the driver actually implements this in its completion callback
151 * through requeueing. The actual completion happens out-of-order,
152 * through a softirq handler. The user must have registered a completion
153 * callback through blk_queue_softirq_done().
154 **/
155void blk_complete_request(struct request *req)
156{
157 if (unlikely(blk_should_fake_timeout(req->q)))
158 return;
159 if (!blk_mark_rq_complete(req))
160 __blk_complete_request(req);
161}
162EXPORT_SYMBOL(blk_complete_request);
163
164__init int blk_softirq_init(void)
165{
166 int i;
167
168 for_each_possible_cpu(i)
169 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
170
171 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
172 register_hotcpu_notifier(&blk_cpu_notifier);
173 return 0;
174}
175subsys_initcall(blk_softirq_init);
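The completion path in this new file is driven from two driver-side calls: blk_queue_softirq_done() registers the handler and blk_complete_request() defers the real work to BLOCK_SOFTIRQ. A minimal sketch (driver names hypothetical, error handling elided):

#include <linux/blkdev.h>

/*
 * Runs in softirq context, possibly on the submitting CPU's group when
 * QUEUE_FLAG_SAME_COMP (rq_affinity) is enabled.
 */
static void mydrv_softirq_done(struct request *rq)
{
        /* A real driver would derive the error code from hardware status. */
        blk_end_request(rq, 0, blk_rq_bytes(rq));
}

static void mydrv_setup_queue(struct request_queue *q)
{
        blk_queue_softirq_done(q, mydrv_softirq_done);
}

/* Called from the hard interrupt handler once the hardware is done. */
static void mydrv_hw_complete(struct request *rq)
{
        blk_complete_request(rq);
}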
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 304ec73ab821..21e275d7eed9 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -156,6 +156,30 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
156 return ret; 156 return ret;
157} 157}
158 158
159static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
160{
161 unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
162
163 return queue_var_show(set != 0, page);
164}
165
166static ssize_t
167queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
168{
169 ssize_t ret = -EINVAL;
170#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
171 unsigned long val;
172
173 ret = queue_var_store(&val, page, count);
174 spin_lock_irq(q->queue_lock);
175 if (val)
176 queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
177 else
178 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
179 spin_unlock_irq(q->queue_lock);
180#endif
181 return ret;
182}
159 183
160static struct queue_sysfs_entry queue_requests_entry = { 184static struct queue_sysfs_entry queue_requests_entry = {
 161 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, 185 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -197,6 +221,12 @@ static struct queue_sysfs_entry queue_nomerges_entry = {
197 .store = queue_nomerges_store, 221 .store = queue_nomerges_store,
198}; 222};
199 223
224static struct queue_sysfs_entry queue_rq_affinity_entry = {
225 .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
226 .show = queue_rq_affinity_show,
227 .store = queue_rq_affinity_store,
228};
229
200static struct attribute *default_attrs[] = { 230static struct attribute *default_attrs[] = {
201 &queue_requests_entry.attr, 231 &queue_requests_entry.attr,
202 &queue_ra_entry.attr, 232 &queue_ra_entry.attr,
@@ -205,6 +235,7 @@ static struct attribute *default_attrs[] = {
205 &queue_iosched_entry.attr, 235 &queue_iosched_entry.attr,
206 &queue_hw_sector_size_entry.attr, 236 &queue_hw_sector_size_entry.attr,
207 &queue_nomerges_entry.attr, 237 &queue_nomerges_entry.attr,
238 &queue_rq_affinity_entry.attr,
208 NULL, 239 NULL,
209}; 240};
210 241
@@ -310,7 +341,7 @@ int blk_register_queue(struct gendisk *disk)
310 if (!q->request_fn) 341 if (!q->request_fn)
311 return 0; 342 return 0;
312 343
313 ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj), 344 ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
314 "%s", "queue"); 345 "%s", "queue");
315 if (ret < 0) 346 if (ret < 0)
316 return ret; 347 return ret;
@@ -339,6 +370,6 @@ void blk_unregister_queue(struct gendisk *disk)
339 370
340 kobject_uevent(&q->kobj, KOBJ_REMOVE); 371 kobject_uevent(&q->kobj, KOBJ_REMOVE);
341 kobject_del(&q->kobj); 372 kobject_del(&q->kobj);
342 kobject_put(&disk->dev.kobj); 373 kobject_put(&disk_to_dev(disk)->kobj);
343 } 374 }
344} 375}
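The rq_affinity attribute registered above appears under the per-disk "queue" kobject, so QUEUE_FLAG_SAME_COMP can be toggled from userspace; a small illustrative program (the disk name is of course arbitrary):

#include <stdio.h>

int main(void)
{
        /* Path = /sys/block/<disk>/queue/ plus the "rq_affinity" attr name. */
        FILE *f = fopen("/sys/block/sda/queue/rq_affinity", "w");

        if (!f) {
                perror("rq_affinity");
                return 1;
        }
        fputs("1\n", f);        /* 1 = complete on the submitting CPU's group */
        return fclose(f) ? 1 : 0;
}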
diff --git a/block/blk-tag.c b/block/blk-tag.c
index ed5166fbc599..c0d419e84ce7 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -29,7 +29,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
29 * __blk_free_tags - release a given set of tag maintenance info 29 * __blk_free_tags - release a given set of tag maintenance info
30 * @bqt: the tag map to free 30 * @bqt: the tag map to free
31 * 31 *
32 * Tries to free the specified @bqt@. Returns true if it was 32 * Tries to free the specified @bqt. Returns true if it was
33 * actually freed and false if there are still references using it 33 * actually freed and false if there are still references using it
34 */ 34 */
35static int __blk_free_tags(struct blk_queue_tag *bqt) 35static int __blk_free_tags(struct blk_queue_tag *bqt)
@@ -78,7 +78,7 @@ void __blk_queue_free_tags(struct request_queue *q)
78 * blk_free_tags - release a given set of tag maintenance info 78 * blk_free_tags - release a given set of tag maintenance info
79 * @bqt: the tag map to free 79 * @bqt: the tag map to free
80 * 80 *
81 * For externally managed @bqt@ frees the map. Callers of this 81 * For externally managed @bqt frees the map. Callers of this
82 * function must guarantee to have released all the queues that 82 * function must guarantee to have released all the queues that
83 * might have been using this tag map. 83 * might have been using this tag map.
84 */ 84 */
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(blk_free_tags);
94 * @q: the request queue for the device 94 * @q: the request queue for the device
95 * 95 *
96 * Notes: 96 * Notes:
97 * This is used to disabled tagged queuing to a device, yet leave 97 * This is used to disable tagged queuing to a device, yet leave
98 * queue in function. 98 * queue in function.
99 **/ 99 **/
100void blk_queue_free_tags(struct request_queue *q) 100void blk_queue_free_tags(struct request_queue *q)
@@ -271,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
271 * @rq: the request that has completed 271 * @rq: the request that has completed
272 * 272 *
273 * Description: 273 * Description:
274 * Typically called when end_that_request_first() returns 0, meaning 274 * Typically called when end_that_request_first() returns %0, meaning
275 * all transfers have been done for a request. It's important to call 275 * all transfers have been done for a request. It's important to call
276 * this function before end_that_request_last(), as that will put the 276 * this function before end_that_request_last(), as that will put the
277 * request back on the free list thus corrupting the internal tag list. 277 * request back on the free list thus corrupting the internal tag list.
@@ -337,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
337int blk_queue_start_tag(struct request_queue *q, struct request *rq) 337int blk_queue_start_tag(struct request_queue *q, struct request *rq)
338{ 338{
339 struct blk_queue_tag *bqt = q->queue_tags; 339 struct blk_queue_tag *bqt = q->queue_tags;
340 unsigned max_depth, offset;
340 int tag; 341 int tag;
341 342
342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 343 if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -350,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
350 /* 351 /*
351 * Protect against shared tag maps, as we may not have exclusive 352 * Protect against shared tag maps, as we may not have exclusive
352 * access to the tag map. 353 * access to the tag map.
354 *
 355 * We reserve a few tags just for sync IO, since we don't want
 356 * flooding async IO to starve sync IO.
353 */ 357 */
358 max_depth = bqt->max_depth;
359 if (rq_is_sync(rq))
360 offset = 0;
361 else
362 offset = max_depth >> 2;
363
354 do { 364 do {
355 tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); 365 tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
356 if (tag >= bqt->max_depth) 366 if (tag >= max_depth)
357 return 1; 367 return 1;
358 368
359 } while (test_and_set_bit_lock(tag, bqt->tag_map)); 369 } while (test_and_set_bit_lock(tag, bqt->tag_map));
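The offset logic above reserves the low quarter of the tag space for sync IO: sync requests scan from bit 0, async requests only from max_depth >> 2. Worked through for a 32-deep map (plain userspace C, nothing kernel-specific):

#include <stdio.h>

int main(void)
{
        unsigned int max_depth = 32;                 /* bqt->max_depth */
        unsigned int async_start = max_depth >> 2;   /* offset for !rq_is_sync() */

        printf("sync  IO: tags 0..%u\n", max_depth - 1);
        printf("async IO: tags %u..%u (0..%u reserved for sync)\n",
               async_start, max_depth - 1, async_start - 1);
        return 0;
}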
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
new file mode 100644
index 000000000000..972a63f848fb
--- /dev/null
+++ b/block/blk-timeout.c
@@ -0,0 +1,238 @@
1/*
2 * Functions related to generic timeout handling of requests.
3 */
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/blkdev.h>
7#include <linux/fault-inject.h>
8
9#include "blk.h"
10
11#ifdef CONFIG_FAIL_IO_TIMEOUT
12
13static DECLARE_FAULT_ATTR(fail_io_timeout);
14
15static int __init setup_fail_io_timeout(char *str)
16{
17 return setup_fault_attr(&fail_io_timeout, str);
18}
19__setup("fail_io_timeout=", setup_fail_io_timeout);
20
21int blk_should_fake_timeout(struct request_queue *q)
22{
23 if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
24 return 0;
25
26 return should_fail(&fail_io_timeout, 1);
27}
28
29static int __init fail_io_timeout_debugfs(void)
30{
31 return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
32}
33
34late_initcall(fail_io_timeout_debugfs);
35
36ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
37 char *buf)
38{
39 struct gendisk *disk = dev_to_disk(dev);
40 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
41
42 return sprintf(buf, "%d\n", set != 0);
43}
44
45ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
46 const char *buf, size_t count)
47{
48 struct gendisk *disk = dev_to_disk(dev);
49 int val;
50
51 if (count) {
52 struct request_queue *q = disk->queue;
53 char *p = (char *) buf;
54
55 val = simple_strtoul(p, &p, 10);
56 spin_lock_irq(q->queue_lock);
57 if (val)
58 queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
59 else
60 queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
61 spin_unlock_irq(q->queue_lock);
62 }
63
64 return count;
65}
66
67#endif /* CONFIG_FAIL_IO_TIMEOUT */
68
69/*
 70 * blk_delete_timer - Delete/cancel timer for a given request.
71 * @req: request that we are canceling timer for
72 *
73 */
74void blk_delete_timer(struct request *req)
75{
76 struct request_queue *q = req->q;
77
78 /*
79 * Nothing to detach
80 */
81 if (!q->rq_timed_out_fn || !req->deadline)
82 return;
83
84 list_del_init(&req->timeout_list);
85
86 if (list_empty(&q->timeout_list))
87 del_timer(&q->timeout);
88}
89
90static void blk_rq_timed_out(struct request *req)
91{
92 struct request_queue *q = req->q;
93 enum blk_eh_timer_return ret;
94
95 ret = q->rq_timed_out_fn(req);
96 switch (ret) {
97 case BLK_EH_HANDLED:
98 __blk_complete_request(req);
99 break;
100 case BLK_EH_RESET_TIMER:
101 blk_clear_rq_complete(req);
102 blk_add_timer(req);
103 break;
104 case BLK_EH_NOT_HANDLED:
105 /*
106 * LLD handles this for now but in the future
107 * we can send a request msg to abort the command
108 * and we can move more of the generic scsi eh code to
109 * the blk layer.
110 */
111 break;
112 default:
113 printk(KERN_ERR "block: bad eh return: %d\n", ret);
114 break;
115 }
116}
117
118void blk_rq_timed_out_timer(unsigned long data)
119{
120 struct request_queue *q = (struct request_queue *) data;
121 unsigned long flags, uninitialized_var(next), next_set = 0;
122 struct request *rq, *tmp;
123
124 spin_lock_irqsave(q->queue_lock, flags);
125
126 list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
127 if (time_after_eq(jiffies, rq->deadline)) {
128 list_del_init(&rq->timeout_list);
129
130 /*
131 * Check if we raced with end io completion
132 */
133 if (blk_mark_rq_complete(rq))
134 continue;
135 blk_rq_timed_out(rq);
136 }
137 if (!next_set) {
138 next = rq->deadline;
139 next_set = 1;
140 } else if (time_after(next, rq->deadline))
141 next = rq->deadline;
142 }
143
144 if (next_set && !list_empty(&q->timeout_list))
145 mod_timer(&q->timeout, round_jiffies(next));
146
147 spin_unlock_irqrestore(q->queue_lock, flags);
148}
149
150/**
 151 * blk_abort_request -- Request recovery for the specified command
152 * @req: pointer to the request of interest
153 *
154 * This function requests that the block layer start recovery for the
155 * request by deleting the timer and calling the q's timeout function.
156 * LLDDs who implement their own error recovery MAY ignore the timeout
 157 * event if they initiated the abort via blk_abort_request(). Must hold queue lock.
158 */
159void blk_abort_request(struct request *req)
160{
161 if (blk_mark_rq_complete(req))
162 return;
163 blk_delete_timer(req);
164 blk_rq_timed_out(req);
165}
166EXPORT_SYMBOL_GPL(blk_abort_request);
167
168/**
169 * blk_add_timer - Start timeout timer for a single request
170 * @req: request that is about to start running.
171 *
172 * Notes:
173 * Each request has its own timer, and as it is added to the queue, we
174 * set up the timer. When the request completes, we cancel the timer.
175 */
176void blk_add_timer(struct request *req)
177{
178 struct request_queue *q = req->q;
179 unsigned long expiry;
180
181 if (!q->rq_timed_out_fn)
182 return;
183
184 BUG_ON(!list_empty(&req->timeout_list));
185 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
186
187 if (req->timeout)
188 req->deadline = jiffies + req->timeout;
189 else {
190 req->deadline = jiffies + q->rq_timeout;
191 /*
192 * Some LLDs, like scsi, peek at the timeout to prevent
193 * a command from being retried forever.
194 */
195 req->timeout = q->rq_timeout;
196 }
197 list_add_tail(&req->timeout_list, &q->timeout_list);
198
199 /*
200 * If the timer isn't already pending or this timeout is earlier
201 * than an existing one, modify the timer. Round to next nearest
202 * second.
203 */
204 expiry = round_jiffies(req->deadline);
205
206 /*
207 * We use ->deadline == 0 to detect whether a timer was added or
208 * not, so just increase to next jiffy for that specific case
209 */
210 if (unlikely(!req->deadline))
211 req->deadline = 1;
212
213 if (!timer_pending(&q->timeout) ||
214 time_before(expiry, q->timeout.expires))
215 mod_timer(&q->timeout, expiry);
216}
217
218/**
 219 * blk_abort_queue -- Abort all requests on a given queue
220 * @queue: pointer to queue
221 *
222 */
223void blk_abort_queue(struct request_queue *q)
224{
225 unsigned long flags;
226 struct request *rq, *tmp;
227
228 spin_lock_irqsave(q->queue_lock, flags);
229
230 elv_abort_queue(q);
231
232 list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
233 blk_abort_request(rq);
234
235 spin_unlock_irqrestore(q->queue_lock, flags);
236
237}
238EXPORT_SYMBOL_GPL(blk_abort_queue);
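A low-level driver opts into this timeout machinery by giving its queue a default timeout and a timed-out callback; a hedged sketch, returning the enum blk_eh_timer_return values handled by blk_rq_timed_out() above (driver names hypothetical):

#include <linux/blkdev.h>

static bool mydrv_command_still_running(struct request *rq)
{
        return false;   /* hypothetical hardware query */
}

static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
        /* Hardware is merely slow: re-arm the timer and keep waiting. */
        if (mydrv_command_still_running(rq))
                return BLK_EH_RESET_TIMER;

        /* We recovered the command; __blk_complete_request() finishes it. */
        return BLK_EH_HANDLED;
}

static void mydrv_setup_queue(struct request_queue *q)
{
        blk_queue_rq_timeout(q, 30 * HZ);       /* default per-request deadline */
        blk_queue_rq_timed_out(q, mydrv_timed_out);
}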
diff --git a/block/blk.h b/block/blk.h
index c79f30e1df52..e5c579769963 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,42 @@ void __blk_queue_free_tags(struct request_queue *q);
17 17
18void blk_unplug_work(struct work_struct *work); 18void blk_unplug_work(struct work_struct *work);
19void blk_unplug_timeout(unsigned long data); 19void blk_unplug_timeout(unsigned long data);
20void blk_rq_timed_out_timer(unsigned long data);
21void blk_delete_timer(struct request *);
22void blk_add_timer(struct request *);
23
24/*
25 * Internal atomic flags for request handling
26 */
27enum rq_atomic_flags {
28 REQ_ATOM_COMPLETE = 0,
29};
30
31/*
32 * EH timer and IO completion will both attempt to 'grab' the request, make
33 * sure that only one of them suceeds
34 */
35static inline int blk_mark_rq_complete(struct request *rq)
36{
37 return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
38}
39
40static inline void blk_clear_rq_complete(struct request *rq)
41{
42 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
43}
44
45#ifdef CONFIG_FAIL_IO_TIMEOUT
46int blk_should_fake_timeout(struct request_queue *);
47ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
48ssize_t part_timeout_store(struct device *, struct device_attribute *,
49 const char *, size_t);
50#else
51static inline int blk_should_fake_timeout(struct request_queue *q)
52{
53 return 0;
54}
55#endif
20 56
21struct io_context *current_io_context(gfp_t gfp_flags, int node); 57struct io_context *current_io_context(gfp_t gfp_flags, int node);
22 58
@@ -59,4 +95,16 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
59 95
60#endif /* BLK_DEV_INTEGRITY */ 96#endif /* BLK_DEV_INTEGRITY */
61 97
98static inline int blk_cpu_to_group(int cpu)
99{
100#ifdef CONFIG_SCHED_MC
101 cpumask_t mask = cpu_coregroup_map(cpu);
102 return first_cpu(mask);
103#elif defined(CONFIG_SCHED_SMT)
104 return first_cpu(per_cpu(cpu_sibling_map, cpu));
105#else
106 return cpu;
107#endif
108}
109
62#endif 110#endif
diff --git a/block/blktrace.c b/block/blktrace.c
index eb9651ccb241..85049a7e7a17 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -111,23 +111,9 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
111 */ 111 */
112static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) }; 112static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
113 113
114/* 114/* The ilog2() calls fall out because they're constant */
115 * Bio action bits of interest 115#define MASK_TC_BIT(rw, __name) ( (rw & (1 << BIO_RW_ ## __name)) << \
116 */ 116 (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name) )
117static u32 bio_act[9] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0, BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0, BLK_TC_ACT(BLK_TC_META) };
118
119/*
120 * More could be added as needed, taking care to increment the decrementer
121 * to get correct indexing
122 */
123#define trace_barrier_bit(rw) \
124 (((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
125#define trace_sync_bit(rw) \
126 (((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
127#define trace_ahead_bit(rw) \
128 (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
129#define trace_meta_bit(rw) \
130 (((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))
131 117
132/* 118/*
133 * The worker for the various blk_add_trace*() types. Fills out a 119 * The worker for the various blk_add_trace*() types. Fills out a
@@ -147,10 +133,11 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
147 return; 133 return;
148 134
149 what |= ddir_act[rw & WRITE]; 135 what |= ddir_act[rw & WRITE];
150 what |= bio_act[trace_barrier_bit(rw)]; 136 what |= MASK_TC_BIT(rw, BARRIER);
151 what |= bio_act[trace_sync_bit(rw)]; 137 what |= MASK_TC_BIT(rw, SYNC);
152 what |= bio_act[trace_ahead_bit(rw)]; 138 what |= MASK_TC_BIT(rw, AHEAD);
153 what |= bio_act[trace_meta_bit(rw)]; 139 what |= MASK_TC_BIT(rw, META);
140 what |= MASK_TC_BIT(rw, DISCARD);
154 141
155 pid = tsk->pid; 142 pid = tsk->pid;
156 if (unlikely(act_log_check(bt, what, sector, pid))) 143 if (unlikely(act_log_check(bt, what, sector, pid)))
@@ -382,7 +369,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
382 if (!buts->buf_size || !buts->buf_nr) 369 if (!buts->buf_size || !buts->buf_nr)
383 return -EINVAL; 370 return -EINVAL;
384 371
385 strcpy(buts->name, name); 372 strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
373 buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
386 374
387 /* 375 /*
 388 * some device names have larger paths - convert the slashes 376 * some device names have larger paths - convert the slashes
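The MASK_TC_BIT() macro above replaces the old per-bit lookup tables with a single shift whose amount is a compile-time constant. Spelled out for the SYNC bit, the algebra collapses as follows (a derivation in comment form, not code to paste anywhere):

/*
 * Assume rw has BIO_RW_SYNC set, so rw & (1 << BIO_RW_SYNC) == 1 << BIO_RW_SYNC.
 *
 *   MASK_TC_BIT(rw, SYNC)
 *     = (1 << BIO_RW_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - BIO_RW_SYNC)
 *     = 1 << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT)
 *     = BLK_TC_SYNC << BLK_TC_SHIFT
 *     = BLK_TC_ACT(BLK_TC_SYNC)
 *
 * which is exactly what the removed bio_act[trace_sync_bit(rw)] lookup
 * produced; if the bit is clear, the whole expression is 0.
 */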
diff --git a/block/bsg.c b/block/bsg.c
index 0aae8d7ba99c..56cb343c76d8 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -283,7 +283,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
283 next_rq->cmd_type = rq->cmd_type; 283 next_rq->cmd_type = rq->cmd_type;
284 284
285 dxferp = (void*)(unsigned long)hdr->din_xferp; 285 dxferp = (void*)(unsigned long)hdr->din_xferp;
286 ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len); 286 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
287 hdr->din_xfer_len, GFP_KERNEL);
287 if (ret) 288 if (ret)
288 goto out; 289 goto out;
289 } 290 }
@@ -298,7 +299,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
298 dxfer_len = 0; 299 dxfer_len = 0;
299 300
300 if (dxfer_len) { 301 if (dxfer_len) {
301 ret = blk_rq_map_user(q, rq, dxferp, dxfer_len); 302 ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
303 GFP_KERNEL);
302 if (ret) 304 if (ret)
303 goto out; 305 goto out;
304 } 306 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1e2aff812ee2..6a062eebbd15 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -39,6 +39,7 @@ static int cfq_slice_idle = HZ / 125;
39#define CFQ_MIN_TT (2) 39#define CFQ_MIN_TT (2)
40 40
41#define CFQ_SLICE_SCALE (5) 41#define CFQ_SLICE_SCALE (5)
42#define CFQ_HW_QUEUE_MIN (5)
42 43
43#define RQ_CIC(rq) \ 44#define RQ_CIC(rq) \
44 ((struct cfq_io_context *) (rq)->elevator_private) 45 ((struct cfq_io_context *) (rq)->elevator_private)
@@ -86,7 +87,14 @@ struct cfq_data {
86 87
87 int rq_in_driver; 88 int rq_in_driver;
88 int sync_flight; 89 int sync_flight;
90
91 /*
92 * queue-depth detection
93 */
94 int rq_queued;
89 int hw_tag; 95 int hw_tag;
96 int hw_tag_samples;
97 int rq_in_driver_peak;
90 98
91 /* 99 /*
92 * idle window management 100 * idle window management
@@ -244,7 +252,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
244{ 252{
245 if (cfqd->busy_queues) { 253 if (cfqd->busy_queues) {
246 cfq_log(cfqd, "schedule dispatch"); 254 cfq_log(cfqd, "schedule dispatch");
247 kblockd_schedule_work(&cfqd->unplug_work); 255 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
248 } 256 }
249} 257}
250 258
@@ -654,15 +662,6 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
654 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 662 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
655 cfqd->rq_in_driver); 663 cfqd->rq_in_driver);
656 664
657 /*
658 * If the depth is larger 1, it really could be queueing. But lets
659 * make the mark a little higher - idling could still be good for
660 * low queueing, and a low queueing number could also just indicate
661 * a SCSI mid layer like behaviour where limit+1 is often seen.
662 */
663 if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
664 cfqd->hw_tag = 1;
665
666 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 665 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
667} 666}
668 667
@@ -686,6 +685,7 @@ static void cfq_remove_request(struct request *rq)
686 list_del_init(&rq->queuelist); 685 list_del_init(&rq->queuelist);
687 cfq_del_rq_rb(rq); 686 cfq_del_rq_rb(rq);
688 687
688 cfqq->cfqd->rq_queued--;
689 if (rq_is_meta(rq)) { 689 if (rq_is_meta(rq)) {
690 WARN_ON(!cfqq->meta_pending); 690 WARN_ON(!cfqq->meta_pending);
691 cfqq->meta_pending--; 691 cfqq->meta_pending--;
@@ -878,6 +878,14 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
878 struct cfq_io_context *cic; 878 struct cfq_io_context *cic;
879 unsigned long sl; 879 unsigned long sl;
880 880
881 /*
882 * SSD device without seek penalty, disable idling. But only do so
883 * for devices that support queuing, otherwise we still have a problem
884 * with sync vs async workloads.
885 */
886 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
887 return;
888
881 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); 889 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
882 WARN_ON(cfq_cfqq_slice_new(cfqq)); 890 WARN_ON(cfq_cfqq_slice_new(cfqq));
883 891
@@ -1833,6 +1841,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1833{ 1841{
1834 struct cfq_io_context *cic = RQ_CIC(rq); 1842 struct cfq_io_context *cic = RQ_CIC(rq);
1835 1843
1844 cfqd->rq_queued++;
1836 if (rq_is_meta(rq)) 1845 if (rq_is_meta(rq))
1837 cfqq->meta_pending++; 1846 cfqq->meta_pending++;
1838 1847
@@ -1880,6 +1889,31 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
1880 cfq_rq_enqueued(cfqd, cfqq, rq); 1889 cfq_rq_enqueued(cfqd, cfqq, rq);
1881} 1890}
1882 1891
1892/*
1893 * Update hw_tag based on peak queue depth over 50 samples under
1894 * sufficient load.
1895 */
1896static void cfq_update_hw_tag(struct cfq_data *cfqd)
1897{
1898 if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
1899 cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
1900
1901 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
1902 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
1903 return;
1904
1905 if (cfqd->hw_tag_samples++ < 50)
1906 return;
1907
1908 if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
1909 cfqd->hw_tag = 1;
1910 else
1911 cfqd->hw_tag = 0;
1912
1913 cfqd->hw_tag_samples = 0;
1914 cfqd->rq_in_driver_peak = 0;
1915}
1916
1883static void cfq_completed_request(struct request_queue *q, struct request *rq) 1917static void cfq_completed_request(struct request_queue *q, struct request *rq)
1884{ 1918{
1885 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1919 struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1890,6 +1924,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
1890 now = jiffies; 1924 now = jiffies;
1891 cfq_log_cfqq(cfqd, cfqq, "complete"); 1925 cfq_log_cfqq(cfqd, cfqq, "complete");
1892 1926
1927 cfq_update_hw_tag(cfqd);
1928
1893 WARN_ON(!cfqd->rq_in_driver); 1929 WARN_ON(!cfqd->rq_in_driver);
1894 WARN_ON(!cfqq->dispatched); 1930 WARN_ON(!cfqq->dispatched);
1895 cfqd->rq_in_driver--; 1931 cfqd->rq_in_driver--;
@@ -2200,6 +2236,7 @@ static void *cfq_init_queue(struct request_queue *q)
2200 cfqd->cfq_slice[1] = cfq_slice_sync; 2236 cfqd->cfq_slice[1] = cfq_slice_sync;
2201 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2237 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2202 cfqd->cfq_slice_idle = cfq_slice_idle; 2238 cfqd->cfq_slice_idle = cfq_slice_idle;
2239 cfqd->hw_tag = 1;
2203 2240
2204 return cfqd; 2241 return cfqd;
2205} 2242}
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
index 79c14996ac11..e669aed4c6bc 100644
--- a/block/cmd-filter.c
+++ b/block/cmd-filter.c
@@ -211,14 +211,10 @@ int blk_register_filter(struct gendisk *disk)
211{ 211{
212 int ret; 212 int ret;
213 struct blk_cmd_filter *filter = &disk->queue->cmd_filter; 213 struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
214 struct kobject *parent = kobject_get(disk->holder_dir->parent);
215 214
216 if (!parent) 215 ret = kobject_init_and_add(&filter->kobj, &rcf_ktype,
217 return -ENODEV; 216 &disk_to_dev(disk)->kobj,
218
219 ret = kobject_init_and_add(&filter->kobj, &rcf_ktype, parent,
220 "%s", "cmd_filter"); 217 "%s", "cmd_filter");
221
222 if (ret < 0) 218 if (ret < 0)
223 return ret; 219 return ret;
224 220
@@ -231,7 +227,6 @@ void blk_unregister_filter(struct gendisk *disk)
231 struct blk_cmd_filter *filter = &disk->queue->cmd_filter; 227 struct blk_cmd_filter *filter = &disk->queue->cmd_filter;
232 228
233 kobject_put(&filter->kobj); 229 kobject_put(&filter->kobj);
234 kobject_put(disk->holder_dir->parent);
235} 230}
236EXPORT_SYMBOL(blk_unregister_filter); 231EXPORT_SYMBOL(blk_unregister_filter);
237#endif 232#endif
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index c23177e4623f..1e559fba7bdf 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -788,6 +788,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
788 return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); 788 return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
789 case BLKFLSBUF: 789 case BLKFLSBUF:
790 case BLKROSET: 790 case BLKROSET:
791 case BLKDISCARD:
791 /* 792 /*
792 * the ones below are implemented in blkdev_locked_ioctl, 793 * the ones below are implemented in blkdev_locked_ioctl,
793 * but we call blkdev_ioctl, which gets the lock for us 794 * but we call blkdev_ioctl, which gets the lock for us
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 342448c3d2dd..fd311179f44c 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -33,7 +33,7 @@ struct deadline_data {
33 */ 33 */
34 struct rb_root sort_list[2]; 34 struct rb_root sort_list[2];
35 struct list_head fifo_list[2]; 35 struct list_head fifo_list[2];
36 36
37 /* 37 /*
38 * next in sort order. read, write or both are NULL 38 * next in sort order. read, write or both are NULL
39 */ 39 */
@@ -53,7 +53,11 @@ struct deadline_data {
53 53
54static void deadline_move_request(struct deadline_data *, struct request *); 54static void deadline_move_request(struct deadline_data *, struct request *);
55 55
56#define RQ_RB_ROOT(dd, rq) (&(dd)->sort_list[rq_data_dir((rq))]) 56static inline struct rb_root *
57deadline_rb_root(struct deadline_data *dd, struct request *rq)
58{
59 return &dd->sort_list[rq_data_dir(rq)];
60}
57 61
58/* 62/*
59 * get the request after `rq' in sector-sorted order 63 * get the request after `rq' in sector-sorted order
@@ -72,15 +76,11 @@ deadline_latter_request(struct request *rq)
72static void 76static void
73deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) 77deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
74{ 78{
75 struct rb_root *root = RQ_RB_ROOT(dd, rq); 79 struct rb_root *root = deadline_rb_root(dd, rq);
76 struct request *__alias; 80 struct request *__alias;
77 81
78retry: 82 while (unlikely(__alias = elv_rb_add(root, rq)))
79 __alias = elv_rb_add(root, rq);
80 if (unlikely(__alias)) {
81 deadline_move_request(dd, __alias); 83 deadline_move_request(dd, __alias);
82 goto retry;
83 }
84} 84}
85 85
86static inline void 86static inline void
@@ -91,7 +91,7 @@ deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
91 if (dd->next_rq[data_dir] == rq) 91 if (dd->next_rq[data_dir] == rq)
92 dd->next_rq[data_dir] = deadline_latter_request(rq); 92 dd->next_rq[data_dir] = deadline_latter_request(rq);
93 93
94 elv_rb_del(RQ_RB_ROOT(dd, rq), rq); 94 elv_rb_del(deadline_rb_root(dd, rq), rq);
95} 95}
96 96
97/* 97/*
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
106 deadline_add_rq_rb(dd, rq); 106 deadline_add_rq_rb(dd, rq);
107 107
108 /* 108 /*
109 * set expire time (only used for reads) and add to fifo list 109 * set expire time and add to fifo list
110 */ 110 */
111 rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]); 111 rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
112 list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]); 112 list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
@@ -162,7 +162,7 @@ static void deadline_merged_request(struct request_queue *q,
162 * if the merge was a front merge, we need to reposition request 162 * if the merge was a front merge, we need to reposition request
163 */ 163 */
164 if (type == ELEVATOR_FRONT_MERGE) { 164 if (type == ELEVATOR_FRONT_MERGE) {
165 elv_rb_del(RQ_RB_ROOT(dd, req), req); 165 elv_rb_del(deadline_rb_root(dd, req), req);
166 deadline_add_rq_rb(dd, req); 166 deadline_add_rq_rb(dd, req);
167 } 167 }
168} 168}
@@ -212,7 +212,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
212 dd->next_rq[WRITE] = NULL; 212 dd->next_rq[WRITE] = NULL;
213 dd->next_rq[data_dir] = deadline_latter_request(rq); 213 dd->next_rq[data_dir] = deadline_latter_request(rq);
214 214
215 dd->last_sector = rq->sector + rq->nr_sectors; 215 dd->last_sector = rq_end_sector(rq);
216 216
217 /* 217 /*
218 * take it off the sort and fifo list, move 218 * take it off the sort and fifo list, move
@@ -222,7 +222,7 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
222} 222}
223 223
224/* 224/*
225 * deadline_check_fifo returns 0 if there are no expired reads on the fifo, 225 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
226 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) 226 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
227 */ 227 */
228static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) 228static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
@@ -258,17 +258,9 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
258 else 258 else
259 rq = dd->next_rq[READ]; 259 rq = dd->next_rq[READ];
260 260
261 if (rq) { 261 if (rq && dd->batching < dd->fifo_batch)
262 /* we have a "next request" */ 262 /* we have a next request and are still entitled to batch */
263 263 goto dispatch_request;
264 if (dd->last_sector != rq->sector)
265 /* end the batch on a non sequential request */
266 dd->batching += dd->fifo_batch;
267
268 if (dd->batching < dd->fifo_batch)
269 /* we are still entitled to batch */
270 goto dispatch_request;
271 }
272 264
273 /* 265 /*
274 * at this point we are not running a batch. select the appropriate 266 * at this point we are not running a batch. select the appropriate
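
The deadline_add_rq_rb() hunk above replaces a goto-based retry with a plain loop: elv_rb_add() hands back any request that already occupies the same sector key (an "alias"), the alias is dispatched, and the insert is retried until it succeeds. A stand-alone sketch of that control-flow shape, using a hypothetical insert() in place of the rb-tree helper (nothing here is kernel API, it only mirrors the loop structure):

#include <stdio.h>

#define NSLOTS 8

static int table[NSLOTS];               /* 0 means "empty slot" */

/* Hypothetical insert: returns a colliding value (and evicts it), or 0. */
static int insert(int key)
{
        int slot = key % NSLOTS;
        int alias = table[slot];

        if (alias) {
                table[slot] = 0;        /* evict so the retry can succeed */
                return alias;
        }
        table[slot] = key;
        return 0;
}

static void dispatch(int alias)
{
        printf("dispatching alias %d\n", alias);
}

/* Same shape as the new deadline_add_rq_rb(): loop until no alias remains. */
static void add_with_retry(int key)
{
        int alias;

        while ((alias = insert(key)) != 0)
                dispatch(alias);
}

int main(void)
{
        insert(3);
        add_with_retry(11);             /* collides with 3: dispatch it, then insert */
        return 0;
}
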
diff --git a/block/elevator.c b/block/elevator.c
index ed6f8f32d27e..04518921db31 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -34,8 +34,9 @@
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/blktrace_api.h> 35#include <linux/blktrace_api.h>
36#include <linux/hash.h> 36#include <linux/hash.h>
37#include <linux/uaccess.h>
37 38
38#include <asm/uaccess.h> 39#include "blk.h"
39 40
40static DEFINE_SPINLOCK(elv_list_lock); 41static DEFINE_SPINLOCK(elv_list_lock);
41static LIST_HEAD(elv_list); 42static LIST_HEAD(elv_list);
@@ -75,6 +76,12 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
75 return 0; 76 return 0;
76 77
77 /* 78 /*
79 * Don't merge file system requests and discard requests
80 */
81 if (bio_discard(bio) != bio_discard(rq->bio))
82 return 0;
83
84 /*
78 * different data direction or already started, don't merge 85 * different data direction or already started, don't merge
79 */ 86 */
80 if (bio_data_dir(bio) != rq_data_dir(rq)) 87 if (bio_data_dir(bio) != rq_data_dir(rq))
@@ -438,6 +445,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
438 list_for_each_prev(entry, &q->queue_head) { 445 list_for_each_prev(entry, &q->queue_head) {
439 struct request *pos = list_entry_rq(entry); 446 struct request *pos = list_entry_rq(entry);
440 447
448 if (blk_discard_rq(rq) != blk_discard_rq(pos))
449 break;
441 if (rq_data_dir(rq) != rq_data_dir(pos)) 450 if (rq_data_dir(rq) != rq_data_dir(pos))
442 break; 451 break;
443 if (pos->cmd_flags & stop_flags) 452 if (pos->cmd_flags & stop_flags)
@@ -607,7 +616,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
607 break; 616 break;
608 617
609 case ELEVATOR_INSERT_SORT: 618 case ELEVATOR_INSERT_SORT:
610 BUG_ON(!blk_fs_request(rq)); 619 BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
611 rq->cmd_flags |= REQ_SORTED; 620 rq->cmd_flags |= REQ_SORTED;
612 q->nr_sorted++; 621 q->nr_sorted++;
613 if (rq_mergeable(rq)) { 622 if (rq_mergeable(rq)) {
@@ -692,7 +701,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
692 * this request is scheduling boundary, update 701 * this request is scheduling boundary, update
693 * end_sector 702 * end_sector
694 */ 703 */
695 if (blk_fs_request(rq)) { 704 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
696 q->end_sector = rq_end_sector(rq); 705 q->end_sector = rq_end_sector(rq);
697 q->boundary_rq = rq; 706 q->boundary_rq = rq;
698 } 707 }
@@ -745,7 +754,7 @@ struct request *elv_next_request(struct request_queue *q)
745 * not ever see it. 754 * not ever see it.
746 */ 755 */
747 if (blk_empty_barrier(rq)) { 756 if (blk_empty_barrier(rq)) {
748 end_queued_request(rq, 1); 757 __blk_end_request(rq, 0, blk_rq_bytes(rq));
749 continue; 758 continue;
750 } 759 }
751 if (!(rq->cmd_flags & REQ_STARTED)) { 760 if (!(rq->cmd_flags & REQ_STARTED)) {
@@ -764,6 +773,12 @@ struct request *elv_next_request(struct request_queue *q)
764 */ 773 */
765 rq->cmd_flags |= REQ_STARTED; 774 rq->cmd_flags |= REQ_STARTED;
766 blk_add_trace_rq(q, rq, BLK_TA_ISSUE); 775 blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
776
777 /*
778 * We are now handing the request to the hardware,
779 * add the timeout handler
780 */
781 blk_add_timer(rq);
767 } 782 }
768 783
769 if (!q->boundary_rq || q->boundary_rq == rq) { 784 if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -782,7 +797,6 @@ struct request *elv_next_request(struct request_queue *q)
782 * device can handle 797 * device can handle
783 */ 798 */
784 rq->nr_phys_segments++; 799 rq->nr_phys_segments++;
785 rq->nr_hw_segments++;
786 } 800 }
787 801
788 if (!q->prep_rq_fn) 802 if (!q->prep_rq_fn)
@@ -805,14 +819,13 @@ struct request *elv_next_request(struct request_queue *q)
805 * so that we don't add it again 819 * so that we don't add it again
806 */ 820 */
807 --rq->nr_phys_segments; 821 --rq->nr_phys_segments;
808 --rq->nr_hw_segments;
809 } 822 }
810 823
811 rq = NULL; 824 rq = NULL;
812 break; 825 break;
813 } else if (ret == BLKPREP_KILL) { 826 } else if (ret == BLKPREP_KILL) {
814 rq->cmd_flags |= REQ_QUIET; 827 rq->cmd_flags |= REQ_QUIET;
815 end_queued_request(rq, 0); 828 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
816 } else { 829 } else {
817 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 830 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
818 break; 831 break;
@@ -901,6 +914,19 @@ int elv_may_queue(struct request_queue *q, int rw)
901 return ELV_MQUEUE_MAY; 914 return ELV_MQUEUE_MAY;
902} 915}
903 916
917void elv_abort_queue(struct request_queue *q)
918{
919 struct request *rq;
920
921 while (!list_empty(&q->queue_head)) {
922 rq = list_entry_rq(q->queue_head.next);
923 rq->cmd_flags |= REQ_QUIET;
924 blk_add_trace_rq(q, rq, BLK_TA_ABORT);
925 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
926 }
927}
928EXPORT_SYMBOL(elv_abort_queue);
929
904void elv_completed_request(struct request_queue *q, struct request *rq) 930void elv_completed_request(struct request_queue *q, struct request *rq)
905{ 931{
906 elevator_t *e = q->elevator; 932 elevator_t *e = q->elevator;
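
The new elv_abort_queue() gives low-level drivers a single call that fails every request still sitting on the queue with -EIO, marking each one REQ_QUIET so the error path stays quiet in the logs. A hedged sketch of how a driver's "controller is dead" handler might use it; the my_dev structure and the handler itself are hypothetical, and the queue lock is taken here because the function walks q->queue_head directly:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct my_dev {
        struct request_queue *queue;    /* hypothetical driver state */
};

/* Fail everything that is still queued once the hardware is gone. */
static void my_dev_abort_all(struct my_dev *dev)
{
        struct request_queue *q = dev->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        elv_abort_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
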
diff --git a/block/genhd.c b/block/genhd.c
index e0ce23ac2ece..4cd3433c99ac 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -16,6 +16,7 @@
16#include <linux/kobj_map.h> 16#include <linux/kobj_map.h>
17#include <linux/buffer_head.h> 17#include <linux/buffer_head.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/idr.h>
19 20
20#include "blk.h" 21#include "blk.h"
21 22
@@ -24,8 +25,194 @@ static DEFINE_MUTEX(block_class_lock);
24struct kobject *block_depr; 25struct kobject *block_depr;
25#endif 26#endif
26 27
28/* for extended dynamic devt allocation, currently only one major is used */
29#define MAX_EXT_DEVT (1 << MINORBITS)
30
31/* For extended devt allocation. ext_devt_mutex prevents look up
32 * results from going away underneath its user.
33 */
34static DEFINE_MUTEX(ext_devt_mutex);
35static DEFINE_IDR(ext_devt_idr);
36
27static struct device_type disk_type; 37static struct device_type disk_type;
28 38
39/**
40 * disk_get_part - get partition
41 * @disk: disk to look the partition up from
42 * @partno: partition number
43 *
44 * Look for partition @partno from @disk. If found, increment
45 * reference count and return it.
46 *
47 * CONTEXT:
48 * Don't care.
49 *
50 * RETURNS:
51 * Pointer to the found partition on success, NULL if not found.
52 */
53struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
54{
55 struct hd_struct *part = NULL;
56 struct disk_part_tbl *ptbl;
57
58 if (unlikely(partno < 0))
59 return NULL;
60
61 rcu_read_lock();
62
63 ptbl = rcu_dereference(disk->part_tbl);
64 if (likely(partno < ptbl->len)) {
65 part = rcu_dereference(ptbl->part[partno]);
66 if (part)
67 get_device(part_to_dev(part));
68 }
69
70 rcu_read_unlock();
71
72 return part;
73}
74EXPORT_SYMBOL_GPL(disk_get_part);
75
76/**
77 * disk_part_iter_init - initialize partition iterator
78 * @piter: iterator to initialize
79 * @disk: disk to iterate over
80 * @flags: DISK_PITER_* flags
81 *
82 * Initialize @piter so that it iterates over partitions of @disk.
83 *
84 * CONTEXT:
85 * Don't care.
86 */
87void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
88 unsigned int flags)
89{
90 struct disk_part_tbl *ptbl;
91
92 rcu_read_lock();
93 ptbl = rcu_dereference(disk->part_tbl);
94
95 piter->disk = disk;
96 piter->part = NULL;
97
98 if (flags & DISK_PITER_REVERSE)
99 piter->idx = ptbl->len - 1;
100 else if (flags & DISK_PITER_INCL_PART0)
101 piter->idx = 0;
102 else
103 piter->idx = 1;
104
105 piter->flags = flags;
106
107 rcu_read_unlock();
108}
109EXPORT_SYMBOL_GPL(disk_part_iter_init);
110
111/**
112 * disk_part_iter_next - proceed iterator to the next partition and return it
113 * @piter: iterator of interest
114 *
115 * Proceed @piter to the next partition and return it.
116 *
117 * CONTEXT:
118 * Don't care.
119 */
120struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
121{
122 struct disk_part_tbl *ptbl;
123 int inc, end;
124
125 /* put the last partition */
126 disk_put_part(piter->part);
127 piter->part = NULL;
128
129 /* get part_tbl */
130 rcu_read_lock();
131 ptbl = rcu_dereference(piter->disk->part_tbl);
132
133 /* determine iteration parameters */
134 if (piter->flags & DISK_PITER_REVERSE) {
135 inc = -1;
136 if (piter->flags & DISK_PITER_INCL_PART0)
137 end = -1;
138 else
139 end = 0;
140 } else {
141 inc = 1;
142 end = ptbl->len;
143 }
144
145 /* iterate to the next partition */
146 for (; piter->idx != end; piter->idx += inc) {
147 struct hd_struct *part;
148
149 part = rcu_dereference(ptbl->part[piter->idx]);
150 if (!part)
151 continue;
152 if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects)
153 continue;
154
155 get_device(part_to_dev(part));
156 piter->part = part;
157 piter->idx += inc;
158 break;
159 }
160
161 rcu_read_unlock();
162
163 return piter->part;
164}
165EXPORT_SYMBOL_GPL(disk_part_iter_next);
166
167/**
168 * disk_part_iter_exit - finish up partition iteration
169 * @piter: iter of interest
170 *
171 * Called when iteration is over. Cleans up @piter.
172 *
173 * CONTEXT:
174 * Don't care.
175 */
176void disk_part_iter_exit(struct disk_part_iter *piter)
177{
178 disk_put_part(piter->part);
179 piter->part = NULL;
180}
181EXPORT_SYMBOL_GPL(disk_part_iter_exit);
182
183/**
184 * disk_map_sector_rcu - map sector to partition
185 * @disk: gendisk of interest
186 * @sector: sector to map
187 *
188 * Find out which partition @sector maps to on @disk. This is
189 * primarily used for stats accounting.
190 *
191 * CONTEXT:
192 * RCU read locked. The returned partition pointer is valid only
193 * while preemption is disabled.
194 *
195 * RETURNS:
196 * Found partition on success, part0 is returned if no partition matches
197 */
198struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
199{
200 struct disk_part_tbl *ptbl;
201 int i;
202
203 ptbl = rcu_dereference(disk->part_tbl);
204
205 for (i = 1; i < ptbl->len; i++) {
206 struct hd_struct *part = rcu_dereference(ptbl->part[i]);
207
208 if (part && part->start_sect <= sector &&
209 sector < part->start_sect + part->nr_sects)
210 return part;
211 }
212 return &disk->part0;
213}
214EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
215
29/* 216/*
30 * Can be deleted altogether. Later. 217 * Can be deleted altogether. Later.
31 * 218 *
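
A minimal sketch of a caller of the iterator API introduced above, summing the sizes of all non-empty partitions of a disk. The function itself is hypothetical; the disk_part_iter_init()/next()/exit() calls and the default skipping of empty partitions follow the code above:

#include <linux/genhd.h>

static sector_t total_partition_sectors(struct gendisk *disk)
{
        struct disk_part_iter piter;
        struct hd_struct *part;
        sector_t total = 0;

        /* flags == 0: skip part0 and empty partitions */
        disk_part_iter_init(&piter, disk, 0);
        while ((part = disk_part_iter_next(&piter)))
                total += part->nr_sects;
        disk_part_iter_exit(&piter);

        return total;
}
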
@@ -43,14 +230,14 @@ static inline int major_to_index(int major)
43} 230}
44 231
45#ifdef CONFIG_PROC_FS 232#ifdef CONFIG_PROC_FS
46void blkdev_show(struct seq_file *f, off_t offset) 233void blkdev_show(struct seq_file *seqf, off_t offset)
47{ 234{
48 struct blk_major_name *dp; 235 struct blk_major_name *dp;
49 236
50 if (offset < BLKDEV_MAJOR_HASH_SIZE) { 237 if (offset < BLKDEV_MAJOR_HASH_SIZE) {
51 mutex_lock(&block_class_lock); 238 mutex_lock(&block_class_lock);
52 for (dp = major_names[offset]; dp; dp = dp->next) 239 for (dp = major_names[offset]; dp; dp = dp->next)
53 seq_printf(f, "%3d %s\n", dp->major, dp->name); 240 seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
54 mutex_unlock(&block_class_lock); 241 mutex_unlock(&block_class_lock);
55 } 242 }
56} 243}
@@ -136,6 +323,118 @@ EXPORT_SYMBOL(unregister_blkdev);
136 323
137static struct kobj_map *bdev_map; 324static struct kobj_map *bdev_map;
138 325
326/**
327 * blk_mangle_minor - scatter minor numbers apart
328 * @minor: minor number to mangle
329 *
330 * Scatter consecutively allocated @minor numbers apart if
331 * CONFIG_DEBUG_BLOCK_EXT_DEVT is enabled. Mangling twice restores the original value.
332 *
333 * RETURNS:
334 * Mangled value.
335 *
336 * CONTEXT:
337 * Don't care.
338 */
339static int blk_mangle_minor(int minor)
340{
341#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
342 int i;
343
344 for (i = 0; i < MINORBITS / 2; i++) {
345 int low = minor & (1 << i);
346 int high = minor & (1 << (MINORBITS - 1 - i));
347 int distance = MINORBITS - 1 - 2 * i;
348
349 minor ^= low | high; /* clear both bits */
350 low <<= distance; /* swap the positions */
351 high >>= distance;
352 minor |= low | high; /* and set */
353 }
354#endif
355 return minor;
356}
357
358/**
359 * blk_alloc_devt - allocate a dev_t for a partition
360 * @part: partition to allocate dev_t for
362 * @devt: out parameter for resulting dev_t
363 *
364 * Allocate a dev_t for a block device.
365 *
366 * RETURNS:
367 * 0 on success, allocated dev_t is returned in *@devt. -errno on
368 * failure.
369 *
370 * CONTEXT:
371 * Might sleep.
372 */
373int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
374{
375 struct gendisk *disk = part_to_disk(part);
376 int idx, rc;
377
378 /* in consecutive minor range? */
379 if (part->partno < disk->minors) {
380 *devt = MKDEV(disk->major, disk->first_minor + part->partno);
381 return 0;
382 }
383
384 /* allocate ext devt */
385 do {
386 if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
387 return -ENOMEM;
388 rc = idr_get_new(&ext_devt_idr, part, &idx);
389 } while (rc == -EAGAIN);
390
391 if (rc)
392 return rc;
393
394 if (idx > MAX_EXT_DEVT) {
395 idr_remove(&ext_devt_idr, idx);
396 return -EBUSY;
397 }
398
399 *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
400 return 0;
401}
402
403/**
404 * blk_free_devt - free a dev_t
405 * @devt: dev_t to free
406 *
407 * Free @devt which was allocated using blk_alloc_devt().
408 *
409 * CONTEXT:
410 * Might sleep.
411 */
412void blk_free_devt(dev_t devt)
413{
414 might_sleep();
415
416 if (devt == MKDEV(0, 0))
417 return;
418
419 if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
420 mutex_lock(&ext_devt_mutex);
421 idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
422 mutex_unlock(&ext_devt_mutex);
423 }
424}
425
426static char *bdevt_str(dev_t devt, char *buf)
427{
428 if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
429 char tbuf[BDEVT_SIZE];
430 snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
431 snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
432 } else
433 snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
434
435 return buf;
436}
437
139/* 438/*
140 * Register device numbers dev..(dev+range-1) 439 * Register device numbers dev..(dev+range-1)
141 * range must be nonzero 440 * range must be nonzero
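
blk_mangle_minor() above is an involution: bit i is swapped with bit (MINORBITS - 1 - i) for the low half of the bit positions, so applying the function twice restores the original minor, exactly as its comment claims. A stand-alone user-space check of that property (a reimplementation for illustration only; MINORBITS is 20 in the kernel's <linux/kdev_t.h>):

/* build: cc -O2 -o mangle-check mangle-check.c */
#include <assert.h>
#include <stdio.h>

#define MINORBITS 20                    /* matches the kernel definition */

static int mangle_minor(int minor)
{
        int i;

        for (i = 0; i < MINORBITS / 2; i++) {
                int low = minor & (1 << i);
                int high = minor & (1 << (MINORBITS - 1 - i));
                int distance = MINORBITS - 1 - 2 * i;

                minor ^= low | high;    /* clear both bits */
                low <<= distance;       /* swap the positions */
                high >>= distance;
                minor |= low | high;    /* and set */
        }
        return minor;
}

int main(void)
{
        int m;

        for (m = 0; m < (1 << MINORBITS); m++)
                assert(mangle_minor(mangle_minor(m)) == m);

        printf("mangling twice restores all %d minors\n", 1 << MINORBITS);
        return 0;
}
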
@@ -157,11 +456,11 @@ void blk_unregister_region(dev_t devt, unsigned long range)
157 456
158EXPORT_SYMBOL(blk_unregister_region); 457EXPORT_SYMBOL(blk_unregister_region);
159 458
160static struct kobject *exact_match(dev_t devt, int *part, void *data) 459static struct kobject *exact_match(dev_t devt, int *partno, void *data)
161{ 460{
162 struct gendisk *p = data; 461 struct gendisk *p = data;
163 462
164 return &p->dev.kobj; 463 return &disk_to_dev(p)->kobj;
165} 464}
166 465
167static int exact_lock(dev_t devt, void *data) 466static int exact_lock(dev_t devt, void *data)
@@ -179,21 +478,46 @@ static int exact_lock(dev_t devt, void *data)
179 * 478 *
180 * This function registers the partitioning information in @disk 479 * This function registers the partitioning information in @disk
181 * with the kernel. 480 * with the kernel.
481 *
482 * FIXME: error handling
182 */ 483 */
183void add_disk(struct gendisk *disk) 484void add_disk(struct gendisk *disk)
184{ 485{
185 struct backing_dev_info *bdi; 486 struct backing_dev_info *bdi;
487 dev_t devt;
186 int retval; 488 int retval;
187 489
490 /* minors == 0 indicates to use ext devt from part0 and should
491 * be accompanied by the GENHD_FL_EXT_DEVT flag. Make sure all
492 * parameters make sense.
493 */
494 WARN_ON(disk->minors && !(disk->major || disk->first_minor));
495 WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
496
188 disk->flags |= GENHD_FL_UP; 497 disk->flags |= GENHD_FL_UP;
189 blk_register_region(MKDEV(disk->major, disk->first_minor), 498
190 disk->minors, NULL, exact_match, exact_lock, disk); 499 retval = blk_alloc_devt(&disk->part0, &devt);
500 if (retval) {
501 WARN_ON(1);
502 return;
503 }
504 disk_to_dev(disk)->devt = devt;
505
506 /* ->major and ->first_minor aren't supposed to be
507 * dereferenced from here on, but set them just in case.
508 */
509 disk->major = MAJOR(devt);
510 disk->first_minor = MINOR(devt);
511
512 blk_register_region(disk_devt(disk), disk->minors, NULL,
513 exact_match, exact_lock, disk);
191 register_disk(disk); 514 register_disk(disk);
192 blk_register_queue(disk); 515 blk_register_queue(disk);
193 516
194 bdi = &disk->queue->backing_dev_info; 517 bdi = &disk->queue->backing_dev_info;
195 bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor)); 518 bdi_register_dev(bdi, disk_devt(disk));
196 retval = sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi"); 519 retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
520 "bdi");
197 WARN_ON(retval); 521 WARN_ON(retval);
198} 522}
199 523
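
With the reworked add_disk() above, a driver that still reserves a fixed range of minors keeps working unchanged: blk_alloc_devt() sees partno < disk->minors and returns the classic MKDEV(major, first_minor + partno), and only partitions beyond that range fall back to the extended BLOCK_EXT_MAJOR space. A hedged sketch of that conventional registration path; my_register_disk() and its parameters are placeholders, not part of this patch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>

static int my_register_disk(int major, struct block_device_operations *fops,
                            struct request_queue *q, sector_t nsectors)
{
        struct gendisk *disk;

        disk = alloc_disk(16);          /* whole disk + up to 15 partitions */
        if (!disk)
                return -ENOMEM;

        disk->major = major;            /* assumed registered via register_blkdev() */
        disk->first_minor = 0;
        disk->fops = fops;
        disk->queue = q;
        snprintf(disk->disk_name, sizeof(disk->disk_name), "myblk0");
        set_capacity(disk, nsectors);

        add_disk(disk);                 /* devt allocation now happens in here */
        return 0;
}
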
@@ -202,78 +526,71 @@ EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
202 526
203void unlink_gendisk(struct gendisk *disk) 527void unlink_gendisk(struct gendisk *disk)
204{ 528{
205 sysfs_remove_link(&disk->dev.kobj, "bdi"); 529 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
206 bdi_unregister(&disk->queue->backing_dev_info); 530 bdi_unregister(&disk->queue->backing_dev_info);
207 blk_unregister_queue(disk); 531 blk_unregister_queue(disk);
208 blk_unregister_region(MKDEV(disk->major, disk->first_minor), 532 blk_unregister_region(disk_devt(disk), disk->minors);
209 disk->minors);
210} 533}
211 534
212/** 535/**
213 * get_gendisk - get partitioning information for a given device 536 * get_gendisk - get partitioning information for a given device
214 * @dev: device to get partitioning information for 537 * @devt: device to get partitioning information for
538 * @part: returned partition index
215 * 539 *
216 * This function gets the structure containing partitioning 540 * This function gets the structure containing partitioning
217 * information for the given device @dev. 541 * information for the given device @devt.
218 */ 542 */
219struct gendisk *get_gendisk(dev_t devt, int *part) 543struct gendisk *get_gendisk(dev_t devt, int *partno)
220{ 544{
221 struct kobject *kobj = kobj_lookup(bdev_map, devt, part); 545 struct gendisk *disk = NULL;
222 struct device *dev = kobj_to_dev(kobj); 546
547 if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
548 struct kobject *kobj;
549
550 kobj = kobj_lookup(bdev_map, devt, partno);
551 if (kobj)
552 disk = dev_to_disk(kobj_to_dev(kobj));
553 } else {
554 struct hd_struct *part;
223 555
224 return kobj ? dev_to_disk(dev) : NULL; 556 mutex_lock(&ext_devt_mutex);
557 part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
558 if (part && get_disk(part_to_disk(part))) {
559 *partno = part->partno;
560 disk = part_to_disk(part);
561 }
562 mutex_unlock(&ext_devt_mutex);
563 }
564
565 return disk;
225} 566}
226 567
227/* 568/**
228 * print a partitions - intended for places where the root filesystem can't be 569 * bdget_disk - do bdget() by gendisk and partition number
229 * mounted and thus to give the victim some idea of what went wrong 570 * @disk: gendisk of interest
571 * @partno: partition number
572 *
573 * Find partition @partno from @disk, do bdget() on it.
574 *
575 * CONTEXT:
576 * Don't care.
577 *
578 * RETURNS:
579 * Resulting block_device on success, NULL on failure.
230 */ 580 */
231static int printk_partition(struct device *dev, void *data) 581struct block_device *bdget_disk(struct gendisk *disk, int partno)
232{ 582{
233 struct gendisk *sgp; 583 struct hd_struct *part;
234 char buf[BDEVNAME_SIZE]; 584 struct block_device *bdev = NULL;
235 int n;
236
237 if (dev->type != &disk_type)
238 goto exit;
239 585
240 sgp = dev_to_disk(dev); 586 part = disk_get_part(disk, partno);
241 /* 587 if (part)
242 * Don't show empty devices or things that have been surpressed 588 bdev = bdget(part_devt(part));
243 */ 589 disk_put_part(part);
244 if (get_capacity(sgp) == 0 ||
245 (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
246 goto exit;
247 590
248 /* 591 return bdev;
249 * Note, unlike /proc/partitions, I am showing the numbers in
250 * hex - the same format as the root= option takes.
251 */
252 printk("%02x%02x %10llu %s",
253 sgp->major, sgp->first_minor,
254 (unsigned long long)get_capacity(sgp) >> 1,
255 disk_name(sgp, 0, buf));
256 if (sgp->driverfs_dev != NULL &&
257 sgp->driverfs_dev->driver != NULL)
258 printk(" driver: %s\n",
259 sgp->driverfs_dev->driver->name);
260 else
261 printk(" (driver?)\n");
262
263 /* now show the partitions */
264 for (n = 0; n < sgp->minors - 1; ++n) {
265 if (sgp->part[n] == NULL)
266 goto exit;
267 if (sgp->part[n]->nr_sects == 0)
268 goto exit;
269 printk(" %02x%02x %10llu %s\n",
270 sgp->major, n + 1 + sgp->first_minor,
271 (unsigned long long)sgp->part[n]->nr_sects >> 1,
272 disk_name(sgp, n + 1, buf));
273 }
274exit:
275 return 0;
276} 592}
593EXPORT_SYMBOL(bdget_disk);
277 594
278/* 595/*
279 * print a full list of all partitions - intended for places where the root 596 * print a full list of all partitions - intended for places where the root
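
The reworked get_gendisk() and the new bdget_disk() both resolve through the reference-counted partition lookup shown earlier. A short, purely illustrative sketch of the same pattern from a driver's point of view, logging a partition's size and device number (only disk_get_part(), disk_put_part(), part_devt() and disk_name() are taken from the code above):

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/kdev_t.h>

static void my_log_partition(struct gendisk *disk, int partno)
{
        struct hd_struct *part;
        char name[BDEVNAME_SIZE];

        part = disk_get_part(disk, partno);     /* takes a reference */
        if (!part)
                return;

        printk(KERN_INFO "%s: %llu sectors (dev %u:%u)\n",
               disk_name(disk, partno, name),
               (unsigned long long)part->nr_sects,
               MAJOR(part_devt(part)), MINOR(part_devt(part)));

        disk_put_part(part);                    /* drop the reference again */
}
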
@@ -282,120 +599,145 @@ exit:
282 */ 599 */
283void __init printk_all_partitions(void) 600void __init printk_all_partitions(void)
284{ 601{
285 mutex_lock(&block_class_lock); 602 struct class_dev_iter iter;
286 class_for_each_device(&block_class, NULL, NULL, printk_partition); 603 struct device *dev;
287 mutex_unlock(&block_class_lock); 604
605 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
606 while ((dev = class_dev_iter_next(&iter))) {
607 struct gendisk *disk = dev_to_disk(dev);
608 struct disk_part_iter piter;
609 struct hd_struct *part;
610 char name_buf[BDEVNAME_SIZE];
611 char devt_buf[BDEVT_SIZE];
612
613 /*
614 * Don't show empty devices or things that have been
615 * suppressed
616 */
617 if (get_capacity(disk) == 0 ||
618 (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
619 continue;
620
621 /*
622 * Note, unlike /proc/partitions, I am showing the
623 * numbers in hex - the same format as the root=
624 * option takes.
625 */
626 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
627 while ((part = disk_part_iter_next(&piter))) {
628 bool is_part0 = part == &disk->part0;
629
630 printk("%s%s %10llu %s", is_part0 ? "" : " ",
631 bdevt_str(part_devt(part), devt_buf),
632 (unsigned long long)part->nr_sects >> 1,
633 disk_name(disk, part->partno, name_buf));
634 if (is_part0) {
635 if (disk->driverfs_dev != NULL &&
636 disk->driverfs_dev->driver != NULL)
637 printk(" driver: %s\n",
638 disk->driverfs_dev->driver->name);
639 else
640 printk(" (driver?)\n");
641 } else
642 printk("\n");
643 }
644 disk_part_iter_exit(&piter);
645 }
646 class_dev_iter_exit(&iter);
288} 647}
289 648
290#ifdef CONFIG_PROC_FS 649#ifdef CONFIG_PROC_FS
291/* iterator */ 650/* iterator */
292static int find_start(struct device *dev, void *data) 651static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
293{ 652{
294 loff_t *k = data; 653 loff_t skip = *pos;
654 struct class_dev_iter *iter;
655 struct device *dev;
295 656
296 if (dev->type != &disk_type) 657 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
297 return 0; 658 if (!iter)
298 if (!*k) 659 return ERR_PTR(-ENOMEM);
299 return 1; 660
300 (*k)--; 661 seqf->private = iter;
301 return 0; 662 class_dev_iter_init(iter, &block_class, NULL, &disk_type);
663 do {
664 dev = class_dev_iter_next(iter);
665 if (!dev)
666 return NULL;
667 } while (skip--);
668
669 return dev_to_disk(dev);
302} 670}
303 671
304static void *part_start(struct seq_file *part, loff_t *pos) 672static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
305{ 673{
306 struct device *dev; 674 struct device *dev;
307 loff_t k = *pos;
308
309 if (!k)
310 part->private = (void *)1LU; /* tell show to print header */
311 675
312 mutex_lock(&block_class_lock); 676 (*pos)++;
313 dev = class_find_device(&block_class, NULL, &k, find_start); 677 dev = class_dev_iter_next(seqf->private);
314 if (dev) { 678 if (dev)
315 put_device(dev);
316 return dev_to_disk(dev); 679 return dev_to_disk(dev);
317 } 680
318 return NULL; 681 return NULL;
319} 682}
320 683
321static int find_next(struct device *dev, void *data) 684static void disk_seqf_stop(struct seq_file *seqf, void *v)
322{ 685{
323 if (dev->type == &disk_type) 686 struct class_dev_iter *iter = seqf->private;
324 return 1;
325 return 0;
326}
327 687
328static void *part_next(struct seq_file *part, void *v, loff_t *pos) 688 /* stop is called even after start failed :-( */
329{ 689 if (iter) {
330 struct gendisk *gp = v; 690 class_dev_iter_exit(iter);
331 struct device *dev; 691 kfree(iter);
332 ++*pos;
333 dev = class_find_device(&block_class, &gp->dev, NULL, find_next);
334 if (dev) {
335 put_device(dev);
336 return dev_to_disk(dev);
337 } 692 }
338 return NULL;
339} 693}
340 694
341static void part_stop(struct seq_file *part, void *v) 695static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
342{ 696{
343 mutex_unlock(&block_class_lock); 697 static void *p;
698
699 p = disk_seqf_start(seqf, pos);
700 if (!IS_ERR(p) && p && !*pos)
701 seq_puts(seqf, "major minor #blocks name\n\n");
702 return p;
344} 703}
345 704
346static int show_partition(struct seq_file *part, void *v) 705static int show_partition(struct seq_file *seqf, void *v)
347{ 706{
348 struct gendisk *sgp = v; 707 struct gendisk *sgp = v;
349 int n; 708 struct disk_part_iter piter;
709 struct hd_struct *part;
350 char buf[BDEVNAME_SIZE]; 710 char buf[BDEVNAME_SIZE];
351 711
352 /*
353 * Print header if start told us to do. This is to preserve
354 * the original behavior of not printing header if no
355 * partition exists. This hackery will be removed later with
356 * class iteration clean up.
357 */
358 if (part->private) {
359 seq_puts(part, "major minor #blocks name\n\n");
360 part->private = NULL;
361 }
362
363 /* Don't show non-partitionable removable devices or empty devices */ 712 /* Don't show non-partitionable removable devices or empty devices */
364 if (!get_capacity(sgp) || 713 if (!get_capacity(sgp) || (!disk_partitionable(sgp) &&
365 (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE))) 714 (sgp->flags & GENHD_FL_REMOVABLE)))
366 return 0; 715 return 0;
367 if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO) 716 if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
368 return 0; 717 return 0;
369 718
370 /* show the full disk and all non-0 size partitions of it */ 719 /* show the full disk and all non-0 size partitions of it */
371 seq_printf(part, "%4d %4d %10llu %s\n", 720 disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
372 sgp->major, sgp->first_minor, 721 while ((part = disk_part_iter_next(&piter)))
373 (unsigned long long)get_capacity(sgp) >> 1, 722 seq_printf(seqf, "%4d %7d %10llu %s\n",
374 disk_name(sgp, 0, buf)); 723 MAJOR(part_devt(part)), MINOR(part_devt(part)),
375 for (n = 0; n < sgp->minors - 1; n++) { 724 (unsigned long long)part->nr_sects >> 1,
376 if (!sgp->part[n]) 725 disk_name(sgp, part->partno, buf));
377 continue; 726 disk_part_iter_exit(&piter);
378 if (sgp->part[n]->nr_sects == 0)
379 continue;
380 seq_printf(part, "%4d %4d %10llu %s\n",
381 sgp->major, n + 1 + sgp->first_minor,
382 (unsigned long long)sgp->part[n]->nr_sects >> 1 ,
383 disk_name(sgp, n + 1, buf));
384 }
385 727
386 return 0; 728 return 0;
387} 729}
388 730
389const struct seq_operations partitions_op = { 731const struct seq_operations partitions_op = {
390 .start = part_start, 732 .start = show_partition_start,
391 .next = part_next, 733 .next = disk_seqf_next,
392 .stop = part_stop, 734 .stop = disk_seqf_stop,
393 .show = show_partition 735 .show = show_partition
394}; 736};
395#endif 737#endif
396 738
397 739
398static struct kobject *base_probe(dev_t devt, int *part, void *data) 740static struct kobject *base_probe(dev_t devt, int *partno, void *data)
399{ 741{
400 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) 742 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
401 /* Make old-style 2.4 aliases work */ 743 /* Make old-style 2.4 aliases work */
@@ -431,29 +773,29 @@ static ssize_t disk_range_show(struct device *dev,
431 return sprintf(buf, "%d\n", disk->minors); 773 return sprintf(buf, "%d\n", disk->minors);
432} 774}
433 775
434static ssize_t disk_removable_show(struct device *dev, 776static ssize_t disk_ext_range_show(struct device *dev,
435 struct device_attribute *attr, char *buf) 777 struct device_attribute *attr, char *buf)
436{ 778{
437 struct gendisk *disk = dev_to_disk(dev); 779 struct gendisk *disk = dev_to_disk(dev);
438 780
439 return sprintf(buf, "%d\n", 781 return sprintf(buf, "%d\n", disk_max_parts(disk));
440 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
441} 782}
442 783
443static ssize_t disk_ro_show(struct device *dev, 784static ssize_t disk_removable_show(struct device *dev,
444 struct device_attribute *attr, char *buf) 785 struct device_attribute *attr, char *buf)
445{ 786{
446 struct gendisk *disk = dev_to_disk(dev); 787 struct gendisk *disk = dev_to_disk(dev);
447 788
448 return sprintf(buf, "%d\n", disk->policy ? 1 : 0); 789 return sprintf(buf, "%d\n",
790 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
449} 791}
450 792
451static ssize_t disk_size_show(struct device *dev, 793static ssize_t disk_ro_show(struct device *dev,
452 struct device_attribute *attr, char *buf) 794 struct device_attribute *attr, char *buf)
453{ 795{
454 struct gendisk *disk = dev_to_disk(dev); 796 struct gendisk *disk = dev_to_disk(dev);
455 797
456 return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk)); 798 return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
457} 799}
458 800
459static ssize_t disk_capability_show(struct device *dev, 801static ssize_t disk_capability_show(struct device *dev,
@@ -464,73 +806,26 @@ static ssize_t disk_capability_show(struct device *dev,
464 return sprintf(buf, "%x\n", disk->flags); 806 return sprintf(buf, "%x\n", disk->flags);
465} 807}
466 808
467static ssize_t disk_stat_show(struct device *dev,
468 struct device_attribute *attr, char *buf)
469{
470 struct gendisk *disk = dev_to_disk(dev);
471
472 preempt_disable();
473 disk_round_stats(disk);
474 preempt_enable();
475 return sprintf(buf,
476 "%8lu %8lu %8llu %8u "
477 "%8lu %8lu %8llu %8u "
478 "%8u %8u %8u"
479 "\n",
480 disk_stat_read(disk, ios[READ]),
481 disk_stat_read(disk, merges[READ]),
482 (unsigned long long)disk_stat_read(disk, sectors[READ]),
483 jiffies_to_msecs(disk_stat_read(disk, ticks[READ])),
484 disk_stat_read(disk, ios[WRITE]),
485 disk_stat_read(disk, merges[WRITE]),
486 (unsigned long long)disk_stat_read(disk, sectors[WRITE]),
487 jiffies_to_msecs(disk_stat_read(disk, ticks[WRITE])),
488 disk->in_flight,
489 jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
490 jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
491}
492
493#ifdef CONFIG_FAIL_MAKE_REQUEST
494static ssize_t disk_fail_show(struct device *dev,
495 struct device_attribute *attr, char *buf)
496{
497 struct gendisk *disk = dev_to_disk(dev);
498
499 return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
500}
501
502static ssize_t disk_fail_store(struct device *dev,
503 struct device_attribute *attr,
504 const char *buf, size_t count)
505{
506 struct gendisk *disk = dev_to_disk(dev);
507 int i;
508
509 if (count > 0 && sscanf(buf, "%d", &i) > 0) {
510 if (i == 0)
511 disk->flags &= ~GENHD_FL_FAIL;
512 else
513 disk->flags |= GENHD_FL_FAIL;
514 }
515
516 return count;
517}
518
519#endif
520
521static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); 809static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
810static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
522static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); 811static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
523static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); 812static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
524static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL); 813static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
525static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 814static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
526static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL); 815static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
527#ifdef CONFIG_FAIL_MAKE_REQUEST 816#ifdef CONFIG_FAIL_MAKE_REQUEST
528static struct device_attribute dev_attr_fail = 817static struct device_attribute dev_attr_fail =
529 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store); 818 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
819#endif
820#ifdef CONFIG_FAIL_IO_TIMEOUT
821static struct device_attribute dev_attr_fail_timeout =
822 __ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, part_timeout_show,
823 part_timeout_store);
530#endif 824#endif
531 825
532static struct attribute *disk_attrs[] = { 826static struct attribute *disk_attrs[] = {
533 &dev_attr_range.attr, 827 &dev_attr_range.attr,
828 &dev_attr_ext_range.attr,
534 &dev_attr_removable.attr, 829 &dev_attr_removable.attr,
535 &dev_attr_ro.attr, 830 &dev_attr_ro.attr,
536 &dev_attr_size.attr, 831 &dev_attr_size.attr,
@@ -539,6 +834,9 @@ static struct attribute *disk_attrs[] = {
539#ifdef CONFIG_FAIL_MAKE_REQUEST 834#ifdef CONFIG_FAIL_MAKE_REQUEST
540 &dev_attr_fail.attr, 835 &dev_attr_fail.attr,
541#endif 836#endif
837#ifdef CONFIG_FAIL_IO_TIMEOUT
838 &dev_attr_fail_timeout.attr,
839#endif
542 NULL 840 NULL
543}; 841};
544 842
@@ -551,13 +849,87 @@ static struct attribute_group *disk_attr_groups[] = {
551 NULL 849 NULL
552}; 850};
553 851
852static void disk_free_ptbl_rcu_cb(struct rcu_head *head)
853{
854 struct disk_part_tbl *ptbl =
855 container_of(head, struct disk_part_tbl, rcu_head);
856
857 kfree(ptbl);
858}
859
860/**
861 * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
862 * @disk: disk to replace part_tbl for
863 * @new_ptbl: new part_tbl to install
864 *
865 * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The
866 * original ptbl is freed using RCU callback.
867 *
868 * LOCKING:
869 * Matching bd_mutex locked.
870 */
871static void disk_replace_part_tbl(struct gendisk *disk,
872 struct disk_part_tbl *new_ptbl)
873{
874 struct disk_part_tbl *old_ptbl = disk->part_tbl;
875
876 rcu_assign_pointer(disk->part_tbl, new_ptbl);
877 if (old_ptbl)
878 call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
879}
880
881/**
882 * disk_expand_part_tbl - expand disk->part_tbl
883 * @disk: disk to expand part_tbl for
884 * @partno: expand such that this partno can fit in
885 *
886 * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl
887 * uses RCU to allow unlocked dereferencing for stats and other stuff.
888 *
889 * LOCKING:
890 * Matching bd_mutex locked, might sleep.
891 *
892 * RETURNS:
893 * 0 on success, -errno on failure.
894 */
895int disk_expand_part_tbl(struct gendisk *disk, int partno)
896{
897 struct disk_part_tbl *old_ptbl = disk->part_tbl;
898 struct disk_part_tbl *new_ptbl;
899 int len = old_ptbl ? old_ptbl->len : 0;
900 int target = partno + 1;
901 size_t size;
902 int i;
903
904 /* disk_max_parts() is zero during initialization, ignore if so */
905 if (disk_max_parts(disk) && target > disk_max_parts(disk))
906 return -EINVAL;
907
908 if (target <= len)
909 return 0;
910
911 size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
912 new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
913 if (!new_ptbl)
914 return -ENOMEM;
915
916 INIT_RCU_HEAD(&new_ptbl->rcu_head);
917 new_ptbl->len = target;
918
919 for (i = 0; i < len; i++)
920 rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
921
922 disk_replace_part_tbl(disk, new_ptbl);
923 return 0;
924}
925
554static void disk_release(struct device *dev) 926static void disk_release(struct device *dev)
555{ 927{
556 struct gendisk *disk = dev_to_disk(dev); 928 struct gendisk *disk = dev_to_disk(dev);
557 929
558 kfree(disk->random); 930 kfree(disk->random);
559 kfree(disk->part); 931 disk_replace_part_tbl(disk, NULL);
560 free_disk_stats(disk); 932 free_part_stats(&disk->part0);
561 kfree(disk); 933 kfree(disk);
562} 934}
563struct class block_class = { 935struct class block_class = {
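
disk_expand_part_tbl() above only grows the table; publishing an entry is the caller's job. A hedged sketch of how a new partition would be installed on top of it: grow the table if needed, then rcu_assign_pointer() the fully initialised hd_struct into its slot so that lockless readers such as disk_get_part() and disk_map_sector_rcu() never see a half-built entry. The helper name is hypothetical, and the caller is assumed to hold bd_mutex as the LOCKING note requires:

#include <linux/genhd.h>
#include <linux/rcupdate.h>

static int my_install_part(struct gendisk *disk, int partno,
                           struct hd_struct *p)
{
        int err;

        err = disk_expand_part_tbl(disk, partno);
        if (err)
                return err;

        /* make the entry visible to RCU readers in one shot */
        rcu_assign_pointer(disk->part_tbl->part[partno], p);
        return 0;
}
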
@@ -578,83 +950,31 @@ static struct device_type disk_type = {
578 * The output looks suspiciously like /proc/partitions with a bunch of 950 * The output looks suspiciously like /proc/partitions with a bunch of
579 * extra fields. 951 * extra fields.
580 */ 952 */
581 953static int diskstats_show(struct seq_file *seqf, void *v)
582static void *diskstats_start(struct seq_file *part, loff_t *pos)
583{
584 struct device *dev;
585 loff_t k = *pos;
586
587 mutex_lock(&block_class_lock);
588 dev = class_find_device(&block_class, NULL, &k, find_start);
589 if (dev) {
590 put_device(dev);
591 return dev_to_disk(dev);
592 }
593 return NULL;
594}
595
596static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
597{
598 struct gendisk *gp = v;
599 struct device *dev;
600
601 ++*pos;
602 dev = class_find_device(&block_class, &gp->dev, NULL, find_next);
603 if (dev) {
604 put_device(dev);
605 return dev_to_disk(dev);
606 }
607 return NULL;
608}
609
610static void diskstats_stop(struct seq_file *part, void *v)
611{
612 mutex_unlock(&block_class_lock);
613}
614
615static int diskstats_show(struct seq_file *s, void *v)
616{ 954{
617 struct gendisk *gp = v; 955 struct gendisk *gp = v;
956 struct disk_part_iter piter;
957 struct hd_struct *hd;
618 char buf[BDEVNAME_SIZE]; 958 char buf[BDEVNAME_SIZE];
619 int n = 0; 959 int cpu;
620 960
621 /* 961 /*
622 if (&gp->dev.kobj.entry == block_class.devices.next) 962 if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
623 seq_puts(s, "major minor name" 963 seq_puts(seqf, "major minor name"
624 " rio rmerge rsect ruse wio wmerge " 964 " rio rmerge rsect ruse wio wmerge "
625 "wsect wuse running use aveq" 965 "wsect wuse running use aveq"
626 "\n\n"); 966 "\n\n");
627 */ 967 */
628 968
629 preempt_disable(); 969 disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0);
630 disk_round_stats(gp); 970 while ((hd = disk_part_iter_next(&piter))) {
631 preempt_enable(); 971 cpu = part_stat_lock();
632 seq_printf(s, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n", 972 part_round_stats(cpu, hd);
633 gp->major, n + gp->first_minor, disk_name(gp, n, buf), 973 part_stat_unlock();
634 disk_stat_read(gp, ios[0]), disk_stat_read(gp, merges[0]), 974 seq_printf(seqf, "%4d %7d %s %lu %lu %llu "
635 (unsigned long long)disk_stat_read(gp, sectors[0]),
636 jiffies_to_msecs(disk_stat_read(gp, ticks[0])),
637 disk_stat_read(gp, ios[1]), disk_stat_read(gp, merges[1]),
638 (unsigned long long)disk_stat_read(gp, sectors[1]),
639 jiffies_to_msecs(disk_stat_read(gp, ticks[1])),
640 gp->in_flight,
641 jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
642 jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
643
644 /* now show all non-0 size partitions of it */
645 for (n = 0; n < gp->minors - 1; n++) {
646 struct hd_struct *hd = gp->part[n];
647
648 if (!hd || !hd->nr_sects)
649 continue;
650
651 preempt_disable();
652 part_round_stats(hd);
653 preempt_enable();
654 seq_printf(s, "%4d %4d %s %lu %lu %llu "
655 "%u %lu %lu %llu %u %u %u %u\n", 975 "%u %lu %lu %llu %u %u %u %u\n",
656 gp->major, n + gp->first_minor + 1, 976 MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
657 disk_name(gp, n + 1, buf), 977 disk_name(gp, hd->partno, buf),
658 part_stat_read(hd, ios[0]), 978 part_stat_read(hd, ios[0]),
659 part_stat_read(hd, merges[0]), 979 part_stat_read(hd, merges[0]),
660 (unsigned long long)part_stat_read(hd, sectors[0]), 980 (unsigned long long)part_stat_read(hd, sectors[0]),
@@ -668,14 +988,15 @@ static int diskstats_show(struct seq_file *s, void *v)
668 jiffies_to_msecs(part_stat_read(hd, time_in_queue)) 988 jiffies_to_msecs(part_stat_read(hd, time_in_queue))
669 ); 989 );
670 } 990 }
991 disk_part_iter_exit(&piter);
671 992
672 return 0; 993 return 0;
673} 994}
674 995
675const struct seq_operations diskstats_op = { 996const struct seq_operations diskstats_op = {
676 .start = diskstats_start, 997 .start = disk_seqf_start,
677 .next = diskstats_next, 998 .next = disk_seqf_next,
678 .stop = diskstats_stop, 999 .stop = disk_seqf_stop,
679 .show = diskstats_show 1000 .show = diskstats_show
680}; 1001};
681#endif /* CONFIG_PROC_FS */ 1002#endif /* CONFIG_PROC_FS */
@@ -690,7 +1011,7 @@ static void media_change_notify_thread(struct work_struct *work)
690 * set environment vars to indicate which event this is for 1011
691 * so that user space will know to go check the media status. 1012 * so that user space will know to go check the media status.
692 */ 1013 */
693 kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp); 1014 kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
694 put_device(gd->driverfs_dev); 1015 put_device(gd->driverfs_dev);
695} 1016}
696 1017
@@ -703,42 +1024,29 @@ void genhd_media_change_notify(struct gendisk *disk)
703EXPORT_SYMBOL_GPL(genhd_media_change_notify); 1024EXPORT_SYMBOL_GPL(genhd_media_change_notify);
704#endif /* 0 */ 1025#endif /* 0 */
705 1026
706struct find_block { 1027dev_t blk_lookup_devt(const char *name, int partno)
707 const char *name;
708 int part;
709};
710
711static int match_id(struct device *dev, void *data)
712{ 1028{
713 struct find_block *find = data; 1029 dev_t devt = MKDEV(0, 0);
1030 struct class_dev_iter iter;
1031 struct device *dev;
714 1032
715 if (dev->type != &disk_type) 1033 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
716 return 0; 1034 while ((dev = class_dev_iter_next(&iter))) {
717 if (strcmp(dev->bus_id, find->name) == 0) {
718 struct gendisk *disk = dev_to_disk(dev); 1035 struct gendisk *disk = dev_to_disk(dev);
719 if (find->part < disk->minors) 1036 struct hd_struct *part;
720 return 1;
721 }
722 return 0;
723}
724 1037
725dev_t blk_lookup_devt(const char *name, int part) 1038 if (strcmp(dev->bus_id, name))
726{ 1039 continue;
727 struct device *dev;
728 dev_t devt = MKDEV(0, 0);
729 struct find_block find;
730 1040
731 mutex_lock(&block_class_lock); 1041 part = disk_get_part(disk, partno);
732 find.name = name; 1042 if (part) {
733 find.part = part; 1043 devt = part_devt(part);
734 dev = class_find_device(&block_class, NULL, &find, match_id); 1044 disk_put_part(part);
735 if (dev) { 1045 break;
736 put_device(dev); 1046 }
737 devt = MKDEV(MAJOR(dev->devt), 1047 disk_put_part(part);
738 MINOR(dev->devt) + part);
739 } 1048 }
740 mutex_unlock(&block_class_lock); 1049 class_dev_iter_exit(&iter);
741
742 return devt; 1050 return devt;
743} 1051}
744EXPORT_SYMBOL(blk_lookup_devt); 1052EXPORT_SYMBOL(blk_lookup_devt);
@@ -747,6 +1055,7 @@ struct gendisk *alloc_disk(int minors)
747{ 1055{
748 return alloc_disk_node(minors, -1); 1056 return alloc_disk_node(minors, -1);
749} 1057}
1058EXPORT_SYMBOL(alloc_disk);
750 1059
751struct gendisk *alloc_disk_node(int minors, int node_id) 1060struct gendisk *alloc_disk_node(int minors, int node_id)
752{ 1061{
@@ -755,32 +1064,28 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
755 disk = kmalloc_node(sizeof(struct gendisk), 1064 disk = kmalloc_node(sizeof(struct gendisk),
756 GFP_KERNEL | __GFP_ZERO, node_id); 1065 GFP_KERNEL | __GFP_ZERO, node_id);
757 if (disk) { 1066 if (disk) {
758 if (!init_disk_stats(disk)) { 1067 if (!init_part_stats(&disk->part0)) {
759 kfree(disk); 1068 kfree(disk);
760 return NULL; 1069 return NULL;
761 } 1070 }
762 if (minors > 1) { 1071 if (disk_expand_part_tbl(disk, 0)) {
763 int size = (minors - 1) * sizeof(struct hd_struct *); 1072 free_part_stats(&disk->part0);
764 disk->part = kmalloc_node(size, 1073 kfree(disk);
765 GFP_KERNEL | __GFP_ZERO, node_id); 1074 return NULL;
766 if (!disk->part) {
767 free_disk_stats(disk);
768 kfree(disk);
769 return NULL;
770 }
771 } 1075 }
1076 disk->part_tbl->part[0] = &disk->part0;
1077
772 disk->minors = minors; 1078 disk->minors = minors;
773 rand_initialize_disk(disk); 1079 rand_initialize_disk(disk);
774 disk->dev.class = &block_class; 1080 disk_to_dev(disk)->class = &block_class;
775 disk->dev.type = &disk_type; 1081 disk_to_dev(disk)->type = &disk_type;
776 device_initialize(&disk->dev); 1082 device_initialize(disk_to_dev(disk));
777 INIT_WORK(&disk->async_notify, 1083 INIT_WORK(&disk->async_notify,
778 media_change_notify_thread); 1084 media_change_notify_thread);
1085 disk->node_id = node_id;
779 } 1086 }
780 return disk; 1087 return disk;
781} 1088}
782
783EXPORT_SYMBOL(alloc_disk);
784EXPORT_SYMBOL(alloc_disk_node); 1089EXPORT_SYMBOL(alloc_disk_node);
785 1090
786struct kobject *get_disk(struct gendisk *disk) 1091struct kobject *get_disk(struct gendisk *disk)
@@ -793,7 +1098,7 @@ struct kobject *get_disk(struct gendisk *disk)
793 owner = disk->fops->owner; 1098 owner = disk->fops->owner;
794 if (owner && !try_module_get(owner)) 1099 if (owner && !try_module_get(owner))
795 return NULL; 1100 return NULL;
796 kobj = kobject_get(&disk->dev.kobj); 1101 kobj = kobject_get(&disk_to_dev(disk)->kobj);
797 if (kobj == NULL) { 1102 if (kobj == NULL) {
798 module_put(owner); 1103 module_put(owner);
799 return NULL; 1104 return NULL;
@@ -807,27 +1112,28 @@ EXPORT_SYMBOL(get_disk);
807void put_disk(struct gendisk *disk) 1112void put_disk(struct gendisk *disk)
808{ 1113{
809 if (disk) 1114 if (disk)
810 kobject_put(&disk->dev.kobj); 1115 kobject_put(&disk_to_dev(disk)->kobj);
811} 1116}
812 1117
813EXPORT_SYMBOL(put_disk); 1118EXPORT_SYMBOL(put_disk);
814 1119
815void set_device_ro(struct block_device *bdev, int flag) 1120void set_device_ro(struct block_device *bdev, int flag)
816{ 1121{
817 if (bdev->bd_contains != bdev) 1122 bdev->bd_part->policy = flag;
818 bdev->bd_part->policy = flag;
819 else
820 bdev->bd_disk->policy = flag;
821} 1123}
822 1124
823EXPORT_SYMBOL(set_device_ro); 1125EXPORT_SYMBOL(set_device_ro);
824 1126
825void set_disk_ro(struct gendisk *disk, int flag) 1127void set_disk_ro(struct gendisk *disk, int flag)
826{ 1128{
827 int i; 1129 struct disk_part_iter piter;
828 disk->policy = flag; 1130 struct hd_struct *part;
829 for (i = 0; i < disk->minors - 1; i++) 1131
830 if (disk->part[i]) disk->part[i]->policy = flag; 1132 disk_part_iter_init(&piter, disk,
1133 DISK_PITER_INCL_EMPTY | DISK_PITER_INCL_PART0);
1134 while ((part = disk_part_iter_next(&piter)))
1135 part->policy = flag;
1136 disk_part_iter_exit(&piter);
831} 1137}
832 1138
833EXPORT_SYMBOL(set_disk_ro); 1139EXPORT_SYMBOL(set_disk_ro);
@@ -836,18 +1142,15 @@ int bdev_read_only(struct block_device *bdev)
836{ 1142{
837 if (!bdev) 1143 if (!bdev)
838 return 0; 1144 return 0;
839 else if (bdev->bd_contains != bdev) 1145 return bdev->bd_part->policy;
840 return bdev->bd_part->policy;
841 else
842 return bdev->bd_disk->policy;
843} 1146}
844 1147
845EXPORT_SYMBOL(bdev_read_only); 1148EXPORT_SYMBOL(bdev_read_only);
846 1149
847int invalidate_partition(struct gendisk *disk, int index) 1150int invalidate_partition(struct gendisk *disk, int partno)
848{ 1151{
849 int res = 0; 1152 int res = 0;
850 struct block_device *bdev = bdget_disk(disk, index); 1153 struct block_device *bdev = bdget_disk(disk, partno);
851 if (bdev) { 1154 if (bdev) {
852 fsync_bdev(bdev); 1155 fsync_bdev(bdev);
853 res = __invalidate_device(bdev); 1156 res = __invalidate_device(bdev);
diff --git a/block/ioctl.c b/block/ioctl.c
index 77185e5c026a..38bee321e1fa 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -12,11 +12,12 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
12{ 12{
13 struct block_device *bdevp; 13 struct block_device *bdevp;
14 struct gendisk *disk; 14 struct gendisk *disk;
15 struct hd_struct *part;
15 struct blkpg_ioctl_arg a; 16 struct blkpg_ioctl_arg a;
16 struct blkpg_partition p; 17 struct blkpg_partition p;
18 struct disk_part_iter piter;
17 long long start, length; 19 long long start, length;
18 int part; 20 int partno;
19 int i;
20 int err; 21 int err;
21 22
22 if (!capable(CAP_SYS_ADMIN)) 23 if (!capable(CAP_SYS_ADMIN))
@@ -28,8 +29,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
28 disk = bdev->bd_disk; 29 disk = bdev->bd_disk;
29 if (bdev != bdev->bd_contains) 30 if (bdev != bdev->bd_contains)
30 return -EINVAL; 31 return -EINVAL;
31 part = p.pno; 32 partno = p.pno;
32 if (part <= 0 || part >= disk->minors) 33 if (partno <= 0)
33 return -EINVAL; 34 return -EINVAL;
34 switch (a.op) { 35 switch (a.op) {
35 case BLKPG_ADD_PARTITION: 36 case BLKPG_ADD_PARTITION:
@@ -43,36 +44,37 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
43 || pstart < 0 || plength < 0) 44 || pstart < 0 || plength < 0)
44 return -EINVAL; 45 return -EINVAL;
45 } 46 }
46 /* partition number in use? */ 47
47 mutex_lock(&bdev->bd_mutex); 48 mutex_lock(&bdev->bd_mutex);
48 if (disk->part[part - 1]) {
49 mutex_unlock(&bdev->bd_mutex);
50 return -EBUSY;
51 }
52 /* overlap? */
53 for (i = 0; i < disk->minors - 1; i++) {
54 struct hd_struct *s = disk->part[i];
55 49
56 if (!s) 50 /* overlap? */
57 continue; 51 disk_part_iter_init(&piter, disk,
58 if (!(start+length <= s->start_sect || 52 DISK_PITER_INCL_EMPTY);
59 start >= s->start_sect + s->nr_sects)) { 53 while ((part = disk_part_iter_next(&piter))) {
54 if (!(start + length <= part->start_sect ||
55 start >= part->start_sect + part->nr_sects)) {
56 disk_part_iter_exit(&piter);
60 mutex_unlock(&bdev->bd_mutex); 57 mutex_unlock(&bdev->bd_mutex);
61 return -EBUSY; 58 return -EBUSY;
62 } 59 }
63 } 60 }
61 disk_part_iter_exit(&piter);
62
64 /* all seems OK */ 63 /* all seems OK */
65 err = add_partition(disk, part, start, length, ADDPART_FLAG_NONE); 64 err = add_partition(disk, partno, start, length,
65 ADDPART_FLAG_NONE);
66 mutex_unlock(&bdev->bd_mutex); 66 mutex_unlock(&bdev->bd_mutex);
67 return err; 67 return err;
68 case BLKPG_DEL_PARTITION: 68 case BLKPG_DEL_PARTITION:
69 if (!disk->part[part-1]) 69 part = disk_get_part(disk, partno);
70 return -ENXIO; 70 if (!part)
71 if (disk->part[part - 1]->nr_sects == 0)
72 return -ENXIO; 71 return -ENXIO;
73 bdevp = bdget_disk(disk, part); 72
73 bdevp = bdget(part_devt(part));
74 disk_put_part(part);
74 if (!bdevp) 75 if (!bdevp)
75 return -ENOMEM; 76 return -ENOMEM;
77
76 mutex_lock(&bdevp->bd_mutex); 78 mutex_lock(&bdevp->bd_mutex);
77 if (bdevp->bd_openers) { 79 if (bdevp->bd_openers) {
78 mutex_unlock(&bdevp->bd_mutex); 80 mutex_unlock(&bdevp->bd_mutex);
@@ -84,7 +86,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
84 invalidate_bdev(bdevp); 86 invalidate_bdev(bdevp);
85 87
86 mutex_lock_nested(&bdev->bd_mutex, 1); 88 mutex_lock_nested(&bdev->bd_mutex, 1);
87 delete_partition(disk, part); 89 delete_partition(disk, partno);
88 mutex_unlock(&bdev->bd_mutex); 90 mutex_unlock(&bdev->bd_mutex);
89 mutex_unlock(&bdevp->bd_mutex); 91 mutex_unlock(&bdevp->bd_mutex);
90 bdput(bdevp); 92 bdput(bdevp);
@@ -100,7 +102,7 @@ static int blkdev_reread_part(struct block_device *bdev)
100 struct gendisk *disk = bdev->bd_disk; 102 struct gendisk *disk = bdev->bd_disk;
101 int res; 103 int res;
102 104
103 if (disk->minors == 1 || bdev != bdev->bd_contains) 105 if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
104 return -EINVAL; 106 return -EINVAL;
105 if (!capable(CAP_SYS_ADMIN)) 107 if (!capable(CAP_SYS_ADMIN))
106 return -EACCES; 108 return -EACCES;
@@ -111,6 +113,69 @@ static int blkdev_reread_part(struct block_device *bdev)
111 return res; 113 return res;
112} 114}
113 115
116static void blk_ioc_discard_endio(struct bio *bio, int err)
117{
118 if (err) {
119 if (err == -EOPNOTSUPP)
120 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
121 clear_bit(BIO_UPTODATE, &bio->bi_flags);
122 }
123 complete(bio->bi_private);
124}
125
126static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
127 uint64_t len)
128{
129 struct request_queue *q = bdev_get_queue(bdev);
130 int ret = 0;
131
132 if (start & 511)
133 return -EINVAL;
134 if (len & 511)
135 return -EINVAL;
136 start >>= 9;
137 len >>= 9;
138
139 if (start + len > (bdev->bd_inode->i_size >> 9))
140 return -EINVAL;
141
142 if (!q->prepare_discard_fn)
143 return -EOPNOTSUPP;
144
145 while (len && !ret) {
146 DECLARE_COMPLETION_ONSTACK(wait);
147 struct bio *bio;
148
149 bio = bio_alloc(GFP_KERNEL, 0);
150 if (!bio)
151 return -ENOMEM;
152
153 bio->bi_end_io = blk_ioc_discard_endio;
154 bio->bi_bdev = bdev;
155 bio->bi_private = &wait;
156 bio->bi_sector = start;
157
158 if (len > q->max_hw_sectors) {
159 bio->bi_size = q->max_hw_sectors << 9;
160 len -= q->max_hw_sectors;
161 start += q->max_hw_sectors;
162 } else {
163 bio->bi_size = len << 9;
164 len = 0;
165 }
166 submit_bio(DISCARD_NOBARRIER, bio);
167
168 wait_for_completion(&wait);
169
170 if (bio_flagged(bio, BIO_EOPNOTSUPP))
171 ret = -EOPNOTSUPP;
172 else if (!bio_flagged(bio, BIO_UPTODATE))
173 ret = -EIO;
174 bio_put(bio);
175 }
176 return ret;
177}
178
114static int put_ushort(unsigned long arg, unsigned short val) 179static int put_ushort(unsigned long arg, unsigned short val)
115{ 180{
116 return put_user(val, (unsigned short __user *)arg); 181 return put_user(val, (unsigned short __user *)arg);
@@ -258,6 +323,19 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
258 set_device_ro(bdev, n); 323 set_device_ro(bdev, n);
259 unlock_kernel(); 324 unlock_kernel();
260 return 0; 325 return 0;
326
327 case BLKDISCARD: {
328 uint64_t range[2];
329
330 if (!(file->f_mode & FMODE_WRITE))
331 return -EBADF;
332
333 if (copy_from_user(range, (void __user *)arg, sizeof(range)))
334 return -EFAULT;
335
336 return blk_ioctl_discard(bdev, range[0], range[1]);
337 }
338
261 case HDIO_GETGEO: { 339 case HDIO_GETGEO: {
262 struct hd_geometry geo; 340 struct hd_geometry geo;
263 341
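
The new BLKDISCARD ioctl takes two 64-bit values, a byte offset and a byte length, both of which must be 512-byte aligned, and the file must be open for writing. A small user-space program exercising it (assuming BLKDISCARD is visible through <linux/fs.h> on a kernel carrying this patch; expect EOPNOTSUPP when the queue has no prepare_discard_fn):

/* build: cc -O2 -o blkdiscard-demo blkdiscard-demo.c */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>                   /* BLKDISCARD */

int main(int argc, char **argv)
{
        uint64_t range[2];
        int fd;

        if (argc != 4) {
                fprintf(stderr, "usage: %s <device> <start-bytes> <len-bytes>\n",
                        argv[0]);
                return 1;
        }

        fd = open(argv[1], O_WRONLY);   /* kernel checks FMODE_WRITE */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        range[0] = strtoull(argv[2], NULL, 0);  /* start offset in bytes */
        range[1] = strtoull(argv[3], NULL, 0);  /* length in bytes */

        if (ioctl(fd, BLKDISCARD, range) < 0) {
                perror("BLKDISCARD");
                close(fd);
                return 1;
        }

        close(fd);
        return 0;
}
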
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ec4b7f234626..c34272a348fe 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -185,6 +185,7 @@ void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
185 __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok); 185 __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
186 __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok); 186 __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
187 __set_bit(GPCMD_SET_STREAMING, filter->write_ok); 187 __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
188 __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
188} 189}
189EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults); 190EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);
190 191
@@ -313,11 +314,12 @@ static int sg_io(struct file *file, struct request_queue *q,
313 goto out; 314 goto out;
314 } 315 }
315 316
316 ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count, 317 ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
317 hdr->dxfer_len); 318 hdr->dxfer_len, GFP_KERNEL);
318 kfree(iov); 319 kfree(iov);
319 } else if (hdr->dxfer_len) 320 } else if (hdr->dxfer_len)
320 ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); 321 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
322 GFP_KERNEL);
321 323
322 if (ret) 324 if (ret)
323 goto out; 325 goto out;
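
The sg_io() change above tracks a new blk_rq_map_user()/blk_rq_map_user_iov() signature: a third argument (NULL here, selecting the default mapping behaviour) and an explicit gfp_t. A hedged one-function sketch of the updated single-buffer call as used above; the wrapper name and its parameters are illustrative:

#include <linux/blkdev.h>

static int my_map_user_buf(struct request_queue *q, struct request *rq,
                           void __user *ubuf, unsigned int len)
{
        /* NULL mapping data, allocation context made explicit */
        return blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
}
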
diff --git a/crypto/Kconfig b/crypto/Kconfig
index d83185915eee..39dbd8e4dde1 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -21,6 +21,14 @@ if CRYPTO
21 21
22comment "Crypto core or helper" 22comment "Crypto core or helper"
23 23
24config CRYPTO_FIPS
25 bool "FIPS 200 compliance"
26 help
27 This option enables the fips boot option, which is
28 required if you want the system to operate in accordance
29 with FIPS 200 certification. You should say no unless you
30 know what this is.
31
24config CRYPTO_ALGAPI 32config CRYPTO_ALGAPI
25 tristate 33 tristate
26 help 34 help
@@ -33,14 +41,21 @@ config CRYPTO_AEAD
33config CRYPTO_BLKCIPHER 41config CRYPTO_BLKCIPHER
34 tristate 42 tristate
35 select CRYPTO_ALGAPI 43 select CRYPTO_ALGAPI
44 select CRYPTO_RNG
36 45
37config CRYPTO_HASH 46config CRYPTO_HASH
38 tristate 47 tristate
39 select CRYPTO_ALGAPI 48 select CRYPTO_ALGAPI
40 49
50config CRYPTO_RNG
51 tristate
52 select CRYPTO_ALGAPI
53
41config CRYPTO_MANAGER 54config CRYPTO_MANAGER
42 tristate "Cryptographic algorithm manager" 55 tristate "Cryptographic algorithm manager"
43 select CRYPTO_ALGAPI 56 select CRYPTO_AEAD
57 select CRYPTO_HASH
58 select CRYPTO_BLKCIPHER
44 help 59 help
45 Create default cryptographic template instantiations such as 60 Create default cryptographic template instantiations such as
46 cbc(aes). 61 cbc(aes).
@@ -85,9 +100,7 @@ config CRYPTO_AUTHENC
85config CRYPTO_TEST 100config CRYPTO_TEST
86 tristate "Testing module" 101 tristate "Testing module"
87 depends on m 102 depends on m
88 select CRYPTO_ALGAPI 103 select CRYPTO_MANAGER
89 select CRYPTO_AEAD
90 select CRYPTO_BLKCIPHER
91 help 104 help
92 Quick & dirty crypto test module. 105 Quick & dirty crypto test module.
93 106
@@ -113,6 +126,7 @@ config CRYPTO_SEQIV
113 tristate "Sequence Number IV Generator" 126 tristate "Sequence Number IV Generator"
114 select CRYPTO_AEAD 127 select CRYPTO_AEAD
115 select CRYPTO_BLKCIPHER 128 select CRYPTO_BLKCIPHER
129 select CRYPTO_RNG
116 help 130 help
117 This IV generator generates an IV based on a sequence number by 131 This IV generator generates an IV based on a sequence number by
118 xoring it with a salt. This algorithm is mainly useful for CTR 132 xoring it with a salt. This algorithm is mainly useful for CTR
@@ -219,7 +233,19 @@ config CRYPTO_CRC32C
219 Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used 233 Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
220 by iSCSI for header and data digests and by others. 234 by iSCSI for header and data digests and by others.
221 See Castagnoli93. This implementation uses lib/libcrc32c. 235 See Castagnoli93. This implementation uses lib/libcrc32c.
222 Module will be crc32c. 236 Module will be crc32c.
237
238config CRYPTO_CRC32C_INTEL
239 tristate "CRC32c INTEL hardware acceleration"
240 depends on X86
241 select CRYPTO_HASH
242 help
243 On Intel processors with SSE4.2 support, CRC32c can be
244 computed with the hardware-accelerated CRC32 instruction.
245 This option creates the 'crc32c-intel' module, which lets
246 any user of CRC32c gain performance over the software
247 implementation by using that instruction.
248 Module will be crc32c-intel.
223 249
224config CRYPTO_MD4 250config CRYPTO_MD4
225 tristate "MD4 digest algorithm" 251 tristate "MD4 digest algorithm"
@@ -243,55 +269,58 @@ config CRYPTO_MICHAEL_MIC
243 of the algorithm. 269 of the algorithm.
244 270
245config CRYPTO_RMD128 271config CRYPTO_RMD128
246 tristate "RIPEMD-128 digest algorithm" 272 tristate "RIPEMD-128 digest algorithm"
247 select CRYPTO_ALGAPI 273 select CRYPTO_ALGAPI
248 help 274 help
249 RIPEMD-128 (ISO/IEC 10118-3:2004). 275 RIPEMD-128 (ISO/IEC 10118-3:2004).
250 276
251 RIPEMD-128 is a 128-bit cryptographic hash function. It should only 277 RIPEMD-128 is a 128-bit cryptographic hash function. It should only
252 be used as a secure replacement for RIPEMD. For other use cases 278 be used as a secure replacement for RIPEMD. For other use cases
253 RIPEMD-160 should be used. 279 RIPEMD-160 should be used.
254 280
255 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 281 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
256 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 282 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
257 283
258config CRYPTO_RMD160 284config CRYPTO_RMD160
259 tristate "RIPEMD-160 digest algorithm" 285 tristate "RIPEMD-160 digest algorithm"
260 select CRYPTO_ALGAPI 286 select CRYPTO_ALGAPI
261 help 287 help
262 RIPEMD-160 (ISO/IEC 10118-3:2004). 288 RIPEMD-160 (ISO/IEC 10118-3:2004).
263 289
264 RIPEMD-160 is a 160-bit cryptographic hash function. It is intended 290 RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
265 to be used as a secure replacement for the 128-bit hash functions 291 to be used as a secure replacement for the 128-bit hash functions
266 MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128). 292 MD4, MD5 and its predecessor RIPEMD
293 (not to be confused with RIPEMD-128).
267 294
268 Its speed is comparable to SHA1 and there are no known attacks against 295 Its speed is comparable to SHA1 and there are no known attacks
269 RIPEMD-160. 296 against RIPEMD-160.
270 297
271 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 298 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
272 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 299 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
273 300
274config CRYPTO_RMD256 301config CRYPTO_RMD256
275 tristate "RIPEMD-256 digest algorithm" 302 tristate "RIPEMD-256 digest algorithm"
276 select CRYPTO_ALGAPI 303 select CRYPTO_ALGAPI
277 help 304 help
278 RIPEMD-256 is an optional extension of RIPEMD-128 with a 256 bit hash. 305 RIPEMD-256 is an optional extension of RIPEMD-128 with a
279 It is intended for applications that require longer hash-results, without 306 256 bit hash. It is intended for applications that require
280 needing a larger security level (than RIPEMD-128). 307 longer hash-results, without needing a larger security level
308 (than RIPEMD-128).
281 309
282 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 310 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
283 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 311 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
284 312
285config CRYPTO_RMD320 313config CRYPTO_RMD320
286 tristate "RIPEMD-320 digest algorithm" 314 tristate "RIPEMD-320 digest algorithm"
287 select CRYPTO_ALGAPI 315 select CRYPTO_ALGAPI
288 help 316 help
289 RIPEMD-320 is an optional extension of RIPEMD-160 with a 320 bit hash. 317 RIPEMD-320 is an optional extension of RIPEMD-160 with a
290 It is intended for applications that require longer hash-results, without 318 320 bit hash. It is intended for applications that require
291 needing a larger security level (than RIPEMD-160). 319 longer hash-results, without needing a larger security level
320 (than RIPEMD-160).
292 321
293 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel. 322 Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
294 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html> 323 See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
295 324
296config CRYPTO_SHA1 325config CRYPTO_SHA1
297 tristate "SHA1 digest algorithm" 326 tristate "SHA1 digest algorithm"
@@ -308,8 +337,8 @@ config CRYPTO_SHA256
308 This version of SHA implements a 256 bit hash with 128 bits of 337 This version of SHA implements a 256 bit hash with 128 bits of
309 security against collision attacks. 338 security against collision attacks.
310 339
311 This code also includes SHA-224, a 224 bit hash with 112 bits 340 This code also includes SHA-224, a 224 bit hash with 112 bits
312 of security against collision attacks. 341 of security against collision attacks.
313 342
314config CRYPTO_SHA512 343config CRYPTO_SHA512
315 tristate "SHA384 and SHA512 digest algorithms" 344 tristate "SHA384 and SHA512 digest algorithms"
@@ -666,6 +695,18 @@ config CRYPTO_LZO
666 help 695 help
667 This is the LZO algorithm. 696 This is the LZO algorithm.
668 697
698comment "Random Number Generation"
699
700config CRYPTO_ANSI_CPRNG
701 tristate "Pseudo Random Number Generation for Cryptographic modules"
702 select CRYPTO_AES
703 select CRYPTO_RNG
704 select CRYPTO_FIPS
705 help
706 This option enables the generic pseudo random number generator
707 for cryptographic modules. It uses the algorithm specified
708 in ANSI X9.31 A.2.4.
709
669source "drivers/crypto/Kconfig" 710source "drivers/crypto/Kconfig"
670 711
671endif # if CRYPTO 712endif # if CRYPTO
diff --git a/crypto/Makefile b/crypto/Makefile
index d4f3ed857df0..5862b807334e 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -5,6 +5,8 @@
5obj-$(CONFIG_CRYPTO) += crypto.o 5obj-$(CONFIG_CRYPTO) += crypto.o
6crypto-objs := api.o cipher.o digest.o compress.o 6crypto-objs := api.o cipher.o digest.o compress.o
7 7
8obj-$(CONFIG_CRYPTO_FIPS) += fips.o
9
8crypto_algapi-$(CONFIG_PROC_FS) += proc.o 10crypto_algapi-$(CONFIG_PROC_FS) += proc.o
9crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y) 11crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
10obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o 12obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
@@ -13,15 +15,17 @@ obj-$(CONFIG_CRYPTO_AEAD) += aead.o
13 15
14crypto_blkcipher-objs := ablkcipher.o 16crypto_blkcipher-objs := ablkcipher.o
15crypto_blkcipher-objs += blkcipher.o 17crypto_blkcipher-objs += blkcipher.o
16crypto_blkcipher-objs += chainiv.o
17crypto_blkcipher-objs += eseqiv.o
18obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o 18obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o
19obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o
20obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o
19obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o 21obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
20 22
21crypto_hash-objs := hash.o 23crypto_hash-objs := hash.o
22crypto_hash-objs += ahash.o 24crypto_hash-objs += ahash.o
23obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o 25obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
24 26
27cryptomgr-objs := algboss.o testmgr.o
28
25obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o 29obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
26obj-$(CONFIG_CRYPTO_HMAC) += hmac.o 30obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
27obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o 31obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
@@ -69,7 +73,9 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
69obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o 73obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
70obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o 74obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
71obj-$(CONFIG_CRYPTO_LZO) += lzo.o 75obj-$(CONFIG_CRYPTO_LZO) += lzo.o
72 76obj-$(CONFIG_CRYPTO_RNG) += rng.o
77obj-$(CONFIG_CRYPTO_RNG) += krng.o
78obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
73obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o 79obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
74 80
75# 81#
diff --git a/crypto/algapi.c b/crypto/algapi.c
index e65cb50cf4af..7c41e7405c41 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -21,15 +21,15 @@
21 21
22#include "internal.h" 22#include "internal.h"
23 23
24static void crypto_remove_final(struct list_head *list);
25
24static LIST_HEAD(crypto_template_list); 26static LIST_HEAD(crypto_template_list);
25 27
26void crypto_larval_error(const char *name, u32 type, u32 mask) 28void crypto_larval_error(const char *name, u32 type, u32 mask)
27{ 29{
28 struct crypto_alg *alg; 30 struct crypto_alg *alg;
29 31
30 down_read(&crypto_alg_sem); 32 alg = crypto_alg_lookup(name, type, mask);
31 alg = __crypto_alg_lookup(name, type, mask);
32 up_read(&crypto_alg_sem);
33 33
34 if (alg) { 34 if (alg) {
35 if (crypto_is_larval(alg)) { 35 if (crypto_is_larval(alg)) {
@@ -128,23 +128,97 @@ static void crypto_remove_spawns(struct list_head *spawns,
128 } 128 }
129} 129}
130 130
131static int __crypto_register_alg(struct crypto_alg *alg, 131static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
132 struct list_head *list)
133{ 132{
134 struct crypto_alg *q; 133 struct crypto_alg *q;
134 struct crypto_larval *larval;
135 int ret = -EAGAIN; 135 int ret = -EAGAIN;
136 136
137 if (crypto_is_dead(alg)) 137 if (crypto_is_dead(alg))
138 goto out; 138 goto err;
139 139
140 INIT_LIST_HEAD(&alg->cra_users); 140 INIT_LIST_HEAD(&alg->cra_users);
141 141
142 /* No cheating! */
143 alg->cra_flags &= ~CRYPTO_ALG_TESTED;
144
142 ret = -EEXIST; 145 ret = -EEXIST;
143 146
144 atomic_set(&alg->cra_refcnt, 1); 147 atomic_set(&alg->cra_refcnt, 1);
145 list_for_each_entry(q, &crypto_alg_list, cra_list) { 148 list_for_each_entry(q, &crypto_alg_list, cra_list) {
146 if (q == alg) 149 if (q == alg)
147 goto out; 150 goto err;
151
152 if (crypto_is_larval(q)) {
153 if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
154 goto err;
155 continue;
156 }
157
158 if (!strcmp(q->cra_driver_name, alg->cra_name) ||
159 !strcmp(q->cra_name, alg->cra_driver_name))
160 goto err;
161 }
162
163 larval = crypto_larval_alloc(alg->cra_name,
164 alg->cra_flags | CRYPTO_ALG_TESTED, 0);
165 if (IS_ERR(larval))
166 goto out;
167
168 ret = -ENOENT;
169 larval->adult = crypto_mod_get(alg);
170 if (!larval->adult)
171 goto free_larval;
172
173 atomic_set(&larval->alg.cra_refcnt, 1);
174 memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
175 CRYPTO_MAX_ALG_NAME);
176 larval->alg.cra_priority = alg->cra_priority;
177
178 list_add(&alg->cra_list, &crypto_alg_list);
179 list_add(&larval->alg.cra_list, &crypto_alg_list);
180
181out:
182 return larval;
183
184free_larval:
185 kfree(larval);
186err:
187 larval = ERR_PTR(ret);
188 goto out;
189}
190
191void crypto_alg_tested(const char *name, int err)
192{
193 struct crypto_larval *test;
194 struct crypto_alg *alg;
195 struct crypto_alg *q;
196 LIST_HEAD(list);
197
198 down_write(&crypto_alg_sem);
199 list_for_each_entry(q, &crypto_alg_list, cra_list) {
200 if (!crypto_is_larval(q))
201 continue;
202
203 test = (struct crypto_larval *)q;
204
205 if (!strcmp(q->cra_driver_name, name))
206 goto found;
207 }
208
209 printk(KERN_ERR "alg: Unexpected test result for %s: %d\n", name, err);
210 goto unlock;
211
212found:
213 alg = test->adult;
214 if (err || list_empty(&alg->cra_list))
215 goto complete;
216
217 alg->cra_flags |= CRYPTO_ALG_TESTED;
218
219 list_for_each_entry(q, &crypto_alg_list, cra_list) {
220 if (q == alg)
221 continue;
148 222
149 if (crypto_is_moribund(q)) 223 if (crypto_is_moribund(q))
150 continue; 224 continue;
@@ -180,17 +254,18 @@ static int __crypto_register_alg(struct crypto_alg *alg,
180 q->cra_priority > alg->cra_priority) 254 q->cra_priority > alg->cra_priority)
181 continue; 255 continue;
182 256
183 crypto_remove_spawns(&q->cra_users, list, alg->cra_flags); 257 crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags);
184 } 258 }
185
186 list_add(&alg->cra_list, &crypto_alg_list);
187 259
188 crypto_notify(CRYPTO_MSG_ALG_REGISTER, alg); 260complete:
189 ret = 0; 261 complete_all(&test->completion);
190 262
191out: 263unlock:
192 return ret; 264 up_write(&crypto_alg_sem);
265
266 crypto_remove_final(&list);
193} 267}
268EXPORT_SYMBOL_GPL(crypto_alg_tested);
194 269
195static void crypto_remove_final(struct list_head *list) 270static void crypto_remove_final(struct list_head *list)
196{ 271{
@@ -203,9 +278,27 @@ static void crypto_remove_final(struct list_head *list)
203 } 278 }
204} 279}
205 280
281static void crypto_wait_for_test(struct crypto_larval *larval)
282{
283 int err;
284
285 err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
286 if (err != NOTIFY_STOP) {
287 if (WARN_ON(err != NOTIFY_DONE))
288 goto out;
289 crypto_alg_tested(larval->alg.cra_driver_name, 0);
290 }
291
292 err = wait_for_completion_interruptible(&larval->completion);
293 WARN_ON(err);
294
295out:
296 crypto_larval_kill(&larval->alg);
297}
298
206int crypto_register_alg(struct crypto_alg *alg) 299int crypto_register_alg(struct crypto_alg *alg)
207{ 300{
208 LIST_HEAD(list); 301 struct crypto_larval *larval;
209 int err; 302 int err;
210 303
211 err = crypto_check_alg(alg); 304 err = crypto_check_alg(alg);
@@ -213,11 +306,14 @@ int crypto_register_alg(struct crypto_alg *alg)
213 return err; 306 return err;
214 307
215 down_write(&crypto_alg_sem); 308 down_write(&crypto_alg_sem);
216 err = __crypto_register_alg(alg, &list); 309 larval = __crypto_register_alg(alg);
217 up_write(&crypto_alg_sem); 310 up_write(&crypto_alg_sem);
218 311
219 crypto_remove_final(&list); 312 if (IS_ERR(larval))
220 return err; 313 return PTR_ERR(larval);
314
315 crypto_wait_for_test(larval);
316 return 0;
221} 317}
222EXPORT_SYMBOL_GPL(crypto_register_alg); 318EXPORT_SYMBOL_GPL(crypto_register_alg);
223 319
@@ -335,8 +431,8 @@ EXPORT_SYMBOL_GPL(crypto_lookup_template);
335int crypto_register_instance(struct crypto_template *tmpl, 431int crypto_register_instance(struct crypto_template *tmpl,
336 struct crypto_instance *inst) 432 struct crypto_instance *inst)
337{ 433{
338 LIST_HEAD(list); 434 struct crypto_larval *larval;
339 int err = -EINVAL; 435 int err;
340 436
341 err = crypto_check_alg(&inst->alg); 437 err = crypto_check_alg(&inst->alg);
342 if (err) 438 if (err)
@@ -346,8 +442,8 @@ int crypto_register_instance(struct crypto_template *tmpl,
346 442
347 down_write(&crypto_alg_sem); 443 down_write(&crypto_alg_sem);
348 444
349 err = __crypto_register_alg(&inst->alg, &list); 445 larval = __crypto_register_alg(&inst->alg);
350 if (err) 446 if (IS_ERR(larval))
351 goto unlock; 447 goto unlock;
352 448
353 hlist_add_head(&inst->list, &tmpl->instances); 449 hlist_add_head(&inst->list, &tmpl->instances);
@@ -356,7 +452,12 @@ int crypto_register_instance(struct crypto_template *tmpl,
356unlock: 452unlock:
357 up_write(&crypto_alg_sem); 453 up_write(&crypto_alg_sem);
358 454
359 crypto_remove_final(&list); 455 err = PTR_ERR(larval);
456 if (IS_ERR(larval))
457 goto err;
458
459 crypto_wait_for_test(larval);
460 err = 0;
360 461
361err: 462err:
362 return err; 463 return err;
diff --git a/crypto/cryptomgr.c b/crypto/algboss.c
index e5e3cf848d42..4601e4267c88 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/algboss.c
@@ -45,6 +45,15 @@ struct cryptomgr_param {
45 45
46 char larval[CRYPTO_MAX_ALG_NAME]; 46 char larval[CRYPTO_MAX_ALG_NAME];
47 char template[CRYPTO_MAX_ALG_NAME]; 47 char template[CRYPTO_MAX_ALG_NAME];
48
49 u32 otype;
50 u32 omask;
51};
52
53struct crypto_test_param {
54 char driver[CRYPTO_MAX_ALG_NAME];
55 char alg[CRYPTO_MAX_ALG_NAME];
56 u32 type;
48}; 57};
49 58
50static int cryptomgr_probe(void *data) 59static int cryptomgr_probe(void *data)
@@ -76,8 +85,7 @@ out:
76 module_put_and_exit(0); 85 module_put_and_exit(0);
77 86
78err: 87err:
79 crypto_larval_error(param->larval, param->type.data.type, 88 crypto_larval_error(param->larval, param->otype, param->omask);
80 param->type.data.mask);
81 goto out; 89 goto out;
82} 90}
83 91
@@ -169,13 +177,65 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
169 177
170 param->type.attr.rta_len = sizeof(param->type); 178 param->type.attr.rta_len = sizeof(param->type);
171 param->type.attr.rta_type = CRYPTOA_TYPE; 179 param->type.attr.rta_type = CRYPTOA_TYPE;
172 param->type.data.type = larval->alg.cra_flags; 180 param->type.data.type = larval->alg.cra_flags & ~CRYPTO_ALG_TESTED;
173 param->type.data.mask = larval->mask; 181 param->type.data.mask = larval->mask & ~CRYPTO_ALG_TESTED;
174 param->tb[0] = &param->type.attr; 182 param->tb[0] = &param->type.attr;
175 183
184 param->otype = larval->alg.cra_flags;
185 param->omask = larval->mask;
186
176 memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); 187 memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
177 188
178 thread = kthread_run(cryptomgr_probe, param, "cryptomgr"); 189 thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
190 if (IS_ERR(thread))
191 goto err_free_param;
192
193 return NOTIFY_STOP;
194
195err_free_param:
196 kfree(param);
197err_put_module:
198 module_put(THIS_MODULE);
199err:
200 return NOTIFY_OK;
201}
202
203static int cryptomgr_test(void *data)
204{
205 struct crypto_test_param *param = data;
206 u32 type = param->type;
207 int err = 0;
208
209 if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
210 CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV))
211 goto skiptest;
212
213 err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
214
215skiptest:
216 crypto_alg_tested(param->driver, err);
217
218 kfree(param);
219 module_put_and_exit(0);
220}
221
222static int cryptomgr_schedule_test(struct crypto_alg *alg)
223{
224 struct task_struct *thread;
225 struct crypto_test_param *param;
226
227 if (!try_module_get(THIS_MODULE))
228 goto err;
229
230 param = kzalloc(sizeof(*param), GFP_KERNEL);
231 if (!param)
232 goto err_put_module;
233
234 memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
235 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
236 param->type = alg->cra_flags;
237
238 thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
179 if (IS_ERR(thread)) 239 if (IS_ERR(thread))
180 goto err_free_param; 240 goto err_free_param;
181 241
@@ -195,6 +255,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
195 switch (msg) { 255 switch (msg) {
196 case CRYPTO_MSG_ALG_REQUEST: 256 case CRYPTO_MSG_ALG_REQUEST:
197 return cryptomgr_schedule_probe(data); 257 return cryptomgr_schedule_probe(data);
258 case CRYPTO_MSG_ALG_REGISTER:
259 return cryptomgr_schedule_test(data);
198 } 260 }
199 261
200 return NOTIFY_DONE; 262 return NOTIFY_DONE;
@@ -206,16 +268,32 @@ static struct notifier_block cryptomgr_notifier = {
206 268
207static int __init cryptomgr_init(void) 269static int __init cryptomgr_init(void)
208{ 270{
209 return crypto_register_notifier(&cryptomgr_notifier); 271 int err;
272
273 err = testmgr_init();
274 if (err)
275 return err;
276
277 err = crypto_register_notifier(&cryptomgr_notifier);
278 if (err)
279 goto free_testmgr;
280
281 return 0;
282
283free_testmgr:
284 testmgr_exit();
285 return err;
210} 286}
211 287
212static void __exit cryptomgr_exit(void) 288static void __exit cryptomgr_exit(void)
213{ 289{
214 int err = crypto_unregister_notifier(&cryptomgr_notifier); 290 int err = crypto_unregister_notifier(&cryptomgr_notifier);
215 BUG_ON(err); 291 BUG_ON(err);
292
293 testmgr_exit();
216} 294}
217 295
218module_init(cryptomgr_init); 296subsys_initcall(cryptomgr_init);
219module_exit(cryptomgr_exit); 297module_exit(cryptomgr_exit);
220 298
221MODULE_LICENSE("GPL"); 299MODULE_LICENSE("GPL");
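Taken together, the algapi.c and algboss.c changes above give every newly registered algorithm a self-test step before it becomes usable. A compact, descriptive summary of the flow (derived from the code above, not additional code):

/*
 * Registration flow after this change:
 *
 *   crypto_register_alg(alg)
 *     -> __crypto_register_alg(): clears CRYPTO_ALG_TESTED on alg and adds
 *        both alg and a test larval (same driver name) to crypto_alg_list
 *     -> crypto_wait_for_test(): sends CRYPTO_MSG_ALG_REGISTER, which
 *        cryptomgr_schedule_test() turns into a "cryptomgr_test" kthread
 *        running alg_test() on the new driver
 *     -> crypto_alg_tested(): on success sets CRYPTO_ALG_TESTED on alg,
 *        then completes the larval, waking the registering thread
 */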
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
new file mode 100644
index 000000000000..72db0fd763cc
--- /dev/null
+++ b/crypto/ansi_cprng.c
@@ -0,0 +1,417 @@
1/*
2 * PRNG: Pseudo Random Number Generator
3 * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
4 * AES 128 cipher
5 *
6 * (C) Neil Horman <nhorman@tuxdriver.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 *
14 */
15
16#include <crypto/internal/rng.h>
17#include <linux/err.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/string.h>
22
23#include "internal.h"
24
25#define DEFAULT_PRNG_KEY "0123456789abcdef"
26#define DEFAULT_PRNG_KSZ 16
27#define DEFAULT_BLK_SZ 16
28#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
29
30/*
31 * Flags for the prng_context flags field
32 */
33
34#define PRNG_FIXED_SIZE 0x1
35#define PRNG_NEED_RESET 0x2
36
37/*
38 * Note: DT is our counter value
39 * I is our intermediate value
40 * V is our seed vector
41 * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
42 * for implementation details
43 */
44
45
46struct prng_context {
47 spinlock_t prng_lock;
48 unsigned char rand_data[DEFAULT_BLK_SZ];
49 unsigned char last_rand_data[DEFAULT_BLK_SZ];
50 unsigned char DT[DEFAULT_BLK_SZ];
51 unsigned char I[DEFAULT_BLK_SZ];
52 unsigned char V[DEFAULT_BLK_SZ];
53 u32 rand_data_valid;
54 struct crypto_cipher *tfm;
55 u32 flags;
56};
57
58static int dbg;
59
60static void hexdump(char *note, unsigned char *buf, unsigned int len)
61{
62 if (dbg) {
63 printk(KERN_CRIT "%s", note);
64 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
65 16, 1,
66 buf, len, false);
67 }
68}
69
70#define dbgprint(format, args...) do {\
71if (dbg)\
72 printk(format, ##args);\
73} while (0)
74
75static void xor_vectors(unsigned char *in1, unsigned char *in2,
76 unsigned char *out, unsigned int size)
77{
78 int i;
79
80 for (i = 0; i < size; i++)
81 out[i] = in1[i] ^ in2[i];
82
83}
84/*
85 * Returns DEFAULT_BLK_SZ bytes of random data per call
86 * returns 0 if generation succeeded, <0 if something went wrong
87 */
88static int _get_more_prng_bytes(struct prng_context *ctx)
89{
90 int i;
91 unsigned char tmp[DEFAULT_BLK_SZ];
92 unsigned char *output = NULL;
93
94
95 dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
96 ctx);
97
98 hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
99 hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
100 hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
101
102 /*
103 * This algorithm is a 3 stage state machine
104 */
105 for (i = 0; i < 3; i++) {
106
107 switch (i) {
108 case 0:
109 /*
110 * Start by encrypting the counter value
111 * This gives us an intermediate value I
112 */
113 memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
114 output = ctx->I;
115 hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
116 break;
117 case 1:
118
119 /*
120 * Next xor I with our secret vector V
121 * encrypt that result to obtain our
122 * pseudo random data which we output
123 */
124 xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
125 hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
126 output = ctx->rand_data;
127 break;
128 case 2:
129 /*
130 * First check that we didn't produce the same
131 * random data that we produced the last time around
132 */
133 if (!memcmp(ctx->rand_data, ctx->last_rand_data,
134 DEFAULT_BLK_SZ)) {
135 printk(KERN_ERR
136 "ctx %p Failed repetition check!\n",
137 ctx);
138 ctx->flags |= PRNG_NEED_RESET;
139 return -EINVAL;
140 }
141 memcpy(ctx->last_rand_data, ctx->rand_data,
142 DEFAULT_BLK_SZ);
143
144 /*
145 * Lastly xor the random data with I
146 * and encrypt that to obtain a new secret vector V
147 */
148 xor_vectors(ctx->rand_data, ctx->I, tmp,
149 DEFAULT_BLK_SZ);
150 output = ctx->V;
151 hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
152 break;
153 }
154
155
156 /* do the encryption */
157 crypto_cipher_encrypt_one(ctx->tfm, output, tmp);
158
159 }
160
161 /*
162 * Now update our DT value
163 */
164 for (i = 0; i < DEFAULT_BLK_SZ; i++) {
165 ctx->DT[i] += 1;
166 if (ctx->DT[i] != 0)
167 break;
168 }
169
170 dbgprint("Returning new block for context %p\n", ctx);
171 ctx->rand_data_valid = 0;
172
173 hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
174 hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
175 hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
176 hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
177
178 return 0;
179}
180
181/* Our exported functions */
182static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
183{
184 unsigned long flags;
185 unsigned char *ptr = buf;
186 unsigned int byte_count = (unsigned int)nbytes;
187 int err;
188
189
190 if (nbytes < 0)
191 return -EINVAL;
192
193 spin_lock_irqsave(&ctx->prng_lock, flags);
194
195 err = -EINVAL;
196 if (ctx->flags & PRNG_NEED_RESET)
197 goto done;
198
199 /*
200 * If the FIXED_SIZE flag is on, only return whole blocks of
201 * pseudo random data
202 */
203 err = -EINVAL;
204 if (ctx->flags & PRNG_FIXED_SIZE) {
205 if (nbytes < DEFAULT_BLK_SZ)
206 goto done;
207 byte_count = DEFAULT_BLK_SZ;
208 }
209
210 err = byte_count;
211
212 dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
213 byte_count, ctx);
214
215
216remainder:
217 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
218 if (_get_more_prng_bytes(ctx) < 0) {
219 memset(buf, 0, nbytes);
220 err = -EINVAL;
221 goto done;
222 }
223 }
224
225 /*
226 * Copy up to the next whole block size
227 */
228 if (byte_count < DEFAULT_BLK_SZ) {
229 for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
230 ctx->rand_data_valid++) {
231 *ptr = ctx->rand_data[ctx->rand_data_valid];
232 ptr++;
233 byte_count--;
234 if (byte_count == 0)
235 goto done;
236 }
237 }
238
239 /*
240 * Now copy whole blocks
241 */
242 for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
243 if (_get_more_prng_bytes(ctx) < 0) {
244 memset(buf, 0, nbytes);
245 err = -EINVAL;
246 goto done;
247 }
248 memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
249 ctx->rand_data_valid += DEFAULT_BLK_SZ;
250 ptr += DEFAULT_BLK_SZ;
251 }
252
253 /*
254 * Now copy any extra partial data
255 */
256 if (byte_count)
257 goto remainder;
258
259done:
260 spin_unlock_irqrestore(&ctx->prng_lock, flags);
261 dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
262 err, ctx);
263 return err;
264}
265
266static void free_prng_context(struct prng_context *ctx)
267{
268 crypto_free_cipher(ctx->tfm);
269}
270
271static int reset_prng_context(struct prng_context *ctx,
272 unsigned char *key, size_t klen,
273 unsigned char *V, unsigned char *DT)
274{
275 int ret;
276 int rc = -EINVAL;
277 unsigned char *prng_key;
278
279 spin_lock(&ctx->prng_lock);
280 ctx->flags |= PRNG_NEED_RESET;
281
282 prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
283
284 if (!key)
285 klen = DEFAULT_PRNG_KSZ;
286
287 if (V)
288 memcpy(ctx->V, V, DEFAULT_BLK_SZ);
289 else
290 memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);
291
292 if (DT)
293 memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
294 else
295 memset(ctx->DT, 0, DEFAULT_BLK_SZ);
296
297 memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
298 memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
299
300 if (ctx->tfm)
301 crypto_free_cipher(ctx->tfm);
302
303 ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
304 if (IS_ERR(ctx->tfm)) {
305 dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
306 ctx);
307 ctx->tfm = NULL;
308 goto out;
309 }
310
311 ctx->rand_data_valid = DEFAULT_BLK_SZ;
312
313 ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
314 if (ret) {
315 dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
316 crypto_cipher_get_flags(ctx->tfm));
317 crypto_free_cipher(ctx->tfm);
318 goto out;
319 }
320
321 rc = 0;
322 ctx->flags &= ~PRNG_NEED_RESET;
323out:
324 spin_unlock(&ctx->prng_lock);
325
326 return rc;
327
328}
329
330static int cprng_init(struct crypto_tfm *tfm)
331{
332 struct prng_context *ctx = crypto_tfm_ctx(tfm);
333
334 spin_lock_init(&ctx->prng_lock);
335
336 return reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL);
337}
338
339static void cprng_exit(struct crypto_tfm *tfm)
340{
341 free_prng_context(crypto_tfm_ctx(tfm));
342}
343
344static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata,
345 unsigned int dlen)
346{
347 struct prng_context *prng = crypto_rng_ctx(tfm);
348
349 return get_prng_bytes(rdata, dlen, prng);
350}
351
352static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
353{
354 struct prng_context *prng = crypto_rng_ctx(tfm);
355 u8 *key = seed + DEFAULT_PRNG_KSZ;
356
357 if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
358 return -EINVAL;
359
360 reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, NULL);
361
362 if (prng->flags & PRNG_NEED_RESET)
363 return -EINVAL;
364 return 0;
365}
366
367static struct crypto_alg rng_alg = {
368 .cra_name = "stdrng",
369 .cra_driver_name = "ansi_cprng",
370 .cra_priority = 100,
371 .cra_flags = CRYPTO_ALG_TYPE_RNG,
372 .cra_ctxsize = sizeof(struct prng_context),
373 .cra_type = &crypto_rng_type,
374 .cra_module = THIS_MODULE,
375 .cra_list = LIST_HEAD_INIT(rng_alg.cra_list),
376 .cra_init = cprng_init,
377 .cra_exit = cprng_exit,
378 .cra_u = {
379 .rng = {
380 .rng_make_random = cprng_get_random,
381 .rng_reset = cprng_reset,
382 .seedsize = DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ,
383 }
384 }
385};
386
387
388/* Module initialization */
389static int __init prng_mod_init(void)
390{
391 int ret = 0;
392
393 if (fips_enabled)
394 rng_alg.cra_priority += 200;
395
396 ret = crypto_register_alg(&rng_alg);
397
398 if (ret)
399 goto out;
400out:
401 return ret;
402}
403
404static void __exit prng_mod_fini(void)
405{
406 crypto_unregister_alg(&rng_alg);
407 return;
408}
409
410MODULE_LICENSE("GPL");
411MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
412MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
413module_param(dbg, int, 0);
414MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
415module_init(prng_mod_init);
416module_exit(prng_mod_fini);
417MODULE_ALIAS("stdrng");
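For readers unfamiliar with ANSI X9.31 A.2.4, the three-stage state machine in _get_more_prng_bytes() above reduces to the following recurrence (a paraphrase of the code, using the DT/I/V fields of struct prng_context and AES-128 as the block cipher E_K):

/*
 * One X9.31 A.2.4 iteration, as implemented above:
 *
 *   I = E_K(DT)          stage 0: encrypt the counter
 *   R = E_K(I xor V)     stage 1: R is the random output block
 *   V = E_K(R xor I)     stage 2: new seed vector for the next call
 *
 * followed by a byte-wise increment of DT.
 */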
diff --git a/crypto/api.c b/crypto/api.c
index d06e33270abe..0444d242e985 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -55,7 +55,13 @@ void crypto_mod_put(struct crypto_alg *alg)
55} 55}
56EXPORT_SYMBOL_GPL(crypto_mod_put); 56EXPORT_SYMBOL_GPL(crypto_mod_put);
57 57
58struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask) 58static inline int crypto_is_test_larval(struct crypto_larval *larval)
59{
60 return larval->alg.cra_driver_name[0];
61}
62
63static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
64 u32 mask)
59{ 65{
60 struct crypto_alg *q, *alg = NULL; 66 struct crypto_alg *q, *alg = NULL;
61 int best = -2; 67 int best = -2;
@@ -70,6 +76,7 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask)
70 continue; 76 continue;
71 77
72 if (crypto_is_larval(q) && 78 if (crypto_is_larval(q) &&
79 !crypto_is_test_larval((struct crypto_larval *)q) &&
73 ((struct crypto_larval *)q)->mask != mask) 80 ((struct crypto_larval *)q)->mask != mask)
74 continue; 81 continue;
75 82
@@ -92,7 +99,6 @@ struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask)
92 99
93 return alg; 100 return alg;
94} 101}
95EXPORT_SYMBOL_GPL(__crypto_alg_lookup);
96 102
97static void crypto_larval_destroy(struct crypto_alg *alg) 103static void crypto_larval_destroy(struct crypto_alg *alg)
98{ 104{
@@ -104,10 +110,8 @@ static void crypto_larval_destroy(struct crypto_alg *alg)
104 kfree(larval); 110 kfree(larval);
105} 111}
106 112
107static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type, 113struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
108 u32 mask)
109{ 114{
110 struct crypto_alg *alg;
111 struct crypto_larval *larval; 115 struct crypto_larval *larval;
112 116
113 larval = kzalloc(sizeof(*larval), GFP_KERNEL); 117 larval = kzalloc(sizeof(*larval), GFP_KERNEL);
@@ -119,10 +123,25 @@ static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type,
119 larval->alg.cra_priority = -1; 123 larval->alg.cra_priority = -1;
120 larval->alg.cra_destroy = crypto_larval_destroy; 124 larval->alg.cra_destroy = crypto_larval_destroy;
121 125
122 atomic_set(&larval->alg.cra_refcnt, 2);
123 strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); 126 strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
124 init_completion(&larval->completion); 127 init_completion(&larval->completion);
125 128
129 return larval;
130}
131EXPORT_SYMBOL_GPL(crypto_larval_alloc);
132
133static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
134 u32 mask)
135{
136 struct crypto_alg *alg;
137 struct crypto_larval *larval;
138
139 larval = crypto_larval_alloc(name, type, mask);
140 if (IS_ERR(larval))
141 return ERR_CAST(larval);
142
143 atomic_set(&larval->alg.cra_refcnt, 2);
144
126 down_write(&crypto_alg_sem); 145 down_write(&crypto_alg_sem);
127 alg = __crypto_alg_lookup(name, type, mask); 146 alg = __crypto_alg_lookup(name, type, mask);
128 if (!alg) { 147 if (!alg) {
@@ -152,21 +171,29 @@ EXPORT_SYMBOL_GPL(crypto_larval_kill);
152static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) 171static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
153{ 172{
154 struct crypto_larval *larval = (void *)alg; 173 struct crypto_larval *larval = (void *)alg;
174 long timeout;
175
176 timeout = wait_for_completion_interruptible_timeout(
177 &larval->completion, 60 * HZ);
155 178
156 wait_for_completion_interruptible_timeout(&larval->completion, 60 * HZ);
157 alg = larval->adult; 179 alg = larval->adult;
158 if (alg) { 180 if (timeout < 0)
159 if (!crypto_mod_get(alg)) 181 alg = ERR_PTR(-EINTR);
160 alg = ERR_PTR(-EAGAIN); 182 else if (!timeout)
161 } else 183 alg = ERR_PTR(-ETIMEDOUT);
184 else if (!alg)
162 alg = ERR_PTR(-ENOENT); 185 alg = ERR_PTR(-ENOENT);
186 else if (crypto_is_test_larval(larval) &&
187 !(alg->cra_flags & CRYPTO_ALG_TESTED))
188 alg = ERR_PTR(-EAGAIN);
189 else if (!crypto_mod_get(alg))
190 alg = ERR_PTR(-EAGAIN);
163 crypto_mod_put(&larval->alg); 191 crypto_mod_put(&larval->alg);
164 192
165 return alg; 193 return alg;
166} 194}
167 195
168static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, 196struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
169 u32 mask)
170{ 197{
171 struct crypto_alg *alg; 198 struct crypto_alg *alg;
172 199
@@ -176,6 +203,7 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
176 203
177 return alg; 204 return alg;
178} 205}
206EXPORT_SYMBOL_GPL(crypto_alg_lookup);
179 207
180struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) 208struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
181{ 209{
@@ -192,25 +220,40 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
192 if (alg) 220 if (alg)
193 return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; 221 return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
194 222
195 return crypto_larval_alloc(name, type, mask); 223 return crypto_larval_add(name, type, mask);
196} 224}
197EXPORT_SYMBOL_GPL(crypto_larval_lookup); 225EXPORT_SYMBOL_GPL(crypto_larval_lookup);
198 226
227int crypto_probing_notify(unsigned long val, void *v)
228{
229 int ok;
230
231 ok = blocking_notifier_call_chain(&crypto_chain, val, v);
232 if (ok == NOTIFY_DONE) {
233 request_module("cryptomgr");
234 ok = blocking_notifier_call_chain(&crypto_chain, val, v);
235 }
236
237 return ok;
238}
239EXPORT_SYMBOL_GPL(crypto_probing_notify);
240
199struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) 241struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
200{ 242{
201 struct crypto_alg *alg; 243 struct crypto_alg *alg;
202 struct crypto_alg *larval; 244 struct crypto_alg *larval;
203 int ok; 245 int ok;
204 246
247 if (!(mask & CRYPTO_ALG_TESTED)) {
248 type |= CRYPTO_ALG_TESTED;
249 mask |= CRYPTO_ALG_TESTED;
250 }
251
205 larval = crypto_larval_lookup(name, type, mask); 252 larval = crypto_larval_lookup(name, type, mask);
206 if (IS_ERR(larval) || !crypto_is_larval(larval)) 253 if (IS_ERR(larval) || !crypto_is_larval(larval))
207 return larval; 254 return larval;
208 255
209 ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval); 256 ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
210 if (ok == NOTIFY_DONE) {
211 request_module("cryptomgr");
212 ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval);
213 }
214 257
215 if (ok == NOTIFY_STOP) 258 if (ok == NOTIFY_STOP)
216 alg = crypto_larval_wait(larval); 259 alg = crypto_larval_wait(larval);
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 185f955fb0d7..4a7e65c4df4d 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -696,34 +696,5 @@ void skcipher_geniv_exit(struct crypto_tfm *tfm)
696} 696}
697EXPORT_SYMBOL_GPL(skcipher_geniv_exit); 697EXPORT_SYMBOL_GPL(skcipher_geniv_exit);
698 698
699static int __init blkcipher_module_init(void)
700{
701 int err;
702
703 err = chainiv_module_init();
704 if (err)
705 goto out;
706
707 err = eseqiv_module_init();
708 if (err)
709 goto eseqiv_err;
710
711out:
712 return err;
713
714eseqiv_err:
715 chainiv_module_exit();
716 goto out;
717}
718
719static void __exit blkcipher_module_exit(void)
720{
721 eseqiv_module_exit();
722 chainiv_module_exit();
723}
724
725module_init(blkcipher_module_init);
726module_exit(blkcipher_module_exit);
727
728MODULE_LICENSE("GPL"); 699MODULE_LICENSE("GPL");
729MODULE_DESCRIPTION("Generic block chaining cipher type"); 700MODULE_DESCRIPTION("Generic block chaining cipher type");
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 9affadee3287..7c37a497b860 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -14,11 +14,11 @@
14 */ 14 */
15 15
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <crypto/rng.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/random.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
@@ -83,6 +83,7 @@ static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
83{ 83{
84 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); 84 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
85 struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); 85 struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
86 int err = 0;
86 87
87 spin_lock_bh(&ctx->lock); 88 spin_lock_bh(&ctx->lock);
88 if (crypto_ablkcipher_crt(geniv)->givencrypt != 89 if (crypto_ablkcipher_crt(geniv)->givencrypt !=
@@ -90,11 +91,15 @@ static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
90 goto unlock; 91 goto unlock;
91 92
92 crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt; 93 crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
93 get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv)); 94 err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
95 crypto_ablkcipher_ivsize(geniv));
94 96
95unlock: 97unlock:
96 spin_unlock_bh(&ctx->lock); 98 spin_unlock_bh(&ctx->lock);
97 99
100 if (err)
101 return err;
102
98 return chainiv_givencrypt(req); 103 return chainiv_givencrypt(req);
99} 104}
100 105
@@ -203,6 +208,7 @@ static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
203{ 208{
204 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); 209 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
205 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); 210 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
211 int err = 0;
206 212
207 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) 213 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
208 goto out; 214 goto out;
@@ -212,11 +218,15 @@ static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
212 goto unlock; 218 goto unlock;
213 219
214 crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt; 220 crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
215 get_random_bytes(ctx->iv, crypto_ablkcipher_ivsize(geniv)); 221 err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
222 crypto_ablkcipher_ivsize(geniv));
216 223
217unlock: 224unlock:
218 clear_bit(CHAINIV_STATE_INUSE, &ctx->state); 225 clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
219 226
227 if (err)
228 return err;
229
220out: 230out:
221 return async_chainiv_givencrypt(req); 231 return async_chainiv_givencrypt(req);
222} 232}
@@ -284,9 +294,13 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
284 if (IS_ERR(algt)) 294 if (IS_ERR(algt))
285 return ERR_PTR(err); 295 return ERR_PTR(err);
286 296
297 err = crypto_get_default_rng();
298 if (err)
299 return ERR_PTR(err);
300
287 inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0); 301 inst = skcipher_geniv_alloc(&chainiv_tmpl, tb, 0, 0);
288 if (IS_ERR(inst)) 302 if (IS_ERR(inst))
289 goto out; 303 goto put_rng;
290 304
291 inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first; 305 inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;
292 306
@@ -311,21 +325,37 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
311 325
312out: 326out:
313 return inst; 327 return inst;
328
329put_rng:
330 crypto_put_default_rng();
331 goto out;
332}
333
334static void chainiv_free(struct crypto_instance *inst)
335{
336 skcipher_geniv_free(inst);
337 crypto_put_default_rng();
314} 338}
315 339
316static struct crypto_template chainiv_tmpl = { 340static struct crypto_template chainiv_tmpl = {
317 .name = "chainiv", 341 .name = "chainiv",
318 .alloc = chainiv_alloc, 342 .alloc = chainiv_alloc,
319 .free = skcipher_geniv_free, 343 .free = chainiv_free,
320 .module = THIS_MODULE, 344 .module = THIS_MODULE,
321}; 345};
322 346
323int __init chainiv_module_init(void) 347static int __init chainiv_module_init(void)
324{ 348{
325 return crypto_register_template(&chainiv_tmpl); 349 return crypto_register_template(&chainiv_tmpl);
326} 350}
327 351
328void chainiv_module_exit(void) 352static void chainiv_module_exit(void)
329{ 353{
330 crypto_unregister_template(&chainiv_tmpl); 354 crypto_unregister_template(&chainiv_tmpl);
331} 355}
356
357module_init(chainiv_module_init);
358module_exit(chainiv_module_exit);
359
360MODULE_LICENSE("GPL");
361MODULE_DESCRIPTION("Chain IV Generator");
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 881d30910434..2a342c8e52b3 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -16,13 +16,13 @@
16 */ 16 */
17 17
18#include <crypto/internal/skcipher.h> 18#include <crypto/internal/skcipher.h>
19#include <crypto/rng.h>
19#include <crypto/scatterwalk.h> 20#include <crypto/scatterwalk.h>
20#include <linux/err.h> 21#include <linux/err.h>
21#include <linux/init.h> 22#include <linux/init.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/random.h>
26#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/string.h> 28#include <linux/string.h>
@@ -163,17 +163,22 @@ static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
163{ 163{
164 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); 164 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
165 struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); 165 struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
166 int err = 0;
166 167
167 spin_lock_bh(&ctx->lock); 168 spin_lock_bh(&ctx->lock);
168 if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first) 169 if (crypto_ablkcipher_crt(geniv)->givencrypt != eseqiv_givencrypt_first)
169 goto unlock; 170 goto unlock;
170 171
171 crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt; 172 crypto_ablkcipher_crt(geniv)->givencrypt = eseqiv_givencrypt;
172 get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv)); 173 err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
174 crypto_ablkcipher_ivsize(geniv));
173 175
174unlock: 176unlock:
175 spin_unlock_bh(&ctx->lock); 177 spin_unlock_bh(&ctx->lock);
176 178
179 if (err)
180 return err;
181
177 return eseqiv_givencrypt(req); 182 return eseqiv_givencrypt(req);
178} 183}
179 184
@@ -216,9 +221,13 @@ static struct crypto_instance *eseqiv_alloc(struct rtattr **tb)
216 struct crypto_instance *inst; 221 struct crypto_instance *inst;
217 int err; 222 int err;
218 223
224 err = crypto_get_default_rng();
225 if (err)
226 return ERR_PTR(err);
227
219 inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0); 228 inst = skcipher_geniv_alloc(&eseqiv_tmpl, tb, 0, 0);
220 if (IS_ERR(inst)) 229 if (IS_ERR(inst))
221 goto out; 230 goto put_rng;
222 231
223 err = -EINVAL; 232 err = -EINVAL;
224 if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize) 233 if (inst->alg.cra_ablkcipher.ivsize != inst->alg.cra_blocksize)
@@ -238,22 +247,36 @@ out:
238free_inst: 247free_inst:
239 skcipher_geniv_free(inst); 248 skcipher_geniv_free(inst);
240 inst = ERR_PTR(err); 249 inst = ERR_PTR(err);
250put_rng:
251 crypto_put_default_rng();
241 goto out; 252 goto out;
242} 253}
243 254
255static void eseqiv_free(struct crypto_instance *inst)
256{
257 skcipher_geniv_free(inst);
258 crypto_put_default_rng();
259}
260
244static struct crypto_template eseqiv_tmpl = { 261static struct crypto_template eseqiv_tmpl = {
245 .name = "eseqiv", 262 .name = "eseqiv",
246 .alloc = eseqiv_alloc, 263 .alloc = eseqiv_alloc,
247 .free = skcipher_geniv_free, 264 .free = eseqiv_free,
248 .module = THIS_MODULE, 265 .module = THIS_MODULE,
249}; 266};
250 267
251int __init eseqiv_module_init(void) 268static int __init eseqiv_module_init(void)
252{ 269{
253 return crypto_register_template(&eseqiv_tmpl); 270 return crypto_register_template(&eseqiv_tmpl);
254} 271}
255 272
256void __exit eseqiv_module_exit(void) 273static void __exit eseqiv_module_exit(void)
257{ 274{
258 crypto_unregister_template(&eseqiv_tmpl); 275 crypto_unregister_template(&eseqiv_tmpl);
259} 276}
277
278module_init(eseqiv_module_init);
279module_exit(eseqiv_module_exit);
280
281MODULE_LICENSE("GPL");
282MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
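Both IV generators now draw their initial randomness from the crypto RNG layer instead of calling get_random_bytes() directly. A minimal sketch of that usage pattern (an illustration only; chainiv and eseqiv actually hold the default-RNG reference for the lifetime of the template instance rather than per call):

/*
 * Hypothetical helper: fill a buffer from the default "stdrng" instance.
 * crypto_get_default_rng() takes a reference on crypto_default_rng,
 * crypto_rng_get_bytes() replaces get_random_bytes(), and
 * crypto_put_default_rng() drops the reference again.
 */
#include <crypto/rng.h>

static int example_fill_iv(u8 *iv, unsigned int ivsize)
{
	int err = crypto_get_default_rng();

	if (err)
		return err;

	err = crypto_rng_get_bytes(crypto_default_rng, iv, ivsize);
	crypto_put_default_rng();

	return err;
}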
diff --git a/crypto/fips.c b/crypto/fips.c
new file mode 100644
index 000000000000..553970081c62
--- /dev/null
+++ b/crypto/fips.c
@@ -0,0 +1,27 @@
1/*
2 * FIPS 200 support.
3 *
4 * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include "internal.h"
14
15int fips_enabled;
16EXPORT_SYMBOL_GPL(fips_enabled);
17
18/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
19static int fips_enable(char *str)
20{
21 fips_enabled = !!simple_strtol(str, NULL, 0);
22 printk(KERN_INFO "fips mode: %s\n",
23 fips_enabled ? "enabled" : "disabled");
24 return 1;
25}
26
27__setup("fips=", fips_enable);
diff --git a/crypto/internal.h b/crypto/internal.h
index 683fcb2d91f4..8ef72d76092e 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -26,6 +26,12 @@
26#include <linux/rwsem.h> 26#include <linux/rwsem.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29#ifdef CONFIG_CRYPTO_FIPS
30extern int fips_enabled;
31#else
32#define fips_enabled 0
33#endif
34
29/* Crypto notification events. */ 35/* Crypto notification events. */
30enum { 36enum {
31 CRYPTO_MSG_ALG_REQUEST, 37 CRYPTO_MSG_ALG_REQUEST,
@@ -82,7 +88,7 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
82} 88}
83 89
84struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); 90struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
85struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask); 91struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
86struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); 92struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
87 93
88int crypto_init_digest_ops(struct crypto_tfm *tfm); 94int crypto_init_digest_ops(struct crypto_tfm *tfm);
@@ -94,9 +100,11 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm);
94void crypto_exit_cipher_ops(struct crypto_tfm *tfm); 100void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
95void crypto_exit_compress_ops(struct crypto_tfm *tfm); 101void crypto_exit_compress_ops(struct crypto_tfm *tfm);
96 102
103struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
97void crypto_larval_kill(struct crypto_alg *alg); 104void crypto_larval_kill(struct crypto_alg *alg);
98struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); 105struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
99void crypto_larval_error(const char *name, u32 type, u32 mask); 106void crypto_larval_error(const char *name, u32 type, u32 mask);
107void crypto_alg_tested(const char *name, int err);
100 108
101void crypto_shoot_alg(struct crypto_alg *alg); 109void crypto_shoot_alg(struct crypto_alg *alg);
102struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, 110struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
@@ -107,6 +115,10 @@ int crypto_register_instance(struct crypto_template *tmpl,
107 115
108int crypto_register_notifier(struct notifier_block *nb); 116int crypto_register_notifier(struct notifier_block *nb);
109int crypto_unregister_notifier(struct notifier_block *nb); 117int crypto_unregister_notifier(struct notifier_block *nb);
118int crypto_probing_notify(unsigned long val, void *v);
119
120int __init testmgr_init(void);
121void testmgr_exit(void);
110 122
111static inline void crypto_alg_put(struct crypto_alg *alg) 123static inline void crypto_alg_put(struct crypto_alg *alg)
112{ 124{
@@ -139,9 +151,9 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
139 return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING); 151 return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING);
140} 152}
141 153
142static inline int crypto_notify(unsigned long val, void *v) 154static inline void crypto_notify(unsigned long val, void *v)
143{ 155{
144 return blocking_notifier_call_chain(&crypto_chain, val, v); 156 blocking_notifier_call_chain(&crypto_chain, val, v);
145} 157}
146 158
147#endif /* _CRYPTO_INTERNAL_H */ 159#endif /* _CRYPTO_INTERNAL_H */
diff --git a/crypto/krng.c b/crypto/krng.c
new file mode 100644
index 000000000000..4328bb3430ed
--- /dev/null
+++ b/crypto/krng.c
@@ -0,0 +1,66 @@
1/*
2 * RNG implementation using standard kernel RNG.
3 *
4 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <crypto/internal/rng.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/random.h>
18
19static int krng_get_random(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen)
20{
21 get_random_bytes(rdata, dlen);
22 return 0;
23}
24
25static int krng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
26{
27 return 0;
28}
29
30static struct crypto_alg krng_alg = {
31 .cra_name = "stdrng",
32 .cra_driver_name = "krng",
33 .cra_priority = 200,
34 .cra_flags = CRYPTO_ALG_TYPE_RNG,
35 .cra_ctxsize = 0,
36 .cra_type = &crypto_rng_type,
37 .cra_module = THIS_MODULE,
38 .cra_list = LIST_HEAD_INIT(krng_alg.cra_list),
39 .cra_u = {
40 .rng = {
41 .rng_make_random = krng_get_random,
42 .rng_reset = krng_reset,
43 .seedsize = 0,
44 }
45 }
46};
47
48
49/* Module initialization */
50static int __init krng_mod_init(void)
51{
52 return crypto_register_alg(&krng_alg);
53}
54
55static void __exit krng_mod_fini(void)
56{
57 crypto_unregister_alg(&krng_alg);
58 return;
59}
60
61module_init(krng_mod_init);
62module_exit(krng_mod_fini);
63
64MODULE_LICENSE("GPL");
65MODULE_DESCRIPTION("Kernel Random Number Generator");
66MODULE_ALIAS("stdrng");
diff --git a/crypto/proc.c b/crypto/proc.c
index 02ff5670c158..37a13d05636d 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -19,8 +19,53 @@
19#include <linux/rwsem.h> 19#include <linux/rwsem.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/sysctl.h>
22#include "internal.h" 23#include "internal.h"
23 24
25#ifdef CONFIG_CRYPTO_FIPS
26static struct ctl_table crypto_sysctl_table[] = {
27 {
28 .ctl_name = CTL_UNNUMBERED,
29 .procname = "fips_enabled",
30 .data = &fips_enabled,
31 .maxlen = sizeof(int),
32 .mode = 0444,
33 .proc_handler = &proc_dointvec
34 },
35 {
36 .ctl_name = 0,
37 },
38};
39
40static struct ctl_table crypto_dir_table[] = {
41 {
42 .ctl_name = CTL_UNNUMBERED,
43 .procname = "crypto",
44 .mode = 0555,
45 .child = crypto_sysctl_table
46 },
47 {
48 .ctl_name = 0,
49 },
50};
51
52static struct ctl_table_header *crypto_sysctls;
53
54static void crypto_proc_fips_init(void)
55{
56 crypto_sysctls = register_sysctl_table(crypto_dir_table);
57}
58
59static void crypto_proc_fips_exit(void)
60{
61 if (crypto_sysctls)
62 unregister_sysctl_table(crypto_sysctls);
63}
64#else
65#define crypto_proc_fips_init()
66#define crypto_proc_fips_exit()
67#endif
68
24static void *c_start(struct seq_file *m, loff_t *pos) 69static void *c_start(struct seq_file *m, loff_t *pos)
25{ 70{
26 down_read(&crypto_alg_sem); 71 down_read(&crypto_alg_sem);
@@ -46,8 +91,11 @@ static int c_show(struct seq_file *m, void *p)
46 seq_printf(m, "module : %s\n", module_name(alg->cra_module)); 91 seq_printf(m, "module : %s\n", module_name(alg->cra_module));
47 seq_printf(m, "priority : %d\n", alg->cra_priority); 92 seq_printf(m, "priority : %d\n", alg->cra_priority);
48 seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt)); 93 seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
94 seq_printf(m, "selftest : %s\n",
95 (alg->cra_flags & CRYPTO_ALG_TESTED) ?
96 "passed" : "unknown");
49 97
50 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 98 switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
51 case CRYPTO_ALG_TYPE_CIPHER: 99 case CRYPTO_ALG_TYPE_CIPHER:
52 seq_printf(m, "type : cipher\n"); 100 seq_printf(m, "type : cipher\n");
53 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 101 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
@@ -67,7 +115,10 @@ static int c_show(struct seq_file *m, void *p)
67 seq_printf(m, "type : compression\n"); 115 seq_printf(m, "type : compression\n");
68 break; 116 break;
69 default: 117 default:
70 if (alg->cra_type && alg->cra_type->show) 118 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
119 seq_printf(m, "type : larval\n");
120 seq_printf(m, "flags : 0x%x\n", alg->cra_flags);
121 } else if (alg->cra_type && alg->cra_type->show)
71 alg->cra_type->show(m, alg); 122 alg->cra_type->show(m, alg);
72 else 123 else
73 seq_printf(m, "type : unknown\n"); 124 seq_printf(m, "type : unknown\n");
@@ -100,9 +151,11 @@ static const struct file_operations proc_crypto_ops = {
100void __init crypto_init_proc(void) 151void __init crypto_init_proc(void)
101{ 152{
102 proc_create("crypto", 0, NULL, &proc_crypto_ops); 153 proc_create("crypto", 0, NULL, &proc_crypto_ops);
154 crypto_proc_fips_init();
103} 155}
104 156
105void __exit crypto_exit_proc(void) 157void __exit crypto_exit_proc(void)
106{ 158{
159 crypto_proc_fips_exit();
107 remove_proc_entry("crypto", NULL); 160 remove_proc_entry("crypto", NULL);
108} 161}
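With CONFIG_CRYPTO_FIPS enabled, the table registered above surfaces as a read-only /proc/sys/crypto/fips_enabled entry (the path follows from the "crypto" and "fips_enabled" names in the tables). A small userspace sketch for checking it; the program itself is illustrative.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/crypto/fips_enabled", "r");
	int v;

	if (!f) {
		/* entry is absent unless the kernel was built with CONFIG_CRYPTO_FIPS */
		perror("fips_enabled");
		return 1;
	}
	if (fscanf(f, "%d", &v) != 1)
		v = -1;
	fclose(f);
	printf("fips_enabled = %d\n", v);
	return 0;
}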
diff --git a/crypto/rng.c b/crypto/rng.c
new file mode 100644
index 000000000000..6e94bc735578
--- /dev/null
+++ b/crypto/rng.c
@@ -0,0 +1,126 @@
1/*
2 * Cryptographic API.
3 *
4 * RNG operations.
5 *
6 * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 */
14
15#include <asm/atomic.h>
16#include <crypto/internal/rng.h>
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/random.h>
21#include <linux/seq_file.h>
22#include <linux/string.h>
23
24static DEFINE_MUTEX(crypto_default_rng_lock);
25struct crypto_rng *crypto_default_rng;
26EXPORT_SYMBOL_GPL(crypto_default_rng);
27static int crypto_default_rng_refcnt;
28
29static int rngapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
30{
31 u8 *buf = NULL;
32 int err;
33
34 if (!seed && slen) {
35 buf = kmalloc(slen, GFP_KERNEL);
36 if (!buf)
37 return -ENOMEM;
38
39 get_random_bytes(buf, slen);
40 seed = buf;
41 }
42
43 err = crypto_rng_alg(tfm)->rng_reset(tfm, seed, slen);
44
45 kfree(buf);
46 return err;
47}
48
49static int crypto_init_rng_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
50{
51 struct rng_alg *alg = &tfm->__crt_alg->cra_rng;
52 struct rng_tfm *ops = &tfm->crt_rng;
53
54 ops->rng_gen_random = alg->rng_make_random;
55 ops->rng_reset = rngapi_reset;
56
57 return 0;
58}
59
60static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
61 __attribute__ ((unused));
62static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
63{
64 seq_printf(m, "type : rng\n");
65 seq_printf(m, "seedsize : %u\n", alg->cra_rng.seedsize);
66}
67
68static unsigned int crypto_rng_ctxsize(struct crypto_alg *alg, u32 type,
69 u32 mask)
70{
71 return alg->cra_ctxsize;
72}
73
74const struct crypto_type crypto_rng_type = {
75 .ctxsize = crypto_rng_ctxsize,
76 .init = crypto_init_rng_ops,
77#ifdef CONFIG_PROC_FS
78 .show = crypto_rng_show,
79#endif
80};
81EXPORT_SYMBOL_GPL(crypto_rng_type);
82
83int crypto_get_default_rng(void)
84{
85 struct crypto_rng *rng;
86 int err;
87
88 mutex_lock(&crypto_default_rng_lock);
89 if (!crypto_default_rng) {
90 rng = crypto_alloc_rng("stdrng", 0, 0);
91 err = PTR_ERR(rng);
92 if (IS_ERR(rng))
93 goto unlock;
94
95 err = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
96 if (err) {
97 crypto_free_rng(rng);
98 goto unlock;
99 }
100
101 crypto_default_rng = rng;
102 }
103
104 crypto_default_rng_refcnt++;
105 err = 0;
106
107unlock:
108 mutex_unlock(&crypto_default_rng_lock);
109
110 return err;
111}
112EXPORT_SYMBOL_GPL(crypto_get_default_rng);
113
114void crypto_put_default_rng(void)
115{
116 mutex_lock(&crypto_default_rng_lock);
117 if (!--crypto_default_rng_refcnt) {
118 crypto_free_rng(crypto_default_rng);
119 crypto_default_rng = NULL;
120 }
121 mutex_unlock(&crypto_default_rng_lock);
122}
123EXPORT_SYMBOL_GPL(crypto_put_default_rng);
124
125MODULE_LICENSE("GPL");
126MODULE_DESCRIPTION("Random Number Genertor");
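The exported crypto_default_rng is only valid between a successful crypto_get_default_rng() and the matching crypto_put_default_rng(), which is the pattern seqiv adopts below. A minimal sketch of that pattern; the fill_salt() wrapper is a hypothetical caller.

#include <crypto/rng.h>

static int fill_salt(u8 *salt, unsigned int len)
{
	int err;

	/* takes a reference; allocates and seeds "stdrng" on first use */
	err = crypto_get_default_rng();
	if (err)
		return err;

	err = crypto_rng_get_bytes(crypto_default_rng, salt, len);

	/* drops the reference; the RNG is freed when the count reaches zero */
	crypto_put_default_rng();
	return err;
}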
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index b903aab31577..5a013a8bf87a 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -15,11 +15,11 @@
15 15
16#include <crypto/internal/aead.h> 16#include <crypto/internal/aead.h>
17#include <crypto/internal/skcipher.h> 17#include <crypto/internal/skcipher.h>
18#include <crypto/rng.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/random.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/string.h> 24#include <linux/string.h>
25 25
@@ -189,17 +189,22 @@ static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
189{ 189{
190 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); 190 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
191 struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); 191 struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
192 int err = 0;
192 193
193 spin_lock_bh(&ctx->lock); 194 spin_lock_bh(&ctx->lock);
194 if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first) 195 if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
195 goto unlock; 196 goto unlock;
196 197
197 crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt; 198 crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
198 get_random_bytes(ctx->salt, crypto_ablkcipher_ivsize(geniv)); 199 err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
200 crypto_ablkcipher_ivsize(geniv));
199 201
200unlock: 202unlock:
201 spin_unlock_bh(&ctx->lock); 203 spin_unlock_bh(&ctx->lock);
202 204
205 if (err)
206 return err;
207
203 return seqiv_givencrypt(req); 208 return seqiv_givencrypt(req);
204} 209}
205 210
@@ -207,17 +212,22 @@ static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
207{ 212{
208 struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); 213 struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
209 struct seqiv_ctx *ctx = crypto_aead_ctx(geniv); 214 struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
215 int err = 0;
210 216
211 spin_lock_bh(&ctx->lock); 217 spin_lock_bh(&ctx->lock);
212 if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first) 218 if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
213 goto unlock; 219 goto unlock;
214 220
215 crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt; 221 crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
216 get_random_bytes(ctx->salt, crypto_aead_ivsize(geniv)); 222 err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
223 crypto_aead_ivsize(geniv));
217 224
218unlock: 225unlock:
219 spin_unlock_bh(&ctx->lock); 226 spin_unlock_bh(&ctx->lock);
220 227
228 if (err)
229 return err;
230
221 return seqiv_aead_givencrypt(req); 231 return seqiv_aead_givencrypt(req);
222} 232}
223 233
@@ -298,19 +308,27 @@ static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
298 if (IS_ERR(algt)) 308 if (IS_ERR(algt))
299 return ERR_PTR(err); 309 return ERR_PTR(err);
300 310
311 err = crypto_get_default_rng();
312 if (err)
313 return ERR_PTR(err);
314
301 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) 315 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
302 inst = seqiv_ablkcipher_alloc(tb); 316 inst = seqiv_ablkcipher_alloc(tb);
303 else 317 else
304 inst = seqiv_aead_alloc(tb); 318 inst = seqiv_aead_alloc(tb);
305 319
306 if (IS_ERR(inst)) 320 if (IS_ERR(inst))
307 goto out; 321 goto put_rng;
308 322
309 inst->alg.cra_alignmask |= __alignof__(u32) - 1; 323 inst->alg.cra_alignmask |= __alignof__(u32) - 1;
310 inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx); 324 inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
311 325
312out: 326out:
313 return inst; 327 return inst;
328
329put_rng:
330 crypto_put_default_rng();
331 goto out;
314} 332}
315 333
316static void seqiv_free(struct crypto_instance *inst) 334static void seqiv_free(struct crypto_instance *inst)
@@ -319,6 +337,7 @@ static void seqiv_free(struct crypto_instance *inst)
319 skcipher_geniv_free(inst); 337 skcipher_geniv_free(inst);
320 else 338 else
321 aead_geniv_free(inst); 339 aead_geniv_free(inst);
340 crypto_put_default_rng();
322} 341}
323 342
324static struct crypto_template seqiv_tmpl = { 343static struct crypto_template seqiv_tmpl = {
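In template terms, the change above ties the default-RNG reference to the instance lifetime: taken in ->alloc before the instance is built, dropped on every alloc failure path and again in ->free. A compressed sketch of that pairing, with demo_instance_alloc() standing in as a placeholder for the real geniv constructor:

#include <crypto/algapi.h>
#include <crypto/rng.h>
#include <linux/err.h>

static struct crypto_instance *demo_instance_alloc(void);	/* placeholder */

static struct crypto_instance *demo_alloc(void)
{
	struct crypto_instance *inst;
	int err;

	err = crypto_get_default_rng();
	if (err)
		return ERR_PTR(err);

	inst = demo_instance_alloc();
	if (IS_ERR(inst))
		crypto_put_default_rng();	/* release again on failure */

	return inst;
}

static void demo_free(struct crypto_instance *inst)
{
	/* free the instance itself here, then drop the RNG reference */
	crypto_put_default_rng();
}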
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 66368022e0bf..28a45a1e6f42 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -19,11 +19,9 @@
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/mm.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
25#include <linux/string.h> 24#include <linux/string.h>
26#include <linux/crypto.h>
27#include <linux/moduleparam.h> 25#include <linux/moduleparam.h>
28#include <linux/jiffies.h> 26#include <linux/jiffies.h>
29#include <linux/timex.h> 27#include <linux/timex.h>
@@ -31,45 +29,23 @@
31#include "tcrypt.h" 29#include "tcrypt.h"
32 30
33/* 31/*
34 * Need to kmalloc() memory for testing. 32 * Need slab memory for testing (size in number of pages).
35 */ 33 */
36#define TVMEMSIZE 16384 34#define TVMEMSIZE 4
37#define XBUFSIZE 32768
38 35
39/* 36/*
40 * Indexes into the xbuf to simulate cross-page access. 37* Used by test_cipher_speed()
41 */
42#define IDX1 32
43#define IDX2 32400
44#define IDX3 1
45#define IDX4 8193
46#define IDX5 22222
47#define IDX6 17101
48#define IDX7 27333
49#define IDX8 3000
50
51/*
52* Used by test_cipher()
53*/ 38*/
54#define ENCRYPT 1 39#define ENCRYPT 1
55#define DECRYPT 0 40#define DECRYPT 0
56 41
57struct tcrypt_result {
58 struct completion completion;
59 int err;
60};
61
62static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
63
64/* 42/*
65 * Used by test_cipher_speed() 43 * Used by test_cipher_speed()
66 */ 44 */
67static unsigned int sec; 45static unsigned int sec;
68 46
69static int mode; 47static int mode;
70static char *xbuf; 48static char *tvmem[TVMEMSIZE];
71static char *axbuf;
72static char *tvmem;
73 49
74static char *check[] = { 50static char *check[] = {
75 "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", 51 "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
@@ -80,655 +56,13 @@ static char *check[] = {
80 "lzo", "cts", NULL 56 "lzo", "cts", NULL
81}; 57};
82 58
83static void hexdump(unsigned char *buf, unsigned int len) 59static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
84{ 60 struct scatterlist *sg, int blen, int sec)
85 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
86 16, 1,
87 buf, len, false);
88}
89
90static void tcrypt_complete(struct crypto_async_request *req, int err)
91{
92 struct tcrypt_result *res = req->data;
93
94 if (err == -EINPROGRESS)
95 return;
96
97 res->err = err;
98 complete(&res->completion);
99}
100
101static void test_hash(char *algo, struct hash_testvec *template,
102 unsigned int tcount)
103{
104 unsigned int i, j, k, temp;
105 struct scatterlist sg[8];
106 char result[64];
107 struct crypto_ahash *tfm;
108 struct ahash_request *req;
109 struct tcrypt_result tresult;
110 int ret;
111 void *hash_buff;
112
113 printk("\ntesting %s\n", algo);
114
115 init_completion(&tresult.completion);
116
117 tfm = crypto_alloc_ahash(algo, 0, 0);
118 if (IS_ERR(tfm)) {
119 printk("failed to load transform for %s: %ld\n", algo,
120 PTR_ERR(tfm));
121 return;
122 }
123
124 req = ahash_request_alloc(tfm, GFP_KERNEL);
125 if (!req) {
126 printk(KERN_ERR "failed to allocate request for %s\n", algo);
127 goto out_noreq;
128 }
129 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
130 tcrypt_complete, &tresult);
131
132 for (i = 0; i < tcount; i++) {
133 printk("test %u:\n", i + 1);
134 memset(result, 0, 64);
135
136 hash_buff = kzalloc(template[i].psize, GFP_KERNEL);
137 if (!hash_buff)
138 continue;
139
140 memcpy(hash_buff, template[i].plaintext, template[i].psize);
141 sg_init_one(&sg[0], hash_buff, template[i].psize);
142
143 if (template[i].ksize) {
144 crypto_ahash_clear_flags(tfm, ~0);
145 ret = crypto_ahash_setkey(tfm, template[i].key,
146 template[i].ksize);
147 if (ret) {
148 printk("setkey() failed ret=%d\n", ret);
149 kfree(hash_buff);
150 goto out;
151 }
152 }
153
154 ahash_request_set_crypt(req, sg, result, template[i].psize);
155 ret = crypto_ahash_digest(req);
156 switch (ret) {
157 case 0:
158 break;
159 case -EINPROGRESS:
160 case -EBUSY:
161 ret = wait_for_completion_interruptible(
162 &tresult.completion);
163 if (!ret && !(ret = tresult.err)) {
164 INIT_COMPLETION(tresult.completion);
165 break;
166 }
167 /* fall through */
168 default:
169 printk("digest () failed ret=%d\n", ret);
170 kfree(hash_buff);
171 goto out;
172 }
173
174 hexdump(result, crypto_ahash_digestsize(tfm));
175 printk("%s\n",
176 memcmp(result, template[i].digest,
177 crypto_ahash_digestsize(tfm)) ?
178 "fail" : "pass");
179 kfree(hash_buff);
180 }
181
182 printk("testing %s across pages\n", algo);
183
184 /* setup the dummy buffer first */
185 memset(xbuf, 0, XBUFSIZE);
186
187 j = 0;
188 for (i = 0; i < tcount; i++) {
189 if (template[i].np) {
190 j++;
191 printk("test %u:\n", j);
192 memset(result, 0, 64);
193
194 temp = 0;
195 sg_init_table(sg, template[i].np);
196 for (k = 0; k < template[i].np; k++) {
197 memcpy(&xbuf[IDX[k]],
198 template[i].plaintext + temp,
199 template[i].tap[k]);
200 temp += template[i].tap[k];
201 sg_set_buf(&sg[k], &xbuf[IDX[k]],
202 template[i].tap[k]);
203 }
204
205 if (template[i].ksize) {
206 crypto_ahash_clear_flags(tfm, ~0);
207 ret = crypto_ahash_setkey(tfm, template[i].key,
208 template[i].ksize);
209
210 if (ret) {
211 printk("setkey() failed ret=%d\n", ret);
212 goto out;
213 }
214 }
215
216 ahash_request_set_crypt(req, sg, result,
217 template[i].psize);
218 ret = crypto_ahash_digest(req);
219 switch (ret) {
220 case 0:
221 break;
222 case -EINPROGRESS:
223 case -EBUSY:
224 ret = wait_for_completion_interruptible(
225 &tresult.completion);
226 if (!ret && !(ret = tresult.err)) {
227 INIT_COMPLETION(tresult.completion);
228 break;
229 }
230 /* fall through */
231 default:
232 printk("digest () failed ret=%d\n", ret);
233 goto out;
234 }
235
236 hexdump(result, crypto_ahash_digestsize(tfm));
237 printk("%s\n",
238 memcmp(result, template[i].digest,
239 crypto_ahash_digestsize(tfm)) ?
240 "fail" : "pass");
241 }
242 }
243
244out:
245 ahash_request_free(req);
246out_noreq:
247 crypto_free_ahash(tfm);
248}
249
250static void test_aead(char *algo, int enc, struct aead_testvec *template,
251 unsigned int tcount)
252{
253 unsigned int ret, i, j, k, n, temp;
254 char *q;
255 struct crypto_aead *tfm;
256 char *key;
257 struct aead_request *req;
258 struct scatterlist sg[8];
259 struct scatterlist asg[8];
260 const char *e;
261 struct tcrypt_result result;
262 unsigned int authsize;
263 void *input;
264 void *assoc;
265 char iv[MAX_IVLEN];
266
267 if (enc == ENCRYPT)
268 e = "encryption";
269 else
270 e = "decryption";
271
272 printk(KERN_INFO "\ntesting %s %s\n", algo, e);
273
274 init_completion(&result.completion);
275
276 tfm = crypto_alloc_aead(algo, 0, 0);
277
278 if (IS_ERR(tfm)) {
279 printk(KERN_INFO "failed to load transform for %s: %ld\n",
280 algo, PTR_ERR(tfm));
281 return;
282 }
283
284 req = aead_request_alloc(tfm, GFP_KERNEL);
285 if (!req) {
286 printk(KERN_INFO "failed to allocate request for %s\n", algo);
287 goto out;
288 }
289
290 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
291 tcrypt_complete, &result);
292
293 for (i = 0, j = 0; i < tcount; i++) {
294 if (!template[i].np) {
295 printk(KERN_INFO "test %u (%d bit key):\n",
296 ++j, template[i].klen * 8);
297
298 /* some tepmplates have no input data but they will
299 * touch input
300 */
301 input = kzalloc(template[i].ilen + template[i].rlen, GFP_KERNEL);
302 if (!input)
303 continue;
304
305 assoc = kzalloc(template[i].alen, GFP_KERNEL);
306 if (!assoc) {
307 kfree(input);
308 continue;
309 }
310
311 memcpy(input, template[i].input, template[i].ilen);
312 memcpy(assoc, template[i].assoc, template[i].alen);
313 if (template[i].iv)
314 memcpy(iv, template[i].iv, MAX_IVLEN);
315 else
316 memset(iv, 0, MAX_IVLEN);
317
318 crypto_aead_clear_flags(tfm, ~0);
319 if (template[i].wk)
320 crypto_aead_set_flags(
321 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
322
323 if (template[i].key)
324 key = template[i].key;
325 else
326 key = kzalloc(template[i].klen, GFP_KERNEL);
327
328 ret = crypto_aead_setkey(tfm, key,
329 template[i].klen);
330 if (ret) {
331 printk(KERN_INFO "setkey() failed flags=%x\n",
332 crypto_aead_get_flags(tfm));
333
334 if (!template[i].fail)
335 goto next_one;
336 }
337
338 authsize = abs(template[i].rlen - template[i].ilen);
339 ret = crypto_aead_setauthsize(tfm, authsize);
340 if (ret) {
341 printk(KERN_INFO
342 "failed to set authsize = %u\n",
343 authsize);
344 goto next_one;
345 }
346
347 sg_init_one(&sg[0], input,
348 template[i].ilen + (enc ? authsize : 0));
349
350 sg_init_one(&asg[0], assoc, template[i].alen);
351
352 aead_request_set_crypt(req, sg, sg,
353 template[i].ilen, iv);
354
355 aead_request_set_assoc(req, asg, template[i].alen);
356
357 ret = enc ?
358 crypto_aead_encrypt(req) :
359 crypto_aead_decrypt(req);
360
361 switch (ret) {
362 case 0:
363 break;
364 case -EINPROGRESS:
365 case -EBUSY:
366 ret = wait_for_completion_interruptible(
367 &result.completion);
368 if (!ret && !(ret = result.err)) {
369 INIT_COMPLETION(result.completion);
370 break;
371 }
372 /* fall through */
373 default:
374 printk(KERN_INFO "%s () failed err=%d\n",
375 e, -ret);
376 goto next_one;
377 }
378
379 q = input;
380 hexdump(q, template[i].rlen);
381
382 printk(KERN_INFO "enc/dec: %s\n",
383 memcmp(q, template[i].result,
384 template[i].rlen) ? "fail" : "pass");
385next_one:
386 if (!template[i].key)
387 kfree(key);
388 kfree(assoc);
389 kfree(input);
390 }
391 }
392
393 printk(KERN_INFO "\ntesting %s %s across pages (chunking)\n", algo, e);
394 memset(axbuf, 0, XBUFSIZE);
395
396 for (i = 0, j = 0; i < tcount; i++) {
397 if (template[i].np) {
398 printk(KERN_INFO "test %u (%d bit key):\n",
399 ++j, template[i].klen * 8);
400
401 if (template[i].iv)
402 memcpy(iv, template[i].iv, MAX_IVLEN);
403 else
404 memset(iv, 0, MAX_IVLEN);
405
406 crypto_aead_clear_flags(tfm, ~0);
407 if (template[i].wk)
408 crypto_aead_set_flags(
409 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
410 key = template[i].key;
411
412 ret = crypto_aead_setkey(tfm, key, template[i].klen);
413 if (ret) {
414 printk(KERN_INFO "setkey() failed flags=%x\n",
415 crypto_aead_get_flags(tfm));
416
417 if (!template[i].fail)
418 goto out;
419 }
420
421 memset(xbuf, 0, XBUFSIZE);
422 sg_init_table(sg, template[i].np);
423 for (k = 0, temp = 0; k < template[i].np; k++) {
424 memcpy(&xbuf[IDX[k]],
425 template[i].input + temp,
426 template[i].tap[k]);
427 temp += template[i].tap[k];
428 sg_set_buf(&sg[k], &xbuf[IDX[k]],
429 template[i].tap[k]);
430 }
431
432 authsize = abs(template[i].rlen - template[i].ilen);
433 ret = crypto_aead_setauthsize(tfm, authsize);
434 if (ret) {
435 printk(KERN_INFO
436 "failed to set authsize = %u\n",
437 authsize);
438 goto out;
439 }
440
441 if (enc)
442 sg[k - 1].length += authsize;
443
444 sg_init_table(asg, template[i].anp);
445 for (k = 0, temp = 0; k < template[i].anp; k++) {
446 memcpy(&axbuf[IDX[k]],
447 template[i].assoc + temp,
448 template[i].atap[k]);
449 temp += template[i].atap[k];
450 sg_set_buf(&asg[k], &axbuf[IDX[k]],
451 template[i].atap[k]);
452 }
453
454 aead_request_set_crypt(req, sg, sg,
455 template[i].ilen,
456 iv);
457
458 aead_request_set_assoc(req, asg, template[i].alen);
459
460 ret = enc ?
461 crypto_aead_encrypt(req) :
462 crypto_aead_decrypt(req);
463
464 switch (ret) {
465 case 0:
466 break;
467 case -EINPROGRESS:
468 case -EBUSY:
469 ret = wait_for_completion_interruptible(
470 &result.completion);
471 if (!ret && !(ret = result.err)) {
472 INIT_COMPLETION(result.completion);
473 break;
474 }
475 /* fall through */
476 default:
477 printk(KERN_INFO "%s () failed err=%d\n",
478 e, -ret);
479 goto out;
480 }
481
482 for (k = 0, temp = 0; k < template[i].np; k++) {
483 printk(KERN_INFO "page %u\n", k);
484 q = &xbuf[IDX[k]];
485
486 n = template[i].tap[k];
487 if (k == template[i].np - 1)
488 n += enc ? authsize : -authsize;
489 hexdump(q, n);
490 printk(KERN_INFO "%s\n",
491 memcmp(q, template[i].result + temp, n) ?
492 "fail" : "pass");
493
494 q += n;
495 if (k == template[i].np - 1 && !enc) {
496 if (memcmp(q, template[i].input +
497 temp + n, authsize))
498 n = authsize;
499 else
500 n = 0;
501 } else {
502 for (n = 0; q[n]; n++)
503 ;
504 }
505 if (n) {
506 printk("Result buffer corruption %u "
507 "bytes:\n", n);
508 hexdump(q, n);
509 }
510
511 temp += template[i].tap[k];
512 }
513 }
514 }
515
516out:
517 crypto_free_aead(tfm);
518 aead_request_free(req);
519}
520
521static void test_cipher(char *algo, int enc,
522 struct cipher_testvec *template, unsigned int tcount)
523{ 61{
524 unsigned int ret, i, j, k, n, temp;
525 char *q;
526 struct crypto_ablkcipher *tfm;
527 struct ablkcipher_request *req;
528 struct scatterlist sg[8];
529 const char *e;
530 struct tcrypt_result result;
531 void *data;
532 char iv[MAX_IVLEN];
533
534 if (enc == ENCRYPT)
535 e = "encryption";
536 else
537 e = "decryption";
538
539 printk("\ntesting %s %s\n", algo, e);
540
541 init_completion(&result.completion);
542 tfm = crypto_alloc_ablkcipher(algo, 0, 0);
543
544 if (IS_ERR(tfm)) {
545 printk("failed to load transform for %s: %ld\n", algo,
546 PTR_ERR(tfm));
547 return;
548 }
549
550 req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
551 if (!req) {
552 printk("failed to allocate request for %s\n", algo);
553 goto out;
554 }
555
556 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
557 tcrypt_complete, &result);
558
559 j = 0;
560 for (i = 0; i < tcount; i++) {
561
562 data = kzalloc(template[i].ilen, GFP_KERNEL);
563 if (!data)
564 continue;
565
566 memcpy(data, template[i].input, template[i].ilen);
567 if (template[i].iv)
568 memcpy(iv, template[i].iv, MAX_IVLEN);
569 else
570 memset(iv, 0, MAX_IVLEN);
571
572 if (!(template[i].np)) {
573 j++;
574 printk("test %u (%d bit key):\n",
575 j, template[i].klen * 8);
576
577 crypto_ablkcipher_clear_flags(tfm, ~0);
578 if (template[i].wk)
579 crypto_ablkcipher_set_flags(
580 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
581
582 ret = crypto_ablkcipher_setkey(tfm, template[i].key,
583 template[i].klen);
584 if (ret) {
585 printk("setkey() failed flags=%x\n",
586 crypto_ablkcipher_get_flags(tfm));
587
588 if (!template[i].fail) {
589 kfree(data);
590 goto out;
591 }
592 }
593
594 sg_init_one(&sg[0], data, template[i].ilen);
595
596 ablkcipher_request_set_crypt(req, sg, sg,
597 template[i].ilen, iv);
598 ret = enc ?
599 crypto_ablkcipher_encrypt(req) :
600 crypto_ablkcipher_decrypt(req);
601
602 switch (ret) {
603 case 0:
604 break;
605 case -EINPROGRESS:
606 case -EBUSY:
607 ret = wait_for_completion_interruptible(
608 &result.completion);
609 if (!ret && !((ret = result.err))) {
610 INIT_COMPLETION(result.completion);
611 break;
612 }
613 /* fall through */
614 default:
615 printk("%s () failed err=%d\n", e, -ret);
616 kfree(data);
617 goto out;
618 }
619
620 q = data;
621 hexdump(q, template[i].rlen);
622
623 printk("%s\n",
624 memcmp(q, template[i].result,
625 template[i].rlen) ? "fail" : "pass");
626 }
627 kfree(data);
628 }
629
630 printk("\ntesting %s %s across pages (chunking)\n", algo, e);
631
632 j = 0;
633 for (i = 0; i < tcount; i++) {
634
635 if (template[i].iv)
636 memcpy(iv, template[i].iv, MAX_IVLEN);
637 else
638 memset(iv, 0, MAX_IVLEN);
639
640 if (template[i].np) {
641 j++;
642 printk("test %u (%d bit key):\n",
643 j, template[i].klen * 8);
644
645 memset(xbuf, 0, XBUFSIZE);
646 crypto_ablkcipher_clear_flags(tfm, ~0);
647 if (template[i].wk)
648 crypto_ablkcipher_set_flags(
649 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
650
651 ret = crypto_ablkcipher_setkey(tfm, template[i].key,
652 template[i].klen);
653 if (ret) {
654 printk("setkey() failed flags=%x\n",
655 crypto_ablkcipher_get_flags(tfm));
656
657 if (!template[i].fail)
658 goto out;
659 }
660
661 temp = 0;
662 sg_init_table(sg, template[i].np);
663 for (k = 0; k < template[i].np; k++) {
664 memcpy(&xbuf[IDX[k]],
665 template[i].input + temp,
666 template[i].tap[k]);
667 temp += template[i].tap[k];
668 sg_set_buf(&sg[k], &xbuf[IDX[k]],
669 template[i].tap[k]);
670 }
671
672 ablkcipher_request_set_crypt(req, sg, sg,
673 template[i].ilen, iv);
674
675 ret = enc ?
676 crypto_ablkcipher_encrypt(req) :
677 crypto_ablkcipher_decrypt(req);
678
679 switch (ret) {
680 case 0:
681 break;
682 case -EINPROGRESS:
683 case -EBUSY:
684 ret = wait_for_completion_interruptible(
685 &result.completion);
686 if (!ret && !((ret = result.err))) {
687 INIT_COMPLETION(result.completion);
688 break;
689 }
690 /* fall through */
691 default:
692 printk("%s () failed err=%d\n", e, -ret);
693 goto out;
694 }
695
696 temp = 0;
697 for (k = 0; k < template[i].np; k++) {
698 printk("page %u\n", k);
699 q = &xbuf[IDX[k]];
700 hexdump(q, template[i].tap[k]);
701 printk("%s\n",
702 memcmp(q, template[i].result + temp,
703 template[i].tap[k]) ? "fail" :
704 "pass");
705
706 for (n = 0; q[template[i].tap[k] + n]; n++)
707 ;
708 if (n) {
709 printk("Result buffer corruption %u "
710 "bytes:\n", n);
711 hexdump(&q[template[i].tap[k]], n);
712 }
713 temp += template[i].tap[k];
714 }
715 }
716 }
717out:
718 crypto_free_ablkcipher(tfm);
719 ablkcipher_request_free(req);
720}
721
722static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p,
723 int blen, int sec)
724{
725 struct scatterlist sg[1];
726 unsigned long start, end; 62 unsigned long start, end;
727 int bcount; 63 int bcount;
728 int ret; 64 int ret;
729 65
730 sg_init_one(sg, p, blen);
731
732 for (start = jiffies, end = start + sec * HZ, bcount = 0; 66 for (start = jiffies, end = start + sec * HZ, bcount = 0;
733 time_before(jiffies, end); bcount++) { 67 time_before(jiffies, end); bcount++) {
734 if (enc) 68 if (enc)
@@ -745,16 +79,13 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p,
745 return 0; 79 return 0;
746} 80}
747 81
748static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p, 82static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
749 int blen) 83 struct scatterlist *sg, int blen)
750{ 84{
751 struct scatterlist sg[1];
752 unsigned long cycles = 0; 85 unsigned long cycles = 0;
753 int ret = 0; 86 int ret = 0;
754 int i; 87 int i;
755 88
756 sg_init_one(sg, p, blen);
757
758 local_bh_disable(); 89 local_bh_disable();
759 local_irq_disable(); 90 local_irq_disable();
760 91
@@ -799,12 +130,12 @@ out:
799 130
800static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; 131static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
801 132
802static void test_cipher_speed(char *algo, int enc, unsigned int sec, 133static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
803 struct cipher_testvec *template, 134 struct cipher_speed_template *template,
804 unsigned int tcount, u8 *keysize) 135 unsigned int tcount, u8 *keysize)
805{ 136{
806 unsigned int ret, i, j, iv_len; 137 unsigned int ret, i, j, iv_len;
807 unsigned char *key, *p, iv[128]; 138 const char *key, iv[128];
808 struct crypto_blkcipher *tfm; 139 struct crypto_blkcipher *tfm;
809 struct blkcipher_desc desc; 140 struct blkcipher_desc desc;
810 const char *e; 141 const char *e;
@@ -832,27 +163,28 @@ static void test_cipher_speed(char *algo, int enc, unsigned int sec,
832 163
833 b_size = block_sizes; 164 b_size = block_sizes;
834 do { 165 do {
166 struct scatterlist sg[TVMEMSIZE];
835 167
836 if ((*keysize + *b_size) > TVMEMSIZE) { 168 if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
837 printk("template (%u) too big for tvmem (%u)\n", 169 printk("template (%u) too big for "
838 *keysize + *b_size, TVMEMSIZE); 170 "tvmem (%lu)\n", *keysize + *b_size,
171 TVMEMSIZE * PAGE_SIZE);
839 goto out; 172 goto out;
840 } 173 }
841 174
842 printk("test %u (%d bit key, %d byte blocks): ", i, 175 printk("test %u (%d bit key, %d byte blocks): ", i,
843 *keysize * 8, *b_size); 176 *keysize * 8, *b_size);
844 177
845 memset(tvmem, 0xff, *keysize + *b_size); 178 memset(tvmem[0], 0xff, PAGE_SIZE);
846 179
847 /* set key, plain text and IV */ 180 /* set key, plain text and IV */
848 key = (unsigned char *)tvmem; 181 key = tvmem[0];
849 for (j = 0; j < tcount; j++) { 182 for (j = 0; j < tcount; j++) {
850 if (template[j].klen == *keysize) { 183 if (template[j].klen == *keysize) {
851 key = template[j].key; 184 key = template[j].key;
852 break; 185 break;
853 } 186 }
854 } 187 }
855 p = (unsigned char *)tvmem + *keysize;
856 188
857 ret = crypto_blkcipher_setkey(tfm, key, *keysize); 189 ret = crypto_blkcipher_setkey(tfm, key, *keysize);
858 if (ret) { 190 if (ret) {
@@ -861,6 +193,14 @@ static void test_cipher_speed(char *algo, int enc, unsigned int sec,
861 goto out; 193 goto out;
862 } 194 }
863 195
196 sg_init_table(sg, TVMEMSIZE);
197 sg_set_buf(sg, tvmem[0] + *keysize,
198 PAGE_SIZE - *keysize);
199 for (j = 1; j < TVMEMSIZE; j++) {
200 sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
201 memset (tvmem[j], 0xff, PAGE_SIZE);
202 }
203
864 iv_len = crypto_blkcipher_ivsize(tfm); 204 iv_len = crypto_blkcipher_ivsize(tfm);
865 if (iv_len) { 205 if (iv_len) {
866 memset(&iv, 0xff, iv_len); 206 memset(&iv, 0xff, iv_len);
@@ -868,9 +208,11 @@ static void test_cipher_speed(char *algo, int enc, unsigned int sec,
868 } 208 }
869 209
870 if (sec) 210 if (sec)
871 ret = test_cipher_jiffies(&desc, enc, p, *b_size, sec); 211 ret = test_cipher_jiffies(&desc, enc, sg,
212 *b_size, sec);
872 else 213 else
873 ret = test_cipher_cycles(&desc, enc, p, *b_size); 214 ret = test_cipher_cycles(&desc, enc, sg,
215 *b_size);
874 216
875 if (ret) { 217 if (ret) {
876 printk("%s() failed flags=%x\n", e, desc.flags); 218 printk("%s() failed flags=%x\n", e, desc.flags);
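The speed paths above now hand the cipher a scatterlist spanning TVMEMSIZE page-sized buffers instead of one flat kmalloc() region, with the key parked at the front of the first page. A standalone sketch of that setup, assuming only sg_init_table()/sg_set_buf() from <linux/scatterlist.h>; the buffer array, page count, and function name are placeholders.

#include <linux/mm.h>
#include <linux/scatterlist.h>

#define DEMO_NPAGES 4		/* mirrors TVMEMSIZE */

static void demo_sg_setup(struct scatterlist *sg, char *pages[DEMO_NPAGES],
			  unsigned int keysize)
{
	int i;

	sg_init_table(sg, DEMO_NPAGES);

	/* first entry starts past the key stored at the head of page 0 */
	sg_set_buf(sg, pages[0] + keysize, PAGE_SIZE - keysize);

	for (i = 1; i < DEMO_NPAGES; i++)
		sg_set_buf(sg + i, pages[i], PAGE_SIZE);
}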
@@ -886,19 +228,16 @@ out:
886 crypto_free_blkcipher(tfm); 228 crypto_free_blkcipher(tfm);
887} 229}
888 230
889static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen, 231static int test_hash_jiffies_digest(struct hash_desc *desc,
232 struct scatterlist *sg, int blen,
890 char *out, int sec) 233 char *out, int sec)
891{ 234{
892 struct scatterlist sg[1];
893 unsigned long start, end; 235 unsigned long start, end;
894 int bcount; 236 int bcount;
895 int ret; 237 int ret;
896 238
897 sg_init_table(sg, 1);
898
899 for (start = jiffies, end = start + sec * HZ, bcount = 0; 239 for (start = jiffies, end = start + sec * HZ, bcount = 0;
900 time_before(jiffies, end); bcount++) { 240 time_before(jiffies, end); bcount++) {
901 sg_set_buf(sg, p, blen);
902 ret = crypto_hash_digest(desc, sg, blen, out); 241 ret = crypto_hash_digest(desc, sg, blen, out);
903 if (ret) 242 if (ret)
904 return ret; 243 return ret;
@@ -910,18 +249,15 @@ static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen,
910 return 0; 249 return 0;
911} 250}
912 251
913static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen, 252static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
914 int plen, char *out, int sec) 253 int blen, int plen, char *out, int sec)
915{ 254{
916 struct scatterlist sg[1];
917 unsigned long start, end; 255 unsigned long start, end;
918 int bcount, pcount; 256 int bcount, pcount;
919 int ret; 257 int ret;
920 258
921 if (plen == blen) 259 if (plen == blen)
922 return test_hash_jiffies_digest(desc, p, blen, out, sec); 260 return test_hash_jiffies_digest(desc, sg, blen, out, sec);
923
924 sg_init_table(sg, 1);
925 261
926 for (start = jiffies, end = start + sec * HZ, bcount = 0; 262 for (start = jiffies, end = start + sec * HZ, bcount = 0;
927 time_before(jiffies, end); bcount++) { 263 time_before(jiffies, end); bcount++) {
@@ -929,7 +265,6 @@ static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen,
929 if (ret) 265 if (ret)
930 return ret; 266 return ret;
931 for (pcount = 0; pcount < blen; pcount += plen) { 267 for (pcount = 0; pcount < blen; pcount += plen) {
932 sg_set_buf(sg, p + pcount, plen);
933 ret = crypto_hash_update(desc, sg, plen); 268 ret = crypto_hash_update(desc, sg, plen);
934 if (ret) 269 if (ret)
935 return ret; 270 return ret;
@@ -946,22 +281,18 @@ static int test_hash_jiffies(struct hash_desc *desc, char *p, int blen,
946 return 0; 281 return 0;
947} 282}
948 283
949static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen, 284static int test_hash_cycles_digest(struct hash_desc *desc,
950 char *out) 285 struct scatterlist *sg, int blen, char *out)
951{ 286{
952 struct scatterlist sg[1];
953 unsigned long cycles = 0; 287 unsigned long cycles = 0;
954 int i; 288 int i;
955 int ret; 289 int ret;
956 290
957 sg_init_table(sg, 1);
958
959 local_bh_disable(); 291 local_bh_disable();
960 local_irq_disable(); 292 local_irq_disable();
961 293
962 /* Warm-up run. */ 294 /* Warm-up run. */
963 for (i = 0; i < 4; i++) { 295 for (i = 0; i < 4; i++) {
964 sg_set_buf(sg, p, blen);
965 ret = crypto_hash_digest(desc, sg, blen, out); 296 ret = crypto_hash_digest(desc, sg, blen, out);
966 if (ret) 297 if (ret)
967 goto out; 298 goto out;
@@ -973,7 +304,6 @@ static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen,
973 304
974 start = get_cycles(); 305 start = get_cycles();
975 306
976 sg_set_buf(sg, p, blen);
977 ret = crypto_hash_digest(desc, sg, blen, out); 307 ret = crypto_hash_digest(desc, sg, blen, out);
978 if (ret) 308 if (ret)
979 goto out; 309 goto out;
@@ -996,18 +326,15 @@ out:
996 return 0; 326 return 0;
997} 327}
998 328
999static int test_hash_cycles(struct hash_desc *desc, char *p, int blen, 329static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
1000 int plen, char *out) 330 int blen, int plen, char *out)
1001{ 331{
1002 struct scatterlist sg[1];
1003 unsigned long cycles = 0; 332 unsigned long cycles = 0;
1004 int i, pcount; 333 int i, pcount;
1005 int ret; 334 int ret;
1006 335
1007 if (plen == blen) 336 if (plen == blen)
1008 return test_hash_cycles_digest(desc, p, blen, out); 337 return test_hash_cycles_digest(desc, sg, blen, out);
1009
1010 sg_init_table(sg, 1);
1011 338
1012 local_bh_disable(); 339 local_bh_disable();
1013 local_irq_disable(); 340 local_irq_disable();
@@ -1018,7 +345,6 @@ static int test_hash_cycles(struct hash_desc *desc, char *p, int blen,
1018 if (ret) 345 if (ret)
1019 goto out; 346 goto out;
1020 for (pcount = 0; pcount < blen; pcount += plen) { 347 for (pcount = 0; pcount < blen; pcount += plen) {
1021 sg_set_buf(sg, p + pcount, plen);
1022 ret = crypto_hash_update(desc, sg, plen); 348 ret = crypto_hash_update(desc, sg, plen);
1023 if (ret) 349 if (ret)
1024 goto out; 350 goto out;
@@ -1038,7 +364,6 @@ static int test_hash_cycles(struct hash_desc *desc, char *p, int blen,
1038 if (ret) 364 if (ret)
1039 goto out; 365 goto out;
1040 for (pcount = 0; pcount < blen; pcount += plen) { 366 for (pcount = 0; pcount < blen; pcount += plen) {
1041 sg_set_buf(sg, p + pcount, plen);
1042 ret = crypto_hash_update(desc, sg, plen); 367 ret = crypto_hash_update(desc, sg, plen);
1043 if (ret) 368 if (ret)
1044 goto out; 369 goto out;
@@ -1065,9 +390,10 @@ out:
1065 return 0; 390 return 0;
1066} 391}
1067 392
1068static void test_hash_speed(char *algo, unsigned int sec, 393static void test_hash_speed(const char *algo, unsigned int sec,
1069 struct hash_speed *speed) 394 struct hash_speed *speed)
1070{ 395{
396 struct scatterlist sg[TVMEMSIZE];
1071 struct crypto_hash *tfm; 397 struct crypto_hash *tfm;
1072 struct hash_desc desc; 398 struct hash_desc desc;
1073 char output[1024]; 399 char output[1024];
@@ -1093,23 +419,27 @@ static void test_hash_speed(char *algo, unsigned int sec,
1093 goto out; 419 goto out;
1094 } 420 }
1095 421
422 sg_init_table(sg, TVMEMSIZE);
423 for (i = 0; i < TVMEMSIZE; i++) {
424 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
425 memset(tvmem[i], 0xff, PAGE_SIZE);
426 }
427
1096 for (i = 0; speed[i].blen != 0; i++) { 428 for (i = 0; speed[i].blen != 0; i++) {
1097 if (speed[i].blen > TVMEMSIZE) { 429 if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
1098 printk("template (%u) too big for tvmem (%u)\n", 430 printk("template (%u) too big for tvmem (%lu)\n",
1099 speed[i].blen, TVMEMSIZE); 431 speed[i].blen, TVMEMSIZE * PAGE_SIZE);
1100 goto out; 432 goto out;
1101 } 433 }
1102 434
1103 printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ", 435 printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ",
1104 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); 436 i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
1105 437
1106 memset(tvmem, 0xff, speed[i].blen);
1107
1108 if (sec) 438 if (sec)
1109 ret = test_hash_jiffies(&desc, tvmem, speed[i].blen, 439 ret = test_hash_jiffies(&desc, sg, speed[i].blen,
1110 speed[i].plen, output, sec); 440 speed[i].plen, output, sec);
1111 else 441 else
1112 ret = test_hash_cycles(&desc, tvmem, speed[i].blen, 442 ret = test_hash_cycles(&desc, sg, speed[i].blen,
1113 speed[i].plen, output); 443 speed[i].plen, output);
1114 444
1115 if (ret) { 445 if (ret) {
@@ -1122,73 +452,6 @@ out:
1122 crypto_free_hash(tfm); 452 crypto_free_hash(tfm);
1123} 453}
1124 454
1125static void test_comp(char *algo, struct comp_testvec *ctemplate,
1126 struct comp_testvec *dtemplate, int ctcount, int dtcount)
1127{
1128 unsigned int i;
1129 char result[COMP_BUF_SIZE];
1130 struct crypto_comp *tfm;
1131 unsigned int tsize;
1132
1133 printk("\ntesting %s compression\n", algo);
1134
1135 tfm = crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC);
1136 if (IS_ERR(tfm)) {
1137 printk("failed to load transform for %s\n", algo);
1138 return;
1139 }
1140
1141 for (i = 0; i < ctcount; i++) {
1142 int ilen, ret, dlen = COMP_BUF_SIZE;
1143
1144 printk("test %u:\n", i + 1);
1145 memset(result, 0, sizeof (result));
1146
1147 ilen = ctemplate[i].inlen;
1148 ret = crypto_comp_compress(tfm, ctemplate[i].input,
1149 ilen, result, &dlen);
1150 if (ret) {
1151 printk("fail: ret=%d\n", ret);
1152 continue;
1153 }
1154 hexdump(result, dlen);
1155 printk("%s (ratio %d:%d)\n",
1156 memcmp(result, ctemplate[i].output, dlen) ? "fail" : "pass",
1157 ilen, dlen);
1158 }
1159
1160 printk("\ntesting %s decompression\n", algo);
1161
1162 tsize = sizeof(struct comp_testvec);
1163 tsize *= dtcount;
1164 if (tsize > TVMEMSIZE) {
1165 printk("template (%u) too big for tvmem (%u)\n", tsize,
1166 TVMEMSIZE);
1167 goto out;
1168 }
1169
1170 for (i = 0; i < dtcount; i++) {
1171 int ilen, ret, dlen = COMP_BUF_SIZE;
1172
1173 printk("test %u:\n", i + 1);
1174 memset(result, 0, sizeof (result));
1175
1176 ilen = dtemplate[i].inlen;
1177 ret = crypto_comp_decompress(tfm, dtemplate[i].input,
1178 ilen, result, &dlen);
1179 if (ret) {
1180 printk("fail: ret=%d\n", ret);
1181 continue;
1182 }
1183 hexdump(result, dlen);
1184 printk("%s (ratio %d:%d)\n",
1185 memcmp(result, dtemplate[i].output, dlen) ? "fail" : "pass",
1186 ilen, dlen);
1187 }
1188out:
1189 crypto_free_comp(tfm);
1190}
1191
1192static void test_available(void) 455static void test_available(void)
1193{ 456{
1194 char **name = check; 457 char **name = check;
@@ -1201,549 +464,237 @@ static void test_available(void)
1201 } 464 }
1202} 465}
1203 466
1204static void do_test(void) 467static inline int tcrypt_test(const char *alg)
1205{ 468{
1206 switch (mode) { 469 return alg_test(alg, alg, 0, 0);
470}
471
472static void do_test(int m)
473{
474 int i;
1207 475
476 switch (m) {
1208 case 0: 477 case 0:
1209 test_hash("md5", md5_tv_template, MD5_TEST_VECTORS); 478 for (i = 1; i < 200; i++)
1210 479 do_test(i);
1211 test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS);
1212
1213 //DES
1214 test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template,
1215 DES_ENC_TEST_VECTORS);
1216 test_cipher("ecb(des)", DECRYPT, des_dec_tv_template,
1217 DES_DEC_TEST_VECTORS);
1218 test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template,
1219 DES_CBC_ENC_TEST_VECTORS);
1220 test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template,
1221 DES_CBC_DEC_TEST_VECTORS);
1222
1223 //DES3_EDE
1224 test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template,
1225 DES3_EDE_ENC_TEST_VECTORS);
1226 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
1227 DES3_EDE_DEC_TEST_VECTORS);
1228
1229 test_cipher("cbc(des3_ede)", ENCRYPT,
1230 des3_ede_cbc_enc_tv_template,
1231 DES3_EDE_CBC_ENC_TEST_VECTORS);
1232
1233 test_cipher("cbc(des3_ede)", DECRYPT,
1234 des3_ede_cbc_dec_tv_template,
1235 DES3_EDE_CBC_DEC_TEST_VECTORS);
1236
1237 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS);
1238
1239 test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS);
1240
1241 test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS);
1242
1243 //BLOWFISH
1244 test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template,
1245 BF_ENC_TEST_VECTORS);
1246 test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template,
1247 BF_DEC_TEST_VECTORS);
1248 test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template,
1249 BF_CBC_ENC_TEST_VECTORS);
1250 test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template,
1251 BF_CBC_DEC_TEST_VECTORS);
1252
1253 //TWOFISH
1254 test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template,
1255 TF_ENC_TEST_VECTORS);
1256 test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template,
1257 TF_DEC_TEST_VECTORS);
1258 test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template,
1259 TF_CBC_ENC_TEST_VECTORS);
1260 test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template,
1261 TF_CBC_DEC_TEST_VECTORS);
1262
1263 //SERPENT
1264 test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template,
1265 SERPENT_ENC_TEST_VECTORS);
1266 test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template,
1267 SERPENT_DEC_TEST_VECTORS);
1268
1269 //TNEPRES
1270 test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template,
1271 TNEPRES_ENC_TEST_VECTORS);
1272 test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template,
1273 TNEPRES_DEC_TEST_VECTORS);
1274
1275 //AES
1276 test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template,
1277 AES_ENC_TEST_VECTORS);
1278 test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template,
1279 AES_DEC_TEST_VECTORS);
1280 test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template,
1281 AES_CBC_ENC_TEST_VECTORS);
1282 test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
1283 AES_CBC_DEC_TEST_VECTORS);
1284 test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
1285 AES_LRW_ENC_TEST_VECTORS);
1286 test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
1287 AES_LRW_DEC_TEST_VECTORS);
1288 test_cipher("xts(aes)", ENCRYPT, aes_xts_enc_tv_template,
1289 AES_XTS_ENC_TEST_VECTORS);
1290 test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
1291 AES_XTS_DEC_TEST_VECTORS);
1292 test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
1293 AES_CTR_ENC_TEST_VECTORS);
1294 test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
1295 AES_CTR_DEC_TEST_VECTORS);
1296 test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template,
1297 AES_GCM_ENC_TEST_VECTORS);
1298 test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
1299 AES_GCM_DEC_TEST_VECTORS);
1300 test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template,
1301 AES_CCM_ENC_TEST_VECTORS);
1302 test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
1303 AES_CCM_DEC_TEST_VECTORS);
1304
1305 //CAST5
1306 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
1307 CAST5_ENC_TEST_VECTORS);
1308 test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template,
1309 CAST5_DEC_TEST_VECTORS);
1310
1311 //CAST6
1312 test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template,
1313 CAST6_ENC_TEST_VECTORS);
1314 test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template,
1315 CAST6_DEC_TEST_VECTORS);
1316
1317 //ARC4
1318 test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template,
1319 ARC4_ENC_TEST_VECTORS);
1320 test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template,
1321 ARC4_DEC_TEST_VECTORS);
1322
1323 //TEA
1324 test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template,
1325 TEA_ENC_TEST_VECTORS);
1326 test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template,
1327 TEA_DEC_TEST_VECTORS);
1328
1329
1330 //XTEA
1331 test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template,
1332 XTEA_ENC_TEST_VECTORS);
1333 test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template,
1334 XTEA_DEC_TEST_VECTORS);
1335
1336 //KHAZAD
1337 test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template,
1338 KHAZAD_ENC_TEST_VECTORS);
1339 test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template,
1340 KHAZAD_DEC_TEST_VECTORS);
1341
1342 //ANUBIS
1343 test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template,
1344 ANUBIS_ENC_TEST_VECTORS);
1345 test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template,
1346 ANUBIS_DEC_TEST_VECTORS);
1347 test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template,
1348 ANUBIS_CBC_ENC_TEST_VECTORS);
1349 test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template,
1350 ANUBIS_CBC_ENC_TEST_VECTORS);
1351
1352 //XETA
1353 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template,
1354 XETA_ENC_TEST_VECTORS);
1355 test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template,
1356 XETA_DEC_TEST_VECTORS);
1357
1358 //FCrypt
1359 test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template,
1360 FCRYPT_ENC_TEST_VECTORS);
1361 test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
1362 FCRYPT_DEC_TEST_VECTORS);
1363
1364 //CAMELLIA
1365 test_cipher("ecb(camellia)", ENCRYPT,
1366 camellia_enc_tv_template,
1367 CAMELLIA_ENC_TEST_VECTORS);
1368 test_cipher("ecb(camellia)", DECRYPT,
1369 camellia_dec_tv_template,
1370 CAMELLIA_DEC_TEST_VECTORS);
1371 test_cipher("cbc(camellia)", ENCRYPT,
1372 camellia_cbc_enc_tv_template,
1373 CAMELLIA_CBC_ENC_TEST_VECTORS);
1374 test_cipher("cbc(camellia)", DECRYPT,
1375 camellia_cbc_dec_tv_template,
1376 CAMELLIA_CBC_DEC_TEST_VECTORS);
1377
1378 //SEED
1379 test_cipher("ecb(seed)", ENCRYPT, seed_enc_tv_template,
1380 SEED_ENC_TEST_VECTORS);
1381 test_cipher("ecb(seed)", DECRYPT, seed_dec_tv_template,
1382 SEED_DEC_TEST_VECTORS);
1383
1384 //CTS
1385 test_cipher("cts(cbc(aes))", ENCRYPT, cts_mode_enc_tv_template,
1386 CTS_MODE_ENC_TEST_VECTORS);
1387 test_cipher("cts(cbc(aes))", DECRYPT, cts_mode_dec_tv_template,
1388 CTS_MODE_DEC_TEST_VECTORS);
1389
1390 test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
1391 test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
1392 test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS);
1393 test_hash("wp384", wp384_tv_template, WP384_TEST_VECTORS);
1394 test_hash("wp256", wp256_tv_template, WP256_TEST_VECTORS);
1395 test_hash("tgr192", tgr192_tv_template, TGR192_TEST_VECTORS);
1396 test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS);
1397 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS);
1398 test_comp("deflate", deflate_comp_tv_template,
1399 deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
1400 DEFLATE_DECOMP_TEST_VECTORS);
1401 test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template,
1402 LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
1403 test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS);
1404 test_hash("hmac(md5)", hmac_md5_tv_template,
1405 HMAC_MD5_TEST_VECTORS);
1406 test_hash("hmac(sha1)", hmac_sha1_tv_template,
1407 HMAC_SHA1_TEST_VECTORS);
1408 test_hash("hmac(sha224)", hmac_sha224_tv_template,
1409 HMAC_SHA224_TEST_VECTORS);
1410 test_hash("hmac(sha256)", hmac_sha256_tv_template,
1411 HMAC_SHA256_TEST_VECTORS);
1412 test_hash("hmac(sha384)", hmac_sha384_tv_template,
1413 HMAC_SHA384_TEST_VECTORS);
1414 test_hash("hmac(sha512)", hmac_sha512_tv_template,
1415 HMAC_SHA512_TEST_VECTORS);
1416
1417 test_hash("xcbc(aes)", aes_xcbc128_tv_template,
1418 XCBC_AES_TEST_VECTORS);
1419
1420 test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS);
1421 break; 480 break;
1422 481
1423 case 1: 482 case 1:
1424 test_hash("md5", md5_tv_template, MD5_TEST_VECTORS); 483 tcrypt_test("md5");
1425 break; 484 break;
1426 485
1427 case 2: 486 case 2:
1428 test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS); 487 tcrypt_test("sha1");
1429 break; 488 break;
1430 489
1431 case 3: 490 case 3:
1432 test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template, 491 tcrypt_test("ecb(des)");
1433 DES_ENC_TEST_VECTORS); 492 tcrypt_test("cbc(des)");
1434 test_cipher("ecb(des)", DECRYPT, des_dec_tv_template,
1435 DES_DEC_TEST_VECTORS);
1436 test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template,
1437 DES_CBC_ENC_TEST_VECTORS);
1438 test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template,
1439 DES_CBC_DEC_TEST_VECTORS);
1440 break; 493 break;
1441 494
1442 case 4: 495 case 4:
1443 test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template, 496 tcrypt_test("ecb(des3_ede)");
1444 DES3_EDE_ENC_TEST_VECTORS); 497 tcrypt_test("cbc(des3_ede)");
1445 test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template,
1446 DES3_EDE_DEC_TEST_VECTORS);
1447
1448 test_cipher("cbc(des3_ede)", ENCRYPT,
1449 des3_ede_cbc_enc_tv_template,
1450 DES3_EDE_CBC_ENC_TEST_VECTORS);
1451
1452 test_cipher("cbc(des3_ede)", DECRYPT,
1453 des3_ede_cbc_dec_tv_template,
1454 DES3_EDE_CBC_DEC_TEST_VECTORS);
1455 break; 498 break;
1456 499
1457 case 5: 500 case 5:
1458 test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); 501 tcrypt_test("md4");
1459 break; 502 break;
1460 503
1461 case 6: 504 case 6:
1462 test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS); 505 tcrypt_test("sha256");
1463 break; 506 break;
1464 507
1465 case 7: 508 case 7:
1466 test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template, 509 tcrypt_test("ecb(blowfish)");
1467 BF_ENC_TEST_VECTORS); 510 tcrypt_test("cbc(blowfish)");
1468 test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template,
1469 BF_DEC_TEST_VECTORS);
1470 test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template,
1471 BF_CBC_ENC_TEST_VECTORS);
1472 test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template,
1473 BF_CBC_DEC_TEST_VECTORS);
1474 break; 511 break;
1475 512
1476 case 8: 513 case 8:
1477 test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template, 514 tcrypt_test("ecb(twofish)");
1478 TF_ENC_TEST_VECTORS); 515 tcrypt_test("cbc(twofish)");
1479 test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template,
1480 TF_DEC_TEST_VECTORS);
1481 test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template,
1482 TF_CBC_ENC_TEST_VECTORS);
1483 test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template,
1484 TF_CBC_DEC_TEST_VECTORS);
1485 break; 516 break;
1486 517
1487 case 9: 518 case 9:
1488 test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template, 519 tcrypt_test("ecb(serpent)");
1489 SERPENT_ENC_TEST_VECTORS);
1490 test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template,
1491 SERPENT_DEC_TEST_VECTORS);
1492 break; 520 break;
1493 521
1494 case 10: 522 case 10:
1495 test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template, 523 tcrypt_test("ecb(aes)");
1496 AES_ENC_TEST_VECTORS); 524 tcrypt_test("cbc(aes)");
1497 test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template, 525 tcrypt_test("lrw(aes)");
1498 AES_DEC_TEST_VECTORS); 526 tcrypt_test("xts(aes)");
1499 test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template, 527 tcrypt_test("rfc3686(ctr(aes))");
1500 AES_CBC_ENC_TEST_VECTORS);
1501 test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
1502 AES_CBC_DEC_TEST_VECTORS);
1503 test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
1504 AES_LRW_ENC_TEST_VECTORS);
1505 test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
1506 AES_LRW_DEC_TEST_VECTORS);
1507 test_cipher("xts(aes)", ENCRYPT, aes_xts_enc_tv_template,
1508 AES_XTS_ENC_TEST_VECTORS);
1509 test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template,
1510 AES_XTS_DEC_TEST_VECTORS);
1511 test_cipher("rfc3686(ctr(aes))", ENCRYPT, aes_ctr_enc_tv_template,
1512 AES_CTR_ENC_TEST_VECTORS);
1513 test_cipher("rfc3686(ctr(aes))", DECRYPT, aes_ctr_dec_tv_template,
1514 AES_CTR_DEC_TEST_VECTORS);
1515 break; 528 break;
1516 529
1517 case 11: 530 case 11:
1518 test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); 531 tcrypt_test("sha384");
1519 break; 532 break;
1520 533
1521 case 12: 534 case 12:
1522 test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); 535 tcrypt_test("sha512");
1523 break; 536 break;
1524 537
1525 case 13: 538 case 13:
1526 test_comp("deflate", deflate_comp_tv_template, 539 tcrypt_test("deflate");
1527 deflate_decomp_tv_template, DEFLATE_COMP_TEST_VECTORS,
1528 DEFLATE_DECOMP_TEST_VECTORS);
1529 break; 540 break;
1530 541
1531 case 14: 542 case 14:
1532 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, 543 tcrypt_test("ecb(cast5)");
1533 CAST5_ENC_TEST_VECTORS);
1534 test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template,
1535 CAST5_DEC_TEST_VECTORS);
1536 break; 544 break;
1537 545
1538 case 15: 546 case 15:
1539 test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template, 547 tcrypt_test("ecb(cast6)");
1540 CAST6_ENC_TEST_VECTORS);
1541 test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template,
1542 CAST6_DEC_TEST_VECTORS);
1543 break; 548 break;
1544 549
1545 case 16: 550 case 16:
1546 test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template, 551 tcrypt_test("ecb(arc4)");
1547 ARC4_ENC_TEST_VECTORS);
1548 test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template,
1549 ARC4_DEC_TEST_VECTORS);
1550 break; 552 break;
1551 553
1552 case 17: 554 case 17:
1553 test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS); 555 tcrypt_test("michael_mic");
1554 break; 556 break;
1555 557
1556 case 18: 558 case 18:
1557 test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); 559 tcrypt_test("crc32c");
1558 break; 560 break;
1559 561
1560 case 19: 562 case 19:
1561 test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template, 563 tcrypt_test("ecb(tea)");
1562 TEA_ENC_TEST_VECTORS);
1563 test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template,
1564 TEA_DEC_TEST_VECTORS);
1565 break; 564 break;
1566 565
1567 case 20: 566 case 20:
1568 test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template, 567 tcrypt_test("ecb(xtea)");
1569 XTEA_ENC_TEST_VECTORS);
1570 test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template,
1571 XTEA_DEC_TEST_VECTORS);
1572 break; 568 break;
1573 569
1574 case 21: 570 case 21:
1575 test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template, 571 tcrypt_test("ecb(khazad)");
1576 KHAZAD_ENC_TEST_VECTORS);
1577 test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template,
1578 KHAZAD_DEC_TEST_VECTORS);
1579 break; 572 break;
1580 573
1581 case 22: 574 case 22:
1582 test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS); 575 tcrypt_test("wp512");
1583 break; 576 break;
1584 577
1585 case 23: 578 case 23:
1586 test_hash("wp384", wp384_tv_template, WP384_TEST_VECTORS); 579 tcrypt_test("wp384");
1587 break; 580 break;
1588 581
1589 case 24: 582 case 24:
1590 test_hash("wp256", wp256_tv_template, WP256_TEST_VECTORS); 583 tcrypt_test("wp256");
1591 break; 584 break;
1592 585
1593 case 25: 586 case 25:
1594 test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template, 587 tcrypt_test("ecb(tnepres)");
1595 TNEPRES_ENC_TEST_VECTORS);
1596 test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template,
1597 TNEPRES_DEC_TEST_VECTORS);
1598 break; 588 break;
1599 589
1600 case 26: 590 case 26:
1601 test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template, 591 tcrypt_test("ecb(anubis)");
1602 ANUBIS_ENC_TEST_VECTORS); 592 tcrypt_test("cbc(anubis)");
1603 test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template,
1604 ANUBIS_DEC_TEST_VECTORS);
1605 test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template,
1606 ANUBIS_CBC_ENC_TEST_VECTORS);
1607 test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template,
1608 ANUBIS_CBC_ENC_TEST_VECTORS);
1609 break; 593 break;
1610 594
1611 case 27: 595 case 27:
1612 test_hash("tgr192", tgr192_tv_template, TGR192_TEST_VECTORS); 596 tcrypt_test("tgr192");
1613 break; 597 break;
1614 598
1615 case 28: 599 case 28:
1616 600
1617 test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS); 601 tcrypt_test("tgr160");
1618 break; 602 break;
1619 603
1620 case 29: 604 case 29:
1621 test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); 605 tcrypt_test("tgr128");
1622 break; 606 break;
1623 607
1624 case 30: 608 case 30:
1625 test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, 609 tcrypt_test("ecb(xeta)");
1626 XETA_ENC_TEST_VECTORS);
1627 test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template,
1628 XETA_DEC_TEST_VECTORS);
1629 break; 610 break;
1630 611
1631 case 31: 612 case 31:
1632 test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template, 613 tcrypt_test("pcbc(fcrypt)");
1633 FCRYPT_ENC_TEST_VECTORS);
1634 test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
1635 FCRYPT_DEC_TEST_VECTORS);
1636 break; 614 break;
1637 615
1638 case 32: 616 case 32:
1639 test_cipher("ecb(camellia)", ENCRYPT, 617 tcrypt_test("ecb(camellia)");
1640 camellia_enc_tv_template, 618 tcrypt_test("cbc(camellia)");
1641 CAMELLIA_ENC_TEST_VECTORS);
1642 test_cipher("ecb(camellia)", DECRYPT,
1643 camellia_dec_tv_template,
1644 CAMELLIA_DEC_TEST_VECTORS);
1645 test_cipher("cbc(camellia)", ENCRYPT,
1646 camellia_cbc_enc_tv_template,
1647 CAMELLIA_CBC_ENC_TEST_VECTORS);
1648 test_cipher("cbc(camellia)", DECRYPT,
1649 camellia_cbc_dec_tv_template,
1650 CAMELLIA_CBC_DEC_TEST_VECTORS);
1651 break; 619 break;
1652 case 33: 620 case 33:
1653 test_hash("sha224", sha224_tv_template, SHA224_TEST_VECTORS); 621 tcrypt_test("sha224");
1654 break; 622 break;
1655 623
1656 case 34: 624 case 34:
1657 test_cipher("salsa20", ENCRYPT, 625 tcrypt_test("salsa20");
1658 salsa20_stream_enc_tv_template,
1659 SALSA20_STREAM_ENC_TEST_VECTORS);
1660 break; 626 break;
1661 627
1662 case 35: 628 case 35:
1663 test_aead("gcm(aes)", ENCRYPT, aes_gcm_enc_tv_template, 629 tcrypt_test("gcm(aes)");
1664 AES_GCM_ENC_TEST_VECTORS);
1665 test_aead("gcm(aes)", DECRYPT, aes_gcm_dec_tv_template,
1666 AES_GCM_DEC_TEST_VECTORS);
1667 break; 630 break;
1668 631
1669 case 36: 632 case 36:
1670 test_comp("lzo", lzo_comp_tv_template, lzo_decomp_tv_template, 633 tcrypt_test("lzo");
1671 LZO_COMP_TEST_VECTORS, LZO_DECOMP_TEST_VECTORS);
1672 break; 634 break;
1673 635
1674 case 37: 636 case 37:
1675 test_aead("ccm(aes)", ENCRYPT, aes_ccm_enc_tv_template, 637 tcrypt_test("ccm(aes)");
1676 AES_CCM_ENC_TEST_VECTORS);
1677 test_aead("ccm(aes)", DECRYPT, aes_ccm_dec_tv_template,
1678 AES_CCM_DEC_TEST_VECTORS);
1679 break; 638 break;
1680 639
1681 case 38: 640 case 38:
1682 test_cipher("cts(cbc(aes))", ENCRYPT, cts_mode_enc_tv_template, 641 tcrypt_test("cts(cbc(aes))");
1683 CTS_MODE_ENC_TEST_VECTORS);
1684 test_cipher("cts(cbc(aes))", DECRYPT, cts_mode_dec_tv_template,
1685 CTS_MODE_DEC_TEST_VECTORS);
1686 break; 642 break;
1687 643
1688 case 39: 644 case 39:
1689 test_hash("rmd128", rmd128_tv_template, RMD128_TEST_VECTORS); 645 tcrypt_test("rmd128");
1690 break; 646 break;
1691 647
1692 case 40: 648 case 40:
1693 test_hash("rmd160", rmd160_tv_template, RMD160_TEST_VECTORS); 649 tcrypt_test("rmd160");
1694 break; 650 break;
1695 651
1696 case 41: 652 case 41:
1697 test_hash("rmd256", rmd256_tv_template, RMD256_TEST_VECTORS); 653 tcrypt_test("rmd256");
1698 break; 654 break;
1699 655
1700 case 42: 656 case 42:
1701 test_hash("rmd320", rmd320_tv_template, RMD320_TEST_VECTORS); 657 tcrypt_test("rmd320");
658 break;
659
660 case 43:
661 tcrypt_test("ecb(seed)");
1702 break; 662 break;
1703 663
1704 case 100: 664 case 100:
1705 test_hash("hmac(md5)", hmac_md5_tv_template, 665 tcrypt_test("hmac(md5)");
1706 HMAC_MD5_TEST_VECTORS);
1707 break; 666 break;
1708 667
1709 case 101: 668 case 101:
1710 test_hash("hmac(sha1)", hmac_sha1_tv_template, 669 tcrypt_test("hmac(sha1)");
1711 HMAC_SHA1_TEST_VECTORS);
1712 break; 670 break;
1713 671
1714 case 102: 672 case 102:
1715 test_hash("hmac(sha256)", hmac_sha256_tv_template, 673 tcrypt_test("hmac(sha256)");
1716 HMAC_SHA256_TEST_VECTORS);
1717 break; 674 break;
1718 675
1719 case 103: 676 case 103:
1720 test_hash("hmac(sha384)", hmac_sha384_tv_template, 677 tcrypt_test("hmac(sha384)");
1721 HMAC_SHA384_TEST_VECTORS);
1722 break; 678 break;
1723 679
1724 case 104: 680 case 104:
1725 test_hash("hmac(sha512)", hmac_sha512_tv_template, 681 tcrypt_test("hmac(sha512)");
1726 HMAC_SHA512_TEST_VECTORS);
1727 break; 682 break;
1728 683
1729 case 105: 684 case 105:
1730 test_hash("hmac(sha224)", hmac_sha224_tv_template, 685 tcrypt_test("hmac(sha224)");
1731 HMAC_SHA224_TEST_VECTORS);
1732 break; 686 break;
1733 687
1734 case 106: 688 case 106:
1735 test_hash("xcbc(aes)", aes_xcbc128_tv_template, 689 tcrypt_test("xcbc(aes)");
1736 XCBC_AES_TEST_VECTORS);
1737 break; 690 break;
1738 691
1739 case 107: 692 case 107:
1740 test_hash("hmac(rmd128)", hmac_rmd128_tv_template, 693 tcrypt_test("hmac(rmd128)");
1741 HMAC_RMD128_TEST_VECTORS);
1742 break; 694 break;
1743 695
1744 case 108: 696 case 108:
1745 test_hash("hmac(rmd160)", hmac_rmd160_tv_template, 697 tcrypt_test("hmac(rmd160)");
1746 HMAC_RMD160_TEST_VECTORS);
1747 break; 698 break;
1748 699
1749 case 200: 700 case 200:
@@ -1767,16 +718,16 @@ static void do_test(void)
1767 718
1768 case 201: 719 case 201:
1769 test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec, 720 test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec,
1770 des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS, 721 des3_speed_template, DES3_SPEED_VECTORS,
1771 speed_template_24); 722 speed_template_24);
1772 test_cipher_speed("ecb(des3_ede)", DECRYPT, sec, 723 test_cipher_speed("ecb(des3_ede)", DECRYPT, sec,
1773 des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS, 724 des3_speed_template, DES3_SPEED_VECTORS,
1774 speed_template_24); 725 speed_template_24);
1775 test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec, 726 test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec,
1776 des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS, 727 des3_speed_template, DES3_SPEED_VECTORS,
1777 speed_template_24); 728 speed_template_24);
1778 test_cipher_speed("cbc(des3_ede)", DECRYPT, sec, 729 test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
1779 des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS, 730 des3_speed_template, DES3_SPEED_VECTORS,
1780 speed_template_24); 731 speed_template_24);
1781 break; 732 break;
1782 733
@@ -1906,31 +857,21 @@ static void do_test(void)
1906 case 1000: 857 case 1000:
1907 test_available(); 858 test_available();
1908 break; 859 break;
1909
1910 default:
1911 /* useful for debugging */
1912 printk("not testing anything\n");
1913 break;
1914 } 860 }
1915} 861}
1916 862
1917static int __init tcrypt_mod_init(void) 863static int __init tcrypt_mod_init(void)
1918{ 864{
1919 int err = -ENOMEM; 865 int err = -ENOMEM;
866 int i;
1920 867
1921 tvmem = kmalloc(TVMEMSIZE, GFP_KERNEL); 868 for (i = 0; i < TVMEMSIZE; i++) {
1922 if (tvmem == NULL) 869 tvmem[i] = (void *)__get_free_page(GFP_KERNEL);
1923 return err; 870 if (!tvmem[i])
1924 871 goto err_free_tv;
1925 xbuf = kmalloc(XBUFSIZE, GFP_KERNEL); 872 }
1926 if (xbuf == NULL)
1927 goto err_free_tv;
1928
1929 axbuf = kmalloc(XBUFSIZE, GFP_KERNEL);
1930 if (axbuf == NULL)
1931 goto err_free_xbuf;
1932 873
1933 do_test(); 874 do_test(mode);
1934 875
1935	/* We intentionally return -EAGAIN to prevent keeping	876	/* We intentionally return -EAGAIN to prevent keeping
1936 * the module. It does all its work from init() 877 * the module. It does all its work from init()
@@ -1940,11 +881,9 @@ static int __init tcrypt_mod_init(void)
1940 */ 881 */
1941 err = -EAGAIN; 882 err = -EAGAIN;
1942 883
1943 kfree(axbuf); 884err_free_tv:
1944 err_free_xbuf: 885 for (i = 0; i < TVMEMSIZE && tvmem[i]; i++)
1945 kfree(xbuf); 886 free_page((unsigned long)tvmem[i]);
1946 err_free_tv:
1947 kfree(tvmem);
1948 887
1949 return err; 888 return err;
1950} 889}
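
The init path above no longer makes one large kmalloc() for tvmem; it fills the tvmem[] array with TVMEMSIZE separately allocated pages and, on a partial failure, frees only the pages that were actually obtained. A minimal userspace sketch of the same allocate-then-unwind pattern, with malloc()/free() standing in for __get_free_page()/free_page() and purely illustrative sizes:

/*
 * Sketch of the allocation pattern used by the new tcrypt_mod_init():
 * allocate TVMEMSIZE independent buffers and, on failure, release only
 * the ones already obtained.  malloc()/free() stand in for
 * __get_free_page()/free_page(); TVMEMSIZE and BUF_SIZE are made-up
 * values for the example, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>

#define TVMEMSIZE	4
#define BUF_SIZE	4096

static void *tvmem[TVMEMSIZE];

static int alloc_tvmem(void)
{
	int i;

	for (i = 0; i < TVMEMSIZE; i++) {
		tvmem[i] = malloc(BUF_SIZE);
		if (!tvmem[i])
			goto err_free_tv;
	}
	return 0;

err_free_tv:
	/* tvmem[] is zero-initialised, so stop at the first NULL slot. */
	for (i = 0; i < TVMEMSIZE && tvmem[i]; i++)
		free(tvmem[i]);
	return -1;
}

int main(void)
{
	int i;

	if (alloc_tvmem())
		return 1;
	printf("allocated %d buffers of %d bytes each\n", TVMEMSIZE, BUF_SIZE);
	for (i = 0; i < TVMEMSIZE; i++)
		free(tvmem[i]);
	return 0;
}
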
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 801e0c288862..966bbfaf95b1 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -17,53 +17,9 @@
17#ifndef _CRYPTO_TCRYPT_H 17#ifndef _CRYPTO_TCRYPT_H
18#define _CRYPTO_TCRYPT_H 18#define _CRYPTO_TCRYPT_H
19 19
20#define MAX_DIGEST_SIZE 64 20struct cipher_speed_template {
21#define MAX_TAP 8 21 const char *key;
22 22 unsigned int klen;
23#define MAX_KEYLEN 56
24#define MAX_IVLEN 32
25
26struct hash_testvec {
27 /* only used with keyed hash algorithms */
28 char *key;
29 char *plaintext;
30 char *digest;
31 unsigned char tap[MAX_TAP];
32 unsigned char psize;
33 unsigned char np;
34 unsigned char ksize;
35};
36
37struct cipher_testvec {
38 char *key;
39 char *iv;
40 char *input;
41 char *result;
42 unsigned char tap[MAX_TAP];
43 int np;
44 unsigned char fail;
45 unsigned char wk; /* weak key flag */
46 unsigned char klen;
47 unsigned short ilen;
48 unsigned short rlen;
49};
50
51struct aead_testvec {
52 char *key;
53 char *iv;
54 char *input;
55 char *assoc;
56 char *result;
57 unsigned char tap[MAX_TAP];
58 unsigned char atap[MAX_TAP];
59 int np;
60 int anp;
61 unsigned char fail;
62 unsigned char wk; /* weak key flag */
63 unsigned char klen;
64 unsigned short ilen;
65 unsigned short alen;
66 unsigned short rlen;
67}; 23};
68 24
69struct hash_speed { 25struct hash_speed {
@@ -71,8673 +27,20 @@ struct hash_speed {
71 unsigned int plen; /* per-update length */ 27 unsigned int plen; /* per-update length */
72}; 28};
73 29
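
With the self-test vectors gone from this header, the only per-cipher data tcrypt.h still carries is the key material for the throughput tests, which is what the new cipher_speed_template covers. A hedged sketch of how such a table might be declared (the struct matches the hunk above; the key bytes and the _example name are illustrative, not the kernel's actual des3_speed_template):

/*
 * Illustrative declaration of a speed template using the new struct;
 * the key value here is made up for the example.
 */
struct cipher_speed_template {
	const char *key;
	unsigned int klen;
};

static const struct cipher_speed_template des3_speed_template_example[] = {
	{
		.key	= "\x01\x23\x45\x67\x89\xab\xcd\xef"	/* DES key 1 */
			  "\x55\x55\x55\x55\x55\x55\x55\x55"	/* DES key 2 */
			  "\xfe\xdc\xba\x98\x76\x54\x32\x10",	/* DES key 3 */
		.klen	= 24,
	}
};
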
74static char zeroed_string[48];
75
76/*
77 * MD4 test vectors from RFC1320
78 */
79#define MD4_TEST_VECTORS 7
80
81static struct hash_testvec md4_tv_template [] = {
82 {
83 .plaintext = "",
84 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
85 "\xb7\x3c\x59\xd7\xe0\xc0\x89\xc0",
86 }, {
87 .plaintext = "a",
88 .psize = 1,
89 .digest = "\xbd\xe5\x2c\xb3\x1d\xe3\x3e\x46"
90 "\x24\x5e\x05\xfb\xdb\xd6\xfb\x24",
91 }, {
92 .plaintext = "abc",
93 .psize = 3,
94 .digest = "\xa4\x48\x01\x7a\xaf\x21\xd8\x52"
95 "\x5f\xc1\x0a\xe8\x7a\xa6\x72\x9d",
96 }, {
97 .plaintext = "message digest",
98 .psize = 14,
99 .digest = "\xd9\x13\x0a\x81\x64\x54\x9f\xe8"
100 "\x18\x87\x48\x06\xe1\xc7\x01\x4b",
101 }, {
102 .plaintext = "abcdefghijklmnopqrstuvwxyz",
103 .psize = 26,
104 .digest = "\xd7\x9e\x1c\x30\x8a\xa5\xbb\xcd"
105 "\xee\xa8\xed\x63\xdf\x41\x2d\xa9",
106 .np = 2,
107 .tap = { 13, 13 },
108 }, {
109 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
110 .psize = 62,
111 .digest = "\x04\x3f\x85\x82\xf2\x41\xdb\x35"
112 "\x1c\xe6\x27\xe1\x53\xe7\xf0\xe4",
113 }, {
114 .plaintext = "123456789012345678901234567890123456789012345678901234567890123"
115 "45678901234567890",
116 .psize = 80,
117 .digest = "\xe3\x3b\x4d\xdc\x9c\x38\xf2\x19"
118 "\x9c\x3e\x7b\x16\x4f\xcc\x05\x36",
119 },
120};
121
122/*
123 * MD5 test vectors from RFC1321
124 */
125#define MD5_TEST_VECTORS 7
126
127static struct hash_testvec md5_tv_template[] = {
128 {
129 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
130 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
131 }, {
132 .plaintext = "a",
133 .psize = 1,
134 .digest = "\x0c\xc1\x75\xb9\xc0\xf1\xb6\xa8"
135 "\x31\xc3\x99\xe2\x69\x77\x26\x61",
136 }, {
137 .plaintext = "abc",
138 .psize = 3,
139 .digest = "\x90\x01\x50\x98\x3c\xd2\x4f\xb0"
140 "\xd6\x96\x3f\x7d\x28\xe1\x7f\x72",
141 }, {
142 .plaintext = "message digest",
143 .psize = 14,
144 .digest = "\xf9\x6b\x69\x7d\x7c\xb7\x93\x8d"
145 "\x52\x5a\x2f\x31\xaa\xf1\x61\xd0",
146 }, {
147 .plaintext = "abcdefghijklmnopqrstuvwxyz",
148 .psize = 26,
149 .digest = "\xc3\xfc\xd3\xd7\x61\x92\xe4\x00"
150 "\x7d\xfb\x49\x6c\xca\x67\xe1\x3b",
151 .np = 2,
152 .tap = {13, 13}
153 }, {
154 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
155 .psize = 62,
156 .digest = "\xd1\x74\xab\x98\xd2\x77\xd9\xf5"
157 "\xa5\x61\x1c\x2c\x9f\x41\x9d\x9f",
158 }, {
159 .plaintext = "12345678901234567890123456789012345678901234567890123456789012"
160 "345678901234567890",
161 .psize = 80,
162 .digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55"
163 "\xac\x49\xda\x2e\x21\x07\xb6\x7a",
164 }
165
166};
167
168/*
169 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
170 */
171#define RMD128_TEST_VECTORS 10
172
173static struct hash_testvec rmd128_tv_template[] = {
174 {
175 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
176 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
177 }, {
178 .plaintext = "a",
179 .psize = 1,
180 .digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7"
181 "\xcf\xc7\x85\xe7\x2f\x57\x8d\x33",
182 }, {
183 .plaintext = "abc",
184 .psize = 3,
185 .digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba"
186 "\x84\x63\x6b\x0f\x69\x14\x4c\x77",
187 }, {
188 .plaintext = "message digest",
189 .psize = 14,
190 .digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62"
191 "\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8",
192 }, {
193 .plaintext = "abcdefghijklmnopqrstuvwxyz",
194 .psize = 26,
195 .digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5"
196 "\x10\x71\x49\x22\xb3\x71\x83\x4e",
197 }, {
198 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
199 "fghijklmnopqrstuvwxyz0123456789",
200 .psize = 62,
201 .digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
202 "\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
203 }, {
204 .plaintext = "1234567890123456789012345678901234567890"
205 "1234567890123456789012345678901234567890",
206 .psize = 80,
207 .digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb"
208 "\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3",
209 }, {
210 .plaintext = "abcdbcdecdefdefgefghfghighij"
211 "hijkijkljklmklmnlmnomnopnopq",
212 .psize = 56,
213 .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
214 "\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
215 .np = 2,
216 .tap = { 28, 28 },
217 }, {
218 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
219 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
220 "lmnopqrsmnopqrstnopqrstu",
221 .psize = 112,
222 .digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b"
223 "\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46",
224 }, {
225 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
226 .psize = 32,
227 .digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d"
228 "\xe1\x93\xff\x46\xdb\xac\xcf\xd4",
229 }
230};
231
232/*
233 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
234 */
235#define RMD160_TEST_VECTORS 10
236
237static struct hash_testvec rmd160_tv_template[] = {
238 {
239 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
240 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
241 }, {
242 .plaintext = "a",
243 .psize = 1,
244 .digest = "\x0b\xdc\x9d\x2d\x25\x6b\x3e\xe9\xda\xae"
245 "\x34\x7b\xe6\xf4\xdc\x83\x5a\x46\x7f\xfe",
246 }, {
247 .plaintext = "abc",
248 .psize = 3,
249 .digest = "\x8e\xb2\x08\xf7\xe0\x5d\x98\x7a\x9b\x04"
250 "\x4a\x8e\x98\xc6\xb0\x87\xf1\x5a\x0b\xfc",
251 }, {
252 .plaintext = "message digest",
253 .psize = 14,
254 .digest = "\x5d\x06\x89\xef\x49\xd2\xfa\xe5\x72\xb8"
255 "\x81\xb1\x23\xa8\x5f\xfa\x21\x59\x5f\x36",
256 }, {
257 .plaintext = "abcdefghijklmnopqrstuvwxyz",
258 .psize = 26,
259 .digest = "\xf7\x1c\x27\x10\x9c\x69\x2c\x1b\x56\xbb"
260 "\xdc\xeb\x5b\x9d\x28\x65\xb3\x70\x8d\xbc",
261 }, {
262 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
263 "fghijklmnopqrstuvwxyz0123456789",
264 .psize = 62,
265 .digest = "\xb0\xe2\x0b\x6e\x31\x16\x64\x02\x86\xed"
266 "\x3a\x87\xa5\x71\x30\x79\xb2\x1f\x51\x89",
267 }, {
268 .plaintext = "1234567890123456789012345678901234567890"
269 "1234567890123456789012345678901234567890",
270 .psize = 80,
271 .digest = "\x9b\x75\x2e\x45\x57\x3d\x4b\x39\xf4\xdb"
272 "\xd3\x32\x3c\xab\x82\xbf\x63\x32\x6b\xfb",
273 }, {
274 .plaintext = "abcdbcdecdefdefgefghfghighij"
275 "hijkijkljklmklmnlmnomnopnopq",
276 .psize = 56,
277 .digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05"
278 "\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b",
279 .np = 2,
280 .tap = { 28, 28 },
281 }, {
282 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
283 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
284 "lmnopqrsmnopqrstnopqrstu",
285 .psize = 112,
286 .digest = "\x6f\x3f\xa3\x9b\x6b\x50\x3c\x38\x4f\x91"
287 "\x9a\x49\xa7\xaa\x5c\x2c\x08\xbd\xfb\x45",
288 }, {
289 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
290 .psize = 32,
291 .digest = "\x94\xc2\x64\x11\x54\x04\xe6\x33\x79\x0d"
292 "\xfc\xc8\x7b\x58\x7d\x36\x77\x06\x7d\x9f",
293 }
294};
295
296/*
297 * RIPEMD-256 test vectors
298 */
299#define RMD256_TEST_VECTORS 8
300
301static struct hash_testvec rmd256_tv_template[] = {
302 {
303 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
304 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
305 "\x2d\x97\x74\xfb\x1e\x5d\x02\x63"
306 "\x80\xae\x01\x68\xe3\xc5\x52\x2d",
307 }, {
308 .plaintext = "a",
309 .psize = 1,
310 .digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9"
311 "\x0a\x91\xba\xb7\x0a\x1e\xba\x0c"
312 "\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf"
313 "\xcd\x88\x3a\x91\x34\x69\x29\x25",
314 }, {
315 .plaintext = "abc",
316 .psize = 3,
317 .digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb"
318 "\xce\xf5\xca\x2d\x03\xe6\xdb\xa1"
319 "\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e"
320 "\x1e\x42\xd2\xe9\x75\x45\x9b\x65",
321 }, {
322 .plaintext = "message digest",
323 .psize = 14,
324 .digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a"
325 "\x51\x4d\x5c\x91\x4c\x39\x2c\x90"
326 "\x18\xc7\xc4\x6b\xc1\x44\x65\x55"
327 "\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e",
328 }, {
329 .plaintext = "abcdefghijklmnopqrstuvwxyz",
330 .psize = 26,
331 .digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16"
332 "\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc"
333 "\x78\x96\x11\x8a\x51\x97\x96\x87"
334 "\x82\xdd\x1f\xd9\x7d\x8d\x51\x33",
335 }, {
336 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
337 "fghijklmnopqrstuvwxyz0123456789",
338 .psize = 62,
339 .digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20"
340 "\xb8\x44\x24\xae\x93\x1c\xbb\x1f"
341 "\xe3\x63\xd1\xd0\xbf\x40\x17\xf1"
342 "\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8",
343 }, {
344 .plaintext = "1234567890123456789012345678901234567890"
345 "1234567890123456789012345678901234567890",
346 .psize = 80,
347 .digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa"
348 "\xf9\x13\x68\xc0\x6a\x62\x75\xb5"
349 "\x53\xe3\xf0\x99\xbf\x0e\xa4\xed"
350 "\xfd\x67\x78\xdf\x89\xa8\x90\xdd",
351 }, {
352 .plaintext = "abcdbcdecdefdefgefghfghighij"
353 "hijkijkljklmklmnlmnomnopnopq",
354 .psize = 56,
355 .digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8"
356 "\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
357 "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
358 "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
359 .np = 2,
360 .tap = { 28, 28 },
361 }
362};
363
364/*
365 * RIPEMD-320 test vectors
366 */
367#define RMD320_TEST_VECTORS 8
368
369static struct hash_testvec rmd320_tv_template[] = {
370 {
371 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
372 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
373 "\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e"
374 "\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8",
375 }, {
376 .plaintext = "a",
377 .psize = 1,
378 .digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5"
379 "\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57"
380 "\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54"
381 "\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d",
382 }, {
383 .plaintext = "abc",
384 .psize = 3,
385 .digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d"
386 "\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08"
387 "\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74"
388 "\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d",
389 }, {
390 .plaintext = "message digest",
391 .psize = 14,
392 .digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68"
393 "\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa"
394 "\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d"
395 "\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97",
396 }, {
397 .plaintext = "abcdefghijklmnopqrstuvwxyz",
398 .psize = 26,
399 .digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93"
400 "\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4"
401 "\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed"
402 "\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09",
403 }, {
404 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
405 "fghijklmnopqrstuvwxyz0123456789",
406 .psize = 62,
407 .digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2"
408 "\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c"
409 "\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9"
410 "\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4",
411 }, {
412 .plaintext = "1234567890123456789012345678901234567890"
413 "1234567890123456789012345678901234567890",
414 .psize = 80,
415 .digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6"
416 "\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41"
417 "\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f"
418 "\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42",
419 }, {
420 .plaintext = "abcdbcdecdefdefgefghfghighij"
421 "hijkijkljklmklmnlmnomnopnopq",
422 .psize = 56,
423 .digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4"
424 "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
425 "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
426 "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
427 .np = 2,
428 .tap = { 28, 28 },
429 }
430};
431
432/*
433 * SHA1 test vectors from FIPS PUB 180-1
434 */
435#define SHA1_TEST_VECTORS 2
436
437static struct hash_testvec sha1_tv_template[] = {
438 {
439 .plaintext = "abc",
440 .psize = 3,
441 .digest = "\xa9\x99\x3e\x36\x47\x06\x81\x6a\xba\x3e"
442 "\x25\x71\x78\x50\xc2\x6c\x9c\xd0\xd8\x9d",
443 }, {
444 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
445 .psize = 56,
446 .digest = "\x84\x98\x3e\x44\x1c\x3b\xd2\x6e\xba\xae"
447 "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1",
448 .np = 2,
449 .tap = { 28, 28 }
450 }
451};
452
453
454/*
455 * SHA224 test vectors from FIPS PUB 180-2
456 */
457#define SHA224_TEST_VECTORS 2
458
459static struct hash_testvec sha224_tv_template[] = {
460 {
461 .plaintext = "abc",
462 .psize = 3,
463 .digest = "\x23\x09\x7D\x22\x34\x05\xD8\x22"
464 "\x86\x42\xA4\x77\xBD\xA2\x55\xB3"
465 "\x2A\xAD\xBC\xE4\xBD\xA0\xB3\xF7"
466 "\xE3\x6C\x9D\xA7",
467 }, {
468 .plaintext =
469 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
470 .psize = 56,
471 .digest = "\x75\x38\x8B\x16\x51\x27\x76\xCC"
472 "\x5D\xBA\x5D\xA1\xFD\x89\x01\x50"
473 "\xB0\xC6\x45\x5C\xB4\xF5\x8B\x19"
474 "\x52\x52\x25\x25",
475 .np = 2,
476 .tap = { 28, 28 }
477 }
478};
479
480/*
481 * SHA256 test vectors from NIST
482 */
483#define SHA256_TEST_VECTORS 2
484
485static struct hash_testvec sha256_tv_template[] = {
486 {
487 .plaintext = "abc",
488 .psize = 3,
489 .digest = "\xba\x78\x16\xbf\x8f\x01\xcf\xea"
490 "\x41\x41\x40\xde\x5d\xae\x22\x23"
491 "\xb0\x03\x61\xa3\x96\x17\x7a\x9c"
492 "\xb4\x10\xff\x61\xf2\x00\x15\xad",
493 }, {
494 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
495 .psize = 56,
496 .digest = "\x24\x8d\x6a\x61\xd2\x06\x38\xb8"
497 "\xe5\xc0\x26\x93\x0c\x3e\x60\x39"
498 "\xa3\x3c\xe4\x59\x64\xff\x21\x67"
499 "\xf6\xec\xed\xd4\x19\xdb\x06\xc1",
500 .np = 2,
501 .tap = { 28, 28 }
502 },
503};
504
505/*
506 * SHA384 test vectors from NIST and kerneli
507 */
508#define SHA384_TEST_VECTORS 4
509
510static struct hash_testvec sha384_tv_template[] = {
511 {
512 .plaintext= "abc",
513 .psize = 3,
514 .digest = "\xcb\x00\x75\x3f\x45\xa3\x5e\x8b"
515 "\xb5\xa0\x3d\x69\x9a\xc6\x50\x07"
516 "\x27\x2c\x32\xab\x0e\xde\xd1\x63"
517 "\x1a\x8b\x60\x5a\x43\xff\x5b\xed"
518 "\x80\x86\x07\x2b\xa1\xe7\xcc\x23"
519 "\x58\xba\xec\xa1\x34\xc8\x25\xa7",
520 }, {
521 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
522 .psize = 56,
523 .digest = "\x33\x91\xfd\xdd\xfc\x8d\xc7\x39"
524 "\x37\x07\xa6\x5b\x1b\x47\x09\x39"
525 "\x7c\xf8\xb1\xd1\x62\xaf\x05\xab"
526 "\xfe\x8f\x45\x0d\xe5\xf3\x6b\xc6"
527 "\xb0\x45\x5a\x85\x20\xbc\x4e\x6f"
528 "\x5f\xe9\x5b\x1f\xe3\xc8\x45\x2b",
529 }, {
530 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
531 "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
532 .psize = 112,
533 .digest = "\x09\x33\x0c\x33\xf7\x11\x47\xe8"
534 "\x3d\x19\x2f\xc7\x82\xcd\x1b\x47"
535 "\x53\x11\x1b\x17\x3b\x3b\x05\xd2"
536 "\x2f\xa0\x80\x86\xe3\xb0\xf7\x12"
537 "\xfc\xc7\xc7\x1a\x55\x7e\x2d\xb9"
538 "\x66\xc3\xe9\xfa\x91\x74\x60\x39",
539 }, {
540 .plaintext = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd"
541 "efghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz",
542 .psize = 104,
543 .digest = "\x3d\x20\x89\x73\xab\x35\x08\xdb"
544 "\xbd\x7e\x2c\x28\x62\xba\x29\x0a"
545 "\xd3\x01\x0e\x49\x78\xc1\x98\xdc"
546 "\x4d\x8f\xd0\x14\xe5\x82\x82\x3a"
547 "\x89\xe1\x6f\x9b\x2a\x7b\xbc\x1a"
548 "\xc9\x38\xe2\xd1\x99\xe8\xbe\xa4",
549 .np = 4,
550 .tap = { 26, 26, 26, 26 }
551 },
552};
553
554/*
555 * SHA512 test vectors from NIST and kerneli
556 */
557#define SHA512_TEST_VECTORS 4
558
559static struct hash_testvec sha512_tv_template[] = {
560 {
561 .plaintext = "abc",
562 .psize = 3,
563 .digest = "\xdd\xaf\x35\xa1\x93\x61\x7a\xba"
564 "\xcc\x41\x73\x49\xae\x20\x41\x31"
565 "\x12\xe6\xfa\x4e\x89\xa9\x7e\xa2"
566 "\x0a\x9e\xee\xe6\x4b\x55\xd3\x9a"
567 "\x21\x92\x99\x2a\x27\x4f\xc1\xa8"
568 "\x36\xba\x3c\x23\xa3\xfe\xeb\xbd"
569 "\x45\x4d\x44\x23\x64\x3c\xe8\x0e"
570 "\x2a\x9a\xc9\x4f\xa5\x4c\xa4\x9f",
571 }, {
572 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
573 .psize = 56,
574 .digest = "\x20\x4a\x8f\xc6\xdd\xa8\x2f\x0a"
575 "\x0c\xed\x7b\xeb\x8e\x08\xa4\x16"
576 "\x57\xc1\x6e\xf4\x68\xb2\x28\xa8"
577 "\x27\x9b\xe3\x31\xa7\x03\xc3\x35"
578 "\x96\xfd\x15\xc1\x3b\x1b\x07\xf9"
579 "\xaa\x1d\x3b\xea\x57\x78\x9c\xa0"
580 "\x31\xad\x85\xc7\xa7\x1d\xd7\x03"
581 "\x54\xec\x63\x12\x38\xca\x34\x45",
582 }, {
583 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
584 "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
585 .psize = 112,
586 .digest = "\x8e\x95\x9b\x75\xda\xe3\x13\xda"
587 "\x8c\xf4\xf7\x28\x14\xfc\x14\x3f"
588 "\x8f\x77\x79\xc6\xeb\x9f\x7f\xa1"
589 "\x72\x99\xae\xad\xb6\x88\x90\x18"
590 "\x50\x1d\x28\x9e\x49\x00\xf7\xe4"
591 "\x33\x1b\x99\xde\xc4\xb5\x43\x3a"
592 "\xc7\xd3\x29\xee\xb6\xdd\x26\x54"
593 "\x5e\x96\xe5\x5b\x87\x4b\xe9\x09",
594 }, {
595 .plaintext = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd"
596 "efghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz",
597 .psize = 104,
598 .digest = "\x93\x0d\x0c\xef\xcb\x30\xff\x11"
599 "\x33\xb6\x89\x81\x21\xf1\xcf\x3d"
600 "\x27\x57\x8a\xfc\xaf\xe8\x67\x7c"
601 "\x52\x57\xcf\x06\x99\x11\xf7\x5d"
602 "\x8f\x58\x31\xb5\x6e\xbf\xda\x67"
603 "\xb2\x78\xe6\x6d\xff\x8b\x84\xfe"
604 "\x2b\x28\x70\xf7\x42\xa5\x80\xd8"
605 "\xed\xb4\x19\x87\x23\x28\x50\xc9",
606 .np = 4,
607 .tap = { 26, 26, 26, 26 }
608 },
609};
610
611
612/*
613 * WHIRLPOOL test vectors from Whirlpool package
614 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
615 * submission
616 */
617#define WP512_TEST_VECTORS 8
618
619static struct hash_testvec wp512_tv_template[] = {
620 {
621 .plaintext = "",
622 .psize = 0,
623 .digest = "\x19\xFA\x61\xD7\x55\x22\xA4\x66"
624 "\x9B\x44\xE3\x9C\x1D\x2E\x17\x26"
625 "\xC5\x30\x23\x21\x30\xD4\x07\xF8"
626 "\x9A\xFE\xE0\x96\x49\x97\xF7\xA7"
627 "\x3E\x83\xBE\x69\x8B\x28\x8F\xEB"
628 "\xCF\x88\xE3\xE0\x3C\x4F\x07\x57"
629 "\xEA\x89\x64\xE5\x9B\x63\xD9\x37"
630 "\x08\xB1\x38\xCC\x42\xA6\x6E\xB3",
631
632
633 }, {
634 .plaintext = "a",
635 .psize = 1,
636 .digest = "\x8A\xCA\x26\x02\x79\x2A\xEC\x6F"
637 "\x11\xA6\x72\x06\x53\x1F\xB7\xD7"
638 "\xF0\xDF\xF5\x94\x13\x14\x5E\x69"
639 "\x73\xC4\x50\x01\xD0\x08\x7B\x42"
640 "\xD1\x1B\xC6\x45\x41\x3A\xEF\xF6"
641 "\x3A\x42\x39\x1A\x39\x14\x5A\x59"
642 "\x1A\x92\x20\x0D\x56\x01\x95\xE5"
643 "\x3B\x47\x85\x84\xFD\xAE\x23\x1A",
644 }, {
645 .plaintext = "abc",
646 .psize = 3,
647 .digest = "\x4E\x24\x48\xA4\xC6\xF4\x86\xBB"
648 "\x16\xB6\x56\x2C\x73\xB4\x02\x0B"
649 "\xF3\x04\x3E\x3A\x73\x1B\xCE\x72"
650 "\x1A\xE1\xB3\x03\xD9\x7E\x6D\x4C"
651 "\x71\x81\xEE\xBD\xB6\xC5\x7E\x27"
652 "\x7D\x0E\x34\x95\x71\x14\xCB\xD6"
653 "\xC7\x97\xFC\x9D\x95\xD8\xB5\x82"
654 "\xD2\x25\x29\x20\x76\xD4\xEE\xF5",
655 }, {
656 .plaintext = "message digest",
657 .psize = 14,
658 .digest = "\x37\x8C\x84\xA4\x12\x6E\x2D\xC6"
659 "\xE5\x6D\xCC\x74\x58\x37\x7A\xAC"
660 "\x83\x8D\x00\x03\x22\x30\xF5\x3C"
661 "\xE1\xF5\x70\x0C\x0F\xFB\x4D\x3B"
662 "\x84\x21\x55\x76\x59\xEF\x55\xC1"
663 "\x06\xB4\xB5\x2A\xC5\xA4\xAA\xA6"
664 "\x92\xED\x92\x00\x52\x83\x8F\x33"
665 "\x62\xE8\x6D\xBD\x37\xA8\x90\x3E",
666 }, {
667 .plaintext = "abcdefghijklmnopqrstuvwxyz",
668 .psize = 26,
669 .digest = "\xF1\xD7\x54\x66\x26\x36\xFF\xE9"
670 "\x2C\x82\xEB\xB9\x21\x2A\x48\x4A"
671 "\x8D\x38\x63\x1E\xAD\x42\x38\xF5"
672 "\x44\x2E\xE1\x3B\x80\x54\xE4\x1B"
673 "\x08\xBF\x2A\x92\x51\xC3\x0B\x6A"
674 "\x0B\x8A\xAE\x86\x17\x7A\xB4\xA6"
675 "\xF6\x8F\x67\x3E\x72\x07\x86\x5D"
676 "\x5D\x98\x19\xA3\xDB\xA4\xEB\x3B",
677 }, {
678 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
679 "abcdefghijklmnopqrstuvwxyz0123456789",
680 .psize = 62,
681 .digest = "\xDC\x37\xE0\x08\xCF\x9E\xE6\x9B"
682 "\xF1\x1F\x00\xED\x9A\xBA\x26\x90"
683 "\x1D\xD7\xC2\x8C\xDE\xC0\x66\xCC"
684 "\x6A\xF4\x2E\x40\xF8\x2F\x3A\x1E"
685 "\x08\xEB\xA2\x66\x29\x12\x9D\x8F"
686 "\xB7\xCB\x57\x21\x1B\x92\x81\xA6"
687 "\x55\x17\xCC\x87\x9D\x7B\x96\x21"
688 "\x42\xC6\x5F\x5A\x7A\xF0\x14\x67",
689 }, {
690 .plaintext = "1234567890123456789012345678901234567890"
691 "1234567890123456789012345678901234567890",
692 .psize = 80,
693 .digest = "\x46\x6E\xF1\x8B\xAB\xB0\x15\x4D"
694 "\x25\xB9\xD3\x8A\x64\x14\xF5\xC0"
695 "\x87\x84\x37\x2B\xCC\xB2\x04\xD6"
696 "\x54\x9C\x4A\xFA\xDB\x60\x14\x29"
697 "\x4D\x5B\xD8\xDF\x2A\x6C\x44\xE5"
698 "\x38\xCD\x04\x7B\x26\x81\xA5\x1A"
699 "\x2C\x60\x48\x1E\x88\xC5\xA2\x0B"
700 "\x2C\x2A\x80\xCF\x3A\x9A\x08\x3B",
701 }, {
702 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
703 .psize = 32,
704 .digest = "\x2A\x98\x7E\xA4\x0F\x91\x70\x61"
705 "\xF5\xD6\xF0\xA0\xE4\x64\x4F\x48"
706 "\x8A\x7A\x5A\x52\xDE\xEE\x65\x62"
707 "\x07\xC5\x62\xF9\x88\xE9\x5C\x69"
708 "\x16\xBD\xC8\x03\x1B\xC5\xBE\x1B"
709 "\x7B\x94\x76\x39\xFE\x05\x0B\x56"
710 "\x93\x9B\xAA\xA0\xAD\xFF\x9A\xE6"
711 "\x74\x5B\x7B\x18\x1C\x3B\xE3\xFD",
712 },
713};
714
715#define WP384_TEST_VECTORS 8
716
717static struct hash_testvec wp384_tv_template[] = {
718 {
719 .plaintext = "",
720 .psize = 0,
721 .digest = "\x19\xFA\x61\xD7\x55\x22\xA4\x66"
722 "\x9B\x44\xE3\x9C\x1D\x2E\x17\x26"
723 "\xC5\x30\x23\x21\x30\xD4\x07\xF8"
724 "\x9A\xFE\xE0\x96\x49\x97\xF7\xA7"
725 "\x3E\x83\xBE\x69\x8B\x28\x8F\xEB"
726 "\xCF\x88\xE3\xE0\x3C\x4F\x07\x57",
727
728
729 }, {
730 .plaintext = "a",
731 .psize = 1,
732 .digest = "\x8A\xCA\x26\x02\x79\x2A\xEC\x6F"
733 "\x11\xA6\x72\x06\x53\x1F\xB7\xD7"
734 "\xF0\xDF\xF5\x94\x13\x14\x5E\x69"
735 "\x73\xC4\x50\x01\xD0\x08\x7B\x42"
736 "\xD1\x1B\xC6\x45\x41\x3A\xEF\xF6"
737 "\x3A\x42\x39\x1A\x39\x14\x5A\x59",
738 }, {
739 .plaintext = "abc",
740 .psize = 3,
741 .digest = "\x4E\x24\x48\xA4\xC6\xF4\x86\xBB"
742 "\x16\xB6\x56\x2C\x73\xB4\x02\x0B"
743 "\xF3\x04\x3E\x3A\x73\x1B\xCE\x72"
744 "\x1A\xE1\xB3\x03\xD9\x7E\x6D\x4C"
745 "\x71\x81\xEE\xBD\xB6\xC5\x7E\x27"
746 "\x7D\x0E\x34\x95\x71\x14\xCB\xD6",
747 }, {
748 .plaintext = "message digest",
749 .psize = 14,
750 .digest = "\x37\x8C\x84\xA4\x12\x6E\x2D\xC6"
751 "\xE5\x6D\xCC\x74\x58\x37\x7A\xAC"
752 "\x83\x8D\x00\x03\x22\x30\xF5\x3C"
753 "\xE1\xF5\x70\x0C\x0F\xFB\x4D\x3B"
754 "\x84\x21\x55\x76\x59\xEF\x55\xC1"
755 "\x06\xB4\xB5\x2A\xC5\xA4\xAA\xA6",
756 }, {
757 .plaintext = "abcdefghijklmnopqrstuvwxyz",
758 .psize = 26,
759 .digest = "\xF1\xD7\x54\x66\x26\x36\xFF\xE9"
760 "\x2C\x82\xEB\xB9\x21\x2A\x48\x4A"
761 "\x8D\x38\x63\x1E\xAD\x42\x38\xF5"
762 "\x44\x2E\xE1\x3B\x80\x54\xE4\x1B"
763 "\x08\xBF\x2A\x92\x51\xC3\x0B\x6A"
764 "\x0B\x8A\xAE\x86\x17\x7A\xB4\xA6",
765 }, {
766 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
767 "abcdefghijklmnopqrstuvwxyz0123456789",
768 .psize = 62,
769 .digest = "\xDC\x37\xE0\x08\xCF\x9E\xE6\x9B"
770 "\xF1\x1F\x00\xED\x9A\xBA\x26\x90"
771 "\x1D\xD7\xC2\x8C\xDE\xC0\x66\xCC"
772 "\x6A\xF4\x2E\x40\xF8\x2F\x3A\x1E"
773 "\x08\xEB\xA2\x66\x29\x12\x9D\x8F"
774 "\xB7\xCB\x57\x21\x1B\x92\x81\xA6",
775 }, {
776 .plaintext = "1234567890123456789012345678901234567890"
777 "1234567890123456789012345678901234567890",
778 .psize = 80,
779 .digest = "\x46\x6E\xF1\x8B\xAB\xB0\x15\x4D"
780 "\x25\xB9\xD3\x8A\x64\x14\xF5\xC0"
781 "\x87\x84\x37\x2B\xCC\xB2\x04\xD6"
782 "\x54\x9C\x4A\xFA\xDB\x60\x14\x29"
783 "\x4D\x5B\xD8\xDF\x2A\x6C\x44\xE5"
784 "\x38\xCD\x04\x7B\x26\x81\xA5\x1A",
785 }, {
786 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
787 .psize = 32,
788 .digest = "\x2A\x98\x7E\xA4\x0F\x91\x70\x61"
789 "\xF5\xD6\xF0\xA0\xE4\x64\x4F\x48"
790 "\x8A\x7A\x5A\x52\xDE\xEE\x65\x62"
791 "\x07\xC5\x62\xF9\x88\xE9\x5C\x69"
792 "\x16\xBD\xC8\x03\x1B\xC5\xBE\x1B"
793 "\x7B\x94\x76\x39\xFE\x05\x0B\x56",
794 },
795};
796
797#define WP256_TEST_VECTORS 8
798
799static struct hash_testvec wp256_tv_template[] = {
800 {
801 .plaintext = "",
802 .psize = 0,
803 .digest = "\x19\xFA\x61\xD7\x55\x22\xA4\x66"
804 "\x9B\x44\xE3\x9C\x1D\x2E\x17\x26"
805 "\xC5\x30\x23\x21\x30\xD4\x07\xF8"
806 "\x9A\xFE\xE0\x96\x49\x97\xF7\xA7",
807
808
809 }, {
810 .plaintext = "a",
811 .psize = 1,
812 .digest = "\x8A\xCA\x26\x02\x79\x2A\xEC\x6F"
813 "\x11\xA6\x72\x06\x53\x1F\xB7\xD7"
814 "\xF0\xDF\xF5\x94\x13\x14\x5E\x69"
815 "\x73\xC4\x50\x01\xD0\x08\x7B\x42",
816 }, {
817 .plaintext = "abc",
818 .psize = 3,
819 .digest = "\x4E\x24\x48\xA4\xC6\xF4\x86\xBB"
820 "\x16\xB6\x56\x2C\x73\xB4\x02\x0B"
821 "\xF3\x04\x3E\x3A\x73\x1B\xCE\x72"
822 "\x1A\xE1\xB3\x03\xD9\x7E\x6D\x4C",
823 }, {
824 .plaintext = "message digest",
825 .psize = 14,
826 .digest = "\x37\x8C\x84\xA4\x12\x6E\x2D\xC6"
827 "\xE5\x6D\xCC\x74\x58\x37\x7A\xAC"
828 "\x83\x8D\x00\x03\x22\x30\xF5\x3C"
829 "\xE1\xF5\x70\x0C\x0F\xFB\x4D\x3B",
830 }, {
831 .plaintext = "abcdefghijklmnopqrstuvwxyz",
832 .psize = 26,
833 .digest = "\xF1\xD7\x54\x66\x26\x36\xFF\xE9"
834 "\x2C\x82\xEB\xB9\x21\x2A\x48\x4A"
835 "\x8D\x38\x63\x1E\xAD\x42\x38\xF5"
836 "\x44\x2E\xE1\x3B\x80\x54\xE4\x1B",
837 }, {
838 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
839 "abcdefghijklmnopqrstuvwxyz0123456789",
840 .psize = 62,
841 .digest = "\xDC\x37\xE0\x08\xCF\x9E\xE6\x9B"
842 "\xF1\x1F\x00\xED\x9A\xBA\x26\x90"
843 "\x1D\xD7\xC2\x8C\xDE\xC0\x66\xCC"
844 "\x6A\xF4\x2E\x40\xF8\x2F\x3A\x1E",
845 }, {
846 .plaintext = "1234567890123456789012345678901234567890"
847 "1234567890123456789012345678901234567890",
848 .psize = 80,
849 .digest = "\x46\x6E\xF1\x8B\xAB\xB0\x15\x4D"
850 "\x25\xB9\xD3\x8A\x64\x14\xF5\xC0"
851 "\x87\x84\x37\x2B\xCC\xB2\x04\xD6"
852 "\x54\x9C\x4A\xFA\xDB\x60\x14\x29",
853 }, {
854 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
855 .psize = 32,
856 .digest = "\x2A\x98\x7E\xA4\x0F\x91\x70\x61"
857 "\xF5\xD6\xF0\xA0\xE4\x64\x4F\x48"
858 "\x8A\x7A\x5A\x52\xDE\xEE\x65\x62"
859 "\x07\xC5\x62\xF9\x88\xE9\x5C\x69",
860 },
861};
862
863/*
864 * TIGER test vectors from Tiger website
865 */
866#define TGR192_TEST_VECTORS 6
867
868static struct hash_testvec tgr192_tv_template[] = {
869 {
870 .plaintext = "",
871 .psize = 0,
872 .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
873 "\x16\x16\x6e\x76\xb1\xbb\x92\x5f"
874 "\xf3\x73\xde\x2d\x49\x58\x4e\x7a",
875 }, {
876 .plaintext = "abc",
877 .psize = 3,
878 .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
879 "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf"
880 "\x93\x5f\x7b\x95\x1c\x13\x29\x51",
881 }, {
882 .plaintext = "Tiger",
883 .psize = 5,
884 .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
885 "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec"
886 "\x37\x79\x0c\x11\x6f\x9d\x2b\xdf",
887 }, {
888 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
889 .psize = 64,
890 .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
891 "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e"
892 "\xb5\x86\x44\x50\x34\xa5\xa3\x86",
893 }, {
894 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
895 .psize = 64,
896 .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
897 "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9"
898 "\x57\x89\x65\x65\x97\x5f\x91\x97",
899 }, {
900 .plaintext = "Tiger - A Fast New Hash Function, "
901 "by Ross Anderson and Eli Biham, "
902 "proceedings of Fast Software Encryption 3, "
903 "Cambridge, 1996.",
904 .psize = 125,
905 .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
906 "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24"
907 "\xdd\x68\x15\x1d\x50\x39\x74\xfc",
908 },
909};
910
911#define TGR160_TEST_VECTORS 6
912
913static struct hash_testvec tgr160_tv_template[] = {
914 {
915 .plaintext = "",
916 .psize = 0,
917 .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
918 "\x16\x16\x6e\x76\xb1\xbb\x92\x5f"
919 "\xf3\x73\xde\x2d",
920 }, {
921 .plaintext = "abc",
922 .psize = 3,
923 .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
924 "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf"
925 "\x93\x5f\x7b\x95",
926 }, {
927 .plaintext = "Tiger",
928 .psize = 5,
929 .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
930 "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec"
931 "\x37\x79\x0c\x11",
932 }, {
933 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
934 .psize = 64,
935 .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
936 "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e"
937 "\xb5\x86\x44\x50",
938 }, {
939 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
940 .psize = 64,
941 .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
942 "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9"
943 "\x57\x89\x65\x65",
944 }, {
945 .plaintext = "Tiger - A Fast New Hash Function, "
946 "by Ross Anderson and Eli Biham, "
947 "proceedings of Fast Software Encryption 3, "
948 "Cambridge, 1996.",
949 .psize = 125,
950 .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
951 "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24"
952 "\xdd\x68\x15\x1d",
953 },
954};
955
956#define TGR128_TEST_VECTORS 6
957
958static struct hash_testvec tgr128_tv_template[] = {
959 {
960 .plaintext = "",
961 .psize = 0,
962 .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
963 "\x16\x16\x6e\x76\xb1\xbb\x92\x5f",
964 }, {
965 .plaintext = "abc",
966 .psize = 3,
967 .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
968 "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf",
969 }, {
970 .plaintext = "Tiger",
971 .psize = 5,
972 .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
973 "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec",
974 }, {
975 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
976 .psize = 64,
977 .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
978 "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e",
979 }, {
980 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
981 .psize = 64,
982 .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
983 "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9",
984 }, {
985 .plaintext = "Tiger - A Fast New Hash Function, "
986 "by Ross Anderson and Eli Biham, "
987 "proceedings of Fast Software Encryption 3, "
988 "Cambridge, 1996.",
989 .psize = 125,
990 .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
991 "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24",
992 },
993};
994
995/*
996 * HMAC-MD5 test vectors from RFC2202
997 * (These need to be fixed to not use strlen).
998 */
999#define HMAC_MD5_TEST_VECTORS 7
1000
1001static struct hash_testvec hmac_md5_tv_template[] =
1002{
1003 {
1004 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1005 .ksize = 16,
1006 .plaintext = "Hi There",
1007 .psize = 8,
1008 .digest = "\x92\x94\x72\x7a\x36\x38\xbb\x1c"
1009 "\x13\xf4\x8e\xf8\x15\x8b\xfc\x9d",
1010 }, {
1011 .key = "Jefe",
1012 .ksize = 4,
1013 .plaintext = "what do ya want for nothing?",
1014 .psize = 28,
1015 .digest = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03"
1016 "\xea\xa8\x6e\x31\x0a\x5d\xb7\x38",
1017 .np = 2,
1018 .tap = {14, 14}
1019 }, {
1020 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1021 .ksize = 16,
1022 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1023 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1024 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1025 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1026 .psize = 50,
1027 .digest = "\x56\xbe\x34\x52\x1d\x14\x4c\x88"
1028 "\xdb\xb8\xc7\x33\xf0\xe8\xb3\xf6",
1029 }, {
1030 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1031 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1032 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1033 .ksize = 25,
1034 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1035 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1036 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1037 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1038 .psize = 50,
1039 .digest = "\x69\x7e\xaf\x0a\xca\x3a\x3a\xea"
1040 "\x3a\x75\x16\x47\x46\xff\xaa\x79",
1041 }, {
1042 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1043 .ksize = 16,
1044 .plaintext = "Test With Truncation",
1045 .psize = 20,
1046 .digest = "\x56\x46\x1e\xf2\x34\x2e\xdc\x00"
1047 "\xf9\xba\xb9\x95\x69\x0e\xfd\x4c",
1048 }, {
1049 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1050 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1051 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1052 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1053 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1054 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1055 "\xaa\xaa",
1056 .ksize = 80,
1057 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1058 .psize = 54,
1059 .digest = "\x6b\x1a\xb7\xfe\x4b\xd7\xbf\x8f"
1060 "\x0b\x62\xe6\xce\x61\xb9\xd0\xcd",
1061 }, {
1062 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1063 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1064 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1065 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1066 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1067 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1068 "\xaa\xaa",
1069 .ksize = 80,
1070 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1071 "Block-Size Data",
1072 .psize = 73,
1073 .digest = "\x6f\x63\x0f\xad\x67\xcd\xa0\xee"
1074 "\x1f\xb1\xf5\x62\xdb\x3a\xa5\x3e",
1075 },
1076};
1077
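
These HMAC-MD5 entries are the RFC 2202 set. A similar userspace cross-check of the first vector (a 16-byte 0x0b key over "Hi There"), assuming OpenSSL's libcrypto and its one-shot HMAC() helper:

/*
 * Userspace cross-check of the first RFC 2202 HMAC-MD5 entry above,
 * using OpenSSL's one-shot HMAC() (build with -lcrypto).  Purely
 * illustrative, not part of tcrypt.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

int main(void)
{
	static const unsigned char key[16] = {
		0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
		0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
	};
	static const unsigned char expected[16] =
		"\x92\x94\x72\x7a\x36\x38\xbb\x1c"
		"\x13\xf4\x8e\xf8\x15\x8b\xfc\x9d";
	unsigned char mac[EVP_MAX_MD_SIZE];
	unsigned int maclen = 0;

	HMAC(EVP_md5(), key, sizeof(key),
	     (const unsigned char *)"Hi There", 8, mac, &maclen);

	printf("hmac-md5 vector 1 %s (%u-byte mac)\n",
	       maclen == 16 && !memcmp(mac, expected, 16) ? "matches"
							  : "does NOT match",
	       maclen);
	return 0;
}
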
1078/*
1079 * HMAC-RIPEMD128 test vectors from RFC2286
1080 */
1081#define HMAC_RMD128_TEST_VECTORS 7
1082
1083static struct hash_testvec hmac_rmd128_tv_template[] = {
1084 {
1085 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1086 .ksize = 16,
1087 .plaintext = "Hi There",
1088 .psize = 8,
1089 .digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf"
1090 "\x81\xc1\x72\xe8\x4e\x07\x34\xdb",
1091 }, {
1092 .key = "Jefe",
1093 .ksize = 4,
1094 .plaintext = "what do ya want for nothing?",
1095 .psize = 28,
1096 .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
1097 "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
1098 .np = 2,
1099 .tap = { 14, 14 },
1100 }, {
1101 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1102 .ksize = 16,
1103 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1104 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1105 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1106 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1107 .psize = 50,
1108 .digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d"
1109 "\xa3\x63\xcb\xec\x8d\x62\xa3\x8d",
1110 }, {
1111 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1112 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1113 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1114 .ksize = 25,
1115 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1116 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1117 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1118 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1119 .psize = 50,
1120 .digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a"
1121 "\xa6\x0a\xf8\x15\xbe\x4d\x22\x94",
1122 }, {
1123 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1124 .ksize = 16,
1125 .plaintext = "Test With Truncation",
1126 .psize = 20,
1127 .digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03"
1128 "\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a",
1129 }, {
1130 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1131 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1132 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1133 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1134 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1135 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1136 "\xaa\xaa",
1137 .ksize = 80,
1138 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1139 .psize = 54,
1140 .digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a"
1141 "\x1f\x59\xd3\x73\xc1\x50\xac\xbb",
1142 }, {
1143 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1144 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1145 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1146 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1147 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1148 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1149 "\xaa\xaa",
1150 .ksize = 80,
1151 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1152 "Block-Size Data",
1153 .psize = 73,
1154 .digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4"
1155 "\x06\x90\xc2\x37\x63\x5f\x30\xc5",
1156 },
1157};
1158
1159/*
1160 * HMAC-RIPEMD160 test vectors from RFC2286
1161 */
1162#define HMAC_RMD160_TEST_VECTORS 7
1163
1164static struct hash_testvec hmac_rmd160_tv_template[] = {
1165 {
1166 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1167 .ksize = 20,
1168 .plaintext = "Hi There",
1169 .psize = 8,
1170 .digest = "\x24\xcb\x4b\xd6\x7d\x20\xfc\x1a\x5d\x2e"
1171 "\xd7\x73\x2d\xcc\x39\x37\x7f\x0a\x56\x68",
1172 }, {
1173 .key = "Jefe",
1174 .ksize = 4,
1175 .plaintext = "what do ya want for nothing?",
1176 .psize = 28,
1177 .digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4"
1178 "\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69",
1179 .np = 2,
1180 .tap = { 14, 14 },
1181 }, {
1182 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1183 .ksize = 20,
1184 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1185 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1186 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1187 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1188 .psize = 50,
1189 .digest = "\xb0\xb1\x05\x36\x0d\xe7\x59\x96\x0a\xb4"
1190 "\xf3\x52\x98\xe1\x16\xe2\x95\xd8\xe7\xc1",
1191 }, {
1192 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1193 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1194 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1195 .ksize = 25,
1196 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1197 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1198 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1199 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1200 .psize = 50,
1201 .digest = "\xd5\xca\x86\x2f\x4d\x21\xd5\xe6\x10\xe1"
1202 "\x8b\x4c\xf1\xbe\xb9\x7a\x43\x65\xec\xf4",
1203 }, {
1204 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1205 .ksize = 20,
1206 .plaintext = "Test With Truncation",
1207 .psize = 20,
1208 .digest = "\x76\x19\x69\x39\x78\xf9\x1d\x90\x53\x9a"
1209 "\xe7\x86\x50\x0f\xf3\xd8\xe0\x51\x8e\x39",
1210 }, {
1211 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1212 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1213 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1214 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1215 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1216 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1217 "\xaa\xaa",
1218 .ksize = 80,
1219 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1220 .psize = 54,
1221 .digest = "\x64\x66\xca\x07\xac\x5e\xac\x29\xe1\xbd"
1222 "\x52\x3e\x5a\xda\x76\x05\xb7\x91\xfd\x8b",
1223 }, {
1224 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1225 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1226 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1227 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1228 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1229 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1230 "\xaa\xaa",
1231 .ksize = 80,
1232 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1233 "Block-Size Data",
1234 .psize = 73,
1235 .digest = "\x69\xea\x60\x79\x8d\x71\x61\x6c\xce\x5f"
1236 "\xd0\x87\x1e\x23\x75\x4c\xd7\x5d\x5a\x0a",
1237 },
1238};
1239
1240/*
1241 * HMAC-SHA1 test vectors from RFC2202
1242 */
1243#define HMAC_SHA1_TEST_VECTORS 7
1244
1245static struct hash_testvec hmac_sha1_tv_template[] = {
1246 {
1247 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1248 .ksize = 20,
1249 .plaintext = "Hi There",
1250 .psize = 8,
1251 .digest = "\xb6\x17\x31\x86\x55\x05\x72\x64"
1252 "\xe2\x8b\xc0\xb6\xfb\x37\x8c\x8e\xf1"
1253 "\x46\xbe",
1254 }, {
1255 .key = "Jefe",
1256 .ksize = 4,
1257 .plaintext = "what do ya want for nothing?",
1258 .psize = 28,
1259 .digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74"
1260 "\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79",
1261 .np = 2,
1262 .tap = { 14, 14 }
1263 }, {
1264 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1265 .ksize = 20,
1266 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1267 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1268 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1269 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1270 .psize = 50,
1271 .digest = "\x12\x5d\x73\x42\xb9\xac\x11\xcd\x91\xa3"
1272 "\x9a\xf4\x8a\xa1\x7b\x4f\x63\xf1\x75\xd3",
1273 }, {
1274 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1275 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1276 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1277 .ksize = 25,
1278 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1279 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1280 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1281 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1282 .psize = 50,
1283 .digest = "\x4c\x90\x07\xf4\x02\x62\x50\xc6\xbc\x84"
1284 "\x14\xf9\xbf\x50\xc8\x6c\x2d\x72\x35\xda",
1285 }, {
1286 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1287 .ksize = 20,
1288 .plaintext = "Test With Truncation",
1289 .psize = 20,
1290 .digest = "\x4c\x1a\x03\x42\x4b\x55\xe0\x7f\xe7\xf2"
1291 "\x7b\xe1\xd5\x8b\xb9\x32\x4a\x9a\x5a\x04",
1292 }, {
1293 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1294 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1295 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1296 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1297 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1298 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1299 "\xaa\xaa",
1300 .ksize = 80,
1301 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1302 .psize = 54,
1303 .digest = "\xaa\x4a\xe5\xe1\x52\x72\xd0\x0e\x95\x70"
1304 "\x56\x37\xce\x8a\x3b\x55\xed\x40\x21\x12",
1305 }, {
1306 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1307 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1308 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1309 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1310 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1311 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1312 "\xaa\xaa",
1313 .ksize = 80,
1314 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1315 "Block-Size Data",
1316 .psize = 73,
1317 .digest = "\xe8\xe9\x9d\x0f\x45\x23\x7d\x78\x6d\x6b"
1318 "\xba\xa7\x96\x5c\x78\x08\xbb\xff\x1a\x91",
1319 },
1320};
1321
1322
1323/*
1324 * SHA224 HMAC test vectors from RFC4231
1325 */
1326#define HMAC_SHA224_TEST_VECTORS 4
1327
1328static struct hash_testvec hmac_sha224_tv_template[] = {
1329 {
1330 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1331 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1332 "\x0b\x0b\x0b\x0b",
1333 .ksize = 20,
1334 /* ("Hi There") */
1335 .plaintext = "\x48\x69\x20\x54\x68\x65\x72\x65",
1336 .psize = 8,
1337 .digest = "\x89\x6f\xb1\x12\x8a\xbb\xdf\x19"
1338 "\x68\x32\x10\x7c\xd4\x9d\xf3\x3f"
1339 "\x47\xb4\xb1\x16\x99\x12\xba\x4f"
1340 "\x53\x68\x4b\x22",
1341 }, {
1342 .key = "Jefe",
1343 .ksize = 4,
1344 /* ("what do ya want for nothing?") */
1345 .plaintext = "\x77\x68\x61\x74\x20\x64\x6f\x20"
1346 "\x79\x61\x20\x77\x61\x6e\x74\x20"
1347 "\x66\x6f\x72\x20\x6e\x6f\x74\x68"
1348 "\x69\x6e\x67\x3f",
1349 .psize = 28,
1350 .digest = "\xa3\x0e\x01\x09\x8b\xc6\xdb\xbf"
1351 "\x45\x69\x0f\x3a\x7e\x9e\x6d\x0f"
1352 "\x8b\xbe\xa2\xa3\x9e\x61\x48\x00"
1353 "\x8f\xd0\x5e\x44",
1354 .np = 4,
1355 .tap = { 7, 7, 7, 7 }
1356 }, {
1357 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1358 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1359 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1360 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1361 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1362 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1363 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1364 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1365 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1366 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1367 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1368 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1369 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1370 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1371 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1372 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1373 "\xaa\xaa\xaa",
1374 .ksize = 131,
1375 /* ("Test Using Larger Than Block-Size Key - Hash Key First") */
1376 .plaintext = "\x54\x65\x73\x74\x20\x55\x73\x69"
1377 "\x6e\x67\x20\x4c\x61\x72\x67\x65"
1378 "\x72\x20\x54\x68\x61\x6e\x20\x42"
1379 "\x6c\x6f\x63\x6b\x2d\x53\x69\x7a"
1380 "\x65\x20\x4b\x65\x79\x20\x2d\x20"
1381 "\x48\x61\x73\x68\x20\x4b\x65\x79"
1382 "\x20\x46\x69\x72\x73\x74",
1383 .psize = 54,
1384 .digest = "\x95\xe9\xa0\xdb\x96\x20\x95\xad"
1385 "\xae\xbe\x9b\x2d\x6f\x0d\xbc\xe2"
1386 "\xd4\x99\xf1\x12\xf2\xd2\xb7\x27"
1387 "\x3f\xa6\x87\x0e",
1388 }, {
1389 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1390 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1391 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1392 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1393 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1394 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1395 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1396 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1397 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1398 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1399 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1400 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1401 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1402 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1403 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1404 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1405 "\xaa\xaa\xaa",
1406 .ksize = 131,
1407 /* ("This is a test using a larger than block-size key and a")
1408 (" larger than block-size data. The key needs to be")
1409 (" hashed before being used by the HMAC algorithm.") */
1410 .plaintext = "\x54\x68\x69\x73\x20\x69\x73\x20"
1411 "\x61\x20\x74\x65\x73\x74\x20\x75"
1412 "\x73\x69\x6e\x67\x20\x61\x20\x6c"
1413 "\x61\x72\x67\x65\x72\x20\x74\x68"
1414 "\x61\x6e\x20\x62\x6c\x6f\x63\x6b"
1415 "\x2d\x73\x69\x7a\x65\x20\x6b\x65"
1416 "\x79\x20\x61\x6e\x64\x20\x61\x20"
1417 "\x6c\x61\x72\x67\x65\x72\x20\x74"
1418 "\x68\x61\x6e\x20\x62\x6c\x6f\x63"
1419 "\x6b\x2d\x73\x69\x7a\x65\x20\x64"
1420 "\x61\x74\x61\x2e\x20\x54\x68\x65"
1421 "\x20\x6b\x65\x79\x20\x6e\x65\x65"
1422 "\x64\x73\x20\x74\x6f\x20\x62\x65"
1423 "\x20\x68\x61\x73\x68\x65\x64\x20"
1424 "\x62\x65\x66\x6f\x72\x65\x20\x62"
1425 "\x65\x69\x6e\x67\x20\x75\x73\x65"
1426 "\x64\x20\x62\x79\x20\x74\x68\x65"
1427 "\x20\x48\x4d\x41\x43\x20\x61\x6c"
1428 "\x67\x6f\x72\x69\x74\x68\x6d\x2e",
1429 .psize = 152,
1430 .digest = "\x3a\x85\x41\x66\xac\x5d\x9f\x02"
1431 "\x3f\x54\xd5\x17\xd0\xb3\x9d\xbd"
1432 "\x94\x67\x70\xdb\x9c\x2b\x95\xc9"
1433 "\xf6\xf5\x65\xd1",
1434 },
1435};
1436
1437/*
1438 * HMAC-SHA256 test vectors from
1439 * draft-ietf-ipsec-ciph-sha-256-01.txt
1440 */
1441#define HMAC_SHA256_TEST_VECTORS 10
1442
1443static struct hash_testvec hmac_sha256_tv_template[] = {
1444 {
1445 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1446 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1447 "\x11\x12\x13\x14\x15\x16\x17\x18"
1448 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20",
1449 .ksize = 32,
1450 .plaintext = "abc",
1451 .psize = 3,
1452 .digest = "\xa2\x1b\x1f\x5d\x4c\xf4\xf7\x3a"
1453 "\x4d\xd9\x39\x75\x0f\x7a\x06\x6a"
1454 "\x7f\x98\xcc\x13\x1c\xb1\x6a\x66"
1455 "\x92\x75\x90\x21\xcf\xab\x81\x81",
1456 }, {
1457 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1458 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1459 "\x11\x12\x13\x14\x15\x16\x17\x18"
1460 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20",
1461 .ksize = 32,
1462 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
1463 .psize = 56,
1464 .digest = "\x10\x4f\xdc\x12\x57\x32\x8f\x08"
1465 "\x18\x4b\xa7\x31\x31\xc5\x3c\xae"
1466 "\xe6\x98\xe3\x61\x19\x42\x11\x49"
1467 "\xea\x8c\x71\x24\x56\x69\x7d\x30",
1468 }, {
1469 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1470 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1471 "\x11\x12\x13\x14\x15\x16\x17\x18"
1472 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20",
1473 .ksize = 32,
1474 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
1475 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
1476 .psize = 112,
1477 .digest = "\x47\x03\x05\xfc\x7e\x40\xfe\x34"
1478 "\xd3\xee\xb3\xe7\x73\xd9\x5a\xab"
1479 "\x73\xac\xf0\xfd\x06\x04\x47\xa5"
1480 "\xeb\x45\x95\xbf\x33\xa9\xd1\xa3",
1481 }, {
1482 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1483 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1484 "\x0b\x0b\x0b\x0b\x0b\x0b",
1485 .ksize = 32,
1486 .plaintext = "Hi There",
1487 .psize = 8,
1488 .digest = "\x19\x8a\x60\x7e\xb4\x4b\xfb\xc6"
1489 "\x99\x03\xa0\xf1\xcf\x2b\xbd\xc5"
1490 "\xba\x0a\xa3\xf3\xd9\xae\x3c\x1c"
1491 "\x7a\x3b\x16\x96\xa0\xb6\x8c\xf7",
1492 }, {
1493 .key = "Jefe",
1494 .ksize = 4,
1495 .plaintext = "what do ya want for nothing?",
1496 .psize = 28,
1497 .digest = "\x5b\xdc\xc1\x46\xbf\x60\x75\x4e"
1498 "\x6a\x04\x24\x26\x08\x95\x75\xc7"
1499 "\x5a\x00\x3f\x08\x9d\x27\x39\x83"
1500 "\x9d\xec\x58\xb9\x64\xec\x38\x43",
1501 .np = 2,
1502 .tap = { 14, 14 }
1503 }, {
1504 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1505 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1506 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1507 .ksize = 32,
1508 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1509 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1510 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1511 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1512 .psize = 50,
1513 .digest = "\xcd\xcb\x12\x20\xd1\xec\xcc\xea"
1514 "\x91\xe5\x3a\xba\x30\x92\xf9\x62"
1515 "\xe5\x49\xfe\x6c\xe9\xed\x7f\xdc"
1516 "\x43\x19\x1f\xbd\xe4\x5c\x30\xb0",
1517 }, {
1518 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1519 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1520 "\x11\x12\x13\x14\x15\x16\x17\x18"
1521 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
1522 "\x21\x22\x23\x24\x25",
1523 .ksize = 37,
1524 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1525 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1526 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1527 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1528 .psize = 50,
1529 .digest = "\xd4\x63\x3c\x17\xf6\xfb\x8d\x74"
1530 "\x4c\x66\xde\xe0\xf8\xf0\x74\x55"
1531 "\x6e\xc4\xaf\x55\xef\x07\x99\x85"
1532 "\x41\x46\x8e\xb4\x9b\xd2\xe9\x17",
1533 }, {
1534 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
1535 "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
1536 "\x0c\x0c\x0c\x0c\x0c\x0c",
1537 .ksize = 32,
1538 .plaintext = "Test With Truncation",
1539 .psize = 20,
1540 .digest = "\x75\x46\xaf\x01\x84\x1f\xc0\x9b"
1541 "\x1a\xb9\xc3\x74\x9a\x5f\x1c\x17"
1542 "\xd4\xf5\x89\x66\x8a\x58\x7b\x27"
1543 "\x00\xa9\xc9\x7c\x11\x93\xcf\x42",
1544 }, {
1545 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1546 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1547 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1548 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1549 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1550 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1551 "\xaa\xaa",
1552 .ksize = 80,
1553 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1554 .psize = 54,
1555 .digest = "\x69\x53\x02\x5e\xd9\x6f\x0c\x09"
1556 "\xf8\x0a\x96\xf7\x8e\x65\x38\xdb"
1557 "\xe2\xe7\xb8\x20\xe3\xdd\x97\x0e"
1558 "\x7d\xdd\x39\x09\x1b\x32\x35\x2f",
1559 }, {
1560 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1561 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1562 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1563 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1564 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1565 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1566 "\xaa\xaa",
1567 .ksize = 80,
1568 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than "
1569 "One Block-Size Data",
1570 .psize = 73,
1571 .digest = "\x63\x55\xac\x22\xe8\x90\xd0\xa3"
1572 "\xc8\x48\x1a\x5c\xa4\x82\x5b\xc8"
1573 "\x84\xd3\xe7\xa1\xff\x98\xa2\xfc"
1574 "\x2a\xc7\xd8\xe0\x64\xc3\xb2\xe6",
1575 },
1576};
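Editor's note (not part of the patch): as a quick sanity check, the "Jefe" entry of hmac_sha256_tv_template above can be reproduced from userspace. The sketch below assumes OpenSSL's libcrypto (its one-shot HMAC() helper and EVP_sha256()); the file name hmac_check.c is only a placeholder, build with cc hmac_check.c -lcrypto.

#include <stdio.h>
#include <string.h>
#include <openssl/hmac.h>

int main(void)
{
	const unsigned char key[] = "Jefe";
	const unsigned char msg[] = "what do ya want for nothing?";
	/* Expected digest, copied from the "Jefe" entry above. */
	static const unsigned char expect[32] = {
		0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e,
		0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7,
		0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83,
		0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43,
	};
	unsigned char md[32];
	unsigned int mdlen = 0;

	/* One-shot HMAC over the 28-byte message with the 4-byte key. */
	HMAC(EVP_sha256(), key, 4, msg, strlen((const char *)msg), md, &mdlen);
	printf("hmac(sha256) \"Jefe\" vector: %s\n",
	       (mdlen == 32 && !memcmp(md, expect, sizeof(expect))) ?
	       "ok" : "MISMATCH");
	return 0;
}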
1577
1578#define XCBC_AES_TEST_VECTORS 6
1579
1580static struct hash_testvec aes_xcbc128_tv_template[] = {
1581 {
1582 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1583 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1584 .plaintext = zeroed_string,
1585 .digest = "\x75\xf0\x25\x1d\x52\x8a\xc0\x1c"
1586 "\x45\x73\xdf\xd5\x84\xd7\x9f\x29",
1587 .psize = 0,
1588 .ksize = 16,
1589 }, {
1590 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1591 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1592 .plaintext = "\x00\x01\x02",
1593 .digest = "\x5b\x37\x65\x80\xae\x2f\x19\xaf"
1594 "\xe7\x21\x9c\xee\xf1\x72\x75\x6f",
1595 .psize = 3,
1596 .ksize = 16,
1597 } , {
1598 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1599 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1600 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1601 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1602 .digest = "\xd2\xa2\x46\xfa\x34\x9b\x68\xa7"
1603 "\x99\x98\xa4\x39\x4f\xf7\xa2\x63",
1604 .psize = 16,
1605 .ksize = 16,
1606 }, {
1607 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1608 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1609 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1610 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
1611 "\x10\x11\x12\x13",
1612 .digest = "\x47\xf5\x1b\x45\x64\x96\x62\x15"
1613 "\xb8\x98\x5c\x63\x05\x5e\xd3\x08",
1614 .tap = { 10, 10 },
1615 .psize = 20,
1616 .np = 2,
1617 .ksize = 16,
1618 }, {
1619 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1620 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1621 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1622 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
1623 "\x10\x11\x12\x13\x14\x15\x16\x17"
1624 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
1625 .digest = "\xf5\x4f\x0e\xc8\xd2\xb9\xf3\xd3"
1626 "\x68\x07\x73\x4b\xd5\x28\x3f\xd4",
1627 .psize = 32,
1628 .ksize = 16,
1629 }, {
1630 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1631 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1632 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1633 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
1634 "\x10\x11\x12\x13\x14\x15\x16\x17"
1635 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
1636 "\x20\x21",
1637 .digest = "\xbe\xcb\xb3\xbc\xcd\xb5\x18\xa3"
1638 "\x06\x77\xd5\x48\x1f\xb6\xb4\xd8",
1639 .tap = { 17, 17 },
1640 .psize = 34,
1641 .np = 2,
1642 .ksize = 16,
1643 }
1644};
1645
1646/*
1647 * SHA384 HMAC test vectors from RFC4231
1648 */
1649
1650#define HMAC_SHA384_TEST_VECTORS 4
1651
1652static struct hash_testvec hmac_sha384_tv_template[] = {
1653 {
1654 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1655 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1656 "\x0b\x0b\x0b\x0b",
1657 .ksize = 20,
1658 .plaintext = "Hi There",
1659 .psize = 8,
1660 .digest = "\xaf\xd0\x39\x44\xd8\x48\x95\x62"
1661 "\x6b\x08\x25\xf4\xab\x46\x90\x7f"
1662 "\x15\xf9\xda\xdb\xe4\x10\x1e\xc6"
1663 "\x82\xaa\x03\x4c\x7c\xeb\xc5\x9c"
1664 "\xfa\xea\x9e\xa9\x07\x6e\xde\x7f"
1665 "\x4a\xf1\x52\xe8\xb2\xfa\x9c\xb6",
1666 }, {
1667 .key = "Jefe",
1668 .ksize = 4,
1669 .plaintext = "what do ya want for nothing?",
1670 .psize = 28,
1671 .digest = "\xaf\x45\xd2\xe3\x76\x48\x40\x31"
1672 "\x61\x7f\x78\xd2\xb5\x8a\x6b\x1b"
1673 "\x9c\x7e\xf4\x64\xf5\xa0\x1b\x47"
1674 "\xe4\x2e\xc3\x73\x63\x22\x44\x5e"
1675 "\x8e\x22\x40\xca\x5e\x69\xe2\xc7"
1676 "\x8b\x32\x39\xec\xfa\xb2\x16\x49",
1677 .np = 4,
1678 .tap = { 7, 7, 7, 7 }
1679 }, {
1680 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1681 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1682 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1683 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1684 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1685 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1686 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1687 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1688 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1689 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1690 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1691 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1692 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1693 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1694 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1695 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1696 "\xaa\xaa\xaa",
1697 .ksize = 131,
1698 .plaintext = "Test Using Larger Than Block-Siz"
1699 "e Key - Hash Key First",
1700 .psize = 54,
1701 .digest = "\x4e\xce\x08\x44\x85\x81\x3e\x90"
1702 "\x88\xd2\xc6\x3a\x04\x1b\xc5\xb4"
1703 "\x4f\x9e\xf1\x01\x2a\x2b\x58\x8f"
1704 "\x3c\xd1\x1f\x05\x03\x3a\xc4\xc6"
1705 "\x0c\x2e\xf6\xab\x40\x30\xfe\x82"
1706 "\x96\x24\x8d\xf1\x63\xf4\x49\x52",
1707 }, {
1708 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1709 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1710 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1711 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1712 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1713 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1714 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1715 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1716 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1717 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1718 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1719 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1720 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1721 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1722 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1723 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1724 "\xaa\xaa\xaa",
1725 .ksize = 131,
1726 .plaintext = "This is a test u"
1727 "sing a larger th"
1728 "an block-size ke"
1729 "y and a larger t"
1730 "han block-size d"
1731 "ata. The key nee"
1732 "ds to be hashed "
1733 "before being use"
1734 "d by the HMAC al"
1735 "gorithm.",
1736 .psize = 152,
1737 .digest = "\x66\x17\x17\x8e\x94\x1f\x02\x0d"
1738 "\x35\x1e\x2f\x25\x4e\x8f\xd3\x2c"
1739 "\x60\x24\x20\xfe\xb0\xb8\xfb\x9a"
1740 "\xdc\xce\xbb\x82\x46\x1e\x99\xc5"
1741 "\xa6\x78\xcc\x31\xe7\x99\x17\x6d"
1742 "\x38\x60\xe6\x11\x0c\x46\x52\x3e",
1743 },
1744};
1745
1746/*
1747 * SHA512 HMAC test vectors from RFC4231
1748 */
1749
1750#define HMAC_SHA512_TEST_VECTORS 4
1751
1752static struct hash_testvec hmac_sha512_tv_template[] = {
1753 {
1754 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1755 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1756 "\x0b\x0b\x0b\x0b",
1757 .ksize = 20,
1758 .plaintext = "Hi There",
1759 .psize = 8,
1760 .digest = "\x87\xaa\x7c\xde\xa5\xef\x61\x9d"
1761 "\x4f\xf0\xb4\x24\x1a\x1d\x6c\xb0"
1762 "\x23\x79\xf4\xe2\xce\x4e\xc2\x78"
1763 "\x7a\xd0\xb3\x05\x45\xe1\x7c\xde"
1764 "\xda\xa8\x33\xb7\xd6\xb8\xa7\x02"
1765 "\x03\x8b\x27\x4e\xae\xa3\xf4\xe4"
1766 "\xbe\x9d\x91\x4e\xeb\x61\xf1\x70"
1767 "\x2e\x69\x6c\x20\x3a\x12\x68\x54",
1768 }, {
1769 .key = "Jefe",
1770 .ksize = 4,
1771 .plaintext = "what do ya want for nothing?",
1772 .psize = 28,
1773 .digest = "\x16\x4b\x7a\x7b\xfc\xf8\x19\xe2"
1774 "\xe3\x95\xfb\xe7\x3b\x56\xe0\xa3"
1775 "\x87\xbd\x64\x22\x2e\x83\x1f\xd6"
1776 "\x10\x27\x0c\xd7\xea\x25\x05\x54"
1777 "\x97\x58\xbf\x75\xc0\x5a\x99\x4a"
1778 "\x6d\x03\x4f\x65\xf8\xf0\xe6\xfd"
1779 "\xca\xea\xb1\xa3\x4d\x4a\x6b\x4b"
1780 "\x63\x6e\x07\x0a\x38\xbc\xe7\x37",
1781 .np = 4,
1782 .tap = { 7, 7, 7, 7 }
1783 }, {
1784 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1785 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1786 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1787 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1788 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1789 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1790 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1791 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1792 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1793 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1794 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1795 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1796 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1797 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1798 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1799 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1800 "\xaa\xaa\xaa",
1801 .ksize = 131,
1802 .plaintext = "Test Using Large"
1803 "r Than Block-Siz"
1804 "e Key - Hash Key"
1805 " First",
1806 .psize = 54,
1807 .digest = "\x80\xb2\x42\x63\xc7\xc1\xa3\xeb"
1808 "\xb7\x14\x93\xc1\xdd\x7b\xe8\xb4"
1809 "\x9b\x46\xd1\xf4\x1b\x4a\xee\xc1"
1810 "\x12\x1b\x01\x37\x83\xf8\xf3\x52"
1811 "\x6b\x56\xd0\x37\xe0\x5f\x25\x98"
1812 "\xbd\x0f\xd2\x21\x5d\x6a\x1e\x52"
1813 "\x95\xe6\x4f\x73\xf6\x3f\x0a\xec"
1814 "\x8b\x91\x5a\x98\x5d\x78\x65\x98",
1815 }, {
1816 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1817 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1818 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1819 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1820 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1821 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1822 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1823 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1824 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1825 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1826 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1827 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1828 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1829 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1830 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1831 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1832 "\xaa\xaa\xaa",
1833 .ksize = 131,
1834 .plaintext =
1835 "This is a test u"
1836 "sing a larger th"
1837 "an block-size ke"
1838 "y and a larger t"
1839 "han block-size d"
1840 "ata. The key nee"
1841 "ds to be hashed "
1842 "before being use"
1843 "d by the HMAC al"
1844 "gorithm.",
1845 .psize = 152,
1846 .digest = "\xe3\x7b\x6a\x77\x5d\xc8\x7d\xba"
1847 "\xa4\xdf\xa9\xf9\x6e\x5e\x3f\xfd"
1848 "\xde\xbd\x71\xf8\x86\x72\x89\x86"
1849 "\x5d\xf5\xa3\x2d\x20\xcd\xc9\x44"
1850 "\xb6\x02\x2c\xac\x3c\x49\x82\xb1"
1851 "\x0d\x5e\xeb\x55\xc3\xe4\xde\x15"
1852 "\x13\x46\x76\xfb\x6d\xe0\x44\x60"
1853 "\x65\xc9\x74\x40\xfa\x8c\x6a\x58",
1854 },
1855};
1856
1857/*
1858 * DES test vectors.
1859 */
1860#define DES_ENC_TEST_VECTORS 10
1861#define DES_DEC_TEST_VECTORS 4
1862#define DES_CBC_ENC_TEST_VECTORS 5
1863#define DES_CBC_DEC_TEST_VECTORS 4
1864#define DES3_EDE_ENC_TEST_VECTORS 3
1865#define DES3_EDE_DEC_TEST_VECTORS 3
1866#define DES3_EDE_CBC_ENC_TEST_VECTORS 1
1867#define DES3_EDE_CBC_DEC_TEST_VECTORS 1
1868
1869static struct cipher_testvec des_enc_tv_template[] = {
1870 { /* From Applied Cryptography */
1871 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1872 .klen = 8,
1873 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1874 .ilen = 8,
1875 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1876 .rlen = 8,
1877 }, { /* Same key, different plaintext block */
1878 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1879 .klen = 8,
1880 .input = "\x22\x33\x44\x55\x66\x77\x88\x99",
1881 .ilen = 8,
1882 .result = "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1883 .rlen = 8,
1884 }, { /* Sbox test from NBS */
1885 .key = "\x7c\xa1\x10\x45\x4a\x1a\x6e\x57",
1886 .klen = 8,
1887 .input = "\x01\xa1\xd6\xd0\x39\x77\x67\x42",
1888 .ilen = 8,
1889 .result = "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
1890 .rlen = 8,
1891 }, { /* Three blocks */
1892 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1893 .klen = 8,
1894 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1895 "\x22\x33\x44\x55\x66\x77\x88\x99"
1896 "\xca\xfe\xba\xbe\xfe\xed\xbe\xef",
1897 .ilen = 24,
1898 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1899 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b"
1900 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
1901 .rlen = 24,
1902 }, { /* Weak key */
1903 .fail = 1,
1904 .wk = 1,
1905 .key = "\x01\x01\x01\x01\x01\x01\x01\x01",
1906 .klen = 8,
1907 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1908 .ilen = 8,
1909 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1910 .rlen = 8,
1911 }, { /* Two blocks -- for testing encryption across pages */
1912 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1913 .klen = 8,
1914 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1915 "\x22\x33\x44\x55\x66\x77\x88\x99",
1916 .ilen = 16,
1917 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1918 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1919 .rlen = 16,
1920 .np = 2,
1921 .tap = { 8, 8 }
1922 }, { /* Four blocks -- for testing encryption with chunking */
1923 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1924 .klen = 8,
1925 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1926 "\x22\x33\x44\x55\x66\x77\x88\x99"
1927 "\xca\xfe\xba\xbe\xfe\xed\xbe\xef"
1928 "\x22\x33\x44\x55\x66\x77\x88\x99",
1929 .ilen = 32,
1930 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1931 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b"
1932 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90"
1933 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1934 .rlen = 32,
1935 .np = 3,
1936 .tap = { 14, 10, 8 }
1937 }, {
1938 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1939 .klen = 8,
1940 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1941 "\x22\x33\x44\x55\x66\x77\x88\x99"
1942 "\xca\xfe\xba\xbe\xfe\xed\xbe\xef",
1943 .ilen = 24,
1944 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1945 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b"
1946 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
1947 .rlen = 24,
1948 .np = 4,
1949 .tap = { 2, 1, 3, 18 }
1950 }, {
1951 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1952 .klen = 8,
1953 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1954 "\x22\x33\x44\x55\x66\x77\x88\x99",
1955 .ilen = 16,
1956 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1957 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1958 .rlen = 16,
1959 .np = 5,
1960 .tap = { 2, 2, 2, 2, 8 }
1961 }, {
1962 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1963 .klen = 8,
1964 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1965 .ilen = 8,
1966 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1967 .rlen = 8,
1968 .np = 8,
1969 .tap = { 1, 1, 1, 1, 1, 1, 1, 1 }
1970 },
1971};
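Editor's note (not part of the patch): in the chunked entries above, .np and .tap only describe how .input is to be split into np consecutive pieces whose lengths add up to .ilen, so the implementation gets walked across segment boundaries instead of seeing one flat buffer. The sketch below merely illustrates that splitting rule with the { 2, 1, 3, 18 } entry; it uses no kernel API.

#include <stdio.h>

/* Print the offset/length of each chunk and report whether the tap
 * lengths cover the input exactly. */
static int taps_cover_input(unsigned int ilen,
			    const unsigned int *tap, unsigned int np)
{
	unsigned int i, off = 0;

	for (i = 0; i < np; i++) {
		printf("chunk %u: offset %u, length %u\n", i, off, tap[i]);
		off += tap[i];
	}
	return off == ilen;
}

int main(void)
{
	/* Mirrors the .np = 4, .tap = { 2, 1, 3, 18 } entry above (.ilen = 24). */
	static const unsigned int tap[4] = { 2, 1, 3, 18 };

	printf("%s\n", taps_cover_input(24, tap, 4) ?
	       "taps cover the input exactly" : "bad tap list");
	return 0;
}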
1972
1973static struct cipher_testvec des_dec_tv_template[] = {
1974 { /* From Applied Cryptography */
1975 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1976 .klen = 8,
1977 .input = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1978 .ilen = 8,
1979 .result = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1980 .rlen = 8,
1981 }, { /* Sbox test from NBS */
1982 .key = "\x7c\xa1\x10\x45\x4a\x1a\x6e\x57",
1983 .klen = 8,
1984 .input = "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
1985 .ilen = 8,
1986 .result = "\x01\xa1\xd6\xd0\x39\x77\x67\x42",
1987 .rlen = 8,
1988 }, { /* Two blocks, for chunking test */
1989 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1990 .klen = 8,
1991 .input = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1992 "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
1993 .ilen = 16,
1994 .result = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1995 "\xa3\x99\x7b\xca\xaf\x69\xa0\xf5",
1996 .rlen = 16,
1997 .np = 2,
1998 .tap = { 8, 8 }
1999 }, {
2000 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2001 .klen = 8,
2002 .input = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
2003 "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
2004 .ilen = 16,
2005 .result = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
2006 "\xa3\x99\x7b\xca\xaf\x69\xa0\xf5",
2007 .rlen = 16,
2008 .np = 3,
2009 .tap = { 3, 12, 1 }
2010 },
2011};
2012
2013static struct cipher_testvec des_cbc_enc_tv_template[] = {
2014 { /* From OpenSSL */
2015 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2016 .klen = 8,
2017 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2018 .input = "\x37\x36\x35\x34\x33\x32\x31\x20"
2019 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2020 "\x68\x65\x20\x74\x69\x6d\x65\x20",
2021 .ilen = 24,
2022 .result = "\xcc\xd1\x73\xff\xab\x20\x39\xf4"
2023 "\xac\xd8\xae\xfd\xdf\xd8\xa1\xeb"
2024 "\x46\x8e\x91\x15\x78\x88\xba\x68",
2025 .rlen = 24,
2026 }, { /* FIPS Pub 81 */
2027 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2028 .klen = 8,
2029 .iv = "\x12\x34\x56\x78\x90\xab\xcd\xef",
2030 .input = "\x4e\x6f\x77\x20\x69\x73\x20\x74",
2031 .ilen = 8,
2032 .result = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2033 .rlen = 8,
2034 }, {
2035 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2036 .klen = 8,
2037 .iv = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2038 .input = "\x68\x65\x20\x74\x69\x6d\x65\x20",
2039 .ilen = 8,
2040 .result = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2041 .rlen = 8,
2042 }, {
2043 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2044 .klen = 8,
2045 .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2046 .input = "\x66\x6f\x72\x20\x61\x6c\x6c\x20",
2047 .ilen = 8,
2048 .result = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
2049 .rlen = 8,
2050 }, { /* Copy of openssl vector for chunk testing */
2051 /* From OpenSSL */
2052 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2053 .klen = 8,
2054 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2055 .input = "\x37\x36\x35\x34\x33\x32\x31\x20"
2056 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2057 "\x68\x65\x20\x74\x69\x6d\x65\x20",
2058 .ilen = 24,
2059 .result = "\xcc\xd1\x73\xff\xab\x20\x39\xf4"
2060 "\xac\xd8\xae\xfd\xdf\xd8\xa1\xeb"
2061 "\x46\x8e\x91\x15\x78\x88\xba\x68",
2062 .rlen = 24,
2063 .np = 2,
2064 .tap = { 13, 11 }
2065 },
2066};
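Editor's note (not part of the patch): the three single-block FIPS Pub 81 entries above chain together, each .iv being the previous entry's .result, so together they describe one CBC encryption of "Now is the time for all ". The sketch below reproduces that chain from userspace and assumes OpenSSL's classic single-DES interface (DES_set_key_unchecked() and DES_ncbc_encrypt()) is still shipped; the file name des_cbc_check.c is a placeholder, build with cc des_cbc_check.c -lcrypto.

#include <stdio.h>
#include <string.h>
#include <openssl/des.h>

int main(void)
{
	DES_cblock key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
	DES_cblock iv  = { 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef };
	/* Concatenation of the three 8-byte plaintext blocks above. */
	const unsigned char pt[] = "Now is the time for all ";
	/* Concatenation of the three .result blocks above. */
	static const unsigned char expect[24] = {
		0xe5, 0xc7, 0xcd, 0xde, 0x87, 0x2b, 0xf2, 0x7c,
		0x43, 0xe9, 0x34, 0x00, 0x8c, 0x38, 0x9c, 0x0f,
		0x68, 0x37, 0x88, 0x49, 0x9a, 0x7c, 0x05, 0xf6,
	};
	unsigned char ct[24];
	DES_key_schedule ks;

	DES_set_key_unchecked(&key, &ks);
	DES_ncbc_encrypt(pt, ct, 24, &ks, &iv, DES_ENCRYPT);
	printf("FIPS Pub 81 CBC chain: %s\n",
	       memcmp(ct, expect, sizeof(expect)) ? "MISMATCH" : "ok");
	return 0;
}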
2067
2068static struct cipher_testvec des_cbc_dec_tv_template[] = {
2069 { /* FIPS Pub 81 */
2070 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2071 .klen = 8,
2072 .iv = "\x12\x34\x56\x78\x90\xab\xcd\xef",
2073 .input = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2074 .ilen = 8,
2075 .result = "\x4e\x6f\x77\x20\x69\x73\x20\x74",
2076 .rlen = 8,
2077 }, {
2078 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2079 .klen = 8,
2080 .iv = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2081 .input = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2082 .ilen = 8,
2083 .result = "\x68\x65\x20\x74\x69\x6d\x65\x20",
2084 .rlen = 8,
2085 }, {
2086 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2087 .klen = 8,
2088 .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2089 .input = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
2090 .ilen = 8,
2091 .result = "\x66\x6f\x72\x20\x61\x6c\x6c\x20",
2092 .rlen = 8,
2093 }, { /* Copy of above, for chunk testing */
2094 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2095 .klen = 8,
2096 .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2097 .input = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
2098 .ilen = 8,
2099 .result = "\x66\x6f\x72\x20\x61\x6c\x6c\x20",
2100 .rlen = 8,
2101 .np = 2,
2102 .tap = { 4, 4 }
2103 },
2104};
2105
2106static struct cipher_testvec des3_ede_enc_tv_template[] = {
2107 { /* These are from openssl */
2108 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2109 "\x55\x55\x55\x55\x55\x55\x55\x55"
2110 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2111 .klen = 24,
2112 .input = "\x73\x6f\x6d\x65\x64\x61\x74\x61",
2113 .ilen = 8,
2114 .result = "\x18\xd7\x48\xe5\x63\x62\x05\x72",
2115 .rlen = 8,
2116 }, {
2117 .key = "\x03\x52\x02\x07\x67\x20\x82\x17"
2118 "\x86\x02\x87\x66\x59\x08\x21\x98"
2119 "\x64\x05\x6a\xbd\xfe\xa9\x34\x57",
2120 .klen = 24,
2121 .input = "\x73\x71\x75\x69\x67\x67\x6c\x65",
2122 .ilen = 8,
2123 .result = "\xc0\x7d\x2a\x0f\xa5\x66\xfa\x30",
2124 .rlen = 8,
2125 }, {
2126 .key = "\x10\x46\x10\x34\x89\x98\x80\x20"
2127 "\x91\x07\xd0\x15\x89\x19\x01\x01"
2128 "\x19\x07\x92\x10\x98\x1a\x01\x01",
2129 .klen = 24,
2130 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
2131 .ilen = 8,
2132 .result = "\xe1\xef\x62\xc3\x32\xfe\x82\x5b",
2133 .rlen = 8,
2134 },
2135};
2136
2137static struct cipher_testvec des3_ede_dec_tv_template[] = {
2138 { /* These are from openssl */
2139 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2140 "\x55\x55\x55\x55\x55\x55\x55\x55"
2141 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2142 .klen = 24,
2143 .input = "\x18\xd7\x48\xe5\x63\x62\x05\x72",
2144 .ilen = 8,
2145 .result = "\x73\x6f\x6d\x65\x64\x61\x74\x61",
2146 .rlen = 8,
2147 }, {
2148 .key = "\x03\x52\x02\x07\x67\x20\x82\x17"
2149 "\x86\x02\x87\x66\x59\x08\x21\x98"
2150 "\x64\x05\x6a\xbd\xfe\xa9\x34\x57",
2151 .klen = 24,
2152 .input = "\xc0\x7d\x2a\x0f\xa5\x66\xfa\x30",
2153 .ilen = 8,
2154 .result = "\x73\x71\x75\x69\x67\x67\x6c\x65",
2155 .rlen = 8,
2156 }, {
2157 .key = "\x10\x46\x10\x34\x89\x98\x80\x20"
2158 "\x91\x07\xd0\x15\x89\x19\x01\x01"
2159 "\x19\x07\x92\x10\x98\x1a\x01\x01",
2160 .klen = 24,
2161 .input = "\xe1\xef\x62\xc3\x32\xfe\x82\x5b",
2162 .ilen = 8,
2163 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
2164 .rlen = 8,
2165 },
2166};
2167
2168static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
2169 { /* Generated from openssl */
2170 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2171 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2172 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2173 .klen = 24,
2174 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2175 .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2176 "\x53\x20\x63\x65\x65\x72\x73\x74"
2177 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2178 "\x20\x79\x65\x53\x72\x63\x74\x65"
2179 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2180 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2181 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2182 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2183 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2184 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2185 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2186 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2187 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2188 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2189 "\x63\x65\x65\x72\x73\x74\x54\x20"
2190 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2191 .ilen = 128,
2192 .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2193 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2194 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2195 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2196 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2197 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2198 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2199 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2200 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2201 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2202 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2203 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2204 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2205 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2206 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2207 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2208 .rlen = 128,
2209 },
2210};
2211
2212static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
2213 { /* Generated from openssl */
2214 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2215 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2216 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2217 .klen = 24,
2218 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2219 .input = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2220 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2221 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2222 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2223 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2224 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2225 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2226 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2227 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2228 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2229 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2230 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2231 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2232 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2233 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2234 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2235 .ilen = 128,
2236 .result = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2237 "\x53\x20\x63\x65\x65\x72\x73\x74"
2238 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2239 "\x20\x79\x65\x53\x72\x63\x74\x65"
2240 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2241 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2242 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2243 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2244 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2245 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2246 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2247 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2248 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2249 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2250 "\x63\x65\x65\x72\x73\x74\x54\x20"
2251 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2252 .rlen = 128,
2253 },
2254};
2255
2256/*
2257 * Blowfish test vectors.
2258 */
2259#define BF_ENC_TEST_VECTORS 6
2260#define BF_DEC_TEST_VECTORS 6
2261#define BF_CBC_ENC_TEST_VECTORS 1
2262#define BF_CBC_DEC_TEST_VECTORS 1
2263
2264static struct cipher_testvec bf_enc_tv_template[] = {
2265 { /* DES test vectors from OpenSSL */
2266 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
2267 .klen = 8,
2268 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
2269 .ilen = 8,
2270 .result = "\x4e\xf9\x97\x45\x61\x98\xdd\x78",
2271 .rlen = 8,
2272 }, {
2273 .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e",
2274 .klen = 8,
2275 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2276 .ilen = 8,
2277 .result = "\xa7\x90\x79\x51\x08\xea\x3c\xae",
2278 .rlen = 8,
2279 }, {
2280 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2281 .klen = 8,
2282 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2283 .ilen = 8,
2284 .result = "\xe8\x7a\x24\x4e\x2c\xc8\x5e\x82",
2285 .rlen = 8,
2286 }, { /* Vary the keylength... */
2287 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2288 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f",
2289 .klen = 16,
2290 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2291 .ilen = 8,
2292 .result = "\x93\x14\x28\x87\xee\x3b\xe1\x5c",
2293 .rlen = 8,
2294 }, {
2295 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2296 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2297 "\x00\x11\x22\x33\x44",
2298 .klen = 21,
2299 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2300 .ilen = 8,
2301 .result = "\xe6\xf5\x1e\xd7\x9b\x9d\xb2\x1f",
2302 .rlen = 8,
2303 }, { /* Generated with bf488 */
2304 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2305 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2306 "\x00\x11\x22\x33\x44\x55\x66\x77"
2307 "\x04\x68\x91\x04\xc2\xfd\x3b\x2f"
2308 "\x58\x40\x23\x64\x1a\xba\x61\x76"
2309 "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e"
2310 "\xff\xff\xff\xff\xff\xff\xff\xff",
2311 .klen = 56,
2312 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2313 .ilen = 8,
2314 .result = "\xc0\x45\x04\x01\x2e\x4e\x1f\x53",
2315 .rlen = 8,
2316 },
2317};
2318
2319static struct cipher_testvec bf_dec_tv_template[] = {
2320 { /* DES test vectors from OpenSSL */
2321 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
2322 .klen = 8,
2323 .input = "\x4e\xf9\x97\x45\x61\x98\xdd\x78",
2324 .ilen = 8,
2325 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
2326 .rlen = 8,
2327 }, {
2328 .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e",
2329 .klen = 8,
2330 .input = "\xa7\x90\x79\x51\x08\xea\x3c\xae",
2331 .ilen = 8,
2332 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2333 .rlen = 8,
2334 }, {
2335 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2336 .klen = 8,
2337 .input = "\xe8\x7a\x24\x4e\x2c\xc8\x5e\x82",
2338 .ilen = 8,
2339 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2340 .rlen = 8,
2341 }, { /* Vary the keylength... */
2342 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2343 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f",
2344 .klen = 16,
2345 .input = "\x93\x14\x28\x87\xee\x3b\xe1\x5c",
2346 .ilen = 8,
2347 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2348 .rlen = 8,
2349 }, {
2350 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2351 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2352 "\x00\x11\x22\x33\x44",
2353 .klen = 21,
2354 .input = "\xe6\xf5\x1e\xd7\x9b\x9d\xb2\x1f",
2355 .ilen = 8,
2356 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2357 .rlen = 8,
2358 }, { /* Generated with bf488, using OpenSSL, Libgcrypt and Nettle */
2359 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2360 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2361 "\x00\x11\x22\x33\x44\x55\x66\x77"
2362 "\x04\x68\x91\x04\xc2\xfd\x3b\x2f"
2363 "\x58\x40\x23\x64\x1a\xba\x61\x76"
2364 "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e"
2365 "\xff\xff\xff\xff\xff\xff\xff\xff",
2366 .klen = 56,
2367 .input = "\xc0\x45\x04\x01\x2e\x4e\x1f\x53",
2368 .ilen = 8,
2369 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2370 .rlen = 8,
2371 },
2372};
2373
2374static struct cipher_testvec bf_cbc_enc_tv_template[] = {
2375 { /* From OpenSSL */
2376 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2377 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2378 .klen = 16,
2379 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2380 .input = "\x37\x36\x35\x34\x33\x32\x31\x20"
2381 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2382 "\x68\x65\x20\x74\x69\x6d\x65\x20"
2383 "\x66\x6f\x72\x20\x00\x00\x00\x00",
2384 .ilen = 32,
2385 .result = "\x6b\x77\xb4\xd6\x30\x06\xde\xe6"
2386 "\x05\xb1\x56\xe2\x74\x03\x97\x93"
2387 "\x58\xde\xb9\xe7\x15\x46\x16\xd9"
2388 "\x59\xf1\x65\x2b\xd5\xff\x92\xcc",
2389 .rlen = 32,
2390 },
2391};
2392
2393static struct cipher_testvec bf_cbc_dec_tv_template[] = {
2394 { /* From OpenSSL */
2395 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2396 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2397 .klen = 16,
2398 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2399 .input = "\x6b\x77\xb4\xd6\x30\x06\xde\xe6"
2400 "\x05\xb1\x56\xe2\x74\x03\x97\x93"
2401 "\x58\xde\xb9\xe7\x15\x46\x16\xd9"
2402 "\x59\xf1\x65\x2b\xd5\xff\x92\xcc",
2403 .ilen = 32,
2404 .result = "\x37\x36\x35\x34\x33\x32\x31\x20"
2405 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2406 "\x68\x65\x20\x74\x69\x6d\x65\x20"
2407 "\x66\x6f\x72\x20\x00\x00\x00\x00",
2408 .rlen = 32,
2409 },
2410};
2411
2412/*
2413 * Twofish test vectors.
2414 */
2415#define TF_ENC_TEST_VECTORS 3
2416#define TF_DEC_TEST_VECTORS 3
2417#define TF_CBC_ENC_TEST_VECTORS 4
2418#define TF_CBC_DEC_TEST_VECTORS 4
2419
2420static struct cipher_testvec tf_enc_tv_template[] = {
2421 {
2422 .key = zeroed_string,
2423 .klen = 16,
2424 .input = zeroed_string,
2425 .ilen = 16,
2426 .result = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2427 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2428 .rlen = 16,
2429 }, {
2430 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2431 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2432 "\x00\x11\x22\x33\x44\x55\x66\x77",
2433 .klen = 24,
2434 .input = zeroed_string,
2435 .ilen = 16,
2436 .result = "\xcf\xd1\xd2\xe5\xa9\xbe\x9c\xdf"
2437 "\x50\x1f\x13\xb8\x92\xbd\x22\x48",
2438 .rlen = 16,
2439 }, {
2440 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2441 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2442 "\x00\x11\x22\x33\x44\x55\x66\x77"
2443 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2444 .klen = 32,
2445 .input = zeroed_string,
2446 .ilen = 16,
2447 .result = "\x37\x52\x7b\xe0\x05\x23\x34\xb8"
2448 "\x9f\x0c\xfc\xca\xe8\x7c\xfa\x20",
2449 .rlen = 16,
2450 },
2451};
2452
2453static struct cipher_testvec tf_dec_tv_template[] = {
2454 {
2455 .key = zeroed_string,
2456 .klen = 16,
2457 .input = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2458 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2459 .ilen = 16,
2460 .result = zeroed_string,
2461 .rlen = 16,
2462 }, {
2463 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2464        "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2465 "\x00\x11\x22\x33\x44\x55\x66\x77",
2466 .klen = 24,
2467 .input = "\xcf\xd1\xd2\xe5\xa9\xbe\x9c\xdf"
2468 "\x50\x1f\x13\xb8\x92\xbd\x22\x48",
2469 .ilen = 16,
2470 .result = zeroed_string,
2471 .rlen = 16,
2472 }, {
2473 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2474 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2475 "\x00\x11\x22\x33\x44\x55\x66\x77"
2476 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2477 .klen = 32,
2478 .input = "\x37\x52\x7b\xe0\x05\x23\x34\xb8"
2479 "\x9f\x0c\xfc\xca\xe8\x7c\xfa\x20",
2480 .ilen = 16,
2481 .result = zeroed_string,
2482 .rlen = 16,
2483 },
2484};
2485
2486static struct cipher_testvec tf_cbc_enc_tv_template[] = {
2487 { /* Generated with Nettle */
2488 .key = zeroed_string,
2489 .klen = 16,
2490 .iv = zeroed_string,
2491 .input = zeroed_string,
2492 .ilen = 16,
2493 .result = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2494 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2495 .rlen = 16,
2496 }, {
2497 .key = zeroed_string,
2498 .klen = 16,
2499 .iv = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2500 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2501 .input = zeroed_string,
2502 .ilen = 16,
2503 .result = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2504 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2505 .rlen = 16,
2506 }, {
2507 .key = zeroed_string,
2508 .klen = 16,
2509 .iv = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2510 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2511 .input = zeroed_string,
2512 .ilen = 16,
2513 .result = "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2514 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2515 .rlen = 16,
2516 }, {
2517 .key = zeroed_string,
2518 .klen = 16,
2519 .iv = zeroed_string,
2520 .input = zeroed_string,
2521 .ilen = 48,
2522 .result = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2523 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a"
2524 "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2525 "\x86\xcb\x08\x6b\x78\x9f\x54\x19"
2526 "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2527 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2528 .rlen = 48,
2529 },
2530};
2531
2532static struct cipher_testvec tf_cbc_dec_tv_template[] = {
2533 { /* Reverse of the first four above */
2534 .key = zeroed_string,
2535 .klen = 16,
2536 .iv = zeroed_string,
2537 .input = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2538 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2539 .ilen = 16,
2540 .result = zeroed_string,
2541 .rlen = 16,
2542 }, {
2543 .key = zeroed_string,
2544 .klen = 16,
2545 .iv = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2546 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2547 .input = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2548 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2549 .ilen = 16,
2550 .result = zeroed_string,
2551 .rlen = 16,
2552 }, {
2553 .key = zeroed_string,
2554 .klen = 16,
2555 .iv = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2556 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2557 .input = "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2558 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2559 .ilen = 16,
2560 .result = zeroed_string,
2561 .rlen = 16,
2562 }, {
2563 .key = zeroed_string,
2564 .klen = 16,
2565 .iv = zeroed_string,
2566 .input = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2567 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a"
2568 "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2569 "\x86\xcb\x08\x6b\x78\x9f\x54\x19"
2570 "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2571 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2572 .ilen = 48,
2573 .result = zeroed_string,
2574 .rlen = 48,
2575 },
2576};
2577
2578/*
2579 * Serpent test vectors. These are backwards because Serpent writes
2580 * octet sequences in right-to-left mode.
2581 */
2582#define SERPENT_ENC_TEST_VECTORS 4
2583#define SERPENT_DEC_TEST_VECTORS 4
2584
2585#define TNEPRES_ENC_TEST_VECTORS 4
2586#define TNEPRES_DEC_TEST_VECTORS 4
2587
2588static struct cipher_testvec serpent_enc_tv_template[] = {
2589 {
2590 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2591 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2592 .ilen = 16,
2593 .result = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
2594 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
2595 .rlen = 16,
2596 }, {
2597 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2598 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2599 .klen = 16,
2600 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2601 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2602 .ilen = 16,
2603 .result = "\x4c\x7d\x8a\x32\x80\x72\xa2\x2c"
2604 "\x82\x3e\x4a\x1f\x3a\xcd\xa1\x6d",
2605 .rlen = 16,
2606 }, {
2607 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2608 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2609 "\x10\x11\x12\x13\x14\x15\x16\x17"
2610 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2611 .klen = 32,
2612 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2613 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2614 .ilen = 16,
2615 .result = "\xde\x26\x9f\xf8\x33\xe4\x32\xb8"
2616 "\x5b\x2e\x88\xd2\x70\x1c\xe7\x5c",
2617 .rlen = 16,
2618 }, {
2619 .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80",
2620 .klen = 16,
2621 .input = zeroed_string,
2622 .ilen = 16,
2623 .result = "\xdd\xd2\x6b\x98\xa5\xff\xd8\x2c"
2624 "\x05\x34\x5a\x9d\xad\xbf\xaf\x49",
2625 .rlen = 16,
2626 },
2627};
2628
2629static struct cipher_testvec tnepres_enc_tv_template[] = {
2630 { /* KeySize=128, PT=0, I=1 */
2631 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
2632 "\x00\x00\x00\x00\x00\x00\x00\x00",
2633 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
2634 "\x00\x00\x00\x00\x00\x00\x00\x00",
2635 .klen = 16,
2636 .ilen = 16,
2637 .result = "\x49\xaf\xbf\xad\x9d\x5a\x34\x05"
2638 "\x2c\xd8\xff\xa5\x98\x6b\xd2\xdd",
2639 .rlen = 16,
2640 }, { /* KeySize=192, PT=0, I=1 */
2641 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
2642 "\x00\x00\x00\x00\x00\x00\x00\x00"
2643 "\x00\x00\x00\x00\x00\x00\x00\x00",
2644 .klen = 24,
2645 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
2646 "\x00\x00\x00\x00\x00\x00\x00\x00",
2647 .ilen = 16,
2648 .result = "\xe7\x8e\x54\x02\xc7\x19\x55\x68"
2649 "\xac\x36\x78\xf7\xa3\xf6\x0c\x66",
2650 .rlen = 16,
2651 }, { /* KeySize=256, PT=0, I=1 */
2652 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
2653 "\x00\x00\x00\x00\x00\x00\x00\x00"
2654 "\x00\x00\x00\x00\x00\x00\x00\x00"
2655 "\x00\x00\x00\x00\x00\x00\x00\x00",
2656 .klen = 32,
2657 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
2658 "\x00\x00\x00\x00\x00\x00\x00\x00",
2659 .ilen = 16,
2660 .result = "\xab\xed\x96\xe7\x66\xbf\x28\xcb"
2661 "\xc0\xeb\xd2\x1a\x82\xef\x08\x19",
2662 .rlen = 16,
2663 }, { /* KeySize=256, I=257 */
2664 .key = "\x1f\x1e\x1d\x1c\x1b\x1a\x19\x18"
2665 "\x17\x16\x15\x14\x13\x12\x11\x10"
2666 "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08"
2667 "\x07\x06\x05\x04\x03\x02\x01\x00",
2668 .klen = 32,
2669 .input = "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08"
2670 "\x07\x06\x05\x04\x03\x02\x01\x00",
2671 .ilen = 16,
2672 .result = "\x5c\xe7\x1c\x70\xd2\x88\x2e\x5b"
2673 "\xb8\x32\xe4\x33\xf8\x9f\x26\xde",
2674 .rlen = 16,
2675 },
2676};
2677
2678
2679static struct cipher_testvec serpent_dec_tv_template[] = {
2680 {
2681 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
2682 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
2683 .ilen = 16,
2684 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2685 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2686 .rlen = 16,
2687 }, {
2688 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2689 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2690 .klen = 16,
2691 .input = "\x4c\x7d\x8a\x32\x80\x72\xa2\x2c"
2692 "\x82\x3e\x4a\x1f\x3a\xcd\xa1\x6d",
2693 .ilen = 16,
2694 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2695 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2696 .rlen = 16,
2697 }, {
2698 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2699 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2700 "\x10\x11\x12\x13\x14\x15\x16\x17"
2701 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2702 .klen = 32,
2703 .input = "\xde\x26\x9f\xf8\x33\xe4\x32\xb8"
2704 "\x5b\x2e\x88\xd2\x70\x1c\xe7\x5c",
2705 .ilen = 16,
2706 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2707 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2708 .rlen = 16,
2709 }, {
2710 .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80",
2711 .klen = 16,
2712 .input = "\xdd\xd2\x6b\x98\xa5\xff\xd8\x2c"
2713 "\x05\x34\x5a\x9d\xad\xbf\xaf\x49",
2714 .ilen = 16,
2715 .result = zeroed_string,
2716 .rlen = 16,
2717 },
2718};
2719
2720static struct cipher_testvec tnepres_dec_tv_template[] = {
2721 {
2722 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
2723 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
2724 .ilen = 16,
2725 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2726 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2727 .rlen = 16,
2728 }, {
2729 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2730 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2731 .klen = 16,
2732 .input = "\xea\xf4\xd7\xfc\xd8\x01\x34\x47"
2733 "\x81\x45\x0b\xfa\x0c\xd6\xad\x6e",
2734 .ilen = 16,
2735 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2736 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2737 .rlen = 16,
2738 }, {
2739 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2740 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2741 "\x10\x11\x12\x13\x14\x15\x16\x17"
2742 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2743 .klen = 32,
2744 .input = "\x64\xa9\x1a\x37\xed\x9f\xe7\x49"
2745 "\xa8\x4e\x76\xd6\xf5\x0d\x78\xee",
2746 .ilen = 16,
2747 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2748 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2749 .rlen = 16,
2750 }, { /* KeySize=128, I=121 */
2751 .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80",
2752 .klen = 16,
2753 .input = "\x3d\xda\xbf\xc0\x06\xda\xab\x06"
2754 "\x46\x2a\xf4\xef\x81\x54\x4e\x26",
2755 .ilen = 16,
2756 .result = zeroed_string,
2757 .rlen = 16,
2758 },
2759};
2760
2761
2762/* Cast6 test vectors from RFC 2612 */
2763#define CAST6_ENC_TEST_VECTORS 3
2764#define CAST6_DEC_TEST_VECTORS 3
2765
2766static struct cipher_testvec cast6_enc_tv_template[] = {
2767 {
2768 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2769 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
2770 .klen = 16,
2771 .input = zeroed_string,
2772 .ilen = 16,
2773 .result = "\xc8\x42\xa0\x89\x72\xb4\x3d\x20"
2774 "\x83\x6c\x91\xd1\xb7\x53\x0f\x6b",
2775 .rlen = 16,
2776 }, {
2777 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2778 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2779 "\xba\xc7\x7a\x77\x17\x94\x28\x63",
2780 .klen = 24,
2781 .input = zeroed_string,
2782 .ilen = 16,
2783 .result = "\x1b\x38\x6c\x02\x10\xdc\xad\xcb"
2784 "\xdd\x0e\x41\xaa\x08\xa7\xa7\xe8",
2785 .rlen = 16,
2786 }, {
2787 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2788 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2789 "\x8d\x7c\x47\xce\x26\x49\x08\x46"
2790 "\x1c\xc1\xb5\x13\x7a\xe6\xb6\x04",
2791 .klen = 32,
2792 .input = zeroed_string,
2793 .ilen = 16,
2794 .result = "\x4f\x6a\x20\x38\x28\x68\x97\xb9"
2795 "\xc9\x87\x01\x36\x55\x33\x17\xfa",
2796 .rlen = 16,
2797 },
2798};
2799
2800static struct cipher_testvec cast6_dec_tv_template[] = {
2801 {
2802 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2803 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
2804 .klen = 16,
2805 .input = "\xc8\x42\xa0\x89\x72\xb4\x3d\x20"
2806 "\x83\x6c\x91\xd1\xb7\x53\x0f\x6b",
2807 .ilen = 16,
2808 .result = zeroed_string,
2809 .rlen = 16,
2810 }, {
2811 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2812 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2813 "\xba\xc7\x7a\x77\x17\x94\x28\x63",
2814 .klen = 24,
2815 .input = "\x1b\x38\x6c\x02\x10\xdc\xad\xcb"
2816 "\xdd\x0e\x41\xaa\x08\xa7\xa7\xe8",
2817 .ilen = 16,
2818 .result = zeroed_string,
2819 .rlen = 16,
2820 }, {
2821 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2822 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2823 "\x8d\x7c\x47\xce\x26\x49\x08\x46"
2824 "\x1c\xc1\xb5\x13\x7a\xe6\xb6\x04",
2825 .klen = 32,
2826 .input = "\x4f\x6a\x20\x38\x28\x68\x97\xb9"
2827 "\xc9\x87\x01\x36\x55\x33\x17\xfa",
2828 .ilen = 16,
2829 .result = zeroed_string,
2830 .rlen = 16,
2831 },
2832};
2833
2834
2835/*
2836 * AES test vectors.
2837 */
2838#define AES_ENC_TEST_VECTORS 3
2839#define AES_DEC_TEST_VECTORS 3
2840#define AES_CBC_ENC_TEST_VECTORS 4
2841#define AES_CBC_DEC_TEST_VECTORS 4
2842#define AES_LRW_ENC_TEST_VECTORS 8
2843#define AES_LRW_DEC_TEST_VECTORS 8
2844#define AES_XTS_ENC_TEST_VECTORS 4
2845#define AES_XTS_DEC_TEST_VECTORS 4
2846#define AES_CTR_ENC_TEST_VECTORS 7
2847#define AES_CTR_DEC_TEST_VECTORS 6
2848#define AES_GCM_ENC_TEST_VECTORS 9
2849#define AES_GCM_DEC_TEST_VECTORS 8
2850#define AES_CCM_ENC_TEST_VECTORS 7
2851#define AES_CCM_DEC_TEST_VECTORS 7
2852
2853static struct cipher_testvec aes_enc_tv_template[] = {
2854 { /* From FIPS-197 */
2855 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2856 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2857 .klen = 16,
2858 .input = "\x00\x11\x22\x33\x44\x55\x66\x77"
2859 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2860 .ilen = 16,
2861 .result = "\x69\xc4\xe0\xd8\x6a\x7b\x04\x30"
2862 "\xd8\xcd\xb7\x80\x70\xb4\xc5\x5a",
2863 .rlen = 16,
2864 }, {
2865 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2866 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2867 "\x10\x11\x12\x13\x14\x15\x16\x17",
2868 .klen = 24,
2869 .input = "\x00\x11\x22\x33\x44\x55\x66\x77"
2870 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2871 .ilen = 16,
2872 .result = "\xdd\xa9\x7c\xa4\x86\x4c\xdf\xe0"
2873 "\x6e\xaf\x70\xa0\xec\x0d\x71\x91",
2874 .rlen = 16,
2875 }, {
2876 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2877 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2878 "\x10\x11\x12\x13\x14\x15\x16\x17"
2879 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2880 .klen = 32,
2881 .input = "\x00\x11\x22\x33\x44\x55\x66\x77"
2882 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2883 .ilen = 16,
2884 .result = "\x8e\xa2\xb7\xca\x51\x67\x45\xbf"
2885 "\xea\xfc\x49\x90\x4b\x49\x60\x89",
2886 .rlen = 16,
2887 },
2888};
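Editor's note (not part of the patch): the first aes_enc_tv_template entry is the FIPS-197 AES-128 example quoted in its comment, so it can be cross-checked from userspace. The sketch below assumes OpenSSL's EVP interface; the file name aes_check.c is a placeholder, build with cc aes_check.c -lcrypto.

#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

int main(void)
{
	/* Key, plaintext and expected ciphertext copied from the first
	 * aes_enc_tv_template entry above. */
	static const unsigned char key[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	static const unsigned char pt[16] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
	};
	static const unsigned char expect[16] = {
		0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
		0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a,
	};
	unsigned char ct[16];
	int outl = 0;
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

	/* Single-block ECB encryption with padding disabled. */
	EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL);
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	EVP_EncryptUpdate(ctx, ct, &outl, pt, sizeof(pt));
	EVP_CIPHER_CTX_free(ctx);

	printf("FIPS-197 AES-128 vector: %s\n",
	       (outl == 16 && !memcmp(ct, expect, 16)) ? "ok" : "MISMATCH");
	return 0;
}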
2889
2890static struct cipher_testvec aes_dec_tv_template[] = {
2891 { /* From FIPS-197 */
2892 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2893 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2894 .klen = 16,
2895 .input = "\x69\xc4\xe0\xd8\x6a\x7b\x04\x30"
2896 "\xd8\xcd\xb7\x80\x70\xb4\xc5\x5a",
2897 .ilen = 16,
2898 .result = "\x00\x11\x22\x33\x44\x55\x66\x77"
2899 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2900 .rlen = 16,
2901 }, {
2902 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2903 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2904 "\x10\x11\x12\x13\x14\x15\x16\x17",
2905 .klen = 24,
2906 .input = "\xdd\xa9\x7c\xa4\x86\x4c\xdf\xe0"
2907 "\x6e\xaf\x70\xa0\xec\x0d\x71\x91",
2908 .ilen = 16,
2909 .result = "\x00\x11\x22\x33\x44\x55\x66\x77"
2910 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2911 .rlen = 16,
2912 }, {
2913 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2914 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2915 "\x10\x11\x12\x13\x14\x15\x16\x17"
2916 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2917 .klen = 32,
2918 .input = "\x8e\xa2\xb7\xca\x51\x67\x45\xbf"
2919 "\xea\xfc\x49\x90\x4b\x49\x60\x89",
2920 .ilen = 16,
2921 .result = "\x00\x11\x22\x33\x44\x55\x66\x77"
2922 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2923 .rlen = 16,
2924 },
2925};
2926
2927static struct cipher_testvec aes_cbc_enc_tv_template[] = {
2928 { /* From RFC 3602 */
2929 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
2930 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
2931 .klen = 16,
2932 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
2933 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
2934 .input = "Single block msg",
2935 .ilen = 16,
2936 .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
2937 "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
2938 .rlen = 16,
2939 }, {
2940 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
2941 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
2942 .klen = 16,
2943 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
2944 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
2945 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2946 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2947 "\x10\x11\x12\x13\x14\x15\x16\x17"
2948 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2949 .ilen = 32,
2950 .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
2951 "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
2952 "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
2953 "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1",
2954 .rlen = 32,
2955 }, { /* From NIST SP800-38A */
2956 .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
2957 "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
2958 "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
2959 .klen = 24,
2960 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2961 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2962 .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
2963 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
2964 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
2965 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
2966 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
2967 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
2968 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
2969 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
2970 .ilen = 64,
2971 .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
2972 "\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
2973 "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
2974 "\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
2975 "\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
2976 "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
2977 "\x08\xb0\xe2\x79\x88\x59\x88\x81"
2978 "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd",
2979 .rlen = 64,
2980 }, {
2981 .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
2982 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
2983 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
2984 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
2985 .klen = 32,
2986 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2987 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2988 .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
2989 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
2990 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
2991 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
2992 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
2993 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
2994 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
2995 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
2996 .ilen = 64,
2997 .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
2998 "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
2999 "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
3000 "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
3001 "\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
3002 "\xa5\x30\xe2\x63\x04\x23\x14\x61"
3003 "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
3004 "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
3005 .rlen = 64,
3006 },
3007};
3008
3009static struct cipher_testvec aes_cbc_dec_tv_template[] = {
3010 { /* From RFC 3602 */
3011 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
3012 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
3013 .klen = 16,
3014 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
3015 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
3016 .input = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
3017 "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
3018 .ilen = 16,
3019 .result = "Single block msg",
3020 .rlen = 16,
3021 }, {
3022 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
3023 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
3024 .klen = 16,
3025 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
3026 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
3027 .input = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
3028 "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
3029 "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
3030 "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1",
3031 .ilen = 32,
3032 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
3033 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3034 "\x10\x11\x12\x13\x14\x15\x16\x17"
3035 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
3036 .rlen = 32,
3037 }, { /* From NIST SP800-38A */
3038 .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
3039 "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
3040 "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
3041 .klen = 24,
3042 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
3043 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
3044 .input = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
3045 "\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
3046 "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
3047 "\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
3048 "\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
3049 "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
3050 "\x08\xb0\xe2\x79\x88\x59\x88\x81"
3051 "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd",
3052 .ilen = 64,
3053 .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
3054 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
3055 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
3056 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
3057 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
3058 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
3059 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
3060 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
3061 .rlen = 64,
3062 }, {
3063 .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
3064 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
3065 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
3066 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
3067 .klen = 32,
3068 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
3069 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
3070 .input = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
3071 "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
3072 "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
3073 "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
3074 "\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
3075 "\xa5\x30\xe2\x63\x04\x23\x14\x61"
3076 "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
3077 "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
3078 .ilen = 64,
3079 .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
3080 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
3081 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
3082 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
3083 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
3084 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
3085 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
3086 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
3087 .rlen = 64,
3088 },
3089};
3090
3091static struct cipher_testvec aes_lrw_enc_tv_template[] = {
3092 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
3093 { /* LRW-32-AES 1 */
3094 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
3095 "\x4c\x26\x84\x14\xb5\x68\x01\x85"
3096 "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
3097 "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
3098 .klen = 32,
3099 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3100 "\x00\x00\x00\x00\x00\x00\x00\x01",
3101 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3102 "\x38\x39\x41\x42\x43\x44\x45\x46",
3103 .ilen = 16,
3104 .result = "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
3105 "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
3106 .rlen = 16,
3107 }, { /* LRW-32-AES 2 */
3108 .key = "\x59\x70\x47\x14\xf5\x57\x47\x8c"
3109 "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
3110 "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
3111 "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
3112 .klen = 32,
3113 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3114 "\x00\x00\x00\x00\x00\x00\x00\x02",
3115 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3116 "\x38\x39\x41\x42\x43\x44\x45\x46",
3117 .ilen = 16,
3118 .result = "\x00\xc8\x2b\xae\x95\xbb\xcd\xe5"
3119 "\x27\x4f\x07\x69\xb2\x60\xe1\x36",
3120 .rlen = 16,
3121 }, { /* LRW-32-AES 3 */
3122 .key = "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
3123 "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
3124 "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
3125 "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
3126 .klen = 32,
3127 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3128 "\x00\x00\x00\x02\x00\x00\x00\x00",
3129 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3130 "\x38\x39\x41\x42\x43\x44\x45\x46",
3131 .ilen = 16,
3132 .result = "\x76\x32\x21\x83\xed\x8f\xf1\x82"
3133 "\xf9\x59\x62\x03\x69\x0e\x5e\x01",
3134 .rlen = 16,
3135 }, { /* LRW-32-AES 4 */
3136 .key = "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
3137 "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
3138 "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
3139 "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
3140 "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
3141 .klen = 40,
3142 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3143 "\x00\x00\x00\x00\x00\x00\x00\x01",
3144 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3145 "\x38\x39\x41\x42\x43\x44\x45\x46",
3146 .ilen = 16,
3147 .result = "\x9c\x0f\x15\x2f\x55\xa2\xd8\xf0"
3148 "\xd6\x7b\x8f\x9e\x28\x22\xbc\x41",
3149 .rlen = 16,
3150 }, { /* LRW-32-AES 5 */
3151 .key = "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
3152 "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
3153 "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
3154 "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
3155 "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
3156 .klen = 40,
3157 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3158 "\x00\x00\x00\x02\x00\x00\x00\x00",
3159 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3160 "\x38\x39\x41\x42\x43\x44\x45\x46",
3161 .ilen = 16,
3162 .result = "\xd4\x27\x6a\x7f\x14\x91\x3d\x65"
3163 "\xc8\x60\x48\x02\x87\xe3\x34\x06",
3164 .rlen = 16,
3165 }, { /* LRW-32-AES 6 */
3166 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3167 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3168 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3169 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3170 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3171 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3172 .klen = 48,
3173 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3174 "\x00\x00\x00\x00\x00\x00\x00\x01",
3175 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3176 "\x38\x39\x41\x42\x43\x44\x45\x46",
3177 .ilen = 16,
3178 .result = "\xbd\x06\xb8\xe1\xdb\x98\x89\x9e"
3179 "\xc4\x98\xe4\x91\xcf\x1c\x70\x2b",
3180 .rlen = 16,
3181 }, { /* LRW-32-AES 7 */
3182 .key = "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
3183 "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
3184 "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
3185 "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
3186 "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
3187 "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
3188 .klen = 48,
3189 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3190 "\x00\x00\x00\x02\x00\x00\x00\x00",
3191 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3192 "\x38\x39\x41\x42\x43\x44\x45\x46",
3193 .ilen = 16,
3194 .result = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
3195 "\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
3196 .rlen = 16,
3197 }, {
3198/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
3199 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3200 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3201 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3202 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3203 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3204 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3205 .klen = 48,
3206 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3207 "\x00\x00\x00\x00\x00\x00\x00\x01",
3208 .input = "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
3209 "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
3210 "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
3211 "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
3212 "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
3213 "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
3214 "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
3215 "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
3216 "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
3217 "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
3218 "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
3219 "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
3220 "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
3221 "\x4c\x96\x12\xed\x7c\x92\x03\x01"
3222 "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
3223 "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
3224 "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
3225 "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
3226 "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
3227 "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
3228 "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
3229 "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
3230 "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
3231 "\x76\x12\x73\x44\x1a\x56\xd7\x72"
3232 "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
3233 "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
3234 "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
3235 "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
3236 "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
3237 "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
3238 "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
3239 "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
3240 "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
3241 "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
3242 "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
3243 "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
3244 "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
3245 "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
3246 "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
3247 "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
3248 "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
3249 "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
3250 "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
3251 "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
3252 "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
3253 "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
3254 "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
3255 "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
3256 "\x62\x73\x65\xfd\x46\x63\x25\x3d"
3257 "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
3258 "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
3259 "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
3260 "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
3261 "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
3262 "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
3263 "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
3264 "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
3265 "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
3266 "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
3267 "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
3268 "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
3269 "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
3270 "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
3271 "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
3272 .ilen = 512,
3273 .result = "\x1a\x1d\xa9\x30\xad\xf9\x2f\x9b"
3274 "\xb6\x1d\xae\xef\xf0\x2f\xf8\x5a"
3275 "\x39\x3c\xbf\x2a\xb2\x45\xb2\x23"
3276 "\x1b\x63\x3c\xcf\xaa\xbe\xcf\x4e"
3277 "\xfa\xe8\x29\xc2\x20\x68\x2b\x3c"
3278 "\x2e\x8b\xf7\x6e\x25\xbd\xe3\x3d"
3279 "\x66\x27\xd6\xaf\xd6\x64\x3e\xe3"
3280 "\xe8\x58\x46\x97\x39\x51\x07\xde"
3281 "\xcb\x37\xbc\xa9\xc0\x5f\x75\xc3"
3282 "\x0e\x84\x23\x1d\x16\xd4\x1c\x59"
3283 "\x9c\x1a\x02\x55\xab\x3a\x97\x1d"
3284 "\xdf\xdd\xc7\x06\x51\xd7\x70\xae"
3285 "\x23\xc6\x8c\xf5\x1e\xa0\xe5\x82"
3286 "\xb8\xb2\xbf\x04\xa0\x32\x8e\x68"
3287 "\xeb\xaf\x6e\x2d\x94\x22\x2f\xce"
3288 "\x4c\xb5\x59\xe2\xa2\x2f\xa0\x98"
3289 "\x1a\x97\xc6\xd4\xb5\x00\x59\xf2"
3290 "\x84\x14\x72\xb1\x9a\x6e\xa3\x7f"
3291 "\xea\x20\xe7\xcb\x65\x77\x3a\xdf"
3292 "\xc8\x97\x67\x15\xc2\x2a\x27\xcc"
3293 "\x18\x55\xa1\x24\x0b\x24\x24\xaf"
3294 "\x5b\xec\x68\xb8\xc8\xf5\xba\x63"
3295 "\xff\xed\x89\xce\xd5\x3d\x88\xf3"
3296 "\x25\xef\x05\x7c\x3a\xef\xeb\xd8"
3297 "\x7a\x32\x0d\xd1\x1e\x58\x59\x99"
3298 "\x90\x25\xb5\x26\xb0\xe3\x2b\x6c"
3299 "\x4c\xa9\x8b\x84\x4f\x5e\x01\x50"
3300 "\x41\x30\x58\xc5\x62\x74\x52\x1d"
3301 "\x45\x24\x6a\x42\x64\x4f\x97\x1c"
3302 "\xa8\x66\xb5\x6d\x79\xd4\x0d\x48"
3303 "\xc5\x5f\xf3\x90\x32\xdd\xdd\xe1"
3304 "\xe4\xa9\x9f\xfc\xc3\x52\x5a\x46"
3305 "\xe4\x81\x84\x95\x36\x59\x7a\x6b"
3306 "\xaa\xb3\x60\xad\xce\x9f\x9f\x28"
3307 "\xe0\x01\x75\x22\xc4\x4e\xa9\x62"
3308 "\x5c\x62\x0d\x00\xcb\x13\xe8\x43"
3309 "\x72\xd4\x2d\x53\x46\xb5\xd1\x16"
3310 "\x22\x18\xdf\x34\x33\xf5\xd6\x1c"
3311 "\xb8\x79\x78\x97\x94\xff\x72\x13"
3312 "\x4c\x27\xfc\xcb\xbf\x01\x53\xa6"
3313 "\xb4\x50\x6e\xde\xdf\xb5\x43\xa4"
3314 "\x59\xdf\x52\xf9\x7c\xe0\x11\x6f"
3315 "\x2d\x14\x8e\x24\x61\x2c\xe1\x17"
3316 "\xcc\xce\x51\x0c\x19\x8a\x82\x30"
3317 "\x94\xd5\x3d\x6a\x53\x06\x5e\xbd"
3318 "\xb7\xeb\xfa\xfd\x27\x51\xde\x85"
3319 "\x1e\x86\x53\x11\x53\x94\x00\xee"
3320 "\x2b\x8c\x08\x2a\xbf\xdd\xae\x11"
3321 "\xcb\x1e\xa2\x07\x9a\x80\xcf\x62"
3322 "\x9b\x09\xdc\x95\x3c\x96\x8e\xb1"
3323 "\x09\xbd\xe4\xeb\xdb\xca\x70\x7a"
3324 "\x9e\xfa\x31\x18\x45\x3c\x21\x33"
3325 "\xb0\xb3\x2b\xea\xf3\x71\x2d\xe1"
3326 "\x03\xad\x1b\x48\xd4\x67\x27\xf0"
3327 "\x62\xe4\x3d\xfb\x9b\x08\x76\xe7"
3328 "\xdd\x2b\x01\x39\x04\x5a\x58\x7a"
3329 "\xf7\x11\x90\xec\xbd\x51\x5c\x32"
3330 "\x6b\xd7\x35\x39\x02\x6b\xf2\xa6"
3331 "\xd0\x0d\x07\xe1\x06\xc4\x5b\x7d"
3332 "\xe4\x6a\xd7\xee\x15\x1f\x83\xb4"
3333 "\xa3\xa7\x5e\xc3\x90\xb7\xef\xd3"
3334 "\xb7\x4f\xf8\x92\x4c\xb7\x3c\x29"
3335 "\xcd\x7e\x2b\x5d\x43\xea\x42\xe7"
3336 "\x74\x3f\x7d\x58\x88\x75\xde\x3e",
3337 .rlen = 512,
3338 }
3339};
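/*
 * Editor's note: a minimal sketch (not part of the patch) of how one of the
 * LRW vectors above could be pushed through the kernel's "lrw(aes)" template
 * with the blkcipher API of this era.  In these vectors .key is the AES key
 * with the 16-byte LRW tweak key appended, so klen 32/40/48 selects
 * AES-128/192/256, and .iv carries the 16-byte per-block tweak from the
 * P1619 draft.  Assumes <linux/crypto.h>, <linux/scatterlist.h>,
 * <linux/err.h> and <linux/string.h>; error paths are trimmed for brevity.
 */
static int lrw_vector_demo(const struct cipher_testvec *tv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	u8 buf[512];	/* largest .ilen in the template above is 512 */
	int err;

	tfm = crypto_alloc_blkcipher("lrw(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_blkcipher_setkey(tfm, tv->key, tv->klen);
	if (!err) {
		memcpy(buf, tv->input, tv->ilen);
		sg_init_one(&sg, buf, tv->ilen);
		crypto_blkcipher_set_iv(tfm, tv->iv,
					crypto_blkcipher_ivsize(tfm));
		err = crypto_blkcipher_encrypt(&desc, &sg, &sg, tv->ilen);
		if (!err && memcmp(buf, tv->result, tv->rlen))
			err = -EINVAL;	/* output did not match .result */
	}

	crypto_free_blkcipher(tfm);
	return err;
}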
3340
3341static struct cipher_testvec aes_lrw_dec_tv_template[] = {
3342 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
3343 /* same as enc vectors with input and result reversed */
3344 { /* LRW-32-AES 1 */
3345 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
3346 "\x4c\x26\x84\x14\xb5\x68\x01\x85"
3347 "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
3348 "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
3349 .klen = 32,
3350 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3351 "\x00\x00\x00\x00\x00\x00\x00\x01",
3352 .input = "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
3353 "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
3354 .ilen = 16,
3355 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3356 "\x38\x39\x41\x42\x43\x44\x45\x46",
3357 .rlen = 16,
3358 }, { /* LRW-32-AES 2 */
3359 .key = "\x59\x70\x47\x14\xf5\x57\x47\x8c"
3360 "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
3361 "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
3362 "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
3363 .klen = 32,
3364 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3365 "\x00\x00\x00\x00\x00\x00\x00\x02",
3366 .input = "\x00\xc8\x2b\xae\x95\xbb\xcd\xe5"
3367 "\x27\x4f\x07\x69\xb2\x60\xe1\x36",
3368 .ilen = 16,
3369 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3370 "\x38\x39\x41\x42\x43\x44\x45\x46",
3371 .rlen = 16,
3372 }, { /* LRW-32-AES 3 */
3373 .key = "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
3374 "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
3375 "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
3376 "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
3377 .klen = 32,
3378 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3379 "\x00\x00\x00\x02\x00\x00\x00\x00",
3380 .input = "\x76\x32\x21\x83\xed\x8f\xf1\x82"
3381 "\xf9\x59\x62\x03\x69\x0e\x5e\x01",
3382 .ilen = 16,
3383 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3384 "\x38\x39\x41\x42\x43\x44\x45\x46",
3385 .rlen = 16,
3386 }, { /* LRW-32-AES 4 */
3387 .key = "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
3388 "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
3389 "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
3390 "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
3391 "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
3392 .klen = 40,
3393 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3394 "\x00\x00\x00\x00\x00\x00\x00\x01",
3395 .input = "\x9c\x0f\x15\x2f\x55\xa2\xd8\xf0"
3396 "\xd6\x7b\x8f\x9e\x28\x22\xbc\x41",
3397 .ilen = 16,
3398 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3399 "\x38\x39\x41\x42\x43\x44\x45\x46",
3400 .rlen = 16,
3401 }, { /* LRW-32-AES 5 */
3402 .key = "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
3403 "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
3404 "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
3405 "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
3406 "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
3407 .klen = 40,
3408 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3409 "\x00\x00\x00\x02\x00\x00\x00\x00",
3410 .input = "\xd4\x27\x6a\x7f\x14\x91\x3d\x65"
3411 "\xc8\x60\x48\x02\x87\xe3\x34\x06",
3412 .ilen = 16,
3413 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3414 "\x38\x39\x41\x42\x43\x44\x45\x46",
3415 .rlen = 16,
3416 }, { /* LRW-32-AES 6 */
3417 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3418 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3419 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3420 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3421 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3422 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3423 .klen = 48,
3424 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3425 "\x00\x00\x00\x00\x00\x00\x00\x01",
3426 .input = "\xbd\x06\xb8\xe1\xdb\x98\x89\x9e"
3427 "\xc4\x98\xe4\x91\xcf\x1c\x70\x2b",
3428 .ilen = 16,
3429 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3430 "\x38\x39\x41\x42\x43\x44\x45\x46",
3431 .rlen = 16,
3432 }, { /* LRW-32-AES 7 */
3433 .key = "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
3434 "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
3435 "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
3436 "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
3437 "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
3438 "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
3439 .klen = 48,
3440 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3441 "\x00\x00\x00\x02\x00\x00\x00\x00",
3442 .input = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
3443 "\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
3444 .ilen = 16,
3445 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3446 "\x38\x39\x41\x42\x43\x44\x45\x46",
3447 .rlen = 16,
3448 }, {
3449/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
3450 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3451 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3452 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3453 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3454 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3455 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3456 .klen = 48,
3457 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3458 "\x00\x00\x00\x00\x00\x00\x00\x01",
3459 .input = "\x1a\x1d\xa9\x30\xad\xf9\x2f\x9b"
3460 "\xb6\x1d\xae\xef\xf0\x2f\xf8\x5a"
3461 "\x39\x3c\xbf\x2a\xb2\x45\xb2\x23"
3462 "\x1b\x63\x3c\xcf\xaa\xbe\xcf\x4e"
3463 "\xfa\xe8\x29\xc2\x20\x68\x2b\x3c"
3464 "\x2e\x8b\xf7\x6e\x25\xbd\xe3\x3d"
3465 "\x66\x27\xd6\xaf\xd6\x64\x3e\xe3"
3466 "\xe8\x58\x46\x97\x39\x51\x07\xde"
3467 "\xcb\x37\xbc\xa9\xc0\x5f\x75\xc3"
3468 "\x0e\x84\x23\x1d\x16\xd4\x1c\x59"
3469 "\x9c\x1a\x02\x55\xab\x3a\x97\x1d"
3470 "\xdf\xdd\xc7\x06\x51\xd7\x70\xae"
3471 "\x23\xc6\x8c\xf5\x1e\xa0\xe5\x82"
3472 "\xb8\xb2\xbf\x04\xa0\x32\x8e\x68"
3473 "\xeb\xaf\x6e\x2d\x94\x22\x2f\xce"
3474 "\x4c\xb5\x59\xe2\xa2\x2f\xa0\x98"
3475 "\x1a\x97\xc6\xd4\xb5\x00\x59\xf2"
3476 "\x84\x14\x72\xb1\x9a\x6e\xa3\x7f"
3477 "\xea\x20\xe7\xcb\x65\x77\x3a\xdf"
3478 "\xc8\x97\x67\x15\xc2\x2a\x27\xcc"
3479 "\x18\x55\xa1\x24\x0b\x24\x24\xaf"
3480 "\x5b\xec\x68\xb8\xc8\xf5\xba\x63"
3481 "\xff\xed\x89\xce\xd5\x3d\x88\xf3"
3482 "\x25\xef\x05\x7c\x3a\xef\xeb\xd8"
3483 "\x7a\x32\x0d\xd1\x1e\x58\x59\x99"
3484 "\x90\x25\xb5\x26\xb0\xe3\x2b\x6c"
3485 "\x4c\xa9\x8b\x84\x4f\x5e\x01\x50"
3486 "\x41\x30\x58\xc5\x62\x74\x52\x1d"
3487 "\x45\x24\x6a\x42\x64\x4f\x97\x1c"
3488 "\xa8\x66\xb5\x6d\x79\xd4\x0d\x48"
3489 "\xc5\x5f\xf3\x90\x32\xdd\xdd\xe1"
3490 "\xe4\xa9\x9f\xfc\xc3\x52\x5a\x46"
3491 "\xe4\x81\x84\x95\x36\x59\x7a\x6b"
3492 "\xaa\xb3\x60\xad\xce\x9f\x9f\x28"
3493 "\xe0\x01\x75\x22\xc4\x4e\xa9\x62"
3494 "\x5c\x62\x0d\x00\xcb\x13\xe8\x43"
3495 "\x72\xd4\x2d\x53\x46\xb5\xd1\x16"
3496 "\x22\x18\xdf\x34\x33\xf5\xd6\x1c"
3497 "\xb8\x79\x78\x97\x94\xff\x72\x13"
3498 "\x4c\x27\xfc\xcb\xbf\x01\x53\xa6"
3499 "\xb4\x50\x6e\xde\xdf\xb5\x43\xa4"
3500 "\x59\xdf\x52\xf9\x7c\xe0\x11\x6f"
3501 "\x2d\x14\x8e\x24\x61\x2c\xe1\x17"
3502 "\xcc\xce\x51\x0c\x19\x8a\x82\x30"
3503 "\x94\xd5\x3d\x6a\x53\x06\x5e\xbd"
3504 "\xb7\xeb\xfa\xfd\x27\x51\xde\x85"
3505 "\x1e\x86\x53\x11\x53\x94\x00\xee"
3506 "\x2b\x8c\x08\x2a\xbf\xdd\xae\x11"
3507 "\xcb\x1e\xa2\x07\x9a\x80\xcf\x62"
3508 "\x9b\x09\xdc\x95\x3c\x96\x8e\xb1"
3509 "\x09\xbd\xe4\xeb\xdb\xca\x70\x7a"
3510 "\x9e\xfa\x31\x18\x45\x3c\x21\x33"
3511 "\xb0\xb3\x2b\xea\xf3\x71\x2d\xe1"
3512 "\x03\xad\x1b\x48\xd4\x67\x27\xf0"
3513 "\x62\xe4\x3d\xfb\x9b\x08\x76\xe7"
3514 "\xdd\x2b\x01\x39\x04\x5a\x58\x7a"
3515 "\xf7\x11\x90\xec\xbd\x51\x5c\x32"
3516 "\x6b\xd7\x35\x39\x02\x6b\xf2\xa6"
3517 "\xd0\x0d\x07\xe1\x06\xc4\x5b\x7d"
3518 "\xe4\x6a\xd7\xee\x15\x1f\x83\xb4"
3519 "\xa3\xa7\x5e\xc3\x90\xb7\xef\xd3"
3520 "\xb7\x4f\xf8\x92\x4c\xb7\x3c\x29"
3521 "\xcd\x7e\x2b\x5d\x43\xea\x42\xe7"
3522 "\x74\x3f\x7d\x58\x88\x75\xde\x3e",
3523 .ilen = 512,
3524 .result = "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
3525 "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
3526 "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
3527 "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
3528 "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
3529 "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
3530 "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
3531 "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
3532 "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
3533 "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
3534 "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
3535 "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
3536 "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
3537 "\x4c\x96\x12\xed\x7c\x92\x03\x01"
3538 "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
3539 "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
3540 "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
3541 "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
3542 "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
3543 "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
3544 "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
3545 "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
3546 "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
3547 "\x76\x12\x73\x44\x1a\x56\xd7\x72"
3548 "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
3549 "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
3550 "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
3551 "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
3552 "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
3553 "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
3554 "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
3555 "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
3556 "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
3557 "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
3558 "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
3559 "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
3560 "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
3561 "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
3562 "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
3563 "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
3564 "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
3565 "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
3566 "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
3567 "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
3568 "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
3569 "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
3570 "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
3571 "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
3572 "\x62\x73\x65\xfd\x46\x63\x25\x3d"
3573 "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
3574 "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
3575 "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
3576 "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
3577 "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
3578 "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
3579 "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
3580 "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
3581 "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
3582 "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
3583 "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
3584 "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
3585 "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
3586 "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
3587 "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
3588 .rlen = 512,
3589 }
3590};
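/*
 * Editor's note: the decryption template above is just the encryption
 * vectors with .input and .result swapped, so a single walker can verify
 * both directions.  Below is a simplified, illustrative model of how such a
 * template array is consumed; the real harness (crypto/tcrypt.c) also deals
 * with scatterlists, split buffers and async completion.  The crypt_one_fn
 * callback is hypothetical and stands in for a call into the crypto API,
 * e.g. along the lines of the sketch after aes_lrw_enc_tv_template[].
 */
typedef int (*crypt_one_fn)(const char *key, unsigned int klen,
			    const char *iv, const char *in, char *out,
			    unsigned int len);

static int check_cipher_template(const struct cipher_testvec *tv,
				 unsigned int count, crypt_one_fn crypt_one)
{
	char out[512];	/* large enough for every vector in this template */
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		/* each entry pairs .input/.ilen with the expected .result/.rlen */
		err = crypt_one(tv[i].key, tv[i].klen, tv[i].iv,
				tv[i].input, out, tv[i].ilen);
		if (err)
			return err;
		if (memcmp(out, tv[i].result, tv[i].rlen))
			return -EINVAL;
	}
	return 0;
}

/*
 * Typical call, given a suitable callback:
 *	check_cipher_template(aes_lrw_dec_tv_template,
 *			      ARRAY_SIZE(aes_lrw_dec_tv_template),
 *			      lrw_decrypt_one);
 */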
3591
3592static struct cipher_testvec aes_xts_enc_tv_template[] = {
3593 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
3594 { /* XTS-AES 1 */
3595 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
3596 "\x00\x00\x00\x00\x00\x00\x00\x00"
3597 "\x00\x00\x00\x00\x00\x00\x00\x00"
3598 "\x00\x00\x00\x00\x00\x00\x00\x00",
3599 .klen = 32,
3600 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3601 "\x00\x00\x00\x00\x00\x00\x00\x00",
3602 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
3603 "\x00\x00\x00\x00\x00\x00\x00\x00"
3604 "\x00\x00\x00\x00\x00\x00\x00\x00"
3605 "\x00\x00\x00\x00\x00\x00\x00\x00",
3606 .ilen = 32,
3607 .result = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
3608 "\x9b\x9f\xe9\xa3\xea\xdd\xa6\x92"
3609 "\xcd\x43\xd2\xf5\x95\x98\xed\x85"
3610 "\x8c\x02\xc2\x65\x2f\xbf\x92\x2e",
3611 .rlen = 32,
3612 }, { /* XTS-AES 2 */
3613 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
3614 "\x11\x11\x11\x11\x11\x11\x11\x11"
3615 "\x22\x22\x22\x22\x22\x22\x22\x22"
3616 "\x22\x22\x22\x22\x22\x22\x22\x22",
3617 .klen = 32,
3618 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3619 "\x00\x00\x00\x00\x00\x00\x00\x00",
3620 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
3621 "\x44\x44\x44\x44\x44\x44\x44\x44"
3622 "\x44\x44\x44\x44\x44\x44\x44\x44"
3623 "\x44\x44\x44\x44\x44\x44\x44\x44",
3624 .ilen = 32,
3625 .result = "\xc4\x54\x18\x5e\x6a\x16\x93\x6e"
3626 "\x39\x33\x40\x38\xac\xef\x83\x8b"
3627 "\xfb\x18\x6f\xff\x74\x80\xad\xc4"
3628 "\x28\x93\x82\xec\xd6\xd3\x94\xf0",
3629 .rlen = 32,
3630 }, { /* XTS-AES 3 */
3631 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
3632 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
3633 "\x22\x22\x22\x22\x22\x22\x22\x22"
3634 "\x22\x22\x22\x22\x22\x22\x22\x22",
3635 .klen = 32,
3636 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3637 "\x00\x00\x00\x00\x00\x00\x00\x00",
3638 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
3639 "\x44\x44\x44\x44\x44\x44\x44\x44"
3640 "\x44\x44\x44\x44\x44\x44\x44\x44"
3641 "\x44\x44\x44\x44\x44\x44\x44\x44",
3642 .ilen = 32,
3643 .result = "\xaf\x85\x33\x6b\x59\x7a\xfc\x1a"
3644 "\x90\x0b\x2e\xb2\x1e\xc9\x49\xd2"
3645 "\x92\xdf\x4c\x04\x7e\x0b\x21\x53"
3646 "\x21\x86\xa5\x97\x1a\x22\x7a\x89",
3647 .rlen = 32,
3648 }, { /* XTS-AES 4 */
3649 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
3650 "\x23\x53\x60\x28\x74\x71\x35\x26"
3651 "\x31\x41\x59\x26\x53\x58\x97\x93"
3652 "\x23\x84\x62\x64\x33\x83\x27\x95",
3653 .klen = 32,
3654 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3655 "\x00\x00\x00\x00\x00\x00\x00\x00",
3656 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
3657 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3658 "\x10\x11\x12\x13\x14\x15\x16\x17"
3659 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3660 "\x20\x21\x22\x23\x24\x25\x26\x27"
3661 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3662 "\x30\x31\x32\x33\x34\x35\x36\x37"
3663 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3664 "\x40\x41\x42\x43\x44\x45\x46\x47"
3665 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3666 "\x50\x51\x52\x53\x54\x55\x56\x57"
3667 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3668 "\x60\x61\x62\x63\x64\x65\x66\x67"
3669 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3670 "\x70\x71\x72\x73\x74\x75\x76\x77"
3671 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3672 "\x80\x81\x82\x83\x84\x85\x86\x87"
3673 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3674 "\x90\x91\x92\x93\x94\x95\x96\x97"
3675 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3676 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3677 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3678 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3679 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3680 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3681 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3682 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3683 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3684 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3685 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3686 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3687 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
3688 "\x00\x01\x02\x03\x04\x05\x06\x07"
3689 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3690 "\x10\x11\x12\x13\x14\x15\x16\x17"
3691 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3692 "\x20\x21\x22\x23\x24\x25\x26\x27"
3693 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3694 "\x30\x31\x32\x33\x34\x35\x36\x37"
3695 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3696 "\x40\x41\x42\x43\x44\x45\x46\x47"
3697 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3698 "\x50\x51\x52\x53\x54\x55\x56\x57"
3699 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3700 "\x60\x61\x62\x63\x64\x65\x66\x67"
3701 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3702 "\x70\x71\x72\x73\x74\x75\x76\x77"
3703 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3704 "\x80\x81\x82\x83\x84\x85\x86\x87"
3705 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3706 "\x90\x91\x92\x93\x94\x95\x96\x97"
3707 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3708 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3709 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3710 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3711 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3712 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3713 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3714 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3715 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3716 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3717 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3718 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3719 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
3720 .ilen = 512,
3721 .result = "\x27\xa7\x47\x9b\xef\xa1\xd4\x76"
3722 "\x48\x9f\x30\x8c\xd4\xcf\xa6\xe2"
3723 "\xa9\x6e\x4b\xbe\x32\x08\xff\x25"
3724 "\x28\x7d\xd3\x81\x96\x16\xe8\x9c"
3725 "\xc7\x8c\xf7\xf5\xe5\x43\x44\x5f"
3726 "\x83\x33\xd8\xfa\x7f\x56\x00\x00"
3727 "\x05\x27\x9f\xa5\xd8\xb5\xe4\xad"
3728 "\x40\xe7\x36\xdd\xb4\xd3\x54\x12"
3729 "\x32\x80\x63\xfd\x2a\xab\x53\xe5"
3730 "\xea\x1e\x0a\x9f\x33\x25\x00\xa5"
3731 "\xdf\x94\x87\xd0\x7a\x5c\x92\xcc"
3732 "\x51\x2c\x88\x66\xc7\xe8\x60\xce"
3733 "\x93\xfd\xf1\x66\xa2\x49\x12\xb4"
3734 "\x22\x97\x61\x46\xae\x20\xce\x84"
3735 "\x6b\xb7\xdc\x9b\xa9\x4a\x76\x7a"
3736 "\xae\xf2\x0c\x0d\x61\xad\x02\x65"
3737 "\x5e\xa9\x2d\xc4\xc4\xe4\x1a\x89"
3738 "\x52\xc6\x51\xd3\x31\x74\xbe\x51"
3739 "\xa1\x0c\x42\x11\x10\xe6\xd8\x15"
3740 "\x88\xed\xe8\x21\x03\xa2\x52\xd8"
3741 "\xa7\x50\xe8\x76\x8d\xef\xff\xed"
3742 "\x91\x22\x81\x0a\xae\xb9\x9f\x91"
3743 "\x72\xaf\x82\xb6\x04\xdc\x4b\x8e"
3744 "\x51\xbc\xb0\x82\x35\xa6\xf4\x34"
3745 "\x13\x32\xe4\xca\x60\x48\x2a\x4b"
3746 "\xa1\xa0\x3b\x3e\x65\x00\x8f\xc5"
3747 "\xda\x76\xb7\x0b\xf1\x69\x0d\xb4"
3748 "\xea\xe2\x9c\x5f\x1b\xad\xd0\x3c"
3749 "\x5c\xcf\x2a\x55\xd7\x05\xdd\xcd"
3750 "\x86\xd4\x49\x51\x1c\xeb\x7e\xc3"
3751 "\x0b\xf1\x2b\x1f\xa3\x5b\x91\x3f"
3752 "\x9f\x74\x7a\x8a\xfd\x1b\x13\x0e"
3753 "\x94\xbf\xf9\x4e\xff\xd0\x1a\x91"
3754 "\x73\x5c\xa1\x72\x6a\xcd\x0b\x19"
3755 "\x7c\x4e\x5b\x03\x39\x36\x97\xe1"
3756 "\x26\x82\x6f\xb6\xbb\xde\x8e\xcc"
3757 "\x1e\x08\x29\x85\x16\xe2\xc9\xed"
3758 "\x03\xff\x3c\x1b\x78\x60\xf6\xde"
3759 "\x76\xd4\xce\xcd\x94\xc8\x11\x98"
3760 "\x55\xef\x52\x97\xca\x67\xe9\xf3"
3761 "\xe7\xff\x72\xb1\xe9\x97\x85\xca"
3762 "\x0a\x7e\x77\x20\xc5\xb3\x6d\xc6"
3763 "\xd7\x2c\xac\x95\x74\xc8\xcb\xbc"
3764 "\x2f\x80\x1e\x23\xe5\x6f\xd3\x44"
3765 "\xb0\x7f\x22\x15\x4b\xeb\xa0\xf0"
3766 "\x8c\xe8\x89\x1e\x64\x3e\xd9\x95"
3767 "\xc9\x4d\x9a\x69\xc9\xf1\xb5\xf4"
3768 "\x99\x02\x7a\x78\x57\x2a\xee\xbd"
3769 "\x74\xd2\x0c\xc3\x98\x81\xc2\x13"
3770 "\xee\x77\x0b\x10\x10\xe4\xbe\xa7"
3771 "\x18\x84\x69\x77\xae\x11\x9f\x7a"
3772 "\x02\x3a\xb5\x8c\xca\x0a\xd7\x52"
3773 "\xaf\xe6\x56\xbb\x3c\x17\x25\x6a"
3774 "\x9f\x6e\x9b\xf1\x9f\xdd\x5a\x38"
3775 "\xfc\x82\xbb\xe8\x72\xc5\x53\x9e"
3776 "\xdb\x60\x9e\xf4\xf7\x9c\x20\x3e"
3777 "\xbb\x14\x0f\x2e\x58\x3c\xb2\xad"
3778 "\x15\xb4\xaa\x5b\x65\x50\x16\xa8"
3779 "\x44\x92\x77\xdb\xd4\x77\xef\x2c"
3780 "\x8d\x6c\x01\x7d\xb7\x38\xb1\x8d"
3781 "\xeb\x4a\x42\x7d\x19\x23\xce\x3f"
3782 "\xf2\x62\x73\x57\x79\xa4\x18\xf2"
3783 "\x0a\x28\x2d\xf9\x20\x14\x7b\xea"
3784 "\xbe\x42\x1e\xe5\x31\x9d\x05\x68",
3785 .rlen = 512,
3786 }
3787};
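/*
 * Editor's note: in the XTS vectors above, .key holds two equal-size AES
 * keys back to back (klen 32 => XTS-AES-128: 16-byte data key followed by
 * the 16-byte tweak key), and .iv is the 128-bit tweak, i.e. the data-unit
 * (sector) number stored least-significant byte first.  Vectors 2 and 3 use
 * data unit number 0x3333333333, hence the "\x33\x33\x33\x33\x33" prefix.
 * A minimal sketch of that encoding, assuming the usual byteorder helpers:
 */
static void xts_data_unit_to_iv(u64 data_unit, u8 iv[16])
{
	__le64 t = cpu_to_le64(data_unit);

	memset(iv, 0, 16);
	memcpy(iv, &t, sizeof(t));	/* low 64 bits, LSB first; upper half stays zero */
}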
3788
3789static struct cipher_testvec aes_xts_dec_tv_template[] = {
3790 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
3791 { /* XTS-AES 1 */
3792 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
3793 "\x00\x00\x00\x00\x00\x00\x00\x00"
3794 "\x00\x00\x00\x00\x00\x00\x00\x00"
3795 "\x00\x00\x00\x00\x00\x00\x00\x00",
3796 .klen = 32,
3797 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3798 "\x00\x00\x00\x00\x00\x00\x00\x00",
3799 .input = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
3800 "\x9b\x9f\xe9\xa3\xea\xdd\xa6\x92"
3801 "\xcd\x43\xd2\xf5\x95\x98\xed\x85"
3802 "\x8c\x02\xc2\x65\x2f\xbf\x92\x2e",
3803 .ilen = 32,
3804 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
3805 "\x00\x00\x00\x00\x00\x00\x00\x00"
3806 "\x00\x00\x00\x00\x00\x00\x00\x00"
3807 "\x00\x00\x00\x00\x00\x00\x00\x00",
3808 .rlen = 32,
3809 }, { /* XTS-AES 2 */
3810 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
3811 "\x11\x11\x11\x11\x11\x11\x11\x11"
3812 "\x22\x22\x22\x22\x22\x22\x22\x22"
3813 "\x22\x22\x22\x22\x22\x22\x22\x22",
3814 .klen = 32,
3815 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3816 "\x00\x00\x00\x00\x00\x00\x00\x00",
3817 .input = "\xc4\x54\x18\x5e\x6a\x16\x93\x6e"
3818 "\x39\x33\x40\x38\xac\xef\x83\x8b"
3819 "\xfb\x18\x6f\xff\x74\x80\xad\xc4"
3820 "\x28\x93\x82\xec\xd6\xd3\x94\xf0",
3821 .ilen = 32,
3822 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
3823 "\x44\x44\x44\x44\x44\x44\x44\x44"
3824 "\x44\x44\x44\x44\x44\x44\x44\x44"
3825 "\x44\x44\x44\x44\x44\x44\x44\x44",
3826 .rlen = 32,
3827 }, { /* XTS-AES 3 */
3828 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
3829 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
3830 "\x22\x22\x22\x22\x22\x22\x22\x22"
3831 "\x22\x22\x22\x22\x22\x22\x22\x22",
3832 .klen = 32,
3833 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3834 "\x00\x00\x00\x00\x00\x00\x00\x00",
3835 .input = "\xaf\x85\x33\x6b\x59\x7a\xfc\x1a"
3836 "\x90\x0b\x2e\xb2\x1e\xc9\x49\xd2"
3837 "\x92\xdf\x4c\x04\x7e\x0b\x21\x53"
3838 "\x21\x86\xa5\x97\x1a\x22\x7a\x89",
3839 .ilen = 32,
3840 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
3841 "\x44\x44\x44\x44\x44\x44\x44\x44"
3842 "\x44\x44\x44\x44\x44\x44\x44\x44"
3843 "\x44\x44\x44\x44\x44\x44\x44\x44",
3844 .rlen = 32,
3845 }, { /* XTS-AES 4 */
3846 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
3847 "\x23\x53\x60\x28\x74\x71\x35\x26"
3848 "\x31\x41\x59\x26\x53\x58\x97\x93"
3849 "\x23\x84\x62\x64\x33\x83\x27\x95",
3850 .klen = 32,
3851 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3852 "\x00\x00\x00\x00\x00\x00\x00\x00",
3853 .input = "\x27\xa7\x47\x9b\xef\xa1\xd4\x76"
3854 "\x48\x9f\x30\x8c\xd4\xcf\xa6\xe2"
3855 "\xa9\x6e\x4b\xbe\x32\x08\xff\x25"
3856 "\x28\x7d\xd3\x81\x96\x16\xe8\x9c"
3857 "\xc7\x8c\xf7\xf5\xe5\x43\x44\x5f"
3858 "\x83\x33\xd8\xfa\x7f\x56\x00\x00"
3859 "\x05\x27\x9f\xa5\xd8\xb5\xe4\xad"
3860 "\x40\xe7\x36\xdd\xb4\xd3\x54\x12"
3861 "\x32\x80\x63\xfd\x2a\xab\x53\xe5"
3862 "\xea\x1e\x0a\x9f\x33\x25\x00\xa5"
3863 "\xdf\x94\x87\xd0\x7a\x5c\x92\xcc"
3864 "\x51\x2c\x88\x66\xc7\xe8\x60\xce"
3865 "\x93\xfd\xf1\x66\xa2\x49\x12\xb4"
3866 "\x22\x97\x61\x46\xae\x20\xce\x84"
3867 "\x6b\xb7\xdc\x9b\xa9\x4a\x76\x7a"
3868 "\xae\xf2\x0c\x0d\x61\xad\x02\x65"
3869 "\x5e\xa9\x2d\xc4\xc4\xe4\x1a\x89"
3870 "\x52\xc6\x51\xd3\x31\x74\xbe\x51"
3871 "\xa1\x0c\x42\x11\x10\xe6\xd8\x15"
3872 "\x88\xed\xe8\x21\x03\xa2\x52\xd8"
3873 "\xa7\x50\xe8\x76\x8d\xef\xff\xed"
3874 "\x91\x22\x81\x0a\xae\xb9\x9f\x91"
3875 "\x72\xaf\x82\xb6\x04\xdc\x4b\x8e"
3876 "\x51\xbc\xb0\x82\x35\xa6\xf4\x34"
3877 "\x13\x32\xe4\xca\x60\x48\x2a\x4b"
3878 "\xa1\xa0\x3b\x3e\x65\x00\x8f\xc5"
3879 "\xda\x76\xb7\x0b\xf1\x69\x0d\xb4"
3880 "\xea\xe2\x9c\x5f\x1b\xad\xd0\x3c"
3881 "\x5c\xcf\x2a\x55\xd7\x05\xdd\xcd"
3882 "\x86\xd4\x49\x51\x1c\xeb\x7e\xc3"
3883 "\x0b\xf1\x2b\x1f\xa3\x5b\x91\x3f"
3884 "\x9f\x74\x7a\x8a\xfd\x1b\x13\x0e"
3885 "\x94\xbf\xf9\x4e\xff\xd0\x1a\x91"
3886 "\x73\x5c\xa1\x72\x6a\xcd\x0b\x19"
3887 "\x7c\x4e\x5b\x03\x39\x36\x97\xe1"
3888 "\x26\x82\x6f\xb6\xbb\xde\x8e\xcc"
3889 "\x1e\x08\x29\x85\x16\xe2\xc9\xed"
3890 "\x03\xff\x3c\x1b\x78\x60\xf6\xde"
3891 "\x76\xd4\xce\xcd\x94\xc8\x11\x98"
3892 "\x55\xef\x52\x97\xca\x67\xe9\xf3"
3893 "\xe7\xff\x72\xb1\xe9\x97\x85\xca"
3894 "\x0a\x7e\x77\x20\xc5\xb3\x6d\xc6"
3895 "\xd7\x2c\xac\x95\x74\xc8\xcb\xbc"
3896 "\x2f\x80\x1e\x23\xe5\x6f\xd3\x44"
3897 "\xb0\x7f\x22\x15\x4b\xeb\xa0\xf0"
3898 "\x8c\xe8\x89\x1e\x64\x3e\xd9\x95"
3899 "\xc9\x4d\x9a\x69\xc9\xf1\xb5\xf4"
3900 "\x99\x02\x7a\x78\x57\x2a\xee\xbd"
3901 "\x74\xd2\x0c\xc3\x98\x81\xc2\x13"
3902 "\xee\x77\x0b\x10\x10\xe4\xbe\xa7"
3903 "\x18\x84\x69\x77\xae\x11\x9f\x7a"
3904 "\x02\x3a\xb5\x8c\xca\x0a\xd7\x52"
3905 "\xaf\xe6\x56\xbb\x3c\x17\x25\x6a"
3906 "\x9f\x6e\x9b\xf1\x9f\xdd\x5a\x38"
3907 "\xfc\x82\xbb\xe8\x72\xc5\x53\x9e"
3908 "\xdb\x60\x9e\xf4\xf7\x9c\x20\x3e"
3909 "\xbb\x14\x0f\x2e\x58\x3c\xb2\xad"
3910 "\x15\xb4\xaa\x5b\x65\x50\x16\xa8"
3911 "\x44\x92\x77\xdb\xd4\x77\xef\x2c"
3912 "\x8d\x6c\x01\x7d\xb7\x38\xb1\x8d"
3913 "\xeb\x4a\x42\x7d\x19\x23\xce\x3f"
3914 "\xf2\x62\x73\x57\x79\xa4\x18\xf2"
3915 "\x0a\x28\x2d\xf9\x20\x14\x7b\xea"
3916 "\xbe\x42\x1e\xe5\x31\x9d\x05\x68",
3917 .ilen = 512,
3918 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
3919 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3920 "\x10\x11\x12\x13\x14\x15\x16\x17"
3921 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3922 "\x20\x21\x22\x23\x24\x25\x26\x27"
3923 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3924 "\x30\x31\x32\x33\x34\x35\x36\x37"
3925 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3926 "\x40\x41\x42\x43\x44\x45\x46\x47"
3927 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3928 "\x50\x51\x52\x53\x54\x55\x56\x57"
3929 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3930 "\x60\x61\x62\x63\x64\x65\x66\x67"
3931 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3932 "\x70\x71\x72\x73\x74\x75\x76\x77"
3933 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3934 "\x80\x81\x82\x83\x84\x85\x86\x87"
3935 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3936 "\x90\x91\x92\x93\x94\x95\x96\x97"
3937 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3938 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3939 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3940 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3941 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3942 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3943 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3944 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3945 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3946 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3947 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3948 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3949 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
3950 "\x00\x01\x02\x03\x04\x05\x06\x07"
3951 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3952 "\x10\x11\x12\x13\x14\x15\x16\x17"
3953 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3954 "\x20\x21\x22\x23\x24\x25\x26\x27"
3955 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3956 "\x30\x31\x32\x33\x34\x35\x36\x37"
3957 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3958 "\x40\x41\x42\x43\x44\x45\x46\x47"
3959 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3960 "\x50\x51\x52\x53\x54\x55\x56\x57"
3961 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3962 "\x60\x61\x62\x63\x64\x65\x66\x67"
3963 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3964 "\x70\x71\x72\x73\x74\x75\x76\x77"
3965 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3966 "\x80\x81\x82\x83\x84\x85\x86\x87"
3967 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3968 "\x90\x91\x92\x93\x94\x95\x96\x97"
3969 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3970 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3971 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3972 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3973 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3974 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3975 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3976 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3977 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3978 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3979 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3980 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3981 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
3982 .rlen = 512,
3983 }
3984};
3985
3986
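/*
 * Editor's note: the CTR templates below use the RFC 3686 layout expected by
 * the kernel's rfc3686 wrapper: the last four bytes of .key are the nonce
 * (so klen = AES key size + 4) and .iv is the 8-byte explicit IV.  The
 * 16-byte counter block actually fed to AES is nonce || IV || 32-bit block
 * counter, with the counter stored big-endian and starting at 1.  A minimal
 * sketch of assembling the first counter block:
 */
static void rfc3686_first_ctrblk(const u8 *nonce4, const u8 *iv8, u8 ctrblk[16])
{
	__be32 one = cpu_to_be32(1);

	memcpy(ctrblk, nonce4, 4);	/* 32-bit nonce from the tail of .key */
	memcpy(ctrblk + 4, iv8, 8);	/* 64-bit explicit IV (.iv) */
	memcpy(ctrblk + 12, &one, 4);	/* block counter begins at 1 */
}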
3987static struct cipher_testvec aes_ctr_enc_tv_template[] = {
3988 { /* From RFC 3686 */
3989 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
3990 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
3991 "\x00\x00\x00\x30",
3992 .klen = 20,
3993 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
3994 .input = "Single block msg",
3995 .ilen = 16,
3996 .result = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
3997 "\x2d\x61\x75\xa3\x26\x13\x11\xb8",
3998 .rlen = 16,
3999 }, {
4000 .key = "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
4001 "\x43\xd6\xce\x1f\x32\x53\x91\x63"
4002 "\x00\x6c\xb6\xdb",
4003 .klen = 20,
4004 .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
4005 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
4006 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4007 "\x10\x11\x12\x13\x14\x15\x16\x17"
4008 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
4009 .ilen = 32,
4010 .result = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
4011 "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
4012 "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
4013 "\xfc\xe6\x30\xdf\x91\x41\xbe\x28",
4014 .rlen = 32,
4015 }, {
4016 .key = "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
4017 "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
4018 "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
4019 "\x00\x00\x00\x48",
4020 .klen = 28,
4021 .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
4022 .input = "Single block msg",
4023 .ilen = 16,
4024 .result = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
4025 "\x4e\x79\x35\xa0\x03\xcb\xe9\x28",
4026 .rlen = 16,
4027 }, {
4028 .key = "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
4029 "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
4030 "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
4031 "\x00\x96\xb0\x3b",
4032 .klen = 28,
4033 .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
4034 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
4035 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4036 "\x10\x11\x12\x13\x14\x15\x16\x17"
4037 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
4038 .ilen = 32,
4039 .result = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
4040 "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
4041 "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
4042 "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00",
4043 .rlen = 32,
4044 }, {
4045 .key = "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
4046 "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
4047 "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
4048 "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
4049 "\x00\x00\x00\x60",
4050 .klen = 36,
4051 .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
4052 .input = "Single block msg",
4053 .ilen = 16,
4054 .result = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
4055 "\x56\x08\x63\xdc\x71\xe3\xe0\xc0",
4056 .rlen = 16,
4057 }, {
4058 .key = "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
4059 "\x07\x96\x36\x58\x79\xef\xf8\x86"
4060 "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
4061 "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
4062 "\x00\xfa\xac\x24",
4063 .klen = 36,
4064 .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
4065 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
4066 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4067 "\x10\x11\x12\x13\x14\x15\x16\x17"
4068 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
4069 .ilen = 32,
4070 .result = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
4071 "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
4072 "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
4073 "\x55\x30\x83\x1d\x93\x44\xaf\x1c",
4074 .rlen = 32,
4075 }, {
4076 // generated using Crypto++
4077 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4078 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4079 "\x10\x11\x12\x13\x14\x15\x16\x17"
4080 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
4081 "\x00\x00\x00\x00",
4082 .klen = 32 + 4,
4083 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
4084 .input =
4085 "\x00\x01\x02\x03\x04\x05\x06\x07"
4086 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4087 "\x10\x11\x12\x13\x14\x15\x16\x17"
4088 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
4089 "\x20\x21\x22\x23\x24\x25\x26\x27"
4090 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
4091 "\x30\x31\x32\x33\x34\x35\x36\x37"
4092 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
4093 "\x40\x41\x42\x43\x44\x45\x46\x47"
4094 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
4095 "\x50\x51\x52\x53\x54\x55\x56\x57"
4096 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
4097 "\x60\x61\x62\x63\x64\x65\x66\x67"
4098 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
4099 "\x70\x71\x72\x73\x74\x75\x76\x77"
4100 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
4101 "\x80\x81\x82\x83\x84\x85\x86\x87"
4102 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
4103 "\x90\x91\x92\x93\x94\x95\x96\x97"
4104 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
4105 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
4106 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
4107 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
4108 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
4109 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
4110 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
4111 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
4112 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
4113 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
4114 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
4115 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
4116 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
4117 "\x00\x03\x06\x09\x0c\x0f\x12\x15"
4118 "\x18\x1b\x1e\x21\x24\x27\x2a\x2d"
4119 "\x30\x33\x36\x39\x3c\x3f\x42\x45"
4120 "\x48\x4b\x4e\x51\x54\x57\x5a\x5d"
4121 "\x60\x63\x66\x69\x6c\x6f\x72\x75"
4122 "\x78\x7b\x7e\x81\x84\x87\x8a\x8d"
4123 "\x90\x93\x96\x99\x9c\x9f\xa2\xa5"
4124 "\xa8\xab\xae\xb1\xb4\xb7\xba\xbd"
4125 "\xc0\xc3\xc6\xc9\xcc\xcf\xd2\xd5"
4126 "\xd8\xdb\xde\xe1\xe4\xe7\xea\xed"
4127 "\xf0\xf3\xf6\xf9\xfc\xff\x02\x05"
4128 "\x08\x0b\x0e\x11\x14\x17\x1a\x1d"
4129 "\x20\x23\x26\x29\x2c\x2f\x32\x35"
4130 "\x38\x3b\x3e\x41\x44\x47\x4a\x4d"
4131 "\x50\x53\x56\x59\x5c\x5f\x62\x65"
4132 "\x68\x6b\x6e\x71\x74\x77\x7a\x7d"
4133 "\x80\x83\x86\x89\x8c\x8f\x92\x95"
4134 "\x98\x9b\x9e\xa1\xa4\xa7\xaa\xad"
4135 "\xb0\xb3\xb6\xb9\xbc\xbf\xc2\xc5"
4136 "\xc8\xcb\xce\xd1\xd4\xd7\xda\xdd"
4137 "\xe0\xe3\xe6\xe9\xec\xef\xf2\xf5"
4138 "\xf8\xfb\xfe\x01\x04\x07\x0a\x0d"
4139 "\x10\x13\x16\x19\x1c\x1f\x22\x25"
4140 "\x28\x2b\x2e\x31\x34\x37\x3a\x3d"
4141 "\x40\x43\x46\x49\x4c\x4f\x52\x55"
4142 "\x58\x5b\x5e\x61\x64\x67\x6a\x6d"
4143 "\x70\x73\x76\x79\x7c\x7f\x82\x85"
4144 "\x88\x8b\x8e\x91\x94\x97\x9a\x9d"
4145 "\xa0\xa3\xa6\xa9\xac\xaf\xb2\xb5"
4146 "\xb8\xbb\xbe\xc1\xc4\xc7\xca\xcd"
4147 "\xd0\xd3\xd6\xd9\xdc\xdf\xe2\xe5"
4148 "\xe8\xeb\xee\xf1\xf4\xf7\xfa\xfd"
4149 "\x00\x05\x0a\x0f\x14\x19\x1e\x23"
4150 "\x28\x2d\x32\x37\x3c\x41\x46\x4b"
4151 "\x50\x55\x5a\x5f\x64\x69\x6e\x73"
4152 "\x78\x7d\x82\x87\x8c\x91\x96\x9b"
4153 "\xa0\xa5\xaa\xaf\xb4\xb9\xbe\xc3"
4154 "\xc8\xcd\xd2\xd7\xdc\xe1\xe6\xeb"
4155 "\xf0\xf5\xfa\xff\x04\x09\x0e\x13"
4156 "\x18\x1d\x22\x27\x2c\x31\x36\x3b"
4157 "\x40\x45\x4a\x4f\x54\x59\x5e\x63"
4158 "\x68\x6d\x72\x77\x7c\x81\x86\x8b"
4159 "\x90\x95\x9a\x9f\xa4\xa9\xae\xb3"
4160 "\xb8\xbd\xc2\xc7\xcc\xd1\xd6\xdb"
4161 "\xe0\xe5\xea\xef\xf4\xf9\xfe\x03"
4162 "\x08\x0d\x12\x17\x1c\x21\x26\x2b"
4163 "\x30\x35\x3a\x3f\x44\x49\x4e\x53"
4164 "\x58\x5d\x62\x67\x6c\x71\x76\x7b"
4165 "\x80\x85\x8a\x8f\x94\x99\x9e\xa3"
4166 "\xa8\xad\xb2\xb7\xbc\xc1\xc6\xcb"
4167 "\xd0\xd5\xda\xdf\xe4\xe9\xee\xf3"
4168 "\xf8\xfd\x02\x07\x0c\x11\x16\x1b"
4169 "\x20\x25\x2a\x2f\x34\x39\x3e\x43"
4170 "\x48\x4d\x52\x57\x5c\x61\x66\x6b"
4171 "\x70\x75\x7a\x7f\x84\x89\x8e\x93"
4172 "\x98\x9d\xa2\xa7\xac\xb1\xb6\xbb"
4173 "\xc0\xc5\xca\xcf\xd4\xd9\xde\xe3"
4174 "\xe8\xed\xf2\xf7\xfc\x01\x06\x0b"
4175 "\x10\x15\x1a\x1f\x24\x29\x2e\x33"
4176 "\x38\x3d\x42\x47\x4c\x51\x56\x5b"
4177 "\x60\x65\x6a\x6f\x74\x79\x7e\x83"
4178 "\x88\x8d\x92\x97\x9c\xa1\xa6\xab"
4179 "\xb0\xb5\xba\xbf\xc4\xc9\xce\xd3"
4180 "\xd8\xdd\xe2\xe7\xec\xf1\xf6\xfb"
4181 "\x00\x07\x0e\x15\x1c\x23\x2a\x31"
4182 "\x38\x3f\x46\x4d\x54\x5b\x62\x69"
4183 "\x70\x77\x7e\x85\x8c\x93\x9a\xa1"
4184 "\xa8\xaf\xb6\xbd\xc4\xcb\xd2\xd9"
4185 "\xe0\xe7\xee\xf5\xfc\x03\x0a\x11"
4186 "\x18\x1f\x26\x2d\x34\x3b\x42\x49"
4187 "\x50\x57\x5e\x65\x6c\x73\x7a\x81"
4188 "\x88\x8f\x96\x9d\xa4\xab\xb2\xb9"
4189 "\xc0\xc7\xce\xd5\xdc\xe3\xea\xf1"
4190 "\xf8\xff\x06\x0d\x14\x1b\x22\x29"
4191 "\x30\x37\x3e\x45\x4c\x53\x5a\x61"
4192 "\x68\x6f\x76\x7d\x84\x8b\x92\x99"
4193 "\xa0\xa7\xae\xb5\xbc\xc3\xca\xd1"
4194 "\xd8\xdf\xe6\xed\xf4\xfb\x02\x09"
4195 "\x10\x17\x1e\x25\x2c\x33\x3a\x41"
4196 "\x48\x4f\x56\x5d\x64\x6b\x72\x79"
4197 "\x80\x87\x8e\x95\x9c\xa3\xaa\xb1"
4198 "\xb8\xbf\xc6\xcd\xd4\xdb\xe2\xe9"
4199 "\xf0\xf7\xfe\x05\x0c\x13\x1a\x21"
4200 "\x28\x2f\x36\x3d\x44\x4b\x52\x59"
4201 "\x60\x67\x6e\x75\x7c\x83\x8a\x91"
4202 "\x98\x9f\xa6\xad\xb4\xbb\xc2\xc9"
4203 "\xd0\xd7\xde\xe5\xec\xf3\xfa\x01"
4204 "\x08\x0f\x16\x1d\x24\x2b\x32\x39"
4205 "\x40\x47\x4e\x55\x5c\x63\x6a\x71"
4206 "\x78\x7f\x86\x8d\x94\x9b\xa2\xa9"
4207 "\xb0\xb7\xbe\xc5\xcc\xd3\xda\xe1"
4208 "\xe8\xef\xf6\xfd\x04\x0b\x12\x19"
4209 "\x20\x27\x2e\x35\x3c\x43\x4a\x51"
4210 "\x58\x5f\x66\x6d\x74\x7b\x82\x89"
4211 "\x90\x97\x9e\xa5\xac\xb3\xba\xc1"
4212 "\xc8\xcf\xd6\xdd\xe4\xeb\xf2\xf9"
4213 "\x00\x09\x12\x1b\x24\x2d\x36\x3f"
4214 "\x48\x51\x5a\x63\x6c\x75\x7e\x87"
4215 "\x90\x99\xa2\xab\xb4\xbd\xc6\xcf"
4216 "\xd8\xe1\xea\xf3\xfc\x05\x0e\x17"
4217 "\x20\x29\x32\x3b\x44\x4d\x56\x5f"
4218 "\x68\x71\x7a\x83\x8c\x95\x9e\xa7"
4219 "\xb0\xb9\xc2\xcb\xd4\xdd\xe6\xef"
4220 "\xf8\x01\x0a\x13\x1c\x25\x2e\x37"
4221 "\x40\x49\x52\x5b\x64\x6d\x76\x7f"
4222 "\x88\x91\x9a\xa3\xac\xb5\xbe\xc7"
4223 "\xd0\xd9\xe2\xeb\xf4\xfd\x06\x0f"
4224 "\x18\x21\x2a\x33\x3c\x45\x4e\x57"
4225 "\x60\x69\x72\x7b\x84\x8d\x96\x9f"
4226 "\xa8\xb1\xba\xc3\xcc\xd5\xde\xe7"
4227 "\xf0\xf9\x02\x0b\x14\x1d\x26\x2f"
4228 "\x38\x41\x4a\x53\x5c\x65\x6e\x77"
4229 "\x80\x89\x92\x9b\xa4\xad\xb6\xbf"
4230 "\xc8\xd1\xda\xe3\xec\xf5\xfe\x07"
4231 "\x10\x19\x22\x2b\x34\x3d\x46\x4f"
4232 "\x58\x61\x6a\x73\x7c\x85\x8e\x97"
4233 "\xa0\xa9\xb2\xbb\xc4\xcd\xd6\xdf"
4234 "\xe8\xf1\xfa\x03\x0c\x15\x1e\x27"
4235 "\x30\x39\x42\x4b\x54\x5d\x66\x6f"
4236 "\x78\x81\x8a\x93\x9c\xa5\xae\xb7"
4237 "\xc0\xc9\xd2\xdb\xe4\xed\xf6\xff"
4238 "\x08\x11\x1a\x23\x2c\x35\x3e\x47"
4239 "\x50\x59\x62\x6b\x74\x7d\x86\x8f"
4240 "\x98\xa1\xaa\xb3\xbc\xc5\xce\xd7"
4241 "\xe0\xe9\xf2\xfb\x04\x0d\x16\x1f"
4242 "\x28\x31\x3a\x43\x4c\x55\x5e\x67"
4243 "\x70\x79\x82\x8b\x94\x9d\xa6\xaf"
4244 "\xb8\xc1\xca\xd3\xdc\xe5\xee\xf7"
4245 "\x00\x0b\x16\x21\x2c\x37\x42\x4d"
4246 "\x58\x63\x6e\x79\x84\x8f\x9a\xa5"
4247 "\xb0\xbb\xc6\xd1\xdc\xe7\xf2\xfd"
4248 "\x08\x13\x1e\x29\x34\x3f\x4a\x55"
4249 "\x60\x6b\x76\x81\x8c\x97\xa2\xad"
4250 "\xb8\xc3\xce\xd9\xe4\xef\xfa\x05"
4251 "\x10\x1b\x26\x31\x3c\x47\x52\x5d"
4252 "\x68\x73\x7e\x89\x94\x9f\xaa\xb5"
4253 "\xc0\xcb\xd6\xe1\xec\xf7\x02\x0d"
4254 "\x18\x23\x2e\x39\x44\x4f\x5a\x65"
4255 "\x70\x7b\x86\x91\x9c\xa7\xb2\xbd"
4256 "\xc8\xd3\xde\xe9\xf4\xff\x0a\x15"
4257 "\x20\x2b\x36\x41\x4c\x57\x62\x6d"
4258 "\x78\x83\x8e\x99\xa4\xaf\xba\xc5"
4259 "\xd0\xdb\xe6\xf1\xfc\x07\x12\x1d"
4260 "\x28\x33\x3e\x49\x54\x5f\x6a\x75"
4261 "\x80\x8b\x96\xa1\xac\xb7\xc2\xcd"
4262 "\xd8\xe3\xee\xf9\x04\x0f\x1a\x25"
4263 "\x30\x3b\x46\x51\x5c\x67\x72\x7d"
4264 "\x88\x93\x9e\xa9\xb4\xbf\xca\xd5"
4265 "\xe0\xeb\xf6\x01\x0c\x17\x22\x2d"
4266 "\x38\x43\x4e\x59\x64\x6f\x7a\x85"
4267 "\x90\x9b\xa6\xb1\xbc\xc7\xd2\xdd"
4268 "\xe8\xf3\xfe\x09\x14\x1f\x2a\x35"
4269 "\x40\x4b\x56\x61\x6c\x77\x82\x8d"
4270 "\x98\xa3\xae\xb9\xc4\xcf\xda\xe5"
4271 "\xf0\xfb\x06\x11\x1c\x27\x32\x3d"
4272 "\x48\x53\x5e\x69\x74\x7f\x8a\x95"
4273 "\xa0\xab\xb6\xc1\xcc\xd7\xe2\xed"
4274 "\xf8\x03\x0e\x19\x24\x2f\x3a\x45"
4275 "\x50\x5b\x66\x71\x7c\x87\x92\x9d"
4276 "\xa8\xb3\xbe\xc9\xd4\xdf\xea\xf5"
4277 "\x00\x0d\x1a\x27\x34\x41\x4e\x5b"
4278 "\x68\x75\x82\x8f\x9c\xa9\xb6\xc3"
4279 "\xd0\xdd\xea\xf7\x04\x11\x1e\x2b"
4280 "\x38\x45\x52\x5f\x6c\x79\x86\x93"
4281 "\xa0\xad\xba\xc7\xd4\xe1\xee\xfb"
4282 "\x08\x15\x22\x2f\x3c\x49\x56\x63"
4283 "\x70\x7d\x8a\x97\xa4\xb1\xbe\xcb"
4284 "\xd8\xe5\xf2\xff\x0c\x19\x26\x33"
4285 "\x40\x4d\x5a\x67\x74\x81\x8e\x9b"
4286 "\xa8\xb5\xc2\xcf\xdc\xe9\xf6\x03"
4287 "\x10\x1d\x2a\x37\x44\x51\x5e\x6b"
4288 "\x78\x85\x92\x9f\xac\xb9\xc6\xd3"
4289 "\xe0\xed\xfa\x07\x14\x21\x2e\x3b"
4290 "\x48\x55\x62\x6f\x7c\x89\x96\xa3"
4291 "\xb0\xbd\xca\xd7\xe4\xf1\xfe\x0b"
4292 "\x18\x25\x32\x3f\x4c\x59\x66\x73"
4293 "\x80\x8d\x9a\xa7\xb4\xc1\xce\xdb"
4294 "\xe8\xf5\x02\x0f\x1c\x29\x36\x43"
4295 "\x50\x5d\x6a\x77\x84\x91\x9e\xab"
4296 "\xb8\xc5\xd2\xdf\xec\xf9\x06\x13"
4297 "\x20\x2d\x3a\x47\x54\x61\x6e\x7b"
4298 "\x88\x95\xa2\xaf\xbc\xc9\xd6\xe3"
4299 "\xf0\xfd\x0a\x17\x24\x31\x3e\x4b"
4300 "\x58\x65\x72\x7f\x8c\x99\xa6\xb3"
4301 "\xc0\xcd\xda\xe7\xf4\x01\x0e\x1b"
4302 "\x28\x35\x42\x4f\x5c\x69\x76\x83"
4303 "\x90\x9d\xaa\xb7\xc4\xd1\xde\xeb"
4304 "\xf8\x05\x12\x1f\x2c\x39\x46\x53"
4305 "\x60\x6d\x7a\x87\x94\xa1\xae\xbb"
4306 "\xc8\xd5\xe2\xef\xfc\x09\x16\x23"
4307 "\x30\x3d\x4a\x57\x64\x71\x7e\x8b"
4308 "\x98\xa5\xb2\xbf\xcc\xd9\xe6\xf3"
4309 "\x00\x0f\x1e\x2d\x3c\x4b\x5a\x69"
4310 "\x78\x87\x96\xa5\xb4\xc3\xd2\xe1"
4311 "\xf0\xff\x0e\x1d\x2c\x3b\x4a\x59"
4312 "\x68\x77\x86\x95\xa4\xb3\xc2\xd1"
4313 "\xe0\xef\xfe\x0d\x1c\x2b\x3a\x49"
4314 "\x58\x67\x76\x85\x94\xa3\xb2\xc1"
4315 "\xd0\xdf\xee\xfd\x0c\x1b\x2a\x39"
4316 "\x48\x57\x66\x75\x84\x93\xa2\xb1"
4317 "\xc0\xcf\xde\xed\xfc\x0b\x1a\x29"
4318 "\x38\x47\x56\x65\x74\x83\x92\xa1"
4319 "\xb0\xbf\xce\xdd\xec\xfb\x0a\x19"
4320 "\x28\x37\x46\x55\x64\x73\x82\x91"
4321 "\xa0\xaf\xbe\xcd\xdc\xeb\xfa\x09"
4322 "\x18\x27\x36\x45\x54\x63\x72\x81"
4323 "\x90\x9f\xae\xbd\xcc\xdb\xea\xf9"
4324 "\x08\x17\x26\x35\x44\x53\x62\x71"
4325 "\x80\x8f\x9e\xad\xbc\xcb\xda\xe9"
4326 "\xf8\x07\x16\x25\x34\x43\x52\x61"
4327 "\x70\x7f\x8e\x9d\xac\xbb\xca\xd9"
4328 "\xe8\xf7\x06\x15\x24\x33\x42\x51"
4329 "\x60\x6f\x7e\x8d\x9c\xab\xba\xc9"
4330 "\xd8\xe7\xf6\x05\x14\x23\x32\x41"
4331 "\x50\x5f\x6e\x7d\x8c\x9b\xaa\xb9"
4332 "\xc8\xd7\xe6\xf5\x04\x13\x22\x31"
4333 "\x40\x4f\x5e\x6d\x7c\x8b\x9a\xa9"
4334 "\xb8\xc7\xd6\xe5\xf4\x03\x12\x21"
4335 "\x30\x3f\x4e\x5d\x6c\x7b\x8a\x99"
4336 "\xa8\xb7\xc6\xd5\xe4\xf3\x02\x11"
4337 "\x20\x2f\x3e\x4d\x5c\x6b\x7a\x89"
4338 "\x98\xa7\xb6\xc5\xd4\xe3\xf2\x01"
4339 "\x10\x1f\x2e\x3d\x4c\x5b\x6a\x79"
4340 "\x88\x97\xa6\xb5\xc4\xd3\xe2\xf1"
4341 "\x00\x11\x22\x33\x44\x55\x66\x77"
4342 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff"
4343 "\x10\x21\x32\x43\x54\x65\x76\x87"
4344 "\x98\xa9\xba\xcb\xdc\xed\xfe\x0f"
4345 "\x20\x31\x42\x53\x64\x75\x86\x97"
4346 "\xa8\xb9\xca\xdb\xec\xfd\x0e\x1f"
4347 "\x30\x41\x52\x63\x74\x85\x96\xa7"
4348 "\xb8\xc9\xda\xeb\xfc\x0d\x1e\x2f"
4349 "\x40\x51\x62\x73\x84\x95\xa6\xb7"
4350 "\xc8\xd9\xea\xfb\x0c\x1d\x2e\x3f"
4351 "\x50\x61\x72\x83\x94\xa5\xb6\xc7"
4352 "\xd8\xe9\xfa\x0b\x1c\x2d\x3e\x4f"
4353 "\x60\x71\x82\x93\xa4\xb5\xc6\xd7"
4354 "\xe8\xf9\x0a\x1b\x2c\x3d\x4e\x5f"
4355 "\x70\x81\x92\xa3\xb4\xc5\xd6\xe7"
4356 "\xf8\x09\x1a\x2b\x3c\x4d\x5e\x6f"
4357 "\x80\x91\xa2\xb3\xc4\xd5\xe6\xf7"
4358 "\x08\x19\x2a\x3b\x4c\x5d\x6e\x7f"
4359 "\x90\xa1\xb2\xc3\xd4\xe5\xf6\x07"
4360 "\x18\x29\x3a\x4b\x5c\x6d\x7e\x8f"
4361 "\xa0\xb1\xc2\xd3\xe4\xf5\x06\x17"
4362 "\x28\x39\x4a\x5b\x6c\x7d\x8e\x9f"
4363 "\xb0\xc1\xd2\xe3\xf4\x05\x16\x27"
4364 "\x38\x49\x5a\x6b\x7c\x8d\x9e\xaf"
4365 "\xc0\xd1\xe2\xf3\x04\x15\x26\x37"
4366 "\x48\x59\x6a\x7b\x8c\x9d\xae\xbf"
4367 "\xd0\xe1\xf2\x03\x14\x25\x36\x47"
4368 "\x58\x69\x7a\x8b\x9c\xad\xbe\xcf"
4369 "\xe0\xf1\x02\x13\x24\x35\x46\x57"
4370 "\x68\x79\x8a\x9b\xac\xbd\xce\xdf"
4371 "\xf0\x01\x12\x23\x34\x45\x56\x67"
4372 "\x78\x89\x9a\xab\xbc\xcd\xde\xef"
4373 "\x00\x13\x26\x39\x4c\x5f\x72\x85"
4374 "\x98\xab\xbe\xd1\xe4\xf7\x0a\x1d"
4375 "\x30\x43\x56\x69\x7c\x8f\xa2\xb5"
4376 "\xc8\xdb\xee\x01\x14\x27\x3a\x4d"
4377 "\x60\x73\x86\x99\xac\xbf\xd2\xe5"
4378 "\xf8\x0b\x1e\x31\x44\x57\x6a\x7d"
4379 "\x90\xa3\xb6\xc9\xdc\xef\x02\x15"
4380 "\x28\x3b\x4e\x61\x74\x87\x9a\xad"
4381 "\xc0\xd3\xe6\xf9\x0c\x1f\x32\x45"
4382 "\x58\x6b\x7e\x91\xa4\xb7\xca\xdd"
4383 "\xf0\x03\x16\x29\x3c\x4f\x62\x75"
4384 "\x88\x9b\xae\xc1\xd4\xe7\xfa\x0d"
4385 "\x20\x33\x46\x59\x6c\x7f\x92\xa5"
4386 "\xb8\xcb\xde\xf1\x04\x17\x2a\x3d"
4387 "\x50\x63\x76\x89\x9c\xaf\xc2\xd5"
4388 "\xe8\xfb\x0e\x21\x34\x47\x5a\x6d"
4389 "\x80\x93\xa6\xb9\xcc\xdf\xf2\x05"
4390 "\x18\x2b\x3e\x51\x64\x77\x8a\x9d"
4391 "\xb0\xc3\xd6\xe9\xfc\x0f\x22\x35"
4392 "\x48\x5b\x6e\x81\x94\xa7\xba\xcd"
4393 "\xe0\xf3\x06\x19\x2c\x3f\x52\x65"
4394 "\x78\x8b\x9e\xb1\xc4\xd7\xea\xfd"
4395 "\x10\x23\x36\x49\x5c\x6f\x82\x95"
4396 "\xa8\xbb\xce\xe1\xf4\x07\x1a\x2d"
4397 "\x40\x53\x66\x79\x8c\x9f\xb2\xc5"
4398 "\xd8\xeb\xfe\x11\x24\x37\x4a\x5d"
4399 "\x70\x83\x96\xa9\xbc\xcf\xe2\xf5"
4400 "\x08\x1b\x2e\x41\x54\x67\x7a\x8d"
4401 "\xa0\xb3\xc6\xd9\xec\xff\x12\x25"
4402 "\x38\x4b\x5e\x71\x84\x97\xaa\xbd"
4403 "\xd0\xe3\xf6\x09\x1c\x2f\x42\x55"
4404 "\x68\x7b\x8e\xa1\xb4\xc7\xda\xed"
4405 "\x00\x15\x2a\x3f\x54\x69\x7e\x93"
4406 "\xa8\xbd\xd2\xe7\xfc\x11\x26\x3b"
4407 "\x50\x65\x7a\x8f\xa4\xb9\xce\xe3"
4408 "\xf8\x0d\x22\x37\x4c\x61\x76\x8b"
4409 "\xa0\xb5\xca\xdf\xf4\x09\x1e\x33"
4410 "\x48\x5d\x72\x87\x9c\xb1\xc6\xdb"
4411 "\xf0\x05\x1a\x2f\x44\x59\x6e\x83"
4412 "\x98\xad\xc2\xd7\xec\x01\x16\x2b"
4413 "\x40\x55\x6a\x7f\x94\xa9\xbe\xd3"
4414 "\xe8\xfd\x12\x27\x3c\x51\x66\x7b"
4415 "\x90\xa5\xba\xcf\xe4\xf9\x0e\x23"
4416 "\x38\x4d\x62\x77\x8c\xa1\xb6\xcb"
4417 "\xe0\xf5\x0a\x1f\x34\x49\x5e\x73"
4418 "\x88\x9d\xb2\xc7\xdc\xf1\x06\x1b"
4419 "\x30\x45\x5a\x6f\x84\x99\xae\xc3"
4420 "\xd8\xed\x02\x17\x2c\x41\x56\x6b"
4421 "\x80\x95\xaa\xbf\xd4\xe9\xfe\x13"
4422 "\x28\x3d\x52\x67\x7c\x91\xa6\xbb"
4423 "\xd0\xe5\xfa\x0f\x24\x39\x4e\x63"
4424 "\x78\x8d\xa2\xb7\xcc\xe1\xf6\x0b"
4425 "\x20\x35\x4a\x5f\x74\x89\x9e\xb3"
4426 "\xc8\xdd\xf2\x07\x1c\x31\x46\x5b"
4427 "\x70\x85\x9a\xaf\xc4\xd9\xee\x03"
4428 "\x18\x2d\x42\x57\x6c\x81\x96\xab"
4429 "\xc0\xd5\xea\xff\x14\x29\x3e\x53"
4430 "\x68\x7d\x92\xa7\xbc\xd1\xe6\xfb"
4431 "\x10\x25\x3a\x4f\x64\x79\x8e\xa3"
4432 "\xb8\xcd\xe2\xf7\x0c\x21\x36\x4b"
4433 "\x60\x75\x8a\x9f\xb4\xc9\xde\xf3"
4434 "\x08\x1d\x32\x47\x5c\x71\x86\x9b"
4435 "\xb0\xc5\xda\xef\x04\x19\x2e\x43"
4436 "\x58\x6d\x82\x97\xac\xc1\xd6\xeb"
4437 "\x00\x17\x2e\x45\x5c\x73\x8a\xa1"
4438 "\xb8\xcf\xe6\xfd\x14\x2b\x42\x59"
4439 "\x70\x87\x9e\xb5\xcc\xe3\xfa\x11"
4440 "\x28\x3f\x56\x6d\x84\x9b\xb2\xc9"
4441 "\xe0\xf7\x0e\x25\x3c\x53\x6a\x81"
4442 "\x98\xaf\xc6\xdd\xf4\x0b\x22\x39"
4443 "\x50\x67\x7e\x95\xac\xc3\xda\xf1"
4444 "\x08\x1f\x36\x4d\x64\x7b\x92\xa9"
4445 "\xc0\xd7\xee\x05\x1c\x33\x4a\x61"
4446 "\x78\x8f\xa6\xbd\xd4\xeb\x02\x19"
4447 "\x30\x47\x5e\x75\x8c\xa3\xba\xd1"
4448 "\xe8\xff\x16\x2d\x44\x5b\x72\x89"
4449 "\xa0\xb7\xce\xe5\xfc\x13\x2a\x41"
4450 "\x58\x6f\x86\x9d\xb4\xcb\xe2\xf9"
4451 "\x10\x27\x3e\x55\x6c\x83\x9a\xb1"
4452 "\xc8\xdf\xf6\x0d\x24\x3b\x52\x69"
4453 "\x80\x97\xae\xc5\xdc\xf3\x0a\x21"
4454 "\x38\x4f\x66\x7d\x94\xab\xc2\xd9"
4455 "\xf0\x07\x1e\x35\x4c\x63\x7a\x91"
4456 "\xa8\xbf\xd6\xed\x04\x1b\x32\x49"
4457 "\x60\x77\x8e\xa5\xbc\xd3\xea\x01"
4458 "\x18\x2f\x46\x5d\x74\x8b\xa2\xb9"
4459 "\xd0\xe7\xfe\x15\x2c\x43\x5a\x71"
4460 "\x88\x9f\xb6\xcd\xe4\xfb\x12\x29"
4461 "\x40\x57\x6e\x85\x9c\xb3\xca\xe1"
4462 "\xf8\x0f\x26\x3d\x54\x6b\x82\x99"
4463 "\xb0\xc7\xde\xf5\x0c\x23\x3a\x51"
4464 "\x68\x7f\x96\xad\xc4\xdb\xf2\x09"
4465 "\x20\x37\x4e\x65\x7c\x93\xaa\xc1"
4466 "\xd8\xef\x06\x1d\x34\x4b\x62\x79"
4467 "\x90\xa7\xbe\xd5\xec\x03\x1a\x31"
4468 "\x48\x5f\x76\x8d\xa4\xbb\xd2\xe9"
4469 "\x00\x19\x32\x4b\x64\x7d\x96\xaf"
4470 "\xc8\xe1\xfa\x13\x2c\x45\x5e\x77"
4471 "\x90\xa9\xc2\xdb\xf4\x0d\x26\x3f"
4472 "\x58\x71\x8a\xa3\xbc\xd5\xee\x07"
4473 "\x20\x39\x52\x6b\x84\x9d\xb6\xcf"
4474 "\xe8\x01\x1a\x33\x4c\x65\x7e\x97"
4475 "\xb0\xc9\xe2\xfb\x14\x2d\x46\x5f"
4476 "\x78\x91\xaa\xc3\xdc\xf5\x0e\x27"
4477 "\x40\x59\x72\x8b\xa4\xbd\xd6\xef"
4478 "\x08\x21\x3a\x53\x6c\x85\x9e\xb7"
4479 "\xd0\xe9\x02\x1b\x34\x4d\x66\x7f"
4480 "\x98\xb1\xca\xe3\xfc\x15\x2e\x47"
4481 "\x60\x79\x92\xab\xc4\xdd\xf6\x0f"
4482 "\x28\x41\x5a\x73\x8c\xa5\xbe\xd7"
4483 "\xf0\x09\x22\x3b\x54\x6d\x86\x9f"
4484 "\xb8\xd1\xea\x03\x1c\x35\x4e\x67"
4485 "\x80\x99\xb2\xcb\xe4\xfd\x16\x2f"
4486 "\x48\x61\x7a\x93\xac\xc5\xde\xf7"
4487 "\x10\x29\x42\x5b\x74\x8d\xa6\xbf"
4488 "\xd8\xf1\x0a\x23\x3c\x55\x6e\x87"
4489 "\xa0\xb9\xd2\xeb\x04\x1d\x36\x4f"
4490 "\x68\x81\x9a\xb3\xcc\xe5\xfe\x17"
4491 "\x30\x49\x62\x7b\x94\xad\xc6\xdf"
4492 "\xf8\x11\x2a\x43\x5c\x75\x8e\xa7"
4493 "\xc0\xd9\xf2\x0b\x24\x3d\x56\x6f"
4494 "\x88\xa1\xba\xd3\xec\x05\x1e\x37"
4495 "\x50\x69\x82\x9b\xb4\xcd\xe6\xff"
4496 "\x18\x31\x4a\x63\x7c\x95\xae\xc7"
4497 "\xe0\xf9\x12\x2b\x44\x5d\x76\x8f"
4498 "\xa8\xc1\xda\xf3\x0c\x25\x3e\x57"
4499 "\x70\x89\xa2\xbb\xd4\xed\x06\x1f"
4500 "\x38\x51\x6a\x83\x9c\xb5\xce\xe7"
4501 "\x00\x1b\x36\x51\x6c\x87\xa2\xbd"
4502 "\xd8\xf3\x0e\x29\x44\x5f\x7a\x95"
4503 "\xb0\xcb\xe6\x01\x1c\x37\x52\x6d"
4504 "\x88\xa3\xbe\xd9\xf4\x0f\x2a\x45"
4505 "\x60\x7b\x96\xb1\xcc\xe7\x02\x1d"
4506 "\x38\x53\x6e\x89\xa4\xbf\xda\xf5"
4507 "\x10\x2b\x46\x61\x7c\x97\xb2\xcd"
4508 "\xe8\x03\x1e\x39\x54\x6f\x8a\xa5"
4509 "\xc0\xdb\xf6\x11\x2c\x47\x62\x7d"
4510 "\x98\xb3\xce\xe9\x04\x1f\x3a\x55"
4511 "\x70\x8b\xa6\xc1\xdc\xf7\x12\x2d"
4512 "\x48\x63\x7e\x99\xb4\xcf\xea\x05"
4513 "\x20\x3b\x56\x71\x8c\xa7\xc2\xdd"
4514 "\xf8\x13\x2e\x49\x64\x7f\x9a\xb5"
4515 "\xd0\xeb\x06\x21\x3c\x57\x72\x8d"
4516 "\xa8\xc3\xde\xf9\x14\x2f\x4a\x65"
4517 "\x80\x9b\xb6\xd1\xec\x07\x22\x3d"
4518 "\x58\x73\x8e\xa9\xc4\xdf\xfa\x15"
4519 "\x30\x4b\x66\x81\x9c\xb7\xd2\xed"
4520 "\x08\x23\x3e\x59\x74\x8f\xaa\xc5"
4521 "\xe0\xfb\x16\x31\x4c\x67\x82\x9d"
4522 "\xb8\xd3\xee\x09\x24\x3f\x5a\x75"
4523 "\x90\xab\xc6\xe1\xfc\x17\x32\x4d"
4524 "\x68\x83\x9e\xb9\xd4\xef\x0a\x25"
4525 "\x40\x5b\x76\x91\xac\xc7\xe2\xfd"
4526 "\x18\x33\x4e\x69\x84\x9f\xba\xd5"
4527 "\xf0\x0b\x26\x41\x5c\x77\x92\xad"
4528 "\xc8\xe3\xfe\x19\x34\x4f\x6a\x85"
4529 "\xa0\xbb\xd6\xf1\x0c\x27\x42\x5d"
4530 "\x78\x93\xae\xc9\xe4\xff\x1a\x35"
4531 "\x50\x6b\x86\xa1\xbc\xd7\xf2\x0d"
4532 "\x28\x43\x5e\x79\x94\xaf\xca\xe5"
4533 "\x00\x1d\x3a\x57\x74\x91\xae\xcb"
4534 "\xe8\x05\x22\x3f\x5c\x79\x96\xb3"
4535 "\xd0\xed\x0a\x27\x44\x61\x7e\x9b"
4536 "\xb8\xd5\xf2\x0f\x2c\x49\x66\x83"
4537 "\xa0\xbd\xda\xf7\x14\x31\x4e\x6b"
4538 "\x88\xa5\xc2\xdf\xfc\x19\x36\x53"
4539 "\x70\x8d\xaa\xc7\xe4\x01\x1e\x3b"
4540 "\x58\x75\x92\xaf\xcc\xe9\x06\x23"
4541 "\x40\x5d\x7a\x97\xb4\xd1\xee\x0b"
4542 "\x28\x45\x62\x7f\x9c\xb9\xd6\xf3"
4543 "\x10\x2d\x4a\x67\x84\xa1\xbe\xdb"
4544 "\xf8\x15\x32\x4f\x6c\x89\xa6\xc3"
4545 "\xe0\xfd\x1a\x37\x54\x71\x8e\xab"
4546 "\xc8\xe5\x02\x1f\x3c\x59\x76\x93"
4547 "\xb0\xcd\xea\x07\x24\x41\x5e\x7b"
4548 "\x98\xb5\xd2\xef\x0c\x29\x46\x63"
4549 "\x80\x9d\xba\xd7\xf4\x11\x2e\x4b"
4550 "\x68\x85\xa2\xbf\xdc\xf9\x16\x33"
4551 "\x50\x6d\x8a\xa7\xc4\xe1\xfe\x1b"
4552 "\x38\x55\x72\x8f\xac\xc9\xe6\x03"
4553 "\x20\x3d\x5a\x77\x94\xb1\xce\xeb"
4554 "\x08\x25\x42\x5f\x7c\x99\xb6\xd3"
4555 "\xf0\x0d\x2a\x47\x64\x81\x9e\xbb"
4556 "\xd8\xf5\x12\x2f\x4c\x69\x86\xa3"
4557 "\xc0\xdd\xfa\x17\x34\x51\x6e\x8b"
4558 "\xa8\xc5\xe2\xff\x1c\x39\x56\x73"
4559 "\x90\xad\xca\xe7\x04\x21\x3e\x5b"
4560 "\x78\x95\xb2\xcf\xec\x09\x26\x43"
4561 "\x60\x7d\x9a\xb7\xd4\xf1\x0e\x2b"
4562 "\x48\x65\x82\x9f\xbc\xd9\xf6\x13"
4563 "\x30\x4d\x6a\x87\xa4\xc1\xde\xfb"
4564 "\x18\x35\x52\x6f\x8c\xa9\xc6\xe3"
4565 "\x00\x1f\x3e\x5d\x7c\x9b\xba\xd9"
4566 "\xf8\x17\x36\x55\x74\x93\xb2\xd1"
4567 "\xf0\x0f\x2e\x4d\x6c\x8b\xaa\xc9"
4568 "\xe8\x07\x26\x45\x64\x83\xa2\xc1"
4569 "\xe0\xff\x1e\x3d\x5c\x7b\x9a\xb9"
4570 "\xd8\xf7\x16\x35\x54\x73\x92\xb1"
4571 "\xd0\xef\x0e\x2d\x4c\x6b\x8a\xa9"
4572 "\xc8\xe7\x06\x25\x44\x63\x82\xa1"
4573 "\xc0\xdf\xfe\x1d\x3c\x5b\x7a\x99"
4574 "\xb8\xd7\xf6\x15\x34\x53\x72\x91"
4575 "\xb0\xcf\xee\x0d\x2c\x4b\x6a\x89"
4576 "\xa8\xc7\xe6\x05\x24\x43\x62\x81"
4577 "\xa0\xbf\xde\xfd\x1c\x3b\x5a\x79"
4578 "\x98\xb7\xd6\xf5\x14\x33\x52\x71"
4579 "\x90\xaf\xce\xed\x0c\x2b\x4a\x69"
4580 "\x88\xa7\xc6\xe5\x04\x23\x42\x61"
4581 "\x80\x9f\xbe\xdd\xfc\x1b\x3a\x59"
4582 "\x78\x97\xb6\xd5\xf4\x13\x32\x51"
4583 "\x70\x8f\xae\xcd\xec\x0b\x2a\x49"
4584 "\x68\x87\xa6\xc5\xe4\x03\x22\x41"
4585 "\x60\x7f\x9e\xbd\xdc\xfb\x1a\x39"
4586 "\x58\x77\x96\xb5\xd4\xf3\x12\x31"
4587 "\x50\x6f\x8e\xad\xcc\xeb\x0a\x29"
4588 "\x48\x67\x86\xa5\xc4\xe3\x02\x21"
4589 "\x40\x5f\x7e\x9d\xbc\xdb\xfa\x19"
4590 "\x38\x57\x76\x95\xb4\xd3\xf2\x11"
4591 "\x30\x4f\x6e\x8d\xac\xcb\xea\x09"
4592 "\x28\x47\x66\x85\xa4\xc3\xe2\x01"
4593 "\x20\x3f\x5e\x7d\x9c\xbb\xda\xf9"
4594 "\x18\x37\x56\x75\x94\xb3\xd2\xf1"
4595 "\x10\x2f\x4e\x6d\x8c\xab\xca\xe9"
4596 "\x08\x27\x46\x65\x84\xa3\xc2\xe1"
4597 "\x00\x21\x42\x63",
4598 .ilen = 4100,
4599 .result =
4600 "\xf0\x5c\x74\xad\x4e\xbc\x99\xe2"
4601 "\xae\xff\x91\x3a\x44\xcf\x38\x32"
4602 "\x1e\xad\xa7\xcd\xa1\x39\x95\xaa"
4603 "\x10\xb1\xb3\x2e\x04\x31\x8f\x86"
4604 "\xf2\x62\x74\x70\x0c\xa4\x46\x08"
4605 "\xa8\xb7\x99\xa8\xe9\xd2\x73\x79"
4606 "\x7e\x6e\xd4\x8f\x1e\xc7\x8e\x31"
4607 "\x0b\xfa\x4b\xce\xfd\xf3\x57\x71"
4608 "\xe9\x46\x03\xa5\x3d\x34\x00\xe2"
4609 "\x18\xff\x75\x6d\x06\x2d\x00\xab"
4610 "\xb9\x3e\x6c\x59\xc5\x84\x06\xb5"
4611 "\x8b\xd0\x89\x9c\x4a\x79\x16\xc6"
4612 "\x3d\x74\x54\xfa\x44\xcd\x23\x26"
4613 "\x5c\xcf\x7e\x28\x92\x32\xbf\xdf"
4614 "\xa7\x20\x3c\x74\x58\x2a\x9a\xde"
4615 "\x61\x00\x1c\x4f\xff\x59\xc4\x22"
4616 "\xac\x3c\xd0\xe8\x6c\xf9\x97\x1b"
4617 "\x58\x9b\xad\x71\xe8\xa9\xb5\x0d"
4618 "\xee\x2f\x04\x1f\x7f\xbc\x99\xee"
4619 "\x84\xff\x42\x60\xdc\x3a\x18\xa5"
4620 "\x81\xf9\xef\xdc\x7a\x0f\x65\x41"
4621 "\x2f\xa3\xd3\xf9\xc2\xcb\xc0\x4d"
4622 "\x8f\xd3\x76\x96\xad\x49\x6d\x38"
4623 "\x3d\x39\x0b\x6c\x80\xb7\x54\x69"
4624 "\xf0\x2c\x90\x02\x29\x0d\x1c\x12"
4625 "\xad\x55\xc3\x8b\x68\xd9\xcc\xb3"
4626 "\xb2\x64\x33\x90\x5e\xca\x4b\xe2"
4627 "\xfb\x75\xdc\x63\xf7\x9f\x82\x74"
4628 "\xf0\xc9\xaa\x7f\xe9\x2a\x9b\x33"
4629 "\xbc\x88\x00\x7f\xca\xb2\x1f\x14"
4630 "\xdb\xc5\x8e\x7b\x11\x3c\x3e\x08"
4631 "\xf3\x83\xe8\xe0\x94\x86\x2e\x92"
4632 "\x78\x6b\x01\xc9\xc7\x83\xba\x21"
4633 "\x6a\x25\x15\x33\x4e\x45\x08\xec"
4634 "\x35\xdb\xe0\x6e\x31\x51\x79\xa9"
4635 "\x42\x44\x65\xc1\xa0\xf1\xf9\x2a"
4636 "\x70\xd5\xb6\xc6\xc1\x8c\x39\xfc"
4637 "\x25\xa6\x55\xd9\xdd\x2d\x4c\xec"
4638 "\x49\xc6\xeb\x0e\xa8\x25\x2a\x16"
4639 "\x1b\x66\x84\xda\xe2\x92\xe5\xc0"
4640 "\xc8\x53\x07\xaf\x80\x84\xec\xfd"
4641 "\xcd\xd1\x6e\xcd\x6f\x6a\xf5\x36"
4642 "\xc5\x15\xe5\x25\x7d\x77\xd1\x1a"
4643 "\x93\x36\xa9\xcf\x7c\xa4\x54\x4a"
4644 "\x06\x51\x48\x4e\xf6\x59\x87\xd2"
4645 "\x04\x02\xef\xd3\x44\xde\x76\x31"
4646 "\xb3\x34\x17\x1b\x9d\x66\x11\x9f"
4647 "\x1e\xcc\x17\xe9\xc7\x3c\x1b\xe7"
4648 "\xcb\x50\x08\xfc\xdc\x2b\x24\xdb"
4649 "\x65\x83\xd0\x3b\xe3\x30\xea\x94"
4650 "\x6c\xe7\xe8\x35\x32\xc7\xdb\x64"
4651 "\xb4\x01\xab\x36\x2c\x77\x13\xaf"
4652 "\xf8\x2b\x88\x3f\x54\x39\xc4\x44"
4653 "\xfe\xef\x6f\x68\x34\xbe\x0f\x05"
4654 "\x16\x6d\xf6\x0a\x30\xe7\xe3\xed"
4655 "\xc4\xde\x3c\x1b\x13\xd8\xdb\xfe"
4656 "\x41\x62\xe5\x28\xd4\x8d\xa3\xc7"
4657 "\x93\x97\xc6\x48\x45\x1d\x9f\x83"
4658 "\xdf\x4b\x40\x3e\x42\x25\x87\x80"
4659 "\x4c\x7d\xa8\xd4\x98\x23\x95\x75"
4660 "\x41\x8c\xda\x41\x9b\xd4\xa7\x06"
4661 "\xb5\xf1\x71\x09\x53\xbe\xca\xbf"
4662 "\x32\x03\xed\xf0\x50\x1c\x56\x39"
4663 "\x5b\xa4\x75\x18\xf7\x9b\x58\xef"
4664 "\x53\xfc\x2a\x38\x23\x15\x75\xcd"
4665 "\x45\xe5\x5a\x82\x55\xba\x21\xfa"
4666 "\xd4\xbd\xc6\x94\x7c\xc5\x80\x12"
4667 "\xf7\x4b\x32\xc4\x9a\x82\xd8\x28"
4668 "\x8f\xd9\xc2\x0f\x60\x03\xbe\x5e"
4669 "\x21\xd6\x5f\x58\xbf\x5c\xb1\x32"
4670 "\x82\x8d\xa9\xe5\xf2\x66\x1a\xc0"
4671 "\xa0\xbc\x58\x2f\x71\xf5\x2f\xed"
4672 "\xd1\x26\xb9\xd8\x49\x5a\x07\x19"
4673 "\x01\x7c\x59\xb0\xf8\xa4\xb7\xd3"
4674 "\x7b\x1a\x8c\x38\xf4\x50\xa4\x59"
4675 "\xb0\xcc\x41\x0b\x88\x7f\xe5\x31"
4676 "\xb3\x42\xba\xa2\x7e\xd4\x32\x71"
4677 "\x45\x87\x48\xa9\xc2\xf2\x89\xb3"
4678 "\xe4\xa7\x7e\x52\x15\x61\xfa\xfe"
4679 "\xc9\xdd\x81\xeb\x13\xab\xab\xc3"
4680 "\x98\x59\xd8\x16\x3d\x14\x7a\x1c"
4681 "\x3c\x41\x9a\x16\x16\x9b\xd2\xd2"
4682 "\x69\x3a\x29\x23\xac\x86\x32\xa5"
4683 "\x48\x9c\x9e\xf3\x47\x77\x81\x70"
4684 "\x24\xe8\x85\xd2\xf5\xb5\xfa\xff"
4685 "\x59\x6a\xd3\x50\x59\x43\x59\xde"
4686 "\xd9\xf1\x55\xa5\x0c\xc3\x1a\x1a"
4687 "\x18\x34\x0d\x1a\x63\x33\xed\x10"
4688 "\xe0\x1d\x2a\x18\xd2\xc0\x54\xa8"
4689 "\xca\xb5\x9a\xd3\xdd\xca\x45\x84"
4690 "\x50\xe7\x0f\xfe\xa4\x99\x5a\xbe"
4691 "\x43\x2d\x9a\xcb\x92\x3f\x5a\x1d"
4692 "\x85\xd8\xc9\xdf\x68\xc9\x12\x80"
4693 "\x56\x0c\xdc\x00\xdc\x3a\x7d\x9d"
4694 "\xa3\xa2\xe8\x4d\xbf\xf9\x70\xa0"
4695 "\xa4\x13\x4f\x6b\xaf\x0a\x89\x7f"
4696 "\xda\xf0\xbf\x9b\xc8\x1d\xe5\xf8"
4697 "\x2e\x8b\x07\xb5\x73\x1b\xcc\xa2"
4698 "\xa6\xad\x30\xbc\x78\x3c\x5b\x10"
4699 "\xfa\x5e\x62\x2d\x9e\x64\xb3\x33"
4700 "\xce\xf9\x1f\x86\xe7\x8b\xa2\xb8"
4701 "\xe8\x99\x57\x8c\x11\xed\x66\xd9"
4702 "\x3c\x72\xb9\xc3\xe6\x4e\x17\x3a"
4703 "\x6a\xcb\x42\x24\x06\xed\x3e\x4e"
4704 "\xa3\xe8\x6a\x94\xda\x0d\x4e\xd5"
4705 "\x14\x19\xcf\xb6\x26\xd8\x2e\xcc"
4706 "\x64\x76\x38\x49\x4d\xfe\x30\x6d"
4707 "\xe4\xc8\x8c\x7b\xc4\xe0\x35\xba"
4708 "\x22\x6e\x76\xe1\x1a\xf2\x53\xc3"
4709 "\x28\xa2\x82\x1f\x61\x69\xad\xc1"
4710 "\x7b\x28\x4b\x1e\x6c\x85\x95\x9b"
4711 "\x51\xb5\x17\x7f\x12\x69\x8c\x24"
4712 "\xd5\xc7\x5a\x5a\x11\x54\xff\x5a"
4713 "\xf7\x16\xc3\x91\xa6\xf0\xdc\x0a"
4714 "\xb6\xa7\x4a\x0d\x7a\x58\xfe\xa5"
4715 "\xf5\xcb\x8f\x7b\x0e\xea\x57\xe7"
4716 "\xbd\x79\xd6\x1c\x88\x23\x6c\xf2"
4717 "\x4d\x29\x77\x53\x35\x6a\x00\x8d"
4718 "\xcd\xa3\x58\xbe\x77\x99\x18\xf8"
4719 "\xe6\xe1\x8f\xe9\x37\x8f\xe3\xe2"
4720 "\x5a\x8a\x93\x25\xaf\xf3\x78\x80"
4721 "\xbe\xa6\x1b\xc6\xac\x8b\x1c\x91"
4722 "\x58\xe1\x9f\x89\x35\x9d\x1d\x21"
4723 "\x29\x9f\xf4\x99\x02\x27\x0f\xa8"
4724 "\x4f\x79\x94\x2b\x33\x2c\xda\xa2"
4725 "\x26\x39\x83\x94\xef\x27\xd8\x53"
4726 "\x8f\x66\x0d\xe4\x41\x7d\x34\xcd"
4727 "\x43\x7c\x95\x0a\x53\xef\x66\xda"
4728 "\x7e\x9b\xf3\x93\xaf\xd0\x73\x71"
4729 "\xba\x40\x9b\x74\xf8\xd7\xd7\x41"
4730 "\x6d\xaf\x72\x9c\x8d\x21\x87\x3c"
4731 "\xfd\x0a\x90\xa9\x47\x96\x9e\xd3"
4732 "\x88\xee\x73\xcf\x66\x2f\x52\x56"
4733 "\x6d\xa9\x80\x4c\xe2\x6f\x62\x88"
4734 "\x3f\x0e\x54\x17\x48\x80\x5d\xd3"
4735 "\xc3\xda\x25\x3d\xa1\xc8\xcb\x9f"
4736 "\x9b\x70\xb3\xa1\xeb\x04\x52\xa1"
4737 "\xf2\x22\x0f\xfc\xc8\x18\xfa\xf9"
4738 "\x85\x9c\xf1\xac\xeb\x0c\x02\x46"
4739 "\x75\xd2\xf5\x2c\xe3\xd2\x59\x94"
4740 "\x12\xf3\x3c\xfc\xd7\x92\xfa\x36"
4741 "\xba\x61\x34\x38\x7c\xda\x48\x3e"
4742 "\x08\xc9\x39\x23\x5e\x02\x2c\x1a"
4743 "\x18\x7e\xb4\xd9\xfd\x9e\x40\x02"
4744 "\xb1\x33\x37\x32\xe7\xde\xd6\xd0"
4745 "\x7c\x58\x65\x4b\xf8\x34\x27\x9c"
4746 "\x44\xb4\xbd\xe9\xe9\x4c\x78\x7d"
4747 "\x4b\x9f\xce\xb1\xcd\x47\xa5\x37"
4748 "\xe5\x6d\xbd\xb9\x43\x94\x0a\xd4"
4749 "\xd6\xf9\x04\x5f\xb5\x66\x6c\x1a"
4750 "\x35\x12\xe3\x36\x28\x27\x36\x58"
4751 "\x01\x2b\x79\xe4\xba\x6d\x10\x7d"
4752 "\x65\xdf\x84\x95\xf4\xd5\xb6\x8f"
4753 "\x2b\x9f\x96\x00\x86\x60\xf0\x21"
4754 "\x76\xa8\x6a\x8c\x28\x1c\xb3\x6b"
4755 "\x97\xd7\xb6\x53\x2a\xcc\xab\x40"
4756 "\x9d\x62\x79\x58\x52\xe6\x65\xb7"
4757 "\xab\x55\x67\x9c\x89\x7c\x03\xb0"
4758 "\x73\x59\xc5\x81\xf5\x18\x17\x5c"
4759 "\x89\xf3\x78\x35\x44\x62\x78\x72"
4760 "\xd0\x96\xeb\x31\xe7\x87\x77\x14"
4761 "\x99\x51\xf2\x59\x26\x9e\xb5\xa6"
4762 "\x45\xfe\x6e\xbd\x07\x4c\x94\x5a"
4763 "\xa5\x7d\xfc\xf1\x2b\x77\xe2\xfe"
4764 "\x17\xd4\x84\xa0\xac\xb5\xc7\xda"
4765 "\xa9\x1a\xb6\xf3\x74\x11\xb4\x9d"
4766 "\xfb\x79\x2e\x04\x2d\x50\x28\x83"
4767 "\xbf\xc6\x52\xd3\x34\xd6\xe8\x7a"
4768 "\xb6\xea\xe7\xa8\x6c\x15\x1e\x2c"
4769 "\x57\xbc\x48\x4e\x5f\x5c\xb6\x92"
4770 "\xd2\x49\x77\x81\x6d\x90\x70\xae"
4771 "\x98\xa1\x03\x0d\x6b\xb9\x77\x14"
4772 "\xf1\x4e\x23\xd3\xf8\x68\xbd\xc2"
4773 "\xfe\x04\xb7\x5c\xc5\x17\x60\x8f"
4774 "\x65\x54\xa4\x7a\x42\xdc\x18\x0d"
4775 "\xb5\xcf\x0f\xd3\xc7\x91\x66\x1b"
4776 "\x45\x42\x27\x75\x50\xe5\xee\xb8"
4777 "\x7f\x33\x2c\xba\x4a\x92\x4d\x2c"
4778 "\x3c\xe3\x0d\x80\x01\xba\x0d\x29"
4779 "\xd8\x3c\xe9\x13\x16\x57\xe6\xea"
4780 "\x94\x52\xe7\x00\x4d\x30\xb0\x0f"
4781 "\x35\xb8\xb8\xa7\xb1\xb5\x3b\x44"
4782 "\xe1\x2f\xfd\x88\xed\x43\xe7\x52"
4783 "\x10\x93\xb3\x8a\x30\x6b\x0a\xf7"
4784 "\x23\xc6\x50\x9d\x4a\xb0\xde\xc3"
4785 "\xdc\x9b\x2f\x01\x56\x36\x09\xc5"
4786 "\x2f\x6b\xfe\xf1\xd8\x27\x45\x03"
4787 "\x30\x5e\x5c\x5b\xb4\x62\x0e\x1a"
4788 "\xa9\x21\x2b\x92\x94\x87\x62\x57"
4789 "\x4c\x10\x74\x1a\xf1\x0a\xc5\x84"
4790 "\x3b\x9e\x72\x02\xd7\xcc\x09\x56"
4791 "\xbd\x54\xc1\xf0\xc3\xe3\xb3\xf8"
4792 "\xd2\x0d\x61\xcb\xef\xce\x0d\x05"
4793 "\xb0\x98\xd9\x8e\x4f\xf9\xbc\x93"
4794 "\xa6\xea\xc8\xcf\x10\x53\x4b\xf1"
4795 "\xec\xfc\x89\xf9\x64\xb0\x22\xbf"
4796 "\x9e\x55\x46\x9f\x7c\x50\x8e\x84"
4797 "\x54\x20\x98\xd7\x6c\x40\x1e\xdb"
4798 "\x69\x34\x78\x61\x24\x21\x9c\x8a"
4799 "\xb3\x62\x31\x8b\x6e\xf5\x2a\x35"
4800 "\x86\x13\xb1\x6c\x64\x2e\x41\xa5"
4801 "\x05\xf2\x42\xba\xd2\x3a\x0d\x8e"
4802 "\x8a\x59\x94\x3c\xcf\x36\x27\x82"
4803 "\xc2\x45\xee\x58\xcd\x88\xb4\xec"
4804 "\xde\xb2\x96\x0a\xaf\x38\x6f\x88"
4805 "\xd7\xd8\xe1\xdf\xb9\x96\xa9\x0a"
4806 "\xb1\x95\x28\x86\x20\xe9\x17\x49"
4807 "\xa2\x29\x38\xaa\xa5\xe9\x6e\xf1"
4808 "\x19\x27\xc0\xd5\x2a\x22\xc3\x0b"
4809 "\xdb\x7c\x73\x10\xb9\xba\x89\x76"
4810 "\x54\xae\x7d\x71\xb3\x93\xf6\x32"
4811 "\xe6\x47\x43\x55\xac\xa0\x0d\xc2"
4812 "\x93\x27\x4a\x8e\x0e\x74\x15\xc7"
4813 "\x0b\x85\xd9\x0c\xa9\x30\x7a\x3e"
4814 "\xea\x8f\x85\x6d\x3a\x12\x4f\x72"
4815 "\x69\x58\x7a\x80\xbb\xb5\x97\xf3"
4816 "\xcf\x70\xd2\x5d\xdd\x4d\x21\x79"
4817 "\x54\x4d\xe4\x05\xe8\xbd\xc2\x62"
4818 "\xb1\x3b\x77\x1c\xd6\x5c\xf3\xa0"
4819 "\x79\x00\xa8\x6c\x29\xd9\x18\x24"
4820 "\x36\xa2\x46\xc0\x96\x65\x7f\xbd"
4821 "\x2a\xed\x36\x16\x0c\xaa\x9f\xf4"
4822 "\xc5\xb4\xe2\x12\xed\x69\xed\x4f"
4823 "\x26\x2c\x39\x52\x89\x98\xe7\x2c"
4824 "\x99\xa4\x9e\xa3\x9b\x99\x46\x7a"
4825 "\x3a\xdc\xa8\x59\xa3\xdb\xc3\x3b"
4826 "\x95\x0d\x3b\x09\x6e\xee\x83\x5d"
4827 "\x32\x4d\xed\xab\xfa\x98\x14\x4e"
4828 "\xc3\x15\x45\x53\x61\xc4\x93\xbd"
4829 "\x90\xf4\x99\x95\x4c\xe6\x76\x92"
4830 "\x29\x90\x46\x30\x92\x69\x7d\x13"
4831 "\xf2\xa5\xcd\x69\x49\x44\xb2\x0f"
4832 "\x63\x40\x36\x5f\x09\xe2\x78\xf8"
4833 "\x91\xe3\xe2\xfa\x10\xf7\xc8\x24"
4834 "\xa8\x89\x32\x5c\x37\x25\x1d\xb2"
4835 "\xea\x17\x8a\x0a\xa9\x64\xc3\x7c"
4836 "\x3c\x7c\xbd\xc6\x79\x34\xe7\xe2"
4837 "\x85\x8e\xbf\xf8\xde\x92\xa0\xae"
4838 "\x20\xc4\xf6\xbb\x1f\x38\x19\x0e"
4839 "\xe8\x79\x9c\xa1\x23\xe9\x54\x7e"
4840 "\x37\x2f\xe2\x94\x32\xaf\xa0\x23"
4841 "\x49\xe4\xc0\xb3\xac\x00\x8f\x36"
4842 "\x05\xc4\xa6\x96\xec\x05\x98\x4f"
4843 "\x96\x67\x57\x1f\x20\x86\x1b\x2d"
4844 "\x69\xe4\x29\x93\x66\x5f\xaf\x6b"
4845 "\x88\x26\x2c\x67\x02\x4b\x52\xd0"
4846 "\x83\x7a\x43\x1f\xc0\x71\x15\x25"
4847 "\x77\x65\x08\x60\x11\x76\x4c\x8d"
4848 "\xed\xa9\x27\xc6\xb1\x2a\x2c\x6a"
4849 "\x4a\x97\xf5\xc6\xb7\x70\x42\xd3"
4850 "\x03\xd1\x24\x95\xec\x6d\xab\x38"
4851 "\x72\xce\xe2\x8b\x33\xd7\x51\x09"
4852 "\xdc\x45\xe0\x09\x96\x32\xf3\xc4"
4853 "\x84\xdc\x73\x73\x2d\x1b\x11\x98"
4854 "\xc5\x0e\x69\x28\x94\xc7\xb5\x4d"
4855 "\xc8\x8a\xd0\xaa\x13\x2e\x18\x74"
4856 "\xdd\xd1\x1e\xf3\x90\xe8\xfc\x9a"
4857 "\x72\x4a\x0e\xd1\xe4\xfb\x0d\x96"
4858 "\xd1\x0c\x79\x85\x1b\x1c\xfe\xe1"
4859 "\x62\x8f\x7a\x73\x32\xab\xc8\x18"
4860 "\x69\xe3\x34\x30\xdf\x13\xa6\xe5"
4861 "\xe8\x0e\x67\x7f\x81\x11\xb4\x60"
4862 "\xc7\xbd\x79\x65\x50\xdc\xc4\x5b"
4863 "\xde\x39\xa4\x01\x72\x63\xf3\xd1"
4864 "\x64\x4e\xdf\xfc\x27\x92\x37\x0d"
4865 "\x57\xcd\x11\x4f\x11\x04\x8e\x1d"
4866 "\x16\xf7\xcd\x92\x9a\x99\x30\x14"
4867 "\xf1\x7c\x67\x1b\x1f\x41\x0b\xe8"
4868 "\x32\xe8\xb8\xc1\x4f\x54\x86\x4f"
4869 "\xe5\x79\x81\x73\xcd\x43\x59\x68"
4870 "\x73\x02\x3b\x78\x21\x72\x43\x00"
4871 "\x49\x17\xf7\x00\xaf\x68\x24\x53"
4872 "\x05\x0a\xc3\x33\xe0\x33\x3f\x69"
4873 "\xd2\x84\x2f\x0b\xed\xde\x04\xf4"
4874 "\x11\x94\x13\x69\x51\x09\x28\xde"
4875 "\x57\x5c\xef\xdc\x9a\x49\x1c\x17"
4876 "\x97\xf3\x96\xc1\x7f\x5d\x2e\x7d"
4877 "\x55\xb8\xb3\x02\x09\xb3\x1f\xe7"
4878 "\xc9\x8d\xa3\x36\x34\x8a\x77\x13"
4879 "\x30\x63\x4c\xa5\xcd\xc3\xe0\x7e"
4880 "\x05\xa1\x7b\x0c\xcb\x74\x47\x31"
4881 "\x62\x03\x43\xf1\x87\xb4\xb0\x85"
4882 "\x87\x8e\x4b\x25\xc7\xcf\xae\x4b"
4883 "\x36\x46\x3e\x62\xbc\x6f\xeb\x5f"
4884 "\x73\xac\xe6\x07\xee\xc1\xa1\xd6"
4885 "\xc4\xab\xc9\xd6\x89\x45\xe1\xf1"
4886 "\x04\x4e\x1a\x6f\xbb\x4f\x3a\xa3"
4887 "\xa0\xcb\xa3\x0a\xd8\x71\x35\x55"
4888 "\xe4\xbc\x2e\x04\x06\xe6\xff\x5b"
4889 "\x1c\xc0\x11\x7c\xc5\x17\xf3\x38"
4890 "\xcf\xe9\xba\x0f\x0e\xef\x02\xc2"
4891 "\x8d\xc6\xbc\x4b\x67\x20\x95\xd7"
4892 "\x2c\x45\x5b\x86\x44\x8c\x6f\x2e"
4893 "\x7e\x9f\x1c\x77\xba\x6b\x0e\xa3"
4894 "\x69\xdc\xab\x24\x57\x60\x47\xc1"
4895 "\xd1\xa5\x9d\x23\xe6\xb1\x37\xfe"
4896 "\x93\xd2\x4c\x46\xf9\x0c\xc6\xfb"
4897 "\xd6\x9d\x99\x69\xab\x7a\x07\x0c"
4898 "\x65\xe7\xc4\x08\x96\xe2\xa5\x01"
4899 "\x3f\x46\x07\x05\x7e\xe8\x9a\x90"
4900 "\x50\xdc\xe9\x7a\xea\xa1\x39\x6e"
4901 "\x66\xe4\x6f\xa5\x5f\xb2\xd9\x5b"
4902 "\xf5\xdb\x2a\x32\xf0\x11\x6f\x7c"
4903 "\x26\x10\x8f\x3d\x80\xe9\x58\xf7"
4904 "\xe0\xa8\x57\xf8\xdb\x0e\xce\x99"
4905 "\x63\x19\x3d\xd5\xec\x1b\x77\x69"
4906 "\x98\xf6\xe4\x5f\x67\x17\x4b\x09"
4907 "\x85\x62\x82\x70\x18\xe2\x9a\x78"
4908 "\xe2\x62\xbd\xb4\xf1\x42\xc6\xfb"
4909 "\x08\xd0\xbd\xeb\x4e\x09\xf2\xc8"
4910 "\x1e\xdc\x3d\x32\x21\x56\x9c\x4f"
4911 "\x35\xf3\x61\x06\x72\x84\xc4\x32"
4912 "\xf2\xf1\xfa\x0b\x2f\xc3\xdb\x02"
4913 "\x04\xc2\xde\x57\x64\x60\x8d\xcf"
4914 "\xcb\x86\x5d\x97\x3e\xb1\x9c\x01"
4915 "\xd6\x28\x8f\x99\xbc\x46\xeb\x05"
4916 "\xaf\x7e\xb8\x21\x2a\x56\x85\x1c"
4917 "\xb3\x71\xa0\xde\xca\x96\xf1\x78"
4918 "\x49\xa2\x99\x81\x80\x5c\x01\xf5"
4919 "\xa0\xa2\x56\x63\xe2\x70\x07\xa5"
4920 "\x95\xd6\x85\xeb\x36\x9e\xa9\x51"
4921 "\x66\x56\x5f\x1d\x02\x19\xe2\xf6"
4922 "\x4f\x73\x38\x09\x75\x64\x48\xe0"
4923 "\xf1\x7e\x0e\xe8\x9d\xf9\xed\x94"
4924 "\xfe\x16\x26\x62\x49\x74\xf4\xb0"
4925 "\xd4\xa9\x6c\xb0\xfd\x53\xe9\x81"
4926 "\xe0\x7a\xbf\xcf\xb5\xc4\x01\x81"
4927 "\x79\x99\x77\x01\x3b\xe9\xa2\xb6"
4928 "\xe6\x6a\x8a\x9e\x56\x1c\x8d\x1e"
4929 "\x8f\x06\x55\x2c\x6c\xdc\x92\x87"
4930 "\x64\x3b\x4b\x19\xa1\x13\x64\x1d"
4931 "\x4a\xe9\xc0\x00\xb8\x95\xef\x6b"
4932 "\x1a\x86\x6d\x37\x52\x02\xc2\xe0"
4933 "\xc8\xbb\x42\x0c\x02\x21\x4a\xc9"
4934 "\xef\xa0\x54\xe4\x5e\x16\x53\x81"
4935 "\x70\x62\x10\xaf\xde\xb8\xb5\xd3"
4936 "\xe8\x5e\x6c\xc3\x8a\x3e\x18\x07"
4937 "\xf2\x2f\x7d\xa7\xe1\x3d\x4e\xb4"
4938 "\x26\xa7\xa3\x93\x86\xb2\x04\x1e"
4939 "\x53\x5d\x86\xd6\xde\x65\xca\xe3"
4940 "\x4e\xc1\xcf\xef\xc8\x70\x1b\x83"
4941 "\x13\xdd\x18\x8b\x0d\x76\xd2\xf6"
4942 "\x37\x7a\x93\x7a\x50\x11\x9f\x96"
4943 "\x86\x25\xfd\xac\xdc\xbe\x18\x93"
4944 "\x19\x6b\xec\x58\x4f\xb9\x75\xa7"
4945 "\xdd\x3f\x2f\xec\xc8\x5a\x84\xab"
4946 "\xd5\xe4\x8a\x07\xf6\x4d\x23\xd6"
4947 "\x03\xfb\x03\x6a\xea\x66\xbf\xd4"
4948 "\xb1\x34\xfb\x78\xe9\x55\xdc\x7c"
4949 "\x3d\x9c\xe5\x9a\xac\xc3\x7a\x80"
4950 "\x24\x6d\xa0\xef\x25\x7c\xb7\xea"
4951 "\xce\x4d\x5f\x18\x60\xce\x87\x22"
4952 "\x66\x2f\xd5\xdd\xdd\x02\x21\x75"
4953 "\x82\xa0\x1f\x58\xc6\xd3\x62\xf7"
4954 "\x32\xd8\xaf\x1e\x07\x77\x51\x96"
4955 "\xd5\x6b\x1e\x7e\x80\x02\xe8\x67"
4956 "\xea\x17\x0b\x10\xd2\x3f\x28\x25"
4957 "\x4f\x05\x77\x02\x14\x69\xf0\x2c"
4958 "\xbe\x0c\xf1\x74\x30\xd1\xb9\x9b"
4959 "\xfc\x8c\xbb\x04\x16\xd9\xba\xc3"
4960 "\xbc\x91\x8a\xc4\x30\xa4\xb0\x12"
4961 "\x4c\x21\x87\xcb\xc9\x1d\x16\x96"
4962 "\x07\x6f\x23\x54\xb9\x6f\x79\xe5"
4963 "\x64\xc0\x64\xda\xb1\xae\xdd\x60"
4964 "\x6c\x1a\x9d\xd3\x04\x8e\x45\xb0"
4965 "\x92\x61\xd0\x48\x81\xed\x5e\x1d"
4966 "\xa0\xc9\xa4\x33\xc7\x13\x51\x5d"
4967 "\x7f\x83\x73\xb6\x70\x18\x65\x3e"
4968 "\x2f\x0e\x7a\x12\x39\x98\xab\xd8"
4969 "\x7e\x6f\xa3\xd1\xba\x56\xad\xbd"
4970 "\xf0\x03\x01\x1c\x85\x35\x9f\xeb"
4971 "\x19\x63\xa1\xaf\xfe\x2d\x35\x50"
4972 "\x39\xa0\x65\x7c\x95\x7e\x6b\xfe"
4973 "\xc1\xac\x07\x7c\x98\x4f\xbe\x57"
4974 "\xa7\x22\xec\xe2\x7e\x29\x09\x53"
4975 "\xe8\xbf\xb4\x7e\x3f\x8f\xfc\x14"
4976 "\xce\x54\xf9\x18\x58\xb5\xff\x44"
4977 "\x05\x9d\xce\x1b\xb6\x82\x23\xc8"
4978 "\x2e\xbc\x69\xbb\x4a\x29\x0f\x65"
4979 "\x94\xf0\x63\x06\x0e\xef\x8c\xbd"
4980 "\xff\xfd\xb0\x21\x6e\x57\x05\x75"
4981 "\xda\xd5\xc4\xeb\x8d\x32\xf7\x50"
4982 "\xd3\x6f\x22\xed\x5f\x8e\xa2\x5b"
4983 "\x80\x8c\xc8\x78\x40\x24\x4b\x89"
4984 "\x30\xce\x7a\x97\x0e\xc4\xaf\xef"
4985 "\x9b\xb4\xcd\x66\x74\x14\x04\x2b"
4986 "\xf7\xce\x0b\x1c\x6e\xc2\x78\x8c"
4987 "\xca\xc5\xd0\x1c\x95\x4a\x91\x2d"
4988 "\xa7\x20\xeb\x86\x52\xb7\x67\xd8"
4989 "\x0c\xd6\x04\x14\xde\x51\x74\x75"
4990 "\xe7\x11\xb4\x87\xa3\x3d\x2d\xad"
4991 "\x4f\xef\xa0\x0f\x70\x00\x6d\x13"
4992 "\x19\x1d\x41\x50\xe9\xd8\xf0\x32"
4993 "\x71\xbc\xd3\x11\xf2\xac\xbe\xaf"
4994 "\x75\x46\x65\x4e\x07\x34\x37\xa3"
4995 "\x89\xfe\x75\xd4\x70\x4c\xc6\x3f"
4996 "\x69\x24\x0e\x38\x67\x43\x8c\xde"
4997 "\x06\xb5\xb8\xe7\xc4\xf0\x41\x8f"
4998 "\xf0\xbd\x2f\x0b\xb9\x18\xf8\xde"
4999 "\x64\xb1\xdb\xee\x00\x50\x77\xe1"
5000 "\xc7\xff\xa6\xfa\xdd\x70\xf4\xe3"
5001 "\x93\xe9\x77\x35\x3d\x4b\x2f\x2b"
5002 "\x6d\x55\xf0\xfc\x88\x54\x4e\x89"
5003 "\xc1\x8a\x23\x31\x2d\x14\x2a\xb8"
5004 "\x1b\x15\xdd\x9e\x6e\x7b\xda\x05"
5005 "\x91\x7d\x62\x64\x96\x72\xde\xfc"
5006 "\xc1\xec\xf0\x23\x51\x6f\xdb\x5b"
5007 "\x1d\x08\x57\xce\x09\xb8\xf6\xcd"
5008 "\x8d\x95\xf2\x20\xbf\x0f\x20\x57"
5009 "\x98\x81\x84\x4f\x15\x5c\x76\xe7"
5010 "\x3e\x0a\x3a\x6c\xc4\x8a\xbe\x78"
5011 "\x74\x77\xc3\x09\x4b\x5d\x48\xe4"
5012 "\xc8\xcb\x0b\xea\x17\x28\xcf\xcf"
5013 "\x31\x32\x44\xa4\xe5\x0e\x1a\x98"
5014 "\x94\xc4\xf0\xff\xae\x3e\x44\xe8"
5015 "\xa5\xb3\xb5\x37\x2f\xe8\xaf\x6f"
5016 "\x28\xc1\x37\x5f\x31\xd2\xb9\x33"
5017 "\xb1\xb2\x52\x94\x75\x2c\x29\x59"
5018 "\x06\xc2\x25\xe8\x71\x65\x4e\xed"
5019 "\xc0\x9c\xb1\xbb\x25\xdc\x6c\xe7"
5020 "\x4b\xa5\x7a\x54\x7a\x60\xff\x7a"
5021 "\xe0\x50\x40\x96\x35\x63\xe4\x0b"
5022 "\x76\xbd\xa4\x65\x00\x1b\x57\x88"
5023 "\xae\xed\x39\x88\x42\x11\x3c\xed"
5024 "\x85\x67\x7d\xb9\x68\x82\xe9\x43"
5025 "\x3c\x47\x53\xfa\xe8\xf8\x9f\x1f"
5026 "\x9f\xef\x0f\xf7\x30\xd9\x30\x0e"
5027 "\xb9\x9f\x69\x18\x2f\x7e\xf8\xf8"
5028 "\xf8\x8c\x0f\xd4\x02\x4d\xea\xcd"
5029 "\x0a\x9c\x6f\x71\x6d\x5a\x4c\x60"
5030 "\xce\x20\x56\x32\xc6\xc5\x99\x1f"
5031 "\x09\xe6\x4e\x18\x1a\x15\x13\xa8"
5032 "\x7d\xb1\x6b\xc0\xb2\x6d\xf8\x26"
5033 "\x66\xf8\x3d\x18\x74\x70\x66\x7a"
5034 "\x34\x17\xde\xba\x47\xf1\x06\x18"
5035 "\xcb\xaf\xeb\x4a\x1e\x8f\xa7\x77"
5036 "\xe0\x3b\x78\x62\x66\xc9\x10\xea"
5037 "\x1f\xb7\x29\x0a\x45\xa1\x1d\x1e"
5038 "\x1d\xe2\x65\x61\x50\x9c\xd7\x05"
5039 "\xf2\x0b\x5b\x12\x61\x02\xc8\xe5"
5040 "\x63\x4f\x20\x0c\x07\x17\x33\x5e"
5041 "\x03\x9a\x53\x0f\x2e\x55\xfe\x50"
5042 "\x43\x7d\xd0\xb6\x7e\x5a\xda\xae"
5043 "\x58\xef\x15\xa9\x83\xd9\x46\xb1"
5044 "\x42\xaa\xf5\x02\x6c\xce\x92\x06"
5045 "\x1b\xdb\x66\x45\x91\x79\xc2\x2d"
5046 "\xe6\x53\xd3\x14\xfd\xbb\x44\x63"
5047 "\xc6\xd7\x3d\x7a\x0c\x75\x78\x9d"
5048 "\x5c\xa6\x39\xb3\xe5\x63\xca\x8b"
5049 "\xfe\xd3\xef\x60\x83\xf6\x8e\x70"
5050 "\xb6\x67\xc7\x77\xed\x23\xef\x4c"
5051 "\xf0\xed\x2d\x07\x59\x6f\xc1\x01"
5052 "\x34\x37\x08\xab\xd9\x1f\x09\xb1"
5053 "\xce\x5b\x17\xff\x74\xf8\x9c\xd5"
5054 "\x2c\x56\x39\x79\x0f\x69\x44\x75"
5055 "\x58\x27\x01\xc4\xbf\xa7\xa1\x1d"
5056 "\x90\x17\x77\x86\x5a\x3f\xd9\xd1"
5057 "\x0e\xa0\x10\xf8\xec\x1e\xa5\x7f"
5058 "\x5e\x36\xd1\xe3\x04\x2c\x70\xf7"
5059 "\x8e\xc0\x98\x2f\x6c\x94\x2b\x41"
5060 "\xb7\x60\x00\xb7\x2e\xb8\x02\x8d"
5061 "\xb8\xb0\xd3\x86\xba\x1d\xd7\x90"
5062 "\xd6\xb6\xe1\xfc\xd7\xd8\x28\x06"
5063 "\x63\x9b\xce\x61\x24\x79\xc0\x70"
5064 "\x52\xd0\xb6\xd4\x28\x95\x24\x87"
5065 "\x03\x1f\xb7\x9a\xda\xa3\xfb\x52"
5066 "\x5b\x68\xe7\x4c\x8c\x24\xe1\x42"
5067 "\xf7\xd5\xfd\xad\x06\x32\x9f\xba"
5068 "\xc1\xfc\xdd\xc6\xfc\xfc\xb3\x38"
5069 "\x74\x56\x58\x40\x02\x37\x52\x2c"
5070 "\x55\xcc\xb3\x9e\x7a\xe9\xd4\x38"
5071 "\x41\x5e\x0c\x35\xe2\x11\xd1\x13"
5072 "\xf8\xb7\x8d\x72\x6b\x22\x2a\xb0"
5073 "\xdb\x08\xba\x35\xb9\x3f\xc8\xd3"
5074 "\x24\x90\xec\x58\xd2\x09\xc7\x2d"
5075 "\xed\x38\x80\x36\x72\x43\x27\x49"
5076 "\x4a\x80\x8a\xa2\xe8\xd3\xda\x30"
5077 "\x7d\xb6\x82\x37\x86\x92\x86\x3e"
5078 "\x08\xb2\x28\x5a\x55\x44\x24\x7d"
5079 "\x40\x48\x8a\xb6\x89\x58\x08\xa0"
5080 "\xd6\x6d\x3a\x17\xbf\xf6\x54\xa2"
5081 "\xf5\xd3\x8c\x0f\x78\x12\x57\x8b"
5082 "\xd5\xc2\xfd\x58\x5b\x7f\x38\xe3"
5083 "\xcc\xb7\x7c\x48\xb3\x20\xe8\x81"
5084 "\x14\x32\x45\x05\xe0\xdb\x9f\x75"
5085 "\x85\xb4\x6a\xfc\x95\xe3\x54\x22"
5086 "\x12\xee\x30\xfe\xd8\x30\xef\x34"
5087 "\x50\xab\x46\x30\x98\x2f\xb7\xc0"
5088 "\x15\xa2\x83\xb6\xf2\x06\x21\xa2"
5089 "\xc3\x26\x37\x14\xd1\x4d\xb5\x10"
5090 "\x52\x76\x4d\x6a\xee\xb5\x2b\x15"
5091 "\xb7\xf9\x51\xe8\x2a\xaf\xc7\xfa"
5092 "\x77\xaf\xb0\x05\x4d\xd1\x68\x8e"
5093 "\x74\x05\x9f\x9d\x93\xa5\x3e\x7f"
5094 "\x4e\x5f\x9d\xcb\x09\xc7\x83\xe3"
5095 "\x02\x9d\x27\x1f\xef\x85\x05\x8d"
5096 "\xec\x55\x88\x0f\x0d\x7c\x4c\xe8"
5097 "\xa1\x75\xa0\xd8\x06\x47\x14\xef"
5098 "\xaa\x61\xcf\x26\x15\xad\xd8\xa3"
5099 "\xaa\x75\xf2\x78\x4a\x5a\x61\xdf"
5100 "\x8b\xc7\x04\xbc\xb2\x32\xd2\x7e"
5101 "\x42\xee\xb4\x2f\x51\xff\x7b\x2e"
5102 "\xd3\x02\xe8\xdc\x5d\x0d\x50\xdc"
5103 "\xae\xb7\x46\xf9\xa8\xe6\xd0\x16"
5104 "\xcc\xe6\x2c\x81\xc7\xad\xe9\xf0"
5105 "\x05\x72\x6d\x3d\x0a\x7a\xa9\x02"
5106 "\xac\x82\x93\x6e\xb6\x1c\x28\xfc"
5107 "\x44\x12\xfb\x73\x77\xd4\x13\x39"
5108 "\x29\x88\x8a\xf3\x5c\xa6\x36\xa0"
5109 "\x2a\xed\x7e\xb1\x1d\xd6\x4c\x6b"
5110 "\x41\x01\x18\x5d\x5d\x07\x97\xa6"
5111 "\x4b\xef\x31\x18\xea\xac\xb1\x84"
5112 "\x21\xed\xda\x86",
5113 .rlen = 4100,
5114 },
5115};
5116
5117static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5118 { /* From RFC 3686 */
5119 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5120 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5121 "\x00\x00\x00\x30",
5122 .klen = 20,
5123 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
5124 .input = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
5125 "\x2d\x61\x75\xa3\x26\x13\x11\xb8",
5126 .ilen = 16,
5127 .result = "Single block msg",
5128 .rlen = 16,
5129 }, {
5130 .key = "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
5131 "\x43\xd6\xce\x1f\x32\x53\x91\x63"
5132 "\x00\x6c\xb6\xdb",
5133 .klen = 20,
5134 .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
5135 .input = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
5136 "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
5137 "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
5138 "\xfc\xe6\x30\xdf\x91\x41\xbe\x28",
5139 .ilen = 32,
5140 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
5141 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5142 "\x10\x11\x12\x13\x14\x15\x16\x17"
5143 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
5144 .rlen = 32,
5145 }, {
5146 .key = "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
5147 "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
5148 "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
5149 "\x00\x00\x00\x48",
5150 .klen = 28,
5151 .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
5152 .input = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
5153 "\x4e\x79\x35\xa0\x03\xcb\xe9\x28",
5154 .ilen = 16,
5155 .result = "Single block msg",
5156 .rlen = 16,
5157 }, {
5158 .key = "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
5159 "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
5160 "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
5161 "\x00\x96\xb0\x3b",
5162 .klen = 28,
5163 .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
5164 .input = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
5165 "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
5166 "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
5167 "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00",
5168 .ilen = 32,
5169 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
5170 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5171 "\x10\x11\x12\x13\x14\x15\x16\x17"
5172 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
5173 .rlen = 32,
5174 }, {
5175 .key = "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
5176 "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
5177 "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
5178 "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
5179 "\x00\x00\x00\x60",
5180 .klen = 36,
5181 .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
5182 .input = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
5183 "\x56\x08\x63\xdc\x71\xe3\xe0\xc0",
5184 .ilen = 16,
5185 .result = "Single block msg",
5186 .rlen = 16,
5187 }, {
5188 .key = "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
5189 "\x07\x96\x36\x58\x79\xef\xf8\x86"
5190 "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
5191 "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
5192 "\x00\xfa\xac\x24",
5193 .klen = 36,
5194 .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
5195 .input = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
5196 "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
5197 "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
5198 "\x55\x30\x83\x1d\x93\x44\xaf\x1c",
5199 .ilen = 32,
5200 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
5201 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5202 "\x10\x11\x12\x13\x14\x15\x16\x17"
5203 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
5204 .rlen = 32,
5205 },
5206};
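A note on the rfc3686(ctr(aes)) vectors above: the trailing four bytes of .key are the per-association nonce rather than AES key material (hence klen of 20, 28 and 36), and .iv supplies the 64-bit IV. A minimal sketch, assuming a userspace-style helper (the function name is invented for illustration, not kernel API), of how RFC 3686 assembles the initial counter block from those fields:

#include <stdint.h>
#include <string.h>

/* Illustrative only: build the initial 16-byte counter block that RFC 3686
 * CTR mode derives from a test vector's fields.  The 32-bit block counter
 * is big-endian and starts at 1. */
static void rfc3686_counter_block(uint8_t blk[16],
				  const uint8_t nonce[4], /* last 4 bytes of .key */
				  const uint8_t iv[8])    /* the .iv field */
{
	memcpy(blk, nonce, 4);		/* 32-bit nonce */
	memcpy(blk + 4, iv, 8);		/* 64-bit IV */
	blk[12] = 0;			/* 32-bit block counter, */
	blk[13] = 0;			/* big-endian,           */
	blk[14] = 0;
	blk[15] = 1;			/* starting at 1         */
}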
5207
5208static struct aead_testvec aes_gcm_enc_tv_template[] = {
5209	{ /* From McGrew & Viega, "The Galois/Counter Mode of Operation (GCM)" - http://citeseer.ist.psu.edu/656989.html */
5210 .key = zeroed_string,
5211 .klen = 16,
5212 .result = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
5213 "\x36\x7f\x1d\x57\xa4\xe7\x45\x5a",
5214 .rlen = 16,
5215 }, {
5216 .key = zeroed_string,
5217 .klen = 16,
5218 .input = zeroed_string,
5219 .ilen = 16,
5220 .result = "\x03\x88\xda\xce\x60\xb6\xa3\x92"
5221 "\xf3\x28\xc2\xb9\x71\xb2\xfe\x78"
5222 "\xab\x6e\x47\xd4\x2c\xec\x13\xbd"
5223 "\xf5\x3a\x67\xb2\x12\x57\xbd\xdf",
5224 .rlen = 32,
5225 }, {
5226 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5227 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5228 .klen = 16,
5229 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5230 "\xde\xca\xf8\x88",
5231 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5232 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5233 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5234 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5235 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5236 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5237 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5238 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5239 .ilen = 64,
5240 .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5241 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5242 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5243 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5244 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5245 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5246 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5247 "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
5248 "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
5249 "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4",
5250 .rlen = 80,
5251 }, {
5252 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5253 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5254 .klen = 16,
5255 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5256 "\xde\xca\xf8\x88",
5257 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5258 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5259 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5260 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5261 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5262 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5263 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5264 "\xba\x63\x7b\x39",
5265 .ilen = 60,
5266 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5267 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5268 "\xab\xad\xda\xd2",
5269 .alen = 20,
5270 .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5271 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5272 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5273 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5274 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5275 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5276 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5277 "\x3d\x58\xe0\x91"
5278 "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
5279 "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47",
5280 .rlen = 76,
5281 }, {
5282 .key = zeroed_string,
5283 .klen = 24,
5284 .result = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
5285 "\xa0\x0e\xd1\xf3\x12\x57\x24\x35",
5286 .rlen = 16,
5287 }, {
5288 .key = zeroed_string,
5289 .klen = 24,
5290 .input = zeroed_string,
5291 .ilen = 16,
5292 .result = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
5293 "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
5294 "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
5295 "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb",
5296 .rlen = 32,
5297 }, {
5298 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5299 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5300 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5301 .klen = 24,
5302 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5303 "\xde\xca\xf8\x88",
5304 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5305 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5306 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5307 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5308 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5309 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5310 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5311 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5312 .ilen = 64,
5313 .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5314 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5315 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5316 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5317 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5318 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5319 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5320 "\xcc\xda\x27\x10\xac\xad\xe2\x56"
5321 "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
5322 "\xb1\x18\x02\x4d\xb8\x67\x4a\x14",
5323 .rlen = 80,
5324 }, {
5325 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5326 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5327 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5328 .klen = 24,
5329 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5330 "\xde\xca\xf8\x88",
5331 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5332 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5333 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5334 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5335 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5336 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5337 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5338 "\xba\x63\x7b\x39",
5339 .ilen = 60,
5340 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5341 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5342 "\xab\xad\xda\xd2",
5343 .alen = 20,
5344 .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5345 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5346 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5347 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5348 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5349 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5350 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5351 "\xcc\xda\x27\x10"
5352 "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
5353 "\x37\xba\x55\xbd\x6d\x27\x61\x8c",
5354 .rlen = 76,
5355 .np = 2,
5356 .tap = { 32, 28 },
5357 .anp = 2,
5358 .atap = { 8, 12 }
5359 }, {
5360 .key = zeroed_string,
5361 .klen = 32,
5362 .result = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
5363 "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b",
5364 .rlen = 16,
5365 }
5366};
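In these AEAD encryption vectors the .result buffer carries the GCM ciphertext immediately followed by the 16-byte authentication tag, which is why .rlen is always .ilen + 16 (and exactly 16 when there is no plaintext at all). A trivial, purely illustrative consistency check over one entry:

/* Illustrative only: sanity-check one aes_gcm_enc_tv_template entry. */
static int gcm_enc_vec_ok(const struct aead_testvec *v)
{
	return v->rlen == v->ilen + 16;	/* ciphertext || 16-byte tag */
}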
5367
5368static struct aead_testvec aes_gcm_dec_tv_template[] = {
5369	{ /* From McGrew & Viega, "The Galois/Counter Mode of Operation (GCM)" - http://citeseer.ist.psu.edu/656989.html */
5370 .key = zeroed_string,
5371 .klen = 32,
5372 .input = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
5373 "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18"
5374 "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0"
5375 "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19",
5376 .ilen = 32,
5377 .result = zeroed_string,
5378 .rlen = 16,
5379 }, {
5380 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5381 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5382 "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5383 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5384 .klen = 32,
5385 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5386 "\xde\xca\xf8\x88",
5387 .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
5388 "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
5389 "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
5390 "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
5391 "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
5392 "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
5393 "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
5394 "\xbc\xc9\xf6\x62\x89\x80\x15\xad"
5395 "\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
5396 "\xec\x1a\x50\x22\x70\xe3\xcc\x6c",
5397 .ilen = 80,
5398 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5399 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5400 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5401 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5402 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5403 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5404 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5405 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5406 .rlen = 64,
5407 }, {
5408 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5409 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5410 "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5411 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5412 .klen = 32,
5413 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5414 "\xde\xca\xf8\x88",
5415 .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
5416 "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
5417 "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
5418 "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
5419 "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
5420 "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
5421 "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
5422 "\xbc\xc9\xf6\x62"
5423 "\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
5424 "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b",
5425 .ilen = 76,
5426 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5427 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5428 "\xab\xad\xda\xd2",
5429 .alen = 20,
5430 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5431 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5432 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5433 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5434 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5435 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5436 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5437 "\xba\x63\x7b\x39",
5438 .rlen = 60,
5439 .np = 2,
5440 .tap = { 48, 28 },
5441 .anp = 3,
5442 .atap = { 8, 8, 4 }
5443 }, {
5444 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5445 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5446 .klen = 16,
5447 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5448 "\xde\xca\xf8\x88",
5449 .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5450 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5451 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5452 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5453 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5454 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5455 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5456 "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
5457 "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
5458 "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4",
5459 .ilen = 80,
5460 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5461 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5462 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5463 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5464 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5465 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5466 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5467 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5468 .rlen = 64,
5469 }, {
5470 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5471 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5472 .klen = 16,
5473 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5474 "\xde\xca\xf8\x88",
5475 .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5476 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5477 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5478 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5479 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5480 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5481 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5482 "\x3d\x58\xe0\x91"
5483 "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
5484 "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47",
5485 .ilen = 76,
5486 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5487 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5488 "\xab\xad\xda\xd2",
5489 .alen = 20,
5490 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5491 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5492 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5493 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5494 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5495 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5496 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5497 "\xba\x63\x7b\x39",
5498 .rlen = 60,
5499 }, {
5500 .key = zeroed_string,
5501 .klen = 24,
5502 .input = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
5503 "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
5504 "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
5505 "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb",
5506 .ilen = 32,
5507 .result = zeroed_string,
5508 .rlen = 16,
5509 }, {
5510 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5511 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5512 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5513 .klen = 24,
5514 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5515 "\xde\xca\xf8\x88",
5516 .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5517 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5518 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5519 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5520 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5521 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5522 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5523 "\xcc\xda\x27\x10\xac\xad\xe2\x56"
5524 "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
5525 "\xb1\x18\x02\x4d\xb8\x67\x4a\x14",
5526 .ilen = 80,
5527 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5528 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5529 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5530 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5531 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5532 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5533 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5534 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5535 .rlen = 64,
5536 }, {
5537 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5538 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5539 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5540 .klen = 24,
5541 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5542 "\xde\xca\xf8\x88",
5543 .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5544 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5545 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5546 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5547 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5548 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5549 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5550 "\xcc\xda\x27\x10"
5551 "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
5552 "\x37\xba\x55\xbd\x6d\x27\x61\x8c",
5553 .ilen = 76,
5554 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5555 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5556 "\xab\xad\xda\xd2",
5557 .alen = 20,
5558 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5559 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5560 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5561 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5562 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5563 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5564 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5565 "\xba\x63\x7b\x39",
5566 .rlen = 60,
5567 }
5568};
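The .np/.tap pairs (and .anp/.atap for the associated data) in some of the entries above describe how the test harness splits a buffer into several scatterlist segments so that multi-segment handling gets exercised; the segment sizes sum to the corresponding length, e.g. 48 + 28 = 76 and 8 + 8 + 4 = 20 in the third decryption vector. A hypothetical walker over such a description (helper name and callback are assumptions, not kernel API):

/* Hypothetical helper: hand a vector's buffer to a consumer in the chunk
 * sizes listed in .tap[], the way a multi-entry scatterlist would see it. */
static void feed_chunks(const char *buf, const unsigned short *tap, int np,
			void (*consume)(const char *chunk, unsigned int len))
{
	unsigned int off = 0;
	int i;

	for (i = 0; i < np; i++) {
		consume(buf + off, tap[i]);
		off += tap[i];
	}
}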
5569
5570static struct aead_testvec aes_ccm_enc_tv_template[] = {
5571 { /* From RFC 3610 */
5572 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5573 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5574 .klen = 16,
5575 .iv = "\x01\x00\x00\x00\x03\x02\x01\x00"
5576 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5577 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5578 .alen = 8,
5579 .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5580 "\x10\x11\x12\x13\x14\x15\x16\x17"
5581 "\x18\x19\x1a\x1b\x1c\x1d\x1e",
5582 .ilen = 23,
5583 .result = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
5584 "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
5585 "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
5586 "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
5587 .rlen = 31,
5588 }, {
5589 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5590 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5591 .klen = 16,
5592 .iv = "\x01\x00\x00\x00\x07\x06\x05\x04"
5593 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5594 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5595 "\x08\x09\x0a\x0b",
5596 .alen = 12,
5597 .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5598 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5599 "\x1c\x1d\x1e\x1f",
5600 .ilen = 20,
5601 .result = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
5602 "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
5603 "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
5604 "\x7d\x9c\x2d\x93",
5605 .rlen = 28,
5606 }, {
5607 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5608 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5609 .klen = 16,
5610 .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08"
5611 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5612 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5613 .alen = 8,
5614 .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5615 "\x10\x11\x12\x13\x14\x15\x16\x17"
5616 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
5617 "\x20",
5618 .ilen = 25,
5619 .result = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
5620 "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
5621 "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
5622 "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
5623 "\x7e\x5f\x4e",
5624 .rlen = 35,
5625 }, {
5626 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5627 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5628 .klen = 16,
5629 .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
5630 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5631 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5632 "\x08\x09\x0a\x0b",
5633 .alen = 12,
5634 .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5635 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5636 "\x1c\x1d\x1e",
5637 .ilen = 19,
5638 .result = "\x07\x34\x25\x94\x15\x77\x85\x15"
5639 "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
5640 "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
5641 "\x4d\x99\x99\x88\xdd",
5642 .rlen = 29,
5643 }, {
5644 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5645 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5646 .klen = 16,
5647 .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
5648 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5649 .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
5650 .alen = 8,
5651 .input = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
5652 "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
5653 "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
5654 .ilen = 24,
5655 .result = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
5656 "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
5657 "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
5658 "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
5659 .rlen = 32,
5660 }, {
5661 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5662 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5663 .klen = 16,
5664 .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
5665 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5666 .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
5667 "\x20\xea\x60\xc0",
5668 .alen = 12,
5669 .input = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
5670 "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
5671 "\x3a\x80\x3b\xa8\x7f",
5672 .ilen = 21,
5673 .result = "\x00\x97\x69\xec\xab\xdf\x48\x62"
5674 "\x55\x94\xc5\x92\x51\xe6\x03\x57"
5675 "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
5676 "\x5a\xe0\x70\x45\x51",
5677 .rlen = 29,
5678 }, {
5679 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5680 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5681 .klen = 16,
5682 .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
5683 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5684 .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
5685 .alen = 8,
5686 .input = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
5687 "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
5688 "\x98\x09\xd6\x7d\xbe\xdd\x18",
5689 .ilen = 23,
5690 .result = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
5691 "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
5692 "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
5693 "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
5694 "\xba",
5695 .rlen = 33,
5696 },
5697};
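For the RFC 3610 CCM vectors the 16-byte .iv is laid out like the initial counter block A_0: one flags byte holding L' = L - 1 (L = 2 here, hence the leading 0x01), the 13-byte nonce, and a zeroed counter field. A sketch of that layout, with the helper name invented for illustration and the exact zeroing of the counter treated as an assumption about how the driver consumes the field:

#include <stdint.h>
#include <string.h>

/* Illustrative only: assemble a CCM counter-block style IV as used by the
 * vectors above (L = 2, so the nonce is 15 - L = 13 bytes). */
static void ccm_build_iv(uint8_t iv[16], const uint8_t nonce[13])
{
	iv[0] = 2 - 1;			/* flags byte: L' = L - 1 */
	memcpy(iv + 1, nonce, 13);	/* nonce */
	iv[14] = 0;			/* length-field / counter bytes, */
	iv[15] = 0;			/* left at zero                  */
}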
5698
5699static struct aead_testvec aes_ccm_dec_tv_template[] = {
5700 { /* From RFC 3610 */
5701 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5702 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5703 .klen = 16,
5704 .iv = "\x01\x00\x00\x00\x03\x02\x01\x00"
5705 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5706 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5707 .alen = 8,
5708 .input = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
5709 "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
5710 "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
5711 "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
5712 .ilen = 31,
5713 .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5714 "\x10\x11\x12\x13\x14\x15\x16\x17"
5715 "\x18\x19\x1a\x1b\x1c\x1d\x1e",
5716 .rlen = 23,
5717 }, {
5718 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5719 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5720 .klen = 16,
5721 .iv = "\x01\x00\x00\x00\x07\x06\x05\x04"
5722 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5723 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5724 "\x08\x09\x0a\x0b",
5725 .alen = 12,
5726 .input = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
5727 "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
5728 "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
5729 "\x7d\x9c\x2d\x93",
5730 .ilen = 28,
5731 .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5732 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5733 "\x1c\x1d\x1e\x1f",
5734 .rlen = 20,
5735 }, {
5736 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5737 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5738 .klen = 16,
5739 .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08"
5740 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5741 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5742 .alen = 8,
5743 .input = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
5744 "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
5745 "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
5746 "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
5747 "\x7e\x5f\x4e",
5748 .ilen = 35,
5749 .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5750 "\x10\x11\x12\x13\x14\x15\x16\x17"
5751 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
5752 "\x20",
5753 .rlen = 25,
5754 }, {
5755 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5756 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5757 .klen = 16,
5758 .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
5759 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5760 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5761 "\x08\x09\x0a\x0b",
5762 .alen = 12,
5763 .input = "\x07\x34\x25\x94\x15\x77\x85\x15"
5764 "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
5765 "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
5766 "\x4d\x99\x99\x88\xdd",
5767 .ilen = 29,
5768 .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5769 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5770 "\x1c\x1d\x1e",
5771 .rlen = 19,
5772 }, {
5773 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5774 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5775 .klen = 16,
5776 .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
5777 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5778 .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
5779 .alen = 8,
5780 .input = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
5781 "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
5782 "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
5783 "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
5784 .ilen = 32,
5785 .result = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
5786 "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
5787 "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
5788 .rlen = 24,
5789 }, {
5790 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5791 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5792 .klen = 16,
5793 .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
5794 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5795 .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
5796 "\x20\xea\x60\xc0",
5797 .alen = 12,
5798 .input = "\x00\x97\x69\xec\xab\xdf\x48\x62"
5799 "\x55\x94\xc5\x92\x51\xe6\x03\x57"
5800 "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
5801 "\x5a\xe0\x70\x45\x51",
5802 .ilen = 29,
5803 .result = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
5804 "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
5805 "\x3a\x80\x3b\xa8\x7f",
5806 .rlen = 21,
5807 }, {
5808 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5809 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5810 .klen = 16,
5811 .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
5812 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5813 .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
5814 .alen = 8,
5815 .input = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
5816 "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
5817 "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
5818 "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
5819 "\xba",
5820 .ilen = 33,
5821 .result = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
5822 "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
5823 "\x98\x09\xd6\x7d\xbe\xdd\x18",
5824 .rlen = 23,
5825 },
5826};
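Going the other way, each CCM decryption vector's .input is ciphertext plus the appended authentication tag, so the tag length falls out as .ilen - .rlen; the RFC 3610 vectors above use 8-byte and 10-byte tags. Purely illustrative:

/* Illustrative only: recover the tag length implied by a decryption vector. */
static inline unsigned int aead_dec_taglen(const struct aead_testvec *v)
{
	return v->ilen - v->rlen;	/* 8 or 10 for the RFC 3610 set */
}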
5827
5828/* CAST5 test vectors from RFC 2144 */
5829#define CAST5_ENC_TEST_VECTORS 3
5830#define CAST5_DEC_TEST_VECTORS 3
5831
5832static struct cipher_testvec cast5_enc_tv_template[] = {
5833 {
5834 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5835 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5836 .klen = 16,
5837 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5838 .ilen = 8,
5839 .result = "\x23\x8b\x4f\xe5\x84\x7e\x44\xb2",
5840 .rlen = 8,
5841 }, {
5842 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5843 "\x23\x45",
5844 .klen = 10,
5845 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5846 .ilen = 8,
5847 .result = "\xeb\x6a\x71\x1a\x2c\x02\x27\x1b",
5848 .rlen = 8,
5849 }, {
5850 .key = "\x01\x23\x45\x67\x12",
5851 .klen = 5,
5852 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5853 .ilen = 8,
5854 .result = "\x7a\xc8\x16\xd1\x6e\x9b\x30\x2e",
5855 .rlen = 8,
5856 },
5857};
5858
5859static struct cipher_testvec cast5_dec_tv_template[] = {
5860 {
5861 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5862 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5863 .klen = 16,
5864 .input = "\x23\x8b\x4f\xe5\x84\x7e\x44\xb2",
5865 .ilen = 8,
5866 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5867 .rlen = 8,
5868 }, {
5869 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5870 "\x23\x45",
5871 .klen = 10,
5872 .input = "\xeb\x6a\x71\x1a\x2c\x02\x27\x1b",
5873 .ilen = 8,
5874 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5875 .rlen = 8,
5876 }, {
5877 .key = "\x01\x23\x45\x67\x12",
5878 .klen = 5,
5879 .input = "\x7a\xc8\x16\xd1\x6e\x9b\x30\x2e",
5880 .ilen = 8,
5881 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5882 .rlen = 8,
5883 },
5884};
5885
5886/*
5887 * ARC4 test vectors from OpenSSL
5888 */
5889#define ARC4_ENC_TEST_VECTORS 7
5890#define ARC4_DEC_TEST_VECTORS 7
5891
5892static struct cipher_testvec arc4_enc_tv_template[] = {
5893 {
5894 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5895 .klen = 8,
5896 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5897 .ilen = 8,
5898 .result = "\x75\xb7\x87\x80\x99\xe0\xc5\x96",
5899 .rlen = 8,
5900 }, {
5901 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5902 .klen = 8,
5903 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
5904 .ilen = 8,
5905 .result = "\x74\x94\xc2\xe7\x10\x4b\x08\x79",
5906 .rlen = 8,
5907 }, {
5908 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
5909 .klen = 8,
5910 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
5911 .ilen = 8,
5912 .result = "\xde\x18\x89\x41\xa3\x37\x5d\x3a",
5913 .rlen = 8,
5914 }, {
5915 .key = "\xef\x01\x23\x45",
5916 .klen = 4,
5917 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5918 "\x00\x00\x00\x00\x00\x00\x00\x00"
5919 "\x00\x00\x00\x00",
5920 .ilen = 20,
5921 .result = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
5922 "\xbd\x61\x5a\x11\x62\xe1\xc7\xba"
5923 "\x36\xb6\x78\x58",
5924 .rlen = 20,
5925 }, {
5926 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5927 .klen = 8,
5928 .input = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5929 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5930 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5931 "\x12\x34\x56\x78",
5932 .ilen = 28,
5933 .result = "\x66\xa0\x94\x9f\x8a\xf7\xd6\x89"
5934 "\x1f\x7f\x83\x2b\xa8\x33\xc0\x0c"
5935 "\x89\x2e\xbe\x30\x14\x3c\xe2\x87"
5936 "\x40\x01\x1e\xcf",
5937 .rlen = 28,
5938 }, {
5939 .key = "\xef\x01\x23\x45",
5940 .klen = 4,
5941 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5942 "\x00\x00",
5943 .ilen = 10,
5944 .result = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
5945 "\xbd\x61",
5946 .rlen = 10,
5947 }, {
5948 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
5949 "\x00\x00\x00\x00\x00\x00\x00\x00",
5950 .klen = 16,
5951 .input = "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
5952 .ilen = 8,
5953 .result = "\x69\x72\x36\x59\x1B\x52\x42\xB1",
5954 .rlen = 8,
5955 },
5956};
5957
5958static struct cipher_testvec arc4_dec_tv_template[] = {
5959 {
5960 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5961 .klen = 8,
5962 .input = "\x75\xb7\x87\x80\x99\xe0\xc5\x96",
5963 .ilen = 8,
5964 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5965 .rlen = 8,
5966 }, {
5967 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5968 .klen = 8,
5969 .input = "\x74\x94\xc2\xe7\x10\x4b\x08\x79",
5970 .ilen = 8,
5971 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
5972 .rlen = 8,
5973 }, {
5974 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
5975 .klen = 8,
5976 .input = "\xde\x18\x89\x41\xa3\x37\x5d\x3a",
5977 .ilen = 8,
5978 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
5979 .rlen = 8,
5980 }, {
5981 .key = "\xef\x01\x23\x45",
5982 .klen = 4,
5983 .input = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
5984 "\xbd\x61\x5a\x11\x62\xe1\xc7\xba"
5985 "\x36\xb6\x78\x58",
5986 .ilen = 20,
5987 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
5988 "\x00\x00\x00\x00\x00\x00\x00\x00"
5989 "\x00\x00\x00\x00",
5990 .rlen = 20,
5991 }, {
5992 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5993 .klen = 8,
5994 .input = "\x66\xa0\x94\x9f\x8a\xf7\xd6\x89"
5995 "\x1f\x7f\x83\x2b\xa8\x33\xc0\x0c"
5996 "\x89\x2e\xbe\x30\x14\x3c\xe2\x87"
5997 "\x40\x01\x1e\xcf",
5998 .ilen = 28,
5999 .result = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
6000 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
6001 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
6002 "\x12\x34\x56\x78",
6003 .rlen = 28,
6004 }, {
6005 .key = "\xef\x01\x23\x45",
6006 .klen = 4,
6007 .input = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
6008 "\xbd\x61",
6009 .ilen = 10,
6010 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
6011 "\x00\x00",
6012 .rlen = 10,
6013 }, {
6014 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
6015 "\x00\x00\x00\x00\x00\x00\x00\x00",
6016 .klen = 16,
6017 .input = "\x69\x72\x36\x59\x1B\x52\x42\xB1",
6018 .ilen = 8,
6019 .result = "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
6020 .rlen = 8,
6021 },
6022};
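ARC4 is a pure keystream cipher, so encryption and decryption are the same XOR operation; that is why the decryption template above simply swaps .input and .result relative to the encryption template. As a point of reference, a self-contained sketch of the well-known RC4 construction (a userspace-style illustration, not the kernel's implementation) that these vectors can be checked against:

#include <stddef.h>
#include <stdint.h>

struct rc4_state {
	uint8_t S[256];
	uint8_t i, j;
};

/* Key-scheduling algorithm: initialise and shuffle the permutation S. */
static void rc4_setkey(struct rc4_state *st, const uint8_t *key, size_t klen)
{
	unsigned int k;
	uint8_t j = 0, t;

	for (k = 0; k < 256; k++)
		st->S[k] = k;
	for (k = 0; k < 256; k++) {
		j += st->S[k] + key[k % klen];
		t = st->S[k];
		st->S[k] = st->S[j];
		st->S[j] = t;
	}
	st->i = 0;
	st->j = 0;
}

/* Generate keystream bytes and XOR them onto the input (enc == dec). */
static void rc4_crypt(struct rc4_state *st, uint8_t *out,
		      const uint8_t *in, size_t len)
{
	size_t n;
	uint8_t t;

	for (n = 0; n < len; n++) {
		st->i++;
		st->j += st->S[st->i];
		t = st->S[st->i];
		st->S[st->i] = st->S[st->j];
		st->S[st->j] = t;
		out[n] = in[n] ^ st->S[(uint8_t)(st->S[st->i] + st->S[st->j])];
	}
}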
6023
6024/*
6025 * TEA test vectors
6026 */
6027#define TEA_ENC_TEST_VECTORS 4
6028#define TEA_DEC_TEST_VECTORS 4
6029
6030static struct cipher_testvec tea_enc_tv_template[] = {
6031 {
6032 .key = zeroed_string,
6033 .klen = 16,
6034 .input = zeroed_string,
6035 .ilen = 8,
6036 .result = "\x0a\x3a\xea\x41\x40\xa9\xba\x94",
6037 .rlen = 8,
6038 }, {
6039 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6040 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6041 .klen = 16,
6042 .input = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6043 .ilen = 8,
6044 .result = "\x77\x5d\x2a\x6a\xf6\xce\x92\x09",
6045 .rlen = 8,
6046 }, {
6047 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6048 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6049 .klen = 16,
6050 .input = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6051 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6052 .ilen = 16,
6053 .result = "\xbe\x7a\xbb\x81\x95\x2d\x1f\x1e"
6054 "\xdd\x89\xa1\x25\x04\x21\xdf\x95",
6055 .rlen = 16,
6056 }, {
6057 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6058 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6059 .klen = 16,
6060 .input = "\x54\x65\x61\x20\x69\x73\x20\x67"
6061 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6062 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6063 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6064 .ilen = 32,
6065 .result = "\xe0\x4d\x5d\x3c\xb7\x8c\x36\x47"
6066 "\x94\x18\x95\x91\xa9\xfc\x49\xf8"
6067 "\x44\xd1\x2d\xc2\x99\xb8\x08\x2a"
6068 "\x07\x89\x73\xc2\x45\x92\xc6\x90",
6069 .rlen = 32,
6070 }
6071};
6072
6073static struct cipher_testvec tea_dec_tv_template[] = {
6074 {
6075 .key = zeroed_string,
6076 .klen = 16,
6077 .input = "\x0a\x3a\xea\x41\x40\xa9\xba\x94",
6078 .ilen = 8,
6079 .result = zeroed_string,
6080 .rlen = 8,
6081 }, {
6082 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6083 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6084 .klen = 16,
6085 .input = "\x77\x5d\x2a\x6a\xf6\xce\x92\x09",
6086 .ilen = 8,
6087 .result = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6088 .rlen = 8,
6089 }, {
6090 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6091 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6092 .klen = 16,
6093 .input = "\xbe\x7a\xbb\x81\x95\x2d\x1f\x1e"
6094 "\xdd\x89\xa1\x25\x04\x21\xdf\x95",
6095 .ilen = 16,
6096 .result = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6097 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6098 .rlen = 16,
6099 }, {
6100 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6101 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6102 .klen = 16,
6103 .input = "\xe0\x4d\x5d\x3c\xb7\x8c\x36\x47"
6104 "\x94\x18\x95\x91\xa9\xfc\x49\xf8"
6105 "\x44\xd1\x2d\xc2\x99\xb8\x08\x2a"
6106 "\x07\x89\x73\xc2\x45\x92\xc6\x90",
6107 .ilen = 32,
6108 .result = "\x54\x65\x61\x20\x69\x73\x20\x67"
6109 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6110 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6111 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6112 .rlen = 32,
6113 }
6114};
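TEA processes a 64-bit block as two 32-bit words under a 128-bit key, running 32 cycles of the Feistel structure below. This is the textbook round function only; how the driver loads the byte strings above into 32-bit words (endianness) is an assumption not shown here, so the sketch is for orientation rather than byte-exact reproduction of the vectors:

#include <stdint.h>

/* Textbook TEA encryption of one 64-bit block (v[0], v[1]) under k[0..3]. */
static void tea_encrypt_block(uint32_t v[2], const uint32_t k[4])
{
	uint32_t v0 = v[0], v1 = v[1], sum = 0;
	const uint32_t delta = 0x9e3779b9;
	int i;

	for (i = 0; i < 32; i++) {
		sum += delta;
		v0 += ((v1 << 4) + k[0]) ^ (v1 + sum) ^ ((v1 >> 5) + k[1]);
		v1 += ((v0 << 4) + k[2]) ^ (v0 + sum) ^ ((v0 >> 5) + k[3]);
	}
	v[0] = v0;
	v[1] = v1;
}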
6115
6116/*
6117 * XTEA test vectors
6118 */
6119#define XTEA_ENC_TEST_VECTORS 4
6120#define XTEA_DEC_TEST_VECTORS 4
6121
6122static struct cipher_testvec xtea_enc_tv_template[] = {
6123 {
6124 .key = zeroed_string,
6125 .klen = 16,
6126 .input = zeroed_string,
6127 .ilen = 8,
6128 .result = "\xd8\xd4\xe9\xde\xd9\x1e\x13\xf7",
6129 .rlen = 8,
6130 }, {
6131 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6132 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6133 .klen = 16,
6134 .input = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6135 .ilen = 8,
6136 .result = "\x94\xeb\xc8\x96\x84\x6a\x49\xa8",
6137 .rlen = 8,
6138 }, {
6139 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6140 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6141 .klen = 16,
6142 .input = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6143 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6144 .ilen = 16,
6145 .result = "\x3e\xce\xae\x22\x60\x56\xa8\x9d"
6146 "\x77\x4d\xd4\xb4\x87\x24\xe3\x9a",
6147 .rlen = 16,
6148 }, {
6149 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6150 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6151 .klen = 16,
6152 .input = "\x54\x65\x61\x20\x69\x73\x20\x67"
6153 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6154 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6155 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6156 .ilen = 32,
6157 .result = "\x99\x81\x9f\x5d\x6f\x4b\x31\x3a"
6158 "\x86\xff\x6f\xd0\xe3\x87\x70\x07"
6159 "\x4d\xb8\xcf\xf3\x99\x50\xb3\xd4"
6160 "\x73\xa2\xfa\xc9\x16\x59\x5d\x81",
6161 .rlen = 32,
6162 }
6163};
6164
6165static struct cipher_testvec xtea_dec_tv_template[] = {
6166 {
6167 .key = zeroed_string,
6168 .klen = 16,
6169 .input = "\xd8\xd4\xe9\xde\xd9\x1e\x13\xf7",
6170 .ilen = 8,
6171 .result = zeroed_string,
6172 .rlen = 8,
6173 }, {
6174 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6175 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6176 .klen = 16,
6177 .input = "\x94\xeb\xc8\x96\x84\x6a\x49\xa8",
6178 .ilen = 8,
6179 .result = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6180 .rlen = 8,
6181 }, {
6182 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6183 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6184 .klen = 16,
6185 .input = "\x3e\xce\xae\x22\x60\x56\xa8\x9d"
6186 "\x77\x4d\xd4\xb4\x87\x24\xe3\x9a",
6187 .ilen = 16,
6188 .result = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6189 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6190 .rlen = 16,
6191 }, {
6192 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6193 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6194 .klen = 16,
6195 .input = "\x99\x81\x9f\x5d\x6f\x4b\x31\x3a"
6196 "\x86\xff\x6f\xd0\xe3\x87\x70\x07"
6197 "\x4d\xb8\xcf\xf3\x99\x50\xb3\xd4"
6198 "\x73\xa2\xfa\xc9\x16\x59\x5d\x81",
6199 .ilen = 32,
6200 .result = "\x54\x65\x61\x20\x69\x73\x20\x67"
6201 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6202 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6203 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6204 .rlen = 32,
6205 }
6206};
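XTEA keeps TEA's block and key sizes but reworks the key schedule: the key word used in each half-round is selected from the running sum. Again a textbook sketch of the standard 32-cycle round function; whether it reproduces the exact hex above additionally depends on the driver's word-order conventions, which are assumed rather than shown:

#include <stdint.h>

/* Textbook XTEA encryption of one 64-bit block, 32 cycles. */
static void xtea_encrypt_block(uint32_t v[2], const uint32_t k[4])
{
	uint32_t v0 = v[0], v1 = v[1], sum = 0;
	const uint32_t delta = 0x9e3779b9;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + k[sum & 3]);
		sum += delta;
		v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + k[(sum >> 11) & 3]);
	}
	v[0] = v0;
	v[1] = v1;
}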
6207
6208/*
6209 * KHAZAD test vectors.
6210 */
6211#define KHAZAD_ENC_TEST_VECTORS 5
6212#define KHAZAD_DEC_TEST_VECTORS 5
6213
6214static struct cipher_testvec khazad_enc_tv_template[] = {
6215 {
6216 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
6217 "\x00\x00\x00\x00\x00\x00\x00\x00",
6218 .klen = 16,
6219 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
6220 .ilen = 8,
6221 .result = "\x49\xa4\xce\x32\xac\x19\x0e\x3f",
6222 .rlen = 8,
6223 }, {
6224 .key = "\x38\x38\x38\x38\x38\x38\x38\x38"
6225 "\x38\x38\x38\x38\x38\x38\x38\x38",
6226 .klen = 16,
6227 .input = "\x38\x38\x38\x38\x38\x38\x38\x38",
6228 .ilen = 8,
6229 .result = "\x7e\x82\x12\xa1\xd9\x5b\xe4\xf9",
6230 .rlen = 8,
6231 }, {
6232 .key = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2"
6233 "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6234 .klen = 16,
6235 .input = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6236 .ilen = 8,
6237 .result = "\xaa\xbe\xc1\x95\xc5\x94\x1a\x9c",
6238 .rlen = 8,
6239 }, {
6240 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6241 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6242 .klen = 16,
6243 .input = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6244 .ilen = 8,
6245 .result = "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6246 .rlen = 8,
6247 }, {
6248 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6249 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6250 .klen = 16,
6251 .input = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6252 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6253 .ilen = 16,
6254 .result = "\x04\x74\xf5\x70\x50\x16\xd3\xb8"
6255 "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6256 .rlen = 16,
6257 },
6258};
6259
6260static struct cipher_testvec khazad_dec_tv_template[] = {
6261 {
6262 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
6263 "\x00\x00\x00\x00\x00\x00\x00\x00",
6264 .klen = 16,
6265 .input = "\x49\xa4\xce\x32\xac\x19\x0e\x3f",
6266 .ilen = 8,
6267 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
6268 .rlen = 8,
6269 }, {
6270 .key = "\x38\x38\x38\x38\x38\x38\x38\x38"
6271 "\x38\x38\x38\x38\x38\x38\x38\x38",
6272 .klen = 16,
6273 .input = "\x7e\x82\x12\xa1\xd9\x5b\xe4\xf9",
6274 .ilen = 8,
6275 .result = "\x38\x38\x38\x38\x38\x38\x38\x38",
6276 .rlen = 8,
6277 }, {
6278 .key = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2"
6279 "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6280 .klen = 16,
6281 .input = "\xaa\xbe\xc1\x95\xc5\x94\x1a\x9c",
6282 .ilen = 8,
6283 .result = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6284 .rlen = 8,
6285 }, {
6286 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6287 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6288 .klen = 16,
6289 .input = "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6290 .ilen = 8,
6291 .result = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6292 .rlen = 8,
6293 }, {
6294 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6295 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6296 .klen = 16,
6297 .input = "\x04\x74\xf5\x70\x50\x16\xd3\xb8"
6298 "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6299 .ilen = 16,
6300 .result = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6301 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6302 .rlen = 16,
6303 },
6304};
6305
6306/*
6307 * Anubis test vectors.
6308 */
6309
6310#define ANUBIS_ENC_TEST_VECTORS 5
6311#define ANUBIS_DEC_TEST_VECTORS 5
6312#define ANUBIS_CBC_ENC_TEST_VECTORS 2
6313#define ANUBIS_CBC_DEC_TEST_VECTORS 2
6314
6315static struct cipher_testvec anubis_enc_tv_template[] = {
6316 {
6317 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6318 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6319 .klen = 16,
6320 .input = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6321 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6322 .ilen = 16,
6323 .result = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6324 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90",
6325 .rlen = 16,
6326 }, {
6328 .key = "\x03\x03\x03\x03\x03\x03\x03\x03"
6329 "\x03\x03\x03\x03\x03\x03\x03\x03"
6330 "\x03\x03\x03\x03",
6331 .klen = 20,
6332 .input = "\x03\x03\x03\x03\x03\x03\x03\x03"
6333 "\x03\x03\x03\x03\x03\x03\x03\x03",
6334 .ilen = 16,
6335 .result = "\xdb\xf1\x42\xf4\xd1\x8a\xc7\x49"
6336 "\x87\x41\x6f\x82\x0a\x98\x64\xae",
6337 .rlen = 16,
6338 }, {
6339 .key = "\x24\x24\x24\x24\x24\x24\x24\x24"
6340 "\x24\x24\x24\x24\x24\x24\x24\x24"
6341 "\x24\x24\x24\x24\x24\x24\x24\x24"
6342 "\x24\x24\x24\x24",
6343 .klen = 28,
6344 .input = "\x24\x24\x24\x24\x24\x24\x24\x24"
6345 "\x24\x24\x24\x24\x24\x24\x24\x24",
6346 .ilen = 16,
6347 .result = "\xfd\x1b\x4a\xe3\xbf\xf0\xad\x3d"
6348 "\x06\xd3\x61\x27\xfd\x13\x9e\xde",
6349 .rlen = 16,
6350 }, {
6351 .key = "\x25\x25\x25\x25\x25\x25\x25\x25"
6352 "\x25\x25\x25\x25\x25\x25\x25\x25"
6353 "\x25\x25\x25\x25\x25\x25\x25\x25"
6354 "\x25\x25\x25\x25\x25\x25\x25\x25",
6355 .klen = 32,
6356 .input = "\x25\x25\x25\x25\x25\x25\x25\x25"
6357 "\x25\x25\x25\x25\x25\x25\x25\x25",
6358 .ilen = 16,
6359 .result = "\x1a\x91\xfb\x2b\xb7\x78\x6b\xc4"
6360 "\x17\xd9\xff\x40\x3b\x0e\xe5\xfe",
6361 .rlen = 16,
6362 }, {
6363 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6364 "\x35\x35\x35\x35\x35\x35\x35\x35"
6365 "\x35\x35\x35\x35\x35\x35\x35\x35"
6366 "\x35\x35\x35\x35\x35\x35\x35\x35"
6367 "\x35\x35\x35\x35\x35\x35\x35\x35",
6368 .klen = 40,
6369 .input = "\x35\x35\x35\x35\x35\x35\x35\x35"
6370 "\x35\x35\x35\x35\x35\x35\x35\x35",
6371 .ilen = 16,
6372 .result = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6373 "\x9e\xc6\x84\x0f\x17\x21\x07\xee",
6374 .rlen = 16,
6375 },
6376};
6377
6378static struct cipher_testvec anubis_dec_tv_template[] = {
6379 {
6380 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6381 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6382 .klen = 16,
6383 .input = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6384 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90",
6385 .ilen = 16,
6386 .result = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6387 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6388 .rlen = 16,
6389 }, {
6391 .key = "\x03\x03\x03\x03\x03\x03\x03\x03"
6392 "\x03\x03\x03\x03\x03\x03\x03\x03"
6393 "\x03\x03\x03\x03",
6394 .klen = 20,
6395 .input = "\xdb\xf1\x42\xf4\xd1\x8a\xc7\x49"
6396 "\x87\x41\x6f\x82\x0a\x98\x64\xae",
6397 .ilen = 16,
6398 .result = "\x03\x03\x03\x03\x03\x03\x03\x03"
6399 "\x03\x03\x03\x03\x03\x03\x03\x03",
6400 .rlen = 16,
6401 }, {
6402 .key = "\x24\x24\x24\x24\x24\x24\x24\x24"
6403 "\x24\x24\x24\x24\x24\x24\x24\x24"
6404 "\x24\x24\x24\x24\x24\x24\x24\x24"
6405 "\x24\x24\x24\x24",
6406 .klen = 28,
6407 .input = "\xfd\x1b\x4a\xe3\xbf\xf0\xad\x3d"
6408 "\x06\xd3\x61\x27\xfd\x13\x9e\xde",
6409 .ilen = 16,
6410 .result = "\x24\x24\x24\x24\x24\x24\x24\x24"
6411 "\x24\x24\x24\x24\x24\x24\x24\x24",
6412 .rlen = 16,
6413 }, {
6414 .key = "\x25\x25\x25\x25\x25\x25\x25\x25"
6415 "\x25\x25\x25\x25\x25\x25\x25\x25"
6416 "\x25\x25\x25\x25\x25\x25\x25\x25"
6417 "\x25\x25\x25\x25\x25\x25\x25\x25",
6418 .klen = 32,
6419 .input = "\x1a\x91\xfb\x2b\xb7\x78\x6b\xc4"
6420 "\x17\xd9\xff\x40\x3b\x0e\xe5\xfe",
6421 .ilen = 16,
6422 .result = "\x25\x25\x25\x25\x25\x25\x25\x25"
6423 "\x25\x25\x25\x25\x25\x25\x25\x25",
6424 .rlen = 16,
6425 }, {
6426 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6427 "\x35\x35\x35\x35\x35\x35\x35\x35"
6428 "\x35\x35\x35\x35\x35\x35\x35\x35"
6429 "\x35\x35\x35\x35\x35\x35\x35\x35"
6430 "\x35\x35\x35\x35\x35\x35\x35\x35",
6431 .klen = 40,
6432 .input = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6433 "\x9e\xc6\x84\x0f\x17\x21\x07\xee",
6434 .ilen = 16,
6435 .result = "\x35\x35\x35\x35\x35\x35\x35\x35"
6436 "\x35\x35\x35\x35\x35\x35\x35\x35",
6437 .rlen = 16,
6438 },
6439};
6440
6441static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
6442 {
6443 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6444 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6445 .klen = 16,
6446 .input = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6447 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6448 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6449 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6450 .ilen = 32,
6451 .result = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6452 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90"
6453 "\x86\xd8\xb5\x6f\x98\x5e\x8a\x66"
6454 "\x4f\x1f\x78\xa1\xbb\x37\xf1\xbe",
6455 .rlen = 32,
6456 }, {
6457 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6458 "\x35\x35\x35\x35\x35\x35\x35\x35"
6459 "\x35\x35\x35\x35\x35\x35\x35\x35"
6460 "\x35\x35\x35\x35\x35\x35\x35\x35"
6461 "\x35\x35\x35\x35\x35\x35\x35\x35",
6462 .klen = 40,
6463 .input = "\x35\x35\x35\x35\x35\x35\x35\x35"
6464 "\x35\x35\x35\x35\x35\x35\x35\x35"
6465 "\x35\x35\x35\x35\x35\x35\x35\x35"
6466 "\x35\x35\x35\x35\x35\x35\x35\x35",
6467 .ilen = 32,
6468 .result = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6469 "\x9e\xc6\x84\x0f\x17\x21\x07\xee"
6470 "\xa2\xbc\x06\x98\xc6\x4b\xda\x75"
6471 "\x2e\xaa\xbe\x58\xce\x01\x5b\xc7",
6472 .rlen = 32,
6473 },
6474};
6475
6476static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
6477 {
6478 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6479 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6480 .klen = 16,
6481 .input = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6482 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90"
6483 "\x86\xd8\xb5\x6f\x98\x5e\x8a\x66"
6484 "\x4f\x1f\x78\xa1\xbb\x37\xf1\xbe",
6485 .ilen = 32,
6486 .result = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6487 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6488 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6489 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6490 .rlen = 32,
6491 }, {
6492 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6493 "\x35\x35\x35\x35\x35\x35\x35\x35"
6494 "\x35\x35\x35\x35\x35\x35\x35\x35"
6495 "\x35\x35\x35\x35\x35\x35\x35\x35"
6496 "\x35\x35\x35\x35\x35\x35\x35\x35",
6497 .klen = 40,
6498 .input = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6499 "\x9e\xc6\x84\x0f\x17\x21\x07\xee"
6500 "\xa2\xbc\x06\x98\xc6\x4b\xda\x75"
6501 "\x2e\xaa\xbe\x58\xce\x01\x5b\xc7",
6502 .ilen = 32,
6503 .result = "\x35\x35\x35\x35\x35\x35\x35\x35"
6504 "\x35\x35\x35\x35\x35\x35\x35\x35"
6505 "\x35\x35\x35\x35\x35\x35\x35\x35"
6506 "\x35\x35\x35\x35\x35\x35\x35\x35",
6507 .rlen = 32,
6508 },
6509};
6510
6511/*
6512 * XETA test vectors
6513 */
6514#define XETA_ENC_TEST_VECTORS 4
6515#define XETA_DEC_TEST_VECTORS 4
6516
6517static struct cipher_testvec xeta_enc_tv_template[] = {
6518 {
6519 .key = zeroed_string,
6520 .klen = 16,
6521 .input = zeroed_string,
6522 .ilen = 8,
6523 .result = "\xaa\x22\x96\xe5\x6c\x61\xf3\x45",
6524 .rlen = 8,
6525 }, {
6526 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6527 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6528 .klen = 16,
6529 .input = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6530 .ilen = 8,
6531 .result = "\x82\x3e\xeb\x35\xdc\xdd\xd9\xc3",
6532 .rlen = 8,
6533 }, {
6534 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6535 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6536 .klen = 16,
6537 .input = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6538 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6539 .ilen = 16,
6540 .result = "\xe2\x04\xdb\xf2\x89\x85\x9e\xea"
6541 "\x61\x35\xaa\xed\xb5\xcb\x71\x2c",
6542 .rlen = 16,
6543 }, {
6544 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6545 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6546 .klen = 16,
6547 .input = "\x54\x65\x61\x20\x69\x73\x20\x67"
6548 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6549 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6550 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6551 .ilen = 32,
6552 .result = "\x0b\x03\xcd\x8a\xbe\x95\xfd\xb1"
6553 "\xc1\x44\x91\x0b\xa5\xc9\x1b\xb4"
6554 "\xa9\xda\x1e\x9e\xb1\x3e\x2a\x8f"
6555 "\xea\xa5\x6a\x85\xd1\xf4\xa8\xa5",
6556 .rlen = 32,
6557 }
6558};
6559
6560static struct cipher_testvec xeta_dec_tv_template[] = {
6561 {
6562 .key = zeroed_string,
6563 .klen = 16,
6564 .input = "\xaa\x22\x96\xe5\x6c\x61\xf3\x45",
6565 .ilen = 8,
6566 .result = zeroed_string,
6567 .rlen = 8,
6568 }, {
6569 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6570 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6571 .klen = 16,
6572 .input = "\x82\x3e\xeb\x35\xdc\xdd\xd9\xc3",
6573 .ilen = 8,
6574 .result = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6575 .rlen = 8,
6576 }, {
6577 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6578 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6579 .klen = 16,
6580 .input = "\xe2\x04\xdb\xf2\x89\x85\x9e\xea"
6581 "\x61\x35\xaa\xed\xb5\xcb\x71\x2c",
6582 .ilen = 16,
6583 .result = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6584 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6585 .rlen = 16,
6586 }, {
6587 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6588 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6589 .klen = 16,
6590 .input = "\x0b\x03\xcd\x8a\xbe\x95\xfd\xb1"
6591 "\xc1\x44\x91\x0b\xa5\xc9\x1b\xb4"
6592 "\xa9\xda\x1e\x9e\xb1\x3e\x2a\x8f"
6593 "\xea\xa5\x6a\x85\xd1\xf4\xa8\xa5",
6594 .ilen = 32,
6595 .result = "\x54\x65\x61\x20\x69\x73\x20\x67"
6596 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6597 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6598 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6599 .rlen = 32,
6600 }
6601};
6602
6603/*
6604 * FCrypt test vectors
6605 */
6606#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6607#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6608
6609static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6610 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6611 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6612 .klen = 8,
6613 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6614 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
6615 .ilen = 8,
6616 .result = "\x0E\x09\x00\xC7\x3E\xF7\xED\x41",
6617 .rlen = 8,
6618 }, {
6619 .key = "\x11\x44\x77\xAA\xDD\x00\x33\x66",
6620 .klen = 8,
6621 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6622 .input = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0",
6623 .ilen = 8,
6624 .result = "\xD8\xED\x78\x74\x77\xEC\x06\x80",
6625 .rlen = 8,
6626 }, { /* From Arla */
6627 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6628 .klen = 8,
6629 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6630 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
6631 .ilen = 48,
6632 .result = "\x00\xf0\x0e\x11\x75\xe6\x23\x82"
6633 "\xee\xac\x98\x62\x44\x51\xe4\x84"
6634 "\xc3\x59\xd8\xaa\x64\x60\xae\xf7"
6635 "\xd2\xd9\x13\x79\x72\xa3\x45\x03"
6636 "\x23\xb5\x62\xd7\x0c\xf5\x27\xd1"
6637 "\xf8\x91\x3c\xac\x44\x22\x92\xef",
6638 .rlen = 48,
6639 }, {
6640 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6641 .klen = 8,
6642 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6643 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
6644 .ilen = 48,
6645 .result = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6646 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6647 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6648 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6649 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6650 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6651 .rlen = 48,
6652 }, { /* split-page version */
6653 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6654 .klen = 8,
6655 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6656 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
6657 .ilen = 48,
6658 .result = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6659 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6660 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6661 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6662 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6663 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6664 .rlen = 48,
6665 .np = 2,
6666 .tap = { 20, 28 },
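/*
 * .np/.tap appear to request that the test harness split this 48-byte
 * buffer into a two-entry scatterlist of 20 + 28 bytes, so the PCBC
 * code is also exercised on non-contiguous input (hence the
 * "split-page version" label above).
 */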
6667 }
6668};
6669
6670static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6671 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6672 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6673 .klen = 8,
6674 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6675 .input = "\x0E\x09\x00\xC7\x3E\xF7\xED\x41",
6676 .ilen = 8,
6677 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
6678 .rlen = 8,
6679 }, {
6680 .key = "\x11\x44\x77\xAA\xDD\x00\x33\x66",
6681 .klen = 8,
6682 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6683 .input = "\xD8\xED\x78\x74\x77\xEC\x06\x80",
6684 .ilen = 8,
6685 .result = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0",
6686 .rlen = 8,
6687 }, { /* From Arla */
6688 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6689 .klen = 8,
6690 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6691 .input = "\x00\xf0\x0e\x11\x75\xe6\x23\x82"
6692 "\xee\xac\x98\x62\x44\x51\xe4\x84"
6693 "\xc3\x59\xd8\xaa\x64\x60\xae\xf7"
6694 "\xd2\xd9\x13\x79\x72\xa3\x45\x03"
6695 "\x23\xb5\x62\xd7\x0c\xf5\x27\xd1"
6696 "\xf8\x91\x3c\xac\x44\x22\x92\xef",
6697 .ilen = 48,
6698 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
6699 .rlen = 48,
6700 }, {
6701 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6702 .klen = 8,
6703 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6704 .input = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6705 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6706 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6707 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6708 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6709 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6710 .ilen = 48,
6711 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
6712 .rlen = 48,
6713 }, { /* split-page version */
6714 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6715 .klen = 8,
6716 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6717 .input = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6718 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6719 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6720 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6721 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6722 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6723 .ilen = 48,
6724 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
6725 .rlen = 48,
6726 .np = 2,
6727 .tap = { 20, 28 },
6728 }
6729};
6730
6731/*
6732 * CAMELLIA test vectors.
6733 */
6734#define CAMELLIA_ENC_TEST_VECTORS 3
6735#define CAMELLIA_DEC_TEST_VECTORS 3
6736#define CAMELLIA_CBC_ENC_TEST_VECTORS 2
6737#define CAMELLIA_CBC_DEC_TEST_VECTORS 2
6738
6739static struct cipher_testvec camellia_enc_tv_template[] = {
6740 {
6741 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6742 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6743 .klen = 16,
6744 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6745 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6746 .ilen = 16,
6747 .result = "\x67\x67\x31\x38\x54\x96\x69\x73"
6748 "\x08\x57\x06\x56\x48\xea\xbe\x43",
6749 .rlen = 16,
6750 }, {
6751 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6752 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6753 "\x00\x11\x22\x33\x44\x55\x66\x77",
6754 .klen = 24,
6755 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6756 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6757 .ilen = 16,
6758 .result = "\xb4\x99\x34\x01\xb3\xe9\x96\xf8"
6759 "\x4e\xe5\xce\xe7\xd7\x9b\x09\xb9",
6760 .rlen = 16,
6761 }, {
6762 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6763 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6764 "\x00\x11\x22\x33\x44\x55\x66\x77"
6765 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
6766 .klen = 32,
6767 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6768 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6769 .ilen = 16,
6770 .result = "\x9a\xcc\x23\x7d\xff\x16\xd7\x6c"
6771 "\x20\xef\x7c\x91\x9e\x3a\x75\x09",
6772 .rlen = 16,
6773 },
6774};
6775
6776static struct cipher_testvec camellia_dec_tv_template[] = {
6777 {
6778 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6779 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6780 .klen = 16,
6781 .input = "\x67\x67\x31\x38\x54\x96\x69\x73"
6782 "\x08\x57\x06\x56\x48\xea\xbe\x43",
6783 .ilen = 16,
6784 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6785 "\xfe\xdc\xba\x98\x76\x54\x32\x10", 39 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6786 .rlen = 16,
6787 }, {
6788 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6789 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6790 "\x00\x11\x22\x33\x44\x55\x66\x77",
6791 .klen = 24,
6792 .input = "\xb4\x99\x34\x01\xb3\xe9\x96\xf8"
6793 "\x4e\xe5\xce\xe7\xd7\x9b\x09\xb9",
6794 .ilen = 16,
6795 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6796 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6797 .rlen = 16,
6798 }, {
6799 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6800 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6801 "\x00\x11\x22\x33\x44\x55\x66\x77"
6802 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
6803 .klen = 32,
6804 .input = "\x9a\xcc\x23\x7d\xff\x16\xd7\x6c"
6805 "\x20\xef\x7c\x91\x9e\x3a\x75\x09",
6806 .ilen = 16,
6807 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6808 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6809 .rlen = 16,
6810 },
6811};
6812
6813static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6814 {
6815 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6816 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6817 .klen = 16,
6818 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
6819 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
6820 .input = "Single block msg",
6821 .ilen = 16,
6822 .result = "\xea\x32\x12\x76\x3b\x50\x10\xe7"
6823 "\x18\xf6\xfd\x5d\xf6\x8f\x13\x51",
6824 .rlen = 16,
6825 }, {
6826 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
6827 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
6828 .klen = 16,
6829 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
6830 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
6831 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
6832 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
6833 "\x10\x11\x12\x13\x14\x15\x16\x17"
6834 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
6835 .ilen = 32,
6836 .result = "\xa5\xdf\x6e\x50\xda\x70\x6c\x01"
6837 "\x4a\xab\xf3\xf2\xd6\xfc\x6c\xfd"
6838 "\x19\xb4\x3e\x57\x1c\x02\x5e\xa0"
6839 "\x15\x78\xe0\x5e\xf2\xcb\x87\x16",
6840 .rlen = 32,
6841 },
6842};
6843
6844static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6845 {
6846 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6847 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6848 .klen = 16,
6849 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
6850 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
6851 .input = "\xea\x32\x12\x76\x3b\x50\x10\xe7"
6852 "\x18\xf6\xfd\x5d\xf6\x8f\x13\x51",
6853 .ilen = 16,
6854 .result = "Single block msg",
6855 .rlen = 16,
6856 }, {
6857 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
6858 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
6859 .klen = 16,
6860 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
6861 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
6862 .input = "\xa5\xdf\x6e\x50\xda\x70\x6c\x01"
6863 "\x4a\xab\xf3\xf2\xd6\xfc\x6c\xfd"
6864 "\x19\xb4\x3e\x57\x1c\x02\x5e\xa0"
6865 "\x15\x78\xe0\x5e\xf2\xcb\x87\x16",
6866 .ilen = 32,
6867 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
6868 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
6869 "\x10\x11\x12\x13\x14\x15\x16\x17"
6870 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
6871 .rlen = 32,
6872 },
6873};
6874
6875/*
6876 * SEED test vectors
6877 */
6878#define SEED_ENC_TEST_VECTORS 4
6879#define SEED_DEC_TEST_VECTORS 4
6880
6881static struct cipher_testvec seed_enc_tv_template[] = {
6882 {
6883 .key = zeroed_string,
6884 .klen = 16,
6885 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
6886 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6887 .ilen = 16,
6888 .result = "\x5e\xba\xc6\xe0\x05\x4e\x16\x68"
6889 "\x19\xaf\xf1\xcc\x6d\x34\x6c\xdb",
6890 .rlen = 16,
6891 }, {
6892 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
6893 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6894 .klen = 16,
6895 .input = zeroed_string,
6896 .ilen = 16,
6897 .result = "\xc1\x1f\x22\xf2\x01\x40\x50\x50"
6898 "\x84\x48\x35\x97\xe4\x37\x0f\x43",
6899 .rlen = 16,
6900 }, {
6901 .key = "\x47\x06\x48\x08\x51\xe6\x1b\xe8"
6902 "\x5d\x74\xbf\xb3\xfd\x95\x61\x85",
6903 .klen = 16,
6904 .input = "\x83\xa2\xf8\xa2\x88\x64\x1f\xb9"
6905 "\xa4\xe9\xa5\xcc\x2f\x13\x1c\x7d",
6906 .ilen = 16,
6907 .result = "\xee\x54\xd1\x3e\xbc\xae\x70\x6d"
6908 "\x22\x6b\xc3\x14\x2c\xd4\x0d\x4a",
6909 .rlen = 16,
6910 }, {
6911 .key = "\x28\xdb\xc3\xbc\x49\xff\xd8\x7d"
6912 "\xcf\xa5\x09\xb1\x1d\x42\x2b\xe7",
6913 .klen = 16,
6914 .input = "\xb4\x1e\x6b\xe2\xeb\xa8\x4a\x14"
6915 "\x8e\x2e\xed\x84\x59\x3c\x5e\xc7",
6916 .ilen = 16,
6917 .result = "\x9b\x9b\x7b\xfc\xd1\x81\x3c\xb9"
6918 "\x5d\x0b\x36\x18\xf4\x0f\x51\x22",
6919 .rlen = 16,
6920 }
6921 };
6922
6923static struct cipher_testvec seed_dec_tv_template[] = {
6924 {
6925 .key = zeroed_string,
6926 .klen = 16,
6927 .input = "\x5e\xba\xc6\xe0\x05\x4e\x16\x68"
6928 "\x19\xaf\xf1\xcc\x6d\x34\x6c\xdb",
6929 .ilen = 16,
6930 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
6931 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6932 .rlen = 16,
6933 }, {
6934 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
6935 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6936 .klen = 16,
6937 .input = "\xc1\x1f\x22\xf2\x01\x40\x50\x50"
6938 "\x84\x48\x35\x97\xe4\x37\x0f\x43",
6939 .ilen = 16,
6940 .result = zeroed_string,
6941 .rlen = 16,
6942 }, {
6943 .key = "\x47\x06\x48\x08\x51\xe6\x1b\xe8"
6944 "\x5d\x74\xbf\xb3\xfd\x95\x61\x85",
6945 .klen = 16,
6946 .input = "\xee\x54\xd1\x3e\xbc\xae\x70\x6d"
6947 "\x22\x6b\xc3\x14\x2c\xd4\x0d\x4a",
6948 .ilen = 16,
6949 .result = "\x83\xa2\xf8\xa2\x88\x64\x1f\xb9"
6950 "\xa4\xe9\xa5\xcc\x2f\x13\x1c\x7d",
6951 .rlen = 16,
6952 }, {
6953 .key = "\x28\xdb\xc3\xbc\x49\xff\xd8\x7d"
6954 "\xcf\xa5\x09\xb1\x1d\x42\x2b\xe7",
6955 .klen = 16,
6956 .input = "\x9b\x9b\x7b\xfc\xd1\x81\x3c\xb9"
6957 "\x5d\x0b\x36\x18\xf4\x0f\x51\x22",
6958 .ilen = 16,
6959 .result = "\xb4\x1e\x6b\xe2\xeb\xa8\x4a\x14"
6960 "\x8e\x2e\xed\x84\x59\x3c\x5e\xc7",
6961 .rlen = 16,
6962 }
6963};
6964
6965#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6966static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6967 /*
6968 * Test vectors from verified.test-vectors submitted to ECRYPT.
6969 * They are truncated to sizes of 39, 64, 111 and 129 bytes to test
6970 * a variety of input lengths.
6971 */
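/*
 * (Salsa20 generates its keystream in 64-byte blocks, so these lengths
 * cover a partial first block, exactly one block, and messages that
 * stop part-way through the second and third blocks.)
 */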
6972 { /* Set 3, vector 0 */
6973 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
6974 "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
6975 .klen = 16,
6976 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6977 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
6978 "\x00\x00\x00\x00\x00\x00\x00\x00"
6979 "\x00\x00\x00\x00\x00\x00\x00\x00"
6980 "\x00\x00\x00\x00\x00\x00\x00\x00"
6981 "\x00\x00\x00\x00\x00\x00\x00",
6982 .ilen = 39,
6983 .result = "\x2D\xD5\xC3\xF7\xBA\x2B\x20\xF7"
6984 "\x68\x02\x41\x0C\x68\x86\x88\x89"
6985 "\x5A\xD8\xC1\xBD\x4E\xA6\xC9\xB1"
6986 "\x40\xFB\x9B\x90\xE2\x10\x49\xBF"
6987 "\x58\x3F\x52\x79\x70\xEB\xC1",
6988 .rlen = 39,
6989 }, { /* Set 5, vector 0 */
6990 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6991 "\x00\x00\x00\x00\x00\x00\x00\x00",
6992 .klen = 16,
6993 .iv = "\x80\x00\x00\x00\x00\x00\x00\x00",
6994 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
6995 "\x00\x00\x00\x00\x00\x00\x00\x00"
6996 "\x00\x00\x00\x00\x00\x00\x00\x00"
6997 "\x00\x00\x00\x00\x00\x00\x00\x00"
6998 "\x00\x00\x00\x00\x00\x00\x00\x00"
6999 "\x00\x00\x00\x00\x00\x00\x00\x00"
7000 "\x00\x00\x00\x00\x00\x00\x00\x00"
7001 "\x00\x00\x00\x00\x00\x00\x00\x00",
7002 .ilen = 64,
7003 .result = "\xB6\x6C\x1E\x44\x46\xDD\x95\x57"
7004 "\xE5\x78\xE2\x23\xB0\xB7\x68\x01"
7005 "\x7B\x23\xB2\x67\xBB\x02\x34\xAE"
7006 "\x46\x26\xBF\x44\x3F\x21\x97\x76"
7007 "\x43\x6F\xB1\x9F\xD0\xE8\x86\x6F"
7008 "\xCD\x0D\xE9\xA9\x53\x8F\x4A\x09"
7009 "\xCA\x9A\xC0\x73\x2E\x30\xBC\xF9"
7010 "\x8E\x4F\x13\xE4\xB9\xE2\x01\xD9",
7011 .rlen = 64,
7012 }, { /* Set 3, vector 27 */
7013 .key = "\x1B\x1C\x1D\x1E\x1F\x20\x21\x22"
7014 "\x23\x24\x25\x26\x27\x28\x29\x2A"
7015 "\x2B\x2C\x2D\x2E\x2F\x30\x31\x32"
7016 "\x33\x34\x35\x36\x37\x38\x39\x3A",
7017 .klen = 32,
7018 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
7019 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
7020 "\x00\x00\x00\x00\x00\x00\x00\x00"
7021 "\x00\x00\x00\x00\x00\x00\x00\x00"
7022 "\x00\x00\x00\x00\x00\x00\x00\x00"
7023 "\x00\x00\x00\x00\x00\x00\x00\x00"
7024 "\x00\x00\x00\x00\x00\x00\x00\x00"
7025 "\x00\x00\x00\x00\x00\x00\x00\x00"
7026 "\x00\x00\x00\x00\x00\x00\x00\x00"
7027 "\x00\x00\x00\x00\x00\x00\x00\x00"
7028 "\x00\x00\x00\x00\x00\x00\x00\x00"
7029 "\x00\x00\x00\x00\x00\x00\x00\x00"
7030 "\x00\x00\x00\x00\x00\x00\x00\x00"
7031 "\x00\x00\x00\x00\x00\x00\x00\x00"
7032 "\x00\x00\x00\x00\x00\x00\x00",
7033 .ilen = 111,
7034 .result = "\xAE\x39\x50\x8E\xAC\x9A\xEC\xE7"
7035 "\xBF\x97\xBB\x20\xB9\xDE\xE4\x1F"
7036 "\x87\xD9\x47\xF8\x28\x91\x35\x98"
7037 "\xDB\x72\xCC\x23\x29\x48\x56\x5E"
7038 "\x83\x7E\x0B\xF3\x7D\x5D\x38\x7B"
7039 "\x2D\x71\x02\xB4\x3B\xB5\xD8\x23"
7040 "\xB0\x4A\xDF\x3C\xEC\xB6\xD9\x3B"
7041 "\x9B\xA7\x52\xBE\xC5\xD4\x50\x59"
7042 "\x15\x14\xB4\x0E\x40\xE6\x53\xD1"
7043 "\x83\x9C\x5B\xA0\x92\x29\x6B\x5E"
7044 "\x96\x5B\x1E\x2F\xD3\xAC\xC1\x92"
7045 "\xB1\x41\x3F\x19\x2F\xC4\x3B\xC6"
7046 "\x95\x46\x45\x54\xE9\x75\x03\x08"
7047 "\x44\xAF\xE5\x8A\x81\x12\x09",
7048 .rlen = 111,
7049 }, { /* Set 5, vector 27 */
7050 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
7051 "\x00\x00\x00\x00\x00\x00\x00\x00"
7052 "\x00\x00\x00\x00\x00\x00\x00\x00"
7053 "\x00\x00\x00\x00\x00\x00\x00\x00",
7054 .klen = 32,
7055 .iv = "\x00\x00\x00\x10\x00\x00\x00\x00",
7056 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
7057 "\x00\x00\x00\x00\x00\x00\x00\x00"
7058 "\x00\x00\x00\x00\x00\x00\x00\x00"
7059 "\x00\x00\x00\x00\x00\x00\x00\x00"
7060 "\x00\x00\x00\x00\x00\x00\x00\x00"
7061 "\x00\x00\x00\x00\x00\x00\x00\x00"
7062 "\x00\x00\x00\x00\x00\x00\x00\x00"
7063 "\x00\x00\x00\x00\x00\x00\x00\x00"
7064 "\x00\x00\x00\x00\x00\x00\x00\x00"
7065 "\x00\x00\x00\x00\x00\x00\x00\x00"
7066 "\x00\x00\x00\x00\x00\x00\x00\x00"
7067 "\x00\x00\x00\x00\x00\x00\x00\x00"
7068 "\x00\x00\x00\x00\x00\x00\x00\x00"
7069 "\x00\x00\x00\x00\x00\x00\x00\x00"
7070 "\x00\x00\x00\x00\x00\x00\x00\x00"
7071 "\x00\x00\x00\x00\x00\x00\x00\x00"
7072 "\x00",
7073 .ilen = 129,
7074 .result = "\xD2\xDB\x1A\x5C\xF1\xC1\xAC\xDB"
7075 "\xE8\x1A\x7A\x43\x40\xEF\x53\x43"
7076 "\x5E\x7F\x4B\x1A\x50\x52\x3F\x8D"
7077 "\x28\x3D\xCF\x85\x1D\x69\x6E\x60"
7078 "\xF2\xDE\x74\x56\x18\x1B\x84\x10"
7079 "\xD4\x62\xBA\x60\x50\xF0\x61\xF2"
7080 "\x1C\x78\x7F\xC1\x24\x34\xAF\x58"
7081 "\xBF\x2C\x59\xCA\x90\x77\xF3\xB0"
7082 "\x5B\x4A\xDF\x89\xCE\x2C\x2F\xFC"
7083 "\x67\xF0\xE3\x45\xE8\xB3\xB3\x75"
7084 "\xA0\x95\x71\xA1\x29\x39\x94\xCA"
7085 "\x45\x2F\xBD\xCB\x10\xB6\xBE\x9F"
7086 "\x8E\xF9\xB2\x01\x0A\x5A\x0A\xB7"
7087 "\x6B\x9D\x70\x8E\x4B\xD6\x2F\xCD"
7088 "\x2E\x40\x48\x75\xE9\xE2\x21\x45"
7089 "\x0B\xC9\xB6\xB5\x66\xBC\x9A\x59"
7090 "\x5A",
7091 .rlen = 129,
7092 }, { /* large test vector generated using Crypto++ */
7093 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
7094 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
7095 "\x10\x11\x12\x13\x14\x15\x16\x17"
7096 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
7097 .klen = 32,
7098 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
7099 "\x00\x00\x00\x00\x00\x00\x00\x00",
7100 .input =
7101 "\x00\x01\x02\x03\x04\x05\x06\x07"
7102 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
7103 "\x10\x11\x12\x13\x14\x15\x16\x17"
7104 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
7105 "\x20\x21\x22\x23\x24\x25\x26\x27"
7106 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
7107 "\x30\x31\x32\x33\x34\x35\x36\x37"
7108 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
7109 "\x40\x41\x42\x43\x44\x45\x46\x47"
7110 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
7111 "\x50\x51\x52\x53\x54\x55\x56\x57"
7112 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
7113 "\x60\x61\x62\x63\x64\x65\x66\x67"
7114 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
7115 "\x70\x71\x72\x73\x74\x75\x76\x77"
7116 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
7117 "\x80\x81\x82\x83\x84\x85\x86\x87"
7118 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
7119 "\x90\x91\x92\x93\x94\x95\x96\x97"
7120 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
7121 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
7122 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
7123 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
7124 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
7125 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
7126 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
7127 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
7128 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
7129 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
7130 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
7131 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
7132 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
7133 "\x00\x03\x06\x09\x0c\x0f\x12\x15"
7134 "\x18\x1b\x1e\x21\x24\x27\x2a\x2d"
7135 "\x30\x33\x36\x39\x3c\x3f\x42\x45"
7136 "\x48\x4b\x4e\x51\x54\x57\x5a\x5d"
7137 "\x60\x63\x66\x69\x6c\x6f\x72\x75"
7138 "\x78\x7b\x7e\x81\x84\x87\x8a\x8d"
7139 "\x90\x93\x96\x99\x9c\x9f\xa2\xa5"
7140 "\xa8\xab\xae\xb1\xb4\xb7\xba\xbd"
7141 "\xc0\xc3\xc6\xc9\xcc\xcf\xd2\xd5"
7142 "\xd8\xdb\xde\xe1\xe4\xe7\xea\xed"
7143 "\xf0\xf3\xf6\xf9\xfc\xff\x02\x05"
7144 "\x08\x0b\x0e\x11\x14\x17\x1a\x1d"
7145 "\x20\x23\x26\x29\x2c\x2f\x32\x35"
7146 "\x38\x3b\x3e\x41\x44\x47\x4a\x4d"
7147 "\x50\x53\x56\x59\x5c\x5f\x62\x65"
7148 "\x68\x6b\x6e\x71\x74\x77\x7a\x7d"
7149 "\x80\x83\x86\x89\x8c\x8f\x92\x95"
7150 "\x98\x9b\x9e\xa1\xa4\xa7\xaa\xad"
7151 "\xb0\xb3\xb6\xb9\xbc\xbf\xc2\xc5"
7152 "\xc8\xcb\xce\xd1\xd4\xd7\xda\xdd"
7153 "\xe0\xe3\xe6\xe9\xec\xef\xf2\xf5"
7154 "\xf8\xfb\xfe\x01\x04\x07\x0a\x0d"
7155 "\x10\x13\x16\x19\x1c\x1f\x22\x25"
7156 "\x28\x2b\x2e\x31\x34\x37\x3a\x3d"
7157 "\x40\x43\x46\x49\x4c\x4f\x52\x55"
7158 "\x58\x5b\x5e\x61\x64\x67\x6a\x6d"
7159 "\x70\x73\x76\x79\x7c\x7f\x82\x85"
7160 "\x88\x8b\x8e\x91\x94\x97\x9a\x9d"
7161 "\xa0\xa3\xa6\xa9\xac\xaf\xb2\xb5"
7162 "\xb8\xbb\xbe\xc1\xc4\xc7\xca\xcd"
7163 "\xd0\xd3\xd6\xd9\xdc\xdf\xe2\xe5"
7164 "\xe8\xeb\xee\xf1\xf4\xf7\xfa\xfd"
7165 "\x00\x05\x0a\x0f\x14\x19\x1e\x23"
7166 "\x28\x2d\x32\x37\x3c\x41\x46\x4b"
7167 "\x50\x55\x5a\x5f\x64\x69\x6e\x73"
7168 "\x78\x7d\x82\x87\x8c\x91\x96\x9b"
7169 "\xa0\xa5\xaa\xaf\xb4\xb9\xbe\xc3"
7170 "\xc8\xcd\xd2\xd7\xdc\xe1\xe6\xeb"
7171 "\xf0\xf5\xfa\xff\x04\x09\x0e\x13"
7172 "\x18\x1d\x22\x27\x2c\x31\x36\x3b"
7173 "\x40\x45\x4a\x4f\x54\x59\x5e\x63"
7174 "\x68\x6d\x72\x77\x7c\x81\x86\x8b"
7175 "\x90\x95\x9a\x9f\xa4\xa9\xae\xb3"
7176 "\xb8\xbd\xc2\xc7\xcc\xd1\xd6\xdb"
7177 "\xe0\xe5\xea\xef\xf4\xf9\xfe\x03"
7178 "\x08\x0d\x12\x17\x1c\x21\x26\x2b"
7179 "\x30\x35\x3a\x3f\x44\x49\x4e\x53"
7180 "\x58\x5d\x62\x67\x6c\x71\x76\x7b"
7181 "\x80\x85\x8a\x8f\x94\x99\x9e\xa3"
7182 "\xa8\xad\xb2\xb7\xbc\xc1\xc6\xcb"
7183 "\xd0\xd5\xda\xdf\xe4\xe9\xee\xf3"
7184 "\xf8\xfd\x02\x07\x0c\x11\x16\x1b"
7185 "\x20\x25\x2a\x2f\x34\x39\x3e\x43"
7186 "\x48\x4d\x52\x57\x5c\x61\x66\x6b"
7187 "\x70\x75\x7a\x7f\x84\x89\x8e\x93"
7188 "\x98\x9d\xa2\xa7\xac\xb1\xb6\xbb"
7189 "\xc0\xc5\xca\xcf\xd4\xd9\xde\xe3"
7190 "\xe8\xed\xf2\xf7\xfc\x01\x06\x0b"
7191 "\x10\x15\x1a\x1f\x24\x29\x2e\x33"
7192 "\x38\x3d\x42\x47\x4c\x51\x56\x5b"
7193 "\x60\x65\x6a\x6f\x74\x79\x7e\x83"
7194 "\x88\x8d\x92\x97\x9c\xa1\xa6\xab"
7195 "\xb0\xb5\xba\xbf\xc4\xc9\xce\xd3"
7196 "\xd8\xdd\xe2\xe7\xec\xf1\xf6\xfb"
7197 "\x00\x07\x0e\x15\x1c\x23\x2a\x31"
7198 "\x38\x3f\x46\x4d\x54\x5b\x62\x69"
7199 "\x70\x77\x7e\x85\x8c\x93\x9a\xa1"
7200 "\xa8\xaf\xb6\xbd\xc4\xcb\xd2\xd9"
7201 "\xe0\xe7\xee\xf5\xfc\x03\x0a\x11"
7202 "\x18\x1f\x26\x2d\x34\x3b\x42\x49"
7203 "\x50\x57\x5e\x65\x6c\x73\x7a\x81"
7204 "\x88\x8f\x96\x9d\xa4\xab\xb2\xb9"
7205 "\xc0\xc7\xce\xd5\xdc\xe3\xea\xf1"
7206 "\xf8\xff\x06\x0d\x14\x1b\x22\x29"
7207 "\x30\x37\x3e\x45\x4c\x53\x5a\x61"
7208 "\x68\x6f\x76\x7d\x84\x8b\x92\x99"
7209 "\xa0\xa7\xae\xb5\xbc\xc3\xca\xd1"
7210 "\xd8\xdf\xe6\xed\xf4\xfb\x02\x09"
7211 "\x10\x17\x1e\x25\x2c\x33\x3a\x41"
7212 "\x48\x4f\x56\x5d\x64\x6b\x72\x79"
7213 "\x80\x87\x8e\x95\x9c\xa3\xaa\xb1"
7214 "\xb8\xbf\xc6\xcd\xd4\xdb\xe2\xe9"
7215 "\xf0\xf7\xfe\x05\x0c\x13\x1a\x21"
7216 "\x28\x2f\x36\x3d\x44\x4b\x52\x59"
7217 "\x60\x67\x6e\x75\x7c\x83\x8a\x91"
7218 "\x98\x9f\xa6\xad\xb4\xbb\xc2\xc9"
7219 "\xd0\xd7\xde\xe5\xec\xf3\xfa\x01"
7220 "\x08\x0f\x16\x1d\x24\x2b\x32\x39"
7221 "\x40\x47\x4e\x55\x5c\x63\x6a\x71"
7222 "\x78\x7f\x86\x8d\x94\x9b\xa2\xa9"
7223 "\xb0\xb7\xbe\xc5\xcc\xd3\xda\xe1"
7224 "\xe8\xef\xf6\xfd\x04\x0b\x12\x19"
7225 "\x20\x27\x2e\x35\x3c\x43\x4a\x51"
7226 "\x58\x5f\x66\x6d\x74\x7b\x82\x89"
7227 "\x90\x97\x9e\xa5\xac\xb3\xba\xc1"
7228 "\xc8\xcf\xd6\xdd\xe4\xeb\xf2\xf9"
7229 "\x00\x09\x12\x1b\x24\x2d\x36\x3f"
7230 "\x48\x51\x5a\x63\x6c\x75\x7e\x87"
7231 "\x90\x99\xa2\xab\xb4\xbd\xc6\xcf"
7232 "\xd8\xe1\xea\xf3\xfc\x05\x0e\x17"
7233 "\x20\x29\x32\x3b\x44\x4d\x56\x5f"
7234 "\x68\x71\x7a\x83\x8c\x95\x9e\xa7"
7235 "\xb0\xb9\xc2\xcb\xd4\xdd\xe6\xef"
7236 "\xf8\x01\x0a\x13\x1c\x25\x2e\x37"
7237 "\x40\x49\x52\x5b\x64\x6d\x76\x7f"
7238 "\x88\x91\x9a\xa3\xac\xb5\xbe\xc7"
7239 "\xd0\xd9\xe2\xeb\xf4\xfd\x06\x0f"
7240 "\x18\x21\x2a\x33\x3c\x45\x4e\x57"
7241 "\x60\x69\x72\x7b\x84\x8d\x96\x9f"
7242 "\xa8\xb1\xba\xc3\xcc\xd5\xde\xe7"
7243 "\xf0\xf9\x02\x0b\x14\x1d\x26\x2f"
7244 "\x38\x41\x4a\x53\x5c\x65\x6e\x77"
7245 "\x80\x89\x92\x9b\xa4\xad\xb6\xbf"
7246 "\xc8\xd1\xda\xe3\xec\xf5\xfe\x07"
7247 "\x10\x19\x22\x2b\x34\x3d\x46\x4f"
7248 "\x58\x61\x6a\x73\x7c\x85\x8e\x97"
7249 "\xa0\xa9\xb2\xbb\xc4\xcd\xd6\xdf"
7250 "\xe8\xf1\xfa\x03\x0c\x15\x1e\x27"
7251 "\x30\x39\x42\x4b\x54\x5d\x66\x6f"
7252 "\x78\x81\x8a\x93\x9c\xa5\xae\xb7"
7253 "\xc0\xc9\xd2\xdb\xe4\xed\xf6\xff"
7254 "\x08\x11\x1a\x23\x2c\x35\x3e\x47"
7255 "\x50\x59\x62\x6b\x74\x7d\x86\x8f"
7256 "\x98\xa1\xaa\xb3\xbc\xc5\xce\xd7"
7257 "\xe0\xe9\xf2\xfb\x04\x0d\x16\x1f"
7258 "\x28\x31\x3a\x43\x4c\x55\x5e\x67"
7259 "\x70\x79\x82\x8b\x94\x9d\xa6\xaf"
7260 "\xb8\xc1\xca\xd3\xdc\xe5\xee\xf7"
7261 "\x00\x0b\x16\x21\x2c\x37\x42\x4d"
7262 "\x58\x63\x6e\x79\x84\x8f\x9a\xa5"
7263 "\xb0\xbb\xc6\xd1\xdc\xe7\xf2\xfd"
7264 "\x08\x13\x1e\x29\x34\x3f\x4a\x55"
7265 "\x60\x6b\x76\x81\x8c\x97\xa2\xad"
7266 "\xb8\xc3\xce\xd9\xe4\xef\xfa\x05"
7267 "\x10\x1b\x26\x31\x3c\x47\x52\x5d"
7268 "\x68\x73\x7e\x89\x94\x9f\xaa\xb5"
7269 "\xc0\xcb\xd6\xe1\xec\xf7\x02\x0d"
7270 "\x18\x23\x2e\x39\x44\x4f\x5a\x65"
7271 "\x70\x7b\x86\x91\x9c\xa7\xb2\xbd"
7272 "\xc8\xd3\xde\xe9\xf4\xff\x0a\x15"
7273 "\x20\x2b\x36\x41\x4c\x57\x62\x6d"
7274 "\x78\x83\x8e\x99\xa4\xaf\xba\xc5"
7275 "\xd0\xdb\xe6\xf1\xfc\x07\x12\x1d"
7276 "\x28\x33\x3e\x49\x54\x5f\x6a\x75"
7277 "\x80\x8b\x96\xa1\xac\xb7\xc2\xcd"
7278 "\xd8\xe3\xee\xf9\x04\x0f\x1a\x25"
7279 "\x30\x3b\x46\x51\x5c\x67\x72\x7d"
7280 "\x88\x93\x9e\xa9\xb4\xbf\xca\xd5"
7281 "\xe0\xeb\xf6\x01\x0c\x17\x22\x2d"
7282 "\x38\x43\x4e\x59\x64\x6f\x7a\x85"
7283 "\x90\x9b\xa6\xb1\xbc\xc7\xd2\xdd"
7284 "\xe8\xf3\xfe\x09\x14\x1f\x2a\x35"
7285 "\x40\x4b\x56\x61\x6c\x77\x82\x8d"
7286 "\x98\xa3\xae\xb9\xc4\xcf\xda\xe5"
7287 "\xf0\xfb\x06\x11\x1c\x27\x32\x3d"
7288 "\x48\x53\x5e\x69\x74\x7f\x8a\x95"
7289 "\xa0\xab\xb6\xc1\xcc\xd7\xe2\xed"
7290 "\xf8\x03\x0e\x19\x24\x2f\x3a\x45"
7291 "\x50\x5b\x66\x71\x7c\x87\x92\x9d"
7292 "\xa8\xb3\xbe\xc9\xd4\xdf\xea\xf5"
7293 "\x00\x0d\x1a\x27\x34\x41\x4e\x5b"
7294 "\x68\x75\x82\x8f\x9c\xa9\xb6\xc3"
7295 "\xd0\xdd\xea\xf7\x04\x11\x1e\x2b"
7296 "\x38\x45\x52\x5f\x6c\x79\x86\x93"
7297 "\xa0\xad\xba\xc7\xd4\xe1\xee\xfb"
7298 "\x08\x15\x22\x2f\x3c\x49\x56\x63"
7299 "\x70\x7d\x8a\x97\xa4\xb1\xbe\xcb"
7300 "\xd8\xe5\xf2\xff\x0c\x19\x26\x33"
7301 "\x40\x4d\x5a\x67\x74\x81\x8e\x9b"
7302 "\xa8\xb5\xc2\xcf\xdc\xe9\xf6\x03"
7303 "\x10\x1d\x2a\x37\x44\x51\x5e\x6b"
7304 "\x78\x85\x92\x9f\xac\xb9\xc6\xd3"
7305 "\xe0\xed\xfa\x07\x14\x21\x2e\x3b"
7306 "\x48\x55\x62\x6f\x7c\x89\x96\xa3"
7307 "\xb0\xbd\xca\xd7\xe4\xf1\xfe\x0b"
7308 "\x18\x25\x32\x3f\x4c\x59\x66\x73"
7309 "\x80\x8d\x9a\xa7\xb4\xc1\xce\xdb"
7310 "\xe8\xf5\x02\x0f\x1c\x29\x36\x43"
7311 "\x50\x5d\x6a\x77\x84\x91\x9e\xab"
7312 "\xb8\xc5\xd2\xdf\xec\xf9\x06\x13"
7313 "\x20\x2d\x3a\x47\x54\x61\x6e\x7b"
7314 "\x88\x95\xa2\xaf\xbc\xc9\xd6\xe3"
7315 "\xf0\xfd\x0a\x17\x24\x31\x3e\x4b"
7316 "\x58\x65\x72\x7f\x8c\x99\xa6\xb3"
7317 "\xc0\xcd\xda\xe7\xf4\x01\x0e\x1b"
7318 "\x28\x35\x42\x4f\x5c\x69\x76\x83"
7319 "\x90\x9d\xaa\xb7\xc4\xd1\xde\xeb"
7320 "\xf8\x05\x12\x1f\x2c\x39\x46\x53"
7321 "\x60\x6d\x7a\x87\x94\xa1\xae\xbb"
7322 "\xc8\xd5\xe2\xef\xfc\x09\x16\x23"
7323 "\x30\x3d\x4a\x57\x64\x71\x7e\x8b"
7324 "\x98\xa5\xb2\xbf\xcc\xd9\xe6\xf3"
7325 "\x00\x0f\x1e\x2d\x3c\x4b\x5a\x69"
7326 "\x78\x87\x96\xa5\xb4\xc3\xd2\xe1"
7327 "\xf0\xff\x0e\x1d\x2c\x3b\x4a\x59"
7328 "\x68\x77\x86\x95\xa4\xb3\xc2\xd1"
7329 "\xe0\xef\xfe\x0d\x1c\x2b\x3a\x49"
7330 "\x58\x67\x76\x85\x94\xa3\xb2\xc1"
7331 "\xd0\xdf\xee\xfd\x0c\x1b\x2a\x39"
7332 "\x48\x57\x66\x75\x84\x93\xa2\xb1"
7333 "\xc0\xcf\xde\xed\xfc\x0b\x1a\x29"
7334 "\x38\x47\x56\x65\x74\x83\x92\xa1"
7335 "\xb0\xbf\xce\xdd\xec\xfb\x0a\x19"
7336 "\x28\x37\x46\x55\x64\x73\x82\x91"
7337 "\xa0\xaf\xbe\xcd\xdc\xeb\xfa\x09"
7338 "\x18\x27\x36\x45\x54\x63\x72\x81"
7339 "\x90\x9f\xae\xbd\xcc\xdb\xea\xf9"
7340 "\x08\x17\x26\x35\x44\x53\x62\x71"
7341 "\x80\x8f\x9e\xad\xbc\xcb\xda\xe9"
7342 "\xf8\x07\x16\x25\x34\x43\x52\x61"
7343 "\x70\x7f\x8e\x9d\xac\xbb\xca\xd9"
7344 "\xe8\xf7\x06\x15\x24\x33\x42\x51"
7345 "\x60\x6f\x7e\x8d\x9c\xab\xba\xc9"
7346 "\xd8\xe7\xf6\x05\x14\x23\x32\x41"
7347 "\x50\x5f\x6e\x7d\x8c\x9b\xaa\xb9"
7348 "\xc8\xd7\xe6\xf5\x04\x13\x22\x31"
7349 "\x40\x4f\x5e\x6d\x7c\x8b\x9a\xa9"
7350 "\xb8\xc7\xd6\xe5\xf4\x03\x12\x21"
7351 "\x30\x3f\x4e\x5d\x6c\x7b\x8a\x99"
7352 "\xa8\xb7\xc6\xd5\xe4\xf3\x02\x11"
7353 "\x20\x2f\x3e\x4d\x5c\x6b\x7a\x89"
7354 "\x98\xa7\xb6\xc5\xd4\xe3\xf2\x01"
7355 "\x10\x1f\x2e\x3d\x4c\x5b\x6a\x79"
7356 "\x88\x97\xa6\xb5\xc4\xd3\xe2\xf1"
7357 "\x00\x11\x22\x33\x44\x55\x66\x77"
7358 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff"
7359 "\x10\x21\x32\x43\x54\x65\x76\x87"
7360 "\x98\xa9\xba\xcb\xdc\xed\xfe\x0f"
7361 "\x20\x31\x42\x53\x64\x75\x86\x97"
7362 "\xa8\xb9\xca\xdb\xec\xfd\x0e\x1f"
7363 "\x30\x41\x52\x63\x74\x85\x96\xa7"
7364 "\xb8\xc9\xda\xeb\xfc\x0d\x1e\x2f"
7365 "\x40\x51\x62\x73\x84\x95\xa6\xb7"
7366 "\xc8\xd9\xea\xfb\x0c\x1d\x2e\x3f"
7367 "\x50\x61\x72\x83\x94\xa5\xb6\xc7"
7368 "\xd8\xe9\xfa\x0b\x1c\x2d\x3e\x4f"
7369 "\x60\x71\x82\x93\xa4\xb5\xc6\xd7"
7370 "\xe8\xf9\x0a\x1b\x2c\x3d\x4e\x5f"
7371 "\x70\x81\x92\xa3\xb4\xc5\xd6\xe7"
7372 "\xf8\x09\x1a\x2b\x3c\x4d\x5e\x6f"
7373 "\x80\x91\xa2\xb3\xc4\xd5\xe6\xf7"
7374 "\x08\x19\x2a\x3b\x4c\x5d\x6e\x7f"
7375 "\x90\xa1\xb2\xc3\xd4\xe5\xf6\x07"
7376 "\x18\x29\x3a\x4b\x5c\x6d\x7e\x8f"
7377 "\xa0\xb1\xc2\xd3\xe4\xf5\x06\x17"
7378 "\x28\x39\x4a\x5b\x6c\x7d\x8e\x9f"
7379 "\xb0\xc1\xd2\xe3\xf4\x05\x16\x27"
7380 "\x38\x49\x5a\x6b\x7c\x8d\x9e\xaf"
7381 "\xc0\xd1\xe2\xf3\x04\x15\x26\x37"
7382 "\x48\x59\x6a\x7b\x8c\x9d\xae\xbf"
7383 "\xd0\xe1\xf2\x03\x14\x25\x36\x47"
7384 "\x58\x69\x7a\x8b\x9c\xad\xbe\xcf"
7385 "\xe0\xf1\x02\x13\x24\x35\x46\x57"
7386 "\x68\x79\x8a\x9b\xac\xbd\xce\xdf"
7387 "\xf0\x01\x12\x23\x34\x45\x56\x67"
7388 "\x78\x89\x9a\xab\xbc\xcd\xde\xef"
7389 "\x00\x13\x26\x39\x4c\x5f\x72\x85"
7390 "\x98\xab\xbe\xd1\xe4\xf7\x0a\x1d"
7391 "\x30\x43\x56\x69\x7c\x8f\xa2\xb5"
7392 "\xc8\xdb\xee\x01\x14\x27\x3a\x4d"
7393 "\x60\x73\x86\x99\xac\xbf\xd2\xe5"
7394 "\xf8\x0b\x1e\x31\x44\x57\x6a\x7d"
7395 "\x90\xa3\xb6\xc9\xdc\xef\x02\x15"
7396 "\x28\x3b\x4e\x61\x74\x87\x9a\xad"
7397 "\xc0\xd3\xe6\xf9\x0c\x1f\x32\x45"
7398 "\x58\x6b\x7e\x91\xa4\xb7\xca\xdd"
7399 "\xf0\x03\x16\x29\x3c\x4f\x62\x75"
7400 "\x88\x9b\xae\xc1\xd4\xe7\xfa\x0d"
7401 "\x20\x33\x46\x59\x6c\x7f\x92\xa5"
7402 "\xb8\xcb\xde\xf1\x04\x17\x2a\x3d"
7403 "\x50\x63\x76\x89\x9c\xaf\xc2\xd5"
7404 "\xe8\xfb\x0e\x21\x34\x47\x5a\x6d"
7405 "\x80\x93\xa6\xb9\xcc\xdf\xf2\x05"
7406 "\x18\x2b\x3e\x51\x64\x77\x8a\x9d"
7407 "\xb0\xc3\xd6\xe9\xfc\x0f\x22\x35"
7408 "\x48\x5b\x6e\x81\x94\xa7\xba\xcd"
7409 "\xe0\xf3\x06\x19\x2c\x3f\x52\x65"
7410 "\x78\x8b\x9e\xb1\xc4\xd7\xea\xfd"
7411 "\x10\x23\x36\x49\x5c\x6f\x82\x95"
7412 "\xa8\xbb\xce\xe1\xf4\x07\x1a\x2d"
7413 "\x40\x53\x66\x79\x8c\x9f\xb2\xc5"
7414 "\xd8\xeb\xfe\x11\x24\x37\x4a\x5d"
7415 "\x70\x83\x96\xa9\xbc\xcf\xe2\xf5"
7416 "\x08\x1b\x2e\x41\x54\x67\x7a\x8d"
7417 "\xa0\xb3\xc6\xd9\xec\xff\x12\x25"
7418 "\x38\x4b\x5e\x71\x84\x97\xaa\xbd"
7419 "\xd0\xe3\xf6\x09\x1c\x2f\x42\x55"
7420 "\x68\x7b\x8e\xa1\xb4\xc7\xda\xed"
7421 "\x00\x15\x2a\x3f\x54\x69\x7e\x93"
7422 "\xa8\xbd\xd2\xe7\xfc\x11\x26\x3b"
7423 "\x50\x65\x7a\x8f\xa4\xb9\xce\xe3"
7424 "\xf8\x0d\x22\x37\x4c\x61\x76\x8b"
7425 "\xa0\xb5\xca\xdf\xf4\x09\x1e\x33"
7426 "\x48\x5d\x72\x87\x9c\xb1\xc6\xdb"
7427 "\xf0\x05\x1a\x2f\x44\x59\x6e\x83"
7428 "\x98\xad\xc2\xd7\xec\x01\x16\x2b"
7429 "\x40\x55\x6a\x7f\x94\xa9\xbe\xd3"
7430 "\xe8\xfd\x12\x27\x3c\x51\x66\x7b"
7431 "\x90\xa5\xba\xcf\xe4\xf9\x0e\x23"
7432 "\x38\x4d\x62\x77\x8c\xa1\xb6\xcb"
7433 "\xe0\xf5\x0a\x1f\x34\x49\x5e\x73"
7434 "\x88\x9d\xb2\xc7\xdc\xf1\x06\x1b"
7435 "\x30\x45\x5a\x6f\x84\x99\xae\xc3"
7436 "\xd8\xed\x02\x17\x2c\x41\x56\x6b"
7437 "\x80\x95\xaa\xbf\xd4\xe9\xfe\x13"
7438 "\x28\x3d\x52\x67\x7c\x91\xa6\xbb"
7439 "\xd0\xe5\xfa\x0f\x24\x39\x4e\x63"
7440 "\x78\x8d\xa2\xb7\xcc\xe1\xf6\x0b"
7441 "\x20\x35\x4a\x5f\x74\x89\x9e\xb3"
7442 "\xc8\xdd\xf2\x07\x1c\x31\x46\x5b"
7443 "\x70\x85\x9a\xaf\xc4\xd9\xee\x03"
7444 "\x18\x2d\x42\x57\x6c\x81\x96\xab"
7445 "\xc0\xd5\xea\xff\x14\x29\x3e\x53"
7446 "\x68\x7d\x92\xa7\xbc\xd1\xe6\xfb"
7447 "\x10\x25\x3a\x4f\x64\x79\x8e\xa3"
7448 "\xb8\xcd\xe2\xf7\x0c\x21\x36\x4b"
7449 "\x60\x75\x8a\x9f\xb4\xc9\xde\xf3"
7450 "\x08\x1d\x32\x47\x5c\x71\x86\x9b"
7451 "\xb0\xc5\xda\xef\x04\x19\x2e\x43"
7452 "\x58\x6d\x82\x97\xac\xc1\xd6\xeb"
7453 "\x00\x17\x2e\x45\x5c\x73\x8a\xa1"
7454 "\xb8\xcf\xe6\xfd\x14\x2b\x42\x59"
7455 "\x70\x87\x9e\xb5\xcc\xe3\xfa\x11"
7456 "\x28\x3f\x56\x6d\x84\x9b\xb2\xc9"
7457 "\xe0\xf7\x0e\x25\x3c\x53\x6a\x81"
7458 "\x98\xaf\xc6\xdd\xf4\x0b\x22\x39"
7459 "\x50\x67\x7e\x95\xac\xc3\xda\xf1"
7460 "\x08\x1f\x36\x4d\x64\x7b\x92\xa9"
7461 "\xc0\xd7\xee\x05\x1c\x33\x4a\x61"
7462 "\x78\x8f\xa6\xbd\xd4\xeb\x02\x19"
7463 "\x30\x47\x5e\x75\x8c\xa3\xba\xd1"
7464 "\xe8\xff\x16\x2d\x44\x5b\x72\x89"
7465 "\xa0\xb7\xce\xe5\xfc\x13\x2a\x41"
7466 "\x58\x6f\x86\x9d\xb4\xcb\xe2\xf9"
7467 "\x10\x27\x3e\x55\x6c\x83\x9a\xb1"
7468 "\xc8\xdf\xf6\x0d\x24\x3b\x52\x69"
7469 "\x80\x97\xae\xc5\xdc\xf3\x0a\x21"
7470 "\x38\x4f\x66\x7d\x94\xab\xc2\xd9"
7471 "\xf0\x07\x1e\x35\x4c\x63\x7a\x91"
7472 "\xa8\xbf\xd6\xed\x04\x1b\x32\x49"
7473 "\x60\x77\x8e\xa5\xbc\xd3\xea\x01"
7474 "\x18\x2f\x46\x5d\x74\x8b\xa2\xb9"
7475 "\xd0\xe7\xfe\x15\x2c\x43\x5a\x71"
7476 "\x88\x9f\xb6\xcd\xe4\xfb\x12\x29"
7477 "\x40\x57\x6e\x85\x9c\xb3\xca\xe1"
7478 "\xf8\x0f\x26\x3d\x54\x6b\x82\x99"
7479 "\xb0\xc7\xde\xf5\x0c\x23\x3a\x51"
7480 "\x68\x7f\x96\xad\xc4\xdb\xf2\x09"
7481 "\x20\x37\x4e\x65\x7c\x93\xaa\xc1"
7482 "\xd8\xef\x06\x1d\x34\x4b\x62\x79"
7483 "\x90\xa7\xbe\xd5\xec\x03\x1a\x31"
7484 "\x48\x5f\x76\x8d\xa4\xbb\xd2\xe9"
7485 "\x00\x19\x32\x4b\x64\x7d\x96\xaf"
7486 "\xc8\xe1\xfa\x13\x2c\x45\x5e\x77"
7487 "\x90\xa9\xc2\xdb\xf4\x0d\x26\x3f"
7488 "\x58\x71\x8a\xa3\xbc\xd5\xee\x07"
7489 "\x20\x39\x52\x6b\x84\x9d\xb6\xcf"
7490 "\xe8\x01\x1a\x33\x4c\x65\x7e\x97"
7491 "\xb0\xc9\xe2\xfb\x14\x2d\x46\x5f"
7492 "\x78\x91\xaa\xc3\xdc\xf5\x0e\x27"
7493 "\x40\x59\x72\x8b\xa4\xbd\xd6\xef"
7494 "\x08\x21\x3a\x53\x6c\x85\x9e\xb7"
7495 "\xd0\xe9\x02\x1b\x34\x4d\x66\x7f"
7496 "\x98\xb1\xca\xe3\xfc\x15\x2e\x47"
7497 "\x60\x79\x92\xab\xc4\xdd\xf6\x0f"
7498 "\x28\x41\x5a\x73\x8c\xa5\xbe\xd7"
7499 "\xf0\x09\x22\x3b\x54\x6d\x86\x9f"
7500 "\xb8\xd1\xea\x03\x1c\x35\x4e\x67"
7501 "\x80\x99\xb2\xcb\xe4\xfd\x16\x2f"
7502 "\x48\x61\x7a\x93\xac\xc5\xde\xf7"
7503 "\x10\x29\x42\x5b\x74\x8d\xa6\xbf"
7504 "\xd8\xf1\x0a\x23\x3c\x55\x6e\x87"
7505 "\xa0\xb9\xd2\xeb\x04\x1d\x36\x4f"
7506 "\x68\x81\x9a\xb3\xcc\xe5\xfe\x17"
7507 "\x30\x49\x62\x7b\x94\xad\xc6\xdf"
7508 "\xf8\x11\x2a\x43\x5c\x75\x8e\xa7"
7509 "\xc0\xd9\xf2\x0b\x24\x3d\x56\x6f"
7510 "\x88\xa1\xba\xd3\xec\x05\x1e\x37"
7511 "\x50\x69\x82\x9b\xb4\xcd\xe6\xff"
7512 "\x18\x31\x4a\x63\x7c\x95\xae\xc7"
7513 "\xe0\xf9\x12\x2b\x44\x5d\x76\x8f"
7514 "\xa8\xc1\xda\xf3\x0c\x25\x3e\x57"
7515 "\x70\x89\xa2\xbb\xd4\xed\x06\x1f"
7516 "\x38\x51\x6a\x83\x9c\xb5\xce\xe7"
7517 "\x00\x1b\x36\x51\x6c\x87\xa2\xbd"
7518 "\xd8\xf3\x0e\x29\x44\x5f\x7a\x95"
7519 "\xb0\xcb\xe6\x01\x1c\x37\x52\x6d"
7520 "\x88\xa3\xbe\xd9\xf4\x0f\x2a\x45"
7521 "\x60\x7b\x96\xb1\xcc\xe7\x02\x1d"
7522 "\x38\x53\x6e\x89\xa4\xbf\xda\xf5"
7523 "\x10\x2b\x46\x61\x7c\x97\xb2\xcd"
7524 "\xe8\x03\x1e\x39\x54\x6f\x8a\xa5"
7525 "\xc0\xdb\xf6\x11\x2c\x47\x62\x7d"
7526 "\x98\xb3\xce\xe9\x04\x1f\x3a\x55"
7527 "\x70\x8b\xa6\xc1\xdc\xf7\x12\x2d"
7528 "\x48\x63\x7e\x99\xb4\xcf\xea\x05"
7529 "\x20\x3b\x56\x71\x8c\xa7\xc2\xdd"
7530 "\xf8\x13\x2e\x49\x64\x7f\x9a\xb5"
7531 "\xd0\xeb\x06\x21\x3c\x57\x72\x8d"
7532 "\xa8\xc3\xde\xf9\x14\x2f\x4a\x65"
7533 "\x80\x9b\xb6\xd1\xec\x07\x22\x3d"
7534 "\x58\x73\x8e\xa9\xc4\xdf\xfa\x15"
7535 "\x30\x4b\x66\x81\x9c\xb7\xd2\xed"
7536 "\x08\x23\x3e\x59\x74\x8f\xaa\xc5"
7537 "\xe0\xfb\x16\x31\x4c\x67\x82\x9d"
7538 "\xb8\xd3\xee\x09\x24\x3f\x5a\x75"
7539 "\x90\xab\xc6\xe1\xfc\x17\x32\x4d"
7540 "\x68\x83\x9e\xb9\xd4\xef\x0a\x25"
7541 "\x40\x5b\x76\x91\xac\xc7\xe2\xfd"
7542 "\x18\x33\x4e\x69\x84\x9f\xba\xd5"
7543 "\xf0\x0b\x26\x41\x5c\x77\x92\xad"
7544 "\xc8\xe3\xfe\x19\x34\x4f\x6a\x85"
7545 "\xa0\xbb\xd6\xf1\x0c\x27\x42\x5d"
7546 "\x78\x93\xae\xc9\xe4\xff\x1a\x35"
7547 "\x50\x6b\x86\xa1\xbc\xd7\xf2\x0d"
7548 "\x28\x43\x5e\x79\x94\xaf\xca\xe5"
7549 "\x00\x1d\x3a\x57\x74\x91\xae\xcb"
7550 "\xe8\x05\x22\x3f\x5c\x79\x96\xb3"
7551 "\xd0\xed\x0a\x27\x44\x61\x7e\x9b"
7552 "\xb8\xd5\xf2\x0f\x2c\x49\x66\x83"
7553 "\xa0\xbd\xda\xf7\x14\x31\x4e\x6b"
7554 "\x88\xa5\xc2\xdf\xfc\x19\x36\x53"
7555 "\x70\x8d\xaa\xc7\xe4\x01\x1e\x3b"
7556 "\x58\x75\x92\xaf\xcc\xe9\x06\x23"
7557 "\x40\x5d\x7a\x97\xb4\xd1\xee\x0b"
7558 "\x28\x45\x62\x7f\x9c\xb9\xd6\xf3"
7559 "\x10\x2d\x4a\x67\x84\xa1\xbe\xdb"
7560 "\xf8\x15\x32\x4f\x6c\x89\xa6\xc3"
7561 "\xe0\xfd\x1a\x37\x54\x71\x8e\xab"
7562 "\xc8\xe5\x02\x1f\x3c\x59\x76\x93"
7563 "\xb0\xcd\xea\x07\x24\x41\x5e\x7b"
7564 "\x98\xb5\xd2\xef\x0c\x29\x46\x63"
7565 "\x80\x9d\xba\xd7\xf4\x11\x2e\x4b"
7566 "\x68\x85\xa2\xbf\xdc\xf9\x16\x33"
7567 "\x50\x6d\x8a\xa7\xc4\xe1\xfe\x1b"
7568 "\x38\x55\x72\x8f\xac\xc9\xe6\x03"
7569 "\x20\x3d\x5a\x77\x94\xb1\xce\xeb"
7570 "\x08\x25\x42\x5f\x7c\x99\xb6\xd3"
7571 "\xf0\x0d\x2a\x47\x64\x81\x9e\xbb"
7572 "\xd8\xf5\x12\x2f\x4c\x69\x86\xa3"
7573 "\xc0\xdd\xfa\x17\x34\x51\x6e\x8b"
7574 "\xa8\xc5\xe2\xff\x1c\x39\x56\x73"
7575 "\x90\xad\xca\xe7\x04\x21\x3e\x5b"
7576 "\x78\x95\xb2\xcf\xec\x09\x26\x43"
7577 "\x60\x7d\x9a\xb7\xd4\xf1\x0e\x2b"
7578 "\x48\x65\x82\x9f\xbc\xd9\xf6\x13"
7579 "\x30\x4d\x6a\x87\xa4\xc1\xde\xfb"
7580 "\x18\x35\x52\x6f\x8c\xa9\xc6\xe3"
7581 "\x00\x1f\x3e\x5d\x7c\x9b\xba\xd9"
7582 "\xf8\x17\x36\x55\x74\x93\xb2\xd1"
7583 "\xf0\x0f\x2e\x4d\x6c\x8b\xaa\xc9"
7584 "\xe8\x07\x26\x45\x64\x83\xa2\xc1"
7585 "\xe0\xff\x1e\x3d\x5c\x7b\x9a\xb9"
7586 "\xd8\xf7\x16\x35\x54\x73\x92\xb1"
7587 "\xd0\xef\x0e\x2d\x4c\x6b\x8a\xa9"
7588 "\xc8\xe7\x06\x25\x44\x63\x82\xa1"
7589 "\xc0\xdf\xfe\x1d\x3c\x5b\x7a\x99"
7590 "\xb8\xd7\xf6\x15\x34\x53\x72\x91"
7591 "\xb0\xcf\xee\x0d\x2c\x4b\x6a\x89"
7592 "\xa8\xc7\xe6\x05\x24\x43\x62\x81"
7593 "\xa0\xbf\xde\xfd\x1c\x3b\x5a\x79"
7594 "\x98\xb7\xd6\xf5\x14\x33\x52\x71"
7595 "\x90\xaf\xce\xed\x0c\x2b\x4a\x69"
7596 "\x88\xa7\xc6\xe5\x04\x23\x42\x61"
7597 "\x80\x9f\xbe\xdd\xfc\x1b\x3a\x59"
7598 "\x78\x97\xb6\xd5\xf4\x13\x32\x51"
7599 "\x70\x8f\xae\xcd\xec\x0b\x2a\x49"
7600 "\x68\x87\xa6\xc5\xe4\x03\x22\x41"
7601 "\x60\x7f\x9e\xbd\xdc\xfb\x1a\x39"
7602 "\x58\x77\x96\xb5\xd4\xf3\x12\x31"
7603 "\x50\x6f\x8e\xad\xcc\xeb\x0a\x29"
7604 "\x48\x67\x86\xa5\xc4\xe3\x02\x21"
7605 "\x40\x5f\x7e\x9d\xbc\xdb\xfa\x19"
7606 "\x38\x57\x76\x95\xb4\xd3\xf2\x11"
7607 "\x30\x4f\x6e\x8d\xac\xcb\xea\x09"
7608 "\x28\x47\x66\x85\xa4\xc3\xe2\x01"
7609 "\x20\x3f\x5e\x7d\x9c\xbb\xda\xf9"
7610 "\x18\x37\x56\x75\x94\xb3\xd2\xf1"
7611 "\x10\x2f\x4e\x6d\x8c\xab\xca\xe9"
7612 "\x08\x27\x46\x65\x84\xa3\xc2\xe1"
7613 "\x00\x21\x42\x63",
7614 .ilen = 4100,
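/* 4100 bytes is just over a 4 KiB page, presumably so the scatterlist
 * walk is also tested across a page boundary. */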
7615 .result =
7616 "\xb5\x81\xf5\x64\x18\x73\xe3\xf0"
7617 "\x4c\x13\xf2\x77\x18\x60\x65\x5e"
7618 "\x29\x01\xce\x98\x55\x53\xf9\x0c"
7619 "\x2a\x08\xd5\x09\xb3\x57\x55\x56"
7620 "\xc5\xe9\x56\x90\xcb\x6a\xa3\xc0"
7621 "\xff\xc4\x79\xb4\xd2\x97\x5d\xc4"
7622 "\x43\xd1\xfe\x94\x7b\x88\x06\x5a"
7623 "\xb2\x9e\x2c\xfc\x44\x03\xb7\x90"
7624 "\xa0\xc1\xba\x6a\x33\xb8\xc7\xb2"
7625 "\x9d\xe1\x12\x4f\xc0\x64\xd4\x01"
7626 "\xfe\x8c\x7a\x66\xf7\xe6\x5a\x91"
7627 "\xbb\xde\x56\x86\xab\x65\x21\x30"
7628 "\x00\x84\x65\x24\xa5\x7d\x85\xb4"
7629 "\xe3\x17\xed\x3a\xb7\x6f\xb4\x0b"
7630 "\x0b\xaf\x15\xae\x5a\x8f\xf2\x0c"
7631 "\x2f\x27\xf4\x09\xd8\xd2\x96\xb7"
7632 "\x71\xf2\xc5\x99\x4d\x7e\x7f\x75"
7633 "\x77\x89\x30\x8b\x59\xdb\xa2\xb2"
7634 "\xa0\xf3\x19\x39\x2b\xc5\x7e\x3f"
7635 "\x4f\xd9\xd3\x56\x28\x97\x44\xdc"
7636 "\xc0\x8b\x77\x24\xd9\x52\xe7\xc5"
7637 "\xaf\xf6\x7d\x59\xb2\x44\x05\x1d"
7638 "\xb1\xb0\x11\xa5\x0f\xec\x33\xe1"
7639 "\x6d\x1b\x4e\x1f\xff\x57\x91\xb4"
7640 "\x5b\x9a\x96\xc5\x53\xbc\xae\x20"
7641 "\x3c\xbb\x14\xe2\xe8\x22\x33\xc1"
7642 "\x5e\x76\x9e\x46\x99\xf6\x2a\x15"
7643 "\xc6\x97\x02\xa0\x66\x43\xd1\xa6"
7644 "\x31\xa6\x9f\xfb\xf4\xd3\x69\xe5"
7645 "\xcd\x76\x95\xb8\x7a\x82\x7f\x21"
7646 "\x45\xff\x3f\xce\x55\xf6\x95\x10"
7647 "\x08\x77\x10\x43\xc6\xf3\x09\xe5"
7648 "\x68\xe7\x3c\xad\x00\x52\x45\x0d"
7649 "\xfe\x2d\xc6\xc2\x94\x8c\x12\x1d"
7650 "\xe6\x25\xae\x98\x12\x8e\x19\x9c"
7651 "\x81\x68\xb1\x11\xf6\x69\xda\xe3"
7652 "\x62\x08\x18\x7a\x25\x49\x28\xac"
7653 "\xba\x71\x12\x0b\xe4\xa2\xe5\xc7"
7654 "\x5d\x8e\xec\x49\x40\x21\xbf\x5a"
7655 "\x98\xf3\x02\x68\x55\x03\x7f\x8a"
7656 "\xe5\x94\x0c\x32\x5c\x07\x82\x63"
7657 "\xaf\x6f\x91\x40\x84\x8e\x52\x25"
7658 "\xd0\xb0\x29\x53\x05\xe2\x50\x7a"
7659 "\x34\xeb\xc9\x46\x20\xa8\x3d\xde"
7660 "\x7f\x16\x5f\x36\xc5\x2e\xdc\xd1"
7661 "\x15\x47\xc7\x50\x40\x6d\x91\xc5"
7662 "\xe7\x93\x95\x1a\xd3\x57\xbc\x52"
7663 "\x33\xee\x14\x19\x22\x52\x89\xa7"
7664 "\x4a\x25\x56\x77\x4b\xca\xcf\x0a"
7665 "\xe1\xf5\x35\x85\x30\x7e\x59\x4a"
7666 "\xbd\x14\x5b\xdf\xe3\x46\xcb\xac"
7667 "\x1f\x6c\x96\x0e\xf4\x81\xd1\x99"
7668 "\xca\x88\x63\x3d\x02\x58\x6b\xa9"
7669 "\xe5\x9f\xb3\x00\xb2\x54\xc6\x74"
7670 "\x1c\xbf\x46\xab\x97\xcc\xf8\x54"
7671 "\x04\x07\x08\x52\xe6\xc0\xda\x93"
7672 "\x74\x7d\x93\x99\x5d\x78\x68\xa6"
7673 "\x2e\x6b\xd3\x6a\x69\xcc\x12\x6b"
7674 "\xd4\xc7\xa5\xc6\xe7\xf6\x03\x04"
7675 "\x5d\xcd\x61\x5e\x17\x40\xdc\xd1"
7676 "\x5c\xf5\x08\xdf\x5c\x90\x85\xa4"
7677 "\xaf\xf6\x78\xbb\x0d\xf1\xf4\xa4"
7678 "\x54\x26\x72\x9e\x61\xfa\x86\xcf"
7679 "\xe8\x9e\xa1\xe0\xc7\x48\x23\xae"
7680 "\x5a\x90\xae\x75\x0a\x74\x18\x89"
7681 "\x05\xb1\x92\xb2\x7f\xd0\x1b\xa6"
7682 "\x62\x07\x25\x01\xc7\xc2\x4f\xf9"
7683 "\xe8\xfe\x63\x95\x80\x07\xb4\x26"
7684 "\xcc\xd1\x26\xb6\xc4\x3f\x9e\xcb"
7685 "\x8e\x3b\x2e\x44\x16\xd3\x10\x9a"
7686 "\x95\x08\xeb\xc8\xcb\xeb\xbf\x6f"
7687 "\x0b\xcd\x1f\xc8\xca\x86\xaa\xec"
7688 "\x33\xe6\x69\xf4\x45\x25\x86\x3a"
7689 "\x22\x94\x4f\x00\x23\x6a\x44\xc2"
7690 "\x49\x97\x33\xab\x36\x14\x0a\x70"
7691 "\x24\xc3\xbe\x04\x3b\x79\xa0\xf9"
7692 "\xb8\xe7\x76\x29\x22\x83\xd7\xf2"
7693 "\x94\xf4\x41\x49\xba\x5f\x7b\x07"
7694 "\xb5\xfb\xdb\x03\x1a\x9f\xb6\x4c"
7695 "\xc2\x2e\x37\x40\x49\xc3\x38\x16"
7696 "\xe2\x4f\x77\x82\xb0\x68\x4c\x71"
7697 "\x1d\x57\x61\x9c\xd9\x4e\x54\x99"
7698 "\x47\x13\x28\x73\x3c\xbb\x00\x90"
7699 "\xf3\x4d\xc9\x0e\xfd\xe7\xb1\x71"
7700 "\xd3\x15\x79\xbf\xcc\x26\x2f\xbd"
7701 "\xad\x6c\x50\x69\x6c\x3e\x6d\x80"
7702 "\x9a\xea\x78\xaf\x19\xb2\x0d\x4d"
7703 "\xad\x04\x07\xae\x22\x90\x4a\x93"
7704 "\x32\x0e\x36\x9b\x1b\x46\xba\x3b"
7705 "\xb4\xac\xc6\xd1\xa2\x31\x53\x3b"
7706 "\x2a\x3d\x45\xfe\x03\x61\x10\x85"
7707 "\x17\x69\xa6\x78\xcc\x6c\x87\x49"
7708 "\x53\xf9\x80\x10\xde\x80\xa2\x41"
7709 "\x6a\xc3\x32\x02\xad\x6d\x3c\x56"
7710 "\x00\x71\x51\x06\xa7\xbd\xfb\xef"
7711 "\x3c\xb5\x9f\xfc\x48\x7d\x53\x7c"
7712 "\x66\xb0\x49\x23\xc4\x47\x10\x0e"
7713 "\xe5\x6c\x74\x13\xe6\xc5\x3f\xaa"
7714 "\xde\xff\x07\x44\xdd\x56\x1b\xad"
7715 "\x09\x77\xfb\x5b\x12\xb8\x0d\x38"
7716 "\x17\x37\x35\x7b\x9b\xbc\xfe\xd4"
7717 "\x7e\x8b\xda\x7e\x5b\x04\xa7\x22"
7718 "\xa7\x31\xa1\x20\x86\xc7\x1b\x99"
7719 "\xdb\xd1\x89\xf4\x94\xa3\x53\x69"
7720 "\x8d\xe7\xe8\x74\x11\x8d\x74\xd6"
7721 "\x07\x37\x91\x9f\xfd\x67\x50\x3a"
7722 "\xc9\xe1\xf4\x36\xd5\xa0\x47\xd1"
7723 "\xf9\xe5\x39\xa3\x31\xac\x07\x36"
7724 "\x23\xf8\x66\x18\x14\x28\x34\x0f"
7725 "\xb8\xd0\xe7\x29\xb3\x04\x4b\x55"
7726 "\x01\x41\xb2\x75\x8d\xcb\x96\x85"
7727 "\x3a\xfb\xab\x2b\x9e\xfa\x58\x20"
7728 "\x44\x1f\xc0\x14\x22\x75\x61\xe8"
7729 "\xaa\x19\xcf\xf1\x82\x56\xf4\xd7"
7730 "\x78\x7b\x3d\x5f\xb3\x9e\x0b\x8a"
7731 "\x57\x50\xdb\x17\x41\x65\x4d\xa3"
7732 "\x02\xc9\x9c\x9c\x53\xfb\x39\x39"
7733 "\x9b\x1d\x72\x24\xda\xb7\x39\xbe"
7734 "\x13\x3b\xfa\x29\xda\x9e\x54\x64"
7735 "\x6e\xba\xd8\xa1\xcb\xb3\x36\xfa"
7736 "\xcb\x47\x85\xe9\x61\x38\xbc\xbe"
7737 "\xc5\x00\x38\x2a\x54\xf7\xc4\xb9"
7738 "\xb3\xd3\x7b\xa0\xa0\xf8\x72\x7f"
7739 "\x8c\x8e\x82\x0e\xc6\x1c\x75\x9d"
7740 "\xca\x8e\x61\x87\xde\xad\x80\xd2"
7741 "\xf5\xf9\x80\xef\x15\x75\xaf\xf5"
7742 "\x80\xfb\xff\x6d\x1e\x25\xb7\x40"
7743 "\x61\x6a\x39\x5a\x6a\xb5\x31\xab"
7744 "\x97\x8a\x19\x89\x44\x40\xc0\xa6"
7745 "\xb4\x4e\x30\x32\x7b\x13\xe7\x67"
7746 "\xa9\x8b\x57\x04\xc2\x01\xa6\xf4"
7747 "\x28\x99\xad\x2c\x76\xa3\x78\xc2"
7748 "\x4a\xe6\xca\x5c\x50\x6a\xc1\xb0"
7749 "\x62\x4b\x10\x8e\x7c\x17\x43\xb3"
7750 "\x17\x66\x1c\x3e\x8d\x69\xf0\x5a"
7751 "\x71\xf5\x97\xdc\xd1\x45\xdd\x28"
7752 "\xf3\x5d\xdf\x53\x7b\x11\xe5\xbc"
7753 "\x4c\xdb\x1b\x51\x6b\xe9\xfb\x3d"
7754 "\xc1\xc3\x2c\xb9\x71\xf5\xb6\xb2"
7755 "\x13\x36\x79\x80\x53\xe8\xd3\xa6"
7756 "\x0a\xaf\xfd\x56\x97\xf7\x40\x8e"
7757 "\x45\xce\xf8\xb0\x9e\x5c\x33\x82"
7758 "\xb0\x44\x56\xfc\x05\x09\xe9\x2a"
7759 "\xac\x26\x80\x14\x1d\xc8\x3a\x35"
7760 "\x4c\x82\x97\xfd\x76\xb7\xa9\x0a"
7761 "\x35\x58\x79\x8e\x0f\x66\xea\xaf"
7762 "\x51\x6c\x09\xa9\x6e\x9b\xcb\x9a"
7763 "\x31\x47\xa0\x2f\x7c\x71\xb4\x4a"
7764 "\x11\xaa\x8c\x66\xc5\x64\xe6\x3a"
7765 "\x54\xda\x24\x6a\xc4\x41\x65\x46"
7766 "\x82\xa0\x0a\x0f\x5f\xfb\x25\xd0"
7767 "\x2c\x91\xa7\xee\xc4\x81\x07\x86"
7768 "\x75\x5e\x33\x69\x97\xe4\x2c\xa8"
7769 "\x9d\x9f\x0b\x6a\xbe\xad\x98\xda"
7770 "\x6d\x94\x41\xda\x2c\x1e\x89\xc4"
7771 "\xc2\xaf\x1e\x00\x05\x0b\x83\x60"
7772 "\xbd\x43\xea\x15\x23\x7f\xb9\xac"
7773 "\xee\x4f\x2c\xaf\x2a\xf3\xdf\xd0"
7774 "\xf3\x19\x31\xbb\x4a\x74\x84\x17"
7775 "\x52\x32\x2c\x7d\x61\xe4\xcb\xeb"
7776 "\x80\x38\x15\x52\xcb\x6f\xea\xe5"
7777 "\x73\x9c\xd9\x24\x69\xc6\x95\x32"
7778 "\x21\xc8\x11\xe4\xdc\x36\xd7\x93"
7779 "\x38\x66\xfb\xb2\x7f\x3a\xb9\xaf"
7780 "\x31\xdd\x93\x75\x78\x8a\x2c\x94"
7781 "\x87\x1a\x58\xec\x9e\x7d\x4d\xba"
7782 "\xe1\xe5\x4d\xfc\xbc\xa4\x2a\x14"
7783 "\xef\xcc\xa7\xec\xab\x43\x09\x18"
7784 "\xd3\xab\x68\xd1\x07\x99\x44\x47"
7785 "\xd6\x83\x85\x3b\x30\xea\xa9\x6b"
7786 "\x63\xea\xc4\x07\xfb\x43\x2f\xa4"
7787 "\xaa\xb0\xab\x03\x89\xce\x3f\x8c"
7788 "\x02\x7c\x86\x54\xbc\x88\xaf\x75"
7789 "\xd2\xdc\x63\x17\xd3\x26\xf6\x96"
7790 "\xa9\x3c\xf1\x61\x8c\x11\x18\xcc"
7791 "\xd6\xea\x5b\xe2\xcd\xf0\xf1\xb2"
7792 "\xe5\x35\x90\x1f\x85\x4c\x76\x5b"
7793 "\x66\xce\x44\xa4\x32\x9f\xe6\x7b"
7794 "\x71\x6e\x9f\x58\x15\x67\x72\x87"
7795 "\x64\x8e\x3a\x44\x45\xd4\x76\xfa"
7796 "\xc2\xf6\xef\x85\x05\x18\x7a\x9b"
7797 "\xba\x41\x54\xac\xf0\xfc\x59\x12"
7798 "\x3f\xdf\xa0\xe5\x8a\x65\xfd\x3a"
7799 "\x62\x8d\x83\x2c\x03\xbe\x05\x76"
7800 "\x2e\x53\x49\x97\x94\x33\xae\x40"
7801 "\x81\x15\xdb\x6e\xad\xaa\xf5\x4b"
7802 "\xe3\x98\x70\xdf\xe0\x7c\xcd\xdb"
7803 "\x02\xd4\x7d\x2f\xc1\xe6\xb4\xf3"
7804 "\xd7\x0d\x7a\xd9\x23\x9e\x87\x2d"
7805 "\xce\x87\xad\xcc\x72\x05\x00\x29"
7806 "\xdc\x73\x7f\x64\xc1\x15\x0e\xc2"
7807 "\xdf\xa7\x5f\xeb\x41\xa1\xcd\xef"
7808 "\x5c\x50\x79\x2a\x56\x56\x71\x8c"
7809 "\xac\xc0\x79\x50\x69\xca\x59\x32"
7810 "\x65\xf2\x54\xe4\x52\x38\x76\xd1"
7811 "\x5e\xde\x26\x9e\xfb\x75\x2e\x11"
7812 "\xb5\x10\xf4\x17\x73\xf5\x89\xc7"
7813 "\x4f\x43\x5c\x8e\x7c\xb9\x05\x52"
7814 "\x24\x40\x99\xfe\x9b\x85\x0b\x6c"
7815 "\x22\x3e\x8b\xae\x86\xa1\xd2\x79"
7816 "\x05\x68\x6b\xab\xe3\x41\x49\xed"
7817 "\x15\xa1\x8d\x40\x2d\x61\xdf\x1a"
7818 "\x59\xc9\x26\x8b\xef\x30\x4c\x88"
7819 "\x4b\x10\xf8\x8d\xa6\x92\x9f\x4b"
7820 "\xf3\xc4\x53\x0b\x89\x5d\x28\x92"
7821 "\xcf\x78\xb2\xc0\x5d\xed\x7e\xfc"
7822 "\xc0\x12\x23\x5f\x5a\x78\x86\x43"
7823 "\x6e\x27\xf7\x5a\xa7\x6a\xed\x19"
7824 "\x04\xf0\xb3\x12\xd1\xbd\x0e\x89"
7825 "\x6e\xbc\x96\xa8\xd8\x49\x39\x9f"
7826 "\x7e\x67\xf0\x2e\x3e\x01\xa9\xba"
7827 "\xec\x8b\x62\x8e\xcb\x4a\x70\x43"
7828 "\xc7\xc2\xc4\xca\x82\x03\x73\xe9"
7829 "\x11\xdf\xcf\x54\xea\xc9\xb0\x95"
7830 "\x51\xc0\x13\x3d\x92\x05\xfa\xf4"
7831 "\xa9\x34\xc8\xce\x6c\x3d\x54\xcc"
7832 "\xc4\xaf\xf1\xdc\x11\x44\x26\xa2"
7833 "\xaf\xf1\x85\x75\x7d\x03\x61\x68"
7834 "\x4e\x78\xc6\x92\x7d\x86\x7d\x77"
7835 "\xdc\x71\x72\xdb\xc6\xae\xa1\xcb"
7836 "\x70\x9a\x0b\x19\xbe\x4a\x6c\x2a"
7837 "\xe2\xba\x6c\x64\x9a\x13\x28\xdf"
7838 "\x85\x75\xe6\x43\xf6\x87\x08\x68"
7839 "\x6e\xba\x6e\x79\x9f\x04\xbc\x23"
7840 "\x50\xf6\x33\x5c\x1f\x24\x25\xbe"
7841 "\x33\x47\x80\x45\x56\xa3\xa7\xd7"
7842 "\x7a\xb1\x34\x0b\x90\x3c\x9c\xad"
7843 "\x44\x5f\x9e\x0e\x9d\xd4\xbd\x93"
7844 "\x5e\xfa\x3c\xe0\xb0\xd9\xed\xf3"
7845 "\xd6\x2e\xff\x24\xd8\x71\x6c\xed"
7846 "\xaf\x55\xeb\x22\xac\x93\x68\x32"
7847 "\x05\x5b\x47\xdd\xc6\x4a\xcb\xc7"
7848 "\x10\xe1\x3c\x92\x1a\xf3\x23\x78"
7849 "\x2b\xa1\xd2\x80\xf4\x12\xb1\x20"
7850 "\x8f\xff\x26\x35\xdd\xfb\xc7\x4e"
7851 "\x78\xf1\x2d\x50\x12\x77\xa8\x60"
7852 "\x7c\x0f\xf5\x16\x2f\x63\x70\x2a"
7853 "\xc0\x96\x80\x4e\x0a\xb4\x93\x35"
7854 "\x5d\x1d\x3f\x56\xf7\x2f\xbb\x90"
7855 "\x11\x16\x8f\xa2\xec\x47\xbe\xac"
7856 "\x56\x01\x26\x56\xb1\x8c\xb2\x10"
7857 "\xf9\x1a\xca\xf5\xd1\xb7\x39\x20"
7858 "\x63\xf1\x69\x20\x4f\x13\x12\x1f"
7859 "\x5b\x65\xfc\x98\xf7\xc4\x7a\xbe"
7860 "\xf7\x26\x4d\x2b\x84\x7b\x42\xad"
7861 "\xd8\x7a\x0a\xb4\xd8\x74\xbf\xc1"
7862 "\xf0\x6e\xb4\x29\xa3\xbb\xca\x46"
7863 "\x67\x70\x6a\x2d\xce\x0e\xa2\x8a"
7864 "\xa9\x87\xbf\x05\xc4\xc1\x04\xa3"
7865 "\xab\xd4\x45\x43\x8c\xb6\x02\xb0"
7866 "\x41\xc8\xfc\x44\x3d\x59\xaa\x2e"
7867 "\x44\x21\x2a\x8d\x88\x9d\x57\xf4"
7868 "\xa0\x02\x77\xb8\xa6\xa0\xe6\x75"
7869 "\x5c\x82\x65\x3e\x03\x5c\x29\x8f"
7870 "\x38\x55\xab\x33\x26\xef\x9f\x43"
7871 "\x52\xfd\x68\xaf\x36\xb4\xbb\x9a"
7872 "\x58\x09\x09\x1b\xc3\x65\x46\x46"
7873 "\x1d\xa7\x94\x18\x23\x50\x2c\xca"
7874 "\x2c\x55\x19\x97\x01\x9d\x93\x3b"
7875 "\x63\x86\xf2\x03\x67\x45\xd2\x72"
7876 "\x28\x52\x6c\xf4\xe3\x1c\xb5\x11"
7877 "\x13\xf1\xeb\x21\xc7\xd9\x56\x82"
7878 "\x2b\x82\x39\xbd\x69\x54\xed\x62"
7879 "\xc3\xe2\xde\x73\xd4\x6a\x12\xae"
7880 "\x13\x21\x7f\x4b\x5b\xfc\xbf\xe8"
7881 "\x2b\xbe\x56\xba\x68\x8b\x9a\xb1"
7882 "\x6e\xfa\xbf\x7e\x5a\x4b\xf1\xac"
7883 "\x98\x65\x85\xd1\x93\x53\xd3\x7b"
7884 "\x09\xdd\x4b\x10\x6d\x84\xb0\x13"
7885 "\x65\xbd\xcf\x52\x09\xc4\x85\xe2"
7886 "\x84\x74\x15\x65\xb7\xf7\x51\xaf"
7887 "\x55\xad\xa4\xd1\x22\x54\x70\x94"
7888 "\xa0\x1c\x90\x41\xfd\x99\xd7\x5a"
7889 "\x31\xef\xaa\x25\xd0\x7f\x4f\xea"
7890 "\x1d\x55\x42\xe5\x49\xb0\xd0\x46"
7891 "\x62\x36\x43\xb2\x82\x15\x75\x50"
7892 "\xa4\x72\xeb\x54\x27\x1f\x8a\xe4"
7893 "\x7d\xe9\x66\xc5\xf1\x53\xa4\xd1"
7894 "\x0c\xeb\xb8\xf8\xbc\xd4\xe2\xe7"
7895 "\xe1\xf8\x4b\xcb\xa9\xa1\xaf\x15"
7896 "\x83\xcb\x72\xd0\x33\x79\x00\x2d"
7897 "\x9f\xd7\xf1\x2e\x1e\x10\xe4\x45"
7898 "\xc0\x75\x3a\x39\xea\x68\xf7\x5d"
7899 "\x1b\x73\x8f\xe9\x8e\x0f\x72\x47"
7900 "\xae\x35\x0a\x31\x7a\x14\x4d\x4a"
7901 "\x6f\x47\xf7\x7e\x91\x6e\x74\x8b"
7902 "\x26\x47\xf9\xc3\xf9\xde\x70\xf5"
7903 "\x61\xab\xa9\x27\x9f\x82\xe4\x9c"
7904 "\x89\x91\x3f\x2e\x6a\xfd\xb5\x49"
7905 "\xe9\xfd\x59\x14\x36\x49\x40\x6d"
7906 "\x32\xd8\x85\x42\xf3\xa5\xdf\x0c"
7907 "\xa8\x27\xd7\x54\xe2\x63\x2f\xf2"
7908 "\x7e\x8b\x8b\xe7\xf1\x9a\x95\x35"
7909 "\x43\xdc\x3a\xe4\xb6\xf4\xd0\xdf"
7910 "\x9c\xcb\x94\xf3\x21\xa0\x77\x50"
7911 "\xe2\xc6\xc4\xc6\x5f\x09\x64\x5b"
7912 "\x92\x90\xd8\xe1\xd1\xed\x4b\x42"
7913 "\xd7\x37\xaf\x65\x3d\x11\x39\xb6"
7914 "\x24\x8a\x60\xae\xd6\x1e\xbf\x0e"
7915 "\x0d\xd7\xdc\x96\x0e\x65\x75\x4e"
7916 "\x29\x06\x9d\xa4\x51\x3a\x10\x63"
7917 "\x8f\x17\x07\xd5\x8e\x3c\xf4\x28"
7918 "\x00\x5a\x5b\x05\x19\xd8\xc0\x6c"
7919 "\xe5\x15\xe4\x9c\x9d\x71\x9d\x5e"
7920 "\x94\x29\x1a\xa7\x80\xfa\x0e\x33"
7921 "\x03\xdd\xb7\x3e\x9a\xa9\x26\x18"
7922 "\x37\xa9\x64\x08\x4d\x94\x5a\x88"
7923 "\xca\x35\xce\x81\x02\xe3\x1f\x1b"
7924 "\x89\x1a\x77\x85\xe3\x41\x6d\x32"
7925 "\x42\x19\x23\x7d\xc8\x73\xee\x25"
7926 "\x85\x0d\xf8\x31\x25\x79\x1b\x6f"
7927 "\x79\x25\xd2\xd8\xd4\x23\xfd\xf7"
7928 "\x82\x36\x6a\x0c\x46\x22\x15\xe9"
7929 "\xff\x72\x41\x91\x91\x7d\x3a\xb7"
7930 "\xdd\x65\x99\x70\xf6\x8d\x84\xf8"
7931 "\x67\x15\x20\x11\xd6\xb2\x55\x7b"
7932 "\xdb\x87\xee\xef\x55\x89\x2a\x59"
7933 "\x2b\x07\x8f\x43\x8a\x59\x3c\x01"
7934 "\x8b\x65\x54\xa1\x66\xd5\x38\xbd"
7935 "\xc6\x30\xa9\xcc\x49\xb6\xa8\x1b"
7936 "\xb8\xc0\x0e\xe3\x45\x28\xe2\xff"
7937 "\x41\x9f\x7e\x7c\xd1\xae\x9e\x25"
7938 "\x3f\x4c\x7c\x7c\xf4\xa8\x26\x4d"
7939 "\x5c\xfd\x4b\x27\x18\xf9\x61\x76"
7940 "\x48\xba\x0c\x6b\xa9\x4d\xfc\xf5"
7941 "\x3b\x35\x7e\x2f\x4a\xa9\xc2\x9a"
7942 "\xae\xab\x86\x09\x89\xc9\xc2\x40"
7943 "\x39\x2c\x81\xb3\xb8\x17\x67\xc2"
7944 "\x0d\x32\x4a\x3a\x67\x81\xd7\x1a"
7945 "\x34\x52\xc5\xdb\x0a\xf5\x63\x39"
7946 "\xea\x1f\xe1\x7c\xa1\x9e\xc1\x35"
7947 "\xe3\xb1\x18\x45\x67\xf9\x22\x38"
7948 "\x95\xd9\x34\x34\x86\xc6\x41\x94"
7949 "\x15\xf9\x5b\x41\xa6\x87\x8b\xf8"
7950 "\xd5\xe1\x1b\xe2\x5b\xf3\x86\x10"
7951 "\xff\xe6\xae\x69\x76\xbc\x0d\xb4"
7952 "\x09\x90\x0c\xa2\x65\x0c\xad\x74"
7953 "\xf5\xd7\xff\xda\xc1\xce\x85\xbe"
7954 "\x00\xa7\xff\x4d\x2f\x65\xd3\x8c"
7955 "\x86\x2d\x05\xe8\xed\x3e\x6b\x8b"
7956 "\x0f\x3d\x83\x8c\xf1\x1d\x5b\x96"
7957 "\x2e\xb1\x9c\xc2\x98\xe1\x70\xb9"
7958 "\xba\x5c\x8a\x43\xd6\x34\xa7\x2d"
7959 "\xc9\x92\xae\xf2\xa5\x7b\x05\x49"
7960 "\xa7\x33\x34\x86\xca\xe4\x96\x23"
7961 "\x76\x5b\xf2\xc6\xf1\x51\x28\x42"
7962 "\x7b\xcc\x76\x8f\xfa\xa2\xad\x31"
7963 "\xd4\xd6\x7a\x6d\x25\x25\x54\xe4"
7964 "\x3f\x50\x59\xe1\x5c\x05\xb7\x27"
7965 "\x48\xbf\x07\xec\x1b\x13\xbe\x2b"
7966 "\xa1\x57\x2b\xd5\xab\xd7\xd0\x4c"
7967 "\x1e\xcb\x71\x9b\xc5\x90\x85\xd3"
7968 "\xde\x59\xec\x71\xeb\x89\xbb\xd0"
7969 "\x09\x50\xe1\x16\x3f\xfd\x1c\x34"
7970 "\xc3\x1c\xa1\x10\x77\x53\x98\xef"
7971 "\xf2\xfd\xa5\x01\x59\xc2\x9b\x26"
7972 "\xc7\x42\xd9\x49\xda\x58\x2b\x6e"
7973 "\x9f\x53\x19\x76\x7e\xd9\xc9\x0e"
7974 "\x68\xc8\x7f\x51\x22\x42\xef\x49"
7975 "\xa4\x55\xb6\x36\xac\x09\xc7\x31"
7976 "\x88\x15\x4b\x2e\x8f\x3a\x08\xf7"
7977 "\xd8\xf7\xa8\xc5\xa9\x33\xa6\x45"
7978 "\xe4\xc4\x94\x76\xf3\x0d\x8f\x7e"
7979 "\xc8\xf6\xbc\x23\x0a\xb6\x4c\xd3"
7980 "\x6a\xcd\x36\xc2\x90\x5c\x5c\x3c"
7981 "\x65\x7b\xc2\xd6\xcc\xe6\x0d\x87"
7982 "\x73\x2e\x71\x79\x16\x06\x63\x28"
7983 "\x09\x15\xd8\x89\x38\x38\x3d\xb5"
7984 "\x42\x1c\x08\x24\xf7\x2a\xd2\x9d"
7985 "\xc8\xca\xef\xf9\x27\xd8\x07\x86"
7986 "\xf7\x43\x0b\x55\x15\x3f\x9f\x83"
7987 "\xef\xdc\x49\x9d\x2a\xc1\x54\x62"
7988 "\xbd\x9b\x66\x55\x9f\xb7\x12\xf3"
7989 "\x1b\x4d\x9d\x2a\x5c\xed\x87\x75"
7990 "\x87\x26\xec\x61\x2c\xb4\x0f\x89"
7991 "\xb0\xfb\x2e\x68\x5d\x15\xc7\x8d"
7992 "\x2e\xc0\xd9\xec\xaf\x4f\xd2\x25"
7993 "\x29\xe8\xd2\x26\x2b\x67\xe9\xfc"
7994 "\x2b\xa8\x67\x96\x12\x1f\x5b\x96"
7995 "\xc6\x14\x53\xaf\x44\xea\xd6\xe2"
7996 "\x94\x98\xe4\x12\x93\x4c\x92\xe0"
7997 "\x18\xa5\x8d\x2d\xe4\x71\x3c\x47"
7998 "\x4c\xf7\xe6\x47\x9e\xc0\x68\xdf"
7999 "\xd4\xf5\x5a\x74\xb1\x2b\x29\x03"
8000 "\x19\x07\xaf\x90\x62\x5c\x68\x98"
8001 "\x48\x16\x11\x02\x9d\xee\xb4\x9b"
8002 "\xe5\x42\x7f\x08\xfd\x16\x32\x0b"
8003 "\xd0\xb3\xfa\x2b\xb7\x99\xf9\x29"
8004 "\xcd\x20\x45\x9f\xb3\x1a\x5d\xa2"
8005 "\xaf\x4d\xe0\xbd\x42\x0d\xbc\x74"
8006 "\x99\x9c\x8e\x53\x1a\xb4\x3e\xbd"
8007 "\xa2\x9a\x2d\xf7\xf8\x39\x0f\x67"
8008 "\x63\xfc\x6b\xc0\xaf\xb3\x4b\x4f"
8009 "\x55\xc4\xcf\xa7\xc8\x04\x11\x3e"
8010 "\x14\x32\xbb\x1b\x38\x77\xd6\x7f"
8011 "\x54\x4c\xdf\x75\xf3\x07\x2d\x33"
8012 "\x9b\xa8\x20\xe1\x7b\x12\xb5\xf3"
8013 "\xef\x2f\xce\x72\xe5\x24\x60\xc1"
8014 "\x30\xe2\xab\xa1\x8e\x11\x09\xa8"
8015 "\x21\x33\x44\xfe\x7f\x35\x32\x93"
8016 "\x39\xa7\xad\x8b\x79\x06\xb2\xcb"
8017 "\x4e\xa9\x5f\xc7\xba\x74\x29\xec"
8018 "\x93\xa0\x4e\x54\x93\xc0\xbc\x55"
8019 "\x64\xf0\x48\xe5\x57\x99\xee\x75"
8020 "\xd6\x79\x0f\x66\xb7\xc6\x57\x76"
8021 "\xf7\xb7\xf3\x9c\xc5\x60\xe8\x7f"
8022 "\x83\x76\xd6\x0e\xaa\xe6\x90\x39"
8023 "\x1d\xa6\x32\x6a\x34\xe3\x55\xf8"
8024 "\x58\xa0\x58\x7d\x33\xe0\x22\x39"
8025 "\x44\x64\x87\x86\x5a\x2f\xa7\x7e"
8026 "\x0f\x38\xea\xb0\x30\xcc\x61\xa5"
8027 "\x6a\x32\xae\x1e\xf7\xe9\xd0\xa9"
8028 "\x0c\x32\x4b\xb5\x49\x28\xab\x85"
8029 "\x2f\x8e\x01\x36\x38\x52\xd0\xba"
8030 "\xd6\x02\x78\xf8\x0e\x3e\x9c\x8b"
8031 "\x6b\x45\x99\x3f\x5c\xfe\x58\xf1"
8032 "\x5c\x94\x04\xe1\xf5\x18\x6d\x51"
8033 "\xb2\x5d\x18\x20\xb6\xc2\x9a\x42"
8034 "\x1d\xb3\xab\x3c\xb6\x3a\x13\x03"
8035 "\xb2\x46\x82\x4f\xfc\x64\xbc\x4f"
8036 "\xca\xfa\x9c\xc0\xd5\xa7\xbd\x11"
8037 "\xb7\xe4\x5a\xf6\x6f\x4d\x4d\x54"
8038 "\xea\xa4\x98\x66\xd4\x22\x3b\xd3"
8039 "\x8f\x34\x47\xd9\x7c\xf4\x72\x3b"
8040 "\x4d\x02\x77\xf6\xd6\xdd\x08\x0a"
8041 "\x81\xe1\x86\x89\x3e\x56\x10\x3c"
8042 "\xba\xd7\x81\x8c\x08\xbc\x8b\xe2"
8043 "\x53\xec\xa7\x89\xee\xc8\x56\xb5"
8044 "\x36\x2c\xb2\x03\xba\x99\xdd\x7c"
8045 "\x48\xa0\xb0\xbc\x91\x33\xe9\xa8"
8046 "\xcb\xcd\xcf\x59\x5f\x1f\x15\xe2"
8047 "\x56\xf5\x4e\x01\x35\x27\x45\x77"
8048 "\x47\xc8\xbc\xcb\x7e\x39\xc1\x97"
8049 "\x28\xd3\x84\xfc\x2c\x3e\xc8\xad"
8050 "\x9c\xf8\x8a\x61\x9c\x28\xaa\xc5"
8051 "\x99\x20\x43\x85\x9d\xa5\xe2\x8b"
8052 "\xb8\xae\xeb\xd0\x32\x0d\x52\x78"
8053 "\x09\x56\x3f\xc7\xd8\x7e\x26\xfc"
8054 "\x37\xfb\x6f\x04\xfc\xfa\x92\x10"
8055 "\xac\xf8\x3e\x21\xdc\x8c\x21\x16"
8056 "\x7d\x67\x6e\xf6\xcd\xda\xb6\x98"
8057 "\x23\xab\x23\x3c\xb2\x10\xa0\x53"
8058 "\x5a\x56\x9f\xc5\xd0\xff\xbb\xe4"
8059 "\x98\x3c\x69\x1e\xdb\x38\x8f\x7e"
8060 "\x0f\xd2\x98\x88\x81\x8b\x45\x67"
8061 "\xea\x33\xf1\xeb\xe9\x97\x55\x2e"
8062 "\xd9\xaa\xeb\x5a\xec\xda\xe1\x68"
8063 "\xa8\x9d\x3c\x84\x7c\x05\x3d\x62"
8064 "\x87\x8f\x03\x21\x28\x95\x0c\x89"
8065 "\x25\x22\x4a\xb0\x93\xa9\x50\xa2"
8066 "\x2f\x57\x6e\x18\x42\x19\x54\x0c"
8067 "\x55\x67\xc6\x11\x49\xf4\x5c\xd2"
8068 "\xe9\x3d\xdd\x8b\x48\x71\x21\x00"
8069 "\xc3\x9a\x6c\x85\x74\x28\x83\x4a"
8070 "\x1b\x31\x05\xe1\x06\x92\xe7\xda"
8071 "\x85\x73\x78\x45\x20\x7f\xae\x13"
8072 "\x7c\x33\x06\x22\xf4\x83\xf9\x35"
8073 "\x3f\x6c\x71\xa8\x4e\x48\xbe\x9b"
8074 "\xce\x8a\xba\xda\xbe\x28\x08\xf7"
8075 "\xe2\x14\x8c\x71\xea\x72\xf9\x33"
8076 "\xf2\x88\x3f\xd7\xbb\x69\x6c\x29"
8077 "\x19\xdc\x84\xce\x1f\x12\x4f\xc8"
8078 "\xaf\xa5\x04\xba\x5a\xab\xb0\xd9"
8079 "\x14\x1f\x6c\x68\x98\x39\x89\x7a"
8080 "\xd9\xd8\x2f\xdf\xa8\x47\x4a\x25"
8081 "\xe2\xfb\x33\xf4\x59\x78\xe1\x68"
8082 "\x85\xcf\xfe\x59\x20\xd4\x05\x1d"
8083 "\x80\x99\xae\xbc\xca\xae\x0f\x2f"
8084 "\x65\x43\x34\x8e\x7e\xac\xd3\x93"
8085 "\x2f\xac\x6d\x14\x3d\x02\x07\x70"
8086 "\x9d\xa4\xf3\x1b\x5c\x36\xfc\x01"
8087 "\x73\x34\x85\x0c\x6c\xd6\xf1\xbd"
8088 "\x3f\xdf\xee\xf5\xd9\xba\x56\xef"
8089 "\xf4\x9b\x6b\xee\x9f\x5a\x78\x6d"
8090 "\x32\x19\xf4\xf7\xf8\x4c\x69\x0b"
8091 "\x4b\xbc\xbb\xb7\xf2\x85\xaf\x70"
8092 "\x75\x24\x6c\x54\xa7\x0e\x4d\x1d"
8093 "\x01\xbf\x08\xac\xcf\x7f\x2c\xe3"
8094 "\x14\x89\x5e\x70\x5a\x99\x92\xcd"
8095 "\x01\x84\xc8\xd2\xab\xe5\x4f\x58"
8096 "\xe7\x0f\x2f\x0e\xff\x68\xea\xfd"
8097 "\x15\xb3\x17\xe6\xb0\xe7\x85\xd8"
8098 "\x23\x2e\x05\xc7\xc9\xc4\x46\x1f"
8099 "\xe1\x9e\x49\x20\x23\x24\x4d\x7e"
8100 "\x29\x65\xff\xf4\xb6\xfd\x1a\x85"
8101 "\xc4\x16\xec\xfc\xea\x7b\xd6\x2c"
8102 "\x43\xf8\xb7\xbf\x79\xc0\x85\xcd"
8103 "\xef\xe1\x98\xd3\xa5\xf7\x90\x8c"
8104 "\xe9\x7f\x80\x6b\xd2\xac\x4c\x30"
8105 "\xa7\xc6\x61\x6c\xd2\xf9\x2c\xff"
8106 "\x30\xbc\x22\x81\x7d\x93\x12\xe4"
8107 "\x0a\xcd\xaf\xdd\xe8\xab\x0a\x1e"
8108 "\x13\xa4\x27\xc3\x5f\xf7\x4b\xbb"
8109 "\x37\x09\x4b\x91\x6f\x92\x4f\xaf"
8110 "\x52\xee\xdf\xef\x09\x6f\xf7\x5c"
8111 "\x6e\x12\x17\x72\x63\x57\xc7\xba"
8112 "\x3b\x6b\x38\x32\x73\x1b\x9c\x80"
8113 "\xc1\x7a\xc6\xcf\xcd\x35\xc0\x6b"
8114 "\x31\x1a\x6b\xe9\xd8\x2c\x29\x3f"
8115 "\x96\xfb\xb6\xcd\x13\x91\x3b\xc2"
8116 "\xd2\xa3\x31\x8d\xa4\xcd\x57\xcd"
8117 "\x13\x3d\x64\xfd\x06\xce\xe6\xdc"
8118 "\x0c\x24\x43\x31\x40\x57\xf1\x72"
8119 "\x17\xe3\x3a\x63\x6d\x35\xcf\x5d"
8120 "\x97\x40\x59\xdd\xf7\x3c\x02\xf7"
8121 "\x1c\x7e\x05\xbb\xa9\x0d\x01\xb1"
8122 "\x8e\xc0\x30\xa9\x53\x24\xc9\x89"
8123 "\x84\x6d\xaa\xd0\xcd\x91\xc2\x4d"
8124 "\x91\xb0\x89\xe2\xbf\x83\x44\xaa"
8125 "\x28\x72\x23\xa0\xc2\xad\xad\x1c"
8126 "\xfc\x3f\x09\x7a\x0b\xdc\xc5\x1b"
8127 "\x87\x13\xc6\x5b\x59\x8d\xf2\xc8"
8128 "\xaf\xdf\x11\x95",
8129 .rlen = 4100,
8130 },
8131};
8132
8133/*
8134 * CTS (Cipher Text Stealing) mode tests
8135 */
8136#define CTS_MODE_ENC_TEST_VECTORS 6
8137#define CTS_MODE_DEC_TEST_VECTORS 6
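
The vectors below exercise CBC with ciphertext stealing in the RFC 3962 ordering: zero IV, the last full ciphertext block held back and truncated, and the padded final block's ciphertext emitted in its place. The following is a minimal userspace sketch of that construction, assuming OpenSSL's low-level AES_set_encrypt_key()/AES_encrypt() primitives purely for illustration; it is not part of this patch and only handles inputs longer than one block, which is all these vectors cover.

    #include <openssl/aes.h>
    #include <stdio.h>
    #include <string.h>

    /* CBC with ciphertext stealing (RFC 3962 ordering), zero IV, len > 16. */
    static void cbc_cts_encrypt(const unsigned char *key, const unsigned char *in,
                                size_t len, unsigned char *out)
    {
        AES_KEY aes;
        unsigned char prev[16] = { 0 };        /* zero IV, as in the vectors */
        unsigned char blk[16];
        size_t rem = len % 16;
        size_t last = rem ? rem : 16;          /* bytes in the final block */
        size_t nblocks = (len + 15) / 16;
        size_t i, j;

        AES_set_encrypt_key(key, 128, &aes);

        /* Ordinary CBC over every block except the last (possibly short) one. */
        for (i = 0; i + 1 < nblocks; i++) {
            for (j = 0; j < 16; j++)
                blk[j] = in[16 * i + j] ^ prev[j];
            AES_encrypt(blk, prev, &aes);
            if (i + 2 < nblocks)               /* write all but C_{n-1}, the "stolen" block */
                memcpy(out + 16 * i, prev, 16);
        }

        /* Zero-pad the final block, chain it against C_{n-1} and encrypt it ... */
        memset(blk, 0, sizeof(blk));
        memcpy(blk, in + 16 * (nblocks - 1), last);
        for (j = 0; j < 16; j++)
            blk[j] ^= prev[j];
        AES_encrypt(blk, out + 16 * (nblocks - 2), &aes);

        /* ... then emit the truncated C_{n-1} as the (shorter) final block. */
        memcpy(out + 16 * (nblocks - 1), prev, last);
    }

    int main(void)
    {
        /* First vector below: key "chicken teriyaki", 17-byte plaintext. */
        const unsigned char key[] = "chicken teriyaki";
        const unsigned char pt[]  = "I would like the ";
        unsigned char ct[17];
        size_t i;

        cbc_cts_encrypt(key, pt, sizeof(ct), ct);
        for (i = 0; i < sizeof(ct); i++)       /* should print c6 35 ... 7f 97 */
            printf("%02x ", ct[i]);
        printf("\n");
        return 0;
    }

This ordering is why the final byte of the 17-byte vector's result (0x97) matches the first byte of the held-back CBC block that reappears in full at the front of the longer vectors.
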
8138static struct cipher_testvec cts_mode_enc_tv_template[] = {
8139 { /* from rfc3962 */
8140 .klen = 16,
8141 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8142 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8143 .ilen = 17,
8144 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8145 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8146 "\x20",
8147 .rlen = 17,
8148 .result = "\xc6\x35\x35\x68\xf2\xbf\x8c\xb4"
8149 "\xd8\xa5\x80\x36\x2d\xa7\xff\x7f"
8150 "\x97",
8151 }, {
8152 .klen = 16,
8153 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8154 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8155 .ilen = 31,
8156 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8157 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8158 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8159 "\x20\x47\x61\x75\x27\x73\x20",
8160 .rlen = 31,
8161 .result = "\xfc\x00\x78\x3e\x0e\xfd\xb2\xc1"
8162 "\xd4\x45\xd4\xc8\xef\xf7\xed\x22"
8163 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8164 "\xc0\x7b\x25\xe2\x5e\xcf\xe5",
8165 }, {
8166 .klen = 16,
8167 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8168 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8169 .ilen = 32,
8170 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8171 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8172 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8173 "\x20\x47\x61\x75\x27\x73\x20\x43",
8174 .rlen = 32,
8175 .result = "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8176 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8177 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8178 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84",
8179 }, {
8180 .klen = 16,
8181 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8182 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8183 .ilen = 47,
8184 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8185 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8186 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8187 "\x20\x47\x61\x75\x27\x73\x20\x43"
8188 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8189 "\x70\x6c\x65\x61\x73\x65\x2c",
8190 .rlen = 47,
8191 .result = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8192 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8193 "\xb3\xff\xfd\x94\x0c\x16\xa1\x8c"
8194 "\x1b\x55\x49\xd2\xf8\x38\x02\x9e"
8195 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8196 "\xbe\x7f\xcb\xcc\x98\xeb\xf5",
8197 }, {
8198 .klen = 16,
8199 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8200 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8201 .ilen = 48,
8202 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8203 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8204 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8205 "\x20\x47\x61\x75\x27\x73\x20\x43"
8206 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8207 "\x70\x6c\x65\x61\x73\x65\x2c\x20",
8208 .rlen = 48,
8209 .result = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8210 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8211 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8212 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8"
8213 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8214 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8",
8215 }, {
8216 .klen = 16,
8217 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8218 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8219 .ilen = 64,
8220 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8221 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8222 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8223 "\x20\x47\x61\x75\x27\x73\x20\x43"
8224 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8225 "\x70\x6c\x65\x61\x73\x65\x2c\x20"
8226 "\x61\x6e\x64\x20\x77\x6f\x6e\x74"
8227 "\x6f\x6e\x20\x73\x6f\x75\x70\x2e",
8228 .rlen = 64,
8229 .result = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8230 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8231 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8232 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8233 "\x48\x07\xef\xe8\x36\xee\x89\xa5"
8234 "\x26\x73\x0d\xbc\x2f\x7b\xc8\x40"
8235 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8236 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8",
8237 }
8238};
8239
8240static struct cipher_testvec cts_mode_dec_tv_template[] = {
8241 { /* from rfc3962 */
8242 .klen = 16,
8243 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8244 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8245 .rlen = 17,
8246 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8247 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8248 "\x20",
8249 .ilen = 17,
8250 .input = "\xc6\x35\x35\x68\xf2\xbf\x8c\xb4"
8251 "\xd8\xa5\x80\x36\x2d\xa7\xff\x7f"
8252 "\x97",
8253 }, {
8254 .klen = 16,
8255 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8256 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8257 .rlen = 31,
8258 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8259 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8260 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8261 "\x20\x47\x61\x75\x27\x73\x20",
8262 .ilen = 31,
8263 .input = "\xfc\x00\x78\x3e\x0e\xfd\xb2\xc1"
8264 "\xd4\x45\xd4\xc8\xef\xf7\xed\x22"
8265 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8266 "\xc0\x7b\x25\xe2\x5e\xcf\xe5",
8267 }, {
8268 .klen = 16,
8269 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8270 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8271 .rlen = 32,
8272 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8273 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8274 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8275 "\x20\x47\x61\x75\x27\x73\x20\x43",
8276 .ilen = 32,
8277 .input = "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8278 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8279 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8280 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84",
8281 }, {
8282 .klen = 16,
8283 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8284 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8285 .rlen = 47,
8286 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8287 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8288 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8289 "\x20\x47\x61\x75\x27\x73\x20\x43"
8290 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8291 "\x70\x6c\x65\x61\x73\x65\x2c",
8292 .ilen = 47,
8293 .input = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8294 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8295 "\xb3\xff\xfd\x94\x0c\x16\xa1\x8c"
8296 "\x1b\x55\x49\xd2\xf8\x38\x02\x9e"
8297 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8298 "\xbe\x7f\xcb\xcc\x98\xeb\xf5",
8299 }, {
8300 .klen = 16,
8301 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8302 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8303 .rlen = 48,
8304 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8305 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8306 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8307 "\x20\x47\x61\x75\x27\x73\x20\x43"
8308 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8309 "\x70\x6c\x65\x61\x73\x65\x2c\x20",
8310 .ilen = 48,
8311 .input = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8312 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8313 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8314 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8"
8315 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8316 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8",
8317 }, {
8318 .klen = 16,
8319 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8320 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8321 .rlen = 64,
8322 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8323 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8324 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8325 "\x20\x47\x61\x75\x27\x73\x20\x43"
8326 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8327 "\x70\x6c\x65\x61\x73\x65\x2c\x20"
8328 "\x61\x6e\x64\x20\x77\x6f\x6e\x74"
8329 "\x6f\x6e\x20\x73\x6f\x75\x70\x2e",
8330 .ilen = 64,
8331 .input = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8332 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8333 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8334 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8335 "\x48\x07\xef\xe8\x36\xee\x89\xa5"
8336 "\x26\x73\x0d\xbc\x2f\x7b\xc8\x40"
8337 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8338 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8",
8339 }
8340};
8341
8342/*
8343 * Compression stuff.
8344 */
8345#define COMP_BUF_SIZE 512
8346
8347struct comp_testvec {
8348 int inlen, outlen;
8349 char input[COMP_BUF_SIZE];
8350 char output[COMP_BUF_SIZE];
8351};
8352
8353/*
8354 * Deflate test vectors (null-terminated strings).
8355 * Params: winbits=11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
8356 */
8357#define DEFLATE_COMP_TEST_VECTORS 2
8358#define DEFLATE_DECOMP_TEST_VECTORS 2
8359
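
If it helps to see where vectors like these could come from, the sketch below drives userspace zlib with roughly the parameters quoted above (raw deflate via a negative windowBits of 11, default level and strategy, MAX_MEM_LEVEL). It is illustrative only: depending on the zlib version and memLevel the output may not be byte-identical to the vectors, so treat it as a way to inspect the stream rather than a generator.

    #include <zlib.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *msg = "Join us now and share the software "
                          "Join us now and share the software ";
        unsigned char out[512];
        z_stream s;

        memset(&s, 0, sizeof(s));
        /* Raw deflate (negative windowBits), default level, MAX_MEM_LEVEL,
         * mirroring the parameters quoted in the comment above. */
        if (deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -11,
                         MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
            return 1;

        s.next_in   = (unsigned char *)msg;
        s.avail_in  = strlen(msg);
        s.next_out  = out;
        s.avail_out = sizeof(out);
        if (deflate(&s, Z_FINISH) != Z_STREAM_END)
            return 1;

        printf("compressed %lu -> %lu bytes\n",
               (unsigned long)strlen(msg), (unsigned long)s.total_out);
        deflateEnd(&s);
        return 0;
    }
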
8360static struct comp_testvec deflate_comp_tv_template[] = {
8361 {
8362 .inlen = 70,
8363 .outlen = 38,
8364 .input = "Join us now and share the software "
8365 "Join us now and share the software ",
8366 .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
8367 "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
8368 "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
8369 "\x48\x55\x28\xce\x4f\x2b\x29\x07"
8370 "\x71\xbc\x08\x2b\x01\x00",
8371 }, {
8372 .inlen = 191,
8373 .outlen = 122,
8374 .input = "This document describes a compression method based on the DEFLATE"
8375 "compression algorithm. This document defines the application of "
8376 "the DEFLATE algorithm to the IP Payload Compression Protocol.",
8377 .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
8378 "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
8379 "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
8380 "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
8381 "\x68\x12\x51\xae\x76\x67\xd6\x27"
8382 "\x19\x88\x1a\xde\x85\xab\x21\xf2"
8383 "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
8384 "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
8385 "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
8386 "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
8387 "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
8388 "\x52\x37\xed\x0e\x52\x6b\x59\x02"
8389 "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
8390 "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
8391 "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
8392 "\xfa\x02",
8393 },
8394};
8395
8396static struct comp_testvec deflate_decomp_tv_template[] = {
8397 {
8398 .inlen = 122,
8399 .outlen = 191,
8400 .input = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
8401 "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
8402 "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
8403 "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
8404 "\x68\x12\x51\xae\x76\x67\xd6\x27"
8405 "\x19\x88\x1a\xde\x85\xab\x21\xf2"
8406 "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
8407 "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
8408 "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
8409 "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
8410 "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
8411 "\x52\x37\xed\x0e\x52\x6b\x59\x02"
8412 "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
8413 "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
8414 "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
8415 "\xfa\x02",
8416 .output = "This document describes a compression method based on the DEFLATE"
8417 "compression algorithm. This document defines the application of "
8418 "the DEFLATE algorithm to the IP Payload Compression Protocol.",
8419 }, {
8420 .inlen = 38,
8421 .outlen = 70,
8422 .input = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
8423 "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
8424 "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
8425 "\x48\x55\x28\xce\x4f\x2b\x29\x07"
8426 "\x71\xbc\x08\x2b\x01\x00",
8427 .output = "Join us now and share the software "
8428 "Join us now and share the software ",
8429 },
8430};
8431
8432/*
8433 * LZO test vectors (null-terminated strings).
8434 */
8435#define LZO_COMP_TEST_VECTORS 2
8436#define LZO_DECOMP_TEST_VECTORS 2
8437
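
As with deflate, a userspace sketch using liblzo2's lzo1x_1 compressor shows the general shape of these vectors. Whether the bytes match the kernel's LZO1X port exactly is version-dependent, so this is illustrative only and not part of the patch.

    #include <lzo/lzo1x.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *msg = "Join us now and share the software "
                          "Join us now and share the software ";
        unsigned char out[512];
        /* Work memory sized and aligned as the liblzo2 documentation asks. */
        lzo_align_t wrk[(LZO1X_1_MEM_COMPRESS + sizeof(lzo_align_t) - 1)
                        / sizeof(lzo_align_t)];
        lzo_uint outlen = sizeof(out);

        if (lzo_init() != LZO_E_OK)
            return 1;
        if (lzo1x_1_compress((const unsigned char *)msg, strlen(msg),
                             out, &outlen, wrk) != LZO_E_OK)
            return 1;

        printf("compressed %lu -> %lu bytes\n",
               (unsigned long)strlen(msg), (unsigned long)outlen);
        return 0;
    }
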
8438static struct comp_testvec lzo_comp_tv_template[] = {
8439 {
8440 .inlen = 70,
8441 .outlen = 46,
8442 .input = "Join us now and share the software "
8443 "Join us now and share the software ",
8444 .output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
8445 "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
8446 "\x64\x20\x73\x68\x61\x72\x65\x20"
8447 "\x74\x68\x65\x20\x73\x6f\x66\x74"
8448 "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
8449 "\x3d\x88\x00\x11\x00\x00",
8450 }, {
8451 .inlen = 159,
8452 .outlen = 133,
8453 .input = "This document describes a compression method based on the LZO "
8454 "compression algorithm. This document defines the application of "
8455 "the LZO algorithm used in UBIFS.",
8456 .output = "\x00\x2b\x54\x68\x69\x73\x20\x64"
8457 "\x6f\x63\x75\x6d\x65\x6e\x74\x20"
8458 "\x64\x65\x73\x63\x72\x69\x62\x65"
8459 "\x73\x20\x61\x20\x63\x6f\x6d\x70"
8460 "\x72\x65\x73\x73\x69\x6f\x6e\x20"
8461 "\x6d\x65\x74\x68\x6f\x64\x20\x62"
8462 "\x61\x73\x65\x64\x20\x6f\x6e\x20"
8463 "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
8464 "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
8465 "\x69\x74\x68\x6d\x2e\x20\x20\x54"
8466 "\x68\x69\x73\x2a\x54\x01\x02\x66"
8467 "\x69\x6e\x65\x73\x94\x06\x05\x61"
8468 "\x70\x70\x6c\x69\x63\x61\x74\x76"
8469 "\x0a\x6f\x66\x88\x02\x60\x09\x27"
8470 "\xf0\x00\x0c\x20\x75\x73\x65\x64"
8471 "\x20\x69\x6e\x20\x55\x42\x49\x46"
8472 "\x53\x2e\x11\x00\x00",
8473 },
8474};
8475
8476static struct comp_testvec lzo_decomp_tv_template[] = {
8477 {
8478 .inlen = 133,
8479 .outlen = 159,
8480 .input = "\x00\x2b\x54\x68\x69\x73\x20\x64"
8481 "\x6f\x63\x75\x6d\x65\x6e\x74\x20"
8482 "\x64\x65\x73\x63\x72\x69\x62\x65"
8483 "\x73\x20\x61\x20\x63\x6f\x6d\x70"
8484 "\x72\x65\x73\x73\x69\x6f\x6e\x20"
8485 "\x6d\x65\x74\x68\x6f\x64\x20\x62"
8486 "\x61\x73\x65\x64\x20\x6f\x6e\x20"
8487 "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
8488 "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
8489 "\x69\x74\x68\x6d\x2e\x20\x20\x54"
8490 "\x68\x69\x73\x2a\x54\x01\x02\x66"
8491 "\x69\x6e\x65\x73\x94\x06\x05\x61"
8492 "\x70\x70\x6c\x69\x63\x61\x74\x76"
8493 "\x0a\x6f\x66\x88\x02\x60\x09\x27"
8494 "\xf0\x00\x0c\x20\x75\x73\x65\x64"
8495 "\x20\x69\x6e\x20\x55\x42\x49\x46"
8496 "\x53\x2e\x11\x00\x00",
8497 .output = "This document describes a compression method based on the LZO "
8498 "compression algorithm. This document defines the application of "
8499 "the LZO algorithm used in UBIFS.",
8500 }, {
8501 .inlen = 46,
8502 .outlen = 70,
8503 .input = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
8504 "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
8505 "\x64\x20\x73\x68\x61\x72\x65\x20"
8506 "\x74\x68\x65\x20\x73\x6f\x66\x74"
8507 "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
8508 "\x3d\x88\x00\x11\x00\x00",
8509 .output = "Join us now and share the software "
8510 "Join us now and share the software ",
8511 },
8512};
8513
8514/*
8515 * Michael MIC test vectors from IEEE 802.11i
8516 */
8517#define MICHAEL_MIC_TEST_VECTORS 6
8518
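
These vectors form a chain: each digest becomes the key of the next test. For reference, a small userspace sketch of the Michael round function follows. The padding shown (a 0x5a byte plus zeroes to fill the word, then one all-zero word) reflects my reading of 802.11i and of how these vectors chain, so treat the expected output noted in the comments as an assumption rather than a guarantee.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
    static uint32_t ror32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

    /* One Michael round: fold a 32-bit message word into the (l, r) state. */
    static void michael_round(uint32_t *l, uint32_t *r, uint32_t m)
    {
        *l ^= m;
        *r ^= rol32(*l, 17);
        *l += *r;
        *r ^= ((*l & 0xff00ff00u) >> 8) | ((*l & 0x00ff00ffu) << 8);
        *l += *r;
        *r ^= rol32(*l, 3);
        *l += *r;
        *r ^= ror32(*l, 2);
        *l += *r;
    }

    int main(void)
    {
        /* Second vector below: key 82 92 5c 1c a1 d1 30 b8, message "M". */
        uint32_t l = 0x1c5c9282u, r = 0xb830d1a1u;  /* key words, little-endian */

        /* "M" padded with 0x5a (then zeroes), followed by one all-zero word. */
        michael_round(&l, &r, 0x00005a4du);
        michael_round(&l, &r, 0x00000000u);

        /* If the padding above is right, this prints ca214743 3f9b6340, i.e.
         * 43 47 21 ca 40 63 9b 3f once each word is stored little-endian. */
        printf("%08x %08x\n", l, r);
        return 0;
    }
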
8519static struct hash_testvec michael_mic_tv_template[] = {
8520 {
8521 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
8522 .ksize = 8,
8523 .plaintext = zeroed_string,
8524 .psize = 0,
8525 .digest = "\x82\x92\x5c\x1c\xa1\xd1\x30\xb8",
8526 },
8527 {
8528 .key = "\x82\x92\x5c\x1c\xa1\xd1\x30\xb8",
8529 .ksize = 8,
8530 .plaintext = "M",
8531 .psize = 1,
8532 .digest = "\x43\x47\x21\xca\x40\x63\x9b\x3f",
8533 },
8534 {
8535 .key = "\x43\x47\x21\xca\x40\x63\x9b\x3f",
8536 .ksize = 8,
8537 .plaintext = "Mi",
8538 .psize = 2,
8539 .digest = "\xe8\xf9\xbe\xca\xe9\x7e\x5d\x29",
8540 },
8541 {
8542 .key = "\xe8\xf9\xbe\xca\xe9\x7e\x5d\x29",
8543 .ksize = 8,
8544 .plaintext = "Mic",
8545 .psize = 3,
8546 .digest = "\x90\x03\x8f\xc6\xcf\x13\xc1\xdb",
8547 },
8548 {
8549 .key = "\x90\x03\x8f\xc6\xcf\x13\xc1\xdb",
8550 .ksize = 8,
8551 .plaintext = "Mich",
8552 .psize = 4,
8553 .digest = "\xd5\x5e\x10\x05\x10\x12\x89\x86",
8554 },
8555 {
8556 .key = "\xd5\x5e\x10\x05\x10\x12\x89\x86",
8557 .ksize = 8,
8558 .plaintext = "Michael",
8559 .psize = 7,
8560 .digest = "\x0a\x94\x2b\x12\x4e\xca\xa5\x46",
8561 }
8562};
8563
8564/*
8565 * CRC32C test vectors
8566 */
8567#define CRC32C_TEST_VECTORS 14
8568
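
Two conventions are worth calling out when reading these vectors: the 4-byte "key" is consumed little-endian as the initial CRC state (the default seed being all ones), and the stored digest is the bit-complemented final state, again little-endian. That is why the keyless empty-message vector digests to all zeroes and why the second vector's digest is simply the bitwise inverse of its key. A bitwise reference sketch, assuming those conventions, is:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Bit-at-a-time CRC-32C (reflected Castagnoli polynomial 0x82f63b78). */
    static uint32_t crc32c_update(uint32_t crc, const uint8_t *p, size_t len)
    {
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0x82f63b78u & -(crc & 1u));
        }
        return crc;
    }

    int main(void)
    {
        /* Third vector below: default all-ones seed over bytes 0x01..0x28. */
        uint8_t buf[40];
        for (int i = 0; i < 40; i++)
            buf[i] = (uint8_t)(i + 1);

        uint32_t digest = ~crc32c_update(0xffffffffu, buf, sizeof(buf));
        /* Expect 0e2c157f here, i.e. 7f 15 2c 0e stored little-endian,
         * if the seed/complement reading above is right. */
        printf("%08x\n", digest);
        return 0;
    }
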
8569static struct hash_testvec crc32c_tv_template[] = {
8570 {
8571 .psize = 0,
8572 .digest = "\x00\x00\x00\x00",
8573 },
8574 {
8575 .key = "\x87\xa9\xcb\xed",
8576 .ksize = 4,
8577 .psize = 0,
8578 .digest = "\x78\x56\x34\x12",
8579 },
8580 {
8581 .key = "\xff\xff\xff\xff",
8582 .ksize = 4,
8583 .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08"
8584 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
8585 "\x11\x12\x13\x14\x15\x16\x17\x18"
8586 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
8587 "\x21\x22\x23\x24\x25\x26\x27\x28",
8588 .psize = 40,
8589 .digest = "\x7f\x15\x2c\x0e",
8590 },
8591 {
8592 .key = "\xff\xff\xff\xff",
8593 .ksize = 4,
8594 .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
8595 "\x31\x32\x33\x34\x35\x36\x37\x38"
8596 "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
8597 "\x41\x42\x43\x44\x45\x46\x47\x48"
8598 "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50",
8599 .psize = 40,
8600 .digest = "\xf6\xeb\x80\xe9",
8601 },
8602 {
8603 .key = "\xff\xff\xff\xff",
8604 .ksize = 4,
8605 .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58"
8606 "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
8607 "\x61\x62\x63\x64\x65\x66\x67\x68"
8608 "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
8609 "\x71\x72\x73\x74\x75\x76\x77\x78",
8610 .psize = 40,
8611 .digest = "\xed\xbd\x74\xde",
8612 },
8613 {
8614 .key = "\xff\xff\xff\xff",
8615 .ksize = 4,
8616 .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
8617 "\x81\x82\x83\x84\x85\x86\x87\x88"
8618 "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
8619 "\x91\x92\x93\x94\x95\x96\x97\x98"
8620 "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0",
8621 .psize = 40,
8622 .digest = "\x62\xc8\x79\xd5",
8623 },
8624 {
8625 .key = "\xff\xff\xff\xff",
8626 .ksize = 4,
8627 .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8"
8628 "\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
8629 "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8"
8630 "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
8631 "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8",
8632 .psize = 40,
8633 .digest = "\xd0\x9a\x97\xba",
8634 },
8635 {
8636 .key = "\xff\xff\xff\xff",
8637 .ksize = 4,
8638 .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
8639 "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8"
8640 "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
8641 "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8"
8642 "\xe9\xea\xeb\xec\xed\xee\xef\xf0",
8643 .psize = 40,
8644 .digest = "\x13\xd9\x29\x2b",
8645 },
8646 {
8647 .key = "\x80\xea\xd3\xf1",
8648 .ksize = 4,
8649 .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
8650 "\x31\x32\x33\x34\x35\x36\x37\x38"
8651 "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
8652 "\x41\x42\x43\x44\x45\x46\x47\x48"
8653 "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50",
8654 .psize = 40,
8655 .digest = "\x0c\xb5\xe2\xa2",
8656 },
8657 {
8658 .key = "\xf3\x4a\x1d\x5d",
8659 .ksize = 4,
8660 .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58"
8661 "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
8662 "\x61\x62\x63\x64\x65\x66\x67\x68"
8663 "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
8664 "\x71\x72\x73\x74\x75\x76\x77\x78",
8665 .psize = 40,
8666 .digest = "\xd1\x7f\xfb\xa6",
8667 },
8668 {
8669 .key = "\x2e\x80\x04\x59",
8670 .ksize = 4,
8671 .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
8672 "\x81\x82\x83\x84\x85\x86\x87\x88"
8673 "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
8674 "\x91\x92\x93\x94\x95\x96\x97\x98"
8675 "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0",
8676 .psize = 40,
8677 .digest = "\x59\x33\xe6\x7a",
8678 },
8679 {
8680 .key = "\xa6\xcc\x19\x85",
8681 .ksize = 4,
8682 .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8"
8683 "\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
8684 "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8"
8685 "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
8686 "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8",
8687 .psize = 40,
8688 .digest = "\xbe\x03\x01\xd2",
8689 },
8690 {
8691 .key = "\x41\xfc\xfe\x2d",
8692 .ksize = 4,
8693 .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
8694 "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8"
8695 "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
8696 "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8"
8697 "\xe9\xea\xeb\xec\xed\xee\xef\xf0",
8698 .psize = 40,
8699 .digest = "\x75\xd3\xc5\x24",
8700 },
8701 {
8702 .key = "\xff\xff\xff\xff",
8703 .ksize = 4,
8704 .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08"
8705 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
8706 "\x11\x12\x13\x14\x15\x16\x17\x18"
8707 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
8708 "\x21\x22\x23\x24\x25\x26\x27\x28"
8709 "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
8710 "\x31\x32\x33\x34\x35\x36\x37\x38"
8711 "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
8712 "\x41\x42\x43\x44\x45\x46\x47\x48"
8713 "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50"
8714 "\x51\x52\x53\x54\x55\x56\x57\x58"
8715 "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
8716 "\x61\x62\x63\x64\x65\x66\x67\x68"
8717 "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
8718 "\x71\x72\x73\x74\x75\x76\x77\x78"
8719 "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
8720 "\x81\x82\x83\x84\x85\x86\x87\x88"
8721 "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
8722 "\x91\x92\x93\x94\x95\x96\x97\x98"
8723 "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0"
8724 "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8"
8725 "\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
8726 "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8"
8727 "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
8728 "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8"
8729 "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
8730 "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8"
8731 "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
8732 "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8"
8733 "\xe9\xea\xeb\xec\xed\xee\xef\xf0",
8734 .psize = 240,
8735 .digest = "\x75\xd3\xc5\x24",
8736 .np = 2,
8737 .tap = { 31, 209 }
8738 },
8739};
8740
8741/*
8742 * Cipher speed tests
8743 */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
new file mode 100644
index 000000000000..b828c6cf1b1d
--- /dev/null
+++ b/crypto/testmgr.c
@@ -0,0 +1,1868 @@
1/*
2 * Algorithm testing framework and tests.
3 *
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5 * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
6 * Copyright (c) 2007 Nokia Siemens Networks
7 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 */
15
16#include <crypto/hash.h>
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/scatterlist.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22
23#include "internal.h"
24#include "testmgr.h"
25
26/*
27 * Need slab memory for testing (size in number of pages).
28 */
29#define XBUFSIZE 8
30
31/*
32 * Indexes into the xbuf to simulate cross-page access.
33 */
34#define IDX1 32
35#define IDX2 32400
36#define IDX3 1
37#define IDX4 8193
38#define IDX5 22222
39#define IDX6 17101
40#define IDX7 27333
41#define IDX8 3000
42
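
Each index encodes a page number plus an offset: the test code below splits it as xbuf[IDX >> PAGE_SHIFT] + offset_in_page(IDX), so the chunked templates deliberately start their pieces near page boundaries. A stand-alone sketch of that mapping, assuming 4 KiB pages (the 12 below stands in for PAGE_SHIFT):

    #include <stdio.h>

    int main(void)
    {
        /* Same values as IDX1..IDX8 above. */
        const unsigned int idx[8] = { 32, 32400, 1, 8193, 22222, 17101, 27333, 3000 };

        for (int k = 0; k < 8; k++)
            printf("IDX%d = %5u -> xbuf[%u] + offset %u\n",
                   k + 1, idx[k], idx[k] >> 12, idx[k] & 0xfff);
        return 0;
    }
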
43/*
44 * Used by test_cipher()
45 */
46#define ENCRYPT 1
47#define DECRYPT 0
48
49struct tcrypt_result {
50 struct completion completion;
51 int err;
52};
53
54struct aead_test_suite {
55 struct {
56 struct aead_testvec *vecs;
57 unsigned int count;
58 } enc, dec;
59};
60
61struct cipher_test_suite {
62 struct {
63 struct cipher_testvec *vecs;
64 unsigned int count;
65 } enc, dec;
66};
67
68struct comp_test_suite {
69 struct {
70 struct comp_testvec *vecs;
71 unsigned int count;
72 } comp, decomp;
73};
74
75struct hash_test_suite {
76 struct hash_testvec *vecs;
77 unsigned int count;
78};
79
80struct alg_test_desc {
81 const char *alg;
82 int (*test)(const struct alg_test_desc *desc, const char *driver,
83 u32 type, u32 mask);
84
85 union {
86 struct aead_test_suite aead;
87 struct cipher_test_suite cipher;
88 struct comp_test_suite comp;
89 struct hash_test_suite hash;
90 } suite;
91};
92
93static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
94
95static char *xbuf[XBUFSIZE];
96static char *axbuf[XBUFSIZE];
97
98static void hexdump(unsigned char *buf, unsigned int len)
99{
100 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
101 16, 1,
102 buf, len, false);
103}
104
105static void tcrypt_complete(struct crypto_async_request *req, int err)
106{
107 struct tcrypt_result *res = req->data;
108
109 if (err == -EINPROGRESS)
110 return;
111
112 res->err = err;
113 complete(&res->completion);
114}
115
116static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
117 unsigned int tcount)
118{
119 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
120 unsigned int i, j, k, temp;
121 struct scatterlist sg[8];
122 char result[64];
123 struct ahash_request *req;
124 struct tcrypt_result tresult;
125 int ret;
126 void *hash_buff;
127
128 init_completion(&tresult.completion);
129
130 req = ahash_request_alloc(tfm, GFP_KERNEL);
131 if (!req) {
132 printk(KERN_ERR "alg: hash: Failed to allocate request for "
133 "%s\n", algo);
134 ret = -ENOMEM;
135 goto out_noreq;
136 }
137 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
138 tcrypt_complete, &tresult);
139
140 for (i = 0; i < tcount; i++) {
141 memset(result, 0, 64);
142
143 hash_buff = xbuf[0];
144
145 memcpy(hash_buff, template[i].plaintext, template[i].psize);
146 sg_init_one(&sg[0], hash_buff, template[i].psize);
147
148 if (template[i].ksize) {
149 crypto_ahash_clear_flags(tfm, ~0);
150 ret = crypto_ahash_setkey(tfm, template[i].key,
151 template[i].ksize);
152 if (ret) {
153 printk(KERN_ERR "alg: hash: setkey failed on "
154 "test %d for %s: ret=%d\n", i + 1, algo,
155 -ret);
156 goto out;
157 }
158 }
159
160 ahash_request_set_crypt(req, sg, result, template[i].psize);
161 ret = crypto_ahash_digest(req);
162 switch (ret) {
163 case 0:
164 break;
165 case -EINPROGRESS:
166 case -EBUSY:
167 ret = wait_for_completion_interruptible(
168 &tresult.completion);
169 if (!ret && !(ret = tresult.err)) {
170 INIT_COMPLETION(tresult.completion);
171 break;
172 }
173 /* fall through */
174 default:
175 printk(KERN_ERR "alg: hash: digest failed on test %d "
176 "for %s: ret=%d\n", i + 1, algo, -ret);
177 goto out;
178 }
179
180 if (memcmp(result, template[i].digest,
181 crypto_ahash_digestsize(tfm))) {
182 printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
183 i + 1, algo);
184 hexdump(result, crypto_ahash_digestsize(tfm));
185 ret = -EINVAL;
186 goto out;
187 }
188 }
189
190 j = 0;
191 for (i = 0; i < tcount; i++) {
192 if (template[i].np) {
193 j++;
194 memset(result, 0, 64);
195
196 temp = 0;
197 sg_init_table(sg, template[i].np);
198 for (k = 0; k < template[i].np; k++) {
199 sg_set_buf(&sg[k],
200 memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
201 offset_in_page(IDX[k]),
202 template[i].plaintext + temp,
203 template[i].tap[k]),
204 template[i].tap[k]);
205 temp += template[i].tap[k];
206 }
207
208 if (template[i].ksize) {
209 crypto_ahash_clear_flags(tfm, ~0);
210 ret = crypto_ahash_setkey(tfm, template[i].key,
211 template[i].ksize);
212
213 if (ret) {
214 printk(KERN_ERR "alg: hash: setkey "
215 "failed on chunking test %d "
216 "for %s: ret=%d\n", j, algo,
217 -ret);
218 goto out;
219 }
220 }
221
222 ahash_request_set_crypt(req, sg, result,
223 template[i].psize);
224 ret = crypto_ahash_digest(req);
225 switch (ret) {
226 case 0:
227 break;
228 case -EINPROGRESS:
229 case -EBUSY:
230 ret = wait_for_completion_interruptible(
231 &tresult.completion);
232 if (!ret && !(ret = tresult.err)) {
233 INIT_COMPLETION(tresult.completion);
234 break;
235 }
236 /* fall through */
237 default:
238 printk(KERN_ERR "alg: hash: digest failed "
239 "on chunking test %d for %s: "
240 "ret=%d\n", j, algo, -ret);
241 goto out;
242 }
243
244 if (memcmp(result, template[i].digest,
245 crypto_ahash_digestsize(tfm))) {
246 printk(KERN_ERR "alg: hash: Chunking test %d "
247 "failed for %s\n", j, algo);
248 hexdump(result, crypto_ahash_digestsize(tfm));
249 ret = -EINVAL;
250 goto out;
251 }
252 }
253 }
254
255 ret = 0;
256
257out:
258 ahash_request_free(req);
259out_noreq:
260 return ret;
261}
262
263static int test_aead(struct crypto_aead *tfm, int enc,
264 struct aead_testvec *template, unsigned int tcount)
265{
266 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
267 unsigned int i, j, k, n, temp;
268 int ret = 0;
269 char *q;
270 char *key;
271 struct aead_request *req;
272 struct scatterlist sg[8];
273 struct scatterlist asg[8];
274 const char *e;
275 struct tcrypt_result result;
276 unsigned int authsize;
277 void *input;
278 void *assoc;
279 char iv[MAX_IVLEN];
280
281 if (enc == ENCRYPT)
282 e = "encryption";
283 else
284 e = "decryption";
285
286 init_completion(&result.completion);
287
288 req = aead_request_alloc(tfm, GFP_KERNEL);
289 if (!req) {
290 printk(KERN_ERR "alg: aead: Failed to allocate request for "
291 "%s\n", algo);
292 ret = -ENOMEM;
293 goto out;
294 }
295
296 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
297 tcrypt_complete, &result);
298
299 for (i = 0, j = 0; i < tcount; i++) {
300 if (!template[i].np) {
301 j++;
302
303 /* some templates have no input data but they will
304 * touch input
305 */
306 input = xbuf[0];
307 assoc = axbuf[0];
308
309 memcpy(input, template[i].input, template[i].ilen);
310 memcpy(assoc, template[i].assoc, template[i].alen);
311 if (template[i].iv)
312 memcpy(iv, template[i].iv, MAX_IVLEN);
313 else
314 memset(iv, 0, MAX_IVLEN);
315
316 crypto_aead_clear_flags(tfm, ~0);
317 if (template[i].wk)
318 crypto_aead_set_flags(
319 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
320
321 key = template[i].key;
322
323 ret = crypto_aead_setkey(tfm, key,
324 template[i].klen);
325 if (!ret == template[i].fail) {
326 printk(KERN_ERR "alg: aead: setkey failed on "
327 "test %d for %s: flags=%x\n", j, algo,
328 crypto_aead_get_flags(tfm));
329 goto out;
330 } else if (ret)
331 continue;
332
333 authsize = abs(template[i].rlen - template[i].ilen);
334 ret = crypto_aead_setauthsize(tfm, authsize);
335 if (ret) {
336 printk(KERN_ERR "alg: aead: Failed to set "
337 "authsize to %u on test %d for %s\n",
338 authsize, j, algo);
339 goto out;
340 }
341
342 sg_init_one(&sg[0], input,
343 template[i].ilen + (enc ? authsize : 0));
344
345 sg_init_one(&asg[0], assoc, template[i].alen);
346
347 aead_request_set_crypt(req, sg, sg,
348 template[i].ilen, iv);
349
350 aead_request_set_assoc(req, asg, template[i].alen);
351
352 ret = enc ?
353 crypto_aead_encrypt(req) :
354 crypto_aead_decrypt(req);
355
356 switch (ret) {
357 case 0:
358 break;
359 case -EINPROGRESS:
360 case -EBUSY:
361 ret = wait_for_completion_interruptible(
362 &result.completion);
363 if (!ret && !(ret = result.err)) {
364 INIT_COMPLETION(result.completion);
365 break;
366 }
367 /* fall through */
368 default:
369 printk(KERN_ERR "alg: aead: %s failed on test "
370 "%d for %s: ret=%d\n", e, j, algo, -ret);
371 goto out;
372 }
373
374 q = input;
375 if (memcmp(q, template[i].result, template[i].rlen)) {
376 printk(KERN_ERR "alg: aead: Test %d failed on "
377 "%s for %s\n", j, e, algo);
378 hexdump(q, template[i].rlen);
379 ret = -EINVAL;
380 goto out;
381 }
382 }
383 }
384
385 for (i = 0, j = 0; i < tcount; i++) {
386 if (template[i].np) {
387 j++;
388
389 if (template[i].iv)
390 memcpy(iv, template[i].iv, MAX_IVLEN);
391 else
392 memset(iv, 0, MAX_IVLEN);
393
394 crypto_aead_clear_flags(tfm, ~0);
395 if (template[i].wk)
396 crypto_aead_set_flags(
397 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
398 key = template[i].key;
399
400 ret = crypto_aead_setkey(tfm, key, template[i].klen);
401 if (!ret == template[i].fail) {
402 printk(KERN_ERR "alg: aead: setkey failed on "
403 "chunk test %d for %s: flags=%x\n", j,
404 algo, crypto_aead_get_flags(tfm));
405 goto out;
406 } else if (ret)
407 continue;
408
409 authsize = abs(template[i].rlen - template[i].ilen);
410
411 ret = -EINVAL;
412 sg_init_table(sg, template[i].np);
413 for (k = 0, temp = 0; k < template[i].np; k++) {
414 if (WARN_ON(offset_in_page(IDX[k]) +
415 template[i].tap[k] > PAGE_SIZE))
416 goto out;
417
418 q = xbuf[IDX[k] >> PAGE_SHIFT] +
419 offset_in_page(IDX[k]);
420
421 memcpy(q, template[i].input + temp,
422 template[i].tap[k]);
423
424 n = template[i].tap[k];
425 if (k == template[i].np - 1 && enc)
426 n += authsize;
427 if (offset_in_page(q) + n < PAGE_SIZE)
428 q[n] = 0;
429
430 sg_set_buf(&sg[k], q, template[i].tap[k]);
431 temp += template[i].tap[k];
432 }
433
434 ret = crypto_aead_setauthsize(tfm, authsize);
435 if (ret) {
436 printk(KERN_ERR "alg: aead: Failed to set "
437 "authsize to %u on chunk test %d for "
438 "%s\n", authsize, j, algo);
439 goto out;
440 }
441
442 if (enc) {
443 if (WARN_ON(sg[k - 1].offset +
444 sg[k - 1].length + authsize >
445 PAGE_SIZE)) {
446 ret = -EINVAL;
447 goto out;
448 }
449
450 sg[k - 1].length += authsize;
451 }
452
453 sg_init_table(asg, template[i].anp);
454 for (k = 0, temp = 0; k < template[i].anp; k++) {
455 sg_set_buf(&asg[k],
456 memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
457 offset_in_page(IDX[k]),
458 template[i].assoc + temp,
459 template[i].atap[k]),
460 template[i].atap[k]);
461 temp += template[i].atap[k];
462 }
463
464 aead_request_set_crypt(req, sg, sg,
465 template[i].ilen,
466 iv);
467
468 aead_request_set_assoc(req, asg, template[i].alen);
469
470 ret = enc ?
471 crypto_aead_encrypt(req) :
472 crypto_aead_decrypt(req);
473
474 switch (ret) {
475 case 0:
476 break;
477 case -EINPROGRESS:
478 case -EBUSY:
479 ret = wait_for_completion_interruptible(
480 &result.completion);
481 if (!ret && !(ret = result.err)) {
482 INIT_COMPLETION(result.completion);
483 break;
484 }
485 /* fall through */
486 default:
487 printk(KERN_ERR "alg: aead: %s failed on "
488 "chunk test %d for %s: ret=%d\n", e, j,
489 algo, -ret);
490 goto out;
491 }
492
493 ret = -EINVAL;
494 for (k = 0, temp = 0; k < template[i].np; k++) {
495 q = xbuf[IDX[k] >> PAGE_SHIFT] +
496 offset_in_page(IDX[k]);
497
498 n = template[i].tap[k];
499 if (k == template[i].np - 1)
500 n += enc ? authsize : -authsize;
501
502 if (memcmp(q, template[i].result + temp, n)) {
503 printk(KERN_ERR "alg: aead: Chunk "
504 "test %d failed on %s at page "
505 "%u for %s\n", j, e, k, algo);
506 hexdump(q, n);
507 goto out;
508 }
509
510 q += n;
511 if (k == template[i].np - 1 && !enc) {
512 if (memcmp(q, template[i].input +
513 temp + n, authsize))
514 n = authsize;
515 else
516 n = 0;
517 } else {
518 for (n = 0; offset_in_page(q + n) &&
519 q[n]; n++)
520 ;
521 }
522 if (n) {
523 printk(KERN_ERR "alg: aead: Result "
524 "buffer corruption in chunk "
525 "test %d on %s at page %u for "
526 "%s: %u bytes:\n", j, e, k,
527 algo, n);
528 hexdump(q, n);
529 goto out;
530 }
531
532 temp += template[i].tap[k];
533 }
534 }
535 }
536
537 ret = 0;
538
539out:
540 aead_request_free(req);
541 return ret;
542}
543
544static int test_cipher(struct crypto_cipher *tfm, int enc,
545 struct cipher_testvec *template, unsigned int tcount)
546{
547 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
548 unsigned int i, j, k;
549 int ret;
550 char *q;
551 const char *e;
552 void *data;
553
554 if (enc == ENCRYPT)
555 e = "encryption";
556 else
557 e = "decryption";
558
559 j = 0;
560 for (i = 0; i < tcount; i++) {
561 if (template[i].np)
562 continue;
563
564 j++;
565
566 data = xbuf[0];
567 memcpy(data, template[i].input, template[i].ilen);
568
569 crypto_cipher_clear_flags(tfm, ~0);
570 if (template[i].wk)
571 crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
572
573 ret = crypto_cipher_setkey(tfm, template[i].key,
574 template[i].klen);
575 if (!ret == template[i].fail) {
576 printk(KERN_ERR "alg: cipher: setkey failed "
577 "on test %d for %s: flags=%x\n", j,
578 algo, crypto_cipher_get_flags(tfm));
579 goto out;
580 } else if (ret)
581 continue;
582
583 for (k = 0; k < template[i].ilen;
584 k += crypto_cipher_blocksize(tfm)) {
585 if (enc)
586 crypto_cipher_encrypt_one(tfm, data + k,
587 data + k);
588 else
589 crypto_cipher_decrypt_one(tfm, data + k,
590 data + k);
591 }
592
593 q = data;
594 if (memcmp(q, template[i].result, template[i].rlen)) {
595 printk(KERN_ERR "alg: cipher: Test %d failed "
596 "on %s for %s\n", j, e, algo);
597 hexdump(q, template[i].rlen);
598 ret = -EINVAL;
599 goto out;
600 }
601 }
602
603 ret = 0;
604
605out:
606 return ret;
607}
608
609static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
610 struct cipher_testvec *template, unsigned int tcount)
611{
612 const char *algo =
613 crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
614 unsigned int i, j, k, n, temp;
615 int ret;
616 char *q;
617 struct ablkcipher_request *req;
618 struct scatterlist sg[8];
619 const char *e;
620 struct tcrypt_result result;
621 void *data;
622 char iv[MAX_IVLEN];
623
624 if (enc == ENCRYPT)
625 e = "encryption";
626 else
627 e = "decryption";
628
629 init_completion(&result.completion);
630
631 req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
632 if (!req) {
633 printk(KERN_ERR "alg: skcipher: Failed to allocate request "
634 "for %s\n", algo);
635 ret = -ENOMEM;
636 goto out;
637 }
638
639 ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
640 tcrypt_complete, &result);
641
642 j = 0;
643 for (i = 0; i < tcount; i++) {
644 if (template[i].iv)
645 memcpy(iv, template[i].iv, MAX_IVLEN);
646 else
647 memset(iv, 0, MAX_IVLEN);
648
649 if (!(template[i].np)) {
650 j++;
651
652 data = xbuf[0];
653 memcpy(data, template[i].input, template[i].ilen);
654
655 crypto_ablkcipher_clear_flags(tfm, ~0);
656 if (template[i].wk)
657 crypto_ablkcipher_set_flags(
658 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
659
660 ret = crypto_ablkcipher_setkey(tfm, template[i].key,
661 template[i].klen);
662 if (!ret == template[i].fail) {
663 printk(KERN_ERR "alg: skcipher: setkey failed "
664 "on test %d for %s: flags=%x\n", j,
665 algo, crypto_ablkcipher_get_flags(tfm));
666 goto out;
667 } else if (ret)
668 continue;
669
670 sg_init_one(&sg[0], data, template[i].ilen);
671
672 ablkcipher_request_set_crypt(req, sg, sg,
673 template[i].ilen, iv);
674 ret = enc ?
675 crypto_ablkcipher_encrypt(req) :
676 crypto_ablkcipher_decrypt(req);
677
678 switch (ret) {
679 case 0:
680 break;
681 case -EINPROGRESS:
682 case -EBUSY:
683 ret = wait_for_completion_interruptible(
684 &result.completion);
685 if (!ret && !((ret = result.err))) {
686 INIT_COMPLETION(result.completion);
687 break;
688 }
689 /* fall through */
690 default:
691 printk(KERN_ERR "alg: skcipher: %s failed on "
692 "test %d for %s: ret=%d\n", e, j, algo,
693 -ret);
694 goto out;
695 }
696
697 q = data;
698 if (memcmp(q, template[i].result, template[i].rlen)) {
699 printk(KERN_ERR "alg: skcipher: Test %d "
700 "failed on %s for %s\n", j, e, algo);
701 hexdump(q, template[i].rlen);
702 ret = -EINVAL;
703 goto out;
704 }
705 }
706 }
707
708 j = 0;
709 for (i = 0; i < tcount; i++) {
710
711 if (template[i].iv)
712 memcpy(iv, template[i].iv, MAX_IVLEN);
713 else
714 memset(iv, 0, MAX_IVLEN);
715
716 if (template[i].np) {
717 j++;
718
719 crypto_ablkcipher_clear_flags(tfm, ~0);
720 if (template[i].wk)
721 crypto_ablkcipher_set_flags(
722 tfm, CRYPTO_TFM_REQ_WEAK_KEY);
723
724 ret = crypto_ablkcipher_setkey(tfm, template[i].key,
725 template[i].klen);
726 if (!ret == template[i].fail) {
727 printk(KERN_ERR "alg: skcipher: setkey failed "
728 "on chunk test %d for %s: flags=%x\n",
729 j, algo,
730 crypto_ablkcipher_get_flags(tfm));
731 goto out;
732 } else if (ret)
733 continue;
734
735 temp = 0;
736 ret = -EINVAL;
737 sg_init_table(sg, template[i].np);
738 for (k = 0; k < template[i].np; k++) {
739 if (WARN_ON(offset_in_page(IDX[k]) +
740 template[i].tap[k] > PAGE_SIZE))
741 goto out;
742
743 q = xbuf[IDX[k] >> PAGE_SHIFT] +
744 offset_in_page(IDX[k]);
745
746 memcpy(q, template[i].input + temp,
747 template[i].tap[k]);
748
749 if (offset_in_page(q) + template[i].tap[k] <
750 PAGE_SIZE)
751 q[template[i].tap[k]] = 0;
752
753 sg_set_buf(&sg[k], q, template[i].tap[k]);
754
755 temp += template[i].tap[k];
756 }
757
758 ablkcipher_request_set_crypt(req, sg, sg,
759 template[i].ilen, iv);
760
761 ret = enc ?
762 crypto_ablkcipher_encrypt(req) :
763 crypto_ablkcipher_decrypt(req);
764
765 switch (ret) {
766 case 0:
767 break;
768 case -EINPROGRESS:
769 case -EBUSY:
770 ret = wait_for_completion_interruptible(
771 &result.completion);
772 if (!ret && !((ret = result.err))) {
773 INIT_COMPLETION(result.completion);
774 break;
775 }
776 /* fall through */
777 default:
778 printk(KERN_ERR "alg: skcipher: %s failed on "
779 "chunk test %d for %s: ret=%d\n", e, j,
780 algo, -ret);
781 goto out;
782 }
783
784 temp = 0;
785 ret = -EINVAL;
786 for (k = 0; k < template[i].np; k++) {
787 q = xbuf[IDX[k] >> PAGE_SHIFT] +
788 offset_in_page(IDX[k]);
789
790 if (memcmp(q, template[i].result + temp,
791 template[i].tap[k])) {
792 printk(KERN_ERR "alg: skcipher: Chunk "
793 "test %d failed on %s at page "
794 "%u for %s\n", j, e, k, algo);
795 hexdump(q, template[i].tap[k]);
796 goto out;
797 }
798
799 q += template[i].tap[k];
800 for (n = 0; offset_in_page(q + n) && q[n]; n++)
801 ;
802 if (n) {
803 printk(KERN_ERR "alg: skcipher: "
804 "Result buffer corruption in "
805 "chunk test %d on %s at page "
806 "%u for %s: %u bytes:\n", j, e,
807 k, algo, n);
808 hexdump(q, n);
809 goto out;
810 }
811 temp += template[i].tap[k];
812 }
813 }
814 }
815
816 ret = 0;
817
818out:
819 ablkcipher_request_free(req);
820 return ret;
821}
822
823static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
824 struct comp_testvec *dtemplate, int ctcount, int dtcount)
825{
826 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
827 unsigned int i;
828 char result[COMP_BUF_SIZE];
829 int ret;
830
831 for (i = 0; i < ctcount; i++) {
832 int ilen, dlen = COMP_BUF_SIZE;
833
834 memset(result, 0, sizeof (result));
835
836 ilen = ctemplate[i].inlen;
837 ret = crypto_comp_compress(tfm, ctemplate[i].input,
838 ilen, result, &dlen);
839 if (ret) {
840 printk(KERN_ERR "alg: comp: compression failed "
841 "on test %d for %s: ret=%d\n", i + 1, algo,
842 -ret);
843 goto out;
844 }
845
846 if (memcmp(result, ctemplate[i].output, dlen)) {
847 printk(KERN_ERR "alg: comp: Compression test %d "
848 "failed for %s\n", i + 1, algo);
849 hexdump(result, dlen);
850 ret = -EINVAL;
851 goto out;
852 }
853 }
854
855 for (i = 0; i < dtcount; i++) {
856 int ilen, ret, dlen = COMP_BUF_SIZE;
857
858 memset(result, 0, sizeof (result));
859
860 ilen = dtemplate[i].inlen;
861 ret = crypto_comp_decompress(tfm, dtemplate[i].input,
862 ilen, result, &dlen);
863 if (ret) {
864 printk(KERN_ERR "alg: comp: decompression failed "
865 "on test %d for %s: ret=%d\n", i + 1, algo,
866 -ret);
867 goto out;
868 }
869
870 if (memcmp(result, dtemplate[i].output, dlen)) {
871 printk(KERN_ERR "alg: comp: Decompression test %d "
872 "failed for %s\n", i + 1, algo);
873 hexdump(result, dlen);
874 ret = -EINVAL;
875 goto out;
876 }
877 }
878
879 ret = 0;
880
881out:
882 return ret;
883}
884
885static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
886 u32 type, u32 mask)
887{
888 struct crypto_aead *tfm;
889 int err = 0;
890
891 tfm = crypto_alloc_aead(driver, type, mask);
892 if (IS_ERR(tfm)) {
893 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
894 "%ld\n", driver, PTR_ERR(tfm));
895 return PTR_ERR(tfm);
896 }
897
898 if (desc->suite.aead.enc.vecs) {
899 err = test_aead(tfm, ENCRYPT, desc->suite.aead.enc.vecs,
900 desc->suite.aead.enc.count);
901 if (err)
902 goto out;
903 }
904
905 if (!err && desc->suite.aead.dec.vecs)
906 err = test_aead(tfm, DECRYPT, desc->suite.aead.dec.vecs,
907 desc->suite.aead.dec.count);
908
909out:
910 crypto_free_aead(tfm);
911 return err;
912}
913
914static int alg_test_cipher(const struct alg_test_desc *desc,
915 const char *driver, u32 type, u32 mask)
916{
917 struct crypto_cipher *tfm;
918 int err = 0;
919
920 tfm = crypto_alloc_cipher(driver, type, mask);
921 if (IS_ERR(tfm)) {
922 printk(KERN_ERR "alg: cipher: Failed to load transform for "
923 "%s: %ld\n", driver, PTR_ERR(tfm));
924 return PTR_ERR(tfm);
925 }
926
927 if (desc->suite.cipher.enc.vecs) {
928 err = test_cipher(tfm, ENCRYPT, desc->suite.cipher.enc.vecs,
929 desc->suite.cipher.enc.count);
930 if (err)
931 goto out;
932 }
933
934 if (desc->suite.cipher.dec.vecs)
935 err = test_cipher(tfm, DECRYPT, desc->suite.cipher.dec.vecs,
936 desc->suite.cipher.dec.count);
937
938out:
939 crypto_free_cipher(tfm);
940 return err;
941}
942
943static int alg_test_skcipher(const struct alg_test_desc *desc,
944 const char *driver, u32 type, u32 mask)
945{
946 struct crypto_ablkcipher *tfm;
947 int err = 0;
948
949 tfm = crypto_alloc_ablkcipher(driver, type, mask);
950 if (IS_ERR(tfm)) {
951 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
952 "%s: %ld\n", driver, PTR_ERR(tfm));
953 return PTR_ERR(tfm);
954 }
955
956 if (desc->suite.cipher.enc.vecs) {
957 err = test_skcipher(tfm, ENCRYPT, desc->suite.cipher.enc.vecs,
958 desc->suite.cipher.enc.count);
959 if (err)
960 goto out;
961 }
962
963 if (desc->suite.cipher.dec.vecs)
964 err = test_skcipher(tfm, DECRYPT, desc->suite.cipher.dec.vecs,
965 desc->suite.cipher.dec.count);
966
967out:
968 crypto_free_ablkcipher(tfm);
969 return err;
970}
971
972static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
973 u32 type, u32 mask)
974{
975 struct crypto_comp *tfm;
976 int err;
977
978 tfm = crypto_alloc_comp(driver, type, mask);
979 if (IS_ERR(tfm)) {
980 printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
981 "%ld\n", driver, PTR_ERR(tfm));
982 return PTR_ERR(tfm);
983 }
984
985 err = test_comp(tfm, desc->suite.comp.comp.vecs,
986 desc->suite.comp.decomp.vecs,
987 desc->suite.comp.comp.count,
988 desc->suite.comp.decomp.count);
989
990 crypto_free_comp(tfm);
991 return err;
992}
993
994static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
995 u32 type, u32 mask)
996{
997 struct crypto_ahash *tfm;
998 int err;
999
1000 tfm = crypto_alloc_ahash(driver, type, mask);
1001 if (IS_ERR(tfm)) {
1002 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1003 "%ld\n", driver, PTR_ERR(tfm));
1004 return PTR_ERR(tfm);
1005 }
1006
1007 err = test_hash(tfm, desc->suite.hash.vecs, desc->suite.hash.count);
1008
1009 crypto_free_ahash(tfm);
1010 return err;
1011}
1012
1013/* Please keep this list sorted by algorithm name. */
1014static const struct alg_test_desc alg_test_descs[] = {
1015 {
1016 .alg = "cbc(aes)",
1017 .test = alg_test_skcipher,
1018 .suite = {
1019 .cipher = {
1020 .enc = {
1021 .vecs = aes_cbc_enc_tv_template,
1022 .count = AES_CBC_ENC_TEST_VECTORS
1023 },
1024 .dec = {
1025 .vecs = aes_cbc_dec_tv_template,
1026 .count = AES_CBC_DEC_TEST_VECTORS
1027 }
1028 }
1029 }
1030 }, {
1031 .alg = "cbc(anubis)",
1032 .test = alg_test_skcipher,
1033 .suite = {
1034 .cipher = {
1035 .enc = {
1036 .vecs = anubis_cbc_enc_tv_template,
1037 .count = ANUBIS_CBC_ENC_TEST_VECTORS
1038 },
1039 .dec = {
1040 .vecs = anubis_cbc_dec_tv_template,
1041 .count = ANUBIS_CBC_DEC_TEST_VECTORS
1042 }
1043 }
1044 }
1045 }, {
1046 .alg = "cbc(blowfish)",
1047 .test = alg_test_skcipher,
1048 .suite = {
1049 .cipher = {
1050 .enc = {
1051 .vecs = bf_cbc_enc_tv_template,
1052 .count = BF_CBC_ENC_TEST_VECTORS
1053 },
1054 .dec = {
1055 .vecs = bf_cbc_dec_tv_template,
1056 .count = BF_CBC_DEC_TEST_VECTORS
1057 }
1058 }
1059 }
1060 }, {
1061 .alg = "cbc(camellia)",
1062 .test = alg_test_skcipher,
1063 .suite = {
1064 .cipher = {
1065 .enc = {
1066 .vecs = camellia_cbc_enc_tv_template,
1067 .count = CAMELLIA_CBC_ENC_TEST_VECTORS
1068 },
1069 .dec = {
1070 .vecs = camellia_cbc_dec_tv_template,
1071 .count = CAMELLIA_CBC_DEC_TEST_VECTORS
1072 }
1073 }
1074 }
1075 }, {
1076 .alg = "cbc(des)",
1077 .test = alg_test_skcipher,
1078 .suite = {
1079 .cipher = {
1080 .enc = {
1081 .vecs = des_cbc_enc_tv_template,
1082 .count = DES_CBC_ENC_TEST_VECTORS
1083 },
1084 .dec = {
1085 .vecs = des_cbc_dec_tv_template,
1086 .count = DES_CBC_DEC_TEST_VECTORS
1087 }
1088 }
1089 }
1090 }, {
1091 .alg = "cbc(des3_ede)",
1092 .test = alg_test_skcipher,
1093 .suite = {
1094 .cipher = {
1095 .enc = {
1096 .vecs = des3_ede_cbc_enc_tv_template,
1097 .count = DES3_EDE_CBC_ENC_TEST_VECTORS
1098 },
1099 .dec = {
1100 .vecs = des3_ede_cbc_dec_tv_template,
1101 .count = DES3_EDE_CBC_DEC_TEST_VECTORS
1102 }
1103 }
1104 }
1105 }, {
1106 .alg = "cbc(twofish)",
1107 .test = alg_test_skcipher,
1108 .suite = {
1109 .cipher = {
1110 .enc = {
1111 .vecs = tf_cbc_enc_tv_template,
1112 .count = TF_CBC_ENC_TEST_VECTORS
1113 },
1114 .dec = {
1115 .vecs = tf_cbc_dec_tv_template,
1116 .count = TF_CBC_DEC_TEST_VECTORS
1117 }
1118 }
1119 }
1120 }, {
1121 .alg = "ccm(aes)",
1122 .test = alg_test_aead,
1123 .suite = {
1124 .aead = {
1125 .enc = {
1126 .vecs = aes_ccm_enc_tv_template,
1127 .count = AES_CCM_ENC_TEST_VECTORS
1128 },
1129 .dec = {
1130 .vecs = aes_ccm_dec_tv_template,
1131 .count = AES_CCM_DEC_TEST_VECTORS
1132 }
1133 }
1134 }
1135 }, {
1136 .alg = "crc32c",
1137 .test = alg_test_hash,
1138 .suite = {
1139 .hash = {
1140 .vecs = crc32c_tv_template,
1141 .count = CRC32C_TEST_VECTORS
1142 }
1143 }
1144 }, {
1145 .alg = "cts(cbc(aes))",
1146 .test = alg_test_skcipher,
1147 .suite = {
1148 .cipher = {
1149 .enc = {
1150 .vecs = cts_mode_enc_tv_template,
1151 .count = CTS_MODE_ENC_TEST_VECTORS
1152 },
1153 .dec = {
1154 .vecs = cts_mode_dec_tv_template,
1155 .count = CTS_MODE_DEC_TEST_VECTORS
1156 }
1157 }
1158 }
1159 }, {
1160 .alg = "deflate",
1161 .test = alg_test_comp,
1162 .suite = {
1163 .comp = {
1164 .comp = {
1165 .vecs = deflate_comp_tv_template,
1166 .count = DEFLATE_COMP_TEST_VECTORS
1167 },
1168 .decomp = {
1169 .vecs = deflate_decomp_tv_template,
1170 .count = DEFLATE_DECOMP_TEST_VECTORS
1171 }
1172 }
1173 }
1174 }, {
1175 .alg = "ecb(aes)",
1176 .test = alg_test_skcipher,
1177 .suite = {
1178 .cipher = {
1179 .enc = {
1180 .vecs = aes_enc_tv_template,
1181 .count = AES_ENC_TEST_VECTORS
1182 },
1183 .dec = {
1184 .vecs = aes_dec_tv_template,
1185 .count = AES_DEC_TEST_VECTORS
1186 }
1187 }
1188 }
1189 }, {
1190 .alg = "ecb(anubis)",
1191 .test = alg_test_skcipher,
1192 .suite = {
1193 .cipher = {
1194 .enc = {
1195 .vecs = anubis_enc_tv_template,
1196 .count = ANUBIS_ENC_TEST_VECTORS
1197 },
1198 .dec = {
1199 .vecs = anubis_dec_tv_template,
1200 .count = ANUBIS_DEC_TEST_VECTORS
1201 }
1202 }
1203 }
1204 }, {
1205 .alg = "ecb(arc4)",
1206 .test = alg_test_skcipher,
1207 .suite = {
1208 .cipher = {
1209 .enc = {
1210 .vecs = arc4_enc_tv_template,
1211 .count = ARC4_ENC_TEST_VECTORS
1212 },
1213 .dec = {
1214 .vecs = arc4_dec_tv_template,
1215 .count = ARC4_DEC_TEST_VECTORS
1216 }
1217 }
1218 }
1219 }, {
1220 .alg = "ecb(blowfish)",
1221 .test = alg_test_skcipher,
1222 .suite = {
1223 .cipher = {
1224 .enc = {
1225 .vecs = bf_enc_tv_template,
1226 .count = BF_ENC_TEST_VECTORS
1227 },
1228 .dec = {
1229 .vecs = bf_dec_tv_template,
1230 .count = BF_DEC_TEST_VECTORS
1231 }
1232 }
1233 }
1234 }, {
1235 .alg = "ecb(camellia)",
1236 .test = alg_test_skcipher,
1237 .suite = {
1238 .cipher = {
1239 .enc = {
1240 .vecs = camellia_enc_tv_template,
1241 .count = CAMELLIA_ENC_TEST_VECTORS
1242 },
1243 .dec = {
1244 .vecs = camellia_dec_tv_template,
1245 .count = CAMELLIA_DEC_TEST_VECTORS
1246 }
1247 }
1248 }
1249 }, {
1250 .alg = "ecb(cast5)",
1251 .test = alg_test_skcipher,
1252 .suite = {
1253 .cipher = {
1254 .enc = {
1255 .vecs = cast5_enc_tv_template,
1256 .count = CAST5_ENC_TEST_VECTORS
1257 },
1258 .dec = {
1259 .vecs = cast5_dec_tv_template,
1260 .count = CAST5_DEC_TEST_VECTORS
1261 }
1262 }
1263 }
1264 }, {
1265 .alg = "ecb(cast6)",
1266 .test = alg_test_skcipher,
1267 .suite = {
1268 .cipher = {
1269 .enc = {
1270 .vecs = cast6_enc_tv_template,
1271 .count = CAST6_ENC_TEST_VECTORS
1272 },
1273 .dec = {
1274 .vecs = cast6_dec_tv_template,
1275 .count = CAST6_DEC_TEST_VECTORS
1276 }
1277 }
1278 }
1279 }, {
1280 .alg = "ecb(des)",
1281 .test = alg_test_skcipher,
1282 .suite = {
1283 .cipher = {
1284 .enc = {
1285 .vecs = des_enc_tv_template,
1286 .count = DES_ENC_TEST_VECTORS
1287 },
1288 .dec = {
1289 .vecs = des_dec_tv_template,
1290 .count = DES_DEC_TEST_VECTORS
1291 }
1292 }
1293 }
1294 }, {
1295 .alg = "ecb(des3_ede)",
1296 .test = alg_test_skcipher,
1297 .suite = {
1298 .cipher = {
1299 .enc = {
1300 .vecs = des3_ede_enc_tv_template,
1301 .count = DES3_EDE_ENC_TEST_VECTORS
1302 },
1303 .dec = {
1304 .vecs = des3_ede_dec_tv_template,
1305 .count = DES3_EDE_DEC_TEST_VECTORS
1306 }
1307 }
1308 }
1309 }, {
1310 .alg = "ecb(khazad)",
1311 .test = alg_test_skcipher,
1312 .suite = {
1313 .cipher = {
1314 .enc = {
1315 .vecs = khazad_enc_tv_template,
1316 .count = KHAZAD_ENC_TEST_VECTORS
1317 },
1318 .dec = {
1319 .vecs = khazad_dec_tv_template,
1320 .count = KHAZAD_DEC_TEST_VECTORS
1321 }
1322 }
1323 }
1324 }, {
1325 .alg = "ecb(seed)",
1326 .test = alg_test_skcipher,
1327 .suite = {
1328 .cipher = {
1329 .enc = {
1330 .vecs = seed_enc_tv_template,
1331 .count = SEED_ENC_TEST_VECTORS
1332 },
1333 .dec = {
1334 .vecs = seed_dec_tv_template,
1335 .count = SEED_DEC_TEST_VECTORS
1336 }
1337 }
1338 }
1339 }, {
1340 .alg = "ecb(serpent)",
1341 .test = alg_test_skcipher,
1342 .suite = {
1343 .cipher = {
1344 .enc = {
1345 .vecs = serpent_enc_tv_template,
1346 .count = SERPENT_ENC_TEST_VECTORS
1347 },
1348 .dec = {
1349 .vecs = serpent_dec_tv_template,
1350 .count = SERPENT_DEC_TEST_VECTORS
1351 }
1352 }
1353 }
1354 }, {
1355 .alg = "ecb(tea)",
1356 .test = alg_test_skcipher,
1357 .suite = {
1358 .cipher = {
1359 .enc = {
1360 .vecs = tea_enc_tv_template,
1361 .count = TEA_ENC_TEST_VECTORS
1362 },
1363 .dec = {
1364 .vecs = tea_dec_tv_template,
1365 .count = TEA_DEC_TEST_VECTORS
1366 }
1367 }
1368 }
1369 }, {
1370 .alg = "ecb(tnepres)",
1371 .test = alg_test_skcipher,
1372 .suite = {
1373 .cipher = {
1374 .enc = {
1375 .vecs = tnepres_enc_tv_template,
1376 .count = TNEPRES_ENC_TEST_VECTORS
1377 },
1378 .dec = {
1379 .vecs = tnepres_dec_tv_template,
1380 .count = TNEPRES_DEC_TEST_VECTORS
1381 }
1382 }
1383 }
1384 }, {
1385 .alg = "ecb(twofish)",
1386 .test = alg_test_skcipher,
1387 .suite = {
1388 .cipher = {
1389 .enc = {
1390 .vecs = tf_enc_tv_template,
1391 .count = TF_ENC_TEST_VECTORS
1392 },
1393 .dec = {
1394 .vecs = tf_dec_tv_template,
1395 .count = TF_DEC_TEST_VECTORS
1396 }
1397 }
1398 }
1399 }, {
1400 .alg = "ecb(xeta)",
1401 .test = alg_test_skcipher,
1402 .suite = {
1403 .cipher = {
1404 .enc = {
1405 .vecs = xeta_enc_tv_template,
1406 .count = XETA_ENC_TEST_VECTORS
1407 },
1408 .dec = {
1409 .vecs = xeta_dec_tv_template,
1410 .count = XETA_DEC_TEST_VECTORS
1411 }
1412 }
1413 }
1414 }, {
1415 .alg = "ecb(xtea)",
1416 .test = alg_test_skcipher,
1417 .suite = {
1418 .cipher = {
1419 .enc = {
1420 .vecs = xtea_enc_tv_template,
1421 .count = XTEA_ENC_TEST_VECTORS
1422 },
1423 .dec = {
1424 .vecs = xtea_dec_tv_template,
1425 .count = XTEA_DEC_TEST_VECTORS
1426 }
1427 }
1428 }
1429 }, {
1430 .alg = "gcm(aes)",
1431 .test = alg_test_aead,
1432 .suite = {
1433 .aead = {
1434 .enc = {
1435 .vecs = aes_gcm_enc_tv_template,
1436 .count = AES_GCM_ENC_TEST_VECTORS
1437 },
1438 .dec = {
1439 .vecs = aes_gcm_dec_tv_template,
1440 .count = AES_GCM_DEC_TEST_VECTORS
1441 }
1442 }
1443 }
1444 }, {
1445 .alg = "hmac(md5)",
1446 .test = alg_test_hash,
1447 .suite = {
1448 .hash = {
1449 .vecs = hmac_md5_tv_template,
1450 .count = HMAC_MD5_TEST_VECTORS
1451 }
1452 }
1453 }, {
1454 .alg = "hmac(rmd128)",
1455 .test = alg_test_hash,
1456 .suite = {
1457 .hash = {
1458 .vecs = hmac_rmd128_tv_template,
1459 .count = HMAC_RMD128_TEST_VECTORS
1460 }
1461 }
1462 }, {
1463 .alg = "hmac(rmd160)",
1464 .test = alg_test_hash,
1465 .suite = {
1466 .hash = {
1467 .vecs = hmac_rmd160_tv_template,
1468 .count = HMAC_RMD160_TEST_VECTORS
1469 }
1470 }
1471 }, {
1472 .alg = "hmac(sha1)",
1473 .test = alg_test_hash,
1474 .suite = {
1475 .hash = {
1476 .vecs = hmac_sha1_tv_template,
1477 .count = HMAC_SHA1_TEST_VECTORS
1478 }
1479 }
1480 }, {
1481 .alg = "hmac(sha224)",
1482 .test = alg_test_hash,
1483 .suite = {
1484 .hash = {
1485 .vecs = hmac_sha224_tv_template,
1486 .count = HMAC_SHA224_TEST_VECTORS
1487 }
1488 }
1489 }, {
1490 .alg = "hmac(sha256)",
1491 .test = alg_test_hash,
1492 .suite = {
1493 .hash = {
1494 .vecs = hmac_sha256_tv_template,
1495 .count = HMAC_SHA256_TEST_VECTORS
1496 }
1497 }
1498 }, {
1499 .alg = "hmac(sha384)",
1500 .test = alg_test_hash,
1501 .suite = {
1502 .hash = {
1503 .vecs = hmac_sha384_tv_template,
1504 .count = HMAC_SHA384_TEST_VECTORS
1505 }
1506 }
1507 }, {
1508 .alg = "hmac(sha512)",
1509 .test = alg_test_hash,
1510 .suite = {
1511 .hash = {
1512 .vecs = hmac_sha512_tv_template,
1513 .count = HMAC_SHA512_TEST_VECTORS
1514 }
1515 }
1516 }, {
1517 .alg = "lrw(aes)",
1518 .test = alg_test_skcipher,
1519 .suite = {
1520 .cipher = {
1521 .enc = {
1522 .vecs = aes_lrw_enc_tv_template,
1523 .count = AES_LRW_ENC_TEST_VECTORS
1524 },
1525 .dec = {
1526 .vecs = aes_lrw_dec_tv_template,
1527 .count = AES_LRW_DEC_TEST_VECTORS
1528 }
1529 }
1530 }
1531 }, {
1532 .alg = "lzo",
1533 .test = alg_test_comp,
1534 .suite = {
1535 .comp = {
1536 .comp = {
1537 .vecs = lzo_comp_tv_template,
1538 .count = LZO_COMP_TEST_VECTORS
1539 },
1540 .decomp = {
1541 .vecs = lzo_decomp_tv_template,
1542 .count = LZO_DECOMP_TEST_VECTORS
1543 }
1544 }
1545 }
1546 }, {
1547 .alg = "md4",
1548 .test = alg_test_hash,
1549 .suite = {
1550 .hash = {
1551 .vecs = md4_tv_template,
1552 .count = MD4_TEST_VECTORS
1553 }
1554 }
1555 }, {
1556 .alg = "md5",
1557 .test = alg_test_hash,
1558 .suite = {
1559 .hash = {
1560 .vecs = md5_tv_template,
1561 .count = MD5_TEST_VECTORS
1562 }
1563 }
1564 }, {
1565 .alg = "michael_mic",
1566 .test = alg_test_hash,
1567 .suite = {
1568 .hash = {
1569 .vecs = michael_mic_tv_template,
1570 .count = MICHAEL_MIC_TEST_VECTORS
1571 }
1572 }
1573 }, {
1574 .alg = "pcbc(fcrypt)",
1575 .test = alg_test_skcipher,
1576 .suite = {
1577 .cipher = {
1578 .enc = {
1579 .vecs = fcrypt_pcbc_enc_tv_template,
1580 .count = FCRYPT_ENC_TEST_VECTORS
1581 },
1582 .dec = {
1583 .vecs = fcrypt_pcbc_dec_tv_template,
1584 .count = FCRYPT_DEC_TEST_VECTORS
1585 }
1586 }
1587 }
1588 }, {
1589 .alg = "rfc3686(ctr(aes))",
1590 .test = alg_test_skcipher,
1591 .suite = {
1592 .cipher = {
1593 .enc = {
1594 .vecs = aes_ctr_enc_tv_template,
1595 .count = AES_CTR_ENC_TEST_VECTORS
1596 },
1597 .dec = {
1598 .vecs = aes_ctr_dec_tv_template,
1599 .count = AES_CTR_DEC_TEST_VECTORS
1600 }
1601 }
1602 }
1603 }, {
1604 .alg = "rmd128",
1605 .test = alg_test_hash,
1606 .suite = {
1607 .hash = {
1608 .vecs = rmd128_tv_template,
1609 .count = RMD128_TEST_VECTORS
1610 }
1611 }
1612 }, {
1613 .alg = "rmd160",
1614 .test = alg_test_hash,
1615 .suite = {
1616 .hash = {
1617 .vecs = rmd160_tv_template,
1618 .count = RMD160_TEST_VECTORS
1619 }
1620 }
1621 }, {
1622 .alg = "rmd256",
1623 .test = alg_test_hash,
1624 .suite = {
1625 .hash = {
1626 .vecs = rmd256_tv_template,
1627 .count = RMD256_TEST_VECTORS
1628 }
1629 }
1630 }, {
1631 .alg = "rmd320",
1632 .test = alg_test_hash,
1633 .suite = {
1634 .hash = {
1635 .vecs = rmd320_tv_template,
1636 .count = RMD320_TEST_VECTORS
1637 }
1638 }
1639 }, {
1640 .alg = "salsa20",
1641 .test = alg_test_skcipher,
1642 .suite = {
1643 .cipher = {
1644 .enc = {
1645 .vecs = salsa20_stream_enc_tv_template,
1646 .count = SALSA20_STREAM_ENC_TEST_VECTORS
1647 }
1648 }
1649 }
1650 }, {
1651 .alg = "sha1",
1652 .test = alg_test_hash,
1653 .suite = {
1654 .hash = {
1655 .vecs = sha1_tv_template,
1656 .count = SHA1_TEST_VECTORS
1657 }
1658 }
1659 }, {
1660 .alg = "sha224",
1661 .test = alg_test_hash,
1662 .suite = {
1663 .hash = {
1664 .vecs = sha224_tv_template,
1665 .count = SHA224_TEST_VECTORS
1666 }
1667 }
1668 }, {
1669 .alg = "sha256",
1670 .test = alg_test_hash,
1671 .suite = {
1672 .hash = {
1673 .vecs = sha256_tv_template,
1674 .count = SHA256_TEST_VECTORS
1675 }
1676 }
1677 }, {
1678 .alg = "sha384",
1679 .test = alg_test_hash,
1680 .suite = {
1681 .hash = {
1682 .vecs = sha384_tv_template,
1683 .count = SHA384_TEST_VECTORS
1684 }
1685 }
1686 }, {
1687 .alg = "sha512",
1688 .test = alg_test_hash,
1689 .suite = {
1690 .hash = {
1691 .vecs = sha512_tv_template,
1692 .count = SHA512_TEST_VECTORS
1693 }
1694 }
1695 }, {
1696 .alg = "tgr128",
1697 .test = alg_test_hash,
1698 .suite = {
1699 .hash = {
1700 .vecs = tgr128_tv_template,
1701 .count = TGR128_TEST_VECTORS
1702 }
1703 }
1704 }, {
1705 .alg = "tgr160",
1706 .test = alg_test_hash,
1707 .suite = {
1708 .hash = {
1709 .vecs = tgr160_tv_template,
1710 .count = TGR160_TEST_VECTORS
1711 }
1712 }
1713 }, {
1714 .alg = "tgr192",
1715 .test = alg_test_hash,
1716 .suite = {
1717 .hash = {
1718 .vecs = tgr192_tv_template,
1719 .count = TGR192_TEST_VECTORS
1720 }
1721 }
1722 }, {
1723 .alg = "wp256",
1724 .test = alg_test_hash,
1725 .suite = {
1726 .hash = {
1727 .vecs = wp256_tv_template,
1728 .count = WP256_TEST_VECTORS
1729 }
1730 }
1731 }, {
1732 .alg = "wp384",
1733 .test = alg_test_hash,
1734 .suite = {
1735 .hash = {
1736 .vecs = wp384_tv_template,
1737 .count = WP384_TEST_VECTORS
1738 }
1739 }
1740 }, {
1741 .alg = "wp512",
1742 .test = alg_test_hash,
1743 .suite = {
1744 .hash = {
1745 .vecs = wp512_tv_template,
1746 .count = WP512_TEST_VECTORS
1747 }
1748 }
1749 }, {
1750 .alg = "xcbc(aes)",
1751 .test = alg_test_hash,
1752 .suite = {
1753 .hash = {
1754 .vecs = aes_xcbc128_tv_template,
1755 .count = XCBC_AES_TEST_VECTORS
1756 }
1757 }
1758 }, {
1759 .alg = "xts(aes)",
1760 .test = alg_test_skcipher,
1761 .suite = {
1762 .cipher = {
1763 .enc = {
1764 .vecs = aes_xts_enc_tv_template,
1765 .count = AES_XTS_ENC_TEST_VECTORS
1766 },
1767 .dec = {
1768 .vecs = aes_xts_dec_tv_template,
1769 .count = AES_XTS_DEC_TEST_VECTORS
1770 }
1771 }
1772 }
1773 }
1774};
1775
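/*
 * A short note on the lookup below: it is a plain binary search, so it
 * only stays correct while alg_test_descs[] above remains sorted by .alg
 * (see the comment at the head of the table).  A return of -1 means no
 * self-test is known for the requested algorithm.
 */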
1776static int alg_find_test(const char *alg)
1777{
1778 int start = 0;
1779 int end = ARRAY_SIZE(alg_test_descs);
1780
1781 while (start < end) {
1782 int i = (start + end) / 2;
1783 int diff = strcmp(alg_test_descs[i].alg, alg);
1784
1785 if (diff > 0) {
1786 end = i;
1787 continue;
1788 }
1789
1790 if (diff < 0) {
1791 start = i + 1;
1792 continue;
1793 }
1794
1795 return i;
1796 }
1797
1798 return -1;
1799}
1800
1801int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
1802{
1803 int i;
1804
1805 if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
1806 char nalg[CRYPTO_MAX_ALG_NAME];
1807
1808 if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >=
1809 sizeof(nalg))
1810 return -ENAMETOOLONG;
1811
1812 i = alg_find_test(nalg);
1813 if (i < 0)
1814 goto notest;
1815
1816 return alg_test_cipher(alg_test_descs + i, driver, type, mask);
1817 }
1818
1819 i = alg_find_test(alg);
1820 if (i < 0)
1821 goto notest;
1822
1823 return alg_test_descs[i].test(alg_test_descs + i, driver,
1824 type, mask);
1825
1826notest:
1827 printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
1828 return 0;
1829}
1830EXPORT_SYMBOL_GPL(alg_test);
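/*
 * Usage sketch (illustrative only; the driver name below is hypothetical):
 * a caller such as the crypto manager would typically test a newly
 * registered implementation against the generic vectors with
 *
 *	err = alg_test("cbc(aes-generic)", "cbc(aes)", 0, 0);
 *
 * where 0 means the vectors passed (or no test exists for the algorithm)
 * and a non-zero value reports a self-test failure.
 */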
1831
1832int __init testmgr_init(void)
1833{
1834 int i;
1835
1836 for (i = 0; i < XBUFSIZE; i++) {
1837 xbuf[i] = (void *)__get_free_page(GFP_KERNEL);
1838 if (!xbuf[i])
1839 goto err_free_xbuf;
1840 }
1841
1842 for (i = 0; i < XBUFSIZE; i++) {
1843 axbuf[i] = (void *)__get_free_page(GFP_KERNEL);
1844 if (!axbuf[i])
1845 goto err_free_axbuf;
1846 }
1847
1848 return 0;
1849
1850err_free_axbuf:
1851 for (i = 0; i < XBUFSIZE && axbuf[i]; i++)
1852 free_page((unsigned long)axbuf[i]);
1853err_free_xbuf:
1854 for (i = 0; i < XBUFSIZE && xbuf[i]; i++)
1855 free_page((unsigned long)xbuf[i]);
1856
1857 return -ENOMEM;
1858}
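/*
 * Note on the unwind labels above: xbuf[] and axbuf[] are file-scope
 * arrays and therefore start out zeroed, so the "&& xbuf[i]" and
 * "&& axbuf[i]" guards free only the pages that were actually allocated
 * before the failure.
 */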
1859
1860void testmgr_exit(void)
1861{
1862 int i;
1863
1864 for (i = 0; i < XBUFSIZE; i++)
1865 free_page((unsigned long)axbuf[i]);
1866 for (i = 0; i < XBUFSIZE; i++)
1867 free_page((unsigned long)xbuf[i]);
1868}
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
new file mode 100644
index 000000000000..dee94d9ecfba
--- /dev/null
+++ b/crypto/testmgr.h
@@ -0,0 +1,8738 @@
1/*
2 * Algorithm testing framework and tests.
3 *
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5 * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
6 * Copyright (c) 2007 Nokia Siemens Networks
7 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 */
15#ifndef _CRYPTO_TESTMGR_H
16#define _CRYPTO_TESTMGR_H
17
18#define MAX_DIGEST_SIZE 64
19#define MAX_TAP 8
20
21#define MAX_KEYLEN 56
22#define MAX_IVLEN 32
23
24struct hash_testvec {
25 /* only used with keyed hash algorithms */
26 char *key;
27 char *plaintext;
28 char *digest;
29 unsigned char tap[MAX_TAP];
30 unsigned char psize;
31 unsigned char np;
32 unsigned char ksize;
33};
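/*
 * Field conventions, as consumed by the hash tests in testmgr.c: psize is
 * the plaintext length and ksize the key length, both in bytes.  When np
 * is non-zero the input is fed in np chunks whose sizes are listed in
 * tap[] (they sum to psize), exercising multi-segment scatterlists.
 */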
34
35struct cipher_testvec {
36 char *key;
37 char *iv;
38 char *input;
39 char *result;
40 unsigned short tap[MAX_TAP];
41 int np;
42 unsigned char fail;
43 unsigned char wk; /* weak key flag */
44 unsigned char klen;
45 unsigned short ilen;
46 unsigned short rlen;
47};
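/*
 * Field conventions: klen, ilen and rlen give the key, input and expected
 * result lengths in bytes, and np/tap chunk the input as for hash vectors.
 * fail marks a vector that is expected to be rejected (typically a weak
 * key, which is also flagged via wk).
 */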
48
49struct aead_testvec {
50 char *key;
51 char *iv;
52 char *input;
53 char *assoc;
54 char *result;
55 unsigned char tap[MAX_TAP];
56 unsigned char atap[MAX_TAP];
57 int np;
58 int anp;
59 unsigned char fail;
60 unsigned char wk; /* weak key flag */
61 unsigned char klen;
62 unsigned short ilen;
63 unsigned short alen;
64 unsigned short rlen;
65};
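/*
 * Field conventions: assoc/alen carry the associated data, anp/atap chunk
 * it the same way np/tap chunk the input, and for encryption vectors the
 * expected result (result/rlen) generally includes the authentication tag.
 */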
66
67static char zeroed_string[48];
68
69/*
70 * MD4 test vectors from RFC1320
71 */
72#define MD4_TEST_VECTORS 7
73
74static struct hash_testvec md4_tv_template[] = {
75 {
76 .plaintext = "",
77 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
78 "\xb7\x3c\x59\xd7\xe0\xc0\x89\xc0",
79 }, {
80 .plaintext = "a",
81 .psize = 1,
82 .digest = "\xbd\xe5\x2c\xb3\x1d\xe3\x3e\x46"
83 "\x24\x5e\x05\xfb\xdb\xd6\xfb\x24",
84 }, {
85 .plaintext = "abc",
86 .psize = 3,
87 .digest = "\xa4\x48\x01\x7a\xaf\x21\xd8\x52"
88 "\x5f\xc1\x0a\xe8\x7a\xa6\x72\x9d",
89 }, {
90 .plaintext = "message digest",
91 .psize = 14,
92 .digest = "\xd9\x13\x0a\x81\x64\x54\x9f\xe8"
93 "\x18\x87\x48\x06\xe1\xc7\x01\x4b",
94 }, {
95 .plaintext = "abcdefghijklmnopqrstuvwxyz",
96 .psize = 26,
97 .digest = "\xd7\x9e\x1c\x30\x8a\xa5\xbb\xcd"
98 "\xee\xa8\xed\x63\xdf\x41\x2d\xa9",
99 .np = 2,
100 .tap = { 13, 13 },
101 }, {
102 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
103 .psize = 62,
104 .digest = "\x04\x3f\x85\x82\xf2\x41\xdb\x35"
105 "\x1c\xe6\x27\xe1\x53\xe7\xf0\xe4",
106 }, {
107 .plaintext = "123456789012345678901234567890123456789012345678901234567890123"
108 "45678901234567890",
109 .psize = 80,
110 .digest = "\xe3\x3b\x4d\xdc\x9c\x38\xf2\x19"
111 "\x9c\x3e\x7b\x16\x4f\xcc\x05\x36",
112 },
113};
114
115/*
116 * MD5 test vectors from RFC1321
117 */
118#define MD5_TEST_VECTORS 7
119
120static struct hash_testvec md5_tv_template[] = {
121 {
122 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
123 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
124 }, {
125 .plaintext = "a",
126 .psize = 1,
127 .digest = "\x0c\xc1\x75\xb9\xc0\xf1\xb6\xa8"
128 "\x31\xc3\x99\xe2\x69\x77\x26\x61",
129 }, {
130 .plaintext = "abc",
131 .psize = 3,
132 .digest = "\x90\x01\x50\x98\x3c\xd2\x4f\xb0"
133 "\xd6\x96\x3f\x7d\x28\xe1\x7f\x72",
134 }, {
135 .plaintext = "message digest",
136 .psize = 14,
137 .digest = "\xf9\x6b\x69\x7d\x7c\xb7\x93\x8d"
138 "\x52\x5a\x2f\x31\xaa\xf1\x61\xd0",
139 }, {
140 .plaintext = "abcdefghijklmnopqrstuvwxyz",
141 .psize = 26,
142 .digest = "\xc3\xfc\xd3\xd7\x61\x92\xe4\x00"
143 "\x7d\xfb\x49\x6c\xca\x67\xe1\x3b",
144 .np = 2,
145 .tap = {13, 13}
146 }, {
147 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
148 .psize = 62,
149 .digest = "\xd1\x74\xab\x98\xd2\x77\xd9\xf5"
150 "\xa5\x61\x1c\x2c\x9f\x41\x9d\x9f",
151 }, {
152 .plaintext = "12345678901234567890123456789012345678901234567890123456789012"
153 "345678901234567890",
154 .psize = 80,
155 .digest = "\x57\xed\xf4\xa2\x2b\xe3\xc9\x55"
156 "\xac\x49\xda\x2e\x21\x07\xb6\x7a",
157 }
158
159};
160
161/*
162 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
163 */
164#define RMD128_TEST_VECTORS 10
165
166static struct hash_testvec rmd128_tv_template[] = {
167 {
168 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
169 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
170 }, {
171 .plaintext = "a",
172 .psize = 1,
173 .digest = "\x86\xbe\x7a\xfa\x33\x9d\x0f\xc7"
174 "\xcf\xc7\x85\xe7\x2f\x57\x8d\x33",
175 }, {
176 .plaintext = "abc",
177 .psize = 3,
178 .digest = "\xc1\x4a\x12\x19\x9c\x66\xe4\xba"
179 "\x84\x63\x6b\x0f\x69\x14\x4c\x77",
180 }, {
181 .plaintext = "message digest",
182 .psize = 14,
183 .digest = "\x9e\x32\x7b\x3d\x6e\x52\x30\x62"
184 "\xaf\xc1\x13\x2d\x7d\xf9\xd1\xb8",
185 }, {
186 .plaintext = "abcdefghijklmnopqrstuvwxyz",
187 .psize = 26,
188 .digest = "\xfd\x2a\xa6\x07\xf7\x1d\xc8\xf5"
189 "\x10\x71\x49\x22\xb3\x71\x83\x4e",
190 }, {
191 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
192 "fghijklmnopqrstuvwxyz0123456789",
193 .psize = 62,
194 .digest = "\xd1\xe9\x59\xeb\x17\x9c\x91\x1f"
195 "\xae\xa4\x62\x4c\x60\xc5\xc7\x02",
196 }, {
197 .plaintext = "1234567890123456789012345678901234567890"
198 "1234567890123456789012345678901234567890",
199 .psize = 80,
200 .digest = "\x3f\x45\xef\x19\x47\x32\xc2\xdb"
201 "\xb2\xc4\xa2\xc7\x69\x79\x5f\xa3",
202 }, {
203 .plaintext = "abcdbcdecdefdefgefghfghighij"
204 "hijkijkljklmklmnlmnomnopnopq",
205 .psize = 56,
206 .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d"
207 "\xdc\x22\xe8\x8b\x49\x13\x3a\x06",
208 .np = 2,
209 .tap = { 28, 28 },
210 }, {
211 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
212 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
213 "lmnopqrsmnopqrstnopqrstu",
214 .psize = 112,
215 .digest = "\xd4\xec\xc9\x13\xe1\xdf\x77\x6b"
216 "\xf4\x8d\xe9\xd5\x5b\x1f\x25\x46",
217 }, {
218 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
219 .psize = 32,
220 .digest = "\x13\xfc\x13\xe8\xef\xff\x34\x7d"
221 "\xe1\x93\xff\x46\xdb\xac\xcf\xd4",
222 }
223};
224
225/*
226 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
227 */
228#define RMD160_TEST_VECTORS 10
229
230static struct hash_testvec rmd160_tv_template[] = {
231 {
232 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
233 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
234 }, {
235 .plaintext = "a",
236 .psize = 1,
237 .digest = "\x0b\xdc\x9d\x2d\x25\x6b\x3e\xe9\xda\xae"
238 "\x34\x7b\xe6\xf4\xdc\x83\x5a\x46\x7f\xfe",
239 }, {
240 .plaintext = "abc",
241 .psize = 3,
242 .digest = "\x8e\xb2\x08\xf7\xe0\x5d\x98\x7a\x9b\x04"
243 "\x4a\x8e\x98\xc6\xb0\x87\xf1\x5a\x0b\xfc",
244 }, {
245 .plaintext = "message digest",
246 .psize = 14,
247 .digest = "\x5d\x06\x89\xef\x49\xd2\xfa\xe5\x72\xb8"
248 "\x81\xb1\x23\xa8\x5f\xfa\x21\x59\x5f\x36",
249 }, {
250 .plaintext = "abcdefghijklmnopqrstuvwxyz",
251 .psize = 26,
252 .digest = "\xf7\x1c\x27\x10\x9c\x69\x2c\x1b\x56\xbb"
253 "\xdc\xeb\x5b\x9d\x28\x65\xb3\x70\x8d\xbc",
254 }, {
255 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
256 "fghijklmnopqrstuvwxyz0123456789",
257 .psize = 62,
258 .digest = "\xb0\xe2\x0b\x6e\x31\x16\x64\x02\x86\xed"
259 "\x3a\x87\xa5\x71\x30\x79\xb2\x1f\x51\x89",
260 }, {
261 .plaintext = "1234567890123456789012345678901234567890"
262 "1234567890123456789012345678901234567890",
263 .psize = 80,
264 .digest = "\x9b\x75\x2e\x45\x57\x3d\x4b\x39\xf4\xdb"
265 "\xd3\x32\x3c\xab\x82\xbf\x63\x32\x6b\xfb",
266 }, {
267 .plaintext = "abcdbcdecdefdefgefghfghighij"
268 "hijkijkljklmklmnlmnomnopnopq",
269 .psize = 56,
270 .digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05"
271 "\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b",
272 .np = 2,
273 .tap = { 28, 28 },
274 }, {
275 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi"
276 "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr"
277 "lmnopqrsmnopqrstnopqrstu",
278 .psize = 112,
279 .digest = "\x6f\x3f\xa3\x9b\x6b\x50\x3c\x38\x4f\x91"
280 "\x9a\x49\xa7\xaa\x5c\x2c\x08\xbd\xfb\x45",
281 }, {
282 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
283 .psize = 32,
284 .digest = "\x94\xc2\x64\x11\x54\x04\xe6\x33\x79\x0d"
285 "\xfc\xc8\x7b\x58\x7d\x36\x77\x06\x7d\x9f",
286 }
287};
288
289/*
290 * RIPEMD-256 test vectors
291 */
292#define RMD256_TEST_VECTORS 8
293
294static struct hash_testvec rmd256_tv_template[] = {
295 {
296 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
297 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
298 "\x2d\x97\x74\xfb\x1e\x5d\x02\x63"
299 "\x80\xae\x01\x68\xe3\xc5\x52\x2d",
300 }, {
301 .plaintext = "a",
302 .psize = 1,
303 .digest = "\xf9\x33\x3e\x45\xd8\x57\xf5\xd9"
304 "\x0a\x91\xba\xb7\x0a\x1e\xba\x0c"
305 "\xfb\x1b\xe4\xb0\x78\x3c\x9a\xcf"
306 "\xcd\x88\x3a\x91\x34\x69\x29\x25",
307 }, {
308 .plaintext = "abc",
309 .psize = 3,
310 .digest = "\xaf\xbd\x6e\x22\x8b\x9d\x8c\xbb"
311 "\xce\xf5\xca\x2d\x03\xe6\xdb\xa1"
312 "\x0a\xc0\xbc\x7d\xcb\xe4\x68\x0e"
313 "\x1e\x42\xd2\xe9\x75\x45\x9b\x65",
314 }, {
315 .plaintext = "message digest",
316 .psize = 14,
317 .digest = "\x87\xe9\x71\x75\x9a\x1c\xe4\x7a"
318 "\x51\x4d\x5c\x91\x4c\x39\x2c\x90"
319 "\x18\xc7\xc4\x6b\xc1\x44\x65\x55"
320 "\x4a\xfc\xdf\x54\xa5\x07\x0c\x0e",
321 }, {
322 .plaintext = "abcdefghijklmnopqrstuvwxyz",
323 .psize = 26,
324 .digest = "\x64\x9d\x30\x34\x75\x1e\xa2\x16"
325 "\x77\x6b\xf9\xa1\x8a\xcc\x81\xbc"
326 "\x78\x96\x11\x8a\x51\x97\x96\x87"
327 "\x82\xdd\x1f\xd9\x7d\x8d\x51\x33",
328 }, {
329 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
330 "fghijklmnopqrstuvwxyz0123456789",
331 .psize = 62,
332 .digest = "\x57\x40\xa4\x08\xac\x16\xb7\x20"
333 "\xb8\x44\x24\xae\x93\x1c\xbb\x1f"
334 "\xe3\x63\xd1\xd0\xbf\x40\x17\xf1"
335 "\xa8\x9f\x7e\xa6\xde\x77\xa0\xb8",
336 }, {
337 .plaintext = "1234567890123456789012345678901234567890"
338 "1234567890123456789012345678901234567890",
339 .psize = 80,
340 .digest = "\x06\xfd\xcc\x7a\x40\x95\x48\xaa"
341 "\xf9\x13\x68\xc0\x6a\x62\x75\xb5"
342 "\x53\xe3\xf0\x99\xbf\x0e\xa4\xed"
343 "\xfd\x67\x78\xdf\x89\xa8\x90\xdd",
344 }, {
345 .plaintext = "abcdbcdecdefdefgefghfghighij"
346 "hijkijkljklmklmnlmnomnopnopq",
347 .psize = 56,
348 .digest = "\x38\x43\x04\x55\x83\xaa\xc6\xc8"
349 "\xc8\xd9\x12\x85\x73\xe7\xa9\x80"
350 "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e"
351 "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f",
352 .np = 2,
353 .tap = { 28, 28 },
354 }
355};
356
357/*
358 * RIPEMD-320 test vectors
359 */
360#define RMD320_TEST_VECTORS 8
361
362static struct hash_testvec rmd320_tv_template[] = {
363 {
364 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
365 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
366 "\xeb\xc6\x1e\x85\x57\x17\x7d\x70\x5a\x0e"
367 "\xc8\x80\x15\x1c\x3a\x32\xa0\x08\x99\xb8",
368 }, {
369 .plaintext = "a",
370 .psize = 1,
371 .digest = "\xce\x78\x85\x06\x38\xf9\x26\x58\xa5\xa5"
372 "\x85\x09\x75\x79\x92\x6d\xda\x66\x7a\x57"
373 "\x16\x56\x2c\xfc\xf6\xfb\xe7\x7f\x63\x54"
374 "\x2f\x99\xb0\x47\x05\xd6\x97\x0d\xff\x5d",
375 }, {
376 .plaintext = "abc",
377 .psize = 3,
378 .digest = "\xde\x4c\x01\xb3\x05\x4f\x89\x30\xa7\x9d"
379 "\x09\xae\x73\x8e\x92\x30\x1e\x5a\x17\x08"
380 "\x5b\xef\xfd\xc1\xb8\xd1\x16\x71\x3e\x74"
381 "\xf8\x2f\xa9\x42\xd6\x4c\xdb\xc4\x68\x2d",
382 }, {
383 .plaintext = "message digest",
384 .psize = 14,
385 .digest = "\x3a\x8e\x28\x50\x2e\xd4\x5d\x42\x2f\x68"
386 "\x84\x4f\x9d\xd3\x16\xe7\xb9\x85\x33\xfa"
387 "\x3f\x2a\x91\xd2\x9f\x84\xd4\x25\xc8\x8d"
388 "\x6b\x4e\xff\x72\x7d\xf6\x6a\x7c\x01\x97",
389 }, {
390 .plaintext = "abcdefghijklmnopqrstuvwxyz",
391 .psize = 26,
392 .digest = "\xca\xbd\xb1\x81\x0b\x92\x47\x0a\x20\x93"
393 "\xaa\x6b\xce\x05\x95\x2c\x28\x34\x8c\xf4"
394 "\x3f\xf6\x08\x41\x97\x51\x66\xbb\x40\xed"
395 "\x23\x40\x04\xb8\x82\x44\x63\xe6\xb0\x09",
396 }, {
397 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcde"
398 "fghijklmnopqrstuvwxyz0123456789",
399 .psize = 62,
400 .digest = "\xed\x54\x49\x40\xc8\x6d\x67\xf2\x50\xd2"
401 "\x32\xc3\x0b\x7b\x3e\x57\x70\xe0\xc6\x0c"
402 "\x8c\xb9\xa4\xca\xfe\x3b\x11\x38\x8a\xf9"
403 "\x92\x0e\x1b\x99\x23\x0b\x84\x3c\x86\xa4",
404 }, {
405 .plaintext = "1234567890123456789012345678901234567890"
406 "1234567890123456789012345678901234567890",
407 .psize = 80,
408 .digest = "\x55\x78\x88\xaf\x5f\x6d\x8e\xd6\x2a\xb6"
409 "\x69\x45\xc6\xd2\xa0\xa4\x7e\xcd\x53\x41"
410 "\xe9\x15\xeb\x8f\xea\x1d\x05\x24\x95\x5f"
411 "\x82\x5d\xc7\x17\xe4\xa0\x08\xab\x2d\x42",
412 }, {
413 .plaintext = "abcdbcdecdefdefgefghfghighij"
414 "hijkijkljklmklmnlmnomnopnopq",
415 .psize = 56,
416 .digest = "\xd0\x34\xa7\x95\x0c\xf7\x22\x02\x1b\xa4"
417 "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59"
418 "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b"
419 "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac",
420 .np = 2,
421 .tap = { 28, 28 },
422 }
423};
424
425/*
426 * SHA1 test vectors from FIPS PUB 180-1
427 */
428#define SHA1_TEST_VECTORS 2
429
430static struct hash_testvec sha1_tv_template[] = {
431 {
432 .plaintext = "abc",
433 .psize = 3,
434 .digest = "\xa9\x99\x3e\x36\x47\x06\x81\x6a\xba\x3e"
435 "\x25\x71\x78\x50\xc2\x6c\x9c\xd0\xd8\x9d",
436 }, {
437 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
438 .psize = 56,
439 .digest = "\x84\x98\x3e\x44\x1c\x3b\xd2\x6e\xba\xae"
440 "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1",
441 .np = 2,
442 .tap = { 28, 28 }
443 }
444};
445
446
447/*
448 * SHA224 test vectors from FIPS PUB 180-2
449 */
450#define SHA224_TEST_VECTORS 2
451
452static struct hash_testvec sha224_tv_template[] = {
453 {
454 .plaintext = "abc",
455 .psize = 3,
456 .digest = "\x23\x09\x7D\x22\x34\x05\xD8\x22"
457 "\x86\x42\xA4\x77\xBD\xA2\x55\xB3"
458 "\x2A\xAD\xBC\xE4\xBD\xA0\xB3\xF7"
459 "\xE3\x6C\x9D\xA7",
460 }, {
461 .plaintext =
462 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
463 .psize = 56,
464 .digest = "\x75\x38\x8B\x16\x51\x27\x76\xCC"
465 "\x5D\xBA\x5D\xA1\xFD\x89\x01\x50"
466 "\xB0\xC6\x45\x5C\xB4\xF5\x8B\x19"
467 "\x52\x52\x25\x25",
468 .np = 2,
469 .tap = { 28, 28 }
470 }
471};
472
473/*
474 * SHA256 test vectors from NIST
475 */
476#define SHA256_TEST_VECTORS 2
477
478static struct hash_testvec sha256_tv_template[] = {
479 {
480 .plaintext = "abc",
481 .psize = 3,
482 .digest = "\xba\x78\x16\xbf\x8f\x01\xcf\xea"
483 "\x41\x41\x40\xde\x5d\xae\x22\x23"
484 "\xb0\x03\x61\xa3\x96\x17\x7a\x9c"
485 "\xb4\x10\xff\x61\xf2\x00\x15\xad",
486 }, {
487 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
488 .psize = 56,
489 .digest = "\x24\x8d\x6a\x61\xd2\x06\x38\xb8"
490 "\xe5\xc0\x26\x93\x0c\x3e\x60\x39"
491 "\xa3\x3c\xe4\x59\x64\xff\x21\x67"
492 "\xf6\xec\xed\xd4\x19\xdb\x06\xc1",
493 .np = 2,
494 .tap = { 28, 28 }
495 },
496};
497
498/*
499 * SHA384 test vectors from NIST and kerneli
500 */
501#define SHA384_TEST_VECTORS 4
502
503static struct hash_testvec sha384_tv_template[] = {
504 {
505 .plaintext = "abc",
506 .psize = 3,
507 .digest = "\xcb\x00\x75\x3f\x45\xa3\x5e\x8b"
508 "\xb5\xa0\x3d\x69\x9a\xc6\x50\x07"
509 "\x27\x2c\x32\xab\x0e\xde\xd1\x63"
510 "\x1a\x8b\x60\x5a\x43\xff\x5b\xed"
511 "\x80\x86\x07\x2b\xa1\xe7\xcc\x23"
512 "\x58\xba\xec\xa1\x34\xc8\x25\xa7",
513 }, {
514 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
515 .psize = 56,
516 .digest = "\x33\x91\xfd\xdd\xfc\x8d\xc7\x39"
517 "\x37\x07\xa6\x5b\x1b\x47\x09\x39"
518 "\x7c\xf8\xb1\xd1\x62\xaf\x05\xab"
519 "\xfe\x8f\x45\x0d\xe5\xf3\x6b\xc6"
520 "\xb0\x45\x5a\x85\x20\xbc\x4e\x6f"
521 "\x5f\xe9\x5b\x1f\xe3\xc8\x45\x2b",
522 }, {
523 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
524 "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
525 .psize = 112,
526 .digest = "\x09\x33\x0c\x33\xf7\x11\x47\xe8"
527 "\x3d\x19\x2f\xc7\x82\xcd\x1b\x47"
528 "\x53\x11\x1b\x17\x3b\x3b\x05\xd2"
529 "\x2f\xa0\x80\x86\xe3\xb0\xf7\x12"
530 "\xfc\xc7\xc7\x1a\x55\x7e\x2d\xb9"
531 "\x66\xc3\xe9\xfa\x91\x74\x60\x39",
532 }, {
533 .plaintext = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd"
534 "efghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz",
535 .psize = 104,
536 .digest = "\x3d\x20\x89\x73\xab\x35\x08\xdb"
537 "\xbd\x7e\x2c\x28\x62\xba\x29\x0a"
538 "\xd3\x01\x0e\x49\x78\xc1\x98\xdc"
539 "\x4d\x8f\xd0\x14\xe5\x82\x82\x3a"
540 "\x89\xe1\x6f\x9b\x2a\x7b\xbc\x1a"
541 "\xc9\x38\xe2\xd1\x99\xe8\xbe\xa4",
542 .np = 4,
543 .tap = { 26, 26, 26, 26 }
544 },
545};
546
547/*
548 * SHA512 test vectors from NIST and kerneli
549 */
550#define SHA512_TEST_VECTORS 4
551
552static struct hash_testvec sha512_tv_template[] = {
553 {
554 .plaintext = "abc",
555 .psize = 3,
556 .digest = "\xdd\xaf\x35\xa1\x93\x61\x7a\xba"
557 "\xcc\x41\x73\x49\xae\x20\x41\x31"
558 "\x12\xe6\xfa\x4e\x89\xa9\x7e\xa2"
559 "\x0a\x9e\xee\xe6\x4b\x55\xd3\x9a"
560 "\x21\x92\x99\x2a\x27\x4f\xc1\xa8"
561 "\x36\xba\x3c\x23\xa3\xfe\xeb\xbd"
562 "\x45\x4d\x44\x23\x64\x3c\xe8\x0e"
563 "\x2a\x9a\xc9\x4f\xa5\x4c\xa4\x9f",
564 }, {
565 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
566 .psize = 56,
567 .digest = "\x20\x4a\x8f\xc6\xdd\xa8\x2f\x0a"
568 "\x0c\xed\x7b\xeb\x8e\x08\xa4\x16"
569 "\x57\xc1\x6e\xf4\x68\xb2\x28\xa8"
570 "\x27\x9b\xe3\x31\xa7\x03\xc3\x35"
571 "\x96\xfd\x15\xc1\x3b\x1b\x07\xf9"
572 "\xaa\x1d\x3b\xea\x57\x78\x9c\xa0"
573 "\x31\xad\x85\xc7\xa7\x1d\xd7\x03"
574 "\x54\xec\x63\x12\x38\xca\x34\x45",
575 }, {
576 .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
577 "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
578 .psize = 112,
579 .digest = "\x8e\x95\x9b\x75\xda\xe3\x13\xda"
580 "\x8c\xf4\xf7\x28\x14\xfc\x14\x3f"
581 "\x8f\x77\x79\xc6\xeb\x9f\x7f\xa1"
582 "\x72\x99\xae\xad\xb6\x88\x90\x18"
583 "\x50\x1d\x28\x9e\x49\x00\xf7\xe4"
584 "\x33\x1b\x99\xde\xc4\xb5\x43\x3a"
585 "\xc7\xd3\x29\xee\xb6\xdd\x26\x54"
586 "\x5e\x96\xe5\x5b\x87\x4b\xe9\x09",
587 }, {
588 .plaintext = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd"
589 "efghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz",
590 .psize = 104,
591 .digest = "\x93\x0d\x0c\xef\xcb\x30\xff\x11"
592 "\x33\xb6\x89\x81\x21\xf1\xcf\x3d"
593 "\x27\x57\x8a\xfc\xaf\xe8\x67\x7c"
594 "\x52\x57\xcf\x06\x99\x11\xf7\x5d"
595 "\x8f\x58\x31\xb5\x6e\xbf\xda\x67"
596 "\xb2\x78\xe6\x6d\xff\x8b\x84\xfe"
597 "\x2b\x28\x70\xf7\x42\xa5\x80\xd8"
598 "\xed\xb4\x19\x87\x23\x28\x50\xc9",
599 .np = 4,
600 .tap = { 26, 26, 26, 26 }
601 },
602};
603
604
605/*
606 * WHIRLPOOL test vectors from Whirlpool package
607 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
608 * submission
609 */
610#define WP512_TEST_VECTORS 8
611
612static struct hash_testvec wp512_tv_template[] = {
613 {
614 .plaintext = "",
615 .psize = 0,
616 .digest = "\x19\xFA\x61\xD7\x55\x22\xA4\x66"
617 "\x9B\x44\xE3\x9C\x1D\x2E\x17\x26"
618 "\xC5\x30\x23\x21\x30\xD4\x07\xF8"
619 "\x9A\xFE\xE0\x96\x49\x97\xF7\xA7"
620 "\x3E\x83\xBE\x69\x8B\x28\x8F\xEB"
621 "\xCF\x88\xE3\xE0\x3C\x4F\x07\x57"
622 "\xEA\x89\x64\xE5\x9B\x63\xD9\x37"
623 "\x08\xB1\x38\xCC\x42\xA6\x6E\xB3",
624
625
626 }, {
627 .plaintext = "a",
628 .psize = 1,
629 .digest = "\x8A\xCA\x26\x02\x79\x2A\xEC\x6F"
630 "\x11\xA6\x72\x06\x53\x1F\xB7\xD7"
631 "\xF0\xDF\xF5\x94\x13\x14\x5E\x69"
632 "\x73\xC4\x50\x01\xD0\x08\x7B\x42"
633 "\xD1\x1B\xC6\x45\x41\x3A\xEF\xF6"
634 "\x3A\x42\x39\x1A\x39\x14\x5A\x59"
635 "\x1A\x92\x20\x0D\x56\x01\x95\xE5"
636 "\x3B\x47\x85\x84\xFD\xAE\x23\x1A",
637 }, {
638 .plaintext = "abc",
639 .psize = 3,
640 .digest = "\x4E\x24\x48\xA4\xC6\xF4\x86\xBB"
641 "\x16\xB6\x56\x2C\x73\xB4\x02\x0B"
642 "\xF3\x04\x3E\x3A\x73\x1B\xCE\x72"
643 "\x1A\xE1\xB3\x03\xD9\x7E\x6D\x4C"
644 "\x71\x81\xEE\xBD\xB6\xC5\x7E\x27"
645 "\x7D\x0E\x34\x95\x71\x14\xCB\xD6"
646 "\xC7\x97\xFC\x9D\x95\xD8\xB5\x82"
647 "\xD2\x25\x29\x20\x76\xD4\xEE\xF5",
648 }, {
649 .plaintext = "message digest",
650 .psize = 14,
651 .digest = "\x37\x8C\x84\xA4\x12\x6E\x2D\xC6"
652 "\xE5\x6D\xCC\x74\x58\x37\x7A\xAC"
653 "\x83\x8D\x00\x03\x22\x30\xF5\x3C"
654 "\xE1\xF5\x70\x0C\x0F\xFB\x4D\x3B"
655 "\x84\x21\x55\x76\x59\xEF\x55\xC1"
656 "\x06\xB4\xB5\x2A\xC5\xA4\xAA\xA6"
657 "\x92\xED\x92\x00\x52\x83\x8F\x33"
658 "\x62\xE8\x6D\xBD\x37\xA8\x90\x3E",
659 }, {
660 .plaintext = "abcdefghijklmnopqrstuvwxyz",
661 .psize = 26,
662 .digest = "\xF1\xD7\x54\x66\x26\x36\xFF\xE9"
663 "\x2C\x82\xEB\xB9\x21\x2A\x48\x4A"
664 "\x8D\x38\x63\x1E\xAD\x42\x38\xF5"
665 "\x44\x2E\xE1\x3B\x80\x54\xE4\x1B"
666 "\x08\xBF\x2A\x92\x51\xC3\x0B\x6A"
667 "\x0B\x8A\xAE\x86\x17\x7A\xB4\xA6"
668 "\xF6\x8F\x67\x3E\x72\x07\x86\x5D"
669 "\x5D\x98\x19\xA3\xDB\xA4\xEB\x3B",
670 }, {
671 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
672 "abcdefghijklmnopqrstuvwxyz0123456789",
673 .psize = 62,
674 .digest = "\xDC\x37\xE0\x08\xCF\x9E\xE6\x9B"
675 "\xF1\x1F\x00\xED\x9A\xBA\x26\x90"
676 "\x1D\xD7\xC2\x8C\xDE\xC0\x66\xCC"
677 "\x6A\xF4\x2E\x40\xF8\x2F\x3A\x1E"
678 "\x08\xEB\xA2\x66\x29\x12\x9D\x8F"
679 "\xB7\xCB\x57\x21\x1B\x92\x81\xA6"
680 "\x55\x17\xCC\x87\x9D\x7B\x96\x21"
681 "\x42\xC6\x5F\x5A\x7A\xF0\x14\x67",
682 }, {
683 .plaintext = "1234567890123456789012345678901234567890"
684 "1234567890123456789012345678901234567890",
685 .psize = 80,
686 .digest = "\x46\x6E\xF1\x8B\xAB\xB0\x15\x4D"
687 "\x25\xB9\xD3\x8A\x64\x14\xF5\xC0"
688 "\x87\x84\x37\x2B\xCC\xB2\x04\xD6"
689 "\x54\x9C\x4A\xFA\xDB\x60\x14\x29"
690 "\x4D\x5B\xD8\xDF\x2A\x6C\x44\xE5"
691 "\x38\xCD\x04\x7B\x26\x81\xA5\x1A"
692 "\x2C\x60\x48\x1E\x88\xC5\xA2\x0B"
693 "\x2C\x2A\x80\xCF\x3A\x9A\x08\x3B",
694 }, {
695 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
696 .psize = 32,
697 .digest = "\x2A\x98\x7E\xA4\x0F\x91\x70\x61"
698 "\xF5\xD6\xF0\xA0\xE4\x64\x4F\x48"
699 "\x8A\x7A\x5A\x52\xDE\xEE\x65\x62"
700 "\x07\xC5\x62\xF9\x88\xE9\x5C\x69"
701 "\x16\xBD\xC8\x03\x1B\xC5\xBE\x1B"
702 "\x7B\x94\x76\x39\xFE\x05\x0B\x56"
703 "\x93\x9B\xAA\xA0\xAD\xFF\x9A\xE6"
704 "\x74\x5B\x7B\x18\x1C\x3B\xE3\xFD",
705 },
706};
707
708#define WP384_TEST_VECTORS 8
709
710static struct hash_testvec wp384_tv_template[] = {
711 {
712 .plaintext = "",
713 .psize = 0,
714 .digest = "\x19\xFA\x61\xD7\x55\x22\xA4\x66"
715 "\x9B\x44\xE3\x9C\x1D\x2E\x17\x26"
716 "\xC5\x30\x23\x21\x30\xD4\x07\xF8"
717 "\x9A\xFE\xE0\x96\x49\x97\xF7\xA7"
718 "\x3E\x83\xBE\x69\x8B\x28\x8F\xEB"
719 "\xCF\x88\xE3\xE0\x3C\x4F\x07\x57",
720
721
722 }, {
723 .plaintext = "a",
724 .psize = 1,
725 .digest = "\x8A\xCA\x26\x02\x79\x2A\xEC\x6F"
726 "\x11\xA6\x72\x06\x53\x1F\xB7\xD7"
727 "\xF0\xDF\xF5\x94\x13\x14\x5E\x69"
728 "\x73\xC4\x50\x01\xD0\x08\x7B\x42"
729 "\xD1\x1B\xC6\x45\x41\x3A\xEF\xF6"
730 "\x3A\x42\x39\x1A\x39\x14\x5A\x59",
731 }, {
732 .plaintext = "abc",
733 .psize = 3,
734 .digest = "\x4E\x24\x48\xA4\xC6\xF4\x86\xBB"
735 "\x16\xB6\x56\x2C\x73\xB4\x02\x0B"
736 "\xF3\x04\x3E\x3A\x73\x1B\xCE\x72"
737 "\x1A\xE1\xB3\x03\xD9\x7E\x6D\x4C"
738 "\x71\x81\xEE\xBD\xB6\xC5\x7E\x27"
739 "\x7D\x0E\x34\x95\x71\x14\xCB\xD6",
740 }, {
741 .plaintext = "message digest",
742 .psize = 14,
743 .digest = "\x37\x8C\x84\xA4\x12\x6E\x2D\xC6"
744 "\xE5\x6D\xCC\x74\x58\x37\x7A\xAC"
745 "\x83\x8D\x00\x03\x22\x30\xF5\x3C"
746 "\xE1\xF5\x70\x0C\x0F\xFB\x4D\x3B"
747 "\x84\x21\x55\x76\x59\xEF\x55\xC1"
748 "\x06\xB4\xB5\x2A\xC5\xA4\xAA\xA6",
749 }, {
750 .plaintext = "abcdefghijklmnopqrstuvwxyz",
751 .psize = 26,
752 .digest = "\xF1\xD7\x54\x66\x26\x36\xFF\xE9"
753 "\x2C\x82\xEB\xB9\x21\x2A\x48\x4A"
754 "\x8D\x38\x63\x1E\xAD\x42\x38\xF5"
755 "\x44\x2E\xE1\x3B\x80\x54\xE4\x1B"
756 "\x08\xBF\x2A\x92\x51\xC3\x0B\x6A"
757 "\x0B\x8A\xAE\x86\x17\x7A\xB4\xA6",
758 }, {
759 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
760 "abcdefghijklmnopqrstuvwxyz0123456789",
761 .psize = 62,
762 .digest = "\xDC\x37\xE0\x08\xCF\x9E\xE6\x9B"
763 "\xF1\x1F\x00\xED\x9A\xBA\x26\x90"
764 "\x1D\xD7\xC2\x8C\xDE\xC0\x66\xCC"
765 "\x6A\xF4\x2E\x40\xF8\x2F\x3A\x1E"
766 "\x08\xEB\xA2\x66\x29\x12\x9D\x8F"
767 "\xB7\xCB\x57\x21\x1B\x92\x81\xA6",
768 }, {
769 .plaintext = "1234567890123456789012345678901234567890"
770 "1234567890123456789012345678901234567890",
771 .psize = 80,
772 .digest = "\x46\x6E\xF1\x8B\xAB\xB0\x15\x4D"
773 "\x25\xB9\xD3\x8A\x64\x14\xF5\xC0"
774 "\x87\x84\x37\x2B\xCC\xB2\x04\xD6"
775 "\x54\x9C\x4A\xFA\xDB\x60\x14\x29"
776 "\x4D\x5B\xD8\xDF\x2A\x6C\x44\xE5"
777 "\x38\xCD\x04\x7B\x26\x81\xA5\x1A",
778 }, {
779 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
780 .psize = 32,
781 .digest = "\x2A\x98\x7E\xA4\x0F\x91\x70\x61"
782 "\xF5\xD6\xF0\xA0\xE4\x64\x4F\x48"
783 "\x8A\x7A\x5A\x52\xDE\xEE\x65\x62"
784 "\x07\xC5\x62\xF9\x88\xE9\x5C\x69"
785 "\x16\xBD\xC8\x03\x1B\xC5\xBE\x1B"
786 "\x7B\x94\x76\x39\xFE\x05\x0B\x56",
787 },
788};
789
790#define WP256_TEST_VECTORS 8
791
792static struct hash_testvec wp256_tv_template[] = {
793 {
794 .plaintext = "",
795 .psize = 0,
796 .digest = "\x19\xFA\x61\xD7\x55\x22\xA4\x66"
797 "\x9B\x44\xE3\x9C\x1D\x2E\x17\x26"
798 "\xC5\x30\x23\x21\x30\xD4\x07\xF8"
799 "\x9A\xFE\xE0\x96\x49\x97\xF7\xA7",
800
801
802 }, {
803 .plaintext = "a",
804 .psize = 1,
805 .digest = "\x8A\xCA\x26\x02\x79\x2A\xEC\x6F"
806 "\x11\xA6\x72\x06\x53\x1F\xB7\xD7"
807 "\xF0\xDF\xF5\x94\x13\x14\x5E\x69"
808 "\x73\xC4\x50\x01\xD0\x08\x7B\x42",
809 }, {
810 .plaintext = "abc",
811 .psize = 3,
812 .digest = "\x4E\x24\x48\xA4\xC6\xF4\x86\xBB"
813 "\x16\xB6\x56\x2C\x73\xB4\x02\x0B"
814 "\xF3\x04\x3E\x3A\x73\x1B\xCE\x72"
815 "\x1A\xE1\xB3\x03\xD9\x7E\x6D\x4C",
816 }, {
817 .plaintext = "message digest",
818 .psize = 14,
819 .digest = "\x37\x8C\x84\xA4\x12\x6E\x2D\xC6"
820 "\xE5\x6D\xCC\x74\x58\x37\x7A\xAC"
821 "\x83\x8D\x00\x03\x22\x30\xF5\x3C"
822 "\xE1\xF5\x70\x0C\x0F\xFB\x4D\x3B",
823 }, {
824 .plaintext = "abcdefghijklmnopqrstuvwxyz",
825 .psize = 26,
826 .digest = "\xF1\xD7\x54\x66\x26\x36\xFF\xE9"
827 "\x2C\x82\xEB\xB9\x21\x2A\x48\x4A"
828 "\x8D\x38\x63\x1E\xAD\x42\x38\xF5"
829 "\x44\x2E\xE1\x3B\x80\x54\xE4\x1B",
830 }, {
831 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
832 "abcdefghijklmnopqrstuvwxyz0123456789",
833 .psize = 62,
834 .digest = "\xDC\x37\xE0\x08\xCF\x9E\xE6\x9B"
835 "\xF1\x1F\x00\xED\x9A\xBA\x26\x90"
836 "\x1D\xD7\xC2\x8C\xDE\xC0\x66\xCC"
837 "\x6A\xF4\x2E\x40\xF8\x2F\x3A\x1E",
838 }, {
839 .plaintext = "1234567890123456789012345678901234567890"
840 "1234567890123456789012345678901234567890",
841 .psize = 80,
842 .digest = "\x46\x6E\xF1\x8B\xAB\xB0\x15\x4D"
843 "\x25\xB9\xD3\x8A\x64\x14\xF5\xC0"
844 "\x87\x84\x37\x2B\xCC\xB2\x04\xD6"
845 "\x54\x9C\x4A\xFA\xDB\x60\x14\x29",
846 }, {
847 .plaintext = "abcdbcdecdefdefgefghfghighijhijk",
848 .psize = 32,
849 .digest = "\x2A\x98\x7E\xA4\x0F\x91\x70\x61"
850 "\xF5\xD6\xF0\xA0\xE4\x64\x4F\x48"
851 "\x8A\x7A\x5A\x52\xDE\xEE\x65\x62"
852 "\x07\xC5\x62\xF9\x88\xE9\x5C\x69",
853 },
854};
855
856/*
857 * TIGER test vectors from Tiger website
858 */
859#define TGR192_TEST_VECTORS 6
860
861static struct hash_testvec tgr192_tv_template[] = {
862 {
863 .plaintext = "",
864 .psize = 0,
865 .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
866 "\x16\x16\x6e\x76\xb1\xbb\x92\x5f"
867 "\xf3\x73\xde\x2d\x49\x58\x4e\x7a",
868 }, {
869 .plaintext = "abc",
870 .psize = 3,
871 .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
872 "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf"
873 "\x93\x5f\x7b\x95\x1c\x13\x29\x51",
874 }, {
875 .plaintext = "Tiger",
876 .psize = 5,
877 .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
878 "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec"
879 "\x37\x79\x0c\x11\x6f\x9d\x2b\xdf",
880 }, {
881 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
882 .psize = 64,
883 .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
884 "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e"
885 "\xb5\x86\x44\x50\x34\xa5\xa3\x86",
886 }, {
887 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
888 .psize = 64,
889 .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
890 "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9"
891 "\x57\x89\x65\x65\x97\x5f\x91\x97",
892 }, {
893 .plaintext = "Tiger - A Fast New Hash Function, "
894 "by Ross Anderson and Eli Biham, "
895 "proceedings of Fast Software Encryption 3, "
896 "Cambridge, 1996.",
897 .psize = 125,
898 .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
899 "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24"
900 "\xdd\x68\x15\x1d\x50\x39\x74\xfc",
901 },
902};
903
904#define TGR160_TEST_VECTORS 6
905
906static struct hash_testvec tgr160_tv_template[] = {
907 {
908 .plaintext = "",
909 .psize = 0,
910 .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
911 "\x16\x16\x6e\x76\xb1\xbb\x92\x5f"
912 "\xf3\x73\xde\x2d",
913 }, {
914 .plaintext = "abc",
915 .psize = 3,
916 .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
917 "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf"
918 "\x93\x5f\x7b\x95",
919 }, {
920 .plaintext = "Tiger",
921 .psize = 5,
922 .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
923 "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec"
924 "\x37\x79\x0c\x11",
925 }, {
926 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
927 .psize = 64,
928 .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
929 "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e"
930 "\xb5\x86\x44\x50",
931 }, {
932 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
933 .psize = 64,
934 .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
935 "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9"
936 "\x57\x89\x65\x65",
937 }, {
938 .plaintext = "Tiger - A Fast New Hash Function, "
939 "by Ross Anderson and Eli Biham, "
940 "proceedings of Fast Software Encryption 3, "
941 "Cambridge, 1996.",
942 .psize = 125,
943 .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
944 "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24"
945 "\xdd\x68\x15\x1d",
946 },
947};
948
949#define TGR128_TEST_VECTORS 6
950
951static struct hash_testvec tgr128_tv_template[] = {
952 {
953 .plaintext = "",
954 .psize = 0,
955 .digest = "\x24\xf0\x13\x0c\x63\xac\x93\x32"
956 "\x16\x16\x6e\x76\xb1\xbb\x92\x5f",
957 }, {
958 .plaintext = "abc",
959 .psize = 3,
960 .digest = "\xf2\x58\xc1\xe8\x84\x14\xab\x2a"
961 "\x52\x7a\xb5\x41\xff\xc5\xb8\xbf",
962 }, {
963 .plaintext = "Tiger",
964 .psize = 5,
965 .digest = "\x9f\x00\xf5\x99\x07\x23\x00\xdd"
966 "\x27\x6a\xbb\x38\xc8\xeb\x6d\xec",
967 }, {
968 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-",
969 .psize = 64,
970 .digest = "\x87\xfb\x2a\x90\x83\x85\x1c\xf7"
971 "\x47\x0d\x2c\xf8\x10\xe6\xdf\x9e",
972 }, {
973 .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZ=abcdefghijklmnopqrstuvwxyz+0123456789",
974 .psize = 64,
975 .digest = "\x46\x7d\xb8\x08\x63\xeb\xce\x48"
976 "\x8d\xf1\xcd\x12\x61\x65\x5d\xe9",
977 }, {
978 .plaintext = "Tiger - A Fast New Hash Function, "
979 "by Ross Anderson and Eli Biham, "
980 "proceedings of Fast Software Encryption 3, "
981 "Cambridge, 1996.",
982 .psize = 125,
983 .digest = "\x3d\x9a\xeb\x03\xd1\xbd\x1a\x63"
984 "\x57\xb2\x77\x4d\xfd\x6d\x5b\x24",
985 },
986};
987
988/*
989 * HMAC-MD5 test vectors from RFC2202
990 * (These need to be fixed to not use strlen).
991 */
992#define HMAC_MD5_TEST_VECTORS 7
993
994static struct hash_testvec hmac_md5_tv_template[] =
995{
996 {
997 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
998 .ksize = 16,
999 .plaintext = "Hi There",
1000 .psize = 8,
1001 .digest = "\x92\x94\x72\x7a\x36\x38\xbb\x1c"
1002 "\x13\xf4\x8e\xf8\x15\x8b\xfc\x9d",
1003 }, {
1004 .key = "Jefe",
1005 .ksize = 4,
1006 .plaintext = "what do ya want for nothing?",
1007 .psize = 28,
1008 .digest = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03"
1009 "\xea\xa8\x6e\x31\x0a\x5d\xb7\x38",
1010 .np = 2,
1011 .tap = {14, 14}
1012 }, {
1013 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1014 .ksize = 16,
1015 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1016 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1017 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1018 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1019 .psize = 50,
1020 .digest = "\x56\xbe\x34\x52\x1d\x14\x4c\x88"
1021 "\xdb\xb8\xc7\x33\xf0\xe8\xb3\xf6",
1022 }, {
1023 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1024 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1025 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1026 .ksize = 25,
1027 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1028 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1029 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1030 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1031 .psize = 50,
1032 .digest = "\x69\x7e\xaf\x0a\xca\x3a\x3a\xea"
1033 "\x3a\x75\x16\x47\x46\xff\xaa\x79",
1034 }, {
1035 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1036 .ksize = 16,
1037 .plaintext = "Test With Truncation",
1038 .psize = 20,
1039 .digest = "\x56\x46\x1e\xf2\x34\x2e\xdc\x00"
1040 "\xf9\xba\xb9\x95\x69\x0e\xfd\x4c",
1041 }, {
1042 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1043 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1044 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1045 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1046 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1047 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1048 "\xaa\xaa",
1049 .ksize = 80,
1050 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1051 .psize = 54,
1052 .digest = "\x6b\x1a\xb7\xfe\x4b\xd7\xbf\x8f"
1053 "\x0b\x62\xe6\xce\x61\xb9\xd0\xcd",
1054 }, {
1055 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1056 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1057 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1058 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1059 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1060 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1061 "\xaa\xaa",
1062 .ksize = 80,
1063 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1064 "Block-Size Data",
1065 .psize = 73,
1066 .digest = "\x6f\x63\x0f\xad\x67\xcd\xa0\xee"
1067 "\x1f\xb1\xf5\x62\xdb\x3a\xa5\x3e",
1068 },
1069};
1070
1071/*
1072 * HMAC-RIPEMD128 test vectors from RFC2286
1073 */
1074#define HMAC_RMD128_TEST_VECTORS 7
1075
1076static struct hash_testvec hmac_rmd128_tv_template[] = {
1077 {
1078 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1079 .ksize = 16,
1080 .plaintext = "Hi There",
1081 .psize = 8,
1082 .digest = "\xfb\xf6\x1f\x94\x92\xaa\x4b\xbf"
1083 "\x81\xc1\x72\xe8\x4e\x07\x34\xdb",
1084 }, {
1085 .key = "Jefe",
1086 .ksize = 4,
1087 .plaintext = "what do ya want for nothing?",
1088 .psize = 28,
1089 .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34"
1090 "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b",
1091 .np = 2,
1092 .tap = { 14, 14 },
1093 }, {
1094 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1095 .ksize = 16,
1096 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1097 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1098 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1099 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1100 .psize = 50,
1101 .digest = "\x09\xf0\xb2\x84\x6d\x2f\x54\x3d"
1102 "\xa3\x63\xcb\xec\x8d\x62\xa3\x8d",
1103 }, {
1104 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1105 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1106 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1107 .ksize = 25,
1108 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1109 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1110 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1111 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1112 .psize = 50,
1113 .digest = "\xbd\xbb\xd7\xcf\x03\xe4\x4b\x5a"
1114 "\xa6\x0a\xf8\x15\xbe\x4d\x22\x94",
1115 }, {
1116 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1117 .ksize = 16,
1118 .plaintext = "Test With Truncation",
1119 .psize = 20,
1120 .digest = "\xe7\x98\x08\xf2\x4b\x25\xfd\x03"
1121 "\x1c\x15\x5f\x0d\x55\x1d\x9a\x3a",
1122 }, {
1123 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1124 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1125 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1126 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1127 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1128 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1129 "\xaa\xaa",
1130 .ksize = 80,
1131 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1132 .psize = 54,
1133 .digest = "\xdc\x73\x29\x28\xde\x98\x10\x4a"
1134 "\x1f\x59\xd3\x73\xc1\x50\xac\xbb",
1135 }, {
1136 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1137 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1138 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1139 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1140 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1141 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1142 "\xaa\xaa",
1143 .ksize = 80,
1144 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1145 "Block-Size Data",
1146 .psize = 73,
1147 .digest = "\x5c\x6b\xec\x96\x79\x3e\x16\xd4"
1148 "\x06\x90\xc2\x37\x63\x5f\x30\xc5",
1149 },
1150};
1151
1152/*
1153 * HMAC-RIPEMD160 test vectors from RFC2286
1154 */
1155#define HMAC_RMD160_TEST_VECTORS 7
1156
1157static struct hash_testvec hmac_rmd160_tv_template[] = {
1158 {
1159 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1160 .ksize = 20,
1161 .plaintext = "Hi There",
1162 .psize = 8,
1163 .digest = "\x24\xcb\x4b\xd6\x7d\x20\xfc\x1a\x5d\x2e"
1164 "\xd7\x73\x2d\xcc\x39\x37\x7f\x0a\x56\x68",
1165 }, {
1166 .key = "Jefe",
1167 .ksize = 4,
1168 .plaintext = "what do ya want for nothing?",
1169 .psize = 28,
1170 .digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4"
1171 "\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69",
1172 .np = 2,
1173 .tap = { 14, 14 },
1174 }, {
1175 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1176 .ksize = 20,
1177 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1178 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1179 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1180 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1181 .psize = 50,
1182 .digest = "\xb0\xb1\x05\x36\x0d\xe7\x59\x96\x0a\xb4"
1183 "\xf3\x52\x98\xe1\x16\xe2\x95\xd8\xe7\xc1",
1184 }, {
1185 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1186 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1187 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1188 .ksize = 25,
1189 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1190 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1191 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1192 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1193 .psize = 50,
1194 .digest = "\xd5\xca\x86\x2f\x4d\x21\xd5\xe6\x10\xe1"
1195 "\x8b\x4c\xf1\xbe\xb9\x7a\x43\x65\xec\xf4",
1196 }, {
1197 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1198 .ksize = 20,
1199 .plaintext = "Test With Truncation",
1200 .psize = 20,
1201 .digest = "\x76\x19\x69\x39\x78\xf9\x1d\x90\x53\x9a"
1202 "\xe7\x86\x50\x0f\xf3\xd8\xe0\x51\x8e\x39",
1203 }, {
1204 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1205 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1206 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1207 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1208 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1209 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1210 "\xaa\xaa",
1211 .ksize = 80,
1212 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1213 .psize = 54,
1214 .digest = "\x64\x66\xca\x07\xac\x5e\xac\x29\xe1\xbd"
1215 "\x52\x3e\x5a\xda\x76\x05\xb7\x91\xfd\x8b",
1216 }, {
1217 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1218 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1219 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1220 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1221 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1222 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1223 "\xaa\xaa",
1224 .ksize = 80,
1225 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1226 "Block-Size Data",
1227 .psize = 73,
1228 .digest = "\x69\xea\x60\x79\x8d\x71\x61\x6c\xce\x5f"
1229 "\xd0\x87\x1e\x23\x75\x4c\xd7\x5d\x5a\x0a",
1230 },
1231};
1232
1233/*
1234 * HMAC-SHA1 test vectors from RFC2202
1235 */
1236#define HMAC_SHA1_TEST_VECTORS 7
1237
1238static struct hash_testvec hmac_sha1_tv_template[] = {
1239 {
1240 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
1241 .ksize = 20,
1242 .plaintext = "Hi There",
1243 .psize = 8,
1244 .digest = "\xb6\x17\x31\x86\x55\x05\x72\x64"
1245 "\xe2\x8b\xc0\xb6\xfb\x37\x8c\x8e\xf1"
1246 "\x46\xbe",
1247 }, {
1248 .key = "Jefe",
1249 .ksize = 4,
1250 .plaintext = "what do ya want for nothing?",
1251 .psize = 28,
1252 .digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74"
1253 "\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79",
1254 .np = 2,
1255 .tap = { 14, 14 }
1256 }, {
1257 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1258 .ksize = 20,
1259 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1260 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1261 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1262 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1263 .psize = 50,
1264 .digest = "\x12\x5d\x73\x42\xb9\xac\x11\xcd\x91\xa3"
1265 "\x9a\xf4\x8a\xa1\x7b\x4f\x63\xf1\x75\xd3",
1266 }, {
1267 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1268 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1269 "\x11\x12\x13\x14\x15\x16\x17\x18\x19",
1270 .ksize = 25,
1271 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1272 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1273 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1274 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1275 .psize = 50,
1276 .digest = "\x4c\x90\x07\xf4\x02\x62\x50\xc6\xbc\x84"
1277 "\x14\xf9\xbf\x50\xc8\x6c\x2d\x72\x35\xda",
1278 }, {
1279 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c",
1280 .ksize = 20,
1281 .plaintext = "Test With Truncation",
1282 .psize = 20,
1283 .digest = "\x4c\x1a\x03\x42\x4b\x55\xe0\x7f\xe7\xf2"
1284 "\x7b\xe1\xd5\x8b\xb9\x32\x4a\x9a\x5a\x04",
1285 }, {
1286 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1287 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1288 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1289 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1290 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1291 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1292 "\xaa\xaa",
1293 .ksize = 80,
1294 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1295 .psize = 54,
1296 .digest = "\xaa\x4a\xe5\xe1\x52\x72\xd0\x0e\x95\x70"
1297 "\x56\x37\xce\x8a\x3b\x55\xed\x40\x21\x12",
1298 }, {
1299 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1300 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1301 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1302 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1303 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1304 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1305 "\xaa\xaa",
1306 .ksize = 80,
1307 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than One "
1308 "Block-Size Data",
1309 .psize = 73,
1310 .digest = "\xe8\xe9\x9d\x0f\x45\x23\x7d\x78\x6d\x6b"
1311 "\xba\xa7\x96\x5c\x78\x08\xbb\xff\x1a\x91",
1312 },
1313};
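The RFC 2202 vectors above are easy to reproduce outside the kernel when sanity-checking a transcription. Below is a minimal user-space sketch, assuming OpenSSL's one-shot HMAC() helper is available (it is not part of this file), that checks the first vector (twenty 0x0b key bytes, message "Hi There"). Note that RFC 2202's digest for this case ends in 0x00, so the 19 explicit bytes in the template above plus the string literal's implicit NUL still cover the full 20-byte value.

/* Hypothetical user-space check of RFC 2202 test case 1; not kernel code. */
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char key[20], md[EVP_MAX_MD_SIZE];
	unsigned int mdlen = 0;
	/* Full 20-byte digest from RFC 2202. */
	static const unsigned char expect[] =
		"\xb6\x17\x31\x86\x55\x05\x72\x64\xe2\x8b"
		"\xc0\xb6\xfb\x37\x8c\x8e\xf1\x46\xbe\x00";

	memset(key, 0x0b, sizeof(key));
	HMAC(EVP_sha1(), key, sizeof(key),
	     (const unsigned char *)"Hi There", 8, md, &mdlen);
	puts(mdlen == 20 && !memcmp(md, expect, 20) ? "match" : "MISMATCH");
	return 0;
}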
1314
1315
1316/*
1317 * SHA224 HMAC test vectors from RFC4231
1318 */
1319#define HMAC_SHA224_TEST_VECTORS 4
1320
1321static struct hash_testvec hmac_sha224_tv_template[] = {
1322 {
1323 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1324 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1325 "\x0b\x0b\x0b\x0b",
1326 .ksize = 20,
1327 /* ("Hi There") */
1328 .plaintext = "\x48\x69\x20\x54\x68\x65\x72\x65",
1329 .psize = 8,
1330 .digest = "\x89\x6f\xb1\x12\x8a\xbb\xdf\x19"
1331 "\x68\x32\x10\x7c\xd4\x9d\xf3\x3f"
1332 "\x47\xb4\xb1\x16\x99\x12\xba\x4f"
1333 "\x53\x68\x4b\x22",
1334 }, {
1335 .key = "Jefe",
1336 .ksize = 4,
1337 /* ("what do ya want for nothing?") */
1338 .plaintext = "\x77\x68\x61\x74\x20\x64\x6f\x20"
1339 "\x79\x61\x20\x77\x61\x6e\x74\x20"
1340 "\x66\x6f\x72\x20\x6e\x6f\x74\x68"
1341 "\x69\x6e\x67\x3f",
1342 .psize = 28,
1343 .digest = "\xa3\x0e\x01\x09\x8b\xc6\xdb\xbf"
1344 "\x45\x69\x0f\x3a\x7e\x9e\x6d\x0f"
1345 "\x8b\xbe\xa2\xa3\x9e\x61\x48\x00"
1346 "\x8f\xd0\x5e\x44",
1347 .np = 4,
1348 .tap = { 7, 7, 7, 7 }
1349 }, {
1350 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1351 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1352 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1353 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1354 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1355 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1356 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1357 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1358 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1359 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1360 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1361 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1362 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1363 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1364 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1365 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1366 "\xaa\xaa\xaa",
1367 .ksize = 131,
1368 /* ("Test Using Larger Than Block-Size Key - Hash Key First") */
1369 .plaintext = "\x54\x65\x73\x74\x20\x55\x73\x69"
1370 "\x6e\x67\x20\x4c\x61\x72\x67\x65"
1371 "\x72\x20\x54\x68\x61\x6e\x20\x42"
1372 "\x6c\x6f\x63\x6b\x2d\x53\x69\x7a"
1373 "\x65\x20\x4b\x65\x79\x20\x2d\x20"
1374 "\x48\x61\x73\x68\x20\x4b\x65\x79"
1375 "\x20\x46\x69\x72\x73\x74",
1376 .psize = 54,
1377 .digest = "\x95\xe9\xa0\xdb\x96\x20\x95\xad"
1378 "\xae\xbe\x9b\x2d\x6f\x0d\xbc\xe2"
1379 "\xd4\x99\xf1\x12\xf2\xd2\xb7\x27"
1380 "\x3f\xa6\x87\x0e",
1381 }, {
1382 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1383 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1384 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1385 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1386 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1387 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1388 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1389 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1390 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1391 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1392 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1393 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1394 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1395 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1396 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1397 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1398 "\xaa\xaa\xaa",
1399 .ksize = 131,
1400 /* ("This is a test using a larger than block-size key and a")
1401 (" larger than block-size data. The key needs to be")
1402 (" hashed before being used by the HMAC algorithm.") */
1403 .plaintext = "\x54\x68\x69\x73\x20\x69\x73\x20"
1404 "\x61\x20\x74\x65\x73\x74\x20\x75"
1405 "\x73\x69\x6e\x67\x20\x61\x20\x6c"
1406 "\x61\x72\x67\x65\x72\x20\x74\x68"
1407 "\x61\x6e\x20\x62\x6c\x6f\x63\x6b"
1408 "\x2d\x73\x69\x7a\x65\x20\x6b\x65"
1409 "\x79\x20\x61\x6e\x64\x20\x61\x20"
1410 "\x6c\x61\x72\x67\x65\x72\x20\x74"
1411 "\x68\x61\x6e\x20\x62\x6c\x6f\x63"
1412 "\x6b\x2d\x73\x69\x7a\x65\x20\x64"
1413 "\x61\x74\x61\x2e\x20\x54\x68\x65"
1414 "\x20\x6b\x65\x79\x20\x6e\x65\x65"
1415 "\x64\x73\x20\x74\x6f\x20\x62\x65"
1416 "\x20\x68\x61\x73\x68\x65\x64\x20"
1417 "\x62\x65\x66\x6f\x72\x65\x20\x62"
1418 "\x65\x69\x6e\x67\x20\x75\x73\x65"
1419 "\x64\x20\x62\x79\x20\x74\x68\x65"
1420 "\x20\x48\x4d\x41\x43\x20\x61\x6c"
1421 "\x67\x6f\x72\x69\x74\x68\x6d\x2e",
1422 .psize = 152,
1423 .digest = "\x3a\x85\x41\x66\xac\x5d\x9f\x02"
1424 "\x3f\x54\xd5\x17\xd0\xb3\x9d\xbd"
1425 "\x94\x67\x70\xdb\x9c\x2b\x95\xc9"
1426 "\xf6\xf5\x65\xd1",
1427 },
1428};
1429
1430/*
1431 * HMAC-SHA256 test vectors from
1432 * draft-ietf-ipsec-ciph-sha-256-01.txt
1433 */
1434#define HMAC_SHA256_TEST_VECTORS 10
1435
1436static struct hash_testvec hmac_sha256_tv_template[] = {
1437 {
1438 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1439 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1440 "\x11\x12\x13\x14\x15\x16\x17\x18"
1441 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20",
1442 .ksize = 32,
1443 .plaintext = "abc",
1444 .psize = 3,
1445 .digest = "\xa2\x1b\x1f\x5d\x4c\xf4\xf7\x3a"
1446 "\x4d\xd9\x39\x75\x0f\x7a\x06\x6a"
1447 "\x7f\x98\xcc\x13\x1c\xb1\x6a\x66"
1448 "\x92\x75\x90\x21\xcf\xab\x81\x81",
1449 }, {
1450 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1451 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1452 "\x11\x12\x13\x14\x15\x16\x17\x18"
1453 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20",
1454 .ksize = 32,
1455 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
1456 .psize = 56,
1457 .digest = "\x10\x4f\xdc\x12\x57\x32\x8f\x08"
1458 "\x18\x4b\xa7\x31\x31\xc5\x3c\xae"
1459 "\xe6\x98\xe3\x61\x19\x42\x11\x49"
1460 "\xea\x8c\x71\x24\x56\x69\x7d\x30",
1461 }, {
1462 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1463 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1464 "\x11\x12\x13\x14\x15\x16\x17\x18"
1465 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20",
1466 .ksize = 32,
1467 .plaintext = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
1468 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
1469 .psize = 112,
1470 .digest = "\x47\x03\x05\xfc\x7e\x40\xfe\x34"
1471 "\xd3\xee\xb3\xe7\x73\xd9\x5a\xab"
1472 "\x73\xac\xf0\xfd\x06\x04\x47\xa5"
1473 "\xeb\x45\x95\xbf\x33\xa9\xd1\xa3",
1474 }, {
1475 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1476 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1477 "\x0b\x0b\x0b\x0b\x0b\x0b",
1478 .ksize = 32,
1479 .plaintext = "Hi There",
1480 .psize = 8,
1481 .digest = "\x19\x8a\x60\x7e\xb4\x4b\xfb\xc6"
1482 "\x99\x03\xa0\xf1\xcf\x2b\xbd\xc5"
1483 "\xba\x0a\xa3\xf3\xd9\xae\x3c\x1c"
1484 "\x7a\x3b\x16\x96\xa0\xb6\x8c\xf7",
1485 }, {
1486 .key = "Jefe",
1487 .ksize = 4,
1488 .plaintext = "what do ya want for nothing?",
1489 .psize = 28,
1490 .digest = "\x5b\xdc\xc1\x46\xbf\x60\x75\x4e"
1491 "\x6a\x04\x24\x26\x08\x95\x75\xc7"
1492 "\x5a\x00\x3f\x08\x9d\x27\x39\x83"
1493 "\x9d\xec\x58\xb9\x64\xec\x38\x43",
1494 .np = 2,
1495 .tap = { 14, 14 }
1496 }, {
1497 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1498 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1499 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
1500 .ksize = 32,
1501 .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1502 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1503 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
1504 "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
1505 .psize = 50,
1506 .digest = "\xcd\xcb\x12\x20\xd1\xec\xcc\xea"
1507 "\x91\xe5\x3a\xba\x30\x92\xf9\x62"
1508 "\xe5\x49\xfe\x6c\xe9\xed\x7f\xdc"
1509 "\x43\x19\x1f\xbd\xe4\x5c\x30\xb0",
1510 }, {
1511 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
1512 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
1513 "\x11\x12\x13\x14\x15\x16\x17\x18"
1514 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
1515 "\x21\x22\x23\x24\x25",
1516 .ksize = 37,
1517 .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1518 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1519 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
1520 "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
1521 .psize = 50,
1522 .digest = "\xd4\x63\x3c\x17\xf6\xfb\x8d\x74"
1523 "\x4c\x66\xde\xe0\xf8\xf0\x74\x55"
1524 "\x6e\xc4\xaf\x55\xef\x07\x99\x85"
1525 "\x41\x46\x8e\xb4\x9b\xd2\xe9\x17",
1526 }, {
1527 .key = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
1528 "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
1529 "\x0c\x0c\x0c\x0c\x0c\x0c",
1530 .ksize = 32,
1531 .plaintext = "Test With Truncation",
1532 .psize = 20,
1533 .digest = "\x75\x46\xaf\x01\x84\x1f\xc0\x9b"
1534 "\x1a\xb9\xc3\x74\x9a\x5f\x1c\x17"
1535 "\xd4\xf5\x89\x66\x8a\x58\x7b\x27"
1536 "\x00\xa9\xc9\x7c\x11\x93\xcf\x42",
1537 }, {
1538 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1539 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1540 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1541 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1542 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1543 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1544 "\xaa\xaa",
1545 .ksize = 80,
1546 .plaintext = "Test Using Larger Than Block-Size Key - Hash Key First",
1547 .psize = 54,
1548 .digest = "\x69\x53\x02\x5e\xd9\x6f\x0c\x09"
1549 "\xf8\x0a\x96\xf7\x8e\x65\x38\xdb"
1550 "\xe2\xe7\xb8\x20\xe3\xdd\x97\x0e"
1551 "\x7d\xdd\x39\x09\x1b\x32\x35\x2f",
1552 }, {
1553 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1554 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1555 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1556 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1557 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1558 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1559 "\xaa\xaa",
1560 .ksize = 80,
1561 .plaintext = "Test Using Larger Than Block-Size Key and Larger Than "
1562 "One Block-Size Data",
1563 .psize = 73,
1564 .digest = "\x63\x55\xac\x22\xe8\x90\xd0\xa3"
1565 "\xc8\x48\x1a\x5c\xa4\x82\x5b\xc8"
1566 "\x84\xd3\xe7\xa1\xff\x98\xa2\xfc"
1567 "\x2a\xc7\xd8\xe0\x64\xc3\xb2\xe6",
1568 },
1569};
1570
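/*
 * AES-XCBC-MAC test vectors; the key, the message lengths (0, 3, 16, 20,
 * 32 and 34 bytes) and the MAC values below appear to match the test
 * cases given in RFC 3566.
 */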
1571#define XCBC_AES_TEST_VECTORS 6
1572
1573static struct hash_testvec aes_xcbc128_tv_template[] = {
1574 {
1575 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1576 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1577 .plaintext = zeroed_string,
1578 .digest = "\x75\xf0\x25\x1d\x52\x8a\xc0\x1c"
1579 "\x45\x73\xdf\xd5\x84\xd7\x9f\x29",
1580 .psize = 0,
1581 .ksize = 16,
1582 }, {
1583 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1584 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1585 .plaintext = "\x00\x01\x02",
1586 .digest = "\x5b\x37\x65\x80\xae\x2f\x19\xaf"
1587 "\xe7\x21\x9c\xee\xf1\x72\x75\x6f",
1588 .psize = 3,
1589 .ksize = 16,
1590 }, {
1591 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1592 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1593 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1594 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1595 .digest = "\xd2\xa2\x46\xfa\x34\x9b\x68\xa7"
1596 "\x99\x98\xa4\x39\x4f\xf7\xa2\x63",
1597 .psize = 16,
1598 .ksize = 16,
1599 }, {
1600 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1601 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1602 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1603 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
1604 "\x10\x11\x12\x13",
1605 .digest = "\x47\xf5\x1b\x45\x64\x96\x62\x15"
1606 "\xb8\x98\x5c\x63\x05\x5e\xd3\x08",
1607 .tap = { 10, 10 },
1608 .psize = 20,
1609 .np = 2,
1610 .ksize = 16,
1611 }, {
1612 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1613 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1614 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1615 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
1616 "\x10\x11\x12\x13\x14\x15\x16\x17"
1617 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
1618 .digest = "\xf5\x4f\x0e\xc8\xd2\xb9\xf3\xd3"
1619 "\x68\x07\x73\x4b\xd5\x28\x3f\xd4",
1620 .psize = 32,
1621 .ksize = 16,
1622 }, {
1623 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1624 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1625 .plaintext = "\x00\x01\x02\x03\x04\x05\x06\x07"
1626 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
1627 "\x10\x11\x12\x13\x14\x15\x16\x17"
1628 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
1629 "\x20\x21",
1630 .digest = "\xbe\xcb\xb3\xbc\xcd\xb5\x18\xa3"
1631 "\x06\x77\xd5\x48\x1f\xb6\xb4\xd8",
1632 .tap = { 17, 17 },
1633 .psize = 34,
1634 .np = 2,
1635 .ksize = 16,
1636 }
1637};
1638
1639/*
1640 * SHA384 HMAC test vectors from RFC4231
1641 */
1642
1643#define HMAC_SHA384_TEST_VECTORS 4
1644
1645static struct hash_testvec hmac_sha384_tv_template[] = {
1646 {
1647 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1648 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1649 "\x0b\x0b\x0b\x0b",
1650 .ksize = 20,
1651 .plaintext = "Hi There",
1652 .psize = 8,
1653 .digest = "\xaf\xd0\x39\x44\xd8\x48\x95\x62"
1654 "\x6b\x08\x25\xf4\xab\x46\x90\x7f"
1655 "\x15\xf9\xda\xdb\xe4\x10\x1e\xc6"
1656 "\x82\xaa\x03\x4c\x7c\xeb\xc5\x9c"
1657 "\xfa\xea\x9e\xa9\x07\x6e\xde\x7f"
1658 "\x4a\xf1\x52\xe8\xb2\xfa\x9c\xb6",
1659 }, {
1660 .key = "Jefe",
1661 .ksize = 4,
1662 .plaintext = "what do ya want for nothing?",
1663 .psize = 28,
1664 .digest = "\xaf\x45\xd2\xe3\x76\x48\x40\x31"
1665 "\x61\x7f\x78\xd2\xb5\x8a\x6b\x1b"
1666 "\x9c\x7e\xf4\x64\xf5\xa0\x1b\x47"
1667 "\xe4\x2e\xc3\x73\x63\x22\x44\x5e"
1668 "\x8e\x22\x40\xca\x5e\x69\xe2\xc7"
1669 "\x8b\x32\x39\xec\xfa\xb2\x16\x49",
1670 .np = 4,
1671 .tap = { 7, 7, 7, 7 }
1672 }, {
1673 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1674 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1675 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1676 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1677 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1678 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1679 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1680 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1681 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1682 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1683 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1684 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1685 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1686 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1687 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1688 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1689 "\xaa\xaa\xaa",
1690 .ksize = 131,
1691 .plaintext = "Test Using Larger Than Block-Siz"
1692 "e Key - Hash Key First",
1693 .psize = 54,
1694 .digest = "\x4e\xce\x08\x44\x85\x81\x3e\x90"
1695 "\x88\xd2\xc6\x3a\x04\x1b\xc5\xb4"
1696 "\x4f\x9e\xf1\x01\x2a\x2b\x58\x8f"
1697 "\x3c\xd1\x1f\x05\x03\x3a\xc4\xc6"
1698 "\x0c\x2e\xf6\xab\x40\x30\xfe\x82"
1699 "\x96\x24\x8d\xf1\x63\xf4\x49\x52",
1700 }, {
1701 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1702 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1703 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1704 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1705 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1706 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1707 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1708 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1709 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1710 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1711 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1712 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1713 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1714 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1715 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1716 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1717 "\xaa\xaa\xaa",
1718 .ksize = 131,
1719 .plaintext = "This is a test u"
1720 "sing a larger th"
1721 "an block-size ke"
1722 "y and a larger t"
1723 "han block-size d"
1724 "ata. The key nee"
1725 "ds to be hashed "
1726 "before being use"
1727 "d by the HMAC al"
1728 "gorithm.",
1729 .psize = 152,
1730 .digest = "\x66\x17\x17\x8e\x94\x1f\x02\x0d"
1731 "\x35\x1e\x2f\x25\x4e\x8f\xd3\x2c"
1732 "\x60\x24\x20\xfe\xb0\xb8\xfb\x9a"
1733 "\xdc\xce\xbb\x82\x46\x1e\x99\xc5"
1734 "\xa6\x78\xcc\x31\xe7\x99\x17\x6d"
1735 "\x38\x60\xe6\x11\x0c\x46\x52\x3e",
1736 },
1737};
1738
1739/*
1740 * SHA512 HMAC test vectors from RFC4231
1741 */
1742
1743#define HMAC_SHA512_TEST_VECTORS 4
1744
1745static struct hash_testvec hmac_sha512_tv_template[] = {
1746 {
1747 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1748 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
1749 "\x0b\x0b\x0b\x0b",
1750 .ksize = 20,
1751 .plaintext = "Hi There",
1752 .psize = 8,
1753 .digest = "\x87\xaa\x7c\xde\xa5\xef\x61\x9d"
1754 "\x4f\xf0\xb4\x24\x1a\x1d\x6c\xb0"
1755 "\x23\x79\xf4\xe2\xce\x4e\xc2\x78"
1756 "\x7a\xd0\xb3\x05\x45\xe1\x7c\xde"
1757 "\xda\xa8\x33\xb7\xd6\xb8\xa7\x02"
1758 "\x03\x8b\x27\x4e\xae\xa3\xf4\xe4"
1759 "\xbe\x9d\x91\x4e\xeb\x61\xf1\x70"
1760 "\x2e\x69\x6c\x20\x3a\x12\x68\x54",
1761 }, {
1762 .key = "Jefe",
1763 .ksize = 4,
1764 .plaintext = "what do ya want for nothing?",
1765 .psize = 28,
1766 .digest = "\x16\x4b\x7a\x7b\xfc\xf8\x19\xe2"
1767 "\xe3\x95\xfb\xe7\x3b\x56\xe0\xa3"
1768 "\x87\xbd\x64\x22\x2e\x83\x1f\xd6"
1769 "\x10\x27\x0c\xd7\xea\x25\x05\x54"
1770 "\x97\x58\xbf\x75\xc0\x5a\x99\x4a"
1771 "\x6d\x03\x4f\x65\xf8\xf0\xe6\xfd"
1772 "\xca\xea\xb1\xa3\x4d\x4a\x6b\x4b"
1773 "\x63\x6e\x07\x0a\x38\xbc\xe7\x37",
1774 .np = 4,
1775 .tap = { 7, 7, 7, 7 }
1776 }, {
1777 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1778 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1779 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1780 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1781 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1782 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1783 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1784 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1785 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1786 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1787 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1788 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1789 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1790 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1791 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1792 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1793 "\xaa\xaa\xaa",
1794 .ksize = 131,
1795 .plaintext = "Test Using Large"
1796 "r Than Block-Siz"
1797 "e Key - Hash Key"
1798 " First",
1799 .psize = 54,
1800 .digest = "\x80\xb2\x42\x63\xc7\xc1\xa3\xeb"
1801 "\xb7\x14\x93\xc1\xdd\x7b\xe8\xb4"
1802 "\x9b\x46\xd1\xf4\x1b\x4a\xee\xc1"
1803 "\x12\x1b\x01\x37\x83\xf8\xf3\x52"
1804 "\x6b\x56\xd0\x37\xe0\x5f\x25\x98"
1805 "\xbd\x0f\xd2\x21\x5d\x6a\x1e\x52"
1806 "\x95\xe6\x4f\x73\xf6\x3f\x0a\xec"
1807 "\x8b\x91\x5a\x98\x5d\x78\x65\x98",
1808 }, {
1809 .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1810 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1811 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1812 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1813 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1814 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1815 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1816 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1817 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1818 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1819 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1820 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1821 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1822 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1823 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1824 "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
1825 "\xaa\xaa\xaa",
1826 .ksize = 131,
1827 .plaintext =
1828 "This is a test u"
1829 "sing a larger th"
1830 "an block-size ke"
1831 "y and a larger t"
1832 "han block-size d"
1833 "ata. The key nee"
1834 "ds to be hashed "
1835 "before being use"
1836 "d by the HMAC al"
1837 "gorithm.",
1838 .psize = 152,
1839 .digest = "\xe3\x7b\x6a\x77\x5d\xc8\x7d\xba"
1840 "\xa4\xdf\xa9\xf9\x6e\x5e\x3f\xfd"
1841 "\xde\xbd\x71\xf8\x86\x72\x89\x86"
1842 "\x5d\xf5\xa3\x2d\x20\xcd\xc9\x44"
1843 "\xb6\x02\x2c\xac\x3c\x49\x82\xb1"
1844 "\x0d\x5e\xeb\x55\xc3\xe4\xde\x15"
1845 "\x13\x46\x76\xfb\x6d\xe0\x44\x60"
1846 "\x65\xc9\x74\x40\xfa\x8c\x6a\x58",
1847 },
1848};
1849
1850/*
1851 * DES test vectors.
1852 */
1853#define DES_ENC_TEST_VECTORS 10
1854#define DES_DEC_TEST_VECTORS 4
1855#define DES_CBC_ENC_TEST_VECTORS 5
1856#define DES_CBC_DEC_TEST_VECTORS 4
1857#define DES3_EDE_ENC_TEST_VECTORS 3
1858#define DES3_EDE_DEC_TEST_VECTORS 3
1859#define DES3_EDE_CBC_ENC_TEST_VECTORS 1
1860#define DES3_EDE_CBC_DEC_TEST_VECTORS 1
1861
1862static struct cipher_testvec des_enc_tv_template[] = {
1863 { /* From Applied Cryptography */
1864 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1865 .klen = 8,
1866 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1867 .ilen = 8,
1868 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1869 .rlen = 8,
1870 }, { /* Same key, different plaintext block */
1871 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1872 .klen = 8,
1873 .input = "\x22\x33\x44\x55\x66\x77\x88\x99",
1874 .ilen = 8,
1875 .result = "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1876 .rlen = 8,
1877 }, { /* Sbox test from NBS */
1878 .key = "\x7c\xa1\x10\x45\x4a\x1a\x6e\x57",
1879 .klen = 8,
1880 .input = "\x01\xa1\xd6\xd0\x39\x77\x67\x42",
1881 .ilen = 8,
1882 .result = "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
1883 .rlen = 8,
1884 }, { /* Three blocks */
1885 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1886 .klen = 8,
1887 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1888 "\x22\x33\x44\x55\x66\x77\x88\x99"
1889 "\xca\xfe\xba\xbe\xfe\xed\xbe\xef",
1890 .ilen = 24,
1891 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1892 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b"
1893 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
1894 .rlen = 24,
1895 }, { /* Weak key */
1896 .fail = 1,
1897 .wk = 1,
1898 .key = "\x01\x01\x01\x01\x01\x01\x01\x01",
1899 .klen = 8,
1900 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1901 .ilen = 8,
1902 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1903 .rlen = 8,
1904 }, { /* Two blocks -- for testing encryption across pages */
1905 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1906 .klen = 8,
1907 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1908 "\x22\x33\x44\x55\x66\x77\x88\x99",
1909 .ilen = 16,
1910 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1911 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1912 .rlen = 16,
1913 .np = 2,
1914 .tap = { 8, 8 }
1915 }, { /* Four blocks -- for testing encryption with chunking */
1916 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1917 .klen = 8,
1918 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1919 "\x22\x33\x44\x55\x66\x77\x88\x99"
1920 "\xca\xfe\xba\xbe\xfe\xed\xbe\xef"
1921 "\x22\x33\x44\x55\x66\x77\x88\x99",
1922 .ilen = 32,
1923 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1924 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b"
1925 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90"
1926 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1927 .rlen = 32,
1928 .np = 3,
1929 .tap = { 14, 10, 8 }
1930 }, {
1931 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1932 .klen = 8,
1933 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1934 "\x22\x33\x44\x55\x66\x77\x88\x99"
1935 "\xca\xfe\xba\xbe\xfe\xed\xbe\xef",
1936 .ilen = 24,
1937 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1938 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b"
1939 "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
1940 .rlen = 24,
1941 .np = 4,
1942 .tap = { 2, 1, 3, 18 }
1943 }, {
1944 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1945 .klen = 8,
1946 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1947 "\x22\x33\x44\x55\x66\x77\x88\x99",
1948 .ilen = 16,
1949 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1950 "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b",
1951 .rlen = 16,
1952 .np = 5,
1953 .tap = { 2, 2, 2, 2, 8 }
1954 }, {
1955 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1956 .klen = 8,
1957 .input = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1958 .ilen = 8,
1959 .result = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1960 .rlen = 8,
1961 .np = 8,
1962 .tap = { 1, 1, 1, 1, 1, 1, 1, 1 }
1963 },
1964};
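/*
 * Note on the chunking fields used in several vectors above: .np is the
 * number of scatterlist segments the input is split into and .tap[]
 * holds the size of each segment, so the entries should sum to .ilen
 * (or .psize for hash vectors). For example, { 2, 1, 3, 18 } feeds the
 * same 24-byte input to the cipher in four pieces to exercise walking
 * across segment boundaries.
 */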
1965
1966static struct cipher_testvec des_dec_tv_template[] = {
1967 { /* From Applied Cryptography */
1968 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1969 .klen = 8,
1970 .input = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d",
1971 .ilen = 8,
1972 .result = "\x01\x23\x45\x67\x89\xab\xcd\xe7",
1973 .rlen = 8,
1974 }, { /* Sbox test from NBS */
1975 .key = "\x7c\xa1\x10\x45\x4a\x1a\x6e\x57",
1976 .klen = 8,
1977 .input = "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
1978 .ilen = 8,
1979 .result = "\x01\xa1\xd6\xd0\x39\x77\x67\x42",
1980 .rlen = 8,
1981 }, { /* Two blocks, for chunking test */
1982 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1983 .klen = 8,
1984 .input = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1985 "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
1986 .ilen = 16,
1987 .result = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1988 "\xa3\x99\x7b\xca\xaf\x69\xa0\xf5",
1989 .rlen = 16,
1990 .np = 2,
1991 .tap = { 8, 8 }
1992 }, {
1993 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
1994 .klen = 8,
1995 .input = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d"
1996 "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b",
1997 .ilen = 16,
1998 .result = "\x01\x23\x45\x67\x89\xab\xcd\xe7"
1999 "\xa3\x99\x7b\xca\xaf\x69\xa0\xf5",
2000 .rlen = 16,
2001 .np = 3,
2002 .tap = { 3, 12, 1 }
2003 },
2004};
2005
2006static struct cipher_testvec des_cbc_enc_tv_template[] = {
2007 { /* From OpenSSL */
2008 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2009 .klen = 8,
2010 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2011 .input = "\x37\x36\x35\x34\x33\x32\x31\x20"
2012 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2013 "\x68\x65\x20\x74\x69\x6d\x65\x20",
2014 .ilen = 24,
2015 .result = "\xcc\xd1\x73\xff\xab\x20\x39\xf4"
2016 "\xac\xd8\xae\xfd\xdf\xd8\xa1\xeb"
2017 "\x46\x8e\x91\x15\x78\x88\xba\x68",
2018 .rlen = 24,
2019 }, { /* FIPS Pub 81 */
2020 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2021 .klen = 8,
2022 .iv = "\x12\x34\x56\x78\x90\xab\xcd\xef",
2023 .input = "\x4e\x6f\x77\x20\x69\x73\x20\x74",
2024 .ilen = 8,
2025 .result = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2026 .rlen = 8,
2027 }, {
2028 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2029 .klen = 8,
2030 .iv = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2031 .input = "\x68\x65\x20\x74\x69\x6d\x65\x20",
2032 .ilen = 8,
2033 .result = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2034 .rlen = 8,
2035 }, {
2036 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2037 .klen = 8,
2038 .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2039 .input = "\x66\x6f\x72\x20\x61\x6c\x6c\x20",
2040 .ilen = 8,
2041 .result = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
2042 .rlen = 8,
2043 }, { /* Copy of openssl vector for chunk testing */
2044 /* From OpenSSL */
2045 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2046 .klen = 8,
2047 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2048 .input = "\x37\x36\x35\x34\x33\x32\x31\x20"
2049 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2050 "\x68\x65\x20\x74\x69\x6d\x65\x20",
2051 .ilen = 24,
2052 .result = "\xcc\xd1\x73\xff\xab\x20\x39\xf4"
2053 "\xac\xd8\xae\xfd\xdf\xd8\xa1\xeb"
2054 "\x46\x8e\x91\x15\x78\x88\xba\x68",
2055 .rlen = 24,
2056 .np = 2,
2057 .tap = { 13, 11 }
2058 },
2059};
2060
2061static struct cipher_testvec des_cbc_dec_tv_template[] = {
2062 { /* FIPS Pub 81 */
2063 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2064 .klen = 8,
2065 .iv = "\x12\x34\x56\x78\x90\xab\xcd\xef",
2066 .input = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2067 .ilen = 8,
2068 .result = "\x4e\x6f\x77\x20\x69\x73\x20\x74",
2069 .rlen = 8,
2070 }, {
2071 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2072 .klen = 8,
2073 .iv = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c",
2074 .input = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2075 .ilen = 8,
2076 .result = "\x68\x65\x20\x74\x69\x6d\x65\x20",
2077 .rlen = 8,
2078 }, {
2079 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2080 .klen = 8,
2081 .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2082 .input = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
2083 .ilen = 8,
2084 .result = "\x66\x6f\x72\x20\x61\x6c\x6c\x20",
2085 .rlen = 8,
2086 }, { /* Copy of above, for chunk testing */
2087 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2088 .klen = 8,
2089 .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f",
2090 .input = "\x68\x37\x88\x49\x9a\x7c\x05\xf6",
2091 .ilen = 8,
2092 .result = "\x66\x6f\x72\x20\x61\x6c\x6c\x20",
2093 .rlen = 8,
2094 .np = 2,
2095 .tap = { 4, 4 }
2096 },
2097};
2098
2099static struct cipher_testvec des3_ede_enc_tv_template[] = {
2100 { /* These are from openssl */
2101 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2102 "\x55\x55\x55\x55\x55\x55\x55\x55"
2103 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2104 .klen = 24,
2105 .input = "\x73\x6f\x6d\x65\x64\x61\x74\x61",
2106 .ilen = 8,
2107 .result = "\x18\xd7\x48\xe5\x63\x62\x05\x72",
2108 .rlen = 8,
2109 }, {
2110 .key = "\x03\x52\x02\x07\x67\x20\x82\x17"
2111 "\x86\x02\x87\x66\x59\x08\x21\x98"
2112 "\x64\x05\x6a\xbd\xfe\xa9\x34\x57",
2113 .klen = 24,
2114 .input = "\x73\x71\x75\x69\x67\x67\x6c\x65",
2115 .ilen = 8,
2116 .result = "\xc0\x7d\x2a\x0f\xa5\x66\xfa\x30",
2117 .rlen = 8,
2118 }, {
2119 .key = "\x10\x46\x10\x34\x89\x98\x80\x20"
2120 "\x91\x07\xd0\x15\x89\x19\x01\x01"
2121 "\x19\x07\x92\x10\x98\x1a\x01\x01",
2122 .klen = 24,
2123 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
2124 .ilen = 8,
2125 .result = "\xe1\xef\x62\xc3\x32\xfe\x82\x5b",
2126 .rlen = 8,
2127 },
2128};
2129
2130static struct cipher_testvec des3_ede_dec_tv_template[] = {
2131 { /* These are from openssl */
2132 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2133 "\x55\x55\x55\x55\x55\x55\x55\x55"
2134 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2135 .klen = 24,
2136 .input = "\x18\xd7\x48\xe5\x63\x62\x05\x72",
2137 .ilen = 8,
2138 .result = "\x73\x6f\x6d\x65\x64\x61\x74\x61",
2139 .rlen = 8,
2140 }, {
2141 .key = "\x03\x52\x02\x07\x67\x20\x82\x17"
2142 "\x86\x02\x87\x66\x59\x08\x21\x98"
2143 "\x64\x05\x6a\xbd\xfe\xa9\x34\x57",
2144 .klen = 24,
2145 .input = "\xc0\x7d\x2a\x0f\xa5\x66\xfa\x30",
2146 .ilen = 8,
2147 .result = "\x73\x71\x75\x69\x67\x67\x6c\x65",
2148 .rlen = 8,
2149 }, {
2150 .key = "\x10\x46\x10\x34\x89\x98\x80\x20"
2151 "\x91\x07\xd0\x15\x89\x19\x01\x01"
2152 "\x19\x07\x92\x10\x98\x1a\x01\x01",
2153 .klen = 24,
2154 .input = "\xe1\xef\x62\xc3\x32\xfe\x82\x5b",
2155 .ilen = 8,
2156 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
2157 .rlen = 8,
2158 },
2159};
2160
2161static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
2162 { /* Generated from openssl */
2163 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2164 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2165 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2166 .klen = 24,
2167 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2168 .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2169 "\x53\x20\x63\x65\x65\x72\x73\x74"
2170 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2171 "\x20\x79\x65\x53\x72\x63\x74\x65"
2172 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2173 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2174 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2175 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2176 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2177 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2178 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2179 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2180 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2181 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2182 "\x63\x65\x65\x72\x73\x74\x54\x20"
2183 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2184 .ilen = 128,
2185 .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2186 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2187 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2188 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2189 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2190 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2191 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2192 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2193 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2194 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2195 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2196 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2197 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2198 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2199 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2200 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2201 .rlen = 128,
2202 },
2203};
2204
2205static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
2206 { /* Generated from openssl */
2207 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
2208 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
2209 "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
2210 .klen = 24,
2211 .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
2212 .input = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4"
2213 "\x67\x17\x21\xc7\x6e\x8a\xd5\x49"
2214 "\x74\xb3\x49\x05\xc5\x1c\xd0\xed"
2215 "\x12\x56\x5c\x53\x96\xb6\x00\x7d"
2216 "\x90\x48\xfc\xf5\x8d\x29\x39\xcc"
2217 "\x8a\xd5\x35\x18\x36\x23\x4e\xd7"
2218 "\x76\xd1\xda\x0c\x94\x67\xbb\x04"
2219 "\x8b\xf2\x03\x6c\xa8\xcf\xb6\xea"
2220 "\x22\x64\x47\xaa\x8f\x75\x13\xbf"
2221 "\x9f\xc2\xc3\xf0\xc9\x56\xc5\x7a"
2222 "\x71\x63\x2e\x89\x7b\x1e\x12\xca"
2223 "\xe2\x5f\xaf\xd8\xa4\xf8\xc9\x7a"
2224 "\xd6\xf9\x21\x31\x62\x44\x45\xa6"
2225 "\xd6\xbc\x5a\xd3\x2d\x54\x43\xcc"
2226 "\x9d\xde\xa5\x70\xe9\x42\x45\x8a"
2227 "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19",
2228 .ilen = 128,
2229 .result = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
2230 "\x53\x20\x63\x65\x65\x72\x73\x74"
2231 "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
2232 "\x20\x79\x65\x53\x72\x63\x74\x65"
2233 "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
2234 "\x79\x6e\x53\x20\x63\x65\x65\x72"
2235 "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
2236 "\x6e\x61\x20\x79\x65\x53\x72\x63"
2237 "\x74\x65\x20\x73\x6f\x54\x20\x6f"
2238 "\x61\x4d\x79\x6e\x53\x20\x63\x65"
2239 "\x65\x72\x73\x74\x54\x20\x6f\x6f"
2240 "\x4d\x20\x6e\x61\x20\x79\x65\x53"
2241 "\x72\x63\x74\x65\x20\x73\x6f\x54"
2242 "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
2243 "\x63\x65\x65\x72\x73\x74\x54\x20"
2244 "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
2245 .rlen = 128,
2246 },
2247};
2248
2249/*
2250 * Blowfish test vectors.
2251 */
2252#define BF_ENC_TEST_VECTORS 6
2253#define BF_DEC_TEST_VECTORS 6
2254#define BF_CBC_ENC_TEST_VECTORS 1
2255#define BF_CBC_DEC_TEST_VECTORS 1
2256
2257static struct cipher_testvec bf_enc_tv_template[] = {
2258 { /* DES test vectors from OpenSSL */
2259 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
2260 .klen = 8,
2261 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
2262 .ilen = 8,
2263 .result = "\x4e\xf9\x97\x45\x61\x98\xdd\x78",
2264 .rlen = 8,
2265 }, {
2266 .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e",
2267 .klen = 8,
2268 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2269 .ilen = 8,
2270 .result = "\xa7\x90\x79\x51\x08\xea\x3c\xae",
2271 .rlen = 8,
2272 }, {
2273 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2274 .klen = 8,
2275 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2276 .ilen = 8,
2277 .result = "\xe8\x7a\x24\x4e\x2c\xc8\x5e\x82",
2278 .rlen = 8,
2279 }, { /* Vary the keylength... */
2280 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2281 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f",
2282 .klen = 16,
2283 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2284 .ilen = 8,
2285 .result = "\x93\x14\x28\x87\xee\x3b\xe1\x5c",
2286 .rlen = 8,
2287 }, {
2288 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2289 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2290 "\x00\x11\x22\x33\x44",
2291 .klen = 21,
2292 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2293 .ilen = 8,
2294 .result = "\xe6\xf5\x1e\xd7\x9b\x9d\xb2\x1f",
2295 .rlen = 8,
2296 }, { /* Generated with bf488 */
2297 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2298 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2299 "\x00\x11\x22\x33\x44\x55\x66\x77"
2300 "\x04\x68\x91\x04\xc2\xfd\x3b\x2f"
2301 "\x58\x40\x23\x64\x1a\xba\x61\x76"
2302 "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e"
2303 "\xff\xff\xff\xff\xff\xff\xff\xff",
2304 .klen = 56,
2305 .input = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2306 .ilen = 8,
2307 .result = "\xc0\x45\x04\x01\x2e\x4e\x1f\x53",
2308 .rlen = 8,
2309 },
2310};
2311
2312static struct cipher_testvec bf_dec_tv_template[] = {
2313 { /* DES test vectors from OpenSSL */
2314 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
2315 .klen = 8,
2316 .input = "\x4e\xf9\x97\x45\x61\x98\xdd\x78",
2317 .ilen = 8,
2318 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
2319 .rlen = 8,
2320 }, {
2321 .key = "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e",
2322 .klen = 8,
2323 .input = "\xa7\x90\x79\x51\x08\xea\x3c\xae",
2324 .ilen = 8,
2325 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
2326 .rlen = 8,
2327 }, {
2328 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2329 .klen = 8,
2330 .input = "\xe8\x7a\x24\x4e\x2c\xc8\x5e\x82",
2331 .ilen = 8,
2332 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2333 .rlen = 8,
2334 }, { /* Vary the keylength... */
2335 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2336 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f",
2337 .klen = 16,
2338 .input = "\x93\x14\x28\x87\xee\x3b\xe1\x5c",
2339 .ilen = 8,
2340 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2341 .rlen = 8,
2342 }, {
2343 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2344 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2345 "\x00\x11\x22\x33\x44",
2346 .klen = 21,
2347 .input = "\xe6\xf5\x1e\xd7\x9b\x9d\xb2\x1f",
2348 .ilen = 8,
2349 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2350 .rlen = 8,
2351 }, { /* Generated with bf488, using OpenSSL, Libgcrypt and Nettle */
2352 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87"
2353 "\x78\x69\x5a\x4b\x3c\x2d\x1e\x0f"
2354 "\x00\x11\x22\x33\x44\x55\x66\x77"
2355 "\x04\x68\x91\x04\xc2\xfd\x3b\x2f"
2356 "\x58\x40\x23\x64\x1a\xba\x61\x76"
2357 "\x1f\x1f\x1f\x1f\x0e\x0e\x0e\x0e"
2358 "\xff\xff\xff\xff\xff\xff\xff\xff",
2359 .klen = 56,
2360 .input = "\xc0\x45\x04\x01\x2e\x4e\x1f\x53",
2361 .ilen = 8,
2362 .result = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2363 .rlen = 8,
2364 },
2365};
2366
2367static struct cipher_testvec bf_cbc_enc_tv_template[] = {
2368 { /* From OpenSSL */
2369 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2370 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2371 .klen = 16,
2372 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2373 .input = "\x37\x36\x35\x34\x33\x32\x31\x20"
2374 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2375 "\x68\x65\x20\x74\x69\x6d\x65\x20"
2376 "\x66\x6f\x72\x20\x00\x00\x00\x00",
2377 .ilen = 32,
2378 .result = "\x6b\x77\xb4\xd6\x30\x06\xde\xe6"
2379 "\x05\xb1\x56\xe2\x74\x03\x97\x93"
2380 "\x58\xde\xb9\xe7\x15\x46\x16\xd9"
2381 "\x59\xf1\x65\x2b\xd5\xff\x92\xcc",
2382 .rlen = 32,
2383 },
2384};
2385
2386static struct cipher_testvec bf_cbc_dec_tv_template[] = {
2387 { /* From OpenSSL */
2388 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2389 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
2390 .klen = 16,
2391 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
2392 .input = "\x6b\x77\xb4\xd6\x30\x06\xde\xe6"
2393 "\x05\xb1\x56\xe2\x74\x03\x97\x93"
2394 "\x58\xde\xb9\xe7\x15\x46\x16\xd9"
2395 "\x59\xf1\x65\x2b\xd5\xff\x92\xcc",
2396 .ilen = 32,
2397 .result = "\x37\x36\x35\x34\x33\x32\x31\x20"
2398 "\x4e\x6f\x77\x20\x69\x73\x20\x74"
2399 "\x68\x65\x20\x74\x69\x6d\x65\x20"
2400 "\x66\x6f\x72\x20\x00\x00\x00\x00",
2401 .rlen = 32,
2402 },
2403};
2404
2405/*
2406 * Twofish test vectors.
2407 */
2408#define TF_ENC_TEST_VECTORS 3
2409#define TF_DEC_TEST_VECTORS 3
2410#define TF_CBC_ENC_TEST_VECTORS 4
2411#define TF_CBC_DEC_TEST_VECTORS 4
2412
2413static struct cipher_testvec tf_enc_tv_template[] = {
2414 {
2415 .key = zeroed_string,
2416 .klen = 16,
2417 .input = zeroed_string,
2418 .ilen = 16,
2419 .result = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2420 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2421 .rlen = 16,
2422 }, {
2423 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2424 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2425 "\x00\x11\x22\x33\x44\x55\x66\x77",
2426 .klen = 24,
2427 .input = zeroed_string,
2428 .ilen = 16,
2429 .result = "\xcf\xd1\xd2\xe5\xa9\xbe\x9c\xdf"
2430 "\x50\x1f\x13\xb8\x92\xbd\x22\x48",
2431 .rlen = 16,
2432 }, {
2433 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2434 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2435 "\x00\x11\x22\x33\x44\x55\x66\x77"
2436 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2437 .klen = 32,
2438 .input = zeroed_string,
2439 .ilen = 16,
2440 .result = "\x37\x52\x7b\xe0\x05\x23\x34\xb8"
2441 "\x9f\x0c\xfc\xca\xe8\x7c\xfa\x20",
2442 .rlen = 16,
2443 },
2444};
2445
2446static struct cipher_testvec tf_dec_tv_template[] = {
2447 {
2448 .key = zeroed_string,
2449 .klen = 16,
2450 .input = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2451 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2452 .ilen = 16,
2453 .result = zeroed_string,
2454 .rlen = 16,
2455 }, {
2456 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2457 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2458 "\x00\x11\x22\x33\x44\x55\x66\x77",
2459 .klen = 24,
2460 .input = "\xcf\xd1\xd2\xe5\xa9\xbe\x9c\xdf"
2461 "\x50\x1f\x13\xb8\x92\xbd\x22\x48",
2462 .ilen = 16,
2463 .result = zeroed_string,
2464 .rlen = 16,
2465 }, {
2466 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
2467 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
2468 "\x00\x11\x22\x33\x44\x55\x66\x77"
2469 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2470 .klen = 32,
2471 .input = "\x37\x52\x7b\xe0\x05\x23\x34\xb8"
2472 "\x9f\x0c\xfc\xca\xe8\x7c\xfa\x20",
2473 .ilen = 16,
2474 .result = zeroed_string,
2475 .rlen = 16,
2476 },
2477};
2478
2479static struct cipher_testvec tf_cbc_enc_tv_template[] = {
2480 { /* Generated with Nettle */
2481 .key = zeroed_string,
2482 .klen = 16,
2483 .iv = zeroed_string,
2484 .input = zeroed_string,
2485 .ilen = 16,
2486 .result = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2487 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2488 .rlen = 16,
2489 }, {
2490 .key = zeroed_string,
2491 .klen = 16,
2492 .iv = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2493 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2494 .input = zeroed_string,
2495 .ilen = 16,
2496 .result = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2497 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2498 .rlen = 16,
2499 }, {
2500 .key = zeroed_string,
2501 .klen = 16,
2502 .iv = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2503 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2504 .input = zeroed_string,
2505 .ilen = 16,
2506 .result = "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2507 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2508 .rlen = 16,
2509 }, {
2510 .key = zeroed_string,
2511 .klen = 16,
2512 .iv = zeroed_string,
2513 .input = zeroed_string,
2514 .ilen = 48,
2515 .result = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2516 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a"
2517 "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2518 "\x86\xcb\x08\x6b\x78\x9f\x54\x19"
2519 "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2520 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2521 .rlen = 48,
2522 },
2523};
2524
2525static struct cipher_testvec tf_cbc_dec_tv_template[] = {
2526 { /* Reverse of the first four above */
2527 .key = zeroed_string,
2528 .klen = 16,
2529 .iv = zeroed_string,
2530 .input = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2531 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2532 .ilen = 16,
2533 .result = zeroed_string,
2534 .rlen = 16,
2535 }, {
2536 .key = zeroed_string,
2537 .klen = 16,
2538 .iv = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2539 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a",
2540 .input = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2541 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2542 .ilen = 16,
2543 .result = zeroed_string,
2544 .rlen = 16,
2545 }, {
2546 .key = zeroed_string,
2547 .klen = 16,
2548 .iv = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2549 "\x86\xcb\x08\x6b\x78\x9f\x54\x19",
2550 .input = "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2551 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2552 .ilen = 16,
2553 .result = zeroed_string,
2554 .rlen = 16,
2555 }, {
2556 .key = zeroed_string,
2557 .klen = 16,
2558 .iv = zeroed_string,
2559 .input = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32"
2560 "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a"
2561 "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e"
2562 "\x86\xcb\x08\x6b\x78\x9f\x54\x19"
2563 "\x05\xef\x8c\x61\xa8\x11\x58\x26"
2564 "\x34\xba\x5c\xb7\x10\x6a\xa6\x41",
2565 .ilen = 48,
2566 .result = zeroed_string,
2567 .rlen = 48,
2568 },
2569};
2570
2571/*
2572 * Serpent test vectors. These are backwards because Serpent writes
2573 * octet sequences in right-to-left mode.
2574 */
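/*
 * The "tnepres" templates further below cover the byte-swapped variant
 * of the same cipher ("serpent" spelled backwards); its vectors,
 * labelled with KeySize/PT/I notation, are presumably the standard
 * Serpent known-answer tests in their original byte order.
 */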
2575#define SERPENT_ENC_TEST_VECTORS 4
2576#define SERPENT_DEC_TEST_VECTORS 4
2577
2578#define TNEPRES_ENC_TEST_VECTORS 4
2579#define TNEPRES_DEC_TEST_VECTORS 4
2580
2581static struct cipher_testvec serpent_enc_tv_template[] = {
2582 {
2583 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2584 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2585 .ilen = 16,
2586 .result = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
2587 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
2588 .rlen = 16,
2589 }, {
2590 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2591 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2592 .klen = 16,
2593 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2594 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2595 .ilen = 16,
2596 .result = "\x4c\x7d\x8a\x32\x80\x72\xa2\x2c"
2597 "\x82\x3e\x4a\x1f\x3a\xcd\xa1\x6d",
2598 .rlen = 16,
2599 }, {
2600 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2601 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2602 "\x10\x11\x12\x13\x14\x15\x16\x17"
2603 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2604 .klen = 32,
2605 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2606 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2607 .ilen = 16,
2608 .result = "\xde\x26\x9f\xf8\x33\xe4\x32\xb8"
2609 "\x5b\x2e\x88\xd2\x70\x1c\xe7\x5c",
2610 .rlen = 16,
2611 }, {
2612 .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80",
2613 .klen = 16,
2614 .input = zeroed_string,
2615 .ilen = 16,
2616 .result = "\xdd\xd2\x6b\x98\xa5\xff\xd8\x2c"
2617 "\x05\x34\x5a\x9d\xad\xbf\xaf\x49",
2618 .rlen = 16,
2619 },
2620};
2621
2622static struct cipher_testvec tnepres_enc_tv_template[] = {
2623 { /* KeySize=128, PT=0, I=1 */
2624 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
2625 "\x00\x00\x00\x00\x00\x00\x00\x00",
2626 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
2627 "\x00\x00\x00\x00\x00\x00\x00\x00",
2628 .klen = 16,
2629 .ilen = 16,
2630 .result = "\x49\xaf\xbf\xad\x9d\x5a\x34\x05"
2631 "\x2c\xd8\xff\xa5\x98\x6b\xd2\xdd",
2632 .rlen = 16,
2633 }, { /* KeySize=192, PT=0, I=1 */
2634 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
2635 "\x00\x00\x00\x00\x00\x00\x00\x00"
2636 "\x00\x00\x00\x00\x00\x00\x00\x00",
2637 .klen = 24,
2638 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
2639 "\x00\x00\x00\x00\x00\x00\x00\x00",
2640 .ilen = 16,
2641 .result = "\xe7\x8e\x54\x02\xc7\x19\x55\x68"
2642 "\xac\x36\x78\xf7\xa3\xf6\x0c\x66",
2643 .rlen = 16,
2644 }, { /* KeySize=256, PT=0, I=1 */
2645 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
2646 "\x00\x00\x00\x00\x00\x00\x00\x00"
2647 "\x00\x00\x00\x00\x00\x00\x00\x00"
2648 "\x00\x00\x00\x00\x00\x00\x00\x00",
2649 .klen = 32,
2650 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
2651 "\x00\x00\x00\x00\x00\x00\x00\x00",
2652 .ilen = 16,
2653 .result = "\xab\xed\x96\xe7\x66\xbf\x28\xcb"
2654 "\xc0\xeb\xd2\x1a\x82\xef\x08\x19",
2655 .rlen = 16,
2656 }, { /* KeySize=256, I=257 */
2657 .key = "\x1f\x1e\x1d\x1c\x1b\x1a\x19\x18"
2658 "\x17\x16\x15\x14\x13\x12\x11\x10"
2659 "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08"
2660 "\x07\x06\x05\x04\x03\x02\x01\x00",
2661 .klen = 32,
2662 .input = "\x0f\x0e\x0d\x0c\x0b\x0a\x09\x08"
2663 "\x07\x06\x05\x04\x03\x02\x01\x00",
2664 .ilen = 16,
2665 .result = "\x5c\xe7\x1c\x70\xd2\x88\x2e\x5b"
2666 "\xb8\x32\xe4\x33\xf8\x9f\x26\xde",
2667 .rlen = 16,
2668 },
2669};
2670
2671
2672static struct cipher_testvec serpent_dec_tv_template[] = {
2673 {
2674 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
2675 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
2676 .ilen = 16,
2677 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2678 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2679 .rlen = 16,
2680 }, {
2681 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2682 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2683 .klen = 16,
2684 .input = "\x4c\x7d\x8a\x32\x80\x72\xa2\x2c"
2685 "\x82\x3e\x4a\x1f\x3a\xcd\xa1\x6d",
2686 .ilen = 16,
2687 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2688 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2689 .rlen = 16,
2690 }, {
2691 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2692 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2693 "\x10\x11\x12\x13\x14\x15\x16\x17"
2694 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2695 .klen = 32,
2696 .input = "\xde\x26\x9f\xf8\x33\xe4\x32\xb8"
2697 "\x5b\x2e\x88\xd2\x70\x1c\xe7\x5c",
2698 .ilen = 16,
2699 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2700 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2701 .rlen = 16,
2702 }, {
2703 .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80",
2704 .klen = 16,
2705 .input = "\xdd\xd2\x6b\x98\xa5\xff\xd8\x2c"
2706 "\x05\x34\x5a\x9d\xad\xbf\xaf\x49",
2707 .ilen = 16,
2708 .result = zeroed_string,
2709 .rlen = 16,
2710 },
2711};
2712
2713static struct cipher_testvec tnepres_dec_tv_template[] = {
2714 {
2715 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
2716 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
2717 .ilen = 16,
2718 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2719 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2720 .rlen = 16,
2721 }, {
2722 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2723 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2724 .klen = 16,
2725 .input = "\xea\xf4\xd7\xfc\xd8\x01\x34\x47"
2726 "\x81\x45\x0b\xfa\x0c\xd6\xad\x6e",
2727 .ilen = 16,
2728 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2729 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2730 .rlen = 16,
2731 }, {
2732 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2733 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2734 "\x10\x11\x12\x13\x14\x15\x16\x17"
2735 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2736 .klen = 32,
2737 .input = "\x64\xa9\x1a\x37\xed\x9f\xe7\x49"
2738 "\xa8\x4e\x76\xd6\xf5\x0d\x78\xee",
2739 .ilen = 16,
2740 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
2741 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2742 .rlen = 16,
2743 }, { /* KeySize=128, I=121 */
2744 .key = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80",
2745 .klen = 16,
2746 .input = "\x3d\xda\xbf\xc0\x06\xda\xab\x06"
2747 "\x46\x2a\xf4\xef\x81\x54\x4e\x26",
2748 .ilen = 16,
2749 .result = zeroed_string,
2750 .rlen = 16,
2751 },
2752};
2753
2754
2755/* Cast6 test vectors from RFC 2612 */
2756#define CAST6_ENC_TEST_VECTORS 3
2757#define CAST6_DEC_TEST_VECTORS 3
2758
2759static struct cipher_testvec cast6_enc_tv_template[] = {
2760 {
2761 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2762 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
2763 .klen = 16,
2764 .input = zeroed_string,
2765 .ilen = 16,
2766 .result = "\xc8\x42\xa0\x89\x72\xb4\x3d\x20"
2767 "\x83\x6c\x91\xd1\xb7\x53\x0f\x6b",
2768 .rlen = 16,
2769 }, {
2770 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2771 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2772 "\xba\xc7\x7a\x77\x17\x94\x28\x63",
2773 .klen = 24,
2774 .input = zeroed_string,
2775 .ilen = 16,
2776 .result = "\x1b\x38\x6c\x02\x10\xdc\xad\xcb"
2777 "\xdd\x0e\x41\xaa\x08\xa7\xa7\xe8",
2778 .rlen = 16,
2779 }, {
2780 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2781 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2782 "\x8d\x7c\x47\xce\x26\x49\x08\x46"
2783 "\x1c\xc1\xb5\x13\x7a\xe6\xb6\x04",
2784 .klen = 32,
2785 .input = zeroed_string,
2786 .ilen = 16,
2787 .result = "\x4f\x6a\x20\x38\x28\x68\x97\xb9"
2788 "\xc9\x87\x01\x36\x55\x33\x17\xfa",
2789 .rlen = 16,
2790 },
2791};
2792
2793static struct cipher_testvec cast6_dec_tv_template[] = {
2794 {
2795 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2796 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
2797 .klen = 16,
2798 .input = "\xc8\x42\xa0\x89\x72\xb4\x3d\x20"
2799 "\x83\x6c\x91\xd1\xb7\x53\x0f\x6b",
2800 .ilen = 16,
2801 .result = zeroed_string,
2802 .rlen = 16,
2803 }, {
2804 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2805 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2806 "\xba\xc7\x7a\x77\x17\x94\x28\x63",
2807 .klen = 24,
2808 .input = "\x1b\x38\x6c\x02\x10\xdc\xad\xcb"
2809 "\xdd\x0e\x41\xaa\x08\xa7\xa7\xe8",
2810 .ilen = 16,
2811 .result = zeroed_string,
2812 .rlen = 16,
2813 }, {
2814 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
2815 "\xbe\xd0\xac\x83\x94\x0a\xc2\x98"
2816 "\x8d\x7c\x47\xce\x26\x49\x08\x46"
2817 "\x1c\xc1\xb5\x13\x7a\xe6\xb6\x04",
2818 .klen = 32,
2819 .input = "\x4f\x6a\x20\x38\x28\x68\x97\xb9"
2820 "\xc9\x87\x01\x36\x55\x33\x17\xfa",
2821 .ilen = 16,
2822 .result = zeroed_string,
2823 .rlen = 16,
2824 },
2825};
2826
2827
2828/*
2829 * AES test vectors.
2830 */
2831#define AES_ENC_TEST_VECTORS 3
2832#define AES_DEC_TEST_VECTORS 3
2833#define AES_CBC_ENC_TEST_VECTORS 4
2834#define AES_CBC_DEC_TEST_VECTORS 4
2835#define AES_LRW_ENC_TEST_VECTORS 8
2836#define AES_LRW_DEC_TEST_VECTORS 8
2837#define AES_XTS_ENC_TEST_VECTORS 4
2838#define AES_XTS_DEC_TEST_VECTORS 4
2839#define AES_CTR_ENC_TEST_VECTORS 7
2840#define AES_CTR_DEC_TEST_VECTORS 6
2841#define AES_GCM_ENC_TEST_VECTORS 9
2842#define AES_GCM_DEC_TEST_VECTORS 8
2843#define AES_CCM_ENC_TEST_VECTORS 7
2844#define AES_CCM_DEC_TEST_VECTORS 7
2845
2846static struct cipher_testvec aes_enc_tv_template[] = {
2847 { /* From FIPS-197 */
2848 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2849 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2850 .klen = 16,
2851 .input = "\x00\x11\x22\x33\x44\x55\x66\x77"
2852 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2853 .ilen = 16,
2854 .result = "\x69\xc4\xe0\xd8\x6a\x7b\x04\x30"
2855 "\xd8\xcd\xb7\x80\x70\xb4\xc5\x5a",
2856 .rlen = 16,
2857 }, {
2858 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2859 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2860 "\x10\x11\x12\x13\x14\x15\x16\x17",
2861 .klen = 24,
2862 .input = "\x00\x11\x22\x33\x44\x55\x66\x77"
2863 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2864 .ilen = 16,
2865 .result = "\xdd\xa9\x7c\xa4\x86\x4c\xdf\xe0"
2866 "\x6e\xaf\x70\xa0\xec\x0d\x71\x91",
2867 .rlen = 16,
2868 }, {
2869 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2870 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2871 "\x10\x11\x12\x13\x14\x15\x16\x17"
2872 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2873 .klen = 32,
2874 .input = "\x00\x11\x22\x33\x44\x55\x66\x77"
2875 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2876 .ilen = 16,
2877 .result = "\x8e\xa2\xb7\xca\x51\x67\x45\xbf"
2878 "\xea\xfc\x49\x90\x4b\x49\x60\x89",
2879 .rlen = 16,
2880 },
2881};
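Since these are the FIPS-197 Appendix C examples, any independent AES implementation can reproduce them. A minimal user-space sketch, assuming OpenSSL's EVP interface is available (not part of this file), re-derives the AES-128 ciphertext:

/* Hypothetical user-space check of the FIPS-197 AES-128 vector; not kernel code. */
#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const unsigned char key[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
	};
	static const unsigned char pt[16] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
	};
	static const unsigned char expect[16] = {
		0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
		0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a
	};
	unsigned char ct[32];
	int len = 0, total = 0;
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();

	EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, key, NULL);
	EVP_CIPHER_CTX_set_padding(ctx, 0);	/* single raw block, no padding */
	EVP_EncryptUpdate(ctx, ct, &len, pt, sizeof(pt));
	total = len;
	EVP_EncryptFinal_ex(ctx, ct + total, &len);
	total += len;
	EVP_CIPHER_CTX_free(ctx);

	puts(total == 16 && !memcmp(ct, expect, 16) ? "match" : "MISMATCH");
	return 0;
}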
2882
2883static struct cipher_testvec aes_dec_tv_template[] = {
2884 { /* From FIPS-197 */
2885 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2886 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2887 .klen = 16,
2888 .input = "\x69\xc4\xe0\xd8\x6a\x7b\x04\x30"
2889 "\xd8\xcd\xb7\x80\x70\xb4\xc5\x5a",
2890 .ilen = 16,
2891 .result = "\x00\x11\x22\x33\x44\x55\x66\x77"
2892 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2893 .rlen = 16,
2894 }, {
2895 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2896 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2897 "\x10\x11\x12\x13\x14\x15\x16\x17",
2898 .klen = 24,
2899 .input = "\xdd\xa9\x7c\xa4\x86\x4c\xdf\xe0"
2900 "\x6e\xaf\x70\xa0\xec\x0d\x71\x91",
2901 .ilen = 16,
2902 .result = "\x00\x11\x22\x33\x44\x55\x66\x77"
2903 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2904 .rlen = 16,
2905 }, {
2906 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
2907 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2908 "\x10\x11\x12\x13\x14\x15\x16\x17"
2909 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2910 .klen = 32,
2911 .input = "\x8e\xa2\xb7\xca\x51\x67\x45\xbf"
2912 "\xea\xfc\x49\x90\x4b\x49\x60\x89",
2913 .ilen = 16,
2914 .result = "\x00\x11\x22\x33\x44\x55\x66\x77"
2915 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
2916 .rlen = 16,
2917 },
2918};
2919
2920static struct cipher_testvec aes_cbc_enc_tv_template[] = {
2921 { /* From RFC 3602 */
2922 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
2923 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
2924 .klen = 16,
2925 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
2926 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
2927 .input = "Single block msg",
2928 .ilen = 16,
2929 .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
2930 "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
2931 .rlen = 16,
2932 }, {
2933 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
2934 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
2935 .klen = 16,
2936 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
2937 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
2938 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
2939 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
2940 "\x10\x11\x12\x13\x14\x15\x16\x17"
2941 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
2942 .ilen = 32,
2943 .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
2944 "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
2945 "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
2946 "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1",
2947 .rlen = 32,
2948 }, { /* From NIST SP800-38A */
2949 .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
2950 "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
2951 "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
2952 .klen = 24,
2953 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2954 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2955 .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
2956 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
2957 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
2958 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
2959 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
2960 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
2961 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
2962 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
2963 .ilen = 64,
2964 .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
2965 "\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
2966 "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
2967 "\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
2968 "\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
2969 "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
2970 "\x08\xb0\xe2\x79\x88\x59\x88\x81"
2971 "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd",
2972 .rlen = 64,
2973 }, {
2974 .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
2975 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
2976 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
2977 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
2978 .klen = 32,
2979 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
2980 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
2981 .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
2982 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
2983 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
2984 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
2985 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
2986 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
2987 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
2988 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
2989 .ilen = 64,
2990 .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
2991 "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
2992 "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
2993 "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
2994 "\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
2995 "\xa5\x30\xe2\x63\x04\x23\x14\x61"
2996 "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
2997 "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
2998 .rlen = 64,
2999 },
3000};
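/*
 * A minimal sketch (not the tcrypt driver's actual test loop) of how one of
 * the cbc(aes) encryption vectors above could be pushed through the
 * blkcipher API of this kernel generation.  The helper name is made up for
 * illustration and error handling is trimmed.
 */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static int check_one_cbc_vector(const struct cipher_testvec *tv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg_in, sg_out;
	char *out;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	out = kmalloc(tv->ilen, GFP_KERNEL);
	if (!out) {
		crypto_free_blkcipher(tfm);
		return -ENOMEM;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	/* Key and IV come straight from the testvec fields. */
	err = crypto_blkcipher_setkey(tfm, tv->key, tv->klen);
	if (!err) {
		crypto_blkcipher_set_iv(tfm, tv->iv,
					crypto_blkcipher_ivsize(tfm));

		sg_init_one(&sg_in, tv->input, tv->ilen);
		sg_init_one(&sg_out, out, tv->ilen);

		err = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in,
					       tv->ilen);
		if (!err && memcmp(out, tv->result, tv->rlen))
			err = -EINVAL;	/* known-answer mismatch */
	}

	kfree(out);
	crypto_free_blkcipher(tfm);
	return err;
}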
3001
3002static struct cipher_testvec aes_cbc_dec_tv_template[] = {
3003 { /* From RFC 3602 */
3004 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
3005 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
3006 .klen = 16,
3007 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
3008 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
3009 .input = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
3010 "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
3011 .ilen = 16,
3012 .result = "Single block msg",
3013 .rlen = 16,
3014 }, {
3015 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
3016 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
3017 .klen = 16,
3018 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
3019 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
3020 .input = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
3021 "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
3022 "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
3023 "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1",
3024 .ilen = 32,
3025 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
3026 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3027 "\x10\x11\x12\x13\x14\x15\x16\x17"
3028 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
3029 .rlen = 32,
3030 }, { /* From NIST SP800-38A */
3031 .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
3032 "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
3033 "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
3034 .klen = 24,
3035 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
3036 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
3037 .input = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
3038 "\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
3039 "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
3040 "\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
3041 "\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
3042 "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
3043 "\x08\xb0\xe2\x79\x88\x59\x88\x81"
3044 "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd",
3045 .ilen = 64,
3046 .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
3047 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
3048 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
3049 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
3050 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
3051 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
3052 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
3053 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
3054 .rlen = 64,
3055 }, {
3056 .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
3057 "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
3058 "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
3059 "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
3060 .klen = 32,
3061 .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
3062 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
3063 .input = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
3064 "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
3065 "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
3066 "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
3067 "\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
3068 "\xa5\x30\xe2\x63\x04\x23\x14\x61"
3069 "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
3070 "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
3071 .ilen = 64,
3072 .result = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
3073 "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
3074 "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
3075 "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
3076 "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
3077 "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
3078 "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
3079 "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
3080 .rlen = 64,
3081 },
3082};
3083
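/*
 * Note on the LRW vectors below: lrw(aes) expects the 16-byte tweak key
 * appended to the AES key, so .klen covers both parts
 * (32 = 16+16, 40 = 24+16, 48 = 32+16) and .iv carries the tweak value.
 */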
3084static struct cipher_testvec aes_lrw_enc_tv_template[] = {
3085 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
3086 { /* LRW-32-AES 1 */
3087 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
3088 "\x4c\x26\x84\x14\xb5\x68\x01\x85"
3089 "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
3090 "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
3091 .klen = 32,
3092 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3093 "\x00\x00\x00\x00\x00\x00\x00\x01",
3094 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3095 "\x38\x39\x41\x42\x43\x44\x45\x46",
3096 .ilen = 16,
3097 .result = "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
3098 "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
3099 .rlen = 16,
3100 }, { /* LRW-32-AES 2 */
3101 .key = "\x59\x70\x47\x14\xf5\x57\x47\x8c"
3102 "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
3103 "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
3104 "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
3105 .klen = 32,
3106 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3107 "\x00\x00\x00\x00\x00\x00\x00\x02",
3108 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3109 "\x38\x39\x41\x42\x43\x44\x45\x46",
3110 .ilen = 16,
3111 .result = "\x00\xc8\x2b\xae\x95\xbb\xcd\xe5"
3112 "\x27\x4f\x07\x69\xb2\x60\xe1\x36",
3113 .rlen = 16,
3114 }, { /* LRW-32-AES 3 */
3115 .key = "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
3116 "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
3117 "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
3118 "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
3119 .klen = 32,
3120 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3121 "\x00\x00\x00\x02\x00\x00\x00\x00",
3122 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3123 "\x38\x39\x41\x42\x43\x44\x45\x46",
3124 .ilen = 16,
3125 .result = "\x76\x32\x21\x83\xed\x8f\xf1\x82"
3126 "\xf9\x59\x62\x03\x69\x0e\x5e\x01",
3127 .rlen = 16,
3128 }, { /* LRW-32-AES 4 */
3129 .key = "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
3130 "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
3131 "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
3132 "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
3133 "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
3134 .klen = 40,
3135 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3136 "\x00\x00\x00\x00\x00\x00\x00\x01",
3137 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3138 "\x38\x39\x41\x42\x43\x44\x45\x46",
3139 .ilen = 16,
3140 .result = "\x9c\x0f\x15\x2f\x55\xa2\xd8\xf0"
3141 "\xd6\x7b\x8f\x9e\x28\x22\xbc\x41",
3142 .rlen = 16,
3143 }, { /* LRW-32-AES 5 */
3144 .key = "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
3145 "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
3146 "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
3147 "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
3148 "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
3149 .klen = 40,
3150 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3151 "\x00\x00\x00\x02\x00\x00\x00\x00",
3152 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3153 "\x38\x39\x41\x42\x43\x44\x45\x46",
3154 .ilen = 16,
3155 .result = "\xd4\x27\x6a\x7f\x14\x91\x3d\x65"
3156 "\xc8\x60\x48\x02\x87\xe3\x34\x06",
3157 .rlen = 16,
3158 }, { /* LRW-32-AES 6 */
3159 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3160 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3161 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3162 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3163 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3164 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3165 .klen = 48,
3166 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3167 "\x00\x00\x00\x00\x00\x00\x00\x01",
3168 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3169 "\x38\x39\x41\x42\x43\x44\x45\x46",
3170 .ilen = 16,
3171 .result = "\xbd\x06\xb8\xe1\xdb\x98\x89\x9e"
3172 "\xc4\x98\xe4\x91\xcf\x1c\x70\x2b",
3173 .rlen = 16,
3174 }, { /* LRW-32-AES 7 */
3175 .key = "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
3176 "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
3177 "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
3178 "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
3179 "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
3180 "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
3181 .klen = 48,
3182 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3183 "\x00\x00\x00\x02\x00\x00\x00\x00",
3184 .input = "\x30\x31\x32\x33\x34\x35\x36\x37"
3185 "\x38\x39\x41\x42\x43\x44\x45\x46",
3186 .ilen = 16,
3187 .result = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
3188 "\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
3189 .rlen = 16,
3190 }, {
3191/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
3192 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3193 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3194 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3195 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3196 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3197 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3198 .klen = 48,
3199 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3200 "\x00\x00\x00\x00\x00\x00\x00\x01",
3201 .input = "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
3202 "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
3203 "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
3204 "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
3205 "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
3206 "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
3207 "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
3208 "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
3209 "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
3210 "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
3211 "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
3212 "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
3213 "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
3214 "\x4c\x96\x12\xed\x7c\x92\x03\x01"
3215 "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
3216 "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
3217 "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
3218 "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
3219 "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
3220 "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
3221 "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
3222 "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
3223 "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
3224 "\x76\x12\x73\x44\x1a\x56\xd7\x72"
3225 "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
3226 "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
3227 "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
3228 "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
3229 "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
3230 "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
3231 "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
3232 "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
3233 "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
3234 "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
3235 "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
3236 "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
3237 "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
3238 "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
3239 "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
3240 "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
3241 "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
3242 "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
3243 "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
3244 "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
3245 "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
3246 "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
3247 "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
3248 "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
3249 "\x62\x73\x65\xfd\x46\x63\x25\x3d"
3250 "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
3251 "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
3252 "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
3253 "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
3254 "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
3255 "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
3256 "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
3257 "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
3258 "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
3259 "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
3260 "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
3261 "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
3262 "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
3263 "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
3264 "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
3265 .ilen = 512,
3266 .result = "\x1a\x1d\xa9\x30\xad\xf9\x2f\x9b"
3267 "\xb6\x1d\xae\xef\xf0\x2f\xf8\x5a"
3268 "\x39\x3c\xbf\x2a\xb2\x45\xb2\x23"
3269 "\x1b\x63\x3c\xcf\xaa\xbe\xcf\x4e"
3270 "\xfa\xe8\x29\xc2\x20\x68\x2b\x3c"
3271 "\x2e\x8b\xf7\x6e\x25\xbd\xe3\x3d"
3272 "\x66\x27\xd6\xaf\xd6\x64\x3e\xe3"
3273 "\xe8\x58\x46\x97\x39\x51\x07\xde"
3274 "\xcb\x37\xbc\xa9\xc0\x5f\x75\xc3"
3275 "\x0e\x84\x23\x1d\x16\xd4\x1c\x59"
3276 "\x9c\x1a\x02\x55\xab\x3a\x97\x1d"
3277 "\xdf\xdd\xc7\x06\x51\xd7\x70\xae"
3278 "\x23\xc6\x8c\xf5\x1e\xa0\xe5\x82"
3279 "\xb8\xb2\xbf\x04\xa0\x32\x8e\x68"
3280 "\xeb\xaf\x6e\x2d\x94\x22\x2f\xce"
3281 "\x4c\xb5\x59\xe2\xa2\x2f\xa0\x98"
3282 "\x1a\x97\xc6\xd4\xb5\x00\x59\xf2"
3283 "\x84\x14\x72\xb1\x9a\x6e\xa3\x7f"
3284 "\xea\x20\xe7\xcb\x65\x77\x3a\xdf"
3285 "\xc8\x97\x67\x15\xc2\x2a\x27\xcc"
3286 "\x18\x55\xa1\x24\x0b\x24\x24\xaf"
3287 "\x5b\xec\x68\xb8\xc8\xf5\xba\x63"
3288 "\xff\xed\x89\xce\xd5\x3d\x88\xf3"
3289 "\x25\xef\x05\x7c\x3a\xef\xeb\xd8"
3290 "\x7a\x32\x0d\xd1\x1e\x58\x59\x99"
3291 "\x90\x25\xb5\x26\xb0\xe3\x2b\x6c"
3292 "\x4c\xa9\x8b\x84\x4f\x5e\x01\x50"
3293 "\x41\x30\x58\xc5\x62\x74\x52\x1d"
3294 "\x45\x24\x6a\x42\x64\x4f\x97\x1c"
3295 "\xa8\x66\xb5\x6d\x79\xd4\x0d\x48"
3296 "\xc5\x5f\xf3\x90\x32\xdd\xdd\xe1"
3297 "\xe4\xa9\x9f\xfc\xc3\x52\x5a\x46"
3298 "\xe4\x81\x84\x95\x36\x59\x7a\x6b"
3299 "\xaa\xb3\x60\xad\xce\x9f\x9f\x28"
3300 "\xe0\x01\x75\x22\xc4\x4e\xa9\x62"
3301 "\x5c\x62\x0d\x00\xcb\x13\xe8\x43"
3302 "\x72\xd4\x2d\x53\x46\xb5\xd1\x16"
3303 "\x22\x18\xdf\x34\x33\xf5\xd6\x1c"
3304 "\xb8\x79\x78\x97\x94\xff\x72\x13"
3305 "\x4c\x27\xfc\xcb\xbf\x01\x53\xa6"
3306 "\xb4\x50\x6e\xde\xdf\xb5\x43\xa4"
3307 "\x59\xdf\x52\xf9\x7c\xe0\x11\x6f"
3308 "\x2d\x14\x8e\x24\x61\x2c\xe1\x17"
3309 "\xcc\xce\x51\x0c\x19\x8a\x82\x30"
3310 "\x94\xd5\x3d\x6a\x53\x06\x5e\xbd"
3311 "\xb7\xeb\xfa\xfd\x27\x51\xde\x85"
3312 "\x1e\x86\x53\x11\x53\x94\x00\xee"
3313 "\x2b\x8c\x08\x2a\xbf\xdd\xae\x11"
3314 "\xcb\x1e\xa2\x07\x9a\x80\xcf\x62"
3315 "\x9b\x09\xdc\x95\x3c\x96\x8e\xb1"
3316 "\x09\xbd\xe4\xeb\xdb\xca\x70\x7a"
3317 "\x9e\xfa\x31\x18\x45\x3c\x21\x33"
3318 "\xb0\xb3\x2b\xea\xf3\x71\x2d\xe1"
3319 "\x03\xad\x1b\x48\xd4\x67\x27\xf0"
3320 "\x62\xe4\x3d\xfb\x9b\x08\x76\xe7"
3321 "\xdd\x2b\x01\x39\x04\x5a\x58\x7a"
3322 "\xf7\x11\x90\xec\xbd\x51\x5c\x32"
3323 "\x6b\xd7\x35\x39\x02\x6b\xf2\xa6"
3324 "\xd0\x0d\x07\xe1\x06\xc4\x5b\x7d"
3325 "\xe4\x6a\xd7\xee\x15\x1f\x83\xb4"
3326 "\xa3\xa7\x5e\xc3\x90\xb7\xef\xd3"
3327 "\xb7\x4f\xf8\x92\x4c\xb7\x3c\x29"
3328 "\xcd\x7e\x2b\x5d\x43\xea\x42\xe7"
3329 "\x74\x3f\x7d\x58\x88\x75\xde\x3e",
3330 .rlen = 512,
3331 }
3332};
3333
3334static struct cipher_testvec aes_lrw_dec_tv_template[] = {
3335 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
3336	/* same as enc vectors with input and result swapped */
3337 { /* LRW-32-AES 1 */
3338 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
3339 "\x4c\x26\x84\x14\xb5\x68\x01\x85"
3340 "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
3341 "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
3342 .klen = 32,
3343 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3344 "\x00\x00\x00\x00\x00\x00\x00\x01",
3345 .input = "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
3346 "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
3347 .ilen = 16,
3348 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3349 "\x38\x39\x41\x42\x43\x44\x45\x46",
3350 .rlen = 16,
3351 }, { /* LRW-32-AES 2 */
3352 .key = "\x59\x70\x47\x14\xf5\x57\x47\x8c"
3353 "\xd7\x79\xe8\x0f\x54\x88\x79\x44"
3354 "\x0d\x48\xf0\xb7\xb1\x5a\x53\xea"
3355 "\x1c\xaa\x6b\x29\xc2\xca\xfb\xaf",
3356 .klen = 32,
3357 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3358 "\x00\x00\x00\x00\x00\x00\x00\x02",
3359 .input = "\x00\xc8\x2b\xae\x95\xbb\xcd\xe5"
3360 "\x27\x4f\x07\x69\xb2\x60\xe1\x36",
3361 .ilen = 16,
3362 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3363 "\x38\x39\x41\x42\x43\x44\x45\x46",
3364 .rlen = 16,
3365 }, { /* LRW-32-AES 3 */
3366 .key = "\xd8\x2a\x91\x34\xb2\x6a\x56\x50"
3367 "\x30\xfe\x69\xe2\x37\x7f\x98\x47"
3368 "\xcd\xf9\x0b\x16\x0c\x64\x8f\xb6"
3369 "\xb0\x0d\x0d\x1b\xae\x85\x87\x1f",
3370 .klen = 32,
3371 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3372 "\x00\x00\x00\x02\x00\x00\x00\x00",
3373 .input = "\x76\x32\x21\x83\xed\x8f\xf1\x82"
3374 "\xf9\x59\x62\x03\x69\x0e\x5e\x01",
3375 .ilen = 16,
3376 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3377 "\x38\x39\x41\x42\x43\x44\x45\x46",
3378 .rlen = 16,
3379 }, { /* LRW-32-AES 4 */
3380 .key = "\x0f\x6a\xef\xf8\xd3\xd2\xbb\x15"
3381 "\x25\x83\xf7\x3c\x1f\x01\x28\x74"
3382 "\xca\xc6\xbc\x35\x4d\x4a\x65\x54"
3383 "\x90\xae\x61\xcf\x7b\xae\xbd\xcc"
3384 "\xad\xe4\x94\xc5\x4a\x29\xae\x70",
3385 .klen = 40,
3386 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3387 "\x00\x00\x00\x00\x00\x00\x00\x01",
3388 .input = "\x9c\x0f\x15\x2f\x55\xa2\xd8\xf0"
3389 "\xd6\x7b\x8f\x9e\x28\x22\xbc\x41",
3390 .ilen = 16,
3391 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3392 "\x38\x39\x41\x42\x43\x44\x45\x46",
3393 .rlen = 16,
3394 }, { /* LRW-32-AES 5 */
3395 .key = "\x8a\xd4\xee\x10\x2f\xbd\x81\xff"
3396 "\xf8\x86\xce\xac\x93\xc5\xad\xc6"
3397 "\xa0\x19\x07\xc0\x9d\xf7\xbb\xdd"
3398 "\x52\x13\xb2\xb7\xf0\xff\x11\xd8"
3399 "\xd6\x08\xd0\xcd\x2e\xb1\x17\x6f",
3400 .klen = 40,
3401 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3402 "\x00\x00\x00\x02\x00\x00\x00\x00",
3403 .input = "\xd4\x27\x6a\x7f\x14\x91\x3d\x65"
3404 "\xc8\x60\x48\x02\x87\xe3\x34\x06",
3405 .ilen = 16,
3406 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3407 "\x38\x39\x41\x42\x43\x44\x45\x46",
3408 .rlen = 16,
3409 }, { /* LRW-32-AES 6 */
3410 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3411 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3412 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3413 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3414 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3415 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3416 .klen = 48,
3417 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3418 "\x00\x00\x00\x00\x00\x00\x00\x01",
3419 .input = "\xbd\x06\xb8\xe1\xdb\x98\x89\x9e"
3420 "\xc4\x98\xe4\x91\xcf\x1c\x70\x2b",
3421 .ilen = 16,
3422 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3423 "\x38\x39\x41\x42\x43\x44\x45\x46",
3424 .rlen = 16,
3425 }, { /* LRW-32-AES 7 */
3426 .key = "\xfb\x76\x15\xb2\x3d\x80\x89\x1d"
3427 "\xd4\x70\x98\x0b\xc7\x95\x84\xc8"
3428 "\xb2\xfb\x64\xce\x60\x97\x87\x8d"
3429 "\x17\xfc\xe4\x5a\x49\xe8\x30\xb7"
3430 "\x6e\x78\x17\xe7\x2d\x5e\x12\xd4"
3431 "\x60\x64\x04\x7a\xf1\x2f\x9e\x0c",
3432 .klen = 48,
3433 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3434 "\x00\x00\x00\x02\x00\x00\x00\x00",
3435 .input = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
3436 "\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
3437 .ilen = 16,
3438 .result = "\x30\x31\x32\x33\x34\x35\x36\x37"
3439 "\x38\x39\x41\x42\x43\x44\x45\x46",
3440 .rlen = 16,
3441 }, {
3442/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
3443 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
3444 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
3445 "\xfe\xf1\xa9\xf3\x7b\xbc\x8d\x21"
3446 "\xa7\x9c\x21\xf8\xcb\x90\x02\x89"
3447 "\xa8\x45\x34\x8e\xc8\xc5\xb5\xf1"
3448 "\x26\xf5\x0e\x76\xfe\xfd\x1b\x1e",
3449 .klen = 48,
3450 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3451 "\x00\x00\x00\x00\x00\x00\x00\x01",
3452 .input = "\x1a\x1d\xa9\x30\xad\xf9\x2f\x9b"
3453 "\xb6\x1d\xae\xef\xf0\x2f\xf8\x5a"
3454 "\x39\x3c\xbf\x2a\xb2\x45\xb2\x23"
3455 "\x1b\x63\x3c\xcf\xaa\xbe\xcf\x4e"
3456 "\xfa\xe8\x29\xc2\x20\x68\x2b\x3c"
3457 "\x2e\x8b\xf7\x6e\x25\xbd\xe3\x3d"
3458 "\x66\x27\xd6\xaf\xd6\x64\x3e\xe3"
3459 "\xe8\x58\x46\x97\x39\x51\x07\xde"
3460 "\xcb\x37\xbc\xa9\xc0\x5f\x75\xc3"
3461 "\x0e\x84\x23\x1d\x16\xd4\x1c\x59"
3462 "\x9c\x1a\x02\x55\xab\x3a\x97\x1d"
3463 "\xdf\xdd\xc7\x06\x51\xd7\x70\xae"
3464 "\x23\xc6\x8c\xf5\x1e\xa0\xe5\x82"
3465 "\xb8\xb2\xbf\x04\xa0\x32\x8e\x68"
3466 "\xeb\xaf\x6e\x2d\x94\x22\x2f\xce"
3467 "\x4c\xb5\x59\xe2\xa2\x2f\xa0\x98"
3468 "\x1a\x97\xc6\xd4\xb5\x00\x59\xf2"
3469 "\x84\x14\x72\xb1\x9a\x6e\xa3\x7f"
3470 "\xea\x20\xe7\xcb\x65\x77\x3a\xdf"
3471 "\xc8\x97\x67\x15\xc2\x2a\x27\xcc"
3472 "\x18\x55\xa1\x24\x0b\x24\x24\xaf"
3473 "\x5b\xec\x68\xb8\xc8\xf5\xba\x63"
3474 "\xff\xed\x89\xce\xd5\x3d\x88\xf3"
3475 "\x25\xef\x05\x7c\x3a\xef\xeb\xd8"
3476 "\x7a\x32\x0d\xd1\x1e\x58\x59\x99"
3477 "\x90\x25\xb5\x26\xb0\xe3\x2b\x6c"
3478 "\x4c\xa9\x8b\x84\x4f\x5e\x01\x50"
3479 "\x41\x30\x58\xc5\x62\x74\x52\x1d"
3480 "\x45\x24\x6a\x42\x64\x4f\x97\x1c"
3481 "\xa8\x66\xb5\x6d\x79\xd4\x0d\x48"
3482 "\xc5\x5f\xf3\x90\x32\xdd\xdd\xe1"
3483 "\xe4\xa9\x9f\xfc\xc3\x52\x5a\x46"
3484 "\xe4\x81\x84\x95\x36\x59\x7a\x6b"
3485 "\xaa\xb3\x60\xad\xce\x9f\x9f\x28"
3486 "\xe0\x01\x75\x22\xc4\x4e\xa9\x62"
3487 "\x5c\x62\x0d\x00\xcb\x13\xe8\x43"
3488 "\x72\xd4\x2d\x53\x46\xb5\xd1\x16"
3489 "\x22\x18\xdf\x34\x33\xf5\xd6\x1c"
3490 "\xb8\x79\x78\x97\x94\xff\x72\x13"
3491 "\x4c\x27\xfc\xcb\xbf\x01\x53\xa6"
3492 "\xb4\x50\x6e\xde\xdf\xb5\x43\xa4"
3493 "\x59\xdf\x52\xf9\x7c\xe0\x11\x6f"
3494 "\x2d\x14\x8e\x24\x61\x2c\xe1\x17"
3495 "\xcc\xce\x51\x0c\x19\x8a\x82\x30"
3496 "\x94\xd5\x3d\x6a\x53\x06\x5e\xbd"
3497 "\xb7\xeb\xfa\xfd\x27\x51\xde\x85"
3498 "\x1e\x86\x53\x11\x53\x94\x00\xee"
3499 "\x2b\x8c\x08\x2a\xbf\xdd\xae\x11"
3500 "\xcb\x1e\xa2\x07\x9a\x80\xcf\x62"
3501 "\x9b\x09\xdc\x95\x3c\x96\x8e\xb1"
3502 "\x09\xbd\xe4\xeb\xdb\xca\x70\x7a"
3503 "\x9e\xfa\x31\x18\x45\x3c\x21\x33"
3504 "\xb0\xb3\x2b\xea\xf3\x71\x2d\xe1"
3505 "\x03\xad\x1b\x48\xd4\x67\x27\xf0"
3506 "\x62\xe4\x3d\xfb\x9b\x08\x76\xe7"
3507 "\xdd\x2b\x01\x39\x04\x5a\x58\x7a"
3508 "\xf7\x11\x90\xec\xbd\x51\x5c\x32"
3509 "\x6b\xd7\x35\x39\x02\x6b\xf2\xa6"
3510 "\xd0\x0d\x07\xe1\x06\xc4\x5b\x7d"
3511 "\xe4\x6a\xd7\xee\x15\x1f\x83\xb4"
3512 "\xa3\xa7\x5e\xc3\x90\xb7\xef\xd3"
3513 "\xb7\x4f\xf8\x92\x4c\xb7\x3c\x29"
3514 "\xcd\x7e\x2b\x5d\x43\xea\x42\xe7"
3515 "\x74\x3f\x7d\x58\x88\x75\xde\x3e",
3516 .ilen = 512,
3517 .result = "\x05\x11\xb7\x18\xab\xc6\x2d\xac"
3518 "\x70\x5d\xf6\x22\x94\xcd\xe5\x6c"
3519 "\x17\x6b\xf6\x1c\xf0\xf3\x6e\xf8"
3520 "\x50\x38\x1f\x71\x49\xb6\x57\xd6"
3521 "\x8f\xcb\x8d\x6b\xe3\xa6\x29\x90"
3522 "\xfe\x2a\x62\x82\xae\x6d\x8b\xf6"
3523 "\xad\x1e\x9e\x20\x5f\x38\xbe\x04"
3524 "\xda\x10\x8e\xed\xa2\xa4\x87\xab"
3525 "\xda\x6b\xb4\x0c\x75\xba\xd3\x7c"
3526 "\xc9\xac\x42\x31\x95\x7c\xc9\x04"
3527 "\xeb\xd5\x6e\x32\x69\x8a\xdb\xa6"
3528 "\x15\xd7\x3f\x4f\x2f\x66\x69\x03"
3529 "\x9c\x1f\x54\x0f\xde\x1f\xf3\x65"
3530 "\x4c\x96\x12\xed\x7c\x92\x03\x01"
3531 "\x6f\xbc\x35\x93\xac\xf1\x27\xf1"
3532 "\xb4\x96\x82\x5a\x5f\xb0\xa0\x50"
3533 "\x89\xa4\x8e\x66\x44\x85\xcc\xfd"
3534 "\x33\x14\x70\xe3\x96\xb2\xc3\xd3"
3535 "\xbb\x54\x5a\x1a\xf9\x74\xa2\xc5"
3536 "\x2d\x64\x75\xdd\xb4\x54\xe6\x74"
3537 "\x8c\xd3\x9d\x9e\x86\xab\x51\x53"
3538 "\xb7\x93\x3e\x6f\xd0\x4e\x2c\x40"
3539 "\xf6\xa8\x2e\x3e\x9d\xf4\x66\xa5"
3540 "\x76\x12\x73\x44\x1a\x56\xd7\x72"
3541 "\x88\xcd\x21\x8c\x4c\x0f\xfe\xda"
3542 "\x95\xe0\x3a\xa6\xa5\x84\x46\xcd"
3543 "\xd5\x3e\x9d\x3a\xe2\x67\xe6\x60"
3544 "\x1a\xe2\x70\x85\x58\xc2\x1b\x09"
3545 "\xe1\xd7\x2c\xca\xad\xa8\x8f\xf9"
3546 "\xac\xb3\x0e\xdb\xca\x2e\xe2\xb8"
3547 "\x51\x71\xd9\x3c\x6c\xf1\x56\xf8"
3548 "\xea\x9c\xf1\xfb\x0c\xe6\xb7\x10"
3549 "\x1c\xf8\xa9\x7c\xe8\x53\x35\xc1"
3550 "\x90\x3e\x76\x4a\x74\xa4\x21\x2c"
3551 "\xf6\x2c\x4e\x0f\x94\x3a\x88\x2e"
3552 "\x41\x09\x6a\x33\x7d\xf6\xdd\x3f"
3553 "\x8d\x23\x31\x74\x84\xeb\x88\x6e"
3554 "\xcc\xb9\xbc\x22\x83\x19\x07\x22"
3555 "\xa5\x2d\xdf\xa5\xf3\x80\x85\x78"
3556 "\x84\x39\x6a\x6d\x6a\x99\x4f\xa5"
3557 "\x15\xfe\x46\xb0\xe4\x6c\xa5\x41"
3558 "\x3c\xce\x8f\x42\x60\x71\xa7\x75"
3559 "\x08\x40\x65\x8a\x82\xbf\xf5\x43"
3560 "\x71\x96\xa9\x4d\x44\x8a\x20\xbe"
3561 "\xfa\x4d\xbb\xc0\x7d\x31\x96\x65"
3562 "\xe7\x75\xe5\x3e\xfd\x92\x3b\xc9"
3563 "\x55\xbb\x16\x7e\xf7\xc2\x8c\xa4"
3564 "\x40\x1d\xe5\xef\x0e\xdf\xe4\x9a"
3565 "\x62\x73\x65\xfd\x46\x63\x25\x3d"
3566 "\x2b\xaf\xe5\x64\xfe\xa5\x5c\xcf"
3567 "\x24\xf3\xb4\xac\x64\xba\xdf\x4b"
3568 "\xc6\x96\x7d\x81\x2d\x8d\x97\xf7"
3569 "\xc5\x68\x77\x84\x32\x2b\xcc\x85"
3570 "\x74\x96\xf0\x12\x77\x61\xb9\xeb"
3571 "\x71\xaa\x82\xcb\x1c\xdb\x89\xc8"
3572 "\xc6\xb5\xe3\x5c\x7d\x39\x07\x24"
3573 "\xda\x39\x87\x45\xc0\x2b\xbb\x01"
3574 "\xac\xbc\x2a\x5c\x7f\xfc\xe8\xce"
3575 "\x6d\x9c\x6f\xed\xd3\xc1\xa1\xd6"
3576 "\xc5\x55\xa9\x66\x2f\xe1\xc8\x32"
3577 "\xa6\x5d\xa4\x3a\x98\x73\xe8\x45"
3578 "\xa4\xc7\xa8\xb4\xf6\x13\x03\xf6"
3579 "\xe9\x2e\xc4\x29\x0f\x84\xdb\xc4"
3580 "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
3581 .rlen = 512,
3582 }
3583};
3584
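/*
 * Note on the XTS vectors below: xts(aes) takes two equal-size keys back
 * to back (data key first, tweak key second), so .klen = 32 means AES-128
 * for each half, and .iv holds the 128-bit tweak from the IEEE P1619
 * test cases.
 */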
3585static struct cipher_testvec aes_xts_enc_tv_template[] = {
3586 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
3587 { /* XTS-AES 1 */
3588 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
3589 "\x00\x00\x00\x00\x00\x00\x00\x00"
3590 "\x00\x00\x00\x00\x00\x00\x00\x00"
3591 "\x00\x00\x00\x00\x00\x00\x00\x00",
3592 .klen = 32,
3593 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3594 "\x00\x00\x00\x00\x00\x00\x00\x00",
3595 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
3596 "\x00\x00\x00\x00\x00\x00\x00\x00"
3597 "\x00\x00\x00\x00\x00\x00\x00\x00"
3598 "\x00\x00\x00\x00\x00\x00\x00\x00",
3599 .ilen = 32,
3600 .result = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
3601 "\x9b\x9f\xe9\xa3\xea\xdd\xa6\x92"
3602 "\xcd\x43\xd2\xf5\x95\x98\xed\x85"
3603 "\x8c\x02\xc2\x65\x2f\xbf\x92\x2e",
3604 .rlen = 32,
3605 }, { /* XTS-AES 2 */
3606 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
3607 "\x11\x11\x11\x11\x11\x11\x11\x11"
3608 "\x22\x22\x22\x22\x22\x22\x22\x22"
3609 "\x22\x22\x22\x22\x22\x22\x22\x22",
3610 .klen = 32,
3611 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3612 "\x00\x00\x00\x00\x00\x00\x00\x00",
3613 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
3614 "\x44\x44\x44\x44\x44\x44\x44\x44"
3615 "\x44\x44\x44\x44\x44\x44\x44\x44"
3616 "\x44\x44\x44\x44\x44\x44\x44\x44",
3617 .ilen = 32,
3618 .result = "\xc4\x54\x18\x5e\x6a\x16\x93\x6e"
3619 "\x39\x33\x40\x38\xac\xef\x83\x8b"
3620 "\xfb\x18\x6f\xff\x74\x80\xad\xc4"
3621 "\x28\x93\x82\xec\xd6\xd3\x94\xf0",
3622 .rlen = 32,
3623 }, { /* XTS-AES 3 */
3624 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
3625 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
3626 "\x22\x22\x22\x22\x22\x22\x22\x22"
3627 "\x22\x22\x22\x22\x22\x22\x22\x22",
3628 .klen = 32,
3629 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3630 "\x00\x00\x00\x00\x00\x00\x00\x00",
3631 .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
3632 "\x44\x44\x44\x44\x44\x44\x44\x44"
3633 "\x44\x44\x44\x44\x44\x44\x44\x44"
3634 "\x44\x44\x44\x44\x44\x44\x44\x44",
3635 .ilen = 32,
3636 .result = "\xaf\x85\x33\x6b\x59\x7a\xfc\x1a"
3637 "\x90\x0b\x2e\xb2\x1e\xc9\x49\xd2"
3638 "\x92\xdf\x4c\x04\x7e\x0b\x21\x53"
3639 "\x21\x86\xa5\x97\x1a\x22\x7a\x89",
3640 .rlen = 32,
3641 }, { /* XTS-AES 4 */
3642 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
3643 "\x23\x53\x60\x28\x74\x71\x35\x26"
3644 "\x31\x41\x59\x26\x53\x58\x97\x93"
3645 "\x23\x84\x62\x64\x33\x83\x27\x95",
3646 .klen = 32,
3647 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3648 "\x00\x00\x00\x00\x00\x00\x00\x00",
3649 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
3650 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3651 "\x10\x11\x12\x13\x14\x15\x16\x17"
3652 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3653 "\x20\x21\x22\x23\x24\x25\x26\x27"
3654 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3655 "\x30\x31\x32\x33\x34\x35\x36\x37"
3656 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3657 "\x40\x41\x42\x43\x44\x45\x46\x47"
3658 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3659 "\x50\x51\x52\x53\x54\x55\x56\x57"
3660 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3661 "\x60\x61\x62\x63\x64\x65\x66\x67"
3662 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3663 "\x70\x71\x72\x73\x74\x75\x76\x77"
3664 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3665 "\x80\x81\x82\x83\x84\x85\x86\x87"
3666 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3667 "\x90\x91\x92\x93\x94\x95\x96\x97"
3668 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3669 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3670 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3671 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3672 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3673 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3674 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3675 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3676 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3677 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3678 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3679 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3680 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
3681 "\x00\x01\x02\x03\x04\x05\x06\x07"
3682 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3683 "\x10\x11\x12\x13\x14\x15\x16\x17"
3684 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3685 "\x20\x21\x22\x23\x24\x25\x26\x27"
3686 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3687 "\x30\x31\x32\x33\x34\x35\x36\x37"
3688 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3689 "\x40\x41\x42\x43\x44\x45\x46\x47"
3690 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3691 "\x50\x51\x52\x53\x54\x55\x56\x57"
3692 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3693 "\x60\x61\x62\x63\x64\x65\x66\x67"
3694 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3695 "\x70\x71\x72\x73\x74\x75\x76\x77"
3696 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3697 "\x80\x81\x82\x83\x84\x85\x86\x87"
3698 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3699 "\x90\x91\x92\x93\x94\x95\x96\x97"
3700 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3701 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3702 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3703 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3704 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3705 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3706 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3707 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3708 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3709 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3710 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3711 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3712 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
3713 .ilen = 512,
3714 .result = "\x27\xa7\x47\x9b\xef\xa1\xd4\x76"
3715 "\x48\x9f\x30\x8c\xd4\xcf\xa6\xe2"
3716 "\xa9\x6e\x4b\xbe\x32\x08\xff\x25"
3717 "\x28\x7d\xd3\x81\x96\x16\xe8\x9c"
3718 "\xc7\x8c\xf7\xf5\xe5\x43\x44\x5f"
3719 "\x83\x33\xd8\xfa\x7f\x56\x00\x00"
3720 "\x05\x27\x9f\xa5\xd8\xb5\xe4\xad"
3721 "\x40\xe7\x36\xdd\xb4\xd3\x54\x12"
3722 "\x32\x80\x63\xfd\x2a\xab\x53\xe5"
3723 "\xea\x1e\x0a\x9f\x33\x25\x00\xa5"
3724 "\xdf\x94\x87\xd0\x7a\x5c\x92\xcc"
3725 "\x51\x2c\x88\x66\xc7\xe8\x60\xce"
3726 "\x93\xfd\xf1\x66\xa2\x49\x12\xb4"
3727 "\x22\x97\x61\x46\xae\x20\xce\x84"
3728 "\x6b\xb7\xdc\x9b\xa9\x4a\x76\x7a"
3729 "\xae\xf2\x0c\x0d\x61\xad\x02\x65"
3730 "\x5e\xa9\x2d\xc4\xc4\xe4\x1a\x89"
3731 "\x52\xc6\x51\xd3\x31\x74\xbe\x51"
3732 "\xa1\x0c\x42\x11\x10\xe6\xd8\x15"
3733 "\x88\xed\xe8\x21\x03\xa2\x52\xd8"
3734 "\xa7\x50\xe8\x76\x8d\xef\xff\xed"
3735 "\x91\x22\x81\x0a\xae\xb9\x9f\x91"
3736 "\x72\xaf\x82\xb6\x04\xdc\x4b\x8e"
3737 "\x51\xbc\xb0\x82\x35\xa6\xf4\x34"
3738 "\x13\x32\xe4\xca\x60\x48\x2a\x4b"
3739 "\xa1\xa0\x3b\x3e\x65\x00\x8f\xc5"
3740 "\xda\x76\xb7\x0b\xf1\x69\x0d\xb4"
3741 "\xea\xe2\x9c\x5f\x1b\xad\xd0\x3c"
3742 "\x5c\xcf\x2a\x55\xd7\x05\xdd\xcd"
3743 "\x86\xd4\x49\x51\x1c\xeb\x7e\xc3"
3744 "\x0b\xf1\x2b\x1f\xa3\x5b\x91\x3f"
3745 "\x9f\x74\x7a\x8a\xfd\x1b\x13\x0e"
3746 "\x94\xbf\xf9\x4e\xff\xd0\x1a\x91"
3747 "\x73\x5c\xa1\x72\x6a\xcd\x0b\x19"
3748 "\x7c\x4e\x5b\x03\x39\x36\x97\xe1"
3749 "\x26\x82\x6f\xb6\xbb\xde\x8e\xcc"
3750 "\x1e\x08\x29\x85\x16\xe2\xc9\xed"
3751 "\x03\xff\x3c\x1b\x78\x60\xf6\xde"
3752 "\x76\xd4\xce\xcd\x94\xc8\x11\x98"
3753 "\x55\xef\x52\x97\xca\x67\xe9\xf3"
3754 "\xe7\xff\x72\xb1\xe9\x97\x85\xca"
3755 "\x0a\x7e\x77\x20\xc5\xb3\x6d\xc6"
3756 "\xd7\x2c\xac\x95\x74\xc8\xcb\xbc"
3757 "\x2f\x80\x1e\x23\xe5\x6f\xd3\x44"
3758 "\xb0\x7f\x22\x15\x4b\xeb\xa0\xf0"
3759 "\x8c\xe8\x89\x1e\x64\x3e\xd9\x95"
3760 "\xc9\x4d\x9a\x69\xc9\xf1\xb5\xf4"
3761 "\x99\x02\x7a\x78\x57\x2a\xee\xbd"
3762 "\x74\xd2\x0c\xc3\x98\x81\xc2\x13"
3763 "\xee\x77\x0b\x10\x10\xe4\xbe\xa7"
3764 "\x18\x84\x69\x77\xae\x11\x9f\x7a"
3765 "\x02\x3a\xb5\x8c\xca\x0a\xd7\x52"
3766 "\xaf\xe6\x56\xbb\x3c\x17\x25\x6a"
3767 "\x9f\x6e\x9b\xf1\x9f\xdd\x5a\x38"
3768 "\xfc\x82\xbb\xe8\x72\xc5\x53\x9e"
3769 "\xdb\x60\x9e\xf4\xf7\x9c\x20\x3e"
3770 "\xbb\x14\x0f\x2e\x58\x3c\xb2\xad"
3771 "\x15\xb4\xaa\x5b\x65\x50\x16\xa8"
3772 "\x44\x92\x77\xdb\xd4\x77\xef\x2c"
3773 "\x8d\x6c\x01\x7d\xb7\x38\xb1\x8d"
3774 "\xeb\x4a\x42\x7d\x19\x23\xce\x3f"
3775 "\xf2\x62\x73\x57\x79\xa4\x18\xf2"
3776 "\x0a\x28\x2d\xf9\x20\x14\x7b\xea"
3777 "\xbe\x42\x1e\xe5\x31\x9d\x05\x68",
3778 .rlen = 512,
3779 }
3780};
3781
3782static struct cipher_testvec aes_xts_dec_tv_template[] = {
3783 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
3784 { /* XTS-AES 1 */
3785 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
3786 "\x00\x00\x00\x00\x00\x00\x00\x00"
3787 "\x00\x00\x00\x00\x00\x00\x00\x00"
3788 "\x00\x00\x00\x00\x00\x00\x00\x00",
3789 .klen = 32,
3790 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3791 "\x00\x00\x00\x00\x00\x00\x00\x00",
3792 .input = "\x91\x7c\xf6\x9e\xbd\x68\xb2\xec"
3793 "\x9b\x9f\xe9\xa3\xea\xdd\xa6\x92"
3794 "\xcd\x43\xd2\xf5\x95\x98\xed\x85"
3795 "\x8c\x02\xc2\x65\x2f\xbf\x92\x2e",
3796 .ilen = 32,
3797 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
3798 "\x00\x00\x00\x00\x00\x00\x00\x00"
3799 "\x00\x00\x00\x00\x00\x00\x00\x00"
3800 "\x00\x00\x00\x00\x00\x00\x00\x00",
3801 .rlen = 32,
3802 }, { /* XTS-AES 2 */
3803 .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
3804 "\x11\x11\x11\x11\x11\x11\x11\x11"
3805 "\x22\x22\x22\x22\x22\x22\x22\x22"
3806 "\x22\x22\x22\x22\x22\x22\x22\x22",
3807 .klen = 32,
3808 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3809 "\x00\x00\x00\x00\x00\x00\x00\x00",
3810 .input = "\xc4\x54\x18\x5e\x6a\x16\x93\x6e"
3811 "\x39\x33\x40\x38\xac\xef\x83\x8b"
3812 "\xfb\x18\x6f\xff\x74\x80\xad\xc4"
3813 "\x28\x93\x82\xec\xd6\xd3\x94\xf0",
3814 .ilen = 32,
3815 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
3816 "\x44\x44\x44\x44\x44\x44\x44\x44"
3817 "\x44\x44\x44\x44\x44\x44\x44\x44"
3818 "\x44\x44\x44\x44\x44\x44\x44\x44",
3819 .rlen = 32,
3820 }, { /* XTS-AES 3 */
3821 .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
3822 "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
3823 "\x22\x22\x22\x22\x22\x22\x22\x22"
3824 "\x22\x22\x22\x22\x22\x22\x22\x22",
3825 .klen = 32,
3826 .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3827 "\x00\x00\x00\x00\x00\x00\x00\x00",
3828 .input = "\xaf\x85\x33\x6b\x59\x7a\xfc\x1a"
3829 "\x90\x0b\x2e\xb2\x1e\xc9\x49\xd2"
3830 "\x92\xdf\x4c\x04\x7e\x0b\x21\x53"
3831 "\x21\x86\xa5\x97\x1a\x22\x7a\x89",
3832 .ilen = 32,
3833 .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
3834 "\x44\x44\x44\x44\x44\x44\x44\x44"
3835 "\x44\x44\x44\x44\x44\x44\x44\x44"
3836 "\x44\x44\x44\x44\x44\x44\x44\x44",
3837 .rlen = 32,
3838 }, { /* XTS-AES 4 */
3839 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
3840 "\x23\x53\x60\x28\x74\x71\x35\x26"
3841 "\x31\x41\x59\x26\x53\x58\x97\x93"
3842 "\x23\x84\x62\x64\x33\x83\x27\x95",
3843 .klen = 32,
3844 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3845 "\x00\x00\x00\x00\x00\x00\x00\x00",
3846 .input = "\x27\xa7\x47\x9b\xef\xa1\xd4\x76"
3847 "\x48\x9f\x30\x8c\xd4\xcf\xa6\xe2"
3848 "\xa9\x6e\x4b\xbe\x32\x08\xff\x25"
3849 "\x28\x7d\xd3\x81\x96\x16\xe8\x9c"
3850 "\xc7\x8c\xf7\xf5\xe5\x43\x44\x5f"
3851 "\x83\x33\xd8\xfa\x7f\x56\x00\x00"
3852 "\x05\x27\x9f\xa5\xd8\xb5\xe4\xad"
3853 "\x40\xe7\x36\xdd\xb4\xd3\x54\x12"
3854 "\x32\x80\x63\xfd\x2a\xab\x53\xe5"
3855 "\xea\x1e\x0a\x9f\x33\x25\x00\xa5"
3856 "\xdf\x94\x87\xd0\x7a\x5c\x92\xcc"
3857 "\x51\x2c\x88\x66\xc7\xe8\x60\xce"
3858 "\x93\xfd\xf1\x66\xa2\x49\x12\xb4"
3859 "\x22\x97\x61\x46\xae\x20\xce\x84"
3860 "\x6b\xb7\xdc\x9b\xa9\x4a\x76\x7a"
3861 "\xae\xf2\x0c\x0d\x61\xad\x02\x65"
3862 "\x5e\xa9\x2d\xc4\xc4\xe4\x1a\x89"
3863 "\x52\xc6\x51\xd3\x31\x74\xbe\x51"
3864 "\xa1\x0c\x42\x11\x10\xe6\xd8\x15"
3865 "\x88\xed\xe8\x21\x03\xa2\x52\xd8"
3866 "\xa7\x50\xe8\x76\x8d\xef\xff\xed"
3867 "\x91\x22\x81\x0a\xae\xb9\x9f\x91"
3868 "\x72\xaf\x82\xb6\x04\xdc\x4b\x8e"
3869 "\x51\xbc\xb0\x82\x35\xa6\xf4\x34"
3870 "\x13\x32\xe4\xca\x60\x48\x2a\x4b"
3871 "\xa1\xa0\x3b\x3e\x65\x00\x8f\xc5"
3872 "\xda\x76\xb7\x0b\xf1\x69\x0d\xb4"
3873 "\xea\xe2\x9c\x5f\x1b\xad\xd0\x3c"
3874 "\x5c\xcf\x2a\x55\xd7\x05\xdd\xcd"
3875 "\x86\xd4\x49\x51\x1c\xeb\x7e\xc3"
3876 "\x0b\xf1\x2b\x1f\xa3\x5b\x91\x3f"
3877 "\x9f\x74\x7a\x8a\xfd\x1b\x13\x0e"
3878 "\x94\xbf\xf9\x4e\xff\xd0\x1a\x91"
3879 "\x73\x5c\xa1\x72\x6a\xcd\x0b\x19"
3880 "\x7c\x4e\x5b\x03\x39\x36\x97\xe1"
3881 "\x26\x82\x6f\xb6\xbb\xde\x8e\xcc"
3882 "\x1e\x08\x29\x85\x16\xe2\xc9\xed"
3883 "\x03\xff\x3c\x1b\x78\x60\xf6\xde"
3884 "\x76\xd4\xce\xcd\x94\xc8\x11\x98"
3885 "\x55\xef\x52\x97\xca\x67\xe9\xf3"
3886 "\xe7\xff\x72\xb1\xe9\x97\x85\xca"
3887 "\x0a\x7e\x77\x20\xc5\xb3\x6d\xc6"
3888 "\xd7\x2c\xac\x95\x74\xc8\xcb\xbc"
3889 "\x2f\x80\x1e\x23\xe5\x6f\xd3\x44"
3890 "\xb0\x7f\x22\x15\x4b\xeb\xa0\xf0"
3891 "\x8c\xe8\x89\x1e\x64\x3e\xd9\x95"
3892 "\xc9\x4d\x9a\x69\xc9\xf1\xb5\xf4"
3893 "\x99\x02\x7a\x78\x57\x2a\xee\xbd"
3894 "\x74\xd2\x0c\xc3\x98\x81\xc2\x13"
3895 "\xee\x77\x0b\x10\x10\xe4\xbe\xa7"
3896 "\x18\x84\x69\x77\xae\x11\x9f\x7a"
3897 "\x02\x3a\xb5\x8c\xca\x0a\xd7\x52"
3898 "\xaf\xe6\x56\xbb\x3c\x17\x25\x6a"
3899 "\x9f\x6e\x9b\xf1\x9f\xdd\x5a\x38"
3900 "\xfc\x82\xbb\xe8\x72\xc5\x53\x9e"
3901 "\xdb\x60\x9e\xf4\xf7\x9c\x20\x3e"
3902 "\xbb\x14\x0f\x2e\x58\x3c\xb2\xad"
3903 "\x15\xb4\xaa\x5b\x65\x50\x16\xa8"
3904 "\x44\x92\x77\xdb\xd4\x77\xef\x2c"
3905 "\x8d\x6c\x01\x7d\xb7\x38\xb1\x8d"
3906 "\xeb\x4a\x42\x7d\x19\x23\xce\x3f"
3907 "\xf2\x62\x73\x57\x79\xa4\x18\xf2"
3908 "\x0a\x28\x2d\xf9\x20\x14\x7b\xea"
3909 "\xbe\x42\x1e\xe5\x31\x9d\x05\x68",
3910 .ilen = 512,
3911 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
3912 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3913 "\x10\x11\x12\x13\x14\x15\x16\x17"
3914 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3915 "\x20\x21\x22\x23\x24\x25\x26\x27"
3916 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3917 "\x30\x31\x32\x33\x34\x35\x36\x37"
3918 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3919 "\x40\x41\x42\x43\x44\x45\x46\x47"
3920 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3921 "\x50\x51\x52\x53\x54\x55\x56\x57"
3922 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3923 "\x60\x61\x62\x63\x64\x65\x66\x67"
3924 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3925 "\x70\x71\x72\x73\x74\x75\x76\x77"
3926 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3927 "\x80\x81\x82\x83\x84\x85\x86\x87"
3928 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3929 "\x90\x91\x92\x93\x94\x95\x96\x97"
3930 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3931 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3932 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3933 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3934 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3935 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3936 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3937 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3938 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3939 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3940 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3941 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3942 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
3943 "\x00\x01\x02\x03\x04\x05\x06\x07"
3944 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3945 "\x10\x11\x12\x13\x14\x15\x16\x17"
3946 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3947 "\x20\x21\x22\x23\x24\x25\x26\x27"
3948 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3949 "\x30\x31\x32\x33\x34\x35\x36\x37"
3950 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3951 "\x40\x41\x42\x43\x44\x45\x46\x47"
3952 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3953 "\x50\x51\x52\x53\x54\x55\x56\x57"
3954 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3955 "\x60\x61\x62\x63\x64\x65\x66\x67"
3956 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3957 "\x70\x71\x72\x73\x74\x75\x76\x77"
3958 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3959 "\x80\x81\x82\x83\x84\x85\x86\x87"
3960 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3961 "\x90\x91\x92\x93\x94\x95\x96\x97"
3962 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3963 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3964 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3965 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3966 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3967 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3968 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3969 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3970 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3971 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3972 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3973 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3974 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
3975 .rlen = 512,
3976 }
3977};
3978
3979
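/*
 * Note on the CTR vectors below: they follow the RFC 3686 convention, so
 * the last four key bytes are the nonce (.klen = 16/24/32 + 4), .iv is the
 * 8-byte per-packet IV, and the 32-bit block counter starts at 1.
 */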
3980static struct cipher_testvec aes_ctr_enc_tv_template[] = {
3981 { /* From RFC 3686 */
3982 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
3983 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
3984 "\x00\x00\x00\x30",
3985 .klen = 20,
3986 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
3987 .input = "Single block msg",
3988 .ilen = 16,
3989 .result = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
3990 "\x2d\x61\x75\xa3\x26\x13\x11\xb8",
3991 .rlen = 16,
3992 }, {
3993 .key = "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
3994 "\x43\xd6\xce\x1f\x32\x53\x91\x63"
3995 "\x00\x6c\xb6\xdb",
3996 .klen = 20,
3997 .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
3998 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
3999 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4000 "\x10\x11\x12\x13\x14\x15\x16\x17"
4001 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
4002 .ilen = 32,
4003 .result = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
4004 "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
4005 "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
4006 "\xfc\xe6\x30\xdf\x91\x41\xbe\x28",
4007 .rlen = 32,
4008 }, {
4009 .key = "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
4010 "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
4011 "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
4012 "\x00\x00\x00\x48",
4013 .klen = 28,
4014 .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
4015 .input = "Single block msg",
4016 .ilen = 16,
4017 .result = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
4018 "\x4e\x79\x35\xa0\x03\xcb\xe9\x28",
4019 .rlen = 16,
4020 }, {
4021 .key = "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
4022 "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
4023 "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
4024 "\x00\x96\xb0\x3b",
4025 .klen = 28,
4026 .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
4027 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
4028 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4029 "\x10\x11\x12\x13\x14\x15\x16\x17"
4030 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
4031 .ilen = 32,
4032 .result = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
4033 "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
4034 "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
4035 "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00",
4036 .rlen = 32,
4037 }, {
4038 .key = "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
4039 "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
4040 "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
4041 "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
4042 "\x00\x00\x00\x60",
4043 .klen = 36,
4044 .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
4045 .input = "Single block msg",
4046 .ilen = 16,
4047 .result = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
4048 "\x56\x08\x63\xdc\x71\xe3\xe0\xc0",
4049 .rlen = 16,
4050 }, {
4051 .key = "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
4052 "\x07\x96\x36\x58\x79\xef\xf8\x86"
4053 "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
4054 "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
4055 "\x00\xfa\xac\x24",
4056 .klen = 36,
4057 .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
4058 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
4059 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4060 "\x10\x11\x12\x13\x14\x15\x16\x17"
4061 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
4062 .ilen = 32,
4063 .result = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
4064 "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
4065 "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
4066 "\x55\x30\x83\x1d\x93\x44\xaf\x1c",
4067 .rlen = 32,
4068 }, {
4069	/* generated using Crypto++ */
4070 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4071 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4072 "\x10\x11\x12\x13\x14\x15\x16\x17"
4073 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
4074 "\x00\x00\x00\x00",
4075 .klen = 32 + 4,
4076 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
4077 .input =
4078 "\x00\x01\x02\x03\x04\x05\x06\x07"
4079 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4080 "\x10\x11\x12\x13\x14\x15\x16\x17"
4081 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
4082 "\x20\x21\x22\x23\x24\x25\x26\x27"
4083 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
4084 "\x30\x31\x32\x33\x34\x35\x36\x37"
4085 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
4086 "\x40\x41\x42\x43\x44\x45\x46\x47"
4087 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
4088 "\x50\x51\x52\x53\x54\x55\x56\x57"
4089 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
4090 "\x60\x61\x62\x63\x64\x65\x66\x67"
4091 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
4092 "\x70\x71\x72\x73\x74\x75\x76\x77"
4093 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
4094 "\x80\x81\x82\x83\x84\x85\x86\x87"
4095 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
4096 "\x90\x91\x92\x93\x94\x95\x96\x97"
4097 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
4098 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
4099 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
4100 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
4101 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
4102 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
4103 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
4104 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
4105 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
4106 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
4107 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
4108 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
4109 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
4110 "\x00\x03\x06\x09\x0c\x0f\x12\x15"
4111 "\x18\x1b\x1e\x21\x24\x27\x2a\x2d"
4112 "\x30\x33\x36\x39\x3c\x3f\x42\x45"
4113 "\x48\x4b\x4e\x51\x54\x57\x5a\x5d"
4114 "\x60\x63\x66\x69\x6c\x6f\x72\x75"
4115 "\x78\x7b\x7e\x81\x84\x87\x8a\x8d"
4116 "\x90\x93\x96\x99\x9c\x9f\xa2\xa5"
4117 "\xa8\xab\xae\xb1\xb4\xb7\xba\xbd"
4118 "\xc0\xc3\xc6\xc9\xcc\xcf\xd2\xd5"
4119 "\xd8\xdb\xde\xe1\xe4\xe7\xea\xed"
4120 "\xf0\xf3\xf6\xf9\xfc\xff\x02\x05"
4121 "\x08\x0b\x0e\x11\x14\x17\x1a\x1d"
4122 "\x20\x23\x26\x29\x2c\x2f\x32\x35"
4123 "\x38\x3b\x3e\x41\x44\x47\x4a\x4d"
4124 "\x50\x53\x56\x59\x5c\x5f\x62\x65"
4125 "\x68\x6b\x6e\x71\x74\x77\x7a\x7d"
4126 "\x80\x83\x86\x89\x8c\x8f\x92\x95"
4127 "\x98\x9b\x9e\xa1\xa4\xa7\xaa\xad"
4128 "\xb0\xb3\xb6\xb9\xbc\xbf\xc2\xc5"
4129 "\xc8\xcb\xce\xd1\xd4\xd7\xda\xdd"
4130 "\xe0\xe3\xe6\xe9\xec\xef\xf2\xf5"
4131 "\xf8\xfb\xfe\x01\x04\x07\x0a\x0d"
4132 "\x10\x13\x16\x19\x1c\x1f\x22\x25"
4133 "\x28\x2b\x2e\x31\x34\x37\x3a\x3d"
4134 "\x40\x43\x46\x49\x4c\x4f\x52\x55"
4135 "\x58\x5b\x5e\x61\x64\x67\x6a\x6d"
4136 "\x70\x73\x76\x79\x7c\x7f\x82\x85"
4137 "\x88\x8b\x8e\x91\x94\x97\x9a\x9d"
4138 "\xa0\xa3\xa6\xa9\xac\xaf\xb2\xb5"
4139 "\xb8\xbb\xbe\xc1\xc4\xc7\xca\xcd"
4140 "\xd0\xd3\xd6\xd9\xdc\xdf\xe2\xe5"
4141 "\xe8\xeb\xee\xf1\xf4\xf7\xfa\xfd"
4142 "\x00\x05\x0a\x0f\x14\x19\x1e\x23"
4143 "\x28\x2d\x32\x37\x3c\x41\x46\x4b"
4144 "\x50\x55\x5a\x5f\x64\x69\x6e\x73"
4145 "\x78\x7d\x82\x87\x8c\x91\x96\x9b"
4146 "\xa0\xa5\xaa\xaf\xb4\xb9\xbe\xc3"
4147 "\xc8\xcd\xd2\xd7\xdc\xe1\xe6\xeb"
4148 "\xf0\xf5\xfa\xff\x04\x09\x0e\x13"
4149 "\x18\x1d\x22\x27\x2c\x31\x36\x3b"
4150 "\x40\x45\x4a\x4f\x54\x59\x5e\x63"
4151 "\x68\x6d\x72\x77\x7c\x81\x86\x8b"
4152 "\x90\x95\x9a\x9f\xa4\xa9\xae\xb3"
4153 "\xb8\xbd\xc2\xc7\xcc\xd1\xd6\xdb"
4154 "\xe0\xe5\xea\xef\xf4\xf9\xfe\x03"
4155 "\x08\x0d\x12\x17\x1c\x21\x26\x2b"
4156 "\x30\x35\x3a\x3f\x44\x49\x4e\x53"
4157 "\x58\x5d\x62\x67\x6c\x71\x76\x7b"
4158 "\x80\x85\x8a\x8f\x94\x99\x9e\xa3"
4159 "\xa8\xad\xb2\xb7\xbc\xc1\xc6\xcb"
4160 "\xd0\xd5\xda\xdf\xe4\xe9\xee\xf3"
4161 "\xf8\xfd\x02\x07\x0c\x11\x16\x1b"
4162 "\x20\x25\x2a\x2f\x34\x39\x3e\x43"
4163 "\x48\x4d\x52\x57\x5c\x61\x66\x6b"
4164 "\x70\x75\x7a\x7f\x84\x89\x8e\x93"
4165 "\x98\x9d\xa2\xa7\xac\xb1\xb6\xbb"
4166 "\xc0\xc5\xca\xcf\xd4\xd9\xde\xe3"
4167 "\xe8\xed\xf2\xf7\xfc\x01\x06\x0b"
4168 "\x10\x15\x1a\x1f\x24\x29\x2e\x33"
4169 "\x38\x3d\x42\x47\x4c\x51\x56\x5b"
4170 "\x60\x65\x6a\x6f\x74\x79\x7e\x83"
4171 "\x88\x8d\x92\x97\x9c\xa1\xa6\xab"
4172 "\xb0\xb5\xba\xbf\xc4\xc9\xce\xd3"
4173 "\xd8\xdd\xe2\xe7\xec\xf1\xf6\xfb"
4174 "\x00\x07\x0e\x15\x1c\x23\x2a\x31"
4175 "\x38\x3f\x46\x4d\x54\x5b\x62\x69"
4176 "\x70\x77\x7e\x85\x8c\x93\x9a\xa1"
4177 "\xa8\xaf\xb6\xbd\xc4\xcb\xd2\xd9"
4178 "\xe0\xe7\xee\xf5\xfc\x03\x0a\x11"
4179 "\x18\x1f\x26\x2d\x34\x3b\x42\x49"
4180 "\x50\x57\x5e\x65\x6c\x73\x7a\x81"
4181 "\x88\x8f\x96\x9d\xa4\xab\xb2\xb9"
4182 "\xc0\xc7\xce\xd5\xdc\xe3\xea\xf1"
4183 "\xf8\xff\x06\x0d\x14\x1b\x22\x29"
4184 "\x30\x37\x3e\x45\x4c\x53\x5a\x61"
4185 "\x68\x6f\x76\x7d\x84\x8b\x92\x99"
4186 "\xa0\xa7\xae\xb5\xbc\xc3\xca\xd1"
4187 "\xd8\xdf\xe6\xed\xf4\xfb\x02\x09"
4188 "\x10\x17\x1e\x25\x2c\x33\x3a\x41"
4189 "\x48\x4f\x56\x5d\x64\x6b\x72\x79"
4190 "\x80\x87\x8e\x95\x9c\xa3\xaa\xb1"
4191 "\xb8\xbf\xc6\xcd\xd4\xdb\xe2\xe9"
4192 "\xf0\xf7\xfe\x05\x0c\x13\x1a\x21"
4193 "\x28\x2f\x36\x3d\x44\x4b\x52\x59"
4194 "\x60\x67\x6e\x75\x7c\x83\x8a\x91"
4195 "\x98\x9f\xa6\xad\xb4\xbb\xc2\xc9"
4196 "\xd0\xd7\xde\xe5\xec\xf3\xfa\x01"
4197 "\x08\x0f\x16\x1d\x24\x2b\x32\x39"
4198 "\x40\x47\x4e\x55\x5c\x63\x6a\x71"
4199 "\x78\x7f\x86\x8d\x94\x9b\xa2\xa9"
4200 "\xb0\xb7\xbe\xc5\xcc\xd3\xda\xe1"
4201 "\xe8\xef\xf6\xfd\x04\x0b\x12\x19"
4202 "\x20\x27\x2e\x35\x3c\x43\x4a\x51"
4203 "\x58\x5f\x66\x6d\x74\x7b\x82\x89"
4204 "\x90\x97\x9e\xa5\xac\xb3\xba\xc1"
4205 "\xc8\xcf\xd6\xdd\xe4\xeb\xf2\xf9"
4206 "\x00\x09\x12\x1b\x24\x2d\x36\x3f"
4207 "\x48\x51\x5a\x63\x6c\x75\x7e\x87"
4208 "\x90\x99\xa2\xab\xb4\xbd\xc6\xcf"
4209 "\xd8\xe1\xea\xf3\xfc\x05\x0e\x17"
4210 "\x20\x29\x32\x3b\x44\x4d\x56\x5f"
4211 "\x68\x71\x7a\x83\x8c\x95\x9e\xa7"
4212 "\xb0\xb9\xc2\xcb\xd4\xdd\xe6\xef"
4213 "\xf8\x01\x0a\x13\x1c\x25\x2e\x37"
4214 "\x40\x49\x52\x5b\x64\x6d\x76\x7f"
4215 "\x88\x91\x9a\xa3\xac\xb5\xbe\xc7"
4216 "\xd0\xd9\xe2\xeb\xf4\xfd\x06\x0f"
4217 "\x18\x21\x2a\x33\x3c\x45\x4e\x57"
4218 "\x60\x69\x72\x7b\x84\x8d\x96\x9f"
4219 "\xa8\xb1\xba\xc3\xcc\xd5\xde\xe7"
4220 "\xf0\xf9\x02\x0b\x14\x1d\x26\x2f"
4221 "\x38\x41\x4a\x53\x5c\x65\x6e\x77"
4222 "\x80\x89\x92\x9b\xa4\xad\xb6\xbf"
4223 "\xc8\xd1\xda\xe3\xec\xf5\xfe\x07"
4224 "\x10\x19\x22\x2b\x34\x3d\x46\x4f"
4225 "\x58\x61\x6a\x73\x7c\x85\x8e\x97"
4226 "\xa0\xa9\xb2\xbb\xc4\xcd\xd6\xdf"
4227 "\xe8\xf1\xfa\x03\x0c\x15\x1e\x27"
4228 "\x30\x39\x42\x4b\x54\x5d\x66\x6f"
4229 "\x78\x81\x8a\x93\x9c\xa5\xae\xb7"
4230 "\xc0\xc9\xd2\xdb\xe4\xed\xf6\xff"
4231 "\x08\x11\x1a\x23\x2c\x35\x3e\x47"
4232 "\x50\x59\x62\x6b\x74\x7d\x86\x8f"
4233 "\x98\xa1\xaa\xb3\xbc\xc5\xce\xd7"
4234 "\xe0\xe9\xf2\xfb\x04\x0d\x16\x1f"
4235 "\x28\x31\x3a\x43\x4c\x55\x5e\x67"
4236 "\x70\x79\x82\x8b\x94\x9d\xa6\xaf"
4237 "\xb8\xc1\xca\xd3\xdc\xe5\xee\xf7"
4238 "\x00\x0b\x16\x21\x2c\x37\x42\x4d"
4239 "\x58\x63\x6e\x79\x84\x8f\x9a\xa5"
4240 "\xb0\xbb\xc6\xd1\xdc\xe7\xf2\xfd"
4241 "\x08\x13\x1e\x29\x34\x3f\x4a\x55"
4242 "\x60\x6b\x76\x81\x8c\x97\xa2\xad"
4243 "\xb8\xc3\xce\xd9\xe4\xef\xfa\x05"
4244 "\x10\x1b\x26\x31\x3c\x47\x52\x5d"
4245 "\x68\x73\x7e\x89\x94\x9f\xaa\xb5"
4246 "\xc0\xcb\xd6\xe1\xec\xf7\x02\x0d"
4247 "\x18\x23\x2e\x39\x44\x4f\x5a\x65"
4248 "\x70\x7b\x86\x91\x9c\xa7\xb2\xbd"
4249 "\xc8\xd3\xde\xe9\xf4\xff\x0a\x15"
4250 "\x20\x2b\x36\x41\x4c\x57\x62\x6d"
4251 "\x78\x83\x8e\x99\xa4\xaf\xba\xc5"
4252 "\xd0\xdb\xe6\xf1\xfc\x07\x12\x1d"
4253 "\x28\x33\x3e\x49\x54\x5f\x6a\x75"
4254 "\x80\x8b\x96\xa1\xac\xb7\xc2\xcd"
4255 "\xd8\xe3\xee\xf9\x04\x0f\x1a\x25"
4256 "\x30\x3b\x46\x51\x5c\x67\x72\x7d"
4257 "\x88\x93\x9e\xa9\xb4\xbf\xca\xd5"
4258 "\xe0\xeb\xf6\x01\x0c\x17\x22\x2d"
4259 "\x38\x43\x4e\x59\x64\x6f\x7a\x85"
4260 "\x90\x9b\xa6\xb1\xbc\xc7\xd2\xdd"
4261 "\xe8\xf3\xfe\x09\x14\x1f\x2a\x35"
4262 "\x40\x4b\x56\x61\x6c\x77\x82\x8d"
4263 "\x98\xa3\xae\xb9\xc4\xcf\xda\xe5"
4264 "\xf0\xfb\x06\x11\x1c\x27\x32\x3d"
4265 "\x48\x53\x5e\x69\x74\x7f\x8a\x95"
4266 "\xa0\xab\xb6\xc1\xcc\xd7\xe2\xed"
4267 "\xf8\x03\x0e\x19\x24\x2f\x3a\x45"
4268 "\x50\x5b\x66\x71\x7c\x87\x92\x9d"
4269 "\xa8\xb3\xbe\xc9\xd4\xdf\xea\xf5"
4270 "\x00\x0d\x1a\x27\x34\x41\x4e\x5b"
4271 "\x68\x75\x82\x8f\x9c\xa9\xb6\xc3"
4272 "\xd0\xdd\xea\xf7\x04\x11\x1e\x2b"
4273 "\x38\x45\x52\x5f\x6c\x79\x86\x93"
4274 "\xa0\xad\xba\xc7\xd4\xe1\xee\xfb"
4275 "\x08\x15\x22\x2f\x3c\x49\x56\x63"
4276 "\x70\x7d\x8a\x97\xa4\xb1\xbe\xcb"
4277 "\xd8\xe5\xf2\xff\x0c\x19\x26\x33"
4278 "\x40\x4d\x5a\x67\x74\x81\x8e\x9b"
4279 "\xa8\xb5\xc2\xcf\xdc\xe9\xf6\x03"
4280 "\x10\x1d\x2a\x37\x44\x51\x5e\x6b"
4281 "\x78\x85\x92\x9f\xac\xb9\xc6\xd3"
4282 "\xe0\xed\xfa\x07\x14\x21\x2e\x3b"
4283 "\x48\x55\x62\x6f\x7c\x89\x96\xa3"
4284 "\xb0\xbd\xca\xd7\xe4\xf1\xfe\x0b"
4285 "\x18\x25\x32\x3f\x4c\x59\x66\x73"
4286 "\x80\x8d\x9a\xa7\xb4\xc1\xce\xdb"
4287 "\xe8\xf5\x02\x0f\x1c\x29\x36\x43"
4288 "\x50\x5d\x6a\x77\x84\x91\x9e\xab"
4289 "\xb8\xc5\xd2\xdf\xec\xf9\x06\x13"
4290 "\x20\x2d\x3a\x47\x54\x61\x6e\x7b"
4291 "\x88\x95\xa2\xaf\xbc\xc9\xd6\xe3"
4292 "\xf0\xfd\x0a\x17\x24\x31\x3e\x4b"
4293 "\x58\x65\x72\x7f\x8c\x99\xa6\xb3"
4294 "\xc0\xcd\xda\xe7\xf4\x01\x0e\x1b"
4295 "\x28\x35\x42\x4f\x5c\x69\x76\x83"
4296 "\x90\x9d\xaa\xb7\xc4\xd1\xde\xeb"
4297 "\xf8\x05\x12\x1f\x2c\x39\x46\x53"
4298 "\x60\x6d\x7a\x87\x94\xa1\xae\xbb"
4299 "\xc8\xd5\xe2\xef\xfc\x09\x16\x23"
4300 "\x30\x3d\x4a\x57\x64\x71\x7e\x8b"
4301 "\x98\xa5\xb2\xbf\xcc\xd9\xe6\xf3"
4302 "\x00\x0f\x1e\x2d\x3c\x4b\x5a\x69"
4303 "\x78\x87\x96\xa5\xb4\xc3\xd2\xe1"
4304 "\xf0\xff\x0e\x1d\x2c\x3b\x4a\x59"
4305 "\x68\x77\x86\x95\xa4\xb3\xc2\xd1"
4306 "\xe0\xef\xfe\x0d\x1c\x2b\x3a\x49"
4307 "\x58\x67\x76\x85\x94\xa3\xb2\xc1"
4308 "\xd0\xdf\xee\xfd\x0c\x1b\x2a\x39"
4309 "\x48\x57\x66\x75\x84\x93\xa2\xb1"
4310 "\xc0\xcf\xde\xed\xfc\x0b\x1a\x29"
4311 "\x38\x47\x56\x65\x74\x83\x92\xa1"
4312 "\xb0\xbf\xce\xdd\xec\xfb\x0a\x19"
4313 "\x28\x37\x46\x55\x64\x73\x82\x91"
4314 "\xa0\xaf\xbe\xcd\xdc\xeb\xfa\x09"
4315 "\x18\x27\x36\x45\x54\x63\x72\x81"
4316 "\x90\x9f\xae\xbd\xcc\xdb\xea\xf9"
4317 "\x08\x17\x26\x35\x44\x53\x62\x71"
4318 "\x80\x8f\x9e\xad\xbc\xcb\xda\xe9"
4319 "\xf8\x07\x16\x25\x34\x43\x52\x61"
4320 "\x70\x7f\x8e\x9d\xac\xbb\xca\xd9"
4321 "\xe8\xf7\x06\x15\x24\x33\x42\x51"
4322 "\x60\x6f\x7e\x8d\x9c\xab\xba\xc9"
4323 "\xd8\xe7\xf6\x05\x14\x23\x32\x41"
4324 "\x50\x5f\x6e\x7d\x8c\x9b\xaa\xb9"
4325 "\xc8\xd7\xe6\xf5\x04\x13\x22\x31"
4326 "\x40\x4f\x5e\x6d\x7c\x8b\x9a\xa9"
4327 "\xb8\xc7\xd6\xe5\xf4\x03\x12\x21"
4328 "\x30\x3f\x4e\x5d\x6c\x7b\x8a\x99"
4329 "\xa8\xb7\xc6\xd5\xe4\xf3\x02\x11"
4330 "\x20\x2f\x3e\x4d\x5c\x6b\x7a\x89"
4331 "\x98\xa7\xb6\xc5\xd4\xe3\xf2\x01"
4332 "\x10\x1f\x2e\x3d\x4c\x5b\x6a\x79"
4333 "\x88\x97\xa6\xb5\xc4\xd3\xe2\xf1"
4334 "\x00\x11\x22\x33\x44\x55\x66\x77"
4335 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff"
4336 "\x10\x21\x32\x43\x54\x65\x76\x87"
4337 "\x98\xa9\xba\xcb\xdc\xed\xfe\x0f"
4338 "\x20\x31\x42\x53\x64\x75\x86\x97"
4339 "\xa8\xb9\xca\xdb\xec\xfd\x0e\x1f"
4340 "\x30\x41\x52\x63\x74\x85\x96\xa7"
4341 "\xb8\xc9\xda\xeb\xfc\x0d\x1e\x2f"
4342 "\x40\x51\x62\x73\x84\x95\xa6\xb7"
4343 "\xc8\xd9\xea\xfb\x0c\x1d\x2e\x3f"
4344 "\x50\x61\x72\x83\x94\xa5\xb6\xc7"
4345 "\xd8\xe9\xfa\x0b\x1c\x2d\x3e\x4f"
4346 "\x60\x71\x82\x93\xa4\xb5\xc6\xd7"
4347 "\xe8\xf9\x0a\x1b\x2c\x3d\x4e\x5f"
4348 "\x70\x81\x92\xa3\xb4\xc5\xd6\xe7"
4349 "\xf8\x09\x1a\x2b\x3c\x4d\x5e\x6f"
4350 "\x80\x91\xa2\xb3\xc4\xd5\xe6\xf7"
4351 "\x08\x19\x2a\x3b\x4c\x5d\x6e\x7f"
4352 "\x90\xa1\xb2\xc3\xd4\xe5\xf6\x07"
4353 "\x18\x29\x3a\x4b\x5c\x6d\x7e\x8f"
4354 "\xa0\xb1\xc2\xd3\xe4\xf5\x06\x17"
4355 "\x28\x39\x4a\x5b\x6c\x7d\x8e\x9f"
4356 "\xb0\xc1\xd2\xe3\xf4\x05\x16\x27"
4357 "\x38\x49\x5a\x6b\x7c\x8d\x9e\xaf"
4358 "\xc0\xd1\xe2\xf3\x04\x15\x26\x37"
4359 "\x48\x59\x6a\x7b\x8c\x9d\xae\xbf"
4360 "\xd0\xe1\xf2\x03\x14\x25\x36\x47"
4361 "\x58\x69\x7a\x8b\x9c\xad\xbe\xcf"
4362 "\xe0\xf1\x02\x13\x24\x35\x46\x57"
4363 "\x68\x79\x8a\x9b\xac\xbd\xce\xdf"
4364 "\xf0\x01\x12\x23\x34\x45\x56\x67"
4365 "\x78\x89\x9a\xab\xbc\xcd\xde\xef"
4366 "\x00\x13\x26\x39\x4c\x5f\x72\x85"
4367 "\x98\xab\xbe\xd1\xe4\xf7\x0a\x1d"
4368 "\x30\x43\x56\x69\x7c\x8f\xa2\xb5"
4369 "\xc8\xdb\xee\x01\x14\x27\x3a\x4d"
4370 "\x60\x73\x86\x99\xac\xbf\xd2\xe5"
4371 "\xf8\x0b\x1e\x31\x44\x57\x6a\x7d"
4372 "\x90\xa3\xb6\xc9\xdc\xef\x02\x15"
4373 "\x28\x3b\x4e\x61\x74\x87\x9a\xad"
4374 "\xc0\xd3\xe6\xf9\x0c\x1f\x32\x45"
4375 "\x58\x6b\x7e\x91\xa4\xb7\xca\xdd"
4376 "\xf0\x03\x16\x29\x3c\x4f\x62\x75"
4377 "\x88\x9b\xae\xc1\xd4\xe7\xfa\x0d"
4378 "\x20\x33\x46\x59\x6c\x7f\x92\xa5"
4379 "\xb8\xcb\xde\xf1\x04\x17\x2a\x3d"
4380 "\x50\x63\x76\x89\x9c\xaf\xc2\xd5"
4381 "\xe8\xfb\x0e\x21\x34\x47\x5a\x6d"
4382 "\x80\x93\xa6\xb9\xcc\xdf\xf2\x05"
4383 "\x18\x2b\x3e\x51\x64\x77\x8a\x9d"
4384 "\xb0\xc3\xd6\xe9\xfc\x0f\x22\x35"
4385 "\x48\x5b\x6e\x81\x94\xa7\xba\xcd"
4386 "\xe0\xf3\x06\x19\x2c\x3f\x52\x65"
4387 "\x78\x8b\x9e\xb1\xc4\xd7\xea\xfd"
4388 "\x10\x23\x36\x49\x5c\x6f\x82\x95"
4389 "\xa8\xbb\xce\xe1\xf4\x07\x1a\x2d"
4390 "\x40\x53\x66\x79\x8c\x9f\xb2\xc5"
4391 "\xd8\xeb\xfe\x11\x24\x37\x4a\x5d"
4392 "\x70\x83\x96\xa9\xbc\xcf\xe2\xf5"
4393 "\x08\x1b\x2e\x41\x54\x67\x7a\x8d"
4394 "\xa0\xb3\xc6\xd9\xec\xff\x12\x25"
4395 "\x38\x4b\x5e\x71\x84\x97\xaa\xbd"
4396 "\xd0\xe3\xf6\x09\x1c\x2f\x42\x55"
4397 "\x68\x7b\x8e\xa1\xb4\xc7\xda\xed"
4398 "\x00\x15\x2a\x3f\x54\x69\x7e\x93"
4399 "\xa8\xbd\xd2\xe7\xfc\x11\x26\x3b"
4400 "\x50\x65\x7a\x8f\xa4\xb9\xce\xe3"
4401 "\xf8\x0d\x22\x37\x4c\x61\x76\x8b"
4402 "\xa0\xb5\xca\xdf\xf4\x09\x1e\x33"
4403 "\x48\x5d\x72\x87\x9c\xb1\xc6\xdb"
4404 "\xf0\x05\x1a\x2f\x44\x59\x6e\x83"
4405 "\x98\xad\xc2\xd7\xec\x01\x16\x2b"
4406 "\x40\x55\x6a\x7f\x94\xa9\xbe\xd3"
4407 "\xe8\xfd\x12\x27\x3c\x51\x66\x7b"
4408 "\x90\xa5\xba\xcf\xe4\xf9\x0e\x23"
4409 "\x38\x4d\x62\x77\x8c\xa1\xb6\xcb"
4410 "\xe0\xf5\x0a\x1f\x34\x49\x5e\x73"
4411 "\x88\x9d\xb2\xc7\xdc\xf1\x06\x1b"
4412 "\x30\x45\x5a\x6f\x84\x99\xae\xc3"
4413 "\xd8\xed\x02\x17\x2c\x41\x56\x6b"
4414 "\x80\x95\xaa\xbf\xd4\xe9\xfe\x13"
4415 "\x28\x3d\x52\x67\x7c\x91\xa6\xbb"
4416 "\xd0\xe5\xfa\x0f\x24\x39\x4e\x63"
4417 "\x78\x8d\xa2\xb7\xcc\xe1\xf6\x0b"
4418 "\x20\x35\x4a\x5f\x74\x89\x9e\xb3"
4419 "\xc8\xdd\xf2\x07\x1c\x31\x46\x5b"
4420 "\x70\x85\x9a\xaf\xc4\xd9\xee\x03"
4421 "\x18\x2d\x42\x57\x6c\x81\x96\xab"
4422 "\xc0\xd5\xea\xff\x14\x29\x3e\x53"
4423 "\x68\x7d\x92\xa7\xbc\xd1\xe6\xfb"
4424 "\x10\x25\x3a\x4f\x64\x79\x8e\xa3"
4425 "\xb8\xcd\xe2\xf7\x0c\x21\x36\x4b"
4426 "\x60\x75\x8a\x9f\xb4\xc9\xde\xf3"
4427 "\x08\x1d\x32\x47\x5c\x71\x86\x9b"
4428 "\xb0\xc5\xda\xef\x04\x19\x2e\x43"
4429 "\x58\x6d\x82\x97\xac\xc1\xd6\xeb"
4430 "\x00\x17\x2e\x45\x5c\x73\x8a\xa1"
4431 "\xb8\xcf\xe6\xfd\x14\x2b\x42\x59"
4432 "\x70\x87\x9e\xb5\xcc\xe3\xfa\x11"
4433 "\x28\x3f\x56\x6d\x84\x9b\xb2\xc9"
4434 "\xe0\xf7\x0e\x25\x3c\x53\x6a\x81"
4435 "\x98\xaf\xc6\xdd\xf4\x0b\x22\x39"
4436 "\x50\x67\x7e\x95\xac\xc3\xda\xf1"
4437 "\x08\x1f\x36\x4d\x64\x7b\x92\xa9"
4438 "\xc0\xd7\xee\x05\x1c\x33\x4a\x61"
4439 "\x78\x8f\xa6\xbd\xd4\xeb\x02\x19"
4440 "\x30\x47\x5e\x75\x8c\xa3\xba\xd1"
4441 "\xe8\xff\x16\x2d\x44\x5b\x72\x89"
4442 "\xa0\xb7\xce\xe5\xfc\x13\x2a\x41"
4443 "\x58\x6f\x86\x9d\xb4\xcb\xe2\xf9"
4444 "\x10\x27\x3e\x55\x6c\x83\x9a\xb1"
4445 "\xc8\xdf\xf6\x0d\x24\x3b\x52\x69"
4446 "\x80\x97\xae\xc5\xdc\xf3\x0a\x21"
4447 "\x38\x4f\x66\x7d\x94\xab\xc2\xd9"
4448 "\xf0\x07\x1e\x35\x4c\x63\x7a\x91"
4449 "\xa8\xbf\xd6\xed\x04\x1b\x32\x49"
4450 "\x60\x77\x8e\xa5\xbc\xd3\xea\x01"
4451 "\x18\x2f\x46\x5d\x74\x8b\xa2\xb9"
4452 "\xd0\xe7\xfe\x15\x2c\x43\x5a\x71"
4453 "\x88\x9f\xb6\xcd\xe4\xfb\x12\x29"
4454 "\x40\x57\x6e\x85\x9c\xb3\xca\xe1"
4455 "\xf8\x0f\x26\x3d\x54\x6b\x82\x99"
4456 "\xb0\xc7\xde\xf5\x0c\x23\x3a\x51"
4457 "\x68\x7f\x96\xad\xc4\xdb\xf2\x09"
4458 "\x20\x37\x4e\x65\x7c\x93\xaa\xc1"
4459 "\xd8\xef\x06\x1d\x34\x4b\x62\x79"
4460 "\x90\xa7\xbe\xd5\xec\x03\x1a\x31"
4461 "\x48\x5f\x76\x8d\xa4\xbb\xd2\xe9"
4462 "\x00\x19\x32\x4b\x64\x7d\x96\xaf"
4463 "\xc8\xe1\xfa\x13\x2c\x45\x5e\x77"
4464 "\x90\xa9\xc2\xdb\xf4\x0d\x26\x3f"
4465 "\x58\x71\x8a\xa3\xbc\xd5\xee\x07"
4466 "\x20\x39\x52\x6b\x84\x9d\xb6\xcf"
4467 "\xe8\x01\x1a\x33\x4c\x65\x7e\x97"
4468 "\xb0\xc9\xe2\xfb\x14\x2d\x46\x5f"
4469 "\x78\x91\xaa\xc3\xdc\xf5\x0e\x27"
4470 "\x40\x59\x72\x8b\xa4\xbd\xd6\xef"
4471 "\x08\x21\x3a\x53\x6c\x85\x9e\xb7"
4472 "\xd0\xe9\x02\x1b\x34\x4d\x66\x7f"
4473 "\x98\xb1\xca\xe3\xfc\x15\x2e\x47"
4474 "\x60\x79\x92\xab\xc4\xdd\xf6\x0f"
4475 "\x28\x41\x5a\x73\x8c\xa5\xbe\xd7"
4476 "\xf0\x09\x22\x3b\x54\x6d\x86\x9f"
4477 "\xb8\xd1\xea\x03\x1c\x35\x4e\x67"
4478 "\x80\x99\xb2\xcb\xe4\xfd\x16\x2f"
4479 "\x48\x61\x7a\x93\xac\xc5\xde\xf7"
4480 "\x10\x29\x42\x5b\x74\x8d\xa6\xbf"
4481 "\xd8\xf1\x0a\x23\x3c\x55\x6e\x87"
4482 "\xa0\xb9\xd2\xeb\x04\x1d\x36\x4f"
4483 "\x68\x81\x9a\xb3\xcc\xe5\xfe\x17"
4484 "\x30\x49\x62\x7b\x94\xad\xc6\xdf"
4485 "\xf8\x11\x2a\x43\x5c\x75\x8e\xa7"
4486 "\xc0\xd9\xf2\x0b\x24\x3d\x56\x6f"
4487 "\x88\xa1\xba\xd3\xec\x05\x1e\x37"
4488 "\x50\x69\x82\x9b\xb4\xcd\xe6\xff"
4489 "\x18\x31\x4a\x63\x7c\x95\xae\xc7"
4490 "\xe0\xf9\x12\x2b\x44\x5d\x76\x8f"
4491 "\xa8\xc1\xda\xf3\x0c\x25\x3e\x57"
4492 "\x70\x89\xa2\xbb\xd4\xed\x06\x1f"
4493 "\x38\x51\x6a\x83\x9c\xb5\xce\xe7"
4494 "\x00\x1b\x36\x51\x6c\x87\xa2\xbd"
4495 "\xd8\xf3\x0e\x29\x44\x5f\x7a\x95"
4496 "\xb0\xcb\xe6\x01\x1c\x37\x52\x6d"
4497 "\x88\xa3\xbe\xd9\xf4\x0f\x2a\x45"
4498 "\x60\x7b\x96\xb1\xcc\xe7\x02\x1d"
4499 "\x38\x53\x6e\x89\xa4\xbf\xda\xf5"
4500 "\x10\x2b\x46\x61\x7c\x97\xb2\xcd"
4501 "\xe8\x03\x1e\x39\x54\x6f\x8a\xa5"
4502 "\xc0\xdb\xf6\x11\x2c\x47\x62\x7d"
4503 "\x98\xb3\xce\xe9\x04\x1f\x3a\x55"
4504 "\x70\x8b\xa6\xc1\xdc\xf7\x12\x2d"
4505 "\x48\x63\x7e\x99\xb4\xcf\xea\x05"
4506 "\x20\x3b\x56\x71\x8c\xa7\xc2\xdd"
4507 "\xf8\x13\x2e\x49\x64\x7f\x9a\xb5"
4508 "\xd0\xeb\x06\x21\x3c\x57\x72\x8d"
4509 "\xa8\xc3\xde\xf9\x14\x2f\x4a\x65"
4510 "\x80\x9b\xb6\xd1\xec\x07\x22\x3d"
4511 "\x58\x73\x8e\xa9\xc4\xdf\xfa\x15"
4512 "\x30\x4b\x66\x81\x9c\xb7\xd2\xed"
4513 "\x08\x23\x3e\x59\x74\x8f\xaa\xc5"
4514 "\xe0\xfb\x16\x31\x4c\x67\x82\x9d"
4515 "\xb8\xd3\xee\x09\x24\x3f\x5a\x75"
4516 "\x90\xab\xc6\xe1\xfc\x17\x32\x4d"
4517 "\x68\x83\x9e\xb9\xd4\xef\x0a\x25"
4518 "\x40\x5b\x76\x91\xac\xc7\xe2\xfd"
4519 "\x18\x33\x4e\x69\x84\x9f\xba\xd5"
4520 "\xf0\x0b\x26\x41\x5c\x77\x92\xad"
4521 "\xc8\xe3\xfe\x19\x34\x4f\x6a\x85"
4522 "\xa0\xbb\xd6\xf1\x0c\x27\x42\x5d"
4523 "\x78\x93\xae\xc9\xe4\xff\x1a\x35"
4524 "\x50\x6b\x86\xa1\xbc\xd7\xf2\x0d"
4525 "\x28\x43\x5e\x79\x94\xaf\xca\xe5"
4526 "\x00\x1d\x3a\x57\x74\x91\xae\xcb"
4527 "\xe8\x05\x22\x3f\x5c\x79\x96\xb3"
4528 "\xd0\xed\x0a\x27\x44\x61\x7e\x9b"
4529 "\xb8\xd5\xf2\x0f\x2c\x49\x66\x83"
4530 "\xa0\xbd\xda\xf7\x14\x31\x4e\x6b"
4531 "\x88\xa5\xc2\xdf\xfc\x19\x36\x53"
4532 "\x70\x8d\xaa\xc7\xe4\x01\x1e\x3b"
4533 "\x58\x75\x92\xaf\xcc\xe9\x06\x23"
4534 "\x40\x5d\x7a\x97\xb4\xd1\xee\x0b"
4535 "\x28\x45\x62\x7f\x9c\xb9\xd6\xf3"
4536 "\x10\x2d\x4a\x67\x84\xa1\xbe\xdb"
4537 "\xf8\x15\x32\x4f\x6c\x89\xa6\xc3"
4538 "\xe0\xfd\x1a\x37\x54\x71\x8e\xab"
4539 "\xc8\xe5\x02\x1f\x3c\x59\x76\x93"
4540 "\xb0\xcd\xea\x07\x24\x41\x5e\x7b"
4541 "\x98\xb5\xd2\xef\x0c\x29\x46\x63"
4542 "\x80\x9d\xba\xd7\xf4\x11\x2e\x4b"
4543 "\x68\x85\xa2\xbf\xdc\xf9\x16\x33"
4544 "\x50\x6d\x8a\xa7\xc4\xe1\xfe\x1b"
4545 "\x38\x55\x72\x8f\xac\xc9\xe6\x03"
4546 "\x20\x3d\x5a\x77\x94\xb1\xce\xeb"
4547 "\x08\x25\x42\x5f\x7c\x99\xb6\xd3"
4548 "\xf0\x0d\x2a\x47\x64\x81\x9e\xbb"
4549 "\xd8\xf5\x12\x2f\x4c\x69\x86\xa3"
4550 "\xc0\xdd\xfa\x17\x34\x51\x6e\x8b"
4551 "\xa8\xc5\xe2\xff\x1c\x39\x56\x73"
4552 "\x90\xad\xca\xe7\x04\x21\x3e\x5b"
4553 "\x78\x95\xb2\xcf\xec\x09\x26\x43"
4554 "\x60\x7d\x9a\xb7\xd4\xf1\x0e\x2b"
4555 "\x48\x65\x82\x9f\xbc\xd9\xf6\x13"
4556 "\x30\x4d\x6a\x87\xa4\xc1\xde\xfb"
4557 "\x18\x35\x52\x6f\x8c\xa9\xc6\xe3"
4558 "\x00\x1f\x3e\x5d\x7c\x9b\xba\xd9"
4559 "\xf8\x17\x36\x55\x74\x93\xb2\xd1"
4560 "\xf0\x0f\x2e\x4d\x6c\x8b\xaa\xc9"
4561 "\xe8\x07\x26\x45\x64\x83\xa2\xc1"
4562 "\xe0\xff\x1e\x3d\x5c\x7b\x9a\xb9"
4563 "\xd8\xf7\x16\x35\x54\x73\x92\xb1"
4564 "\xd0\xef\x0e\x2d\x4c\x6b\x8a\xa9"
4565 "\xc8\xe7\x06\x25\x44\x63\x82\xa1"
4566 "\xc0\xdf\xfe\x1d\x3c\x5b\x7a\x99"
4567 "\xb8\xd7\xf6\x15\x34\x53\x72\x91"
4568 "\xb0\xcf\xee\x0d\x2c\x4b\x6a\x89"
4569 "\xa8\xc7\xe6\x05\x24\x43\x62\x81"
4570 "\xa0\xbf\xde\xfd\x1c\x3b\x5a\x79"
4571 "\x98\xb7\xd6\xf5\x14\x33\x52\x71"
4572 "\x90\xaf\xce\xed\x0c\x2b\x4a\x69"
4573 "\x88\xa7\xc6\xe5\x04\x23\x42\x61"
4574 "\x80\x9f\xbe\xdd\xfc\x1b\x3a\x59"
4575 "\x78\x97\xb6\xd5\xf4\x13\x32\x51"
4576 "\x70\x8f\xae\xcd\xec\x0b\x2a\x49"
4577 "\x68\x87\xa6\xc5\xe4\x03\x22\x41"
4578 "\x60\x7f\x9e\xbd\xdc\xfb\x1a\x39"
4579 "\x58\x77\x96\xb5\xd4\xf3\x12\x31"
4580 "\x50\x6f\x8e\xad\xcc\xeb\x0a\x29"
4581 "\x48\x67\x86\xa5\xc4\xe3\x02\x21"
4582 "\x40\x5f\x7e\x9d\xbc\xdb\xfa\x19"
4583 "\x38\x57\x76\x95\xb4\xd3\xf2\x11"
4584 "\x30\x4f\x6e\x8d\xac\xcb\xea\x09"
4585 "\x28\x47\x66\x85\xa4\xc3\xe2\x01"
4586 "\x20\x3f\x5e\x7d\x9c\xbb\xda\xf9"
4587 "\x18\x37\x56\x75\x94\xb3\xd2\xf1"
4588 "\x10\x2f\x4e\x6d\x8c\xab\xca\xe9"
4589 "\x08\x27\x46\x65\x84\xa3\xc2\xe1"
4590 "\x00\x21\x42\x63",
4591 .ilen = 4100,
4592 .result =
4593 "\xf0\x5c\x74\xad\x4e\xbc\x99\xe2"
4594 "\xae\xff\x91\x3a\x44\xcf\x38\x32"
4595 "\x1e\xad\xa7\xcd\xa1\x39\x95\xaa"
4596 "\x10\xb1\xb3\x2e\x04\x31\x8f\x86"
4597 "\xf2\x62\x74\x70\x0c\xa4\x46\x08"
4598 "\xa8\xb7\x99\xa8\xe9\xd2\x73\x79"
4599 "\x7e\x6e\xd4\x8f\x1e\xc7\x8e\x31"
4600 "\x0b\xfa\x4b\xce\xfd\xf3\x57\x71"
4601 "\xe9\x46\x03\xa5\x3d\x34\x00\xe2"
4602 "\x18\xff\x75\x6d\x06\x2d\x00\xab"
4603 "\xb9\x3e\x6c\x59\xc5\x84\x06\xb5"
4604 "\x8b\xd0\x89\x9c\x4a\x79\x16\xc6"
4605 "\x3d\x74\x54\xfa\x44\xcd\x23\x26"
4606 "\x5c\xcf\x7e\x28\x92\x32\xbf\xdf"
4607 "\xa7\x20\x3c\x74\x58\x2a\x9a\xde"
4608 "\x61\x00\x1c\x4f\xff\x59\xc4\x22"
4609 "\xac\x3c\xd0\xe8\x6c\xf9\x97\x1b"
4610 "\x58\x9b\xad\x71\xe8\xa9\xb5\x0d"
4611 "\xee\x2f\x04\x1f\x7f\xbc\x99\xee"
4612 "\x84\xff\x42\x60\xdc\x3a\x18\xa5"
4613 "\x81\xf9\xef\xdc\x7a\x0f\x65\x41"
4614 "\x2f\xa3\xd3\xf9\xc2\xcb\xc0\x4d"
4615 "\x8f\xd3\x76\x96\xad\x49\x6d\x38"
4616 "\x3d\x39\x0b\x6c\x80\xb7\x54\x69"
4617 "\xf0\x2c\x90\x02\x29\x0d\x1c\x12"
4618 "\xad\x55\xc3\x8b\x68\xd9\xcc\xb3"
4619 "\xb2\x64\x33\x90\x5e\xca\x4b\xe2"
4620 "\xfb\x75\xdc\x63\xf7\x9f\x82\x74"
4621 "\xf0\xc9\xaa\x7f\xe9\x2a\x9b\x33"
4622 "\xbc\x88\x00\x7f\xca\xb2\x1f\x14"
4623 "\xdb\xc5\x8e\x7b\x11\x3c\x3e\x08"
4624 "\xf3\x83\xe8\xe0\x94\x86\x2e\x92"
4625 "\x78\x6b\x01\xc9\xc7\x83\xba\x21"
4626 "\x6a\x25\x15\x33\x4e\x45\x08\xec"
4627 "\x35\xdb\xe0\x6e\x31\x51\x79\xa9"
4628 "\x42\x44\x65\xc1\xa0\xf1\xf9\x2a"
4629 "\x70\xd5\xb6\xc6\xc1\x8c\x39\xfc"
4630 "\x25\xa6\x55\xd9\xdd\x2d\x4c\xec"
4631 "\x49\xc6\xeb\x0e\xa8\x25\x2a\x16"
4632 "\x1b\x66\x84\xda\xe2\x92\xe5\xc0"
4633 "\xc8\x53\x07\xaf\x80\x84\xec\xfd"
4634 "\xcd\xd1\x6e\xcd\x6f\x6a\xf5\x36"
4635 "\xc5\x15\xe5\x25\x7d\x77\xd1\x1a"
4636 "\x93\x36\xa9\xcf\x7c\xa4\x54\x4a"
4637 "\x06\x51\x48\x4e\xf6\x59\x87\xd2"
4638 "\x04\x02\xef\xd3\x44\xde\x76\x31"
4639 "\xb3\x34\x17\x1b\x9d\x66\x11\x9f"
4640 "\x1e\xcc\x17\xe9\xc7\x3c\x1b\xe7"
4641 "\xcb\x50\x08\xfc\xdc\x2b\x24\xdb"
4642 "\x65\x83\xd0\x3b\xe3\x30\xea\x94"
4643 "\x6c\xe7\xe8\x35\x32\xc7\xdb\x64"
4644 "\xb4\x01\xab\x36\x2c\x77\x13\xaf"
4645 "\xf8\x2b\x88\x3f\x54\x39\xc4\x44"
4646 "\xfe\xef\x6f\x68\x34\xbe\x0f\x05"
4647 "\x16\x6d\xf6\x0a\x30\xe7\xe3\xed"
4648 "\xc4\xde\x3c\x1b\x13\xd8\xdb\xfe"
4649 "\x41\x62\xe5\x28\xd4\x8d\xa3\xc7"
4650 "\x93\x97\xc6\x48\x45\x1d\x9f\x83"
4651 "\xdf\x4b\x40\x3e\x42\x25\x87\x80"
4652 "\x4c\x7d\xa8\xd4\x98\x23\x95\x75"
4653 "\x41\x8c\xda\x41\x9b\xd4\xa7\x06"
4654 "\xb5\xf1\x71\x09\x53\xbe\xca\xbf"
4655 "\x32\x03\xed\xf0\x50\x1c\x56\x39"
4656 "\x5b\xa4\x75\x18\xf7\x9b\x58\xef"
4657 "\x53\xfc\x2a\x38\x23\x15\x75\xcd"
4658 "\x45\xe5\x5a\x82\x55\xba\x21\xfa"
4659 "\xd4\xbd\xc6\x94\x7c\xc5\x80\x12"
4660 "\xf7\x4b\x32\xc4\x9a\x82\xd8\x28"
4661 "\x8f\xd9\xc2\x0f\x60\x03\xbe\x5e"
4662 "\x21\xd6\x5f\x58\xbf\x5c\xb1\x32"
4663 "\x82\x8d\xa9\xe5\xf2\x66\x1a\xc0"
4664 "\xa0\xbc\x58\x2f\x71\xf5\x2f\xed"
4665 "\xd1\x26\xb9\xd8\x49\x5a\x07\x19"
4666 "\x01\x7c\x59\xb0\xf8\xa4\xb7\xd3"
4667 "\x7b\x1a\x8c\x38\xf4\x50\xa4\x59"
4668 "\xb0\xcc\x41\x0b\x88\x7f\xe5\x31"
4669 "\xb3\x42\xba\xa2\x7e\xd4\x32\x71"
4670 "\x45\x87\x48\xa9\xc2\xf2\x89\xb3"
4671 "\xe4\xa7\x7e\x52\x15\x61\xfa\xfe"
4672 "\xc9\xdd\x81\xeb\x13\xab\xab\xc3"
4673 "\x98\x59\xd8\x16\x3d\x14\x7a\x1c"
4674 "\x3c\x41\x9a\x16\x16\x9b\xd2\xd2"
4675 "\x69\x3a\x29\x23\xac\x86\x32\xa5"
4676 "\x48\x9c\x9e\xf3\x47\x77\x81\x70"
4677 "\x24\xe8\x85\xd2\xf5\xb5\xfa\xff"
4678 "\x59\x6a\xd3\x50\x59\x43\x59\xde"
4679 "\xd9\xf1\x55\xa5\x0c\xc3\x1a\x1a"
4680 "\x18\x34\x0d\x1a\x63\x33\xed\x10"
4681 "\xe0\x1d\x2a\x18\xd2\xc0\x54\xa8"
4682 "\xca\xb5\x9a\xd3\xdd\xca\x45\x84"
4683 "\x50\xe7\x0f\xfe\xa4\x99\x5a\xbe"
4684 "\x43\x2d\x9a\xcb\x92\x3f\x5a\x1d"
4685 "\x85\xd8\xc9\xdf\x68\xc9\x12\x80"
4686 "\x56\x0c\xdc\x00\xdc\x3a\x7d\x9d"
4687 "\xa3\xa2\xe8\x4d\xbf\xf9\x70\xa0"
4688 "\xa4\x13\x4f\x6b\xaf\x0a\x89\x7f"
4689 "\xda\xf0\xbf\x9b\xc8\x1d\xe5\xf8"
4690 "\x2e\x8b\x07\xb5\x73\x1b\xcc\xa2"
4691 "\xa6\xad\x30\xbc\x78\x3c\x5b\x10"
4692 "\xfa\x5e\x62\x2d\x9e\x64\xb3\x33"
4693 "\xce\xf9\x1f\x86\xe7\x8b\xa2\xb8"
4694 "\xe8\x99\x57\x8c\x11\xed\x66\xd9"
4695 "\x3c\x72\xb9\xc3\xe6\x4e\x17\x3a"
4696 "\x6a\xcb\x42\x24\x06\xed\x3e\x4e"
4697 "\xa3\xe8\x6a\x94\xda\x0d\x4e\xd5"
4698 "\x14\x19\xcf\xb6\x26\xd8\x2e\xcc"
4699 "\x64\x76\x38\x49\x4d\xfe\x30\x6d"
4700 "\xe4\xc8\x8c\x7b\xc4\xe0\x35\xba"
4701 "\x22\x6e\x76\xe1\x1a\xf2\x53\xc3"
4702 "\x28\xa2\x82\x1f\x61\x69\xad\xc1"
4703 "\x7b\x28\x4b\x1e\x6c\x85\x95\x9b"
4704 "\x51\xb5\x17\x7f\x12\x69\x8c\x24"
4705 "\xd5\xc7\x5a\x5a\x11\x54\xff\x5a"
4706 "\xf7\x16\xc3\x91\xa6\xf0\xdc\x0a"
4707 "\xb6\xa7\x4a\x0d\x7a\x58\xfe\xa5"
4708 "\xf5\xcb\x8f\x7b\x0e\xea\x57\xe7"
4709 "\xbd\x79\xd6\x1c\x88\x23\x6c\xf2"
4710 "\x4d\x29\x77\x53\x35\x6a\x00\x8d"
4711 "\xcd\xa3\x58\xbe\x77\x99\x18\xf8"
4712 "\xe6\xe1\x8f\xe9\x37\x8f\xe3\xe2"
4713 "\x5a\x8a\x93\x25\xaf\xf3\x78\x80"
4714 "\xbe\xa6\x1b\xc6\xac\x8b\x1c\x91"
4715 "\x58\xe1\x9f\x89\x35\x9d\x1d\x21"
4716 "\x29\x9f\xf4\x99\x02\x27\x0f\xa8"
4717 "\x4f\x79\x94\x2b\x33\x2c\xda\xa2"
4718 "\x26\x39\x83\x94\xef\x27\xd8\x53"
4719 "\x8f\x66\x0d\xe4\x41\x7d\x34\xcd"
4720 "\x43\x7c\x95\x0a\x53\xef\x66\xda"
4721 "\x7e\x9b\xf3\x93\xaf\xd0\x73\x71"
4722 "\xba\x40\x9b\x74\xf8\xd7\xd7\x41"
4723 "\x6d\xaf\x72\x9c\x8d\x21\x87\x3c"
4724 "\xfd\x0a\x90\xa9\x47\x96\x9e\xd3"
4725 "\x88\xee\x73\xcf\x66\x2f\x52\x56"
4726 "\x6d\xa9\x80\x4c\xe2\x6f\x62\x88"
4727 "\x3f\x0e\x54\x17\x48\x80\x5d\xd3"
4728 "\xc3\xda\x25\x3d\xa1\xc8\xcb\x9f"
4729 "\x9b\x70\xb3\xa1\xeb\x04\x52\xa1"
4730 "\xf2\x22\x0f\xfc\xc8\x18\xfa\xf9"
4731 "\x85\x9c\xf1\xac\xeb\x0c\x02\x46"
4732 "\x75\xd2\xf5\x2c\xe3\xd2\x59\x94"
4733 "\x12\xf3\x3c\xfc\xd7\x92\xfa\x36"
4734 "\xba\x61\x34\x38\x7c\xda\x48\x3e"
4735 "\x08\xc9\x39\x23\x5e\x02\x2c\x1a"
4736 "\x18\x7e\xb4\xd9\xfd\x9e\x40\x02"
4737 "\xb1\x33\x37\x32\xe7\xde\xd6\xd0"
4738 "\x7c\x58\x65\x4b\xf8\x34\x27\x9c"
4739 "\x44\xb4\xbd\xe9\xe9\x4c\x78\x7d"
4740 "\x4b\x9f\xce\xb1\xcd\x47\xa5\x37"
4741 "\xe5\x6d\xbd\xb9\x43\x94\x0a\xd4"
4742 "\xd6\xf9\x04\x5f\xb5\x66\x6c\x1a"
4743 "\x35\x12\xe3\x36\x28\x27\x36\x58"
4744 "\x01\x2b\x79\xe4\xba\x6d\x10\x7d"
4745 "\x65\xdf\x84\x95\xf4\xd5\xb6\x8f"
4746 "\x2b\x9f\x96\x00\x86\x60\xf0\x21"
4747 "\x76\xa8\x6a\x8c\x28\x1c\xb3\x6b"
4748 "\x97\xd7\xb6\x53\x2a\xcc\xab\x40"
4749 "\x9d\x62\x79\x58\x52\xe6\x65\xb7"
4750 "\xab\x55\x67\x9c\x89\x7c\x03\xb0"
4751 "\x73\x59\xc5\x81\xf5\x18\x17\x5c"
4752 "\x89\xf3\x78\x35\x44\x62\x78\x72"
4753 "\xd0\x96\xeb\x31\xe7\x87\x77\x14"
4754 "\x99\x51\xf2\x59\x26\x9e\xb5\xa6"
4755 "\x45\xfe\x6e\xbd\x07\x4c\x94\x5a"
4756 "\xa5\x7d\xfc\xf1\x2b\x77\xe2\xfe"
4757 "\x17\xd4\x84\xa0\xac\xb5\xc7\xda"
4758 "\xa9\x1a\xb6\xf3\x74\x11\xb4\x9d"
4759 "\xfb\x79\x2e\x04\x2d\x50\x28\x83"
4760 "\xbf\xc6\x52\xd3\x34\xd6\xe8\x7a"
4761 "\xb6\xea\xe7\xa8\x6c\x15\x1e\x2c"
4762 "\x57\xbc\x48\x4e\x5f\x5c\xb6\x92"
4763 "\xd2\x49\x77\x81\x6d\x90\x70\xae"
4764 "\x98\xa1\x03\x0d\x6b\xb9\x77\x14"
4765 "\xf1\x4e\x23\xd3\xf8\x68\xbd\xc2"
4766 "\xfe\x04\xb7\x5c\xc5\x17\x60\x8f"
4767 "\x65\x54\xa4\x7a\x42\xdc\x18\x0d"
4768 "\xb5\xcf\x0f\xd3\xc7\x91\x66\x1b"
4769 "\x45\x42\x27\x75\x50\xe5\xee\xb8"
4770 "\x7f\x33\x2c\xba\x4a\x92\x4d\x2c"
4771 "\x3c\xe3\x0d\x80\x01\xba\x0d\x29"
4772 "\xd8\x3c\xe9\x13\x16\x57\xe6\xea"
4773 "\x94\x52\xe7\x00\x4d\x30\xb0\x0f"
4774 "\x35\xb8\xb8\xa7\xb1\xb5\x3b\x44"
4775 "\xe1\x2f\xfd\x88\xed\x43\xe7\x52"
4776 "\x10\x93\xb3\x8a\x30\x6b\x0a\xf7"
4777 "\x23\xc6\x50\x9d\x4a\xb0\xde\xc3"
4778 "\xdc\x9b\x2f\x01\x56\x36\x09\xc5"
4779 "\x2f\x6b\xfe\xf1\xd8\x27\x45\x03"
4780 "\x30\x5e\x5c\x5b\xb4\x62\x0e\x1a"
4781 "\xa9\x21\x2b\x92\x94\x87\x62\x57"
4782 "\x4c\x10\x74\x1a\xf1\x0a\xc5\x84"
4783 "\x3b\x9e\x72\x02\xd7\xcc\x09\x56"
4784 "\xbd\x54\xc1\xf0\xc3\xe3\xb3\xf8"
4785 "\xd2\x0d\x61\xcb\xef\xce\x0d\x05"
4786 "\xb0\x98\xd9\x8e\x4f\xf9\xbc\x93"
4787 "\xa6\xea\xc8\xcf\x10\x53\x4b\xf1"
4788 "\xec\xfc\x89\xf9\x64\xb0\x22\xbf"
4789 "\x9e\x55\x46\x9f\x7c\x50\x8e\x84"
4790 "\x54\x20\x98\xd7\x6c\x40\x1e\xdb"
4791 "\x69\x34\x78\x61\x24\x21\x9c\x8a"
4792 "\xb3\x62\x31\x8b\x6e\xf5\x2a\x35"
4793 "\x86\x13\xb1\x6c\x64\x2e\x41\xa5"
4794 "\x05\xf2\x42\xba\xd2\x3a\x0d\x8e"
4795 "\x8a\x59\x94\x3c\xcf\x36\x27\x82"
4796 "\xc2\x45\xee\x58\xcd\x88\xb4\xec"
4797 "\xde\xb2\x96\x0a\xaf\x38\x6f\x88"
4798 "\xd7\xd8\xe1\xdf\xb9\x96\xa9\x0a"
4799 "\xb1\x95\x28\x86\x20\xe9\x17\x49"
4800 "\xa2\x29\x38\xaa\xa5\xe9\x6e\xf1"
4801 "\x19\x27\xc0\xd5\x2a\x22\xc3\x0b"
4802 "\xdb\x7c\x73\x10\xb9\xba\x89\x76"
4803 "\x54\xae\x7d\x71\xb3\x93\xf6\x32"
4804 "\xe6\x47\x43\x55\xac\xa0\x0d\xc2"
4805 "\x93\x27\x4a\x8e\x0e\x74\x15\xc7"
4806 "\x0b\x85\xd9\x0c\xa9\x30\x7a\x3e"
4807 "\xea\x8f\x85\x6d\x3a\x12\x4f\x72"
4808 "\x69\x58\x7a\x80\xbb\xb5\x97\xf3"
4809 "\xcf\x70\xd2\x5d\xdd\x4d\x21\x79"
4810 "\x54\x4d\xe4\x05\xe8\xbd\xc2\x62"
4811 "\xb1\x3b\x77\x1c\xd6\x5c\xf3\xa0"
4812 "\x79\x00\xa8\x6c\x29\xd9\x18\x24"
4813 "\x36\xa2\x46\xc0\x96\x65\x7f\xbd"
4814 "\x2a\xed\x36\x16\x0c\xaa\x9f\xf4"
4815 "\xc5\xb4\xe2\x12\xed\x69\xed\x4f"
4816 "\x26\x2c\x39\x52\x89\x98\xe7\x2c"
4817 "\x99\xa4\x9e\xa3\x9b\x99\x46\x7a"
4818 "\x3a\xdc\xa8\x59\xa3\xdb\xc3\x3b"
4819 "\x95\x0d\x3b\x09\x6e\xee\x83\x5d"
4820 "\x32\x4d\xed\xab\xfa\x98\x14\x4e"
4821 "\xc3\x15\x45\x53\x61\xc4\x93\xbd"
4822 "\x90\xf4\x99\x95\x4c\xe6\x76\x92"
4823 "\x29\x90\x46\x30\x92\x69\x7d\x13"
4824 "\xf2\xa5\xcd\x69\x49\x44\xb2\x0f"
4825 "\x63\x40\x36\x5f\x09\xe2\x78\xf8"
4826 "\x91\xe3\xe2\xfa\x10\xf7\xc8\x24"
4827 "\xa8\x89\x32\x5c\x37\x25\x1d\xb2"
4828 "\xea\x17\x8a\x0a\xa9\x64\xc3\x7c"
4829 "\x3c\x7c\xbd\xc6\x79\x34\xe7\xe2"
4830 "\x85\x8e\xbf\xf8\xde\x92\xa0\xae"
4831 "\x20\xc4\xf6\xbb\x1f\x38\x19\x0e"
4832 "\xe8\x79\x9c\xa1\x23\xe9\x54\x7e"
4833 "\x37\x2f\xe2\x94\x32\xaf\xa0\x23"
4834 "\x49\xe4\xc0\xb3\xac\x00\x8f\x36"
4835 "\x05\xc4\xa6\x96\xec\x05\x98\x4f"
4836 "\x96\x67\x57\x1f\x20\x86\x1b\x2d"
4837 "\x69\xe4\x29\x93\x66\x5f\xaf\x6b"
4838 "\x88\x26\x2c\x67\x02\x4b\x52\xd0"
4839 "\x83\x7a\x43\x1f\xc0\x71\x15\x25"
4840 "\x77\x65\x08\x60\x11\x76\x4c\x8d"
4841 "\xed\xa9\x27\xc6\xb1\x2a\x2c\x6a"
4842 "\x4a\x97\xf5\xc6\xb7\x70\x42\xd3"
4843 "\x03\xd1\x24\x95\xec\x6d\xab\x38"
4844 "\x72\xce\xe2\x8b\x33\xd7\x51\x09"
4845 "\xdc\x45\xe0\x09\x96\x32\xf3\xc4"
4846 "\x84\xdc\x73\x73\x2d\x1b\x11\x98"
4847 "\xc5\x0e\x69\x28\x94\xc7\xb5\x4d"
4848 "\xc8\x8a\xd0\xaa\x13\x2e\x18\x74"
4849 "\xdd\xd1\x1e\xf3\x90\xe8\xfc\x9a"
4850 "\x72\x4a\x0e\xd1\xe4\xfb\x0d\x96"
4851 "\xd1\x0c\x79\x85\x1b\x1c\xfe\xe1"
4852 "\x62\x8f\x7a\x73\x32\xab\xc8\x18"
4853 "\x69\xe3\x34\x30\xdf\x13\xa6\xe5"
4854 "\xe8\x0e\x67\x7f\x81\x11\xb4\x60"
4855 "\xc7\xbd\x79\x65\x50\xdc\xc4\x5b"
4856 "\xde\x39\xa4\x01\x72\x63\xf3\xd1"
4857 "\x64\x4e\xdf\xfc\x27\x92\x37\x0d"
4858 "\x57\xcd\x11\x4f\x11\x04\x8e\x1d"
4859 "\x16\xf7\xcd\x92\x9a\x99\x30\x14"
4860 "\xf1\x7c\x67\x1b\x1f\x41\x0b\xe8"
4861 "\x32\xe8\xb8\xc1\x4f\x54\x86\x4f"
4862 "\xe5\x79\x81\x73\xcd\x43\x59\x68"
4863 "\x73\x02\x3b\x78\x21\x72\x43\x00"
4864 "\x49\x17\xf7\x00\xaf\x68\x24\x53"
4865 "\x05\x0a\xc3\x33\xe0\x33\x3f\x69"
4866 "\xd2\x84\x2f\x0b\xed\xde\x04\xf4"
4867 "\x11\x94\x13\x69\x51\x09\x28\xde"
4868 "\x57\x5c\xef\xdc\x9a\x49\x1c\x17"
4869 "\x97\xf3\x96\xc1\x7f\x5d\x2e\x7d"
4870 "\x55\xb8\xb3\x02\x09\xb3\x1f\xe7"
4871 "\xc9\x8d\xa3\x36\x34\x8a\x77\x13"
4872 "\x30\x63\x4c\xa5\xcd\xc3\xe0\x7e"
4873 "\x05\xa1\x7b\x0c\xcb\x74\x47\x31"
4874 "\x62\x03\x43\xf1\x87\xb4\xb0\x85"
4875 "\x87\x8e\x4b\x25\xc7\xcf\xae\x4b"
4876 "\x36\x46\x3e\x62\xbc\x6f\xeb\x5f"
4877 "\x73\xac\xe6\x07\xee\xc1\xa1\xd6"
4878 "\xc4\xab\xc9\xd6\x89\x45\xe1\xf1"
4879 "\x04\x4e\x1a\x6f\xbb\x4f\x3a\xa3"
4880 "\xa0\xcb\xa3\x0a\xd8\x71\x35\x55"
4881 "\xe4\xbc\x2e\x04\x06\xe6\xff\x5b"
4882 "\x1c\xc0\x11\x7c\xc5\x17\xf3\x38"
4883 "\xcf\xe9\xba\x0f\x0e\xef\x02\xc2"
4884 "\x8d\xc6\xbc\x4b\x67\x20\x95\xd7"
4885 "\x2c\x45\x5b\x86\x44\x8c\x6f\x2e"
4886 "\x7e\x9f\x1c\x77\xba\x6b\x0e\xa3"
4887 "\x69\xdc\xab\x24\x57\x60\x47\xc1"
4888 "\xd1\xa5\x9d\x23\xe6\xb1\x37\xfe"
4889 "\x93\xd2\x4c\x46\xf9\x0c\xc6\xfb"
4890 "\xd6\x9d\x99\x69\xab\x7a\x07\x0c"
4891 "\x65\xe7\xc4\x08\x96\xe2\xa5\x01"
4892 "\x3f\x46\x07\x05\x7e\xe8\x9a\x90"
4893 "\x50\xdc\xe9\x7a\xea\xa1\x39\x6e"
4894 "\x66\xe4\x6f\xa5\x5f\xb2\xd9\x5b"
4895 "\xf5\xdb\x2a\x32\xf0\x11\x6f\x7c"
4896 "\x26\x10\x8f\x3d\x80\xe9\x58\xf7"
4897 "\xe0\xa8\x57\xf8\xdb\x0e\xce\x99"
4898 "\x63\x19\x3d\xd5\xec\x1b\x77\x69"
4899 "\x98\xf6\xe4\x5f\x67\x17\x4b\x09"
4900 "\x85\x62\x82\x70\x18\xe2\x9a\x78"
4901 "\xe2\x62\xbd\xb4\xf1\x42\xc6\xfb"
4902 "\x08\xd0\xbd\xeb\x4e\x09\xf2\xc8"
4903 "\x1e\xdc\x3d\x32\x21\x56\x9c\x4f"
4904 "\x35\xf3\x61\x06\x72\x84\xc4\x32"
4905 "\xf2\xf1\xfa\x0b\x2f\xc3\xdb\x02"
4906 "\x04\xc2\xde\x57\x64\x60\x8d\xcf"
4907 "\xcb\x86\x5d\x97\x3e\xb1\x9c\x01"
4908 "\xd6\x28\x8f\x99\xbc\x46\xeb\x05"
4909 "\xaf\x7e\xb8\x21\x2a\x56\x85\x1c"
4910 "\xb3\x71\xa0\xde\xca\x96\xf1\x78"
4911 "\x49\xa2\x99\x81\x80\x5c\x01\xf5"
4912 "\xa0\xa2\x56\x63\xe2\x70\x07\xa5"
4913 "\x95\xd6\x85\xeb\x36\x9e\xa9\x51"
4914 "\x66\x56\x5f\x1d\x02\x19\xe2\xf6"
4915 "\x4f\x73\x38\x09\x75\x64\x48\xe0"
4916 "\xf1\x7e\x0e\xe8\x9d\xf9\xed\x94"
4917 "\xfe\x16\x26\x62\x49\x74\xf4\xb0"
4918 "\xd4\xa9\x6c\xb0\xfd\x53\xe9\x81"
4919 "\xe0\x7a\xbf\xcf\xb5\xc4\x01\x81"
4920 "\x79\x99\x77\x01\x3b\xe9\xa2\xb6"
4921 "\xe6\x6a\x8a\x9e\x56\x1c\x8d\x1e"
4922 "\x8f\x06\x55\x2c\x6c\xdc\x92\x87"
4923 "\x64\x3b\x4b\x19\xa1\x13\x64\x1d"
4924 "\x4a\xe9\xc0\x00\xb8\x95\xef\x6b"
4925 "\x1a\x86\x6d\x37\x52\x02\xc2\xe0"
4926 "\xc8\xbb\x42\x0c\x02\x21\x4a\xc9"
4927 "\xef\xa0\x54\xe4\x5e\x16\x53\x81"
4928 "\x70\x62\x10\xaf\xde\xb8\xb5\xd3"
4929 "\xe8\x5e\x6c\xc3\x8a\x3e\x18\x07"
4930 "\xf2\x2f\x7d\xa7\xe1\x3d\x4e\xb4"
4931 "\x26\xa7\xa3\x93\x86\xb2\x04\x1e"
4932 "\x53\x5d\x86\xd6\xde\x65\xca\xe3"
4933 "\x4e\xc1\xcf\xef\xc8\x70\x1b\x83"
4934 "\x13\xdd\x18\x8b\x0d\x76\xd2\xf6"
4935 "\x37\x7a\x93\x7a\x50\x11\x9f\x96"
4936 "\x86\x25\xfd\xac\xdc\xbe\x18\x93"
4937 "\x19\x6b\xec\x58\x4f\xb9\x75\xa7"
4938 "\xdd\x3f\x2f\xec\xc8\x5a\x84\xab"
4939 "\xd5\xe4\x8a\x07\xf6\x4d\x23\xd6"
4940 "\x03\xfb\x03\x6a\xea\x66\xbf\xd4"
4941 "\xb1\x34\xfb\x78\xe9\x55\xdc\x7c"
4942 "\x3d\x9c\xe5\x9a\xac\xc3\x7a\x80"
4943 "\x24\x6d\xa0\xef\x25\x7c\xb7\xea"
4944 "\xce\x4d\x5f\x18\x60\xce\x87\x22"
4945 "\x66\x2f\xd5\xdd\xdd\x02\x21\x75"
4946 "\x82\xa0\x1f\x58\xc6\xd3\x62\xf7"
4947 "\x32\xd8\xaf\x1e\x07\x77\x51\x96"
4948 "\xd5\x6b\x1e\x7e\x80\x02\xe8\x67"
4949 "\xea\x17\x0b\x10\xd2\x3f\x28\x25"
4950 "\x4f\x05\x77\x02\x14\x69\xf0\x2c"
4951 "\xbe\x0c\xf1\x74\x30\xd1\xb9\x9b"
4952 "\xfc\x8c\xbb\x04\x16\xd9\xba\xc3"
4953 "\xbc\x91\x8a\xc4\x30\xa4\xb0\x12"
4954 "\x4c\x21\x87\xcb\xc9\x1d\x16\x96"
4955 "\x07\x6f\x23\x54\xb9\x6f\x79\xe5"
4956 "\x64\xc0\x64\xda\xb1\xae\xdd\x60"
4957 "\x6c\x1a\x9d\xd3\x04\x8e\x45\xb0"
4958 "\x92\x61\xd0\x48\x81\xed\x5e\x1d"
4959 "\xa0\xc9\xa4\x33\xc7\x13\x51\x5d"
4960 "\x7f\x83\x73\xb6\x70\x18\x65\x3e"
4961 "\x2f\x0e\x7a\x12\x39\x98\xab\xd8"
4962 "\x7e\x6f\xa3\xd1\xba\x56\xad\xbd"
4963 "\xf0\x03\x01\x1c\x85\x35\x9f\xeb"
4964 "\x19\x63\xa1\xaf\xfe\x2d\x35\x50"
4965 "\x39\xa0\x65\x7c\x95\x7e\x6b\xfe"
4966 "\xc1\xac\x07\x7c\x98\x4f\xbe\x57"
4967 "\xa7\x22\xec\xe2\x7e\x29\x09\x53"
4968 "\xe8\xbf\xb4\x7e\x3f\x8f\xfc\x14"
4969 "\xce\x54\xf9\x18\x58\xb5\xff\x44"
4970 "\x05\x9d\xce\x1b\xb6\x82\x23\xc8"
4971 "\x2e\xbc\x69\xbb\x4a\x29\x0f\x65"
4972 "\x94\xf0\x63\x06\x0e\xef\x8c\xbd"
4973 "\xff\xfd\xb0\x21\x6e\x57\x05\x75"
4974 "\xda\xd5\xc4\xeb\x8d\x32\xf7\x50"
4975 "\xd3\x6f\x22\xed\x5f\x8e\xa2\x5b"
4976 "\x80\x8c\xc8\x78\x40\x24\x4b\x89"
4977 "\x30\xce\x7a\x97\x0e\xc4\xaf\xef"
4978 "\x9b\xb4\xcd\x66\x74\x14\x04\x2b"
4979 "\xf7\xce\x0b\x1c\x6e\xc2\x78\x8c"
4980 "\xca\xc5\xd0\x1c\x95\x4a\x91\x2d"
4981 "\xa7\x20\xeb\x86\x52\xb7\x67\xd8"
4982 "\x0c\xd6\x04\x14\xde\x51\x74\x75"
4983 "\xe7\x11\xb4\x87\xa3\x3d\x2d\xad"
4984 "\x4f\xef\xa0\x0f\x70\x00\x6d\x13"
4985 "\x19\x1d\x41\x50\xe9\xd8\xf0\x32"
4986 "\x71\xbc\xd3\x11\xf2\xac\xbe\xaf"
4987 "\x75\x46\x65\x4e\x07\x34\x37\xa3"
4988 "\x89\xfe\x75\xd4\x70\x4c\xc6\x3f"
4989 "\x69\x24\x0e\x38\x67\x43\x8c\xde"
4990 "\x06\xb5\xb8\xe7\xc4\xf0\x41\x8f"
4991 "\xf0\xbd\x2f\x0b\xb9\x18\xf8\xde"
4992 "\x64\xb1\xdb\xee\x00\x50\x77\xe1"
4993 "\xc7\xff\xa6\xfa\xdd\x70\xf4\xe3"
4994 "\x93\xe9\x77\x35\x3d\x4b\x2f\x2b"
4995 "\x6d\x55\xf0\xfc\x88\x54\x4e\x89"
4996 "\xc1\x8a\x23\x31\x2d\x14\x2a\xb8"
4997 "\x1b\x15\xdd\x9e\x6e\x7b\xda\x05"
4998 "\x91\x7d\x62\x64\x96\x72\xde\xfc"
4999 "\xc1\xec\xf0\x23\x51\x6f\xdb\x5b"
5000 "\x1d\x08\x57\xce\x09\xb8\xf6\xcd"
5001 "\x8d\x95\xf2\x20\xbf\x0f\x20\x57"
5002 "\x98\x81\x84\x4f\x15\x5c\x76\xe7"
5003 "\x3e\x0a\x3a\x6c\xc4\x8a\xbe\x78"
5004 "\x74\x77\xc3\x09\x4b\x5d\x48\xe4"
5005 "\xc8\xcb\x0b\xea\x17\x28\xcf\xcf"
5006 "\x31\x32\x44\xa4\xe5\x0e\x1a\x98"
5007 "\x94\xc4\xf0\xff\xae\x3e\x44\xe8"
5008 "\xa5\xb3\xb5\x37\x2f\xe8\xaf\x6f"
5009 "\x28\xc1\x37\x5f\x31\xd2\xb9\x33"
5010 "\xb1\xb2\x52\x94\x75\x2c\x29\x59"
5011 "\x06\xc2\x25\xe8\x71\x65\x4e\xed"
5012 "\xc0\x9c\xb1\xbb\x25\xdc\x6c\xe7"
5013 "\x4b\xa5\x7a\x54\x7a\x60\xff\x7a"
5014 "\xe0\x50\x40\x96\x35\x63\xe4\x0b"
5015 "\x76\xbd\xa4\x65\x00\x1b\x57\x88"
5016 "\xae\xed\x39\x88\x42\x11\x3c\xed"
5017 "\x85\x67\x7d\xb9\x68\x82\xe9\x43"
5018 "\x3c\x47\x53\xfa\xe8\xf8\x9f\x1f"
5019 "\x9f\xef\x0f\xf7\x30\xd9\x30\x0e"
5020 "\xb9\x9f\x69\x18\x2f\x7e\xf8\xf8"
5021 "\xf8\x8c\x0f\xd4\x02\x4d\xea\xcd"
5022 "\x0a\x9c\x6f\x71\x6d\x5a\x4c\x60"
5023 "\xce\x20\x56\x32\xc6\xc5\x99\x1f"
5024 "\x09\xe6\x4e\x18\x1a\x15\x13\xa8"
5025 "\x7d\xb1\x6b\xc0\xb2\x6d\xf8\x26"
5026 "\x66\xf8\x3d\x18\x74\x70\x66\x7a"
5027 "\x34\x17\xde\xba\x47\xf1\x06\x18"
5028 "\xcb\xaf\xeb\x4a\x1e\x8f\xa7\x77"
5029 "\xe0\x3b\x78\x62\x66\xc9\x10\xea"
5030 "\x1f\xb7\x29\x0a\x45\xa1\x1d\x1e"
5031 "\x1d\xe2\x65\x61\x50\x9c\xd7\x05"
5032 "\xf2\x0b\x5b\x12\x61\x02\xc8\xe5"
5033 "\x63\x4f\x20\x0c\x07\x17\x33\x5e"
5034 "\x03\x9a\x53\x0f\x2e\x55\xfe\x50"
5035 "\x43\x7d\xd0\xb6\x7e\x5a\xda\xae"
5036 "\x58\xef\x15\xa9\x83\xd9\x46\xb1"
5037 "\x42\xaa\xf5\x02\x6c\xce\x92\x06"
5038 "\x1b\xdb\x66\x45\x91\x79\xc2\x2d"
5039 "\xe6\x53\xd3\x14\xfd\xbb\x44\x63"
5040 "\xc6\xd7\x3d\x7a\x0c\x75\x78\x9d"
5041 "\x5c\xa6\x39\xb3\xe5\x63\xca\x8b"
5042 "\xfe\xd3\xef\x60\x83\xf6\x8e\x70"
5043 "\xb6\x67\xc7\x77\xed\x23\xef\x4c"
5044 "\xf0\xed\x2d\x07\x59\x6f\xc1\x01"
5045 "\x34\x37\x08\xab\xd9\x1f\x09\xb1"
5046 "\xce\x5b\x17\xff\x74\xf8\x9c\xd5"
5047 "\x2c\x56\x39\x79\x0f\x69\x44\x75"
5048 "\x58\x27\x01\xc4\xbf\xa7\xa1\x1d"
5049 "\x90\x17\x77\x86\x5a\x3f\xd9\xd1"
5050 "\x0e\xa0\x10\xf8\xec\x1e\xa5\x7f"
5051 "\x5e\x36\xd1\xe3\x04\x2c\x70\xf7"
5052 "\x8e\xc0\x98\x2f\x6c\x94\x2b\x41"
5053 "\xb7\x60\x00\xb7\x2e\xb8\x02\x8d"
5054 "\xb8\xb0\xd3\x86\xba\x1d\xd7\x90"
5055 "\xd6\xb6\xe1\xfc\xd7\xd8\x28\x06"
5056 "\x63\x9b\xce\x61\x24\x79\xc0\x70"
5057 "\x52\xd0\xb6\xd4\x28\x95\x24\x87"
5058 "\x03\x1f\xb7\x9a\xda\xa3\xfb\x52"
5059 "\x5b\x68\xe7\x4c\x8c\x24\xe1\x42"
5060 "\xf7\xd5\xfd\xad\x06\x32\x9f\xba"
5061 "\xc1\xfc\xdd\xc6\xfc\xfc\xb3\x38"
5062 "\x74\x56\x58\x40\x02\x37\x52\x2c"
5063 "\x55\xcc\xb3\x9e\x7a\xe9\xd4\x38"
5064 "\x41\x5e\x0c\x35\xe2\x11\xd1\x13"
5065 "\xf8\xb7\x8d\x72\x6b\x22\x2a\xb0"
5066 "\xdb\x08\xba\x35\xb9\x3f\xc8\xd3"
5067 "\x24\x90\xec\x58\xd2\x09\xc7\x2d"
5068 "\xed\x38\x80\x36\x72\x43\x27\x49"
5069 "\x4a\x80\x8a\xa2\xe8\xd3\xda\x30"
5070 "\x7d\xb6\x82\x37\x86\x92\x86\x3e"
5071 "\x08\xb2\x28\x5a\x55\x44\x24\x7d"
5072 "\x40\x48\x8a\xb6\x89\x58\x08\xa0"
5073 "\xd6\x6d\x3a\x17\xbf\xf6\x54\xa2"
5074 "\xf5\xd3\x8c\x0f\x78\x12\x57\x8b"
5075 "\xd5\xc2\xfd\x58\x5b\x7f\x38\xe3"
5076 "\xcc\xb7\x7c\x48\xb3\x20\xe8\x81"
5077 "\x14\x32\x45\x05\xe0\xdb\x9f\x75"
5078 "\x85\xb4\x6a\xfc\x95\xe3\x54\x22"
5079 "\x12\xee\x30\xfe\xd8\x30\xef\x34"
5080 "\x50\xab\x46\x30\x98\x2f\xb7\xc0"
5081 "\x15\xa2\x83\xb6\xf2\x06\x21\xa2"
5082 "\xc3\x26\x37\x14\xd1\x4d\xb5\x10"
5083 "\x52\x76\x4d\x6a\xee\xb5\x2b\x15"
5084 "\xb7\xf9\x51\xe8\x2a\xaf\xc7\xfa"
5085 "\x77\xaf\xb0\x05\x4d\xd1\x68\x8e"
5086 "\x74\x05\x9f\x9d\x93\xa5\x3e\x7f"
5087 "\x4e\x5f\x9d\xcb\x09\xc7\x83\xe3"
5088 "\x02\x9d\x27\x1f\xef\x85\x05\x8d"
5089 "\xec\x55\x88\x0f\x0d\x7c\x4c\xe8"
5090 "\xa1\x75\xa0\xd8\x06\x47\x14\xef"
5091 "\xaa\x61\xcf\x26\x15\xad\xd8\xa3"
5092 "\xaa\x75\xf2\x78\x4a\x5a\x61\xdf"
5093 "\x8b\xc7\x04\xbc\xb2\x32\xd2\x7e"
5094 "\x42\xee\xb4\x2f\x51\xff\x7b\x2e"
5095 "\xd3\x02\xe8\xdc\x5d\x0d\x50\xdc"
5096 "\xae\xb7\x46\xf9\xa8\xe6\xd0\x16"
5097 "\xcc\xe6\x2c\x81\xc7\xad\xe9\xf0"
5098 "\x05\x72\x6d\x3d\x0a\x7a\xa9\x02"
5099 "\xac\x82\x93\x6e\xb6\x1c\x28\xfc"
5100 "\x44\x12\xfb\x73\x77\xd4\x13\x39"
5101 "\x29\x88\x8a\xf3\x5c\xa6\x36\xa0"
5102 "\x2a\xed\x7e\xb1\x1d\xd6\x4c\x6b"
5103 "\x41\x01\x18\x5d\x5d\x07\x97\xa6"
5104 "\x4b\xef\x31\x18\xea\xac\xb1\x84"
5105 "\x21\xed\xda\x86",
5106 .rlen = 4100,
5107 .np = 2,
5108 .tap = { 4064, 36 },
5109 },
5110};
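/*
 * The .np/.tap pair in the last entry above appears to describe how the
 * 4100-byte buffer is split across scatterlist segments (4064 + 36 =
 * 4100 = .ilen), so the chunked, page-crossing code path is exercised in
 * addition to the linear one.  The AEAD templates further down seem to use
 * .anp/.atap the same way for the associated data.
 */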
5111
5112static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5113 { /* From RFC 3686 */
5114 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5115 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5116 "\x00\x00\x00\x30",
5117 .klen = 20,
5118 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
5119 .input = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
5120 "\x2d\x61\x75\xa3\x26\x13\x11\xb8",
5121 .ilen = 16,
5122 .result = "Single block msg",
5123 .rlen = 16,
5124 }, {
5125 .key = "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
5126 "\x43\xd6\xce\x1f\x32\x53\x91\x63"
5127 "\x00\x6c\xb6\xdb",
5128 .klen = 20,
5129 .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
5130 .input = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
5131 "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
5132 "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
5133 "\xfc\xe6\x30\xdf\x91\x41\xbe\x28",
5134 .ilen = 32,
5135 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
5136 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5137 "\x10\x11\x12\x13\x14\x15\x16\x17"
5138 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
5139 .rlen = 32,
5140 }, {
5141 .key = "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
5142 "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
5143 "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
5144 "\x00\x00\x00\x48",
5145 .klen = 28,
5146 .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
5147 .input = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
5148 "\x4e\x79\x35\xa0\x03\xcb\xe9\x28",
5149 .ilen = 16,
5150 .result = "Single block msg",
5151 .rlen = 16,
5152 }, {
5153 .key = "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
5154 "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
5155 "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
5156 "\x00\x96\xb0\x3b",
5157 .klen = 28,
5158 .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
5159 .input = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
5160 "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
5161 "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
5162 "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00",
5163 .ilen = 32,
5164 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
5165 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5166 "\x10\x11\x12\x13\x14\x15\x16\x17"
5167 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
5168 .rlen = 32,
5169 }, {
5170 .key = "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
5171 "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
5172 "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
5173 "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
5174 "\x00\x00\x00\x60",
5175 .klen = 36,
5176 .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
5177 .input = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
5178 "\x56\x08\x63\xdc\x71\xe3\xe0\xc0",
5179 .ilen = 16,
5180 .result = "Single block msg",
5181 .rlen = 16,
5182 }, {
5183 .key = "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
5184 "\x07\x96\x36\x58\x79\xef\xf8\x86"
5185 "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
5186 "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
5187 "\x00\xfa\xac\x24",
5188 .klen = 36,
5189 .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
5190 .input = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
5191 "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
5192 "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
5193 "\x55\x30\x83\x1d\x93\x44\xaf\x1c",
5194 .ilen = 32,
5195 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
5196 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5197 "\x10\x11\x12\x13\x14\x15\x16\x17"
5198 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
5199 .rlen = 32,
5200 },
5201};
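/*
 * A minimal sketch, ours rather than tcrypt's own driver, of pushing one of
 * the aes_ctr_dec_tv_template entries above through the synchronous
 * blkcipher interface of this era.  Note that for "rfc3686(ctr(aes))" the
 * template .key is the AES key followed by the 4-byte per-flow nonce, which
 * is why .klen is 20/28/36 instead of 16/24/32.  The function name, the
 * 64-byte stack buffer and the assumption that <linux/crypto.h>,
 * <linux/scatterlist.h> and <linux/err.h> are already pulled in are all
 * ours; an encryption entry would use crypto_blkcipher_encrypt() instead.
 */
static int ctr_dec_vector_sketch(const struct cipher_testvec *tv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	char buf[64];		/* big enough for the short RFC 3686 entries */
	int ret;

	tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, (const u8 *)tv->key, tv->klen);
	if (ret)
		goto out;

	/* the CTR templates carry an explicit 8-byte IV (initial counter) */
	crypto_blkcipher_set_iv(tfm, (const u8 *)tv->iv,
				crypto_blkcipher_ivsize(tfm));

	memcpy(buf, tv->input, tv->ilen);
	sg_init_one(&sg, buf, tv->ilen);

	desc.tfm = tfm;
	desc.flags = 0;
	ret = crypto_blkcipher_decrypt(&desc, &sg, &sg, tv->ilen);
	if (!ret && memcmp(buf, tv->result, tv->rlen))
		ret = -EINVAL;
out:
	crypto_free_blkcipher(tfm);
	return ret;
}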
5202
5203static struct aead_testvec aes_gcm_enc_tv_template[] = {
5204 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5205 .key = zeroed_string,
5206 .klen = 16,
5207 .result = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
5208 "\x36\x7f\x1d\x57\xa4\xe7\x45\x5a",
5209 .rlen = 16,
5210 }, {
5211 .key = zeroed_string,
5212 .klen = 16,
5213 .input = zeroed_string,
5214 .ilen = 16,
5215 .result = "\x03\x88\xda\xce\x60\xb6\xa3\x92"
5216 "\xf3\x28\xc2\xb9\x71\xb2\xfe\x78"
5217 "\xab\x6e\x47\xd4\x2c\xec\x13\xbd"
5218 "\xf5\x3a\x67\xb2\x12\x57\xbd\xdf",
5219 .rlen = 32,
5220 }, {
5221 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5222 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5223 .klen = 16,
5224 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5225 "\xde\xca\xf8\x88",
5226 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5227 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5228 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5229 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5230 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5231 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5232 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5233 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5234 .ilen = 64,
5235 .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5236 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5237 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5238 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5239 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5240 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5241 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5242 "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
5243 "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
5244 "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4",
5245 .rlen = 80,
5246 }, {
5247 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5248 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5249 .klen = 16,
5250 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5251 "\xde\xca\xf8\x88",
5252 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5253 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5254 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5255 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5256 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5257 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5258 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5259 "\xba\x63\x7b\x39",
5260 .ilen = 60,
5261 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5262 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5263 "\xab\xad\xda\xd2",
5264 .alen = 20,
5265 .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5266 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5267 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5268 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5269 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5270 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5271 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5272 "\x3d\x58\xe0\x91"
5273 "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
5274 "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47",
5275 .rlen = 76,
5276 }, {
5277 .key = zeroed_string,
5278 .klen = 24,
5279 .result = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
5280 "\xa0\x0e\xd1\xf3\x12\x57\x24\x35",
5281 .rlen = 16,
5282 }, {
5283 .key = zeroed_string,
5284 .klen = 24,
5285 .input = zeroed_string,
5286 .ilen = 16,
5287 .result = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
5288 "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
5289 "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
5290 "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb",
5291 .rlen = 32,
5292 }, {
5293 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5294 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5295 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5296 .klen = 24,
5297 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5298 "\xde\xca\xf8\x88",
5299 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5300 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5301 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5302 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5303 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5304 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5305 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5306 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5307 .ilen = 64,
5308 .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5309 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5310 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5311 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5312 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5313 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5314 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5315 "\xcc\xda\x27\x10\xac\xad\xe2\x56"
5316 "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
5317 "\xb1\x18\x02\x4d\xb8\x67\x4a\x14",
5318 .rlen = 80,
5319 }, {
5320 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5321 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5322 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5323 .klen = 24,
5324 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5325 "\xde\xca\xf8\x88",
5326 .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5327 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5328 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5329 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5330 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5331 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5332 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5333 "\xba\x63\x7b\x39",
5334 .ilen = 60,
5335 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5336 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5337 "\xab\xad\xda\xd2",
5338 .alen = 20,
5339 .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5340 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5341 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5342 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5343 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5344 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5345 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5346 "\xcc\xda\x27\x10"
5347 "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
5348 "\x37\xba\x55\xbd\x6d\x27\x61\x8c",
5349 .rlen = 76,
5350 .np = 2,
5351 .tap = { 32, 28 },
5352 .anp = 2,
5353 .atap = { 8, 12 }
5354 }, {
5355 .key = zeroed_string,
5356 .klen = 32,
5357 .result = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
5358 "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b",
5359 .rlen = 16,
5360 }
5361};
5362
5363static struct aead_testvec aes_gcm_dec_tv_template[] = {
5364 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5365 .key = zeroed_string,
5366 .klen = 32,
5367 .input = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
5368 "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18"
5369 "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0"
5370 "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19",
5371 .ilen = 32,
5372 .result = zeroed_string,
5373 .rlen = 16,
5374 }, {
5375 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5376 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5377 "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5378 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5379 .klen = 32,
5380 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5381 "\xde\xca\xf8\x88",
5382 .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
5383 "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
5384 "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
5385 "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
5386 "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
5387 "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
5388 "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
5389 "\xbc\xc9\xf6\x62\x89\x80\x15\xad"
5390 "\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
5391 "\xec\x1a\x50\x22\x70\xe3\xcc\x6c",
5392 .ilen = 80,
5393 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5394 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5395 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5396 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5397 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5398 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5399 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5400 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5401 .rlen = 64,
5402 }, {
5403 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5404 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5405 "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5406 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5407 .klen = 32,
5408 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5409 "\xde\xca\xf8\x88",
5410 .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
5411 "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
5412 "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
5413 "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
5414 "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
5415 "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
5416 "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
5417 "\xbc\xc9\xf6\x62"
5418 "\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
5419 "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b",
5420 .ilen = 76,
5421 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5422 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5423 "\xab\xad\xda\xd2",
5424 .alen = 20,
5425 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5426 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5427 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5428 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5429 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5430 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5431 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5432 "\xba\x63\x7b\x39",
5433 .rlen = 60,
5434 .np = 2,
5435 .tap = { 48, 28 },
5436 .anp = 3,
5437 .atap = { 8, 8, 4 }
5438 }, {
5439 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5440 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5441 .klen = 16,
5442 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5443 "\xde\xca\xf8\x88",
5444 .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5445 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5446 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5447 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5448 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5449 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5450 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5451 "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
5452 "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
5453 "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4",
5454 .ilen = 80,
5455 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5456 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5457 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5458 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5459 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5460 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5461 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5462 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5463 .rlen = 64,
5464 }, {
5465 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5466 "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
5467 .klen = 16,
5468 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5469 "\xde\xca\xf8\x88",
5470 .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24"
5471 "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
5472 "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
5473 "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
5474 "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
5475 "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
5476 "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
5477 "\x3d\x58\xe0\x91"
5478 "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
5479 "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47",
5480 .ilen = 76,
5481 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5482 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5483 "\xab\xad\xda\xd2",
5484 .alen = 20,
5485 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5486 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5487 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5488 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5489 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5490 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5491 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5492 "\xba\x63\x7b\x39",
5493 .rlen = 60,
5494 }, {
5495 .key = zeroed_string,
5496 .klen = 24,
5497 .input = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
5498 "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
5499 "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
5500 "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb",
5501 .ilen = 32,
5502 .result = zeroed_string,
5503 .rlen = 16,
5504 }, {
5505 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5506 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5507 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5508 .klen = 24,
5509 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5510 "\xde\xca\xf8\x88",
5511 .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5512 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5513 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5514 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5515 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5516 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5517 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5518 "\xcc\xda\x27\x10\xac\xad\xe2\x56"
5519 "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
5520 "\xb1\x18\x02\x4d\xb8\x67\x4a\x14",
5521 .ilen = 80,
5522 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5523 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5524 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5525 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5526 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5527 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5528 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5529 "\xba\x63\x7b\x39\x1a\xaf\xd2\x55",
5530 .rlen = 64,
5531 }, {
5532 .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
5533 "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
5534 "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
5535 .klen = 24,
5536 .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
5537 "\xde\xca\xf8\x88",
5538 .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
5539 "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
5540 "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
5541 "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
5542 "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
5543 "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
5544 "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
5545 "\xcc\xda\x27\x10"
5546 "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
5547 "\x37\xba\x55\xbd\x6d\x27\x61\x8c",
5548 .ilen = 76,
5549 .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5550 "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
5551 "\xab\xad\xda\xd2",
5552 .alen = 20,
5553 .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
5554 "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
5555 "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
5556 "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
5557 "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
5558 "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
5559 "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
5560 "\xba\x63\x7b\x39",
5561 .rlen = 60,
5562 }
5563};
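/*
 * An illustrative sketch, ours rather than tcrypt's own driver, of running
 * one of the GCM entries that supplies .iv and .assoc (for example the
 * fourth encryption entry above) through the AEAD interface of this era.
 * On encryption the 16-byte tag is appended, which is why .rlen = .ilen + 16
 * in those entries and why the scatterlist below covers the extra room.
 * The function name, the buffer sizes and the assumption that the usual
 * crypto, scatterlist and err headers are included are ours; asynchronous
 * completion and most error handling are left out.
 */
static int gcm_enc_vector_sketch(const struct aead_testvec *tv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg, asg;
	char buf[128], abuf[32];
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, (const u8 *)tv->key, tv->klen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* in place: plaintext in buf, ciphertext plus 16-byte tag written back */
	memcpy(buf, tv->input, tv->ilen);
	memcpy(abuf, tv->assoc, tv->alen);
	sg_init_one(&sg, buf, tv->ilen + 16);
	sg_init_one(&asg, abuf, tv->alen);

	aead_request_set_assoc(req, &asg, tv->alen);
	aead_request_set_crypt(req, &sg, &sg, tv->ilen, (u8 *)tv->iv);

	ret = crypto_aead_encrypt(req);	/* sync-only tfm requested above */
	if (!ret && memcmp(buf, tv->result, tv->rlen))
		ret = -EINVAL;

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}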
5564
5565static struct aead_testvec aes_ccm_enc_tv_template[] = {
5566 { /* From RFC 3610 */
5567 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5568 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5569 .klen = 16,
5570 .iv = "\x01\x00\x00\x00\x03\x02\x01\x00"
5571 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5572 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5573 .alen = 8,
5574 .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5575 "\x10\x11\x12\x13\x14\x15\x16\x17"
5576 "\x18\x19\x1a\x1b\x1c\x1d\x1e",
5577 .ilen = 23,
5578 .result = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
5579 "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
5580 "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
5581 "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
5582 .rlen = 31,
5583 }, {
5584 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5585 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5586 .klen = 16,
5587 .iv = "\x01\x00\x00\x00\x07\x06\x05\x04"
5588 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5589 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5590 "\x08\x09\x0a\x0b",
5591 .alen = 12,
5592 .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5593 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5594 "\x1c\x1d\x1e\x1f",
5595 .ilen = 20,
5596 .result = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
5597 "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
5598 "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
5599 "\x7d\x9c\x2d\x93",
5600 .rlen = 28,
5601 }, {
5602 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5603 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5604 .klen = 16,
5605 .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08"
5606 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5607 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5608 .alen = 8,
5609 .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5610 "\x10\x11\x12\x13\x14\x15\x16\x17"
5611 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
5612 "\x20",
5613 .ilen = 25,
5614 .result = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
5615 "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
5616 "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
5617 "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
5618 "\x7e\x5f\x4e",
5619 .rlen = 35,
5620 }, {
5621 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5622 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5623 .klen = 16,
5624 .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
5625 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5626 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5627 "\x08\x09\x0a\x0b",
5628 .alen = 12,
5629 .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5630 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5631 "\x1c\x1d\x1e",
5632 .ilen = 19,
5633 .result = "\x07\x34\x25\x94\x15\x77\x85\x15"
5634 "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
5635 "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
5636 "\x4d\x99\x99\x88\xdd",
5637 .rlen = 29,
5638 }, {
5639 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5640 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5641 .klen = 16,
5642 .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
5643 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5644 .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
5645 .alen = 8,
5646 .input = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
5647 "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
5648 "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
5649 .ilen = 24,
5650 .result = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
5651 "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
5652 "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
5653 "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
5654 .rlen = 32,
5655 }, {
5656 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5657 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5658 .klen = 16,
5659 .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
5660 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5661 .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
5662 "\x20\xea\x60\xc0",
5663 .alen = 12,
5664 .input = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
5665 "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
5666 "\x3a\x80\x3b\xa8\x7f",
5667 .ilen = 21,
5668 .result = "\x00\x97\x69\xec\xab\xdf\x48\x62"
5669 "\x55\x94\xc5\x92\x51\xe6\x03\x57"
5670 "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
5671 "\x5a\xe0\x70\x45\x51",
5672 .rlen = 29,
5673 }, {
5674 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5675 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5676 .klen = 16,
5677 .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
5678 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5679 .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
5680 .alen = 8,
5681 .input = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
5682 "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
5683 "\x98\x09\xd6\x7d\xbe\xdd\x18",
5684 .ilen = 23,
5685 .result = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
5686 "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
5687 "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
5688 "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
5689 "\xba",
5690 .rlen = 33,
5691 },
5692};
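/*
 * The 16-byte .iv strings in the two CCM templates (above and below) appear
 * to follow the kernel's "ccm(aes)" convention rather than being a raw
 * nonce: iv[0] holds L - 1, the size of the CBC-MAC length field minus one
 * (0x01 here, since the RFC 3610 vectors use L = 2), the nonce follows, and
 * the trailing L bytes are left zeroed as counter space.
 */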
5693
5694static struct aead_testvec aes_ccm_dec_tv_template[] = {
5695 { /* From RFC 3610 */
5696 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5697 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5698 .klen = 16,
5699 .iv = "\x01\x00\x00\x00\x03\x02\x01\x00"
5700 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5701 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5702 .alen = 8,
5703 .input = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2"
5704 "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80"
5705 "\x6d\x5f\x6b\x61\xda\xc3\x84\x17"
5706 "\xe8\xd1\x2c\xfd\xf9\x26\xe0",
5707 .ilen = 31,
5708 .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5709 "\x10\x11\x12\x13\x14\x15\x16\x17"
5710 "\x18\x19\x1a\x1b\x1c\x1d\x1e",
5711 .rlen = 23,
5712 }, {
5713 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5714 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5715 .klen = 16,
5716 .iv = "\x01\x00\x00\x00\x07\x06\x05\x04"
5717 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5718 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5719 "\x08\x09\x0a\x0b",
5720 .alen = 12,
5721 .input = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb"
5722 "\x9d\x4e\x13\x12\x53\x65\x8a\xd8"
5723 "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07"
5724 "\x7d\x9c\x2d\x93",
5725 .ilen = 28,
5726 .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5727 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5728 "\x1c\x1d\x1e\x1f",
5729 .rlen = 20,
5730 }, {
5731 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5732 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5733 .klen = 16,
5734 .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08"
5735 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5736 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07",
5737 .alen = 8,
5738 .input = "\x82\x53\x1a\x60\xcc\x24\x94\x5a"
5739 "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d"
5740 "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1"
5741 "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1"
5742 "\x7e\x5f\x4e",
5743 .ilen = 35,
5744 .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
5745 "\x10\x11\x12\x13\x14\x15\x16\x17"
5746 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
5747 "\x20",
5748 .rlen = 25,
5749 }, {
5750 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5751 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5752 .klen = 16,
5753 .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09"
5754 "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00",
5755 .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
5756 "\x08\x09\x0a\x0b",
5757 .alen = 12,
5758 .input = "\x07\x34\x25\x94\x15\x77\x85\x15"
5759 "\x2b\x07\x40\x98\x33\x0a\xbb\x14"
5760 "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b"
5761 "\x4d\x99\x99\x88\xdd",
5762 .ilen = 29,
5763 .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
5764 "\x14\x15\x16\x17\x18\x19\x1a\x1b"
5765 "\x1c\x1d\x1e",
5766 .rlen = 19,
5767 }, {
5768 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5769 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5770 .klen = 16,
5771 .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63"
5772 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5773 .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb",
5774 .alen = 8,
5775 .input = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa"
5776 "\xa0\x72\x6c\x55\xd3\x78\x06\x12"
5777 "\x98\xc8\x5c\x92\x81\x4a\xbc\x33"
5778 "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a",
5779 .ilen = 32,
5780 .result = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a"
5781 "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf"
5782 "\xb7\x9c\x70\x28\x94\x9c\xd0\xec",
5783 .rlen = 24,
5784 }, {
5785 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5786 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5787 .klen = 16,
5788 .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70"
5789 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5790 .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81"
5791 "\x20\xea\x60\xc0",
5792 .alen = 12,
5793 .input = "\x00\x97\x69\xec\xab\xdf\x48\x62"
5794 "\x55\x94\xc5\x92\x51\xe6\x03\x57"
5795 "\x22\x67\x5e\x04\xc8\x47\x09\x9e"
5796 "\x5a\xe0\x70\x45\x51",
5797 .ilen = 29,
5798 .result = "\x64\x35\xac\xba\xfb\x11\xa8\x2e"
5799 "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9"
5800 "\x3a\x80\x3b\xa8\x7f",
5801 .rlen = 21,
5802 }, {
5803 .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3"
5804 "\x25\xa7\x62\x36\xdf\x93\xcc\x6b",
5805 .klen = 16,
5806 .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c"
5807 "\x3c\x96\x96\x76\x6c\xfa\x00\x00",
5808 .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8",
5809 .alen = 8,
5810 .input = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6"
5811 "\xdb\x38\x6a\x99\xac\x1a\xef\x23"
5812 "\xad\xe0\xb5\x29\x39\xcb\x6a\x63"
5813 "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6"
5814 "\xba",
5815 .ilen = 33,
5816 .result = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01"
5817 "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
5818 "\x98\x09\xd6\x7d\xbe\xdd\x18",
5819 .rlen = 23,
5820 },
5821};
5822
5823/* Cast5 test vectors from RFC 2144 */
5824#define CAST5_ENC_TEST_VECTORS 3
5825#define CAST5_DEC_TEST_VECTORS 3
5826
5827static struct cipher_testvec cast5_enc_tv_template[] = {
5828 {
5829 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5830 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5831 .klen = 16,
5832 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5833 .ilen = 8,
5834 .result = "\x23\x8b\x4f\xe5\x84\x7e\x44\xb2",
5835 .rlen = 8,
5836 }, {
5837 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5838 "\x23\x45",
5839 .klen = 10,
5840 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5841 .ilen = 8,
5842 .result = "\xeb\x6a\x71\x1a\x2c\x02\x27\x1b",
5843 .rlen = 8,
5844 }, {
5845 .key = "\x01\x23\x45\x67\x12",
5846 .klen = 5,
5847 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5848 .ilen = 8,
5849 .result = "\x7a\xc8\x16\xd1\x6e\x9b\x30\x2e",
5850 .rlen = 8,
5851 },
5852};
5853
5854static struct cipher_testvec cast5_dec_tv_template[] = {
5855 {
5856 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5857 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5858 .klen = 16,
5859 .input = "\x23\x8b\x4f\xe5\x84\x7e\x44\xb2",
5860 .ilen = 8,
5861 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5862 .rlen = 8,
5863 }, {
5864 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5865 "\x23\x45",
5866 .klen = 10,
5867 .input = "\xeb\x6a\x71\x1a\x2c\x02\x27\x1b",
5868 .ilen = 8,
5869 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5870 .rlen = 8,
5871 }, {
5872 .key = "\x01\x23\x45\x67\x12",
5873 .klen = 5,
5874 .input = "\x7a\xc8\x16\xd1\x6e\x9b\x30\x2e",
5875 .ilen = 8,
5876 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5877 .rlen = 8,
5878 },
5879};
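/*
 * A minimal sketch, ours, of checking the first RFC 2144 vector above with
 * the single-block cipher interface, the simplest way to exercise a 64-bit
 * block cipher such as CAST5 with no scatterlists involved.  The function
 * name and the assumption that <linux/crypto.h> and <linux/err.h> are
 * available are ours.
 */
static int cast5_vector_sketch(const struct cipher_testvec *tv)
{
	struct crypto_cipher *tfm;
	u8 block[8];
	int ret;

	tfm = crypto_alloc_cipher("cast5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, (const u8 *)tv->key, tv->klen);
	if (!ret) {
		crypto_cipher_encrypt_one(tfm, block, (const u8 *)tv->input);
		if (memcmp(block, tv->result, tv->rlen))
			ret = -EINVAL;
	}

	crypto_free_cipher(tfm);
	return ret;
}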
5880
5881/*
5882 * ARC4 test vectors from OpenSSL
5883 */
5884#define ARC4_ENC_TEST_VECTORS 7
5885#define ARC4_DEC_TEST_VECTORS 7
5886
5887static struct cipher_testvec arc4_enc_tv_template[] = {
5888 {
5889 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5890 .klen = 8,
5891 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5892 .ilen = 8,
5893 .result = "\x75\xb7\x87\x80\x99\xe0\xc5\x96",
5894 .rlen = 8,
5895 }, {
5896 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5897 .klen = 8,
5898 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
5899 .ilen = 8,
5900 .result = "\x74\x94\xc2\xe7\x10\x4b\x08\x79",
5901 .rlen = 8,
5902 }, {
5903 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
5904 .klen = 8,
5905 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
5906 .ilen = 8,
5907 .result = "\xde\x18\x89\x41\xa3\x37\x5d\x3a",
5908 .rlen = 8,
5909 }, {
5910 .key = "\xef\x01\x23\x45",
5911 .klen = 4,
5912 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5913 "\x00\x00\x00\x00\x00\x00\x00\x00"
5914 "\x00\x00\x00\x00",
5915 .ilen = 20,
5916 .result = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
5917 "\xbd\x61\x5a\x11\x62\xe1\xc7\xba"
5918 "\x36\xb6\x78\x58",
5919 .rlen = 20,
5920 }, {
5921 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5922 .klen = 8,
5923 .input = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5924 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5925 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5926 "\x12\x34\x56\x78",
5927 .ilen = 28,
5928 .result = "\x66\xa0\x94\x9f\x8a\xf7\xd6\x89"
5929 "\x1f\x7f\x83\x2b\xa8\x33\xc0\x0c"
5930 "\x89\x2e\xbe\x30\x14\x3c\xe2\x87"
5931 "\x40\x01\x1e\xcf",
5932 .rlen = 28,
5933 }, {
5934 .key = "\xef\x01\x23\x45",
5935 .klen = 4,
5936 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5937 "\x00\x00",
5938 .ilen = 10,
5939 .result = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
5940 "\xbd\x61",
5941 .rlen = 10,
5942 }, {
5943 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
5944 "\x00\x00\x00\x00\x00\x00\x00\x00",
5945 .klen = 16,
5946 .input = "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
5947 .ilen = 8,
5948 .result = "\x69\x72\x36\x59\x1B\x52\x42\xB1",
5949 .rlen = 8,
5950 },
5951};
5952
5953static struct cipher_testvec arc4_dec_tv_template[] = {
5954 {
5955 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5956 .klen = 8,
5957 .input = "\x75\xb7\x87\x80\x99\xe0\xc5\x96",
5958 .ilen = 8,
5959 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5960 .rlen = 8,
5961 }, {
5962 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5963 .klen = 8,
5964 .input = "\x74\x94\xc2\xe7\x10\x4b\x08\x79",
5965 .ilen = 8,
5966 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
5967 .rlen = 8,
5968 }, {
5969 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
5970 .klen = 8,
5971 .input = "\xde\x18\x89\x41\xa3\x37\x5d\x3a",
5972 .ilen = 8,
5973 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
5974 .rlen = 8,
5975 }, {
5976 .key = "\xef\x01\x23\x45",
5977 .klen = 4,
5978 .input = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
5979 "\xbd\x61\x5a\x11\x62\xe1\xc7\xba"
5980 "\x36\xb6\x78\x58",
5981 .ilen = 20,
5982 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
5983 "\x00\x00\x00\x00\x00\x00\x00\x00"
5984 "\x00\x00\x00\x00",
5985 .rlen = 20,
5986 }, {
5987 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5988 .klen = 8,
5989 .input = "\x66\xa0\x94\x9f\x8a\xf7\xd6\x89"
5990 "\x1f\x7f\x83\x2b\xa8\x33\xc0\x0c"
5991 "\x89\x2e\xbe\x30\x14\x3c\xe2\x87"
5992 "\x40\x01\x1e\xcf",
5993 .ilen = 28,
5994 .result = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5995 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5996 "\x12\x34\x56\x78\x9A\xBC\xDE\xF0"
5997 "\x12\x34\x56\x78",
5998 .rlen = 28,
5999 }, {
6000 .key = "\xef\x01\x23\x45",
6001 .klen = 4,
6002 .input = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf"
6003 "\xbd\x61",
6004 .ilen = 10,
6005 .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
6006 "\x00\x00",
6007 .rlen = 10,
6008 }, {
6009 .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
6010 "\x00\x00\x00\x00\x00\x00\x00\x00",
6011 .klen = 16,
6012 .input = "\x69\x72\x36\x59\x1B\x52\x42\xB1",
6013 .ilen = 8,
6014 .result = "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
6015 .rlen = 8,
6016 },
6017};
6018
6019/*
6020 * TEA test vectors
6021 */
6022#define TEA_ENC_TEST_VECTORS 4
6023#define TEA_DEC_TEST_VECTORS 4
6024
6025static struct cipher_testvec tea_enc_tv_template[] = {
6026 {
6027 .key = zeroed_string,
6028 .klen = 16,
6029 .input = zeroed_string,
6030 .ilen = 8,
6031 .result = "\x0a\x3a\xea\x41\x40\xa9\xba\x94",
6032 .rlen = 8,
6033 }, {
6034 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6035 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6036 .klen = 16,
6037 .input = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6038 .ilen = 8,
6039 .result = "\x77\x5d\x2a\x6a\xf6\xce\x92\x09",
6040 .rlen = 8,
6041 }, {
6042 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6043 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6044 .klen = 16,
6045 .input = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6046 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6047 .ilen = 16,
6048 .result = "\xbe\x7a\xbb\x81\x95\x2d\x1f\x1e"
6049 "\xdd\x89\xa1\x25\x04\x21\xdf\x95",
6050 .rlen = 16,
6051 }, {
6052 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6053 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6054 .klen = 16,
6055 .input = "\x54\x65\x61\x20\x69\x73\x20\x67"
6056 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6057 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6058 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6059 .ilen = 32,
6060 .result = "\xe0\x4d\x5d\x3c\xb7\x8c\x36\x47"
6061 "\x94\x18\x95\x91\xa9\xfc\x49\xf8"
6062 "\x44\xd1\x2d\xc2\x99\xb8\x08\x2a"
6063 "\x07\x89\x73\xc2\x45\x92\xc6\x90",
6064 .rlen = 32,
6065 }
6066};
6067
6068static struct cipher_testvec tea_dec_tv_template[] = {
6069 {
6070 .key = zeroed_string,
6071 .klen = 16,
6072 .input = "\x0a\x3a\xea\x41\x40\xa9\xba\x94",
6073 .ilen = 8,
6074 .result = zeroed_string,
6075 .rlen = 8,
6076 }, {
6077 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6078 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6079 .klen = 16,
6080 .input = "\x77\x5d\x2a\x6a\xf6\xce\x92\x09",
6081 .ilen = 8,
6082 .result = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6083 .rlen = 8,
6084 }, {
6085 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6086 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6087 .klen = 16,
6088 .input = "\xbe\x7a\xbb\x81\x95\x2d\x1f\x1e"
6089 "\xdd\x89\xa1\x25\x04\x21\xdf\x95",
6090 .ilen = 16,
6091 .result = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6092 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6093 .rlen = 16,
6094 }, {
6095 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6096 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6097 .klen = 16,
6098 .input = "\xe0\x4d\x5d\x3c\xb7\x8c\x36\x47"
6099 "\x94\x18\x95\x91\xa9\xfc\x49\xf8"
6100 "\x44\xd1\x2d\xc2\x99\xb8\x08\x2a"
6101 "\x07\x89\x73\xc2\x45\x92\xc6\x90",
6102 .ilen = 32,
6103 .result = "\x54\x65\x61\x20\x69\x73\x20\x67"
6104 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6105 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6106 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6107 .rlen = 32,
6108 }
6109};
6110
6111/*
6112 * XTEA test vectors
6113 */
6114#define XTEA_ENC_TEST_VECTORS 4
6115#define XTEA_DEC_TEST_VECTORS 4
6116
6117static struct cipher_testvec xtea_enc_tv_template[] = {
6118 {
6119 .key = zeroed_string,
6120 .klen = 16,
6121 .input = zeroed_string,
6122 .ilen = 8,
6123 .result = "\xd8\xd4\xe9\xde\xd9\x1e\x13\xf7",
6124 .rlen = 8,
6125 }, {
6126 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6127 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6128 .klen = 16,
6129 .input = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6130 .ilen = 8,
6131 .result = "\x94\xeb\xc8\x96\x84\x6a\x49\xa8",
6132 .rlen = 8,
6133 }, {
6134 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6135 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6136 .klen = 16,
6137 .input = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6138 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6139 .ilen = 16,
6140 .result = "\x3e\xce\xae\x22\x60\x56\xa8\x9d"
6141 "\x77\x4d\xd4\xb4\x87\x24\xe3\x9a",
6142 .rlen = 16,
6143 }, {
6144 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6145 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6146 .klen = 16,
6147 .input = "\x54\x65\x61\x20\x69\x73\x20\x67"
6148 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6149 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6150 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6151 .ilen = 32,
6152 .result = "\x99\x81\x9f\x5d\x6f\x4b\x31\x3a"
6153 "\x86\xff\x6f\xd0\xe3\x87\x70\x07"
6154 "\x4d\xb8\xcf\xf3\x99\x50\xb3\xd4"
6155 "\x73\xa2\xfa\xc9\x16\x59\x5d\x81",
6156 .rlen = 32,
6157 }
6158};
6159
6160static struct cipher_testvec xtea_dec_tv_template[] = {
6161 {
6162 .key = zeroed_string,
6163 .klen = 16,
6164 .input = "\xd8\xd4\xe9\xde\xd9\x1e\x13\xf7",
6165 .ilen = 8,
6166 .result = zeroed_string,
6167 .rlen = 8,
6168 }, {
6169 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6170 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6171 .klen = 16,
6172 .input = "\x94\xeb\xc8\x96\x84\x6a\x49\xa8",
6173 .ilen = 8,
6174 .result = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6175 .rlen = 8,
6176 }, {
6177 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6178 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6179 .klen = 16,
6180 .input = "\x3e\xce\xae\x22\x60\x56\xa8\x9d"
6181 "\x77\x4d\xd4\xb4\x87\x24\xe3\x9a",
6182 .ilen = 16,
6183 .result = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6184 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6185 .rlen = 16,
6186 }, {
6187 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6188 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6189 .klen = 16,
6190 .input = "\x99\x81\x9f\x5d\x6f\x4b\x31\x3a"
6191 "\x86\xff\x6f\xd0\xe3\x87\x70\x07"
6192 "\x4d\xb8\xcf\xf3\x99\x50\xb3\xd4"
6193 "\x73\xa2\xfa\xc9\x16\x59\x5d\x81",
6194 .ilen = 32,
6195 .result = "\x54\x65\x61\x20\x69\x73\x20\x67"
6196 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6197 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6198 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6199 .rlen = 32,
6200 }
6201};
6202
6203/*
6204 * KHAZAD test vectors.
6205 */
6206#define KHAZAD_ENC_TEST_VECTORS 5
6207#define KHAZAD_DEC_TEST_VECTORS 5
6208
6209static struct cipher_testvec khazad_enc_tv_template[] = {
6210 {
6211 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
6212 "\x00\x00\x00\x00\x00\x00\x00\x00",
6213 .klen = 16,
6214 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
6215 .ilen = 8,
6216 .result = "\x49\xa4\xce\x32\xac\x19\x0e\x3f",
6217 .rlen = 8,
6218 }, {
6219 .key = "\x38\x38\x38\x38\x38\x38\x38\x38"
6220 "\x38\x38\x38\x38\x38\x38\x38\x38",
6221 .klen = 16,
6222 .input = "\x38\x38\x38\x38\x38\x38\x38\x38",
6223 .ilen = 8,
6224 .result = "\x7e\x82\x12\xa1\xd9\x5b\xe4\xf9",
6225 .rlen = 8,
6226 }, {
6227 .key = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2"
6228 "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6229 .klen = 16,
6230 .input = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6231 .ilen = 8,
6232 .result = "\xaa\xbe\xc1\x95\xc5\x94\x1a\x9c",
6233 .rlen = 8,
6234 }, {
6235 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6236 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6237 .klen = 16,
6238 .input = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6239 .ilen = 8,
6240 .result = "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6241 .rlen = 8,
6242 }, {
6243 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6244 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6245 .klen = 16,
6246 .input = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6247 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6248 .ilen = 16,
6249 .result = "\x04\x74\xf5\x70\x50\x16\xd3\xb8"
6250 "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6251 .rlen = 16,
6252 },
6253};
6254
6255static struct cipher_testvec khazad_dec_tv_template[] = {
6256 {
6257 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
6258 "\x00\x00\x00\x00\x00\x00\x00\x00",
6259 .klen = 16,
6260 .input = "\x49\xa4\xce\x32\xac\x19\x0e\x3f",
6261 .ilen = 8,
6262 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
6263 .rlen = 8,
6264 }, {
6265 .key = "\x38\x38\x38\x38\x38\x38\x38\x38"
6266 "\x38\x38\x38\x38\x38\x38\x38\x38",
6267 .klen = 16,
6268 .input = "\x7e\x82\x12\xa1\xd9\x5b\xe4\xf9",
6269 .ilen = 8,
6270 .result = "\x38\x38\x38\x38\x38\x38\x38\x38",
6271 .rlen = 8,
6272 }, {
6273 .key = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2"
6274 "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6275 .klen = 16,
6276 .input = "\xaa\xbe\xc1\x95\xc5\x94\x1a\x9c",
6277 .ilen = 8,
6278 .result = "\xa2\xa2\xa2\xa2\xa2\xa2\xa2\xa2",
6279 .rlen = 8,
6280 }, {
6281 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6282 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6283 .klen = 16,
6284 .input = "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6285 .ilen = 8,
6286 .result = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6287 .rlen = 8,
6288 }, {
6289 .key = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6290 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6291 .klen = 16,
6292 .input = "\x04\x74\xf5\x70\x50\x16\xd3\xb8"
6293 "\x04\x74\xf5\x70\x50\x16\xd3\xb8",
6294 .ilen = 16,
6295 .result = "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f"
6296 "\x2f\x2f\x2f\x2f\x2f\x2f\x2f\x2f",
6297 .rlen = 16,
6298 },
6299};
6300
6301/*
6302 * Anubis test vectors.
6303 */
6304
6305#define ANUBIS_ENC_TEST_VECTORS 5
6306#define ANUBIS_DEC_TEST_VECTORS 5
6307#define ANUBIS_CBC_ENC_TEST_VECTORS 2
6308#define ANUBIS_CBC_DEC_TEST_VECTORS 2
6309
6310static struct cipher_testvec anubis_enc_tv_template[] = {
6311 {
6312 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6313 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6314 .klen = 16,
6315 .input = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6316 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6317 .ilen = 16,
6318 .result = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6319 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90",
6320 .rlen = 16,
6321 }, {
6322
6323 .key = "\x03\x03\x03\x03\x03\x03\x03\x03"
6324 "\x03\x03\x03\x03\x03\x03\x03\x03"
6325 "\x03\x03\x03\x03",
6326 .klen = 20,
6327 .input = "\x03\x03\x03\x03\x03\x03\x03\x03"
6328 "\x03\x03\x03\x03\x03\x03\x03\x03",
6329 .ilen = 16,
6330 .result = "\xdb\xf1\x42\xf4\xd1\x8a\xc7\x49"
6331 "\x87\x41\x6f\x82\x0a\x98\x64\xae",
6332 .rlen = 16,
6333 }, {
6334 .key = "\x24\x24\x24\x24\x24\x24\x24\x24"
6335 "\x24\x24\x24\x24\x24\x24\x24\x24"
6336 "\x24\x24\x24\x24\x24\x24\x24\x24"
6337 "\x24\x24\x24\x24",
6338 .klen = 28,
6339 .input = "\x24\x24\x24\x24\x24\x24\x24\x24"
6340 "\x24\x24\x24\x24\x24\x24\x24\x24",
6341 .ilen = 16,
6342 .result = "\xfd\x1b\x4a\xe3\xbf\xf0\xad\x3d"
6343 "\x06\xd3\x61\x27\xfd\x13\x9e\xde",
6344 .rlen = 16,
6345 }, {
6346 .key = "\x25\x25\x25\x25\x25\x25\x25\x25"
6347 "\x25\x25\x25\x25\x25\x25\x25\x25"
6348 "\x25\x25\x25\x25\x25\x25\x25\x25"
6349 "\x25\x25\x25\x25\x25\x25\x25\x25",
6350 .klen = 32,
6351 .input = "\x25\x25\x25\x25\x25\x25\x25\x25"
6352 "\x25\x25\x25\x25\x25\x25\x25\x25",
6353 .ilen = 16,
6354 .result = "\x1a\x91\xfb\x2b\xb7\x78\x6b\xc4"
6355 "\x17\xd9\xff\x40\x3b\x0e\xe5\xfe",
6356 .rlen = 16,
6357 }, {
6358 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6359 "\x35\x35\x35\x35\x35\x35\x35\x35"
6360 "\x35\x35\x35\x35\x35\x35\x35\x35"
6361 "\x35\x35\x35\x35\x35\x35\x35\x35"
6362 "\x35\x35\x35\x35\x35\x35\x35\x35",
6363 .klen = 40,
6364 .input = "\x35\x35\x35\x35\x35\x35\x35\x35"
6365 "\x35\x35\x35\x35\x35\x35\x35\x35",
6366 .ilen = 16,
6367 .result = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6368 "\x9e\xc6\x84\x0f\x17\x21\x07\xee",
6369 .rlen = 16,
6370 },
6371};
6372
6373static struct cipher_testvec anubis_dec_tv_template[] = {
6374 {
6375 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6376 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6377 .klen = 16,
6378 .input = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6379 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90",
6380 .ilen = 16,
6381 .result = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6382 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6383 .rlen = 16,
6384 }, {
6385
6386 .key = "\x03\x03\x03\x03\x03\x03\x03\x03"
6387 "\x03\x03\x03\x03\x03\x03\x03\x03"
6388 "\x03\x03\x03\x03",
6389 .klen = 20,
6390 .input = "\xdb\xf1\x42\xf4\xd1\x8a\xc7\x49"
6391 "\x87\x41\x6f\x82\x0a\x98\x64\xae",
6392 .ilen = 16,
6393 .result = "\x03\x03\x03\x03\x03\x03\x03\x03"
6394 "\x03\x03\x03\x03\x03\x03\x03\x03",
6395 .rlen = 16,
6396 }, {
6397 .key = "\x24\x24\x24\x24\x24\x24\x24\x24"
6398 "\x24\x24\x24\x24\x24\x24\x24\x24"
6399 "\x24\x24\x24\x24\x24\x24\x24\x24"
6400 "\x24\x24\x24\x24",
6401 .klen = 28,
6402 .input = "\xfd\x1b\x4a\xe3\xbf\xf0\xad\x3d"
6403 "\x06\xd3\x61\x27\xfd\x13\x9e\xde",
6404 .ilen = 16,
6405 .result = "\x24\x24\x24\x24\x24\x24\x24\x24"
6406 "\x24\x24\x24\x24\x24\x24\x24\x24",
6407 .rlen = 16,
6408 }, {
6409 .key = "\x25\x25\x25\x25\x25\x25\x25\x25"
6410 "\x25\x25\x25\x25\x25\x25\x25\x25"
6411 "\x25\x25\x25\x25\x25\x25\x25\x25"
6412 "\x25\x25\x25\x25\x25\x25\x25\x25",
6413 .klen = 32,
6414 .input = "\x1a\x91\xfb\x2b\xb7\x78\x6b\xc4"
6415 "\x17\xd9\xff\x40\x3b\x0e\xe5\xfe",
6416 .ilen = 16,
6417 .result = "\x25\x25\x25\x25\x25\x25\x25\x25"
6418 "\x25\x25\x25\x25\x25\x25\x25\x25",
6419 .rlen = 16,
6420 }, {
6421 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6422 "\x35\x35\x35\x35\x35\x35\x35\x35"
6423 "\x35\x35\x35\x35\x35\x35\x35\x35"
6424 "\x35\x35\x35\x35\x35\x35\x35\x35"
6425 "\x35\x35\x35\x35\x35\x35\x35\x35",
6426 .klen = 40,
6427 .input = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6428 "\x9e\xc6\x84\x0f\x17\x21\x07\xee",
6429 .ilen = 16,
6430 .result = "\x35\x35\x35\x35\x35\x35\x35\x35"
6431 "\x35\x35\x35\x35\x35\x35\x35\x35",
6432 .rlen = 16,
6433 },
6434};
6435
6436static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
6437 {
6438 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6439 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6440 .klen = 16,
6441 .input = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6442 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6443 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6444 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6445 .ilen = 32,
6446 .result = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6447 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90"
6448 "\x86\xd8\xb5\x6f\x98\x5e\x8a\x66"
6449 "\x4f\x1f\x78\xa1\xbb\x37\xf1\xbe",
6450 .rlen = 32,
6451 }, {
6452 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6453 "\x35\x35\x35\x35\x35\x35\x35\x35"
6454 "\x35\x35\x35\x35\x35\x35\x35\x35"
6455 "\x35\x35\x35\x35\x35\x35\x35\x35"
6456 "\x35\x35\x35\x35\x35\x35\x35\x35",
6457 .klen = 40,
6458 .input = "\x35\x35\x35\x35\x35\x35\x35\x35"
6459 "\x35\x35\x35\x35\x35\x35\x35\x35"
6460 "\x35\x35\x35\x35\x35\x35\x35\x35"
6461 "\x35\x35\x35\x35\x35\x35\x35\x35",
6462 .ilen = 32,
6463 .result = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6464 "\x9e\xc6\x84\x0f\x17\x21\x07\xee"
6465 "\xa2\xbc\x06\x98\xc6\x4b\xda\x75"
6466 "\x2e\xaa\xbe\x58\xce\x01\x5b\xc7",
6467 .rlen = 32,
6468 },
6469};
6470
6471static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
6472 {
6473 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6474 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6475 .klen = 16,
6476 .input = "\x6d\xc5\xda\xa2\x26\x7d\x62\x6f"
6477 "\x08\xb7\x52\x8e\x6e\x6e\x86\x90"
6478 "\x86\xd8\xb5\x6f\x98\x5e\x8a\x66"
6479 "\x4f\x1f\x78\xa1\xbb\x37\xf1\xbe",
6480 .ilen = 32,
6481 .result = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6482 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6483 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
6484 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
6485 .rlen = 32,
6486 }, {
6487 .key = "\x35\x35\x35\x35\x35\x35\x35\x35"
6488 "\x35\x35\x35\x35\x35\x35\x35\x35"
6489 "\x35\x35\x35\x35\x35\x35\x35\x35"
6490 "\x35\x35\x35\x35\x35\x35\x35\x35"
6491 "\x35\x35\x35\x35\x35\x35\x35\x35",
6492 .klen = 40,
6493 .input = "\xa5\x2c\x85\x6f\x9c\xba\xa0\x97"
6494 "\x9e\xc6\x84\x0f\x17\x21\x07\xee"
6495 "\xa2\xbc\x06\x98\xc6\x4b\xda\x75"
6496 "\x2e\xaa\xbe\x58\xce\x01\x5b\xc7",
6497 .ilen = 32,
6498 .result = "\x35\x35\x35\x35\x35\x35\x35\x35"
6499 "\x35\x35\x35\x35\x35\x35\x35\x35"
6500 "\x35\x35\x35\x35\x35\x35\x35\x35"
6501 "\x35\x35\x35\x35\x35\x35\x35\x35",
6502 .rlen = 32,
6503 },
6504};
6505
6506/*
6507 * XETA test vectors
6508 */
6509#define XETA_ENC_TEST_VECTORS 4
6510#define XETA_DEC_TEST_VECTORS 4
6511
6512static struct cipher_testvec xeta_enc_tv_template[] = {
6513 {
6514 .key = zeroed_string,
6515 .klen = 16,
6516 .input = zeroed_string,
6517 .ilen = 8,
6518 .result = "\xaa\x22\x96\xe5\x6c\x61\xf3\x45",
6519 .rlen = 8,
6520 }, {
6521 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6522 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6523 .klen = 16,
6524 .input = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6525 .ilen = 8,
6526 .result = "\x82\x3e\xeb\x35\xdc\xdd\xd9\xc3",
6527 .rlen = 8,
6528 }, {
6529 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6530 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6531 .klen = 16,
6532 .input = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6533 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6534 .ilen = 16,
6535 .result = "\xe2\x04\xdb\xf2\x89\x85\x9e\xea"
6536 "\x61\x35\xaa\xed\xb5\xcb\x71\x2c",
6537 .rlen = 16,
6538 }, {
6539 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6540 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6541 .klen = 16,
6542 .input = "\x54\x65\x61\x20\x69\x73\x20\x67"
6543 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6544 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6545 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6546 .ilen = 32,
6547 .result = "\x0b\x03\xcd\x8a\xbe\x95\xfd\xb1"
6548 "\xc1\x44\x91\x0b\xa5\xc9\x1b\xb4"
6549 "\xa9\xda\x1e\x9e\xb1\x3e\x2a\x8f"
6550 "\xea\xa5\x6a\x85\xd1\xf4\xa8\xa5",
6551 .rlen = 32,
6552 }
6553};
6554
6555static struct cipher_testvec xeta_dec_tv_template[] = {
6556 {
6557 .key = zeroed_string,
6558 .klen = 16,
6559 .input = "\xaa\x22\x96\xe5\x6c\x61\xf3\x45",
6560 .ilen = 8,
6561 .result = zeroed_string,
6562 .rlen = 8,
6563 }, {
6564 .key = "\x2b\x02\x05\x68\x06\x14\x49\x76"
6565 "\x77\x5d\x0e\x26\x6c\x28\x78\x43",
6566 .klen = 16,
6567 .input = "\x82\x3e\xeb\x35\xdc\xdd\xd9\xc3",
6568 .ilen = 8,
6569 .result = "\x74\x65\x73\x74\x20\x6d\x65\x2e",
6570 .rlen = 8,
6571 }, {
6572 .key = "\x09\x65\x43\x11\x66\x44\x39\x25"
6573 "\x51\x3a\x16\x10\x0a\x08\x12\x6e",
6574 .klen = 16,
6575 .input = "\xe2\x04\xdb\xf2\x89\x85\x9e\xea"
6576 "\x61\x35\xaa\xed\xb5\xcb\x71\x2c",
6577 .ilen = 16,
6578 .result = "\x6c\x6f\x6e\x67\x65\x72\x5f\x74"
6579 "\x65\x73\x74\x5f\x76\x65\x63\x74",
6580 .rlen = 16,
6581 }, {
6582 .key = "\x4d\x76\x32\x17\x05\x3f\x75\x2c"
6583 "\x5d\x04\x16\x36\x15\x72\x63\x2f",
6584 .klen = 16,
6585 .input = "\x0b\x03\xcd\x8a\xbe\x95\xfd\xb1"
6586 "\xc1\x44\x91\x0b\xa5\xc9\x1b\xb4"
6587 "\xa9\xda\x1e\x9e\xb1\x3e\x2a\x8f"
6588 "\xea\xa5\x6a\x85\xd1\xf4\xa8\xa5",
6589 .ilen = 32,
6590 .result = "\x54\x65\x61\x20\x69\x73\x20\x67"
6591 "\x6f\x6f\x64\x20\x66\x6f\x72\x20"
6592 "\x79\x6f\x75\x21\x21\x21\x20\x72"
6593 "\x65\x61\x6c\x6c\x79\x21\x21\x21",
6594 .rlen = 32,
6595 }
6596};
6597
6598/*
6599 * FCrypt test vectors
6600 */
6601#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6602#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6603
6604static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6605 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6606 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6607 .klen = 8,
6608 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6609 .input = "\x00\x00\x00\x00\x00\x00\x00\x00",
6610 .ilen = 8,
6611 .result = "\x0E\x09\x00\xC7\x3E\xF7\xED\x41",
6612 .rlen = 8,
6613 }, {
6614 .key = "\x11\x44\x77\xAA\xDD\x00\x33\x66",
6615 .klen = 8,
6616 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6617 .input = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0",
6618 .ilen = 8,
6619 .result = "\xD8\xED\x78\x74\x77\xEC\x06\x80",
6620 .rlen = 8,
6621 }, { /* From Arla */
6622 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6623 .klen = 8,
6624 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6625 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
6626 .ilen = 48,
6627 .result = "\x00\xf0\x0e\x11\x75\xe6\x23\x82"
6628 "\xee\xac\x98\x62\x44\x51\xe4\x84"
6629 "\xc3\x59\xd8\xaa\x64\x60\xae\xf7"
6630 "\xd2\xd9\x13\x79\x72\xa3\x45\x03"
6631 "\x23\xb5\x62\xd7\x0c\xf5\x27\xd1"
6632 "\xf8\x91\x3c\xac\x44\x22\x92\xef",
6633 .rlen = 48,
6634 }, {
6635 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6636 .klen = 8,
6637 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6638 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
6639 .ilen = 48,
6640 .result = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6641 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6642 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6643 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6644 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6645 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6646 .rlen = 48,
6647 }, { /* split-page version */
6648 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6649 .klen = 8,
6650 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6651 .input = "The quick brown fox jumps over the lazy dogs.\0\0",
6652 .ilen = 48,
6653 .result = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6654 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6655 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6656 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6657 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6658 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6659 .rlen = 48,
6660 .np = 2,
6661 .tap = { 20, 28 },
6662 }
6663};
6664
6665static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6666 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6667 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6668 .klen = 8,
6669 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6670 .input = "\x0E\x09\x00\xC7\x3E\xF7\xED\x41",
6671 .ilen = 8,
6672 .result = "\x00\x00\x00\x00\x00\x00\x00\x00",
6673 .rlen = 8,
6674 }, {
6675 .key = "\x11\x44\x77\xAA\xDD\x00\x33\x66",
6676 .klen = 8,
6677 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6678 .input = "\xD8\xED\x78\x74\x77\xEC\x06\x80",
6679 .ilen = 8,
6680 .result = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0",
6681 .rlen = 8,
6682 }, { /* From Arla */
6683 .key = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6684 .klen = 8,
6685 .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6686 .input = "\x00\xf0\x0e\x11\x75\xe6\x23\x82"
6687 "\xee\xac\x98\x62\x44\x51\xe4\x84"
6688 "\xc3\x59\xd8\xaa\x64\x60\xae\xf7"
6689 "\xd2\xd9\x13\x79\x72\xa3\x45\x03"
6690 "\x23\xb5\x62\xd7\x0c\xf5\x27\xd1"
6691 "\xf8\x91\x3c\xac\x44\x22\x92\xef",
6692 .ilen = 48,
6693 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
6694 .rlen = 48,
6695 }, {
6696 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6697 .klen = 8,
6698 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6699 .input = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6700 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6701 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6702 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6703 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6704 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6705 .ilen = 48,
6706 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
6707 .rlen = 48,
6708 }, { /* split-page version; see the chunking sketch below */
6709 .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6710 .klen = 8,
6711 .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
6712 .input = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c"
6713 "\x01\x88\x7f\x3e\x31\x6e\x62\x9d"
6714 "\xd8\xe0\x57\xa3\x06\x3a\x42\x58"
6715 "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0"
6716 "\x19\x89\x09\x1c\x2a\x8e\x8c\x94"
6717 "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f",
6718 .ilen = 48,
6719 .result = "The quick brown fox jumps over the lazy dogs.\0\0",
6720 .rlen = 48,
6721 .np = 2,
6722 .tap = { 20, 28 },
6723 }
6724};
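
/*
 * Not part of the kernel sources: a minimal sketch of how a harness might
 * honour the .np/.tap hints carried by the "split-page version" entries
 * above.  Only the cipher_testvec fields already used in this file
 * (input, ilen, np, tap) are assumed; the helper name and the process()
 * callback are made up for illustration.  The tap[] entries sum to ilen,
 * e.g. { 20, 28 } for the 48-byte FCrypt input, so the data is fed to the
 * cipher in two pieces and buffer-boundary paths get exercised.
 */
static void feed_in_chunks(const struct cipher_testvec *tv,
			   void (*process)(const char *buf, unsigned int len))
{
	unsigned int i, offset = 0;

	if (!tv->np) {
		/* no split requested: hand over one contiguous buffer */
		process(tv->input, tv->ilen);
		return;
	}

	for (i = 0; i < tv->np; i++) {
		/* e.g. first 20 bytes, then the remaining 28 */
		process(tv->input + offset, tv->tap[i]);
		offset += tv->tap[i];
	}
}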
6725
6726/*
6727 * CAMELLIA test vectors.
6728 */
6729#define CAMELLIA_ENC_TEST_VECTORS 3
6730#define CAMELLIA_DEC_TEST_VECTORS 3
6731#define CAMELLIA_CBC_ENC_TEST_VECTORS 2
6732#define CAMELLIA_CBC_DEC_TEST_VECTORS 2
6733
6734static struct cipher_testvec camellia_enc_tv_template[] = {
6735 {
6736 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6737 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6738 .klen = 16,
6739 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6740 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6741 .ilen = 16,
6742 .result = "\x67\x67\x31\x38\x54\x96\x69\x73"
6743 "\x08\x57\x06\x56\x48\xea\xbe\x43",
6744 .rlen = 16,
6745 }, {
6746 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6747 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6748 "\x00\x11\x22\x33\x44\x55\x66\x77",
6749 .klen = 24,
6750 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6751 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6752 .ilen = 16,
6753 .result = "\xb4\x99\x34\x01\xb3\xe9\x96\xf8"
6754 "\x4e\xe5\xce\xe7\xd7\x9b\x09\xb9",
6755 .rlen = 16,
6756 }, {
6757 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6758 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6759 "\x00\x11\x22\x33\x44\x55\x66\x77"
6760 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
6761 .klen = 32,
6762 .input = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6763 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6764 .ilen = 16,
6765 .result = "\x9a\xcc\x23\x7d\xff\x16\xd7\x6c"
6766 "\x20\xef\x7c\x91\x9e\x3a\x75\x09",
6767 .rlen = 16,
6768 },
6769};
6770
6771static struct cipher_testvec camellia_dec_tv_template[] = {
6772 {
6773 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6774 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6775 .klen = 16,
6776 .input = "\x67\x67\x31\x38\x54\x96\x69\x73"
6777 "\x08\x57\x06\x56\x48\xea\xbe\x43",
6778 .ilen = 16,
6779 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6780 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6781 .rlen = 16,
6782 }, {
6783 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6784 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6785 "\x00\x11\x22\x33\x44\x55\x66\x77",
6786 .klen = 24,
6787 .input = "\xb4\x99\x34\x01\xb3\xe9\x96\xf8"
6788 "\x4e\xe5\xce\xe7\xd7\x9b\x09\xb9",
6789 .ilen = 16,
6790 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6791 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6792 .rlen = 16,
6793 }, {
6794 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6795 "\xfe\xdc\xba\x98\x76\x54\x32\x10"
6796 "\x00\x11\x22\x33\x44\x55\x66\x77"
6797 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
6798 .klen = 32,
6799 .input = "\x9a\xcc\x23\x7d\xff\x16\xd7\x6c"
6800 "\x20\xef\x7c\x91\x9e\x3a\x75\x09",
6801 .ilen = 16,
6802 .result = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6803 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6804 .rlen = 16,
6805 },
6806};
6807
6808static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6809 {
6810 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6811 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6812 .klen = 16,
6813 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
6814 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
6815 .input = "Single block msg",
6816 .ilen = 16,
6817 .result = "\xea\x32\x12\x76\x3b\x50\x10\xe7"
6818 "\x18\xf6\xfd\x5d\xf6\x8f\x13\x51",
6819 .rlen = 16,
6820 }, {
6821 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
6822 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
6823 .klen = 16,
6824 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
6825 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
6826 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
6827 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
6828 "\x10\x11\x12\x13\x14\x15\x16\x17"
6829 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
6830 .ilen = 32,
6831 .result = "\xa5\xdf\x6e\x50\xda\x70\x6c\x01"
6832 "\x4a\xab\xf3\xf2\xd6\xfc\x6c\xfd"
6833 "\x19\xb4\x3e\x57\x1c\x02\x5e\xa0"
6834 "\x15\x78\xe0\x5e\xf2\xcb\x87\x16",
6835 .rlen = 32,
6836 },
6837};
6838
6839static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6840 {
6841 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6842 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6843 .klen = 16,
6844 .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
6845 "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
6846 .input = "\xea\x32\x12\x76\x3b\x50\x10\xe7"
6847 "\x18\xf6\xfd\x5d\xf6\x8f\x13\x51",
6848 .ilen = 16,
6849 .result = "Single block msg",
6850 .rlen = 16,
6851 }, {
6852 .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
6853 "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
6854 .klen = 16,
6855 .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
6856 "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
6857 .input = "\xa5\xdf\x6e\x50\xda\x70\x6c\x01"
6858 "\x4a\xab\xf3\xf2\xd6\xfc\x6c\xfd"
6859 "\x19\xb4\x3e\x57\x1c\x02\x5e\xa0"
6860 "\x15\x78\xe0\x5e\xf2\xcb\x87\x16",
6861 .ilen = 32,
6862 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
6863 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
6864 "\x10\x11\x12\x13\x14\x15\x16\x17"
6865 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
6866 .rlen = 32,
6867 },
6868};
6869
6870/*
6871 * SEED test vectors
6872 */
6873#define SEED_ENC_TEST_VECTORS 4
6874#define SEED_DEC_TEST_VECTORS 4
6875
6876static struct cipher_testvec seed_enc_tv_template[] = {
6877 {
6878 .key = zeroed_string,
6879 .klen = 16,
6880 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
6881 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6882 .ilen = 16,
6883 .result = "\x5e\xba\xc6\xe0\x05\x4e\x16\x68"
6884 "\x19\xaf\xf1\xcc\x6d\x34\x6c\xdb",
6885 .rlen = 16,
6886 }, {
6887 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
6888 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6889 .klen = 16,
6890 .input = zeroed_string,
6891 .ilen = 16,
6892 .result = "\xc1\x1f\x22\xf2\x01\x40\x50\x50"
6893 "\x84\x48\x35\x97\xe4\x37\x0f\x43",
6894 .rlen = 16,
6895 }, {
6896 .key = "\x47\x06\x48\x08\x51\xe6\x1b\xe8"
6897 "\x5d\x74\xbf\xb3\xfd\x95\x61\x85",
6898 .klen = 16,
6899 .input = "\x83\xa2\xf8\xa2\x88\x64\x1f\xb9"
6900 "\xa4\xe9\xa5\xcc\x2f\x13\x1c\x7d",
6901 .ilen = 16,
6902 .result = "\xee\x54\xd1\x3e\xbc\xae\x70\x6d"
6903 "\x22\x6b\xc3\x14\x2c\xd4\x0d\x4a",
6904 .rlen = 16,
6905 }, {
6906 .key = "\x28\xdb\xc3\xbc\x49\xff\xd8\x7d"
6907 "\xcf\xa5\x09\xb1\x1d\x42\x2b\xe7",
6908 .klen = 16,
6909 .input = "\xb4\x1e\x6b\xe2\xeb\xa8\x4a\x14"
6910 "\x8e\x2e\xed\x84\x59\x3c\x5e\xc7",
6911 .ilen = 16,
6912 .result = "\x9b\x9b\x7b\xfc\xd1\x81\x3c\xb9"
6913 "\x5d\x0b\x36\x18\xf4\x0f\x51\x22",
6914 .rlen = 16,
6915 }
6916};
6917
6918static struct cipher_testvec seed_dec_tv_template[] = {
6919 {
6920 .key = zeroed_string,
6921 .klen = 16,
6922 .input = "\x5e\xba\xc6\xe0\x05\x4e\x16\x68"
6923 "\x19\xaf\xf1\xcc\x6d\x34\x6c\xdb",
6924 .ilen = 16,
6925 .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
6926 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6927 .rlen = 16,
6928 }, {
6929 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
6930 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6931 .klen = 16,
6932 .input = "\xc1\x1f\x22\xf2\x01\x40\x50\x50"
6933 "\x84\x48\x35\x97\xe4\x37\x0f\x43",
6934 .ilen = 16,
6935 .result = zeroed_string,
6936 .rlen = 16,
6937 }, {
6938 .key = "\x47\x06\x48\x08\x51\xe6\x1b\xe8"
6939 "\x5d\x74\xbf\xb3\xfd\x95\x61\x85",
6940 .klen = 16,
6941 .input = "\xee\x54\xd1\x3e\xbc\xae\x70\x6d"
6942 "\x22\x6b\xc3\x14\x2c\xd4\x0d\x4a",
6943 .ilen = 16,
6944 .result = "\x83\xa2\xf8\xa2\x88\x64\x1f\xb9"
6945 "\xa4\xe9\xa5\xcc\x2f\x13\x1c\x7d",
6946 .rlen = 16,
6947 }, {
6948 .key = "\x28\xdb\xc3\xbc\x49\xff\xd8\x7d"
6949 "\xcf\xa5\x09\xb1\x1d\x42\x2b\xe7",
6950 .klen = 16,
6951 .input = "\x9b\x9b\x7b\xfc\xd1\x81\x3c\xb9"
6952 "\x5d\x0b\x36\x18\xf4\x0f\x51\x22",
6953 .ilen = 16,
6954 .result = "\xb4\x1e\x6b\xe2\xeb\xa8\x4a\x14"
6955 "\x8e\x2e\xed\x84\x59\x3c\x5e\xc7",
6956 .rlen = 16,
6957 }
6958};
6959
6960#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6961static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6962 /*
6963 * Test vectors from verified.test-vectors submitted to ECRYPT.
6964 * They are truncated to sizes 39, 64, 111 and 129 to test a variety
6965 * of input lengths.
6966 */
6967 { /* Set 3, vector 0 */
6968 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
6969 "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
6970 .klen = 16,
6971 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
6972 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
6973 "\x00\x00\x00\x00\x00\x00\x00\x00"
6974 "\x00\x00\x00\x00\x00\x00\x00\x00"
6975 "\x00\x00\x00\x00\x00\x00\x00\x00"
6976 "\x00\x00\x00\x00\x00\x00\x00",
6977 .ilen = 39,
6978 .result = "\x2D\xD5\xC3\xF7\xBA\x2B\x20\xF7"
6979 "\x68\x02\x41\x0C\x68\x86\x88\x89"
6980 "\x5A\xD8\xC1\xBD\x4E\xA6\xC9\xB1"
6981 "\x40\xFB\x9B\x90\xE2\x10\x49\xBF"
6982 "\x58\x3F\x52\x79\x70\xEB\xC1",
6983 .rlen = 39,
6984 }, { /* Set 5, vector 0 */
6985 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6986 "\x00\x00\x00\x00\x00\x00\x00\x00",
6987 .klen = 16,
6988 .iv = "\x80\x00\x00\x00\x00\x00\x00\x00",
6989 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
6990 "\x00\x00\x00\x00\x00\x00\x00\x00"
6991 "\x00\x00\x00\x00\x00\x00\x00\x00"
6992 "\x00\x00\x00\x00\x00\x00\x00\x00"
6993 "\x00\x00\x00\x00\x00\x00\x00\x00"
6994 "\x00\x00\x00\x00\x00\x00\x00\x00"
6995 "\x00\x00\x00\x00\x00\x00\x00\x00"
6996 "\x00\x00\x00\x00\x00\x00\x00\x00",
6997 .ilen = 64,
6998 .result = "\xB6\x6C\x1E\x44\x46\xDD\x95\x57"
6999 "\xE5\x78\xE2\x23\xB0\xB7\x68\x01"
7000 "\x7B\x23\xB2\x67\xBB\x02\x34\xAE"
7001 "\x46\x26\xBF\x44\x3F\x21\x97\x76"
7002 "\x43\x6F\xB1\x9F\xD0\xE8\x86\x6F"
7003 "\xCD\x0D\xE9\xA9\x53\x8F\x4A\x09"
7004 "\xCA\x9A\xC0\x73\x2E\x30\xBC\xF9"
7005 "\x8E\x4F\x13\xE4\xB9\xE2\x01\xD9",
7006 .rlen = 64,
7007 }, { /* Set 3, vector 27 */
7008 .key = "\x1B\x1C\x1D\x1E\x1F\x20\x21\x22"
7009 "\x23\x24\x25\x26\x27\x28\x29\x2A"
7010 "\x2B\x2C\x2D\x2E\x2F\x30\x31\x32"
7011 "\x33\x34\x35\x36\x37\x38\x39\x3A",
7012 .klen = 32,
7013 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
7014 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
7015 "\x00\x00\x00\x00\x00\x00\x00\x00"
7016 "\x00\x00\x00\x00\x00\x00\x00\x00"
7017 "\x00\x00\x00\x00\x00\x00\x00\x00"
7018 "\x00\x00\x00\x00\x00\x00\x00\x00"
7019 "\x00\x00\x00\x00\x00\x00\x00\x00"
7020 "\x00\x00\x00\x00\x00\x00\x00\x00"
7021 "\x00\x00\x00\x00\x00\x00\x00\x00"
7022 "\x00\x00\x00\x00\x00\x00\x00\x00"
7023 "\x00\x00\x00\x00\x00\x00\x00\x00"
7024 "\x00\x00\x00\x00\x00\x00\x00\x00"
7025 "\x00\x00\x00\x00\x00\x00\x00\x00"
7026 "\x00\x00\x00\x00\x00\x00\x00\x00"
7027 "\x00\x00\x00\x00\x00\x00\x00",
7028 .ilen = 111,
7029 .result = "\xAE\x39\x50\x8E\xAC\x9A\xEC\xE7"
7030 "\xBF\x97\xBB\x20\xB9\xDE\xE4\x1F"
7031 "\x87\xD9\x47\xF8\x28\x91\x35\x98"
7032 "\xDB\x72\xCC\x23\x29\x48\x56\x5E"
7033 "\x83\x7E\x0B\xF3\x7D\x5D\x38\x7B"
7034 "\x2D\x71\x02\xB4\x3B\xB5\xD8\x23"
7035 "\xB0\x4A\xDF\x3C\xEC\xB6\xD9\x3B"
7036 "\x9B\xA7\x52\xBE\xC5\xD4\x50\x59"
7037 "\x15\x14\xB4\x0E\x40\xE6\x53\xD1"
7038 "\x83\x9C\x5B\xA0\x92\x29\x6B\x5E"
7039 "\x96\x5B\x1E\x2F\xD3\xAC\xC1\x92"
7040 "\xB1\x41\x3F\x19\x2F\xC4\x3B\xC6"
7041 "\x95\x46\x45\x54\xE9\x75\x03\x08"
7042 "\x44\xAF\xE5\x8A\x81\x12\x09",
7043 .rlen = 111,
7044 }, { /* Set 5, vector 27 */
7045 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
7046 "\x00\x00\x00\x00\x00\x00\x00\x00"
7047 "\x00\x00\x00\x00\x00\x00\x00\x00"
7048 "\x00\x00\x00\x00\x00\x00\x00\x00",
7049 .klen = 32,
7050 .iv = "\x00\x00\x00\x10\x00\x00\x00\x00",
7051 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
7052 "\x00\x00\x00\x00\x00\x00\x00\x00"
7053 "\x00\x00\x00\x00\x00\x00\x00\x00"
7054 "\x00\x00\x00\x00\x00\x00\x00\x00"
7055 "\x00\x00\x00\x00\x00\x00\x00\x00"
7056 "\x00\x00\x00\x00\x00\x00\x00\x00"
7057 "\x00\x00\x00\x00\x00\x00\x00\x00"
7058 "\x00\x00\x00\x00\x00\x00\x00\x00"
7059 "\x00\x00\x00\x00\x00\x00\x00\x00"
7060 "\x00\x00\x00\x00\x00\x00\x00\x00"
7061 "\x00\x00\x00\x00\x00\x00\x00\x00"
7062 "\x00\x00\x00\x00\x00\x00\x00\x00"
7063 "\x00\x00\x00\x00\x00\x00\x00\x00"
7064 "\x00\x00\x00\x00\x00\x00\x00\x00"
7065 "\x00\x00\x00\x00\x00\x00\x00\x00"
7066 "\x00\x00\x00\x00\x00\x00\x00\x00"
7067 "\x00",
7068 .ilen = 129,
7069 .result = "\xD2\xDB\x1A\x5C\xF1\xC1\xAC\xDB"
7070 "\xE8\x1A\x7A\x43\x40\xEF\x53\x43"
7071 "\x5E\x7F\x4B\x1A\x50\x52\x3F\x8D"
7072 "\x28\x3D\xCF\x85\x1D\x69\x6E\x60"
7073 "\xF2\xDE\x74\x56\x18\x1B\x84\x10"
7074 "\xD4\x62\xBA\x60\x50\xF0\x61\xF2"
7075 "\x1C\x78\x7F\xC1\x24\x34\xAF\x58"
7076 "\xBF\x2C\x59\xCA\x90\x77\xF3\xB0"
7077 "\x5B\x4A\xDF\x89\xCE\x2C\x2F\xFC"
7078 "\x67\xF0\xE3\x45\xE8\xB3\xB3\x75"
7079 "\xA0\x95\x71\xA1\x29\x39\x94\xCA"
7080 "\x45\x2F\xBD\xCB\x10\xB6\xBE\x9F"
7081 "\x8E\xF9\xB2\x01\x0A\x5A\x0A\xB7"
7082 "\x6B\x9D\x70\x8E\x4B\xD6\x2F\xCD"
7083 "\x2E\x40\x48\x75\xE9\xE2\x21\x45"
7084 "\x0B\xC9\xB6\xB5\x66\xBC\x9A\x59"
7085 "\x5A",
7086 .rlen = 129,
7087 }, { /* large test vector generated using Crypto++ */
7088 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
7089 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
7090 "\x10\x11\x12\x13\x14\x15\x16\x17"
7091 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
7092 .klen = 32,
7093 .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
7094 "\x00\x00\x00\x00\x00\x00\x00\x00",
7095 .input =
7096 "\x00\x01\x02\x03\x04\x05\x06\x07"
7097 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
7098 "\x10\x11\x12\x13\x14\x15\x16\x17"
7099 "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
7100 "\x20\x21\x22\x23\x24\x25\x26\x27"
7101 "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
7102 "\x30\x31\x32\x33\x34\x35\x36\x37"
7103 "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
7104 "\x40\x41\x42\x43\x44\x45\x46\x47"
7105 "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
7106 "\x50\x51\x52\x53\x54\x55\x56\x57"
7107 "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
7108 "\x60\x61\x62\x63\x64\x65\x66\x67"
7109 "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
7110 "\x70\x71\x72\x73\x74\x75\x76\x77"
7111 "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
7112 "\x80\x81\x82\x83\x84\x85\x86\x87"
7113 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
7114 "\x90\x91\x92\x93\x94\x95\x96\x97"
7115 "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
7116 "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
7117 "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
7118 "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
7119 "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
7120 "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
7121 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
7122 "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
7123 "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
7124 "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
7125 "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
7126 "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
7127 "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
7128 "\x00\x03\x06\x09\x0c\x0f\x12\x15"
7129 "\x18\x1b\x1e\x21\x24\x27\x2a\x2d"
7130 "\x30\x33\x36\x39\x3c\x3f\x42\x45"
7131 "\x48\x4b\x4e\x51\x54\x57\x5a\x5d"
7132 "\x60\x63\x66\x69\x6c\x6f\x72\x75"
7133 "\x78\x7b\x7e\x81\x84\x87\x8a\x8d"
7134 "\x90\x93\x96\x99\x9c\x9f\xa2\xa5"
7135 "\xa8\xab\xae\xb1\xb4\xb7\xba\xbd"
7136 "\xc0\xc3\xc6\xc9\xcc\xcf\xd2\xd5"
7137 "\xd8\xdb\xde\xe1\xe4\xe7\xea\xed"
7138 "\xf0\xf3\xf6\xf9\xfc\xff\x02\x05"
7139 "\x08\x0b\x0e\x11\x14\x17\x1a\x1d"
7140 "\x20\x23\x26\x29\x2c\x2f\x32\x35"
7141 "\x38\x3b\x3e\x41\x44\x47\x4a\x4d"
7142 "\x50\x53\x56\x59\x5c\x5f\x62\x65"
7143 "\x68\x6b\x6e\x71\x74\x77\x7a\x7d"
7144 "\x80\x83\x86\x89\x8c\x8f\x92\x95"
7145 "\x98\x9b\x9e\xa1\xa4\xa7\xaa\xad"
7146 "\xb0\xb3\xb6\xb9\xbc\xbf\xc2\xc5"
7147 "\xc8\xcb\xce\xd1\xd4\xd7\xda\xdd"
7148 "\xe0\xe3\xe6\xe9\xec\xef\xf2\xf5"
7149 "\xf8\xfb\xfe\x01\x04\x07\x0a\x0d"
7150 "\x10\x13\x16\x19\x1c\x1f\x22\x25"
7151 "\x28\x2b\x2e\x31\x34\x37\x3a\x3d"
7152 "\x40\x43\x46\x49\x4c\x4f\x52\x55"
7153 "\x58\x5b\x5e\x61\x64\x67\x6a\x6d"
7154 "\x70\x73\x76\x79\x7c\x7f\x82\x85"
7155 "\x88\x8b\x8e\x91\x94\x97\x9a\x9d"
7156 "\xa0\xa3\xa6\xa9\xac\xaf\xb2\xb5"
7157 "\xb8\xbb\xbe\xc1\xc4\xc7\xca\xcd"
7158 "\xd0\xd3\xd6\xd9\xdc\xdf\xe2\xe5"
7159 "\xe8\xeb\xee\xf1\xf4\xf7\xfa\xfd"
7160 "\x00\x05\x0a\x0f\x14\x19\x1e\x23"
7161 "\x28\x2d\x32\x37\x3c\x41\x46\x4b"
7162 "\x50\x55\x5a\x5f\x64\x69\x6e\x73"
7163 "\x78\x7d\x82\x87\x8c\x91\x96\x9b"
7164 "\xa0\xa5\xaa\xaf\xb4\xb9\xbe\xc3"
7165 "\xc8\xcd\xd2\xd7\xdc\xe1\xe6\xeb"
7166 "\xf0\xf5\xfa\xff\x04\x09\x0e\x13"
7167 "\x18\x1d\x22\x27\x2c\x31\x36\x3b"
7168 "\x40\x45\x4a\x4f\x54\x59\x5e\x63"
7169 "\x68\x6d\x72\x77\x7c\x81\x86\x8b"
7170 "\x90\x95\x9a\x9f\xa4\xa9\xae\xb3"
7171 "\xb8\xbd\xc2\xc7\xcc\xd1\xd6\xdb"
7172 "\xe0\xe5\xea\xef\xf4\xf9\xfe\x03"
7173 "\x08\x0d\x12\x17\x1c\x21\x26\x2b"
7174 "\x30\x35\x3a\x3f\x44\x49\x4e\x53"
7175 "\x58\x5d\x62\x67\x6c\x71\x76\x7b"
7176 "\x80\x85\x8a\x8f\x94\x99\x9e\xa3"
7177 "\xa8\xad\xb2\xb7\xbc\xc1\xc6\xcb"
7178 "\xd0\xd5\xda\xdf\xe4\xe9\xee\xf3"
7179 "\xf8\xfd\x02\x07\x0c\x11\x16\x1b"
7180 "\x20\x25\x2a\x2f\x34\x39\x3e\x43"
7181 "\x48\x4d\x52\x57\x5c\x61\x66\x6b"
7182 "\x70\x75\x7a\x7f\x84\x89\x8e\x93"
7183 "\x98\x9d\xa2\xa7\xac\xb1\xb6\xbb"
7184 "\xc0\xc5\xca\xcf\xd4\xd9\xde\xe3"
7185 "\xe8\xed\xf2\xf7\xfc\x01\x06\x0b"
7186 "\x10\x15\x1a\x1f\x24\x29\x2e\x33"
7187 "\x38\x3d\x42\x47\x4c\x51\x56\x5b"
7188 "\x60\x65\x6a\x6f\x74\x79\x7e\x83"
7189 "\x88\x8d\x92\x97\x9c\xa1\xa6\xab"
7190 "\xb0\xb5\xba\xbf\xc4\xc9\xce\xd3"
7191 "\xd8\xdd\xe2\xe7\xec\xf1\xf6\xfb"
7192 "\x00\x07\x0e\x15\x1c\x23\x2a\x31"
7193 "\x38\x3f\x46\x4d\x54\x5b\x62\x69"
7194 "\x70\x77\x7e\x85\x8c\x93\x9a\xa1"
7195 "\xa8\xaf\xb6\xbd\xc4\xcb\xd2\xd9"
7196 "\xe0\xe7\xee\xf5\xfc\x03\x0a\x11"
7197 "\x18\x1f\x26\x2d\x34\x3b\x42\x49"
7198 "\x50\x57\x5e\x65\x6c\x73\x7a\x81"
7199 "\x88\x8f\x96\x9d\xa4\xab\xb2\xb9"
7200 "\xc0\xc7\xce\xd5\xdc\xe3\xea\xf1"
7201 "\xf8\xff\x06\x0d\x14\x1b\x22\x29"
7202 "\x30\x37\x3e\x45\x4c\x53\x5a\x61"
7203 "\x68\x6f\x76\x7d\x84\x8b\x92\x99"
7204 "\xa0\xa7\xae\xb5\xbc\xc3\xca\xd1"
7205 "\xd8\xdf\xe6\xed\xf4\xfb\x02\x09"
7206 "\x10\x17\x1e\x25\x2c\x33\x3a\x41"
7207 "\x48\x4f\x56\x5d\x64\x6b\x72\x79"
7208 "\x80\x87\x8e\x95\x9c\xa3\xaa\xb1"
7209 "\xb8\xbf\xc6\xcd\xd4\xdb\xe2\xe9"
7210 "\xf0\xf7\xfe\x05\x0c\x13\x1a\x21"
7211 "\x28\x2f\x36\x3d\x44\x4b\x52\x59"
7212 "\x60\x67\x6e\x75\x7c\x83\x8a\x91"
7213 "\x98\x9f\xa6\xad\xb4\xbb\xc2\xc9"
7214 "\xd0\xd7\xde\xe5\xec\xf3\xfa\x01"
7215 "\x08\x0f\x16\x1d\x24\x2b\x32\x39"
7216 "\x40\x47\x4e\x55\x5c\x63\x6a\x71"
7217 "\x78\x7f\x86\x8d\x94\x9b\xa2\xa9"
7218 "\xb0\xb7\xbe\xc5\xcc\xd3\xda\xe1"
7219 "\xe8\xef\xf6\xfd\x04\x0b\x12\x19"
7220 "\x20\x27\x2e\x35\x3c\x43\x4a\x51"
7221 "\x58\x5f\x66\x6d\x74\x7b\x82\x89"
7222 "\x90\x97\x9e\xa5\xac\xb3\xba\xc1"
7223 "\xc8\xcf\xd6\xdd\xe4\xeb\xf2\xf9"
7224 "\x00\x09\x12\x1b\x24\x2d\x36\x3f"
7225 "\x48\x51\x5a\x63\x6c\x75\x7e\x87"
7226 "\x90\x99\xa2\xab\xb4\xbd\xc6\xcf"
7227 "\xd8\xe1\xea\xf3\xfc\x05\x0e\x17"
7228 "\x20\x29\x32\x3b\x44\x4d\x56\x5f"
7229 "\x68\x71\x7a\x83\x8c\x95\x9e\xa7"
7230 "\xb0\xb9\xc2\xcb\xd4\xdd\xe6\xef"
7231 "\xf8\x01\x0a\x13\x1c\x25\x2e\x37"
7232 "\x40\x49\x52\x5b\x64\x6d\x76\x7f"
7233 "\x88\x91\x9a\xa3\xac\xb5\xbe\xc7"
7234 "\xd0\xd9\xe2\xeb\xf4\xfd\x06\x0f"
7235 "\x18\x21\x2a\x33\x3c\x45\x4e\x57"
7236 "\x60\x69\x72\x7b\x84\x8d\x96\x9f"
7237 "\xa8\xb1\xba\xc3\xcc\xd5\xde\xe7"
7238 "\xf0\xf9\x02\x0b\x14\x1d\x26\x2f"
7239 "\x38\x41\x4a\x53\x5c\x65\x6e\x77"
7240 "\x80\x89\x92\x9b\xa4\xad\xb6\xbf"
7241 "\xc8\xd1\xda\xe3\xec\xf5\xfe\x07"
7242 "\x10\x19\x22\x2b\x34\x3d\x46\x4f"
7243 "\x58\x61\x6a\x73\x7c\x85\x8e\x97"
7244 "\xa0\xa9\xb2\xbb\xc4\xcd\xd6\xdf"
7245 "\xe8\xf1\xfa\x03\x0c\x15\x1e\x27"
7246 "\x30\x39\x42\x4b\x54\x5d\x66\x6f"
7247 "\x78\x81\x8a\x93\x9c\xa5\xae\xb7"
7248 "\xc0\xc9\xd2\xdb\xe4\xed\xf6\xff"
7249 "\x08\x11\x1a\x23\x2c\x35\x3e\x47"
7250 "\x50\x59\x62\x6b\x74\x7d\x86\x8f"
7251 "\x98\xa1\xaa\xb3\xbc\xc5\xce\xd7"
7252 "\xe0\xe9\xf2\xfb\x04\x0d\x16\x1f"
7253 "\x28\x31\x3a\x43\x4c\x55\x5e\x67"
7254 "\x70\x79\x82\x8b\x94\x9d\xa6\xaf"
7255 "\xb8\xc1\xca\xd3\xdc\xe5\xee\xf7"
7256 "\x00\x0b\x16\x21\x2c\x37\x42\x4d"
7257 "\x58\x63\x6e\x79\x84\x8f\x9a\xa5"
7258 "\xb0\xbb\xc6\xd1\xdc\xe7\xf2\xfd"
7259 "\x08\x13\x1e\x29\x34\x3f\x4a\x55"
7260 "\x60\x6b\x76\x81\x8c\x97\xa2\xad"
7261 "\xb8\xc3\xce\xd9\xe4\xef\xfa\x05"
7262 "\x10\x1b\x26\x31\x3c\x47\x52\x5d"
7263 "\x68\x73\x7e\x89\x94\x9f\xaa\xb5"
7264 "\xc0\xcb\xd6\xe1\xec\xf7\x02\x0d"
7265 "\x18\x23\x2e\x39\x44\x4f\x5a\x65"
7266 "\x70\x7b\x86\x91\x9c\xa7\xb2\xbd"
7267 "\xc8\xd3\xde\xe9\xf4\xff\x0a\x15"
7268 "\x20\x2b\x36\x41\x4c\x57\x62\x6d"
7269 "\x78\x83\x8e\x99\xa4\xaf\xba\xc5"
7270 "\xd0\xdb\xe6\xf1\xfc\x07\x12\x1d"
7271 "\x28\x33\x3e\x49\x54\x5f\x6a\x75"
7272 "\x80\x8b\x96\xa1\xac\xb7\xc2\xcd"
7273 "\xd8\xe3\xee\xf9\x04\x0f\x1a\x25"
7274 "\x30\x3b\x46\x51\x5c\x67\x72\x7d"
7275 "\x88\x93\x9e\xa9\xb4\xbf\xca\xd5"
7276 "\xe0\xeb\xf6\x01\x0c\x17\x22\x2d"
7277 "\x38\x43\x4e\x59\x64\x6f\x7a\x85"
7278 "\x90\x9b\xa6\xb1\xbc\xc7\xd2\xdd"
7279 "\xe8\xf3\xfe\x09\x14\x1f\x2a\x35"
7280 "\x40\x4b\x56\x61\x6c\x77\x82\x8d"
7281 "\x98\xa3\xae\xb9\xc4\xcf\xda\xe5"
7282 "\xf0\xfb\x06\x11\x1c\x27\x32\x3d"
7283 "\x48\x53\x5e\x69\x74\x7f\x8a\x95"
7284 "\xa0\xab\xb6\xc1\xcc\xd7\xe2\xed"
7285 "\xf8\x03\x0e\x19\x24\x2f\x3a\x45"
7286 "\x50\x5b\x66\x71\x7c\x87\x92\x9d"
7287 "\xa8\xb3\xbe\xc9\xd4\xdf\xea\xf5"
7288 "\x00\x0d\x1a\x27\x34\x41\x4e\x5b"
7289 "\x68\x75\x82\x8f\x9c\xa9\xb6\xc3"
7290 "\xd0\xdd\xea\xf7\x04\x11\x1e\x2b"
7291 "\x38\x45\x52\x5f\x6c\x79\x86\x93"
7292 "\xa0\xad\xba\xc7\xd4\xe1\xee\xfb"
7293 "\x08\x15\x22\x2f\x3c\x49\x56\x63"
7294 "\x70\x7d\x8a\x97\xa4\xb1\xbe\xcb"
7295 "\xd8\xe5\xf2\xff\x0c\x19\x26\x33"
7296 "\x40\x4d\x5a\x67\x74\x81\x8e\x9b"
7297 "\xa8\xb5\xc2\xcf\xdc\xe9\xf6\x03"
7298 "\x10\x1d\x2a\x37\x44\x51\x5e\x6b"
7299 "\x78\x85\x92\x9f\xac\xb9\xc6\xd3"
7300 "\xe0\xed\xfa\x07\x14\x21\x2e\x3b"
7301 "\x48\x55\x62\x6f\x7c\x89\x96\xa3"
7302 "\xb0\xbd\xca\xd7\xe4\xf1\xfe\x0b"
7303 "\x18\x25\x32\x3f\x4c\x59\x66\x73"
7304 "\x80\x8d\x9a\xa7\xb4\xc1\xce\xdb"
7305 "\xe8\xf5\x02\x0f\x1c\x29\x36\x43"
7306 "\x50\x5d\x6a\x77\x84\x91\x9e\xab"
7307 "\xb8\xc5\xd2\xdf\xec\xf9\x06\x13"
7308 "\x20\x2d\x3a\x47\x54\x61\x6e\x7b"
7309 "\x88\x95\xa2\xaf\xbc\xc9\xd6\xe3"
7310 "\xf0\xfd\x0a\x17\x24\x31\x3e\x4b"
7311 "\x58\x65\x72\x7f\x8c\x99\xa6\xb3"
7312 "\xc0\xcd\xda\xe7\xf4\x01\x0e\x1b"
7313 "\x28\x35\x42\x4f\x5c\x69\x76\x83"
7314 "\x90\x9d\xaa\xb7\xc4\xd1\xde\xeb"
7315 "\xf8\x05\x12\x1f\x2c\x39\x46\x53"
7316 "\x60\x6d\x7a\x87\x94\xa1\xae\xbb"
7317 "\xc8\xd5\xe2\xef\xfc\x09\x16\x23"
7318 "\x30\x3d\x4a\x57\x64\x71\x7e\x8b"
7319 "\x98\xa5\xb2\xbf\xcc\xd9\xe6\xf3"
7320 "\x00\x0f\x1e\x2d\x3c\x4b\x5a\x69"
7321 "\x78\x87\x96\xa5\xb4\xc3\xd2\xe1"
7322 "\xf0\xff\x0e\x1d\x2c\x3b\x4a\x59"
7323 "\x68\x77\x86\x95\xa4\xb3\xc2\xd1"
7324 "\xe0\xef\xfe\x0d\x1c\x2b\x3a\x49"
7325 "\x58\x67\x76\x85\x94\xa3\xb2\xc1"
7326 "\xd0\xdf\xee\xfd\x0c\x1b\x2a\x39"
7327 "\x48\x57\x66\x75\x84\x93\xa2\xb1"
7328 "\xc0\xcf\xde\xed\xfc\x0b\x1a\x29"
7329 "\x38\x47\x56\x65\x74\x83\x92\xa1"
7330 "\xb0\xbf\xce\xdd\xec\xfb\x0a\x19"
7331 "\x28\x37\x46\x55\x64\x73\x82\x91"
7332 "\xa0\xaf\xbe\xcd\xdc\xeb\xfa\x09"
7333 "\x18\x27\x36\x45\x54\x63\x72\x81"
7334 "\x90\x9f\xae\xbd\xcc\xdb\xea\xf9"
7335 "\x08\x17\x26\x35\x44\x53\x62\x71"
7336 "\x80\x8f\x9e\xad\xbc\xcb\xda\xe9"
7337 "\xf8\x07\x16\x25\x34\x43\x52\x61"
7338 "\x70\x7f\x8e\x9d\xac\xbb\xca\xd9"
7339 "\xe8\xf7\x06\x15\x24\x33\x42\x51"
7340 "\x60\x6f\x7e\x8d\x9c\xab\xba\xc9"
7341 "\xd8\xe7\xf6\x05\x14\x23\x32\x41"
7342 "\x50\x5f\x6e\x7d\x8c\x9b\xaa\xb9"
7343 "\xc8\xd7\xe6\xf5\x04\x13\x22\x31"
7344 "\x40\x4f\x5e\x6d\x7c\x8b\x9a\xa9"
7345 "\xb8\xc7\xd6\xe5\xf4\x03\x12\x21"
7346 "\x30\x3f\x4e\x5d\x6c\x7b\x8a\x99"
7347 "\xa8\xb7\xc6\xd5\xe4\xf3\x02\x11"
7348 "\x20\x2f\x3e\x4d\x5c\x6b\x7a\x89"
7349 "\x98\xa7\xb6\xc5\xd4\xe3\xf2\x01"
7350 "\x10\x1f\x2e\x3d\x4c\x5b\x6a\x79"
7351 "\x88\x97\xa6\xb5\xc4\xd3\xe2\xf1"
7352 "\x00\x11\x22\x33\x44\x55\x66\x77"
7353 "\x88\x99\xaa\xbb\xcc\xdd\xee\xff"
7354 "\x10\x21\x32\x43\x54\x65\x76\x87"
7355 "\x98\xa9\xba\xcb\xdc\xed\xfe\x0f"
7356 "\x20\x31\x42\x53\x64\x75\x86\x97"
7357 "\xa8\xb9\xca\xdb\xec\xfd\x0e\x1f"
7358 "\x30\x41\x52\x63\x74\x85\x96\xa7"
7359 "\xb8\xc9\xda\xeb\xfc\x0d\x1e\x2f"
7360 "\x40\x51\x62\x73\x84\x95\xa6\xb7"
7361 "\xc8\xd9\xea\xfb\x0c\x1d\x2e\x3f"
7362 "\x50\x61\x72\x83\x94\xa5\xb6\xc7"
7363 "\xd8\xe9\xfa\x0b\x1c\x2d\x3e\x4f"
7364 "\x60\x71\x82\x93\xa4\xb5\xc6\xd7"
7365 "\xe8\xf9\x0a\x1b\x2c\x3d\x4e\x5f"
7366 "\x70\x81\x92\xa3\xb4\xc5\xd6\xe7"
7367 "\xf8\x09\x1a\x2b\x3c\x4d\x5e\x6f"
7368 "\x80\x91\xa2\xb3\xc4\xd5\xe6\xf7"
7369 "\x08\x19\x2a\x3b\x4c\x5d\x6e\x7f"
7370 "\x90\xa1\xb2\xc3\xd4\xe5\xf6\x07"
7371 "\x18\x29\x3a\x4b\x5c\x6d\x7e\x8f"
7372 "\xa0\xb1\xc2\xd3\xe4\xf5\x06\x17"
7373 "\x28\x39\x4a\x5b\x6c\x7d\x8e\x9f"
7374 "\xb0\xc1\xd2\xe3\xf4\x05\x16\x27"
7375 "\x38\x49\x5a\x6b\x7c\x8d\x9e\xaf"
7376 "\xc0\xd1\xe2\xf3\x04\x15\x26\x37"
7377 "\x48\x59\x6a\x7b\x8c\x9d\xae\xbf"
7378 "\xd0\xe1\xf2\x03\x14\x25\x36\x47"
7379 "\x58\x69\x7a\x8b\x9c\xad\xbe\xcf"
7380 "\xe0\xf1\x02\x13\x24\x35\x46\x57"
7381 "\x68\x79\x8a\x9b\xac\xbd\xce\xdf"
7382 "\xf0\x01\x12\x23\x34\x45\x56\x67"
7383 "\x78\x89\x9a\xab\xbc\xcd\xde\xef"
7384 "\x00\x13\x26\x39\x4c\x5f\x72\x85"
7385 "\x98\xab\xbe\xd1\xe4\xf7\x0a\x1d"
7386 "\x30\x43\x56\x69\x7c\x8f\xa2\xb5"
7387 "\xc8\xdb\xee\x01\x14\x27\x3a\x4d"
7388 "\x60\x73\x86\x99\xac\xbf\xd2\xe5"
7389 "\xf8\x0b\x1e\x31\x44\x57\x6a\x7d"
7390 "\x90\xa3\xb6\xc9\xdc\xef\x02\x15"
7391 "\x28\x3b\x4e\x61\x74\x87\x9a\xad"
7392 "\xc0\xd3\xe6\xf9\x0c\x1f\x32\x45"
7393 "\x58\x6b\x7e\x91\xa4\xb7\xca\xdd"
7394 "\xf0\x03\x16\x29\x3c\x4f\x62\x75"
7395 "\x88\x9b\xae\xc1\xd4\xe7\xfa\x0d"
7396 "\x20\x33\x46\x59\x6c\x7f\x92\xa5"
7397 "\xb8\xcb\xde\xf1\x04\x17\x2a\x3d"
7398 "\x50\x63\x76\x89\x9c\xaf\xc2\xd5"
7399 "\xe8\xfb\x0e\x21\x34\x47\x5a\x6d"
7400 "\x80\x93\xa6\xb9\xcc\xdf\xf2\x05"
7401 "\x18\x2b\x3e\x51\x64\x77\x8a\x9d"
7402 "\xb0\xc3\xd6\xe9\xfc\x0f\x22\x35"
7403 "\x48\x5b\x6e\x81\x94\xa7\xba\xcd"
7404 "\xe0\xf3\x06\x19\x2c\x3f\x52\x65"
7405 "\x78\x8b\x9e\xb1\xc4\xd7\xea\xfd"
7406 "\x10\x23\x36\x49\x5c\x6f\x82\x95"
7407 "\xa8\xbb\xce\xe1\xf4\x07\x1a\x2d"
7408 "\x40\x53\x66\x79\x8c\x9f\xb2\xc5"
7409 "\xd8\xeb\xfe\x11\x24\x37\x4a\x5d"
7410 "\x70\x83\x96\xa9\xbc\xcf\xe2\xf5"
7411 "\x08\x1b\x2e\x41\x54\x67\x7a\x8d"
7412 "\xa0\xb3\xc6\xd9\xec\xff\x12\x25"
7413 "\x38\x4b\x5e\x71\x84\x97\xaa\xbd"
7414 "\xd0\xe3\xf6\x09\x1c\x2f\x42\x55"
7415 "\x68\x7b\x8e\xa1\xb4\xc7\xda\xed"
7416 "\x00\x15\x2a\x3f\x54\x69\x7e\x93"
7417 "\xa8\xbd\xd2\xe7\xfc\x11\x26\x3b"
7418 "\x50\x65\x7a\x8f\xa4\xb9\xce\xe3"
7419 "\xf8\x0d\x22\x37\x4c\x61\x76\x8b"
7420 "\xa0\xb5\xca\xdf\xf4\x09\x1e\x33"
7421 "\x48\x5d\x72\x87\x9c\xb1\xc6\xdb"
7422 "\xf0\x05\x1a\x2f\x44\x59\x6e\x83"
7423 "\x98\xad\xc2\xd7\xec\x01\x16\x2b"
7424 "\x40\x55\x6a\x7f\x94\xa9\xbe\xd3"
7425 "\xe8\xfd\x12\x27\x3c\x51\x66\x7b"
7426 "\x90\xa5\xba\xcf\xe4\xf9\x0e\x23"
7427 "\x38\x4d\x62\x77\x8c\xa1\xb6\xcb"
7428 "\xe0\xf5\x0a\x1f\x34\x49\x5e\x73"
7429 "\x88\x9d\xb2\xc7\xdc\xf1\x06\x1b"
7430 "\x30\x45\x5a\x6f\x84\x99\xae\xc3"
7431 "\xd8\xed\x02\x17\x2c\x41\x56\x6b"
7432 "\x80\x95\xaa\xbf\xd4\xe9\xfe\x13"
7433 "\x28\x3d\x52\x67\x7c\x91\xa6\xbb"
7434 "\xd0\xe5\xfa\x0f\x24\x39\x4e\x63"
7435 "\x78\x8d\xa2\xb7\xcc\xe1\xf6\x0b"
7436 "\x20\x35\x4a\x5f\x74\x89\x9e\xb3"
7437 "\xc8\xdd\xf2\x07\x1c\x31\x46\x5b"
7438 "\x70\x85\x9a\xaf\xc4\xd9\xee\x03"
7439 "\x18\x2d\x42\x57\x6c\x81\x96\xab"
7440 "\xc0\xd5\xea\xff\x14\x29\x3e\x53"
7441 "\x68\x7d\x92\xa7\xbc\xd1\xe6\xfb"
7442 "\x10\x25\x3a\x4f\x64\x79\x8e\xa3"
7443 "\xb8\xcd\xe2\xf7\x0c\x21\x36\x4b"
7444 "\x60\x75\x8a\x9f\xb4\xc9\xde\xf3"
7445 "\x08\x1d\x32\x47\x5c\x71\x86\x9b"
7446 "\xb0\xc5\xda\xef\x04\x19\x2e\x43"
7447 "\x58\x6d\x82\x97\xac\xc1\xd6\xeb"
7448 "\x00\x17\x2e\x45\x5c\x73\x8a\xa1"
7449 "\xb8\xcf\xe6\xfd\x14\x2b\x42\x59"
7450 "\x70\x87\x9e\xb5\xcc\xe3\xfa\x11"
7451 "\x28\x3f\x56\x6d\x84\x9b\xb2\xc9"
7452 "\xe0\xf7\x0e\x25\x3c\x53\x6a\x81"
7453 "\x98\xaf\xc6\xdd\xf4\x0b\x22\x39"
7454 "\x50\x67\x7e\x95\xac\xc3\xda\xf1"
7455 "\x08\x1f\x36\x4d\x64\x7b\x92\xa9"
7456 "\xc0\xd7\xee\x05\x1c\x33\x4a\x61"
7457 "\x78\x8f\xa6\xbd\xd4\xeb\x02\x19"
7458 "\x30\x47\x5e\x75\x8c\xa3\xba\xd1"
7459 "\xe8\xff\x16\x2d\x44\x5b\x72\x89"
7460 "\xa0\xb7\xce\xe5\xfc\x13\x2a\x41"
7461 "\x58\x6f\x86\x9d\xb4\xcb\xe2\xf9"
7462 "\x10\x27\x3e\x55\x6c\x83\x9a\xb1"
7463 "\xc8\xdf\xf6\x0d\x24\x3b\x52\x69"
7464 "\x80\x97\xae\xc5\xdc\xf3\x0a\x21"
7465 "\x38\x4f\x66\x7d\x94\xab\xc2\xd9"
7466 "\xf0\x07\x1e\x35\x4c\x63\x7a\x91"
7467 "\xa8\xbf\xd6\xed\x04\x1b\x32\x49"
7468 "\x60\x77\x8e\xa5\xbc\xd3\xea\x01"
7469 "\x18\x2f\x46\x5d\x74\x8b\xa2\xb9"
7470 "\xd0\xe7\xfe\x15\x2c\x43\x5a\x71"
7471 "\x88\x9f\xb6\xcd\xe4\xfb\x12\x29"
7472 "\x40\x57\x6e\x85\x9c\xb3\xca\xe1"
7473 "\xf8\x0f\x26\x3d\x54\x6b\x82\x99"
7474 "\xb0\xc7\xde\xf5\x0c\x23\x3a\x51"
7475 "\x68\x7f\x96\xad\xc4\xdb\xf2\x09"
7476 "\x20\x37\x4e\x65\x7c\x93\xaa\xc1"
7477 "\xd8\xef\x06\x1d\x34\x4b\x62\x79"
7478 "\x90\xa7\xbe\xd5\xec\x03\x1a\x31"
7479 "\x48\x5f\x76\x8d\xa4\xbb\xd2\xe9"
7480 "\x00\x19\x32\x4b\x64\x7d\x96\xaf"
7481 "\xc8\xe1\xfa\x13\x2c\x45\x5e\x77"
7482 "\x90\xa9\xc2\xdb\xf4\x0d\x26\x3f"
7483 "\x58\x71\x8a\xa3\xbc\xd5\xee\x07"
7484 "\x20\x39\x52\x6b\x84\x9d\xb6\xcf"
7485 "\xe8\x01\x1a\x33\x4c\x65\x7e\x97"
7486 "\xb0\xc9\xe2\xfb\x14\x2d\x46\x5f"
7487 "\x78\x91\xaa\xc3\xdc\xf5\x0e\x27"
7488 "\x40\x59\x72\x8b\xa4\xbd\xd6\xef"
7489 "\x08\x21\x3a\x53\x6c\x85\x9e\xb7"
7490 "\xd0\xe9\x02\x1b\x34\x4d\x66\x7f"
7491 "\x98\xb1\xca\xe3\xfc\x15\x2e\x47"
7492 "\x60\x79\x92\xab\xc4\xdd\xf6\x0f"
7493 "\x28\x41\x5a\x73\x8c\xa5\xbe\xd7"
7494 "\xf0\x09\x22\x3b\x54\x6d\x86\x9f"
7495 "\xb8\xd1\xea\x03\x1c\x35\x4e\x67"
7496 "\x80\x99\xb2\xcb\xe4\xfd\x16\x2f"
7497 "\x48\x61\x7a\x93\xac\xc5\xde\xf7"
7498 "\x10\x29\x42\x5b\x74\x8d\xa6\xbf"
7499 "\xd8\xf1\x0a\x23\x3c\x55\x6e\x87"
7500 "\xa0\xb9\xd2\xeb\x04\x1d\x36\x4f"
7501 "\x68\x81\x9a\xb3\xcc\xe5\xfe\x17"
7502 "\x30\x49\x62\x7b\x94\xad\xc6\xdf"
7503 "\xf8\x11\x2a\x43\x5c\x75\x8e\xa7"
7504 "\xc0\xd9\xf2\x0b\x24\x3d\x56\x6f"
7505 "\x88\xa1\xba\xd3\xec\x05\x1e\x37"
7506 "\x50\x69\x82\x9b\xb4\xcd\xe6\xff"
7507 "\x18\x31\x4a\x63\x7c\x95\xae\xc7"
7508 "\xe0\xf9\x12\x2b\x44\x5d\x76\x8f"
7509 "\xa8\xc1\xda\xf3\x0c\x25\x3e\x57"
7510 "\x70\x89\xa2\xbb\xd4\xed\x06\x1f"
7511 "\x38\x51\x6a\x83\x9c\xb5\xce\xe7"
7512 "\x00\x1b\x36\x51\x6c\x87\xa2\xbd"
7513 "\xd8\xf3\x0e\x29\x44\x5f\x7a\x95"
7514 "\xb0\xcb\xe6\x01\x1c\x37\x52\x6d"
7515 "\x88\xa3\xbe\xd9\xf4\x0f\x2a\x45"
7516 "\x60\x7b\x96\xb1\xcc\xe7\x02\x1d"
7517 "\x38\x53\x6e\x89\xa4\xbf\xda\xf5"
7518 "\x10\x2b\x46\x61\x7c\x97\xb2\xcd"
7519 "\xe8\x03\x1e\x39\x54\x6f\x8a\xa5"
7520 "\xc0\xdb\xf6\x11\x2c\x47\x62\x7d"
7521 "\x98\xb3\xce\xe9\x04\x1f\x3a\x55"
7522 "\x70\x8b\xa6\xc1\xdc\xf7\x12\x2d"
7523 "\x48\x63\x7e\x99\xb4\xcf\xea\x05"
7524 "\x20\x3b\x56\x71\x8c\xa7\xc2\xdd"
7525 "\xf8\x13\x2e\x49\x64\x7f\x9a\xb5"
7526 "\xd0\xeb\x06\x21\x3c\x57\x72\x8d"
7527 "\xa8\xc3\xde\xf9\x14\x2f\x4a\x65"
7528 "\x80\x9b\xb6\xd1\xec\x07\x22\x3d"
7529 "\x58\x73\x8e\xa9\xc4\xdf\xfa\x15"
7530 "\x30\x4b\x66\x81\x9c\xb7\xd2\xed"
7531 "\x08\x23\x3e\x59\x74\x8f\xaa\xc5"
7532 "\xe0\xfb\x16\x31\x4c\x67\x82\x9d"
7533 "\xb8\xd3\xee\x09\x24\x3f\x5a\x75"
7534 "\x90\xab\xc6\xe1\xfc\x17\x32\x4d"
7535 "\x68\x83\x9e\xb9\xd4\xef\x0a\x25"
7536 "\x40\x5b\x76\x91\xac\xc7\xe2\xfd"
7537 "\x18\x33\x4e\x69\x84\x9f\xba\xd5"
7538 "\xf0\x0b\x26\x41\x5c\x77\x92\xad"
7539 "\xc8\xe3\xfe\x19\x34\x4f\x6a\x85"
7540 "\xa0\xbb\xd6\xf1\x0c\x27\x42\x5d"
7541 "\x78\x93\xae\xc9\xe4\xff\x1a\x35"
7542 "\x50\x6b\x86\xa1\xbc\xd7\xf2\x0d"
7543 "\x28\x43\x5e\x79\x94\xaf\xca\xe5"
7544 "\x00\x1d\x3a\x57\x74\x91\xae\xcb"
7545 "\xe8\x05\x22\x3f\x5c\x79\x96\xb3"
7546 "\xd0\xed\x0a\x27\x44\x61\x7e\x9b"
7547 "\xb8\xd5\xf2\x0f\x2c\x49\x66\x83"
7548 "\xa0\xbd\xda\xf7\x14\x31\x4e\x6b"
7549 "\x88\xa5\xc2\xdf\xfc\x19\x36\x53"
7550 "\x70\x8d\xaa\xc7\xe4\x01\x1e\x3b"
7551 "\x58\x75\x92\xaf\xcc\xe9\x06\x23"
7552 "\x40\x5d\x7a\x97\xb4\xd1\xee\x0b"
7553 "\x28\x45\x62\x7f\x9c\xb9\xd6\xf3"
7554 "\x10\x2d\x4a\x67\x84\xa1\xbe\xdb"
7555 "\xf8\x15\x32\x4f\x6c\x89\xa6\xc3"
7556 "\xe0\xfd\x1a\x37\x54\x71\x8e\xab"
7557 "\xc8\xe5\x02\x1f\x3c\x59\x76\x93"
7558 "\xb0\xcd\xea\x07\x24\x41\x5e\x7b"
7559 "\x98\xb5\xd2\xef\x0c\x29\x46\x63"
7560 "\x80\x9d\xba\xd7\xf4\x11\x2e\x4b"
7561 "\x68\x85\xa2\xbf\xdc\xf9\x16\x33"
7562 "\x50\x6d\x8a\xa7\xc4\xe1\xfe\x1b"
7563 "\x38\x55\x72\x8f\xac\xc9\xe6\x03"
7564 "\x20\x3d\x5a\x77\x94\xb1\xce\xeb"
7565 "\x08\x25\x42\x5f\x7c\x99\xb6\xd3"
7566 "\xf0\x0d\x2a\x47\x64\x81\x9e\xbb"
7567 "\xd8\xf5\x12\x2f\x4c\x69\x86\xa3"
7568 "\xc0\xdd\xfa\x17\x34\x51\x6e\x8b"
7569 "\xa8\xc5\xe2\xff\x1c\x39\x56\x73"
7570 "\x90\xad\xca\xe7\x04\x21\x3e\x5b"
7571 "\x78\x95\xb2\xcf\xec\x09\x26\x43"
7572 "\x60\x7d\x9a\xb7\xd4\xf1\x0e\x2b"
7573 "\x48\x65\x82\x9f\xbc\xd9\xf6\x13"
7574 "\x30\x4d\x6a\x87\xa4\xc1\xde\xfb"
7575 "\x18\x35\x52\x6f\x8c\xa9\xc6\xe3"
7576 "\x00\x1f\x3e\x5d\x7c\x9b\xba\xd9"
7577 "\xf8\x17\x36\x55\x74\x93\xb2\xd1"
7578 "\xf0\x0f\x2e\x4d\x6c\x8b\xaa\xc9"
7579 "\xe8\x07\x26\x45\x64\x83\xa2\xc1"
7580 "\xe0\xff\x1e\x3d\x5c\x7b\x9a\xb9"
7581 "\xd8\xf7\x16\x35\x54\x73\x92\xb1"
7582 "\xd0\xef\x0e\x2d\x4c\x6b\x8a\xa9"
7583 "\xc8\xe7\x06\x25\x44\x63\x82\xa1"
7584 "\xc0\xdf\xfe\x1d\x3c\x5b\x7a\x99"
7585 "\xb8\xd7\xf6\x15\x34\x53\x72\x91"
7586 "\xb0\xcf\xee\x0d\x2c\x4b\x6a\x89"
7587 "\xa8\xc7\xe6\x05\x24\x43\x62\x81"
7588 "\xa0\xbf\xde\xfd\x1c\x3b\x5a\x79"
7589 "\x98\xb7\xd6\xf5\x14\x33\x52\x71"
7590 "\x90\xaf\xce\xed\x0c\x2b\x4a\x69"
7591 "\x88\xa7\xc6\xe5\x04\x23\x42\x61"
7592 "\x80\x9f\xbe\xdd\xfc\x1b\x3a\x59"
7593 "\x78\x97\xb6\xd5\xf4\x13\x32\x51"
7594 "\x70\x8f\xae\xcd\xec\x0b\x2a\x49"
7595 "\x68\x87\xa6\xc5\xe4\x03\x22\x41"
7596 "\x60\x7f\x9e\xbd\xdc\xfb\x1a\x39"
7597 "\x58\x77\x96\xb5\xd4\xf3\x12\x31"
7598 "\x50\x6f\x8e\xad\xcc\xeb\x0a\x29"
7599 "\x48\x67\x86\xa5\xc4\xe3\x02\x21"
7600 "\x40\x5f\x7e\x9d\xbc\xdb\xfa\x19"
7601 "\x38\x57\x76\x95\xb4\xd3\xf2\x11"
7602 "\x30\x4f\x6e\x8d\xac\xcb\xea\x09"
7603 "\x28\x47\x66\x85\xa4\xc3\xe2\x01"
7604 "\x20\x3f\x5e\x7d\x9c\xbb\xda\xf9"
7605 "\x18\x37\x56\x75\x94\xb3\xd2\xf1"
7606 "\x10\x2f\x4e\x6d\x8c\xab\xca\xe9"
7607 "\x08\x27\x46\x65\x84\xa3\xc2\xe1"
7608 "\x00\x21\x42\x63",
7609 .ilen = 4100,
7610 .result =
7611 "\xb5\x81\xf5\x64\x18\x73\xe3\xf0"
7612 "\x4c\x13\xf2\x77\x18\x60\x65\x5e"
7613 "\x29\x01\xce\x98\x55\x53\xf9\x0c"
7614 "\x2a\x08\xd5\x09\xb3\x57\x55\x56"
7615 "\xc5\xe9\x56\x90\xcb\x6a\xa3\xc0"
7616 "\xff\xc4\x79\xb4\xd2\x97\x5d\xc4"
7617 "\x43\xd1\xfe\x94\x7b\x88\x06\x5a"
7618 "\xb2\x9e\x2c\xfc\x44\x03\xb7\x90"
7619 "\xa0\xc1\xba\x6a\x33\xb8\xc7\xb2"
7620 "\x9d\xe1\x12\x4f\xc0\x64\xd4\x01"
7621 "\xfe\x8c\x7a\x66\xf7\xe6\x5a\x91"
7622 "\xbb\xde\x56\x86\xab\x65\x21\x30"
7623 "\x00\x84\x65\x24\xa5\x7d\x85\xb4"
7624 "\xe3\x17\xed\x3a\xb7\x6f\xb4\x0b"
7625 "\x0b\xaf\x15\xae\x5a\x8f\xf2\x0c"
7626 "\x2f\x27\xf4\x09\xd8\xd2\x96\xb7"
7627 "\x71\xf2\xc5\x99\x4d\x7e\x7f\x75"
7628 "\x77\x89\x30\x8b\x59\xdb\xa2\xb2"
7629 "\xa0\xf3\x19\x39\x2b\xc5\x7e\x3f"
7630 "\x4f\xd9\xd3\x56\x28\x97\x44\xdc"
7631 "\xc0\x8b\x77\x24\xd9\x52\xe7\xc5"
7632 "\xaf\xf6\x7d\x59\xb2\x44\x05\x1d"
7633 "\xb1\xb0\x11\xa5\x0f\xec\x33\xe1"
7634 "\x6d\x1b\x4e\x1f\xff\x57\x91\xb4"
7635 "\x5b\x9a\x96\xc5\x53\xbc\xae\x20"
7636 "\x3c\xbb\x14\xe2\xe8\x22\x33\xc1"
7637 "\x5e\x76\x9e\x46\x99\xf6\x2a\x15"
7638 "\xc6\x97\x02\xa0\x66\x43\xd1\xa6"
7639 "\x31\xa6\x9f\xfb\xf4\xd3\x69\xe5"
7640 "\xcd\x76\x95\xb8\x7a\x82\x7f\x21"
7641 "\x45\xff\x3f\xce\x55\xf6\x95\x10"
7642 "\x08\x77\x10\x43\xc6\xf3\x09\xe5"
7643 "\x68\xe7\x3c\xad\x00\x52\x45\x0d"
7644 "\xfe\x2d\xc6\xc2\x94\x8c\x12\x1d"
7645 "\xe6\x25\xae\x98\x12\x8e\x19\x9c"
7646 "\x81\x68\xb1\x11\xf6\x69\xda\xe3"
7647 "\x62\x08\x18\x7a\x25\x49\x28\xac"
7648 "\xba\x71\x12\x0b\xe4\xa2\xe5\xc7"
7649 "\x5d\x8e\xec\x49\x40\x21\xbf\x5a"
7650 "\x98\xf3\x02\x68\x55\x03\x7f\x8a"
7651 "\xe5\x94\x0c\x32\x5c\x07\x82\x63"
7652 "\xaf\x6f\x91\x40\x84\x8e\x52\x25"
7653 "\xd0\xb0\x29\x53\x05\xe2\x50\x7a"
7654 "\x34\xeb\xc9\x46\x20\xa8\x3d\xde"
7655 "\x7f\x16\x5f\x36\xc5\x2e\xdc\xd1"
7656 "\x15\x47\xc7\x50\x40\x6d\x91\xc5"
7657 "\xe7\x93\x95\x1a\xd3\x57\xbc\x52"
7658 "\x33\xee\x14\x19\x22\x52\x89\xa7"
7659 "\x4a\x25\x56\x77\x4b\xca\xcf\x0a"
7660 "\xe1\xf5\x35\x85\x30\x7e\x59\x4a"
7661 "\xbd\x14\x5b\xdf\xe3\x46\xcb\xac"
7662 "\x1f\x6c\x96\x0e\xf4\x81\xd1\x99"
7663 "\xca\x88\x63\x3d\x02\x58\x6b\xa9"
7664 "\xe5\x9f\xb3\x00\xb2\x54\xc6\x74"
7665 "\x1c\xbf\x46\xab\x97\xcc\xf8\x54"
7666 "\x04\x07\x08\x52\xe6\xc0\xda\x93"
7667 "\x74\x7d\x93\x99\x5d\x78\x68\xa6"
7668 "\x2e\x6b\xd3\x6a\x69\xcc\x12\x6b"
7669 "\xd4\xc7\xa5\xc6\xe7\xf6\x03\x04"
7670 "\x5d\xcd\x61\x5e\x17\x40\xdc\xd1"
7671 "\x5c\xf5\x08\xdf\x5c\x90\x85\xa4"
7672 "\xaf\xf6\x78\xbb\x0d\xf1\xf4\xa4"
7673 "\x54\x26\x72\x9e\x61\xfa\x86\xcf"
7674 "\xe8\x9e\xa1\xe0\xc7\x48\x23\xae"
7675 "\x5a\x90\xae\x75\x0a\x74\x18\x89"
7676 "\x05\xb1\x92\xb2\x7f\xd0\x1b\xa6"
7677 "\x62\x07\x25\x01\xc7\xc2\x4f\xf9"
7678 "\xe8\xfe\x63\x95\x80\x07\xb4\x26"
7679 "\xcc\xd1\x26\xb6\xc4\x3f\x9e\xcb"
7680 "\x8e\x3b\x2e\x44\x16\xd3\x10\x9a"
7681 "\x95\x08\xeb\xc8\xcb\xeb\xbf\x6f"
7682 "\x0b\xcd\x1f\xc8\xca\x86\xaa\xec"
7683 "\x33\xe6\x69\xf4\x45\x25\x86\x3a"
7684 "\x22\x94\x4f\x00\x23\x6a\x44\xc2"
7685 "\x49\x97\x33\xab\x36\x14\x0a\x70"
7686 "\x24\xc3\xbe\x04\x3b\x79\xa0\xf9"
7687 "\xb8\xe7\x76\x29\x22\x83\xd7\xf2"
7688 "\x94\xf4\x41\x49\xba\x5f\x7b\x07"
7689 "\xb5\xfb\xdb\x03\x1a\x9f\xb6\x4c"
7690 "\xc2\x2e\x37\x40\x49\xc3\x38\x16"
7691 "\xe2\x4f\x77\x82\xb0\x68\x4c\x71"
7692 "\x1d\x57\x61\x9c\xd9\x4e\x54\x99"
7693 "\x47\x13\x28\x73\x3c\xbb\x00\x90"
7694 "\xf3\x4d\xc9\x0e\xfd\xe7\xb1\x71"
7695 "\xd3\x15\x79\xbf\xcc\x26\x2f\xbd"
7696 "\xad\x6c\x50\x69\x6c\x3e\x6d\x80"
7697 "\x9a\xea\x78\xaf\x19\xb2\x0d\x4d"
7698 "\xad\x04\x07\xae\x22\x90\x4a\x93"
7699 "\x32\x0e\x36\x9b\x1b\x46\xba\x3b"
7700 "\xb4\xac\xc6\xd1\xa2\x31\x53\x3b"
7701 "\x2a\x3d\x45\xfe\x03\x61\x10\x85"
7702 "\x17\x69\xa6\x78\xcc\x6c\x87\x49"
7703 "\x53\xf9\x80\x10\xde\x80\xa2\x41"
7704 "\x6a\xc3\x32\x02\xad\x6d\x3c\x56"
7705 "\x00\x71\x51\x06\xa7\xbd\xfb\xef"
7706 "\x3c\xb5\x9f\xfc\x48\x7d\x53\x7c"
7707 "\x66\xb0\x49\x23\xc4\x47\x10\x0e"
7708 "\xe5\x6c\x74\x13\xe6\xc5\x3f\xaa"
7709 "\xde\xff\x07\x44\xdd\x56\x1b\xad"
7710 "\x09\x77\xfb\x5b\x12\xb8\x0d\x38"
7711 "\x17\x37\x35\x7b\x9b\xbc\xfe\xd4"
7712 "\x7e\x8b\xda\x7e\x5b\x04\xa7\x22"
7713 "\xa7\x31\xa1\x20\x86\xc7\x1b\x99"
7714 "\xdb\xd1\x89\xf4\x94\xa3\x53\x69"
7715 "\x8d\xe7\xe8\x74\x11\x8d\x74\xd6"
7716 "\x07\x37\x91\x9f\xfd\x67\x50\x3a"
7717 "\xc9\xe1\xf4\x36\xd5\xa0\x47\xd1"
7718 "\xf9\xe5\x39\xa3\x31\xac\x07\x36"
7719 "\x23\xf8\x66\x18\x14\x28\x34\x0f"
7720 "\xb8\xd0\xe7\x29\xb3\x04\x4b\x55"
7721 "\x01\x41\xb2\x75\x8d\xcb\x96\x85"
7722 "\x3a\xfb\xab\x2b\x9e\xfa\x58\x20"
7723 "\x44\x1f\xc0\x14\x22\x75\x61\xe8"
7724 "\xaa\x19\xcf\xf1\x82\x56\xf4\xd7"
7725 "\x78\x7b\x3d\x5f\xb3\x9e\x0b\x8a"
7726 "\x57\x50\xdb\x17\x41\x65\x4d\xa3"
7727 "\x02\xc9\x9c\x9c\x53\xfb\x39\x39"
7728 "\x9b\x1d\x72\x24\xda\xb7\x39\xbe"
7729 "\x13\x3b\xfa\x29\xda\x9e\x54\x64"
7730 "\x6e\xba\xd8\xa1\xcb\xb3\x36\xfa"
7731 "\xcb\x47\x85\xe9\x61\x38\xbc\xbe"
7732 "\xc5\x00\x38\x2a\x54\xf7\xc4\xb9"
7733 "\xb3\xd3\x7b\xa0\xa0\xf8\x72\x7f"
7734 "\x8c\x8e\x82\x0e\xc6\x1c\x75\x9d"
7735 "\xca\x8e\x61\x87\xde\xad\x80\xd2"
7736 "\xf5\xf9\x80\xef\x15\x75\xaf\xf5"
7737 "\x80\xfb\xff\x6d\x1e\x25\xb7\x40"
7738 "\x61\x6a\x39\x5a\x6a\xb5\x31\xab"
7739 "\x97\x8a\x19\x89\x44\x40\xc0\xa6"
7740 "\xb4\x4e\x30\x32\x7b\x13\xe7\x67"
7741 "\xa9\x8b\x57\x04\xc2\x01\xa6\xf4"
7742 "\x28\x99\xad\x2c\x76\xa3\x78\xc2"
7743 "\x4a\xe6\xca\x5c\x50\x6a\xc1\xb0"
7744 "\x62\x4b\x10\x8e\x7c\x17\x43\xb3"
7745 "\x17\x66\x1c\x3e\x8d\x69\xf0\x5a"
7746 "\x71\xf5\x97\xdc\xd1\x45\xdd\x28"
7747 "\xf3\x5d\xdf\x53\x7b\x11\xe5\xbc"
7748 "\x4c\xdb\x1b\x51\x6b\xe9\xfb\x3d"
7749 "\xc1\xc3\x2c\xb9\x71\xf5\xb6\xb2"
7750 "\x13\x36\x79\x80\x53\xe8\xd3\xa6"
7751 "\x0a\xaf\xfd\x56\x97\xf7\x40\x8e"
7752 "\x45\xce\xf8\xb0\x9e\x5c\x33\x82"
7753 "\xb0\x44\x56\xfc\x05\x09\xe9\x2a"
7754 "\xac\x26\x80\x14\x1d\xc8\x3a\x35"
7755 "\x4c\x82\x97\xfd\x76\xb7\xa9\x0a"
7756 "\x35\x58\x79\x8e\x0f\x66\xea\xaf"
7757 "\x51\x6c\x09\xa9\x6e\x9b\xcb\x9a"
7758 "\x31\x47\xa0\x2f\x7c\x71\xb4\x4a"
7759 "\x11\xaa\x8c\x66\xc5\x64\xe6\x3a"
7760 "\x54\xda\x24\x6a\xc4\x41\x65\x46"
7761 "\x82\xa0\x0a\x0f\x5f\xfb\x25\xd0"
7762 "\x2c\x91\xa7\xee\xc4\x81\x07\x86"
7763 "\x75\x5e\x33\x69\x97\xe4\x2c\xa8"
7764 "\x9d\x9f\x0b\x6a\xbe\xad\x98\xda"
7765 "\x6d\x94\x41\xda\x2c\x1e\x89\xc4"
7766 "\xc2\xaf\x1e\x00\x05\x0b\x83\x60"
7767 "\xbd\x43\xea\x15\x23\x7f\xb9\xac"
7768 "\xee\x4f\x2c\xaf\x2a\xf3\xdf\xd0"
7769 "\xf3\x19\x31\xbb\x4a\x74\x84\x17"
7770 "\x52\x32\x2c\x7d\x61\xe4\xcb\xeb"
7771 "\x80\x38\x15\x52\xcb\x6f\xea\xe5"
7772 "\x73\x9c\xd9\x24\x69\xc6\x95\x32"
7773 "\x21\xc8\x11\xe4\xdc\x36\xd7\x93"
7774 "\x38\x66\xfb\xb2\x7f\x3a\xb9\xaf"
7775 "\x31\xdd\x93\x75\x78\x8a\x2c\x94"
7776 "\x87\x1a\x58\xec\x9e\x7d\x4d\xba"
7777 "\xe1\xe5\x4d\xfc\xbc\xa4\x2a\x14"
7778 "\xef\xcc\xa7\xec\xab\x43\x09\x18"
7779 "\xd3\xab\x68\xd1\x07\x99\x44\x47"
7780 "\xd6\x83\x85\x3b\x30\xea\xa9\x6b"
7781 "\x63\xea\xc4\x07\xfb\x43\x2f\xa4"
7782 "\xaa\xb0\xab\x03\x89\xce\x3f\x8c"
7783 "\x02\x7c\x86\x54\xbc\x88\xaf\x75"
7784 "\xd2\xdc\x63\x17\xd3\x26\xf6\x96"
7785 "\xa9\x3c\xf1\x61\x8c\x11\x18\xcc"
7786 "\xd6\xea\x5b\xe2\xcd\xf0\xf1\xb2"
7787 "\xe5\x35\x90\x1f\x85\x4c\x76\x5b"
7788 "\x66\xce\x44\xa4\x32\x9f\xe6\x7b"
7789 "\x71\x6e\x9f\x58\x15\x67\x72\x87"
7790 "\x64\x8e\x3a\x44\x45\xd4\x76\xfa"
7791 "\xc2\xf6\xef\x85\x05\x18\x7a\x9b"
7792 "\xba\x41\x54\xac\xf0\xfc\x59\x12"
7793 "\x3f\xdf\xa0\xe5\x8a\x65\xfd\x3a"
7794 "\x62\x8d\x83\x2c\x03\xbe\x05\x76"
7795 "\x2e\x53\x49\x97\x94\x33\xae\x40"
7796 "\x81\x15\xdb\x6e\xad\xaa\xf5\x4b"
7797 "\xe3\x98\x70\xdf\xe0\x7c\xcd\xdb"
7798 "\x02\xd4\x7d\x2f\xc1\xe6\xb4\xf3"
7799 "\xd7\x0d\x7a\xd9\x23\x9e\x87\x2d"
7800 "\xce\x87\xad\xcc\x72\x05\x00\x29"
7801 "\xdc\x73\x7f\x64\xc1\x15\x0e\xc2"
7802 "\xdf\xa7\x5f\xeb\x41\xa1\xcd\xef"
7803 "\x5c\x50\x79\x2a\x56\x56\x71\x8c"
7804 "\xac\xc0\x79\x50\x69\xca\x59\x32"
7805 "\x65\xf2\x54\xe4\x52\x38\x76\xd1"
7806 "\x5e\xde\x26\x9e\xfb\x75\x2e\x11"
7807 "\xb5\x10\xf4\x17\x73\xf5\x89\xc7"
7808 "\x4f\x43\x5c\x8e\x7c\xb9\x05\x52"
7809 "\x24\x40\x99\xfe\x9b\x85\x0b\x6c"
7810 "\x22\x3e\x8b\xae\x86\xa1\xd2\x79"
7811 "\x05\x68\x6b\xab\xe3\x41\x49\xed"
7812 "\x15\xa1\x8d\x40\x2d\x61\xdf\x1a"
7813 "\x59\xc9\x26\x8b\xef\x30\x4c\x88"
7814 "\x4b\x10\xf8\x8d\xa6\x92\x9f\x4b"
7815 "\xf3\xc4\x53\x0b\x89\x5d\x28\x92"
7816 "\xcf\x78\xb2\xc0\x5d\xed\x7e\xfc"
7817 "\xc0\x12\x23\x5f\x5a\x78\x86\x43"
7818 "\x6e\x27\xf7\x5a\xa7\x6a\xed\x19"
7819 "\x04\xf0\xb3\x12\xd1\xbd\x0e\x89"
7820 "\x6e\xbc\x96\xa8\xd8\x49\x39\x9f"
7821 "\x7e\x67\xf0\x2e\x3e\x01\xa9\xba"
7822 "\xec\x8b\x62\x8e\xcb\x4a\x70\x43"
7823 "\xc7\xc2\xc4\xca\x82\x03\x73\xe9"
7824 "\x11\xdf\xcf\x54\xea\xc9\xb0\x95"
7825 "\x51\xc0\x13\x3d\x92\x05\xfa\xf4"
7826 "\xa9\x34\xc8\xce\x6c\x3d\x54\xcc"
7827 "\xc4\xaf\xf1\xdc\x11\x44\x26\xa2"
7828 "\xaf\xf1\x85\x75\x7d\x03\x61\x68"
7829 "\x4e\x78\xc6\x92\x7d\x86\x7d\x77"
7830 "\xdc\x71\x72\xdb\xc6\xae\xa1\xcb"
7831 "\x70\x9a\x0b\x19\xbe\x4a\x6c\x2a"
7832 "\xe2\xba\x6c\x64\x9a\x13\x28\xdf"
7833 "\x85\x75\xe6\x43\xf6\x87\x08\x68"
7834 "\x6e\xba\x6e\x79\x9f\x04\xbc\x23"
7835 "\x50\xf6\x33\x5c\x1f\x24\x25\xbe"
7836 "\x33\x47\x80\x45\x56\xa3\xa7\xd7"
7837 "\x7a\xb1\x34\x0b\x90\x3c\x9c\xad"
7838 "\x44\x5f\x9e\x0e\x9d\xd4\xbd\x93"
7839 "\x5e\xfa\x3c\xe0\xb0\xd9\xed\xf3"
7840 "\xd6\x2e\xff\x24\xd8\x71\x6c\xed"
7841 "\xaf\x55\xeb\x22\xac\x93\x68\x32"
7842 "\x05\x5b\x47\xdd\xc6\x4a\xcb\xc7"
7843 "\x10\xe1\x3c\x92\x1a\xf3\x23\x78"
7844 "\x2b\xa1\xd2\x80\xf4\x12\xb1\x20"
7845 "\x8f\xff\x26\x35\xdd\xfb\xc7\x4e"
7846 "\x78\xf1\x2d\x50\x12\x77\xa8\x60"
7847 "\x7c\x0f\xf5\x16\x2f\x63\x70\x2a"
7848 "\xc0\x96\x80\x4e\x0a\xb4\x93\x35"
7849 "\x5d\x1d\x3f\x56\xf7\x2f\xbb\x90"
7850 "\x11\x16\x8f\xa2\xec\x47\xbe\xac"
7851 "\x56\x01\x26\x56\xb1\x8c\xb2\x10"
7852 "\xf9\x1a\xca\xf5\xd1\xb7\x39\x20"
7853 "\x63\xf1\x69\x20\x4f\x13\x12\x1f"
7854 "\x5b\x65\xfc\x98\xf7\xc4\x7a\xbe"
7855 "\xf7\x26\x4d\x2b\x84\x7b\x42\xad"
7856 "\xd8\x7a\x0a\xb4\xd8\x74\xbf\xc1"
7857 "\xf0\x6e\xb4\x29\xa3\xbb\xca\x46"
7858 "\x67\x70\x6a\x2d\xce\x0e\xa2\x8a"
7859 "\xa9\x87\xbf\x05\xc4\xc1\x04\xa3"
7860 "\xab\xd4\x45\x43\x8c\xb6\x02\xb0"
7861 "\x41\xc8\xfc\x44\x3d\x59\xaa\x2e"
7862 "\x44\x21\x2a\x8d\x88\x9d\x57\xf4"
7863 "\xa0\x02\x77\xb8\xa6\xa0\xe6\x75"
7864 "\x5c\x82\x65\x3e\x03\x5c\x29\x8f"
7865 "\x38\x55\xab\x33\x26\xef\x9f\x43"
7866 "\x52\xfd\x68\xaf\x36\xb4\xbb\x9a"
7867 "\x58\x09\x09\x1b\xc3\x65\x46\x46"
7868 "\x1d\xa7\x94\x18\x23\x50\x2c\xca"
7869 "\x2c\x55\x19\x97\x01\x9d\x93\x3b"
7870 "\x63\x86\xf2\x03\x67\x45\xd2\x72"
7871 "\x28\x52\x6c\xf4\xe3\x1c\xb5\x11"
7872 "\x13\xf1\xeb\x21\xc7\xd9\x56\x82"
7873 "\x2b\x82\x39\xbd\x69\x54\xed\x62"
7874 "\xc3\xe2\xde\x73\xd4\x6a\x12\xae"
7875 "\x13\x21\x7f\x4b\x5b\xfc\xbf\xe8"
7876 "\x2b\xbe\x56\xba\x68\x8b\x9a\xb1"
7877 "\x6e\xfa\xbf\x7e\x5a\x4b\xf1\xac"
7878 "\x98\x65\x85\xd1\x93\x53\xd3\x7b"
7879 "\x09\xdd\x4b\x10\x6d\x84\xb0\x13"
7880 "\x65\xbd\xcf\x52\x09\xc4\x85\xe2"
7881 "\x84\x74\x15\x65\xb7\xf7\x51\xaf"
7882 "\x55\xad\xa4\xd1\x22\x54\x70\x94"
7883 "\xa0\x1c\x90\x41\xfd\x99\xd7\x5a"
7884 "\x31\xef\xaa\x25\xd0\x7f\x4f\xea"
7885 "\x1d\x55\x42\xe5\x49\xb0\xd0\x46"
7886 "\x62\x36\x43\xb2\x82\x15\x75\x50"
7887 "\xa4\x72\xeb\x54\x27\x1f\x8a\xe4"
7888 "\x7d\xe9\x66\xc5\xf1\x53\xa4\xd1"
7889 "\x0c\xeb\xb8\xf8\xbc\xd4\xe2\xe7"
7890 "\xe1\xf8\x4b\xcb\xa9\xa1\xaf\x15"
7891 "\x83\xcb\x72\xd0\x33\x79\x00\x2d"
7892 "\x9f\xd7\xf1\x2e\x1e\x10\xe4\x45"
7893 "\xc0\x75\x3a\x39\xea\x68\xf7\x5d"
7894 "\x1b\x73\x8f\xe9\x8e\x0f\x72\x47"
7895 "\xae\x35\x0a\x31\x7a\x14\x4d\x4a"
7896 "\x6f\x47\xf7\x7e\x91\x6e\x74\x8b"
7897 "\x26\x47\xf9\xc3\xf9\xde\x70\xf5"
7898 "\x61\xab\xa9\x27\x9f\x82\xe4\x9c"
7899 "\x89\x91\x3f\x2e\x6a\xfd\xb5\x49"
7900 "\xe9\xfd\x59\x14\x36\x49\x40\x6d"
7901 "\x32\xd8\x85\x42\xf3\xa5\xdf\x0c"
7902 "\xa8\x27\xd7\x54\xe2\x63\x2f\xf2"
7903 "\x7e\x8b\x8b\xe7\xf1\x9a\x95\x35"
7904 "\x43\xdc\x3a\xe4\xb6\xf4\xd0\xdf"
7905 "\x9c\xcb\x94\xf3\x21\xa0\x77\x50"
7906 "\xe2\xc6\xc4\xc6\x5f\x09\x64\x5b"
7907 "\x92\x90\xd8\xe1\xd1\xed\x4b\x42"
7908 "\xd7\x37\xaf\x65\x3d\x11\x39\xb6"
7909 "\x24\x8a\x60\xae\xd6\x1e\xbf\x0e"
7910 "\x0d\xd7\xdc\x96\x0e\x65\x75\x4e"
7911 "\x29\x06\x9d\xa4\x51\x3a\x10\x63"
7912 "\x8f\x17\x07\xd5\x8e\x3c\xf4\x28"
7913 "\x00\x5a\x5b\x05\x19\xd8\xc0\x6c"
7914 "\xe5\x15\xe4\x9c\x9d\x71\x9d\x5e"
7915 "\x94\x29\x1a\xa7\x80\xfa\x0e\x33"
7916 "\x03\xdd\xb7\x3e\x9a\xa9\x26\x18"
7917 "\x37\xa9\x64\x08\x4d\x94\x5a\x88"
7918 "\xca\x35\xce\x81\x02\xe3\x1f\x1b"
7919 "\x89\x1a\x77\x85\xe3\x41\x6d\x32"
7920 "\x42\x19\x23\x7d\xc8\x73\xee\x25"
7921 "\x85\x0d\xf8\x31\x25\x79\x1b\x6f"
7922 "\x79\x25\xd2\xd8\xd4\x23\xfd\xf7"
7923 "\x82\x36\x6a\x0c\x46\x22\x15\xe9"
7924 "\xff\x72\x41\x91\x91\x7d\x3a\xb7"
7925 "\xdd\x65\x99\x70\xf6\x8d\x84\xf8"
7926 "\x67\x15\x20\x11\xd6\xb2\x55\x7b"
7927 "\xdb\x87\xee\xef\x55\x89\x2a\x59"
7928 "\x2b\x07\x8f\x43\x8a\x59\x3c\x01"
7929 "\x8b\x65\x54\xa1\x66\xd5\x38\xbd"
7930 "\xc6\x30\xa9\xcc\x49\xb6\xa8\x1b"
7931 "\xb8\xc0\x0e\xe3\x45\x28\xe2\xff"
7932 "\x41\x9f\x7e\x7c\xd1\xae\x9e\x25"
7933 "\x3f\x4c\x7c\x7c\xf4\xa8\x26\x4d"
7934 "\x5c\xfd\x4b\x27\x18\xf9\x61\x76"
7935 "\x48\xba\x0c\x6b\xa9\x4d\xfc\xf5"
7936 "\x3b\x35\x7e\x2f\x4a\xa9\xc2\x9a"
7937 "\xae\xab\x86\x09\x89\xc9\xc2\x40"
7938 "\x39\x2c\x81\xb3\xb8\x17\x67\xc2"
7939 "\x0d\x32\x4a\x3a\x67\x81\xd7\x1a"
7940 "\x34\x52\xc5\xdb\x0a\xf5\x63\x39"
7941 "\xea\x1f\xe1\x7c\xa1\x9e\xc1\x35"
7942 "\xe3\xb1\x18\x45\x67\xf9\x22\x38"
7943 "\x95\xd9\x34\x34\x86\xc6\x41\x94"
7944 "\x15\xf9\x5b\x41\xa6\x87\x8b\xf8"
7945 "\xd5\xe1\x1b\xe2\x5b\xf3\x86\x10"
7946 "\xff\xe6\xae\x69\x76\xbc\x0d\xb4"
7947 "\x09\x90\x0c\xa2\x65\x0c\xad\x74"
7948 "\xf5\xd7\xff\xda\xc1\xce\x85\xbe"
7949 "\x00\xa7\xff\x4d\x2f\x65\xd3\x8c"
7950 "\x86\x2d\x05\xe8\xed\x3e\x6b\x8b"
7951 "\x0f\x3d\x83\x8c\xf1\x1d\x5b\x96"
7952 "\x2e\xb1\x9c\xc2\x98\xe1\x70\xb9"
7953 "\xba\x5c\x8a\x43\xd6\x34\xa7\x2d"
7954 "\xc9\x92\xae\xf2\xa5\x7b\x05\x49"
7955 "\xa7\x33\x34\x86\xca\xe4\x96\x23"
7956 "\x76\x5b\xf2\xc6\xf1\x51\x28\x42"
7957 "\x7b\xcc\x76\x8f\xfa\xa2\xad\x31"
7958 "\xd4\xd6\x7a\x6d\x25\x25\x54\xe4"
7959 "\x3f\x50\x59\xe1\x5c\x05\xb7\x27"
7960 "\x48\xbf\x07\xec\x1b\x13\xbe\x2b"
7961 "\xa1\x57\x2b\xd5\xab\xd7\xd0\x4c"
7962 "\x1e\xcb\x71\x9b\xc5\x90\x85\xd3"
7963 "\xde\x59\xec\x71\xeb\x89\xbb\xd0"
7964 "\x09\x50\xe1\x16\x3f\xfd\x1c\x34"
7965 "\xc3\x1c\xa1\x10\x77\x53\x98\xef"
7966 "\xf2\xfd\xa5\x01\x59\xc2\x9b\x26"
7967 "\xc7\x42\xd9\x49\xda\x58\x2b\x6e"
7968 "\x9f\x53\x19\x76\x7e\xd9\xc9\x0e"
7969 "\x68\xc8\x7f\x51\x22\x42\xef\x49"
7970 "\xa4\x55\xb6\x36\xac\x09\xc7\x31"
7971 "\x88\x15\x4b\x2e\x8f\x3a\x08\xf7"
7972 "\xd8\xf7\xa8\xc5\xa9\x33\xa6\x45"
7973 "\xe4\xc4\x94\x76\xf3\x0d\x8f\x7e"
7974 "\xc8\xf6\xbc\x23\x0a\xb6\x4c\xd3"
7975 "\x6a\xcd\x36\xc2\x90\x5c\x5c\x3c"
7976 "\x65\x7b\xc2\xd6\xcc\xe6\x0d\x87"
7977 "\x73\x2e\x71\x79\x16\x06\x63\x28"
7978 "\x09\x15\xd8\x89\x38\x38\x3d\xb5"
7979 "\x42\x1c\x08\x24\xf7\x2a\xd2\x9d"
7980 "\xc8\xca\xef\xf9\x27\xd8\x07\x86"
7981 "\xf7\x43\x0b\x55\x15\x3f\x9f\x83"
7982 "\xef\xdc\x49\x9d\x2a\xc1\x54\x62"
7983 "\xbd\x9b\x66\x55\x9f\xb7\x12\xf3"
7984 "\x1b\x4d\x9d\x2a\x5c\xed\x87\x75"
7985 "\x87\x26\xec\x61\x2c\xb4\x0f\x89"
7986 "\xb0\xfb\x2e\x68\x5d\x15\xc7\x8d"
7987 "\x2e\xc0\xd9\xec\xaf\x4f\xd2\x25"
7988 "\x29\xe8\xd2\x26\x2b\x67\xe9\xfc"
7989 "\x2b\xa8\x67\x96\x12\x1f\x5b\x96"
7990 "\xc6\x14\x53\xaf\x44\xea\xd6\xe2"
7991 "\x94\x98\xe4\x12\x93\x4c\x92\xe0"
7992 "\x18\xa5\x8d\x2d\xe4\x71\x3c\x47"
7993 "\x4c\xf7\xe6\x47\x9e\xc0\x68\xdf"
7994 "\xd4\xf5\x5a\x74\xb1\x2b\x29\x03"
7995 "\x19\x07\xaf\x90\x62\x5c\x68\x98"
7996 "\x48\x16\x11\x02\x9d\xee\xb4\x9b"
7997 "\xe5\x42\x7f\x08\xfd\x16\x32\x0b"
7998 "\xd0\xb3\xfa\x2b\xb7\x99\xf9\x29"
7999 "\xcd\x20\x45\x9f\xb3\x1a\x5d\xa2"
8000 "\xaf\x4d\xe0\xbd\x42\x0d\xbc\x74"
8001 "\x99\x9c\x8e\x53\x1a\xb4\x3e\xbd"
8002 "\xa2\x9a\x2d\xf7\xf8\x39\x0f\x67"
8003 "\x63\xfc\x6b\xc0\xaf\xb3\x4b\x4f"
8004 "\x55\xc4\xcf\xa7\xc8\x04\x11\x3e"
8005 "\x14\x32\xbb\x1b\x38\x77\xd6\x7f"
8006 "\x54\x4c\xdf\x75\xf3\x07\x2d\x33"
8007 "\x9b\xa8\x20\xe1\x7b\x12\xb5\xf3"
8008 "\xef\x2f\xce\x72\xe5\x24\x60\xc1"
8009 "\x30\xe2\xab\xa1\x8e\x11\x09\xa8"
8010 "\x21\x33\x44\xfe\x7f\x35\x32\x93"
8011 "\x39\xa7\xad\x8b\x79\x06\xb2\xcb"
8012 "\x4e\xa9\x5f\xc7\xba\x74\x29\xec"
8013 "\x93\xa0\x4e\x54\x93\xc0\xbc\x55"
8014 "\x64\xf0\x48\xe5\x57\x99\xee\x75"
8015 "\xd6\x79\x0f\x66\xb7\xc6\x57\x76"
8016 "\xf7\xb7\xf3\x9c\xc5\x60\xe8\x7f"
8017 "\x83\x76\xd6\x0e\xaa\xe6\x90\x39"
8018 "\x1d\xa6\x32\x6a\x34\xe3\x55\xf8"
8019 "\x58\xa0\x58\x7d\x33\xe0\x22\x39"
8020 "\x44\x64\x87\x86\x5a\x2f\xa7\x7e"
8021 "\x0f\x38\xea\xb0\x30\xcc\x61\xa5"
8022 "\x6a\x32\xae\x1e\xf7\xe9\xd0\xa9"
8023 "\x0c\x32\x4b\xb5\x49\x28\xab\x85"
8024 "\x2f\x8e\x01\x36\x38\x52\xd0\xba"
8025 "\xd6\x02\x78\xf8\x0e\x3e\x9c\x8b"
8026 "\x6b\x45\x99\x3f\x5c\xfe\x58\xf1"
8027 "\x5c\x94\x04\xe1\xf5\x18\x6d\x51"
8028 "\xb2\x5d\x18\x20\xb6\xc2\x9a\x42"
8029 "\x1d\xb3\xab\x3c\xb6\x3a\x13\x03"
8030 "\xb2\x46\x82\x4f\xfc\x64\xbc\x4f"
8031 "\xca\xfa\x9c\xc0\xd5\xa7\xbd\x11"
8032 "\xb7\xe4\x5a\xf6\x6f\x4d\x4d\x54"
8033 "\xea\xa4\x98\x66\xd4\x22\x3b\xd3"
8034 "\x8f\x34\x47\xd9\x7c\xf4\x72\x3b"
8035 "\x4d\x02\x77\xf6\xd6\xdd\x08\x0a"
8036 "\x81\xe1\x86\x89\x3e\x56\x10\x3c"
8037 "\xba\xd7\x81\x8c\x08\xbc\x8b\xe2"
8038 "\x53\xec\xa7\x89\xee\xc8\x56\xb5"
8039 "\x36\x2c\xb2\x03\xba\x99\xdd\x7c"
8040 "\x48\xa0\xb0\xbc\x91\x33\xe9\xa8"
8041 "\xcb\xcd\xcf\x59\x5f\x1f\x15\xe2"
8042 "\x56\xf5\x4e\x01\x35\x27\x45\x77"
8043 "\x47\xc8\xbc\xcb\x7e\x39\xc1\x97"
8044 "\x28\xd3\x84\xfc\x2c\x3e\xc8\xad"
8045 "\x9c\xf8\x8a\x61\x9c\x28\xaa\xc5"
8046 "\x99\x20\x43\x85\x9d\xa5\xe2\x8b"
8047 "\xb8\xae\xeb\xd0\x32\x0d\x52\x78"
8048 "\x09\x56\x3f\xc7\xd8\x7e\x26\xfc"
8049 "\x37\xfb\x6f\x04\xfc\xfa\x92\x10"
8050 "\xac\xf8\x3e\x21\xdc\x8c\x21\x16"
8051 "\x7d\x67\x6e\xf6\xcd\xda\xb6\x98"
8052 "\x23\xab\x23\x3c\xb2\x10\xa0\x53"
8053 "\x5a\x56\x9f\xc5\xd0\xff\xbb\xe4"
8054 "\x98\x3c\x69\x1e\xdb\x38\x8f\x7e"
8055 "\x0f\xd2\x98\x88\x81\x8b\x45\x67"
8056 "\xea\x33\xf1\xeb\xe9\x97\x55\x2e"
8057 "\xd9\xaa\xeb\x5a\xec\xda\xe1\x68"
8058 "\xa8\x9d\x3c\x84\x7c\x05\x3d\x62"
8059 "\x87\x8f\x03\x21\x28\x95\x0c\x89"
8060 "\x25\x22\x4a\xb0\x93\xa9\x50\xa2"
8061 "\x2f\x57\x6e\x18\x42\x19\x54\x0c"
8062 "\x55\x67\xc6\x11\x49\xf4\x5c\xd2"
8063 "\xe9\x3d\xdd\x8b\x48\x71\x21\x00"
8064 "\xc3\x9a\x6c\x85\x74\x28\x83\x4a"
8065 "\x1b\x31\x05\xe1\x06\x92\xe7\xda"
8066 "\x85\x73\x78\x45\x20\x7f\xae\x13"
8067 "\x7c\x33\x06\x22\xf4\x83\xf9\x35"
8068 "\x3f\x6c\x71\xa8\x4e\x48\xbe\x9b"
8069 "\xce\x8a\xba\xda\xbe\x28\x08\xf7"
8070 "\xe2\x14\x8c\x71\xea\x72\xf9\x33"
8071 "\xf2\x88\x3f\xd7\xbb\x69\x6c\x29"
8072 "\x19\xdc\x84\xce\x1f\x12\x4f\xc8"
8073 "\xaf\xa5\x04\xba\x5a\xab\xb0\xd9"
8074 "\x14\x1f\x6c\x68\x98\x39\x89\x7a"
8075 "\xd9\xd8\x2f\xdf\xa8\x47\x4a\x25"
8076 "\xe2\xfb\x33\xf4\x59\x78\xe1\x68"
8077 "\x85\xcf\xfe\x59\x20\xd4\x05\x1d"
8078 "\x80\x99\xae\xbc\xca\xae\x0f\x2f"
8079 "\x65\x43\x34\x8e\x7e\xac\xd3\x93"
8080 "\x2f\xac\x6d\x14\x3d\x02\x07\x70"
8081 "\x9d\xa4\xf3\x1b\x5c\x36\xfc\x01"
8082 "\x73\x34\x85\x0c\x6c\xd6\xf1\xbd"
8083 "\x3f\xdf\xee\xf5\xd9\xba\x56\xef"
8084 "\xf4\x9b\x6b\xee\x9f\x5a\x78\x6d"
8085 "\x32\x19\xf4\xf7\xf8\x4c\x69\x0b"
8086 "\x4b\xbc\xbb\xb7\xf2\x85\xaf\x70"
8087 "\x75\x24\x6c\x54\xa7\x0e\x4d\x1d"
8088 "\x01\xbf\x08\xac\xcf\x7f\x2c\xe3"
8089 "\x14\x89\x5e\x70\x5a\x99\x92\xcd"
8090 "\x01\x84\xc8\xd2\xab\xe5\x4f\x58"
8091 "\xe7\x0f\x2f\x0e\xff\x68\xea\xfd"
8092 "\x15\xb3\x17\xe6\xb0\xe7\x85\xd8"
8093 "\x23\x2e\x05\xc7\xc9\xc4\x46\x1f"
8094 "\xe1\x9e\x49\x20\x23\x24\x4d\x7e"
8095 "\x29\x65\xff\xf4\xb6\xfd\x1a\x85"
8096 "\xc4\x16\xec\xfc\xea\x7b\xd6\x2c"
8097 "\x43\xf8\xb7\xbf\x79\xc0\x85\xcd"
8098 "\xef\xe1\x98\xd3\xa5\xf7\x90\x8c"
8099 "\xe9\x7f\x80\x6b\xd2\xac\x4c\x30"
8100 "\xa7\xc6\x61\x6c\xd2\xf9\x2c\xff"
8101 "\x30\xbc\x22\x81\x7d\x93\x12\xe4"
8102 "\x0a\xcd\xaf\xdd\xe8\xab\x0a\x1e"
8103 "\x13\xa4\x27\xc3\x5f\xf7\x4b\xbb"
8104 "\x37\x09\x4b\x91\x6f\x92\x4f\xaf"
8105 "\x52\xee\xdf\xef\x09\x6f\xf7\x5c"
8106 "\x6e\x12\x17\x72\x63\x57\xc7\xba"
8107 "\x3b\x6b\x38\x32\x73\x1b\x9c\x80"
8108 "\xc1\x7a\xc6\xcf\xcd\x35\xc0\x6b"
8109 "\x31\x1a\x6b\xe9\xd8\x2c\x29\x3f"
8110 "\x96\xfb\xb6\xcd\x13\x91\x3b\xc2"
8111 "\xd2\xa3\x31\x8d\xa4\xcd\x57\xcd"
8112 "\x13\x3d\x64\xfd\x06\xce\xe6\xdc"
8113 "\x0c\x24\x43\x31\x40\x57\xf1\x72"
8114 "\x17\xe3\x3a\x63\x6d\x35\xcf\x5d"
8115 "\x97\x40\x59\xdd\xf7\x3c\x02\xf7"
8116 "\x1c\x7e\x05\xbb\xa9\x0d\x01\xb1"
8117 "\x8e\xc0\x30\xa9\x53\x24\xc9\x89"
8118 "\x84\x6d\xaa\xd0\xcd\x91\xc2\x4d"
8119 "\x91\xb0\x89\xe2\xbf\x83\x44\xaa"
8120 "\x28\x72\x23\xa0\xc2\xad\xad\x1c"
8121 "\xfc\x3f\x09\x7a\x0b\xdc\xc5\x1b"
8122 "\x87\x13\xc6\x5b\x59\x8d\xf2\xc8"
8123 "\xaf\xdf\x11\x95",
8124 .rlen = 4100,
8125 .np = 2,
8126 .tap = { 4064, 36 },
8127 },
8128};
8129
8130/*
8131 * CTS (Cipher Text Stealing) mode tests
8132 */
8133#define CTS_MODE_ENC_TEST_VECTORS 6
8134#define CTS_MODE_DEC_TEST_VECTORS 6
8135static struct cipher_testvec cts_mode_enc_tv_template[] = {
8136 { /* from rfc3962 */
8137 .klen = 16,
8138 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8139 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8140 .ilen = 17,
8141 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8142 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8143 "\x20",
8144 .rlen = 17,
8145 .result = "\xc6\x35\x35\x68\xf2\xbf\x8c\xb4"
8146 "\xd8\xa5\x80\x36\x2d\xa7\xff\x7f"
8147 "\x97",
8148 }, {
8149 .klen = 16,
8150 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8151 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8152 .ilen = 31,
8153 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8154 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8155 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8156 "\x20\x47\x61\x75\x27\x73\x20",
8157 .rlen = 31,
8158 .result = "\xfc\x00\x78\x3e\x0e\xfd\xb2\xc1"
8159 "\xd4\x45\xd4\xc8\xef\xf7\xed\x22"
8160 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8161 "\xc0\x7b\x25\xe2\x5e\xcf\xe5",
8162 }, {
8163 .klen = 16,
8164 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8165 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8166 .ilen = 32,
8167 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8168 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8169 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8170 "\x20\x47\x61\x75\x27\x73\x20\x43",
8171 .rlen = 32,
8172 .result = "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8173 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8174 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8175 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84",
8176 }, {
8177 .klen = 16,
8178 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8179 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8180 .ilen = 47,
8181 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8182 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8183 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8184 "\x20\x47\x61\x75\x27\x73\x20\x43"
8185 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8186 "\x70\x6c\x65\x61\x73\x65\x2c",
8187 .rlen = 47,
8188 .result = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8189 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8190 "\xb3\xff\xfd\x94\x0c\x16\xa1\x8c"
8191 "\x1b\x55\x49\xd2\xf8\x38\x02\x9e"
8192 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8193 "\xbe\x7f\xcb\xcc\x98\xeb\xf5",
8194 }, {
8195 .klen = 16,
8196 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8197 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8198 .ilen = 48,
8199 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8200 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8201 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8202 "\x20\x47\x61\x75\x27\x73\x20\x43"
8203 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8204 "\x70\x6c\x65\x61\x73\x65\x2c\x20",
8205 .rlen = 48,
8206 .result = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8207 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8208 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8209 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8"
8210 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8211 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8",
8212 }, {
8213 .klen = 16,
8214 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8215 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8216 .ilen = 64,
8217 .input = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8218 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8219 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8220 "\x20\x47\x61\x75\x27\x73\x20\x43"
8221 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8222 "\x70\x6c\x65\x61\x73\x65\x2c\x20"
8223 "\x61\x6e\x64\x20\x77\x6f\x6e\x74"
8224 "\x6f\x6e\x20\x73\x6f\x75\x70\x2e",
8225 .rlen = 64,
8226 .result = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8227 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8228 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8229 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8230 "\x48\x07\xef\xe8\x36\xee\x89\xa5"
8231 "\x26\x73\x0d\xbc\x2f\x7b\xc8\x40"
8232 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8233 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8",
8234 }
8235};
8236
8237static struct cipher_testvec cts_mode_dec_tv_template[] = {
8238 { /* from rfc3962 */
8239 .klen = 16,
8240 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8241 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8242 .rlen = 17,
8243 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8244 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8245 "\x20",
8246 .ilen = 17,
8247 .input = "\xc6\x35\x35\x68\xf2\xbf\x8c\xb4"
8248 "\xd8\xa5\x80\x36\x2d\xa7\xff\x7f"
8249 "\x97",
8250 }, {
8251 .klen = 16,
8252 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8253 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8254 .rlen = 31,
8255 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8256 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8257 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8258 "\x20\x47\x61\x75\x27\x73\x20",
8259 .ilen = 31,
8260 .input = "\xfc\x00\x78\x3e\x0e\xfd\xb2\xc1"
8261 "\xd4\x45\xd4\xc8\xef\xf7\xed\x22"
8262 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8263 "\xc0\x7b\x25\xe2\x5e\xcf\xe5",
8264 }, {
8265 .klen = 16,
8266 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8267 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8268 .rlen = 32,
8269 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8270 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8271 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8272 "\x20\x47\x61\x75\x27\x73\x20\x43",
8273 .ilen = 32,
8274 .input = "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8275 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8276 "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8277 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84",
8278 }, {
8279 .klen = 16,
8280 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8281 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8282 .rlen = 47,
8283 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8284 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8285 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8286 "\x20\x47\x61\x75\x27\x73\x20\x43"
8287 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8288 "\x70\x6c\x65\x61\x73\x65\x2c",
8289 .ilen = 47,
8290 .input = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8291 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8292 "\xb3\xff\xfd\x94\x0c\x16\xa1\x8c"
8293 "\x1b\x55\x49\xd2\xf8\x38\x02\x9e"
8294 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8295 "\xbe\x7f\xcb\xcc\x98\xeb\xf5",
8296 }, {
8297 .klen = 16,
8298 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8299 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8300 .rlen = 48,
8301 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8302 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8303 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8304 "\x20\x47\x61\x75\x27\x73\x20\x43"
8305 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8306 "\x70\x6c\x65\x61\x73\x65\x2c\x20",
8307 .ilen = 48,
8308 .input = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8309 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8310 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8311 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8"
8312 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8313 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8",
8314 }, {
8315 .klen = 16,
8316 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
8317 "\x74\x65\x72\x69\x79\x61\x6b\x69",
8318 .rlen = 64,
8319 .result = "\x49\x20\x77\x6f\x75\x6c\x64\x20"
8320 "\x6c\x69\x6b\x65\x20\x74\x68\x65"
8321 "\x20\x47\x65\x6e\x65\x72\x61\x6c"
8322 "\x20\x47\x61\x75\x27\x73\x20\x43"
8323 "\x68\x69\x63\x6b\x65\x6e\x2c\x20"
8324 "\x70\x6c\x65\x61\x73\x65\x2c\x20"
8325 "\x61\x6e\x64\x20\x77\x6f\x6e\x74"
8326 "\x6f\x6e\x20\x73\x6f\x75\x70\x2e",
8327 .ilen = 64,
8328 .input = "\x97\x68\x72\x68\xd6\xec\xcc\xc0"
8329 "\xc0\x7b\x25\xe2\x5e\xcf\xe5\x84"
8330 "\x39\x31\x25\x23\xa7\x86\x62\xd5"
8331 "\xbe\x7f\xcb\xcc\x98\xeb\xf5\xa8"
8332 "\x48\x07\xef\xe8\x36\xee\x89\xa5"
8333 "\x26\x73\x0d\xbc\x2f\x7b\xc8\x40"
8334 "\x9d\xad\x8b\xbb\x96\xc4\xcd\xc0"
8335 "\x3b\xc1\x03\xe1\xa1\x94\xbb\xd8",
8336 }
8337};
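
The six encrypt/decrypt pairs above come from the RFC 3962 appendix and exercise AES-CBC with ciphertext stealing for every tail case (partial final block, exact block, multi-block); the absence of an .iv field is consistent with the all-zero IV used in that appendix. The sketch below illustrates only the stealing step, assuming the caller supplies a single-block AES-128 encrypt routine (block_enc_fn and cts_cbc_encrypt are hypothetical names introduced here, not kernel APIs); it is meant to show the block swap and truncation, not to be a drop-in implementation.

#include <stddef.h>
#include <string.h>

#define BLK 16

/* Hypothetical primitive the caller must provide, e.g. AES-128
 * encryption of one 16-byte block in ECB mode. */
typedef void (*block_enc_fn)(const unsigned char key[16],
			     const unsigned char in[BLK],
			     unsigned char out[BLK]);

/*
 * CBC with ciphertext stealing as in RFC 3962 (zero IV): run ordinary
 * CBC over the leading full blocks, hold back the last full ciphertext
 * block, encrypt the zero-padded tail against it, and emit the two in
 * swapped order with the held-back block truncated to the tail length.
 * len must be greater than BLK.
 */
static void cts_cbc_encrypt(block_enc_fn enc, const unsigned char key[16],
			    const unsigned char *in, size_t len,
			    unsigned char *out)
{
	unsigned char prev[BLK] = { 0 };	/* IV = all zeroes */
	unsigned char x[BLK], clast[BLK];
	size_t d = len % BLK ? len % BLK : BLK;	/* tail length */
	size_t nfull = (len - d) / BLK;		/* leading full blocks */
	size_t i, j;

	/* ordinary CBC over the leading full blocks */
	for (i = 0; i < nfull; i++) {
		for (j = 0; j < BLK; j++)
			x[j] = in[i * BLK + j] ^ prev[j];
		enc(key, x, prev);
		if (i < nfull - 1)
			memcpy(out + i * BLK, prev, BLK);
		else
			memcpy(clast, prev, BLK);  /* held back for stealing */
	}

	/* zero-pad the tail, chain it to the held-back block, encrypt */
	memset(x, 0, BLK);
	memcpy(x, in + nfull * BLK, d);
	for (j = 0; j < BLK; j++)
		x[j] ^= clast[j];
	enc(key, x, out + (nfull - 1) * BLK);

	/* the former last full ciphertext block goes last, truncated */
	memcpy(out + nfull * BLK, clast, d);
}

With this arrangement the 17-byte vector's trailing byte 0x97 and the 31- and 32-byte vectors' shared suffix are all prefixes of the same held-back block, which is a convenient sanity check when porting.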
8338
8339/*
8340 * Compression stuff.
8341 */
8342#define COMP_BUF_SIZE 512
8343
8344struct comp_testvec {
8345 int inlen, outlen;
8346 char input[COMP_BUF_SIZE];
8347 char output[COMP_BUF_SIZE];
8348};
8349
8350/*
8351 * Deflate test vectors (null-terminated strings).
8352 * Params: winbits=11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
8353 */
8354#define DEFLATE_COMP_TEST_VECTORS 2
8355#define DEFLATE_DECOMP_TEST_VECTORS 2
8356
8357static struct comp_testvec deflate_comp_tv_template[] = {
8358 {
8359 .inlen = 70,
8360 .outlen = 38,
8361 .input = "Join us now and share the software "
8362 "Join us now and share the software ",
8363 .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
8364 "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
8365 "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
8366 "\x48\x55\x28\xce\x4f\x2b\x29\x07"
8367 "\x71\xbc\x08\x2b\x01\x00",
8368 }, {
8369 .inlen = 191,
8370 .outlen = 122,
8371 .input = "This document describes a compression method based on the DEFLATE"
8372 "compression algorithm. This document defines the application of "
8373 "the DEFLATE algorithm to the IP Payload Compression Protocol.",
8374 .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
8375 "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
8376 "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
8377 "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
8378 "\x68\x12\x51\xae\x76\x67\xd6\x27"
8379 "\x19\x88\x1a\xde\x85\xab\x21\xf2"
8380 "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
8381 "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
8382 "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
8383 "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
8384 "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
8385 "\x52\x37\xed\x0e\x52\x6b\x59\x02"
8386 "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
8387 "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
8388 "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
8389 "\xfa\x02",
8390 },
8391};
8392
8393static struct comp_testvec deflate_decomp_tv_template[] = {
8394 {
8395 .inlen = 122,
8396 .outlen = 191,
8397 .input = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
8398 "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
8399 "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
8400 "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
8401 "\x68\x12\x51\xae\x76\x67\xd6\x27"
8402 "\x19\x88\x1a\xde\x85\xab\x21\xf2"
8403 "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
8404 "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
8405 "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
8406 "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
8407 "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
8408 "\x52\x37\xed\x0e\x52\x6b\x59\x02"
8409 "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
8410 "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
8411 "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
8412 "\xfa\x02",
8413 .output = "This document describes a compression method based on the DEFLATE"
8414 "compression algorithm. This document defines the application of "
8415 "the DEFLATE algorithm to the IP Payload Compression Protocol.",
8416 }, {
8417 .inlen = 38,
8418 .outlen = 70,
8419 .input = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
8420 "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
8421 "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
8422 "\x48\x55\x28\xce\x4f\x2b\x29\x07"
8423 "\x71\xbc\x08\x2b\x01\x00",
8424 .output = "Join us now and share the software "
8425 "Join us now and share the software ",
8426 },
8427};
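
The parameters noted above the templates (winbits=11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL) presumably map to a raw deflate stream with a 2 KiB window, which is why the outputs carry no zlib header. A userspace cross-check with zlib could look like the sketch below; raw_deflate is just a local helper name, and whether the output matches the vectors byte for byte may depend on the zlib version, so treat it as illustrative.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

/* Compress with settings analogous to the kernel's deflate tfm:
 * raw stream (negative windowBits), window size 2^11, default level. */
static int raw_deflate(const unsigned char *in, size_t inlen,
		       unsigned char *out, size_t *outlen)
{
	z_stream zs;
	int ret;

	memset(&zs, 0, sizeof(zs));
	ret = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			   -11, MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
	if (ret != Z_OK)
		return ret;

	zs.next_in = (unsigned char *)in;
	zs.avail_in = inlen;
	zs.next_out = out;
	zs.avail_out = *outlen;

	ret = deflate(&zs, Z_FINISH);
	*outlen = zs.total_out;
	deflateEnd(&zs);

	return ret == Z_STREAM_END ? 0 : -1;
}

int main(void)
{
	const char *msg = "Join us now and share the software "
			  "Join us now and share the software ";
	unsigned char buf[512];
	size_t outlen = sizeof(buf);

	if (raw_deflate((const unsigned char *)msg, strlen(msg),
			buf, &outlen) == 0)
		printf("compressed %zu -> %zu bytes\n", strlen(msg), outlen);
	return 0;
}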
8428
8429/*
8430 * LZO test vectors (null-terminated strings).
8431 */
8432#define LZO_COMP_TEST_VECTORS 2
8433#define LZO_DECOMP_TEST_VECTORS 2
8434
8435static struct comp_testvec lzo_comp_tv_template[] = {
8436 {
8437 .inlen = 70,
8438 .outlen = 46,
8439 .input = "Join us now and share the software "
8440 "Join us now and share the software ",
8441 .output = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
8442 "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
8443 "\x64\x20\x73\x68\x61\x72\x65\x20"
8444 "\x74\x68\x65\x20\x73\x6f\x66\x74"
8445 "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
8446 "\x3d\x88\x00\x11\x00\x00",
8447 }, {
8448 .inlen = 159,
8449 .outlen = 133,
8450 .input = "This document describes a compression method based on the LZO "
8451 "compression algorithm. This document defines the application of "
8452 "the LZO algorithm used in UBIFS.",
8453 .output = "\x00\x2b\x54\x68\x69\x73\x20\x64"
8454 "\x6f\x63\x75\x6d\x65\x6e\x74\x20"
8455 "\x64\x65\x73\x63\x72\x69\x62\x65"
8456 "\x73\x20\x61\x20\x63\x6f\x6d\x70"
8457 "\x72\x65\x73\x73\x69\x6f\x6e\x20"
8458 "\x6d\x65\x74\x68\x6f\x64\x20\x62"
8459 "\x61\x73\x65\x64\x20\x6f\x6e\x20"
8460 "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
8461 "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
8462 "\x69\x74\x68\x6d\x2e\x20\x20\x54"
8463 "\x68\x69\x73\x2a\x54\x01\x02\x66"
8464 "\x69\x6e\x65\x73\x94\x06\x05\x61"
8465 "\x70\x70\x6c\x69\x63\x61\x74\x76"
8466 "\x0a\x6f\x66\x88\x02\x60\x09\x27"
8467 "\xf0\x00\x0c\x20\x75\x73\x65\x64"
8468 "\x20\x69\x6e\x20\x55\x42\x49\x46"
8469 "\x53\x2e\x11\x00\x00",
8470 },
8471};
8472
8473static struct comp_testvec lzo_decomp_tv_template[] = {
8474 {
8475 .inlen = 133,
8476 .outlen = 159,
8477 .input = "\x00\x2b\x54\x68\x69\x73\x20\x64"
8478 "\x6f\x63\x75\x6d\x65\x6e\x74\x20"
8479 "\x64\x65\x73\x63\x72\x69\x62\x65"
8480 "\x73\x20\x61\x20\x63\x6f\x6d\x70"
8481 "\x72\x65\x73\x73\x69\x6f\x6e\x20"
8482 "\x6d\x65\x74\x68\x6f\x64\x20\x62"
8483 "\x61\x73\x65\x64\x20\x6f\x6e\x20"
8484 "\x74\x68\x65\x20\x4c\x5a\x4f\x2b"
8485 "\x8c\x00\x0d\x61\x6c\x67\x6f\x72"
8486 "\x69\x74\x68\x6d\x2e\x20\x20\x54"
8487 "\x68\x69\x73\x2a\x54\x01\x02\x66"
8488 "\x69\x6e\x65\x73\x94\x06\x05\x61"
8489 "\x70\x70\x6c\x69\x63\x61\x74\x76"
8490 "\x0a\x6f\x66\x88\x02\x60\x09\x27"
8491 "\xf0\x00\x0c\x20\x75\x73\x65\x64"
8492 "\x20\x69\x6e\x20\x55\x42\x49\x46"
8493 "\x53\x2e\x11\x00\x00",
8494 .output = "This document describes a compression method based on the LZO "
8495 "compression algorithm. This document defines the application of "
8496 "the LZO algorithm used in UBIFS.",
8497 }, {
8498 .inlen = 46,
8499 .outlen = 70,
8500 .input = "\x00\x0d\x4a\x6f\x69\x6e\x20\x75"
8501 "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
8502 "\x64\x20\x73\x68\x61\x72\x65\x20"
8503 "\x74\x68\x65\x20\x73\x6f\x66\x74"
8504 "\x77\x70\x01\x01\x4a\x6f\x69\x6e"
8505 "\x3d\x88\x00\x11\x00\x00",
8506 .output = "Join us now and share the software "
8507 "Join us now and share the software ",
8508 },
8509};
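
These vectors exercise the raw LZO1X-1 bitstream. Outside the kernel the same transform is available from liblzo2 (or minilzo); the sketch below shows the usual call sequence as a round trip, with no guarantee that a given library version emits output byte-identical to the vectors above.

#include <stdio.h>
#include <string.h>
#include <lzo/lzo1x.h>

int main(void)
{
	const char *msg = "Join us now and share the software "
			  "Join us now and share the software ";
	unsigned char out[512], back[512];
	unsigned char wrkmem[LZO1X_1_MEM_COMPRESS];
	lzo_uint outlen = sizeof(out), backlen = sizeof(back);

	if (lzo_init() != LZO_E_OK)
		return 1;

	/* compress, then round-trip to confirm the stream decodes */
	if (lzo1x_1_compress((const unsigned char *)msg, strlen(msg),
			     out, &outlen, wrkmem) != LZO_E_OK)
		return 1;
	if (lzo1x_decompress_safe(out, outlen, back, &backlen, NULL)
	    != LZO_E_OK)
		return 1;

	printf("%zu -> %lu -> %lu bytes\n", strlen(msg),
	       (unsigned long)outlen, (unsigned long)backlen);
	return 0;
}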
8510
8511/*
8512 * Michael MIC test vectors from IEEE 802.11i
8513 */
8514#define MICHAEL_MIC_TEST_VECTORS 6
8515
8516static struct hash_testvec michael_mic_tv_template[] = {
8517 {
8518 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
8519 .ksize = 8,
8520 .plaintext = zeroed_string,
8521 .psize = 0,
8522 .digest = "\x82\x92\x5c\x1c\xa1\xd1\x30\xb8",
8523 },
8524 {
8525 .key = "\x82\x92\x5c\x1c\xa1\xd1\x30\xb8",
8526 .ksize = 8,
8527 .plaintext = "M",
8528 .psize = 1,
8529 .digest = "\x43\x47\x21\xca\x40\x63\x9b\x3f",
8530 },
8531 {
8532 .key = "\x43\x47\x21\xca\x40\x63\x9b\x3f",
8533 .ksize = 8,
8534 .plaintext = "Mi",
8535 .psize = 2,
8536 .digest = "\xe8\xf9\xbe\xca\xe9\x7e\x5d\x29",
8537 },
8538 {
8539 .key = "\xe8\xf9\xbe\xca\xe9\x7e\x5d\x29",
8540 .ksize = 8,
8541 .plaintext = "Mic",
8542 .psize = 3,
8543 .digest = "\x90\x03\x8f\xc6\xcf\x13\xc1\xdb",
8544 },
8545 {
8546 .key = "\x90\x03\x8f\xc6\xcf\x13\xc1\xdb",
8547 .ksize = 8,
8548 .plaintext = "Mich",
8549 .psize = 4,
8550 .digest = "\xd5\x5e\x10\x05\x10\x12\x89\x86",
8551 },
8552 {
8553 .key = "\xd5\x5e\x10\x05\x10\x12\x89\x86",
8554 .ksize = 8,
8555 .plaintext = "Michael",
8556 .psize = 7,
8557 .digest = "\x0a\x94\x2b\x12\x4e\xca\xa5\x46",
8558 }
8559};
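
Michael is a tiny keyed MIC built from 32-bit adds, rotates and byte swaps, and the chained vectors above (each digest becomes the next key) make an implementation easy to spot-check. The following is a from-memory userspace sketch of the algorithm as described in IEEE 802.11i; rol32, ror32, xswap and michael_mic are local helper names, and the sketch should be verified against the vectors before being trusted.

#include <stdint.h>
#include <stddef.h>

static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }
static uint32_t ror32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }
static uint32_t xswap(uint32_t v)
{
	/* swap the bytes within each 16-bit half */
	return ((v & 0xff00ff00) >> 8) | ((v & 0x00ff00ff) << 8);
}
static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static void michael_block(uint32_t *l, uint32_t *r, uint32_t val)
{
	*l ^= val;
	*r ^= rol32(*l, 17);  *l += *r;
	*r ^= xswap(*l);      *l += *r;
	*r ^= rol32(*l, 3);   *l += *r;
	*r ^= ror32(*l, 2);   *l += *r;
}

/* data is padded with a 0x5a byte and zeroes to a 32-bit boundary,
 * followed by one all-zero word */
static void michael_mic(const uint8_t key[8], const uint8_t *data,
			size_t len, uint8_t mic[8])
{
	uint32_t l = get_le32(key), r = get_le32(key + 4);
	uint32_t last;
	size_t i, blocks = len / 4, rem = len % 4;

	for (i = 0; i < blocks; i++)
		michael_block(&l, &r, get_le32(data + i * 4));

	/* final word: remaining bytes followed by the 0x5a pad byte */
	last = (uint32_t)0x5a << (8 * rem);
	for (i = 0; i < rem; i++)
		last |= (uint32_t)data[blocks * 4 + i] << (8 * i);
	michael_block(&l, &r, last);
	michael_block(&l, &r, 0);

	for (i = 0; i < 4; i++) {
		mic[i] = l >> (8 * i);		/* little-endian l, then r */
		mic[4 + i] = r >> (8 * i);
	}
}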
8560
8561/*
8562 * CRC32C test vectors
8563 */
8564#define CRC32C_TEST_VECTORS 14
8565
8566static struct hash_testvec crc32c_tv_template[] = {
8567 {
8568 .psize = 0,
8569 .digest = "\x00\x00\x00\x00",
8570 },
8571 {
8572 .key = "\x87\xa9\xcb\xed",
8573 .ksize = 4,
8574 .psize = 0,
8575 .digest = "\x78\x56\x34\x12",
8576 },
8577 {
8578 .key = "\xff\xff\xff\xff",
8579 .ksize = 4,
8580 .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08"
8581 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
8582 "\x11\x12\x13\x14\x15\x16\x17\x18"
8583 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
8584 "\x21\x22\x23\x24\x25\x26\x27\x28",
8585 .psize = 40,
8586 .digest = "\x7f\x15\x2c\x0e",
8587 },
8588 {
8589 .key = "\xff\xff\xff\xff",
8590 .ksize = 4,
8591 .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
8592 "\x31\x32\x33\x34\x35\x36\x37\x38"
8593 "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
8594 "\x41\x42\x43\x44\x45\x46\x47\x48"
8595 "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50",
8596 .psize = 40,
8597 .digest = "\xf6\xeb\x80\xe9",
8598 },
8599 {
8600 .key = "\xff\xff\xff\xff",
8601 .ksize = 4,
8602 .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58"
8603 "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
8604 "\x61\x62\x63\x64\x65\x66\x67\x68"
8605 "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
8606 "\x71\x72\x73\x74\x75\x76\x77\x78",
8607 .psize = 40,
8608 .digest = "\xed\xbd\x74\xde",
8609 },
8610 {
8611 .key = "\xff\xff\xff\xff",
8612 .ksize = 4,
8613 .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
8614 "\x81\x82\x83\x84\x85\x86\x87\x88"
8615 "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
8616 "\x91\x92\x93\x94\x95\x96\x97\x98"
8617 "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0",
8618 .psize = 40,
8619 .digest = "\x62\xc8\x79\xd5",
8620 },
8621 {
8622 .key = "\xff\xff\xff\xff",
8623 .ksize = 4,
8624 .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8"
8625 "\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
8626 "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8"
8627 "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
8628 "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8",
8629 .psize = 40,
8630 .digest = "\xd0\x9a\x97\xba",
8631 },
8632 {
8633 .key = "\xff\xff\xff\xff",
8634 .ksize = 4,
8635 .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
8636 "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8"
8637 "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
8638 "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8"
8639 "\xe9\xea\xeb\xec\xed\xee\xef\xf0",
8640 .psize = 40,
8641 .digest = "\x13\xd9\x29\x2b",
8642 },
8643 {
8644 .key = "\x80\xea\xd3\xf1",
8645 .ksize = 4,
8646 .plaintext = "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
8647 "\x31\x32\x33\x34\x35\x36\x37\x38"
8648 "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
8649 "\x41\x42\x43\x44\x45\x46\x47\x48"
8650 "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50",
8651 .psize = 40,
8652 .digest = "\x0c\xb5\xe2\xa2",
8653 },
8654 {
8655 .key = "\xf3\x4a\x1d\x5d",
8656 .ksize = 4,
8657 .plaintext = "\x51\x52\x53\x54\x55\x56\x57\x58"
8658 "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
8659 "\x61\x62\x63\x64\x65\x66\x67\x68"
8660 "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
8661 "\x71\x72\x73\x74\x75\x76\x77\x78",
8662 .psize = 40,
8663 .digest = "\xd1\x7f\xfb\xa6",
8664 },
8665 {
8666 .key = "\x2e\x80\x04\x59",
8667 .ksize = 4,
8668 .plaintext = "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
8669 "\x81\x82\x83\x84\x85\x86\x87\x88"
8670 "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
8671 "\x91\x92\x93\x94\x95\x96\x97\x98"
8672 "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0",
8673 .psize = 40,
8674 .digest = "\x59\x33\xe6\x7a",
8675 },
8676 {
8677 .key = "\xa6\xcc\x19\x85",
8678 .ksize = 4,
8679 .plaintext = "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8"
8680 "\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
8681 "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8"
8682 "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
8683 "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8",
8684 .psize = 40,
8685 .digest = "\xbe\x03\x01\xd2",
8686 },
8687 {
8688 .key = "\x41\xfc\xfe\x2d",
8689 .ksize = 4,
8690 .plaintext = "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
8691 "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8"
8692 "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
8693 "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8"
8694 "\xe9\xea\xeb\xec\xed\xee\xef\xf0",
8695 .psize = 40,
8696 .digest = "\x75\xd3\xc5\x24",
8697 },
8698 {
8699 .key = "\xff\xff\xff\xff",
8700 .ksize = 4,
8701 .plaintext = "\x01\x02\x03\x04\x05\x06\x07\x08"
8702 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
8703 "\x11\x12\x13\x14\x15\x16\x17\x18"
8704 "\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20"
8705 "\x21\x22\x23\x24\x25\x26\x27\x28"
8706 "\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30"
8707 "\x31\x32\x33\x34\x35\x36\x37\x38"
8708 "\x39\x3a\x3b\x3c\x3d\x3e\x3f\x40"
8709 "\x41\x42\x43\x44\x45\x46\x47\x48"
8710 "\x49\x4a\x4b\x4c\x4d\x4e\x4f\x50"
8711 "\x51\x52\x53\x54\x55\x56\x57\x58"
8712 "\x59\x5a\x5b\x5c\x5d\x5e\x5f\x60"
8713 "\x61\x62\x63\x64\x65\x66\x67\x68"
8714 "\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70"
8715 "\x71\x72\x73\x74\x75\x76\x77\x78"
8716 "\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80"
8717 "\x81\x82\x83\x84\x85\x86\x87\x88"
8718 "\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90"
8719 "\x91\x92\x93\x94\x95\x96\x97\x98"
8720 "\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0"
8721 "\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8"
8722 "\xa9\xaa\xab\xac\xad\xae\xaf\xb0"
8723 "\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8"
8724 "\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0"
8725 "\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8"
8726 "\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0"
8727 "\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8"
8728 "\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0"
8729 "\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8"
8730 "\xe9\xea\xeb\xec\xed\xee\xef\xf0",
8731 .psize = 240,
8732 .digest = "\x75\xd3\xc5\x24",
8733 .np = 2,
8734 .tap = { 31, 209 }
8735 },
8736};
8737
8738#endif /* _CRYPTO_TESTMGR_H */
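
For reference, the CRC32C vectors appear to treat the optional 4-byte key as a little-endian seed (defaulting to ~0) and to store the bit-inverted result little-endian: the second vector, an empty message with seed 0xedcba987, decodes straight to 0x12345678, and the keyless empty-message vector gives all zeroes. The sketch below is a bitwise cross-check built on that inference rather than on a spec; crc32c_update is a local helper name.

#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC32C (Castagnoli), reflected polynomial 0x82f63b78. */
static uint32_t crc32c_update(uint32_t crc, const uint8_t *data, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	/* second vector above: seed 0xedcba987, empty message */
	uint32_t crc = 0xedcba987;
	uint8_t digest[4];
	int i;

	crc = crc32c_update(crc, NULL, 0);
	crc = ~crc;				/* stored inverted */
	for (i = 0; i < 4; i++)
		digest[i] = crc >> (8 * i);	/* little-endian */

	printf("%02x %02x %02x %02x\n",
	       digest[0], digest[1], digest[2], digest[3]);
	/* expected: 78 56 34 12 */
	return 0;
}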
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 11c8c19f0fb7..f17cd4b572f8 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -663,7 +663,7 @@ config HAVE_PATA_PLATFORM
663 663
664config PATA_PLATFORM 664config PATA_PLATFORM
665 tristate "Generic platform device PATA support" 665 tristate "Generic platform device PATA support"
666 depends on EMBEDDED || ARCH_RPC || PPC || HAVE_PATA_PLATFORM 666 depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
667 help 667 help
668 This option enables support for generic directly connected ATA 668 This option enables support for generic directly connected ATA
669 devices commonly found on embedded systems. 669 devices commonly found on embedded systems.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2e1a7cb2ed5f..aeadd00411a1 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -267,8 +267,8 @@ struct ahci_port_priv {
267 * per PM slot */ 267 * per PM slot */
268}; 268};
269 269
270static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 270static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
271static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 271static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
272static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 272static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
273static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); 273static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
274static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 274static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
@@ -316,6 +316,7 @@ static struct device_attribute *ahci_shost_attrs[] = {
316 316
317static struct device_attribute *ahci_sdev_attrs[] = { 317static struct device_attribute *ahci_sdev_attrs[] = {
318 &dev_attr_sw_activity, 318 &dev_attr_sw_activity,
319 &dev_attr_unload_heads,
319 NULL 320 NULL
320}; 321};
321 322
@@ -820,10 +821,10 @@ static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
820 return 0; 821 return 0;
821} 822}
822 823
823static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 824static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
824{ 825{
825 void __iomem *port_mmio = ahci_port_base(ap); 826 void __iomem *port_mmio = ahci_port_base(link->ap);
826 int offset = ahci_scr_offset(ap, sc_reg); 827 int offset = ahci_scr_offset(link->ap, sc_reg);
827 828
828 if (offset) { 829 if (offset) {
829 *val = readl(port_mmio + offset); 830 *val = readl(port_mmio + offset);
@@ -832,10 +833,10 @@ static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
832 return -EINVAL; 833 return -EINVAL;
833} 834}
834 835
835static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 836static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
836{ 837{
837 void __iomem *port_mmio = ahci_port_base(ap); 838 void __iomem *port_mmio = ahci_port_base(link->ap);
838 int offset = ahci_scr_offset(ap, sc_reg); 839 int offset = ahci_scr_offset(link->ap, sc_reg);
839 840
840 if (offset) { 841 if (offset) {
841 writel(val, port_mmio + offset); 842 writel(val, port_mmio + offset);
@@ -973,7 +974,7 @@ static void ahci_disable_alpm(struct ata_port *ap)
973 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT); 974 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
974 975
975 /* go ahead and clean out PhyRdy Change from Serror too */ 976 /* go ahead and clean out PhyRdy Change from Serror too */
976 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18))); 977 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
977 978
978 /* 979 /*
979 * Clear flag to indicate that we should ignore all PhyRdy 980 * Clear flag to indicate that we should ignore all PhyRdy
@@ -1937,8 +1938,8 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1937 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); 1938 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1938 1939
1939 /* AHCI needs SError cleared; otherwise, it might lock up */ 1940 /* AHCI needs SError cleared; otherwise, it might lock up */
1940 ahci_scr_read(ap, SCR_ERROR, &serror); 1941 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1941 ahci_scr_write(ap, SCR_ERROR, serror); 1942 ahci_scr_write(&ap->link, SCR_ERROR, serror);
1942 host_ehi->serror |= serror; 1943 host_ehi->serror |= serror;
1943 1944
1944 /* some controllers set IRQ_IF_ERR on device errors, ignore it */ 1945 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
@@ -2027,7 +2028,7 @@ static void ahci_port_intr(struct ata_port *ap)
2027 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && 2028 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2028 (status & PORT_IRQ_PHYRDY)) { 2029 (status & PORT_IRQ_PHYRDY)) {
2029 status &= ~PORT_IRQ_PHYRDY; 2030 status &= ~PORT_IRQ_PHYRDY;
2030 ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18))); 2031 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2031 } 2032 }
2032 2033
2033 if (unlikely(status & PORT_IRQ_ERROR)) { 2034 if (unlikely(status & PORT_IRQ_ERROR)) {
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index e6b4606e36b6..e9e32ed6b1a3 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -165,8 +165,10 @@ static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
165static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev); 165static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
166static int ich_pata_cable_detect(struct ata_port *ap); 166static int ich_pata_cable_detect(struct ata_port *ap);
167static u8 piix_vmw_bmdma_status(struct ata_port *ap); 167static u8 piix_vmw_bmdma_status(struct ata_port *ap);
168static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val); 168static int piix_sidpr_scr_read(struct ata_link *link,
169static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val); 169 unsigned int reg, u32 *val);
170static int piix_sidpr_scr_write(struct ata_link *link,
171 unsigned int reg, u32 val);
170#ifdef CONFIG_PM 172#ifdef CONFIG_PM
171static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 173static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
172static int piix_pci_device_resume(struct pci_dev *pdev); 174static int piix_pci_device_resume(struct pci_dev *pdev);
@@ -278,12 +280,15 @@ static const struct pci_device_id piix_pci_tbl[] = {
278 /* SATA Controller IDE (PCH) */ 280 /* SATA Controller IDE (PCH) */
279 { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 281 { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
280 /* SATA Controller IDE (PCH) */ 282 /* SATA Controller IDE (PCH) */
283 { 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
284 /* SATA Controller IDE (PCH) */
281 { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 285 { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
282 /* SATA Controller IDE (PCH) */ 286 /* SATA Controller IDE (PCH) */
287 { 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
288 /* SATA Controller IDE (PCH) */
283 { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 289 { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
284 /* SATA Controller IDE (PCH) */ 290 /* SATA Controller IDE (PCH) */
285 { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 291 { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
286
287 { } /* terminate list */ 292 { } /* terminate list */
288}; 293};
289 294
@@ -582,6 +587,7 @@ static const struct ich_laptop ich_laptop[] = {
582 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ 587 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
583 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ 588 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
584 { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ 589 { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
590 { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
585 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ 591 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
586 { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ 592 { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
587 { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ 593 { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
@@ -885,23 +891,9 @@ static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
885 * Serial ATA Index/Data Pair Superset Registers access 891 * Serial ATA Index/Data Pair Superset Registers access
886 * 892 *
887 * Beginning from ICH8, there's a sane way to access SCRs using index 893 * Beginning from ICH8, there's a sane way to access SCRs using index
888 * and data register pair located at BAR5. This creates an 894 * and data register pair located at BAR5 which means that we have
889 * interesting problem of mapping two SCRs to one port. 895 * separate SCRs for master and slave. This is handled using libata
890 * 896 * slave_link facility.
891 * Although they have separate SCRs, the master and slave aren't
892 * independent enough to be treated as separate links - e.g. softreset
893 * resets both. Also, there's no protocol defined for hard resetting
894 * singled device sharing the virtual port (no defined way to acquire
895 * device signature). This is worked around by merging the SCR values
896 * into one sensible value and requesting follow-up SRST after
897 * hardreset.
898 *
899 * SCR merging is perfomed in nibbles which is the unit contents in
900 * SCRs are organized. If two values are equal, the value is used.
901 * When they differ, merge table which lists precedence of possible
902 * values is consulted and the first match or the last entry when
903 * nothing matches is used. When there's no merge table for the
904 * specific nibble, value from the first port is used.
905 */ 897 */
906static const int piix_sidx_map[] = { 898static const int piix_sidx_map[] = {
907 [SCR_STATUS] = 0, 899 [SCR_STATUS] = 0,
@@ -909,120 +901,38 @@ static const int piix_sidx_map[] = {
909 [SCR_CONTROL] = 1, 901 [SCR_CONTROL] = 1,
910}; 902};
911 903
912static void piix_sidpr_sel(struct ata_device *dev, unsigned int reg) 904static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
913{ 905{
914 struct ata_port *ap = dev->link->ap; 906 struct ata_port *ap = link->ap;
915 struct piix_host_priv *hpriv = ap->host->private_data; 907 struct piix_host_priv *hpriv = ap->host->private_data;
916 908
917 iowrite32(((ap->port_no * 2 + dev->devno) << 8) | piix_sidx_map[reg], 909 iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
918 hpriv->sidpr + PIIX_SIDPR_IDX); 910 hpriv->sidpr + PIIX_SIDPR_IDX);
919} 911}
920 912
921static int piix_sidpr_read(struct ata_device *dev, unsigned int reg) 913static int piix_sidpr_scr_read(struct ata_link *link,
922{ 914 unsigned int reg, u32 *val)
923 struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
924
925 piix_sidpr_sel(dev, reg);
926 return ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
927}
928
929static void piix_sidpr_write(struct ata_device *dev, unsigned int reg, u32 val)
930{
931 struct piix_host_priv *hpriv = dev->link->ap->host->private_data;
932
933 piix_sidpr_sel(dev, reg);
934 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
935}
936
937static u32 piix_merge_scr(u32 val0, u32 val1, const int * const *merge_tbl)
938{
939 u32 val = 0;
940 int i, mi;
941
942 for (i = 0, mi = 0; i < 32 / 4; i++) {
943 u8 c0 = (val0 >> (i * 4)) & 0xf;
944 u8 c1 = (val1 >> (i * 4)) & 0xf;
945 u8 merged = c0;
946 const int *cur;
947
948 /* if no merge preference, assume the first value */
949 cur = merge_tbl[mi];
950 if (!cur)
951 goto done;
952 mi++;
953
954 /* if two values equal, use it */
955 if (c0 == c1)
956 goto done;
957
958 /* choose the first match or the last from the merge table */
959 while (*cur != -1) {
960 if (c0 == *cur || c1 == *cur)
961 break;
962 cur++;
963 }
964 if (*cur == -1)
965 cur--;
966 merged = *cur;
967 done:
968 val |= merged << (i * 4);
969 }
970
971 return val;
972}
973
974static int piix_sidpr_scr_read(struct ata_port *ap, unsigned int reg, u32 *val)
975{ 915{
976 const int * const sstatus_merge_tbl[] = { 916 struct piix_host_priv *hpriv = link->ap->host->private_data;
977 /* DET */ (const int []){ 1, 3, 0, 4, 3, -1 },
978 /* SPD */ (const int []){ 2, 1, 0, -1 },
979 /* IPM */ (const int []){ 6, 2, 1, 0, -1 },
980 NULL,
981 };
982 const int * const scontrol_merge_tbl[] = {
983 /* DET */ (const int []){ 1, 0, 4, 0, -1 },
984 /* SPD */ (const int []){ 0, 2, 1, 0, -1 },
985 /* IPM */ (const int []){ 0, 1, 2, 3, 0, -1 },
986 NULL,
987 };
988 u32 v0, v1;
989 917
990 if (reg >= ARRAY_SIZE(piix_sidx_map)) 918 if (reg >= ARRAY_SIZE(piix_sidx_map))
991 return -EINVAL; 919 return -EINVAL;
992 920
993 if (!(ap->flags & ATA_FLAG_SLAVE_POSS)) { 921 piix_sidpr_sel(link, reg);
994 *val = piix_sidpr_read(&ap->link.device[0], reg); 922 *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
995 return 0;
996 }
997
998 v0 = piix_sidpr_read(&ap->link.device[0], reg);
999 v1 = piix_sidpr_read(&ap->link.device[1], reg);
1000
1001 switch (reg) {
1002 case SCR_STATUS:
1003 *val = piix_merge_scr(v0, v1, sstatus_merge_tbl);
1004 break;
1005 case SCR_ERROR:
1006 *val = v0 | v1;
1007 break;
1008 case SCR_CONTROL:
1009 *val = piix_merge_scr(v0, v1, scontrol_merge_tbl);
1010 break;
1011 }
1012
1013 return 0; 923 return 0;
1014} 924}
1015 925
1016static int piix_sidpr_scr_write(struct ata_port *ap, unsigned int reg, u32 val) 926static int piix_sidpr_scr_write(struct ata_link *link,
927 unsigned int reg, u32 val)
1017{ 928{
929 struct piix_host_priv *hpriv = link->ap->host->private_data;
930
1018 if (reg >= ARRAY_SIZE(piix_sidx_map)) 931 if (reg >= ARRAY_SIZE(piix_sidx_map))
1019 return -EINVAL; 932 return -EINVAL;
1020 933
1021 piix_sidpr_write(&ap->link.device[0], reg, val); 934 piix_sidpr_sel(link, reg);
1022 935 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
1023 if (ap->flags & ATA_FLAG_SLAVE_POSS)
1024 piix_sidpr_write(&ap->link.device[1], reg, val);
1025
1026 return 0; 936 return 0;
1027} 937}
1028 938
@@ -1363,28 +1273,28 @@ static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
1363 return map; 1273 return map;
1364} 1274}
1365 1275
1366static void __devinit piix_init_sidpr(struct ata_host *host) 1276static int __devinit piix_init_sidpr(struct ata_host *host)
1367{ 1277{
1368 struct pci_dev *pdev = to_pci_dev(host->dev); 1278 struct pci_dev *pdev = to_pci_dev(host->dev);
1369 struct piix_host_priv *hpriv = host->private_data; 1279 struct piix_host_priv *hpriv = host->private_data;
1370 struct ata_device *dev0 = &host->ports[0]->link.device[0]; 1280 struct ata_link *link0 = &host->ports[0]->link;
1371 u32 scontrol; 1281 u32 scontrol;
1372 int i; 1282 int i, rc;
1373 1283
1374 /* check for availability */ 1284 /* check for availability */
1375 for (i = 0; i < 4; i++) 1285 for (i = 0; i < 4; i++)
1376 if (hpriv->map[i] == IDE) 1286 if (hpriv->map[i] == IDE)
1377 return; 1287 return 0;
1378 1288
1379 if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR)) 1289 if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
1380 return; 1290 return 0;
1381 1291
1382 if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 || 1292 if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
1383 pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN) 1293 pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
1384 return; 1294 return 0;
1385 1295
1386 if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME)) 1296 if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
1387 return; 1297 return 0;
1388 1298
1389 hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR]; 1299 hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
1390 1300
@@ -1392,7 +1302,7 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
1392 * Give it a test drive by inhibiting power save modes which 1302 * Give it a test drive by inhibiting power save modes which
1393 * we'll do anyway. 1303 * we'll do anyway.
1394 */ 1304 */
1395 scontrol = piix_sidpr_read(dev0, SCR_CONTROL); 1305 piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
1396 1306
1397 /* if IPM is already 3, SCR access is probably working. Don't 1307 /* if IPM is already 3, SCR access is probably working. Don't
1398 * un-inhibit power save modes as BIOS might have inhibited 1308 * un-inhibit power save modes as BIOS might have inhibited
@@ -1400,18 +1310,30 @@ static void __devinit piix_init_sidpr(struct ata_host *host)
1400 */ 1310 */
1401 if ((scontrol & 0xf00) != 0x300) { 1311 if ((scontrol & 0xf00) != 0x300) {
1402 scontrol |= 0x300; 1312 scontrol |= 0x300;
1403 piix_sidpr_write(dev0, SCR_CONTROL, scontrol); 1313 piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
1404 scontrol = piix_sidpr_read(dev0, SCR_CONTROL); 1314 piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
1405 1315
1406 if ((scontrol & 0xf00) != 0x300) { 1316 if ((scontrol & 0xf00) != 0x300) {
1407 dev_printk(KERN_INFO, host->dev, "SCR access via " 1317 dev_printk(KERN_INFO, host->dev, "SCR access via "
1408 "SIDPR is available but doesn't work\n"); 1318 "SIDPR is available but doesn't work\n");
1409 return; 1319 return 0;
1410 } 1320 }
1411 } 1321 }
1412 1322
1413 host->ports[0]->ops = &piix_sidpr_sata_ops; 1323 /* okay, SCRs available, set ops and ask libata for slave_link */
1414 host->ports[1]->ops = &piix_sidpr_sata_ops; 1324 for (i = 0; i < 2; i++) {
1325 struct ata_port *ap = host->ports[i];
1326
1327 ap->ops = &piix_sidpr_sata_ops;
1328
1329 if (ap->flags & ATA_FLAG_SLAVE_POSS) {
1330 rc = ata_slave_link_init(ap);
1331 if (rc)
1332 return rc;
1333 }
1334 }
1335
1336 return 0;
1415} 1337}
1416 1338
1417static void piix_iocfg_bit18_quirk(struct pci_dev *pdev) 1339static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
@@ -1521,7 +1443,9 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1521 /* initialize controller */ 1443 /* initialize controller */
1522 if (port_flags & ATA_FLAG_SATA) { 1444 if (port_flags & ATA_FLAG_SATA) {
1523 piix_init_pcs(host, piix_map_db_table[ent->driver_data]); 1445 piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
1524 piix_init_sidpr(host); 1446 rc = piix_init_sidpr(host);
1447 if (rc)
1448 return rc;
1525 } 1449 }
1526 1450
1527 /* apply IOCFG bit18 quirk */ 1451 /* apply IOCFG bit18 quirk */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 79e3a8e7a84a..1ee9499bd343 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -163,6 +163,67 @@ MODULE_LICENSE("GPL");
163MODULE_VERSION(DRV_VERSION); 163MODULE_VERSION(DRV_VERSION);
164 164
165 165
166/*
167 * Iterator helpers. Don't use directly.
168 *
169 * LOCKING:
170 * Host lock or EH context.
171 */
172struct ata_link *__ata_port_next_link(struct ata_port *ap,
173 struct ata_link *link, bool dev_only)
174{
175 /* NULL link indicates start of iteration */
176 if (!link) {
177 if (dev_only && sata_pmp_attached(ap))
178 return ap->pmp_link;
179 return &ap->link;
180 }
181
182 /* we just iterated over the host master link, what's next? */
183 if (link == &ap->link) {
184 if (!sata_pmp_attached(ap)) {
185 if (unlikely(ap->slave_link) && !dev_only)
186 return ap->slave_link;
187 return NULL;
188 }
189 return ap->pmp_link;
190 }
191
192 /* slave_link excludes PMP */
193 if (unlikely(link == ap->slave_link))
194 return NULL;
195
196 /* iterate to the next PMP link */
197 if (++link < ap->pmp_link + ap->nr_pmp_links)
198 return link;
199 return NULL;
200}
201
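For reference, __ata_port_next_link() is meant to be consumed through libata's link-iteration macros rather than called directly. A minimal sketch of how such a wrapper could look - the exact macros in include/linux/libata.h differ in name and detail, so treat this as illustration only:

	#define ata_port_for_each_link(link, ap) \
		for ((link) = __ata_port_next_link((ap), NULL, false); (link); \
		     (link) = __ata_port_next_link((ap), (link), false))

Passing dev_only=true instead yields an iterator that starts at the first fan-out link when a PMP is attached and never visits the slave link, which is exactly what the early returns at the top of the helper implement.
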
202/**
203 * ata_dev_phys_link - find physical link for a device
204 * @dev: ATA device to look up physical link for
205 *
206 * Look up physical link which @dev is attached to. Note that
207 * this is different from @dev->link only when @dev is on slave
208 * link. For all other cases, it's the same as @dev->link.
209 *
210 * LOCKING:
211 * Don't care.
212 *
213 * RETURNS:
214 * Pointer to the found physical link.
215 */
216struct ata_link *ata_dev_phys_link(struct ata_device *dev)
217{
218 struct ata_port *ap = dev->link->ap;
219
220 if (!ap->slave_link)
221 return dev->link;
222 if (!dev->devno)
223 return &ap->link;
224 return ap->slave_link;
225}
226
166/** 227/**
167 * ata_force_cbl - force cable type according to libata.force 228 * ata_force_cbl - force cable type according to libata.force
168 * @ap: ATA port of interest 229 * @ap: ATA port of interest
@@ -206,7 +267,8 @@ void ata_force_cbl(struct ata_port *ap)
206 * the host link and all fan-out ports connected via PMP. If the 267 * the host link and all fan-out ports connected via PMP. If the
207 * device part is specified as 0 (e.g. 1.00:), it specifies the 268 * device part is specified as 0 (e.g. 1.00:), it specifies the
208 * first fan-out link not the host link. Device number 15 always 269 * first fan-out link not the host link. Device number 15 always
209 * points to the host link whether PMP is attached or not. 270 * points to the host link whether PMP is attached or not. If the
271 * controller has slave link, device number 16 points to it.
210 * 272 *
211 * LOCKING: 273 * LOCKING:
212 * EH context. 274 * EH context.
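Concretely, a boot parameter such as libata.force=1.15:1.5Gbps keeps addressing the host (master) link of port 1, while libata.force=1.16:1.5Gbps now addresses the slave link on a controller that has one; the same 16 ID also selects the second device attached to the host port for the per-device overrides changed below. (The 1.5Gbps value is only an illustration - any of the usual libata.force parameters applies.)
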
@@ -214,12 +276,11 @@ void ata_force_cbl(struct ata_port *ap)
214static void ata_force_link_limits(struct ata_link *link) 276static void ata_force_link_limits(struct ata_link *link)
215{ 277{
216 bool did_spd = false; 278 bool did_spd = false;
217 int linkno, i; 279 int linkno = link->pmp;
280 int i;
218 281
219 if (ata_is_host_link(link)) 282 if (ata_is_host_link(link))
220 linkno = 15; 283 linkno += 15;
221 else
222 linkno = link->pmp;
223 284
224 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 285 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
225 const struct ata_force_ent *fe = &ata_force_tbl[i]; 286 const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -266,9 +327,9 @@ static void ata_force_xfermask(struct ata_device *dev)
266 int alt_devno = devno; 327 int alt_devno = devno;
267 int i; 328 int i;
268 329
269 /* allow n.15 for the first device attached to host port */ 330 /* allow n.15/16 for devices attached to host port */
270 if (ata_is_host_link(dev->link) && devno == 0) 331 if (ata_is_host_link(dev->link))
271 alt_devno = 15; 332 alt_devno += 15;
272 333
273 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 334 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
274 const struct ata_force_ent *fe = &ata_force_tbl[i]; 335 const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -320,9 +381,9 @@ static void ata_force_horkage(struct ata_device *dev)
320 int alt_devno = devno; 381 int alt_devno = devno;
321 int i; 382 int i;
322 383
323 /* allow n.15 for the first device attached to host port */ 384 /* allow n.15/16 for devices attached to host port */
324 if (ata_is_host_link(dev->link) && devno == 0) 385 if (ata_is_host_link(dev->link))
325 alt_devno = 15; 386 alt_devno += 15;
326 387
327 for (i = 0; i < ata_force_tbl_size; i++) { 388 for (i = 0; i < ata_force_tbl_size; i++) {
328 const struct ata_force_ent *fe = &ata_force_tbl[i]; 389 const struct ata_force_ent *fe = &ata_force_tbl[i];
@@ -2681,7 +2742,7 @@ static void sata_print_link_status(struct ata_link *link)
2681 return; 2742 return;
2682 sata_scr_read(link, SCR_CONTROL, &scontrol); 2743 sata_scr_read(link, SCR_CONTROL, &scontrol);
2683 2744
2684 if (ata_link_online(link)) { 2745 if (ata_phys_link_online(link)) {
2685 tmp = (sstatus >> 4) & 0xf; 2746 tmp = (sstatus >> 4) & 0xf;
2686 ata_link_printk(link, KERN_INFO, 2747 ata_link_printk(link, KERN_INFO,
2687 "SATA link up %s (SStatus %X SControl %X)\n", 2748 "SATA link up %s (SStatus %X SControl %X)\n",
@@ -3372,6 +3433,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3372 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3433 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3373 int warned = 0; 3434 int warned = 0;
3374 3435
3436 /* Slave readiness can't be tested separately from master. On
3437 * M/S emulation configuration, this function should be called
3438 * only on the master and it will handle both master and slave.
3439 */
3440 WARN_ON(link == link->ap->slave_link);
3441
3375 if (time_after(nodev_deadline, deadline)) 3442 if (time_after(nodev_deadline, deadline))
3376 nodev_deadline = deadline; 3443 nodev_deadline = deadline;
3377 3444
@@ -3593,7 +3660,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3593 } 3660 }
3594 3661
3595 /* no point in trying softreset on offline link */ 3662 /* no point in trying softreset on offline link */
3596 if (ata_link_offline(link)) 3663 if (ata_phys_link_offline(link))
3597 ehc->i.action &= ~ATA_EH_SOFTRESET; 3664 ehc->i.action &= ~ATA_EH_SOFTRESET;
3598 3665
3599 return 0; 3666 return 0;
@@ -3671,7 +3738,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3671 if (rc) 3738 if (rc)
3672 goto out; 3739 goto out;
3673 /* if link is offline nothing more to do */ 3740 /* if link is offline nothing more to do */
3674 if (ata_link_offline(link)) 3741 if (ata_phys_link_offline(link))
3675 goto out; 3742 goto out;
3676 3743
3677 /* Link is online. From this point, -ENODEV too is an error. */ 3744 /* Link is online. From this point, -ENODEV too is an error. */
@@ -4868,10 +4935,8 @@ int sata_scr_valid(struct ata_link *link)
4868int sata_scr_read(struct ata_link *link, int reg, u32 *val) 4935int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4869{ 4936{
4870 if (ata_is_host_link(link)) { 4937 if (ata_is_host_link(link)) {
4871 struct ata_port *ap = link->ap;
4872
4873 if (sata_scr_valid(link)) 4938 if (sata_scr_valid(link))
4874 return ap->ops->scr_read(ap, reg, val); 4939 return link->ap->ops->scr_read(link, reg, val);
4875 return -EOPNOTSUPP; 4940 return -EOPNOTSUPP;
4876 } 4941 }
4877 4942
@@ -4897,10 +4962,8 @@ int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4897int sata_scr_write(struct ata_link *link, int reg, u32 val) 4962int sata_scr_write(struct ata_link *link, int reg, u32 val)
4898{ 4963{
4899 if (ata_is_host_link(link)) { 4964 if (ata_is_host_link(link)) {
4900 struct ata_port *ap = link->ap;
4901
4902 if (sata_scr_valid(link)) 4965 if (sata_scr_valid(link))
4903 return ap->ops->scr_write(ap, reg, val); 4966 return link->ap->ops->scr_write(link, reg, val);
4904 return -EOPNOTSUPP; 4967 return -EOPNOTSUPP;
4905 } 4968 }
4906 4969
@@ -4925,13 +4988,12 @@ int sata_scr_write(struct ata_link *link, int reg, u32 val)
4925int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 4988int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4926{ 4989{
4927 if (ata_is_host_link(link)) { 4990 if (ata_is_host_link(link)) {
4928 struct ata_port *ap = link->ap;
4929 int rc; 4991 int rc;
4930 4992
4931 if (sata_scr_valid(link)) { 4993 if (sata_scr_valid(link)) {
4932 rc = ap->ops->scr_write(ap, reg, val); 4994 rc = link->ap->ops->scr_write(link, reg, val);
4933 if (rc == 0) 4995 if (rc == 0)
4934 rc = ap->ops->scr_read(ap, reg, &val); 4996 rc = link->ap->ops->scr_read(link, reg, &val);
4935 return rc; 4997 return rc;
4936 } 4998 }
4937 return -EOPNOTSUPP; 4999 return -EOPNOTSUPP;
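On the LLD side this API change is mechanical: the scr_read/scr_write port operations now receive the ata_link instead of the ata_port (see the sata_fsl and sata_inic162x conversions further down). A minimal sketch of the new shape - the foo_* names and the 4-byte register stride are placeholders, not anything defined by this patch, and <linux/libata.h> is assumed:

	static int foo_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
	{
		void __iomem *scr_base = foo_port_base(link->ap);	/* hypothetical helper */

		if (sc_reg > SCR_CONTROL)
			return -EINVAL;
		*val = ioread32(scr_base + sc_reg * 4);
		return 0;
	}

	static int foo_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
	{
		void __iomem *scr_base = foo_port_base(link->ap);	/* hypothetical helper */

		if (sc_reg > SCR_CONTROL)
			return -EINVAL;
		iowrite32(val, scr_base + sc_reg * 4);
		return 0;
	}

	static struct ata_port_operations foo_ops = {
		.inherits	= &sata_port_ops,
		.scr_read	= foo_scr_read,
		.scr_write	= foo_scr_write,
	};

Handing the link rather than the port to the callback is what lets a controller with separate master/slave SCR blocks (the slave_link case introduced below) pick the right register set from the link alone.
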
@@ -4941,7 +5003,7 @@ int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4941} 5003}
4942 5004
4943/** 5005/**
4944 * ata_link_online - test whether the given link is online 5006 * ata_phys_link_online - test whether the given link is online
4945 * @link: ATA link to test 5007 * @link: ATA link to test
4946 * 5008 *
4947 * Test whether @link is online. Note that this function returns 5009 * Test whether @link is online. Note that this function returns
@@ -4952,20 +5014,20 @@ int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4952 * None. 5014 * None.
4953 * 5015 *
4954 * RETURNS: 5016 * RETURNS:
4955 * 1 if the port online status is available and online. 5017 * True if the port online status is available and online.
4956 */ 5018 */
4957int ata_link_online(struct ata_link *link) 5019bool ata_phys_link_online(struct ata_link *link)
4958{ 5020{
4959 u32 sstatus; 5021 u32 sstatus;
4960 5022
4961 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5023 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4962 (sstatus & 0xf) == 0x3) 5024 (sstatus & 0xf) == 0x3)
4963 return 1; 5025 return true;
4964 return 0; 5026 return false;
4965} 5027}
4966 5028
4967/** 5029/**
4968 * ata_link_offline - test whether the given link is offline 5030 * ata_phys_link_offline - test whether the given link is offline
4969 * @link: ATA link to test 5031 * @link: ATA link to test
4970 * 5032 *
4971 * Test whether @link is offline. Note that this function 5033 * Test whether @link is offline. Note that this function
@@ -4976,16 +5038,68 @@ int ata_link_online(struct ata_link *link)
4976 * None. 5038 * None.
4977 * 5039 *
4978 * RETURNS: 5040 * RETURNS:
4979 * 1 if the port offline status is available and offline. 5041 * True if the port offline status is available and offline.
4980 */ 5042 */
4981int ata_link_offline(struct ata_link *link) 5043bool ata_phys_link_offline(struct ata_link *link)
4982{ 5044{
4983 u32 sstatus; 5045 u32 sstatus;
4984 5046
4985 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5047 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4986 (sstatus & 0xf) != 0x3) 5048 (sstatus & 0xf) != 0x3)
4987 return 1; 5049 return true;
4988 return 0; 5050 return false;
5051}
5052
5053/**
5054 * ata_link_online - test whether the given link is online
5055 * @link: ATA link to test
5056 *
5057 * Test whether @link is online. This is identical to
5058 * ata_phys_link_online() when there's no slave link. When
5059 * there's a slave link, this function should only be called on
5060 * the master link and will return true if any of M/S links is
5061 * online.
5062 *
5063 * LOCKING:
5064 * None.
5065 *
5066 * RETURNS:
5067 * True if the port online status is available and online.
5068 */
5069bool ata_link_online(struct ata_link *link)
5070{
5071 struct ata_link *slave = link->ap->slave_link;
5072
5073 WARN_ON(link == slave); /* shouldn't be called on slave link */
5074
5075 return ata_phys_link_online(link) ||
5076 (slave && ata_phys_link_online(slave));
5077}
5078
5079/**
5080 * ata_link_offline - test whether the given link is offline
5081 * @link: ATA link to test
5082 *
5083 * Test whether @link is offline. This is identical to
5084 * ata_phys_link_offline() when there's no slave link. When
5085 * there's a slave link, this function should only be called on
5086 * the master link and will return true if both M/S links are
5087 * offline.
5088 *
5089 * LOCKING:
5090 * None.
5091 *
5092 * RETURNS:
5093 * True if the port offline status is available and offline.
5094 */
5095bool ata_link_offline(struct ata_link *link)
5096{
5097 struct ata_link *slave = link->ap->slave_link;
5098
5099 WARN_ON(link == slave); /* shouldn't be called on slave link */
5100
5101 return ata_phys_link_offline(link) &&
5102 (!slave || ata_phys_link_offline(slave));
4989} 5103}
4990 5104
4991#ifdef CONFIG_PM 5105#ifdef CONFIG_PM
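The net effect is a split API: error-handling internals that care about one physical phy go through ata_dev_phys_link() plus the ata_phys_link_*() tests, while code that reasons about the port as a whole keeps calling ata_link_online()/ata_link_offline() on the master link only. The pattern, as used throughout the rest of this patch:

	/* per-device physical state, e.g. when deciding whether to detach */
	if (ata_phys_link_offline(ata_dev_phys_link(dev)))
		ata_eh_detach_dev(dev);

	/* port-level view: true if either the master or the slave phy is up */
	bool any_online = ata_link_online(&ap->link);
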
@@ -5127,11 +5241,11 @@ int ata_port_start(struct ata_port *ap)
5127 */ 5241 */
5128void ata_dev_init(struct ata_device *dev) 5242void ata_dev_init(struct ata_device *dev)
5129{ 5243{
5130 struct ata_link *link = dev->link; 5244 struct ata_link *link = ata_dev_phys_link(dev);
5131 struct ata_port *ap = link->ap; 5245 struct ata_port *ap = link->ap;
5132 unsigned long flags; 5246 unsigned long flags;
5133 5247
5134 /* SATA spd limit is bound to the first device */ 5248 /* SATA spd limit is bound to the attached device, reset together */
5135 link->sata_spd_limit = link->hw_sata_spd_limit; 5249 link->sata_spd_limit = link->hw_sata_spd_limit;
5136 link->sata_spd = 0; 5250 link->sata_spd = 0;
5137 5251
@@ -5264,6 +5378,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5264 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5378 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5265 INIT_LIST_HEAD(&ap->eh_done_q); 5379 INIT_LIST_HEAD(&ap->eh_done_q);
5266 init_waitqueue_head(&ap->eh_wait_q); 5380 init_waitqueue_head(&ap->eh_wait_q);
5381 init_completion(&ap->park_req_pending);
5267 init_timer_deferrable(&ap->fastdrain_timer); 5382 init_timer_deferrable(&ap->fastdrain_timer);
5268 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5383 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5269 ap->fastdrain_timer.data = (unsigned long)ap; 5384 ap->fastdrain_timer.data = (unsigned long)ap;
@@ -5294,6 +5409,7 @@ static void ata_host_release(struct device *gendev, void *res)
5294 scsi_host_put(ap->scsi_host); 5409 scsi_host_put(ap->scsi_host);
5295 5410
5296 kfree(ap->pmp_link); 5411 kfree(ap->pmp_link);
5412 kfree(ap->slave_link);
5297 kfree(ap); 5413 kfree(ap);
5298 host->ports[i] = NULL; 5414 host->ports[i] = NULL;
5299 } 5415 }
@@ -5414,6 +5530,68 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5414 return host; 5530 return host;
5415} 5531}
5416 5532
5533/**
5534 * ata_slave_link_init - initialize slave link
5535 * @ap: port to initialize slave link for
5536 *
5537 * Create and initialize slave link for @ap. This enables slave
5538 * link handling on the port.
5539 *
5540 * In libata, a port contains links and a link contains devices.
5541 * There is single host link but if a PMP is attached to it,
5542 * there can be multiple fan-out links. On SATA, there's usually
5543 * a single device connected to a link but PATA and SATA
5544 * controllers emulating TF based interface can have two - master
5545 * and slave.
5546 *
5547 * However, there are a few controllers which don't fit into this
5548 * abstraction too well - SATA controllers which emulate TF
5549 * interface with both master and slave devices but also have
5550 * separate SCR register sets for each device. These controllers
5551 * need separate links for physical link handling
5552 * (e.g. onlineness, link speed) but should be treated like a
5553 * traditional M/S controller for everything else (e.g. command
5554 * issue, softreset).
5555 *
5556 * slave_link is libata's way of handling this class of
5557 * controllers without impacting core layer too much. For
5558 * anything other than physical link handling, the default host
5559 * link is used for both master and slave. For physical link
5560 * handling, separate @ap->slave_link is used. All dirty details
5561 * are implemented inside libata core layer. From LLD's POV, the
5562 * only difference is that prereset, hardreset and postreset are
5563 * called once more for the slave link, so the reset sequence
5564 * looks like the following.
5565 *
5566 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5567 * softreset(M) -> postreset(M) -> postreset(S)
5568 *
5569 * Note that softreset is called only for the master. Softreset
5570 * resets both M/S by definition, so SRST on master should handle
5571 * both (the standard method will work just fine).
5572 *
5573 * LOCKING:
5574 * Should be called before host is registered.
5575 *
5576 * RETURNS:
5577 * 0 on success, -errno on failure.
5578 */
5579int ata_slave_link_init(struct ata_port *ap)
5580{
5581 struct ata_link *link;
5582
5583 WARN_ON(ap->slave_link);
5584 WARN_ON(ap->flags & ATA_FLAG_PMP);
5585
5586 link = kzalloc(sizeof(*link), GFP_KERNEL);
5587 if (!link)
5588 return -ENOMEM;
5589
5590 ata_link_init(ap, link, 1);
5591 ap->slave_link = link;
5592 return 0;
5593}
5594
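From an LLD's perspective the whole mechanism reduces to one extra call during host initialization, as the ata_piix conversion at the top of this patch does; schematically (foo_sidpr_sata_ops stands in for the driver's SCR-capable ops):

	/* in the driver's init path, after the host has been allocated */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		ap->ops = &foo_sidpr_sata_ops;

		if (ap->flags & ATA_FLAG_SLAVE_POSS) {
			rc = ata_slave_link_init(ap);	/* before ata_host_register() */
			if (rc)
				return rc;
		}
	}

Everything else - the extra prereset/hardreset/postreset invocations, SControl/SStatus handling and the EH bookkeeping - happens inside the core layer as described in the comment above.
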
5417static void ata_host_stop(struct device *gendev, void *res) 5595static void ata_host_stop(struct device *gendev, void *res)
5418{ 5596{
5419 struct ata_host *host = dev_get_drvdata(gendev); 5597 struct ata_host *host = dev_get_drvdata(gendev);
@@ -5640,6 +5818,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5640 5818
5641 /* init sata_spd_limit to the current value */ 5819 /* init sata_spd_limit to the current value */
5642 sata_link_init_spd(&ap->link); 5820 sata_link_init_spd(&ap->link);
5821 if (ap->slave_link)
5822 sata_link_init_spd(ap->slave_link);
5643 5823
5644 /* print per-port info to dmesg */ 5824 /* print per-port info to dmesg */
5645 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 5825 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
@@ -6260,10 +6440,12 @@ EXPORT_SYMBOL_GPL(ata_base_port_ops);
6260EXPORT_SYMBOL_GPL(sata_port_ops); 6440EXPORT_SYMBOL_GPL(sata_port_ops);
6261EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6441EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6262EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6442EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6443EXPORT_SYMBOL_GPL(__ata_port_next_link);
6263EXPORT_SYMBOL_GPL(ata_std_bios_param); 6444EXPORT_SYMBOL_GPL(ata_std_bios_param);
6264EXPORT_SYMBOL_GPL(ata_host_init); 6445EXPORT_SYMBOL_GPL(ata_host_init);
6265EXPORT_SYMBOL_GPL(ata_host_alloc); 6446EXPORT_SYMBOL_GPL(ata_host_alloc);
6266EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6447EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6448EXPORT_SYMBOL_GPL(ata_slave_link_init);
6267EXPORT_SYMBOL_GPL(ata_host_start); 6449EXPORT_SYMBOL_GPL(ata_host_start);
6268EXPORT_SYMBOL_GPL(ata_host_register); 6450EXPORT_SYMBOL_GPL(ata_host_register);
6269EXPORT_SYMBOL_GPL(ata_host_activate); 6451EXPORT_SYMBOL_GPL(ata_host_activate);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c1db2f234d2e..a93247cc395a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/blkdev.h>
36#include <linux/pci.h> 37#include <linux/pci.h>
37#include <scsi/scsi.h> 38#include <scsi/scsi.h>
38#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
@@ -79,6 +80,8 @@ enum {
79 */ 80 */
80 ATA_EH_PRERESET_TIMEOUT = 10000, 81 ATA_EH_PRERESET_TIMEOUT = 10000,
81 ATA_EH_FASTDRAIN_INTERVAL = 3000, 82 ATA_EH_FASTDRAIN_INTERVAL = 3000,
83
84 ATA_EH_UA_TRIES = 5,
82}; 85};
83 86
84/* The following table determines how we sequence resets. Each entry 87/* The following table determines how we sequence resets. Each entry
@@ -457,29 +460,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
457 * RETURNS: 460 * RETURNS:
458 * EH_HANDLED or EH_NOT_HANDLED 461 * EH_HANDLED or EH_NOT_HANDLED
459 */ 462 */
460enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 463enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
461{ 464{
462 struct Scsi_Host *host = cmd->device->host; 465 struct Scsi_Host *host = cmd->device->host;
463 struct ata_port *ap = ata_shost_to_port(host); 466 struct ata_port *ap = ata_shost_to_port(host);
464 unsigned long flags; 467 unsigned long flags;
465 struct ata_queued_cmd *qc; 468 struct ata_queued_cmd *qc;
466 enum scsi_eh_timer_return ret; 469 enum blk_eh_timer_return ret;
467 470
468 DPRINTK("ENTER\n"); 471 DPRINTK("ENTER\n");
469 472
470 if (ap->ops->error_handler) { 473 if (ap->ops->error_handler) {
471 ret = EH_NOT_HANDLED; 474 ret = BLK_EH_NOT_HANDLED;
472 goto out; 475 goto out;
473 } 476 }
474 477
475 ret = EH_HANDLED; 478 ret = BLK_EH_HANDLED;
476 spin_lock_irqsave(ap->lock, flags); 479 spin_lock_irqsave(ap->lock, flags);
477 qc = ata_qc_from_tag(ap, ap->link.active_tag); 480 qc = ata_qc_from_tag(ap, ap->link.active_tag);
478 if (qc) { 481 if (qc) {
479 WARN_ON(qc->scsicmd != cmd); 482 WARN_ON(qc->scsicmd != cmd);
480 qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 483 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
481 qc->err_mask |= AC_ERR_TIMEOUT; 484 qc->err_mask |= AC_ERR_TIMEOUT;
482 ret = EH_NOT_HANDLED; 485 ret = BLK_EH_NOT_HANDLED;
483 } 486 }
484 spin_unlock_irqrestore(ap->lock, flags); 487 spin_unlock_irqrestore(ap->lock, flags);
485 488
@@ -831,7 +834,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
831 * Note that ATA_QCFLAG_FAILED is unconditionally set after 834 * Note that ATA_QCFLAG_FAILED is unconditionally set after
832 * this function completes. 835 * this function completes.
833 */ 836 */
834 scsi_req_abort_cmd(qc->scsicmd); 837 blk_abort_request(qc->scsicmd->request);
835} 838}
836 839
837/** 840/**
@@ -1357,6 +1360,37 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
1357} 1360}
1358 1361
1359/** 1362/**
1363 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1364 * @dev: target ATAPI device
1365 * @r_sense_key: out parameter for sense_key
1366 *
1367 * Perform ATAPI TEST_UNIT_READY.
1368 *
1369 * LOCKING:
1370 * EH context (may sleep).
1371 *
1372 * RETURNS:
1373 * 0 on success, AC_ERR_* mask on failure.
1374 */
1375static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1376{
1377 u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1378 struct ata_taskfile tf;
1379 unsigned int err_mask;
1380
1381 ata_tf_init(dev, &tf);
1382
1383 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1384 tf.command = ATA_CMD_PACKET;
1385 tf.protocol = ATAPI_PROT_NODATA;
1386
1387 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1388 if (err_mask == AC_ERR_DEV)
1389 *r_sense_key = tf.feature >> 4;
1390 return err_mask;
1391}
1392
1393/**
1360 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1394 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1361 * @dev: device to perform REQUEST_SENSE to 1395 * @dev: device to perform REQUEST_SENSE to
1362 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1396 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1756,7 +1790,7 @@ static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1756static unsigned int ata_eh_speed_down(struct ata_device *dev, 1790static unsigned int ata_eh_speed_down(struct ata_device *dev,
1757 unsigned int eflags, unsigned int err_mask) 1791 unsigned int eflags, unsigned int err_mask)
1758{ 1792{
1759 struct ata_link *link = dev->link; 1793 struct ata_link *link = ata_dev_phys_link(dev);
1760 int xfer_ok = 0; 1794 int xfer_ok = 0;
1761 unsigned int verdict; 1795 unsigned int verdict;
1762 unsigned int action = 0; 1796 unsigned int action = 0;
@@ -1880,7 +1914,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
1880 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1914 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1881 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1915 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1882 1916
1883 if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link) 1917 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
1918 ata_dev_phys_link(qc->dev) != link)
1884 continue; 1919 continue;
1885 1920
1886 /* inherit upper level err_mask */ 1921 /* inherit upper level err_mask */
@@ -1967,6 +2002,23 @@ void ata_eh_autopsy(struct ata_port *ap)
1967 ata_port_for_each_link(link, ap) 2002 ata_port_for_each_link(link, ap)
1968 ata_eh_link_autopsy(link); 2003 ata_eh_link_autopsy(link);
1969 2004
2005 /* Handle the frigging slave link. Autopsy is done similarly
2006 * but actions and flags are transferred over to the master
2007 * link and handled from there.
2008 */
2009 if (ap->slave_link) {
2010 struct ata_eh_context *mehc = &ap->link.eh_context;
2011 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2012
2013 ata_eh_link_autopsy(ap->slave_link);
2014
2015 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2016 mehc->i.action |= sehc->i.action;
2017 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2018 mehc->i.flags |= sehc->i.flags;
2019 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2020 }
2021
1970 /* Autopsy of fanout ports can affect host link autopsy. 2022 /* Autopsy of fanout ports can affect host link autopsy.
1971 * Perform host link autopsy last. 2023 * Perform host link autopsy last.
1972 */ 2024 */
@@ -2001,7 +2053,8 @@ static void ata_eh_link_report(struct ata_link *link)
2001 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2053 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2002 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2054 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2003 2055
2004 if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link || 2056 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2057 ata_dev_phys_link(qc->dev) != link ||
2005 ((qc->flags & ATA_QCFLAG_QUIET) && 2058 ((qc->flags & ATA_QCFLAG_QUIET) &&
2006 qc->err_mask == AC_ERR_DEV)) 2059 qc->err_mask == AC_ERR_DEV))
2007 continue; 2060 continue;
@@ -2068,7 +2121,7 @@ static void ata_eh_link_report(struct ata_link *link)
2068 char cdb_buf[70] = ""; 2121 char cdb_buf[70] = "";
2069 2122
2070 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2123 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2071 qc->dev->link != link || !qc->err_mask) 2124 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2072 continue; 2125 continue;
2073 2126
2074 if (qc->dma_dir != DMA_NONE) { 2127 if (qc->dma_dir != DMA_NONE) {
@@ -2160,12 +2213,14 @@ void ata_eh_report(struct ata_port *ap)
2160} 2213}
2161 2214
2162static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2215static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2163 unsigned int *classes, unsigned long deadline) 2216 unsigned int *classes, unsigned long deadline,
2217 bool clear_classes)
2164{ 2218{
2165 struct ata_device *dev; 2219 struct ata_device *dev;
2166 2220
2167 ata_link_for_each_dev(dev, link) 2221 if (clear_classes)
2168 classes[dev->devno] = ATA_DEV_UNKNOWN; 2222 ata_link_for_each_dev(dev, link)
2223 classes[dev->devno] = ATA_DEV_UNKNOWN;
2169 2224
2170 return reset(link, classes, deadline); 2225 return reset(link, classes, deadline);
2171} 2226}
@@ -2187,17 +2242,20 @@ int ata_eh_reset(struct ata_link *link, int classify,
2187 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2242 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2188{ 2243{
2189 struct ata_port *ap = link->ap; 2244 struct ata_port *ap = link->ap;
2245 struct ata_link *slave = ap->slave_link;
2190 struct ata_eh_context *ehc = &link->eh_context; 2246 struct ata_eh_context *ehc = &link->eh_context;
2247 struct ata_eh_context *sehc = &slave->eh_context;
2191 unsigned int *classes = ehc->classes; 2248 unsigned int *classes = ehc->classes;
2192 unsigned int lflags = link->flags; 2249 unsigned int lflags = link->flags;
2193 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2250 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2194 int max_tries = 0, try = 0; 2251 int max_tries = 0, try = 0;
2252 struct ata_link *failed_link;
2195 struct ata_device *dev; 2253 struct ata_device *dev;
2196 unsigned long deadline, now; 2254 unsigned long deadline, now;
2197 ata_reset_fn_t reset; 2255 ata_reset_fn_t reset;
2198 unsigned long flags; 2256 unsigned long flags;
2199 u32 sstatus; 2257 u32 sstatus;
2200 int nr_known, rc; 2258 int nr_unknown, rc;
2201 2259
2202 /* 2260 /*
2203 * Prepare to reset 2261 * Prepare to reset
@@ -2252,8 +2310,30 @@ int ata_eh_reset(struct ata_link *link, int classify,
2252 } 2310 }
2253 2311
2254 if (prereset) { 2312 if (prereset) {
2255 rc = prereset(link, 2313 unsigned long deadline = ata_deadline(jiffies,
2256 ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT)); 2314 ATA_EH_PRERESET_TIMEOUT);
2315
2316 if (slave) {
2317 sehc->i.action &= ~ATA_EH_RESET;
2318 sehc->i.action |= ehc->i.action;
2319 }
2320
2321 rc = prereset(link, deadline);
2322
2323 /* If present, do prereset on slave link too. Reset
2324 * is skipped iff both master and slave links report
2325 * -ENOENT or clear ATA_EH_RESET.
2326 */
2327 if (slave && (rc == 0 || rc == -ENOENT)) {
2328 int tmp;
2329
2330 tmp = prereset(slave, deadline);
2331 if (tmp != -ENOENT)
2332 rc = tmp;
2333
2334 ehc->i.action |= sehc->i.action;
2335 }
2336
2257 if (rc) { 2337 if (rc) {
2258 if (rc == -ENOENT) { 2338 if (rc == -ENOENT) {
2259 ata_link_printk(link, KERN_DEBUG, 2339 ata_link_printk(link, KERN_DEBUG,
@@ -2302,25 +2382,51 @@ int ata_eh_reset(struct ata_link *link, int classify,
2302 else 2382 else
2303 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2383 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2304 2384
2305 rc = ata_do_reset(link, reset, classes, deadline); 2385 rc = ata_do_reset(link, reset, classes, deadline, true);
2306 if (rc && rc != -EAGAIN) 2386 if (rc && rc != -EAGAIN) {
2387 failed_link = link;
2307 goto fail; 2388 goto fail;
2389 }
2390
2391 /* hardreset slave link if existent */
2392 if (slave && reset == hardreset) {
2393 int tmp;
2394
2395 if (verbose)
2396 ata_link_printk(slave, KERN_INFO,
2397 "hard resetting link\n");
2308 2398
2399 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2400 tmp = ata_do_reset(slave, reset, classes, deadline,
2401 false);
2402 switch (tmp) {
2403 case -EAGAIN:
2404 rc = -EAGAIN;
2405 case 0:
2406 break;
2407 default:
2408 failed_link = slave;
2409 rc = tmp;
2410 goto fail;
2411 }
2412 }
2413
2414 /* perform follow-up SRST if necessary */
2309 if (reset == hardreset && 2415 if (reset == hardreset &&
2310 ata_eh_followup_srst_needed(link, rc, classes)) { 2416 ata_eh_followup_srst_needed(link, rc, classes)) {
2311 /* okay, let's do follow-up softreset */
2312 reset = softreset; 2417 reset = softreset;
2313 2418
2314 if (!reset) { 2419 if (!reset) {
2315 ata_link_printk(link, KERN_ERR, 2420 ata_link_printk(link, KERN_ERR,
2316 "follow-up softreset required " 2421 "follow-up softreset required "
2317 "but no softreset avaliable\n"); 2422 "but no softreset avaliable\n");
2423 failed_link = link;
2318 rc = -EINVAL; 2424 rc = -EINVAL;
2319 goto fail; 2425 goto fail;
2320 } 2426 }
2321 2427
2322 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2428 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2323 rc = ata_do_reset(link, reset, classes, deadline); 2429 rc = ata_do_reset(link, reset, classes, deadline, true);
2324 } 2430 }
2325 } else { 2431 } else {
2326 if (verbose) 2432 if (verbose)
@@ -2341,7 +2447,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2341 dev->pio_mode = XFER_PIO_0; 2447 dev->pio_mode = XFER_PIO_0;
2342 dev->flags &= ~ATA_DFLAG_SLEEPING; 2448 dev->flags &= ~ATA_DFLAG_SLEEPING;
2343 2449
2344 if (ata_link_offline(link)) 2450 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2345 continue; 2451 continue;
2346 2452
2347 /* apply class override */ 2453 /* apply class override */
@@ -2354,6 +2460,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
2354 /* record current link speed */ 2460 /* record current link speed */
2355 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2461 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2356 link->sata_spd = (sstatus >> 4) & 0xf; 2462 link->sata_spd = (sstatus >> 4) & 0xf;
2463 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2464 slave->sata_spd = (sstatus >> 4) & 0xf;
2357 2465
2358 /* thaw the port */ 2466 /* thaw the port */
2359 if (ata_is_host_link(link)) 2467 if (ata_is_host_link(link))
@@ -2366,12 +2474,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
2366 * reset and here. This race is mediated by cross checking 2474 * reset and here. This race is mediated by cross checking
2367 * link onlineness and classification result later. 2475 * link onlineness and classification result later.
2368 */ 2476 */
2369 if (postreset) 2477 if (postreset) {
2370 postreset(link, classes); 2478 postreset(link, classes);
2479 if (slave)
2480 postreset(slave, classes);
2481 }
2371 2482
2372 /* clear cached SError */ 2483 /* clear cached SError */
2373 spin_lock_irqsave(link->ap->lock, flags); 2484 spin_lock_irqsave(link->ap->lock, flags);
2374 link->eh_info.serror = 0; 2485 link->eh_info.serror = 0;
2486 if (slave)
2487 slave->eh_info.serror = 0;
2375 spin_unlock_irqrestore(link->ap->lock, flags); 2488 spin_unlock_irqrestore(link->ap->lock, flags);
2376 2489
2377 /* Make sure onlineness and classification result correspond. 2490 /* Make sure onlineness and classification result correspond.
@@ -2381,19 +2494,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
2381 * link onlineness and classification result, those conditions 2494 * link onlineness and classification result, those conditions
2382 * can be reliably detected and retried. 2495 * can be reliably detected and retried.
2383 */ 2496 */
2384 nr_known = 0; 2497 nr_unknown = 0;
2385 ata_link_for_each_dev(dev, link) { 2498 ata_link_for_each_dev(dev, link) {
2386 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2499 /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
2387 if (classes[dev->devno] == ATA_DEV_UNKNOWN) 2500 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2388 classes[dev->devno] = ATA_DEV_NONE; 2501 classes[dev->devno] = ATA_DEV_NONE;
2389 else 2502 if (ata_phys_link_online(ata_dev_phys_link(dev)))
2390 nr_known++; 2503 nr_unknown++;
2504 }
2391 } 2505 }
2392 2506
2393 if (classify && !nr_known && ata_link_online(link)) { 2507 if (classify && nr_unknown) {
2394 if (try < max_tries) { 2508 if (try < max_tries) {
2395 ata_link_printk(link, KERN_WARNING, "link online but " 2509 ata_link_printk(link, KERN_WARNING, "link online but "
2396 "device misclassified, retrying\n"); 2510 "device misclassified, retrying\n");
2511 failed_link = link;
2397 rc = -EAGAIN; 2512 rc = -EAGAIN;
2398 goto fail; 2513 goto fail;
2399 } 2514 }
@@ -2404,6 +2519,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
2404 2519
2405 /* reset successful, schedule revalidation */ 2520 /* reset successful, schedule revalidation */
2406 ata_eh_done(link, NULL, ATA_EH_RESET); 2521 ata_eh_done(link, NULL, ATA_EH_RESET);
2522 if (slave)
2523 ata_eh_done(slave, NULL, ATA_EH_RESET);
2407 ehc->last_reset = jiffies; 2524 ehc->last_reset = jiffies;
2408 ehc->i.action |= ATA_EH_REVALIDATE; 2525 ehc->i.action |= ATA_EH_REVALIDATE;
2409 2526
@@ -2411,6 +2528,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
2411 out: 2528 out:
2412 /* clear hotplug flag */ 2529 /* clear hotplug flag */
2413 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2530 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2531 if (slave)
2532 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2414 2533
2415 spin_lock_irqsave(ap->lock, flags); 2534 spin_lock_irqsave(ap->lock, flags);
2416 ap->pflags &= ~ATA_PFLAG_RESETTING; 2535 ap->pflags &= ~ATA_PFLAG_RESETTING;
@@ -2431,7 +2550,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2431 if (time_before(now, deadline)) { 2550 if (time_before(now, deadline)) {
2432 unsigned long delta = deadline - now; 2551 unsigned long delta = deadline - now;
2433 2552
2434 ata_link_printk(link, KERN_WARNING, 2553 ata_link_printk(failed_link, KERN_WARNING,
2435 "reset failed (errno=%d), retrying in %u secs\n", 2554 "reset failed (errno=%d), retrying in %u secs\n",
2436 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2555 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2437 2556
@@ -2439,13 +2558,92 @@ int ata_eh_reset(struct ata_link *link, int classify,
2439 delta = schedule_timeout_uninterruptible(delta); 2558 delta = schedule_timeout_uninterruptible(delta);
2440 } 2559 }
2441 2560
2442 if (rc == -EPIPE || try == max_tries - 1) 2561 if (try == max_tries - 1) {
2443 sata_down_spd_limit(link); 2562 sata_down_spd_limit(link);
2563 if (slave)
2564 sata_down_spd_limit(slave);
2565 } else if (rc == -EPIPE)
2566 sata_down_spd_limit(failed_link);
2567
2444 if (hardreset) 2568 if (hardreset)
2445 reset = hardreset; 2569 reset = hardreset;
2446 goto retry; 2570 goto retry;
2447} 2571}
2448 2572
2573static inline void ata_eh_pull_park_action(struct ata_port *ap)
2574{
2575 struct ata_link *link;
2576 struct ata_device *dev;
2577 unsigned long flags;
2578
2579 /*
2580 * This function can be thought of as an extended version of
2581 * ata_eh_about_to_do() specially crafted to accommodate the
2582 * requirements of ATA_EH_PARK handling. Since the EH thread
2583 * does not leave the do {} while () loop in ata_eh_recover as
2584 * long as the timeout for a park request to *one* device on
2585 * the port has not expired, and since we still want to pick
2586 * up park requests to other devices on the same port or
2587 * timeout updates for the same device, we have to pull
2588 * ATA_EH_PARK actions from eh_info into eh_context.i
2589 * ourselves at the beginning of each pass over the loop.
2590 *
2591 * Additionally, all write accesses to &ap->park_req_pending
2592 * through INIT_COMPLETION() (see below) or complete_all()
2593 * (see ata_scsi_park_store()) are protected by the host lock.
2594 * As a result we have that park_req_pending.done is zero on
2595 * exit from this function, i.e. when ATA_EH_PARK actions for
2596 * *all* devices on port ap have been pulled into the
2597 * respective eh_context structs. If, and only if,
2598 * park_req_pending.done is non-zero by the time we reach
2599 * wait_for_completion_timeout(), another ATA_EH_PARK action
2600 * has been scheduled for at least one of the devices on port
2601 * ap and we have to cycle over the do {} while () loop in
2602 * ata_eh_recover() again.
2603 */
2604
2605 spin_lock_irqsave(ap->lock, flags);
2606 INIT_COMPLETION(ap->park_req_pending);
2607 ata_port_for_each_link(link, ap) {
2608 ata_link_for_each_dev(dev, link) {
2609 struct ata_eh_info *ehi = &link->eh_info;
2610
2611 link->eh_context.i.dev_action[dev->devno] |=
2612 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2613 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2614 }
2615 }
2616 spin_unlock_irqrestore(ap->lock, flags);
2617}
2618
2619static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2620{
2621 struct ata_eh_context *ehc = &dev->link->eh_context;
2622 struct ata_taskfile tf;
2623 unsigned int err_mask;
2624
2625 ata_tf_init(dev, &tf);
2626 if (park) {
2627 ehc->unloaded_mask |= 1 << dev->devno;
2628 tf.command = ATA_CMD_IDLEIMMEDIATE;
2629 tf.feature = 0x44;
2630 tf.lbal = 0x4c;
2631 tf.lbam = 0x4e;
2632 tf.lbah = 0x55;
2633 } else {
2634 ehc->unloaded_mask &= ~(1 << dev->devno);
2635 tf.command = ATA_CMD_CHK_POWER;
2636 }
2637
2638 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2639 tf.protocol |= ATA_PROT_NODATA;
2640 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2641 if (park && (err_mask || tf.lbal != 0xc4)) {
2642 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2643 ehc->unloaded_mask &= ~(1 << dev->devno);
2644 }
2645}
2646
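For context, the taskfile built in the park branch is IDLE IMMEDIATE with the ATA-8 ACS "unload feature" signature - FEATURE = 0x44 and LBA(23:0) = 0x554E4C (ASCII "UNL") - and a drive signals a successful head unload by returning 0xC4 in LBA(7:0). That is why err_mask || tf.lbal != 0xc4 is treated as failure and the device's bit is cleared from ehc->unloaded_mask again; the unpark branch simply issues a benign CHECK POWER MODE to bring the drive out of the unloaded state. (The numeric details here are read from the spec, not defined anywhere in this patch.)
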
2449static int ata_eh_revalidate_and_attach(struct ata_link *link, 2647static int ata_eh_revalidate_and_attach(struct ata_link *link,
2450 struct ata_device **r_failed_dev) 2648 struct ata_device **r_failed_dev)
2451{ 2649{
@@ -2472,7 +2670,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2472 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2670 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2473 WARN_ON(dev->class == ATA_DEV_PMP); 2671 WARN_ON(dev->class == ATA_DEV_PMP);
2474 2672
2475 if (ata_link_offline(link)) { 2673 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2476 rc = -EIO; 2674 rc = -EIO;
2477 goto err; 2675 goto err;
2478 } 2676 }
@@ -2610,6 +2808,53 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2610 return rc; 2808 return rc;
2611} 2809}
2612 2810
2811/**
2812 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
2813 * @dev: ATAPI device to clear UA for
2814 *
2815 * Resets and other operations can make an ATAPI device raise
2816 * UNIT ATTENTION which causes the next operation to fail. This
2817 * function clears UA.
2818 *
2819 * LOCKING:
2820 * EH context (may sleep).
2821 *
2822 * RETURNS:
2823 * 0 on success, -errno on failure.
2824 */
2825static int atapi_eh_clear_ua(struct ata_device *dev)
2826{
2827 int i;
2828
2829 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
2830 u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
2831 u8 sense_key = 0;
2832 unsigned int err_mask;
2833
2834 err_mask = atapi_eh_tur(dev, &sense_key);
2835 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
2836 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
2837 "failed (err_mask=0x%x)\n", err_mask);
2838 return -EIO;
2839 }
2840
2841 if (!err_mask || sense_key != UNIT_ATTENTION)
2842 return 0;
2843
2844 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
2845 if (err_mask) {
2846 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
2847 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
2848 return -EIO;
2849 }
2850 }
2851
2852 ata_dev_printk(dev, KERN_WARNING,
2853 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
2854
2855 return 0;
2856}
2857
2613static int ata_link_nr_enabled(struct ata_link *link) 2858static int ata_link_nr_enabled(struct ata_link *link)
2614{ 2859{
2615 struct ata_device *dev; 2860 struct ata_device *dev;
@@ -2697,7 +2942,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2697 /* This is the last chance, better to slow 2942 /* This is the last chance, better to slow
2698 * down than lose it. 2943 * down than lose it.
2699 */ 2944 */
2700 sata_down_spd_limit(dev->link); 2945 sata_down_spd_limit(ata_dev_phys_link(dev));
2701 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2946 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2702 } 2947 }
2703 } 2948 }
@@ -2707,7 +2952,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
2707 ata_dev_disable(dev); 2952 ata_dev_disable(dev);
2708 2953
2709 /* detach if offline */ 2954 /* detach if offline */
2710 if (ata_link_offline(dev->link)) 2955 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2711 ata_eh_detach_dev(dev); 2956 ata_eh_detach_dev(dev);
2712 2957
2713 /* schedule probe if necessary */ 2958 /* schedule probe if necessary */
@@ -2755,7 +3000,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2755 struct ata_device *dev; 3000 struct ata_device *dev;
2756 int nr_failed_devs; 3001 int nr_failed_devs;
2757 int rc; 3002 int rc;
2758 unsigned long flags; 3003 unsigned long flags, deadline;
2759 3004
2760 DPRINTK("ENTER\n"); 3005 DPRINTK("ENTER\n");
2761 3006
@@ -2829,6 +3074,56 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2829 } 3074 }
2830 } 3075 }
2831 3076
3077 do {
3078 unsigned long now;
3079
3080 /*
3081 * clears ATA_EH_PARK in eh_info and resets
3082 * ap->park_req_pending
3083 */
3084 ata_eh_pull_park_action(ap);
3085
3086 deadline = jiffies;
3087 ata_port_for_each_link(link, ap) {
3088 ata_link_for_each_dev(dev, link) {
3089 struct ata_eh_context *ehc = &link->eh_context;
3090 unsigned long tmp;
3091
3092 if (dev->class != ATA_DEV_ATA)
3093 continue;
3094 if (!(ehc->i.dev_action[dev->devno] &
3095 ATA_EH_PARK))
3096 continue;
3097 tmp = dev->unpark_deadline;
3098 if (time_before(deadline, tmp))
3099 deadline = tmp;
3100 else if (time_before_eq(tmp, jiffies))
3101 continue;
3102 if (ehc->unloaded_mask & (1 << dev->devno))
3103 continue;
3104
3105 ata_eh_park_issue_cmd(dev, 1);
3106 }
3107 }
3108
3109 now = jiffies;
3110 if (time_before_eq(deadline, now))
3111 break;
3112
3113 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3114 deadline - now);
3115 } while (deadline);
3116 ata_port_for_each_link(link, ap) {
3117 ata_link_for_each_dev(dev, link) {
3118 if (!(link->eh_context.unloaded_mask &
3119 (1 << dev->devno)))
3120 continue;
3121
3122 ata_eh_park_issue_cmd(dev, 0);
3123 ata_eh_done(link, dev, ATA_EH_PARK);
3124 }
3125 }
3126
2832 /* the rest */ 3127 /* the rest */
2833 ata_port_for_each_link(link, ap) { 3128 ata_port_for_each_link(link, ap) {
2834 struct ata_eh_context *ehc = &link->eh_context; 3129 struct ata_eh_context *ehc = &link->eh_context;
@@ -2852,6 +3147,20 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
2852 ehc->i.flags &= ~ATA_EHI_SETMODE; 3147 ehc->i.flags &= ~ATA_EHI_SETMODE;
2853 } 3148 }
2854 3149
3150 /* If reset has been issued, clear UA to avoid
3151 * disrupting the current users of the device.
3152 */
3153 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3154 ata_link_for_each_dev(dev, link) {
3155 if (dev->class != ATA_DEV_ATAPI)
3156 continue;
3157 rc = atapi_eh_clear_ua(dev);
3158 if (rc)
3159 goto dev_fail;
3160 }
3161 }
3162
3163 /* configure link power saving */
2855 if (ehc->i.action & ATA_EH_LPM) 3164 if (ehc->i.action & ATA_EH_LPM)
2856 ata_link_for_each_dev(dev, link) 3165 ata_link_for_each_dev(dev, link)
2857 ata_dev_enable_pm(dev, ap->pm_policy); 3166 ata_dev_enable_pm(dev, ap->pm_policy);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b9d3ba423cb2..59fe051957ef 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -183,6 +183,105 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
183 ata_scsi_lpm_show, ata_scsi_lpm_put); 183 ata_scsi_lpm_show, ata_scsi_lpm_put);
184EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); 184EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
185 185
186static ssize_t ata_scsi_park_show(struct device *device,
187 struct device_attribute *attr, char *buf)
188{
189 struct scsi_device *sdev = to_scsi_device(device);
190 struct ata_port *ap;
191 struct ata_link *link;
192 struct ata_device *dev;
193 unsigned long flags;
194 unsigned int uninitialized_var(msecs);
195 int rc = 0;
196
197 ap = ata_shost_to_port(sdev->host);
198
199 spin_lock_irqsave(ap->lock, flags);
200 dev = ata_scsi_find_dev(ap, sdev);
201 if (!dev) {
202 rc = -ENODEV;
203 goto unlock;
204 }
205 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
206 rc = -EOPNOTSUPP;
207 goto unlock;
208 }
209
210 link = dev->link;
211 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
212 link->eh_context.unloaded_mask & (1 << dev->devno) &&
213 time_after(dev->unpark_deadline, jiffies))
214 msecs = jiffies_to_msecs(dev->unpark_deadline - jiffies);
215 else
216 msecs = 0;
217
218unlock:
219 spin_unlock_irq(ap->lock);
220
221 return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
222}
223
224static ssize_t ata_scsi_park_store(struct device *device,
225 struct device_attribute *attr,
226 const char *buf, size_t len)
227{
228 struct scsi_device *sdev = to_scsi_device(device);
229 struct ata_port *ap;
230 struct ata_device *dev;
231 long int input;
232 unsigned long flags;
233 int rc;
234
235 rc = strict_strtol(buf, 10, &input);
236 if (rc || input < -2)
237 return -EINVAL;
238 if (input > ATA_TMOUT_MAX_PARK) {
239 rc = -EOVERFLOW;
240 input = ATA_TMOUT_MAX_PARK;
241 }
242
243 ap = ata_shost_to_port(sdev->host);
244
245 spin_lock_irqsave(ap->lock, flags);
246 dev = ata_scsi_find_dev(ap, sdev);
247 if (unlikely(!dev)) {
248 rc = -ENODEV;
249 goto unlock;
250 }
251 if (dev->class != ATA_DEV_ATA) {
252 rc = -EOPNOTSUPP;
253 goto unlock;
254 }
255
256 if (input >= 0) {
257 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
258 rc = -EOPNOTSUPP;
259 goto unlock;
260 }
261
262 dev->unpark_deadline = ata_deadline(jiffies, input);
263 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
264 ata_port_schedule_eh(ap);
265 complete(&ap->park_req_pending);
266 } else {
267 switch (input) {
268 case -1:
269 dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
270 break;
271 case -2:
272 dev->flags |= ATA_DFLAG_NO_UNLOAD;
273 break;
274 }
275 }
276unlock:
277 spin_unlock_irqrestore(ap->lock, flags);
278
279 return rc ? rc : len;
280}
281DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
282 ata_scsi_park_show, ata_scsi_park_store);
283EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
284
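The new attribute surfaces under the SCSI device's sysfs directory (typically reachable as /sys/block/sdX/device/unload_heads - the path follows the usual scsi_device layout and is an assumption here, not something spelled out in the patch). Judging by the jiffies_to_msecs()/ata_deadline() handling above, the value is a park duration in milliseconds, with -1 re-allowing and -2 forbidding unloads via ATA_DFLAG_NO_UNLOAD. A minimal user-space sketch:

	/* park_heads.c - poke the unload_heads attribute added above.
	 * Build:  cc -o park_heads park_heads.c
	 * Usage:  ./park_heads /sys/block/sda/device/unload_heads 3000
	 * (path and value are illustrative)
	 */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		FILE *f;
		int bad;

		if (argc != 3) {
			fprintf(stderr, "usage: %s <unload_heads-attr> <msecs|-1|-2>\n",
				argv[0]);
			return 1;
		}

		f = fopen(argv[1], "w");
		if (!f) {
			perror(argv[1]);
			return 1;
		}

		/* the kernel parses the value with strict_strtol(), so a plain
		 * decimal number is all that is needed */
		bad = fprintf(f, "%ld", strtol(argv[2], NULL, 10)) < 0;
		bad |= fclose(f) != 0;
		if (bad) {
			perror("write");	/* e.g. EOPNOTSUPP for a non-ATA device */
			return 1;
		}
		return 0;
	}

Reading the same attribute back reports how many milliseconds of the current park request are still outstanding (0 when idle).
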
186static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 285static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
187{ 286{
188 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 287 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
@@ -269,6 +368,12 @@ DEVICE_ATTR(sw_activity, S_IWUGO | S_IRUGO, ata_scsi_activity_show,
269 ata_scsi_activity_store); 368 ata_scsi_activity_store);
270EXPORT_SYMBOL_GPL(dev_attr_sw_activity); 369EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
271 370
371struct device_attribute *ata_common_sdev_attrs[] = {
372 &dev_attr_unload_heads,
373 NULL
374};
375EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
376
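Low-level drivers pick this array up through the sdev_attrs field of their scsi_host_template - in-tree libata drivers typically inherit it via libata's shared template definitions, but the relevant wiring is just:

	static struct scsi_host_template foo_sht = {
		/* ... the usual libata scsi_host_template fields ... */
		.sdev_attrs	= ata_common_sdev_attrs,
	};

which is what makes the unload_heads file appear for every device behind such a host.
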
272static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, 377static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
273 void (*done)(struct scsi_cmnd *)) 378 void (*done)(struct scsi_cmnd *))
274{ 379{
@@ -954,6 +1059,9 @@ static int atapi_drain_needed(struct request *rq)
954static int ata_scsi_dev_config(struct scsi_device *sdev, 1059static int ata_scsi_dev_config(struct scsi_device *sdev,
955 struct ata_device *dev) 1060 struct ata_device *dev)
956{ 1061{
1062 if (!ata_id_has_unload(dev->id))
1063 dev->flags |= ATA_DFLAG_NO_UNLOAD;
1064
957 /* configure max sectors */ 1065 /* configure max sectors */
958 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors); 1066 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
959 1067
@@ -977,6 +1085,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
977 1085
978 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); 1086 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
979 } else { 1087 } else {
1088 if (ata_id_is_ssd(dev->id))
1089 queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
1090 sdev->request_queue);
1091
980 /* ATA devices must be sector aligned */ 1092 /* ATA devices must be sector aligned */
981 blk_queue_update_dma_alignment(sdev->request_queue, 1093 blk_queue_update_dma_alignment(sdev->request_queue,
982 ATA_SECT_SIZE - 1); 1094 ATA_SECT_SIZE - 1);
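The ata_id_is_ssd() test a few lines up is not part of this hunk; a plausible shape for it, assuming it keys off the nominal media rotation rate word of the IDENTIFY DEVICE data (word 217, where a value of 1 denotes non-rotating, i.e. solid-state, media):

	/* sketch - the real helper lives in include/linux/ata.h */
	static inline int ata_id_is_ssd(const u16 *id)
	{
		return id[217] == 0x0001;
	}

Setting QUEUE_FLAG_NONROT on the request queue lets the block layer's I/O schedulers skip rotational-latency heuristics for such devices.
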
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ade5c75b6144..fe2839e58774 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -70,6 +70,7 @@ extern int atapi_passthru16;
70extern int libata_fua; 70extern int libata_fua;
71extern int libata_noacpi; 71extern int libata_noacpi;
72extern int libata_allow_tpm; 72extern int libata_allow_tpm;
73extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
73extern void ata_force_cbl(struct ata_port *ap); 74extern void ata_force_cbl(struct ata_port *ap);
74extern u64 ata_tf_to_lba(const struct ata_taskfile *tf); 75extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
75extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf); 76extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
@@ -107,6 +108,8 @@ extern void ata_qc_issue(struct ata_queued_cmd *qc);
107extern void __ata_qc_complete(struct ata_queued_cmd *qc); 108extern void __ata_qc_complete(struct ata_queued_cmd *qc);
108extern int atapi_check_dma(struct ata_queued_cmd *qc); 109extern int atapi_check_dma(struct ata_queued_cmd *qc);
109extern void swap_buf_le16(u16 *buf, unsigned int buf_words); 110extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
111extern bool ata_phys_link_online(struct ata_link *link);
112extern bool ata_phys_link_offline(struct ata_link *link);
110extern void ata_dev_init(struct ata_device *dev); 113extern void ata_dev_init(struct ata_device *dev);
111extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp); 114extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
112extern int sata_link_init_spd(struct ata_link *link); 115extern int sata_link_init_spd(struct ata_link *link);
@@ -152,7 +155,7 @@ extern int ata_bus_probe(struct ata_port *ap);
152/* libata-eh.c */ 155/* libata-eh.c */
153extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); 156extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
154extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); 157extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
155extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); 158extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
156extern void ata_scsi_error(struct Scsi_Host *host); 159extern void ata_scsi_error(struct Scsi_Host *host);
157extern void ata_port_wait_eh(struct ata_port *ap); 160extern void ata_port_wait_eh(struct ata_port *ap);
158extern void ata_eh_fastdrain_timerfn(unsigned long arg); 161extern void ata_eh_fastdrain_timerfn(unsigned long arg);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index d3932901a3b3..1266924c11f9 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1632,6 +1632,8 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
1632 return -ENODEV; 1632 return -ENODEV;
1633 } 1633 }
1634 1634
1635 dev_set_drvdata(&pdev->dev, host);
1636
1635 return 0; 1637 return 0;
1636} 1638}
1637 1639
@@ -1648,6 +1650,7 @@ static int __devexit bfin_atapi_remove(struct platform_device *pdev)
1648 struct ata_host *host = dev_get_drvdata(dev); 1650 struct ata_host *host = dev_get_drvdata(dev);
1649 1651
1650 ata_host_detach(host); 1652 ata_host_detach(host);
1653 dev_set_drvdata(&pdev->dev, NULL);
1651 1654
1652 peripheral_free_list(atapi_io_port); 1655 peripheral_free_list(atapi_io_port);
1653 1656
@@ -1655,27 +1658,44 @@ static int __devexit bfin_atapi_remove(struct platform_device *pdev)
1655} 1658}
1656 1659
1657#ifdef CONFIG_PM 1660#ifdef CONFIG_PM
1658int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state) 1661static int bfin_atapi_suspend(struct platform_device *pdev, pm_message_t state)
1659{ 1662{
1660 return 0; 1663 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1664 if (host)
1665 return ata_host_suspend(host, state);
1666 else
1667 return 0;
1661} 1668}
1662 1669
1663int bfin_atapi_resume(struct platform_device *pdev) 1670static int bfin_atapi_resume(struct platform_device *pdev)
1664{ 1671{
1672 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1673 int ret;
1674
1675 if (host) {
1676 ret = bfin_reset_controller(host);
1677 if (ret) {
1678 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
1679 return ret;
1680 }
1681 ata_host_resume(host);
1682 }
1683
1665 return 0; 1684 return 0;
1666} 1685}
1686#else
1687#define bfin_atapi_suspend NULL
1688#define bfin_atapi_resume NULL
1667#endif 1689#endif
1668 1690
1669static struct platform_driver bfin_atapi_driver = { 1691static struct platform_driver bfin_atapi_driver = {
1670 .probe = bfin_atapi_probe, 1692 .probe = bfin_atapi_probe,
1671 .remove = __devexit_p(bfin_atapi_remove), 1693 .remove = __devexit_p(bfin_atapi_remove),
1694 .suspend = bfin_atapi_suspend,
1695 .resume = bfin_atapi_resume,
1672 .driver = { 1696 .driver = {
1673 .name = DRV_NAME, 1697 .name = DRV_NAME,
1674 .owner = THIS_MODULE, 1698 .owner = THIS_MODULE,
1675#ifdef CONFIG_PM
1676 .suspend = bfin_atapi_suspend,
1677 .resume = bfin_atapi_resume,
1678#endif
1679 }, 1699 },
1680}; 1700};
1681 1701
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index e970b227fbce..a598bb36aafc 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -230,7 +230,7 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
230 tmpbyte & 1, tmpbyte & 0x30); 230 tmpbyte & 1, tmpbyte & 0x30);
231 231
232 *try_mmio = 0; 232 *try_mmio = 0;
233#ifdef CONFIG_PPC_MERGE 233#ifdef CONFIG_PPC
234 if (machine_is(cell)) 234 if (machine_is(cell))
235 *try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5); 235 *try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
236#endif 236#endif
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 3924e7209a44..1a56db92ff7a 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -469,10 +469,10 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
469 return true; 469 return true;
470} 470}
471 471
472static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in, 472static int sata_fsl_scr_write(struct ata_link *link,
473 u32 val) 473 unsigned int sc_reg_in, u32 val)
474{ 474{
475 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 475 struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
476 void __iomem *ssr_base = host_priv->ssr_base; 476 void __iomem *ssr_base = host_priv->ssr_base;
477 unsigned int sc_reg; 477 unsigned int sc_reg;
478 478
@@ -493,10 +493,10 @@ static int sata_fsl_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
493 return 0; 493 return 0;
494} 494}
495 495
496static int sata_fsl_scr_read(struct ata_port *ap, unsigned int sc_reg_in, 496static int sata_fsl_scr_read(struct ata_link *link,
497 u32 *val) 497 unsigned int sc_reg_in, u32 *val)
498{ 498{
499 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 499 struct sata_fsl_host_priv *host_priv = link->ap->host->private_data;
500 void __iomem *ssr_base = host_priv->ssr_base; 500 void __iomem *ssr_base = host_priv->ssr_base;
501 unsigned int sc_reg; 501 unsigned int sc_reg;
502 502
@@ -645,12 +645,12 @@ static int sata_fsl_port_start(struct ata_port *ap)
645 * Workaround for 8315DS board 3gbps link-up issue, 645 * Workaround for 8315DS board 3gbps link-up issue,
646 * currently limit SATA port to GEN1 speed 646 * currently limit SATA port to GEN1 speed
647 */ 647 */
648 sata_fsl_scr_read(ap, SCR_CONTROL, &temp); 648 sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
649 temp &= ~(0xF << 4); 649 temp &= ~(0xF << 4);
650 temp |= (0x1 << 4); 650 temp |= (0x1 << 4);
651 sata_fsl_scr_write(ap, SCR_CONTROL, temp); 651 sata_fsl_scr_write(&ap->link, SCR_CONTROL, temp);
652 652
653 sata_fsl_scr_read(ap, SCR_CONTROL, &temp); 653 sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
654 dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n", 654 dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n",
655 temp); 655 temp);
656#endif 656#endif
@@ -868,7 +868,7 @@ issue_srst:
868 ioread32(CQ + hcr_base), 868 ioread32(CQ + hcr_base),
869 ioread32(CA + hcr_base), ioread32(CC + hcr_base)); 869 ioread32(CA + hcr_base), ioread32(CC + hcr_base));
870 870
871 sata_fsl_scr_read(ap, SCR_ERROR, &Serror); 871 sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
872 872
873 DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); 873 DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
874 DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); 874 DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
@@ -972,9 +972,9 @@ static void sata_fsl_error_intr(struct ata_port *ap)
972 * Handle & Clear SError 972 * Handle & Clear SError
973 */ 973 */
974 974
975 sata_fsl_scr_read(ap, SCR_ERROR, &SError); 975 sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
976 if (unlikely(SError & 0xFFFF0000)) { 976 if (unlikely(SError & 0xFFFF0000)) {
977 sata_fsl_scr_write(ap, SCR_ERROR, SError); 977 sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
978 } 978 }
979 979
980 DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", 980 DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
@@ -1091,7 +1091,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
1091 1091
1092 hstatus = ioread32(hcr_base + HSTATUS); 1092 hstatus = ioread32(hcr_base + HSTATUS);
1093 1093
1094 sata_fsl_scr_read(ap, SCR_ERROR, &SError); 1094 sata_fsl_scr_read(&ap->link, SCR_ERROR, &SError);
1095 1095
1096 if (unlikely(SError & 0xFFFF0000)) { 1096 if (unlikely(SError & 0xFFFF0000)) {
1097 DPRINTK("serror @host_intr : 0x%x\n", SError); 1097 DPRINTK("serror @host_intr : 0x%x\n", SError);
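The sata_fsl hunks above, and the hunks for the remaining SATA drivers below, apply one mechanical conversion: the scr_read/scr_write ops now take a struct ata_link instead of a struct ata_port, the port is reached through link->ap, and internal callers pass &ap->link. A minimal sketch of the new calling convention, using hypothetical example_* names:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/libata.h>

/* New convention: the link is passed and the port is reached via link->ap
 * (the old form took struct ata_port *ap and used ap->ioaddr directly). */
static int example_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int example_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

/* Internal callers now pass the port's default link. */
static void example_clear_serror(struct ata_port *ap)
{
	u32 serror;

	if (example_scr_read(&ap->link, SCR_ERROR, &serror) == 0)
		example_scr_write(&ap->link, SCR_ERROR, serror);
}
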
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 5032c32fa505..fbbd87c96f10 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -269,9 +269,9 @@ static void inic_reset_port(void __iomem *port_base)
269 writeb(0xff, port_base + PORT_IRQ_STAT); 269 writeb(0xff, port_base + PORT_IRQ_STAT);
270} 270}
271 271
272static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) 272static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
273{ 273{
274 void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR; 274 void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
275 void __iomem *addr; 275 void __iomem *addr;
276 276
277 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 277 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -286,9 +286,9 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
286 return 0; 286 return 0;
287} 287}
288 288
289static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) 289static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
290{ 290{
291 void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR; 291 void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
292 292
293 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 293 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
294 return -EINVAL; 294 return -EINVAL;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c815f8ecf6e6..2b24ae58b52e 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -493,10 +493,10 @@ struct mv_hw_ops {
493 void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 493 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
494}; 494};
495 495
496static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); 496static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
497static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 497static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
498static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); 498static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
499static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 499static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
500static int mv_port_start(struct ata_port *ap); 500static int mv_port_start(struct ata_port *ap);
501static void mv_port_stop(struct ata_port *ap); 501static void mv_port_stop(struct ata_port *ap);
502static int mv_qc_defer(struct ata_queued_cmd *qc); 502static int mv_qc_defer(struct ata_queued_cmd *qc);
@@ -1070,23 +1070,23 @@ static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1070 return ofs; 1070 return ofs;
1071} 1071}
1072 1072
1073static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) 1073static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1074{ 1074{
1075 unsigned int ofs = mv_scr_offset(sc_reg_in); 1075 unsigned int ofs = mv_scr_offset(sc_reg_in);
1076 1076
1077 if (ofs != 0xffffffffU) { 1077 if (ofs != 0xffffffffU) {
1078 *val = readl(mv_ap_base(ap) + ofs); 1078 *val = readl(mv_ap_base(link->ap) + ofs);
1079 return 0; 1079 return 0;
1080 } else 1080 } else
1081 return -EINVAL; 1081 return -EINVAL;
1082} 1082}
1083 1083
1084static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 1084static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1085{ 1085{
1086 unsigned int ofs = mv_scr_offset(sc_reg_in); 1086 unsigned int ofs = mv_scr_offset(sc_reg_in);
1087 1087
1088 if (ofs != 0xffffffffU) { 1088 if (ofs != 0xffffffffU) {
1089 writelfl(val, mv_ap_base(ap) + ofs); 1089 writelfl(val, mv_ap_base(link->ap) + ofs);
1090 return 0; 1090 return 0;
1091 } else 1091 } else
1092 return -EINVAL; 1092 return -EINVAL;
@@ -2251,11 +2251,11 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2251 return ofs; 2251 return ofs;
2252} 2252}
2253 2253
2254static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val) 2254static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
2255{ 2255{
2256 struct mv_host_priv *hpriv = ap->host->private_data; 2256 struct mv_host_priv *hpriv = link->ap->host->private_data;
2257 void __iomem *mmio = hpriv->base; 2257 void __iomem *mmio = hpriv->base;
2258 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2258 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2259 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2259 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2260 2260
2261 if (ofs != 0xffffffffU) { 2261 if (ofs != 0xffffffffU) {
@@ -2265,11 +2265,11 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
2265 return -EINVAL; 2265 return -EINVAL;
2266} 2266}
2267 2267
2268static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 2268static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
2269{ 2269{
2270 struct mv_host_priv *hpriv = ap->host->private_data; 2270 struct mv_host_priv *hpriv = link->ap->host->private_data;
2271 void __iomem *mmio = hpriv->base; 2271 void __iomem *mmio = hpriv->base;
2272 void __iomem *addr = mv5_phy_base(mmio, ap->port_no); 2272 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2273 unsigned int ofs = mv5_scr_offset(sc_reg_in); 2273 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2274 2274
2275 if (ofs != 0xffffffffU) { 2275 if (ofs != 0xffffffffU) {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 14601dc05e41..fae3841de0d8 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -302,8 +302,8 @@ static void nv_ck804_host_stop(struct ata_host *host);
302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); 303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); 304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 307
308static void nv_nf2_freeze(struct ata_port *ap); 308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap); 309static void nv_nf2_thaw(struct ata_port *ap);
@@ -1511,21 +1511,21 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1511 return ret; 1511 return ret;
1512} 1512}
1513 1513
1514static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 1514static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1515{ 1515{
1516 if (sc_reg > SCR_CONTROL) 1516 if (sc_reg > SCR_CONTROL)
1517 return -EINVAL; 1517 return -EINVAL;
1518 1518
1519 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4)); 1519 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1520 return 0; 1520 return 0;
1521} 1521}
1522 1522
1523static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 1523static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1524{ 1524{
1525 if (sc_reg > SCR_CONTROL) 1525 if (sc_reg > SCR_CONTROL)
1526 return -EINVAL; 1526 return -EINVAL;
1527 1527
1528 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 1528 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1529 return 0; 1529 return 0;
1530} 1530}
1531 1531
@@ -2218,9 +2218,9 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2218 if (!pp->qc_active) 2218 if (!pp->qc_active)
2219 return; 2219 return;
2220 2220
2221 if (ap->ops->scr_read(ap, SCR_ERROR, &serror)) 2221 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2222 return; 2222 return;
2223 ap->ops->scr_write(ap, SCR_ERROR, serror); 2223 ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2224 2224
2225 if (ata_stat & ATA_ERR) { 2225 if (ata_stat & ATA_ERR) {
2226 ata_ehi_clear_desc(ehi); 2226 ata_ehi_clear_desc(ehi);
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 030665ba76b7..750d8cdc00cd 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -137,8 +137,8 @@ struct pdc_port_priv {
137 dma_addr_t pkt_dma; 137 dma_addr_t pkt_dma;
138}; 138};
139 139
140static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 140static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
141static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 141static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
142static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 142static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
143static int pdc_common_port_start(struct ata_port *ap); 143static int pdc_common_port_start(struct ata_port *ap);
144static int pdc_sata_port_start(struct ata_port *ap); 144static int pdc_sata_port_start(struct ata_port *ap);
@@ -386,19 +386,21 @@ static int pdc_sata_cable_detect(struct ata_port *ap)
386 return ATA_CBL_SATA; 386 return ATA_CBL_SATA;
387} 387}
388 388
389static int pdc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 389static int pdc_sata_scr_read(struct ata_link *link,
390 unsigned int sc_reg, u32 *val)
390{ 391{
391 if (sc_reg > SCR_CONTROL) 392 if (sc_reg > SCR_CONTROL)
392 return -EINVAL; 393 return -EINVAL;
393 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); 394 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
394 return 0; 395 return 0;
395} 396}
396 397
397static int pdc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 398static int pdc_sata_scr_write(struct ata_link *link,
399 unsigned int sc_reg, u32 val)
398{ 400{
399 if (sc_reg > SCR_CONTROL) 401 if (sc_reg > SCR_CONTROL)
400 return -EINVAL; 402 return -EINVAL;
401 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 403 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
402 return 0; 404 return 0;
403} 405}
404 406
@@ -731,7 +733,7 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
731 if (sata_scr_valid(&ap->link)) { 733 if (sata_scr_valid(&ap->link)) {
732 u32 serror; 734 u32 serror;
733 735
734 pdc_sata_scr_read(ap, SCR_ERROR, &serror); 736 pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
735 ehi->serror |= serror; 737 ehi->serror |= serror;
736 } 738 }
737 739
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 1600107047cf..a000c86ac859 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -111,8 +111,8 @@ struct qs_port_priv {
111 qs_state_t state; 111 qs_state_t state;
112}; 112};
113 113
114static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 114static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
115static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 115static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
116static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 116static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
117static int qs_port_start(struct ata_port *ap); 117static int qs_port_start(struct ata_port *ap);
118static void qs_host_stop(struct ata_host *host); 118static void qs_host_stop(struct ata_host *host);
@@ -242,11 +242,11 @@ static int qs_prereset(struct ata_link *link, unsigned long deadline)
242 return ata_sff_prereset(link, deadline); 242 return ata_sff_prereset(link, deadline);
243} 243}
244 244
245static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 245static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
246{ 246{
247 if (sc_reg > SCR_CONTROL) 247 if (sc_reg > SCR_CONTROL)
248 return -EINVAL; 248 return -EINVAL;
249 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 8)); 249 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
250 return 0; 250 return 0;
251} 251}
252 252
@@ -256,11 +256,11 @@ static void qs_error_handler(struct ata_port *ap)
256 ata_std_error_handler(ap); 256 ata_std_error_handler(ap);
257} 257}
258 258
259static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 259static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
260{ 260{
261 if (sc_reg > SCR_CONTROL) 261 if (sc_reg > SCR_CONTROL)
262 return -EINVAL; 262 return -EINVAL;
263 writel(val, ap->ioaddr.scr_addr + (sc_reg * 8)); 263 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
264 return 0; 264 return 0;
265} 265}
266 266
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 88bf4212590f..031d7b7dee34 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -115,8 +115,8 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
115static int sil_pci_device_resume(struct pci_dev *pdev); 115static int sil_pci_device_resume(struct pci_dev *pdev);
116#endif 116#endif
117static void sil_dev_config(struct ata_device *dev); 117static void sil_dev_config(struct ata_device *dev);
118static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 118static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
119static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 119static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
120static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); 120static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
121static void sil_freeze(struct ata_port *ap); 121static void sil_freeze(struct ata_port *ap);
122static void sil_thaw(struct ata_port *ap); 122static void sil_thaw(struct ata_port *ap);
@@ -317,9 +317,9 @@ static inline void __iomem *sil_scr_addr(struct ata_port *ap,
317 return NULL; 317 return NULL;
318} 318}
319 319
320static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 320static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
321{ 321{
322 void __iomem *mmio = sil_scr_addr(ap, sc_reg); 322 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
323 323
324 if (mmio) { 324 if (mmio) {
325 *val = readl(mmio); 325 *val = readl(mmio);
@@ -328,9 +328,9 @@ static int sil_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
328 return -EINVAL; 328 return -EINVAL;
329} 329}
330 330
331static int sil_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 331static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
332{ 332{
333 void __iomem *mmio = sil_scr_addr(ap, sc_reg); 333 void __iomem *mmio = sil_scr_addr(link->ap, sc_reg);
334 334
335 if (mmio) { 335 if (mmio) {
336 writel(val, mmio); 336 writel(val, mmio);
@@ -352,8 +352,8 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
352 * controllers continue to assert IRQ as long as 352 * controllers continue to assert IRQ as long as
353 * SError bits are pending. Clear SError immediately. 353 * SError bits are pending. Clear SError immediately.
354 */ 354 */
355 sil_scr_read(ap, SCR_ERROR, &serror); 355 sil_scr_read(&ap->link, SCR_ERROR, &serror);
356 sil_scr_write(ap, SCR_ERROR, serror); 356 sil_scr_write(&ap->link, SCR_ERROR, serror);
357 357
358 /* Sometimes spurious interrupts occur, double check 358 /* Sometimes spurious interrupts occur, double check
359 * it's PHYRDY CHG. 359 * it's PHYRDY CHG.
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 84ffcc26a74b..4621807a1a6a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -340,8 +340,8 @@ struct sil24_port_priv {
340}; 340};
341 341
342static void sil24_dev_config(struct ata_device *dev); 342static void sil24_dev_config(struct ata_device *dev);
343static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val); 343static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
344static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val); 344static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
345static int sil24_qc_defer(struct ata_queued_cmd *qc); 345static int sil24_qc_defer(struct ata_queued_cmd *qc);
346static void sil24_qc_prep(struct ata_queued_cmd *qc); 346static void sil24_qc_prep(struct ata_queued_cmd *qc);
347static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 347static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
@@ -504,9 +504,9 @@ static int sil24_scr_map[] = {
504 [SCR_ACTIVE] = 3, 504 [SCR_ACTIVE] = 3,
505}; 505};
506 506
507static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) 507static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
508{ 508{
509 void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL; 509 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
510 510
511 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 511 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
512 void __iomem *addr; 512 void __iomem *addr;
@@ -517,9 +517,9 @@ static int sil24_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
517 return -EINVAL; 517 return -EINVAL;
518} 518}
519 519
520static int sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) 520static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
521{ 521{
522 void __iomem *scr_addr = sil24_port_base(ap) + PORT_SCONTROL; 522 void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;
523 523
524 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 524 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
525 void __iomem *addr; 525 void __iomem *addr;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 1010b3069bd5..9c43b4e7c4a6 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -64,8 +64,8 @@ enum {
64}; 64};
65 65
66static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 66static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
67static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 67static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
68static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 68static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
69 69
70static const struct pci_device_id sis_pci_tbl[] = { 70static const struct pci_device_id sis_pci_tbl[] = {
71 { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */ 71 { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
@@ -134,10 +134,11 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
134 return addr; 134 return addr;
135} 135}
136 136
137static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 137static u32 sis_scr_cfg_read(struct ata_link *link,
138 unsigned int sc_reg, u32 *val)
138{ 139{
139 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 140 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
140 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); 141 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
141 u32 val2 = 0; 142 u32 val2 = 0;
142 u8 pmr; 143 u8 pmr;
143 144
@@ -158,10 +159,11 @@ static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
158 return 0; 159 return 0;
159} 160}
160 161
161static int sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 162static int sis_scr_cfg_write(struct ata_link *link,
163 unsigned int sc_reg, u32 val)
162{ 164{
163 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 165 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
164 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); 166 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
165 u8 pmr; 167 u8 pmr;
166 168
167 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ 169 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@@ -178,8 +180,9 @@ static int sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
178 return 0; 180 return 0;
179} 181}
180 182
181static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 183static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
182{ 184{
185 struct ata_port *ap = link->ap;
183 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 186 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
184 u8 pmr; 187 u8 pmr;
185 188
@@ -187,7 +190,7 @@ static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
187 return -EINVAL; 190 return -EINVAL;
188 191
189 if (ap->flags & SIS_FLAG_CFGSCR) 192 if (ap->flags & SIS_FLAG_CFGSCR)
190 return sis_scr_cfg_read(ap, sc_reg, val); 193 return sis_scr_cfg_read(link, sc_reg, val);
191 194
192 pci_read_config_byte(pdev, SIS_PMR, &pmr); 195 pci_read_config_byte(pdev, SIS_PMR, &pmr);
193 196
@@ -202,8 +205,9 @@ static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
202 return 0; 205 return 0;
203} 206}
204 207
205static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 208static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
206{ 209{
210 struct ata_port *ap = link->ap;
207 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 211 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
208 u8 pmr; 212 u8 pmr;
209 213
@@ -213,7 +217,7 @@ static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
213 pci_read_config_byte(pdev, SIS_PMR, &pmr); 217 pci_read_config_byte(pdev, SIS_PMR, &pmr);
214 218
215 if (ap->flags & SIS_FLAG_CFGSCR) 219 if (ap->flags & SIS_FLAG_CFGSCR)
216 return sis_scr_cfg_write(ap, sc_reg, val); 220 return sis_scr_cfg_write(link, sc_reg, val);
217 else { 221 else {
218 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 222 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
219 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || 223 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index fb13b82aacba..609d147813ae 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -123,20 +123,22 @@ static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
123 } 123 }
124} 124}
125 125
126static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 126static int k2_sata_scr_read(struct ata_link *link,
127 unsigned int sc_reg, u32 *val)
127{ 128{
128 if (sc_reg > SCR_CONTROL) 129 if (sc_reg > SCR_CONTROL)
129 return -EINVAL; 130 return -EINVAL;
130 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); 131 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
131 return 0; 132 return 0;
132} 133}
133 134
134 135
135static int k2_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 136static int k2_sata_scr_write(struct ata_link *link,
137 unsigned int sc_reg, u32 val)
136{ 138{
137 if (sc_reg > SCR_CONTROL) 139 if (sc_reg > SCR_CONTROL)
138 return -EINVAL; 140 return -EINVAL;
139 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 141 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
140 return 0; 142 return 0;
141} 143}
142 144
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index db529b849948..019575bb3e08 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -57,8 +57,8 @@ struct uli_priv {
57}; 57};
58 58
59static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 59static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
60static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 60static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
61static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 61static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
62 62
63static const struct pci_device_id uli_pci_tbl[] = { 63static const struct pci_device_id uli_pci_tbl[] = {
64 { PCI_VDEVICE(AL, 0x5289), uli_5289 }, 64 { PCI_VDEVICE(AL, 0x5289), uli_5289 },
@@ -107,39 +107,39 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
107 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); 107 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
108} 108}
109 109
110static u32 uli_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg) 110static u32 uli_scr_cfg_read(struct ata_link *link, unsigned int sc_reg)
111{ 111{
112 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 112 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
113 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); 113 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg);
114 u32 val; 114 u32 val;
115 115
116 pci_read_config_dword(pdev, cfg_addr, &val); 116 pci_read_config_dword(pdev, cfg_addr, &val);
117 return val; 117 return val;
118} 118}
119 119
120static void uli_scr_cfg_write(struct ata_port *ap, unsigned int scr, u32 val) 120static void uli_scr_cfg_write(struct ata_link *link, unsigned int scr, u32 val)
121{ 121{
122 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 122 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
123 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr); 123 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, scr);
124 124
125 pci_write_config_dword(pdev, cfg_addr, val); 125 pci_write_config_dword(pdev, cfg_addr, val);
126} 126}
127 127
128static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 128static int uli_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
129{ 129{
130 if (sc_reg > SCR_CONTROL) 130 if (sc_reg > SCR_CONTROL)
131 return -EINVAL; 131 return -EINVAL;
132 132
133 *val = uli_scr_cfg_read(ap, sc_reg); 133 *val = uli_scr_cfg_read(link, sc_reg);
134 return 0; 134 return 0;
135} 135}
136 136
137static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 137static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
138{ 138{
139 if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0 139 if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
140 return -EINVAL; 140 return -EINVAL;
141 141
142 uli_scr_cfg_write(ap, sc_reg, val); 142 uli_scr_cfg_write(link, sc_reg, val);
143 return 0; 143 return 0;
144} 144}
145 145
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 96deeb354e16..1cfa74535d91 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -68,8 +68,8 @@ enum {
68}; 68};
69 69
70static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 70static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
71static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 71static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
72static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 72static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
73static void svia_noop_freeze(struct ata_port *ap); 73static void svia_noop_freeze(struct ata_port *ap);
74static int vt6420_prereset(struct ata_link *link, unsigned long deadline); 74static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
75static int vt6421_pata_cable_detect(struct ata_port *ap); 75static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -152,19 +152,19 @@ MODULE_LICENSE("GPL");
152MODULE_DEVICE_TABLE(pci, svia_pci_tbl); 152MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
153MODULE_VERSION(DRV_VERSION); 153MODULE_VERSION(DRV_VERSION);
154 154
155static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 155static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
156{ 156{
157 if (sc_reg > SCR_CONTROL) 157 if (sc_reg > SCR_CONTROL)
158 return -EINVAL; 158 return -EINVAL;
159 *val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg)); 159 *val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
160 return 0; 160 return 0;
161} 161}
162 162
163static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 163static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
164{ 164{
165 if (sc_reg > SCR_CONTROL) 165 if (sc_reg > SCR_CONTROL)
166 return -EINVAL; 166 return -EINVAL;
167 iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg)); 167 iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
168 return 0; 168 return 0;
169} 169}
170 170
@@ -210,20 +210,20 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
210 goto skip_scr; 210 goto skip_scr;
211 211
212 /* Resume phy. This is the old SATA resume sequence */ 212 /* Resume phy. This is the old SATA resume sequence */
213 svia_scr_write(ap, SCR_CONTROL, 0x300); 213 svia_scr_write(link, SCR_CONTROL, 0x300);
214 svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */ 214 svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */
215 215
216 /* wait for phy to become ready, if necessary */ 216 /* wait for phy to become ready, if necessary */
217 do { 217 do {
218 msleep(200); 218 msleep(200);
219 svia_scr_read(ap, SCR_STATUS, &sstatus); 219 svia_scr_read(link, SCR_STATUS, &sstatus);
220 if ((sstatus & 0xf) != 1) 220 if ((sstatus & 0xf) != 1)
221 break; 221 break;
222 } while (time_before(jiffies, timeout)); 222 } while (time_before(jiffies, timeout));
223 223
224 /* open code sata_print_link_status() */ 224 /* open code sata_print_link_status() */
225 svia_scr_read(ap, SCR_STATUS, &sstatus); 225 svia_scr_read(link, SCR_STATUS, &sstatus);
226 svia_scr_read(ap, SCR_CONTROL, &scontrol); 226 svia_scr_read(link, SCR_CONTROL, &scontrol);
227 227
228 online = (sstatus & 0xf) == 0x3; 228 online = (sstatus & 0xf) == 0x3;
229 229
@@ -232,7 +232,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
232 online ? "up" : "down", sstatus, scontrol); 232 online ? "up" : "down", sstatus, scontrol);
233 233
234 /* SStatus is read one more time */ 234 /* SStatus is read one more time */
235 svia_scr_read(ap, SCR_STATUS, &sstatus); 235 svia_scr_read(link, SCR_STATUS, &sstatus);
236 236
237 if (!online) { 237 if (!online) {
238 /* tell EH to bail */ 238 /* tell EH to bail */
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index f3d635c0a2e9..c57cdff9e6bd 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -98,20 +98,22 @@ enum {
98 VSC_SATA_INT_PHY_CHANGE), 98 VSC_SATA_INT_PHY_CHANGE),
99}; 99};
100 100
101static int vsc_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 101static int vsc_sata_scr_read(struct ata_link *link,
102 unsigned int sc_reg, u32 *val)
102{ 103{
103 if (sc_reg > SCR_CONTROL) 104 if (sc_reg > SCR_CONTROL)
104 return -EINVAL; 105 return -EINVAL;
105 *val = readl(ap->ioaddr.scr_addr + (sc_reg * 4)); 106 *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
106 return 0; 107 return 0;
107} 108}
108 109
109 110
110static int vsc_sata_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val) 111static int vsc_sata_scr_write(struct ata_link *link,
112 unsigned int sc_reg, u32 val)
111{ 113{
112 if (sc_reg > SCR_CONTROL) 114 if (sc_reg > SCR_CONTROL)
113 return -EINVAL; 115 return -EINVAL;
114 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 116 writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
115 return 0; 117 return 0;
116} 118}
117 119
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 31dc0cd84afa..0a5f055dffba 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -54,7 +54,7 @@ struct driver_private {
54 */ 54 */
55struct class_private { 55struct class_private {
56 struct kset class_subsys; 56 struct kset class_subsys;
57 struct list_head class_devices; 57 struct klist class_devices;
58 struct list_head class_interfaces; 58 struct list_head class_interfaces;
59 struct kset class_dirs; 59 struct kset class_dirs;
60 struct mutex class_mutex; 60 struct mutex class_mutex;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index cc5e28c8885c..eb85e4312301 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -135,6 +135,20 @@ static void remove_class_attrs(struct class *cls)
135 } 135 }
136} 136}
137 137
138static void klist_class_dev_get(struct klist_node *n)
139{
140 struct device *dev = container_of(n, struct device, knode_class);
141
142 get_device(dev);
143}
144
145static void klist_class_dev_put(struct klist_node *n)
146{
147 struct device *dev = container_of(n, struct device, knode_class);
148
149 put_device(dev);
150}
151
138int __class_register(struct class *cls, struct lock_class_key *key) 152int __class_register(struct class *cls, struct lock_class_key *key)
139{ 153{
140 struct class_private *cp; 154 struct class_private *cp;
@@ -145,7 +159,7 @@ int __class_register(struct class *cls, struct lock_class_key *key)
145 cp = kzalloc(sizeof(*cp), GFP_KERNEL); 159 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
146 if (!cp) 160 if (!cp)
147 return -ENOMEM; 161 return -ENOMEM;
148 INIT_LIST_HEAD(&cp->class_devices); 162 klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
149 INIT_LIST_HEAD(&cp->class_interfaces); 163 INIT_LIST_HEAD(&cp->class_interfaces);
150 kset_init(&cp->class_dirs); 164 kset_init(&cp->class_dirs);
151 __mutex_init(&cp->class_mutex, "struct class mutex", key); 165 __mutex_init(&cp->class_mutex, "struct class mutex", key);
@@ -269,6 +283,71 @@ char *make_class_name(const char *name, struct kobject *kobj)
269#endif 283#endif
270 284
271/** 285/**
286 * class_dev_iter_init - initialize class device iterator
287 * @iter: class iterator to initialize
 288 * @class: the class to iterate over
289 * @start: the device to start iterating from, if any
290 * @type: device_type of the devices to iterate over, NULL for all
291 *
292 * Initialize class iterator @iter such that it iterates over devices
293 * of @class. If @start is set, the list iteration will start there,
294 * otherwise if it is NULL, the iteration starts at the beginning of
295 * the list.
296 */
297void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
298 struct device *start, const struct device_type *type)
299{
300 struct klist_node *start_knode = NULL;
301
302 if (start)
303 start_knode = &start->knode_class;
304 klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
305 iter->type = type;
306}
307EXPORT_SYMBOL_GPL(class_dev_iter_init);
308
309/**
310 * class_dev_iter_next - iterate to the next device
311 * @iter: class iterator to proceed
312 *
313 * Proceed @iter to the next device and return it. Returns NULL if
314 * iteration is complete.
315 *
 316 * The returned device is referenced and won't be released until the
 317 * iterator is advanced to the next device or exited. The caller is
 318 * free to do whatever it wants with the device, including
 319 * calling back into class code.
320 */
321struct device *class_dev_iter_next(struct class_dev_iter *iter)
322{
323 struct klist_node *knode;
324 struct device *dev;
325
326 while (1) {
327 knode = klist_next(&iter->ki);
328 if (!knode)
329 return NULL;
330 dev = container_of(knode, struct device, knode_class);
331 if (!iter->type || iter->type == dev->type)
332 return dev;
333 }
334}
335EXPORT_SYMBOL_GPL(class_dev_iter_next);
336
337/**
338 * class_dev_iter_exit - finish iteration
339 * @iter: class iterator to finish
340 *
341 * Finish an iteration. Always call this function after iteration is
 342 * complete, whether the iteration ran to the end or not.
343 */
344void class_dev_iter_exit(struct class_dev_iter *iter)
345{
346 klist_iter_exit(&iter->ki);
347}
348EXPORT_SYMBOL_GPL(class_dev_iter_exit);
349
350/**
272 * class_for_each_device - device iterator 351 * class_for_each_device - device iterator
273 * @class: the class we're iterating 352 * @class: the class we're iterating
274 * @start: the device to start with in the list, if any. 353 * @start: the device to start with in the list, if any.
@@ -283,13 +362,13 @@ char *make_class_name(const char *name, struct kobject *kobj)
283 * We check the return of @fn each time. If it returns anything 362 * We check the return of @fn each time. If it returns anything
284 * other than 0, we break out and return that value. 363 * other than 0, we break out and return that value.
285 * 364 *
286 * Note, we hold class->class_mutex in this function, so it can not be 365 * @fn is allowed to do anything including calling back into class
287 * re-acquired in @fn, otherwise it will self-deadlocking. For 366 * code. There's no locking restriction.
288 * example, calls to add or remove class members would be verboten.
289 */ 367 */
290int class_for_each_device(struct class *class, struct device *start, 368int class_for_each_device(struct class *class, struct device *start,
291 void *data, int (*fn)(struct device *, void *)) 369 void *data, int (*fn)(struct device *, void *))
292{ 370{
371 struct class_dev_iter iter;
293 struct device *dev; 372 struct device *dev;
294 int error = 0; 373 int error = 0;
295 374
@@ -301,20 +380,13 @@ int class_for_each_device(struct class *class, struct device *start,
301 return -EINVAL; 380 return -EINVAL;
302 } 381 }
303 382
304 mutex_lock(&class->p->class_mutex); 383 class_dev_iter_init(&iter, class, start, NULL);
305 list_for_each_entry(dev, &class->p->class_devices, node) { 384 while ((dev = class_dev_iter_next(&iter))) {
306 if (start) {
307 if (start == dev)
308 start = NULL;
309 continue;
310 }
311 dev = get_device(dev);
312 error = fn(dev, data); 385 error = fn(dev, data);
313 put_device(dev);
314 if (error) 386 if (error)
315 break; 387 break;
316 } 388 }
317 mutex_unlock(&class->p->class_mutex); 389 class_dev_iter_exit(&iter);
318 390
319 return error; 391 return error;
320} 392}
@@ -337,16 +409,15 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
337 * 409 *
338 * Note, you will need to drop the reference with put_device() after use. 410 * Note, you will need to drop the reference with put_device() after use.
339 * 411 *
340 * We hold class->class_mutex in this function, so it can not be 412 * @fn is allowed to do anything including calling back into class
341 * re-acquired in @match, otherwise it will self-deadlocking. For 413 * code. There's no locking restriction.
342 * example, calls to add or remove class members would be verboten.
343 */ 414 */
344struct device *class_find_device(struct class *class, struct device *start, 415struct device *class_find_device(struct class *class, struct device *start,
345 void *data, 416 void *data,
346 int (*match)(struct device *, void *)) 417 int (*match)(struct device *, void *))
347{ 418{
419 struct class_dev_iter iter;
348 struct device *dev; 420 struct device *dev;
349 int found = 0;
350 421
351 if (!class) 422 if (!class)
352 return NULL; 423 return NULL;
@@ -356,29 +427,23 @@ struct device *class_find_device(struct class *class, struct device *start,
356 return NULL; 427 return NULL;
357 } 428 }
358 429
359 mutex_lock(&class->p->class_mutex); 430 class_dev_iter_init(&iter, class, start, NULL);
360 list_for_each_entry(dev, &class->p->class_devices, node) { 431 while ((dev = class_dev_iter_next(&iter))) {
361 if (start) {
362 if (start == dev)
363 start = NULL;
364 continue;
365 }
366 dev = get_device(dev);
367 if (match(dev, data)) { 432 if (match(dev, data)) {
368 found = 1; 433 get_device(dev);
369 break; 434 break;
370 } else 435 }
371 put_device(dev);
372 } 436 }
373 mutex_unlock(&class->p->class_mutex); 437 class_dev_iter_exit(&iter);
374 438
375 return found ? dev : NULL; 439 return dev;
376} 440}
377EXPORT_SYMBOL_GPL(class_find_device); 441EXPORT_SYMBOL_GPL(class_find_device);
378 442
379int class_interface_register(struct class_interface *class_intf) 443int class_interface_register(struct class_interface *class_intf)
380{ 444{
381 struct class *parent; 445 struct class *parent;
446 struct class_dev_iter iter;
382 struct device *dev; 447 struct device *dev;
383 448
384 if (!class_intf || !class_intf->class) 449 if (!class_intf || !class_intf->class)
@@ -391,8 +456,10 @@ int class_interface_register(struct class_interface *class_intf)
391 mutex_lock(&parent->p->class_mutex); 456 mutex_lock(&parent->p->class_mutex);
392 list_add_tail(&class_intf->node, &parent->p->class_interfaces); 457 list_add_tail(&class_intf->node, &parent->p->class_interfaces);
393 if (class_intf->add_dev) { 458 if (class_intf->add_dev) {
394 list_for_each_entry(dev, &parent->p->class_devices, node) 459 class_dev_iter_init(&iter, parent, NULL, NULL);
460 while ((dev = class_dev_iter_next(&iter)))
395 class_intf->add_dev(dev, class_intf); 461 class_intf->add_dev(dev, class_intf);
462 class_dev_iter_exit(&iter);
396 } 463 }
397 mutex_unlock(&parent->p->class_mutex); 464 mutex_unlock(&parent->p->class_mutex);
398 465
@@ -402,6 +469,7 @@ int class_interface_register(struct class_interface *class_intf)
402void class_interface_unregister(struct class_interface *class_intf) 469void class_interface_unregister(struct class_interface *class_intf)
403{ 470{
404 struct class *parent = class_intf->class; 471 struct class *parent = class_intf->class;
472 struct class_dev_iter iter;
405 struct device *dev; 473 struct device *dev;
406 474
407 if (!parent) 475 if (!parent)
@@ -410,8 +478,10 @@ void class_interface_unregister(struct class_interface *class_intf)
410 mutex_lock(&parent->p->class_mutex); 478 mutex_lock(&parent->p->class_mutex);
411 list_del_init(&class_intf->node); 479 list_del_init(&class_intf->node);
412 if (class_intf->remove_dev) { 480 if (class_intf->remove_dev) {
413 list_for_each_entry(dev, &parent->p->class_devices, node) 481 class_dev_iter_init(&iter, parent, NULL, NULL);
482 while ((dev = class_dev_iter_next(&iter)))
414 class_intf->remove_dev(dev, class_intf); 483 class_intf->remove_dev(dev, class_intf);
484 class_dev_iter_exit(&iter);
415 } 485 }
416 mutex_unlock(&parent->p->class_mutex); 486 mutex_unlock(&parent->p->class_mutex);
417 487
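The class.c changes above replace open-coded walks of the class device list with a klist-backed iterator; class_for_each_device() and class_find_device() become thin wrappers around it. A minimal sketch of a caller using the iterator directly (the counting helper is hypothetical):

#include <linux/device.h>

/* Count the devices registered in a class. Each device returned by the
 * iterator stays referenced until the next class_dev_iter_next() call or
 * class_dev_iter_exit(). */
static int example_count_class_devices(struct class *class)
{
	struct class_dev_iter iter;
	struct device *dev;
	int count = 0;

	class_dev_iter_init(&iter, class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		count++;
	class_dev_iter_exit(&iter);

	return count;
}
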
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d021c98605b3..b98cb1416a2d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -536,7 +536,6 @@ void device_initialize(struct device *dev)
536 klist_init(&dev->klist_children, klist_children_get, 536 klist_init(&dev->klist_children, klist_children_get,
537 klist_children_put); 537 klist_children_put);
538 INIT_LIST_HEAD(&dev->dma_pools); 538 INIT_LIST_HEAD(&dev->dma_pools);
539 INIT_LIST_HEAD(&dev->node);
540 init_MUTEX(&dev->sem); 539 init_MUTEX(&dev->sem);
541 spin_lock_init(&dev->devres_lock); 540 spin_lock_init(&dev->devres_lock);
542 INIT_LIST_HEAD(&dev->devres_head); 541 INIT_LIST_HEAD(&dev->devres_head);
@@ -916,7 +915,8 @@ int device_add(struct device *dev)
916 if (dev->class) { 915 if (dev->class) {
917 mutex_lock(&dev->class->p->class_mutex); 916 mutex_lock(&dev->class->p->class_mutex);
918 /* tie the class to the device */ 917 /* tie the class to the device */
919 list_add_tail(&dev->node, &dev->class->p->class_devices); 918 klist_add_tail(&dev->knode_class,
919 &dev->class->p->class_devices);
920 920
921 /* notify any interfaces that the device is here */ 921 /* notify any interfaces that the device is here */
922 list_for_each_entry(class_intf, 922 list_for_each_entry(class_intf,
@@ -1032,7 +1032,7 @@ void device_del(struct device *dev)
1032 if (class_intf->remove_dev) 1032 if (class_intf->remove_dev)
1033 class_intf->remove_dev(dev, class_intf); 1033 class_intf->remove_dev(dev, class_intf);
1034 /* remove the device from the class list */ 1034 /* remove the device from the class list */
1035 list_del_init(&dev->node); 1035 klist_del(&dev->knode_class);
1036 mutex_unlock(&dev->class->p->class_mutex); 1036 mutex_unlock(&dev->class->p->class_mutex);
1037 } 1037 }
1038 device_remove_file(dev, &uevent_attr); 1038 device_remove_file(dev, &uevent_attr);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 0c39782b2660..aa69556c3485 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -109,12 +109,12 @@ static const struct attribute_group attr_group = {
109static int 109static int
110aoedisk_add_sysfs(struct aoedev *d) 110aoedisk_add_sysfs(struct aoedev *d)
111{ 111{
112 return sysfs_create_group(&d->gd->dev.kobj, &attr_group); 112 return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
113} 113}
114void 114void
115aoedisk_rm_sysfs(struct aoedev *d) 115aoedisk_rm_sysfs(struct aoedev *d)
116{ 116{
117 sysfs_remove_group(&d->gd->dev.kobj, &attr_group); 117 sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
118} 118}
119 119
120static int 120static int
@@ -276,7 +276,7 @@ aoeblk_gdalloc(void *vp)
276 gd->first_minor = d->sysminor * AOE_PARTITIONS; 276 gd->first_minor = d->sysminor * AOE_PARTITIONS;
277 gd->fops = &aoe_bdops; 277 gd->fops = &aoe_bdops;
278 gd->private_data = d; 278 gd->private_data = d;
279 gd->capacity = d->ssize; 279 set_capacity(gd, d->ssize);
280 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", 280 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
281 d->aoemajor, d->aoeminor); 281 d->aoemajor, d->aoeminor);
282 282
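The aoeblk hunks above stop poking at struct gendisk fields (gd->dev.kobj, gd->capacity) and go through the disk_to_dev() and set_capacity() accessors instead. A minimal sketch of the accessor style, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/genhd.h>

/* Resize a gendisk and log the change via its embedded struct device. */
static void example_resize_disk(struct gendisk *gd, sector_t sectors)
{
	set_capacity(gd, sectors);
	dev_info(disk_to_dev(gd), "capacity set to %llu sectors\n",
		 (unsigned long long)sectors);
}
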
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d06..961d29a53cab 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -645,7 +645,7 @@ aoecmd_sleepwork(struct work_struct *work)
645 unsigned long flags; 645 unsigned long flags;
646 u64 ssize; 646 u64 ssize;
647 647
648 ssize = d->gd->capacity; 648 ssize = get_capacity(d->gd);
649 bd = bdget_disk(d->gd, 0); 649 bd = bdget_disk(d->gd, 0);
650 650
651 if (bd) { 651 if (bd) {
@@ -707,7 +707,7 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
707 if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) 707 if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
708 return; 708 return;
709 if (d->gd != NULL) { 709 if (d->gd != NULL) {
710 d->gd->capacity = ssize; 710 set_capacity(d->gd, ssize);
711 d->flags |= DEVFL_NEWSIZE; 711 d->flags |= DEVFL_NEWSIZE;
712 } else 712 } else
713 d->flags |= DEVFL_GDALLOC; 713 d->flags |= DEVFL_GDALLOC;
@@ -756,12 +756,17 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
756 unsigned long n_sect = bio->bi_size >> 9; 756 unsigned long n_sect = bio->bi_size >> 9;
757 const int rw = bio_data_dir(bio); 757 const int rw = bio_data_dir(bio);
758 struct hd_struct *part; 758 struct hd_struct *part;
759 int cpu;
759 760
760 part = get_part(disk, sector); 761 cpu = part_stat_lock();
761 all_stat_inc(disk, part, ios[rw], sector); 762 part = disk_map_sector_rcu(disk, sector);
762 all_stat_add(disk, part, ticks[rw], duration, sector); 763
763 all_stat_add(disk, part, sectors[rw], n_sect, sector); 764 part_stat_inc(cpu, part, ios[rw]);
764 all_stat_add(disk, part, io_ticks, duration, sector); 765 part_stat_add(cpu, part, ticks[rw], duration);
766 part_stat_add(cpu, part, sectors[rw], n_sect);
767 part_stat_add(cpu, part, io_ticks, duration);
768
769 part_stat_unlock();
765} 770}
766 771
767void 772void
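The diskstats() hunk above moves aoe to the per-CPU partition statistics API: pin a CPU with part_stat_lock(), look the partition up under RCU with disk_map_sector_rcu(), bump the counters, then unlock. A minimal sketch of the same sequence, with hypothetical names:

#include <linux/bio.h>
#include <linux/genhd.h>

/* Account one completed bio against the partition that covers @sector. */
static void example_account_bio(struct gendisk *disk, struct bio *bio,
				unsigned long duration, sector_t sector)
{
	const int rw = bio_data_dir(bio);
	unsigned long n_sect = bio->bi_size >> 9;
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();		/* per-CPU counters, RCU read side */
	part = disk_map_sector_rcu(disk, sector);

	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, ticks[rw], duration);
	part_stat_add(cpu, part, sectors[rw], n_sect);
	part_stat_add(cpu, part, io_ticks, duration);

	part_stat_unlock();
}
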
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index a1d813ab0d6b..6a8038d115b5 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -91,7 +91,7 @@ aoedev_downdev(struct aoedev *d)
91 } 91 }
92 92
93 if (d->gd) 93 if (d->gd)
94 d->gd->capacity = 0; 94 set_capacity(d->gd, 0);
95 95
96 d->flags &= ~DEVFL_UP; 96 d->flags &= ~DEVFL_UP;
97} 97}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b73116ef9236..1e1f9153000c 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3460,8 +3460,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3460 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not"); 3460 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3461 3461
3462 hba[i]->cmd_pool_bits = 3462 hba[i]->cmd_pool_bits =
3463 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG - 3463 kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3464 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); 3464 * sizeof(unsigned long), GFP_KERNEL);
3465 hba[i]->cmd_pool = (CommandList_struct *) 3465 hba[i]->cmd_pool = (CommandList_struct *)
3466 pci_alloc_consistent(hba[i]->pdev, 3466 pci_alloc_consistent(hba[i]->pdev,
3467 hba[i]->nr_cmds * sizeof(CommandList_struct), 3467 hba[i]->nr_cmds * sizeof(CommandList_struct),
@@ -3493,8 +3493,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3493 /* command and error info recs zeroed out before 3493 /* command and error info recs zeroed out before
3494 they are used */ 3494 they are used */
3495 memset(hba[i]->cmd_pool_bits, 0, 3495 memset(hba[i]->cmd_pool_bits, 0,
3496 ((hba[i]->nr_cmds + BITS_PER_LONG - 3496 DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
3497 1) / BITS_PER_LONG) * sizeof(unsigned long)); 3497 * sizeof(unsigned long));
3498 3498
3499 hba[i]->num_luns = 0; 3499 hba[i]->num_luns = 0;
3500 hba[i]->highest_lun = -1; 3500 hba[i]->highest_lun = -1;
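The cciss hunks above size the command bitmap with DIV_ROUND_UP() instead of the open-coded (nr_cmds + BITS_PER_LONG - 1) / BITS_PER_LONG; both give the number of longs needed to hold nr_cmds bits. A minimal sketch of the allocation, with a hypothetical helper:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Allocate a zeroed bitmap large enough for nr_cmds bits. */
static unsigned long *example_alloc_cmd_bitmap(int nr_cmds)
{
	return kzalloc(DIV_ROUND_UP(nr_cmds, BITS_PER_LONG) *
		       sizeof(unsigned long), GFP_KERNEL);
}
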
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e1233aabda77..a3fd87b41444 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -365,7 +365,7 @@ struct scsi2map {
365 365
366static int 366static int
367cciss_scsi_add_entry(int ctlr, int hostno, 367cciss_scsi_add_entry(int ctlr, int hostno,
368 unsigned char *scsi3addr, int devtype, 368 struct cciss_scsi_dev_t *device,
369 struct scsi2map *added, int *nadded) 369 struct scsi2map *added, int *nadded)
370{ 370{
371 /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 371 /* assumes hba[ctlr]->scsi_ctlr->lock is held */
@@ -384,12 +384,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
384 lun = 0; 384 lun = 0;
385 /* Is this device a non-zero lun of a multi-lun device */ 385 /* Is this device a non-zero lun of a multi-lun device */
386 /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */ 386 /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
387 if (scsi3addr[4] != 0) { 387 if (device->scsi3addr[4] != 0) {
388 /* Search through our list and find the device which */ 388 /* Search through our list and find the device which */
389 /* has the same 8 byte LUN address, excepting byte 4. */ 389 /* has the same 8 byte LUN address, excepting byte 4. */
390 /* Assign the same bus and target for this new LUN. */ 390 /* Assign the same bus and target for this new LUN. */
391 /* Use the logical unit number from the firmware. */ 391 /* Use the logical unit number from the firmware. */
392 memcpy(addr1, scsi3addr, 8); 392 memcpy(addr1, device->scsi3addr, 8);
393 addr1[4] = 0; 393 addr1[4] = 0;
394 for (i = 0; i < n; i++) { 394 for (i = 0; i < n; i++) {
395 sd = &ccissscsi[ctlr].dev[i]; 395 sd = &ccissscsi[ctlr].dev[i];
@@ -399,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
399 if (memcmp(addr1, addr2, 8) == 0) { 399 if (memcmp(addr1, addr2, 8) == 0) {
400 bus = sd->bus; 400 bus = sd->bus;
401 target = sd->target; 401 target = sd->target;
402 lun = scsi3addr[4]; 402 lun = device->scsi3addr[4];
403 break; 403 break;
404 } 404 }
405 } 405 }
@@ -420,8 +420,12 @@ cciss_scsi_add_entry(int ctlr, int hostno,
420 added[*nadded].lun = sd->lun; 420 added[*nadded].lun = sd->lun;
421 (*nadded)++; 421 (*nadded)++;
422 422
423 memcpy(&sd->scsi3addr[0], scsi3addr, 8); 423 memcpy(sd->scsi3addr, device->scsi3addr, 8);
424 sd->devtype = devtype; 424 memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
425 memcpy(sd->revision, device->revision, sizeof(sd->revision));
426 memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
427 sd->devtype = device->devtype;
428
425 ccissscsi[ctlr].ndevices++; 429 ccissscsi[ctlr].ndevices++;
426 430
427 /* initially, (before registering with scsi layer) we don't 431 /* initially, (before registering with scsi layer) we don't
@@ -487,6 +491,22 @@ static void fixup_botched_add(int ctlr, char *scsi3addr)
487 CPQ_TAPE_UNLOCK(ctlr, flags); 491 CPQ_TAPE_UNLOCK(ctlr, flags);
488} 492}
489 493
494static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
495 struct cciss_scsi_dev_t *dev2)
496{
497 return dev1->devtype == dev2->devtype &&
498 memcmp(dev1->scsi3addr, dev2->scsi3addr,
499 sizeof(dev1->scsi3addr)) == 0 &&
500 memcmp(dev1->device_id, dev2->device_id,
501 sizeof(dev1->device_id)) == 0 &&
502 memcmp(dev1->vendor, dev2->vendor,
503 sizeof(dev1->vendor)) == 0 &&
504 memcmp(dev1->model, dev2->model,
505 sizeof(dev1->model)) == 0 &&
506 memcmp(dev1->revision, dev2->revision,
507 sizeof(dev1->revision)) == 0;
508}
509
490static int 510static int
491adjust_cciss_scsi_table(int ctlr, int hostno, 511adjust_cciss_scsi_table(int ctlr, int hostno,
492 struct cciss_scsi_dev_t sd[], int nsds) 512 struct cciss_scsi_dev_t sd[], int nsds)
@@ -532,7 +552,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
532 for (j=0;j<nsds;j++) { 552 for (j=0;j<nsds;j++) {
533 if (SCSI3ADDR_EQ(sd[j].scsi3addr, 553 if (SCSI3ADDR_EQ(sd[j].scsi3addr,
534 csd->scsi3addr)) { 554 csd->scsi3addr)) {
535 if (sd[j].devtype == csd->devtype) 555 if (device_is_the_same(&sd[j], csd))
536 found=2; 556 found=2;
537 else 557 else
538 found=1; 558 found=1;
@@ -548,22 +568,26 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
548 cciss_scsi_remove_entry(ctlr, hostno, i, 568 cciss_scsi_remove_entry(ctlr, hostno, i,
549 removed, &nremoved); 569 removed, &nremoved);
550 /* remove ^^^, hence i not incremented */ 570 /* remove ^^^, hence i not incremented */
551 } 571 } else if (found == 1) { /* device is different in some way */
552 else if (found == 1) { /* device is different kind */
553 changes++; 572 changes++;
554 printk("cciss%d: device c%db%dt%dl%d type changed " 573 printk("cciss%d: device c%db%dt%dl%d has changed.\n",
555 "(device type now %s).\n", 574 ctlr, hostno, csd->bus, csd->target, csd->lun);
556 ctlr, hostno, csd->bus, csd->target, csd->lun,
557 scsi_device_type(csd->devtype));
558 cciss_scsi_remove_entry(ctlr, hostno, i, 575 cciss_scsi_remove_entry(ctlr, hostno, i,
559 removed, &nremoved); 576 removed, &nremoved);
560 /* remove ^^^, hence i not incremented */ 577 /* remove ^^^, hence i not incremented */
561 if (cciss_scsi_add_entry(ctlr, hostno, 578 if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
562 &sd[j].scsi3addr[0], sd[j].devtype,
563 added, &nadded) != 0) 579 added, &nadded) != 0)
564 /* we just removed one, so add can't fail. */ 580 /* we just removed one, so add can't fail. */
565 BUG(); 581 BUG();
566 csd->devtype = sd[j].devtype; 582 csd->devtype = sd[j].devtype;
583 memcpy(csd->device_id, sd[j].device_id,
584 sizeof(csd->device_id));
585 memcpy(csd->vendor, sd[j].vendor,
586 sizeof(csd->vendor));
587 memcpy(csd->model, sd[j].model,
588 sizeof(csd->model));
589 memcpy(csd->revision, sd[j].revision,
590 sizeof(csd->revision));
567 } else /* device is same as it ever was, */ 591 } else /* device is same as it ever was, */
568 i++; /* so just move along. */ 592 i++; /* so just move along. */
569 } 593 }
@@ -577,7 +601,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
577 csd = &ccissscsi[ctlr].dev[j]; 601 csd = &ccissscsi[ctlr].dev[j];
578 if (SCSI3ADDR_EQ(sd[i].scsi3addr, 602 if (SCSI3ADDR_EQ(sd[i].scsi3addr,
579 csd->scsi3addr)) { 603 csd->scsi3addr)) {
580 if (sd[i].devtype == csd->devtype) 604 if (device_is_the_same(&sd[i], csd))
581 found=2; /* found device */ 605 found=2; /* found device */
582 else 606 else
583 found=1; /* found a bug. */ 607 found=1; /* found a bug. */
@@ -586,16 +610,14 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
586 } 610 }
587 if (!found) { 611 if (!found) {
588 changes++; 612 changes++;
589 if (cciss_scsi_add_entry(ctlr, hostno, 613 if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
590
591 &sd[i].scsi3addr[0], sd[i].devtype,
592 added, &nadded) != 0) 614 added, &nadded) != 0)
593 break; 615 break;
594 } else if (found == 1) { 616 } else if (found == 1) {
595 /* should never happen... */ 617 /* should never happen... */
596 changes++; 618 changes++;
597 printk("cciss%d: device unexpectedly changed type\n", 619 printk(KERN_WARNING "cciss%d: device "
598 ctlr); 620 "unexpectedly changed\n", ctlr);
599 /* but if it does happen, we just ignore that device */ 621 /* but if it does happen, we just ignore that device */
600 } 622 }
601 } 623 }
@@ -1012,7 +1034,8 @@ cciss_scsi_interpret_error(CommandList_struct *cp)
1012 1034
1013static int 1035static int
1014cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 1036cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
1015 unsigned char *buf, unsigned char bufsize) 1037 unsigned char page, unsigned char *buf,
1038 unsigned char bufsize)
1016{ 1039{
1017 int rc; 1040 int rc;
1018 CommandList_struct *cp; 1041 CommandList_struct *cp;
@@ -1032,8 +1055,8 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
1032 ei = cp->err_info; 1055 ei = cp->err_info;
1033 1056
1034 cdb[0] = CISS_INQUIRY; 1057 cdb[0] = CISS_INQUIRY;
1035 cdb[1] = 0; 1058 cdb[1] = (page != 0);
1036 cdb[2] = 0; 1059 cdb[2] = page;
1037 cdb[3] = 0; 1060 cdb[3] = 0;
1038 cdb[4] = bufsize; 1061 cdb[4] = bufsize;
1039 cdb[5] = 0; 1062 cdb[5] = 0;
@@ -1053,6 +1076,25 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
1053 return rc; 1076 return rc;
1054} 1077}
1055 1078
1079/* Get the device id from inquiry page 0x83 */
1080static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
1081 unsigned char *device_id, int buflen)
1082{
1083 int rc;
1084 unsigned char *buf;
1085
1086 if (buflen > 16)
1087 buflen = 16;
1088 buf = kzalloc(64, GFP_KERNEL);
1089 if (!buf)
1090 return -1;
1091 rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
1092 if (rc == 0)
1093 memcpy(device_id, &buf[8], buflen);
1094 kfree(buf);
1095 return rc != 0;
1096}
1097
1056static int 1098static int
1057cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 1099cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
1058 ReportLunData_struct *buf, int bufsize) 1100 ReportLunData_struct *buf, int bufsize)
@@ -1142,25 +1184,21 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1142 ctlr_info_t *c; 1184 ctlr_info_t *c;
1143 __u32 num_luns=0; 1185 __u32 num_luns=0;
1144 unsigned char *ch; 1186 unsigned char *ch;
1145 /* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */ 1187 struct cciss_scsi_dev_t *currentsd, *this_device;
1146 struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
1147 int ncurrent=0; 1188 int ncurrent=0;
1148 int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8; 1189 int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
1149 int i; 1190 int i;
1150 1191
1151 c = (ctlr_info_t *) hba[cntl_num]; 1192 c = (ctlr_info_t *) hba[cntl_num];
1152 ld_buff = kzalloc(reportlunsize, GFP_KERNEL); 1193 ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
1153 if (ld_buff == NULL) {
1154 printk(KERN_ERR "cciss: out of memory\n");
1155 return;
1156 }
1157 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1194 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1158 if (inq_buff == NULL) { 1195 currentsd = kzalloc(sizeof(*currentsd) *
1159 printk(KERN_ERR "cciss: out of memory\n"); 1196 (CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
1160 kfree(ld_buff); 1197 if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
1161 return; 1198 printk(KERN_ERR "cciss: out of memory\n");
1199 goto out;
1162 } 1200 }
1163 1201 this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
1164 if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) { 1202 if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
1165 ch = &ld_buff->LUNListLength[0]; 1203 ch = &ld_buff->LUNListLength[0];
1166 num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8; 1204 num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
@@ -1179,23 +1217,34 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1179 1217
1180 1218
1181 /* adjust our table of devices */ 1219 /* adjust our table of devices */
1182 for(i=0; i<num_luns; i++) 1220 for (i = 0; i < num_luns; i++) {
1183 {
1184 int devtype;
1185
1186 /* for each physical lun, do an inquiry */ 1221 /* for each physical lun, do an inquiry */
1187 if (ld_buff->LUN[i][3] & 0xC0) continue; 1222 if (ld_buff->LUN[i][3] & 0xC0) continue;
1188 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); 1223 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
1189 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); 1224 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
1190 1225
1191 if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff, 1226 if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
1192 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 1227 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
1193 /* Inquiry failed (msg printed already) */ 1228 /* Inquiry failed (msg printed already) */
1194 devtype = 0; /* so we will skip this device. */ 1229 continue; /* so we will skip this device. */
1195 } else /* what kind of device is this? */ 1230
1196 devtype = (inq_buff[0] & 0x1f); 1231 this_device->devtype = (inq_buff[0] & 0x1f);
1197 1232 this_device->bus = -1;
1198 switch (devtype) 1233 this_device->target = -1;
1234 this_device->lun = -1;
1235 memcpy(this_device->scsi3addr, scsi3addr, 8);
1236 memcpy(this_device->vendor, &inq_buff[8],
1237 sizeof(this_device->vendor));
1238 memcpy(this_device->model, &inq_buff[16],
1239 sizeof(this_device->model));
1240 memcpy(this_device->revision, &inq_buff[32],
1241 sizeof(this_device->revision));
1242 memset(this_device->device_id, 0,
1243 sizeof(this_device->device_id));
1244 cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
1245 this_device->device_id, sizeof(this_device->device_id));
1246
1247 switch (this_device->devtype)
1199 { 1248 {
1200 case 0x05: /* CD-ROM */ { 1249 case 0x05: /* CD-ROM */ {
1201 1250
@@ -1220,15 +1269,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1220 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 1269 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
1221 printk(KERN_INFO "cciss%d: %s ignored, " 1270 printk(KERN_INFO "cciss%d: %s ignored, "
1222 "too many devices.\n", cntl_num, 1271 "too many devices.\n", cntl_num,
1223 scsi_device_type(devtype)); 1272 scsi_device_type(this_device->devtype));
1224 break; 1273 break;
1225 } 1274 }
1226 memcpy(&currentsd[ncurrent].scsi3addr[0], 1275 currentsd[ncurrent] = *this_device;
1227 &scsi3addr[0], 8);
1228 currentsd[ncurrent].devtype = devtype;
1229 currentsd[ncurrent].bus = -1;
1230 currentsd[ncurrent].target = -1;
1231 currentsd[ncurrent].lun = -1;
1232 ncurrent++; 1276 ncurrent++;
1233 break; 1277 break;
1234 default: 1278 default:
@@ -1240,6 +1284,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1240out: 1284out:
1241 kfree(inq_buff); 1285 kfree(inq_buff);
1242 kfree(ld_buff); 1286 kfree(ld_buff);
1287 kfree(currentsd);
1243 return; 1288 return;
1244} 1289}
1245 1290
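
The hunks above rework cciss_scsi so that a device's identity is the full set of INQUIRY-derived fields (vendor, model, revision, and the VPD page 0x83 device id) rather than devtype alone. Below is a minimal user-space sketch of the same idea; the struct name, fill_from_inquiry() helper, and the hand-rolled inquiry buffers are illustrative only and are not part of the driver.

/* Illustrative only: mirrors how cciss_scsi now derives a device's
 * identity from standard INQUIRY data plus VPD page 0x83, and how
 * device_is_the_same() compares two such records field by field. */
#include <stdio.h>
#include <string.h>

struct dev_ident {                 /* same layout idea as cciss_scsi_dev_t */
	unsigned char scsi3addr[8];
	unsigned char device_id[16];   /* from INQUIRY VPD page 0x83 */
	unsigned char vendor[8];       /* INQUIRY bytes 8-15 */
	unsigned char model[16];       /* INQUIRY bytes 16-31 */
	unsigned char revision[4];     /* INQUIRY bytes 32-35 */
	int devtype;                   /* INQUIRY byte 0, low 5 bits */
};

static void fill_from_inquiry(struct dev_ident *d, const unsigned char *inq,
			      const unsigned char *vpd83)
{
	d->devtype = inq[0] & 0x1f;
	memcpy(d->vendor,   &inq[8],  sizeof(d->vendor));
	memcpy(d->model,    &inq[16], sizeof(d->model));
	memcpy(d->revision, &inq[32], sizeof(d->revision));
	/* page 0x83: the identifier starts at byte 8 of the returned buffer */
	memcpy(d->device_id, &vpd83[8], sizeof(d->device_id));
}

static int device_is_the_same(const struct dev_ident *a,
			      const struct dev_ident *b)
{
	return a->devtype == b->devtype &&
	       !memcmp(a->scsi3addr, b->scsi3addr, sizeof(a->scsi3addr)) &&
	       !memcmp(a->device_id, b->device_id, sizeof(a->device_id)) &&
	       !memcmp(a->vendor, b->vendor, sizeof(a->vendor)) &&
	       !memcmp(a->model, b->model, sizeof(a->model)) &&
	       !memcmp(a->revision, b->revision, sizeof(a->revision));
}

int main(void)
{
	unsigned char inq[36] = { 0x01 };   /* devtype 0x01 = sequential (tape) */
	unsigned char vpd[64] = { 0 };      /* made-up example data */
	struct dev_ident a, b;

	memcpy(&inq[8], "HP      Ultrium 3-SCSI  T31D", 28);
	memcpy(&vpd[8], "0123456789abcdef", 16);

	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	fill_from_inquiry(&a, inq, vpd);
	fill_from_inquiry(&b, inq, vpd);
	printf("same: %d\n", device_is_the_same(&a, &b));   /* 1 */
	b.revision[0] ^= 1;                                 /* firmware rev differs */
	printf("same: %d\n", device_is_the_same(&a, &b));   /* 0: replace + re-add */
	return 0;
}

With this comparison, adjust_cciss_scsi_table() treats a firmware or identity change at the same SCSI3 address as "found == 1" and re-registers the device instead of silently keeping stale data.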
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
index d9c2c586502f..7b750245ae76 100644
--- a/drivers/block/cciss_scsi.h
+++ b/drivers/block/cciss_scsi.h
@@ -66,6 +66,10 @@ struct cciss_scsi_dev_t {
66 int devtype; 66 int devtype;
67 int bus, target, lun; /* as presented to the OS */ 67 int bus, target, lun; /* as presented to the OS */
68 unsigned char scsi3addr[8]; /* as presented to the HW */ 68 unsigned char scsi3addr[8]; /* as presented to the HW */
69 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
70 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
71 unsigned char model[16]; /* bytes 16-31 of inquiry data */
72 unsigned char revision[4]; /* bytes 32-35 of inquiry data */
69}; 73};
70 74
71struct cciss_scsi_hba_t { 75struct cciss_scsi_hba_t {
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 09c14341e6e3..3d967525e9a9 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -424,7 +424,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
424 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t), 424 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
425 &(hba[i]->cmd_pool_dhandle)); 425 &(hba[i]->cmd_pool_dhandle));
426 hba[i]->cmd_pool_bits = kcalloc( 426 hba[i]->cmd_pool_bits = kcalloc(
427 (NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG, sizeof(unsigned long), 427 DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
428 GFP_KERNEL); 428 GFP_KERNEL);
429 429
430 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool) 430 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
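
Several hunks in this series (cpqarray above, floppy below) replace open-coded (n + d - 1) / d arithmetic with DIV_ROUND_UP(). A small stand-alone check of the equivalence, with the macro spelled out the way linux/kernel.h defines it:

#include <stdio.h>

/* Same definition as the kernel's DIV_ROUND_UP() in linux/kernel.h. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int n, d = 64;   /* e.g. NR_CMDS bits packed into 64-bit longs */

	for (n = 1; n <= 130; n++)
		if (DIV_ROUND_UP(n, d) != (n + d - 1) / d)
			return 1;   /* never happens: the two forms are identical */

	printf("DIV_ROUND_UP(128, 64) = %u\n", DIV_ROUND_UP(128, 64)); /* 2 */
	printf("DIV_ROUND_UP(129, 64) = %u\n", DIV_ROUND_UP(129, 64)); /* 3 */
	return 0;
}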
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 395f8ea7981c..cf64ddf5d839 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -423,8 +423,15 @@ static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
423 * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical 423 * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
424 * side 0 is on physical side 0 (but with the misnamed sector IDs). 424 * side 0 is on physical side 0 (but with the misnamed sector IDs).
425 * 'stretch' should probably be renamed to something more general, like 425 * 'stretch' should probably be renamed to something more general, like
426 * 'options'. Other parameters should be self-explanatory (see also 426 * 'options'.
427 * setfdprm(8)). 427 *
428 * Bits 2 through 9 of 'stretch' tell the number of the first sector.
429 * The LSB (bit 2) is flipped. For most disks, the first sector
430 * is 1 (represented by 0x00<<2). For some CP/M and music sampler
431 * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
432 * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
433 *
434 * Other parameters should be self-explanatory (see also setfdprm(8)).
428 */ 435 */
429/* 436/*
430 Size 437 Size
@@ -1355,20 +1362,20 @@ static void fdc_specify(void)
1355 } 1362 }
1356 1363
1357 /* Convert step rate from microseconds to milliseconds and 4 bits */ 1364 /* Convert step rate from microseconds to milliseconds and 4 bits */
1358 srt = 16 - (DP->srt * scale_dtr / 1000 + NOMINAL_DTR - 1) / NOMINAL_DTR; 1365 srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
1359 if (slow_floppy) { 1366 if (slow_floppy) {
1360 srt = srt / 4; 1367 srt = srt / 4;
1361 } 1368 }
1362 SUPBOUND(srt, 0xf); 1369 SUPBOUND(srt, 0xf);
1363 INFBOUND(srt, 0); 1370 INFBOUND(srt, 0);
1364 1371
1365 hlt = (DP->hlt * scale_dtr / 2 + NOMINAL_DTR - 1) / NOMINAL_DTR; 1372 hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
1366 if (hlt < 0x01) 1373 if (hlt < 0x01)
1367 hlt = 0x01; 1374 hlt = 0x01;
1368 else if (hlt > 0x7f) 1375 else if (hlt > 0x7f)
1369 hlt = hlt_max_code; 1376 hlt = hlt_max_code;
1370 1377
1371 hut = (DP->hut * scale_dtr / 16 + NOMINAL_DTR - 1) / NOMINAL_DTR; 1378 hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
1372 if (hut < 0x1) 1379 if (hut < 0x1)
1373 hut = 0x1; 1380 hut = 0x1;
1374 else if (hut > 0xf) 1381 else if (hut > 0xf)
@@ -2236,9 +2243,9 @@ static void setup_format_params(int track)
2236 } 2243 }
2237 } 2244 }
2238 } 2245 }
2239 if (_floppy->stretch & FD_ZEROBASED) { 2246 if (_floppy->stretch & FD_SECTBASEMASK) {
2240 for (count = 0; count < F_SECT_PER_TRACK; count++) 2247 for (count = 0; count < F_SECT_PER_TRACK; count++)
2241 here[count].sect--; 2248 here[count].sect += FD_SECTBASE(_floppy) - 1;
2242 } 2249 }
2243} 2250}
2244 2251
@@ -2385,7 +2392,7 @@ static void rw_interrupt(void)
2385 2392
2386#ifdef FLOPPY_SANITY_CHECK 2393#ifdef FLOPPY_SANITY_CHECK
2387 if (nr_sectors / ssize > 2394 if (nr_sectors / ssize >
2388 (in_sector_offset + current_count_sectors + ssize - 1) / ssize) { 2395 DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
2389 DPRINT("long rw: %x instead of %lx\n", 2396 DPRINT("long rw: %x instead of %lx\n",
2390 nr_sectors, current_count_sectors); 2397 nr_sectors, current_count_sectors);
2391 printk("rs=%d s=%d\n", R_SECTOR, SECTOR); 2398 printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
@@ -2649,7 +2656,7 @@ static int make_raw_rw_request(void)
2649 } 2656 }
2650 HEAD = fsector_t / _floppy->sect; 2657 HEAD = fsector_t / _floppy->sect;
2651 2658
2652 if (((_floppy->stretch & (FD_SWAPSIDES | FD_ZEROBASED)) || 2659 if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
2653 TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect) 2660 TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
2654 max_sector = _floppy->sect; 2661 max_sector = _floppy->sect;
2655 2662
@@ -2679,7 +2686,7 @@ static int make_raw_rw_request(void)
2679 CODE2SIZE; 2686 CODE2SIZE;
2680 SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE; 2687 SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
2681 SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) + 2688 SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
2682 ((_floppy->stretch & FD_ZEROBASED) ? 0 : 1); 2689 FD_SECTBASE(_floppy);
2683 2690
2684 /* tracksize describes the size which can be filled up with sectors 2691 /* tracksize describes the size which can be filled up with sectors
2685 * of size ssize. 2692 * of size ssize.
@@ -3311,7 +3318,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3311 g->head <= 0 || 3318 g->head <= 0 ||
3312 g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) || 3319 g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
3313 /* check if reserved bits are set */ 3320 /* check if reserved bits are set */
3314 (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_ZEROBASED)) != 0) 3321 (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
3315 return -EINVAL; 3322 return -EINVAL;
3316 if (type) { 3323 if (type) {
3317 if (!capable(CAP_SYS_ADMIN)) 3324 if (!capable(CAP_SYS_ADMIN))
@@ -3356,7 +3363,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3356 if (DRS->maxblock > user_params[drive].sect || 3363 if (DRS->maxblock > user_params[drive].sect ||
3357 DRS->maxtrack || 3364 DRS->maxtrack ||
3358 ((user_params[drive].sect ^ oldStretch) & 3365 ((user_params[drive].sect ^ oldStretch) &
3359 (FD_SWAPSIDES | FD_ZEROBASED))) 3366 (FD_SWAPSIDES | FD_SECTBASEMASK)))
3360 invalidate_drive(bdev); 3367 invalidate_drive(bdev);
3361 else 3368 else
3362 process_fd_request(); 3369 process_fd_request();
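
The FD_ZEROBASED to FD_SECTBASEMASK conversion above generalizes the old zero-based flag into an 8-bit first-sector field in 'stretch' (bits 2 through 9, with the least significant of those bits flipped, per the comment added near line 428). The real FD_SECTBASE()/FD_SECTBASEMASK macros live in the floppy headers and are not shown in this diff; the helpers below are only an illustrative re-derivation of that encoding.

#include <stdio.h>

/* Sketch of the first-sector field described in the floppy.c comment:
 * bits 2-9 of 'stretch' hold the first sector number with bit 2 flipped.
 * These are NOT the kernel's FD_SECTBASE()/FD_SECTBASEMASK macros. */
#define SECTBASE_MASK 0x3FC   /* bits 2..9 */

static unsigned int encode_sectbase(unsigned int stretch, unsigned int first)
{
	return (stretch & ~SECTBASE_MASK) | (((first ^ 1) & 0xff) << 2);
}

static unsigned int decode_sectbase(unsigned int stretch)
{
	return ((stretch >> 2) & 0xff) ^ 1;
}

int main(void)
{
	/* the three cases listed in the comment */
	printf("first=1    -> field 0x%02x\n", (encode_sectbase(0, 1) >> 2) & 0xff);    /* 0x00 */
	printf("first=0    -> field 0x%02x\n", (encode_sectbase(0, 0) >> 2) & 0xff);    /* 0x01 */
	printf("first=0xc1 -> field 0x%02x\n", (encode_sectbase(0, 0xc1) >> 2) & 0xff); /* 0xc0 */
	printf("round trip of 0xc1: 0x%02x\n", decode_sectbase(encode_sectbase(0, 0xc1)));
	return 0;
}

This is why setup_format_params() now adds FD_SECTBASE(_floppy) - 1 instead of unconditionally decrementing: the same code path covers sector bases 0, 1 and the Amstrad 0xC1 case.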
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1778e4a2c672..7b3351260d56 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -403,7 +403,7 @@ static int nbd_do_it(struct nbd_device *lo)
403 BUG_ON(lo->magic != LO_MAGIC); 403 BUG_ON(lo->magic != LO_MAGIC);
404 404
405 lo->pid = current->pid; 405 lo->pid = current->pid;
406 ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr); 406 ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
407 if (ret) { 407 if (ret) {
408 printk(KERN_ERR "nbd: sysfs_create_file failed!"); 408 printk(KERN_ERR "nbd: sysfs_create_file failed!");
409 return ret; 409 return ret;
@@ -412,7 +412,7 @@ static int nbd_do_it(struct nbd_device *lo)
412 while ((req = nbd_read_stat(lo)) != NULL) 412 while ((req = nbd_read_stat(lo)) != NULL)
413 nbd_end_request(req); 413 nbd_end_request(req);
414 414
415 sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr); 415 sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
416 return 0; 416 return 0;
417} 417}
418 418
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 29b7a648cc6e..0e077150568b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2544,7 +2544,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
2544 if (last_zone != zone) { 2544 if (last_zone != zone) {
2545 BUG_ON(last_zone != zone + pd->settings.size); 2545 BUG_ON(last_zone != zone + pd->settings.size);
2546 first_sectors = last_zone - bio->bi_sector; 2546 first_sectors = last_zone - bio->bi_sector;
2547 bp = bio_split(bio, bio_split_pool, first_sectors); 2547 bp = bio_split(bio, first_sectors);
2548 BUG_ON(!bp); 2548 BUG_ON(!bp);
2549 pkt_make_request(q, &bp->bio1); 2549 pkt_make_request(q, &bp->bio1);
2550 pkt_make_request(q, &bp->bio2); 2550 pkt_make_request(q, &bp->bio2);
@@ -2911,7 +2911,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2911 if (!disk->queue) 2911 if (!disk->queue)
2912 goto out_mem2; 2912 goto out_mem2;
2913 2913
2914 pd->pkt_dev = MKDEV(disk->major, disk->first_minor); 2914 pd->pkt_dev = MKDEV(pktdev_major, idx);
2915 ret = pkt_new_dev(pd, dev); 2915 ret = pkt_new_dev(pd, dev);
2916 if (ret) 2916 if (ret)
2917 goto out_new_dev; 2917 goto out_new_dev;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d797e209951d..936466f62afd 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -199,7 +199,8 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
199 if (blk_fs_request(req)) { 199 if (blk_fs_request(req)) {
200 if (ps3disk_submit_request_sg(dev, req)) 200 if (ps3disk_submit_request_sg(dev, req))
201 break; 201 break;
202 } else if (req->cmd_type == REQ_TYPE_FLUSH) { 202 } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
203 req->cmd[0] == REQ_LB_OP_FLUSH) {
203 if (ps3disk_submit_flush_request(dev, req)) 204 if (ps3disk_submit_flush_request(dev, req))
204 break; 205 break;
205 } else { 206 } else {
@@ -257,7 +258,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
257 return IRQ_HANDLED; 258 return IRQ_HANDLED;
258 } 259 }
259 260
260 if (req->cmd_type == REQ_TYPE_FLUSH) { 261 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
262 req->cmd[0] == REQ_LB_OP_FLUSH) {
261 read = 0; 263 read = 0;
262 num_sectors = req->hard_cur_sectors; 264 num_sectors = req->hard_cur_sectors;
263 op = "flush"; 265 op = "flush";
@@ -405,7 +407,8 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
405 407
406 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 408 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
407 409
408 req->cmd_type = REQ_TYPE_FLUSH; 410 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
411 req->cmd[0] = REQ_LB_OP_FLUSH;
409} 412}
410 413
411static unsigned long ps3disk_mask; 414static unsigned long ps3disk_mask;
@@ -538,7 +541,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
538 struct ps3disk_private *priv = dev->sbd.core.driver_data; 541 struct ps3disk_private *priv = dev->sbd.core.driver_data;
539 542
540 mutex_lock(&ps3disk_mask_mutex); 543 mutex_lock(&ps3disk_mask_mutex);
541 __clear_bit(priv->gendisk->first_minor / PS3DISK_MINORS, 544 __clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS,
542 &ps3disk_mask); 545 &ps3disk_mask);
543 mutex_unlock(&ps3disk_mask_mutex); 546 mutex_unlock(&ps3disk_mask_mutex);
544 del_gendisk(priv->gendisk); 547 del_gendisk(priv->gendisk);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42251095134f..6ec5fc052786 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -47,20 +47,20 @@ static void blk_done(struct virtqueue *vq)
47 47
48 spin_lock_irqsave(&vblk->lock, flags); 48 spin_lock_irqsave(&vblk->lock, flags);
49 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 49 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
50 int uptodate; 50 int error;
51 switch (vbr->status) { 51 switch (vbr->status) {
52 case VIRTIO_BLK_S_OK: 52 case VIRTIO_BLK_S_OK:
53 uptodate = 1; 53 error = 0;
54 break; 54 break;
55 case VIRTIO_BLK_S_UNSUPP: 55 case VIRTIO_BLK_S_UNSUPP:
56 uptodate = -ENOTTY; 56 error = -ENOTTY;
57 break; 57 break;
58 default: 58 default:
59 uptodate = 0; 59 error = -EIO;
60 break; 60 break;
61 } 61 }
62 62
63 end_dequeued_request(vbr->req, uptodate); 63 __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
64 list_del(&vbr->list); 64 list_del(&vbr->list);
65 mempool_free(vbr, vblk->pool); 65 mempool_free(vbr, vblk->pool);
66 } 66 }
@@ -84,11 +84,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
84 if (blk_fs_request(vbr->req)) { 84 if (blk_fs_request(vbr->req)) {
85 vbr->out_hdr.type = 0; 85 vbr->out_hdr.type = 0;
86 vbr->out_hdr.sector = vbr->req->sector; 86 vbr->out_hdr.sector = vbr->req->sector;
87 vbr->out_hdr.ioprio = vbr->req->ioprio; 87 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
88 } else if (blk_pc_request(vbr->req)) { 88 } else if (blk_pc_request(vbr->req)) {
89 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 89 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
90 vbr->out_hdr.sector = 0; 90 vbr->out_hdr.sector = 0;
91 vbr->out_hdr.ioprio = vbr->req->ioprio; 91 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
92 } else { 92 } else {
93 /* We don't put anything else in the queue. */ 93 /* We don't put anything else in the queue. */
94 BUG(); 94 BUG();
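
The virtio_blk change above drops the old uptodate convention in favour of plain negative errno values passed to __blk_end_request(). The mapping is small enough to show in isolation; the switch below mirrors the one in the new blk_done(), using the status codes from the virtio block specification.

#include <errno.h>
#include <stdio.h>

/* Status values as defined by the virtio block spec / linux/virtio_blk.h. */
#define VIRTIO_BLK_S_OK     0
#define VIRTIO_BLK_S_IOERR  1
#define VIRTIO_BLK_S_UNSUPP 2

/* Same mapping as the new blk_done(): 0 on success, -ENOTTY for an
 * unsupported request, -EIO for anything else. */
static int virtblk_status_to_errno(unsigned char status)
{
	switch (status) {
	case VIRTIO_BLK_S_OK:
		return 0;
	case VIRTIO_BLK_S_UNSUPP:
		return -ENOTTY;
	default:
		return -EIO;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       virtblk_status_to_errno(VIRTIO_BLK_S_OK),
	       virtblk_status_to_errno(VIRTIO_BLK_S_UNSUPP),
	       virtblk_status_to_errno(VIRTIO_BLK_S_IOERR));   /* 0 -25 -5 */
	return 0;
}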
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3ca643cafccd..bff602ccccf3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -105,15 +105,17 @@ static DEFINE_SPINLOCK(blkif_io_lock);
105#define GRANT_INVALID_REF 0 105#define GRANT_INVALID_REF 0
106 106
107#define PARTS_PER_DISK 16 107#define PARTS_PER_DISK 16
108#define PARTS_PER_EXT_DISK 256
108 109
109#define BLKIF_MAJOR(dev) ((dev)>>8) 110#define BLKIF_MAJOR(dev) ((dev)>>8)
110#define BLKIF_MINOR(dev) ((dev) & 0xff) 111#define BLKIF_MINOR(dev) ((dev) & 0xff)
111 112
112#define DEV_NAME "xvd" /* name in /dev */ 113#define EXT_SHIFT 28
114#define EXTENDED (1<<EXT_SHIFT)
115#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
116#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
113 117
114/* Information about our VBDs. */ 118#define DEV_NAME "xvd" /* name in /dev */
115#define MAX_VBDS 64
116static LIST_HEAD(vbds_list);
117 119
118static int get_id_from_freelist(struct blkfront_info *info) 120static int get_id_from_freelist(struct blkfront_info *info)
119{ 121{
@@ -386,31 +388,60 @@ static int xlvbd_barrier(struct blkfront_info *info)
386} 388}
387 389
388 390
389static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, 391static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
390 int vdevice, u16 vdisk_info, u16 sector_size, 392 struct blkfront_info *info,
391 struct blkfront_info *info) 393 u16 vdisk_info, u16 sector_size)
392{ 394{
393 struct gendisk *gd; 395 struct gendisk *gd;
394 int nr_minors = 1; 396 int nr_minors = 1;
395 int err = -ENODEV; 397 int err = -ENODEV;
398 unsigned int offset;
399 int minor;
400 int nr_parts;
396 401
397 BUG_ON(info->gd != NULL); 402 BUG_ON(info->gd != NULL);
398 BUG_ON(info->rq != NULL); 403 BUG_ON(info->rq != NULL);
399 404
400 if ((minor % PARTS_PER_DISK) == 0) 405 if ((info->vdevice>>EXT_SHIFT) > 1) {
401 nr_minors = PARTS_PER_DISK; 406 /* this is above the extended range; something is wrong */
407 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
408 return -ENODEV;
409 }
410
411 if (!VDEV_IS_EXTENDED(info->vdevice)) {
412 minor = BLKIF_MINOR(info->vdevice);
413 nr_parts = PARTS_PER_DISK;
414 } else {
415 minor = BLKIF_MINOR_EXT(info->vdevice);
416 nr_parts = PARTS_PER_EXT_DISK;
417 }
418
419 if ((minor % nr_parts) == 0)
420 nr_minors = nr_parts;
402 421
403 gd = alloc_disk(nr_minors); 422 gd = alloc_disk(nr_minors);
404 if (gd == NULL) 423 if (gd == NULL)
405 goto out; 424 goto out;
406 425
407 if (nr_minors > 1) 426 offset = minor / nr_parts;
408 sprintf(gd->disk_name, "%s%c", DEV_NAME, 427
409 'a' + minor / PARTS_PER_DISK); 428 if (nr_minors > 1) {
410 else 429 if (offset < 26)
411 sprintf(gd->disk_name, "%s%c%d", DEV_NAME, 430 sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
412 'a' + minor / PARTS_PER_DISK, 431 else
413 minor % PARTS_PER_DISK); 432 sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
433 'a' + ((offset / 26)-1), 'a' + (offset % 26));
434 } else {
435 if (offset < 26)
436 sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
437 'a' + offset,
438 minor & (nr_parts - 1));
439 else
440 sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
441 'a' + ((offset / 26) - 1),
442 'a' + (offset % 26),
443 minor & (nr_parts - 1));
444 }
414 445
415 gd->major = XENVBD_MAJOR; 446 gd->major = XENVBD_MAJOR;
416 gd->first_minor = minor; 447 gd->first_minor = minor;
@@ -699,8 +730,13 @@ static int blkfront_probe(struct xenbus_device *dev,
699 err = xenbus_scanf(XBT_NIL, dev->nodename, 730 err = xenbus_scanf(XBT_NIL, dev->nodename,
700 "virtual-device", "%i", &vdevice); 731 "virtual-device", "%i", &vdevice);
701 if (err != 1) { 732 if (err != 1) {
702 xenbus_dev_fatal(dev, err, "reading virtual-device"); 733 /* go looking in the extended area instead */
703 return err; 734 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
735 "%i", &vdevice);
736 if (err != 1) {
737 xenbus_dev_fatal(dev, err, "reading virtual-device");
738 return err;
739 }
704 } 740 }
705 741
706 info = kzalloc(sizeof(*info), GFP_KERNEL); 742 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -861,9 +897,7 @@ static void blkfront_connect(struct blkfront_info *info)
861 if (err) 897 if (err)
862 info->feature_barrier = 0; 898 info->feature_barrier = 0;
863 899
864 err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice), 900 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
865 sectors, info->vdevice,
866 binfo, sector_size, info);
867 if (err) { 901 if (err) {
868 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", 902 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
869 info->xbdev->otherend); 903 info->xbdev->otherend);
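
xlvbd_alloc_gendisk() above now derives the disk name from either the classic 16-partition minor space or the new extended 256-partition space, spilling past xvdz into xvdaa, xvdab and so on. A stand-alone sketch of that naming logic follows; it uses the same arithmetic as the diff, but the make_xvd_name() wrapper is hypothetical (the driver builds the string inline with sprintf).

#include <stdio.h>

#define PARTS_PER_DISK     16
#define PARTS_PER_EXT_DISK 256
#define DEV_NAME "xvd"

/* offset selects the letter(s), the low bits select the partition. */
static void make_xvd_name(char *buf, int minor, int nr_parts, int whole_disk)
{
	int offset = minor / nr_parts;
	int part = minor & (nr_parts - 1);

	if (whole_disk) {
		if (offset < 26)
			sprintf(buf, "%s%c", DEV_NAME, 'a' + offset);
		else
			sprintf(buf, "%s%c%c", DEV_NAME,
				'a' + (offset / 26) - 1, 'a' + (offset % 26));
	} else {
		if (offset < 26)
			sprintf(buf, "%s%c%d", DEV_NAME, 'a' + offset, part);
		else
			sprintf(buf, "%s%c%c%d", DEV_NAME,
				'a' + (offset / 26) - 1, 'a' + (offset % 26), part);
	}
}

int main(void)
{
	char name[16];

	make_xvd_name(name, 0, PARTS_PER_DISK, 1);                       /* xvda  */
	printf("%s\n", name);
	make_xvd_name(name, 3 * PARTS_PER_DISK + 2, PARTS_PER_DISK, 0);  /* xvdd2 */
	printf("%s\n", name);
	make_xvd_name(name, 26 * PARTS_PER_EXT_DISK, PARTS_PER_EXT_DISK, 1); /* xvdaa */
	printf("%s\n", name);
	return 0;
}

Extended devices are flagged by bit 28 of the virtual-device number (EXT_SHIFT), which is why blkfront_probe() falls back to the "virtual-device-ext" xenstore key when the classic key is absent.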
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1e55a658e6ce..32f3a8ed8d3d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -256,7 +256,6 @@ static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
256 BT_ERR("%s urb %p submission failed (%d)", 256 BT_ERR("%s urb %p submission failed (%d)",
257 hdev->name, urb, -err); 257 hdev->name, urb, -err);
258 usb_unanchor_urb(urb); 258 usb_unanchor_urb(urb);
259 kfree(buf);
260 } 259 }
261 260
262 usb_free_urb(urb); 261 usb_free_urb(urb);
@@ -298,7 +297,6 @@ static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
298 BT_ERR("%s urb %p submission failed (%d)", 297 BT_ERR("%s urb %p submission failed (%d)",
299 hdev->name, urb, -err); 298 hdev->name, urb, -err);
300 usb_unanchor_urb(urb); 299 usb_unanchor_urb(urb);
301 kfree(buf);
302 } 300 }
303 301
304 usb_free_urb(urb); 302 usb_free_urb(urb);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 29ae99817c60..af472e052732 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,7 @@ static struct usb_device_id blacklist_table[] = {
102 { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 102 { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
103 103
104 /* Broadcom BCM2046 */ 104 /* Broadcom BCM2046 */
105 { USB_DEVICE(0x0a5c, 0x2146), .driver_info = BTUSB_RESET },
105 { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET }, 106 { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET },
106 107
107 /* Apple MacBook Pro with Broadcom chip */ 108 /* Apple MacBook Pro with Broadcom chip */
@@ -113,6 +114,7 @@ static struct usb_device_id blacklist_table[] = {
113 114
114 /* Targus ACB10US */ 115 /* Targus ACB10US */
115 { USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET }, 116 { USB_DEVICE(0x0a5c, 0x2100), .driver_info = BTUSB_RESET },
117 { USB_DEVICE(0x0a5c, 0x2154), .driver_info = BTUSB_RESET },
116 118
117 /* ANYCOM Bluetooth USB-200 and USB-250 */ 119 /* ANYCOM Bluetooth USB-200 and USB-250 */
118 { USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET }, 120 { USB_DEVICE(0x0a5c, 0x2111), .driver_info = BTUSB_RESET },
@@ -150,6 +152,9 @@ static struct usb_device_id blacklist_table[] = {
150 { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 152 { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
151 { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, 153 { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU },
152 154
155 /* Belkin F8T016 device */
156 { USB_DEVICE(0x050d, 0x016a), .driver_info = BTUSB_RESET },
157
153 /* Digianswer devices */ 158 /* Digianswer devices */
154 { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, 159 { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
155 { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, 160 { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
@@ -271,7 +276,6 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev)
271 BT_ERR("%s urb %p submission failed (%d)", 276 BT_ERR("%s urb %p submission failed (%d)",
272 hdev->name, urb, -err); 277 hdev->name, urb, -err);
273 usb_unanchor_urb(urb); 278 usb_unanchor_urb(urb);
274 kfree(buf);
275 } 279 }
276 280
277 usb_free_urb(urb); 281 usb_free_urb(urb);
@@ -354,7 +358,6 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev)
354 BT_ERR("%s urb %p submission failed (%d)", 358 BT_ERR("%s urb %p submission failed (%d)",
355 hdev->name, urb, -err); 359 hdev->name, urb, -err);
356 usb_unanchor_urb(urb); 360 usb_unanchor_urb(urb);
357 kfree(buf);
358 } 361 }
359 362
360 usb_free_urb(urb); 363 usb_free_urb(urb);
@@ -475,7 +478,6 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev)
475 BT_ERR("%s urb %p submission failed (%d)", 478 BT_ERR("%s urb %p submission failed (%d)",
476 hdev->name, urb, -err); 479 hdev->name, urb, -err);
477 usb_unanchor_urb(urb); 480 usb_unanchor_urb(urb);
478 kfree(buf);
479 } 481 }
480 482
481 usb_free_urb(urb); 483 usb_free_urb(urb);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 74031de517e6..d47f2f80accd 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2097,7 +2097,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
2097 2097
2098 len = nr * CD_FRAMESIZE_RAW; 2098 len = nr * CD_FRAMESIZE_RAW;
2099 2099
2100 ret = blk_rq_map_user(q, rq, ubuf, len); 2100 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
2101 if (ret) 2101 if (ret)
2102 break; 2102 break;
2103 2103
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1231d95aa695..d6ba77a2dd7b 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -624,14 +624,14 @@ static void gdrom_readdisk_dma(struct work_struct *work)
624 ctrl_outb(1, GDROM_DMA_STATUS_REG); 624 ctrl_outb(1, GDROM_DMA_STATUS_REG);
625 wait_event_interruptible_timeout(request_queue, 625 wait_event_interruptible_timeout(request_queue,
626 gd.transfer == 0, GDROM_DEFAULT_TIMEOUT); 626 gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
627 err = gd.transfer; 627 err = gd.transfer ? -EIO : 0;
628 gd.transfer = 0; 628 gd.transfer = 0;
629 gd.pending = 0; 629 gd.pending = 0;
630 /* now seek to take the request spinlock 630 /* now seek to take the request spinlock
631 * before handling ending the request */ 631 * before handling ending the request */
632 spin_lock(&gdrom_lock); 632 spin_lock(&gdrom_lock);
633 list_del_init(&req->queuelist); 633 list_del_init(&req->queuelist);
634 end_dequeued_request(req, 1 - err); 634 __blk_end_request(req, err, blk_rq_bytes(req));
635 } 635 }
636 spin_unlock(&gdrom_lock); 636 spin_unlock(&gdrom_lock);
637 kfree(read_command); 637 kfree(read_command);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 7ce1ac4baa6d..6af435b89867 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -661,10 +661,10 @@ void add_disk_randomness(struct gendisk *disk)
661 if (!disk || !disk->random) 661 if (!disk || !disk->random)
662 return; 662 return;
663 /* first major is 1, so we get >= 0x200 here */ 663 /* first major is 1, so we get >= 0x200 here */
664 DEBUG_ENT("disk event %d:%d\n", disk->major, disk->first_minor); 664 DEBUG_ENT("disk event %d:%d\n",
665 MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
665 666
666 add_timer_randomness(disk->random, 667 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
667 0x100 + MKDEV(disk->major, disk->first_minor));
668} 668}
669#endif 669#endif
670 670
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 3738cfa209ff..f5fc64f89c5c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -6,6 +6,7 @@ menuconfig TCG_TPM
6 tristate "TPM Hardware Support" 6 tristate "TPM Hardware Support"
7 depends on HAS_IOMEM 7 depends on HAS_IOMEM
8 depends on EXPERIMENTAL 8 depends on EXPERIMENTAL
9 select SECURITYFS
9 ---help--- 10 ---help---
10 If you have a TPM security chip in your system, which 11 If you have a TPM security chip in your system, which
11 implements the Trusted Computing Group's specification, 12 implements the Trusted Computing Group's specification,
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d568c65c1370..d9e7a49d6cbf 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -279,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
279 { "OTES1 Fan", 36, 2, 60, 1, 0 }, 279 { "OTES1 Fan", 36, 2, 60, 1, 0 },
280 { NULL, 0, 0, 0, 0, 0 } } 280 { NULL, 0, 0, 0, 0, 0 } }
281 }, 281 },
282 { 0x0011, NULL /* Abit AT8 32X, need DMI string */, { 282 { 0x0011, "AT8 32X(ATI RD580-ULI M1575)", {
283 { "CPU Core", 0, 0, 10, 1, 0 }, 283 { "CPU Core", 0, 0, 10, 1, 0 },
284 { "DDR", 1, 0, 20, 1, 0 }, 284 { "DDR", 1, 0, 20, 1, 0 },
285 { "DDR VTT", 2, 0, 10, 1, 0 }, 285 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -303,6 +303,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
303 { "SYS Fan", 34, 2, 60, 1, 0 }, 303 { "SYS Fan", 34, 2, 60, 1, 0 },
304 { "AUX1 Fan", 35, 2, 60, 1, 0 }, 304 { "AUX1 Fan", 35, 2, 60, 1, 0 },
305 { "AUX2 Fan", 36, 2, 60, 1, 0 }, 305 { "AUX2 Fan", 36, 2, 60, 1, 0 },
306 { "AUX3 Fan", 37, 2, 60, 1, 0 },
306 { NULL, 0, 0, 0, 0, 0 } } 307 { NULL, 0, 0, 0, 0, 0 } }
307 }, 308 },
308 { 0x0012, NULL /* Abit AN8 32X, need DMI string */, { 309 { 0x0012, NULL /* Abit AN8 32X, need DMI string */, {
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index f1133081cc42..d793cc011990 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -46,6 +46,8 @@
46#include <linux/err.h> 46#include <linux/err.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/sysfs.h> 48#include <linux/sysfs.h>
49#include <linux/string.h>
50#include <linux/dmi.h>
49#include <asm/io.h> 51#include <asm/io.h>
50 52
51#define DRVNAME "it87" 53#define DRVNAME "it87"
@@ -236,6 +238,8 @@ struct it87_sio_data {
236 /* Values read from Super-I/O config space */ 238 /* Values read from Super-I/O config space */
237 u8 revision; 239 u8 revision;
238 u8 vid_value; 240 u8 vid_value;
241 /* Values set based on DMI strings */
242 u8 skip_pwm;
239}; 243};
240 244
241/* For each registered chip, we need to keep some data in memory. 245/* For each registered chip, we need to keep some data in memory.
@@ -964,6 +968,7 @@ static int __init it87_find(unsigned short *address,
964{ 968{
965 int err = -ENODEV; 969 int err = -ENODEV;
966 u16 chip_type; 970 u16 chip_type;
971 const char *board_vendor, *board_name;
967 972
968 superio_enter(); 973 superio_enter();
969 chip_type = force_id ? force_id : superio_inw(DEVID); 974 chip_type = force_id ? force_id : superio_inw(DEVID);
@@ -1022,6 +1027,24 @@ static int __init it87_find(unsigned short *address,
1022 pr_info("it87: in7 is VCCH (+5V Stand-By)\n"); 1027 pr_info("it87: in7 is VCCH (+5V Stand-By)\n");
1023 } 1028 }
1024 1029
1030 /* Disable specific features based on DMI strings */
1031 board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
1032 board_name = dmi_get_system_info(DMI_BOARD_NAME);
1033 if (board_vendor && board_name) {
1034 if (strcmp(board_vendor, "nVIDIA") == 0
1035 && strcmp(board_name, "FN68PT") == 0) {
1036 /* On the Shuttle SN68PT, FAN_CTL2 is apparently not
1037 connected to a fan, but to something else. One user
1038 has reported instant system power-off when changing
1039 the PWM2 duty cycle, so we disable it.
1040 I use the board name string as the trigger in case
1041 the same board is ever used in other systems. */
1042 pr_info("it87: Disabling pwm2 due to "
1043 "hardware constraints\n");
1044 sio_data->skip_pwm = (1 << 1);
1045 }
1046 }
1047
1025exit: 1048exit:
1026 superio_exit(); 1049 superio_exit();
1027 return err; 1050 return err;
@@ -1168,25 +1191,33 @@ static int __devinit it87_probe(struct platform_device *pdev)
1168 } 1191 }
1169 1192
1170 if (enable_pwm_interface) { 1193 if (enable_pwm_interface) {
1171 if ((err = device_create_file(dev, 1194 if (!(sio_data->skip_pwm & (1 << 0))) {
1172 &sensor_dev_attr_pwm1_enable.dev_attr)) 1195 if ((err = device_create_file(dev,
1173 || (err = device_create_file(dev, 1196 &sensor_dev_attr_pwm1_enable.dev_attr))
1174 &sensor_dev_attr_pwm2_enable.dev_attr)) 1197 || (err = device_create_file(dev,
1175 || (err = device_create_file(dev, 1198 &sensor_dev_attr_pwm1.dev_attr))
1176 &sensor_dev_attr_pwm3_enable.dev_attr)) 1199 || (err = device_create_file(dev,
1177 || (err = device_create_file(dev, 1200 &dev_attr_pwm1_freq)))
1178 &sensor_dev_attr_pwm1.dev_attr)) 1201 goto ERROR4;
1179 || (err = device_create_file(dev, 1202 }
1180 &sensor_dev_attr_pwm2.dev_attr)) 1203 if (!(sio_data->skip_pwm & (1 << 1))) {
1181 || (err = device_create_file(dev, 1204 if ((err = device_create_file(dev,
1182 &sensor_dev_attr_pwm3.dev_attr)) 1205 &sensor_dev_attr_pwm2_enable.dev_attr))
1183 || (err = device_create_file(dev, 1206 || (err = device_create_file(dev,
1184 &dev_attr_pwm1_freq)) 1207 &sensor_dev_attr_pwm2.dev_attr))
1185 || (err = device_create_file(dev, 1208 || (err = device_create_file(dev,
1186 &dev_attr_pwm2_freq)) 1209 &dev_attr_pwm2_freq)))
1187 || (err = device_create_file(dev, 1210 goto ERROR4;
1188 &dev_attr_pwm3_freq))) 1211 }
1189 goto ERROR4; 1212 if (!(sio_data->skip_pwm & (1 << 2))) {
1213 if ((err = device_create_file(dev,
1214 &sensor_dev_attr_pwm3_enable.dev_attr))
1215 || (err = device_create_file(dev,
1216 &sensor_dev_attr_pwm3.dev_attr))
1217 || (err = device_create_file(dev,
1218 &dev_attr_pwm3_freq)))
1219 goto ERROR4;
1220 }
1190 } 1221 }
1191 1222
1192 if (data->type == it8712 || data->type == it8716 1223 if (data->type == it8712 || data->type == it8716
@@ -1546,6 +1577,7 @@ static int __init sm_it87_init(void)
1546 unsigned short isa_address=0; 1577 unsigned short isa_address=0;
1547 struct it87_sio_data sio_data; 1578 struct it87_sio_data sio_data;
1548 1579
1580 memset(&sio_data, 0, sizeof(struct it87_sio_data));
1549 err = it87_find(&isa_address, &sio_data); 1581 err = it87_find(&isa_address, &sio_data);
1550 if (err) 1582 if (err)
1551 return err; 1583 return err;
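
The it87 change above keys creation of the pwm1/pwm2/pwm3 sysfs attributes on a per-channel skip_pwm bitmask (bit 0 = pwm1, bit 1 = pwm2, bit 2 = pwm3) filled from DMI data, so the Shuttle SN68PT only loses pwm2. A small sketch of that gating pattern; register_pwm_attrs() is a placeholder for the driver's device_create_file() calls, not a real function.

#include <stdio.h>

/* Sketch of the per-channel gating added to it87_probe(): channel i is
 * skipped when bit i of skip_pwm is set. */
static int register_pwm_attrs(int channel)
{
	printf("creating pwm%d, pwm%d_enable, pwm%d_freq\n",
	       channel + 1, channel + 1, channel + 1);
	return 0;   /* stand-in for device_create_file() error handling */
}

static int create_pwm_files(unsigned char skip_pwm)
{
	int i, err;

	for (i = 0; i < 3; i++) {
		if (skip_pwm & (1 << i))
			continue;   /* e.g. the SN68PT quirk sets bit 1 to drop pwm2 */
		err = register_pwm_attrs(i);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	return create_pwm_files(1 << 1);   /* pwm2 disabled, pwm1/pwm3 created */
}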
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index f16bb4667238..03c2cb6a58bc 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1113,7 +1113,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
1113 1113
1114 if (write) { 1114 if (write) {
1115 /* disk has become write protected */ 1115 /* disk has become write protected */
1116 if (cd->disk->policy) { 1116 if (get_disk_ro(cd->disk)) {
1117 cdrom_end_request(drive, 0); 1117 cdrom_end_request(drive, 0);
1118 return ide_stopped; 1118 return ide_stopped;
1119 } 1119 }
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 07ef88bd109b..33ea8c048717 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -41,6 +41,12 @@
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/div64.h> 42#include <asm/div64.h>
43 43
44#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
45#define IDE_DISK_MINORS (1 << PARTN_BITS)
46#else
47#define IDE_DISK_MINORS 0
48#endif
49
44struct ide_disk_obj { 50struct ide_disk_obj {
45 ide_drive_t *drive; 51 ide_drive_t *drive;
46 ide_driver_t *driver; 52 ide_driver_t *driver;
@@ -1151,8 +1157,7 @@ static int ide_disk_probe(ide_drive_t *drive)
1151 if (!idkp) 1157 if (!idkp)
1152 goto failed; 1158 goto failed;
1153 1159
1154 g = alloc_disk_node(1 << PARTN_BITS, 1160 g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
1155 hwif_to_node(drive->hwif));
1156 if (!g) 1161 if (!g)
1157 goto out_free_idkp; 1162 goto out_free_idkp;
1158 1163
@@ -1178,9 +1183,11 @@ static int ide_disk_probe(ide_drive_t *drive)
1178 } else 1183 } else
1179 drive->attach = 1; 1184 drive->attach = 1;
1180 1185
1181 g->minors = 1 << PARTN_BITS; 1186 g->minors = IDE_DISK_MINORS;
1182 g->driverfs_dev = &drive->gendev; 1187 g->driverfs_dev = &drive->gendev;
1183 g->flags = drive->removable ? GENHD_FL_REMOVABLE : 0; 1188 g->flags |= GENHD_FL_EXT_DEVT;
1189 if (drive->removable)
1190 g->flags |= GENHD_FL_REMOVABLE;
1184 set_capacity(g, idedisk_capacity(drive)); 1191 set_capacity(g, idedisk_capacity(drive));
1185 g->fops = &idedisk_ops; 1192 g->fops = &idedisk_ops;
1186 add_disk(g); 1193 add_disk(g);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index a51a30e9eab3..70aa86c8807e 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1188,7 +1188,7 @@ static struct kobject *exact_match(dev_t dev, int *part, void *data)
1188{ 1188{
1189 struct gendisk *p = data; 1189 struct gendisk *p = data;
1190 *part &= (1 << PARTN_BITS) - 1; 1190 *part &= (1 << PARTN_BITS) - 1;
1191 return &p->dev.kobj; 1191 return &disk_to_dev(p)->kobj;
1192} 1192}
1193 1193
1194static int exact_lock(dev_t dev, void *data) 1194static int exact_lock(dev_t dev, void *data)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 922d35f4fc08..3cab0cedfca2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3748,6 +3748,7 @@ error1:
3748 cm_remove_port_fs(port); 3748 cm_remove_port_fs(port);
3749 } 3749 }
3750 device_unregister(cm_dev->device); 3750 device_unregister(cm_dev->device);
3751 kfree(cm_dev);
3751} 3752}
3752 3753
3753static void cm_remove_one(struct ib_device *ib_device) 3754static void cm_remove_one(struct ib_device *ib_device)
@@ -3776,6 +3777,7 @@ static void cm_remove_one(struct ib_device *ib_device)
3776 cm_remove_port_fs(port); 3777 cm_remove_port_fs(port);
3777 } 3778 }
3778 device_unregister(cm_dev->device); 3779 device_unregister(cm_dev->device);
3780 kfree(cm_dev);
3779} 3781}
3780 3782
3781static int __init ib_cm_init(void) 3783static int __init ib_cm_init(void)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1adf2efd3cb3..49c45feccd5b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1697,9 +1697,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
1697 u8 port_num = mad_agent_priv->agent.port_num; 1697 u8 port_num = mad_agent_priv->agent.port_num;
1698 u8 lmc; 1698 u8 lmc;
1699 1699
1700 send_resp = ((struct ib_mad *)(wr->send_buf.mad))-> 1700 send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
1701 mad_hdr.method & IB_MGMT_METHOD_RESP; 1701 rcv_resp = ib_response_mad(rwc->recv_buf.mad);
1702 rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
1703 1702
1704 if (send_resp == rcv_resp) 1703 if (send_resp == rcv_resp)
1705 /* both requests, or both responses. GIDs different */ 1704 /* both requests, or both responses. GIDs different */
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 2acf9b62cf99..69580e282af0 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -272,7 +272,6 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
272 pr_debug("%s: Invalid QP type: %d\n", __func__, 272 pr_debug("%s: Invalid QP type: %d\n", __func__,
273 init_attr->qp_type); 273 init_attr->qp_type);
274 return ERR_PTR(-EINVAL); 274 return ERR_PTR(-EINVAL);
275 break;
276 } 275 }
277 276
278 if (err) { 277 if (err) {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index eb778bfd6f66..ecff98043589 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1155,13 +1155,11 @@ static int iwch_query_port(struct ib_device *ibdev,
1155 u8 port, struct ib_port_attr *props) 1155 u8 port, struct ib_port_attr *props)
1156{ 1156{
1157 PDBG("%s ibdev %p\n", __func__, ibdev); 1157 PDBG("%s ibdev %p\n", __func__, ibdev);
1158
1159 memset(props, 0, sizeof(struct ib_port_attr));
1158 props->max_mtu = IB_MTU_4096; 1160 props->max_mtu = IB_MTU_4096;
1159 props->lid = 0; 1161 props->active_mtu = IB_MTU_2048;
1160 props->lmc = 0;
1161 props->sm_lid = 0;
1162 props->sm_sl = 0;
1163 props->state = IB_PORT_ACTIVE; 1162 props->state = IB_PORT_ACTIVE;
1164 props->phys_state = 0;
1165 props->port_cap_flags = 1163 props->port_cap_flags =
1166 IB_PORT_CM_SUP | 1164 IB_PORT_CM_SUP |
1167 IB_PORT_SNMP_TUNNEL_SUP | 1165 IB_PORT_SNMP_TUNNEL_SUP |
@@ -1170,7 +1168,6 @@ static int iwch_query_port(struct ib_device *ibdev,
1170 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; 1168 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
1171 props->gid_tbl_len = 1; 1169 props->gid_tbl_len = 1;
1172 props->pkey_tbl_len = 1; 1170 props->pkey_tbl_len = 1;
1173 props->qkey_viol_cntr = 0;
1174 props->active_width = 2; 1171 props->active_width = 2;
1175 props->active_speed = 2; 1172 props->active_speed = 2;
1176 props->max_msg_sz = -1; 1173 props->max_msg_sz = -1;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1ab919f836a8..5d7b7855afb9 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -164,6 +164,13 @@ struct ehca_qmap_entry {
164 u16 reported; 164 u16 reported;
165}; 165};
166 166
167struct ehca_queue_map {
168 struct ehca_qmap_entry *map;
169 unsigned int entries;
170 unsigned int tail;
171 unsigned int left_to_poll;
172};
173
167struct ehca_qp { 174struct ehca_qp {
168 union { 175 union {
169 struct ib_qp ib_qp; 176 struct ib_qp ib_qp;
@@ -173,8 +180,9 @@ struct ehca_qp {
173 enum ehca_ext_qp_type ext_type; 180 enum ehca_ext_qp_type ext_type;
174 enum ib_qp_state state; 181 enum ib_qp_state state;
175 struct ipz_queue ipz_squeue; 182 struct ipz_queue ipz_squeue;
176 struct ehca_qmap_entry *sq_map; 183 struct ehca_queue_map sq_map;
177 struct ipz_queue ipz_rqueue; 184 struct ipz_queue ipz_rqueue;
185 struct ehca_queue_map rq_map;
178 struct h_galpas galpas; 186 struct h_galpas galpas;
179 u32 qkey; 187 u32 qkey;
180 u32 real_qp_num; 188 u32 real_qp_num;
@@ -204,6 +212,8 @@ struct ehca_qp {
204 atomic_t nr_events; /* events seen */ 212 atomic_t nr_events; /* events seen */
205 wait_queue_head_t wait_completion; 213 wait_queue_head_t wait_completion;
206 int mig_armed; 214 int mig_armed;
215 struct list_head sq_err_node;
216 struct list_head rq_err_node;
207}; 217};
208 218
209#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ) 219#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
@@ -233,6 +243,8 @@ struct ehca_cq {
233 /* mmap counter for resources mapped into user space */ 243 /* mmap counter for resources mapped into user space */
234 u32 mm_count_queue; 244 u32 mm_count_queue;
235 u32 mm_count_galpa; 245 u32 mm_count_galpa;
246 struct list_head sqp_err_list;
247 struct list_head rqp_err_list;
236}; 248};
237 249
238enum ehca_mr_flag { 250enum ehca_mr_flag {
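
The ehca_queue_map introduced here is consumed later in this series' ehca_qp.c hunks: calc_left_cqes() converts a WQE pointer back into a queue index and counts how many entries still need flush CQEs, wrapping around the circular queue when the index has already passed the recorded tail. The wrap handling in isolation (a stand-alone sketch, not the driver code):

#include <assert.h>
#include <stdio.h>

/* Same wrap rule as calc_left_cqes(): if the current WQE index is behind
 * the recorded tail, the distance runs over the end of the map. */
static unsigned int left_to_poll(unsigned int wqe_idx, unsigned int tail,
				 unsigned int entries)
{
	if (wqe_idx < tail)
		return (entries - tail) + wqe_idx;
	return wqe_idx - tail;
}

int main(void)
{
	assert(left_to_poll(10, 4, 16) == 6);   /* no wrap: entries 4..9 */
	assert(left_to_poll(2, 14, 16) == 4);   /* wrapped: 14, 15, 0, 1 */
	assert(left_to_poll(7, 7, 16) == 0);    /* everything already polled */
	printf("ok\n");
	return 0;
}

When both counts reach zero the QP is put on the CQ's new error lists (sqp_err_list/rqp_err_list), which is what keeps flush CQEs limited to work requests that were actually pending.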
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 5540b276a33c..33647a95eb9a 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -276,6 +276,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
276 for (i = 0; i < QP_HASHTAB_LEN; i++) 276 for (i = 0; i < QP_HASHTAB_LEN; i++)
277 INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]); 277 INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
278 278
279 INIT_LIST_HEAD(&my_cq->sqp_err_list);
280 INIT_LIST_HEAD(&my_cq->rqp_err_list);
281
279 if (context) { 282 if (context) {
280 struct ipz_queue *ipz_queue = &my_cq->ipz_queue; 283 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
281 struct ehca_create_cq_resp resp; 284 struct ehca_create_cq_resp resp;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index a8a2ea585d2f..8f7f282ead65 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -197,6 +197,8 @@ void ehca_poll_eqs(unsigned long data);
197int ehca_calc_ipd(struct ehca_shca *shca, int port, 197int ehca_calc_ipd(struct ehca_shca *shca, int port,
198 enum ib_rate path_rate, u32 *ipd); 198 enum ib_rate path_rate, u32 *ipd);
199 199
200void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
201
200#ifdef CONFIG_PPC_64K_PAGES 202#ifdef CONFIG_PPC_64K_PAGES
201void *ehca_alloc_fw_ctrlblock(gfp_t flags); 203void *ehca_alloc_fw_ctrlblock(gfp_t flags);
202void ehca_free_fw_ctrlblock(void *ptr); 204void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b6bcee036734..4dbe2870e014 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -396,6 +396,50 @@ static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
396 queue->is_small = (queue->page_size != 0); 396 queue->is_small = (queue->page_size != 0);
397} 397}
398 398
399/* needs to be called with cq->spinlock held */
400void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
401{
402 struct list_head *list, *node;
403
404 /* TODO: support low latency QPs */
405 if (qp->ext_type == EQPT_LLQP)
406 return;
407
408 if (on_sq) {
409 list = &qp->send_cq->sqp_err_list;
410 node = &qp->sq_err_node;
411 } else {
412 list = &qp->recv_cq->rqp_err_list;
413 node = &qp->rq_err_node;
414 }
415
416 if (list_empty(node))
417 list_add_tail(node, list);
418
419 return;
420}
421
422static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
423{
424 unsigned long flags;
425
426 spin_lock_irqsave(&cq->spinlock, flags);
427
428 if (!list_empty(node))
429 list_del_init(node);
430
431 spin_unlock_irqrestore(&cq->spinlock, flags);
432}
433
434static void reset_queue_map(struct ehca_queue_map *qmap)
435{
436 int i;
437
438 qmap->tail = 0;
439 for (i = 0; i < qmap->entries; i++)
440 qmap->map[i].reported = 1;
441}
442
399/* 443/*
400 * Create an ib_qp struct that is either a QP or an SRQ, depending on 444 * Create an ib_qp struct that is either a QP or an SRQ, depending on
401 * the value of the is_srq parameter. If init_attr and srq_init_attr share 445 * the value of the is_srq parameter. If init_attr and srq_init_attr share
@@ -407,12 +451,11 @@ static struct ehca_qp *internal_create_qp(
407 struct ib_srq_init_attr *srq_init_attr, 451 struct ib_srq_init_attr *srq_init_attr,
408 struct ib_udata *udata, int is_srq) 452 struct ib_udata *udata, int is_srq)
409{ 453{
410 struct ehca_qp *my_qp; 454 struct ehca_qp *my_qp, *my_srq = NULL;
411 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); 455 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
412 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, 456 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
413 ib_device); 457 ib_device);
414 struct ib_ucontext *context = NULL; 458 struct ib_ucontext *context = NULL;
415 u32 nr_qes;
416 u64 h_ret; 459 u64 h_ret;
417 int is_llqp = 0, has_srq = 0; 460 int is_llqp = 0, has_srq = 0;
418 int qp_type, max_send_sge, max_recv_sge, ret; 461 int qp_type, max_send_sge, max_recv_sge, ret;
@@ -457,8 +500,7 @@ static struct ehca_qp *internal_create_qp(
457 500
458 /* handle SRQ base QPs */ 501 /* handle SRQ base QPs */
459 if (init_attr->srq) { 502 if (init_attr->srq) {
460 struct ehca_qp *my_srq = 503 my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
461 container_of(init_attr->srq, struct ehca_qp, ib_srq);
462 504
463 has_srq = 1; 505 has_srq = 1;
464 parms.ext_type = EQPT_SRQBASE; 506 parms.ext_type = EQPT_SRQBASE;
@@ -716,15 +758,19 @@ static struct ehca_qp *internal_create_qp(
716 "and pages ret=%i", ret); 758 "and pages ret=%i", ret);
717 goto create_qp_exit2; 759 goto create_qp_exit2;
718 } 760 }
719 nr_qes = my_qp->ipz_squeue.queue_length / 761
762 my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
720 my_qp->ipz_squeue.qe_size; 763 my_qp->ipz_squeue.qe_size;
721 my_qp->sq_map = vmalloc(nr_qes * 764 my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
722 sizeof(struct ehca_qmap_entry)); 765 sizeof(struct ehca_qmap_entry));
723 if (!my_qp->sq_map) { 766 if (!my_qp->sq_map.map) {
724 ehca_err(pd->device, "Couldn't allocate squeue " 767 ehca_err(pd->device, "Couldn't allocate squeue "
725 "map ret=%i", ret); 768 "map ret=%i", ret);
726 goto create_qp_exit3; 769 goto create_qp_exit3;
727 } 770 }
771 INIT_LIST_HEAD(&my_qp->sq_err_node);
772 /* to avoid the generation of bogus flush CQEs */
773 reset_queue_map(&my_qp->sq_map);
728 } 774 }
729 775
730 if (HAS_RQ(my_qp)) { 776 if (HAS_RQ(my_qp)) {
@@ -736,6 +782,25 @@ static struct ehca_qp *internal_create_qp(
736 "and pages ret=%i", ret); 782 "and pages ret=%i", ret);
737 goto create_qp_exit4; 783 goto create_qp_exit4;
738 } 784 }
785
786 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
787 my_qp->ipz_rqueue.qe_size;
788 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
789 sizeof(struct ehca_qmap_entry));
790 if (!my_qp->rq_map.map) {
791 ehca_err(pd->device, "Couldn't allocate squeue "
792 "map ret=%i", ret);
793 goto create_qp_exit5;
794 }
795 INIT_LIST_HEAD(&my_qp->rq_err_node);
796 /* to avoid the generation of bogus flush CQEs */
797 reset_queue_map(&my_qp->rq_map);
798 } else if (init_attr->srq) {
799 /* this is a base QP, use the queue map of the SRQ */
800 my_qp->rq_map = my_srq->rq_map;
801 INIT_LIST_HEAD(&my_qp->rq_err_node);
802
803 my_qp->ipz_rqueue = my_srq->ipz_rqueue;
739 } 804 }
740 805
741 if (is_srq) { 806 if (is_srq) {
@@ -799,7 +864,7 @@ static struct ehca_qp *internal_create_qp(
799 if (ret) { 864 if (ret) {
800 ehca_err(pd->device, 865 ehca_err(pd->device,
801 "Couldn't assign qp to send_cq ret=%i", ret); 866 "Couldn't assign qp to send_cq ret=%i", ret);
802 goto create_qp_exit6; 867 goto create_qp_exit7;
803 } 868 }
804 } 869 }
805 870
@@ -825,25 +890,29 @@ static struct ehca_qp *internal_create_qp(
825 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 890 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
826 ehca_err(pd->device, "Copy to udata failed"); 891 ehca_err(pd->device, "Copy to udata failed");
827 ret = -EINVAL; 892 ret = -EINVAL;
828 goto create_qp_exit7; 893 goto create_qp_exit8;
829 } 894 }
830 } 895 }
831 896
832 return my_qp; 897 return my_qp;
833 898
834create_qp_exit7: 899create_qp_exit8:
835 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); 900 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
836 901
837create_qp_exit6: 902create_qp_exit7:
838 kfree(my_qp->mod_qp_parm); 903 kfree(my_qp->mod_qp_parm);
839 904
905create_qp_exit6:
906 if (HAS_RQ(my_qp))
907 vfree(my_qp->rq_map.map);
908
840create_qp_exit5: 909create_qp_exit5:
841 if (HAS_RQ(my_qp)) 910 if (HAS_RQ(my_qp))
842 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); 911 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
843 912
844create_qp_exit4: 913create_qp_exit4:
845 if (HAS_SQ(my_qp)) 914 if (HAS_SQ(my_qp))
846 vfree(my_qp->sq_map); 915 vfree(my_qp->sq_map.map);
847 916
848create_qp_exit3: 917create_qp_exit3:
849 if (HAS_SQ(my_qp)) 918 if (HAS_SQ(my_qp))
@@ -1035,6 +1104,101 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
1035 return 0; 1104 return 0;
1036} 1105}
1037 1106
1107static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
1108 struct ehca_queue_map *qmap)
1109{
1110 void *wqe_v;
1111 u64 q_ofs;
1112 u32 wqe_idx;
1113
1114 /* convert real to abs address */
1115 wqe_p = wqe_p & (~(1UL << 63));
1116
1117 wqe_v = abs_to_virt(wqe_p);
1118
1119 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
1120 ehca_gen_err("Invalid offset for calculating left cqes "
1121 "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
1122 return -EFAULT;
1123 }
1124
1125 wqe_idx = q_ofs / ipz_queue->qe_size;
1126 if (wqe_idx < qmap->tail)
1127 qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
1128 else
1129 qmap->left_to_poll = wqe_idx - qmap->tail;
1130
1131 return 0;
1132}
1133
1134static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
1135{
1136 u64 h_ret;
1137 void *send_wqe_p, *recv_wqe_p;
1138 int ret;
1139 unsigned long flags;
1140 int qp_num = my_qp->ib_qp.qp_num;
1141
1142 /* this hcall is not supported on base QPs */
1143 if (my_qp->ext_type != EQPT_SRQBASE) {
1144 /* get send and receive wqe pointer */
1145 h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
1146 my_qp->ipz_qp_handle, &my_qp->pf,
1147 &send_wqe_p, &recv_wqe_p, 4);
1148 if (h_ret != H_SUCCESS) {
1149 ehca_err(&shca->ib_device, "disable_and_get_wqe() "
1150 "failed ehca_qp=%p qp_num=%x h_ret=%li",
1151 my_qp, qp_num, h_ret);
1152 return ehca2ib_return_code(h_ret);
1153 }
1154
 1155 /*
 1156 * acquire the lock to ensure that nobody is polling the cq;
 1157 * otherwise a concurrent poll could leave the qmap->tail
 1158 * pointer in an inconsistent state.
 1159 */
1160 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1161 ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
1162 &my_qp->sq_map);
1163 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1164 if (ret)
1165 return ret;
1166
1167
1168 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1169 ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
1170 &my_qp->rq_map);
1171 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
1172 if (ret)
1173 return ret;
1174 } else {
1175 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1176 my_qp->sq_map.left_to_poll = 0;
1177 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1178
1179 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1180 my_qp->rq_map.left_to_poll = 0;
1181 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
1182 }
1183
 1184 /* this ensures that flush CQEs are generated only for pending WQEs */
1185 if ((my_qp->sq_map.left_to_poll == 0) &&
1186 (my_qp->rq_map.left_to_poll == 0)) {
1187 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
1188 ehca_add_to_err_list(my_qp, 1);
1189 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
1190
1191 if (HAS_RQ(my_qp)) {
1192 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
1193 ehca_add_to_err_list(my_qp, 0);
1194 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
1195 flags);
1196 }
1197 }
1198
1199 return 0;
1200}
1201
1038/* 1202/*
1039 * internal_modify_qp with circumvention to handle aqp0 properly 1203 * internal_modify_qp with circumvention to handle aqp0 properly
1040 * smi_reset2init indicates if this is an internal reset-to-init-call for 1204 * smi_reset2init indicates if this is an internal reset-to-init-call for
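The left_to_poll arithmetic in calc_left_cqes() above walks the circular queue map from the consumer tail forward to the WQE at which the hardware stopped, wrapping around the ring if necessary. A minimal standalone illustration of the same computation (qmap_left_to_poll() is a made-up helper for this sketch, not part of the driver):

#include <stdio.h>

/* Number of WQEs between the consumer tail and wqe_idx, walking
 * forward around a ring of 'entries' slots -- the same arithmetic
 * calc_left_cqes() uses to fill qmap->left_to_poll. */
static unsigned int qmap_left_to_poll(unsigned int wqe_idx,
                                      unsigned int tail,
                                      unsigned int entries)
{
        if (wqe_idx < tail)
                return (entries - tail) + wqe_idx;      /* wrapped */
        return wqe_idx - tail;                          /* no wrap */
}

int main(void)
{
        /* entries = 8, tail = 6, hardware stopped at index 2:
         * WQEs 6, 7, 0 and 1 are still outstanding. */
        printf("%u\n", qmap_left_to_poll(2, 6, 8));     /* prints 4 */
        return 0;
}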
@@ -1539,10 +1703,27 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1539 goto modify_qp_exit2; 1703 goto modify_qp_exit2;
1540 } 1704 }
1541 } 1705 }
1706 if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
1707 ret = check_for_left_cqes(my_qp, shca);
1708 if (ret)
1709 goto modify_qp_exit2;
1710 }
1542 1711
1543 if (statetrans == IB_QPST_ANY2RESET) { 1712 if (statetrans == IB_QPST_ANY2RESET) {
1544 ipz_qeit_reset(&my_qp->ipz_rqueue); 1713 ipz_qeit_reset(&my_qp->ipz_rqueue);
1545 ipz_qeit_reset(&my_qp->ipz_squeue); 1714 ipz_qeit_reset(&my_qp->ipz_squeue);
1715
1716 if (qp_cur_state == IB_QPS_ERR) {
1717 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
1718
1719 if (HAS_RQ(my_qp))
1720 del_from_err_list(my_qp->recv_cq,
1721 &my_qp->rq_err_node);
1722 }
1723 reset_queue_map(&my_qp->sq_map);
1724
1725 if (HAS_RQ(my_qp))
1726 reset_queue_map(&my_qp->rq_map);
1546 } 1727 }
1547 1728
1548 if (attr_mask & IB_QP_QKEY) 1729 if (attr_mask & IB_QP_QKEY)
@@ -1958,6 +2139,16 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1958 idr_remove(&ehca_qp_idr, my_qp->token); 2139 idr_remove(&ehca_qp_idr, my_qp->token);
1959 write_unlock_irqrestore(&ehca_qp_idr_lock, flags); 2140 write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
1960 2141
2142 /*
2143 * SRQs will never get into an error list and do not have a recv_cq,
2144 * so we need to skip them here.
2145 */
2146 if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
2147 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
2148
2149 if (HAS_SQ(my_qp))
2150 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
2151
1961 /* now wait until all pending events have completed */ 2152 /* now wait until all pending events have completed */
1962 wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events)); 2153 wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
1963 2154
@@ -1983,7 +2174,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1983 if (qp_type == IB_QPT_GSI) { 2174 if (qp_type == IB_QPT_GSI) {
1984 struct ib_event event; 2175 struct ib_event event;
1985 ehca_info(dev, "device %s: port %x is inactive.", 2176 ehca_info(dev, "device %s: port %x is inactive.",
1986 shca->ib_device.name, port_num); 2177 shca->ib_device.name, port_num);
1987 event.device = &shca->ib_device; 2178 event.device = &shca->ib_device;
1988 event.event = IB_EVENT_PORT_ERR; 2179 event.event = IB_EVENT_PORT_ERR;
1989 event.element.port_num = port_num; 2180 event.element.port_num = port_num;
@@ -1991,11 +2182,15 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1991 ib_dispatch_event(&event); 2182 ib_dispatch_event(&event);
1992 } 2183 }
1993 2184
1994 if (HAS_RQ(my_qp)) 2185 if (HAS_RQ(my_qp)) {
1995 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); 2186 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
2187
2188 vfree(my_qp->rq_map.map);
2189 }
1996 if (HAS_SQ(my_qp)) { 2190 if (HAS_SQ(my_qp)) {
1997 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); 2191 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
1998 vfree(my_qp->sq_map); 2192
2193 vfree(my_qp->sq_map.map);
1999 } 2194 }
2000 kmem_cache_free(qp_cache, my_qp); 2195 kmem_cache_free(qp_cache, my_qp);
2001 atomic_dec(&shca->num_qps); 2196 atomic_dec(&shca->num_qps);
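The ehca_qp.c hunks above turn the flat sq_map array into a small per-queue bookkeeping structure that mirrors the send and receive queues one entry per WQE. The exact definitions live in ehca_classes.h; the sketch below is only inferred from the fields this diff uses (map, entries, tail, left_to_poll, app_wr_id, reported) and from how generate_flush_cqes() later consults the reported flag, so the field widths and the reset helper are assumptions:

#include <stdint.h>

/* Inferred shape of the queue map; illustrative only. */
struct qmap_entry_sketch {
        uint16_t app_wr_id;     /* caller's low wr_id bits (QMAP_IDX_MASK) */
        uint16_t reported;      /* set once a completion was returned */
};

struct queue_map_sketch {
        struct qmap_entry_sketch *map;  /* one entry per WQE */
        unsigned int entries;           /* queue_length / qe_size */
        unsigned int tail;              /* next WQE expected to complete */
        unsigned int left_to_poll;      /* real CQEs still owed after QP->ERR */
};

/* A plausible reset_queue_map(): marking every entry as already
 * reported keeps the flush logic from inventing CQEs for WQEs that
 * were never posted ("bogus flush CQEs" in the comments above). */
void queue_map_reset_sketch(struct queue_map_sketch *qmap)
{
        unsigned int i;

        qmap->tail = 0;
        qmap->left_to_poll = 0;
        for (i = 0; i < qmap->entries; i++)
                qmap->map[i].reported = 1;
}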
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 4426d82fe798..64928079eafa 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -53,9 +53,25 @@
53/* in RC traffic, insert an empty RDMA READ every this many packets */ 53/* in RC traffic, insert an empty RDMA READ every this many packets */
54#define ACK_CIRC_THRESHOLD 2000000 54#define ACK_CIRC_THRESHOLD 2000000
55 55
56static u64 replace_wr_id(u64 wr_id, u16 idx)
57{
58 u64 ret;
59
60 ret = wr_id & ~QMAP_IDX_MASK;
61 ret |= idx & QMAP_IDX_MASK;
62
63 return ret;
64}
65
66static u16 get_app_wr_id(u64 wr_id)
67{
68 return wr_id & QMAP_IDX_MASK;
69}
70
56static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, 71static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
57 struct ehca_wqe *wqe_p, 72 struct ehca_wqe *wqe_p,
58 struct ib_recv_wr *recv_wr) 73 struct ib_recv_wr *recv_wr,
74 u32 rq_map_idx)
59{ 75{
60 u8 cnt_ds; 76 u8 cnt_ds;
61 if (unlikely((recv_wr->num_sge < 0) || 77 if (unlikely((recv_wr->num_sge < 0) ||
@@ -69,7 +85,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
69 /* clear wqe header until sglist */ 85 /* clear wqe header until sglist */
70 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); 86 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
71 87
72 wqe_p->work_request_id = recv_wr->wr_id; 88 wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
73 wqe_p->nr_of_data_seg = recv_wr->num_sge; 89 wqe_p->nr_of_data_seg = recv_wr->num_sge;
74 90
75 for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) { 91 for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
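replace_wr_id() and get_app_wr_id() above stash the queue-map index in the low bits of the caller's 64-bit wr_id and recover it again at completion time. A standalone round trip, assuming a 16-bit QMAP_IDX_MASK (the real value is defined in the ehca headers and is only assumed here):

#include <assert.h>
#include <stdint.h>

#define QMAP_IDX_MASK 0xFFFFULL         /* assumed mask width */

static uint64_t replace_wr_id(uint64_t wr_id, uint16_t idx)
{
        return (wr_id & ~QMAP_IDX_MASK) | (idx & QMAP_IDX_MASK);
}

static uint16_t get_app_wr_id(uint64_t wr_id)
{
        return wr_id & QMAP_IDX_MASK;
}

int main(void)
{
        uint64_t app_wr_id = 0xABCD123400005678ULL;  /* caller-supplied */
        uint16_t idx = 42;                           /* queue-map slot */

        /* On post: remember the low bits, overwrite them with the index. */
        uint16_t saved = get_app_wr_id(app_wr_id);
        uint64_t on_wire = replace_wr_id(app_wr_id, idx);

        /* On completion: recover the index, then restore the wr_id. */
        assert(get_app_wr_id(on_wire) == idx);
        assert(replace_wr_id(on_wire, saved) == app_wr_id);
        return 0;
}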
@@ -146,6 +162,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
146 u64 dma_length; 162 u64 dma_length;
147 struct ehca_av *my_av; 163 struct ehca_av *my_av;
148 u32 remote_qkey = send_wr->wr.ud.remote_qkey; 164 u32 remote_qkey = send_wr->wr.ud.remote_qkey;
165 struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
149 166
150 if (unlikely((send_wr->num_sge < 0) || 167 if (unlikely((send_wr->num_sge < 0) ||
151 (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) { 168 (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
@@ -158,11 +175,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
158 /* clear wqe header until sglist */ 175 /* clear wqe header until sglist */
159 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); 176 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
160 177
161 wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK; 178 wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
162 wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
163 179
164 qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK; 180 qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
165 qp->sq_map[sq_map_idx].reported = 0; 181 qmap_entry->reported = 0;
166 182
167 switch (send_wr->opcode) { 183 switch (send_wr->opcode) {
168 case IB_WR_SEND: 184 case IB_WR_SEND:
@@ -496,7 +512,9 @@ static int internal_post_recv(struct ehca_qp *my_qp,
496 struct ehca_wqe *wqe_p; 512 struct ehca_wqe *wqe_p;
497 int wqe_cnt = 0; 513 int wqe_cnt = 0;
498 int ret = 0; 514 int ret = 0;
515 u32 rq_map_idx;
499 unsigned long flags; 516 unsigned long flags;
517 struct ehca_qmap_entry *qmap_entry;
500 518
501 if (unlikely(!HAS_RQ(my_qp))) { 519 if (unlikely(!HAS_RQ(my_qp))) {
502 ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d", 520 ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
@@ -524,8 +542,15 @@ static int internal_post_recv(struct ehca_qp *my_qp,
524 } 542 }
525 goto post_recv_exit0; 543 goto post_recv_exit0;
526 } 544 }
545 /*
546 * Get the index of the WQE in the recv queue. The same index
547 * is used for writing into the rq_map.
548 */
549 rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
550
527 /* write a RECV WQE into the QUEUE */ 551 /* write a RECV WQE into the QUEUE */
528 ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr); 552 ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
553 rq_map_idx);
529 /* 554 /*
530 * if something failed, 555 * if something failed,
531 * reset the free entry pointer to the start value 556 * reset the free entry pointer to the start value
@@ -540,6 +565,11 @@ static int internal_post_recv(struct ehca_qp *my_qp,
540 } 565 }
541 goto post_recv_exit0; 566 goto post_recv_exit0;
542 } 567 }
568
569 qmap_entry = &my_qp->rq_map.map[rq_map_idx];
570 qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
571 qmap_entry->reported = 0;
572
543 wqe_cnt++; 573 wqe_cnt++;
544 } /* eof for cur_recv_wr */ 574 } /* eof for cur_recv_wr */
545 575
@@ -596,10 +626,12 @@ static const u8 ib_wc_opcode[255] = {
596/* internal function to poll one entry of cq */ 626/* internal function to poll one entry of cq */
597static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) 627static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
598{ 628{
599 int ret = 0; 629 int ret = 0, qmap_tail_idx;
600 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 630 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
601 struct ehca_cqe *cqe; 631 struct ehca_cqe *cqe;
602 struct ehca_qp *my_qp; 632 struct ehca_qp *my_qp;
633 struct ehca_qmap_entry *qmap_entry;
634 struct ehca_queue_map *qmap;
603 int cqe_count = 0, is_error; 635 int cqe_count = 0, is_error;
604 636
605repoll: 637repoll:
@@ -674,27 +706,52 @@ repoll:
674 goto repoll; 706 goto repoll;
675 wc->qp = &my_qp->ib_qp; 707 wc->qp = &my_qp->ib_qp;
676 708
677 if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) { 709 if (is_error) {
678 struct ehca_qmap_entry *qmap_entry;
679 /* 710 /*
680 * We got a send completion and need to restore the original 711 * set left_to_poll to 0 because in error state, we will not
681 * wr_id. 712 * get any additional CQEs
682 */ 713 */
683 qmap_entry = &my_qp->sq_map[cqe->work_request_id & 714 ehca_add_to_err_list(my_qp, 1);
684 QMAP_IDX_MASK]; 715 my_qp->sq_map.left_to_poll = 0;
685 716
686 if (qmap_entry->reported) { 717 if (HAS_RQ(my_qp))
687 ehca_warn(cq->device, "Double cqe on qp_num=%#x", 718 ehca_add_to_err_list(my_qp, 0);
688 my_qp->real_qp_num); 719 my_qp->rq_map.left_to_poll = 0;
689 /* found a double cqe, discard it and read next one */ 720 }
690 goto repoll; 721
691 } 722 qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
692 wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK; 723 if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
693 wc->wr_id |= qmap_entry->app_wr_id; 724 /* We got a send completion. */
694 qmap_entry->reported = 1; 725 qmap = &my_qp->sq_map;
695 } else 726 else
696 /* We got a receive completion. */ 727 /* We got a receive completion. */
697 wc->wr_id = cqe->work_request_id; 728 qmap = &my_qp->rq_map;
729
730 qmap_entry = &qmap->map[qmap_tail_idx];
731 if (qmap_entry->reported) {
732 ehca_warn(cq->device, "Double cqe on qp_num=%#x",
733 my_qp->real_qp_num);
734 /* found a double cqe, discard it and read next one */
735 goto repoll;
736 }
737
738 wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
739 qmap_entry->reported = 1;
740
741 /* this is a proper completion, we need to advance the tail pointer */
742 if (++qmap->tail == qmap->entries)
743 qmap->tail = 0;
744
745 /* if left_to_poll is decremented to 0, add the QP to the error list */
746 if (qmap->left_to_poll > 0) {
747 qmap->left_to_poll--;
748 if ((my_qp->sq_map.left_to_poll == 0) &&
749 (my_qp->rq_map.left_to_poll == 0)) {
750 ehca_add_to_err_list(my_qp, 1);
751 if (HAS_RQ(my_qp))
752 ehca_add_to_err_list(my_qp, 0);
753 }
754 }
698 755
699 /* eval ib_wc_opcode */ 756 /* eval ib_wc_opcode */
700 wc->opcode = ib_wc_opcode[cqe->optype]-1; 757 wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -733,13 +790,88 @@ poll_cq_one_exit0:
733 return ret; 790 return ret;
734} 791}
735 792
793static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
794 struct ib_wc *wc, int num_entries,
795 struct ipz_queue *ipz_queue, int on_sq)
796{
797 int nr = 0;
798 struct ehca_wqe *wqe;
799 u64 offset;
800 struct ehca_queue_map *qmap;
801 struct ehca_qmap_entry *qmap_entry;
802
803 if (on_sq)
804 qmap = &my_qp->sq_map;
805 else
806 qmap = &my_qp->rq_map;
807
808 qmap_entry = &qmap->map[qmap->tail];
809
810 while ((nr < num_entries) && (qmap_entry->reported == 0)) {
811 /* generate flush CQE */
812 memset(wc, 0, sizeof(*wc));
813
814 offset = qmap->tail * ipz_queue->qe_size;
815 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
816 if (!wqe) {
817 ehca_err(cq->device, "Invalid wqe offset=%#lx on "
818 "qp_num=%#x", offset, my_qp->real_qp_num);
819 return nr;
820 }
821
822 wc->wr_id = replace_wr_id(wqe->work_request_id,
823 qmap_entry->app_wr_id);
824
825 if (on_sq) {
826 switch (wqe->optype) {
827 case WQE_OPTYPE_SEND:
828 wc->opcode = IB_WC_SEND;
829 break;
830 case WQE_OPTYPE_RDMAWRITE:
831 wc->opcode = IB_WC_RDMA_WRITE;
832 break;
833 case WQE_OPTYPE_RDMAREAD:
834 wc->opcode = IB_WC_RDMA_READ;
835 break;
836 default:
837 ehca_err(cq->device, "Invalid optype=%x",
838 wqe->optype);
839 return nr;
840 }
841 } else
842 wc->opcode = IB_WC_RECV;
843
844 if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
845 wc->ex.imm_data = wqe->immediate_data;
846 wc->wc_flags |= IB_WC_WITH_IMM;
847 }
848
849 wc->status = IB_WC_WR_FLUSH_ERR;
850
851 wc->qp = &my_qp->ib_qp;
852
853 /* mark as reported and advance tail pointer */
854 qmap_entry->reported = 1;
855 if (++qmap->tail == qmap->entries)
856 qmap->tail = 0;
857 qmap_entry = &qmap->map[qmap->tail];
858
859 wc++; nr++;
860 }
861
862 return nr;
863
864}
865
736int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) 866int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
737{ 867{
738 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 868 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
739 int nr; 869 int nr;
870 struct ehca_qp *err_qp;
740 struct ib_wc *current_wc = wc; 871 struct ib_wc *current_wc = wc;
741 int ret = 0; 872 int ret = 0;
742 unsigned long flags; 873 unsigned long flags;
874 int entries_left = num_entries;
743 875
744 if (num_entries < 1) { 876 if (num_entries < 1) {
745 ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p " 877 ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
@@ -749,15 +881,40 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
749 } 881 }
750 882
751 spin_lock_irqsave(&my_cq->spinlock, flags); 883 spin_lock_irqsave(&my_cq->spinlock, flags);
752 for (nr = 0; nr < num_entries; nr++) { 884
885 /* generate flush cqes for send queues */
886 list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
887 nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
888 &err_qp->ipz_squeue, 1);
889 entries_left -= nr;
890 current_wc += nr;
891
892 if (entries_left == 0)
893 break;
894 }
895
896 /* generate flush cqes for receive queues */
897 list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
898 nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
899 &err_qp->ipz_rqueue, 0);
900 entries_left -= nr;
901 current_wc += nr;
902
903 if (entries_left == 0)
904 break;
905 }
906
907 for (nr = 0; nr < entries_left; nr++) {
753 ret = ehca_poll_cq_one(cq, current_wc); 908 ret = ehca_poll_cq_one(cq, current_wc);
754 if (ret) 909 if (ret)
755 break; 910 break;
756 current_wc++; 911 current_wc++;
757 } /* eof for nr */ 912 } /* eof for nr */
913 entries_left -= nr;
914
758 spin_unlock_irqrestore(&my_cq->spinlock, flags); 915 spin_unlock_irqrestore(&my_cq->spinlock, flags);
759 if (ret == -EAGAIN || !ret) 916 if (ret == -EAGAIN || !ret)
760 ret = nr; 917 ret = num_entries - entries_left;
761 918
762poll_cq_exit0: 919poll_cq_exit0:
763 return ret; 920 return ret;
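The net effect of the ehca_poll_cq() changes above is that once a QP reaches the error state, every WQE that never produced a hardware CQE is completed in software with a flush status, so consumers keep the usual flush-semantics guarantee. A minimal user-space polling loop relying on that guarantee (plain libibverbs, nothing ehca-specific; the caller is assumed to know how many work requests were outstanding):

#include <infiniband/verbs.h>
#include <stdio.h>

/* Drain a CQ after its QPs were moved to the error state: every
 * outstanding WQE is expected to complete, typically with
 * IBV_WC_WR_FLUSH_ERR, whether flushed by hardware or by the driver. */
static int drain_cq(struct ibv_cq *cq, int outstanding)
{
        struct ibv_wc wc;
        int n;

        while (outstanding > 0) {
                n = ibv_poll_cq(cq, 1, &wc);
                if (n < 0)
                        return -1;      /* poll failed */
                if (n == 0)
                        continue;       /* busy-poll; real code would block */

                if (wc.status == IBV_WC_WR_FLUSH_ERR)
                        printf("wr_id %llu flushed\n",
                               (unsigned long long)wc.wr_id);
                outstanding--;
        }
        return 0;
}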
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 97710522624d..7b93cda1a4bd 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -675,7 +675,8 @@ static void send_rc_ack(struct ipath_qp *qp)
675 hdr.lrh[0] = cpu_to_be16(lrh0); 675 hdr.lrh[0] = cpu_to_be16(lrh0);
676 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 676 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
677 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); 677 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
678 hdr.lrh[3] = cpu_to_be16(dd->ipath_lid); 678 hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
679 qp->remote_ah_attr.src_path_bits);
679 ohdr->bth[0] = cpu_to_be32(bth0); 680 ohdr->bth[0] = cpu_to_be32(bth0);
680 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 681 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
681 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); 682 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index af051f757663..fc0f6d9e6030 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -618,7 +618,8 @@ void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
618 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); 618 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
619 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 619 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
620 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); 620 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
621 qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); 621 qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
622 qp->remote_ah_attr.src_path_bits);
622 bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index); 623 bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
623 bth0 |= extra_bytes << 20; 624 bth0 |= extra_bytes << 20;
624 ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22)); 625 ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index b766e40e9ebf..eabc4247860b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -340,9 +340,16 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
340 int acc; 340 int acc;
341 int ret; 341 int ret;
342 unsigned long flags; 342 unsigned long flags;
343 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
343 344
344 spin_lock_irqsave(&qp->s_lock, flags); 345 spin_lock_irqsave(&qp->s_lock, flags);
345 346
347 if (qp->ibqp.qp_type != IB_QPT_SMI &&
348 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
349 ret = -ENETDOWN;
350 goto bail;
351 }
352
346 /* Check that state is OK to post send. */ 353 /* Check that state is OK to post send. */
347 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) 354 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
348 goto bail_inval; 355 goto bail_inval;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9559248f265b..baa01deb2436 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1058,6 +1058,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1058 else 1058 else
1059 sqd_event = 0; 1059 sqd_event = 0;
1060 1060
1061 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1062 context->rlkey |= (1 << 4);
1063
1061 /* 1064 /*
1062 * Before passing a kernel QP to the HW, make sure that the 1065 * Before passing a kernel QP to the HW, make sure that the
1063 * ownership bits of the send queue are set and the SQ 1066 * ownership bits of the send queue are set and the SQ
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cc440f90000b..65ad359fdf16 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -149,18 +149,10 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
149 ((pci_resource_len(dev->pdev, 0) - 1) & 149 ((pci_resource_len(dev->pdev, 0) - 1) &
150 dev->catas_err.addr); 150 dev->catas_err.addr);
151 151
152 if (!request_mem_region(addr, dev->catas_err.size * 4,
153 DRV_NAME)) {
154 mthca_warn(dev, "couldn't request catastrophic error region "
155 "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
156 return;
157 }
158
159 dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4); 152 dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
160 if (!dev->catas_err.map) { 153 if (!dev->catas_err.map) {
161 mthca_warn(dev, "couldn't map catastrophic error region " 154 mthca_warn(dev, "couldn't map catastrophic error region "
162 "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4); 155 "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
163 release_mem_region(addr, dev->catas_err.size * 4);
164 return; 156 return;
165 } 157 }
166 158
@@ -175,13 +167,8 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
175{ 167{
176 del_timer_sync(&dev->catas_err.timer); 168 del_timer_sync(&dev->catas_err.timer);
177 169
178 if (dev->catas_err.map) { 170 if (dev->catas_err.map)
179 iounmap(dev->catas_err.map); 171 iounmap(dev->catas_err.map);
180 release_mem_region(pci_resource_start(dev->pdev, 0) +
181 ((pci_resource_len(dev->pdev, 0) - 1) &
182 dev->catas_err.addr),
183 dev->catas_err.size * 4);
184 }
185 172
186 spin_lock_irq(&catas_lock); 173 spin_lock_irq(&catas_lock);
187 list_del(&dev->catas_err.list); 174 list_del(&dev->catas_err.list);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cc6858f0b65b..28f0e0c40d7d 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -652,27 +652,13 @@ static int mthca_map_reg(struct mthca_dev *dev,
652{ 652{
653 unsigned long base = pci_resource_start(dev->pdev, 0); 653 unsigned long base = pci_resource_start(dev->pdev, 0);
654 654
655 if (!request_mem_region(base + offset, size, DRV_NAME))
656 return -EBUSY;
657
658 *map = ioremap(base + offset, size); 655 *map = ioremap(base + offset, size);
659 if (!*map) { 656 if (!*map)
660 release_mem_region(base + offset, size);
661 return -ENOMEM; 657 return -ENOMEM;
662 }
663 658
664 return 0; 659 return 0;
665} 660}
666 661
667static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
668 unsigned long size, void __iomem *map)
669{
670 unsigned long base = pci_resource_start(dev->pdev, 0);
671
672 release_mem_region(base + offset, size);
673 iounmap(map);
674}
675
676static int mthca_map_eq_regs(struct mthca_dev *dev) 662static int mthca_map_eq_regs(struct mthca_dev *dev)
677{ 663{
678 if (mthca_is_memfree(dev)) { 664 if (mthca_is_memfree(dev)) {
@@ -699,9 +685,7 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
699 dev->fw.arbel.eq_arm_base) + 4, 4, 685 dev->fw.arbel.eq_arm_base) + 4, 4,
700 &dev->eq_regs.arbel.eq_arm)) { 686 &dev->eq_regs.arbel.eq_arm)) {
701 mthca_err(dev, "Couldn't map EQ arm register, aborting.\n"); 687 mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
702 mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & 688 iounmap(dev->clr_base);
703 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
704 dev->clr_base);
705 return -ENOMEM; 689 return -ENOMEM;
706 } 690 }
707 691
@@ -710,12 +694,8 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
710 MTHCA_EQ_SET_CI_SIZE, 694 MTHCA_EQ_SET_CI_SIZE,
711 &dev->eq_regs.arbel.eq_set_ci_base)) { 695 &dev->eq_regs.arbel.eq_set_ci_base)) {
712 mthca_err(dev, "Couldn't map EQ CI register, aborting.\n"); 696 mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
713 mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) & 697 iounmap(dev->eq_regs.arbel.eq_arm);
714 dev->fw.arbel.eq_arm_base) + 4, 4, 698 iounmap(dev->clr_base);
715 dev->eq_regs.arbel.eq_arm);
716 mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
717 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
718 dev->clr_base);
719 return -ENOMEM; 699 return -ENOMEM;
720 } 700 }
721 } else { 701 } else {
@@ -731,8 +711,7 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
731 &dev->eq_regs.tavor.ecr_base)) { 711 &dev->eq_regs.tavor.ecr_base)) {
732 mthca_err(dev, "Couldn't map ecr register, " 712 mthca_err(dev, "Couldn't map ecr register, "
733 "aborting.\n"); 713 "aborting.\n");
734 mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, 714 iounmap(dev->clr_base);
735 dev->clr_base);
736 return -ENOMEM; 715 return -ENOMEM;
737 } 716 }
738 } 717 }
@@ -744,22 +723,12 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
744static void mthca_unmap_eq_regs(struct mthca_dev *dev) 723static void mthca_unmap_eq_regs(struct mthca_dev *dev)
745{ 724{
746 if (mthca_is_memfree(dev)) { 725 if (mthca_is_memfree(dev)) {
747 mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & 726 iounmap(dev->eq_regs.arbel.eq_set_ci_base);
748 dev->fw.arbel.eq_set_ci_base, 727 iounmap(dev->eq_regs.arbel.eq_arm);
749 MTHCA_EQ_SET_CI_SIZE, 728 iounmap(dev->clr_base);
750 dev->eq_regs.arbel.eq_set_ci_base);
751 mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
752 dev->fw.arbel.eq_arm_base) + 4, 4,
753 dev->eq_regs.arbel.eq_arm);
754 mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
755 dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
756 dev->clr_base);
757 } else { 729 } else {
758 mthca_unmap_reg(dev, MTHCA_ECR_BASE, 730 iounmap(dev->eq_regs.tavor.ecr_base);
759 MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE, 731 iounmap(dev->clr_base);
760 dev->eq_regs.tavor.ecr_base);
761 mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
762 dev->clr_base);
763 } 732 }
764} 733}
765 734
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index fb9f91b60f30..52f60f4eea00 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -921,58 +921,6 @@ err_uar_table_free:
921 return err; 921 return err;
922} 922}
923 923
924static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
925{
926 int err;
927
928 /*
929 * We can't just use pci_request_regions() because the MSI-X
930 * table is right in the middle of the first BAR. If we did
931 * pci_request_region and grab all of the first BAR, then
932 * setting up MSI-X would fail, since the PCI core wants to do
933 * request_mem_region on the MSI-X vector table.
934 *
935 * So just request what we need right now, and request any
936 * other regions we need when setting up EQs.
937 */
938 if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
939 MTHCA_HCR_SIZE, DRV_NAME))
940 return -EBUSY;
941
942 err = pci_request_region(pdev, 2, DRV_NAME);
943 if (err)
944 goto err_bar2_failed;
945
946 if (!ddr_hidden) {
947 err = pci_request_region(pdev, 4, DRV_NAME);
948 if (err)
949 goto err_bar4_failed;
950 }
951
952 return 0;
953
954err_bar4_failed:
955 pci_release_region(pdev, 2);
956
957err_bar2_failed:
958 release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
959 MTHCA_HCR_SIZE);
960
961 return err;
962}
963
964static void mthca_release_regions(struct pci_dev *pdev,
965 int ddr_hidden)
966{
967 if (!ddr_hidden)
968 pci_release_region(pdev, 4);
969
970 pci_release_region(pdev, 2);
971
972 release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
973 MTHCA_HCR_SIZE);
974}
975
976static int mthca_enable_msi_x(struct mthca_dev *mdev) 924static int mthca_enable_msi_x(struct mthca_dev *mdev)
977{ 925{
978 struct msix_entry entries[3]; 926 struct msix_entry entries[3];
@@ -1059,7 +1007,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
1059 if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) 1007 if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
1060 ddr_hidden = 1; 1008 ddr_hidden = 1;
1061 1009
1062 err = mthca_request_regions(pdev, ddr_hidden); 1010 err = pci_request_regions(pdev, DRV_NAME);
1063 if (err) { 1011 if (err) {
1064 dev_err(&pdev->dev, "Cannot obtain PCI resources, " 1012 dev_err(&pdev->dev, "Cannot obtain PCI resources, "
1065 "aborting.\n"); 1013 "aborting.\n");
@@ -1196,7 +1144,7 @@ err_free_dev:
1196 ib_dealloc_device(&mdev->ib_dev); 1144 ib_dealloc_device(&mdev->ib_dev);
1197 1145
1198err_free_res: 1146err_free_res:
1199 mthca_release_regions(pdev, ddr_hidden); 1147 pci_release_regions(pdev);
1200 1148
1201err_disable_pdev: 1149err_disable_pdev:
1202 pci_disable_device(pdev); 1150 pci_disable_device(pdev);
@@ -1240,8 +1188,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
1240 pci_disable_msix(pdev); 1188 pci_disable_msix(pdev);
1241 1189
1242 ib_dealloc_device(&mdev->ib_dev); 1190 ib_dealloc_device(&mdev->ib_dev);
1243 mthca_release_regions(pdev, mdev->mthca_flags & 1191 pci_release_regions(pdev);
1244 MTHCA_FLAG_DDR_HIDDEN);
1245 pci_disable_device(pdev); 1192 pci_disable_device(pdev);
1246 pci_set_drvdata(pdev, NULL); 1193 pci_set_drvdata(pdev, NULL);
1247 } 1194 }
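The mthca hunks above drop the piecemeal request_mem_region()/release_mem_region() calls (and the helper that existed only because the MSI-X table sits inside the first BAR) in favour of a single pci_request_regions() in the probe path plus plain ioremap()/iounmap() for the individual register windows. A rough sketch of the resulting pattern, using a placeholder driver name rather than mthca's own:

#include <linux/pci.h>
#include <linux/io.h>

#define SKETCH_DRV_NAME "example_hca"   /* placeholder, not DRV_NAME */

/* Claim all BARs once, then map only the windows that are needed. */
static int sketch_probe(struct pci_dev *pdev)
{
        void __iomem *regs;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = pci_request_regions(pdev, SKETCH_DRV_NAME);
        if (err)
                goto out_disable;

        regs = ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
        if (!regs) {
                err = -ENOMEM;
                goto out_release;
        }

        /* ... device initialisation would go here ... */

        iounmap(regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        return 0;

out_release:
        pci_release_regions(pdev);
out_disable:
        pci_disable_device(pdev);
        return err;
}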
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b0cab64e5e3d..a2b04d62b1a4 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -70,27 +70,31 @@ int interrupt_mod_interval = 0;
70 70
71/* Interoperability */ 71/* Interoperability */
72int mpa_version = 1; 72int mpa_version = 1;
73module_param(mpa_version, int, 0); 73module_param(mpa_version, int, 0644);
74MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)"); 74MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)");
75 75
76/* Interoperability */ 76/* Interoperability */
77int disable_mpa_crc = 0; 77int disable_mpa_crc = 0;
78module_param(disable_mpa_crc, int, 0); 78module_param(disable_mpa_crc, int, 0644);
79MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC"); 79MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
80 80
81unsigned int send_first = 0; 81unsigned int send_first = 0;
82module_param(send_first, int, 0); 82module_param(send_first, int, 0644);
83MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection"); 83MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
84 84
85 85
86unsigned int nes_drv_opt = 0; 86unsigned int nes_drv_opt = 0;
87module_param(nes_drv_opt, int, 0); 87module_param(nes_drv_opt, int, 0644);
88MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters"); 88MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
89 89
90unsigned int nes_debug_level = 0; 90unsigned int nes_debug_level = 0;
91module_param_named(debug_level, nes_debug_level, uint, 0644); 91module_param_named(debug_level, nes_debug_level, uint, 0644);
92MODULE_PARM_DESC(debug_level, "Enable debug output level"); 92MODULE_PARM_DESC(debug_level, "Enable debug output level");
93 93
94unsigned int wqm_quanta = 0x10000;
95module_param(wqm_quanta, int, 0644);
96MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
97
94LIST_HEAD(nes_adapter_list); 98LIST_HEAD(nes_adapter_list);
95static LIST_HEAD(nes_dev_list); 99static LIST_HEAD(nes_dev_list);
96 100
@@ -557,12 +561,32 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
557 goto bail5; 561 goto bail5;
558 } 562 }
559 nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; 563 nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
564 nesdev->nesadapter->wqm_quanta = wqm_quanta;
560 565
561 /* nesdev->base_doorbell_index = 566 /* nesdev->base_doorbell_index =
562 nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ 567 nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
563 nesdev->base_doorbell_index = 1; 568 nesdev->base_doorbell_index = 1;
564 nesdev->doorbell_start = nesdev->nesadapter->doorbell_start; 569 nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
565 nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count; 570 if (nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
571 switch (PCI_FUNC(nesdev->pcidev->devfn) %
572 nesdev->nesadapter->port_count) {
573 case 1:
574 nesdev->mac_index = 2;
575 break;
576 case 2:
577 nesdev->mac_index = 1;
578 break;
579 case 3:
580 nesdev->mac_index = 3;
581 break;
582 case 0:
583 default:
584 nesdev->mac_index = 0;
585 }
586 } else {
587 nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) %
588 nesdev->nesadapter->port_count;
589 }
566 590
567 tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev); 591 tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
568 592
@@ -581,7 +605,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
581 nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) | 605 nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
582 (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16)); 606 (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
583 if (PCI_FUNC(nesdev->pcidev->devfn) < 4) { 607 if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
584 nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24)); 608 nesdev->int_req |= (1 << (PCI_FUNC(nesdev->mac_index)+24));
585 } 609 }
586 610
587 /* TODO: This really should be the first driver to load, not function 0 */ 611 /* TODO: This really should be the first driver to load, not function 0 */
@@ -772,14 +796,14 @@ static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
772 796
773 list_for_each_entry(nesdev, &nes_dev_list, list) { 797 list_for_each_entry(nesdev, &nes_dev_list, list) {
774 if (i == ee_flsh_adapter) { 798 if (i == ee_flsh_adapter) {
775 devfn = nesdev->nesadapter->devfn; 799 devfn = nesdev->pcidev->devfn;
776 bus_number = nesdev->nesadapter->bus_number; 800 bus_number = nesdev->pcidev->bus->number;
777 break; 801 break;
778 } 802 }
779 i++; 803 i++;
780 } 804 }
781 805
782 return snprintf(buf, PAGE_SIZE, "%x:%x", bus_number, devfn); 806 return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn);
783} 807}
784 808
785static ssize_t nes_store_adapter(struct device_driver *ddp, 809static ssize_t nes_store_adapter(struct device_driver *ddp,
@@ -1050,6 +1074,55 @@ static ssize_t nes_store_idx_data(struct device_driver *ddp,
1050 return strnlen(buf, count); 1074 return strnlen(buf, count);
1051} 1075}
1052 1076
1077
1078/**
1079 * nes_show_wqm_quanta
1080 */
1081static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
1082{
1083 u32 wqm_quanta_value = 0xdead;
1084 u32 i = 0;
1085 struct nes_device *nesdev;
1086
1087 list_for_each_entry(nesdev, &nes_dev_list, list) {
1088 if (i == ee_flsh_adapter) {
1089 wqm_quanta_value = nesdev->nesadapter->wqm_quanta;
1090 break;
1091 }
1092 i++;
1093 }
1094
1095 return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value);
1096}
1097
1098
1099/**
1100 * nes_store_wqm_quanta
1101 */
1102static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
1103 const char *buf, size_t count)
1104{
1105 unsigned long wqm_quanta_value;
1106 u32 wqm_config1;
1107 u32 i = 0;
1108 struct nes_device *nesdev;
1109
1110 strict_strtoul(buf, 0, &wqm_quanta_value);
1111 list_for_each_entry(nesdev, &nes_dev_list, list) {
1112 if (i == ee_flsh_adapter) {
1113 nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
1114 wqm_config1 = nes_read_indexed(nesdev,
1115 NES_IDX_WQM_CONFIG1);
1116 nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1,
1117 ((wqm_quanta_value << 1) |
1118 (wqm_config1 & 0x00000001)));
1119 break;
1120 }
1121 i++;
1122 }
1123 return strnlen(buf, count);
1124}
1125
1053static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR, 1126static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
1054 nes_show_adapter, nes_store_adapter); 1127 nes_show_adapter, nes_store_adapter);
1055static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR, 1128static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
@@ -1068,6 +1141,8 @@ static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
1068 nes_show_idx_addr, nes_store_idx_addr); 1141 nes_show_idx_addr, nes_store_idx_addr);
1069static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR, 1142static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
1070 nes_show_idx_data, nes_store_idx_data); 1143 nes_show_idx_data, nes_store_idx_data);
1144static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR,
1145 nes_show_wqm_quanta, nes_store_wqm_quanta);
1071 1146
1072static int nes_create_driver_sysfs(struct pci_driver *drv) 1147static int nes_create_driver_sysfs(struct pci_driver *drv)
1073{ 1148{
@@ -1081,6 +1156,7 @@ static int nes_create_driver_sysfs(struct pci_driver *drv)
1081 error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data); 1156 error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
1082 error |= driver_create_file(&drv->driver, &driver_attr_idx_addr); 1157 error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
1083 error |= driver_create_file(&drv->driver, &driver_attr_idx_data); 1158 error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
1159 error |= driver_create_file(&drv->driver, &driver_attr_wqm_quanta);
1084 return error; 1160 return error;
1085} 1161}
1086 1162
@@ -1095,6 +1171,7 @@ static void nes_remove_driver_sysfs(struct pci_driver *drv)
1095 driver_remove_file(&drv->driver, &driver_attr_nonidx_data); 1171 driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
1096 driver_remove_file(&drv->driver, &driver_attr_idx_addr); 1172 driver_remove_file(&drv->driver, &driver_attr_idx_addr);
1097 driver_remove_file(&drv->driver, &driver_attr_idx_data); 1173 driver_remove_file(&drv->driver, &driver_attr_idx_data);
1174 driver_remove_file(&drv->driver, &driver_attr_wqm_quanta);
1098} 1175}
1099 1176
1100/** 1177/**
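Both the probe path and the new sysfs store method above write the quanta value into NES_IDX_WQM_CONFIG1 shifted left by one bit while preserving the register's low bit. A standalone check of that encoding (encode_wqm_config1() is a made-up helper; the register layout is taken from the expressions in this diff):

#include <assert.h>
#include <stdint.h>

/* WQM_CONFIG1 as used in this patch: bits 31:1 hold the quanta,
 * bit 0 is preserved across updates. */
static uint32_t encode_wqm_config1(uint32_t old_reg, uint32_t quanta)
{
        return (quanta << 1) | (old_reg & 0x00000001);
}

int main(void)
{
        /* The default wqm_quanta of 0x10000 reproduces the value that
         * used to be hard-coded into register 0x00005004 (0x00020001). */
        assert(encode_wqm_config1(0x00020001, 0x10000) == 0x00020001);
        return 0;
}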
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 8eb7ae96974d..1595dc7bba9d 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -169,7 +169,7 @@ extern int disable_mpa_crc;
169extern unsigned int send_first; 169extern unsigned int send_first;
170extern unsigned int nes_drv_opt; 170extern unsigned int nes_drv_opt;
171extern unsigned int nes_debug_level; 171extern unsigned int nes_debug_level;
172 172extern unsigned int wqm_quanta;
173extern struct list_head nes_adapter_list; 173extern struct list_head nes_adapter_list;
174 174
175extern atomic_t cm_connects; 175extern atomic_t cm_connects;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 499d3cf83e1f..2caf9da81ad5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -52,7 +52,7 @@
52#include <linux/random.h> 52#include <linux/random.h>
53#include <linux/list.h> 53#include <linux/list.h>
54#include <linux/threads.h> 54#include <linux/threads.h>
55 55#include <net/arp.h>
56#include <net/neighbour.h> 56#include <net/neighbour.h>
57#include <net/route.h> 57#include <net/route.h>
58#include <net/ip_fib.h> 58#include <net/ip_fib.h>
@@ -1019,23 +1019,43 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
1019 1019
1020 1020
1021/** 1021/**
1022 * nes_addr_send_arp 1022 * nes_addr_resolve_neigh
1023 */ 1023 */
1024static void nes_addr_send_arp(u32 dst_ip) 1024static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
1025{ 1025{
1026 struct rtable *rt; 1026 struct rtable *rt;
1027 struct flowi fl; 1027 struct flowi fl;
1028 struct neighbour *neigh;
1029 int rc = -1;
1030 DECLARE_MAC_BUF(mac);
1028 1031
1029 memset(&fl, 0, sizeof fl); 1032 memset(&fl, 0, sizeof fl);
1030 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1033 fl.nl_u.ip4_u.daddr = htonl(dst_ip);
1031 if (ip_route_output_key(&init_net, &rt, &fl)) { 1034 if (ip_route_output_key(&init_net, &rt, &fl)) {
1032 printk("%s: ip_route_output_key failed for 0x%08X\n", 1035 printk("%s: ip_route_output_key failed for 0x%08X\n",
1033 __func__, dst_ip); 1036 __func__, dst_ip);
1034 return; 1037 return rc;
1038 }
1039
1040 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
1041 if (neigh) {
1042 if (neigh->nud_state & NUD_VALID) {
1043 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
1044 " is %s, Gateway is 0x%08X \n", dst_ip,
1045 print_mac(mac, neigh->ha), ntohl(rt->rt_gateway));
1046 nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
1047 dst_ip, NES_ARP_ADD);
1048 rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
1049 NES_ARP_RESOLVE);
1050 }
1051 neigh_release(neigh);
1035 } 1052 }
1036 1053
1037 neigh_event_send(rt->u.dst.neighbour, NULL); 1054 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
1055 neigh_event_send(rt->u.dst.neighbour, NULL);
1056
1038 ip_rt_put(rt); 1057 ip_rt_put(rt);
1058 return rc;
1039} 1059}
1040 1060
1041 1061
@@ -1108,9 +1128,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1108 /* get the mac addr for the remote node */ 1128 /* get the mac addr for the remote node */
1109 arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); 1129 arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
1110 if (arpindex < 0) { 1130 if (arpindex < 0) {
1111 kfree(cm_node); 1131 arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr);
1112 nes_addr_send_arp(cm_info->rem_addr); 1132 if (arpindex < 0) {
1113 return NULL; 1133 kfree(cm_node);
1134 return NULL;
1135 }
1114 } 1136 }
1115 1137
1116 /* copy the mac addr to node context */ 1138 /* copy the mac addr to node context */
@@ -1826,7 +1848,7 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
1826/** 1848/**
1827 * mini_cm_connect - make a connection node with params 1849 * mini_cm_connect - make a connection node with params
1828 */ 1850 */
1829struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, 1851static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1830 struct nes_vnic *nesvnic, u16 private_data_len, 1852 struct nes_vnic *nesvnic, u16 private_data_len,
1831 void *private_data, struct nes_cm_info *cm_info) 1853 void *private_data, struct nes_cm_info *cm_info)
1832{ 1854{
@@ -2007,7 +2029,6 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
2007 ret = rem_ref_cm_node(cm_core, cm_node); 2029 ret = rem_ref_cm_node(cm_core, cm_node);
2008 break; 2030 break;
2009 } 2031 }
2010 cm_node->cm_id = NULL;
2011 return ret; 2032 return ret;
2012} 2033}
2013 2034
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 1513d4066f1b..7c49cc882d75 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -55,18 +55,19 @@ u32 int_mod_cq_depth_24;
55u32 int_mod_cq_depth_16; 55u32 int_mod_cq_depth_16;
56u32 int_mod_cq_depth_4; 56u32 int_mod_cq_depth_4;
57u32 int_mod_cq_depth_1; 57u32 int_mod_cq_depth_1;
58 58static const u8 nes_max_critical_error_count = 100;
59#include "nes_cm.h" 59#include "nes_cm.h"
60 60
61static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq); 61static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
62static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count); 62static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
63static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, 63static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
64 u8 OneG_Mode); 64 struct nes_adapter *nesadapter, u8 OneG_Mode);
65static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq); 65static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
66static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq); 66static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
67static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq); 67static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
68static void nes_process_iwarp_aeqe(struct nes_device *nesdev, 68static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
69 struct nes_hw_aeqe *aeqe); 69 struct nes_hw_aeqe *aeqe);
70static void process_critical_error(struct nes_device *nesdev);
70static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); 71static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
71static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); 72static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
72 73
@@ -222,11 +223,10 @@ static void nes_nic_tune_timer(struct nes_device *nesdev)
222 } 223 }
223 224
224 /* boundary checking */ 225 /* boundary checking */
225 if (shared_timer->timer_in_use > NES_NIC_FAST_TIMER_HIGH) 226 if (shared_timer->timer_in_use > shared_timer->threshold_high)
226 shared_timer->timer_in_use = NES_NIC_FAST_TIMER_HIGH; 227 shared_timer->timer_in_use = shared_timer->threshold_high;
227 else if (shared_timer->timer_in_use < NES_NIC_FAST_TIMER_LOW) { 228 else if (shared_timer->timer_in_use < shared_timer->threshold_low)
228 shared_timer->timer_in_use = NES_NIC_FAST_TIMER_LOW; 229 shared_timer->timer_in_use = shared_timer->threshold_low;
229 }
230 230
231 nesdev->currcq_count = 0; 231 nesdev->currcq_count = 0;
232 232
@@ -292,9 +292,6 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
292 292
293 if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0) 293 if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0)
294 return NULL; 294 return NULL;
295 if (nes_init_serdes(nesdev, hw_rev, port_count, OneG_Mode))
296 return NULL;
297 nes_init_csr_ne020(nesdev, hw_rev, port_count);
298 295
299 max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE); 296 max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE);
300 nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp); 297 nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp);
@@ -353,6 +350,22 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
353 nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n", 350 nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
354 nesadapter, (u32)sizeof(struct nes_adapter), adapter_size); 351 nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
355 352
353 if (nes_read_eeprom_values(nesdev, nesadapter)) {
354 printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
355 kfree(nesadapter);
356 return NULL;
357 }
358
359 if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
360 OneG_Mode)) {
361 kfree(nesadapter);
362 return NULL;
363 }
364 nes_init_csr_ne020(nesdev, hw_rev, port_count);
365
366 memset(nesadapter->pft_mcast_map, 255,
367 sizeof nesadapter->pft_mcast_map);
368
356 /* populate the new nesadapter */ 369 /* populate the new nesadapter */
357 nesadapter->devfn = nesdev->pcidev->devfn; 370 nesadapter->devfn = nesdev->pcidev->devfn;
358 nesadapter->bus_number = nesdev->pcidev->bus->number; 371 nesadapter->bus_number = nesdev->pcidev->bus->number;
@@ -468,20 +481,25 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
468 481
469 /* setup port configuration */ 482 /* setup port configuration */
470 if (nesadapter->port_count == 1) { 483 if (nesadapter->port_count == 1) {
471 u32temp = 0x00000000; 484 nesadapter->log_port = 0x00000000;
472 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) 485 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT)
473 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002); 486 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002);
474 else 487 else
475 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); 488 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
476 } else { 489 } else {
477 if (nesadapter->port_count == 2) 490 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
478 u32temp = 0x00000044; 491 nesadapter->log_port = 0x000000D8;
479 else 492 } else {
480 u32temp = 0x000000e4; 493 if (nesadapter->port_count == 2)
494 nesadapter->log_port = 0x00000044;
495 else
496 nesadapter->log_port = 0x000000e4;
497 }
481 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); 498 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
482 } 499 }
483 500
484 nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, u32temp); 501 nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT,
502 nesadapter->log_port);
485 nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n", 503 nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n",
486 nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT)); 504 nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT));
487 505
@@ -706,23 +724,43 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
706 * nes_init_serdes 724 * nes_init_serdes
707 */ 725 */
708static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, 726static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
709 u8 OneG_Mode) 727 struct nes_adapter *nesadapter, u8 OneG_Mode)
710{ 728{
711 int i; 729 int i;
712 u32 u32temp; 730 u32 u32temp;
731 u32 serdes_common_control;
713 732
714 if (hw_rev != NE020_REV) { 733 if (hw_rev != NE020_REV) {
715 /* init serdes 0 */ 734 /* init serdes 0 */
716 735
717 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); 736 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
718 if (!OneG_Mode) 737 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
738 serdes_common_control = nes_read_indexed(nesdev,
739 NES_IDX_ETH_SERDES_COMMON_CONTROL0);
740 serdes_common_control |= 0x000000100;
741 nes_write_indexed(nesdev,
742 NES_IDX_ETH_SERDES_COMMON_CONTROL0,
743 serdes_common_control);
744 } else if (!OneG_Mode) {
719 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); 745 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
720 if (port_count > 1) { 746 }
747 if (((port_count > 1) &&
748 (nesadapter->phy_type[0] != NES_PHY_TYPE_PUMA_1G)) ||
749 ((port_count > 2) &&
750 (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) {
721 /* init serdes 1 */ 751 /* init serdes 1 */
722 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); 752 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
723 if (!OneG_Mode) 753 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
754 serdes_common_control = nes_read_indexed(nesdev,
755 NES_IDX_ETH_SERDES_COMMON_CONTROL1);
756 serdes_common_control |= 0x000000100;
757 nes_write_indexed(nesdev,
758 NES_IDX_ETH_SERDES_COMMON_CONTROL1,
759 serdes_common_control);
760 } else if (!OneG_Mode) {
724 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); 761 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
725 } 762 }
763 }
726 } else { 764 } else {
727 /* init serdes 0 */ 765 /* init serdes 0 */
728 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); 766 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
@@ -826,7 +864,8 @@ static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_cou
826 864
827 nes_write_indexed(nesdev, 0x00005000, 0x00018000); 865 nes_write_indexed(nesdev, 0x00005000, 0x00018000);
828 /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */ 866 /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */
829 nes_write_indexed(nesdev, 0x00005004, 0x00020001); 867 nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1, (wqm_quanta << 1) |
868 0x00000001);
830 nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F); 869 nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F);
831 nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F); 870 nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F);
832 nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F); 871 nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F);
@@ -1226,6 +1265,7 @@ int nes_init_phy(struct nes_device *nesdev)
1226 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) { 1265 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
1227 printk(PFX "%s: Programming mdc config for 1G\n", __func__); 1266 printk(PFX "%s: Programming mdc config for 1G\n", __func__);
1228 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1267 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1268 tx_config &= 0xFFFFFFE3;
1229 tx_config |= 0x04; 1269 tx_config |= 0x04;
1230 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); 1270 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1231 } 1271 }
@@ -1291,7 +1331,8 @@ int nes_init_phy(struct nes_device *nesdev)
1291 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { 1331 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
1292 /* setup 10G MDIO operation */ 1332 /* setup 10G MDIO operation */
1293 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1333 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1294 tx_config |= 0x14; 1334 tx_config &= 0xFFFFFFE3;
1335 tx_config |= 0x15;
1295 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); 1336 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1296 } 1337 }
1297 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { 1338 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
@@ -1315,7 +1356,7 @@ int nes_init_phy(struct nes_device *nesdev)
1315 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008); 1356 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008);
1316 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098); 1357 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098);
1317 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00); 1358 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00);
1318 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0000); 1359 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0001);
1319 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528); 1360 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528);
1320 1361
1321 /* 1362 /*
@@ -1759,9 +1800,14 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
1759 */ 1800 */
1760void nes_destroy_nic_qp(struct nes_vnic *nesvnic) 1801void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
1761{ 1802{
1803 u64 u64temp;
1804 dma_addr_t bus_address;
1762 struct nes_device *nesdev = nesvnic->nesdev; 1805 struct nes_device *nesdev = nesvnic->nesdev;
1763 struct nes_hw_cqp_wqe *cqp_wqe; 1806 struct nes_hw_cqp_wqe *cqp_wqe;
1807 struct nes_hw_nic_sq_wqe *nic_sqe;
1764 struct nes_hw_nic_rq_wqe *nic_rqe; 1808 struct nes_hw_nic_rq_wqe *nic_rqe;
1809 __le16 *wqe_fragment_length;
1810 u16 wqe_fragment_index;
1765 u64 wqe_frag; 1811 u64 wqe_frag;
1766 u32 cqp_head; 1812 u32 cqp_head;
1767 unsigned long flags; 1813 unsigned long flags;
@@ -1770,14 +1816,69 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
1770 /* Free remaining NIC receive buffers */ 1816 /* Free remaining NIC receive buffers */
1771 while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) { 1817 while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
1772 nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail]; 1818 nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
1773 wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]); 1819 wqe_frag = (u64)le32_to_cpu(
1774 wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; 1820 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
1821 wqe_frag |= ((u64)le32_to_cpu(
1822 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX]))<<32;
1775 pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag, 1823 pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
1776 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); 1824 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
1777 dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]); 1825 dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
1778 nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1); 1826 nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
1779 } 1827 }
1780 1828
1829 /* Free remaining NIC transmit buffers */
1830 while (nesvnic->nic.sq_head != nesvnic->nic.sq_tail) {
1831 nic_sqe = &nesvnic->nic.sq_vbase[nesvnic->nic.sq_tail];
1832 wqe_fragment_index = 1;
1833 wqe_fragment_length = (__le16 *)
1834 &nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
1835 /* bump past the vlan tag */
1836 wqe_fragment_length++;
1837 if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
1838 u64temp = (u64)le32_to_cpu(
1839 nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
1840 wqe_fragment_index*2]);
1841 u64temp += ((u64)le32_to_cpu(
1842 nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX
1843 + wqe_fragment_index*2]))<<32;
1844 bus_address = (dma_addr_t)u64temp;
1845 if (test_and_clear_bit(nesvnic->nic.sq_tail,
1846 nesvnic->nic.first_frag_overflow)) {
1847 pci_unmap_single(nesdev->pcidev,
1848 bus_address,
1849 le16_to_cpu(wqe_fragment_length[
1850 wqe_fragment_index++]),
1851 PCI_DMA_TODEVICE);
1852 }
1853 for (; wqe_fragment_index < 5; wqe_fragment_index++) {
1854 if (wqe_fragment_length[wqe_fragment_index]) {
1855 u64temp = le32_to_cpu(
1856 nic_sqe->wqe_words[
1857 NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
1858 wqe_fragment_index*2]);
1859 u64temp += ((u64)le32_to_cpu(
1860 nic_sqe->wqe_words[
1861 NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+
1862 wqe_fragment_index*2]))<<32;
1863 bus_address = (dma_addr_t)u64temp;
1864 pci_unmap_page(nesdev->pcidev,
1865 bus_address,
1866 le16_to_cpu(
1867 wqe_fragment_length[
1868 wqe_fragment_index]),
1869 PCI_DMA_TODEVICE);
1870 } else
1871 break;
1872 }
1873 }
1874 if (nesvnic->nic.tx_skb[nesvnic->nic.sq_tail])
1875 dev_kfree_skb(
1876 nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]);
1877
1878 nesvnic->nic.sq_tail = (++nesvnic->nic.sq_tail)
1879 & (nesvnic->nic.sq_size - 1);
1880 }
1881
1781 spin_lock_irqsave(&nesdev->cqp.lock, flags); 1882 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1782 1883
1783 /* Destroy NIC QP */ 1884 /* Destroy NIC QP */
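
The new transmit-buffer cleanup loop in nes_destroy_nic_qp() rebuilds each fragment's 64-bit bus address from the two little-endian 32-bit WQE words before unmapping it (fragment 0 is only unmapped when its bit is set in first_frag_overflow). A stand-alone sketch of just the address reassembly, with invented values:

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit DMA address from the low/high 32-bit WQE words. */
static uint64_t frag_address(uint32_t low_word, uint32_t high_word)
{
	return (uint64_t)low_word | ((uint64_t)high_word << 32);
}

int main(void)
{
	/* hypothetical fragment address split into two halves */
	uint32_t lo = 0x23456000, hi = 0x00000001;

	printf("bus address = 0x%016llx\n",
	       (unsigned long long)frag_address(lo, hi));	/* 0x0000000123456000 */
	return 0;
}
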
@@ -1894,7 +1995,30 @@ int nes_napi_isr(struct nes_device *nesdev)
1894 } 1995 }
1895} 1996}
1896 1997
1897 1998static void process_critical_error(struct nes_device *nesdev)
1999{
2000 u32 debug_error;
2001 u32 nes_idx_debug_error_masks0 = 0;
2002 u16 error_module = 0;
2003
2004 debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
2005 printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
2006 (u16)debug_error);
2007 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
2008 0x01010000 | (debug_error & 0x0000ffff));
2009 if (crit_err_count++ > 10)
2010 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
2011 error_module = (u16) (debug_error & 0x1F00) >> 8;
2012 if (++nesdev->nesadapter->crit_error_count[error_module-1] >=
2013 nes_max_critical_error_count) {
2014 printk(KERN_ERR PFX "Masking off critical error for module "
2015 "0x%02X\n", (u16)error_module);
2016 nes_idx_debug_error_masks0 = nes_read_indexed(nesdev,
2017 NES_IDX_DEBUG_ERROR_MASKS0);
2018 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0,
2019 nes_idx_debug_error_masks0 | (1 << error_module));
2020 }
2021}
1898/** 2022/**
1899 * nes_dpc 2023 * nes_dpc
1900 */ 2024 */
@@ -1909,7 +2033,6 @@ void nes_dpc(unsigned long param)
1909 u32 timer_stat; 2033 u32 timer_stat;
1910 u32 temp_int_stat; 2034 u32 temp_int_stat;
1911 u32 intf_int_stat; 2035 u32 intf_int_stat;
1912 u32 debug_error;
1913 u32 processed_intf_int = 0; 2036 u32 processed_intf_int = 0;
1914 u16 processed_timer_int = 0; 2037 u16 processed_timer_int = 0;
1915 u16 completion_ints = 0; 2038 u16 completion_ints = 0;
@@ -1987,14 +2110,7 @@ void nes_dpc(unsigned long param)
1987 intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT); 2110 intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
1988 intf_int_stat &= nesdev->intf_int_req; 2111 intf_int_stat &= nesdev->intf_int_req;
1989 if (NES_INTF_INT_CRITERR & intf_int_stat) { 2112 if (NES_INTF_INT_CRITERR & intf_int_stat) {
1990 debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS); 2113 process_critical_error(nesdev);
1991 printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
1992 (u16)debug_error);
1993 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
1994 0x01010000 | (debug_error & 0x0000ffff));
1995 /* BUG(); */
1996 if (crit_err_count++ > 10)
1997 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
1998 } 2114 }
1999 if (NES_INTF_INT_PCIERR & intf_int_stat) { 2115 if (NES_INTF_INT_PCIERR & intf_int_stat) {
2000 printk(KERN_ERR PFX "PCI Error reported by device!!!\n"); 2116 printk(KERN_ERR PFX "PCI Error reported by device!!!\n");
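
process_critical_error() now tracks faults per module: bits 12:8 of the debug status identify the reporting module, and once a module's count reaches nes_max_critical_error_count its bit is set in NES_IDX_DEBUG_ERROR_MASKS0. A user-space model of that bookkeeping; the threshold value and the plain variables standing in for register accesses are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define MAX_CRIT_ERR_COUNT 100	/* assumed value of nes_max_critical_error_count */

static uint8_t crit_error_count[32];
static uint32_t error_masks0;	/* stands in for NES_IDX_DEBUG_ERROR_MASKS0 */

static void handle_critical_error(uint32_t debug_error)
{
	/* bits 12:8 of the debug status identify the reporting module */
	uint16_t error_module = (uint16_t)((debug_error & 0x1F00) >> 8);

	if (++crit_error_count[error_module - 1] >= MAX_CRIT_ERR_COUNT) {
		printf("masking off module 0x%02X\n", error_module);
		error_masks0 |= 1u << error_module;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_CRIT_ERR_COUNT; i++)
		handle_critical_error(0x0300);	/* module 3 keeps faulting */
	printf("masks0 = 0x%08X\n", error_masks0);	/* bit 3 is now set */
	return 0;
}
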
@@ -2258,7 +2374,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2258 spin_unlock_irqrestore(&nesadapter->phy_lock, flags); 2374 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2259 } 2375 }
2260 /* read the PHY interrupt status register */ 2376 /* read the PHY interrupt status register */
2261 if (nesadapter->OneG_Mode) { 2377 if ((nesadapter->OneG_Mode) &&
2378 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
2262 do { 2379 do {
2263 nes_read_1G_phy_reg(nesdev, 0x1a, 2380 nes_read_1G_phy_reg(nesdev, 0x1a,
2264 nesadapter->phy_index[mac_index], &phy_data); 2381 nesadapter->phy_index[mac_index], &phy_data);
@@ -3077,6 +3194,22 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3077 nes_cm_disconn(nesqp); 3194 nes_cm_disconn(nesqp);
3078 break; 3195 break;
3079 /* TODO: additional AEs need to be here */ 3196 /* TODO: additional AEs need to be here */
3197 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3198 nesqp = *((struct nes_qp **)&context);
3199 spin_lock_irqsave(&nesqp->lock, flags);
3200 nesqp->hw_iwarp_state = iwarp_state;
3201 nesqp->hw_tcp_state = tcp_state;
3202 nesqp->last_aeq = async_event_id;
3203 spin_unlock_irqrestore(&nesqp->lock, flags);
3204 if (nesqp->ibqp.event_handler) {
3205 ibevent.device = nesqp->ibqp.device;
3206 ibevent.element.qp = &nesqp->ibqp;
3207 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3208 nesqp->ibqp.event_handler(&ibevent,
3209 nesqp->ibqp.qp_context);
3210 }
3211 nes_cm_disconn(nesqp);
3212 break;
3080 default: 3213 default:
3081 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", 3214 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
3082 async_event_id); 3215 async_event_id);
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 7b81e0ae0076..610b9d859597 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -156,6 +156,7 @@ enum indexed_regs {
156 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004, 156 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
157 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008, 157 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
158 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c, 158 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
159 NES_IDX_WQM_CONFIG1 = 0x5004,
159 NES_IDX_CM_CONFIG = 0x5100, 160 NES_IDX_CM_CONFIG = 0x5100,
160 NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000, 161 NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
161 NES_IDX_NIC_PHYPORT_TO_USW = 0x6008, 162 NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
@@ -967,6 +968,7 @@ struct nes_arp_entry {
967#define DEFAULT_JUMBO_NES_QL_TARGET 40 968#define DEFAULT_JUMBO_NES_QL_TARGET 40
968#define DEFAULT_JUMBO_NES_QL_HIGH 128 969#define DEFAULT_JUMBO_NES_QL_HIGH 128
969#define NES_NIC_CQ_DOWNWARD_TREND 16 970#define NES_NIC_CQ_DOWNWARD_TREND 16
971#define NES_PFT_SIZE 48
970 972
971struct nes_hw_tune_timer { 973struct nes_hw_tune_timer {
972 /* u16 cq_count; */ 974 /* u16 cq_count; */
@@ -1079,6 +1081,7 @@ struct nes_adapter {
1079 u32 et_rx_max_coalesced_frames_high; 1081 u32 et_rx_max_coalesced_frames_high;
1080 u32 et_rate_sample_interval; 1082 u32 et_rate_sample_interval;
1081 u32 timer_int_limit; 1083 u32 timer_int_limit;
1084 u32 wqm_quanta;
1082 1085
1083 /* Adapter base MAC address */ 1086 /* Adapter base MAC address */
1084 u32 mac_addr_low; 1087 u32 mac_addr_low;
@@ -1094,12 +1097,14 @@ struct nes_adapter {
1094 u16 pd_config_base[4]; 1097 u16 pd_config_base[4];
1095 1098
1096 u16 link_interrupt_count[4]; 1099 u16 link_interrupt_count[4];
1100 u8 crit_error_count[32];
1097 1101
1098 /* the phy index for each port */ 1102 /* the phy index for each port */
1099 u8 phy_index[4]; 1103 u8 phy_index[4];
1100 u8 mac_sw_state[4]; 1104 u8 mac_sw_state[4];
1101 u8 mac_link_down[4]; 1105 u8 mac_link_down[4];
1102 u8 phy_type[4]; 1106 u8 phy_type[4];
1107 u8 log_port;
1103 1108
1104 /* PCI information */ 1109 /* PCI information */
1105 unsigned int devfn; 1110 unsigned int devfn;
@@ -1113,6 +1118,7 @@ struct nes_adapter {
1113 u8 virtwq; 1118 u8 virtwq;
1114 u8 et_use_adaptive_rx_coalesce; 1119 u8 et_use_adaptive_rx_coalesce;
1115 u8 adapter_fcn_count; 1120 u8 adapter_fcn_count;
1121 u8 pft_mcast_map[NES_PFT_SIZE];
1116}; 1122};
1117 1123
1118struct nes_pbl { 1124struct nes_pbl {
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 1b0938c87774..730358637bb6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -91,6 +91,7 @@ static struct nic_qp_map *nic_qp_mapping_per_function[] = {
91static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 91static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
92 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; 92 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
93static int debug = -1; 93static int debug = -1;
94static int nics_per_function = 1;
94 95
95/** 96/**
96 * nes_netdev_poll 97 * nes_netdev_poll
@@ -201,7 +202,8 @@ static int nes_netdev_open(struct net_device *netdev)
201 nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW" 202 nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
202 " (Addr:%08X) = %08X, HIGH = %08X.\n", 203 " (Addr:%08X) = %08X, HIGH = %08X.\n",
203 i, nesvnic->qp_nic_index[i], 204 i, nesvnic->qp_nic_index[i],
204 NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8), 205 NES_IDX_PERFECT_FILTER_LOW+
206 (nesvnic->qp_nic_index[i] * 8),
205 macaddr_low, 207 macaddr_low,
206 (u32)macaddr_high | NES_MAC_ADDR_VALID | 208 (u32)macaddr_high | NES_MAC_ADDR_VALID |
207 ((((u32)nesvnic->nic_index) << 16))); 209 ((((u32)nesvnic->nic_index) << 16)));
@@ -272,14 +274,18 @@ static int nes_netdev_stop(struct net_device *netdev)
272 break; 274 break;
273 } 275 }
274 276
275 if (first_nesvnic->netdev_open == 0) 277 if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) &&
278 (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) !=
279 PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
280 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+
281 (0x200*nesdev->mac_index), 0xffffffff);
282 nes_write_indexed(first_nesvnic->nesdev,
283 NES_IDX_MAC_INT_MASK+
284 (0x200*first_nesvnic->nesdev->mac_index),
285 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
286 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
287 } else {
276 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff); 288 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
277 else if ((first_nesvnic != nesvnic) &&
278 (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) != PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
279 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index), 0xffffffff);
280 nes_write_indexed(first_nesvnic->nesdev, NES_IDX_MAC_INT_MASK + (0x200 * first_nesvnic->nesdev->mac_index),
281 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
282 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
283 } 289 }
284 290
285 nic_active_mask = ~((u32)(1 << nesvnic->nic_index)); 291 nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
@@ -437,7 +443,7 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
437 struct nes_hw_nic_sq_wqe *nic_sqe; 443 struct nes_hw_nic_sq_wqe *nic_sqe;
438 struct tcphdr *tcph; 444 struct tcphdr *tcph;
439 /* struct udphdr *udph; */ 445 /* struct udphdr *udph; */
440#define NES_MAX_TSO_FRAGS 18 446#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
441 /* 64K segment plus overflow on each side */ 447 /* 64K segment plus overflow on each side */
442 dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS]; 448 dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
443 dma_addr_t bus_address; 449 dma_addr_t bus_address;
@@ -605,6 +611,8 @@ tso_sq_no_longer_full:
605 wqe_fragment_length[wqe_fragment_index] = 0; 611 wqe_fragment_length[wqe_fragment_index] = 0;
606 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, 612 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
607 bus_address); 613 bus_address);
614 tso_wqe_length += skb_headlen(skb) -
615 original_first_length;
608 } 616 }
609 while (wqe_fragment_index < 5) { 617 while (wqe_fragment_index < 5) {
610 wqe_fragment_length[wqe_fragment_index] = 618 wqe_fragment_length[wqe_fragment_index] =
@@ -827,6 +835,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
827{ 835{
828 struct nes_vnic *nesvnic = netdev_priv(netdev); 836 struct nes_vnic *nesvnic = netdev_priv(netdev);
829 struct nes_device *nesdev = nesvnic->nesdev; 837 struct nes_device *nesdev = nesvnic->nesdev;
838 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
830 struct dev_mc_list *multicast_addr; 839 struct dev_mc_list *multicast_addr;
831 u32 nic_active_bit; 840 u32 nic_active_bit;
832 u32 nic_active; 841 u32 nic_active;
@@ -836,7 +845,12 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
836 u8 mc_all_on = 0; 845 u8 mc_all_on = 0;
837 u8 mc_index; 846 u8 mc_index;
838 int mc_nic_index = -1; 847 int mc_nic_index = -1;
848 u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count *
849 nics_per_function, 4);
850 u8 max_pft_entries_avaiable = NES_PFT_SIZE - pft_entries_preallocated;
851 unsigned long flags;
839 852
853 spin_lock_irqsave(&nesadapter->resource_lock, flags);
840 nic_active_bit = 1 << nesvnic->nic_index; 854 nic_active_bit = 1 << nesvnic->nic_index;
841 855
842 if (netdev->flags & IFF_PROMISC) { 856 if (netdev->flags & IFF_PROMISC) {
@@ -847,7 +861,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
847 nic_active |= nic_active_bit; 861 nic_active |= nic_active_bit;
848 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); 862 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
849 mc_all_on = 1; 863 mc_all_on = 1;
850 } else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) || 864 } else if ((netdev->flags & IFF_ALLMULTI) ||
851 (nesvnic->nic_index > 3)) { 865 (nesvnic->nic_index > 3)) {
852 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); 866 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
853 nic_active |= nic_active_bit; 867 nic_active |= nic_active_bit;
@@ -866,17 +880,34 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
866 } 880 }
867 881
868 nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n", 882 nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
869 netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0, 883 netdev->mc_count, !!(netdev->flags & IFF_PROMISC),
870 (netdev->flags & IFF_ALLMULTI)?1:0); 884 !!(netdev->flags & IFF_ALLMULTI));
871 if (!mc_all_on) { 885 if (!mc_all_on) {
872 multicast_addr = netdev->mc_list; 886 multicast_addr = netdev->mc_list;
873 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80; 887 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
874 perfect_filter_register_address += nesvnic->nic_index*0x40; 888 pft_entries_preallocated * 0x8;
875 for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) { 889 for (mc_index = 0; mc_index < max_pft_entries_avaiable;
876 while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0)) 890 mc_index++) {
891 while (multicast_addr && nesvnic->mcrq_mcast_filter &&
892 ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
893 multicast_addr->dmi_addr)) == 0)) {
877 multicast_addr = multicast_addr->next; 894 multicast_addr = multicast_addr->next;
895 }
878 if (mc_nic_index < 0) 896 if (mc_nic_index < 0)
879 mc_nic_index = nesvnic->nic_index; 897 mc_nic_index = nesvnic->nic_index;
898 while (nesadapter->pft_mcast_map[mc_index] < 16 &&
899 nesadapter->pft_mcast_map[mc_index] !=
900 nesvnic->nic_index &&
901 mc_index < max_pft_entries_avaiable) {
902 nes_debug(NES_DBG_NIC_RX,
903 "mc_index=%d skipping nic_index=%d,\
904 used for=%d \n", mc_index,
905 nesvnic->nic_index,
906 nesadapter->pft_mcast_map[mc_index]);
907 mc_index++;
908 }
909 if (mc_index >= max_pft_entries_avaiable)
910 break;
880 if (multicast_addr) { 911 if (multicast_addr) {
881 DECLARE_MAC_BUF(mac); 912 DECLARE_MAC_BUF(mac);
882 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n", 913 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
@@ -897,15 +928,33 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
897 (u32)macaddr_high | NES_MAC_ADDR_VALID | 928 (u32)macaddr_high | NES_MAC_ADDR_VALID |
898 ((((u32)(1<<mc_nic_index)) << 16))); 929 ((((u32)(1<<mc_nic_index)) << 16)));
899 multicast_addr = multicast_addr->next; 930 multicast_addr = multicast_addr->next;
931 nesadapter->pft_mcast_map[mc_index] =
932 nesvnic->nic_index;
900 } else { 933 } else {
901 nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n", 934 nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
902 perfect_filter_register_address+(mc_index * 8)); 935 perfect_filter_register_address+(mc_index * 8));
903 nes_write_indexed(nesdev, 936 nes_write_indexed(nesdev,
904 perfect_filter_register_address+4+(mc_index * 8), 937 perfect_filter_register_address+4+(mc_index * 8),
905 0); 938 0);
939 nesadapter->pft_mcast_map[mc_index] = 255;
906 } 940 }
907 } 941 }
942 /* PFT is not large enough */
943 if (multicast_addr && multicast_addr->next) {
944 nic_active = nes_read_indexed(nesdev,
945 NES_IDX_NIC_MULTICAST_ALL);
946 nic_active |= nic_active_bit;
947 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
948 nic_active);
949 nic_active = nes_read_indexed(nesdev,
950 NES_IDX_NIC_UNICAST_ALL);
951 nic_active &= ~nic_active_bit;
952 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL,
953 nic_active);
954 }
908 } 955 }
956
957 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
909} 958}
910 959
911 960
@@ -918,6 +967,10 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
918 struct nes_device *nesdev = nesvnic->nesdev; 967 struct nes_device *nesdev = nesvnic->nesdev;
919 int ret = 0; 968 int ret = 0;
920 u8 jumbomode = 0; 969 u8 jumbomode = 0;
970 u32 nic_active;
971 u32 nic_active_bit;
972 u32 uc_all_active;
973 u32 mc_all_active;
921 974
922 if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu)) 975 if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
923 return -EINVAL; 976 return -EINVAL;
@@ -931,8 +984,24 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
931 nes_nic_init_timer_defaults(nesdev, jumbomode); 984 nes_nic_init_timer_defaults(nesdev, jumbomode);
932 985
933 if (netif_running(netdev)) { 986 if (netif_running(netdev)) {
987 nic_active_bit = 1 << nesvnic->nic_index;
988 mc_all_active = nes_read_indexed(nesdev,
989 NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
990 uc_all_active = nes_read_indexed(nesdev,
991 NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;
992
934 nes_netdev_stop(netdev); 993 nes_netdev_stop(netdev);
935 nes_netdev_open(netdev); 994 nes_netdev_open(netdev);
995
996 nic_active = nes_read_indexed(nesdev,
997 NES_IDX_NIC_MULTICAST_ALL);
998 nic_active |= mc_all_active;
999 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
1000 nic_active);
1001
1002 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
1003 nic_active |= uc_all_active;
1004 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
936 } 1005 }
937 1006
938 return ret; 1007 return ret;
@@ -1208,10 +1277,12 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
1208 struct ethtool_drvinfo *drvinfo) 1277 struct ethtool_drvinfo *drvinfo)
1209{ 1278{
1210 struct nes_vnic *nesvnic = netdev_priv(netdev); 1279 struct nes_vnic *nesvnic = netdev_priv(netdev);
1280 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
1211 1281
1212 strcpy(drvinfo->driver, DRV_NAME); 1282 strcpy(drvinfo->driver, DRV_NAME);
1213 strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev)); 1283 strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
1214 strcpy(drvinfo->fw_version, "TBD"); 1284 sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
1285 nesadapter->firmware_version & 0x000000ff);
1215 strcpy(drvinfo->version, DRV_VERSION); 1286 strcpy(drvinfo->version, DRV_VERSION);
1216 drvinfo->n_stats = nes_netdev_get_stats_count(netdev); 1287 drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
1217 drvinfo->testinfo_len = 0; 1288 drvinfo->testinfo_len = 0;
@@ -1587,7 +1658,9 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1587 nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id, 1658 nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
1588 nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index); 1659 nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
1589 1660
1590 if (nesvnic->nesdev->nesadapter->port_count == 1) { 1661 if (nesvnic->nesdev->nesadapter->port_count == 1 &&
1662 nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) {
1663
1591 nesvnic->qp_nic_index[0] = nesvnic->nic_index; 1664 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1592 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1; 1665 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
1593 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) { 1666 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
@@ -1598,11 +1671,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1598 nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3; 1671 nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
1599 } 1672 }
1600 } else { 1673 } else {
1601 if (nesvnic->nesdev->nesadapter->port_count == 2) { 1674 if (nesvnic->nesdev->nesadapter->port_count == 2 ||
1602 nesvnic->qp_nic_index[0] = nesvnic->nic_index; 1675 (nesvnic->nesdev->nesadapter->port_count == 1 &&
1603 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2; 1676 nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) {
1604 nesvnic->qp_nic_index[2] = 0xf; 1677 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1605 nesvnic->qp_nic_index[3] = 0xf; 1678 nesvnic->qp_nic_index[1] = nesvnic->nic_index
1679 + 2;
1680 nesvnic->qp_nic_index[2] = 0xf;
1681 nesvnic->qp_nic_index[3] = 0xf;
1606 } else { 1682 } else {
1607 nesvnic->qp_nic_index[0] = nesvnic->nic_index; 1683 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1608 nesvnic->qp_nic_index[1] = 0xf; 1684 nesvnic->qp_nic_index[1] = 0xf;
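
The multicast-list changes share the 48-entry perfect filter table between functions: pft_mcast_map[] records which nic_index owns each slot (255 meaning free), entries owned by another NIC are skipped, and the driver falls back to multicast-all when the table runs out. A simplified user-space model of the slot-claiming walk (the real code also bounds the owner check at 16 and offsets past the preallocated unicast entries):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NES_PFT_SIZE 48

static uint8_t pft_mcast_map[NES_PFT_SIZE];

/* Return the slot claimed for @nic_index, or -1 if the table is exhausted. */
static int claim_pft_slot(unsigned int limit, uint8_t nic_index)
{
	unsigned int mc_index;

	for (mc_index = 0; mc_index < limit; mc_index++) {
		if (pft_mcast_map[mc_index] == 255 ||
		    pft_mcast_map[mc_index] == nic_index) {
			pft_mcast_map[mc_index] = nic_index;
			return (int)mc_index;
		}
	}
	return -1;	/* caller would fall back to multicast-all mode */
}

int main(void)
{
	unsigned int preallocated = 4;	/* assumed adapter_fcn_count * nics_per_function */
	unsigned int avail = NES_PFT_SIZE - preallocated;

	memset(pft_mcast_map, 255, sizeof(pft_mcast_map));
	printf("nic 0 -> slot %d\n", claim_pft_slot(avail, 0));	/* takes slot 0 */
	printf("nic 1 -> slot %d\n", claim_pft_slot(avail, 1));	/* skips slot 0, takes 1 */
	return 0;
}
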
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index d79942e84979..932e56fcf774 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1467,7 +1467,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1467 default: 1467 default:
1468 nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type); 1468 nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
1469 return ERR_PTR(-EINVAL); 1469 return ERR_PTR(-EINVAL);
1470 break;
1471 } 1470 }
1472 1471
1473 /* update the QP table */ 1472 /* update the QP table */
@@ -2498,7 +2497,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2498 nes_debug(NES_DBG_MR, "Leaving, ibmr=%p", ibmr); 2497 nes_debug(NES_DBG_MR, "Leaving, ibmr=%p", ibmr);
2499 2498
2500 return ibmr; 2499 return ibmr;
2501 break;
2502 case IWNES_MEMREG_TYPE_QP: 2500 case IWNES_MEMREG_TYPE_QP:
2503 case IWNES_MEMREG_TYPE_CQ: 2501 case IWNES_MEMREG_TYPE_CQ:
2504 nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL); 2502 nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
@@ -2572,7 +2570,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2572 nesmr->ibmr.lkey = -1; 2570 nesmr->ibmr.lkey = -1;
2573 nesmr->mode = req.reg_type; 2571 nesmr->mode = req.reg_type;
2574 return &nesmr->ibmr; 2572 return &nesmr->ibmr;
2575 break;
2576 } 2573 }
2577 2574
2578 return ERR_PTR(-ENOSYS); 2575 return ERR_PTR(-ENOSYS);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 05eb41b8ab63..68ba5c3482e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -268,10 +268,9 @@ struct ipoib_lro {
268}; 268};
269 269
270/* 270/*
271 * Device private locking: tx_lock protects members used in TX fast 271 * Device private locking: network stack tx_lock protects members used
272 * path (and we use LLTX so upper layers don't do extra locking). 272 * in TX fast path, lock protects everything else. lock nests inside
273 * lock protects everything else. lock nests inside of tx_lock (ie 273 * of tx_lock (ie tx_lock must be acquired first if needed).
274 * tx_lock must be acquired first if needed).
275 */ 274 */
276struct ipoib_dev_priv { 275struct ipoib_dev_priv {
277 spinlock_t lock; 276 spinlock_t lock;
@@ -320,7 +319,6 @@ struct ipoib_dev_priv {
320 319
321 struct ipoib_rx_buf *rx_ring; 320 struct ipoib_rx_buf *rx_ring;
322 321
323 spinlock_t tx_lock;
324 struct ipoib_tx_buf *tx_ring; 322 struct ipoib_tx_buf *tx_ring;
325 unsigned tx_head; 323 unsigned tx_head;
326 unsigned tx_tail; 324 unsigned tx_tail;
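
With the driver-private tx_lock gone, the comment above states the ordering rule the rest of this series follows: the network stack's TX lock is the outer lock and priv->lock nests inside it. A pthreads sketch of that nesting, not IPoIB code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;   /* netif_tx_lock_bh() stand-in */
static pthread_mutex_t priv_lock = PTHREAD_MUTEX_INITIALIZER; /* priv->lock stand-in */

static void update_tx_and_private_state(void)
{
	pthread_mutex_lock(&tx_lock);	/* outer lock first ... */
	pthread_mutex_lock(&priv_lock);	/* ... inner lock nests inside it */

	puts("both locks held: safe to touch TX state and private state");

	pthread_mutex_unlock(&priv_lock);
	pthread_mutex_unlock(&tx_lock);	/* release in reverse order */
}

int main(void)
{
	update_tx_and_private_state();
	return 0;
}
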
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 341ffedafed6..7b14c2c39500 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -786,7 +786,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
786 786
787 dev_kfree_skb_any(tx_req->skb); 787 dev_kfree_skb_any(tx_req->skb);
788 788
789 spin_lock_irqsave(&priv->tx_lock, flags); 789 netif_tx_lock(dev);
790
790 ++tx->tx_tail; 791 ++tx->tx_tail;
791 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && 792 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
792 netif_queue_stopped(dev) && 793 netif_queue_stopped(dev) &&
@@ -801,7 +802,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
801 "(status=%d, wrid=%d vend_err %x)\n", 802 "(status=%d, wrid=%d vend_err %x)\n",
802 wc->status, wr_id, wc->vendor_err); 803 wc->status, wr_id, wc->vendor_err);
803 804
804 spin_lock(&priv->lock); 805 spin_lock_irqsave(&priv->lock, flags);
805 neigh = tx->neigh; 806 neigh = tx->neigh;
806 807
807 if (neigh) { 808 if (neigh) {
@@ -821,10 +822,10 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
821 822
822 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); 823 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
823 824
824 spin_unlock(&priv->lock); 825 spin_unlock_irqrestore(&priv->lock, flags);
825 } 826 }
826 827
827 spin_unlock_irqrestore(&priv->tx_lock, flags); 828 netif_tx_unlock(dev);
828} 829}
829 830
830int ipoib_cm_dev_open(struct net_device *dev) 831int ipoib_cm_dev_open(struct net_device *dev)
@@ -1149,7 +1150,6 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1149{ 1150{
1150 struct ipoib_dev_priv *priv = netdev_priv(p->dev); 1151 struct ipoib_dev_priv *priv = netdev_priv(p->dev);
1151 struct ipoib_cm_tx_buf *tx_req; 1152 struct ipoib_cm_tx_buf *tx_req;
1152 unsigned long flags;
1153 unsigned long begin; 1153 unsigned long begin;
1154 1154
1155 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", 1155 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1180,12 +1180,12 @@ timeout:
1180 DMA_TO_DEVICE); 1180 DMA_TO_DEVICE);
1181 dev_kfree_skb_any(tx_req->skb); 1181 dev_kfree_skb_any(tx_req->skb);
1182 ++p->tx_tail; 1182 ++p->tx_tail;
1183 spin_lock_irqsave(&priv->tx_lock, flags); 1183 netif_tx_lock_bh(p->dev);
1184 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && 1184 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
1185 netif_queue_stopped(p->dev) && 1185 netif_queue_stopped(p->dev) &&
1186 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 1186 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1187 netif_wake_queue(p->dev); 1187 netif_wake_queue(p->dev);
1188 spin_unlock_irqrestore(&priv->tx_lock, flags); 1188 netif_tx_unlock_bh(p->dev);
1189 } 1189 }
1190 1190
1191 if (p->qp) 1191 if (p->qp)
@@ -1202,6 +1202,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1202 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); 1202 struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
1203 struct net_device *dev = priv->dev; 1203 struct net_device *dev = priv->dev;
1204 struct ipoib_neigh *neigh; 1204 struct ipoib_neigh *neigh;
1205 unsigned long flags;
1205 int ret; 1206 int ret;
1206 1207
1207 switch (event->event) { 1208 switch (event->event) {
@@ -1220,8 +1221,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1220 case IB_CM_REJ_RECEIVED: 1221 case IB_CM_REJ_RECEIVED:
1221 case IB_CM_TIMEWAIT_EXIT: 1222 case IB_CM_TIMEWAIT_EXIT:
1222 ipoib_dbg(priv, "CM error %d.\n", event->event); 1223 ipoib_dbg(priv, "CM error %d.\n", event->event);
1223 spin_lock_irq(&priv->tx_lock); 1224 netif_tx_lock_bh(dev);
1224 spin_lock(&priv->lock); 1225 spin_lock_irqsave(&priv->lock, flags);
1225 neigh = tx->neigh; 1226 neigh = tx->neigh;
1226 1227
1227 if (neigh) { 1228 if (neigh) {
@@ -1239,8 +1240,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1239 queue_work(ipoib_workqueue, &priv->cm.reap_task); 1240 queue_work(ipoib_workqueue, &priv->cm.reap_task);
1240 } 1241 }
1241 1242
1242 spin_unlock(&priv->lock); 1243 spin_unlock_irqrestore(&priv->lock, flags);
1243 spin_unlock_irq(&priv->tx_lock); 1244 netif_tx_unlock_bh(dev);
1244 break; 1245 break;
1245 default: 1246 default:
1246 break; 1247 break;
@@ -1294,19 +1295,24 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1294 struct ib_sa_path_rec pathrec; 1295 struct ib_sa_path_rec pathrec;
1295 u32 qpn; 1296 u32 qpn;
1296 1297
1297 spin_lock_irqsave(&priv->tx_lock, flags); 1298 netif_tx_lock_bh(dev);
1298 spin_lock(&priv->lock); 1299 spin_lock_irqsave(&priv->lock, flags);
1300
1299 while (!list_empty(&priv->cm.start_list)) { 1301 while (!list_empty(&priv->cm.start_list)) {
1300 p = list_entry(priv->cm.start_list.next, typeof(*p), list); 1302 p = list_entry(priv->cm.start_list.next, typeof(*p), list);
1301 list_del_init(&p->list); 1303 list_del_init(&p->list);
1302 neigh = p->neigh; 1304 neigh = p->neigh;
1303 qpn = IPOIB_QPN(neigh->neighbour->ha); 1305 qpn = IPOIB_QPN(neigh->neighbour->ha);
1304 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); 1306 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
1305 spin_unlock(&priv->lock); 1307
1306 spin_unlock_irqrestore(&priv->tx_lock, flags); 1308 spin_unlock_irqrestore(&priv->lock, flags);
1309 netif_tx_unlock_bh(dev);
1310
1307 ret = ipoib_cm_tx_init(p, qpn, &pathrec); 1311 ret = ipoib_cm_tx_init(p, qpn, &pathrec);
1308 spin_lock_irqsave(&priv->tx_lock, flags); 1312
1309 spin_lock(&priv->lock); 1313 netif_tx_lock_bh(dev);
1314 spin_lock_irqsave(&priv->lock, flags);
1315
1310 if (ret) { 1316 if (ret) {
1311 neigh = p->neigh; 1317 neigh = p->neigh;
1312 if (neigh) { 1318 if (neigh) {
@@ -1320,44 +1326,52 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1320 kfree(p); 1326 kfree(p);
1321 } 1327 }
1322 } 1328 }
1323 spin_unlock(&priv->lock); 1329
1324 spin_unlock_irqrestore(&priv->tx_lock, flags); 1330 spin_unlock_irqrestore(&priv->lock, flags);
1331 netif_tx_unlock_bh(dev);
1325} 1332}
1326 1333
1327static void ipoib_cm_tx_reap(struct work_struct *work) 1334static void ipoib_cm_tx_reap(struct work_struct *work)
1328{ 1335{
1329 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 1336 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1330 cm.reap_task); 1337 cm.reap_task);
1338 struct net_device *dev = priv->dev;
1331 struct ipoib_cm_tx *p; 1339 struct ipoib_cm_tx *p;
1340 unsigned long flags;
1341
1342 netif_tx_lock_bh(dev);
1343 spin_lock_irqsave(&priv->lock, flags);
1332 1344
1333 spin_lock_irq(&priv->tx_lock);
1334 spin_lock(&priv->lock);
1335 while (!list_empty(&priv->cm.reap_list)) { 1345 while (!list_empty(&priv->cm.reap_list)) {
1336 p = list_entry(priv->cm.reap_list.next, typeof(*p), list); 1346 p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
1337 list_del(&p->list); 1347 list_del(&p->list);
1338 spin_unlock(&priv->lock); 1348 spin_unlock_irqrestore(&priv->lock, flags);
1339 spin_unlock_irq(&priv->tx_lock); 1349 netif_tx_unlock_bh(dev);
1340 ipoib_cm_tx_destroy(p); 1350 ipoib_cm_tx_destroy(p);
1341 spin_lock_irq(&priv->tx_lock); 1351 netif_tx_lock_bh(dev);
1342 spin_lock(&priv->lock); 1352 spin_lock_irqsave(&priv->lock, flags);
1343 } 1353 }
1344 spin_unlock(&priv->lock); 1354
1345 spin_unlock_irq(&priv->tx_lock); 1355 spin_unlock_irqrestore(&priv->lock, flags);
1356 netif_tx_unlock_bh(dev);
1346} 1357}
1347 1358
1348static void ipoib_cm_skb_reap(struct work_struct *work) 1359static void ipoib_cm_skb_reap(struct work_struct *work)
1349{ 1360{
1350 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 1361 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1351 cm.skb_task); 1362 cm.skb_task);
1363 struct net_device *dev = priv->dev;
1352 struct sk_buff *skb; 1364 struct sk_buff *skb;
1353 1365 unsigned long flags;
1354 unsigned mtu = priv->mcast_mtu; 1366 unsigned mtu = priv->mcast_mtu;
1355 1367
1356 spin_lock_irq(&priv->tx_lock); 1368 netif_tx_lock_bh(dev);
1357 spin_lock(&priv->lock); 1369 spin_lock_irqsave(&priv->lock, flags);
1370
1358 while ((skb = skb_dequeue(&priv->cm.skb_queue))) { 1371 while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
1359 spin_unlock(&priv->lock); 1372 spin_unlock_irqrestore(&priv->lock, flags);
1360 spin_unlock_irq(&priv->tx_lock); 1373 netif_tx_unlock_bh(dev);
1374
1361 if (skb->protocol == htons(ETH_P_IP)) 1375 if (skb->protocol == htons(ETH_P_IP))
1362 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 1376 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1363#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1377#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1365,11 +1379,13 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
1365 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev); 1379 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
1366#endif 1380#endif
1367 dev_kfree_skb_any(skb); 1381 dev_kfree_skb_any(skb);
1368 spin_lock_irq(&priv->tx_lock); 1382
1369 spin_lock(&priv->lock); 1383 netif_tx_lock_bh(dev);
1384 spin_lock_irqsave(&priv->lock, flags);
1370 } 1385 }
1371 spin_unlock(&priv->lock); 1386
1372 spin_unlock_irq(&priv->tx_lock); 1387 spin_unlock_irqrestore(&priv->lock, flags);
1388 netif_tx_unlock_bh(dev);
1373} 1389}
1374 1390
1375void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, 1391void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
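
The converted ipoib_cm reap and skb-queue loops all follow the same shape: detach one item while holding netif_tx_lock_bh() and priv->lock, drop both locks for the blocking work (ipoib_cm_tx_destroy(), icmp_send(), ...), then retake them before examining the list again. A plain-C sketch of that drop-and-reacquire pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t priv_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_items = 3;		/* stands in for priv->cm.reap_list */

static void destroy_item(int item)	/* may sleep in the real driver */
{
	printf("destroying item %d with no locks held\n", item);
}

static void reap_all(void)
{
	pthread_mutex_lock(&tx_lock);
	pthread_mutex_lock(&priv_lock);

	while (pending_items > 0) {
		int item = pending_items--;	/* detach under both locks */

		pthread_mutex_unlock(&priv_lock);
		pthread_mutex_unlock(&tx_lock);

		destroy_item(item);		/* blocking work, locks dropped */

		pthread_mutex_lock(&tx_lock);	/* retake before re-checking */
		pthread_mutex_lock(&priv_lock);
	}

	pthread_mutex_unlock(&priv_lock);
	pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
	reap_all();
	return 0;
}
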
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 66cafa20c246..0e748aeeae99 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -468,21 +468,22 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
468static void drain_tx_cq(struct net_device *dev) 468static void drain_tx_cq(struct net_device *dev)
469{ 469{
470 struct ipoib_dev_priv *priv = netdev_priv(dev); 470 struct ipoib_dev_priv *priv = netdev_priv(dev);
471 unsigned long flags;
472 471
473 spin_lock_irqsave(&priv->tx_lock, flags); 472 netif_tx_lock(dev);
474 while (poll_tx(priv)) 473 while (poll_tx(priv))
475 ; /* nothing */ 474 ; /* nothing */
476 475
477 if (netif_queue_stopped(dev)) 476 if (netif_queue_stopped(dev))
478 mod_timer(&priv->poll_timer, jiffies + 1); 477 mod_timer(&priv->poll_timer, jiffies + 1);
479 478
480 spin_unlock_irqrestore(&priv->tx_lock, flags); 479 netif_tx_unlock(dev);
481} 480}
482 481
483void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) 482void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
484{ 483{
485 drain_tx_cq((struct net_device *)dev_ptr); 484 struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);
485
486 mod_timer(&priv->poll_timer, jiffies);
486} 487}
487 488
488static inline int post_send(struct ipoib_dev_priv *priv, 489static inline int post_send(struct ipoib_dev_priv *priv,
@@ -614,17 +615,20 @@ static void __ipoib_reap_ah(struct net_device *dev)
614 struct ipoib_dev_priv *priv = netdev_priv(dev); 615 struct ipoib_dev_priv *priv = netdev_priv(dev);
615 struct ipoib_ah *ah, *tah; 616 struct ipoib_ah *ah, *tah;
616 LIST_HEAD(remove_list); 617 LIST_HEAD(remove_list);
618 unsigned long flags;
619
620 netif_tx_lock_bh(dev);
621 spin_lock_irqsave(&priv->lock, flags);
617 622
618 spin_lock_irq(&priv->tx_lock);
619 spin_lock(&priv->lock);
620 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) 623 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
621 if ((int) priv->tx_tail - (int) ah->last_send >= 0) { 624 if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
622 list_del(&ah->list); 625 list_del(&ah->list);
623 ib_destroy_ah(ah->ah); 626 ib_destroy_ah(ah->ah);
624 kfree(ah); 627 kfree(ah);
625 } 628 }
626 spin_unlock(&priv->lock); 629
627 spin_unlock_irq(&priv->tx_lock); 630 spin_unlock_irqrestore(&priv->lock, flags);
631 netif_tx_unlock_bh(dev);
628} 632}
629 633
630void ipoib_reap_ah(struct work_struct *work) 634void ipoib_reap_ah(struct work_struct *work)
@@ -761,6 +765,14 @@ void ipoib_drain_cq(struct net_device *dev)
761{ 765{
762 struct ipoib_dev_priv *priv = netdev_priv(dev); 766 struct ipoib_dev_priv *priv = netdev_priv(dev);
763 int i, n; 767 int i, n;
768
769 /*
770 * We call completion handling routines that expect to be
771 * called from the BH-disabled NAPI poll context, so disable
772 * BHs here too.
773 */
774 local_bh_disable();
775
764 do { 776 do {
765 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); 777 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
766 for (i = 0; i < n; ++i) { 778 for (i = 0; i < n; ++i) {
@@ -784,6 +796,8 @@ void ipoib_drain_cq(struct net_device *dev)
784 796
785 while (poll_tx(priv)) 797 while (poll_tx(priv))
786 ; /* nothing */ 798 ; /* nothing */
799
800 local_bh_enable();
787} 801}
788 802
789int ipoib_ib_dev_stop(struct net_device *dev, int flush) 803int ipoib_ib_dev_stop(struct net_device *dev, int flush)
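
ipoib_drain_cq() keeps its batch-polling loop but now runs it with BHs disabled, since the completion handlers it calls expect NAPI context. The loop shape itself is simple: poll IPOIB_NUM_WC entries at a time until the CQ returns a short batch. A stand-alone sketch with a mock poll routine (mock_poll_cq() is invented for the example; BH disabling has no user-space analogue):

#include <stdio.h>

#define IPOIB_NUM_WC 4

static int queued = 10;	/* pretend completions waiting in the CQ */

/* Fake ib_poll_cq(): returns how many completions it handed back. */
static int mock_poll_cq(int max)
{
	int n = queued < max ? queued : max;

	queued -= n;
	return n;
}

int main(void)
{
	int n, total = 0;

	do {
		n = mock_poll_cq(IPOIB_NUM_WC);
		total += n;	/* the driver processes each entry here */
	} while (n == IPOIB_NUM_WC);

	printf("drained %d completions\n", total);
	return 0;
}
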
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e9ca3cb57d52..c0ee514396df 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -373,9 +373,10 @@ void ipoib_flush_paths(struct net_device *dev)
373 struct ipoib_dev_priv *priv = netdev_priv(dev); 373 struct ipoib_dev_priv *priv = netdev_priv(dev);
374 struct ipoib_path *path, *tp; 374 struct ipoib_path *path, *tp;
375 LIST_HEAD(remove_list); 375 LIST_HEAD(remove_list);
376 unsigned long flags;
376 377
377 spin_lock_irq(&priv->tx_lock); 378 netif_tx_lock_bh(dev);
378 spin_lock(&priv->lock); 379 spin_lock_irqsave(&priv->lock, flags);
379 380
380 list_splice_init(&priv->path_list, &remove_list); 381 list_splice_init(&priv->path_list, &remove_list);
381 382
@@ -385,15 +386,16 @@ void ipoib_flush_paths(struct net_device *dev)
385 list_for_each_entry_safe(path, tp, &remove_list, list) { 386 list_for_each_entry_safe(path, tp, &remove_list, list) {
386 if (path->query) 387 if (path->query)
387 ib_sa_cancel_query(path->query_id, path->query); 388 ib_sa_cancel_query(path->query_id, path->query);
388 spin_unlock(&priv->lock); 389 spin_unlock_irqrestore(&priv->lock, flags);
389 spin_unlock_irq(&priv->tx_lock); 390 netif_tx_unlock_bh(dev);
390 wait_for_completion(&path->done); 391 wait_for_completion(&path->done);
391 path_free(dev, path); 392 path_free(dev, path);
392 spin_lock_irq(&priv->tx_lock); 393 netif_tx_lock_bh(dev);
393 spin_lock(&priv->lock); 394 spin_lock_irqsave(&priv->lock, flags);
394 } 395 }
395 spin_unlock(&priv->lock); 396
396 spin_unlock_irq(&priv->tx_lock); 397 spin_unlock_irqrestore(&priv->lock, flags);
398 netif_tx_unlock_bh(dev);
397} 399}
398 400
399static void path_rec_completion(int status, 401static void path_rec_completion(int status,
@@ -555,6 +557,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
555 struct ipoib_dev_priv *priv = netdev_priv(dev); 557 struct ipoib_dev_priv *priv = netdev_priv(dev);
556 struct ipoib_path *path; 558 struct ipoib_path *path;
557 struct ipoib_neigh *neigh; 559 struct ipoib_neigh *neigh;
560 unsigned long flags;
558 561
559 neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev); 562 neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
560 if (!neigh) { 563 if (!neigh) {
@@ -563,11 +566,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
563 return; 566 return;
564 } 567 }
565 568
566 /* 569 spin_lock_irqsave(&priv->lock, flags);
567 * We can only be called from ipoib_start_xmit, so we're
568 * inside tx_lock -- no need to save/restore flags.
569 */
570 spin_lock(&priv->lock);
571 570
572 path = __path_find(dev, skb->dst->neighbour->ha + 4); 571 path = __path_find(dev, skb->dst->neighbour->ha + 4);
573 if (!path) { 572 if (!path) {
@@ -614,7 +613,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
614 __skb_queue_tail(&neigh->queue, skb); 613 __skb_queue_tail(&neigh->queue, skb);
615 } 614 }
616 615
617 spin_unlock(&priv->lock); 616 spin_unlock_irqrestore(&priv->lock, flags);
618 return; 617 return;
619 618
620err_list: 619err_list:
@@ -626,7 +625,7 @@ err_drop:
626 ++dev->stats.tx_dropped; 625 ++dev->stats.tx_dropped;
627 dev_kfree_skb_any(skb); 626 dev_kfree_skb_any(skb);
628 627
629 spin_unlock(&priv->lock); 628 spin_unlock_irqrestore(&priv->lock, flags);
630} 629}
631 630
632static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) 631static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
@@ -650,12 +649,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
650{ 649{
651 struct ipoib_dev_priv *priv = netdev_priv(dev); 650 struct ipoib_dev_priv *priv = netdev_priv(dev);
652 struct ipoib_path *path; 651 struct ipoib_path *path;
652 unsigned long flags;
653 653
654 /* 654 spin_lock_irqsave(&priv->lock, flags);
655 * We can only be called from ipoib_start_xmit, so we're
656 * inside tx_lock -- no need to save/restore flags.
657 */
658 spin_lock(&priv->lock);
659 655
660 path = __path_find(dev, phdr->hwaddr + 4); 656 path = __path_find(dev, phdr->hwaddr + 4);
661 if (!path || !path->valid) { 657 if (!path || !path->valid) {
@@ -667,7 +663,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
667 __skb_queue_tail(&path->queue, skb); 663 __skb_queue_tail(&path->queue, skb);
668 664
669 if (path_rec_start(dev, path)) { 665 if (path_rec_start(dev, path)) {
670 spin_unlock(&priv->lock); 666 spin_unlock_irqrestore(&priv->lock, flags);
671 path_free(dev, path); 667 path_free(dev, path);
672 return; 668 return;
673 } else 669 } else
@@ -677,7 +673,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
677 dev_kfree_skb_any(skb); 673 dev_kfree_skb_any(skb);
678 } 674 }
679 675
680 spin_unlock(&priv->lock); 676 spin_unlock_irqrestore(&priv->lock, flags);
681 return; 677 return;
682 } 678 }
683 679
@@ -696,7 +692,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
696 dev_kfree_skb_any(skb); 692 dev_kfree_skb_any(skb);
697 } 693 }
698 694
699 spin_unlock(&priv->lock); 695 spin_unlock_irqrestore(&priv->lock, flags);
700} 696}
701 697
702static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 698static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -705,13 +701,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
705 struct ipoib_neigh *neigh; 701 struct ipoib_neigh *neigh;
706 unsigned long flags; 702 unsigned long flags;
707 703
708 if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
709 return NETDEV_TX_LOCKED;
710
711 if (likely(skb->dst && skb->dst->neighbour)) { 704 if (likely(skb->dst && skb->dst->neighbour)) {
712 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { 705 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
713 ipoib_path_lookup(skb, dev); 706 ipoib_path_lookup(skb, dev);
714 goto out; 707 return NETDEV_TX_OK;
715 } 708 }
716 709
717 neigh = *to_ipoib_neigh(skb->dst->neighbour); 710 neigh = *to_ipoib_neigh(skb->dst->neighbour);
@@ -721,7 +714,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
721 skb->dst->neighbour->ha + 4, 714 skb->dst->neighbour->ha + 4,
722 sizeof(union ib_gid))) || 715 sizeof(union ib_gid))) ||
723 (neigh->dev != dev))) { 716 (neigh->dev != dev))) {
724 spin_lock(&priv->lock); 717 spin_lock_irqsave(&priv->lock, flags);
725 /* 718 /*
726 * It's safe to call ipoib_put_ah() inside 719 * It's safe to call ipoib_put_ah() inside
727 * priv->lock here, because we know that 720 * priv->lock here, because we know that
@@ -732,25 +725,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
732 ipoib_put_ah(neigh->ah); 725 ipoib_put_ah(neigh->ah);
733 list_del(&neigh->list); 726 list_del(&neigh->list);
734 ipoib_neigh_free(dev, neigh); 727 ipoib_neigh_free(dev, neigh);
735 spin_unlock(&priv->lock); 728 spin_unlock_irqrestore(&priv->lock, flags);
736 ipoib_path_lookup(skb, dev); 729 ipoib_path_lookup(skb, dev);
737 goto out; 730 return NETDEV_TX_OK;
738 } 731 }
739 732
740 if (ipoib_cm_get(neigh)) { 733 if (ipoib_cm_get(neigh)) {
741 if (ipoib_cm_up(neigh)) { 734 if (ipoib_cm_up(neigh)) {
742 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 735 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
743 goto out; 736 return NETDEV_TX_OK;
744 } 737 }
745 } else if (neigh->ah) { 738 } else if (neigh->ah) {
746 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha)); 739 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
747 goto out; 740 return NETDEV_TX_OK;
748 } 741 }
749 742
750 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 743 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
751 spin_lock(&priv->lock); 744 spin_lock_irqsave(&priv->lock, flags);
752 __skb_queue_tail(&neigh->queue, skb); 745 __skb_queue_tail(&neigh->queue, skb);
753 spin_unlock(&priv->lock); 746 spin_unlock_irqrestore(&priv->lock, flags);
754 } else { 747 } else {
755 ++dev->stats.tx_dropped; 748 ++dev->stats.tx_dropped;
756 dev_kfree_skb_any(skb); 749 dev_kfree_skb_any(skb);
@@ -779,16 +772,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
779 IPOIB_GID_RAW_ARG(phdr->hwaddr + 4)); 772 IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
780 dev_kfree_skb_any(skb); 773 dev_kfree_skb_any(skb);
781 ++dev->stats.tx_dropped; 774 ++dev->stats.tx_dropped;
782 goto out; 775 return NETDEV_TX_OK;
783 } 776 }
784 777
785 unicast_arp_send(skb, dev, phdr); 778 unicast_arp_send(skb, dev, phdr);
786 } 779 }
787 } 780 }
788 781
789out:
790 spin_unlock_irqrestore(&priv->tx_lock, flags);
791
792 return NETDEV_TX_OK; 782 return NETDEV_TX_OK;
793} 783}
794 784
@@ -1052,7 +1042,6 @@ static void ipoib_setup(struct net_device *dev)
1052 dev->type = ARPHRD_INFINIBAND; 1042 dev->type = ARPHRD_INFINIBAND;
1053 dev->tx_queue_len = ipoib_sendq_size * 2; 1043 dev->tx_queue_len = ipoib_sendq_size * 2;
1054 dev->features = (NETIF_F_VLAN_CHALLENGED | 1044 dev->features = (NETIF_F_VLAN_CHALLENGED |
1055 NETIF_F_LLTX |
1056 NETIF_F_HIGHDMA); 1045 NETIF_F_HIGHDMA);
1057 1046
1058 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 1047 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
@@ -1064,7 +1053,6 @@ static void ipoib_setup(struct net_device *dev)
1064 ipoib_lro_setup(priv); 1053 ipoib_lro_setup(priv);
1065 1054
1066 spin_lock_init(&priv->lock); 1055 spin_lock_init(&priv->lock);
1067 spin_lock_init(&priv->tx_lock);
1068 1056
1069 mutex_init(&priv->vlan_mutex); 1057 mutex_init(&priv->vlan_mutex);
1070 1058
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aae28620a6e5..d9d1223c3fd5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
69 struct net_device *dev = mcast->dev; 69 struct net_device *dev = mcast->dev;
70 struct ipoib_dev_priv *priv = netdev_priv(dev); 70 struct ipoib_dev_priv *priv = netdev_priv(dev);
71 struct ipoib_neigh *neigh, *tmp; 71 struct ipoib_neigh *neigh, *tmp;
72 unsigned long flags;
73 int tx_dropped = 0; 72 int tx_dropped = 0;
74 73
75 ipoib_dbg_mcast(netdev_priv(dev), 74 ipoib_dbg_mcast(netdev_priv(dev),
76 "deleting multicast group " IPOIB_GID_FMT "\n", 75 "deleting multicast group " IPOIB_GID_FMT "\n",
77 IPOIB_GID_ARG(mcast->mcmember.mgid)); 76 IPOIB_GID_ARG(mcast->mcmember.mgid));
78 77
79 spin_lock_irqsave(&priv->lock, flags); 78 spin_lock_irq(&priv->lock);
80 79
81 list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) { 80 list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
82 /* 81 /*
@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
90 ipoib_neigh_free(dev, neigh); 89 ipoib_neigh_free(dev, neigh);
91 } 90 }
92 91
93 spin_unlock_irqrestore(&priv->lock, flags); 92 spin_unlock_irq(&priv->lock);
94 93
95 if (mcast->ah) 94 if (mcast->ah)
96 ipoib_put_ah(mcast->ah); 95 ipoib_put_ah(mcast->ah);
@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
100 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); 99 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
101 } 100 }
102 101
103 spin_lock_irqsave(&priv->tx_lock, flags); 102 netif_tx_lock_bh(dev);
104 dev->stats.tx_dropped += tx_dropped; 103 dev->stats.tx_dropped += tx_dropped;
105 spin_unlock_irqrestore(&priv->tx_lock, flags); 104 netif_tx_unlock_bh(dev);
106 105
107 kfree(mcast); 106 kfree(mcast);
108} 107}
@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
259 } 258 }
260 259
261 /* actually send any queued packets */ 260 /* actually send any queued packets */
262 spin_lock_irq(&priv->tx_lock); 261 netif_tx_lock_bh(dev);
263 while (!skb_queue_empty(&mcast->pkt_queue)) { 262 while (!skb_queue_empty(&mcast->pkt_queue)) {
264 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); 263 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
265 spin_unlock_irq(&priv->tx_lock); 264 netif_tx_unlock_bh(dev);
266 265
267 skb->dev = dev; 266 skb->dev = dev;
268 267
@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
273 272
274 if (dev_queue_xmit(skb)) 273 if (dev_queue_xmit(skb))
275 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); 274 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
276 spin_lock_irq(&priv->tx_lock); 275 netif_tx_lock_bh(dev);
277 } 276 }
278 spin_unlock_irq(&priv->tx_lock); 277 netif_tx_unlock_bh(dev);
279 278
280 return 0; 279 return 0;
281} 280}
@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status,
286{ 285{
287 struct ipoib_mcast *mcast = multicast->context; 286 struct ipoib_mcast *mcast = multicast->context;
288 struct net_device *dev = mcast->dev; 287 struct net_device *dev = mcast->dev;
289 struct ipoib_dev_priv *priv = netdev_priv(dev);
290 288
291 /* We trap for port events ourselves. */ 289 /* We trap for port events ourselves. */
292 if (status == -ENETRESET) 290 if (status == -ENETRESET)
@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status,
302 IPOIB_GID_ARG(mcast->mcmember.mgid), status); 300 IPOIB_GID_ARG(mcast->mcmember.mgid), status);
303 301
304 /* Flush out any queued packets */ 302 /* Flush out any queued packets */
305 spin_lock_irq(&priv->tx_lock); 303 netif_tx_lock_bh(dev);
306 while (!skb_queue_empty(&mcast->pkt_queue)) { 304 while (!skb_queue_empty(&mcast->pkt_queue)) {
307 ++dev->stats.tx_dropped; 305 ++dev->stats.tx_dropped;
308 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); 306 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
309 } 307 }
310 spin_unlock_irq(&priv->tx_lock); 308 netif_tx_unlock_bh(dev);
311 309
312 /* Clear the busy flag so we try again */ 310 /* Clear the busy flag so we try again */
313 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, 311 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
@@ -662,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
662{ 660{
663 struct ipoib_dev_priv *priv = netdev_priv(dev); 661 struct ipoib_dev_priv *priv = netdev_priv(dev);
664 struct ipoib_mcast *mcast; 662 struct ipoib_mcast *mcast;
663 unsigned long flags;
665 664
666 /* 665 spin_lock_irqsave(&priv->lock, flags);
667 * We can only be called from ipoib_start_xmit, so we're
668 * inside tx_lock -- no need to save/restore flags.
669 */
670 spin_lock(&priv->lock);
671 666
672 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || 667 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
673 !priv->broadcast || 668 !priv->broadcast ||
@@ -738,7 +733,7 @@ out:
738 } 733 }
739 734
740unlock: 735unlock:
741 spin_unlock(&priv->lock); 736 spin_unlock_irqrestore(&priv->lock, flags);
742} 737}
743 738
744void ipoib_mcast_dev_flush(struct net_device *dev) 739void ipoib_mcast_dev_flush(struct net_device *dev)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13956437bc81..682ef9e6acd3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -333,7 +333,6 @@ static void crypt_convert_init(struct crypt_config *cc,
333 ctx->idx_out = bio_out ? bio_out->bi_idx : 0; 333 ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
334 ctx->sector = sector + cc->iv_offset; 334 ctx->sector = sector + cc->iv_offset;
335 init_completion(&ctx->restart); 335 init_completion(&ctx->restart);
336 atomic_set(&ctx->pending, 1);
337} 336}
338 337
339static int crypt_convert_block(struct crypt_config *cc, 338static int crypt_convert_block(struct crypt_config *cc,
@@ -408,6 +407,8 @@ static int crypt_convert(struct crypt_config *cc,
408{ 407{
409 int r; 408 int r;
410 409
410 atomic_set(&ctx->pending, 1);
411
411 while(ctx->idx_in < ctx->bio_in->bi_vcnt && 412 while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
412 ctx->idx_out < ctx->bio_out->bi_vcnt) { 413 ctx->idx_out < ctx->bio_out->bi_vcnt) {
413 414
@@ -456,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
456/* 457/*
457 * Generate a new unfragmented bio with the given size 458 * Generate a new unfragmented bio with the given size
458 * This should never violate the device limitations 459 * This should never violate the device limitations
459 * May return a smaller bio when running out of pages 460 * May return a smaller bio when running out of pages, indicated by
461 * *out_of_pages set to 1.
460 */ 462 */
461static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) 463static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
464 unsigned *out_of_pages)
462{ 465{
463 struct crypt_config *cc = io->target->private; 466 struct crypt_config *cc = io->target->private;
464 struct bio *clone; 467 struct bio *clone;
@@ -472,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
472 return NULL; 475 return NULL;
473 476
474 clone_init(io, clone); 477 clone_init(io, clone);
478 *out_of_pages = 0;
475 479
476 for (i = 0; i < nr_iovecs; i++) { 480 for (i = 0; i < nr_iovecs; i++) {
477 page = mempool_alloc(cc->page_pool, gfp_mask); 481 page = mempool_alloc(cc->page_pool, gfp_mask);
478 if (!page) 482 if (!page) {
483 *out_of_pages = 1;
479 break; 484 break;
485 }
480 486
481 /* 487 /*
482 * if additional pages cannot be allocated without waiting, 488 * if additional pages cannot be allocated without waiting,
@@ -517,6 +523,27 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
517 } 523 }
518} 524}
519 525
526static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
527 struct bio *bio, sector_t sector)
528{
529 struct crypt_config *cc = ti->private;
530 struct dm_crypt_io *io;
531
532 io = mempool_alloc(cc->io_pool, GFP_NOIO);
533 io->target = ti;
534 io->base_bio = bio;
535 io->sector = sector;
536 io->error = 0;
537 atomic_set(&io->pending, 0);
538
539 return io;
540}
541
542static void crypt_inc_pending(struct dm_crypt_io *io)
543{
544 atomic_inc(&io->pending);
545}
546
520/* 547/*
521 * One of the bios was finished. Check for completion of 548 * One of the bios was finished. Check for completion of
522 * the whole request and correctly clean up the buffer. 549 * the whole request and correctly clean up the buffer.
@@ -591,7 +618,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
591 struct bio *base_bio = io->base_bio; 618 struct bio *base_bio = io->base_bio;
592 struct bio *clone; 619 struct bio *clone;
593 620
594 atomic_inc(&io->pending); 621 crypt_inc_pending(io);
595 622
596 /* 623 /*
597 * The block layer might modify the bvec array, so always 624 * The block layer might modify the bvec array, so always
@@ -653,6 +680,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
653 crypt_free_buffer_pages(cc, clone); 680 crypt_free_buffer_pages(cc, clone);
654 bio_put(clone); 681 bio_put(clone);
655 io->error = -EIO; 682 io->error = -EIO;
683 crypt_dec_pending(io);
656 return; 684 return;
657 } 685 }
658 686
@@ -664,28 +692,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
664 692
665 if (async) 693 if (async)
666 kcryptd_queue_io(io); 694 kcryptd_queue_io(io);
667 else { 695 else
668 atomic_inc(&io->pending);
669 generic_make_request(clone); 696 generic_make_request(clone);
670 }
671} 697}
672 698
673static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io) 699static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
674{ 700{
675 struct crypt_config *cc = io->target->private; 701 struct crypt_config *cc = io->target->private;
676 struct bio *clone; 702 struct bio *clone;
703 int crypt_finished;
704 unsigned out_of_pages = 0;
677 unsigned remaining = io->base_bio->bi_size; 705 unsigned remaining = io->base_bio->bi_size;
678 int r; 706 int r;
679 707
680 /* 708 /*
709 * Prevent io from disappearing until this function completes.
710 */
711 crypt_inc_pending(io);
712 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);
713
714 /*
681 * The allocated buffers can be smaller than the whole bio, 715 * The allocated buffers can be smaller than the whole bio,
682 * so repeat the whole process until all the data can be handled. 716 * so repeat the whole process until all the data can be handled.
683 */ 717 */
684 while (remaining) { 718 while (remaining) {
685 clone = crypt_alloc_buffer(io, remaining); 719 clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
686 if (unlikely(!clone)) { 720 if (unlikely(!clone)) {
687 io->error = -ENOMEM; 721 io->error = -ENOMEM;
688 return; 722 break;
689 } 723 }
690 724
691 io->ctx.bio_out = clone; 725 io->ctx.bio_out = clone;
@@ -693,37 +727,32 @@ static void kcryptd_crypt_write_convert_loop(struct dm_crypt_io *io)
693 727
694 remaining -= clone->bi_size; 728 remaining -= clone->bi_size;
695 729
730 crypt_inc_pending(io);
696 r = crypt_convert(cc, &io->ctx); 731 r = crypt_convert(cc, &io->ctx);
732 crypt_finished = atomic_dec_and_test(&io->ctx.pending);
697 733
698 if (atomic_dec_and_test(&io->ctx.pending)) { 734 /* Encryption was already finished, submit io now */
699 /* processed, no running async crypto */ 735 if (crypt_finished) {
700 kcryptd_crypt_write_io_submit(io, r, 0); 736 kcryptd_crypt_write_io_submit(io, r, 0);
701 if (unlikely(r < 0))
702 return;
703 } else
704 atomic_inc(&io->pending);
705 737
706 /* out of memory -> run queues */ 738 /*
707 if (unlikely(remaining)) { 739 * If there was an error, do not try next fragments.
708 /* wait for async crypto then reinitialize pending */ 740 * For async, error is processed in async handler.
709 wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); 741 */
710 atomic_set(&io->ctx.pending, 1); 742 if (unlikely(r < 0))
711 congestion_wait(WRITE, HZ/100); 743 break;
712 } 744 }
713 }
714}
715 745
716static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) 746 /*
717{ 747 * Out of memory -> run queues
718 struct crypt_config *cc = io->target->private; 748 * But don't wait if split was due to the io size restriction
719 749 */
720 /* 750 if (unlikely(out_of_pages))
721 * Prevent io from disappearing until this function completes. 751 congestion_wait(WRITE, HZ/100);
722 */
723 atomic_inc(&io->pending);
724 752
725 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); 753 if (unlikely(remaining))
726 kcryptd_crypt_write_convert_loop(io); 754 wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
755 }
727 756
728 crypt_dec_pending(io); 757 crypt_dec_pending(io);
729} 758}
@@ -741,7 +770,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
741 struct crypt_config *cc = io->target->private; 770 struct crypt_config *cc = io->target->private;
742 int r = 0; 771 int r = 0;
743 772
744 atomic_inc(&io->pending); 773 crypt_inc_pending(io);
745 774
746 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 775 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
747 io->sector); 776 io->sector);
@@ -1108,15 +1137,9 @@ static void crypt_dtr(struct dm_target *ti)
1108static int crypt_map(struct dm_target *ti, struct bio *bio, 1137static int crypt_map(struct dm_target *ti, struct bio *bio,
1109 union map_info *map_context) 1138 union map_info *map_context)
1110{ 1139{
1111 struct crypt_config *cc = ti->private;
1112 struct dm_crypt_io *io; 1140 struct dm_crypt_io *io;
1113 1141
1114 io = mempool_alloc(cc->io_pool, GFP_NOIO); 1142 io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
1115 io->target = ti;
1116 io->base_bio = bio;
1117 io->sector = bio->bi_sector - ti->begin;
1118 io->error = 0;
1119 atomic_set(&io->pending, 0);
1120 1143
1121 if (bio_data_dir(io->base_bio) == READ) 1144 if (bio_data_dir(io->base_bio) == READ)
1122 kcryptd_queue_io(io); 1145 kcryptd_queue_io(io);
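
The dm-crypt changes above centralise the io->pending bookkeeping in crypt_inc_pending()/crypt_dec_pending(): the write path takes one reference for the whole conversion loop plus one per submitted fragment, so the io cannot be freed while asynchronous crypto completions are still outstanding. A minimal sketch of that reference-counting shape using C11 atomics in userspace; io_alloc(), io_get(), io_put() and submit_fragment() are illustrative names only.

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io {
	atomic_int pending;
	int error;
};

static struct io *io_alloc(void)
{
	struct io *io = malloc(sizeof(*io));
	atomic_init(&io->pending, 0);
	io->error = 0;
	return io;
}

static void io_get(struct io *io)             /* crypt_inc_pending() analogue */
{
	atomic_fetch_add(&io->pending, 1);
}

static void io_put(struct io *io)             /* crypt_dec_pending() analogue */
{
	if (atomic_fetch_sub(&io->pending, 1) == 1) {
		printf("io complete, error=%d\n", io->error);
		free(io);
	}
}

static void submit_fragment(struct io *io, int n)
{
	io_get(io);                           /* one reference per fragment */
	printf("fragment %d submitted\n", n);
	io_put(io);                           /* its completion drops it again */
}

int main(void)
{
	struct io *io = io_alloc();

	io_get(io);                           /* keep io alive across the loop */
	for (int n = 0; n < 3; n++)
		submit_fragment(io, n);
	io_put(io);                           /* last reference frees io */
	return 0;
}
```
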
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 41f408068a7c..769ab677f8e0 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -108,12 +108,12 @@ struct pstore {
108 * Used to keep track of which metadata area the data in 108 * Used to keep track of which metadata area the data in
109 * 'chunk' refers to. 109 * 'chunk' refers to.
110 */ 110 */
111 uint32_t current_area; 111 chunk_t current_area;
112 112
113 /* 113 /*
114 * The next free chunk for an exception. 114 * The next free chunk for an exception.
115 */ 115 */
116 uint32_t next_free; 116 chunk_t next_free;
117 117
118 /* 118 /*
119 * The index of next free exception in the current 119 * The index of next free exception in the current
@@ -175,7 +175,7 @@ static void do_metadata(struct work_struct *work)
175/* 175/*
176 * Read or write a chunk aligned and sized block of data from a device. 176 * Read or write a chunk aligned and sized block of data from a device.
177 */ 177 */
178static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata) 178static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
179{ 179{
180 struct dm_io_region where = { 180 struct dm_io_region where = {
181 .bdev = ps->snap->cow->bdev, 181 .bdev = ps->snap->cow->bdev,
@@ -209,16 +209,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
209} 209}
210 210
211/* 211/*
212 * Convert a metadata area index to a chunk index.
213 */
214static chunk_t area_location(struct pstore *ps, chunk_t area)
215{
216 return 1 + ((ps->exceptions_per_area + 1) * area);
217}
218
219/*
212 * Read or write a metadata area. Remembering to skip the first 220 * Read or write a metadata area. Remembering to skip the first
213 * chunk which holds the header. 221 * chunk which holds the header.
214 */ 222 */
215static int area_io(struct pstore *ps, uint32_t area, int rw) 223static int area_io(struct pstore *ps, chunk_t area, int rw)
216{ 224{
217 int r; 225 int r;
218 uint32_t chunk; 226 chunk_t chunk;
219 227
220 /* convert a metadata area index to a chunk index */ 228 chunk = area_location(ps, area);
221 chunk = 1 + ((ps->exceptions_per_area + 1) * area);
222 229
223 r = chunk_io(ps, chunk, rw, 0); 230 r = chunk_io(ps, chunk, rw, 0);
224 if (r) 231 if (r)
@@ -228,7 +235,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
228 return 0; 235 return 0;
229} 236}
230 237
231static int zero_area(struct pstore *ps, uint32_t area) 238static int zero_area(struct pstore *ps, chunk_t area)
232{ 239{
233 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); 240 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
234 return area_io(ps, area, WRITE); 241 return area_io(ps, area, WRITE);
@@ -404,7 +411,7 @@ static int insert_exceptions(struct pstore *ps, int *full)
404 411
405static int read_exceptions(struct pstore *ps) 412static int read_exceptions(struct pstore *ps)
406{ 413{
407 uint32_t area; 414 chunk_t area;
408 int r, full = 1; 415 int r, full = 1;
409 416
410 /* 417 /*
@@ -517,6 +524,7 @@ static int persistent_prepare(struct exception_store *store,
517{ 524{
518 struct pstore *ps = get_info(store); 525 struct pstore *ps = get_info(store);
519 uint32_t stride; 526 uint32_t stride;
527 chunk_t next_free;
520 sector_t size = get_dev_size(store->snap->cow->bdev); 528 sector_t size = get_dev_size(store->snap->cow->bdev);
521 529
522 /* Is there enough room ? */ 530 /* Is there enough room ? */
@@ -530,7 +538,8 @@ static int persistent_prepare(struct exception_store *store,
530 * into account the location of the metadata chunks. 538 * into account the location of the metadata chunks.
531 */ 539 */
532 stride = (ps->exceptions_per_area + 1); 540 stride = (ps->exceptions_per_area + 1);
533 if ((++ps->next_free % stride) == 1) 541 next_free = ++ps->next_free;
542 if (sector_div(next_free, stride) == 1)
534 ps->next_free++; 543 ps->next_free++;
535 544
536 atomic_inc(&ps->pending_count); 545 atomic_inc(&ps->pending_count);
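
Switching current_area/next_free from uint32_t to chunk_t is what forces the plain `%` above to become sector_div(), since chunk_t can be 64-bit on a 32-bit host; sector_div() divides in place and returns the remainder. The new area_location() helper also makes the on-disk layout explicit: chunk 0 holds the header, and each group of exceptions_per_area data chunks is preceded by one metadata chunk. Below is a small userspace sketch of that layout and of the skip-the-metadata-slot allocation step; plain 64-bit integers stand in for chunk_t.

```c
#include <inttypes.h>
#include <stdio.h>

static uint64_t area_location(uint64_t exceptions_per_area, uint64_t area)
{
	return 1 + (exceptions_per_area + 1) * area;   /* as in the patch */
}

int main(void)
{
	uint64_t exceptions_per_area = 3;
	uint64_t stride = exceptions_per_area + 1;
	uint64_t next_free = 0;

	for (int area = 0; area < 3; area++)
		printf("metadata area %d starts at chunk %" PRIu64 "\n",
		       area, area_location(exceptions_per_area, area));

	/* Allocating data chunks: skip any chunk that lands on a metadata
	 * slot, mirroring the "if remainder == 1, bump next_free" logic. */
	for (int i = 0; i < 8; i++) {
		++next_free;
		if (next_free % stride == 1)  /* sector_div() yields this remainder */
			++next_free;
		printf("allocated data chunk %" PRIu64 "\n", next_free);
	}
	return 0;
}
```
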
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b262c0042de3..dca401dc70a0 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -426,7 +426,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
426 old_nl->next = (uint32_t) ((void *) nl - 426 old_nl->next = (uint32_t) ((void *) nl -
427 (void *) old_nl); 427 (void *) old_nl);
428 disk = dm_disk(hc->md); 428 disk = dm_disk(hc->md);
429 nl->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor)); 429 nl->dev = huge_encode_dev(disk_devt(disk));
430 nl->next = 0; 430 nl->next = 0;
431 strcpy(nl->name, hc->name); 431 strcpy(nl->name, hc->name);
432 432
@@ -539,7 +539,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
539 if (dm_suspended(md)) 539 if (dm_suspended(md))
540 param->flags |= DM_SUSPEND_FLAG; 540 param->flags |= DM_SUSPEND_FLAG;
541 541
542 param->dev = huge_encode_dev(MKDEV(disk->major, disk->first_minor)); 542 param->dev = huge_encode_dev(disk_devt(disk));
543 543
544 /* 544 /*
545 * Yes, this will be out of date by the time it gets back 545 * Yes, this will be out of date by the time it gets back
@@ -548,7 +548,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
548 */ 548 */
549 param->open_count = dm_open_count(md); 549 param->open_count = dm_open_count(md);
550 550
551 if (disk->policy) 551 if (get_disk_ro(disk))
552 param->flags |= DM_READONLY_FLAG; 552 param->flags |= DM_READONLY_FLAG;
553 553
554 param->event_nr = dm_get_event_nr(md); 554 param->event_nr = dm_get_event_nr(md);
@@ -1131,7 +1131,7 @@ static void retrieve_deps(struct dm_table *table,
1131 unsigned int count = 0; 1131 unsigned int count = 0;
1132 struct list_head *tmp; 1132 struct list_head *tmp;
1133 size_t len, needed; 1133 size_t len, needed;
1134 struct dm_dev *dd; 1134 struct dm_dev_internal *dd;
1135 struct dm_target_deps *deps; 1135 struct dm_target_deps *deps;
1136 1136
1137 deps = get_result_buffer(param, param_size, &len); 1137 deps = get_result_buffer(param, param_size, &len);
@@ -1157,7 +1157,7 @@ static void retrieve_deps(struct dm_table *table,
1157 deps->count = count; 1157 deps->count = count;
1158 count = 0; 1158 count = 0;
1159 list_for_each_entry (dd, dm_table_get_devices(table), list) 1159 list_for_each_entry (dd, dm_table_get_devices(table), list)
1160 deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev); 1160 deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
1161 1161
1162 param->data_size = param->data_start + needed; 1162 param->data_size = param->data_start + needed;
1163} 1163}
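
The dm-ioctl changes above are mostly mechanical: disk_devt() and get_disk_ro() replace poking at gendisk fields directly, and the dependency walk now goes through dd->dm_dev. The point of returning a ready-made device number is that major and minor travel together in one dev_t. The userspace sketch below shows the same round trip with the glibc makedev()/major()/minor() macros, not the kernel's disk_devt()/huge_encode_dev().

```c
#include <stdio.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

int main(void)
{
	dev_t dev = makedev(253, 2);   /* e.g. a device-mapper volume, dm-2 */

	printf("dev=0x%llx major=%u minor=%u\n",
	       (unsigned long long)dev, major(dev), minor(dev));
	return 0;
}
```
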
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c2fcf28b4c70..103304c1e3b0 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -30,9 +30,11 @@ struct pgpath {
30 struct list_head list; 30 struct list_head list;
31 31
32 struct priority_group *pg; /* Owning PG */ 32 struct priority_group *pg; /* Owning PG */
33 unsigned is_active; /* Path status */
33 unsigned fail_count; /* Cumulative failure count */ 34 unsigned fail_count; /* Cumulative failure count */
34 35
35 struct dm_path path; 36 struct dm_path path;
37 struct work_struct deactivate_path;
36}; 38};
37 39
38#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) 40#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -112,6 +114,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
112static void process_queued_ios(struct work_struct *work); 114static void process_queued_ios(struct work_struct *work);
113static void trigger_event(struct work_struct *work); 115static void trigger_event(struct work_struct *work);
114static void activate_path(struct work_struct *work); 116static void activate_path(struct work_struct *work);
117static void deactivate_path(struct work_struct *work);
115 118
116 119
117/*----------------------------------------------- 120/*-----------------------------------------------
@@ -122,8 +125,10 @@ static struct pgpath *alloc_pgpath(void)
122{ 125{
123 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); 126 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
124 127
125 if (pgpath) 128 if (pgpath) {
126 pgpath->path.is_active = 1; 129 pgpath->is_active = 1;
130 INIT_WORK(&pgpath->deactivate_path, deactivate_path);
131 }
127 132
128 return pgpath; 133 return pgpath;
129} 134}
@@ -133,6 +138,14 @@ static void free_pgpath(struct pgpath *pgpath)
133 kfree(pgpath); 138 kfree(pgpath);
134} 139}
135 140
141static void deactivate_path(struct work_struct *work)
142{
143 struct pgpath *pgpath =
144 container_of(work, struct pgpath, deactivate_path);
145
146 blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
147}
148
136static struct priority_group *alloc_priority_group(void) 149static struct priority_group *alloc_priority_group(void)
137{ 150{
138 struct priority_group *pg; 151 struct priority_group *pg;
@@ -563,12 +576,12 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
563 /* we need at least a path arg */ 576 /* we need at least a path arg */
564 if (as->argc < 1) { 577 if (as->argc < 1) {
565 ti->error = "no device given"; 578 ti->error = "no device given";
566 return NULL; 579 return ERR_PTR(-EINVAL);
567 } 580 }
568 581
569 p = alloc_pgpath(); 582 p = alloc_pgpath();
570 if (!p) 583 if (!p)
571 return NULL; 584 return ERR_PTR(-ENOMEM);
572 585
573 r = dm_get_device(ti, shift(as), ti->begin, ti->len, 586 r = dm_get_device(ti, shift(as), ti->begin, ti->len,
574 dm_table_get_mode(ti->table), &p->path.dev); 587 dm_table_get_mode(ti->table), &p->path.dev);
@@ -596,7 +609,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
596 609
597 bad: 610 bad:
598 free_pgpath(p); 611 free_pgpath(p);
599 return NULL; 612 return ERR_PTR(r);
600} 613}
601 614
602static struct priority_group *parse_priority_group(struct arg_set *as, 615static struct priority_group *parse_priority_group(struct arg_set *as,
@@ -614,14 +627,14 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
614 627
615 if (as->argc < 2) { 628 if (as->argc < 2) {
616 as->argc = 0; 629 as->argc = 0;
617 ti->error = "not enough priority group aruments"; 630 ti->error = "not enough priority group arguments";
618 return NULL; 631 return ERR_PTR(-EINVAL);
619 } 632 }
620 633
621 pg = alloc_priority_group(); 634 pg = alloc_priority_group();
622 if (!pg) { 635 if (!pg) {
623 ti->error = "couldn't allocate priority group"; 636 ti->error = "couldn't allocate priority group";
624 return NULL; 637 return ERR_PTR(-ENOMEM);
625 } 638 }
626 pg->m = m; 639 pg->m = m;
627 640
@@ -654,8 +667,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
654 path_args.argv = as->argv; 667 path_args.argv = as->argv;
655 668
656 pgpath = parse_path(&path_args, &pg->ps, ti); 669 pgpath = parse_path(&path_args, &pg->ps, ti);
657 if (!pgpath) 670 if (IS_ERR(pgpath)) {
671 r = PTR_ERR(pgpath);
658 goto bad; 672 goto bad;
673 }
659 674
660 pgpath->pg = pg; 675 pgpath->pg = pg;
661 list_add_tail(&pgpath->list, &pg->pgpaths); 676 list_add_tail(&pgpath->list, &pg->pgpaths);
@@ -666,7 +681,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
666 681
667 bad: 682 bad:
668 free_priority_group(pg, ti); 683 free_priority_group(pg, ti);
669 return NULL; 684 return ERR_PTR(r);
670} 685}
671 686
672static int parse_hw_handler(struct arg_set *as, struct multipath *m) 687static int parse_hw_handler(struct arg_set *as, struct multipath *m)
@@ -785,8 +800,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
785 struct priority_group *pg; 800 struct priority_group *pg;
786 801
787 pg = parse_priority_group(&as, m); 802 pg = parse_priority_group(&as, m);
788 if (!pg) { 803 if (IS_ERR(pg)) {
789 r = -EINVAL; 804 r = PTR_ERR(pg);
790 goto bad; 805 goto bad;
791 } 806 }
792 807
@@ -852,13 +867,13 @@ static int fail_path(struct pgpath *pgpath)
852 867
853 spin_lock_irqsave(&m->lock, flags); 868 spin_lock_irqsave(&m->lock, flags);
854 869
855 if (!pgpath->path.is_active) 870 if (!pgpath->is_active)
856 goto out; 871 goto out;
857 872
858 DMWARN("Failing path %s.", pgpath->path.dev->name); 873 DMWARN("Failing path %s.", pgpath->path.dev->name);
859 874
860 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); 875 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
861 pgpath->path.is_active = 0; 876 pgpath->is_active = 0;
862 pgpath->fail_count++; 877 pgpath->fail_count++;
863 878
864 m->nr_valid_paths--; 879 m->nr_valid_paths--;
@@ -870,6 +885,7 @@ static int fail_path(struct pgpath *pgpath)
870 pgpath->path.dev->name, m->nr_valid_paths); 885 pgpath->path.dev->name, m->nr_valid_paths);
871 886
872 queue_work(kmultipathd, &m->trigger_event); 887 queue_work(kmultipathd, &m->trigger_event);
888 queue_work(kmultipathd, &pgpath->deactivate_path);
873 889
874out: 890out:
875 spin_unlock_irqrestore(&m->lock, flags); 891 spin_unlock_irqrestore(&m->lock, flags);
@@ -888,7 +904,7 @@ static int reinstate_path(struct pgpath *pgpath)
888 904
889 spin_lock_irqsave(&m->lock, flags); 905 spin_lock_irqsave(&m->lock, flags);
890 906
891 if (pgpath->path.is_active) 907 if (pgpath->is_active)
892 goto out; 908 goto out;
893 909
894 if (!pgpath->pg->ps.type->reinstate_path) { 910 if (!pgpath->pg->ps.type->reinstate_path) {
@@ -902,7 +918,7 @@ static int reinstate_path(struct pgpath *pgpath)
902 if (r) 918 if (r)
903 goto out; 919 goto out;
904 920
905 pgpath->path.is_active = 1; 921 pgpath->is_active = 1;
906 922
907 m->current_pgpath = NULL; 923 m->current_pgpath = NULL;
908 if (!m->nr_valid_paths++ && m->queue_size) 924 if (!m->nr_valid_paths++ && m->queue_size)
@@ -1290,7 +1306,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
1290 1306
1291 list_for_each_entry(p, &pg->pgpaths, list) { 1307 list_for_each_entry(p, &pg->pgpaths, list) {
1292 DMEMIT("%s %s %u ", p->path.dev->name, 1308 DMEMIT("%s %s %u ", p->path.dev->name,
1293 p->path.is_active ? "A" : "F", 1309 p->is_active ? "A" : "F",
1294 p->fail_count); 1310 p->fail_count);
1295 if (pg->ps.type->status) 1311 if (pg->ps.type->status)
1296 sz += pg->ps.type->status(&pg->ps, 1312 sz += pg->ps.type->status(&pg->ps,
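
parse_path() and parse_priority_group() above stop collapsing every failure into NULL and instead return ERR_PTR(-EINVAL), ERR_PTR(-ENOMEM) or the propagated error, which multipath_ctr() recovers with IS_ERR()/PTR_ERR(). A simplified userspace re-implementation of that convention follows; the kernel's own helpers live in linux/err.h, and the definitions below are illustrative only.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct path { int id; };

static struct path *parse_path(const char *arg)
{
	if (!arg)
		return ERR_PTR(-EINVAL);  /* bad argument, not just "no memory" */

	struct path *p = malloc(sizeof(*p));
	if (!p)
		return ERR_PTR(-ENOMEM);
	p->id = 1;
	return p;
}

int main(void)
{
	struct path *p = parse_path(NULL);

	if (IS_ERR(p)) {
		printf("parse_path failed: %ld\n", PTR_ERR(p));
		return 1;
	}
	free(p);
	return 0;
}
```
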
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index c198b856a452..e230f7196259 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -13,8 +13,6 @@ struct dm_dev;
13 13
14struct dm_path { 14struct dm_path {
15 struct dm_dev *dev; /* Read-only */ 15 struct dm_dev *dev; /* Read-only */
16 unsigned is_active; /* Read-only */
17
18 void *pscontext; /* For path-selector use */ 16 void *pscontext; /* For path-selector use */
19}; 17};
20 18
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ff05fe893083..29913e42c4ab 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,7 +842,9 @@ static int recover(struct mirror_set *ms, struct region *reg)
842 } 842 }
843 843
844 /* hand to kcopyd */ 844 /* hand to kcopyd */
845 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags); 845 if (!errors_handled(ms))
846 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
847
846 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, 848 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
847 flags, recovery_complete, reg); 849 flags, recovery_complete, reg);
848 850
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4de90ab3968b..b745d8ac625b 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -284,8 +284,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
284 284
285 memset(major_minor, 0, sizeof(major_minor)); 285 memset(major_minor, 0, sizeof(major_minor));
286 sprintf(major_minor, "%d:%d", 286 sprintf(major_minor, "%d:%d",
287 bio->bi_bdev->bd_disk->major, 287 MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
288 bio->bi_bdev->bd_disk->first_minor); 288 MINOR(disk_devt(bio->bi_bdev->bd_disk)));
289 289
290 /* 290 /*
291 * Test to see which stripe drive triggered the event 291 * Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 61f441409234..a740a6950f59 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -250,7 +250,8 @@ static void free_devices(struct list_head *devices)
250 struct list_head *tmp, *next; 250 struct list_head *tmp, *next;
251 251
252 list_for_each_safe(tmp, next, devices) { 252 list_for_each_safe(tmp, next, devices) {
253 struct dm_dev *dd = list_entry(tmp, struct dm_dev, list); 253 struct dm_dev_internal *dd =
254 list_entry(tmp, struct dm_dev_internal, list);
254 kfree(dd); 255 kfree(dd);
255 } 256 }
256} 257}
@@ -327,12 +328,12 @@ static int lookup_device(const char *path, dev_t *dev)
327/* 328/*
328 * See if we've already got a device in the list. 329 * See if we've already got a device in the list.
329 */ 330 */
330static struct dm_dev *find_device(struct list_head *l, dev_t dev) 331static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
331{ 332{
332 struct dm_dev *dd; 333 struct dm_dev_internal *dd;
333 334
334 list_for_each_entry (dd, l, list) 335 list_for_each_entry (dd, l, list)
335 if (dd->bdev->bd_dev == dev) 336 if (dd->dm_dev.bdev->bd_dev == dev)
336 return dd; 337 return dd;
337 338
338 return NULL; 339 return NULL;
@@ -341,45 +342,47 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
341/* 342/*
342 * Open a device so we can use it as a map destination. 343 * Open a device so we can use it as a map destination.
343 */ 344 */
344static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md) 345static int open_dev(struct dm_dev_internal *d, dev_t dev,
346 struct mapped_device *md)
345{ 347{
346 static char *_claim_ptr = "I belong to device-mapper"; 348 static char *_claim_ptr = "I belong to device-mapper";
347 struct block_device *bdev; 349 struct block_device *bdev;
348 350
349 int r; 351 int r;
350 352
351 BUG_ON(d->bdev); 353 BUG_ON(d->dm_dev.bdev);
352 354
353 bdev = open_by_devnum(dev, d->mode); 355 bdev = open_by_devnum(dev, d->dm_dev.mode);
354 if (IS_ERR(bdev)) 356 if (IS_ERR(bdev))
355 return PTR_ERR(bdev); 357 return PTR_ERR(bdev);
356 r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); 358 r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
357 if (r) 359 if (r)
358 blkdev_put(bdev); 360 blkdev_put(bdev);
359 else 361 else
360 d->bdev = bdev; 362 d->dm_dev.bdev = bdev;
361 return r; 363 return r;
362} 364}
363 365
364/* 366/*
365 * Close a device that we've been using. 367 * Close a device that we've been using.
366 */ 368 */
367static void close_dev(struct dm_dev *d, struct mapped_device *md) 369static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
368{ 370{
369 if (!d->bdev) 371 if (!d->dm_dev.bdev)
370 return; 372 return;
371 373
372 bd_release_from_disk(d->bdev, dm_disk(md)); 374 bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
373 blkdev_put(d->bdev); 375 blkdev_put(d->dm_dev.bdev);
374 d->bdev = NULL; 376 d->dm_dev.bdev = NULL;
375} 377}
376 378
377/* 379/*
378 * If possible, this checks an area of a destination device is valid. 380 * If possible, this checks an area of a destination device is valid.
379 */ 381 */
380static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len) 382static int check_device_area(struct dm_dev_internal *dd, sector_t start,
383 sector_t len)
381{ 384{
382 sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT; 385 sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
383 386
384 if (!dev_size) 387 if (!dev_size)
385 return 1; 388 return 1;
@@ -392,16 +395,17 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
392 * careful to leave things as they were if we fail to reopen the 395 * careful to leave things as they were if we fail to reopen the
393 * device. 396 * device.
394 */ 397 */
395static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md) 398static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
399 struct mapped_device *md)
396{ 400{
397 int r; 401 int r;
398 struct dm_dev dd_copy; 402 struct dm_dev_internal dd_copy;
399 dev_t dev = dd->bdev->bd_dev; 403 dev_t dev = dd->dm_dev.bdev->bd_dev;
400 404
401 dd_copy = *dd; 405 dd_copy = *dd;
402 406
403 dd->mode |= new_mode; 407 dd->dm_dev.mode |= new_mode;
404 dd->bdev = NULL; 408 dd->dm_dev.bdev = NULL;
405 r = open_dev(dd, dev, md); 409 r = open_dev(dd, dev, md);
406 if (!r) 410 if (!r)
407 close_dev(&dd_copy, md); 411 close_dev(&dd_copy, md);
@@ -421,7 +425,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
421{ 425{
422 int r; 426 int r;
423 dev_t uninitialized_var(dev); 427 dev_t uninitialized_var(dev);
424 struct dm_dev *dd; 428 struct dm_dev_internal *dd;
425 unsigned int major, minor; 429 unsigned int major, minor;
426 430
427 BUG_ON(!t); 431 BUG_ON(!t);
@@ -443,20 +447,20 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
443 if (!dd) 447 if (!dd)
444 return -ENOMEM; 448 return -ENOMEM;
445 449
446 dd->mode = mode; 450 dd->dm_dev.mode = mode;
447 dd->bdev = NULL; 451 dd->dm_dev.bdev = NULL;
448 452
449 if ((r = open_dev(dd, dev, t->md))) { 453 if ((r = open_dev(dd, dev, t->md))) {
450 kfree(dd); 454 kfree(dd);
451 return r; 455 return r;
452 } 456 }
453 457
454 format_dev_t(dd->name, dev); 458 format_dev_t(dd->dm_dev.name, dev);
455 459
456 atomic_set(&dd->count, 0); 460 atomic_set(&dd->count, 0);
457 list_add(&dd->list, &t->devices); 461 list_add(&dd->list, &t->devices);
458 462
459 } else if (dd->mode != (mode | dd->mode)) { 463 } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
460 r = upgrade_mode(dd, mode, t->md); 464 r = upgrade_mode(dd, mode, t->md);
461 if (r) 465 if (r)
462 return r; 466 return r;
@@ -465,11 +469,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
465 469
466 if (!check_device_area(dd, start, len)) { 470 if (!check_device_area(dd, start, len)) {
467 DMWARN("device %s too small for target", path); 471 DMWARN("device %s too small for target", path);
468 dm_put_device(ti, dd); 472 dm_put_device(ti, &dd->dm_dev);
469 return -EINVAL; 473 return -EINVAL;
470 } 474 }
471 475
472 *result = dd; 476 *result = &dd->dm_dev;
473 477
474 return 0; 478 return 0;
475} 479}
@@ -478,6 +482,13 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
478{ 482{
479 struct request_queue *q = bdev_get_queue(bdev); 483 struct request_queue *q = bdev_get_queue(bdev);
480 struct io_restrictions *rs = &ti->limits; 484 struct io_restrictions *rs = &ti->limits;
485 char b[BDEVNAME_SIZE];
486
487 if (unlikely(!q)) {
488 DMWARN("%s: Cannot set limits for nonexistent device %s",
489 dm_device_name(ti->table->md), bdevname(bdev, b));
490 return;
491 }
481 492
482 /* 493 /*
483 * Combine the device limits low. 494 * Combine the device limits low.
@@ -540,8 +551,11 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
540/* 551/*
541 * Decrement a devices use count and remove it if necessary. 552 * Decrement a devices use count and remove it if necessary.
542 */ 553 */
543void dm_put_device(struct dm_target *ti, struct dm_dev *dd) 554void dm_put_device(struct dm_target *ti, struct dm_dev *d)
544{ 555{
556 struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
557 dm_dev);
558
545 if (atomic_dec_and_test(&dd->count)) { 559 if (atomic_dec_and_test(&dd->count)) {
546 close_dev(dd, ti->table->md); 560 close_dev(dd, ti->table->md);
547 list_del(&dd->list); 561 list_del(&dd->list);
@@ -937,13 +951,20 @@ int dm_table_resume_targets(struct dm_table *t)
937 951
938int dm_table_any_congested(struct dm_table *t, int bdi_bits) 952int dm_table_any_congested(struct dm_table *t, int bdi_bits)
939{ 953{
940 struct dm_dev *dd; 954 struct dm_dev_internal *dd;
941 struct list_head *devices = dm_table_get_devices(t); 955 struct list_head *devices = dm_table_get_devices(t);
942 int r = 0; 956 int r = 0;
943 957
944 list_for_each_entry(dd, devices, list) { 958 list_for_each_entry(dd, devices, list) {
945 struct request_queue *q = bdev_get_queue(dd->bdev); 959 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
946 r |= bdi_congested(&q->backing_dev_info, bdi_bits); 960 char b[BDEVNAME_SIZE];
961
962 if (likely(q))
963 r |= bdi_congested(&q->backing_dev_info, bdi_bits);
964 else
965 DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
966 dm_device_name(t->md),
967 bdevname(dd->dm_dev.bdev, b));
947 } 968 }
948 969
949 return r; 970 return r;
@@ -951,13 +972,19 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
951 972
952void dm_table_unplug_all(struct dm_table *t) 973void dm_table_unplug_all(struct dm_table *t)
953{ 974{
954 struct dm_dev *dd; 975 struct dm_dev_internal *dd;
955 struct list_head *devices = dm_table_get_devices(t); 976 struct list_head *devices = dm_table_get_devices(t);
956 977
957 list_for_each_entry(dd, devices, list) { 978 list_for_each_entry(dd, devices, list) {
958 struct request_queue *q = bdev_get_queue(dd->bdev); 979 struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
959 980 char b[BDEVNAME_SIZE];
960 blk_unplug(q); 981
982 if (likely(q))
983 blk_unplug(q);
984 else
985 DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
986 dm_device_name(t->md),
987 bdevname(dd->dm_dev.bdev, b));
961 } 988 }
962} 989}
963 990
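
dm-table now keeps its bookkeeping (list linkage, reference count) in a private struct dm_dev_internal that embeds the public struct dm_dev handed to targets, and dm_put_device() gets back to the wrapper with container_of(). A userspace sketch of that embed-and-recover pattern with simplified types; put_device() and the field layout are invented for the example.

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dm_dev {                  /* what targets see */
	char name[16];
};

struct dm_dev_internal {         /* what the core tracks */
	int count;
	struct dm_dev dm_dev;    /* public part embedded at a known offset */
};

static void put_device(struct dm_dev *d)
{
	struct dm_dev_internal *dd =
		container_of(d, struct dm_dev_internal, dm_dev);

	if (--dd->count == 0) {
		printf("releasing %s\n", dd->dm_dev.name);
		free(dd);
	}
}

int main(void)
{
	struct dm_dev_internal *dd = calloc(1, sizeof(*dd));

	dd->count = 1;
	snprintf(dd->dm_dev.name, sizeof(dd->dm_dev.name), "8:16");
	put_device(&dd->dm_dev);  /* callers only ever hold &dd->dm_dev */
	return 0;
}
```
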
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ace998ce59f6..327de03a5bdf 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -377,13 +377,14 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
377static void start_io_acct(struct dm_io *io) 377static void start_io_acct(struct dm_io *io)
378{ 378{
379 struct mapped_device *md = io->md; 379 struct mapped_device *md = io->md;
380 int cpu;
380 381
381 io->start_time = jiffies; 382 io->start_time = jiffies;
382 383
383 preempt_disable(); 384 cpu = part_stat_lock();
384 disk_round_stats(dm_disk(md)); 385 part_round_stats(cpu, &dm_disk(md)->part0);
385 preempt_enable(); 386 part_stat_unlock();
386 dm_disk(md)->in_flight = atomic_inc_return(&md->pending); 387 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
387} 388}
388 389
389static int end_io_acct(struct dm_io *io) 390static int end_io_acct(struct dm_io *io)
@@ -391,15 +392,16 @@ static int end_io_acct(struct dm_io *io)
391 struct mapped_device *md = io->md; 392 struct mapped_device *md = io->md;
392 struct bio *bio = io->bio; 393 struct bio *bio = io->bio;
393 unsigned long duration = jiffies - io->start_time; 394 unsigned long duration = jiffies - io->start_time;
394 int pending; 395 int pending, cpu;
395 int rw = bio_data_dir(bio); 396 int rw = bio_data_dir(bio);
396 397
397 preempt_disable(); 398 cpu = part_stat_lock();
398 disk_round_stats(dm_disk(md)); 399 part_round_stats(cpu, &dm_disk(md)->part0);
399 preempt_enable(); 400 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
400 dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending); 401 part_stat_unlock();
401 402
402 disk_stat_add(dm_disk(md), ticks[rw], duration); 403 dm_disk(md)->part0.in_flight = pending =
404 atomic_dec_return(&md->pending);
403 405
404 return !pending; 406 return !pending;
405} 407}
@@ -885,6 +887,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
885 int r = -EIO; 887 int r = -EIO;
886 int rw = bio_data_dir(bio); 888 int rw = bio_data_dir(bio);
887 struct mapped_device *md = q->queuedata; 889 struct mapped_device *md = q->queuedata;
890 int cpu;
888 891
889 /* 892 /*
890 * There is no use in forwarding any barrier request since we can't 893 * There is no use in forwarding any barrier request since we can't
@@ -897,8 +900,10 @@ static int dm_request(struct request_queue *q, struct bio *bio)
897 900
898 down_read(&md->io_lock); 901 down_read(&md->io_lock);
899 902
900 disk_stat_inc(dm_disk(md), ios[rw]); 903 cpu = part_stat_lock();
901 disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio)); 904 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
905 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
906 part_stat_unlock();
902 907
903 /* 908 /*
904 * If we're suspended we have to queue 909 * If we're suspended we have to queue
@@ -1146,7 +1151,7 @@ static void unlock_fs(struct mapped_device *md);
1146 1151
1147static void free_dev(struct mapped_device *md) 1152static void free_dev(struct mapped_device *md)
1148{ 1153{
1149 int minor = md->disk->first_minor; 1154 int minor = MINOR(disk_devt(md->disk));
1150 1155
1151 if (md->suspended_bdev) { 1156 if (md->suspended_bdev) {
1152 unlock_fs(md); 1157 unlock_fs(md);
@@ -1182,7 +1187,7 @@ static void event_callback(void *context)
1182 list_splice_init(&md->uevent_list, &uevents); 1187 list_splice_init(&md->uevent_list, &uevents);
1183 spin_unlock_irqrestore(&md->uevent_lock, flags); 1188 spin_unlock_irqrestore(&md->uevent_lock, flags);
1184 1189
1185 dm_send_uevents(&uevents, &md->disk->dev.kobj); 1190 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1186 1191
1187 atomic_inc(&md->event_nr); 1192 atomic_inc(&md->event_nr);
1188 wake_up(&md->eventq); 1193 wake_up(&md->eventq);
@@ -1267,7 +1272,7 @@ static struct mapped_device *dm_find_md(dev_t dev)
1267 1272
1268 md = idr_find(&_minor_idr, minor); 1273 md = idr_find(&_minor_idr, minor);
1269 if (md && (md == MINOR_ALLOCED || 1274 if (md && (md == MINOR_ALLOCED ||
1270 (dm_disk(md)->first_minor != minor) || 1275 (MINOR(disk_devt(dm_disk(md))) != minor) ||
1271 test_bit(DMF_FREEING, &md->flags))) { 1276 test_bit(DMF_FREEING, &md->flags))) {
1272 md = NULL; 1277 md = NULL;
1273 goto out; 1278 goto out;
@@ -1318,7 +1323,8 @@ void dm_put(struct mapped_device *md)
1318 1323
1319 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { 1324 if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
1320 map = dm_get_table(md); 1325 map = dm_get_table(md);
1321 idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor); 1326 idr_replace(&_minor_idr, MINOR_ALLOCED,
1327 MINOR(disk_devt(dm_disk(md))));
1322 set_bit(DMF_FREEING, &md->flags); 1328 set_bit(DMF_FREEING, &md->flags);
1323 spin_unlock(&_minor_lock); 1329 spin_unlock(&_minor_lock);
1324 if (!dm_suspended(md)) { 1330 if (!dm_suspended(md)) {
@@ -1638,7 +1644,7 @@ out:
1638 *---------------------------------------------------------------*/ 1644 *---------------------------------------------------------------*/
1639void dm_kobject_uevent(struct mapped_device *md) 1645void dm_kobject_uevent(struct mapped_device *md)
1640{ 1646{
1641 kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE); 1647 kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
1642} 1648}
1643 1649
1644uint32_t dm_next_uevent_seq(struct mapped_device *md) 1650uint32_t dm_next_uevent_seq(struct mapped_device *md)
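
The dm.c accounting above moves from disk_stat_*()/disk_round_stats() to the part_stat_lock()/part_round_stats()/part_stat_add() interface on the disk's part0, but the bookkeeping shape is unchanged: note the start time and raise the in-flight count when an I/O is issued, then add the elapsed time to the per-direction counters and drop the count on completion. A rough userspace analogue follows; the per-CPU part_stat machinery is deliberately left out, and struct io_stats plus the millisecond arithmetic are assumptions of the sketch.

```c
#include <stdio.h>
#include <time.h>

struct io_stats {
	unsigned long ticks[2];   /* accumulated time, read/write */
	int in_flight;
};

struct io {
	struct timespec start;
	int rw;                   /* 0 = read, 1 = write */
};

static struct io_stats stats;

static void start_io_acct(struct io *io)
{
	clock_gettime(CLOCK_MONOTONIC, &io->start);
	stats.in_flight++;
}

static int end_io_acct(struct io *io)     /* returns 1 when device is idle */
{
	struct timespec now;
	long ms;

	clock_gettime(CLOCK_MONOTONIC, &now);
	ms = (now.tv_sec - io->start.tv_sec) * 1000L +
	     (now.tv_nsec - io->start.tv_nsec) / 1000000L;
	stats.ticks[io->rw] += ms;
	return --stats.in_flight == 0;
}

int main(void)
{
	struct io io = { .rw = 1 };

	start_io_acct(&io);
	/* ... the request would be processed here ... */
	if (end_io_acct(&io))
		printf("device idle, write ticks=%lu\n", stats.ticks[1]);
	return 0;
}
```
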
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1e59a0b0a78a..cd189da2b2fa 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -25,13 +25,10 @@
25/* 25/*
26 * List of devices that a metadevice uses and should open/close. 26 * List of devices that a metadevice uses and should open/close.
27 */ 27 */
28struct dm_dev { 28struct dm_dev_internal {
29 struct list_head list; 29 struct list_head list;
30
31 atomic_t count; 30 atomic_t count;
32 int mode; 31 struct dm_dev dm_dev;
33 struct block_device *bdev;
34 char name[16];
35}; 32};
36 33
37struct dm_table; 34struct dm_table;
@@ -49,7 +46,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
49void dm_table_postsuspend_targets(struct dm_table *t); 46void dm_table_postsuspend_targets(struct dm_table *t);
50int dm_table_resume_targets(struct dm_table *t); 47int dm_table_resume_targets(struct dm_table *t);
51int dm_table_any_congested(struct dm_table *t, int bdi_bits); 48int dm_table_any_congested(struct dm_table *t, int bdi_bits);
52void dm_table_unplug_all(struct dm_table *t);
53 49
54/* 50/*
55 * To check the return value from dm_table_find_target(). 51 * To check the return value from dm_table_find_target().
@@ -93,8 +89,6 @@ void dm_linear_exit(void);
93int dm_stripe_init(void); 89int dm_stripe_init(void);
94void dm_stripe_exit(void); 90void dm_stripe_exit(void);
95 91
96void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
97union map_info *dm_get_mapinfo(struct bio *bio);
98int dm_open_count(struct mapped_device *md); 92int dm_open_count(struct mapped_device *md);
99int dm_lock_for_deletion(struct mapped_device *md); 93int dm_lock_for_deletion(struct mapped_device *md);
100 94
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b1eebf88c209..b9cbee688fae 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -318,14 +318,18 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
318 mddev_t *mddev = q->queuedata; 318 mddev_t *mddev = q->queuedata;
319 dev_info_t *tmp_dev; 319 dev_info_t *tmp_dev;
320 sector_t block; 320 sector_t block;
321 int cpu;
321 322
322 if (unlikely(bio_barrier(bio))) { 323 if (unlikely(bio_barrier(bio))) {
323 bio_endio(bio, -EOPNOTSUPP); 324 bio_endio(bio, -EOPNOTSUPP);
324 return 0; 325 return 0;
325 } 326 }
326 327
327 disk_stat_inc(mddev->gendisk, ios[rw]); 328 cpu = part_stat_lock();
328 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 329 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
330 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
331 bio_sectors(bio));
332 part_stat_unlock();
329 333
330 tmp_dev = which_dev(mddev, bio->bi_sector); 334 tmp_dev = which_dev(mddev, bio->bi_sector);
331 block = bio->bi_sector >> 1; 335 block = bio->bi_sector >> 1;
@@ -349,7 +353,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
349 * split it. 353 * split it.
350 */ 354 */
351 struct bio_pair *bp; 355 struct bio_pair *bp;
352 bp = bio_split(bio, bio_split_pool, 356 bp = bio_split(bio,
353 ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector); 357 ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector);
354 if (linear_make_request(q, &bp->bio1)) 358 if (linear_make_request(q, &bp->bio1))
355 generic_make_request(&bp->bio1); 359 generic_make_request(&bp->bio1);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index deeac4b44173..0a3a4bdcd4af 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1464,10 +1464,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1464 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 1464 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
1465 goto fail; 1465 goto fail;
1466 1466
1467 if (rdev->bdev->bd_part) 1467 ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
1468 ko = &rdev->bdev->bd_part->dev.kobj;
1469 else
1470 ko = &rdev->bdev->bd_disk->dev.kobj;
1471 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) { 1468 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1472 kobject_del(&rdev->kobj); 1469 kobject_del(&rdev->kobj);
1473 goto fail; 1470 goto fail;
@@ -3470,8 +3467,8 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
3470 disk->queue = mddev->queue; 3467 disk->queue = mddev->queue;
3471 add_disk(disk); 3468 add_disk(disk);
3472 mddev->gendisk = disk; 3469 mddev->gendisk = disk;
3473 error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj, 3470 error = kobject_init_and_add(&mddev->kobj, &md_ktype,
3474 "%s", "md"); 3471 &disk_to_dev(disk)->kobj, "%s", "md");
3475 mutex_unlock(&disks_mutex); 3472 mutex_unlock(&disks_mutex);
3476 if (error) 3473 if (error)
3477 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3474 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
@@ -3761,7 +3758,7 @@ static int do_md_run(mddev_t * mddev)
3761 sysfs_notify(&mddev->kobj, NULL, "array_state"); 3758 sysfs_notify(&mddev->kobj, NULL, "array_state");
3762 sysfs_notify(&mddev->kobj, NULL, "sync_action"); 3759 sysfs_notify(&mddev->kobj, NULL, "sync_action");
3763 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3760 sysfs_notify(&mddev->kobj, NULL, "degraded");
3764 kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE); 3761 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
3765 return 0; 3762 return 0;
3766} 3763}
3767 3764
@@ -5549,8 +5546,8 @@ static int is_mddev_idle(mddev_t *mddev)
5549 rcu_read_lock(); 5546 rcu_read_lock();
5550 rdev_for_each_rcu(rdev, mddev) { 5547 rdev_for_each_rcu(rdev, mddev) {
5551 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 5548 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5552 curr_events = disk_stat_read(disk, sectors[0]) + 5549 curr_events = part_stat_read(&disk->part0, sectors[0]) +
5553 disk_stat_read(disk, sectors[1]) - 5550 part_stat_read(&disk->part0, sectors[1]) -
5554 atomic_read(&disk->sync_io); 5551 atomic_read(&disk->sync_io);
5555 /* sync IO will cause sync_io to increase before the disk_stats 5552 /* sync IO will cause sync_io to increase before the disk_stats
5556 * as sync_io is counted when a request starts, and 5553 * as sync_io is counted when a request starts, and
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index c4779ccba1c3..8bb8794129b3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -147,6 +147,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
147 struct multipath_bh * mp_bh; 147 struct multipath_bh * mp_bh;
148 struct multipath_info *multipath; 148 struct multipath_info *multipath;
149 const int rw = bio_data_dir(bio); 149 const int rw = bio_data_dir(bio);
150 int cpu;
150 151
151 if (unlikely(bio_barrier(bio))) { 152 if (unlikely(bio_barrier(bio))) {
152 bio_endio(bio, -EOPNOTSUPP); 153 bio_endio(bio, -EOPNOTSUPP);
@@ -158,8 +159,11 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
158 mp_bh->master_bio = bio; 159 mp_bh->master_bio = bio;
159 mp_bh->mddev = mddev; 160 mp_bh->mddev = mddev;
160 161
161 disk_stat_inc(mddev->gendisk, ios[rw]); 162 cpu = part_stat_lock();
162 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 163 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
164 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
165 bio_sectors(bio));
166 part_stat_unlock();
163 167
164 mp_bh->path = multipath_map(conf); 168 mp_bh->path = multipath_map(conf);
165 if (mp_bh->path < 0) { 169 if (mp_bh->path < 0) {
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 183610635661..53508a8a981d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -399,14 +399,18 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
399 sector_t chunk; 399 sector_t chunk;
400 sector_t block, rsect; 400 sector_t block, rsect;
401 const int rw = bio_data_dir(bio); 401 const int rw = bio_data_dir(bio);
402 int cpu;
402 403
403 if (unlikely(bio_barrier(bio))) { 404 if (unlikely(bio_barrier(bio))) {
404 bio_endio(bio, -EOPNOTSUPP); 405 bio_endio(bio, -EOPNOTSUPP);
405 return 0; 406 return 0;
406 } 407 }
407 408
408 disk_stat_inc(mddev->gendisk, ios[rw]); 409 cpu = part_stat_lock();
409 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 410 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
411 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
412 bio_sectors(bio));
413 part_stat_unlock();
410 414
411 chunk_size = mddev->chunk_size >> 10; 415 chunk_size = mddev->chunk_size >> 10;
412 chunk_sects = mddev->chunk_size >> 9; 416 chunk_sects = mddev->chunk_size >> 9;
@@ -423,7 +427,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
423 /* This is a one page bio that upper layers 427 /* This is a one page bio that upper layers
424 * refuse to split for us, so we need to split it. 428 * refuse to split for us, so we need to split it.
425 */ 429 */
426 bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); 430 bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
427 if (raid0_make_request(q, &bp->bio1)) 431 if (raid0_make_request(q, &bp->bio1))
428 generic_make_request(&bp->bio1); 432 generic_make_request(&bp->bio1);
429 if (raid0_make_request(q, &bp->bio2)) 433 if (raid0_make_request(q, &bp->bio2))
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 03a5ab705c20..b9764429d856 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -779,7 +779,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
779 struct page **behind_pages = NULL; 779 struct page **behind_pages = NULL;
780 const int rw = bio_data_dir(bio); 780 const int rw = bio_data_dir(bio);
781 const int do_sync = bio_sync(bio); 781 const int do_sync = bio_sync(bio);
782 int do_barriers; 782 int cpu, do_barriers;
783 mdk_rdev_t *blocked_rdev; 783 mdk_rdev_t *blocked_rdev;
784 784
785 /* 785 /*
@@ -804,8 +804,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
804 804
805 bitmap = mddev->bitmap; 805 bitmap = mddev->bitmap;
806 806
807 disk_stat_inc(mddev->gendisk, ios[rw]); 807 cpu = part_stat_lock();
808 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 808 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
809 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
810 bio_sectors(bio));
811 part_stat_unlock();
809 812
810 /* 813 /*
811 * make_request() can abort the operation when READA is being 814 * make_request() can abort the operation when READA is being
@@ -1302,9 +1305,6 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1302 sbio->bi_size = r1_bio->sectors << 9; 1305 sbio->bi_size = r1_bio->sectors << 9;
1303 sbio->bi_idx = 0; 1306 sbio->bi_idx = 0;
1304 sbio->bi_phys_segments = 0; 1307 sbio->bi_phys_segments = 0;
1305 sbio->bi_hw_segments = 0;
1306 sbio->bi_hw_front_size = 0;
1307 sbio->bi_hw_back_size = 0;
1308 sbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1308 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1309 sbio->bi_flags |= 1 << BIO_UPTODATE; 1309 sbio->bi_flags |= 1 << BIO_UPTODATE;
1310 sbio->bi_next = NULL; 1310 sbio->bi_next = NULL;
@@ -1790,7 +1790,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1790 bio->bi_vcnt = 0; 1790 bio->bi_vcnt = 0;
1791 bio->bi_idx = 0; 1791 bio->bi_idx = 0;
1792 bio->bi_phys_segments = 0; 1792 bio->bi_phys_segments = 0;
1793 bio->bi_hw_segments = 0;
1794 bio->bi_size = 0; 1793 bio->bi_size = 0;
1795 bio->bi_end_io = NULL; 1794 bio->bi_end_io = NULL;
1796 bio->bi_private = NULL; 1795 bio->bi_private = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e34cd0e62473..8bdc9bfc2887 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -789,6 +789,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
789 mirror_info_t *mirror; 789 mirror_info_t *mirror;
790 r10bio_t *r10_bio; 790 r10bio_t *r10_bio;
791 struct bio *read_bio; 791 struct bio *read_bio;
792 int cpu;
792 int i; 793 int i;
793 int chunk_sects = conf->chunk_mask + 1; 794 int chunk_sects = conf->chunk_mask + 1;
794 const int rw = bio_data_dir(bio); 795 const int rw = bio_data_dir(bio);
@@ -816,7 +817,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
816 /* This is a one page bio that upper layers 817 /* This is a one page bio that upper layers
817 * refuse to split for us, so we need to split it. 818 * refuse to split for us, so we need to split it.
818 */ 819 */
819 bp = bio_split(bio, bio_split_pool, 820 bp = bio_split(bio,
820 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); 821 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
821 if (make_request(q, &bp->bio1)) 822 if (make_request(q, &bp->bio1))
822 generic_make_request(&bp->bio1); 823 generic_make_request(&bp->bio1);
@@ -843,8 +844,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
843 */ 844 */
844 wait_barrier(conf); 845 wait_barrier(conf);
845 846
846 disk_stat_inc(mddev->gendisk, ios[rw]); 847 cpu = part_stat_lock();
847 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 848 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
849 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
850 bio_sectors(bio));
851 part_stat_unlock();
848 852
849 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 853 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
850 854
@@ -1345,9 +1349,6 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1345 tbio->bi_size = r10_bio->sectors << 9; 1349 tbio->bi_size = r10_bio->sectors << 9;
1346 tbio->bi_idx = 0; 1350 tbio->bi_idx = 0;
1347 tbio->bi_phys_segments = 0; 1351 tbio->bi_phys_segments = 0;
1348 tbio->bi_hw_segments = 0;
1349 tbio->bi_hw_front_size = 0;
1350 tbio->bi_hw_back_size = 0;
1351 tbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1352 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1352 tbio->bi_flags |= 1 << BIO_UPTODATE; 1353 tbio->bi_flags |= 1 << BIO_UPTODATE;
1353 tbio->bi_next = NULL; 1354 tbio->bi_next = NULL;
@@ -1947,7 +1948,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1947 bio->bi_vcnt = 0; 1948 bio->bi_vcnt = 0;
1948 bio->bi_idx = 0; 1949 bio->bi_idx = 0;
1949 bio->bi_phys_segments = 0; 1950 bio->bi_phys_segments = 0;
1950 bio->bi_hw_segments = 0;
1951 bio->bi_size = 0; 1951 bio->bi_size = 0;
1952 } 1952 }
1953 1953
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 224de022e7c5..ae16794bef20 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -101,6 +101,40 @@
101const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); 101const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
102#endif 102#endif
103 103
104/*
105 * We maintain a biased count of active stripes in the bottom 16 bits of
106 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
107 */
108static inline int raid5_bi_phys_segments(struct bio *bio)
109{
110 return bio->bi_phys_segments & 0xffff;
111}
112
113static inline int raid5_bi_hw_segments(struct bio *bio)
114{
115 return (bio->bi_phys_segments >> 16) & 0xffff;
116}
117
118static inline int raid5_dec_bi_phys_segments(struct bio *bio)
119{
120 --bio->bi_phys_segments;
121 return raid5_bi_phys_segments(bio);
122}
123
124static inline int raid5_dec_bi_hw_segments(struct bio *bio)
125{
126 unsigned short val = raid5_bi_hw_segments(bio);
127
128 --val;
129 bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
130 return val;
131}
132
133static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
134{
 135 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
136}
137
104static inline int raid6_next_disk(int disk, int raid_disks) 138static inline int raid6_next_disk(int disk, int raid_disks)
105{ 139{
106 disk++; 140 disk++;
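
The helpers added above fold the old bi_hw_segments counter into bi_phys_segments: the low 16 bits carry the biased count of active stripes and the high 16 bits the count of processed stripes. Below is a standalone sketch of the same bit-packing using plain uint32_t arithmetic, with the two halves combined by a bitwise OR; the decrement here touches only the low half, a slight simplification of the kernel helper.

```c
#include <stdint.h>
#include <stdio.h>

static inline int phys_segments(uint32_t v) { return v & 0xffff; }
static inline int hw_segments(uint32_t v)   { return (v >> 16) & 0xffff; }

static inline uint32_t set_hw_segments(uint32_t v, unsigned int cnt)
{
	return (uint32_t)phys_segments(v) | (cnt << 16);  /* bitwise OR */
}

static inline uint32_t dec_phys_segments(uint32_t v)
{
	return (v & 0xffff0000u) | ((phys_segments(v) - 1) & 0xffff);
}

int main(void)
{
	uint32_t field = 1;                   /* biased active count of 1 */

	field = set_hw_segments(field, 5);    /* 5 stripes processed so far */
	printf("active=%d processed=%d\n",
	       phys_segments(field), hw_segments(field));

	field = dec_phys_segments(field);
	printf("active=%d processed=%d\n",
	       phys_segments(field), hw_segments(field));
	return 0;
}
```
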
@@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
507 while (rbi && rbi->bi_sector < 541 while (rbi && rbi->bi_sector <
508 dev->sector + STRIPE_SECTORS) { 542 dev->sector + STRIPE_SECTORS) {
509 rbi2 = r5_next_bio(rbi, dev->sector); 543 rbi2 = r5_next_bio(rbi, dev->sector);
510 if (--rbi->bi_phys_segments == 0) { 544 if (!raid5_dec_bi_phys_segments(rbi)) {
511 rbi->bi_next = return_bi; 545 rbi->bi_next = return_bi;
512 return_bi = rbi; 546 return_bi = rbi;
513 } 547 }
@@ -1725,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
1725 if (*bip) 1759 if (*bip)
1726 bi->bi_next = *bip; 1760 bi->bi_next = *bip;
1727 *bip = bi; 1761 *bip = bi;
1728 bi->bi_phys_segments ++; 1762 bi->bi_phys_segments++;
1729 spin_unlock_irq(&conf->device_lock); 1763 spin_unlock_irq(&conf->device_lock);
1730 spin_unlock(&sh->lock); 1764 spin_unlock(&sh->lock);
1731 1765
@@ -1819,7 +1853,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
1819 sh->dev[i].sector + STRIPE_SECTORS) { 1853 sh->dev[i].sector + STRIPE_SECTORS) {
1820 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1854 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1821 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1855 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1822 if (--bi->bi_phys_segments == 0) { 1856 if (!raid5_dec_bi_phys_segments(bi)) {
1823 md_write_end(conf->mddev); 1857 md_write_end(conf->mddev);
1824 bi->bi_next = *return_bi; 1858 bi->bi_next = *return_bi;
1825 *return_bi = bi; 1859 *return_bi = bi;
@@ -1834,7 +1868,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
1834 sh->dev[i].sector + STRIPE_SECTORS) { 1868 sh->dev[i].sector + STRIPE_SECTORS) {
1835 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1869 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1836 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1870 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1837 if (--bi->bi_phys_segments == 0) { 1871 if (!raid5_dec_bi_phys_segments(bi)) {
1838 md_write_end(conf->mddev); 1872 md_write_end(conf->mddev);
1839 bi->bi_next = *return_bi; 1873 bi->bi_next = *return_bi;
1840 *return_bi = bi; 1874 *return_bi = bi;
@@ -1858,7 +1892,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
1858 struct bio *nextbi = 1892 struct bio *nextbi =
1859 r5_next_bio(bi, sh->dev[i].sector); 1893 r5_next_bio(bi, sh->dev[i].sector);
1860 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1894 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1861 if (--bi->bi_phys_segments == 0) { 1895 if (!raid5_dec_bi_phys_segments(bi)) {
1862 bi->bi_next = *return_bi; 1896 bi->bi_next = *return_bi;
1863 *return_bi = bi; 1897 *return_bi = bi;
1864 } 1898 }
@@ -2033,7 +2067,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
2033 while (wbi && wbi->bi_sector < 2067 while (wbi && wbi->bi_sector <
2034 dev->sector + STRIPE_SECTORS) { 2068 dev->sector + STRIPE_SECTORS) {
2035 wbi2 = r5_next_bio(wbi, dev->sector); 2069 wbi2 = r5_next_bio(wbi, dev->sector);
2036 if (--wbi->bi_phys_segments == 0) { 2070 if (!raid5_dec_bi_phys_segments(wbi)) {
2037 md_write_end(conf->mddev); 2071 md_write_end(conf->mddev);
2038 wbi->bi_next = *return_bi; 2072 wbi->bi_next = *return_bi;
2039 *return_bi = wbi; 2073 *return_bi = wbi;
@@ -2814,7 +2848,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2814 copy_data(0, rbi, dev->page, dev->sector); 2848 copy_data(0, rbi, dev->page, dev->sector);
2815 rbi2 = r5_next_bio(rbi, dev->sector); 2849 rbi2 = r5_next_bio(rbi, dev->sector);
2816 spin_lock_irq(&conf->device_lock); 2850 spin_lock_irq(&conf->device_lock);
2817 if (--rbi->bi_phys_segments == 0) { 2851 if (!raid5_dec_bi_phys_segments(rbi)) {
2818 rbi->bi_next = return_bi; 2852 rbi->bi_next = return_bi;
2819 return_bi = rbi; 2853 return_bi = rbi;
2820 } 2854 }
@@ -3155,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3155 if(bi) { 3189 if(bi) {
3156 conf->retry_read_aligned_list = bi->bi_next; 3190 conf->retry_read_aligned_list = bi->bi_next;
3157 bi->bi_next = NULL; 3191 bi->bi_next = NULL;
3192 /*
3193		 * this sets the active stripe count to 1 and the processed
3194		 * stripe count to zero (upper 16 bits)
3195 */
3158 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3196 bi->bi_phys_segments = 1; /* biased count of active stripes */
3159 bi->bi_hw_segments = 0; /* count of processed stripes */
3160 } 3197 }
3161 3198
3162 return bi; 3199 return bi;
@@ -3206,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
3206 if ((bi->bi_size>>9) > q->max_sectors) 3243 if ((bi->bi_size>>9) > q->max_sectors)
3207 return 0; 3244 return 0;
3208 blk_recount_segments(q, bi); 3245 blk_recount_segments(q, bi);
3209 if (bi->bi_phys_segments > q->max_phys_segments || 3246 if (bi->bi_phys_segments > q->max_phys_segments)
3210 bi->bi_hw_segments > q->max_hw_segments)
3211 return 0; 3247 return 0;
3212 3248
3213 if (q->merge_bvec_fn) 3249 if (q->merge_bvec_fn)
@@ -3351,7 +3387,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3351 sector_t logical_sector, last_sector; 3387 sector_t logical_sector, last_sector;
3352 struct stripe_head *sh; 3388 struct stripe_head *sh;
3353 const int rw = bio_data_dir(bi); 3389 const int rw = bio_data_dir(bi);
3354 int remaining; 3390 int cpu, remaining;
3355 3391
3356 if (unlikely(bio_barrier(bi))) { 3392 if (unlikely(bio_barrier(bi))) {
3357 bio_endio(bi, -EOPNOTSUPP); 3393 bio_endio(bi, -EOPNOTSUPP);
@@ -3360,8 +3396,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
3360 3396
3361 md_write_start(mddev, bi); 3397 md_write_start(mddev, bi);
3362 3398
3363 disk_stat_inc(mddev->gendisk, ios[rw]); 3399 cpu = part_stat_lock();
3364 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 3400 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3401 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3402 bio_sectors(bi));
3403 part_stat_unlock();
3365 3404
3366 if (rw == READ && 3405 if (rw == READ &&
3367 mddev->reshape_position == MaxSector && 3406 mddev->reshape_position == MaxSector &&
@@ -3468,7 +3507,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3468 3507
3469 } 3508 }
3470 spin_lock_irq(&conf->device_lock); 3509 spin_lock_irq(&conf->device_lock);
3471 remaining = --bi->bi_phys_segments; 3510 remaining = raid5_dec_bi_phys_segments(bi);
3472 spin_unlock_irq(&conf->device_lock); 3511 spin_unlock_irq(&conf->device_lock);
3473 if (remaining == 0) { 3512 if (remaining == 0) {
3474 3513
@@ -3752,7 +3791,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3752 sector += STRIPE_SECTORS, 3791 sector += STRIPE_SECTORS,
3753 scnt++) { 3792 scnt++) {
3754 3793
3755 if (scnt < raid_bio->bi_hw_segments) 3794 if (scnt < raid5_bi_hw_segments(raid_bio))
3756 /* already done this stripe */ 3795 /* already done this stripe */
3757 continue; 3796 continue;
3758 3797
@@ -3760,7 +3799,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3760 3799
3761 if (!sh) { 3800 if (!sh) {
3762 /* failed to get a stripe - must wait */ 3801 /* failed to get a stripe - must wait */
3763 raid_bio->bi_hw_segments = scnt; 3802 raid5_set_bi_hw_segments(raid_bio, scnt);
3764 conf->retry_read_aligned = raid_bio; 3803 conf->retry_read_aligned = raid_bio;
3765 return handled; 3804 return handled;
3766 } 3805 }
@@ -3768,7 +3807,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3768 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 3807 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
3769 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 3808 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
3770 release_stripe(sh); 3809 release_stripe(sh);
3771 raid_bio->bi_hw_segments = scnt; 3810 raid5_set_bi_hw_segments(raid_bio, scnt);
3772 conf->retry_read_aligned = raid_bio; 3811 conf->retry_read_aligned = raid_bio;
3773 return handled; 3812 return handled;
3774 } 3813 }
@@ -3778,7 +3817,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3778 handled++; 3817 handled++;
3779 } 3818 }
3780 spin_lock_irq(&conf->device_lock); 3819 spin_lock_irq(&conf->device_lock);
3781 remaining = --raid_bio->bi_phys_segments; 3820 remaining = raid5_dec_bi_phys_segments(raid_bio);
3782 spin_unlock_irq(&conf->device_lock); 3821 spin_unlock_irq(&conf->device_lock);
3783 if (remaining == 0) 3822 if (remaining == 0)
3784 bio_endio(raid_bio, 0); 3823 bio_endio(raid_bio, 0);
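
The raid5 hunks above replace direct use of bio->bi_phys_segments and the removed bi_hw_segments with helpers that pack two counters into one field: the biased count of active stripes lives in the low 16 bits, the count of processed stripes in the high 16 bits. A minimal stand-alone sketch of that packing, in user-space C with illustrative names (it is not the kernel code itself):

#include <stdio.h>

/*
 * Toy stand-in for struct bio: only the packed counter field is modeled.
 * Low 16 bits  = biased count of active stripes.
 * High 16 bits = count of processed stripes.
 */
struct demo_bio {
	unsigned int bi_phys_segments;
};

static inline int demo_phys_segments(struct demo_bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int demo_hw_segments(struct demo_bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline void demo_set_hw_segments(struct demo_bio *bio, unsigned int cnt)
{
	/* bitwise OR keeps the low 16 bits (active count) intact */
	bio->bi_phys_segments = demo_phys_segments(bio) | (cnt << 16);
}

int main(void)
{
	struct demo_bio bio = { .bi_phys_segments = 1 };	/* biased active count */

	demo_set_hw_segments(&bio, 3);				/* 3 stripes processed */
	printf("active=%d processed=%d\n",
	       demo_phys_segments(&bio), demo_hw_segments(&bio));
	return 0;
}

Note that the set helper must use a bitwise OR; a logical OR would collapse the packed value to 0 or 1 and lose both counters.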
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index d2d2318dafa4..6e291bf8237a 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -197,7 +197,7 @@ static int mspro_block_bd_open(struct inode *inode, struct file *filp)
197static int mspro_block_disk_release(struct gendisk *disk) 197static int mspro_block_disk_release(struct gendisk *disk)
198{ 198{
199 struct mspro_block_data *msb = disk->private_data; 199 struct mspro_block_data *msb = disk->private_data;
200 int disk_id = disk->first_minor >> MSPRO_BLOCK_PART_SHIFT; 200 int disk_id = MINOR(disk_devt(disk)) >> MSPRO_BLOCK_PART_SHIFT;
201 201
202 mutex_lock(&mspro_block_disk_lock); 202 mutex_lock(&mspro_block_disk_lock);
203 203
@@ -828,7 +828,7 @@ static void mspro_block_submit_req(struct request_queue *q)
828 828
829 if (msb->eject) { 829 if (msb->eject) {
830 while ((req = elv_next_request(q)) != NULL) 830 while ((req = elv_next_request(q)) != NULL)
831 end_queued_request(req, -ENODEV); 831 __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
832 832
833 return; 833 return;
834 } 834 }
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
index facdb9893c84..1ee8501e90f1 100644
--- a/drivers/misc/eeepc-laptop.c
+++ b/drivers/misc/eeepc-laptop.c
@@ -450,12 +450,14 @@ static int eeepc_get_fan_pwm(void)
450 int value = 0; 450 int value = 0;
451 451
452 read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value); 452 read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
453 value = value * 255 / 100;
453 return (value); 454 return (value);
454} 455}
455 456
456static void eeepc_set_fan_pwm(int value) 457static void eeepc_set_fan_pwm(int value)
457{ 458{
458 value = SENSORS_LIMIT(value, 0, 100); 459 value = SENSORS_LIMIT(value, 0, 255);
460 value = value * 100 / 255;
459 ec_write(EEEPC_EC_SC02, value); 461 ec_write(EEEPC_EC_SC02, value);
460} 462}
461 463
@@ -520,15 +522,23 @@ static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
520 static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0); 522 static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
521 523
522EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL); 524EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
523EEEPC_CREATE_SENSOR_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, 525EEEPC_CREATE_SENSOR_ATTR(pwm1, S_IRUGO | S_IWUSR,
524 eeepc_get_fan_pwm, eeepc_set_fan_pwm); 526 eeepc_get_fan_pwm, eeepc_set_fan_pwm);
525EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, 527EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
526 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl); 528 eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
527 529
530static ssize_t
531show_name(struct device *dev, struct device_attribute *attr, char *buf)
532{
533 return sprintf(buf, "eeepc\n");
534}
535static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
536
528static struct attribute *hwmon_attributes[] = { 537static struct attribute *hwmon_attributes[] = {
529 &sensor_dev_attr_fan1_pwm.dev_attr.attr, 538 &sensor_dev_attr_pwm1.dev_attr.attr,
530 &sensor_dev_attr_fan1_input.dev_attr.attr, 539 &sensor_dev_attr_fan1_input.dev_attr.attr,
531 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 540 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
541 &sensor_dev_attr_name.dev_attr.attr,
532 NULL 542 NULL
533}; 543};
534 544
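
The eeepc-laptop hunks rename the fan attribute to the standard hwmon name pwm1 and rescale between the EC's 0-100 duty-cycle range and hwmon's 0-255 PWM range. A small user-space sketch of that conversion, assuming the same integer scaling as the driver (clamp_int stands in for SENSORS_LIMIT):

#include <stdio.h>

/* Stand-in for SENSORS_LIMIT(): clamp value to [lo, hi]. */
static int clamp_int(int value, int lo, int hi)
{
	if (value < lo)
		return lo;
	if (value > hi)
		return hi;
	return value;
}

/* EC stores the fan duty cycle as 0-100; hwmon pwm1 expects 0-255. */
static int ec_to_pwm(int ec_value)	/* read path */
{
	return ec_value * 255 / 100;
}

static int pwm_to_ec(int pwm_value)	/* write path */
{
	return clamp_int(pwm_value, 0, 255) * 100 / 255;
}

int main(void)
{
	printf("EC 50%% -> pwm1 %d\n", ec_to_pwm(50));		/* prints 127 */
	printf("pwm1 200 -> EC %d%%\n", pwm_to_ec(200));	/* prints 78 */
	return 0;
}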
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ebc8b9d77613..efacee0404a0 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -29,6 +29,7 @@
29#include <linux/blkdev.h> 29#include <linux/blkdev.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
32#include <linux/string_helpers.h>
32 33
33#include <linux/mmc/card.h> 34#include <linux/mmc/card.h>
34#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
@@ -83,7 +84,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
83 mutex_lock(&open_lock); 84 mutex_lock(&open_lock);
84 md->usage--; 85 md->usage--;
85 if (md->usage == 0) { 86 if (md->usage == 0) {
86 int devidx = md->disk->first_minor >> MMC_SHIFT; 87 int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
87 __clear_bit(devidx, dev_use); 88 __clear_bit(devidx, dev_use);
88 89
89 put_disk(md->disk); 90 put_disk(md->disk);
@@ -532,6 +533,8 @@ static int mmc_blk_probe(struct mmc_card *card)
532 struct mmc_blk_data *md; 533 struct mmc_blk_data *md;
533 int err; 534 int err;
534 535
536 char cap_str[10];
537
535 /* 538 /*
536 * Check that the card supports the command class(es) we need. 539 * Check that the card supports the command class(es) we need.
537 */ 540 */
@@ -546,10 +549,11 @@ static int mmc_blk_probe(struct mmc_card *card)
546 if (err) 549 if (err)
547 goto out; 550 goto out;
548 551
549 printk(KERN_INFO "%s: %s %s %lluKiB %s\n", 552 string_get_size(get_capacity(md->disk) << 9, STRING_UNITS_2,
553 cap_str, sizeof(cap_str));
554 printk(KERN_INFO "%s: %s %s %s %s\n",
550 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 555 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
551 (unsigned long long)(get_capacity(md->disk) >> 1), 556 cap_str, md->read_only ? "(ro)" : "");
552 md->read_only ? "(ro)" : "");
553 557
554 mmc_set_drvdata(card, md); 558 mmc_set_drvdata(card, md);
555 add_disk(md->disk); 559 add_disk(md->disk);
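
The mmc_blk_probe hunk switches the capacity printout from a raw KiB value to a human-readable string produced by string_get_size() with binary (KiB/MiB/GiB) units. A rough user-space approximation of that formatting, assuming base-1024 units and one truncated decimal place; it is not the kernel helper itself:

#include <stdio.h>
#include <stdint.h>

/* Rough user-space approximation of base-1024 size formatting. */
static void format_size_binary(uint64_t bytes, char *buf, size_t len)
{
	static const char *units[] = { "B", "KiB", "MiB", "GiB", "TiB" };
	unsigned int i = 0;
	uint64_t rem = 0;

	while (bytes >= 1024 && i < 4) {
		rem = bytes % 1024;
		bytes /= 1024;
		i++;
	}
	/* one decimal place, truncated rather than rounded */
	snprintf(buf, len, "%llu.%llu %s",
		 (unsigned long long)bytes,
		 (unsigned long long)(rem * 10 / 1024), units[i]);
}

int main(void)
{
	char cap_str[16];

	/* e.g. a 3911680-sector card: sectors << 9 gives bytes */
	format_size_binary((uint64_t)3911680 << 9, cap_str, sizeof(cap_str));
	printf("mmcblk0: %s\n", cap_str);
	return 0;
}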
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index f34f20c78911..9bf581c4f740 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1005,6 +1005,29 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
1005 return ftl_write((void *)dev, buf, block, 1); 1005 return ftl_write((void *)dev, buf, block, 1);
1006} 1006}
1007 1007
1008static int ftl_discardsect(struct mtd_blktrans_dev *dev,
1009 unsigned long sector, unsigned nr_sects)
1010{
1011 partition_t *part = (void *)dev;
1012 uint32_t bsize = 1 << part->header.EraseUnitSize;
1013
1014	DEBUG(1, "FTL erase sector %lu for %u sectors\n",
1015 sector, nr_sects);
1016
1017 while (nr_sects) {
1018 uint32_t old_addr = part->VirtualBlockMap[sector];
1019 if (old_addr != 0xffffffff) {
1020 part->VirtualBlockMap[sector] = 0xffffffff;
1021 part->EUNInfo[old_addr/bsize].Deleted++;
1022 if (set_bam_entry(part, old_addr, 0))
1023 return -EIO;
1024 }
1025 nr_sects--;
1026 sector++;
1027 }
1028
1029 return 0;
1030}
1008/*====================================================================*/ 1031/*====================================================================*/
1009 1032
1010static void ftl_freepart(partition_t *part) 1033static void ftl_freepart(partition_t *part)
@@ -1069,6 +1092,7 @@ static struct mtd_blktrans_ops ftl_tr = {
1069 .blksize = SECTOR_SIZE, 1092 .blksize = SECTOR_SIZE,
1070 .readsect = ftl_readsect, 1093 .readsect = ftl_readsect,
1071 .writesect = ftl_writesect, 1094 .writesect = ftl_writesect,
1095 .discard = ftl_discardsect,
1072 .getgeo = ftl_getgeo, 1096 .getgeo = ftl_getgeo,
1073 .add_mtd = ftl_add_mtd, 1097 .add_mtd = ftl_add_mtd,
1074 .remove_dev = ftl_remove_dev, 1098 .remove_dev = ftl_remove_dev,
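
The new ftl_discardsect() walks the discarded range, unmaps each still-mapped sector from the virtual block map and credits that sector's erase unit with one more deleted block (the real driver also updates the block allocation map on flash). A simplified toy model of that bookkeeping in user-space C, with stand-in types and sector-granular addressing rather than the driver's byte addresses:

#include <stdio.h>
#include <stdint.h>

#define UNMAPPED	0xffffffffu
#define SECTORS		16
#define UNIT_SECTORS	4	/* sectors per erase unit in this toy model */

static uint32_t virtual_block_map[SECTORS];		/* sector -> flash address */
static unsigned int deleted[SECTORS / UNIT_SECTORS];	/* deleted blocks per unit */

static int discard_sectors(unsigned long sector, unsigned int nr_sects)
{
	while (nr_sects) {
		uint32_t old_addr = virtual_block_map[sector];

		if (old_addr != UNMAPPED) {
			virtual_block_map[sector] = UNMAPPED;
			deleted[old_addr / UNIT_SECTORS]++;
			/* a real FTL would also update the on-flash BAM here */
		}
		nr_sects--;
		sector++;
	}
	return 0;
}

int main(void)
{
	for (unsigned int i = 0; i < SECTORS; i++)
		virtual_block_map[i] = i;	/* identity mapping */

	discard_sectors(2, 5);
	printf("deleted per unit: %u %u %u %u\n",
	       deleted[0], deleted[1], deleted[2], deleted[3]);
	return 0;
}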
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 9ff007c4962c..681d5aca2af4 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,6 +32,14 @@ struct mtd_blkcore_priv {
32 spinlock_t queue_lock; 32 spinlock_t queue_lock;
33}; 33};
34 34
35static int blktrans_discard_request(struct request_queue *q,
36 struct request *req)
37{
38 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
39 req->cmd[0] = REQ_LB_OP_DISCARD;
40 return 0;
41}
42
35static int do_blktrans_request(struct mtd_blktrans_ops *tr, 43static int do_blktrans_request(struct mtd_blktrans_ops *tr,
36 struct mtd_blktrans_dev *dev, 44 struct mtd_blktrans_dev *dev,
37 struct request *req) 45 struct request *req)
@@ -44,6 +52,10 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
44 52
45 buf = req->buffer; 53 buf = req->buffer;
46 54
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return !tr->discard(dev, block, nsect);
58
47 if (!blk_fs_request(req)) 59 if (!blk_fs_request(req))
48 return 0; 60 return 0;
49 61
@@ -367,6 +379,10 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
367 379
368 tr->blkcore_priv->rq->queuedata = tr; 380 tr->blkcore_priv->rq->queuedata = tr;
369 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); 381 blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
382 if (tr->discard)
383 blk_queue_set_discard(tr->blkcore_priv->rq,
384 blktrans_discard_request);
385
370 tr->blkshift = ffs(tr->blksize) - 1; 386 tr->blkshift = ffs(tr->blksize) - 1;
371 387
372 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr, 388 tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index 26f5abc9c3f7..e83f34f1b5ba 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -2,12 +2,15 @@
2# Makefile for the Linux Plug-and-Play Support. 2# Makefile for the Linux Plug-and-Play Support.
3# 3#
4 4
5obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o system.o 5obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o
6 6
7obj-$(CONFIG_PNPACPI) += pnpacpi/ 7obj-$(CONFIG_PNPACPI) += pnpacpi/
8obj-$(CONFIG_PNPBIOS) += pnpbios/ 8obj-$(CONFIG_PNPBIOS) += pnpbios/
9obj-$(CONFIG_ISAPNP) += isapnp/ 9obj-$(CONFIG_ISAPNP) += isapnp/
10 10
11# pnp_system_init goes after pnpacpi/pnpbios init
12obj-y += system.o
13
11ifeq ($(CONFIG_PNP_DEBUG),y) 14ifeq ($(CONFIG_PNP_DEBUG),y)
12EXTRA_CFLAGS += -DDEBUG 15EXTRA_CFLAGS += -DDEBUG
13endif 16endif
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c1b9ea34977b..53561d72b4ee 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -268,7 +268,7 @@ static int __init pnpacpi_init(void)
268 return 0; 268 return 0;
269} 269}
270 270
271subsys_initcall(pnpacpi_init); 271fs_initcall(pnpacpi_init);
272 272
273static int __init pnpacpi_setup(char *str) 273static int __init pnpacpi_setup(char *str)
274{ 274{
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 19a4be1a9a31..662dfcddedc6 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -571,7 +571,7 @@ static int __init pnpbios_init(void)
571 return 0; 571 return 0;
572} 572}
573 573
574subsys_initcall(pnpbios_init); 574fs_initcall(pnpbios_init);
575 575
576static int __init pnpbios_thread_init(void) 576static int __init pnpbios_thread_init(void)
577{ 577{
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 03c0e40a92ff..e3b5c4d3036e 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -76,7 +76,8 @@ dasd_devices_show(struct seq_file *m, void *v)
76 /* Print kdev. */ 76 /* Print kdev. */
77 if (block->gdp) 77 if (block->gdp)
78 seq_printf(m, " at (%3d:%6d)", 78 seq_printf(m, " at (%3d:%6d)",
79 block->gdp->major, block->gdp->first_minor); 79 MAJOR(disk_devt(block->gdp)),
80 MINOR(disk_devt(block->gdp)));
80 else 81 else
81 seq_printf(m, " at (???:??????)"); 82 seq_printf(m, " at (???:??????)");
82 /* Print device name. */ 83 /* Print device name. */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 711b3004b3e6..9481e4a3f76e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -114,7 +114,7 @@ dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
114 found = 0; 114 found = 0;
115 // test if minor available 115 // test if minor available
116 list_for_each_entry(entry, &dcssblk_devices, lh) 116 list_for_each_entry(entry, &dcssblk_devices, lh)
117 if (minor == entry->gd->first_minor) 117 if (minor == MINOR(disk_devt(entry->gd)))
118 found++; 118 found++;
119 if (!found) break; // got unused minor 119 if (!found) break; // got unused minor
120 } 120 }
@@ -397,7 +397,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
397 goto unload_seg; 397 goto unload_seg;
398 } 398 }
399 sprintf(dev_info->gd->disk_name, "dcssblk%d", 399 sprintf(dev_info->gd->disk_name, "dcssblk%d",
400 dev_info->gd->first_minor); 400 MINOR(disk_devt(dev_info->gd)));
401 list_add_tail(&dev_info->lh, &dcssblk_devices); 401 list_add_tail(&dev_info->lh, &dcssblk_devices);
402 402
403 if (!try_module_get(THIS_MODULE)) { 403 if (!try_module_get(THIS_MODULE)) {
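
Several hunks above (mspro_block, mmc, dasd_proc, dcssblk) stop reading gendisk->first_minor directly and instead decode the disk's dev_t returned by disk_devt() with MAJOR()/MINOR(). The user-space analogue of that decode uses makedev()/major()/minor(); a short sketch with made-up numbers:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>

int main(void)
{
	dev_t devt = makedev(94, 12);	/* e.g. a dasd-style major/minor pair */

	/* same split the kernel hunks do with MAJOR()/MINOR() on disk_devt() */
	printf("at (%3u:%6u)\n", major(devt), minor(devt));
	return 0;
}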
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 90abfd06ed55..24255e42dc30 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -88,11 +88,13 @@ static int __init zfcp_device_setup(char *devstr)
88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); 88 strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE);
89 89
90 token = strsep(&str, ","); 90 token = strsep(&str, ",");
91 if (!token || strict_strtoull(token, 0, &zfcp_data.init_wwpn)) 91 if (!token || strict_strtoull(token, 0,
92 (unsigned long long *) &zfcp_data.init_wwpn))
92 goto err_out; 93 goto err_out;
93 94
94 token = strsep(&str, ","); 95 token = strsep(&str, ",");
95 if (!token || strict_strtoull(token, 0, &zfcp_data.init_fcp_lun)) 96 if (!token || strict_strtoull(token, 0,
97 (unsigned long long *) &zfcp_data.init_fcp_lun))
96 goto err_out; 98 goto err_out;
97 99
98 kfree(str); 100 kfree(str);
@@ -100,24 +102,10 @@ static int __init zfcp_device_setup(char *devstr)
100 102
101 err_out: 103 err_out:
102 kfree(str); 104 kfree(str);
103 pr_err("zfcp: Parse error for device parameter string %s, " 105 pr_err("zfcp: %s is not a valid SCSI device\n", devstr);
104 "device not attached.\n", devstr);
105 return 0; 106 return 0;
106} 107}
107 108
108static struct zfcp_adapter *zfcp_get_adapter_by_busid(char *bus_id)
109{
110 struct zfcp_adapter *adapter;
111
112 list_for_each_entry(adapter, &zfcp_data.adapter_list_head, list)
113 if ((strncmp(bus_id, adapter->ccw_device->dev.bus_id,
114 BUS_ID_SIZE) == 0) &&
115 !(atomic_read(&adapter->status) &
116 ZFCP_STATUS_COMMON_REMOVE))
117 return adapter;
118 return NULL;
119}
120
121static void __init zfcp_init_device_configure(void) 109static void __init zfcp_init_device_configure(void)
122{ 110{
123 struct zfcp_adapter *adapter; 111 struct zfcp_adapter *adapter;
@@ -141,7 +129,12 @@ static void __init zfcp_init_device_configure(void)
141 goto out_unit; 129 goto out_unit;
142 up(&zfcp_data.config_sema); 130 up(&zfcp_data.config_sema);
143 ccw_device_set_online(adapter->ccw_device); 131 ccw_device_set_online(adapter->ccw_device);
132
144 zfcp_erp_wait(adapter); 133 zfcp_erp_wait(adapter);
134 wait_event(adapter->erp_done_wqh,
135 !(atomic_read(&unit->status) &
136 ZFCP_STATUS_UNIT_SCSI_WORK_PENDING));
137
145 down(&zfcp_data.config_sema); 138 down(&zfcp_data.config_sema);
146 zfcp_unit_put(unit); 139 zfcp_unit_put(unit);
147out_unit: 140out_unit:
@@ -180,9 +173,9 @@ static int __init zfcp_module_init(void)
180 if (!zfcp_data.gid_pn_cache) 173 if (!zfcp_data.gid_pn_cache)
181 goto out_gid_cache; 174 goto out_gid_cache;
182 175
183 INIT_LIST_HEAD(&zfcp_data.adapter_list_head); 176 zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
184 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
185 177
178 INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
186 sema_init(&zfcp_data.config_sema, 1); 179 sema_init(&zfcp_data.config_sema, 1);
187 rwlock_init(&zfcp_data.config_lock); 180 rwlock_init(&zfcp_data.config_lock);
188 181
@@ -193,13 +186,14 @@ static int __init zfcp_module_init(void)
193 186
194 retval = misc_register(&zfcp_cfdc_misc); 187 retval = misc_register(&zfcp_cfdc_misc);
195 if (retval) { 188 if (retval) {
196 pr_err("zfcp: registration of misc device zfcp_cfdc failed\n"); 189 pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n");
197 goto out_misc; 190 goto out_misc;
198 } 191 }
199 192
200 retval = zfcp_ccw_register(); 193 retval = zfcp_ccw_register();
201 if (retval) { 194 if (retval) {
202 pr_err("zfcp: Registration with common I/O layer failed.\n"); 195 pr_err("zfcp: The zfcp device driver could not register with "
196 "the common I/O layer\n");
203 goto out_ccw_register; 197 goto out_ccw_register;
204 } 198 }
205 199
@@ -231,8 +225,7 @@ module_init(zfcp_module_init);
231 * 225 *
232 * Returns: pointer to zfcp_unit or NULL 226 * Returns: pointer to zfcp_unit or NULL
233 */ 227 */
234struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, 228struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
235 fcp_lun_t fcp_lun)
236{ 229{
237 struct zfcp_unit *unit; 230 struct zfcp_unit *unit;
238 231
@@ -251,7 +244,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port,
251 * Returns: pointer to zfcp_port or NULL 244 * Returns: pointer to zfcp_port or NULL
252 */ 245 */
253struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, 246struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
254 wwn_t wwpn) 247 u64 wwpn)
255{ 248{
256 struct zfcp_port *port; 249 struct zfcp_port *port;
257 250
@@ -276,7 +269,7 @@ static void zfcp_sysfs_unit_release(struct device *dev)
276 * 269 *
277 * Sets up some unit internal structures and creates sysfs entry. 270 * Sets up some unit internal structures and creates sysfs entry.
278 */ 271 */
279struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun) 272struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
280{ 273{
281 struct zfcp_unit *unit; 274 struct zfcp_unit *unit;
282 275
@@ -290,7 +283,8 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
290 unit->port = port; 283 unit->port = port;
291 unit->fcp_lun = fcp_lun; 284 unit->fcp_lun = fcp_lun;
292 285
293 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx", fcp_lun); 286 snprintf(unit->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx",
287 (unsigned long long) fcp_lun);
294 unit->sysfs_device.parent = &port->sysfs_device; 288 unit->sysfs_device.parent = &port->sysfs_device;
295 unit->sysfs_device.release = zfcp_sysfs_unit_release; 289 unit->sysfs_device.release = zfcp_sysfs_unit_release;
296 dev_set_drvdata(&unit->sysfs_device, unit); 290 dev_set_drvdata(&unit->sysfs_device, unit);
@@ -323,7 +317,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
323 } 317 }
324 318
325 zfcp_unit_get(unit); 319 zfcp_unit_get(unit);
326 unit->scsi_lun = scsilun_to_int((struct scsi_lun *)&unit->fcp_lun);
327 320
328 write_lock_irq(&zfcp_data.config_lock); 321 write_lock_irq(&zfcp_data.config_lock);
329 list_add_tail(&unit->list, &port->unit_list_head); 322 list_add_tail(&unit->list, &port->unit_list_head);
@@ -332,7 +325,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
332 325
333 write_unlock_irq(&zfcp_data.config_lock); 326 write_unlock_irq(&zfcp_data.config_lock);
334 327
335 port->units++;
336 zfcp_port_get(port); 328 zfcp_port_get(port);
337 329
338 return unit; 330 return unit;
@@ -351,11 +343,10 @@ err_out_free:
351 */ 343 */
352void zfcp_unit_dequeue(struct zfcp_unit *unit) 344void zfcp_unit_dequeue(struct zfcp_unit *unit)
353{ 345{
354 zfcp_unit_wait(unit); 346 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
355 write_lock_irq(&zfcp_data.config_lock); 347 write_lock_irq(&zfcp_data.config_lock);
356 list_del(&unit->list); 348 list_del(&unit->list);
357 write_unlock_irq(&zfcp_data.config_lock); 349 write_unlock_irq(&zfcp_data.config_lock);
358 unit->port->units--;
359 zfcp_port_put(unit->port); 350 zfcp_port_put(unit->port);
360 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs); 351 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
361 device_unregister(&unit->sysfs_device); 352 device_unregister(&unit->sysfs_device);
@@ -416,11 +407,6 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
416 mempool_destroy(adapter->pool.data_gid_pn); 407 mempool_destroy(adapter->pool.data_gid_pn);
417} 408}
418 409
419static void zfcp_dummy_release(struct device *dev)
420{
421 return;
422}
423
424/** 410/**
425 * zfcp_status_read_refill - refill the long running status_read_requests 411 * zfcp_status_read_refill - refill the long running status_read_requests
426 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled 412 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
@@ -450,19 +436,6 @@ static void _zfcp_status_read_scheduler(struct work_struct *work)
450 stat_work)); 436 stat_work));
451} 437}
452 438
453static int zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
454{
455 struct zfcp_port *port;
456
457 port = zfcp_port_enqueue(adapter, 0, ZFCP_STATUS_PORT_WKA,
458 ZFCP_DID_DIRECTORY_SERVICE);
459 if (IS_ERR(port))
460 return PTR_ERR(port);
461 zfcp_port_put(port);
462
463 return 0;
464}
465
466/** 439/**
467 * zfcp_adapter_enqueue - enqueue a new adapter to the list 440 * zfcp_adapter_enqueue - enqueue a new adapter to the list
468 * @ccw_device: pointer to the struct cc_device 441 * @ccw_device: pointer to the struct cc_device
@@ -508,7 +481,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
508 init_waitqueue_head(&adapter->erp_done_wqh); 481 init_waitqueue_head(&adapter->erp_done_wqh);
509 482
510 INIT_LIST_HEAD(&adapter->port_list_head); 483 INIT_LIST_HEAD(&adapter->port_list_head);
511 INIT_LIST_HEAD(&adapter->port_remove_lh);
512 INIT_LIST_HEAD(&adapter->erp_ready_head); 484 INIT_LIST_HEAD(&adapter->erp_ready_head);
513 INIT_LIST_HEAD(&adapter->erp_running_head); 485 INIT_LIST_HEAD(&adapter->erp_running_head);
514 486
@@ -518,7 +490,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
518 spin_lock_init(&adapter->san_dbf_lock); 490 spin_lock_init(&adapter->san_dbf_lock);
519 spin_lock_init(&adapter->scsi_dbf_lock); 491 spin_lock_init(&adapter->scsi_dbf_lock);
520 spin_lock_init(&adapter->rec_dbf_lock); 492 spin_lock_init(&adapter->rec_dbf_lock);
521 spin_lock_init(&adapter->req_q.lock); 493 spin_lock_init(&adapter->req_q_lock);
522 494
523 rwlock_init(&adapter->erp_lock); 495 rwlock_init(&adapter->erp_lock);
524 rwlock_init(&adapter->abort_lock); 496 rwlock_init(&adapter->abort_lock);
@@ -537,28 +509,15 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
537 &zfcp_sysfs_adapter_attrs)) 509 &zfcp_sysfs_adapter_attrs))
538 goto sysfs_failed; 510 goto sysfs_failed;
539 511
540 adapter->generic_services.parent = &adapter->ccw_device->dev;
541 adapter->generic_services.release = zfcp_dummy_release;
542 snprintf(adapter->generic_services.bus_id, BUS_ID_SIZE,
543 "generic_services");
544
545 if (device_register(&adapter->generic_services))
546 goto generic_services_failed;
547
548 write_lock_irq(&zfcp_data.config_lock); 512 write_lock_irq(&zfcp_data.config_lock);
549 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 513 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
550 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head); 514 list_add_tail(&adapter->list, &zfcp_data.adapter_list_head);
551 write_unlock_irq(&zfcp_data.config_lock); 515 write_unlock_irq(&zfcp_data.config_lock);
552 516
553 zfcp_data.adapters++; 517 zfcp_fc_nameserver_init(adapter);
554
555 zfcp_nameserver_enqueue(adapter);
556 518
557 return 0; 519 return 0;
558 520
559generic_services_failed:
560 sysfs_remove_group(&ccw_device->dev.kobj,
561 &zfcp_sysfs_adapter_attrs);
562sysfs_failed: 521sysfs_failed:
563 zfcp_adapter_debug_unregister(adapter); 522 zfcp_adapter_debug_unregister(adapter);
564debug_register_failed: 523debug_register_failed:
@@ -585,7 +544,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
585 cancel_work_sync(&adapter->scan_work); 544 cancel_work_sync(&adapter->scan_work);
586 cancel_work_sync(&adapter->stat_work); 545 cancel_work_sync(&adapter->stat_work);
587 zfcp_adapter_scsi_unregister(adapter); 546 zfcp_adapter_scsi_unregister(adapter);
588 device_unregister(&adapter->generic_services);
589 sysfs_remove_group(&adapter->ccw_device->dev.kobj, 547 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
590 &zfcp_sysfs_adapter_attrs); 548 &zfcp_sysfs_adapter_attrs);
591 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 549 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
@@ -603,9 +561,6 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
603 list_del(&adapter->list); 561 list_del(&adapter->list);
604 write_unlock_irq(&zfcp_data.config_lock); 562 write_unlock_irq(&zfcp_data.config_lock);
605 563
606 /* decrease number of adapters in list */
607 zfcp_data.adapters--;
608
609 zfcp_qdio_free(adapter); 564 zfcp_qdio_free(adapter);
610 565
611 zfcp_free_low_mem_buffers(adapter); 566 zfcp_free_low_mem_buffers(adapter);
@@ -633,21 +588,19 @@ static void zfcp_sysfs_port_release(struct device *dev)
633 * d_id is used to enqueue ports with a well known address like the Directory 588 * d_id is used to enqueue ports with a well known address like the Directory
634 * Service for nameserver lookup. 589 * Service for nameserver lookup.
635 */ 590 */
636struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn, 591struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
637 u32 status, u32 d_id) 592 u32 status, u32 d_id)
638{ 593{
639 struct zfcp_port *port; 594 struct zfcp_port *port;
640 int retval; 595 int retval;
641 char *bus_id;
642 596
643 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); 597 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
644 if (!port) 598 if (!port)
645 return ERR_PTR(-ENOMEM); 599 return ERR_PTR(-ENOMEM);
646 600
647 init_waitqueue_head(&port->remove_wq); 601 init_waitqueue_head(&port->remove_wq);
648
649 INIT_LIST_HEAD(&port->unit_list_head); 602 INIT_LIST_HEAD(&port->unit_list_head);
650 INIT_LIST_HEAD(&port->unit_remove_lh); 603 INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
651 604
652 port->adapter = adapter; 605 port->adapter = adapter;
653 port->d_id = d_id; 606 port->d_id = d_id;
@@ -657,34 +610,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
657 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); 610 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
658 atomic_set(&port->refcount, 0); 611 atomic_set(&port->refcount, 0);
659 612
660 if (status & ZFCP_STATUS_PORT_WKA) { 613 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "0x%016llx",
661 switch (d_id) { 614 (unsigned long long) wwpn);
662 case ZFCP_DID_DIRECTORY_SERVICE: 615 port->sysfs_device.parent = &adapter->ccw_device->dev;
663 bus_id = "directory";
664 break;
665 case ZFCP_DID_MANAGEMENT_SERVICE:
666 bus_id = "management";
667 break;
668 case ZFCP_DID_KEY_DISTRIBUTION_SERVICE:
669 bus_id = "key_distribution";
670 break;
671 case ZFCP_DID_ALIAS_SERVICE:
672 bus_id = "alias";
673 break;
674 case ZFCP_DID_TIME_SERVICE:
675 bus_id = "time";
676 break;
677 default:
678 kfree(port);
679 return ERR_PTR(-EINVAL);
680 }
681 snprintf(port->sysfs_device.bus_id, BUS_ID_SIZE, "%s", bus_id);
682 port->sysfs_device.parent = &adapter->generic_services;
683 } else {
684 snprintf(port->sysfs_device.bus_id,
685 BUS_ID_SIZE, "0x%016llx", wwpn);
686 port->sysfs_device.parent = &adapter->ccw_device->dev;
687 }
688 616
689 port->sysfs_device.release = zfcp_sysfs_port_release; 617 port->sysfs_device.release = zfcp_sysfs_port_release;
690 dev_set_drvdata(&port->sysfs_device, port); 618 dev_set_drvdata(&port->sysfs_device, port);
@@ -700,12 +628,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
700 if (device_register(&port->sysfs_device)) 628 if (device_register(&port->sysfs_device))
701 goto err_out_free; 629 goto err_out_free;
702 630
703 if (status & ZFCP_STATUS_PORT_WKA) 631 retval = sysfs_create_group(&port->sysfs_device.kobj,
704 retval = sysfs_create_group(&port->sysfs_device.kobj, 632 &zfcp_sysfs_port_attrs);
705 &zfcp_sysfs_ns_port_attrs);
706 else
707 retval = sysfs_create_group(&port->sysfs_device.kobj,
708 &zfcp_sysfs_port_attrs);
709 633
710 if (retval) { 634 if (retval) {
711 device_unregister(&port->sysfs_device); 635 device_unregister(&port->sysfs_device);
@@ -718,10 +642,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, wwn_t wwpn,
718 list_add_tail(&port->list, &adapter->port_list_head); 642 list_add_tail(&port->list, &adapter->port_list_head);
719 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 643 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
720 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status); 644 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
721 if (d_id == ZFCP_DID_DIRECTORY_SERVICE)
722 if (!adapter->nameserver_port)
723 adapter->nameserver_port = port;
724 adapter->ports++;
725 645
726 write_unlock_irq(&zfcp_data.config_lock); 646 write_unlock_irq(&zfcp_data.config_lock);
727 647
@@ -740,21 +660,15 @@ err_out:
740 */ 660 */
741void zfcp_port_dequeue(struct zfcp_port *port) 661void zfcp_port_dequeue(struct zfcp_port *port)
742{ 662{
743 zfcp_port_wait(port); 663 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
744 write_lock_irq(&zfcp_data.config_lock); 664 write_lock_irq(&zfcp_data.config_lock);
745 list_del(&port->list); 665 list_del(&port->list);
746 port->adapter->ports--;
747 write_unlock_irq(&zfcp_data.config_lock); 666 write_unlock_irq(&zfcp_data.config_lock);
748 if (port->rport) 667 if (port->rport)
749 fc_remote_port_delete(port->rport); 668 fc_remote_port_delete(port->rport);
750 port->rport = NULL; 669 port->rport = NULL;
751 zfcp_adapter_put(port->adapter); 670 zfcp_adapter_put(port->adapter);
752 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA) 671 sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
753 sysfs_remove_group(&port->sysfs_device.kobj,
754 &zfcp_sysfs_ns_port_attrs);
755 else
756 sysfs_remove_group(&port->sysfs_device.kobj,
757 &zfcp_sysfs_port_attrs);
758 device_unregister(&port->sysfs_device); 672 device_unregister(&port->sysfs_device);
759} 673}
760 674
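
The zfcp rework above drops the zfcp_unit_wait()/zfcp_port_wait() helpers and instead blocks in the dequeue paths with wait_event() until the object's refcount reaches zero. A user-space analogue of that wait-for-last-reference pattern, sketched with pthreads (the kernel code sleeps on a wait queue and is woken by the corresponding put path):

#include <pthread.h>
#include <stdio.h>

struct demo_port {
	int refcount;
	pthread_mutex_t lock;
	pthread_cond_t remove_wq;	/* plays the role of port->remove_wq */
};

static void demo_port_get(struct demo_port *port)
{
	pthread_mutex_lock(&port->lock);
	port->refcount++;
	pthread_mutex_unlock(&port->lock);
}

static void demo_port_put(struct demo_port *port)
{
	pthread_mutex_lock(&port->lock);
	if (--port->refcount == 0)
		pthread_cond_broadcast(&port->remove_wq);
	pthread_mutex_unlock(&port->lock);
}

/* dequeue blocks until the last reference is dropped, then tears down */
static void demo_port_dequeue(struct demo_port *port)
{
	pthread_mutex_lock(&port->lock);
	while (port->refcount != 0)
		pthread_cond_wait(&port->remove_wq, &port->lock);
	pthread_mutex_unlock(&port->lock);
	printf("last reference gone, removing port\n");
}

static void *user(void *arg)
{
	demo_port_put(arg);		/* drop the reference taken in main() */
	return NULL;
}

int main(void)
{
	struct demo_port port = { .refcount = 0,
				  .lock = PTHREAD_MUTEX_INITIALIZER,
				  .remove_wq = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	demo_port_get(&port);
	pthread_create(&t, NULL, user, &port);
	demo_port_dequeue(&port);	/* waits until the thread puts the ref */
	pthread_join(t, NULL);
	return 0;
}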
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 51b6a05f4d12..b04038c74786 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -25,7 +25,8 @@ static int zfcp_ccw_probe(struct ccw_device *ccw_device)
25 down(&zfcp_data.config_sema); 25 down(&zfcp_data.config_sema);
26 if (zfcp_adapter_enqueue(ccw_device)) { 26 if (zfcp_adapter_enqueue(ccw_device)) {
27 dev_err(&ccw_device->dev, 27 dev_err(&ccw_device->dev,
28 "Setup of data structures failed.\n"); 28 "Setting up data structures for the "
29 "FCP adapter failed\n");
29 retval = -EINVAL; 30 retval = -EINVAL;
30 } 31 }
31 up(&zfcp_data.config_sema); 32 up(&zfcp_data.config_sema);
@@ -46,6 +47,8 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
46 struct zfcp_adapter *adapter; 47 struct zfcp_adapter *adapter;
47 struct zfcp_port *port, *p; 48 struct zfcp_port *port, *p;
48 struct zfcp_unit *unit, *u; 49 struct zfcp_unit *unit, *u;
50 LIST_HEAD(unit_remove_lh);
51 LIST_HEAD(port_remove_lh);
49 52
50 ccw_device_set_offline(ccw_device); 53 ccw_device_set_offline(ccw_device);
51 down(&zfcp_data.config_sema); 54 down(&zfcp_data.config_sema);
@@ -54,26 +57,26 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
54 write_lock_irq(&zfcp_data.config_lock); 57 write_lock_irq(&zfcp_data.config_lock);
55 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { 58 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
56 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) { 59 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) {
57 list_move(&unit->list, &port->unit_remove_lh); 60 list_move(&unit->list, &unit_remove_lh);
58 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, 61 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
59 &unit->status); 62 &unit->status);
60 } 63 }
61 list_move(&port->list, &adapter->port_remove_lh); 64 list_move(&port->list, &port_remove_lh);
62 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 65 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
63 } 66 }
64 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 67 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
65 write_unlock_irq(&zfcp_data.config_lock); 68 write_unlock_irq(&zfcp_data.config_lock);
66 69
67 list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) { 70 list_for_each_entry_safe(port, p, &port_remove_lh, list) {
68 list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) { 71 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) {
69 if (atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED, 72 if (atomic_read(&unit->status) &
70 &unit->status)) 73 ZFCP_STATUS_UNIT_REGISTERED)
71 scsi_remove_device(unit->device); 74 scsi_remove_device(unit->device);
72 zfcp_unit_dequeue(unit); 75 zfcp_unit_dequeue(unit);
73 } 76 }
74 zfcp_port_dequeue(port); 77 zfcp_port_dequeue(port);
75 } 78 }
76 zfcp_adapter_wait(adapter); 79 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
77 zfcp_adapter_dequeue(adapter); 80 zfcp_adapter_dequeue(adapter);
78 81
79 up(&zfcp_data.config_sema); 82 up(&zfcp_data.config_sema);
@@ -156,15 +159,18 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
156 159
157 switch (event) { 160 switch (event) {
158 case CIO_GONE: 161 case CIO_GONE:
159 dev_warn(&adapter->ccw_device->dev, "device gone\n"); 162 dev_warn(&adapter->ccw_device->dev,
163 "The FCP device has been detached\n");
160 zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL); 164 zfcp_erp_adapter_shutdown(adapter, 0, 87, NULL);
161 break; 165 break;
162 case CIO_NO_PATH: 166 case CIO_NO_PATH:
163 dev_warn(&adapter->ccw_device->dev, "no path\n"); 167 dev_warn(&adapter->ccw_device->dev,
168 "The CHPID for the FCP device is offline\n");
164 zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL); 169 zfcp_erp_adapter_shutdown(adapter, 0, 88, NULL);
165 break; 170 break;
166 case CIO_OPER: 171 case CIO_OPER:
167 dev_info(&adapter->ccw_device->dev, "operational again\n"); 172 dev_info(&adapter->ccw_device->dev,
173 "The FCP device is operational again\n");
168 zfcp_erp_modify_adapter_status(adapter, 11, NULL, 174 zfcp_erp_modify_adapter_status(adapter, 11, NULL,
169 ZFCP_STATUS_COMMON_RUNNING, 175 ZFCP_STATUS_COMMON_RUNNING,
170 ZFCP_SET); 176 ZFCP_SET);
@@ -220,3 +226,20 @@ int __init zfcp_ccw_register(void)
220{ 226{
221 return ccw_driver_register(&zfcp_ccw_driver); 227 return ccw_driver_register(&zfcp_ccw_driver);
222} 228}
229
230/**
231 * zfcp_get_adapter_by_busid - find zfcp_adapter struct
232 * @busid: bus id string of zfcp adapter to find
233 */
234struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
235{
236 struct ccw_device *ccw_device;
237 struct zfcp_adapter *adapter = NULL;
238
239 ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
240 if (ccw_device) {
241 adapter = dev_get_drvdata(&ccw_device->dev);
242 put_device(&ccw_device->dev);
243 }
244 return adapter;
245}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index fca48b88fc53..060f5f2352ec 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -318,6 +318,26 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
318 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 318 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
319} 319}
320 320
321/**
322 * zfcp_hba_dbf_event_berr - trace event for bit error threshold
323 * @adapter: adapter that reported the bit error threshold event
324 * @req: fsf request
325 */
326void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
327 struct zfcp_fsf_req *req)
328{
329 struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
330 struct fsf_status_read_buffer *sr_buf = req->data;
331 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
332 unsigned long flags;
333
334 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
335 memset(r, 0, sizeof(*r));
336 strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
337 memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
338 debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
339 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
340}
321static void zfcp_hba_dbf_view_response(char **p, 341static void zfcp_hba_dbf_view_response(char **p,
322 struct zfcp_hba_dbf_record_response *r) 342 struct zfcp_hba_dbf_record_response *r)
323{ 343{
@@ -399,6 +419,30 @@ static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
399 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); 419 zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
400} 420}
401 421
422static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
423{
424 zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
425 zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
426 zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count);
427 zfcp_dbf_out(p, "prim_seq_err", "%d",
428 r->primitive_sequence_error_count);
429 zfcp_dbf_out(p, "inval_trans_word_err", "%d",
430 r->invalid_transmission_word_error_count);
431 zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count);
432 zfcp_dbf_out(p, "prim_seq_event_to", "%d",
433 r->primitive_sequence_event_timeout_count);
434 zfcp_dbf_out(p, "elast_buf_overrun_err", "%d",
435 r->elastic_buffer_overrun_error_count);
436 zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d",
437 r->advertised_receive_b2b_credit);
438 zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d",
439 r->current_receive_b2b_credit);
440 zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d",
441 r->advertised_transmit_b2b_credit);
442 zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d",
443 r->current_transmit_b2b_credit);
444}
445
402static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, 446static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
403 char *out_buf, const char *in_buf) 447 char *out_buf, const char *in_buf)
404{ 448{
@@ -418,6 +462,8 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
418 zfcp_hba_dbf_view_status(&p, &r->u.status); 462 zfcp_hba_dbf_view_status(&p, &r->u.status);
419 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) 463 else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
420 zfcp_hba_dbf_view_qdio(&p, &r->u.qdio); 464 zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
465 else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
466 zfcp_hba_dbf_view_berr(&p, &r->u.berr);
421 467
422 p += sprintf(p, "\n"); 468 p += sprintf(p, "\n");
423 return p - out_buf; 469 return p - out_buf;
@@ -519,14 +565,14 @@ static const char *zfcp_rec_dbf_ids[] = {
519 [75] = "physical port recovery escalation after failed port " 565 [75] = "physical port recovery escalation after failed port "
520 "recovery", 566 "recovery",
521 [76] = "port recovery escalation after failed unit recovery", 567 [76] = "port recovery escalation after failed unit recovery",
522 [77] = "recovery opening nameserver port", 568 [77] = "",
523 [78] = "duplicate request id", 569 [78] = "duplicate request id",
524 [79] = "link down", 570 [79] = "link down",
525 [80] = "exclusive read-only unit access unsupported", 571 [80] = "exclusive read-only unit access unsupported",
526 [81] = "shared read-write unit access unsupported", 572 [81] = "shared read-write unit access unsupported",
527 [82] = "incoming rscn", 573 [82] = "incoming rscn",
528 [83] = "incoming wwpn", 574 [83] = "incoming wwpn",
529 [84] = "", 575 [84] = "wka port handle not valid close port",
530 [85] = "online", 576 [85] = "online",
531 [86] = "offline", 577 [86] = "offline",
532 [87] = "ccw device gone", 578 [87] = "ccw device gone",
@@ -570,7 +616,7 @@ static const char *zfcp_rec_dbf_ids[] = {
570 [125] = "need newer zfcp", 616 [125] = "need newer zfcp",
571 [126] = "need newer microcode", 617 [126] = "need newer microcode",
572 [127] = "arbitrated loop not supported", 618 [127] = "arbitrated loop not supported",
573 [128] = "unknown topology", 619 [128] = "",
574 [129] = "qtcb size mismatch", 620 [129] = "qtcb size mismatch",
575 [130] = "unknown fsf status ecd", 621 [130] = "unknown fsf status ecd",
576 [131] = "fcp request too big", 622 [131] = "fcp request too big",
@@ -829,9 +875,9 @@ void zfcp_rec_dbf_event_action(u8 id2, struct zfcp_erp_action *erp_action)
829void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) 875void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
830{ 876{
831 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 877 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
832 struct zfcp_port *port = ct->port; 878 struct zfcp_wka_port *wka_port = ct->wka_port;
833 struct zfcp_adapter *adapter = port->adapter; 879 struct zfcp_adapter *adapter = wka_port->adapter;
834 struct ct_hdr *hdr = zfcp_sg_to_address(ct->req); 880 struct ct_hdr *hdr = sg_virt(ct->req);
835 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; 881 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
836 struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; 882 struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
837 unsigned long flags; 883 unsigned long flags;
@@ -842,7 +888,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
842 r->fsf_reqid = (unsigned long)fsf_req; 888 r->fsf_reqid = (unsigned long)fsf_req;
843 r->fsf_seqno = fsf_req->seq_no; 889 r->fsf_seqno = fsf_req->seq_no;
844 r->s_id = fc_host_port_id(adapter->scsi_host); 890 r->s_id = fc_host_port_id(adapter->scsi_host);
845 r->d_id = port->d_id; 891 r->d_id = wka_port->d_id;
846 oct->cmd_req_code = hdr->cmd_rsp_code; 892 oct->cmd_req_code = hdr->cmd_rsp_code;
847 oct->revision = hdr->revision; 893 oct->revision = hdr->revision;
848 oct->gs_type = hdr->gs_type; 894 oct->gs_type = hdr->gs_type;
@@ -863,9 +909,9 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
863void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) 909void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
864{ 910{
865 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 911 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
866 struct zfcp_port *port = ct->port; 912 struct zfcp_wka_port *wka_port = ct->wka_port;
867 struct zfcp_adapter *adapter = port->adapter; 913 struct zfcp_adapter *adapter = wka_port->adapter;
868 struct ct_hdr *hdr = zfcp_sg_to_address(ct->resp); 914 struct ct_hdr *hdr = sg_virt(ct->resp);
869 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; 915 struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
870 struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; 916 struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
871 unsigned long flags; 917 unsigned long flags;
@@ -875,7 +921,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
875 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); 921 strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
876 r->fsf_reqid = (unsigned long)fsf_req; 922 r->fsf_reqid = (unsigned long)fsf_req;
877 r->fsf_seqno = fsf_req->seq_no; 923 r->fsf_seqno = fsf_req->seq_no;
878 r->s_id = port->d_id; 924 r->s_id = wka_port->d_id;
879 r->d_id = fc_host_port_id(adapter->scsi_host); 925 r->d_id = fc_host_port_id(adapter->scsi_host);
880 rct->cmd_rsp_code = hdr->cmd_rsp_code; 926 rct->cmd_rsp_code = hdr->cmd_rsp_code;
881 rct->revision = hdr->revision; 927 rct->revision = hdr->revision;
@@ -922,8 +968,8 @@ void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
922 968
923 zfcp_san_dbf_event_els("oels", 2, fsf_req, 969 zfcp_san_dbf_event_els("oels", 2, fsf_req,
924 fc_host_port_id(els->adapter->scsi_host), 970 fc_host_port_id(els->adapter->scsi_host),
925 els->d_id, *(u8 *) zfcp_sg_to_address(els->req), 971 els->d_id, *(u8 *) sg_virt(els->req),
926 zfcp_sg_to_address(els->req), els->req->length); 972 sg_virt(els->req), els->req->length);
927} 973}
928 974
929/** 975/**
@@ -936,8 +982,7 @@ void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
936 982
937 zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id, 983 zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
938 fc_host_port_id(els->adapter->scsi_host), 984 fc_host_port_id(els->adapter->scsi_host),
939 *(u8 *)zfcp_sg_to_address(els->req), 985 *(u8 *)sg_virt(els->req), sg_virt(els->resp),
940 zfcp_sg_to_address(els->resp),
941 els->resp->length); 986 els->resp->length);
942} 987}
943 988
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 0ddb18449d11..e8f450801fea 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -151,6 +151,7 @@ struct zfcp_hba_dbf_record {
151 struct zfcp_hba_dbf_record_response response; 151 struct zfcp_hba_dbf_record_response response;
152 struct zfcp_hba_dbf_record_status status; 152 struct zfcp_hba_dbf_record_status status;
153 struct zfcp_hba_dbf_record_qdio qdio; 153 struct zfcp_hba_dbf_record_qdio qdio;
154 struct fsf_bit_error_payload berr;
154 } u; 155 } u;
155} __attribute__ ((packed)); 156} __attribute__ ((packed));
156 157
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 67f45fc62f53..73eb41580f25 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -39,29 +39,6 @@
39 39
40/********************* GENERAL DEFINES *********************************/ 40/********************* GENERAL DEFINES *********************************/
41 41
42/**
43 * zfcp_sg_to_address - determine kernel address from struct scatterlist
44 * @list: struct scatterlist
45 * Return: kernel address
46 */
47static inline void *
48zfcp_sg_to_address(struct scatterlist *list)
49{
50 return sg_virt(list);
51}
52
53/**
54 * zfcp_address_to_sg - set up struct scatterlist from kernel address
55 * @address: kernel address
56 * @list: struct scatterlist
57 * @size: buffer size
58 */
59static inline void
60zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
61{
62 sg_set_buf(list, address, size);
63}
64
65#define REQUEST_LIST_SIZE 128 42#define REQUEST_LIST_SIZE 128
66 43
67/********************* SCSI SPECIFIC DEFINES *********************************/ 44/********************* SCSI SPECIFIC DEFINES *********************************/
@@ -101,11 +78,6 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
101 78
102/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 79/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
103 80
104typedef unsigned long long wwn_t;
105typedef unsigned long long fcp_lun_t;
106/* data length field may be at variable position in FCP-2 FCP_CMND IU */
107typedef unsigned int fcp_dl_t;
108
109/* timeout for name-server lookup (in seconds) */ 81/* timeout for name-server lookup (in seconds) */
110#define ZFCP_NS_GID_PN_TIMEOUT 10 82#define ZFCP_NS_GID_PN_TIMEOUT 10
111 83
@@ -129,7 +101,7 @@ typedef unsigned int fcp_dl_t;
129 101
130/* FCP(-2) FCP_CMND IU */ 102/* FCP(-2) FCP_CMND IU */
131struct fcp_cmnd_iu { 103struct fcp_cmnd_iu {
132 fcp_lun_t fcp_lun; /* FCP logical unit number */ 104 u64 fcp_lun; /* FCP logical unit number */
133 u8 crn; /* command reference number */ 105 u8 crn; /* command reference number */
134 u8 reserved0:5; /* reserved */ 106 u8 reserved0:5; /* reserved */
135 u8 task_attribute:3; /* task attribute */ 107 u8 task_attribute:3; /* task attribute */
@@ -204,7 +176,7 @@ struct fcp_rscn_element {
204struct fcp_logo { 176struct fcp_logo {
205 u32 command; 177 u32 command;
206 u32 nport_did; 178 u32 nport_did;
207 wwn_t nport_wwpn; 179 u64 nport_wwpn;
208} __attribute__((packed)); 180} __attribute__((packed));
209 181
210/* 182/*
@@ -218,13 +190,6 @@ struct fcp_logo {
218#define ZFCP_LS_RSCN 0x61 190#define ZFCP_LS_RSCN 0x61
219#define ZFCP_LS_RNID 0x78 191#define ZFCP_LS_RNID 0x78
220 192
221struct zfcp_ls_rjt_par {
222 u8 action;
223 u8 reason_code;
224 u8 reason_expl;
225 u8 vendor_unique;
226} __attribute__ ((packed));
227
228struct zfcp_ls_adisc { 193struct zfcp_ls_adisc {
229 u8 code; 194 u8 code;
230 u8 field[3]; 195 u8 field[3];
@@ -234,20 +199,6 @@ struct zfcp_ls_adisc {
234 u32 nport_id; 199 u32 nport_id;
235} __attribute__ ((packed)); 200} __attribute__ ((packed));
236 201
237struct zfcp_ls_adisc_acc {
238 u8 code;
239 u8 field[3];
240 u32 hard_nport_id;
241 u64 wwpn;
242 u64 wwnn;
243 u32 nport_id;
244} __attribute__ ((packed));
245
246struct zfcp_rc_entry {
247 u8 code;
248 const char *description;
249};
250
251/* 202/*
252 * FC-GS-2 stuff 203 * FC-GS-2 stuff
253 */ 204 */
@@ -281,9 +232,7 @@ struct zfcp_rc_entry {
281#define ZFCP_STATUS_COMMON_RUNNING 0x40000000 232#define ZFCP_STATUS_COMMON_RUNNING 0x40000000
282#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000 233#define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
283#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000 234#define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
284#define ZFCP_STATUS_COMMON_OPENING 0x08000000
285#define ZFCP_STATUS_COMMON_OPEN 0x04000000 235#define ZFCP_STATUS_COMMON_OPEN 0x04000000
286#define ZFCP_STATUS_COMMON_CLOSING 0x02000000
287#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000 236#define ZFCP_STATUS_COMMON_ERP_INUSE 0x01000000
288#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000 237#define ZFCP_STATUS_COMMON_ACCESS_DENIED 0x00800000
289#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000 238#define ZFCP_STATUS_COMMON_ACCESS_BOXED 0x00400000
@@ -291,16 +240,15 @@ struct zfcp_rc_entry {
291 240
292/* adapter status */ 241/* adapter status */
293#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 242#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
294#define ZFCP_STATUS_ADAPTER_REGISTERED 0x00000004
295#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 243#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
296#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 244#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
297#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020 245#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020
298#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 246#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
299#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 247#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
300#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 248#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
301#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
302 249
303/* FC-PH/FC-GS well-known address identifiers for generic services */ 250/* FC-PH/FC-GS well-known address identifiers for generic services */
251#define ZFCP_DID_WKA 0xFFFFF0
304#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA 252#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
305#define ZFCP_DID_TIME_SERVICE 0xFFFFFB 253#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
306#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC 254#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
@@ -312,29 +260,27 @@ struct zfcp_rc_entry {
312#define ZFCP_STATUS_PORT_DID_DID 0x00000002 260#define ZFCP_STATUS_PORT_DID_DID 0x00000002
313#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004 261#define ZFCP_STATUS_PORT_PHYS_CLOSING 0x00000004
314#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008 262#define ZFCP_STATUS_PORT_NO_WWPN 0x00000008
315#define ZFCP_STATUS_PORT_NO_SCSI_ID 0x00000010
316#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020 263#define ZFCP_STATUS_PORT_INVALID_WWPN 0x00000020
317 264
318/* for ports with well known addresses */ 265/* well known address (WKA) port status */
319#define ZFCP_STATUS_PORT_WKA \ 266enum zfcp_wka_status {
320 (ZFCP_STATUS_PORT_NO_WWPN | \ 267 ZFCP_WKA_PORT_OFFLINE,
321 ZFCP_STATUS_PORT_NO_SCSI_ID) 268 ZFCP_WKA_PORT_CLOSING,
269 ZFCP_WKA_PORT_OPENING,
270 ZFCP_WKA_PORT_ONLINE,
271};
322 272
323/* logical unit status */ 273/* logical unit status */
324#define ZFCP_STATUS_UNIT_TEMPORARY 0x00000002
325#define ZFCP_STATUS_UNIT_SHARED 0x00000004 274#define ZFCP_STATUS_UNIT_SHARED 0x00000004
326#define ZFCP_STATUS_UNIT_READONLY 0x00000008 275#define ZFCP_STATUS_UNIT_READONLY 0x00000008
327#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010 276#define ZFCP_STATUS_UNIT_REGISTERED 0x00000010
328#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020 277#define ZFCP_STATUS_UNIT_SCSI_WORK_PENDING 0x00000020
329 278
330/* FSF request status (this does not have a common part) */ 279/* FSF request status (this does not have a common part) */
331#define ZFCP_STATUS_FSFREQ_NOT_INIT 0x00000000
332#define ZFCP_STATUS_FSFREQ_POOL 0x00000001
333#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 280#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
334#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004 281#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004
335#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 282#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
336#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 283#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
337#define ZFCP_STATUS_FSFREQ_ABORTING 0x00000020
338#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 284#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
339#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080 285#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
340#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100 286#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
@@ -379,7 +325,7 @@ struct ct_hdr {
379 * a port name is required */ 325 * a port name is required */
380struct ct_iu_gid_pn_req { 326struct ct_iu_gid_pn_req {
381 struct ct_hdr header; 327 struct ct_hdr header;
382 wwn_t wwpn; 328 u64 wwpn;
383} __attribute__ ((packed)); 329} __attribute__ ((packed));
384 330
385/* FS_ACC IU and data unit for GID_PN nameserver request */ 331/* FS_ACC IU and data unit for GID_PN nameserver request */
@@ -388,11 +334,9 @@ struct ct_iu_gid_pn_resp {
388 u32 d_id; 334 u32 d_id;
389} __attribute__ ((packed)); 335} __attribute__ ((packed));
390 336
391typedef void (*zfcp_send_ct_handler_t)(unsigned long);
392
393/** 337/**
394 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct 338 * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
395 * @port: port where the request is sent to 339 * @wka_port: port where the request is sent to
396 * @req: scatter-gather list for request 340 * @req: scatter-gather list for request
397 * @resp: scatter-gather list for response 341 * @resp: scatter-gather list for response
398 * @req_count: number of elements in request scatter-gather list 342 * @req_count: number of elements in request scatter-gather list
@@ -404,12 +348,12 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
404 * @status: used to pass error status to calling function 348 * @status: used to pass error status to calling function
405 */ 349 */
406struct zfcp_send_ct { 350struct zfcp_send_ct {
407 struct zfcp_port *port; 351 struct zfcp_wka_port *wka_port;
408 struct scatterlist *req; 352 struct scatterlist *req;
409 struct scatterlist *resp; 353 struct scatterlist *resp;
410 unsigned int req_count; 354 unsigned int req_count;
411 unsigned int resp_count; 355 unsigned int resp_count;
412 zfcp_send_ct_handler_t handler; 356 void (*handler)(unsigned long);
413 unsigned long handler_data; 357 unsigned long handler_data;
414 int timeout; 358 int timeout;
415 struct completion *completion; 359 struct completion *completion;
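
With this hunk a CT request is addressed to a well-known-address port and completed through a plain function pointer instead of the removed zfcp_send_ct_handler_t typedef. The following is a minimal sketch (not part of the patch) of how a caller fills the reworked structure, modeled on the GID_PN and GPN_FT users further down; the my_-prefixed names are invented for illustration:

static void my_ct_done(unsigned long data)
{
	complete((struct completion *)data);	/* wake up the submitter */
}

static int my_issue_ct(struct zfcp_adapter *adapter,
		       struct scatterlist *req, struct scatterlist *resp)
{
	struct completion done;
	struct zfcp_send_ct ct = {
		.wka_port	= &adapter->nsp,	/* nameserver WKA port */
		.handler	= my_ct_done,
		.handler_data	= (unsigned long)&done,
		.timeout	= 10,
		.req		= req,
		.resp		= resp,
		.req_count	= 1,
		.resp_count	= 1,
	};
	int ret;

	init_completion(&done);
	ret = zfcp_fsf_send_ct(&ct, NULL, NULL);	/* no mempool, no erp_action */
	if (!ret)
		wait_for_completion(&done);
	return ret ? ret : ct.status;
}
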
@@ -426,8 +370,6 @@ struct zfcp_gid_pn_data {
426 struct zfcp_port *port; 370 struct zfcp_port *port;
427}; 371};
428 372
429typedef void (*zfcp_send_els_handler_t)(unsigned long);
430
431/** 373/**
432 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els 374 * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
433 * @adapter: adapter where request is sent from 375 * @adapter: adapter where request is sent from
@@ -451,22 +393,28 @@ struct zfcp_send_els {
451 struct scatterlist *resp; 393 struct scatterlist *resp;
452 unsigned int req_count; 394 unsigned int req_count;
453 unsigned int resp_count; 395 unsigned int resp_count;
454 zfcp_send_els_handler_t handler; 396 void (*handler)(unsigned long);
455 unsigned long handler_data; 397 unsigned long handler_data;
456 struct completion *completion; 398 struct completion *completion;
457 int ls_code; 399 int ls_code;
458 int status; 400 int status;
459}; 401};
460 402
403struct zfcp_wka_port {
404 struct zfcp_adapter *adapter;
405 wait_queue_head_t completion_wq;
406 enum zfcp_wka_status status;
407 atomic_t refcount;
408 u32 d_id;
409 u32 handle;
410 struct mutex mutex;
411 struct delayed_work work;
412};
413
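
struct zfcp_wka_port replaces the old approach of modelling well-known-address ports as full zfcp_port objects. The mutex serializes open/close transitions, refcount counts users with CT traffic in flight, and work is the delayed job that closes an idle port. A sketch of the intended caller-side pattern, assuming the static helpers zfcp_wka_port_get()/zfcp_wka_port_put() added to zfcp_fc.c later in this patch (so a real caller would have to live in that file); my_send_request() is a placeholder:

static int my_nameserver_query(struct zfcp_adapter *adapter)
{
	int ret;

	ret = zfcp_wka_port_get(&adapter->nsp);	/* opens the port if offline */
	if (ret)
		return ret;			/* -ERESTARTSYS or -EIO */

	ret = my_send_request(adapter);		/* some CT exchange via adapter->nsp */

	zfcp_wka_port_put(&adapter->nsp);	/* last user schedules a delayed close */
	return ret;
}
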
461struct zfcp_qdio_queue { 414struct zfcp_qdio_queue {
462 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ 415 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
463 u8 first; /* index of next free bfr 416 u8 first; /* index of next free bfr in queue */
464 in queue (free_count>0) */ 417 atomic_t count; /* number of free buffers in queue */
465 atomic_t count; /* number of free buffers
466 in queue */
467 spinlock_t lock; /* lock for operations on queue */
468 int pci_batch; /* SBALs since PCI indication
469 was last set */
470}; 418};
471 419
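
The queue lock and the PCI-indication batch counter disappear from zfcp_qdio_queue; they reappear as req_q_lock and req_q_pci_batch in struct zfcp_adapter a few hunks below. The sketch here is hypothetical — the real submit path lives in zfcp_qdio.c/zfcp_fsf.c, which are outside this excerpt — and only illustrates that free-SBAL accounting is now guarded at the adapter level:

static int my_reserve_req_sbal(struct zfcp_adapter *adapter)
{
	int ret = 0;

	spin_lock_bh(&adapter->req_q_lock);
	if (atomic_read(&adapter->req_q.count) <= 0)
		ret = -EBUSY;			/* caller would wait on adapter->request_wq */
	else {
		atomic_dec(&adapter->req_q.count);	/* take one SBAL */
		adapter->req_q_pci_batch++;		/* SBALs since the last PCI flag */
	}
	spin_unlock_bh(&adapter->req_q_lock);

	return ret;
}
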
472struct zfcp_erp_action { 420struct zfcp_erp_action {
@@ -475,7 +423,7 @@ struct zfcp_erp_action {
475 struct zfcp_adapter *adapter; /* device which should be recovered */ 423 struct zfcp_adapter *adapter; /* device which should be recovered */
476 struct zfcp_port *port; 424 struct zfcp_port *port;
477 struct zfcp_unit *unit; 425 struct zfcp_unit *unit;
478 volatile u32 status; /* recovery status */ 426 u32 status; /* recovery status */
479 u32 step; /* active step of this erp action */ 427 u32 step; /* active step of this erp action */
480 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending 428 struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
481 for this action */ 429 for this action */
@@ -506,8 +454,8 @@ struct zfcp_adapter {
506 atomic_t refcount; /* reference count */ 454 atomic_t refcount; /* reference count */
507 wait_queue_head_t remove_wq; /* can be used to wait for 455 wait_queue_head_t remove_wq; /* can be used to wait for
508 refcount drop to zero */ 456 refcount drop to zero */
509 wwn_t peer_wwnn; /* P2P peer WWNN */ 457 u64 peer_wwnn; /* P2P peer WWNN */
510 wwn_t peer_wwpn; /* P2P peer WWPN */ 458 u64 peer_wwpn; /* P2P peer WWPN */
511 u32 peer_d_id; /* P2P peer D_ID */ 459 u32 peer_d_id; /* P2P peer D_ID */
512 struct ccw_device *ccw_device; /* S/390 ccw device */ 460 struct ccw_device *ccw_device; /* S/390 ccw device */
513 u32 hydra_version; /* Hydra version */ 461 u32 hydra_version; /* Hydra version */
@@ -518,13 +466,13 @@ struct zfcp_adapter {
518 u16 timer_ticks; /* time int for a tick */ 466 u16 timer_ticks; /* time int for a tick */
519 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 467 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
520 struct list_head port_list_head; /* remote port list */ 468 struct list_head port_list_head; /* remote port list */
521 struct list_head port_remove_lh; /* head of ports to be
522 removed */
523 u32 ports; /* number of remote ports */
524 unsigned long req_no; /* unique FSF req number */ 469 unsigned long req_no; /* unique FSF req number */
525 struct list_head *req_list; /* list of pending reqs */ 470 struct list_head *req_list; /* list of pending reqs */
526 spinlock_t req_list_lock; /* request list lock */ 471 spinlock_t req_list_lock; /* request list lock */
527 struct zfcp_qdio_queue req_q; /* request queue */ 472 struct zfcp_qdio_queue req_q; /* request queue */
473 spinlock_t req_q_lock; /* for operations on queue */
474 int req_q_pci_batch; /* SBALs since PCI indication
475 was last set */
528 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 476 u32 fsf_req_seq_no; /* FSF cmnd seq number */
529 wait_queue_head_t request_wq; /* can be used to wait for 477 wait_queue_head_t request_wq; /* can be used to wait for
530 more available SBALs */ 478 more available SBALs */
@@ -548,7 +496,7 @@ struct zfcp_adapter {
548 actions */ 496 actions */
549 u32 erp_low_mem_count; /* nr of erp actions waiting 497 u32 erp_low_mem_count; /* nr of erp actions waiting
550 for memory */ 498 for memory */
551 struct zfcp_port *nameserver_port; /* adapter's nameserver */ 499 struct zfcp_wka_port nsp; /* adapter's nameserver */
552 debug_info_t *rec_dbf; 500 debug_info_t *rec_dbf;
553 debug_info_t *hba_dbf; 501 debug_info_t *hba_dbf;
554 debug_info_t *san_dbf; /* debug feature areas */ 502 debug_info_t *san_dbf; /* debug feature areas */
@@ -563,11 +511,11 @@ struct zfcp_adapter {
563 struct zfcp_scsi_dbf_record scsi_dbf_buf; 511 struct zfcp_scsi_dbf_record scsi_dbf_buf;
564 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 512 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
565 struct qdio_initialize qdio_init_data; /* for qdio_establish */ 513 struct qdio_initialize qdio_init_data; /* for qdio_establish */
566 struct device generic_services; /* directory for WKA ports */
567 struct fc_host_statistics *fc_stats; 514 struct fc_host_statistics *fc_stats;
568 struct fsf_qtcb_bottom_port *stats_reset_data; 515 struct fsf_qtcb_bottom_port *stats_reset_data;
569 unsigned long stats_reset; 516 unsigned long stats_reset;
570 struct work_struct scan_work; 517 struct work_struct scan_work;
518 atomic_t qdio_outb_full; /* queue full incidents */
571}; 519};
572 520
573struct zfcp_port { 521struct zfcp_port {
@@ -579,18 +527,16 @@ struct zfcp_port {
579 refcount drop to zero */ 527 refcount drop to zero */
580 struct zfcp_adapter *adapter; /* adapter used to access port */ 528 struct zfcp_adapter *adapter; /* adapter used to access port */
581 struct list_head unit_list_head; /* head of logical unit list */ 529 struct list_head unit_list_head; /* head of logical unit list */
582 struct list_head unit_remove_lh; /* head of luns to be removed
583 list */
584 u32 units; /* # of logical units in list */
585 atomic_t status; /* status of this remote port */ 530 atomic_t status; /* status of this remote port */
586 wwn_t wwnn; /* WWNN if known */ 531 u64 wwnn; /* WWNN if known */
587 wwn_t wwpn; /* WWPN */ 532 u64 wwpn; /* WWPN */
588 u32 d_id; /* D_ID */ 533 u32 d_id; /* D_ID */
589 u32 handle; /* handle assigned by FSF */ 534 u32 handle; /* handle assigned by FSF */
590 struct zfcp_erp_action erp_action; /* pending error recovery */ 535 struct zfcp_erp_action erp_action; /* pending error recovery */
591 atomic_t erp_counter; 536 atomic_t erp_counter;
592 u32 maxframe_size; 537 u32 maxframe_size;
593 u32 supported_classes; 538 u32 supported_classes;
539 struct work_struct gid_pn_work;
594}; 540};
595 541
596struct zfcp_unit { 542struct zfcp_unit {
@@ -601,8 +547,7 @@ struct zfcp_unit {
601 refcount drop to zero */ 547 refcount drop to zero */
602 struct zfcp_port *port; /* remote port of unit */ 548 struct zfcp_port *port; /* remote port of unit */
603 atomic_t status; /* status of this logical unit */ 549 atomic_t status; /* status of this logical unit */
604 unsigned int scsi_lun; /* own SCSI LUN */ 550 u64 fcp_lun; /* own FCP_LUN */
605 fcp_lun_t fcp_lun; /* own FCP_LUN */
606 u32 handle; /* handle assigned by FSF */ 551 u32 handle; /* handle assigned by FSF */
607 struct scsi_device *device; /* scsi device struct pointer */ 552 struct scsi_device *device; /* scsi device struct pointer */
608 struct zfcp_erp_action erp_action; /* pending error recovery */ 553 struct zfcp_erp_action erp_action; /* pending error recovery */
@@ -625,7 +570,7 @@ struct zfcp_fsf_req {
625 u8 sbal_response; /* SBAL used in interrupt */ 570 u8 sbal_response; /* SBAL used in interrupt */
626 wait_queue_head_t completion_wq; /* can be used by a routine 571 wait_queue_head_t completion_wq; /* can be used by a routine
627 to wait for completion */ 572 to wait for completion */
628 volatile u32 status; /* status of this request */ 573 u32 status; /* status of this request */
629 u32 fsf_command; /* FSF Command copy */ 574 u32 fsf_command; /* FSF Command copy */
630 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 575 struct fsf_qtcb *qtcb; /* address of associated QTCB */
631 u32 seq_no; /* Sequence number of request */ 576 u32 seq_no; /* Sequence number of request */
@@ -644,11 +589,7 @@ struct zfcp_fsf_req {
644struct zfcp_data { 589struct zfcp_data {
645 struct scsi_host_template scsi_host_template; 590 struct scsi_host_template scsi_host_template;
646 struct scsi_transport_template *scsi_transport_template; 591 struct scsi_transport_template *scsi_transport_template;
647 atomic_t status; /* Module status flags */
648 struct list_head adapter_list_head; /* head of adapter list */ 592 struct list_head adapter_list_head; /* head of adapter list */
649 struct list_head adapter_remove_lh; /* head of adapters to be
650 removed */
651 u32 adapters; /* # of adapters in list */
652 rwlock_t config_lock; /* serialises changes 593 rwlock_t config_lock; /* serialises changes
653 to adapter/port/unit 594 to adapter/port/unit
654 lists */ 595 lists */
@@ -656,11 +597,12 @@ struct zfcp_data {
656 changes */ 597 changes */
657 atomic_t loglevel; /* current loglevel */ 598 atomic_t loglevel; /* current loglevel */
658 char init_busid[BUS_ID_SIZE]; 599 char init_busid[BUS_ID_SIZE];
659 wwn_t init_wwpn; 600 u64 init_wwpn;
660 fcp_lun_t init_fcp_lun; 601 u64 init_fcp_lun;
661 struct kmem_cache *fsf_req_qtcb_cache; 602 struct kmem_cache *fsf_req_qtcb_cache;
662 struct kmem_cache *sr_buffer_cache; 603 struct kmem_cache *sr_buffer_cache;
663 struct kmem_cache *gid_pn_cache; 604 struct kmem_cache *gid_pn_cache;
605 struct workqueue_struct *work_queue;
664}; 606};
665 607
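
zfcp_data gains its own workqueue; the hunks below queue the deferred GID_PN lookup and the SCSI scan work on it instead of the shared kernel workqueue. A sketch of the setup that would be needed in the module init path (zfcp_aux.c, which is not part of this excerpt); the queue name is an assumption:

static int __init my_zfcp_module_init(void)
{
	zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
	if (!zfcp_data.work_queue)
		return -ENOMEM;

	/* ... remaining driver initialization ... */
	return 0;
}
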
666/* struct used by memory pools for fsf_requests */ 608/* struct used by memory pools for fsf_requests */
@@ -677,14 +619,7 @@ struct zfcp_fsf_req_qtcb {
677#define ZFCP_SET 0x00000100 619#define ZFCP_SET 0x00000100
678#define ZFCP_CLEAR 0x00000200 620#define ZFCP_CLEAR 0x00000200
679 621
680#ifndef atomic_test_mask
681#define atomic_test_mask(mask, target) \
682 ((atomic_read(target) & mask) == mask)
683#endif
684
685#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id) 622#define zfcp_get_busid_by_adapter(adapter) (adapter->ccw_device->dev.bus_id)
686#define zfcp_get_busid_by_port(port) (zfcp_get_busid_by_adapter(port->adapter))
687#define zfcp_get_busid_by_unit(unit) (zfcp_get_busid_by_port(unit->port))
688 623
689/* 624/*
690 * Helper functions for request ID management. 625 * Helper functions for request ID management.
@@ -745,12 +680,6 @@ zfcp_unit_put(struct zfcp_unit *unit)
745} 680}
746 681
747static inline void 682static inline void
748zfcp_unit_wait(struct zfcp_unit *unit)
749{
750 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
751}
752
753static inline void
754zfcp_port_get(struct zfcp_port *port) 683zfcp_port_get(struct zfcp_port *port)
755{ 684{
756 atomic_inc(&port->refcount); 685 atomic_inc(&port->refcount);
@@ -764,12 +693,6 @@ zfcp_port_put(struct zfcp_port *port)
764} 693}
765 694
766static inline void 695static inline void
767zfcp_port_wait(struct zfcp_port *port)
768{
769 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
770}
771
772static inline void
773zfcp_adapter_get(struct zfcp_adapter *adapter) 696zfcp_adapter_get(struct zfcp_adapter *adapter)
774{ 697{
775 atomic_inc(&adapter->refcount); 698 atomic_inc(&adapter->refcount);
@@ -782,10 +705,4 @@ zfcp_adapter_put(struct zfcp_adapter *adapter)
782 wake_up(&adapter->remove_wq); 705 wake_up(&adapter->remove_wq);
783} 706}
784 707
785static inline void
786zfcp_adapter_wait(struct zfcp_adapter *adapter)
787{
788 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
789}
790
791#endif /* ZFCP_DEF_H */ 708#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 643ac4bba5b5..782313131870 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -23,7 +23,6 @@ enum zfcp_erp_steps {
23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, 23 ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001,
24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, 24 ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010,
25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, 25 ZFCP_ERP_STEP_PORT_CLOSING = 0x0100,
26 ZFCP_ERP_STEP_NAMESERVER_OPEN = 0x0200,
27 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400, 26 ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400,
28 ZFCP_ERP_STEP_PORT_OPENING = 0x0800, 27 ZFCP_ERP_STEP_PORT_OPENING = 0x0800,
29 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, 28 ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000,
@@ -532,8 +531,7 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
532 struct zfcp_port *port; 531 struct zfcp_port *port;
533 532
534 list_for_each_entry(port, &adapter->port_list_head, list) 533 list_for_each_entry(port, &adapter->port_list_head, list)
535 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)) 534 _zfcp_erp_port_reopen(port, clear, id, ref);
536 _zfcp_erp_port_reopen(port, clear, id, ref);
537} 535}
538 536
539static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id, 537static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, u8 id,
@@ -669,8 +667,6 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act)
669 int ret; 667 int ret;
670 struct zfcp_adapter *adapter = act->adapter; 668 struct zfcp_adapter *adapter = act->adapter;
671 669
672 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
673
674 write_lock_irq(&adapter->erp_lock); 670 write_lock_irq(&adapter->erp_lock);
675 zfcp_erp_action_to_running(act); 671 zfcp_erp_action_to_running(act);
676 write_unlock_irq(&adapter->erp_lock); 672 write_unlock_irq(&adapter->erp_lock);
@@ -741,8 +737,7 @@ static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *act,
741 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 737 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
742 failed_qdio: 738 failed_qdio:
743 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 739 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
744 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 740 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
745 ZFCP_STATUS_ADAPTER_XPORT_OK,
746 &act->adapter->status); 741 &act->adapter->status);
747 return retval; 742 return retval;
748} 743}
@@ -751,15 +746,11 @@ static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *act)
751{ 746{
752 int retval; 747 int retval;
753 748
754 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
755 zfcp_erp_adapter_strategy_generic(act, 1); /* close */ 749 zfcp_erp_adapter_strategy_generic(act, 1); /* close */
756 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &act->adapter->status);
757 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 750 if (act->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
758 return ZFCP_ERP_EXIT; 751 return ZFCP_ERP_EXIT;
759 752
760 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
761 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */ 753 retval = zfcp_erp_adapter_strategy_generic(act, 0); /* open */
762 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &act->adapter->status);
763 754
764 if (retval == ZFCP_ERP_FAILED) 755 if (retval == ZFCP_ERP_FAILED)
765 ssleep(8); 756 ssleep(8);
@@ -783,10 +774,7 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act)
783 774
784static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) 775static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port)
785{ 776{
786 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 777 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
787 ZFCP_STATUS_COMMON_CLOSING |
788 ZFCP_STATUS_COMMON_ACCESS_DENIED |
789 ZFCP_STATUS_PORT_DID_DID |
790 ZFCP_STATUS_PORT_PHYS_CLOSING | 778 ZFCP_STATUS_PORT_PHYS_CLOSING |
791 ZFCP_STATUS_PORT_INVALID_WWPN, 779 ZFCP_STATUS_PORT_INVALID_WWPN,
792 &port->status); 780 &port->status);
@@ -839,73 +827,12 @@ static int zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
839 return ZFCP_ERP_CONTINUES; 827 return ZFCP_ERP_CONTINUES;
840} 828}
841 829
842static void zfcp_erp_port_strategy_open_ns_wake(struct zfcp_erp_action *ns_act)
843{
844 unsigned long flags;
845 struct zfcp_adapter *adapter = ns_act->adapter;
846 struct zfcp_erp_action *act, *tmp;
847 int status;
848
849 read_lock_irqsave(&adapter->erp_lock, flags);
850 list_for_each_entry_safe(act, tmp, &adapter->erp_running_head, list) {
851 if (act->step == ZFCP_ERP_STEP_NAMESERVER_OPEN) {
852 status = atomic_read(&adapter->nameserver_port->status);
853 if (status & ZFCP_STATUS_COMMON_ERP_FAILED)
854 zfcp_erp_port_failed(act->port, 27, NULL);
855 zfcp_erp_action_ready(act);
856 }
857 }
858 read_unlock_irqrestore(&adapter->erp_lock, flags);
859}
860
861static int zfcp_erp_port_strategy_open_nameserver(struct zfcp_erp_action *act)
862{
863 int retval;
864
865 switch (act->step) {
866 case ZFCP_ERP_STEP_UNINITIALIZED:
867 case ZFCP_ERP_STEP_PHYS_PORT_CLOSING:
868 case ZFCP_ERP_STEP_PORT_CLOSING:
869 return zfcp_erp_port_strategy_open_port(act);
870
871 case ZFCP_ERP_STEP_PORT_OPENING:
872 if (atomic_read(&act->port->status) & ZFCP_STATUS_COMMON_OPEN)
873 retval = ZFCP_ERP_SUCCEEDED;
874 else
875 retval = ZFCP_ERP_FAILED;
876 /* this is needed anyway */
877 zfcp_erp_port_strategy_open_ns_wake(act);
878 return retval;
879
880 default:
881 return ZFCP_ERP_FAILED;
882 }
883}
884
885static int zfcp_erp_port_strategy_open_lookup(struct zfcp_erp_action *act)
886{
887 int retval;
888
889 retval = zfcp_fc_ns_gid_pn_request(act);
890 if (retval == -ENOMEM)
891 return ZFCP_ERP_NOMEM;
892 act->step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
893 if (retval)
894 return ZFCP_ERP_FAILED;
895 return ZFCP_ERP_CONTINUES;
896}
897
898static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) 830static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
899{ 831{
900 struct zfcp_adapter *adapter = act->adapter; 832 struct zfcp_adapter *adapter = act->adapter;
901 struct zfcp_port *port = act->port; 833 struct zfcp_port *port = act->port;
902 834
903 if (port->wwpn != adapter->peer_wwpn) { 835 if (port->wwpn != adapter->peer_wwpn) {
904 dev_err(&adapter->ccw_device->dev,
905 "Failed to open port 0x%016Lx, "
906 "Peer WWPN 0x%016Lx does not "
907 "match.\n", port->wwpn,
908 adapter->peer_wwpn);
909 zfcp_erp_port_failed(port, 25, NULL); 836 zfcp_erp_port_failed(port, 25, NULL);
910 return ZFCP_ERP_FAILED; 837 return ZFCP_ERP_FAILED;
911 } 838 }
@@ -914,11 +841,25 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
914 return zfcp_erp_port_strategy_open_port(act); 841 return zfcp_erp_port_strategy_open_port(act);
915} 842}
916 843
844void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
845{
846 int retval;
847 struct zfcp_port *port = container_of(work, struct zfcp_port,
848 gid_pn_work);
849
850 retval = zfcp_fc_ns_gid_pn(&port->erp_action);
851 if (retval == -ENOMEM)
852 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_NOMEM);
853 port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
854 if (retval)
855 zfcp_erp_notify(&port->erp_action, ZFCP_ERP_FAILED);
856
857}
858
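
Deferring the nameserver lookup to a work item keeps the ERP thread from blocking on the WKA port or on the CT response: the strategy below queues port->gid_pn_work and returns ZFCP_ERP_CONTINUES, and the handler above reports the outcome back through zfcp_erp_notify(). The pairing is sketched here under the assumption that the INIT_WORK call sits in the port enqueue path in zfcp_aux.c (not shown in this excerpt):

	/* assumed to be in zfcp_port_enqueue(), zfcp_aux.c: bind the work item */
	INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);

	/* ERP strategy (hunk below): defer the lookup and keep the action pending */
	queue_work(zfcp_data.work_queue, &port->gid_pn_work);
	return ZFCP_ERP_CONTINUES;
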
917static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) 859static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
918{ 860{
919 struct zfcp_adapter *adapter = act->adapter; 861 struct zfcp_adapter *adapter = act->adapter;
920 struct zfcp_port *port = act->port; 862 struct zfcp_port *port = act->port;
921 struct zfcp_port *ns_port = adapter->nameserver_port;
922 int p_status = atomic_read(&port->status); 863 int p_status = atomic_read(&port->status);
923 864
924 switch (act->step) { 865 switch (act->step) {
@@ -927,28 +868,10 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
927 case ZFCP_ERP_STEP_PORT_CLOSING: 868 case ZFCP_ERP_STEP_PORT_CLOSING:
928 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) 869 if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
929 return zfcp_erp_open_ptp_port(act); 870 return zfcp_erp_open_ptp_port(act);
930 if (!ns_port) { 871 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
931 dev_err(&adapter->ccw_device->dev, 872 queue_work(zfcp_data.work_queue, &port->gid_pn_work);
932 "Nameserver port unavailable.\n"); 873 return ZFCP_ERP_CONTINUES;
933 return ZFCP_ERP_FAILED;
934 }
935 if (!(atomic_read(&ns_port->status) &
936 ZFCP_STATUS_COMMON_UNBLOCKED)) {
937 /* nameserver port may live again */
938 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING,
939 &ns_port->status);
940 if (zfcp_erp_port_reopen(ns_port, 0, 77, act) >= 0) {
941 act->step = ZFCP_ERP_STEP_NAMESERVER_OPEN;
942 return ZFCP_ERP_CONTINUES;
943 }
944 return ZFCP_ERP_FAILED;
945 } 874 }
946 /* else nameserver port is already open, fall through */
947 case ZFCP_ERP_STEP_NAMESERVER_OPEN:
948 if (!(atomic_read(&ns_port->status) & ZFCP_STATUS_COMMON_OPEN))
949 return ZFCP_ERP_FAILED;
950 return zfcp_erp_port_strategy_open_lookup(act);
951
952 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: 875 case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
953 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) { 876 if (!(p_status & ZFCP_STATUS_PORT_DID_DID)) {
954 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) { 877 if (p_status & (ZFCP_STATUS_PORT_INVALID_WWPN)) {
@@ -961,25 +884,26 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
961 884
962 case ZFCP_ERP_STEP_PORT_OPENING: 885 case ZFCP_ERP_STEP_PORT_OPENING:
963 /* D_ID might have changed during open */ 886 /* D_ID might have changed during open */
964 if ((p_status & ZFCP_STATUS_COMMON_OPEN) && 887 if (p_status & ZFCP_STATUS_COMMON_OPEN) {
965 (p_status & ZFCP_STATUS_PORT_DID_DID)) 888 if (p_status & ZFCP_STATUS_PORT_DID_DID)
966 return ZFCP_ERP_SUCCEEDED; 889 return ZFCP_ERP_SUCCEEDED;
890 else {
891 act->step = ZFCP_ERP_STEP_PORT_CLOSING;
892 return ZFCP_ERP_CONTINUES;
893 }
967 /* fall through otherwise */ 894 /* fall through otherwise */
895 }
968 } 896 }
969 return ZFCP_ERP_FAILED; 897 return ZFCP_ERP_FAILED;
970} 898}
971 899
972static int zfcp_erp_port_strategy_open(struct zfcp_erp_action *act)
973{
974 if (atomic_read(&act->port->status) & (ZFCP_STATUS_PORT_WKA))
975 return zfcp_erp_port_strategy_open_nameserver(act);
976 return zfcp_erp_port_strategy_open_common(act);
977}
978
979static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) 900static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
980{ 901{
981 struct zfcp_port *port = erp_action->port; 902 struct zfcp_port *port = erp_action->port;
982 903
904 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)
905 goto close_init_done;
906
983 switch (erp_action->step) { 907 switch (erp_action->step) {
984 case ZFCP_ERP_STEP_UNINITIALIZED: 908 case ZFCP_ERP_STEP_UNINITIALIZED:
985 zfcp_erp_port_strategy_clearstati(port); 909 zfcp_erp_port_strategy_clearstati(port);
@@ -992,19 +916,17 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
992 return ZFCP_ERP_FAILED; 916 return ZFCP_ERP_FAILED;
993 break; 917 break;
994 } 918 }
919
920close_init_done:
995 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) 921 if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
996 return ZFCP_ERP_EXIT; 922 return ZFCP_ERP_EXIT;
997 else
998 return zfcp_erp_port_strategy_open(erp_action);
999 923
1000 return ZFCP_ERP_FAILED; 924 return zfcp_erp_port_strategy_open_common(erp_action);
1001} 925}
1002 926
1003static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) 927static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
1004{ 928{
1005 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING | 929 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1006 ZFCP_STATUS_COMMON_CLOSING |
1007 ZFCP_STATUS_COMMON_ACCESS_DENIED |
1008 ZFCP_STATUS_UNIT_SHARED | 930 ZFCP_STATUS_UNIT_SHARED |
1009 ZFCP_STATUS_UNIT_READONLY, 931 ZFCP_STATUS_UNIT_READONLY,
1010 &unit->status); 932 &unit->status);
@@ -1065,8 +987,14 @@ static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
1065 break; 987 break;
1066 case ZFCP_ERP_FAILED : 988 case ZFCP_ERP_FAILED :
1067 atomic_inc(&unit->erp_counter); 989 atomic_inc(&unit->erp_counter);
1068 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) 990 if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) {
991 dev_err(&unit->port->adapter->ccw_device->dev,
992 "ERP failed for unit 0x%016Lx on "
993 "port 0x%016Lx\n",
994 (unsigned long long)unit->fcp_lun,
995 (unsigned long long)unit->port->wwpn);
1069 zfcp_erp_unit_failed(unit, 21, NULL); 996 zfcp_erp_unit_failed(unit, 21, NULL);
997 }
1070 break; 998 break;
1071 } 999 }
1072 1000
@@ -1091,8 +1019,12 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
1091 result = ZFCP_ERP_EXIT; 1019 result = ZFCP_ERP_EXIT;
1092 } 1020 }
1093 atomic_inc(&port->erp_counter); 1021 atomic_inc(&port->erp_counter);
1094 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) 1022 if (atomic_read(&port->erp_counter) > ZFCP_MAX_ERPS) {
1023 dev_err(&port->adapter->ccw_device->dev,
1024 "ERP failed for remote port 0x%016Lx\n",
1025 (unsigned long long)port->wwpn);
1095 zfcp_erp_port_failed(port, 22, NULL); 1026 zfcp_erp_port_failed(port, 22, NULL);
1027 }
1096 break; 1028 break;
1097 } 1029 }
1098 1030
@@ -1114,8 +1046,12 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
1114 1046
1115 case ZFCP_ERP_FAILED : 1047 case ZFCP_ERP_FAILED :
1116 atomic_inc(&adapter->erp_counter); 1048 atomic_inc(&adapter->erp_counter);
1117 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) 1049 if (atomic_read(&adapter->erp_counter) > ZFCP_MAX_ERPS) {
1050 dev_err(&adapter->ccw_device->dev,
1051 "ERP cannot recover an error "
1052 "on the FCP device\n");
1118 zfcp_erp_adapter_failed(adapter, 23, NULL); 1053 zfcp_erp_adapter_failed(adapter, 23, NULL);
1054 }
1119 break; 1055 break;
1120 } 1056 }
1121 1057
@@ -1250,9 +1186,10 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
1250 struct zfcp_unit *unit = p->unit; 1186 struct zfcp_unit *unit = p->unit;
1251 struct fc_rport *rport = unit->port->rport; 1187 struct fc_rport *rport = unit->port->rport;
1252 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, 1188 scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
1253 unit->scsi_lun, 0); 1189 scsilun_to_int((struct scsi_lun *)&unit->fcp_lun), 0);
1254 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1190 atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1255 zfcp_unit_put(unit); 1191 zfcp_unit_put(unit);
1192 wake_up(&unit->port->adapter->erp_done_wqh);
1256 kfree(p); 1193 kfree(p);
1257} 1194}
1258 1195
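
unit->fcp_lun now holds the raw 64-bit FCP LUN in wire format, so handing it to the SCSI midlayer goes through scsilun_to_int(), which folds the 8-byte LUN structure into the flat LUN number that scsi_scan_target() expects. A minimal illustration of that conversion in isolation (it relies on the midlayer helpers already included by zfcp_erp.c); the function name is invented for this sketch:

static unsigned int my_fcp_lun_to_scsi_lun(u64 fcp_lun)
{
	/* reinterpret the 8 wire-format LUN bytes and flatten them */
	return scsilun_to_int((struct scsi_lun *)&fcp_lun);
}
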
@@ -1263,9 +1200,9 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1263 p = kzalloc(sizeof(*p), GFP_KERNEL); 1200 p = kzalloc(sizeof(*p), GFP_KERNEL);
1264 if (!p) { 1201 if (!p) {
1265 dev_err(&unit->port->adapter->ccw_device->dev, 1202 dev_err(&unit->port->adapter->ccw_device->dev,
1266 "Out of resources. Could not register unit " 1203 "Registering unit 0x%016Lx on port 0x%016Lx failed\n",
1267 "0x%016Lx on port 0x%016Lx with SCSI stack.\n", 1204 (unsigned long long)unit->fcp_lun,
1268 unit->fcp_lun, unit->port->wwpn); 1205 (unsigned long long)unit->port->wwpn);
1269 return; 1206 return;
1270 } 1207 }
1271 1208
@@ -1273,7 +1210,7 @@ static void zfcp_erp_schedule_work(struct zfcp_unit *unit)
1273 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status); 1210 atomic_set_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
1274 INIT_WORK(&p->work, zfcp_erp_scsi_scan); 1211 INIT_WORK(&p->work, zfcp_erp_scsi_scan);
1275 p->unit = unit; 1212 p->unit = unit;
1276 schedule_work(&p->work); 1213 queue_work(zfcp_data.work_queue, &p->work);
1277} 1214}
1278 1215
1279static void zfcp_erp_rport_register(struct zfcp_port *port) 1216static void zfcp_erp_rport_register(struct zfcp_port *port)
@@ -1286,8 +1223,8 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
1286 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); 1223 port->rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
1287 if (!port->rport) { 1224 if (!port->rport) {
1288 dev_err(&port->adapter->ccw_device->dev, 1225 dev_err(&port->adapter->ccw_device->dev,
1289 "Failed registration of rport " 1226 "Registering port 0x%016Lx failed\n",
1290 "0x%016Lx.\n", port->wwpn); 1227 (unsigned long long)port->wwpn);
1291 return; 1228 return;
1292 } 1229 }
1293 1230
@@ -1299,12 +1236,12 @@ static void zfcp_erp_rport_register(struct zfcp_port *port)
1299static void zfcp_erp_rports_del(struct zfcp_adapter *adapter) 1236static void zfcp_erp_rports_del(struct zfcp_adapter *adapter)
1300{ 1237{
1301 struct zfcp_port *port; 1238 struct zfcp_port *port;
1302 list_for_each_entry(port, &adapter->port_list_head, list) 1239 list_for_each_entry(port, &adapter->port_list_head, list) {
1303 if (port->rport && !(atomic_read(&port->status) & 1240 if (!port->rport)
1304 ZFCP_STATUS_PORT_WKA)) { 1241 continue;
1305 fc_remote_port_delete(port->rport); 1242 fc_remote_port_delete(port->rport);
1306 port->rport = NULL; 1243 port->rport = NULL;
1307 } 1244 }
1308} 1245}
1309 1246
1310static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) 1247static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
@@ -1459,9 +1396,9 @@ static int zfcp_erp_thread(void *data)
1459 zfcp_erp_wakeup(adapter); 1396 zfcp_erp_wakeup(adapter);
1460 } 1397 }
1461 1398
1462 zfcp_rec_dbf_event_thread(4, adapter); 1399 zfcp_rec_dbf_event_thread_lock(4, adapter);
1463 down_interruptible(&adapter->erp_ready_sem); 1400 down_interruptible(&adapter->erp_ready_sem);
1464 zfcp_rec_dbf_event_thread(5, adapter); 1401 zfcp_rec_dbf_event_thread_lock(5, adapter);
1465 } 1402 }
1466 1403
1467 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); 1404 atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
@@ -1484,7 +1421,7 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
1484 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); 1421 retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
1485 if (retval < 0) { 1422 if (retval < 0) {
1486 dev_err(&adapter->ccw_device->dev, 1423 dev_err(&adapter->ccw_device->dev,
1487 "Creation of ERP thread failed.\n"); 1424 "Creating an ERP thread for the FCP device failed.\n");
1488 return retval; 1425 return retval;
1489 } 1426 }
1490 wait_event(adapter->erp_thread_wqh, 1427 wait_event(adapter->erp_thread_wqh,
@@ -1506,7 +1443,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
1506{ 1443{
1507 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); 1444 atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
1508 up(&adapter->erp_ready_sem); 1445 up(&adapter->erp_ready_sem);
1509 zfcp_rec_dbf_event_thread_lock(2, adapter); 1446 zfcp_rec_dbf_event_thread_lock(3, adapter);
1510 1447
1511 wait_event(adapter->erp_thread_wqh, 1448 wait_event(adapter->erp_thread_wqh,
1512 !(atomic_read(&adapter->status) & 1449 !(atomic_read(&adapter->status) &
@@ -1526,7 +1463,6 @@ void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, u8 id, void *ref)
1526{ 1463{
1527 zfcp_erp_modify_adapter_status(adapter, id, ref, 1464 zfcp_erp_modify_adapter_status(adapter, id, ref,
1528 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1465 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1529 dev_err(&adapter->ccw_device->dev, "Adapter ERP failed.\n");
1530} 1466}
1531 1467
1532/** 1468/**
@@ -1539,15 +1475,6 @@ void zfcp_erp_port_failed(struct zfcp_port *port, u8 id, void *ref)
1539{ 1475{
1540 zfcp_erp_modify_port_status(port, id, ref, 1476 zfcp_erp_modify_port_status(port, id, ref,
1541 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1477 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1542
1543 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_WKA)
1544 dev_err(&port->adapter->ccw_device->dev,
1545 "Port ERP failed for WKA port d_id=0x%06x.\n",
1546 port->d_id);
1547 else
1548 dev_err(&port->adapter->ccw_device->dev,
1549 "Port ERP failed for port wwpn=0x%016Lx.\n",
1550 port->wwpn);
1551} 1478}
1552 1479
1553/** 1480/**
@@ -1560,10 +1487,6 @@ void zfcp_erp_unit_failed(struct zfcp_unit *unit, u8 id, void *ref)
1560{ 1487{
1561 zfcp_erp_modify_unit_status(unit, id, ref, 1488 zfcp_erp_modify_unit_status(unit, id, ref,
1562 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); 1489 ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
1563
1564 dev_err(&unit->port->adapter->ccw_device->dev,
1565 "Unit ERP failed for unit 0x%016Lx on port 0x%016Lx.\n",
1566 unit->fcp_lun, unit->port->wwpn);
1567} 1490}
1568 1491
1569/** 1492/**
@@ -1754,9 +1677,8 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, u8 id,
1754 1677
1755 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1678 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1756 ZFCP_STATUS_COMMON_ACCESS_BOXED))) { 1679 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1757 if (!(status & ZFCP_STATUS_PORT_WKA)) 1680 list_for_each_entry(unit, &port->unit_list_head, list)
1758 list_for_each_entry(unit, &port->unit_list_head, list) 1681 zfcp_erp_unit_access_changed(unit, id, ref);
1759 zfcp_erp_unit_access_changed(unit, id, ref);
1760 return; 1682 return;
1761 } 1683 }
1762 1684
@@ -1779,10 +1701,7 @@ void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, u8 id,
1779 return; 1701 return;
1780 1702
1781 read_lock_irqsave(&zfcp_data.config_lock, flags); 1703 read_lock_irqsave(&zfcp_data.config_lock, flags);
1782 if (adapter->nameserver_port)
1783 zfcp_erp_port_access_changed(adapter->nameserver_port, id, ref);
1784 list_for_each_entry(port, &adapter->port_list_head, list) 1704 list_for_each_entry(port, &adapter->port_list_head, list)
1785 if (port != adapter->nameserver_port) 1705 zfcp_erp_port_access_changed(port, id, ref);
1786 zfcp_erp_port_access_changed(port, id, ref);
1787 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1706 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1788} 1707}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index edfdb21591f3..b5adeda93e1d 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -12,16 +12,14 @@
12#include "zfcp_def.h" 12#include "zfcp_def.h"
13 13
14/* zfcp_aux.c */ 14/* zfcp_aux.c */
15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, 15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
16 fcp_lun_t); 16extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
17extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *,
18 wwn_t);
19extern int zfcp_adapter_enqueue(struct ccw_device *); 17extern int zfcp_adapter_enqueue(struct ccw_device *);
20extern void zfcp_adapter_dequeue(struct zfcp_adapter *); 18extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, wwn_t, u32, 19extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
22 u32); 20 u32);
23extern void zfcp_port_dequeue(struct zfcp_port *); 21extern void zfcp_port_dequeue(struct zfcp_port *);
24extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, fcp_lun_t); 22extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
25extern void zfcp_unit_dequeue(struct zfcp_unit *); 23extern void zfcp_unit_dequeue(struct zfcp_unit *);
26extern int zfcp_reqlist_isempty(struct zfcp_adapter *); 24extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
27extern void zfcp_sg_free_table(struct scatterlist *, int); 25extern void zfcp_sg_free_table(struct scatterlist *, int);
@@ -29,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
29 27
30/* zfcp_ccw.c */ 28/* zfcp_ccw.c */
31extern int zfcp_ccw_register(void); 29extern int zfcp_ccw_register(void);
30extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
32 31
33/* zfcp_cfdc.c */ 32/* zfcp_cfdc.c */
34extern struct miscdevice zfcp_cfdc_misc; 33extern struct miscdevice zfcp_cfdc_misc;
@@ -50,6 +49,8 @@ extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
50 struct fsf_status_read_buffer *); 49 struct fsf_status_read_buffer *);
51extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, 50extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
52 int); 51 int);
52extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
53 struct zfcp_fsf_req *);
53extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); 54extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
54extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); 55extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
55extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); 56extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
@@ -91,17 +92,21 @@ extern void zfcp_erp_port_access_denied(struct zfcp_port *, u8, void *);
91extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *); 92extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, u8, void *);
92extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *); 93extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, u8, void *);
93extern void zfcp_erp_timeout_handler(unsigned long); 94extern void zfcp_erp_timeout_handler(unsigned long);
95extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
94 96
95/* zfcp_fc.c */ 97/* zfcp_fc.c */
96extern int zfcp_scan_ports(struct zfcp_adapter *); 98extern int zfcp_scan_ports(struct zfcp_adapter *);
97extern void _zfcp_scan_ports_later(struct work_struct *); 99extern void _zfcp_scan_ports_later(struct work_struct *);
98extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 100extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
99extern int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *); 101extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
100extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 102extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
101extern void zfcp_test_link(struct zfcp_port *); 103extern void zfcp_test_link(struct zfcp_port *);
104extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
102 105
103/* zfcp_fsf.c */ 106/* zfcp_fsf.c */
104extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 107extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
108extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *);
109extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *);
105extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 110extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
106extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 111extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
107extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 112extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -135,10 +140,8 @@ extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
135extern int zfcp_qdio_allocate(struct zfcp_adapter *); 140extern int zfcp_qdio_allocate(struct zfcp_adapter *);
136extern void zfcp_qdio_free(struct zfcp_adapter *); 141extern void zfcp_qdio_free(struct zfcp_adapter *);
137extern int zfcp_qdio_send(struct zfcp_fsf_req *); 142extern int zfcp_qdio_send(struct zfcp_fsf_req *);
138extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req( 143extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
139 struct zfcp_fsf_req *); 144extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
140extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_curr(
141 struct zfcp_fsf_req *);
142extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, 145extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
143 struct scatterlist *, int); 146 struct scatterlist *, int);
144extern int zfcp_qdio_open(struct zfcp_adapter *); 147extern int zfcp_qdio_open(struct zfcp_adapter *);
@@ -148,14 +151,12 @@ extern void zfcp_qdio_close(struct zfcp_adapter *);
148extern struct zfcp_data zfcp_data; 151extern struct zfcp_data zfcp_data;
149extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); 152extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
150extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 153extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
151extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
152extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 154extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
153extern struct fc_function_template zfcp_transport_functions; 155extern struct fc_function_template zfcp_transport_functions;
154 156
155/* zfcp_sysfs.c */ 157/* zfcp_sysfs.c */
156extern struct attribute_group zfcp_sysfs_unit_attrs; 158extern struct attribute_group zfcp_sysfs_unit_attrs;
157extern struct attribute_group zfcp_sysfs_adapter_attrs; 159extern struct attribute_group zfcp_sysfs_adapter_attrs;
158extern struct attribute_group zfcp_sysfs_ns_port_attrs;
159extern struct attribute_group zfcp_sysfs_port_attrs; 160extern struct attribute_group zfcp_sysfs_port_attrs;
160extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 161extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
161extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 162extern struct device_attribute *zfcp_sysfs_shost_attrs[];
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 56196c98c07b..1a7c80a77ff5 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -39,6 +39,84 @@ struct zfcp_gpn_ft {
39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS]; 39 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
40}; 40};
41 41
42struct zfcp_fc_ns_handler_data {
43 struct completion done;
44 void (*handler)(unsigned long);
45 unsigned long handler_data;
46};
47
48static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
49{
50 if (mutex_lock_interruptible(&wka_port->mutex))
51 return -ERESTARTSYS;
52
53 if (wka_port->status != ZFCP_WKA_PORT_ONLINE) {
54 wka_port->status = ZFCP_WKA_PORT_OPENING;
55 if (zfcp_fsf_open_wka_port(wka_port))
56 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
57 }
58
59 mutex_unlock(&wka_port->mutex);
60
61 wait_event_timeout(
62 wka_port->completion_wq,
63 wka_port->status == ZFCP_WKA_PORT_ONLINE ||
64 wka_port->status == ZFCP_WKA_PORT_OFFLINE,
65 HZ >> 1);
66
67 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) {
68 atomic_inc(&wka_port->refcount);
69 return 0;
70 }
71 return -EIO;
72}
73
74static void zfcp_wka_port_offline(struct work_struct *work)
75{
76 struct delayed_work *dw = container_of(work, struct delayed_work, work);
77 struct zfcp_wka_port *wka_port =
78 container_of(dw, struct zfcp_wka_port, work);
79
80 wait_event(wka_port->completion_wq,
81 atomic_read(&wka_port->refcount) == 0);
82
83 mutex_lock(&wka_port->mutex);
84 if ((atomic_read(&wka_port->refcount) != 0) ||
85 (wka_port->status != ZFCP_WKA_PORT_ONLINE))
86 goto out;
87
88 wka_port->status = ZFCP_WKA_PORT_CLOSING;
89 if (zfcp_fsf_close_wka_port(wka_port)) {
90 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
91 wake_up(&wka_port->completion_wq);
92 }
93out:
94 mutex_unlock(&wka_port->mutex);
95}
96
97static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
98{
99 if (atomic_dec_return(&wka_port->refcount) != 0)
100 return;
101 /* wait 10 milliseconds, other requests might pop in */
102 schedule_delayed_work(&wka_port->work, HZ / 100);
103}
104
105void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter)
106{
107 struct zfcp_wka_port *wka_port = &adapter->nsp;
108
109 init_waitqueue_head(&wka_port->completion_wq);
110
111 wka_port->adapter = adapter;
112 wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE;
113
114 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
115 atomic_set(&wka_port->refcount, 0);
116 mutex_init(&wka_port->mutex);
117 INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
118}
119
42static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 120static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
43 struct fcp_rscn_element *elem) 121 struct fcp_rscn_element *elem)
44{ 122{
@@ -47,10 +125,8 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
47 125
48 read_lock_irqsave(&zfcp_data.config_lock, flags); 126 read_lock_irqsave(&zfcp_data.config_lock, flags);
49 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 127 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
50 if (atomic_test_mask(ZFCP_STATUS_PORT_WKA, &port->status))
51 continue;
52 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */ 128 /* FIXME: ZFCP_STATUS_PORT_DID_DID check is racy */
53 if (!atomic_test_mask(ZFCP_STATUS_PORT_DID_DID, &port->status)) 129 if (!(atomic_read(&port->status) & ZFCP_STATUS_PORT_DID_DID))
54 /* Try to connect to unused ports anyway. */ 130 /* Try to connect to unused ports anyway. */
55 zfcp_erp_port_reopen(port, 131 zfcp_erp_port_reopen(port,
56 ZFCP_STATUS_COMMON_ERP_FAILED, 132 ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -102,7 +178,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
102 schedule_work(&fsf_req->adapter->scan_work); 178 schedule_work(&fsf_req->adapter->scan_work);
103} 179}
104 180
105static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, wwn_t wwpn) 181static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
106{ 182{
107 struct zfcp_adapter *adapter = req->adapter; 183 struct zfcp_adapter *adapter = req->adapter;
108 struct zfcp_port *port; 184 struct zfcp_port *port;
@@ -157,7 +233,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
157 zfcp_fc_incoming_rscn(fsf_req); 233 zfcp_fc_incoming_rscn(fsf_req);
158} 234}
159 235
160static void zfcp_ns_gid_pn_handler(unsigned long data) 236static void zfcp_fc_ns_handler(unsigned long data)
237{
238 struct zfcp_fc_ns_handler_data *compl_rec =
239 (struct zfcp_fc_ns_handler_data *) data;
240
241 if (compl_rec->handler)
242 compl_rec->handler(compl_rec->handler_data);
243
244 complete(&compl_rec->done);
245}
246
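
zfcp_fc_ns_handler() is a small adapter: it runs an optional per-request evaluation callback and then completes the submitter's completion, so GID_PN and GPN_FT can share one waiting scheme. From the submitter's side the pattern looks roughly like this, matching the two users further down; my_eval and my_eval_arg stand in for an evaluation routine such as zfcp_fc_ns_gid_pn_eval() and its argument:

	struct zfcp_fc_ns_handler_data compl_rec;

	init_completion(&compl_rec.done);
	compl_rec.handler = my_eval;		/* or NULL if there is nothing to parse */
	compl_rec.handler_data = (unsigned long)my_eval_arg;

	ct->handler = zfcp_fc_ns_handler;
	ct->handler_data = (unsigned long)&compl_rec;

	if (!zfcp_fsf_send_ct(ct, NULL, NULL))
		wait_for_completion(&compl_rec.done);	/* my_eval has run by the time this returns */
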
247static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
161{ 248{
162 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data; 249 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
163 struct zfcp_send_ct *ct = &gid_pn->ct; 250 struct zfcp_send_ct *ct = &gid_pn->ct;
@@ -166,43 +253,31 @@ static void zfcp_ns_gid_pn_handler(unsigned long data)
166 struct zfcp_port *port = gid_pn->port; 253 struct zfcp_port *port = gid_pn->port;
167 254
168 if (ct->status) 255 if (ct->status)
169 goto out; 256 return;
170 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) { 257 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) {
171 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status); 258 atomic_set_mask(ZFCP_STATUS_PORT_INVALID_WWPN, &port->status);
172 goto out; 259 return;
173 } 260 }
174 /* paranoia */ 261 /* paranoia */
175 if (ct_iu_req->wwpn != port->wwpn) 262 if (ct_iu_req->wwpn != port->wwpn)
176 goto out; 263 return;
177 /* looks like a valid d_id */ 264 /* looks like a valid d_id */
178 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 265 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
179 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status); 266 atomic_set_mask(ZFCP_STATUS_PORT_DID_DID, &port->status);
180out:
181 mempool_free(gid_pn, port->adapter->pool.data_gid_pn);
182} 267}
183 268
184/** 269static int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
185 * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request 270 struct zfcp_gid_pn_data *gid_pn)
186 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
187 * return: -ENOMEM on error, 0 otherwise
188 */
189int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
190{ 271{
191 int ret;
192 struct zfcp_gid_pn_data *gid_pn;
193 struct zfcp_adapter *adapter = erp_action->adapter; 272 struct zfcp_adapter *adapter = erp_action->adapter;
194 273 struct zfcp_fc_ns_handler_data compl_rec;
195 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); 274 int ret;
196 if (!gid_pn)
197 return -ENOMEM;
198
199 memset(gid_pn, 0, sizeof(*gid_pn));
200 275
201 /* setup parameters for send generic command */ 276 /* setup parameters for send generic command */
202 gid_pn->port = erp_action->port; 277 gid_pn->port = erp_action->port;
203 gid_pn->ct.port = adapter->nameserver_port; 278 gid_pn->ct.wka_port = &adapter->nsp;
204 gid_pn->ct.handler = zfcp_ns_gid_pn_handler; 279 gid_pn->ct.handler = zfcp_fc_ns_handler;
205 gid_pn->ct.handler_data = (unsigned long) gid_pn; 280 gid_pn->ct.handler_data = (unsigned long) &compl_rec;
206 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 281 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
207 gid_pn->ct.req = &gid_pn->req; 282 gid_pn->ct.req = &gid_pn->req;
208 gid_pn->ct.resp = &gid_pn->resp; 283 gid_pn->ct.resp = &gid_pn->resp;
@@ -222,10 +297,42 @@ int zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
222 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE; 297 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
223 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; 298 gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
224 299
300 init_completion(&compl_rec.done);
301 compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
302 compl_rec.handler_data = (unsigned long) gid_pn;
225 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, 303 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
226 erp_action); 304 erp_action);
305 if (!ret)
306 wait_for_completion(&compl_rec.done);
307 return ret;
308}
309
310/**
311 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
312 * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
313 * return: 0 on success, error code otherwise
314 */
315int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
316{
317 int ret;
318 struct zfcp_gid_pn_data *gid_pn;
319 struct zfcp_adapter *adapter = erp_action->adapter;
320
321 gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
322 if (!gid_pn)
323 return -ENOMEM;
324
325 memset(gid_pn, 0, sizeof(*gid_pn));
326
327 ret = zfcp_wka_port_get(&adapter->nsp);
227 if (ret) 328 if (ret)
228 mempool_free(gid_pn, adapter->pool.data_gid_pn); 329 goto out;
330
331 ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
332
333 zfcp_wka_port_put(&adapter->nsp);
334out:
335 mempool_free(gid_pn, adapter->pool.data_gid_pn);
229 return ret; 336 return ret;
230} 337}
231 338
@@ -255,14 +362,14 @@ struct zfcp_els_adisc {
255 struct scatterlist req; 362 struct scatterlist req;
256 struct scatterlist resp; 363 struct scatterlist resp;
257 struct zfcp_ls_adisc ls_adisc; 364 struct zfcp_ls_adisc ls_adisc;
258 struct zfcp_ls_adisc_acc ls_adisc_acc; 365 struct zfcp_ls_adisc ls_adisc_acc;
259}; 366};
260 367
261static void zfcp_fc_adisc_handler(unsigned long data) 368static void zfcp_fc_adisc_handler(unsigned long data)
262{ 369{
263 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; 370 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data;
264 struct zfcp_port *port = adisc->els.port; 371 struct zfcp_port *port = adisc->els.port;
265 struct zfcp_ls_adisc_acc *ls_adisc = &adisc->ls_adisc_acc; 372 struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc;
266 373
267 if (adisc->els.status) { 374 if (adisc->els.status) {
268 /* request rejected or timed out */ 375 /* request rejected or timed out */
@@ -295,7 +402,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
295 sg_init_one(adisc->els.req, &adisc->ls_adisc, 402 sg_init_one(adisc->els.req, &adisc->ls_adisc,
296 sizeof(struct zfcp_ls_adisc)); 403 sizeof(struct zfcp_ls_adisc));
297 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, 404 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
298 sizeof(struct zfcp_ls_adisc_acc)); 405 sizeof(struct zfcp_ls_adisc));
299 406
300 adisc->els.req_count = 1; 407 adisc->els.req_count = 1;
301 adisc->els.resp_count = 1; 408 adisc->els.resp_count = 1;
@@ -338,30 +445,6 @@ void zfcp_test_link(struct zfcp_port *port)
338 zfcp_erp_port_forced_reopen(port, 0, 65, NULL); 445 zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
339} 446}
340 447
341static int zfcp_scan_get_nameserver(struct zfcp_adapter *adapter)
342{
343 int ret;
344
345 if (!adapter->nameserver_port)
346 return -EINTR;
347
348 if (!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
349 &adapter->nameserver_port->status)) {
350 ret = zfcp_erp_port_reopen(adapter->nameserver_port, 0, 148,
351 NULL);
352 if (ret)
353 return ret;
354 zfcp_erp_wait(adapter);
355 }
356 return !atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
357 &adapter->nameserver_port->status);
358}
359
360static void zfcp_gpn_ft_handler(unsigned long _done)
361{
362 complete((struct completion *)_done);
363}
364
365static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft) 448static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
366{ 449{
367 struct scatterlist *sg = &gpn_ft->sg_req; 450 struct scatterlist *sg = &gpn_ft->sg_req;
@@ -403,7 +486,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
403{ 486{
404 struct zfcp_send_ct *ct = &gpn_ft->ct; 487 struct zfcp_send_ct *ct = &gpn_ft->ct;
405 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 488 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
406 struct completion done; 489 struct zfcp_fc_ns_handler_data compl_rec;
407 int ret; 490 int ret;
408 491
409 /* prepare CT IU for GPN_FT */ 492 /* prepare CT IU for GPN_FT */
@@ -420,19 +503,20 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
420 req->fc4_type = ZFCP_CT_SCSI_FCP; 503 req->fc4_type = ZFCP_CT_SCSI_FCP;
421 504
422 /* prepare zfcp_send_ct */ 505 /* prepare zfcp_send_ct */
423 ct->port = adapter->nameserver_port; 506 ct->wka_port = &adapter->nsp;
424 ct->handler = zfcp_gpn_ft_handler; 507 ct->handler = zfcp_fc_ns_handler;
425 ct->handler_data = (unsigned long)&done; 508 ct->handler_data = (unsigned long)&compl_rec;
426 ct->timeout = 10; 509 ct->timeout = 10;
427 ct->req = &gpn_ft->sg_req; 510 ct->req = &gpn_ft->sg_req;
428 ct->resp = gpn_ft->sg_resp; 511 ct->resp = gpn_ft->sg_resp;
429 ct->req_count = 1; 512 ct->req_count = 1;
430 ct->resp_count = ZFCP_GPN_FT_BUFFERS; 513 ct->resp_count = ZFCP_GPN_FT_BUFFERS;
431 514
432 init_completion(&done); 515 init_completion(&compl_rec.done);
516 compl_rec.handler = NULL;
433 ret = zfcp_fsf_send_ct(ct, NULL, NULL); 517 ret = zfcp_fsf_send_ct(ct, NULL, NULL);
434 if (!ret) 518 if (!ret)
435 wait_for_completion(&done); 519 wait_for_completion(&compl_rec.done);
436 return ret; 520 return ret;
437} 521}
438 522
@@ -442,9 +526,8 @@ static void zfcp_validate_port(struct zfcp_port *port)
442 526
443 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); 527 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
444 528
445 if (port == adapter->nameserver_port) 529 if ((port->supported_classes != 0) ||
446 return; 530 !list_empty(&port->unit_list_head)) {
447 if ((port->supported_classes != 0) || (port->units != 0)) {
448 zfcp_port_put(port); 531 zfcp_port_put(port);
449 return; 532 return;
450 } 533 }
@@ -460,7 +543,7 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
460 struct scatterlist *sg = gpn_ft->sg_resp; 543 struct scatterlist *sg = gpn_ft->sg_resp;
461 struct ct_hdr *hdr = sg_virt(sg); 544 struct ct_hdr *hdr = sg_virt(sg);
462 struct gpn_ft_resp_acc *acc = sg_virt(sg); 545 struct gpn_ft_resp_acc *acc = sg_virt(sg);
463 struct zfcp_adapter *adapter = ct->port->adapter; 546 struct zfcp_adapter *adapter = ct->wka_port->adapter;
464 struct zfcp_port *port, *tmp; 547 struct zfcp_port *port, *tmp;
465 u32 d_id; 548 u32 d_id;
466 int ret = 0, x, last = 0; 549 int ret = 0, x, last = 0;
@@ -490,6 +573,9 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
490 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | 573 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 |
491 acc->port_id[2]; 574 acc->port_id[2];
492 575
576 /* don't attach ports with a well known address */
577 if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA)
578 continue;
493 /* skip the adapter's port and known remote ports */ 579 /* skip the adapter's port and known remote ports */
494 if (acc->wwpn == fc_host_port_name(adapter->scsi_host)) 580 if (acc->wwpn == fc_host_port_name(adapter->scsi_host))
495 continue; 581 continue;
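In the GPN_FT evaluation above, the scan rebuilds the 24-bit destination ID from the three port_id bytes and now skips any D_ID in the well-known-address range before creating ports. The standalone check below restates that mask test; the 0xFFFFF0 mask is an assumption based on the FC well-known address range, not a value taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* Assumed: FC well-known addresses occupy 0xFFFFF0..0xFFFFFF. */
#define DID_WKA 0xfffff0u

static uint32_t make_d_id(uint8_t b0, uint8_t b1, uint8_t b2)
{
	return (uint32_t)b0 << 16 | (uint32_t)b1 << 8 | b2;	/* as in the hunk */
}

static int is_wka(uint32_t d_id)
{
	return (d_id & DID_WKA) == DID_WKA;
}

int main(void)
{
	uint32_t ids[3];
	unsigned int i;

	ids[0] = make_d_id(0x01, 0x02, 0x03);	/* ordinary N_Port */
	ids[1] = make_d_id(0xff, 0xff, 0xfc);	/* directory server */
	ids[2] = make_d_id(0xff, 0xff, 0xfd);	/* fabric controller */

	for (i = 0; i < 3; i++)
		printf("0x%06x -> %s\n", ids[i],
		       is_wka(ids[i]) ? "skip (WKA)" : "attach");
	return 0;
}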
@@ -528,13 +614,15 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
528 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT) 614 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
529 return 0; 615 return 0;
530 616
531 ret = zfcp_scan_get_nameserver(adapter); 617 ret = zfcp_wka_port_get(&adapter->nsp);
532 if (ret) 618 if (ret)
533 return ret; 619 return ret;
534 620
535 gpn_ft = zfcp_alloc_sg_env(); 621 gpn_ft = zfcp_alloc_sg_env();
536 if (!gpn_ft) 622 if (!gpn_ft) {
537 return -ENOMEM; 623 ret = -ENOMEM;
624 goto out;
625 }
538 626
539 for (i = 0; i < 3; i++) { 627 for (i = 0; i < 3; i++) {
540 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter); 628 ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
@@ -547,7 +635,8 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
547 } 635 }
548 } 636 }
549 zfcp_free_sg_env(gpn_ft); 637 zfcp_free_sg_env(gpn_ft);
550 638out:
639 zfcp_wka_port_put(&adapter->nsp);
551 return ret; 640 return ret;
552} 641}
553 642
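zfcp_scan_ports above now brackets the whole GPN_FT scan with zfcp_wka_port_get()/zfcp_wka_port_put() on the nameserver port and funnels every exit through the out: label so the reference is always dropped. Those helpers are not part of this hunk; the sketch below only models the reference-counted open/close idea with invented userspace types, where the first user opens the port and the last user releases it.

#include <pthread.h>
#include <stdio.h>

enum wka_status { WKA_OFFLINE, WKA_ONLINE };

struct wka_port {
	pthread_mutex_t mutex;
	unsigned int refcount;
	enum wka_status status;
};

/* First caller triggers the (pretend) open; later callers just take a ref. */
static int wka_port_get(struct wka_port *wka)
{
	int up;

	pthread_mutex_lock(&wka->mutex);
	if (wka->refcount++ == 0) {
		printf("opening nameserver port\n");
		wka->status = WKA_ONLINE;	/* the driver waits for the FSF open here */
	}
	up = (wka->status == WKA_ONLINE);
	pthread_mutex_unlock(&wka->mutex);
	return up ? 0 : -1;
}

/* Last caller lets the port go again. */
static void wka_port_put(struct wka_port *wka)
{
	pthread_mutex_lock(&wka->mutex);
	if (--wka->refcount == 0) {
		printf("closing nameserver port\n");
		wka->status = WKA_OFFLINE;	/* the driver defers this instead */
	}
	pthread_mutex_unlock(&wka->mutex);
}

int main(void)
{
	struct wka_port nsp = { PTHREAD_MUTEX_INITIALIZER, 0, WKA_OFFLINE };

	if (wka_port_get(&nsp))
		return 1;
	/* ... issue GID_PN / GPN_FT requests against the nameserver here ... */
	wka_port_put(&nsp);
	return 0;
}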
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 49dbeb754e5f..739356a5c123 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -50,19 +50,16 @@ static u32 fsf_qtcb_type[] = {
50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 50 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
51}; 51};
52 52
53static const char *zfcp_act_subtable_type[] = {
54 "unknown", "OS", "WWPN", "DID", "LUN"
55};
56
57static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) 53static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
58{ 54{
59 u16 subtable = table >> 16; 55 u16 subtable = table >> 16;
60 u16 rule = table & 0xffff; 56 u16 rule = table & 0xffff;
57 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
61 58
62 if (subtable && subtable < ARRAY_SIZE(zfcp_act_subtable_type)) 59 if (subtable && subtable < ARRAY_SIZE(act_type))
63 dev_warn(&adapter->ccw_device->dev, 60 dev_warn(&adapter->ccw_device->dev,
64 "Access denied in subtable %s, rule %d.\n", 61 "Access denied according to ACT rule type %s, "
65 zfcp_act_subtable_type[subtable], rule); 62 "rule %d\n", act_type[subtable], rule);
66} 63}
67 64
68static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, 65static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
@@ -70,8 +67,8 @@ static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
70{ 67{
71 struct fsf_qtcb_header *header = &req->qtcb->header; 68 struct fsf_qtcb_header *header = &req->qtcb->header;
72 dev_warn(&req->adapter->ccw_device->dev, 69 dev_warn(&req->adapter->ccw_device->dev,
73 "Access denied, cannot send command to port 0x%016Lx.\n", 70 "Access denied to port 0x%016Lx\n",
74 port->wwpn); 71 (unsigned long long)port->wwpn);
75 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 72 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
76 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 73 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
77 zfcp_erp_port_access_denied(port, 55, req); 74 zfcp_erp_port_access_denied(port, 55, req);
@@ -83,8 +80,9 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
83{ 80{
84 struct fsf_qtcb_header *header = &req->qtcb->header; 81 struct fsf_qtcb_header *header = &req->qtcb->header;
85 dev_warn(&req->adapter->ccw_device->dev, 82 dev_warn(&req->adapter->ccw_device->dev,
86 "Access denied for unit 0x%016Lx on port 0x%016Lx.\n", 83 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
87 unit->fcp_lun, unit->port->wwpn); 84 (unsigned long long)unit->fcp_lun,
85 (unsigned long long)unit->port->wwpn);
88 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); 86 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
89 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); 87 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
90 zfcp_erp_unit_access_denied(unit, 59, req); 88 zfcp_erp_unit_access_denied(unit, 59, req);
@@ -93,9 +91,8 @@ static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
93 91
94static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 92static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
95{ 93{
96 dev_err(&req->adapter->ccw_device->dev, 94 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
97 "Required FC class not supported by adapter, " 95 "operational because of an unsupported FC class\n");
98 "shutting down adapter.\n");
99 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req); 96 zfcp_erp_adapter_shutdown(req->adapter, 0, 123, req);
100 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 97 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
101} 98}
@@ -171,42 +168,6 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
171 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 168 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
172} 169}
173 170
174static void zfcp_fsf_bit_error_threshold(struct zfcp_fsf_req *req)
175{
176 struct zfcp_adapter *adapter = req->adapter;
177 struct fsf_status_read_buffer *sr_buf = req->data;
178 struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
179
180 dev_warn(&adapter->ccw_device->dev,
181 "Warning: bit error threshold data "
182 "received for the adapter: "
183 "link failures = %i, loss of sync errors = %i, "
184 "loss of signal errors = %i, "
185 "primitive sequence errors = %i, "
186 "invalid transmission word errors = %i, "
187 "CRC errors = %i).\n",
188 err->link_failure_error_count,
189 err->loss_of_sync_error_count,
190 err->loss_of_signal_error_count,
191 err->primitive_sequence_error_count,
192 err->invalid_transmission_word_error_count,
193 err->crc_error_count);
194 dev_warn(&adapter->ccw_device->dev,
195 "Additional bit error threshold data of the adapter: "
196 "primitive sequence event time-outs = %i, "
197 "elastic buffer overrun errors = %i, "
198 "advertised receive buffer-to-buffer credit = %i, "
199 "current receice buffer-to-buffer credit = %i, "
200 "advertised transmit buffer-to-buffer credit = %i, "
201 "current transmit buffer-to-buffer credit = %i).\n",
202 err->primitive_sequence_event_timeout_count,
203 err->elastic_buffer_overrun_error_count,
204 err->advertised_receive_b2b_credit,
205 err->current_receive_b2b_credit,
206 err->advertised_transmit_b2b_credit,
207 err->current_transmit_b2b_credit);
208}
209
210static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id, 171static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
211 struct fsf_link_down_info *link_down) 172 struct fsf_link_down_info *link_down)
212{ 173{
@@ -223,62 +184,66 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, u8 id,
223 switch (link_down->error_code) { 184 switch (link_down->error_code) {
224 case FSF_PSQ_LINK_NO_LIGHT: 185 case FSF_PSQ_LINK_NO_LIGHT:
225 dev_warn(&req->adapter->ccw_device->dev, 186 dev_warn(&req->adapter->ccw_device->dev,
226 "The local link is down: no light detected.\n"); 187 "There is no light signal from the local "
188 "fibre channel cable\n");
227 break; 189 break;
228 case FSF_PSQ_LINK_WRAP_PLUG: 190 case FSF_PSQ_LINK_WRAP_PLUG:
229 dev_warn(&req->adapter->ccw_device->dev, 191 dev_warn(&req->adapter->ccw_device->dev,
230 "The local link is down: wrap plug detected.\n"); 192 "There is a wrap plug instead of a fibre "
193 "channel cable\n");
231 break; 194 break;
232 case FSF_PSQ_LINK_NO_FCP: 195 case FSF_PSQ_LINK_NO_FCP:
233 dev_warn(&req->adapter->ccw_device->dev, 196 dev_warn(&req->adapter->ccw_device->dev,
234 "The local link is down: " 197 "The adjacent fibre channel node does not "
235 "adjacent node on link does not support FCP.\n"); 198 "support FCP\n");
236 break; 199 break;
237 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 200 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
238 dev_warn(&req->adapter->ccw_device->dev, 201 dev_warn(&req->adapter->ccw_device->dev,
239 "The local link is down: " 202 "The FCP device is suspended because of a "
240 "firmware update in progress.\n"); 203 "firmware update\n");
241 break; 204 break;
242 case FSF_PSQ_LINK_INVALID_WWPN: 205 case FSF_PSQ_LINK_INVALID_WWPN:
243 dev_warn(&req->adapter->ccw_device->dev, 206 dev_warn(&req->adapter->ccw_device->dev,
244 "The local link is down: " 207 "The FCP device detected a WWPN that is "
245 "duplicate or invalid WWPN detected.\n"); 208 "duplicate or not valid\n");
246 break; 209 break;
247 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 210 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
248 dev_warn(&req->adapter->ccw_device->dev, 211 dev_warn(&req->adapter->ccw_device->dev,
249 "The local link is down: " 212 "The fibre channel fabric does not support NPIV\n");
250 "no support for NPIV by Fabric.\n");
251 break; 213 break;
252 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 214 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
253 dev_warn(&req->adapter->ccw_device->dev, 215 dev_warn(&req->adapter->ccw_device->dev,
254 "The local link is down: " 216 "The FCP adapter cannot support more NPIV ports\n");
255 "out of resource in FCP daughtercard.\n");
256 break; 217 break;
257 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 218 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
258 dev_warn(&req->adapter->ccw_device->dev, 219 dev_warn(&req->adapter->ccw_device->dev,
259 "The local link is down: " 220 "The adjacent switch cannot support "
260 "out of resource in Fabric.\n"); 221 "more NPIV ports\n");
261 break; 222 break;
262 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 223 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
263 dev_warn(&req->adapter->ccw_device->dev, 224 dev_warn(&req->adapter->ccw_device->dev,
264 "The local link is down: " 225 "The FCP adapter could not log in to the "
265 "unable to login to Fabric.\n"); 226 "fibre channel fabric\n");
266 break; 227 break;
267 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 228 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
268 dev_warn(&req->adapter->ccw_device->dev, 229 dev_warn(&req->adapter->ccw_device->dev,
269 "WWPN assignment file corrupted on adapter.\n"); 230 "The WWPN assignment file on the FCP adapter "
231 "has been damaged\n");
270 break; 232 break;
271 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 233 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
272 dev_warn(&req->adapter->ccw_device->dev, 234 dev_warn(&req->adapter->ccw_device->dev,
273 "Mode table corrupted on adapter.\n"); 235 "The mode table on the FCP adapter "
236 "has been damaged\n");
274 break; 237 break;
275 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 238 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
276 dev_warn(&req->adapter->ccw_device->dev, 239 dev_warn(&req->adapter->ccw_device->dev,
277 "No WWPN for assignment table on adapter.\n"); 240 "All NPIV ports on the FCP adapter have "
241 "been assigned\n");
278 break; 242 break;
279 default: 243 default:
280 dev_warn(&req->adapter->ccw_device->dev, 244 dev_warn(&req->adapter->ccw_device->dev,
281 "The local link to adapter is down.\n"); 245 "The link between the FCP adapter and "
246 "the FC fabric is down\n");
282 } 247 }
283out: 248out:
284 zfcp_erp_adapter_failed(adapter, id, req); 249 zfcp_erp_adapter_failed(adapter, id, req);
@@ -286,27 +251,18 @@ out:
286 251
287static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 252static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
288{ 253{
289 struct zfcp_adapter *adapter = req->adapter;
290 struct fsf_status_read_buffer *sr_buf = req->data; 254 struct fsf_status_read_buffer *sr_buf = req->data;
291 struct fsf_link_down_info *ldi = 255 struct fsf_link_down_info *ldi =
292 (struct fsf_link_down_info *) &sr_buf->payload; 256 (struct fsf_link_down_info *) &sr_buf->payload;
293 257
294 switch (sr_buf->status_subtype) { 258 switch (sr_buf->status_subtype) {
295 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 259 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
296 dev_warn(&adapter->ccw_device->dev,
297 "Physical link is down.\n");
298 zfcp_fsf_link_down_info_eval(req, 38, ldi); 260 zfcp_fsf_link_down_info_eval(req, 38, ldi);
299 break; 261 break;
300 case FSF_STATUS_READ_SUB_FDISC_FAILED: 262 case FSF_STATUS_READ_SUB_FDISC_FAILED:
301 dev_warn(&adapter->ccw_device->dev,
302 "Local link is down "
303 "due to failed FDISC login.\n");
304 zfcp_fsf_link_down_info_eval(req, 39, ldi); 263 zfcp_fsf_link_down_info_eval(req, 39, ldi);
305 break; 264 break;
306 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 265 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
307 dev_warn(&adapter->ccw_device->dev,
308 "Local link is down "
309 "due to firmware update on adapter.\n");
310 zfcp_fsf_link_down_info_eval(req, 40, NULL); 266 zfcp_fsf_link_down_info_eval(req, 40, NULL);
311 }; 267 };
312} 268}
@@ -335,14 +291,17 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
335 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 291 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
336 break; 292 break;
337 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 293 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
338 zfcp_fsf_bit_error_threshold(req); 294 dev_warn(&adapter->ccw_device->dev,
295 "The error threshold for checksum statistics "
296 "has been exceeded\n");
297 zfcp_hba_dbf_event_berr(adapter, req);
339 break; 298 break;
340 case FSF_STATUS_READ_LINK_DOWN: 299 case FSF_STATUS_READ_LINK_DOWN:
341 zfcp_fsf_status_read_link_down(req); 300 zfcp_fsf_status_read_link_down(req);
342 break; 301 break;
343 case FSF_STATUS_READ_LINK_UP: 302 case FSF_STATUS_READ_LINK_UP:
344 dev_info(&adapter->ccw_device->dev, 303 dev_info(&adapter->ccw_device->dev,
345 "Local link was replugged.\n"); 304 "The local link has been restored\n");
346 /* All ports should be marked as ready to run again */ 305 /* All ports should be marked as ready to run again */
347 zfcp_erp_modify_adapter_status(adapter, 30, NULL, 306 zfcp_erp_modify_adapter_status(adapter, 30, NULL,
348 ZFCP_STATUS_COMMON_RUNNING, 307 ZFCP_STATUS_COMMON_RUNNING,
@@ -370,7 +329,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
370 zfcp_fsf_req_free(req); 329 zfcp_fsf_req_free(req);
371 330
372 atomic_inc(&adapter->stat_miss); 331 atomic_inc(&adapter->stat_miss);
373 schedule_work(&adapter->stat_work); 332 queue_work(zfcp_data.work_queue, &adapter->stat_work);
374} 333}
375 334
376static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 335static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
@@ -386,8 +345,8 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
386 break; 345 break;
387 case FSF_SQ_NO_RECOM: 346 case FSF_SQ_NO_RECOM:
388 dev_err(&req->adapter->ccw_device->dev, 347 dev_err(&req->adapter->ccw_device->dev,
389 "No recommendation could be given for a " 348 "The FCP adapter reported a problem "
390 "problem on the adapter.\n"); 349 "that cannot be recovered\n");
391 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req); 350 zfcp_erp_adapter_shutdown(req->adapter, 0, 121, req);
392 break; 351 break;
393 } 352 }
@@ -403,8 +362,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
403 switch (req->qtcb->header.fsf_status) { 362 switch (req->qtcb->header.fsf_status) {
404 case FSF_UNKNOWN_COMMAND: 363 case FSF_UNKNOWN_COMMAND:
405 dev_err(&req->adapter->ccw_device->dev, 364 dev_err(&req->adapter->ccw_device->dev,
406 "Command issued by the device driver (0x%x) is " 365 "The FCP adapter does not recognize the command 0x%x\n",
407 "not known by the adapter.\n",
408 req->qtcb->header.fsf_command); 366 req->qtcb->header.fsf_command);
409 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req); 367 zfcp_erp_adapter_shutdown(req->adapter, 0, 120, req);
410 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 368 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -435,11 +393,9 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
435 return; 393 return;
436 case FSF_PROT_QTCB_VERSION_ERROR: 394 case FSF_PROT_QTCB_VERSION_ERROR:
437 dev_err(&adapter->ccw_device->dev, 395 dev_err(&adapter->ccw_device->dev,
438 "The QTCB version requested by zfcp (0x%x) is not " 396 "QTCB version 0x%x not supported by FCP adapter "
439 "supported by the FCP adapter (lowest supported " 397 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
440 "0x%x, highest supported 0x%x).\n", 398 psq->word[0], psq->word[1]);
441 FSF_QTCB_CURRENT_VERSION, psq->word[0],
442 psq->word[1]);
443 zfcp_erp_adapter_shutdown(adapter, 0, 117, req); 399 zfcp_erp_adapter_shutdown(adapter, 0, 117, req);
444 break; 400 break;
445 case FSF_PROT_ERROR_STATE: 401 case FSF_PROT_ERROR_STATE:
@@ -449,8 +405,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
449 break; 405 break;
450 case FSF_PROT_UNSUPP_QTCB_TYPE: 406 case FSF_PROT_UNSUPP_QTCB_TYPE:
451 dev_err(&adapter->ccw_device->dev, 407 dev_err(&adapter->ccw_device->dev,
452 "Packet header type used by the device driver is " 408 "The QTCB type is not supported by the FCP adapter\n");
453 "incompatible with that used on the adapter.\n");
454 zfcp_erp_adapter_shutdown(adapter, 0, 118, req); 409 zfcp_erp_adapter_shutdown(adapter, 0, 118, req);
455 break; 410 break;
456 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 411 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
@@ -459,7 +414,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
459 break; 414 break;
460 case FSF_PROT_DUPLICATE_REQUEST_ID: 415 case FSF_PROT_DUPLICATE_REQUEST_ID:
461 dev_err(&adapter->ccw_device->dev, 416 dev_err(&adapter->ccw_device->dev,
462 "The request identifier 0x%Lx is ambiguous.\n", 417 "0x%Lx is an ambiguous request identifier\n",
463 (unsigned long long)qtcb->bottom.support.req_handle); 418 (unsigned long long)qtcb->bottom.support.req_handle);
464 zfcp_erp_adapter_shutdown(adapter, 0, 78, req); 419 zfcp_erp_adapter_shutdown(adapter, 0, 78, req);
465 break; 420 break;
@@ -479,9 +434,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
479 break; 434 break;
480 default: 435 default:
481 dev_err(&adapter->ccw_device->dev, 436 dev_err(&adapter->ccw_device->dev,
482 "Transfer protocol status information" 437 "0x%x is not a valid transfer protocol status\n",
483 "provided by the adapter (0x%x) "
484 "is not compatible with the device driver.\n",
485 qtcb->prefix.prot_status); 438 qtcb->prefix.prot_status);
486 zfcp_erp_adapter_shutdown(adapter, 0, 119, req); 439 zfcp_erp_adapter_shutdown(adapter, 0, 119, req);
487 } 440 }
@@ -559,33 +512,17 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
559 adapter->peer_wwpn = bottom->plogi_payload.wwpn; 512 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
560 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 513 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
561 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 514 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
562 if (req->erp_action)
563 dev_info(&adapter->ccw_device->dev,
564 "Point-to-Point fibrechannel "
565 "configuration detected.\n");
566 break; 515 break;
567 case FSF_TOPO_FABRIC: 516 case FSF_TOPO_FABRIC:
568 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 517 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
569 if (req->erp_action)
570 dev_info(&adapter->ccw_device->dev,
571 "Switched fabric fibrechannel "
572 "network detected.\n");
573 break; 518 break;
574 case FSF_TOPO_AL: 519 case FSF_TOPO_AL:
575 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 520 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
576 dev_err(&adapter->ccw_device->dev,
577 "Unsupported arbitrated loop fibrechannel "
578 "topology detected, shutting down "
579 "adapter.\n");
580 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
581 return -EIO;
582 default: 521 default:
583 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
584 dev_err(&adapter->ccw_device->dev, 522 dev_err(&adapter->ccw_device->dev,
585 "The fibrechannel topology reported by the" 523 "Unknown or unsupported arbitrated loop "
586 " adapter is not known by the zfcp driver," 524 "fibre channel topology detected\n");
587 " shutting down adapter.\n"); 525 zfcp_erp_adapter_shutdown(adapter, 0, 127, req);
588 zfcp_erp_adapter_shutdown(adapter, 0, 128, req);
589 return -EIO; 526 return -EIO;
590 } 527 }
591 528
@@ -616,11 +553,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
616 553
617 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 554 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
618 dev_err(&adapter->ccw_device->dev, 555 dev_err(&adapter->ccw_device->dev,
619 "Maximum QTCB size (%d bytes) allowed by " 556 "FCP adapter maximum QTCB size (%d bytes) "
620 "the adapter is lower than the minimum " 557 "is too small\n",
621 "required by the driver (%ld bytes).\n", 558 bottom->max_qtcb_size);
622 bottom->max_qtcb_size,
623 sizeof(struct fsf_qtcb));
624 zfcp_erp_adapter_shutdown(adapter, 0, 129, req); 559 zfcp_erp_adapter_shutdown(adapter, 0, 129, req);
625 return; 560 return;
626 } 561 }
@@ -656,15 +591,15 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
656 591
657 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 592 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
658 dev_err(&adapter->ccw_device->dev, 593 dev_err(&adapter->ccw_device->dev,
659 "The adapter only supports newer control block " 594 "The FCP adapter only supports newer "
660 "versions, try updated device driver.\n"); 595 "control block versions\n");
661 zfcp_erp_adapter_shutdown(adapter, 0, 125, req); 596 zfcp_erp_adapter_shutdown(adapter, 0, 125, req);
662 return; 597 return;
663 } 598 }
664 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 599 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
665 dev_err(&adapter->ccw_device->dev, 600 dev_err(&adapter->ccw_device->dev,
666 "The adapter only supports older control block " 601 "The FCP adapter only supports older "
667 "versions, consider a microcode upgrade.\n"); 602 "control block versions\n");
668 zfcp_erp_adapter_shutdown(adapter, 0, 126, req); 603 zfcp_erp_adapter_shutdown(adapter, 0, 126, req);
669 } 604 }
670} 605}
@@ -688,7 +623,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
688 623
689static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 624static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
690{ 625{
691 struct zfcp_adapter *adapter = req->adapter;
692 struct fsf_qtcb *qtcb = req->qtcb; 626 struct fsf_qtcb *qtcb = req->qtcb;
693 627
694 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 628 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
@@ -697,38 +631,47 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
697 switch (qtcb->header.fsf_status) { 631 switch (qtcb->header.fsf_status) {
698 case FSF_GOOD: 632 case FSF_GOOD:
699 zfcp_fsf_exchange_port_evaluate(req); 633 zfcp_fsf_exchange_port_evaluate(req);
700 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
701 break; 634 break;
702 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 635 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
703 zfcp_fsf_exchange_port_evaluate(req); 636 zfcp_fsf_exchange_port_evaluate(req);
704 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
705 zfcp_fsf_link_down_info_eval(req, 43, 637 zfcp_fsf_link_down_info_eval(req, 43,
706 &qtcb->header.fsf_status_qual.link_down_info); 638 &qtcb->header.fsf_status_qual.link_down_info);
707 break; 639 break;
708 } 640 }
709} 641}
710 642
711static int zfcp_fsf_sbal_check(struct zfcp_qdio_queue *queue) 643static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
712{ 644{
713 spin_lock_bh(&queue->lock); 645 struct zfcp_qdio_queue *req_q = &adapter->req_q;
714 if (atomic_read(&queue->count)) 646
647 spin_lock_bh(&adapter->req_q_lock);
648 if (atomic_read(&req_q->count))
715 return 1; 649 return 1;
716 spin_unlock_bh(&queue->lock); 650 spin_unlock_bh(&adapter->req_q_lock);
717 return 0; 651 return 0;
718} 652}
719 653
654static int zfcp_fsf_sbal_available(struct zfcp_adapter *adapter)
655{
656 unsigned int count = atomic_read(&adapter->req_q.count);
657 if (!count)
658 atomic_inc(&adapter->qdio_outb_full);
659 return count > 0;
660}
661
720static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) 662static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
721{ 663{
722 long ret; 664 long ret;
723 struct zfcp_qdio_queue *req_q = &adapter->req_q;
724 665
725 spin_unlock_bh(&req_q->lock); 666 spin_unlock_bh(&adapter->req_q_lock);
726 ret = wait_event_interruptible_timeout(adapter->request_wq, 667 ret = wait_event_interruptible_timeout(adapter->request_wq,
727 zfcp_fsf_sbal_check(req_q), 5 * HZ); 668 zfcp_fsf_sbal_check(adapter), 5 * HZ);
728 if (ret > 0) 669 if (ret > 0)
729 return 0; 670 return 0;
671 if (!ret)
672 atomic_inc(&adapter->qdio_outb_full);
730 673
731 spin_lock_bh(&req_q->lock); 674 spin_lock_bh(&adapter->req_q_lock);
732 return -EIO; 675 return -EIO;
733} 676}
734 677
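The hunk above replaces direct peeks at adapter->req_q.count with zfcp_fsf_sbal_available(), moves the queue lock into the adapter as req_q_lock, and makes zfcp_fsf_req_sbal_get() count a timed-out wait in qdio_outb_full. Functionally the caller waits up to five seconds for a free outbound buffer while the lock is dropped. The sketch below models only that bounded wait with a POSIX condition variable; the names, the -EIO stand-in and the five-second budget mirror the hunk, but nothing here is driver code.

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t req_q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t req_q_wq = PTHREAD_COND_INITIALIZER;
static unsigned int free_sbals;		/* free slots in the outbound queue */
static unsigned int qdio_outb_full;	/* counts waits that found no slot */

/* Wait up to 5 s for a free slot; 0 on success, -1 (think -EIO) on timeout. */
static int req_sbal_get(void)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;

	pthread_mutex_lock(&req_q_lock);
	while (free_sbals == 0 && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&req_q_wq, &req_q_lock, &deadline);

	if (free_sbals == 0) {
		qdio_outb_full++;	/* like atomic_inc(&adapter->qdio_outb_full) */
		pthread_mutex_unlock(&req_q_lock);
		return -1;
	}
	free_sbals--;			/* the caller now owns one buffer */
	pthread_mutex_unlock(&req_q_lock);
	return 0;
}

int main(void)
{
	free_sbals = 1;
	printf("first get: %d\n", req_sbal_get());	/* 0, a slot was free */
	printf("second get: %d\n", req_sbal_get());	/* -1 after about 5 s */
	printf("queue-full events: %u\n", qdio_outb_full);
	return 0;
}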
@@ -765,7 +708,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
765 u32 fsf_cmd, int req_flags, 708 u32 fsf_cmd, int req_flags,
766 mempool_t *pool) 709 mempool_t *pool)
767{ 710{
768 volatile struct qdio_buffer_element *sbale; 711 struct qdio_buffer_element *sbale;
769 712
770 struct zfcp_fsf_req *req; 713 struct zfcp_fsf_req *req;
771 struct zfcp_qdio_queue *req_q = &adapter->req_q; 714 struct zfcp_qdio_queue *req_q = &adapter->req_q;
@@ -867,10 +810,10 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
867{ 810{
868 struct zfcp_fsf_req *req; 811 struct zfcp_fsf_req *req;
869 struct fsf_status_read_buffer *sr_buf; 812 struct fsf_status_read_buffer *sr_buf;
870 volatile struct qdio_buffer_element *sbale; 813 struct qdio_buffer_element *sbale;
871 int retval = -EIO; 814 int retval = -EIO;
872 815
873 spin_lock_bh(&adapter->req_q.lock); 816 spin_lock_bh(&adapter->req_q_lock);
874 if (zfcp_fsf_req_sbal_get(adapter)) 817 if (zfcp_fsf_req_sbal_get(adapter))
875 goto out; 818 goto out;
876 819
@@ -910,7 +853,7 @@ failed_buf:
910 zfcp_fsf_req_free(req); 853 zfcp_fsf_req_free(req);
911 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); 854 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
912out: 855out:
913 spin_unlock_bh(&adapter->req_q.lock); 856 spin_unlock_bh(&adapter->req_q_lock);
914 return retval; 857 return retval;
915} 858}
916 859
@@ -980,11 +923,11 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
980 struct zfcp_unit *unit, 923 struct zfcp_unit *unit,
981 int req_flags) 924 int req_flags)
982{ 925{
983 volatile struct qdio_buffer_element *sbale; 926 struct qdio_buffer_element *sbale;
984 struct zfcp_fsf_req *req = NULL; 927 struct zfcp_fsf_req *req = NULL;
985 928
986 spin_lock(&adapter->req_q.lock); 929 spin_lock(&adapter->req_q_lock);
987 if (!atomic_read(&adapter->req_q.count)) 930 if (!zfcp_fsf_sbal_available(adapter))
988 goto out; 931 goto out;
989 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, 932 req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
990 req_flags, adapter->pool.fsf_req_abort); 933 req_flags, adapter->pool.fsf_req_abort);
@@ -1013,7 +956,7 @@ out_error_free:
1013 zfcp_fsf_req_free(req); 956 zfcp_fsf_req_free(req);
1014 req = NULL; 957 req = NULL;
1015out: 958out:
1016 spin_unlock(&adapter->req_q.lock); 959 spin_unlock(&adapter->req_q_lock);
1017 return req; 960 return req;
1018} 961}
1019 962
@@ -1021,7 +964,6 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1021{ 964{
1022 struct zfcp_adapter *adapter = req->adapter; 965 struct zfcp_adapter *adapter = req->adapter;
1023 struct zfcp_send_ct *send_ct = req->data; 966 struct zfcp_send_ct *send_ct = req->data;
1024 struct zfcp_port *port = send_ct->port;
1025 struct fsf_qtcb_header *header = &req->qtcb->header; 967 struct fsf_qtcb_header *header = &req->qtcb->header;
1026 968
1027 send_ct->status = -EINVAL; 969 send_ct->status = -EINVAL;
@@ -1040,17 +982,14 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1040 case FSF_ADAPTER_STATUS_AVAILABLE: 982 case FSF_ADAPTER_STATUS_AVAILABLE:
1041 switch (header->fsf_status_qual.word[0]){ 983 switch (header->fsf_status_qual.word[0]){
1042 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 984 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1043 zfcp_test_link(port);
1044 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 985 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1045 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 986 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1046 break; 987 break;
1047 } 988 }
1048 break; 989 break;
1049 case FSF_ACCESS_DENIED: 990 case FSF_ACCESS_DENIED:
1050 zfcp_fsf_access_denied_port(req, port);
1051 break; 991 break;
1052 case FSF_PORT_BOXED: 992 case FSF_PORT_BOXED:
1053 zfcp_erp_port_boxed(port, 49, req);
1054 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 993 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1055 ZFCP_STATUS_FSFREQ_RETRY; 994 ZFCP_STATUS_FSFREQ_RETRY;
1056 break; 995 break;
@@ -1101,12 +1040,12 @@ static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
1101int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, 1040int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1102 struct zfcp_erp_action *erp_action) 1041 struct zfcp_erp_action *erp_action)
1103{ 1042{
1104 struct zfcp_port *port = ct->port; 1043 struct zfcp_wka_port *wka_port = ct->wka_port;
1105 struct zfcp_adapter *adapter = port->adapter; 1044 struct zfcp_adapter *adapter = wka_port->adapter;
1106 struct zfcp_fsf_req *req; 1045 struct zfcp_fsf_req *req;
1107 int ret = -EIO; 1046 int ret = -EIO;
1108 1047
1109 spin_lock_bh(&adapter->req_q.lock); 1048 spin_lock_bh(&adapter->req_q_lock);
1110 if (zfcp_fsf_req_sbal_get(adapter)) 1049 if (zfcp_fsf_req_sbal_get(adapter))
1111 goto out; 1050 goto out;
1112 1051
@@ -1123,7 +1062,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1123 goto failed_send; 1062 goto failed_send;
1124 1063
1125 req->handler = zfcp_fsf_send_ct_handler; 1064 req->handler = zfcp_fsf_send_ct_handler;
1126 req->qtcb->header.port_handle = port->handle; 1065 req->qtcb->header.port_handle = wka_port->handle;
1127 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1066 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1128 req->qtcb->bottom.support.timeout = ct->timeout; 1067 req->qtcb->bottom.support.timeout = ct->timeout;
1129 req->data = ct; 1068 req->data = ct;
@@ -1148,7 +1087,7 @@ failed_send:
1148 if (erp_action) 1087 if (erp_action)
1149 erp_action->fsf_req = NULL; 1088 erp_action->fsf_req = NULL;
1150out: 1089out:
1151 spin_unlock_bh(&adapter->req_q.lock); 1090 spin_unlock_bh(&adapter->req_q_lock);
1152 return ret; 1091 return ret;
1153} 1092}
1154 1093
@@ -1218,8 +1157,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1218 ZFCP_STATUS_COMMON_UNBLOCKED))) 1157 ZFCP_STATUS_COMMON_UNBLOCKED)))
1219 return -EBUSY; 1158 return -EBUSY;
1220 1159
1221 spin_lock(&adapter->req_q.lock); 1160 spin_lock(&adapter->req_q_lock);
1222 if (!atomic_read(&adapter->req_q.count)) 1161 if (!zfcp_fsf_sbal_available(adapter))
1223 goto out; 1162 goto out;
1224 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, 1163 req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
1225 ZFCP_REQ_AUTO_CLEANUP, NULL); 1164 ZFCP_REQ_AUTO_CLEANUP, NULL);
@@ -1228,8 +1167,8 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1228 goto out; 1167 goto out;
1229 } 1168 }
1230 1169
1231 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 1170 ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
1232 FSF_MAX_SBALS_PER_ELS_REQ); 1171
1233 if (ret) 1172 if (ret)
1234 goto failed_send; 1173 goto failed_send;
1235 1174
@@ -1252,19 +1191,19 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1252failed_send: 1191failed_send:
1253 zfcp_fsf_req_free(req); 1192 zfcp_fsf_req_free(req);
1254out: 1193out:
1255 spin_unlock(&adapter->req_q.lock); 1194 spin_unlock(&adapter->req_q_lock);
1256 return ret; 1195 return ret;
1257} 1196}
1258 1197
1259int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1198int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1260{ 1199{
1261 volatile struct qdio_buffer_element *sbale; 1200 struct qdio_buffer_element *sbale;
1262 struct zfcp_fsf_req *req; 1201 struct zfcp_fsf_req *req;
1263 struct zfcp_adapter *adapter = erp_action->adapter; 1202 struct zfcp_adapter *adapter = erp_action->adapter;
1264 int retval = -EIO; 1203 int retval = -EIO;
1265 1204
1266 spin_lock_bh(&adapter->req_q.lock); 1205 spin_lock_bh(&adapter->req_q_lock);
1267 if (!atomic_read(&adapter->req_q.count)) 1206 if (!zfcp_fsf_sbal_available(adapter))
1268 goto out; 1207 goto out;
1269 req = zfcp_fsf_req_create(adapter, 1208 req = zfcp_fsf_req_create(adapter,
1270 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1209 FSF_QTCB_EXCHANGE_CONFIG_DATA,
@@ -1295,18 +1234,18 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1295 erp_action->fsf_req = NULL; 1234 erp_action->fsf_req = NULL;
1296 } 1235 }
1297out: 1236out:
1298 spin_unlock_bh(&adapter->req_q.lock); 1237 spin_unlock_bh(&adapter->req_q_lock);
1299 return retval; 1238 return retval;
1300} 1239}
1301 1240
1302int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, 1241int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1303 struct fsf_qtcb_bottom_config *data) 1242 struct fsf_qtcb_bottom_config *data)
1304{ 1243{
1305 volatile struct qdio_buffer_element *sbale; 1244 struct qdio_buffer_element *sbale;
1306 struct zfcp_fsf_req *req = NULL; 1245 struct zfcp_fsf_req *req = NULL;
1307 int retval = -EIO; 1246 int retval = -EIO;
1308 1247
1309 spin_lock_bh(&adapter->req_q.lock); 1248 spin_lock_bh(&adapter->req_q_lock);
1310 if (zfcp_fsf_req_sbal_get(adapter)) 1249 if (zfcp_fsf_req_sbal_get(adapter))
1311 goto out; 1250 goto out;
1312 1251
@@ -1334,7 +1273,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
1334 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1273 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1335 retval = zfcp_fsf_req_send(req); 1274 retval = zfcp_fsf_req_send(req);
1336out: 1275out:
1337 spin_unlock_bh(&adapter->req_q.lock); 1276 spin_unlock_bh(&adapter->req_q_lock);
1338 if (!retval) 1277 if (!retval)
1339 wait_event(req->completion_wq, 1278 wait_event(req->completion_wq,
1340 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1279 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1351,7 +1290,7 @@ out:
1351 */ 1290 */
1352int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) 1291int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1353{ 1292{
1354 volatile struct qdio_buffer_element *sbale; 1293 struct qdio_buffer_element *sbale;
1355 struct zfcp_fsf_req *req; 1294 struct zfcp_fsf_req *req;
1356 struct zfcp_adapter *adapter = erp_action->adapter; 1295 struct zfcp_adapter *adapter = erp_action->adapter;
1357 int retval = -EIO; 1296 int retval = -EIO;
@@ -1359,8 +1298,8 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1359 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1298 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1360 return -EOPNOTSUPP; 1299 return -EOPNOTSUPP;
1361 1300
1362 spin_lock_bh(&adapter->req_q.lock); 1301 spin_lock_bh(&adapter->req_q_lock);
1363 if (!atomic_read(&adapter->req_q.count)) 1302 if (!zfcp_fsf_sbal_available(adapter))
1364 goto out; 1303 goto out;
1365 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 1304 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
1366 ZFCP_REQ_AUTO_CLEANUP, 1305 ZFCP_REQ_AUTO_CLEANUP,
@@ -1385,7 +1324,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1385 erp_action->fsf_req = NULL; 1324 erp_action->fsf_req = NULL;
1386 } 1325 }
1387out: 1326out:
1388 spin_unlock_bh(&adapter->req_q.lock); 1327 spin_unlock_bh(&adapter->req_q_lock);
1389 return retval; 1328 return retval;
1390} 1329}
1391 1330
@@ -1398,15 +1337,15 @@ out:
1398int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, 1337int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1399 struct fsf_qtcb_bottom_port *data) 1338 struct fsf_qtcb_bottom_port *data)
1400{ 1339{
1401 volatile struct qdio_buffer_element *sbale; 1340 struct qdio_buffer_element *sbale;
1402 struct zfcp_fsf_req *req = NULL; 1341 struct zfcp_fsf_req *req = NULL;
1403 int retval = -EIO; 1342 int retval = -EIO;
1404 1343
1405 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) 1344 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1406 return -EOPNOTSUPP; 1345 return -EOPNOTSUPP;
1407 1346
1408 spin_lock_bh(&adapter->req_q.lock); 1347 spin_lock_bh(&adapter->req_q_lock);
1409 if (!atomic_read(&adapter->req_q.count)) 1348 if (!zfcp_fsf_sbal_available(adapter))
1410 goto out; 1349 goto out;
1411 1350
1412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, 1351 req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
@@ -1427,7 +1366,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
1427 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1366 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1428 retval = zfcp_fsf_req_send(req); 1367 retval = zfcp_fsf_req_send(req);
1429out: 1368out:
1430 spin_unlock_bh(&adapter->req_q.lock); 1369 spin_unlock_bh(&adapter->req_q_lock);
1431 if (!retval) 1370 if (!retval)
1432 wait_event(req->completion_wq, 1371 wait_event(req->completion_wq,
1433 req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 1372 req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
@@ -1443,7 +1382,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1443 struct fsf_plogi *plogi; 1382 struct fsf_plogi *plogi;
1444 1383
1445 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1384 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1446 goto skip_fsfstatus; 1385 return;
1447 1386
1448 switch (header->fsf_status) { 1387 switch (header->fsf_status) {
1449 case FSF_PORT_ALREADY_OPEN: 1388 case FSF_PORT_ALREADY_OPEN:
@@ -1453,9 +1392,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1453 break; 1392 break;
1454 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: 1393 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1455 dev_warn(&req->adapter->ccw_device->dev, 1394 dev_warn(&req->adapter->ccw_device->dev,
1456 "The adapter is out of resources. The remote port " 1395 "Not enough FCP adapter resources to open "
1457 "0x%016Lx could not be opened, disabling it.\n", 1396 "remote port 0x%016Lx\n",
1458 port->wwpn); 1397 (unsigned long long)port->wwpn);
1459 zfcp_erp_port_failed(port, 31, req); 1398 zfcp_erp_port_failed(port, 31, req);
1460 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1399 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1461 break; 1400 break;
@@ -1467,8 +1406,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1467 break; 1406 break;
1468 case FSF_SQ_NO_RETRY_POSSIBLE: 1407 case FSF_SQ_NO_RETRY_POSSIBLE:
1469 dev_warn(&req->adapter->ccw_device->dev, 1408 dev_warn(&req->adapter->ccw_device->dev,
1470 "The remote port 0x%016Lx could not be " 1409 "Remote port 0x%016Lx could not be opened\n",
1471 "opened. Disabling it.\n", port->wwpn); 1410 (unsigned long long)port->wwpn);
1472 zfcp_erp_port_failed(port, 32, req); 1411 zfcp_erp_port_failed(port, 32, req);
1473 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1412 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1474 break; 1413 break;
@@ -1496,9 +1435,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1496 * another GID_PN straight after a port has been opened. 1435 * another GID_PN straight after a port has been opened.
1497 * Alternately, an ADISC/PDISC ELS should suffice, as well. 1436 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1498 */ 1437 */
1499 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_NO_WWPN)
1500 break;
1501
1502 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1438 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
1503 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) { 1439 if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
1504 if (plogi->serv_param.wwpn != port->wwpn) 1440 if (plogi->serv_param.wwpn != port->wwpn)
@@ -1514,9 +1450,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1514 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1450 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1515 break; 1451 break;
1516 } 1452 }
1517
1518skip_fsfstatus:
1519 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &port->status);
1520} 1453}
1521 1454
1522/** 1455/**
@@ -1526,12 +1459,12 @@ skip_fsfstatus:
1526 */ 1459 */
1527int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 1460int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1528{ 1461{
1529 volatile struct qdio_buffer_element *sbale; 1462 struct qdio_buffer_element *sbale;
1530 struct zfcp_adapter *adapter = erp_action->adapter; 1463 struct zfcp_adapter *adapter = erp_action->adapter;
1531 struct zfcp_fsf_req *req; 1464 struct zfcp_fsf_req *req;
1532 int retval = -EIO; 1465 int retval = -EIO;
1533 1466
1534 spin_lock_bh(&adapter->req_q.lock); 1467 spin_lock_bh(&adapter->req_q_lock);
1535 if (zfcp_fsf_req_sbal_get(adapter)) 1468 if (zfcp_fsf_req_sbal_get(adapter))
1536 goto out; 1469 goto out;
1537 1470
@@ -1553,7 +1486,6 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1553 req->data = erp_action->port; 1486 req->data = erp_action->port;
1554 req->erp_action = erp_action; 1487 req->erp_action = erp_action;
1555 erp_action->fsf_req = req; 1488 erp_action->fsf_req = req;
1556 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
1557 1489
1558 zfcp_fsf_start_erp_timer(req); 1490 zfcp_fsf_start_erp_timer(req);
1559 retval = zfcp_fsf_req_send(req); 1491 retval = zfcp_fsf_req_send(req);
@@ -1562,7 +1494,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1562 erp_action->fsf_req = NULL; 1494 erp_action->fsf_req = NULL;
1563 } 1495 }
1564out: 1496out:
1565 spin_unlock_bh(&adapter->req_q.lock); 1497 spin_unlock_bh(&adapter->req_q_lock);
1566 return retval; 1498 return retval;
1567} 1499}
1568 1500
@@ -1571,7 +1503,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1571 struct zfcp_port *port = req->data; 1503 struct zfcp_port *port = req->data;
1572 1504
1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1505 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1574 goto skip_fsfstatus; 1506 return;
1575 1507
1576 switch (req->qtcb->header.fsf_status) { 1508 switch (req->qtcb->header.fsf_status) {
1577 case FSF_PORT_HANDLE_NOT_VALID: 1509 case FSF_PORT_HANDLE_NOT_VALID:
@@ -1586,9 +1518,6 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1586 ZFCP_CLEAR); 1518 ZFCP_CLEAR);
1587 break; 1519 break;
1588 } 1520 }
1589
1590skip_fsfstatus:
1591 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &port->status);
1592} 1521}
1593 1522
1594/** 1523/**
@@ -1598,12 +1527,12 @@ skip_fsfstatus:
1598 */ 1527 */
1599int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 1528int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1600{ 1529{
1601 volatile struct qdio_buffer_element *sbale; 1530 struct qdio_buffer_element *sbale;
1602 struct zfcp_adapter *adapter = erp_action->adapter; 1531 struct zfcp_adapter *adapter = erp_action->adapter;
1603 struct zfcp_fsf_req *req; 1532 struct zfcp_fsf_req *req;
1604 int retval = -EIO; 1533 int retval = -EIO;
1605 1534
1606 spin_lock_bh(&adapter->req_q.lock); 1535 spin_lock_bh(&adapter->req_q_lock);
1607 if (zfcp_fsf_req_sbal_get(adapter)) 1536 if (zfcp_fsf_req_sbal_get(adapter))
1608 goto out; 1537 goto out;
1609 1538
@@ -1624,7 +1553,6 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1624 req->erp_action = erp_action; 1553 req->erp_action = erp_action;
1625 req->qtcb->header.port_handle = erp_action->port->handle; 1554 req->qtcb->header.port_handle = erp_action->port->handle;
1626 erp_action->fsf_req = req; 1555 erp_action->fsf_req = req;
1627 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
1628 1556
1629 zfcp_fsf_start_erp_timer(req); 1557 zfcp_fsf_start_erp_timer(req);
1630 retval = zfcp_fsf_req_send(req); 1558 retval = zfcp_fsf_req_send(req);
@@ -1633,7 +1561,131 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1633 erp_action->fsf_req = NULL; 1561 erp_action->fsf_req = NULL;
1634 } 1562 }
1635out: 1563out:
1636 spin_unlock_bh(&adapter->req_q.lock); 1564 spin_unlock_bh(&adapter->req_q_lock);
1565 return retval;
1566}
1567
1568static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1569{
1570 struct zfcp_wka_port *wka_port = req->data;
1571 struct fsf_qtcb_header *header = &req->qtcb->header;
1572
1573 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1574 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1575 goto out;
1576 }
1577
1578 switch (header->fsf_status) {
1579 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1580 dev_warn(&req->adapter->ccw_device->dev,
1581 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1582 case FSF_ADAPTER_STATUS_AVAILABLE:
1583 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1584 case FSF_ACCESS_DENIED:
1585 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1586 break;
1587 case FSF_PORT_ALREADY_OPEN:
1588 case FSF_GOOD:
1589 wka_port->handle = header->port_handle;
1590 wka_port->status = ZFCP_WKA_PORT_ONLINE;
1591 }
1592out:
1593 wake_up(&wka_port->completion_wq);
1594}
1595
1596/**
1597 * zfcp_fsf_open_wka_port - create and send open wka-port request
1598 * @wka_port: pointer to struct zfcp_wka_port
1599 * Returns: 0 on success, error otherwise
1600 */
1601int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1602{
1603 struct qdio_buffer_element *sbale;
1604 struct zfcp_adapter *adapter = wka_port->adapter;
1605 struct zfcp_fsf_req *req;
1606 int retval = -EIO;
1607
1608 spin_lock_bh(&adapter->req_q_lock);
1609 if (zfcp_fsf_req_sbal_get(adapter))
1610 goto out;
1611
1612 req = zfcp_fsf_req_create(adapter,
1613 FSF_QTCB_OPEN_PORT_WITH_DID,
1614 ZFCP_REQ_AUTO_CLEANUP,
1615 adapter->pool.fsf_req_erp);
1616 if (unlikely(IS_ERR(req))) {
1617 retval = PTR_ERR(req);
1618 goto out;
1619 }
1620
1621 sbale = zfcp_qdio_sbale_req(req);
1622 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1623 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1624
1625 req->handler = zfcp_fsf_open_wka_port_handler;
1626 req->qtcb->bottom.support.d_id = wka_port->d_id;
1627 req->data = wka_port;
1628
1629 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1630 retval = zfcp_fsf_req_send(req);
1631 if (retval)
1632 zfcp_fsf_req_free(req);
1633out:
1634 spin_unlock_bh(&adapter->req_q_lock);
1635 return retval;
1636}
1637
1638static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1639{
1640 struct zfcp_wka_port *wka_port = req->data;
1641
1642 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1643 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1644 zfcp_erp_adapter_reopen(wka_port->adapter, 0, 84, req);
1645 }
1646
1647 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1648 wake_up(&wka_port->completion_wq);
1649}
1650
1651/**
1652 * zfcp_fsf_close_wka_port - create and send close wka port request
1653 * @erp_action: pointer to struct zfcp_erp_action
1654 * Returns: 0 on success, error otherwise
1655 */
1656int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1657{
1658 struct qdio_buffer_element *sbale;
1659 struct zfcp_adapter *adapter = wka_port->adapter;
1660 struct zfcp_fsf_req *req;
1661 int retval = -EIO;
1662
1663 spin_lock_bh(&adapter->req_q_lock);
1664 if (zfcp_fsf_req_sbal_get(adapter))
1665 goto out;
1666
1667 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
1668 ZFCP_REQ_AUTO_CLEANUP,
1669 adapter->pool.fsf_req_erp);
1670 if (unlikely(IS_ERR(req))) {
1671 retval = PTR_ERR(req);
1672 goto out;
1673 }
1674
1675 sbale = zfcp_qdio_sbale_req(req);
1676 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1677 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1678
1679 req->handler = zfcp_fsf_close_wka_port_handler;
1680 req->data = wka_port;
1681 req->qtcb->header.port_handle = wka_port->handle;
1682
1683 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1684 retval = zfcp_fsf_req_send(req);
1685 if (retval)
1686 zfcp_fsf_req_free(req);
1687out:
1688 spin_unlock_bh(&adapter->req_q_lock);
1637 return retval; 1689 return retval;
1638} 1690}
1639 1691
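The new open/close handlers for well-known ports above drive a small state field: on any failure the port is left ZFCP_WKA_PORT_OFFLINE, on FSF_GOOD or FSF_PORT_ALREADY_OPEN it becomes ZFCP_WKA_PORT_ONLINE, and waiters on completion_wq are woken either way. Note that the error cases in zfcp_fsf_open_wka_port_handler deliberately fall through (resource warning, then request error, then offline). The fragment below merely restates that cascade with invented status codes so the intent of the uncommented fall-throughs is easy to see.

#include <stdio.h>

enum port_state { PORT_OFFLINE, PORT_ONLINE };

/* Invented response codes, for illustration only. */
enum open_rsp { RSP_GOOD, RSP_ALREADY_OPEN, RSP_EXCEEDED, RSP_SQ_AVAILABLE, RSP_DENIED };

static enum port_state handle_open_response(enum open_rsp status)
{
	switch (status) {
	case RSP_EXCEEDED:
		fprintf(stderr, "warning: no resources to open the port\n");
		/* fall through: this is also a request error ... */
	case RSP_SQ_AVAILABLE:
		fprintf(stderr, "marking the request as failed\n");
		/* fall through: ... and the port stays closed */
	case RSP_DENIED:
		return PORT_OFFLINE;
	case RSP_ALREADY_OPEN:
	case RSP_GOOD:
		return PORT_ONLINE;
	}
	return PORT_OFFLINE;
}

int main(void)
{
	printf("exceeded -> %s\n",
	       handle_open_response(RSP_EXCEEDED) == PORT_ONLINE ? "online" : "offline");
	printf("good     -> %s\n",
	       handle_open_response(RSP_GOOD) == PORT_ONLINE ? "online" : "offline");
	return 0;
}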
@@ -1695,12 +1747,12 @@ skip_fsfstatus:
1695 */ 1747 */
1696int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 1748int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1697{ 1749{
1698 volatile struct qdio_buffer_element *sbale; 1750 struct qdio_buffer_element *sbale;
1699 struct zfcp_adapter *adapter = erp_action->adapter; 1751 struct zfcp_adapter *adapter = erp_action->adapter;
1700 struct zfcp_fsf_req *req; 1752 struct zfcp_fsf_req *req;
1701 int retval = -EIO; 1753 int retval = -EIO;
1702 1754
1703 spin_lock_bh(&adapter->req_q.lock); 1755 spin_lock_bh(&adapter->req_q_lock);
1704 if (zfcp_fsf_req_sbal_get(adapter)) 1756 if (zfcp_fsf_req_sbal_get(adapter))
1705 goto out; 1757 goto out;
1706 1758
@@ -1731,7 +1783,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1731 erp_action->fsf_req = NULL; 1783 erp_action->fsf_req = NULL;
1732 } 1784 }
1733out: 1785out:
1734 spin_unlock_bh(&adapter->req_q.lock); 1786 spin_unlock_bh(&adapter->req_q_lock);
1735 return retval; 1787 return retval;
1736} 1788}
1737 1789
@@ -1746,7 +1798,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1746 int exclusive, readwrite; 1798 int exclusive, readwrite;
1747 1799
1748 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1800 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1749 goto skip_fsfstatus; 1801 return;
1750 1802
1751 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1803 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1752 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1804 ZFCP_STATUS_COMMON_ACCESS_BOXED |
@@ -1774,14 +1826,12 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1774 case FSF_LUN_SHARING_VIOLATION: 1826 case FSF_LUN_SHARING_VIOLATION:
1775 if (header->fsf_status_qual.word[0]) 1827 if (header->fsf_status_qual.word[0])
1776 dev_warn(&adapter->ccw_device->dev, 1828 dev_warn(&adapter->ccw_device->dev,
1777 "FCP-LUN 0x%Lx at the remote port " 1829 "LUN 0x%Lx on port 0x%Lx is already in "
1778 "with WWPN 0x%Lx " 1830 "use by CSS%d, MIF Image ID %x\n",
1779 "connected to the adapter " 1831 (unsigned long long)unit->fcp_lun,
1780 "is already in use in LPAR%d, CSS%d.\n", 1832 (unsigned long long)unit->port->wwpn,
1781 unit->fcp_lun, 1833 queue_designator->cssid,
1782 unit->port->wwpn, 1834 queue_designator->hla);
1783 queue_designator->hla,
1784 queue_designator->cssid);
1785 else 1835 else
1786 zfcp_act_eval_err(adapter, 1836 zfcp_act_eval_err(adapter,
1787 header->fsf_status_qual.word[2]); 1837 header->fsf_status_qual.word[2]);
@@ -1792,9 +1842,10 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1792 break; 1842 break;
1793 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: 1843 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1794 dev_warn(&adapter->ccw_device->dev, 1844 dev_warn(&adapter->ccw_device->dev,
1795 "The adapter ran out of resources. There is no " 1845 "No handle is available for LUN "
1796 "handle available for unit 0x%016Lx on port 0x%016Lx.", 1846 "0x%016Lx on port 0x%016Lx\n",
1797 unit->fcp_lun, unit->port->wwpn); 1847 (unsigned long long)unit->fcp_lun,
1848 (unsigned long long)unit->port->wwpn);
1798 zfcp_erp_unit_failed(unit, 34, req); 1849 zfcp_erp_unit_failed(unit, 34, req);
1799 /* fall through */ 1850 /* fall through */
1800 case FSF_INVALID_COMMAND_OPTION: 1851 case FSF_INVALID_COMMAND_OPTION:
@@ -1831,26 +1882,29 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1831 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, 1882 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1832 &unit->status); 1883 &unit->status);
1833 dev_info(&adapter->ccw_device->dev, 1884 dev_info(&adapter->ccw_device->dev,
1834 "Read-only access for unit 0x%016Lx " 1885 "SCSI device at LUN 0x%016Lx on port "
1835 "on port 0x%016Lx.\n", 1886 "0x%016Lx opened read-only\n",
1836 unit->fcp_lun, unit->port->wwpn); 1887 (unsigned long long)unit->fcp_lun,
1888 (unsigned long long)unit->port->wwpn);
1837 } 1889 }
1838 1890
1839 if (exclusive && !readwrite) { 1891 if (exclusive && !readwrite) {
1840 dev_err(&adapter->ccw_device->dev, 1892 dev_err(&adapter->ccw_device->dev,
1841 "Exclusive access of read-only unit " 1893 "Exclusive read-only access not "
1842 "0x%016Lx on port 0x%016Lx not " 1894 "supported (unit 0x%016Lx, "
1843 "supported, disabling unit.\n", 1895 "port 0x%016Lx)\n",
1844 unit->fcp_lun, unit->port->wwpn); 1896 (unsigned long long)unit->fcp_lun,
1897 (unsigned long long)unit->port->wwpn);
1845 zfcp_erp_unit_failed(unit, 35, req); 1898 zfcp_erp_unit_failed(unit, 35, req);
1846 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1899 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1847 zfcp_erp_unit_shutdown(unit, 0, 80, req); 1900 zfcp_erp_unit_shutdown(unit, 0, 80, req);
1848 } else if (!exclusive && readwrite) { 1901 } else if (!exclusive && readwrite) {
1849 dev_err(&adapter->ccw_device->dev, 1902 dev_err(&adapter->ccw_device->dev,
1850 "Shared access of read-write unit " 1903 "Shared read-write access not "
1851 "0x%016Lx on port 0x%016Lx not " 1904 "supported (unit 0x%016Lx, port "
1852 "supported, disabling unit.\n", 1905 "0x%016Lx\n)",
1853 unit->fcp_lun, unit->port->wwpn); 1906 (unsigned long long)unit->fcp_lun,
1907 (unsigned long long)unit->port->wwpn);
1854 zfcp_erp_unit_failed(unit, 36, req); 1908 zfcp_erp_unit_failed(unit, 36, req);
1855 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1909 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1856 zfcp_erp_unit_shutdown(unit, 0, 81, req); 1910 zfcp_erp_unit_shutdown(unit, 0, 81, req);
@@ -1858,9 +1912,6 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1858 } 1912 }
1859 break; 1913 break;
1860 } 1914 }
1861
1862skip_fsfstatus:
1863 atomic_clear_mask(ZFCP_STATUS_COMMON_OPENING, &unit->status);
1864} 1915}
1865 1916
1866/** 1917/**
@@ -1870,12 +1921,12 @@ skip_fsfstatus:
1870 */ 1921 */
1871int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 1922int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1872{ 1923{
1873 volatile struct qdio_buffer_element *sbale; 1924 struct qdio_buffer_element *sbale;
1874 struct zfcp_adapter *adapter = erp_action->adapter; 1925 struct zfcp_adapter *adapter = erp_action->adapter;
1875 struct zfcp_fsf_req *req; 1926 struct zfcp_fsf_req *req;
1876 int retval = -EIO; 1927 int retval = -EIO;
1877 1928
1878 spin_lock_bh(&adapter->req_q.lock); 1929 spin_lock_bh(&adapter->req_q_lock);
1879 if (zfcp_fsf_req_sbal_get(adapter)) 1930 if (zfcp_fsf_req_sbal_get(adapter))
1880 goto out; 1931 goto out;
1881 1932
@@ -1901,8 +1952,6 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1901 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 1952 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1902 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; 1953 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1903 1954
1904 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
1905
1906 zfcp_fsf_start_erp_timer(req); 1955 zfcp_fsf_start_erp_timer(req);
1907 retval = zfcp_fsf_req_send(req); 1956 retval = zfcp_fsf_req_send(req);
1908 if (retval) { 1957 if (retval) {
@@ -1910,7 +1959,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1910 erp_action->fsf_req = NULL; 1959 erp_action->fsf_req = NULL;
1911 } 1960 }
1912out: 1961out:
1913 spin_unlock_bh(&adapter->req_q.lock); 1962 spin_unlock_bh(&adapter->req_q_lock);
1914 return retval; 1963 return retval;
1915} 1964}
1916 1965
@@ -1919,7 +1968,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1919 struct zfcp_unit *unit = req->data; 1968 struct zfcp_unit *unit = req->data;
1920 1969
1921 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1970 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1922 goto skip_fsfstatus; 1971 return;
1923 1972
1924 switch (req->qtcb->header.fsf_status) { 1973 switch (req->qtcb->header.fsf_status) {
1925 case FSF_PORT_HANDLE_NOT_VALID: 1974 case FSF_PORT_HANDLE_NOT_VALID:
@@ -1949,8 +1998,6 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
1949 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 1998 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
1950 break; 1999 break;
1951 } 2000 }
1952skip_fsfstatus:
1953 atomic_clear_mask(ZFCP_STATUS_COMMON_CLOSING, &unit->status);
1954} 2001}
1955 2002
1956/** 2003/**
@@ -1960,12 +2007,12 @@ skip_fsfstatus:
1960 */ 2007 */
1961int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 2008int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1962{ 2009{
1963 volatile struct qdio_buffer_element *sbale; 2010 struct qdio_buffer_element *sbale;
1964 struct zfcp_adapter *adapter = erp_action->adapter; 2011 struct zfcp_adapter *adapter = erp_action->adapter;
1965 struct zfcp_fsf_req *req; 2012 struct zfcp_fsf_req *req;
1966 int retval = -EIO; 2013 int retval = -EIO;
1967 2014
1968 spin_lock_bh(&adapter->req_q.lock); 2015 spin_lock_bh(&adapter->req_q_lock);
1969 if (zfcp_fsf_req_sbal_get(adapter)) 2016 if (zfcp_fsf_req_sbal_get(adapter))
1970 goto out; 2017 goto out;
1971 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, 2018 req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
@@ -1986,7 +2033,6 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1986 req->data = erp_action->unit; 2033 req->data = erp_action->unit;
1987 req->erp_action = erp_action; 2034 req->erp_action = erp_action;
1988 erp_action->fsf_req = req; 2035 erp_action->fsf_req = req;
1989 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
1990 2036
1991 zfcp_fsf_start_erp_timer(req); 2037 zfcp_fsf_start_erp_timer(req);
1992 retval = zfcp_fsf_req_send(req); 2038 retval = zfcp_fsf_req_send(req);
@@ -1995,7 +2041,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
1995 erp_action->fsf_req = NULL; 2041 erp_action->fsf_req = NULL;
1996 } 2042 }
1997out: 2043out:
1998 spin_unlock_bh(&adapter->req_q.lock); 2044 spin_unlock_bh(&adapter->req_q_lock);
1999 return retval; 2045 return retval;
2000} 2046}
2001 2047
@@ -2156,21 +2202,21 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2156 break; 2202 break;
2157 case FSF_DIRECTION_INDICATOR_NOT_VALID: 2203 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2158 dev_err(&req->adapter->ccw_device->dev, 2204 dev_err(&req->adapter->ccw_device->dev,
2159 "Invalid data direction (%d) given for unit " 2205 "Incorrect direction %d, unit 0x%016Lx on port "
2160 "0x%016Lx on port 0x%016Lx, shutting down " 2206 "0x%016Lx closed\n",
2161 "adapter.\n",
2162 req->qtcb->bottom.io.data_direction, 2207 req->qtcb->bottom.io.data_direction,
2163 unit->fcp_lun, unit->port->wwpn); 2208 (unsigned long long)unit->fcp_lun,
2209 (unsigned long long)unit->port->wwpn);
2164 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req); 2210 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 133, req);
2165 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2211 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2166 break; 2212 break;
2167 case FSF_CMND_LENGTH_NOT_VALID: 2213 case FSF_CMND_LENGTH_NOT_VALID:
2168 dev_err(&req->adapter->ccw_device->dev, 2214 dev_err(&req->adapter->ccw_device->dev,
2169 "An invalid control-data-block length field (%d) " 2215 "Incorrect CDB length %d, unit 0x%016Lx on "
2170 "was found in a command for unit 0x%016Lx on port " 2216 "port 0x%016Lx closed\n",
2171 "0x%016Lx. Shutting down adapter.\n",
2172 req->qtcb->bottom.io.fcp_cmnd_length, 2217 req->qtcb->bottom.io.fcp_cmnd_length,
2173 unit->fcp_lun, unit->port->wwpn); 2218 (unsigned long long)unit->fcp_lun,
2219 (unsigned long long)unit->port->wwpn);
2174 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req); 2220 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, 134, req);
2175 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 2221 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2176 break; 2222 break;
@@ -2201,6 +2247,20 @@ skip_fsfstatus:
2201 } 2247 }
2202} 2248}
2203 2249
2250static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2251{
2252 u32 *fcp_dl_ptr;
2253
2254 /*
2255 * fcp_dl_addr = start address of fcp_cmnd structure +
 2256 * size of fixed part + size of dynamically sized add_fcp_cdb field
2257 * SEE FCP-2 documentation
2258 */
2259 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2260 (fcp_cmd->add_fcp_cdb_length << 2));
2261 *fcp_dl_ptr = fcp_dl;
2262}
2263
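
For illustration, the offset arithmetic used by the zfcp_set_fcp_dl() helper added above can be reproduced stand-alone: the FCP_DL word sits right behind the fixed part of the FCP_CMND IU plus the variable additional-CDB bytes. The layout below is a simplified, hypothetical stand-in for struct fcp_cmnd_iu, not the driver's real definition.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified, hypothetical stand-in for the FCP_CMND IU fixed part. */
struct fcp_cmnd_sketch {
	uint64_t fcp_lun;
	uint8_t  task_attributes;
	uint8_t  add_fcp_cdb_length;	/* additional CDB length, in 4-byte words */
	uint8_t  rw_flags;
	uint8_t  cdb[16];
	/* variable-length additional CDB bytes follow, then the 4-byte fcp_dl */
};

/* Same idea as zfcp_set_fcp_dl(): fcp_dl sits right behind &cmd[1] plus the
 * additional CDB words (add_fcp_cdb_length is counted in 4-byte units). */
static void set_fcp_dl_sketch(struct fcp_cmnd_sketch *cmd, uint32_t fcp_dl)
{
	unsigned char *dl = (unsigned char *)&cmd[1] +
			    (cmd->add_fcp_cdb_length << 2);

	memcpy(dl, &fcp_dl, sizeof(fcp_dl));	/* byte copy avoids alignment worries */
}

int main(void)
{
	struct {
		struct fcp_cmnd_sketch cmd;
		unsigned char tail[64];	/* room for the extra CDB words and fcp_dl */
	} iu = { { 0 }, { 0 } };

	iu.cmd.add_fcp_cdb_length = 2;	/* two extra CDB words = 8 bytes */
	set_fcp_dl_sketch(&iu.cmd, 4096);
	printf("fcp_dl stored at byte offset %zu\n",
	       sizeof(iu.cmd) + (size_t)(iu.cmd.add_fcp_cdb_length << 2));
	return 0;
}

The sizeof(u32) term that the patch now adds to fcp_cmnd_length accounts for exactly this trailing field.
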
2204/** 2264/**
2205 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2265 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2206 * @adapter: adapter where scsi command is issued 2266 * @adapter: adapter where scsi command is issued
@@ -2223,8 +2283,8 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2223 ZFCP_STATUS_COMMON_UNBLOCKED))) 2283 ZFCP_STATUS_COMMON_UNBLOCKED)))
2224 return -EBUSY; 2284 return -EBUSY;
2225 2285
2226 spin_lock(&adapter->req_q.lock); 2286 spin_lock(&adapter->req_q_lock);
2227 if (!atomic_read(&adapter->req_q.count)) 2287 if (!zfcp_fsf_sbal_available(adapter))
2228 goto out; 2288 goto out;
2229 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2289 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2230 adapter->pool.fsf_req_scsi); 2290 adapter->pool.fsf_req_scsi);
@@ -2286,7 +2346,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2286 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 2346 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2287 2347
2288 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2348 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2289 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(fcp_dl_t); 2349 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2290 2350
2291 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, 2351 real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
2292 scsi_sglist(scsi_cmnd), 2352 scsi_sglist(scsi_cmnd),
@@ -2296,10 +2356,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
2296 retval = -EIO; 2356 retval = -EIO;
2297 else { 2357 else {
2298 dev_err(&adapter->ccw_device->dev, 2358 dev_err(&adapter->ccw_device->dev,
2299 "SCSI request too large. " 2359 "Oversize data package, unit 0x%016Lx "
2300 "Shutting down unit 0x%016Lx on port " 2360 "on port 0x%016Lx closed\n",
2301 "0x%016Lx.\n", unit->fcp_lun, 2361 (unsigned long long)unit->fcp_lun,
2302 unit->port->wwpn); 2362 (unsigned long long)unit->port->wwpn);
2303 zfcp_erp_unit_shutdown(unit, 0, 131, req); 2363 zfcp_erp_unit_shutdown(unit, 0, 131, req);
2304 retval = -EINVAL; 2364 retval = -EINVAL;
2305 } 2365 }
@@ -2322,7 +2382,7 @@ failed_scsi_cmnd:
2322 zfcp_fsf_req_free(req); 2382 zfcp_fsf_req_free(req);
2323 scsi_cmnd->host_scribble = NULL; 2383 scsi_cmnd->host_scribble = NULL;
2324out: 2384out:
2325 spin_unlock(&adapter->req_q.lock); 2385 spin_unlock(&adapter->req_q_lock);
2326 return retval; 2386 return retval;
2327} 2387}
2328 2388
@@ -2338,7 +2398,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2338 struct zfcp_unit *unit, 2398 struct zfcp_unit *unit,
2339 u8 tm_flags, int req_flags) 2399 u8 tm_flags, int req_flags)
2340{ 2400{
2341 volatile struct qdio_buffer_element *sbale; 2401 struct qdio_buffer_element *sbale;
2342 struct zfcp_fsf_req *req = NULL; 2402 struct zfcp_fsf_req *req = NULL;
2343 struct fcp_cmnd_iu *fcp_cmnd_iu; 2403 struct fcp_cmnd_iu *fcp_cmnd_iu;
2344 2404
@@ -2346,8 +2406,8 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2346 ZFCP_STATUS_COMMON_UNBLOCKED))) 2406 ZFCP_STATUS_COMMON_UNBLOCKED)))
2347 return NULL; 2407 return NULL;
2348 2408
2349 spin_lock(&adapter->req_q.lock); 2409 spin_lock(&adapter->req_q_lock);
2350 if (!atomic_read(&adapter->req_q.count)) 2410 if (!zfcp_fsf_sbal_available(adapter))
2351 goto out; 2411 goto out;
2352 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags, 2412 req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, req_flags,
2353 adapter->pool.fsf_req_scsi); 2413 adapter->pool.fsf_req_scsi);
@@ -2362,7 +2422,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2362 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2422 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2363 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2423 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2364 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2424 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2365 sizeof(fcp_dl_t); 2425 sizeof(u32);
2366 2426
2367 sbale = zfcp_qdio_sbale_req(req); 2427 sbale = zfcp_qdio_sbale_req(req);
2368 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2428 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
@@ -2379,7 +2439,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_adapter *adapter,
2379 zfcp_fsf_req_free(req); 2439 zfcp_fsf_req_free(req);
2380 req = NULL; 2440 req = NULL;
2381out: 2441out:
2382 spin_unlock(&adapter->req_q.lock); 2442 spin_unlock(&adapter->req_q_lock);
2383 return req; 2443 return req;
2384} 2444}
2385 2445
@@ -2398,7 +2458,7 @@ static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2398struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, 2458struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2399 struct zfcp_fsf_cfdc *fsf_cfdc) 2459 struct zfcp_fsf_cfdc *fsf_cfdc)
2400{ 2460{
2401 volatile struct qdio_buffer_element *sbale; 2461 struct qdio_buffer_element *sbale;
2402 struct zfcp_fsf_req *req = NULL; 2462 struct zfcp_fsf_req *req = NULL;
2403 struct fsf_qtcb_bottom_support *bottom; 2463 struct fsf_qtcb_bottom_support *bottom;
2404 int direction, retval = -EIO, bytes; 2464 int direction, retval = -EIO, bytes;
@@ -2417,7 +2477,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2417 return ERR_PTR(-EINVAL); 2477 return ERR_PTR(-EINVAL);
2418 } 2478 }
2419 2479
2420 spin_lock_bh(&adapter->req_q.lock); 2480 spin_lock_bh(&adapter->req_q_lock);
2421 if (zfcp_fsf_req_sbal_get(adapter)) 2481 if (zfcp_fsf_req_sbal_get(adapter))
2422 goto out; 2482 goto out;
2423 2483
@@ -2447,7 +2507,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2447 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2507 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2448 retval = zfcp_fsf_req_send(req); 2508 retval = zfcp_fsf_req_send(req);
2449out: 2509out:
2450 spin_unlock_bh(&adapter->req_q.lock); 2510 spin_unlock_bh(&adapter->req_q_lock);
2451 2511
2452 if (!retval) { 2512 if (!retval) {
2453 wait_event(req->completion_wq, 2513 wait_event(req->completion_wq,
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index bf94b4da0763..fd3a88777ac8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -71,13 +71,6 @@
71#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041 71#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
72#define FSF_ELS_COMMAND_REJECTED 0x00000050 72#define FSF_ELS_COMMAND_REJECTED 0x00000050
73#define FSF_GENERIC_COMMAND_REJECTED 0x00000051 73#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
74#define FSF_OPERATION_PARTIALLY_SUCCESSFUL 0x00000052
75#define FSF_AUTHORIZATION_FAILURE 0x00000053
76#define FSF_CFDC_ERROR_DETECTED 0x00000054
77#define FSF_CONTROL_FILE_UPDATE_ERROR 0x00000055
78#define FSF_CONTROL_FILE_TOO_LARGE 0x00000056
79#define FSF_ACCESS_CONFLICT_DETECTED 0x00000057
80#define FSF_CONFLICTS_OVERRULED 0x00000058
81#define FSF_PORT_BOXED 0x00000059 74#define FSF_PORT_BOXED 0x00000059
82#define FSF_LUN_BOXED 0x0000005A 75#define FSF_LUN_BOXED 0x0000005A
83#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B 76#define FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE 0x0000005B
@@ -85,9 +78,7 @@
85#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061 78#define FSF_REQUEST_SIZE_TOO_LARGE 0x00000061
86#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062 79#define FSF_RESPONSE_SIZE_TOO_LARGE 0x00000062
87#define FSF_SBAL_MISMATCH 0x00000063 80#define FSF_SBAL_MISMATCH 0x00000063
88#define FSF_OPEN_PORT_WITHOUT_PRLI 0x00000064
89#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD 81#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
90#define FSF_FCP_RSP_AVAILABLE 0x000000AF
91#define FSF_UNKNOWN_COMMAND 0x000000E2 82#define FSF_UNKNOWN_COMMAND 0x000000E2
92#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 83#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
93#define FSF_INVALID_COMMAND_OPTION 0x000000E5 84#define FSF_INVALID_COMMAND_OPTION 0x000000E5
@@ -102,20 +93,9 @@
102#define FSF_SQ_RETRY_IF_POSSIBLE 0x02 93#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
103#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03 94#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
104#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04 95#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
105#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
106#define FSF_SQ_COMMAND_ABORTED 0x06 96#define FSF_SQ_COMMAND_ABORTED 0x06
107#define FSF_SQ_NO_RETRY_POSSIBLE 0x07 97#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
108 98
109/* FSF status qualifier for CFDC commands */
110#define FSF_SQ_CFDC_HARDENED_ON_SE 0x00000000
111#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE 0x00000001
112#define FSF_SQ_CFDC_COULD_NOT_HARDEN_ON_SE2 0x00000002
113/* CFDC subtable codes */
114#define FSF_SQ_CFDC_SUBTABLE_OS 0x0001
115#define FSF_SQ_CFDC_SUBTABLE_PORT_WWPN 0x0002
116#define FSF_SQ_CFDC_SUBTABLE_PORT_DID 0x0003
117#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
118
119/* FSF status qualifier (most significant 4 bytes), local link down */ 99/* FSF status qualifier (most significant 4 bytes), local link down */
120#define FSF_PSQ_LINK_NO_LIGHT 0x00000004 100#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
121#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008 101#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
@@ -145,7 +125,6 @@
145#define FSF_STATUS_READ_LINK_UP 0x00000006 125#define FSF_STATUS_READ_LINK_UP 0x00000006
146#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009 126#define FSF_STATUS_READ_NOTIFICATION_LOST 0x00000009
147#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A 127#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
148#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
149#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C 128#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
150 129
151/* status subtypes in status read buffer */ 130/* status subtypes in status read buffer */
@@ -159,20 +138,9 @@
159 138
160/* status subtypes for unsolicited status notification lost */ 139/* status subtypes for unsolicited status notification lost */
161#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001 140#define FSF_STATUS_READ_SUB_INCOMING_ELS 0x00000001
162#define FSF_STATUS_READ_SUB_SENSE_DATA 0x00000002
163#define FSF_STATUS_READ_SUB_LINK_STATUS 0x00000004
164#define FSF_STATUS_READ_SUB_PORT_CLOSED 0x00000008
165#define FSF_STATUS_READ_SUB_BIT_ERROR_THRESHOLD 0x00000010
166#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020 141#define FSF_STATUS_READ_SUB_ACT_UPDATED 0x00000020
167#define FSF_STATUS_READ_SUB_ACT_HARDENED 0x00000040
168#define FSF_STATUS_READ_SUB_FEATURE_UPDATE_ALERT 0x00000080
169
170/* status subtypes for CFDC */
171#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
172#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
173 142
174/* topologie that is detected by the adapter */ 143/* topologie that is detected by the adapter */
175#define FSF_TOPO_ERROR 0x00000000
176#define FSF_TOPO_P2P 0x00000001 144#define FSF_TOPO_P2P 0x00000001
177#define FSF_TOPO_FABRIC 0x00000002 145#define FSF_TOPO_FABRIC 0x00000002
178#define FSF_TOPO_AL 0x00000003 146#define FSF_TOPO_AL 0x00000003
@@ -180,17 +148,13 @@
180/* data direction for FCP commands */ 148/* data direction for FCP commands */
181#define FSF_DATADIR_WRITE 0x00000001 149#define FSF_DATADIR_WRITE 0x00000001
182#define FSF_DATADIR_READ 0x00000002 150#define FSF_DATADIR_READ 0x00000002
183#define FSF_DATADIR_READ_WRITE 0x00000003
184#define FSF_DATADIR_CMND 0x00000004 151#define FSF_DATADIR_CMND 0x00000004
185 152
186/* fc service class */ 153/* fc service class */
187#define FSF_CLASS_1 0x00000001
188#define FSF_CLASS_2 0x00000002
189#define FSF_CLASS_3 0x00000003 154#define FSF_CLASS_3 0x00000003
190 155
191/* SBAL chaining */ 156/* SBAL chaining */
192#define FSF_MAX_SBALS_PER_REQ 36 157#define FSF_MAX_SBALS_PER_REQ 36
193#define FSF_MAX_SBALS_PER_ELS_REQ 2
194 158
195/* logging space behind QTCB */ 159/* logging space behind QTCB */
196#define FSF_QTCB_LOG_SIZE 1024 160#define FSF_QTCB_LOG_SIZE 1024
@@ -200,50 +164,16 @@
200#define FSF_FEATURE_LUN_SHARING 0x00000004 164#define FSF_FEATURE_LUN_SHARING 0x00000004
201#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008 165#define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
202#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 166#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
203#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
204#define FSF_FEATURE_UPDATE_ALERT 0x00000100 167#define FSF_FEATURE_UPDATE_ALERT 0x00000100
205#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200 168#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
206 169
207/* host connection features */ 170/* host connection features */
208#define FSF_FEATURE_NPIV_MODE 0x00000001 171#define FSF_FEATURE_NPIV_MODE 0x00000001
209#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
210 172
211/* option */ 173/* option */
212#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001 174#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
213#define FSF_OPEN_LUN_REPLICATE_SENSE 0x00000002
214
215/* adapter types */
216#define FSF_ADAPTER_TYPE_FICON 0x00000001
217#define FSF_ADAPTER_TYPE_FICON_EXPRESS 0x00000002
218
219/* port types */
220#define FSF_HBA_PORTTYPE_UNKNOWN 0x00000001
221#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
222#define FSF_HBA_PORTTYPE_NPORT 0x00000005
223#define FSF_HBA_PORTTYPE_PTP 0x00000021
224/* following are not defined and used by FSF Spec
225 but are additionally defined by FC-HBA */
226#define FSF_HBA_PORTTYPE_OTHER 0x00000002
227#define FSF_HBA_PORTTYPE_NOTPRESENT 0x00000003
228#define FSF_HBA_PORTTYPE_NLPORT 0x00000006
229#define FSF_HBA_PORTTYPE_FLPORT 0x00000007
230#define FSF_HBA_PORTTYPE_FPORT 0x00000008
231#define FSF_HBA_PORTTYPE_LPORT 0x00000020
232
233/* port states */
234#define FSF_HBA_PORTSTATE_UNKNOWN 0x00000001
235#define FSF_HBA_PORTSTATE_ONLINE 0x00000002
236#define FSF_HBA_PORTSTATE_OFFLINE 0x00000003
237#define FSF_HBA_PORTSTATE_LINKDOWN 0x00000006
238#define FSF_HBA_PORTSTATE_ERROR 0x00000007
239
240/* IO states of adapter */
241#define FSF_IOSTAT_NPORT_RJT 0x00000004
242#define FSF_IOSTAT_FABRIC_RJT 0x00000005
243#define FSF_IOSTAT_LS_RJT 0x00000009
244 175
245/* open LUN access flags*/ 176/* open LUN access flags*/
246#define FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED 0x01000000
247#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000 177#define FSF_UNIT_ACCESS_EXCLUSIVE 0x02000000
248#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000 178#define FSF_UNIT_ACCESS_OUTBOUND_TRANSFER 0x10000000
249 179
@@ -265,11 +195,6 @@ struct fsf_queue_designator {
265 u32 res1; 195 u32 res1;
266} __attribute__ ((packed)); 196} __attribute__ ((packed));
267 197
268struct fsf_port_closed_payload {
269 struct fsf_queue_designator queue_designator;
270 u32 port_handle;
271} __attribute__ ((packed));
272
273struct fsf_bit_error_payload { 198struct fsf_bit_error_payload {
274 u32 res1; 199 u32 res1;
275 u32 link_failure_error_count; 200 u32 link_failure_error_count;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 69d632d851d9..3e05080e62d4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -28,7 +28,7 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
28 return 0; 28 return 0;
29} 29}
30 30
31static volatile struct qdio_buffer_element * 31static struct qdio_buffer_element *
32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) 32zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
33{ 33{
34 return &q->sbal[sbal_idx]->element[sbale_idx]; 34 return &q->sbal[sbal_idx]->element[sbale_idx];
@@ -57,7 +57,7 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter)
57 57
58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id) 58static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, u8 id)
59{ 59{
60 dev_warn(&adapter->ccw_device->dev, "QDIO problem occurred.\n"); 60 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
61 61
62 zfcp_erp_adapter_reopen(adapter, 62 zfcp_erp_adapter_reopen(adapter,
63 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 63 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
@@ -145,7 +145,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
145{ 145{
146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; 146 struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
147 struct zfcp_qdio_queue *queue = &adapter->resp_q; 147 struct zfcp_qdio_queue *queue = &adapter->resp_q;
148 volatile struct qdio_buffer_element *sbale; 148 struct qdio_buffer_element *sbale;
149 int sbal_idx, sbale_idx, sbal_no; 149 int sbal_idx, sbale_idx, sbal_no;
150 150
151 if (unlikely(qdio_err)) { 151 if (unlikely(qdio_err)) {
@@ -174,8 +174,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
174 174
175 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) 175 if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
176 dev_warn(&adapter->ccw_device->dev, 176 dev_warn(&adapter->ccw_device->dev,
177 "Protocol violation by adapter. " 177 "A QDIO protocol error occurred, "
178 "Continuing operations.\n"); 178 "operations continue\n");
179 } 179 }
180 180
181 /* 181 /*
@@ -190,8 +190,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
190 * @fsf_req: pointer to struct fsf_req 190 * @fsf_req: pointer to struct fsf_req
191 * Returns: pointer to qdio_buffer_element (SBALE) structure 191 * Returns: pointer to qdio_buffer_element (SBALE) structure
192 */ 192 */
193volatile struct qdio_buffer_element * 193struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
194zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
195{ 194{
196 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); 195 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
197} 196}
@@ -201,8 +200,7 @@ zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
201 * @fsf_req: pointer to struct fsf_req 200 * @fsf_req: pointer to struct fsf_req
202 * Returns: pointer to qdio_buffer_element (SBALE) structure 201 * Returns: pointer to qdio_buffer_element (SBALE) structure
203 */ 202 */
204volatile struct qdio_buffer_element * 203struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
205zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
206{ 204{
207 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 205 return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
208 req->sbale_curr); 206 req->sbale_curr);
@@ -216,10 +214,10 @@ static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
216 % QDIO_MAX_BUFFERS_PER_Q; 214 % QDIO_MAX_BUFFERS_PER_Q;
217} 215}
218 216
219static volatile struct qdio_buffer_element * 217static struct qdio_buffer_element *
220zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 218zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
221{ 219{
222 volatile struct qdio_buffer_element *sbale; 220 struct qdio_buffer_element *sbale;
223 221
224 /* set last entry flag in current SBALE of current SBAL */ 222 /* set last entry flag in current SBALE of current SBAL */
225 sbale = zfcp_qdio_sbale_curr(fsf_req); 223 sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -250,7 +248,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
250 return sbale; 248 return sbale;
251} 249}
252 250
253static volatile struct qdio_buffer_element * 251static struct qdio_buffer_element *
254zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 252zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
255{ 253{
256 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 254 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -273,7 +271,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
273 unsigned int sbtype, void *start_addr, 271 unsigned int sbtype, void *start_addr,
274 unsigned int total_length) 272 unsigned int total_length)
275{ 273{
276 volatile struct qdio_buffer_element *sbale; 274 struct qdio_buffer_element *sbale;
277 unsigned long remaining, length; 275 unsigned long remaining, length;
278 void *addr; 276 void *addr;
279 277
@@ -282,6 +280,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
282 addr += length, remaining -= length) { 280 addr += length, remaining -= length) {
283 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); 281 sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
284 if (!sbale) { 282 if (!sbale) {
283 atomic_inc(&fsf_req->adapter->qdio_outb_full);
285 zfcp_qdio_undo_sbals(fsf_req); 284 zfcp_qdio_undo_sbals(fsf_req);
286 return -EINVAL; 285 return -EINVAL;
287 } 286 }
@@ -307,7 +306,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
307int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 306int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
308 struct scatterlist *sg, int max_sbals) 307 struct scatterlist *sg, int max_sbals)
309{ 308{
310 volatile struct qdio_buffer_element *sbale; 309 struct qdio_buffer_element *sbale;
311 int retval, bytes = 0; 310 int retval, bytes = 0;
312 311
313 /* figure out last allowed SBAL */ 312 /* figure out last allowed SBAL */
@@ -344,10 +343,10 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
344 int first = fsf_req->sbal_first; 343 int first = fsf_req->sbal_first;
345 int count = fsf_req->sbal_number; 344 int count = fsf_req->sbal_number;
346 int retval, pci, pci_batch; 345 int retval, pci, pci_batch;
347 volatile struct qdio_buffer_element *sbale; 346 struct qdio_buffer_element *sbale;
348 347
349 /* acknowledgements for transferred buffers */ 348 /* acknowledgements for transferred buffers */
350 pci_batch = req_q->pci_batch + count; 349 pci_batch = adapter->req_q_pci_batch + count;
351 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) { 350 if (unlikely(pci_batch >= ZFCP_QDIO_PCI_INTERVAL)) {
352 pci_batch %= ZFCP_QDIO_PCI_INTERVAL; 351 pci_batch %= ZFCP_QDIO_PCI_INTERVAL;
353 pci = first + count - (pci_batch + 1); 352 pci = first + count - (pci_batch + 1);
@@ -367,7 +366,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
367 atomic_sub(count, &req_q->count); 366 atomic_sub(count, &req_q->count);
368 req_q->first += count; 367 req_q->first += count;
369 req_q->first %= QDIO_MAX_BUFFERS_PER_Q; 368 req_q->first %= QDIO_MAX_BUFFERS_PER_Q;
370 req_q->pci_batch = pci_batch; 369 adapter->req_q_pci_batch = pci_batch;
371 return 0; 370 return 0;
372} 371}
373 372
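
The req_q_pci_batch bookkeeping changed above keeps a running count of transmitted SBALs and marks one buffer per interval so the adapter raises a progress (PCI) interrupt. A stand-alone sketch of that modulo arithmetic, with an illustrative interval rather than the driver's ZFCP_QDIO_PCI_INTERVAL constant:

#include <stdio.h>

#define RING_SIZE	128	/* QDIO ring size (QDIO_MAX_BUFFERS_PER_Q) */
#define PCI_INTERVAL	4	/* illustrative value, not the driver's constant */

/*
 * Fold 'count' freshly used buffers into the running batch counter and,
 * whenever the interval is crossed, pick the buffer index that should carry
 * the "please interrupt" flag.  Returns -1 when no flag is needed.
 */
static int pick_pci_buffer(int *batch, int first, int count)
{
	int b = *batch + count;
	int pci = -1;

	if (b >= PCI_INTERVAL) {
		b %= PCI_INTERVAL;
		pci = (first + count - (b + 1)) % RING_SIZE;
	}
	*batch = b;
	return pci;
}

int main(void)
{
	int batch = 0, first = 0;

	for (int i = 0; i < 8; i++) {
		int pci = pick_pci_buffer(&batch, first, 2);

		printf("send %d: first=%3d pci=%3d batch=%d\n",
		       i, first, pci, batch);
		first = (first + 2) % RING_SIZE;
	}
	return 0;
}

With the illustrative interval of 4 and two buffers per send, every second send ends up carrying the flag.
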
@@ -418,14 +417,14 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
418 struct zfcp_qdio_queue *req_q; 417 struct zfcp_qdio_queue *req_q;
419 int first, count; 418 int first, count;
420 419
421 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 420 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
422 return; 421 return;
423 422
424 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 423 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
425 req_q = &adapter->req_q; 424 req_q = &adapter->req_q;
426 spin_lock_bh(&req_q->lock); 425 spin_lock_bh(&adapter->req_q_lock);
427 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 426 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
428 spin_unlock_bh(&req_q->lock); 427 spin_unlock_bh(&adapter->req_q_lock);
429 428
430 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 429 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
431 430
@@ -438,7 +437,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
438 } 437 }
439 req_q->first = 0; 438 req_q->first = 0;
440 atomic_set(&req_q->count, 0); 439 atomic_set(&req_q->count, 0);
441 req_q->pci_batch = 0; 440 adapter->req_q_pci_batch = 0;
442 adapter->resp_q.first = 0; 441 adapter->resp_q.first = 0;
443 atomic_set(&adapter->resp_q.count, 0); 442 atomic_set(&adapter->resp_q.count, 0);
444} 443}
@@ -450,23 +449,17 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
450 */ 449 */
451int zfcp_qdio_open(struct zfcp_adapter *adapter) 450int zfcp_qdio_open(struct zfcp_adapter *adapter)
452{ 451{
453 volatile struct qdio_buffer_element *sbale; 452 struct qdio_buffer_element *sbale;
454 int cc; 453 int cc;
455 454
456 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) 455 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
457 return -EIO; 456 return -EIO;
458 457
459 if (qdio_establish(&adapter->qdio_init_data)) { 458 if (qdio_establish(&adapter->qdio_init_data))
460 dev_err(&adapter->ccw_device->dev, 459 goto failed_establish;
461 "Establish of QDIO queues failed.\n");
462 return -EIO;
463 }
464 460
465 if (qdio_activate(adapter->ccw_device)) { 461 if (qdio_activate(adapter->ccw_device))
466 dev_err(&adapter->ccw_device->dev,
467 "Activate of QDIO queues failed.\n");
468 goto failed_qdio; 462 goto failed_qdio;
469 }
470 463
471 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 464 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
472 sbale = &(adapter->resp_q.sbal[cc]->element[0]); 465 sbale = &(adapter->resp_q.sbal[cc]->element[0]);
@@ -476,20 +469,20 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
476 } 469 }
477 470
478 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, 471 if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
479 QDIO_MAX_BUFFERS_PER_Q)) { 472 QDIO_MAX_BUFFERS_PER_Q))
480 dev_err(&adapter->ccw_device->dev,
481 "Init of QDIO response queue failed.\n");
482 goto failed_qdio; 473 goto failed_qdio;
483 }
484 474
485 /* set index of first avalable SBALS / number of available SBALS */ 475 /* set index of first avalable SBALS / number of available SBALS */
486 adapter->req_q.first = 0; 476 adapter->req_q.first = 0;
487 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); 477 atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
488 adapter->req_q.pci_batch = 0; 478 adapter->req_q_pci_batch = 0;
489 479
490 return 0; 480 return 0;
491 481
492failed_qdio: 482failed_qdio:
493 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); 483 qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
484failed_establish:
485 dev_err(&adapter->ccw_device->dev,
486 "Setting up the QDIO connection to the FCP adapter failed\n");
494 return -EIO; 487 return -EIO;
495} 488}
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index aeae56b00b45..ca8f85f3dad4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -21,20 +21,6 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
21 return fcp_sns_info_ptr; 21 return fcp_sns_info_ptr;
22} 22}
23 23
24void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
25{
26 fcp_dl_t *fcp_dl_ptr;
27
28 /*
29 * fcp_dl_addr = start address of fcp_cmnd structure +
30 * size of fixed part + size of dynamically sized add_dcp_cdb field
31 * SEE FCP-2 documentation
32 */
33 fcp_dl_ptr = (fcp_dl_t *) ((unsigned char *) &fcp_cmd[1] +
34 (fcp_cmd->add_fcp_cdb_length << 2));
35 *fcp_dl_ptr = fcp_dl;
36}
37
38static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 24static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
39{ 25{
40 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 26 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -119,13 +105,17 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
119{ 105{
120 struct zfcp_port *port; 106 struct zfcp_port *port;
121 struct zfcp_unit *unit; 107 struct zfcp_unit *unit;
108 int scsi_lun;
122 109
123 list_for_each_entry(port, &adapter->port_list_head, list) { 110 list_for_each_entry(port, &adapter->port_list_head, list) {
124 if (!port->rport || (id != port->rport->scsi_target_id)) 111 if (!port->rport || (id != port->rport->scsi_target_id))
125 continue; 112 continue;
126 list_for_each_entry(unit, &port->unit_list_head, list) 113 list_for_each_entry(unit, &port->unit_list_head, list) {
127 if (lun == unit->scsi_lun) 114 scsi_lun = scsilun_to_int(
115 (struct scsi_lun *)&unit->fcp_lun);
116 if (lun == scsi_lun)
128 return unit; 117 return unit;
118 }
129 } 119 }
130 120
131 return NULL; 121 return NULL;
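
The lookup above now derives the midlayer LUN from the unit's 8-byte FCP LUN via scsilun_to_int() instead of a cached scsi_lun value. A stand-alone sketch of that conversion for the simple peripheral-addressing case; this is a simplified reimplementation for illustration, not the scsi_device.h helper itself:

#include <stdint.h>
#include <stdio.h>

/* 8-byte FCP/SAM LUN as kept in unit->fcp_lun (big-endian address levels). */
struct lun8 {
	uint8_t b[8];
};

/*
 * Simplified flavour of scsilun_to_int(): fold the first four LUN bytes
 * (two addressing levels) into the integer LUN used by the SCSI midlayer.
 */
static unsigned int lun8_to_int(const struct lun8 *l)
{
	unsigned int lun = 0;

	for (int i = 0; i < 4; i += 2)
		lun |= ((l->b[i] << 8) | l->b[i + 1]) << (i * 8);
	return lun;
}

int main(void)
{
	uint64_t fcp_lun = 0x0001000000000000ULL;	/* peripheral addressing, LUN 1 */
	struct lun8 l;

	/* unit->fcp_lun is stored big-endian on s390, so extract byte-wise here. */
	for (int i = 0; i < 8; i++)
		l.b[i] = (uint8_t)(fcp_lun >> (56 - 8 * i));

	printf("SCSI LUN = %u\n", lun8_to_int(&l));
	return 0;
}
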
@@ -183,7 +173,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
183 return retval; 173 return retval;
184 } 174 }
185 fsf_req->data = NULL; 175 fsf_req->data = NULL;
186 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
187 176
188 /* don't access old fsf_req after releasing the abort_lock */ 177 /* don't access old fsf_req after releasing the abort_lock */
189 write_unlock_irqrestore(&adapter->abort_lock, flags); 178 write_unlock_irqrestore(&adapter->abort_lock, flags);
@@ -294,7 +283,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
294 sizeof (struct zfcp_adapter *)); 283 sizeof (struct zfcp_adapter *));
295 if (!adapter->scsi_host) { 284 if (!adapter->scsi_host) {
296 dev_err(&adapter->ccw_device->dev, 285 dev_err(&adapter->ccw_device->dev,
297 "registration with SCSI stack failed."); 286 "Registering the FCP device with the "
287 "SCSI stack failed\n");
298 return -EIO; 288 return -EIO;
299 } 289 }
300 290
@@ -312,7 +302,6 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
312 scsi_host_put(adapter->scsi_host); 302 scsi_host_put(adapter->scsi_host);
313 return -EIO; 303 return -EIO;
314 } 304 }
315 atomic_set_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
316 305
317 return 0; 306 return 0;
318} 307}
@@ -336,7 +325,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
336 scsi_remove_host(shost); 325 scsi_remove_host(shost);
337 scsi_host_put(shost); 326 scsi_host_put(shost);
338 adapter->scsi_host = NULL; 327 adapter->scsi_host = NULL;
339 atomic_clear_mask(ZFCP_STATUS_ADAPTER_REGISTERED, &adapter->status);
340 328
341 return; 329 return;
342} 330}
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 2e85c6c49e7d..2809d789b55c 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -26,9 +26,9 @@ static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
26ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", 26ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n",
27 atomic_read(&adapter->status)); 27 atomic_read(&adapter->status));
28ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n", 28ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n",
29 adapter->peer_wwnn); 29 (unsigned long long) adapter->peer_wwnn);
30ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n", 30ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n",
31 adapter->peer_wwpn); 31 (unsigned long long) adapter->peer_wwpn);
32ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n", 32ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n",
33 adapter->peer_d_id); 33 adapter->peer_d_id);
34ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n", 34ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n",
@@ -135,8 +135,9 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
135{ 135{
136 struct zfcp_adapter *adapter = dev_get_drvdata(dev); 136 struct zfcp_adapter *adapter = dev_get_drvdata(dev);
137 struct zfcp_port *port; 137 struct zfcp_port *port;
138 wwn_t wwpn; 138 u64 wwpn;
139 int retval = 0; 139 int retval = 0;
140 LIST_HEAD(port_remove_lh);
140 141
141 down(&zfcp_data.config_sema); 142 down(&zfcp_data.config_sema);
142 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { 143 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -144,7 +145,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
144 goto out; 145 goto out;
145 } 146 }
146 147
147 if (strict_strtoull(buf, 0, &wwpn)) { 148 if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) {
148 retval = -EINVAL; 149 retval = -EINVAL;
149 goto out; 150 goto out;
150 } 151 }
@@ -154,7 +155,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
154 if (port && (atomic_read(&port->refcount) == 0)) { 155 if (port && (atomic_read(&port->refcount) == 0)) {
155 zfcp_port_get(port); 156 zfcp_port_get(port);
156 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status); 157 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
157 list_move(&port->list, &adapter->port_remove_lh); 158 list_move(&port->list, &port_remove_lh);
158 } else 159 } else
159 port = NULL; 160 port = NULL;
160 write_unlock_irq(&zfcp_data.config_lock); 161 write_unlock_irq(&zfcp_data.config_lock);
@@ -200,7 +201,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
200{ 201{
201 struct zfcp_port *port = dev_get_drvdata(dev); 202 struct zfcp_port *port = dev_get_drvdata(dev);
202 struct zfcp_unit *unit; 203 struct zfcp_unit *unit;
203 fcp_lun_t fcp_lun; 204 u64 fcp_lun;
204 int retval = -EINVAL; 205 int retval = -EINVAL;
205 206
206 down(&zfcp_data.config_sema); 207 down(&zfcp_data.config_sema);
@@ -209,7 +210,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
209 goto out; 210 goto out;
210 } 211 }
211 212
212 if (strict_strtoull(buf, 0, &fcp_lun)) 213 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
213 goto out; 214 goto out;
214 215
215 unit = zfcp_unit_enqueue(port, fcp_lun); 216 unit = zfcp_unit_enqueue(port, fcp_lun);
@@ -233,8 +234,9 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
233{ 234{
234 struct zfcp_port *port = dev_get_drvdata(dev); 235 struct zfcp_port *port = dev_get_drvdata(dev);
235 struct zfcp_unit *unit; 236 struct zfcp_unit *unit;
236 fcp_lun_t fcp_lun; 237 u64 fcp_lun;
237 int retval = 0; 238 int retval = 0;
239 LIST_HEAD(unit_remove_lh);
238 240
239 down(&zfcp_data.config_sema); 241 down(&zfcp_data.config_sema);
240 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 242 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
@@ -242,7 +244,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
242 goto out; 244 goto out;
243 } 245 }
244 246
245 if (strict_strtoull(buf, 0, &fcp_lun)) { 247 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) {
246 retval = -EINVAL; 248 retval = -EINVAL;
247 goto out; 249 goto out;
248 } 250 }
@@ -252,7 +254,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
252 if (unit && (atomic_read(&unit->refcount) == 0)) { 254 if (unit && (atomic_read(&unit->refcount) == 0)) {
253 zfcp_unit_get(unit); 255 zfcp_unit_get(unit);
254 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status); 256 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
255 list_move(&unit->list, &port->unit_remove_lh); 257 list_move(&unit->list, &unit_remove_lh);
256 } else 258 } else
257 unit = NULL; 259 unit = NULL;
258 260
@@ -273,22 +275,7 @@ out:
273} 275}
274static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 276static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
275 277
276static struct attribute *zfcp_port_ns_attrs[] = { 278static struct attribute *zfcp_port_attrs[] = {
277 &dev_attr_port_failed.attr,
278 &dev_attr_port_in_recovery.attr,
279 &dev_attr_port_status.attr,
280 &dev_attr_port_access_denied.attr,
281 NULL
282};
283
284/**
285 * zfcp_sysfs_ns_port_attrs - sysfs attributes for nameserver
286 */
287struct attribute_group zfcp_sysfs_ns_port_attrs = {
288 .attrs = zfcp_port_ns_attrs,
289};
290
291static struct attribute *zfcp_port_no_ns_attrs[] = {
292 &dev_attr_unit_add.attr, 279 &dev_attr_unit_add.attr,
293 &dev_attr_unit_remove.attr, 280 &dev_attr_unit_remove.attr,
294 &dev_attr_port_failed.attr, 281 &dev_attr_port_failed.attr,
@@ -302,7 +289,7 @@ static struct attribute *zfcp_port_no_ns_attrs[] = {
302 * zfcp_sysfs_port_attrs - sysfs attributes for all other ports 289 * zfcp_sysfs_port_attrs - sysfs attributes for all other ports
303 */ 290 */
304struct attribute_group zfcp_sysfs_port_attrs = { 291struct attribute_group zfcp_sysfs_port_attrs = {
305 .attrs = zfcp_port_no_ns_attrs, 292 .attrs = zfcp_port_attrs,
306}; 293};
307 294
308static struct attribute *zfcp_unit_attrs[] = { 295static struct attribute *zfcp_unit_attrs[] = {
@@ -395,8 +382,10 @@ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);
395 382
396ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", 383ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
397 unit->port->adapter->ccw_device->dev.bus_id); 384 unit->port->adapter->ccw_device->dev.bus_id);
398ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", unit->port->wwpn); 385ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
399ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", unit->fcp_lun); 386 (unsigned long long) unit->port->wwpn);
387ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
388 (unsigned long long) unit->fcp_lun);
400 389
401struct device_attribute *zfcp_sysfs_sdev_attrs[] = { 390struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
402 &dev_attr_fcp_lun, 391 &dev_attr_fcp_lun,
@@ -487,10 +476,23 @@ ZFCP_SHOST_ATTR(megabytes, "%llu %llu\n",
487ZFCP_SHOST_ATTR(seconds_active, "%llu\n", 476ZFCP_SHOST_ATTR(seconds_active, "%llu\n",
488 (unsigned long long) stat_info.seconds_act); 477 (unsigned long long) stat_info.seconds_act);
489 478
479static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev,
480 struct device_attribute *attr,
481 char *buf)
482{
483 struct Scsi_Host *scsi_host = class_to_shost(dev);
484 struct zfcp_adapter *adapter =
485 (struct zfcp_adapter *) scsi_host->hostdata[0];
486
487 return sprintf(buf, "%d\n", atomic_read(&adapter->qdio_outb_full));
488}
489static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
490
490struct device_attribute *zfcp_sysfs_shost_attrs[] = { 491struct device_attribute *zfcp_sysfs_shost_attrs[] = {
491 &dev_attr_utilization, 492 &dev_attr_utilization,
492 &dev_attr_requests, 493 &dev_attr_requests,
493 &dev_attr_megabytes, 494 &dev_attr_megabytes,
494 &dev_attr_seconds_active, 495 &dev_attr_seconds_active,
496 &dev_attr_queue_full,
495 NULL 497 NULL
496}; 498};
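
The queue_full attribute added above exposes the new qdio_outb_full counter per SCSI host. A minimal user-space sketch for reading it; the sysfs path and host number are illustrative:

#include <stdio.h>

int main(void)
{
	/* Scsi_Host attributes appear under /sys/class/scsi_host/hostN/. */
	const char *path = "/sys/class/scsi_host/host0/queue_full";
	FILE *f = fopen(path, "r");
	unsigned long full = 0;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%lu", &full) == 1)
		printf("outbound queue full events: %lu\n", full);
	fclose(f);
	return 0;
}
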
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4e0322b1c1ea..d3b211af4e1c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1325,14 +1325,6 @@ config SCSI_QLOGIC_FAS
1325 To compile this driver as a module, choose M here: the 1325 To compile this driver as a module, choose M here: the
1326 module will be called qlogicfas. 1326 module will be called qlogicfas.
1327 1327
1328config SCSI_QLOGIC_FC_FIRMWARE
1329 bool "Include loadable firmware in driver"
1330 depends on SCSI_QLOGIC_FC
1331 help
1332 Say Y to include ISP2X00 Fabric Initiator/Target Firmware, with
1333 expanded LUN addressing and FcTape (FCP-2) support, in the
1334 qlogicfc driver. This is required on some platforms.
1335
1336config SCSI_QLOGIC_1280 1328config SCSI_QLOGIC_1280
1337 tristate "Qlogic QLA 1240/1x80/1x160 SCSI support" 1329 tristate "Qlogic QLA 1240/1x80/1x160 SCSI support"
1338 depends on PCI && SCSI 1330 depends on PCI && SCSI
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index aa4e77c25273..8abfd06b5a72 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
1139 srbcmd->id = cpu_to_le32(scmd_id(cmd)); 1139 srbcmd->id = cpu_to_le32(scmd_id(cmd));
1140 srbcmd->lun = cpu_to_le32(cmd->device->lun); 1140 srbcmd->lun = cpu_to_le32(cmd->device->lun);
1141 srbcmd->flags = cpu_to_le32(flag); 1141 srbcmd->flags = cpu_to_le32(flag);
1142 timeout = cmd->timeout_per_command/HZ; 1142 timeout = cmd->request->timeout/HZ;
1143 if (timeout == 0) 1143 if (timeout == 0)
1144 timeout = 1; 1144 timeout = 1;
1145 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds 1145 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index ef693e8412e9..8f45570a8a01 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -84,7 +84,7 @@ struct clariion_dh_data {
84 /* 84 /*
85 * I/O buffer for both MODE_SELECT and INQUIRY commands. 85 * I/O buffer for both MODE_SELECT and INQUIRY commands.
86 */ 86 */
87 char buffer[CLARIION_BUFFER_SIZE]; 87 unsigned char buffer[CLARIION_BUFFER_SIZE];
88 /* 88 /*
89 * SCSI sense buffer for commands -- assumes serial issuance 89 * SCSI sense buffer for commands -- assumes serial issuance
90 * and completion sequence of all commands for same multipath. 90 * and completion sequence of all commands for same multipath.
@@ -176,7 +176,7 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
176 err = SCSI_DH_DEV_TEMP_BUSY; 176 err = SCSI_DH_DEV_TEMP_BUSY;
177 goto out; 177 goto out;
178 } 178 }
179 if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) { 179 if (csdev->buffer[4] > 2) {
180 /* Invalid buffer format */ 180 /* Invalid buffer format */
181 sdev_printk(KERN_NOTICE, sdev, 181 sdev_printk(KERN_NOTICE, sdev,
182 "%s: invalid VPD page 0xC0 format\n", 182 "%s: invalid VPD page 0xC0 format\n",
@@ -278,7 +278,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
278 return NULL; 278 return NULL;
279 } 279 }
280 280
281 memset(rq->cmd, 0, BLK_MAX_CDB);
282 rq->cmd_len = COMMAND_SIZE(cmd); 281 rq->cmd_len = COMMAND_SIZE(cmd);
283 rq->cmd[0] = cmd; 282 rq->cmd[0] = cmd;
284 283
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index a6a4ef3ad51c..5e93c88ad66b 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -114,7 +114,6 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
114 req->cmd_type = REQ_TYPE_BLOCK_PC; 114 req->cmd_type = REQ_TYPE_BLOCK_PC;
115 req->cmd_flags |= REQ_FAILFAST; 115 req->cmd_flags |= REQ_FAILFAST;
116 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); 116 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
117 memset(req->cmd, 0, MAX_COMMAND_SIZE);
118 req->cmd[0] = TEST_UNIT_READY; 117 req->cmd[0] = TEST_UNIT_READY;
119 req->timeout = HP_SW_TIMEOUT; 118 req->timeout = HP_SW_TIMEOUT;
120 req->sense = h->sense; 119 req->sense = h->sense;
@@ -207,7 +206,6 @@ static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
207 req->cmd_type = REQ_TYPE_BLOCK_PC; 206 req->cmd_type = REQ_TYPE_BLOCK_PC;
208 req->cmd_flags |= REQ_FAILFAST; 207 req->cmd_flags |= REQ_FAILFAST;
209 req->cmd_len = COMMAND_SIZE(START_STOP); 208 req->cmd_len = COMMAND_SIZE(START_STOP);
210 memset(req->cmd, 0, MAX_COMMAND_SIZE);
211 req->cmd[0] = START_STOP; 209 req->cmd[0] = START_STOP;
212 req->cmd[4] = 1; /* Start spin cycle */ 210 req->cmd[4] = 1; /* Start spin cycle */
213 req->timeout = HP_SW_TIMEOUT; 211 req->timeout = HP_SW_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 6e2f130d56de..50bf95f3b5c4 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -225,8 +225,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
225 return NULL; 225 return NULL;
226 } 226 }
227 227
228 memset(rq->cmd, 0, BLK_MAX_CDB);
229
230 rq->cmd_type = REQ_TYPE_BLOCK_PC; 228 rq->cmd_type = REQ_TYPE_BLOCK_PC;
231 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 229 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
232 rq->retries = RDAC_RETRIES; 230 rq->retries = RDAC_RETRIES;
@@ -590,6 +588,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
590 {"STK", "OPENstorage D280"}, 588 {"STK", "OPENstorage D280"},
591 {"SUN", "CSM200_R"}, 589 {"SUN", "CSM200_R"},
592 {"SUN", "LCSM100_F"}, 590 {"SUN", "LCSM100_F"},
591 {"DELL", "MD3000"},
592 {"DELL", "MD3000i"},
593 {NULL, NULL}, 593 {NULL, NULL},
594}; 594};
595 595
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 822d5214692b..c387c15a2128 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
464 464
465 /* use request field to save the ptr. to completion struct. */ 465 /* use request field to save the ptr. to completion struct. */
466 scp->request = (struct request *)&wait; 466 scp->request = (struct request *)&wait;
467 scp->timeout_per_command = timeout*HZ;
468 scp->cmd_len = 12; 467 scp->cmd_len = 12;
469 scp->cmnd = cmnd; 468 scp->cmnd = cmnd;
470 cmndinfo.priority = IOCTL_PRI; 469 cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
1995 register Scsi_Cmnd *pscp; 1994 register Scsi_Cmnd *pscp;
1996 register Scsi_Cmnd *nscp; 1995 register Scsi_Cmnd *nscp;
1997 ulong flags; 1996 ulong flags;
1998 unchar b, t;
1999 1997
2000 TRACE(("gdth_putq() priority %d\n",priority)); 1998 TRACE(("gdth_putq() priority %d\n",priority));
2001 spin_lock_irqsave(&ha->smp_lock, flags); 1999 spin_lock_irqsave(&ha->smp_lock, flags);
2002 2000
2003 if (!cmndinfo->internal_command) { 2001 if (!cmndinfo->internal_command)
2004 cmndinfo->priority = priority; 2002 cmndinfo->priority = priority;
2005 b = scp->device->channel;
2006 t = scp->device->id;
2007 if (priority >= DEFAULT_PRI) {
2008 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
2009 (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
2010 TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
2011 cmndinfo->timeout = gdth_update_timeout(scp, 0);
2012 }
2013 }
2014 }
2015 2003
2016 if (ha->req_first==NULL) { 2004 if (ha->req_first==NULL) {
2017 ha->req_first = scp; /* queue was empty */ 2005 ha->req_first = scp; /* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
3899 return ((const char *)ha->binfo.type_string); 3887 return ((const char *)ha->binfo.type_string);
3900} 3888}
3901 3889
3890static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
3891{
3892 gdth_ha_str *ha = shost_priv(scp->device->host);
3893 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
3894 unchar b, t;
3895 ulong flags;
3896 enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
3897
 3898 TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
3899 b = scp->device->channel;
3900 t = scp->device->id;
3901
3902 /*
 3903 * We do not really honor the command timeout; instead we allow
 3904 * up to six times the original timeout. Reset the timer as long
 3905 * as this is less than the sixth expiry of this command.
3906 */
3907 if (++cmndinfo->timeout_count < 6)
3908 retval = BLK_EH_RESET_TIMER;
3909
3910 /* Reset the timeout if it is locked IO */
3911 spin_lock_irqsave(&ha->smp_lock, flags);
3912 if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
3913 (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
3914 TRACE2(("%s(): locked IO, reset timeout\n", __func__));
3915 retval = BLK_EH_RESET_TIMER;
3916 }
3917 spin_unlock_irqrestore(&ha->smp_lock, flags);
3918
3919 return retval;
3920}
3921
3922
3902static int gdth_eh_bus_reset(Scsi_Cmnd *scp) 3923static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
3903{ 3924{
3904 gdth_ha_str *ha = shost_priv(scp->device->host); 3925 gdth_ha_str *ha = shost_priv(scp->device->host);
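
The gdth_timed_out() handler added above replaces the old gdth_update_timeout() bookkeeping: instead of stretching the per-command timeout, it lets the block layer expire the command and simply re-arms the timer until the sixth expiry, or unconditionally while the bus or drive is locked. A stand-alone sketch of that counting pattern, with hypothetical names standing in for the scsi_cmnd plumbing:

#include <stdio.h>

enum eh_timer_verdict { EH_NOT_HANDLED, EH_RESET_TIMER };

struct cmnd_sketch {
	int timeout_count;	/* per-command count of expirations */
	int io_locked;		/* bus/drive currently locked for an ioctl */
};

/* Re-arm the timer for the first five expirations, or while locked. */
static enum eh_timer_verdict timed_out_sketch(struct cmnd_sketch *c)
{
	enum eh_timer_verdict v = EH_NOT_HANDLED;

	if (++c->timeout_count < 6)
		v = EH_RESET_TIMER;
	if (c->io_locked)
		v = EH_RESET_TIMER;
	return v;
}

int main(void)
{
	struct cmnd_sketch c = { 0, 0 };

	for (int i = 1; i <= 7; i++)
		printf("expiry %d -> %s\n", i,
		       timed_out_sketch(&c) == EH_RESET_TIMER ?
		       "reset timer" : "escalate to error handling");
	return 0;
}
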
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
3992 BUG_ON(!cmndinfo); 4013 BUG_ON(!cmndinfo);
3993 4014
3994 scp->scsi_done = done; 4015 scp->scsi_done = done;
3995 gdth_update_timeout(scp, scp->timeout_per_command * 6); 4016 cmndinfo->timeout_count = 0;
3996 cmndinfo->priority = DEFAULT_PRI; 4017 cmndinfo->priority = DEFAULT_PRI;
3997 4018
3998 return __gdth_queuecommand(ha, scp, cmndinfo); 4019 return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
4096 ha->hdr[j].lock = 1; 4117 ha->hdr[j].lock = 1;
4097 spin_unlock_irqrestore(&ha->smp_lock, flags); 4118 spin_unlock_irqrestore(&ha->smp_lock, flags);
4098 gdth_wait_completion(ha, ha->bus_cnt, j); 4119 gdth_wait_completion(ha, ha->bus_cnt, j);
4099 gdth_stop_timeout(ha, ha->bus_cnt, j);
4100 } else { 4120 } else {
4101 spin_lock_irqsave(&ha->smp_lock, flags); 4121 spin_lock_irqsave(&ha->smp_lock, flags);
4102 ha->hdr[j].lock = 0; 4122 ha->hdr[j].lock = 0;
4103 spin_unlock_irqrestore(&ha->smp_lock, flags); 4123 spin_unlock_irqrestore(&ha->smp_lock, flags);
4104 gdth_start_timeout(ha, ha->bus_cnt, j);
4105 gdth_next(ha); 4124 gdth_next(ha);
4106 } 4125 }
4107 } 4126 }
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
4539 spin_lock_irqsave(&ha->smp_lock, flags); 4558 spin_lock_irqsave(&ha->smp_lock, flags);
4540 ha->raw[i].lock = 1; 4559 ha->raw[i].lock = 1;
4541 spin_unlock_irqrestore(&ha->smp_lock, flags); 4560 spin_unlock_irqrestore(&ha->smp_lock, flags);
4542 for (j = 0; j < ha->tid_cnt; ++j) { 4561 for (j = 0; j < ha->tid_cnt; ++j)
4543 gdth_wait_completion(ha, i, j); 4562 gdth_wait_completion(ha, i, j);
4544 gdth_stop_timeout(ha, i, j);
4545 }
4546 } else { 4563 } else {
4547 spin_lock_irqsave(&ha->smp_lock, flags); 4564 spin_lock_irqsave(&ha->smp_lock, flags);
4548 ha->raw[i].lock = 0; 4565 ha->raw[i].lock = 0;
4549 spin_unlock_irqrestore(&ha->smp_lock, flags); 4566 spin_unlock_irqrestore(&ha->smp_lock, flags);
4550 for (j = 0; j < ha->tid_cnt; ++j) { 4567 for (j = 0; j < ha->tid_cnt; ++j)
4551 gdth_start_timeout(ha, i, j);
4552 gdth_next(ha); 4568 gdth_next(ha);
4553 }
4554 } 4569 }
4555 } 4570 }
4556 break; 4571 break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
4644 .slave_configure = gdth_slave_configure, 4659 .slave_configure = gdth_slave_configure,
4645 .bios_param = gdth_bios_param, 4660 .bios_param = gdth_bios_param,
4646 .proc_info = gdth_proc_info, 4661 .proc_info = gdth_proc_info,
4662 .eh_timed_out = gdth_timed_out,
4647 .proc_name = "gdth", 4663 .proc_name = "gdth",
4648 .can_queue = GDTH_MAXCMDS, 4664 .can_queue = GDTH_MAXCMDS,
4649 .this_id = -1, 4665 .this_id = -1,
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index ca92476727cf..1646444e9bd5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
916 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/ 916 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
917 dma_addr_t sense_paddr; /* sense dma-addr */ 917 dma_addr_t sense_paddr; /* sense dma-addr */
918 unchar priority; 918 unchar priority;
919 int timeout; 919 int timeout_count; /* # of timeout calls */
920 volatile int wait_for_completion; 920 volatile int wait_for_completion;
921 ushort status; 921 ushort status;
922 ulong32 info; 922 ulong32 info;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index ce0228e26aec..59349a316e13 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
748 } 748 }
749 spin_unlock_irqrestore(&ha->smp_lock, flags); 749 spin_unlock_irqrestore(&ha->smp_lock, flags);
750} 750}
751
752static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
753{
754 ulong flags;
755 Scsi_Cmnd *scp;
756 unchar b, t;
757
758 spin_lock_irqsave(&ha->smp_lock, flags);
759
760 for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
761 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
762 if (!cmndinfo->internal_command) {
763 b = scp->device->channel;
764 t = scp->device->id;
765 if (t == (unchar)id && b == (unchar)busnum) {
766 TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
767 cmndinfo->timeout = gdth_update_timeout(scp, 0);
768 }
769 }
770 }
771 spin_unlock_irqrestore(&ha->smp_lock, flags);
772}
773
774static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
775{
776 ulong flags;
777 Scsi_Cmnd *scp;
778 unchar b, t;
779
780 spin_lock_irqsave(&ha->smp_lock, flags);
781
782 for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
783 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
784 if (!cmndinfo->internal_command) {
785 b = scp->device->channel;
786 t = scp->device->id;
787 if (t == (unchar)id && b == (unchar)busnum) {
788 TRACE2(("gdth_start_timeout(): update_timeout()\n"));
789 gdth_update_timeout(scp, cmndinfo->timeout);
790 }
791 }
792 }
793 spin_unlock_irqrestore(&ha->smp_lock, flags);
794}
795
796static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
797{
798 int oldto;
799
800 oldto = scp->timeout_per_command;
801 scp->timeout_per_command = timeout;
802
803 if (timeout == 0) {
804 del_timer(&scp->eh_timeout);
805 scp->eh_timeout.data = (unsigned long) NULL;
806 scp->eh_timeout.expires = 0;
807 } else {
808 if (scp->eh_timeout.data != (unsigned long) NULL)
809 del_timer(&scp->eh_timeout);
810 scp->eh_timeout.data = (unsigned long) scp;
811 scp->eh_timeout.expires = jiffies + timeout;
812 add_timer(&scp->eh_timeout);
813 }
814
815 return oldto;
816}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 45e6fdacf36e..9b900cc9ebe8 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
20 ulong64 *paddr); 20 ulong64 *paddr);
21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr); 21static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); 22static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
23static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
24static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
25static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
26 23
27#endif 24#endif
28 25
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fed0b02ebc1d..3fdbb13e80a8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -464,7 +464,7 @@ static int __scsi_host_match(struct device *dev, void *data)
464struct Scsi_Host *scsi_host_lookup(unsigned short hostnum) 464struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
465{ 465{
466 struct device *cdev; 466 struct device *cdev;
467 struct Scsi_Host *shost = ERR_PTR(-ENXIO); 467 struct Scsi_Host *shost = NULL;
468 468
469 cdev = class_find_device(&shost_class, NULL, &hostnum, 469 cdev = class_find_device(&shost_class, NULL, &hostnum,
470 __scsi_host_match); 470 __scsi_host_match);
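With scsi_host_lookup() now returning NULL rather than an ERR_PTR() on failure, callers test the pointer directly instead of using IS_ERR(). A sketch of the caller-side pattern; the error code and the surrounding function are purely illustrative:

    #include <linux/errno.h>
    #include <scsi/scsi_host.h>

    static int example_use_host(unsigned short hostnum)
    {
            struct Scsi_Host *shost = scsi_host_lookup(hostnum);

            if (!shost)             /* was: if (IS_ERR(shost)) before this change */
                    return -ENXIO;

            /* ... use the host ... */

            scsi_host_put(shost);   /* drop the reference taken by the lookup */
            return 0;
    }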
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7b1502c0ab6e..87e09f35d3d4 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
756 init_event_struct(evt_struct, 756 init_event_struct(evt_struct,
757 handle_cmd_rsp, 757 handle_cmd_rsp,
758 VIOSRP_SRP_FORMAT, 758 VIOSRP_SRP_FORMAT,
759 cmnd->timeout_per_command/HZ); 759 cmnd->request->timeout/HZ);
760 760
761 evt_struct->cmnd = cmnd; 761 evt_struct->cmnd = cmnd;
762 evt_struct->cmnd_done = done; 762 evt_struct->cmnd_done = done;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 461331d3dc45..81c16cba5417 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
612 pc->req_xfer = pc->buf_size = scsi_bufflen(cmd); 612 pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
613 pc->scsi_cmd = cmd; 613 pc->scsi_cmd = cmd;
614 pc->done = done; 614 pc->done = done;
615 pc->timeout = jiffies + cmd->timeout_per_command; 615 pc->timeout = jiffies + cmd->request->timeout;
616 616
617 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { 617 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
618 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); 618 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e7a3a6554425..d30eb7ba018e 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
3670 sdev->no_uld_attach = 1; 3670 sdev->no_uld_attach = 1;
3671 } 3671 }
3672 if (ipr_is_vset_device(res)) { 3672 if (ipr_is_vset_device(res)) {
3673 sdev->timeout = IPR_VSET_RW_TIMEOUT; 3673 blk_queue_rq_timeout(sdev->request_queue,
3674 IPR_VSET_RW_TIMEOUT);
3674 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 3675 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3675 } 3676 }
3676 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) 3677 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
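The ipr hunk replaces the direct write to sdev->timeout with blk_queue_rq_timeout(), which sets the default request timeout on the device's block queue. A sketch of the same call from a slave_configure() method; the 120-second value and the function itself are only an example:

    #include <linux/blkdev.h>
    #include <linux/jiffies.h>
    #include <scsi/scsi_device.h>

    static int example_slave_configure(struct scsi_device *sdev)
    {
            /* Requests queued to this device now default to a 120 s timeout. */
            blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
            return 0;
    }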
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc9e6ddf41df..ef683f0d2b5a 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
3818 scb->cmd.dcdb.segment_4G = 0; 3818 scb->cmd.dcdb.segment_4G = 0;
3819 scb->cmd.dcdb.enhanced_sg = 0; 3819 scb->cmd.dcdb.enhanced_sg = 0;
3820 3820
3821 TimeOut = scb->scsi_cmd->timeout_per_command; 3821 TimeOut = scb->scsi_cmd->request->timeout;
3822 3822
3823 if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ 3823 if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
3824 if (!scb->sg_len) { 3824 if (!scb->sg_len) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 299e075a7b34..da7b67d30d9a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1456,7 +1456,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1456 if (lun == task->sc->device->lun || lun == -1) { 1456 if (lun == task->sc->device->lun || lun == -1) {
1457 debug_scsi("failing in progress sc %p itt 0x%x\n", 1457 debug_scsi("failing in progress sc %p itt 0x%x\n",
1458 task->sc, task->itt); 1458 task->sc, task->itt);
1459 fail_command(conn, task, DID_BUS_BUSY << 16); 1459 fail_command(conn, task, error << 16);
1460 } 1460 }
1461 } 1461 }
1462} 1462}
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
1476 scsi_queue_work(conn->session->host, &conn->xmitwork); 1476 scsi_queue_work(conn->session->host, &conn->xmitwork);
1477} 1477}
1478 1478
1479static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) 1479static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1480{ 1480{
1481 struct iscsi_cls_session *cls_session; 1481 struct iscsi_cls_session *cls_session;
1482 struct iscsi_session *session; 1482 struct iscsi_session *session;
1483 struct iscsi_conn *conn; 1483 struct iscsi_conn *conn;
1484 enum scsi_eh_timer_return rc = EH_NOT_HANDLED; 1484 enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
1485 1485
1486 cls_session = starget_to_session(scsi_target(scmd->device)); 1486 cls_session = starget_to_session(scsi_target(scmd->device));
1487 session = cls_session->dd_data; 1487 session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1494 * We are probably in the middle of iscsi recovery so let 1494 * We are probably in the middle of iscsi recovery so let
1495 * that complete and handle the error. 1495 * that complete and handle the error.
1496 */ 1496 */
1497 rc = EH_RESET_TIMER; 1497 rc = BLK_EH_RESET_TIMER;
1498 goto done; 1498 goto done;
1499 } 1499 }
1500 1500
1501 conn = session->leadconn; 1501 conn = session->leadconn;
1502 if (!conn) { 1502 if (!conn) {
1503 /* In the middle of shutting down */ 1503 /* In the middle of shutting down */
1504 rc = EH_RESET_TIMER; 1504 rc = BLK_EH_RESET_TIMER;
1505 goto done; 1505 goto done;
1506 } 1506 }
1507 1507
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1513 */ 1513 */
1514 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + 1514 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1515 (conn->ping_timeout * HZ), jiffies)) 1515 (conn->ping_timeout * HZ), jiffies))
1516 rc = EH_RESET_TIMER; 1516 rc = BLK_EH_RESET_TIMER;
1517 /* 1517 /*
1518 * if we are about to check the transport then give the command 1518 * if we are about to check the transport then give the command
1519 * more time 1519 * more time
1520 */ 1520 */
1521 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), 1521 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1522 jiffies)) 1522 jiffies))
1523 rc = EH_RESET_TIMER; 1523 rc = BLK_EH_RESET_TIMER;
1524 /* if in the middle of checking the transport then give us more time */ 1524 /* if in the middle of checking the transport then give us more time */
1525 if (conn->ping_task) 1525 if (conn->ping_task)
1526 rc = EH_RESET_TIMER; 1526 rc = BLK_EH_RESET_TIMER;
1527done: 1527done:
1528 spin_unlock(&session->lock); 1528 spin_unlock(&session->lock);
1529 debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh"); 1529 debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
1530 "timer reset" : "nh");
1530 return rc; 1531 return rc;
1531} 1532}
1532 1533
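Beyond the EH_* to BLK_EH_* renames, the interesting part of the libiscsi handler is the jiffies arithmetic that decides whether to re-arm the timer: if the receive/ping window has already elapsed, iSCSI recovery (not the SCSI error handler) is expected to clean the session up, so the command is given more time. A hedged sketch of the deadline test, with the parameters standing in for conn->last_recv, conn->recv_timeout and conn->ping_timeout:

    #include <linux/jiffies.h>
    #include <linux/types.h>

    static bool example_recv_window_elapsed(unsigned long last_recv,
                                            int recv_timeout, int ping_timeout)
    {
            unsigned long deadline = last_recv +
                                     (recv_timeout + ping_timeout) * HZ;

            return time_before_eq(deadline, jiffies);
    }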
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 48ee8c7f5bdd..e15501170698 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -294,10 +294,10 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
294 } 294 }
295} 295}
296 296
297static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in, 297static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
298 u32 val) 298 u32 val)
299{ 299{
300 struct domain_device *dev = ap->private_data; 300 struct domain_device *dev = link->ap->private_data;
301 301
302 SAS_DPRINTK("STUB %s\n", __func__); 302 SAS_DPRINTK("STUB %s\n", __func__);
303 switch (sc_reg_in) { 303 switch (sc_reg_in) {
@@ -319,10 +319,10 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
319 return 0; 319 return 0;
320} 320}
321 321
322static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in, 322static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
323 u32 *val) 323 u32 *val)
324{ 324{
325 struct domain_device *dev = ap->private_data; 325 struct domain_device *dev = link->ap->private_data;
326 326
327 SAS_DPRINTK("STUB %s\n", __func__); 327 SAS_DPRINTK("STUB %s\n", __func__);
328 switch (sc_reg_in) { 328 switch (sc_reg_in) {
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
398 398
399 /* Bounce SCSI-initiated commands to the SCSI EH */ 399 /* Bounce SCSI-initiated commands to the SCSI EH */
400 if (qc->scsicmd) { 400 if (qc->scsicmd) {
401 scsi_req_abort_cmd(qc->scsicmd); 401 blk_abort_request(qc->scsicmd->request);
402 scsi_schedule_eh(qc->scsicmd->device->host); 402 scsi_schedule_eh(qc->scsicmd->device->host);
403 return; 403 return;
404 } 404 }
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b4f9368f116a..0001374bd6b2 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
55int sas_register_ports(struct sas_ha_struct *sas_ha); 55int sas_register_ports(struct sas_ha_struct *sas_ha);
56void sas_unregister_ports(struct sas_ha_struct *sas_ha); 56void sas_unregister_ports(struct sas_ha_struct *sas_ha);
57 57
58enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); 58enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
59 59
60int sas_init_queue(struct sas_ha_struct *sas_ha); 60int sas_init_queue(struct sas_ha_struct *sas_ha);
61int sas_init_events(struct sas_ha_struct *sas_ha); 61int sas_init_events(struct sas_ha_struct *sas_ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index a8e3ef309070..744838780ada 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
673 return; 673 return;
674} 674}
675 675
676enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 676enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
677{ 677{
678 struct sas_task *task = TO_SAS_TASK(cmd); 678 struct sas_task *task = TO_SAS_TASK(cmd);
679 unsigned long flags; 679 unsigned long flags;
680 680
681 if (!task) { 681 if (!task) {
682 cmd->timeout_per_command /= 2; 682 cmd->request->timeout /= 2;
683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n", 683 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
684 cmd, task, (cmd->timeout_per_command ? 684 cmd, task, (cmd->request->timeout ?
685 "EH_RESET_TIMER" : "EH_NOT_HANDLED")); 685 "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
686 if (!cmd->timeout_per_command) 686 if (!cmd->request->timeout)
687 return EH_NOT_HANDLED; 687 return BLK_EH_NOT_HANDLED;
688 return EH_RESET_TIMER; 688 return BLK_EH_RESET_TIMER;
689 } 689 }
690 690
691 spin_lock_irqsave(&task->task_state_lock, flags); 691 spin_lock_irqsave(&task->task_state_lock, flags);
692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED); 692 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
693 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 693 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
694 spin_unlock_irqrestore(&task->task_state_lock, flags); 694 spin_unlock_irqrestore(&task->task_state_lock, flags);
695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", 695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
696 cmd, task); 696 "BLK_EH_HANDLED\n", cmd, task);
697 return EH_HANDLED; 697 return BLK_EH_HANDLED;
698 } 698 }
699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) { 699 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
700 spin_unlock_irqrestore(&task->task_state_lock, flags); 700 spin_unlock_irqrestore(&task->task_state_lock, flags);
701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: " 701 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
702 "EH_RESET_TIMER\n", 702 "BLK_EH_RESET_TIMER\n",
703 cmd, task); 703 cmd, task);
704 return EH_RESET_TIMER; 704 return BLK_EH_RESET_TIMER;
705 } 705 }
706 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 706 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
707 spin_unlock_irqrestore(&task->task_state_lock, flags); 707 spin_unlock_irqrestore(&task->task_state_lock, flags);
708 708
709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", 709 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
710 cmd, task); 710 cmd, task);
711 711
712 return EH_NOT_HANDLED; 712 return BLK_EH_NOT_HANDLED;
713} 713}
714 714
715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 715int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *task)
1039 return; 1039 return;
1040 } 1040 }
1041 1041
1042 scsi_req_abort_cmd(sc); 1042 blk_abort_request(sc->request);
1043 scsi_schedule_eh(sc->device->host); 1043 scsi_schedule_eh(sc->device->host);
1044} 1044}
1045 1045
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 97b763378e7d..afe1de998763 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1167 * cmd has not been completed within the timeout period. 1167 * cmd has not been completed within the timeout period.
1168 */ 1168 */
1169static enum 1169static enum
1170scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 1170blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1171{ 1171{
1172 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; 1172 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
1173 struct megasas_instance *instance; 1173 struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1175 1175
1176 if (time_after(jiffies, scmd->jiffies_at_alloc + 1176 if (time_after(jiffies, scmd->jiffies_at_alloc +
1177 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) { 1177 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
1178 return EH_NOT_HANDLED; 1178 return BLK_EH_NOT_HANDLED;
1179 } 1179 }
1180 1180
1181 instance = cmd->instance; 1181 instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1189 1189
1190 spin_unlock_irqrestore(instance->host->host_lock, flags); 1190 spin_unlock_irqrestore(instance->host->host_lock, flags);
1191 } 1191 }
1192 return EH_RESET_TIMER; 1192 return BLK_EH_RESET_TIMER;
1193} 1193}
1194 1194
1195/** 1195/**
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index c57c94c0ffd2..3b7240e40819 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
4170 ** 4170 **
4171 **---------------------------------------------------- 4171 **----------------------------------------------------
4172 */ 4172 */
4173 if (np->settle_time && cmd->timeout_per_command >= HZ) { 4173 if (np->settle_time && cmd->request->timeout >= HZ) {
4174 u_long tlimit = jiffies + cmd->timeout_per_command - HZ; 4174 u_long tlimit = jiffies + cmd->request->timeout - HZ;
4175 if (time_after(np->settle_time, tlimit)) 4175 if (time_after(np->settle_time, tlimit))
4176 np->settle_time = tlimit; 4176 np->settle_time = tlimit;
4177 } 4177 }
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 37f9ba0cd798..b6cd12b2e996 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 2845 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2846 2846
2847 /* Set ISP command timeout. */ 2847 /* Set ISP command timeout. */
2848 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 2848 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2849 2849
2850 /* Set device target ID and LUN */ 2850 /* Set device target ID and LUN */
2851 pkt->lun = SCSI_LUN_32(cmd); 2851 pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3114 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3115 3115
3116 /* Set ISP command timeout. */ 3116 /* Set ISP command timeout. */
3117 pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ); 3117 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3118 3118
3119 /* Set device target ID and LUN */ 3119 /* Set device target ID and LUN */
3120 pkt->lun = SCSI_LUN_32(cmd); 3120 pkt->lun = SCSI_LUN_32(cmd);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 45e7dcb4b34d..0ddfe7106b3b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -292,10 +292,11 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
292 valid = 0; 292 valid = 0;
293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 293 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
294 valid = 1; 294 valid = 1;
295 else if (start == (FA_BOOT_CODE_ADDR*4) || 295 else if (start == (ha->flt_region_boot * 4) ||
296 start == (FA_RISC_CODE_ADDR*4)) 296 start == (ha->flt_region_fw * 4))
297 valid = 1; 297 valid = 1;
298 else if (IS_QLA25XX(ha) && start == (FA_VPD_NVRAM_ADDR*4)) 298 else if (IS_QLA25XX(ha) &&
299 start == (ha->flt_region_vpd_nvram * 4))
299 valid = 1; 300 valid = 1;
300 if (!valid) { 301 if (!valid) {
301 qla_printk(KERN_WARNING, ha, 302 qla_printk(KERN_WARNING, ha,
@@ -1065,6 +1066,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1065 pfc_host_stat->dumped_frames = stats->dumped_frames; 1066 pfc_host_stat->dumped_frames = stats->dumped_frames;
1066 pfc_host_stat->nos_count = stats->nos_rcvd; 1067 pfc_host_stat->nos_count = stats->nos_rcvd;
1067 } 1068 }
1069 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1070 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1068 1071
1069done_free: 1072done_free:
1070 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1073 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 94a720eabfd8..83c819216771 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,7 +25,6 @@
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/aer.h> 26#include <linux/aer.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/semaphore.h>
29 28
30#include <scsi/scsi.h> 29#include <scsi/scsi.h>
31#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
@@ -2157,6 +2156,8 @@ struct qla_chip_state_84xx {
2157 2156
2158struct qla_statistics { 2157struct qla_statistics {
2159 uint32_t total_isp_aborts; 2158 uint32_t total_isp_aborts;
2159 uint64_t input_bytes;
2160 uint64_t output_bytes;
2160}; 2161};
2161 2162
2162/* 2163/*
@@ -2238,6 +2239,7 @@ typedef struct scsi_qla_host {
2238#define FCPORT_UPDATE_NEEDED 27 2239#define FCPORT_UPDATE_NEEDED 27
2239#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */ 2240#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2240#define UNLOADING 29 2241#define UNLOADING 29
2242#define NPIV_CONFIG_NEEDED 30
2241 2243
2242 uint32_t device_flags; 2244 uint32_t device_flags;
2243#define DFLG_LOCAL_DEVICES BIT_0 2245#define DFLG_LOCAL_DEVICES BIT_0
@@ -2507,7 +2509,6 @@ typedef struct scsi_qla_host {
2507 uint64_t fce_wr, fce_rd; 2509 uint64_t fce_wr, fce_rd;
2508 struct mutex fce_mutex; 2510 struct mutex fce_mutex;
2509 2511
2510 uint32_t hw_event_start;
2511 uint32_t hw_event_ptr; 2512 uint32_t hw_event_ptr;
2512 uint32_t hw_event_pause_errors; 2513 uint32_t hw_event_pause_errors;
2513 2514
@@ -2553,6 +2554,14 @@ typedef struct scsi_qla_host {
2553 uint32_t fdt_unprotect_sec_cmd; 2554 uint32_t fdt_unprotect_sec_cmd;
2554 uint32_t fdt_protect_sec_cmd; 2555 uint32_t fdt_protect_sec_cmd;
2555 2556
2557 uint32_t flt_region_flt;
2558 uint32_t flt_region_fdt;
2559 uint32_t flt_region_boot;
2560 uint32_t flt_region_fw;
2561 uint32_t flt_region_vpd_nvram;
2562 uint32_t flt_region_hw_event;
2563 uint32_t flt_region_npiv_conf;
2564
2556 /* Needed for BEACON */ 2565 /* Needed for BEACON */
2557 uint16_t beacon_blink_led; 2566 uint16_t beacon_blink_led;
2558 uint8_t beacon_color_state; 2567 uint8_t beacon_color_state;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index cf194517400d..d1d14202575a 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -789,14 +789,23 @@ struct device_reg_24xx {
789#define FA_RISC_CODE_ADDR 0x20000 789#define FA_RISC_CODE_ADDR 0x20000
790#define FA_RISC_CODE_SEGMENTS 2 790#define FA_RISC_CODE_SEGMENTS 2
791 791
792#define FA_FLASH_DESCR_ADDR_24 0x11000
793#define FA_FLASH_LAYOUT_ADDR_24 0x11400
794#define FA_NPIV_CONF0_ADDR_24 0x16000
795#define FA_NPIV_CONF1_ADDR_24 0x17000
796
792#define FA_FW_AREA_ADDR 0x40000 797#define FA_FW_AREA_ADDR 0x40000
793#define FA_VPD_NVRAM_ADDR 0x48000 798#define FA_VPD_NVRAM_ADDR 0x48000
794#define FA_FEATURE_ADDR 0x4C000 799#define FA_FEATURE_ADDR 0x4C000
795#define FA_FLASH_DESCR_ADDR 0x50000 800#define FA_FLASH_DESCR_ADDR 0x50000
801#define FA_FLASH_LAYOUT_ADDR 0x50400
796#define FA_HW_EVENT0_ADDR 0x54000 802#define FA_HW_EVENT0_ADDR 0x54000
797#define FA_HW_EVENT1_ADDR 0x54200 803#define FA_HW_EVENT1_ADDR 0x54400
798#define FA_HW_EVENT_SIZE 0x200 804#define FA_HW_EVENT_SIZE 0x200
799#define FA_HW_EVENT_ENTRY_SIZE 4 805#define FA_HW_EVENT_ENTRY_SIZE 4
806#define FA_NPIV_CONF0_ADDR 0x5C000
807#define FA_NPIV_CONF1_ADDR 0x5D000
808
800/* 809/*
801 * Flash Error Log Event Codes. 810 * Flash Error Log Event Codes.
802 */ 811 */
@@ -806,10 +815,6 @@ struct device_reg_24xx {
806#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023 815#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
807#define HW_EVENT_FLASH_FW_ERR 0xF024 816#define HW_EVENT_FLASH_FW_ERR 0xF024
808 817
809#define FA_BOOT_LOG_ADDR 0x58000
810#define FA_FW_DUMP0_ADDR 0x60000
811#define FA_FW_DUMP1_ADDR 0x70000
812
813 uint32_t flash_data; /* Flash/NVRAM BIOS data. */ 818 uint32_t flash_data; /* Flash/NVRAM BIOS data. */
814 819
815 uint32_t ctrl_status; /* Control/Status. */ 820 uint32_t ctrl_status; /* Control/Status. */
@@ -1203,6 +1208,62 @@ struct qla_fdt_layout {
1203 uint8_t unused2[65]; 1208 uint8_t unused2[65];
1204}; 1209};
1205 1210
1211/* Flash Layout Table ********************************************************/
1212
1213struct qla_flt_location {
1214 uint8_t sig[4];
1215 uint32_t start_lo;
1216 uint32_t start_hi;
1217 uint16_t unused;
1218 uint16_t checksum;
1219};
1220
1221struct qla_flt_header {
1222 uint16_t version;
1223 uint16_t length;
1224 uint16_t checksum;
1225 uint16_t unused;
1226};
1227
1228#define FLT_REG_FW 0x01
1229#define FLT_REG_BOOT_CODE 0x07
1230#define FLT_REG_VPD_0 0x14
1231#define FLT_REG_NVRAM_0 0x15
1232#define FLT_REG_VPD_1 0x16
1233#define FLT_REG_NVRAM_1 0x17
1234#define FLT_REG_FDT 0x1a
1235#define FLT_REG_FLT 0x1c
1236#define FLT_REG_HW_EVENT_0 0x1d
1237#define FLT_REG_HW_EVENT_1 0x1f
1238#define FLT_REG_NPIV_CONF_0 0x29
1239#define FLT_REG_NPIV_CONF_1 0x2a
1240
1241struct qla_flt_region {
1242 uint32_t code;
1243 uint32_t size;
1244 uint32_t start;
1245 uint32_t end;
1246};
1247
1248/* Flash NPIV Configuration Table ********************************************/
1249
1250struct qla_npiv_header {
1251 uint8_t sig[2];
1252 uint16_t version;
1253 uint16_t entries;
1254 uint16_t unused[4];
1255 uint16_t checksum;
1256};
1257
1258struct qla_npiv_entry {
1259 uint16_t flags;
1260 uint16_t vf_id;
1261 uint16_t qos;
1262 uint16_t unused1;
1263 uint8_t port_name[WWN_SIZE];
1264 uint8_t node_name[WWN_SIZE];
1265};
1266
1206/* 84XX Support **************************************************************/ 1267/* 84XX Support **************************************************************/
1207 1268
1208#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */ 1269#define MBA_ISP84XX_ALERT 0x800f /* Alert Notification. */
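Both the flash layout table (FLT) and the NPIV configuration table declared above are validated the same way in qla_sup.c: the header and payload are summed as little-endian 16-bit words and the total must come out to zero. A sketch of that check over an arbitrary word buffer; the helper name is illustrative:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static int example_flash_table_ok(const __le16 *wptr, unsigned int words)
    {
            u16 chksum = 0;

            while (words--)
                    chksum += le16_to_cpu(*wptr++);

            return chksum == 0;     /* a non-zero sum means a corrupt table */
    }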
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b156735e9a6..753dbe6cce6e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -313,9 +313,11 @@ extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t, 313extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
314 uint16_t, uint16_t); 314 uint16_t, uint16_t);
315 315
316extern void qla2xxx_get_flash_info(scsi_qla_host_t *); 316extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); 317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
318 318
319extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
320
319/* 321/*
320 * Global Function Prototypes in qla_dbg.c source file. 322 * Global Function Prototypes in qla_dbg.c source file.
321 */ 323 */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ee89ddd64aae..a470f2d3270d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -83,6 +83,13 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
83 83
84 ha->isp_ops->reset_chip(ha); 84 ha->isp_ops->reset_chip(ha);
85 85
86 rval = qla2xxx_get_flash_info(ha);
87 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no));
90 return (rval);
91 }
92
86 ha->isp_ops->get_flash_version(ha, ha->request_ring); 93 ha->isp_ops->get_flash_version(ha, ha->request_ring);
87 94
88 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
@@ -109,7 +116,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
109 rval = qla2x00_setup_chip(ha); 116 rval = qla2x00_setup_chip(ha);
110 if (rval) 117 if (rval)
111 return (rval); 118 return (rval);
112 qla2xxx_get_flash_info(ha);
113 } 119 }
114 if (IS_QLA84XX(ha)) { 120 if (IS_QLA84XX(ha)) {
115 ha->cs84xx = qla84xx_get_chip(ha); 121 ha->cs84xx = qla84xx_get_chip(ha);
@@ -2016,7 +2022,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2016 DEBUG3(printk("%s: exiting normally\n", __func__)); 2022 DEBUG3(printk("%s: exiting normally\n", __func__));
2017 } 2023 }
2018 2024
2019 /* Restore state if a resync event occured during processing */ 2025 /* Restore state if a resync event occurred during processing */
2020 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2026 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
2021 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2027 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2022 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2028 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
@@ -2561,7 +2567,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2561 rval = QLA_SUCCESS; 2567 rval = QLA_SUCCESS;
2562 2568
2563 /* Try GID_PT to get device list, else GAN. */ 2569 /* Try GID_PT to get device list, else GAN. */
2564 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_ATOMIC); 2570 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
2565 if (!swl) { 2571 if (!swl) {
2566 /*EMPTY*/ 2572 /*EMPTY*/
2567 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2573 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
@@ -3751,7 +3757,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3751 rval = QLA_SUCCESS; 3757 rval = QLA_SUCCESS;
3752 3758
3753 segments = FA_RISC_CODE_SEGMENTS; 3759 segments = FA_RISC_CODE_SEGMENTS;
3754 faddr = FA_RISC_CODE_ADDR; 3760 faddr = ha->flt_region_fw;
3755 dcode = (uint32_t *)ha->request_ring; 3761 dcode = (uint32_t *)ha->request_ring;
3756 *srisc_addr = 0; 3762 *srisc_addr = 0;
3757 3763
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 92fafbdbbaab..e90afad120ee 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -52,7 +52,7 @@ to_qla_parent(scsi_qla_host_t *ha)
52 * @ha: HA context 52 * @ha: HA context
53 * @ha_locked: is function called with the hardware lock 53 * @ha_locked: is function called with the hardware lock
54 * 54 *
55 * Returns non-zero if a failure occured, else zero. 55 * Returns non-zero if a failure occurred, else zero.
56 */ 56 */
57static inline int 57static inline int
58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked) 58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d57669aa4615..85bc0a48598b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -21,17 +21,22 @@ static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
21 * Returns the proper CF_* direction based on CDB. 21 * Returns the proper CF_* direction based on CDB.
22 */ 22 */
23static inline uint16_t 23static inline uint16_t
24qla2x00_get_cmd_direction(struct scsi_cmnd *cmd) 24qla2x00_get_cmd_direction(srb_t *sp)
25{ 25{
26 uint16_t cflags; 26 uint16_t cflags;
27 27
28 cflags = 0; 28 cflags = 0;
29 29
30 /* Set transfer direction */ 30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) 31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 32 cflags = CF_WRITE;
33 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 33 sp->fcport->ha->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
34 cflags = CF_READ; 36 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd);
39 }
35 return (cflags); 40 return (cflags);
36} 41}
37 42
@@ -169,7 +174,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
169 174
170 ha = sp->ha; 175 ha = sp->ha;
171 176
172 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
173 178
174 /* Three DSDs are available in the Command Type 2 IOCB */ 179 /* Three DSDs are available in the Command Type 2 IOCB */
175 avail_dsds = 3; 180 avail_dsds = 3;
@@ -228,7 +233,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
228 233
229 ha = sp->ha; 234 ha = sp->ha;
230 235
231 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd)); 236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
232 237
233 /* Two DSDs are available in the Command Type 3 IOCB */ 238 /* Two DSDs are available in the Command Type 3 IOCB */
234 avail_dsds = 2; 239 avail_dsds = 2;
@@ -262,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
262 * qla2x00_start_scsi() - Send a SCSI command to the ISP 267 * qla2x00_start_scsi() - Send a SCSI command to the ISP
263 * @sp: command to send to the ISP 268 * @sp: command to send to the ISP
264 * 269 *
265 * Returns non-zero if a failure occured, else zero. 270 * Returns non-zero if a failure occurred, else zero.
266 */ 271 */
267int 272int
268qla2x00_start_scsi(srb_t *sp) 273qla2x00_start_scsi(srb_t *sp)
@@ -407,7 +412,7 @@ queuing_error:
407 * 412 *
408 * Can be called from both normal and interrupt context. 413 * Can be called from both normal and interrupt context.
409 * 414 *
410 * Returns non-zero if a failure occured, else zero. 415 * Returns non-zero if a failure occurred, else zero.
411 */ 416 */
412int 417int
413__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
@@ -625,12 +630,17 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
625 ha = sp->ha; 630 ha = sp->ha;
626 631
627 /* Set transfer direction */ 632 /* Set transfer direction */
628 if (cmd->sc_data_direction == DMA_TO_DEVICE) 633 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
629 cmd_pkt->task_mgmt_flags = 634 cmd_pkt->task_mgmt_flags =
630 __constant_cpu_to_le16(TMF_WRITE_DATA); 635 __constant_cpu_to_le16(TMF_WRITE_DATA);
631 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 636 sp->fcport->ha->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
632 cmd_pkt->task_mgmt_flags = 639 cmd_pkt->task_mgmt_flags =
633 __constant_cpu_to_le16(TMF_READ_DATA); 640 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd);
643 }
634 644
635 /* One DSD is available in the Command Type 3 IOCB */ 645 /* One DSD is available in the Command Type 3 IOCB */
636 avail_dsds = 1; 646 avail_dsds = 1;
@@ -666,7 +676,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
666 * qla24xx_start_scsi() - Send a SCSI command to the ISP 676 * qla24xx_start_scsi() - Send a SCSI command to the ISP
667 * @sp: command to send to the ISP 677 * @sp: command to send to the ISP
668 * 678 *
669 * Returns non-zero if a failure occured, else zero. 679 * Returns non-zero if a failure occurred, else zero.
670 */ 680 */
671int 681int
672qla24xx_start_scsi(srb_t *sp) 682qla24xx_start_scsi(srb_t *sp)
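The byte counters added to qla2x00_get_cmd_direction() and qla24xx_build_scsi_iocbs() feed the new fcp_input_megabytes/fcp_output_megabytes statistics shown in the qla_attr.c hunk, where the totals are simply shifted right by 20. A small sketch of that accounting; the structure and helpers are illustrative stand-ins for the new qla_stats fields:

    #include <linux/types.h>

    struct example_io_stats {
            u64 input_bytes;        /* data read from the fabric */
            u64 output_bytes;       /* data written to the fabric */
    };

    static void example_account_xfer(struct example_io_stats *st,
                                     bool is_write, u32 len)
    {
            if (is_write)
                    st->output_bytes += len;
            else
                    st->input_bytes += len;
    }

    static u64 example_megabytes(u64 bytes)
    {
            return bytes >> 20;     /* same conversion used for fcp_*_megabytes */
    }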
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index bf41887cdd65..fc4bfa7f839c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -391,9 +391,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
391 break; 391 break;
392 392
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no, 394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
395 mb[1])); 395 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]); 396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 397
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 399 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -460,7 +460,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 461 ha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 462 qla_printk(KERN_INFO, ha,
463 "LIP reset occured (%x).\n", mb[1]); 463 "LIP reset occurred (%x).\n", mb[1]);
464 464
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 466 atomic_set(&ha->loop_state, LOOP_DOWN);
@@ -543,7 +543,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
543 543
544 case MBA_PORT_UPDATE: /* Port database update */ 544 case MBA_PORT_UPDATE: /* Port database update */
545 /* 545 /*
546 * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET 546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process 547 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and Wait for RSCN to come in. 548 * it. Otherwise ignore it and Wait for RSCN to come in.
549 */ 549 */
@@ -589,7 +589,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 590 ha->host_no, mb[1], mb[2], mb[3]));
591 591
592 rscn_entry = (mb[1] << 16) | mb[2]; 592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
594 ha->d_id.b.al_pa; 594 ha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 595 if (rscn_entry == host_pid) {
@@ -600,6 +600,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
600 break; 600 break;
601 } 601 }
602 602
603 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
603 rscn_queue_index = ha->rscn_in_ptr + 1; 605 rscn_queue_index = ha->rscn_in_ptr + 1;
604 if (rscn_queue_index == MAX_RSCN_COUNT) 606 if (rscn_queue_index == MAX_RSCN_COUNT)
605 rscn_queue_index = 0; 607 rscn_queue_index = 0;
@@ -1060,8 +1062,9 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1060 resid = resid_len; 1062 resid = resid_len;
1061 /* Use F/W calculated residual length. */ 1063 /* Use F/W calculated residual length. */
1062 if (IS_FWI2_CAPABLE(ha)) { 1064 if (IS_FWI2_CAPABLE(ha)) {
1063 if (scsi_status & SS_RESIDUAL_UNDER && 1065 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1064 resid != fw_resid_len) { 1066 lscsi_status = 0;
1067 } else if (resid != fw_resid_len) {
1065 scsi_status &= ~SS_RESIDUAL_UNDER; 1068 scsi_status &= ~SS_RESIDUAL_UNDER;
1066 lscsi_status = 0; 1069 lscsi_status = 0;
1067 } 1070 }
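The RSCN fix above masks the reserved high bits of mailbox register 1 before the entry is queued, keeping only the low ten bits alongside the sixteen bits from mailbox register 2. A sketch of the two 24-bit port-id computations involved, with mb1/mb2 standing in for the mailbox registers:

    #include <linux/types.h>

    /* Queued RSCN entry: ignore the reserved bits of mb1. */
    static u32 example_rscn_entry(u16 mb1, u16 mb2)
    {
            return ((u32)(mb1 & 0x3ff) << 16) | mb2;
    }

    /* The host's own port id, used for the self-comparison above. */
    static u32 example_host_pid(u8 domain, u8 area, u8 al_pa)
    {
            return ((u32)domain << 16) | ((u32)area << 8) | al_pa;
    }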
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 813bc7784c0a..36bc6851e23d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -233,7 +233,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
233 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 233 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
234 "isp_abort_needed.\n", __func__, ha->host_no)); 234 "isp_abort_needed.\n", __func__, ha->host_no));
235 qla_printk(KERN_WARNING, ha, 235 qla_printk(KERN_WARNING, ha,
236 "Mailbox command timeout occured. Scheduling ISP " 236 "Mailbox command timeout occurred. Scheduling ISP "
237 "abort.\n"); 237 "abort.\n");
238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
239 qla2xxx_wake_dpc(ha); 239 qla2xxx_wake_dpc(ha);
@@ -244,7 +244,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
244 DEBUG2_3_11(printk("%s(%ld): timeout calling " 244 DEBUG2_3_11(printk("%s(%ld): timeout calling "
245 "abort_isp\n", __func__, ha->host_no)); 245 "abort_isp\n", __func__, ha->host_no));
246 qla_printk(KERN_WARNING, ha, 246 qla_printk(KERN_WARNING, ha,
247 "Mailbox command timeout occured. Issuing ISP " 247 "Mailbox command timeout occurred. Issuing ISP "
248 "abort.\n"); 248 "abort.\n");
249 249
250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
@@ -1995,7 +1995,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
1995 char *pmap; 1995 char *pmap;
1996 dma_addr_t pmap_dma; 1996 dma_addr_t pmap_dma;
1997 1997
1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_ATOMIC, &pmap_dma); 1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
1999 if (pmap == NULL) { 1999 if (pmap == NULL) {
2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
2001 __func__, ha->host_no)); 2001 __func__, ha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6d0f0e5f2827..3433441b956a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1517,6 +1517,7 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
1517 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1517 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
1518 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1518 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
1519 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1519 set_bit(RSCN_UPDATE, &ha->dpc_flags);
1520 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
1520} 1521}
1521 1522
1522static int 1523static int
@@ -1663,8 +1664,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1663 ha->gid_list_info_size = 8; 1664 ha->gid_list_info_size = 8;
1664 ha->optrom_size = OPTROM_SIZE_25XX; 1665 ha->optrom_size = OPTROM_SIZE_25XX;
1665 ha->isp_ops = &qla25xx_isp_ops; 1666 ha->isp_ops = &qla25xx_isp_ops;
1666 ha->hw_event_start = PCI_FUNC(pdev->devfn) ?
1667 FA_HW_EVENT1_ADDR: FA_HW_EVENT0_ADDR;
1668 } 1667 }
1669 host->can_queue = ha->request_q_length + 128; 1668 host->can_queue = ha->request_q_length + 128;
1670 1669
@@ -2433,6 +2432,12 @@ qla2x00_do_dpc(void *data)
2433 ha->host_no)); 2432 ha->host_no));
2434 } 2433 }
2435 2434
2435 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) &&
2436 atomic_read(&ha->loop_state) == LOOP_READY) {
2437 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags);
2438 qla2xxx_flash_npiv_conf(ha);
2439 }
2440
2436 if (!ha->interrupts_on) 2441 if (!ha->interrupts_on)
2437 ha->isp_ops->enable_intrs(ha); 2442 ha->isp_ops->enable_intrs(ha);
2438 2443
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1bca74474935..90a13211717f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -543,23 +543,198 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
543 } 543 }
544} 544}
545 545
546void 546static int
547qla2xxx_get_flash_info(scsi_qla_host_t *ha) 547qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
548{
549 const char *loc, *locations[] = { "DEF", "PCI" };
550 uint32_t pcihdr, pcids;
551 uint32_t *dcode;
552 uint8_t *buf, *bcode, last_image;
553 uint16_t cnt, chksum, *wptr;
554 struct qla_flt_location *fltl;
555
556 /*
557 * FLT-location structure resides after the last PCI region.
558 */
559
560 /* Begin with sane defaults. */
561 loc = locations[0];
562 *start = IS_QLA24XX_TYPE(ha) ? FA_FLASH_LAYOUT_ADDR_24:
563 FA_FLASH_LAYOUT_ADDR;
564
565 /* Begin with first PCI expansion ROM header. */
566 buf = (uint8_t *)ha->request_ring;
567 dcode = (uint32_t *)ha->request_ring;
568 pcihdr = 0;
569 last_image = 1;
570 do {
571 /* Verify PCI expansion ROM header. */
572 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20);
573 bcode = buf + (pcihdr % 4);
574 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
575 goto end;
576
577 /* Locate PCI data structure. */
578 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
579 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20);
580 bcode = buf + (pcihdr % 4);
581
582 /* Validate signature of PCI data structure. */
583 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
584 bcode[0x2] != 'I' || bcode[0x3] != 'R')
585 goto end;
586
587 last_image = bcode[0x15] & BIT_7;
588
589 /* Locate next PCI expansion ROM. */
590 pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
591 } while (!last_image);
592
593 /* Now verify FLT-location structure. */
594 fltl = (struct qla_flt_location *)ha->request_ring;
595 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2,
596 sizeof(struct qla_flt_location) >> 2);
597 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
598 fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
599 goto end;
600
601 wptr = (uint16_t *)ha->request_ring;
602 cnt = sizeof(struct qla_flt_location) >> 1;
603 for (chksum = 0; cnt; cnt--)
604 chksum += le16_to_cpu(*wptr++);
605 if (chksum) {
606 qla_printk(KERN_ERR, ha,
607 "Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
608 qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
609 return QLA_FUNCTION_FAILED;
610 }
611
612 /* Good data. Use specified location. */
613 loc = locations[1];
614 *start = le16_to_cpu(fltl->start_hi) << 16 |
615 le16_to_cpu(fltl->start_lo);
616end:
617 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
618 return QLA_SUCCESS;
619}
620
621static void
622qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
623{
624 const char *loc, *locations[] = { "DEF", "FLT" };
625 uint16_t *wptr;
626 uint16_t cnt, chksum;
627 uint32_t start;
628 struct qla_flt_header *flt;
629 struct qla_flt_region *region;
630
631 ha->flt_region_flt = flt_addr;
632 wptr = (uint16_t *)ha->request_ring;
633 flt = (struct qla_flt_header *)ha->request_ring;
634 region = (struct qla_flt_region *)&flt[1];
635 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
636 flt_addr << 2, OPTROM_BURST_SIZE);
637 if (*wptr == __constant_cpu_to_le16(0xffff))
638 goto no_flash_data;
639 if (flt->version != __constant_cpu_to_le16(1)) {
640 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
641 "version=0x%x length=0x%x checksum=0x%x.\n",
642 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
643 le16_to_cpu(flt->checksum)));
644 goto no_flash_data;
645 }
646
647 cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
648 for (chksum = 0; cnt; cnt--)
649 chksum += le16_to_cpu(*wptr++);
650 if (chksum) {
651 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
652 "version=0x%x length=0x%x checksum=0x%x.\n",
653 le16_to_cpu(flt->version), le16_to_cpu(flt->length),
654 chksum));
655 goto no_flash_data;
656 }
657
658 loc = locations[1];
659 cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
660 for ( ; cnt; cnt--, region++) {
661 /* Store addresses as DWORD offsets. */
662 start = le32_to_cpu(region->start) >> 2;
663
664 DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
665 "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
666 le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
667
668 switch (le32_to_cpu(region->code)) {
669 case FLT_REG_FW:
670 ha->flt_region_fw = start;
671 break;
672 case FLT_REG_BOOT_CODE:
673 ha->flt_region_boot = start;
674 break;
675 case FLT_REG_VPD_0:
676 ha->flt_region_vpd_nvram = start;
677 break;
678 case FLT_REG_FDT:
679 ha->flt_region_fdt = start;
680 break;
681 case FLT_REG_HW_EVENT_0:
682 if (!PCI_FUNC(ha->pdev->devfn))
683 ha->flt_region_hw_event = start;
684 break;
685 case FLT_REG_HW_EVENT_1:
686 if (PCI_FUNC(ha->pdev->devfn))
687 ha->flt_region_hw_event = start;
688 break;
689 case FLT_REG_NPIV_CONF_0:
690 if (!PCI_FUNC(ha->pdev->devfn))
691 ha->flt_region_npiv_conf = start;
692 break;
693 case FLT_REG_NPIV_CONF_1:
694 if (PCI_FUNC(ha->pdev->devfn))
695 ha->flt_region_npiv_conf = start;
696 break;
697 }
698 }
699 goto done;
700
701no_flash_data:
702 /* Use hardcoded defaults. */
703 loc = locations[0];
704 ha->flt_region_fw = FA_RISC_CODE_ADDR;
705 ha->flt_region_boot = FA_BOOT_CODE_ADDR;
706 ha->flt_region_vpd_nvram = FA_VPD_NVRAM_ADDR;
707 ha->flt_region_fdt = IS_QLA24XX_TYPE(ha) ? FA_FLASH_DESCR_ADDR_24:
708 FA_FLASH_DESCR_ADDR;
709 ha->flt_region_hw_event = !PCI_FUNC(ha->pdev->devfn) ?
710 FA_HW_EVENT0_ADDR: FA_HW_EVENT1_ADDR;
711 ha->flt_region_npiv_conf = !PCI_FUNC(ha->pdev->devfn) ?
712 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF0_ADDR_24: FA_NPIV_CONF0_ADDR):
713 (IS_QLA24XX_TYPE(ha) ? FA_NPIV_CONF1_ADDR_24: FA_NPIV_CONF1_ADDR);
714done:
715 DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
716 "vpd_nvram=0x%x fdt=0x%x flt=0x%x hwe=0x%x npiv=0x%x.\n", loc,
717 ha->flt_region_boot, ha->flt_region_fw, ha->flt_region_vpd_nvram,
718 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_hw_event,
719 ha->flt_region_npiv_conf));
720}
721
722static void
723qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
548{ 724{
549#define FLASH_BLK_SIZE_32K 0x8000 725#define FLASH_BLK_SIZE_32K 0x8000
550#define FLASH_BLK_SIZE_64K 0x10000 726#define FLASH_BLK_SIZE_64K 0x10000
727 const char *loc, *locations[] = { "MID", "FDT" };
551 uint16_t cnt, chksum; 728 uint16_t cnt, chksum;
552 uint16_t *wptr; 729 uint16_t *wptr;
553 struct qla_fdt_layout *fdt; 730 struct qla_fdt_layout *fdt;
554 uint8_t man_id, flash_id; 731 uint8_t man_id, flash_id;
555 732 uint16_t mid, fid;
556 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
557 return;
558 733
559 wptr = (uint16_t *)ha->request_ring; 734 wptr = (uint16_t *)ha->request_ring;
560 fdt = (struct qla_fdt_layout *)ha->request_ring; 735 fdt = (struct qla_fdt_layout *)ha->request_ring;
561 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 736 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring,
562 FA_FLASH_DESCR_ADDR << 2, OPTROM_BURST_SIZE); 737 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
563 if (*wptr == __constant_cpu_to_le16(0xffff)) 738 if (*wptr == __constant_cpu_to_le16(0xffff))
564 goto no_flash_data; 739 goto no_flash_data;
565 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || 740 if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
@@ -577,7 +752,10 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
577 goto no_flash_data; 752 goto no_flash_data;
578 } 753 }
579 754
580 ha->fdt_odd_index = le16_to_cpu(fdt->man_id) == 0x1f; 755 loc = locations[1];
756 mid = le16_to_cpu(fdt->man_id);
757 fid = le16_to_cpu(fdt->id);
758 ha->fdt_odd_index = mid == 0x1f;
581 ha->fdt_wrt_disable = fdt->wrt_disable_bits; 759 ha->fdt_wrt_disable = fdt->wrt_disable_bits;
582 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd); 760 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x0300 | fdt->erase_cmd);
583 ha->fdt_block_size = le32_to_cpu(fdt->block_size); 761 ha->fdt_block_size = le32_to_cpu(fdt->block_size);
@@ -588,16 +766,12 @@ qla2xxx_get_flash_info(scsi_qla_host_t *ha)
588 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd): 766 flash_conf_to_access_addr(0x0300 | fdt->protect_sec_cmd):
589 flash_conf_to_access_addr(0x0336); 767 flash_conf_to_access_addr(0x0336);
590 } 768 }
591 769 goto done;
592 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[FDT]: (0x%x/0x%x) erase=0x%x "
593 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n",
594 le16_to_cpu(fdt->man_id), le16_to_cpu(fdt->id), ha->fdt_erase_cmd,
595 ha->fdt_protect_sec_cmd, ha->fdt_unprotect_sec_cmd,
596 ha->fdt_odd_index, ha->fdt_wrt_disable, ha->fdt_block_size));
597 return;
598
599no_flash_data: 770no_flash_data:
771 loc = locations[0];
600 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id); 772 qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
773 mid = man_id;
774 fid = flash_id;
601 ha->fdt_wrt_disable = 0x9c; 775 ha->fdt_wrt_disable = 0x9c;
602 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8); 776 ha->fdt_erase_cmd = flash_conf_to_access_addr(0x03d8);
603 switch (man_id) { 777 switch (man_id) {
@@ -625,14 +799,117 @@ no_flash_data:
625 ha->fdt_block_size = FLASH_BLK_SIZE_64K; 799 ha->fdt_block_size = FLASH_BLK_SIZE_64K;
626 break; 800 break;
627 } 801 }
628 802done:
629 DEBUG2(qla_printk(KERN_DEBUG, ha, "Flash[MID]: (0x%x/0x%x) erase=0x%x " 803 DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
630 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", man_id, flash_id, 804 "pro=%x upro=%x idx=%d wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
631 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd, 805 ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
632 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable, 806 ha->fdt_unprotect_sec_cmd, ha->fdt_odd_index, ha->fdt_wrt_disable,
633 ha->fdt_block_size)); 807 ha->fdt_block_size));
634} 808}
635 809
810int
811qla2xxx_get_flash_info(scsi_qla_host_t *ha)
812{
813 int ret;
814 uint32_t flt_addr;
815
816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
817 return QLA_SUCCESS;
818
819 ret = qla2xxx_find_flt_start(ha, &flt_addr);
820 if (ret != QLA_SUCCESS)
821 return ret;
822
823 qla2xxx_get_flt_info(ha, flt_addr);
824 qla2xxx_get_fdt_info(ha);
825
826 return QLA_SUCCESS;
827}
828
829void
830qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
831{
832#define NPIV_CONFIG_SIZE (16*1024)
833 void *data;
834 uint16_t *wptr;
835 uint16_t cnt, chksum;
836 struct qla_npiv_header hdr;
837 struct qla_npiv_entry *entry;
838
839 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
840 return;
841
842 ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr,
843 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
844 if (hdr.version == __constant_cpu_to_le16(0xffff))
845 return;
846 if (hdr.version != __constant_cpu_to_le16(1)) {
847 DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
848 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
849 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
850 le16_to_cpu(hdr.checksum)));
851 return;
852 }
853
854 data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
855 if (!data) {
856 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
857 "allocate memory.\n"));
858 return;
859 }
860
861 ha->isp_ops->read_optrom(ha, (uint8_t *)data,
862 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
863
864 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
865 sizeof(struct qla_npiv_entry)) >> 1;
866 for (wptr = data, chksum = 0; cnt; cnt--)
867 chksum += le16_to_cpu(*wptr++);
868 if (chksum) {
869 DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
870 "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
871 le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
872 chksum));
873 goto done;
874 }
875
876 entry = data + sizeof(struct qla_npiv_header);
877 cnt = le16_to_cpu(hdr.entries);
878 for ( ; cnt; cnt--, entry++) {
879 uint16_t flags;
880 struct fc_vport_identifiers vid;
881 struct fc_vport *vport;
882
883 flags = le16_to_cpu(entry->flags);
884 if (flags == 0xffff)
885 continue;
886 if ((flags & BIT_0) == 0)
887 continue;
888
889 memset(&vid, 0, sizeof(vid));
890 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
891 vid.vport_type = FC_PORTTYPE_NPIV;
892 vid.disable = false;
893 vid.port_name = wwn_to_u64(entry->port_name);
894 vid.node_name = wwn_to_u64(entry->node_name);
895
896 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
897 "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt,
898 (unsigned long long)vid.port_name,
899 (unsigned long long)vid.node_name,
900 le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos)));
901
902 vport = fc_vport_create(ha->host, 0, &vid);
903 if (!vport)
904 qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to "
905 "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt,
906 (unsigned long long)vid.port_name,
907 (unsigned long long)vid.node_name);
908 }
909done:
910 kfree(data);
911}
912
636static void 913static void
637qla24xx_unprotect_flash(scsi_qla_host_t *ha) 914qla24xx_unprotect_flash(scsi_qla_host_t *ha)
638{ 915{
@@ -920,7 +1197,8 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
920 dwptr = (uint32_t *)buf; 1197 dwptr = (uint32_t *)buf;
921 for (i = 0; i < bytes >> 2; i++, naddr++) 1198 for (i = 0; i < bytes >> 2; i++, naddr++)
922 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1199 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
923 flash_data_to_access_addr(FA_VPD_NVRAM_ADDR | naddr))); 1200 flash_data_to_access_addr(ha->flt_region_vpd_nvram |
1201 naddr)));
924 1202
925 return buf; 1203 return buf;
926} 1204}
@@ -935,10 +1213,10 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
935 dbuf = vmalloc(RMW_BUFFER_SIZE); 1213 dbuf = vmalloc(RMW_BUFFER_SIZE);
936 if (!dbuf) 1214 if (!dbuf)
937 return QLA_MEMORY_ALLOC_FAILED; 1215 return QLA_MEMORY_ALLOC_FAILED;
938 ha->isp_ops->read_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1216 ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
939 RMW_BUFFER_SIZE); 1217 RMW_BUFFER_SIZE);
940 memcpy(dbuf + (naddr << 2), buf, bytes); 1218 memcpy(dbuf + (naddr << 2), buf, bytes);
941 ha->isp_ops->write_optrom(ha, dbuf, FA_VPD_NVRAM_ADDR << 2, 1219 ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2,
942 RMW_BUFFER_SIZE); 1220 RMW_BUFFER_SIZE);
943 vfree(dbuf); 1221 vfree(dbuf);
944 1222
@@ -2166,7 +2444,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2166 memset(dbyte, 0, 8); 2444 memset(dbyte, 0, 8);
2167 dcode = (uint16_t *)dbyte; 2445 dcode = (uint16_t *)dbyte;
2168 2446
2169 qla2x00_read_flash_data(ha, dbyte, FA_RISC_CODE_ADDR * 4 + 10, 2447 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2170 8); 2448 8);
2171 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", 2449 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n",
2172 __func__, ha->host_no)); 2450 __func__, ha->host_no));
@@ -2177,7 +2455,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2177 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2455 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2178 dcode[3] == 0)) { 2456 dcode[3] == 0)) {
2179 DEBUG2(printk("%s(): Unrecognized fw revision at " 2457 DEBUG2(printk("%s(): Unrecognized fw revision at "
2180 "%x.\n", __func__, FA_RISC_CODE_ADDR * 4)); 2458 "%x.\n", __func__, ha->flt_region_fw * 4));
2181 } else { 2459 } else {
2182 /* values are in big endian */ 2460 /* values are in big endian */
2183 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2461 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2212,7 +2490,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2212 dcode = mbuf; 2490 dcode = mbuf;
2213 2491
2214 /* Begin with first PCI expansion ROM header. */ 2492 /* Begin with first PCI expansion ROM header. */
2215 pcihdr = 0; 2493 pcihdr = ha->flt_region_boot;
2216 last_image = 1; 2494 last_image = 1;
2217 do { 2495 do {
2218 /* Verify PCI expansion ROM header. */ 2496 /* Verify PCI expansion ROM header. */
@@ -2282,7 +2560,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2282 memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); 2560 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2283 dcode = mbuf; 2561 dcode = mbuf;
2284 2562
2285 qla24xx_read_flash_data(ha, dcode, FA_RISC_CODE_ADDR + 4, 4); 2563 qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4);
2286 for (i = 0; i < 4; i++) 2564 for (i = 0; i < 4; i++)
2287 dcode[i] = be32_to_cpu(dcode[i]); 2565 dcode[i] = be32_to_cpu(dcode[i]);
2288 2566
@@ -2291,7 +2569,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2291 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2569 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2292 dcode[3] == 0)) { 2570 dcode[3] == 0)) {
2293 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", 2571 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n",
2294 __func__, FA_RISC_CODE_ADDR)); 2572 __func__, ha->flt_region_fw));
2295 } else { 2573 } else {
2296 ha->fw_revision[0] = dcode[0]; 2574 ha->fw_revision[0] = dcode[0];
2297 ha->fw_revision[1] = dcode[1]; 2575 ha->fw_revision[1] = dcode[1];
@@ -2355,7 +2633,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2355 /* Locate first empty entry. */ 2633 /* Locate first empty entry. */
2356 for (;;) { 2634 for (;;) {
2357 if (ha->hw_event_ptr >= 2635 if (ha->hw_event_ptr >=
2358 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2636 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2359 DEBUG2(qla_printk(KERN_WARNING, ha, 2637 DEBUG2(qla_printk(KERN_WARNING, ha,
2360 "HW event -- Log Full!\n")); 2638 "HW event -- Log Full!\n"));
2361 return QLA_MEMORY_ALLOC_FAILED; 2639 return QLA_MEMORY_ALLOC_FAILED;
@@ -2391,7 +2669,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2391 int rval; 2669 int rval;
2392 uint32_t marker[2], fdata[4]; 2670 uint32_t marker[2], fdata[4];
2393 2671
2394 if (ha->hw_event_start == 0) 2672 if (ha->flt_region_hw_event == 0)
2395 return QLA_FUNCTION_FAILED; 2673 return QLA_FUNCTION_FAILED;
2396 2674
2397 DEBUG2(qla_printk(KERN_WARNING, ha, 2675 DEBUG2(qla_printk(KERN_WARNING, ha,
@@ -2406,7 +2684,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2406 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER); 2684 QLA_DRIVER_PATCH_VER, QLA_DRIVER_BETA_VER);
2407 2685
2408 /* Locate marker. */ 2686 /* Locate marker. */
2409 ha->hw_event_ptr = ha->hw_event_start; 2687 ha->hw_event_ptr = ha->flt_region_hw_event;
2410 for (;;) { 2688 for (;;) {
2411 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, 2689 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr,
2412 4); 2690 4);
@@ -2415,7 +2693,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2415 break; 2693 break;
2416 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; 2694 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2417 if (ha->hw_event_ptr >= 2695 if (ha->hw_event_ptr >=
2418 ha->hw_event_start + FA_HW_EVENT_SIZE) { 2696 ha->flt_region_hw_event + FA_HW_EVENT_SIZE) {
2419 DEBUG2(qla_printk(KERN_WARNING, ha, 2697 DEBUG2(qla_printk(KERN_WARNING, ha,
2420 "HW event -- Log Full!\n")); 2698 "HW event -- Log Full!\n"));
2421 return QLA_MEMORY_ALLOC_FAILED; 2699 return QLA_MEMORY_ALLOC_FAILED;
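The NPIV-config block added above validates the flash region by summing every 16-bit little-endian word of the header plus its entries and requiring the result to be zero (the stored checksum field makes the total wrap to 0). A minimal user-space sketch of the same check, with an illustrative header layout rather than the driver's real struct qla_npiv_header:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative layout only; the driver's definition lives in its own headers. */
struct npiv_header {
	uint16_t version;
	uint16_t entries;
	uint16_t checksum;
};

/* Returns 1 when the 16-bit word sum over header + entries is zero. */
static int npiv_config_valid(const void *buf, uint16_t entries,
			     size_t entry_size)
{
	const uint16_t *wptr = buf;
	size_t words = (sizeof(struct npiv_header) + entries * entry_size) / 2;
	uint16_t sum = 0;

	while (words--)
		sum += le16toh(*wptr++);	/* flash words are little endian */

	return sum == 0;
}
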
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4160e4caa7b9..be5e299df528 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k7" 10#define QLA2XXX_VERSION "8.02.01-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 88bebb13bc52..de8279ad7d89 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
1542 DEBUG2(printk(KERN_INFO 1542 DEBUG2(printk(KERN_INFO
1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," 1543 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, 1544 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
1545 cmd, jiffies, cmd->timeout_per_command / HZ, 1545 cmd, jiffies, cmd->request->timeout / HZ,
1546 ha->dpc_flags, cmd->result, cmd->allowed)); 1546 ha->dpc_flags, cmd->result, cmd->allowed));
1547 1547
1548 /* FIXME: wait for hba to go online */ 1548 /* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
1598 DEBUG2(printk(KERN_INFO 1598 DEBUG2(printk(KERN_INFO
1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " 1599 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n", 1600 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
1601 ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ, 1601 ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
1602 ha->dpc_flags, cmd->result, cmd->allowed)); 1602 ha->dpc_flags, cmd->result, cmd->allowed));
1603 1603
1604 stat = qla4xxx_reset_target(ha, ddb_entry); 1604 stat = qla4xxx_reset_target(ha, ddb_entry);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index ee6be596503d..2ac3cb2b9081 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
291 unsigned long flags; 291 unsigned long flags;
292 292
293 cmd->device = dev; 293 cmd->device = dev;
294 init_timer(&cmd->eh_timeout);
295 INIT_LIST_HEAD(&cmd->list); 294 INIT_LIST_HEAD(&cmd->list);
296 spin_lock_irqsave(&dev->list_lock, flags); 295 spin_lock_irqsave(&dev->list_lock, flags);
297 list_add_tail(&cmd->list, &dev->cmd_list); 296 list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,26 +651,33 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
652 unsigned long timeout; 651 unsigned long timeout;
653 int rtn = 0; 652 int rtn = 0;
654 653
654 /*
655 * We will use a queued command if possible, otherwise we will
656 * emulate the queuing and calling of completion function ourselves.
657 */
658 atomic_inc(&cmd->device->iorequest_cnt);
659
655 /* check if the device is still usable */ 660 /* check if the device is still usable */
656 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { 661 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
657 /* in SDEV_DEL we error all commands. DID_NO_CONNECT 662 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
658 * returns an immediate error upwards, and signals 663 * returns an immediate error upwards, and signals
659 * that the device is no longer present */ 664 * that the device is no longer present */
660 cmd->result = DID_NO_CONNECT << 16; 665 cmd->result = DID_NO_CONNECT << 16;
661 atomic_inc(&cmd->device->iorequest_cnt); 666 scsi_done(cmd);
662 __scsi_done(cmd);
663 /* return 0 (because the command has been processed) */ 667 /* return 0 (because the command has been processed) */
664 goto out; 668 goto out;
665 } 669 }
666 670
667 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */ 671 /* Check to see if the scsi lld made this device blocked. */
668 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) { 672 if (unlikely(scsi_device_blocked(cmd->device))) {
669 /* 673 /*
670 * in SDEV_BLOCK, the command is just put back on the device 674 * in blocked state, the command is just put back on
671 * queue. The suspend state has already blocked the queue so 675 * the device queue. The suspend state has already
672 * future requests should not occur until the device 676 * blocked the queue so future requests should not
673 * transitions out of the suspend state. 677 * occur until the device transitions out of the
678 * suspend state.
674 */ 679 */
680
675 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 681 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
676 682
677 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); 683 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +720,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
714 host->resetting = 0; 720 host->resetting = 0;
715 } 721 }
716 722
717 /*
718 * AK: unlikely race here: for some reason the timer could
719 * expire before the serial number is set up below.
720 */
721 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
722
723 scsi_log_send(cmd); 723 scsi_log_send(cmd);
724 724
725 /* 725 /*
726 * We will use a queued command if possible, otherwise we will
727 * emulate the queuing and calling of completion function ourselves.
728 */
729 atomic_inc(&cmd->device->iorequest_cnt);
730
731 /*
732 * Before we queue this command, check if the command 726 * Before we queue this command, check if the command
733 * length exceeds what the host adapter can handle. 727 * length exceeds what the host adapter can handle.
734 */ 728 */
@@ -744,6 +738,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
744 } 738 }
745 739
746 spin_lock_irqsave(host->host_lock, flags); 740 spin_lock_irqsave(host->host_lock, flags);
741 /*
742 * AK: unlikely race here: for some reason the timer could
743 * expire before the serial number is set up below.
744 *
745 * TODO: kill serial or move to blk layer
746 */
747 scsi_cmd_get_serial(host, cmd); 747 scsi_cmd_get_serial(host, cmd);
748 748
749 if (unlikely(host->shost_state == SHOST_DEL)) { 749 if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +754,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
754 } 754 }
755 spin_unlock_irqrestore(host->host_lock, flags); 755 spin_unlock_irqrestore(host->host_lock, flags);
756 if (rtn) { 756 if (rtn) {
757 if (scsi_delete_timer(cmd)) { 757 scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
758 atomic_inc(&cmd->device->iodone_cnt); 758 rtn : SCSI_MLQUEUE_HOST_BUSY);
759 scsi_queue_insert(cmd,
760 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
761 rtn : SCSI_MLQUEUE_HOST_BUSY);
762 }
763 SCSI_LOG_MLQUEUE(3, 759 SCSI_LOG_MLQUEUE(3,
764 printk("queuecommand : request rejected\n")); 760 printk("queuecommand : request rejected\n"));
765 } 761 }
@@ -770,24 +766,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
770} 766}
771 767
772/** 768/**
773 * scsi_req_abort_cmd -- Request command recovery for the specified command
774 * @cmd: pointer to the SCSI command of interest
775 *
776 * This function requests that SCSI Core start recovery for the
777 * command by deleting the timer and adding the command to the eh
778 * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
779 * implement their own error recovery MAY ignore the timeout event if
780 * they generated scsi_req_abort_cmd.
781 */
782void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
783{
784 if (!scsi_delete_timer(cmd))
785 return;
786 scsi_times_out(cmd);
787}
788EXPORT_SYMBOL(scsi_req_abort_cmd);
789
790/**
791 * scsi_done - Enqueue the finished SCSI command into the done queue. 769 * scsi_done - Enqueue the finished SCSI command into the done queue.
792 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 770 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
793 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 771 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +780,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
802 */ 780 */
803static void scsi_done(struct scsi_cmnd *cmd) 781static void scsi_done(struct scsi_cmnd *cmd)
804{ 782{
805 /* 783 blk_complete_request(cmd->request);
806 * We don't have to worry about this one timing out anymore.
807 * If we are unable to remove the timer, then the command
808 * has already timed out. In which case, we have no choice but to
809 * let the timeout function run, as we have no idea where in fact
810 * that function could really be. It might be on another processor,
811 * etc, etc.
812 */
813 if (!scsi_delete_timer(cmd))
814 return;
815 __scsi_done(cmd);
816}
817
818/* Private entry to scsi_done() to complete a command when the timer
819 * isn't running --- used by scsi_times_out */
820void __scsi_done(struct scsi_cmnd *cmd)
821{
822 struct request *rq = cmd->request;
823
824 /*
825 * Set the serial numbers back to zero
826 */
827 cmd->serial_number = 0;
828
829 atomic_inc(&cmd->device->iodone_cnt);
830 if (cmd->result)
831 atomic_inc(&cmd->device->ioerr_cnt);
832
833 BUG_ON(!rq);
834
835 /*
836 * The uptodate/nbytes values don't matter, as we allow partial
837 * completes and thus will check this in the softirq callback
838 */
839 rq->completion_data = cmd;
840 blk_complete_request(rq);
841} 784}
842 785
843/* Move this to a header if it becomes more generally useful */ 786/* Move this to a header if it becomes more generally useful */
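scsi_dispatch_cmd() above now defers on scsi_device_blocked() instead of comparing against SDEV_BLOCK directly, so the new SDEV_CREATED_BLOCK state introduced in the scsi_lib.c hunks below is handled by the same path. The helper itself is not part of this hunk; presumably it is an inline next to the other state accessors, along these lines:

/* Sketch of the assumed helper, not shown in this diff. */
static inline int scsi_device_blocked(struct scsi_device *sdev)
{
	return sdev->sdev_state == SDEV_BLOCK ||
	       sdev->sdev_state == SDEV_CREATED_BLOCK;
}
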
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 39ce3aba1dac..fecefa05cb62 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
112} 112}
113 113
114/** 114/**
115 * scsi_add_timer - Start timeout timer for a single scsi command.
116 * @scmd: scsi command that is about to start running.
117 * @timeout: amount of time to allow this command to run.
118 * @complete: timeout function to call if timer isn't canceled.
119 *
120 * Notes:
121 * This should be turned into an inline function. Each scsi command
122 * has its own timer, and as it is added to the queue, we set up the
123 * timer. When the command completes, we cancel the timer.
124 */
125void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
126 void (*complete)(struct scsi_cmnd *))
127{
128
129 /*
130 * If the clock was already running for this command, then
131 * first delete the timer. The timer handling code gets rather
132 * confused if we don't do this.
133 */
134 if (scmd->eh_timeout.function)
135 del_timer(&scmd->eh_timeout);
136
137 scmd->eh_timeout.data = (unsigned long)scmd;
138 scmd->eh_timeout.expires = jiffies + timeout;
139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
140
141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
142 " %d, (%p)\n", __func__,
143 scmd, timeout, complete));
144
145 add_timer(&scmd->eh_timeout);
146}
147
148/**
149 * scsi_delete_timer - Delete/cancel timer for a given function.
150 * @scmd: Cmd that we are canceling timer for
151 *
152 * Notes:
153 * This should be turned into an inline function.
154 *
155 * Return value:
156 * 1 if we were able to detach the timer. 0 if we blew it, and the
157 * timer function has already started to run.
158 */
159int scsi_delete_timer(struct scsi_cmnd *scmd)
160{
161 int rtn;
162
163 rtn = del_timer(&scmd->eh_timeout);
164
165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
166 " rtn: %d\n", __func__,
167 scmd, rtn));
168
169 scmd->eh_timeout.data = (unsigned long)NULL;
170 scmd->eh_timeout.function = NULL;
171
172 return rtn;
173}
174
175/**
176 * scsi_times_out - Timeout function for normal scsi commands. 115 * scsi_times_out - Timeout function for normal scsi commands.
177 * @scmd: Cmd that is timing out. 116 * @req: request that is timing out.
178 * 117 *
179 * Notes: 118 * Notes:
180 * We do not need to lock this. There is the potential for a race 119 * We do not need to lock this. There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
182 * normal completion function determines that the timer has already 121 * normal completion function determines that the timer has already
183 * fired, then it mustn't do anything. 122 * fired, then it mustn't do anything.
184 */ 123 */
185void scsi_times_out(struct scsi_cmnd *scmd) 124enum blk_eh_timer_return scsi_times_out(struct request *req)
186{ 125{
187 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 126 struct scsi_cmnd *scmd = req->special;
127 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
128 enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
188 129
189 scsi_log_completion(scmd, TIMEOUT_ERROR); 130 scsi_log_completion(scmd, TIMEOUT_ERROR);
190 131
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *scmd)
196 eh_timed_out = NULL; 137 eh_timed_out = NULL;
197 138
198 if (eh_timed_out) 139 if (eh_timed_out)
199 switch (eh_timed_out(scmd)) { 140 rtn = eh_timed_out(scmd);
200 case EH_HANDLED: 141 switch (rtn) {
201 __scsi_done(scmd); 142 case BLK_EH_NOT_HANDLED:
202 return;
203 case EH_RESET_TIMER:
204 scsi_add_timer(scmd, scmd->timeout_per_command,
205 scsi_times_out);
206 return;
207 case EH_NOT_HANDLED:
208 break; 143 break;
144 default:
145 return rtn;
209 } 146 }
210 147
211 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 148 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
212 scmd->result |= DID_TIME_OUT << 16; 149 scmd->result |= DID_TIME_OUT << 16;
213 __scsi_done(scmd); 150 return BLK_EH_HANDLED;
214 } 151 }
152
153 return BLK_EH_NOT_HANDLED;
215} 154}
216 155
217/** 156/**
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1793 1732
1794 blk_rq_init(NULL, &req); 1733 blk_rq_init(NULL, &req);
1795 scmd->request = &req; 1734 scmd->request = &req;
1796 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1797 1735
1798 scmd->cmnd = req.cmd; 1736 scmd->cmnd = req.cmd;
1799 1737
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1804 1742
1805 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 1743 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1806 1744
1807 init_timer(&scmd->eh_timeout);
1808
1809 spin_lock_irqsave(shost->host_lock, flags); 1745 spin_lock_irqsave(shost->host_lock, flags);
1810 shost->tmf_in_progress = 1; 1746 shost->tmf_in_progress = 1;
1811 spin_unlock_irqrestore(shost->host_lock, flags); 1747 spin_unlock_irqrestore(shost->host_lock, flags);
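With scsi_add_timer()/scsi_delete_timer() removed, the block layer owns the per-request clock and scsi_times_out() becomes a translation layer: it asks the host's or transport's eh_timed_out hook, which now returns enum blk_eh_timer_return. A sketch of such a hook, modelled on the fc_timed_out() conversion further down in this patch (the rport fields are the FC transport's, used purely as an example):

static enum blk_eh_timer_return
example_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));

	/* Target temporarily unreachable: ask the block layer to rearm. */
	if (rport->port_state == FC_PORTSTATE_BLOCKED)
		return BLK_EH_RESET_TIMER;

	/* Otherwise scsi_times_out() escalates the command to SCSI EH. */
	return BLK_EH_NOT_HANDLED;
}
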
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62307bd794a9..98ee55ced592 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1181,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1181 1181
1182 cmd->transfersize = req->data_len; 1182 cmd->transfersize = req->data_len;
1183 cmd->allowed = req->retries; 1183 cmd->allowed = req->retries;
1184 cmd->timeout_per_command = req->timeout;
1185 return BLKPREP_OK; 1184 return BLKPREP_OK;
1186} 1185}
1187EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); 1186EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1251,6 +1250,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1251 break; 1250 break;
1252 case SDEV_QUIESCE: 1251 case SDEV_QUIESCE:
1253 case SDEV_BLOCK: 1252 case SDEV_BLOCK:
1253 case SDEV_CREATED_BLOCK:
1254 /* 1254 /*
1255 * If the devices is blocked we defer normal commands. 1255 * If the devices is blocked we defer normal commands.
1256 */ 1256 */
@@ -1416,17 +1416,26 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1416 spin_unlock(shost->host_lock); 1416 spin_unlock(shost->host_lock);
1417 spin_lock(sdev->request_queue->queue_lock); 1417 spin_lock(sdev->request_queue->queue_lock);
1418 1418
1419 __scsi_done(cmd); 1419 blk_complete_request(req);
1420} 1420}
1421 1421
1422static void scsi_softirq_done(struct request *rq) 1422static void scsi_softirq_done(struct request *rq)
1423{ 1423{
1424 struct scsi_cmnd *cmd = rq->completion_data; 1424 struct scsi_cmnd *cmd = rq->special;
1425 unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command; 1425 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1426 int disposition; 1426 int disposition;
1427 1427
1428 INIT_LIST_HEAD(&cmd->eh_entry); 1428 INIT_LIST_HEAD(&cmd->eh_entry);
1429 1429
1430 /*
1431 * Set the serial numbers back to zero
1432 */
1433 cmd->serial_number = 0;
1434
1435 atomic_inc(&cmd->device->iodone_cnt);
1436 if (cmd->result)
1437 atomic_inc(&cmd->device->ioerr_cnt);
1438
1430 disposition = scsi_decide_disposition(cmd); 1439 disposition = scsi_decide_disposition(cmd);
1431 if (disposition != SUCCESS && 1440 if (disposition != SUCCESS &&
1432 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1441 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1675,6 +1684,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1675 1684
1676 blk_queue_prep_rq(q, scsi_prep_fn); 1685 blk_queue_prep_rq(q, scsi_prep_fn);
1677 blk_queue_softirq_done(q, scsi_softirq_done); 1686 blk_queue_softirq_done(q, scsi_softirq_done);
1687 blk_queue_rq_timed_out(q, scsi_times_out);
1678 return q; 1688 return q;
1679} 1689}
1680 1690
@@ -2064,10 +2074,13 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2064 2074
2065 switch (state) { 2075 switch (state) {
2066 case SDEV_CREATED: 2076 case SDEV_CREATED:
2067 /* There are no legal states that come back to 2077 switch (oldstate) {
2068 * created. This is the manually initialised start 2078 case SDEV_CREATED_BLOCK:
2069 * state */ 2079 break;
2070 goto illegal; 2080 default:
2081 goto illegal;
2082 }
2083 break;
2071 2084
2072 case SDEV_RUNNING: 2085 case SDEV_RUNNING:
2073 switch (oldstate) { 2086 switch (oldstate) {
@@ -2105,8 +2118,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2105 2118
2106 case SDEV_BLOCK: 2119 case SDEV_BLOCK:
2107 switch (oldstate) { 2120 switch (oldstate) {
2108 case SDEV_CREATED:
2109 case SDEV_RUNNING: 2121 case SDEV_RUNNING:
2122 case SDEV_CREATED_BLOCK:
2123 break;
2124 default:
2125 goto illegal;
2126 }
2127 break;
2128
2129 case SDEV_CREATED_BLOCK:
2130 switch (oldstate) {
2131 case SDEV_CREATED:
2110 break; 2132 break;
2111 default: 2133 default:
2112 goto illegal; 2134 goto illegal;
@@ -2394,8 +2416,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
2394 int err = 0; 2416 int err = 0;
2395 2417
2396 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2418 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2397 if (err) 2419 if (err) {
2398 return err; 2420 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2421
2422 if (err)
2423 return err;
2424 }
2399 2425
2400 /* 2426 /*
2401 * The device has transitioned to SDEV_BLOCK. Stop the 2427 * The device has transitioned to SDEV_BLOCK. Stop the
@@ -2438,8 +2464,12 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
2438 * and goose the device queue if successful. 2464 * and goose the device queue if successful.
2439 */ 2465 */
2440 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2466 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2441 if (err) 2467 if (err) {
2442 return err; 2468 err = scsi_device_set_state(sdev, SDEV_CREATED);
2469
2470 if (err)
2471 return err;
2472 }
2443 2473
2444 spin_lock_irqsave(q->queue_lock, flags); 2474 spin_lock_irqsave(q->queue_lock, flags);
2445 blk_start_queue(q); 2475 blk_start_queue(q);
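scsi_alloc_queue() now registers a block-layer timeout callback next to the softirq completion, so a command's clock is armed when the block layer issues the request rather than by scsi_dispatch_cmd(). A sketch of the resulting queue wiring, written as if inside scsi_lib.c; the explicit default-timeout call is an assumption added for illustration and is not part of the hunk above:

static struct request_queue *example_alloc_queue(struct scsi_device *sdev,
						 request_fn_proc *request_fn)
{
	struct request_queue *q = blk_init_queue(request_fn, NULL);

	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);		/* build scsi_cmnd from the request */
	blk_queue_softirq_done(q, scsi_softirq_done);	/* completion bottom half */
	blk_queue_rq_timed_out(q, scsi_times_out);	/* replaces cmd->eh_timeout */
	blk_queue_rq_timeout(q, 30 * HZ);		/* assumed default, e.g. 30 seconds */
	return q;
}
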
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index ae7ed9a22662..b37e133de805 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -21,6 +21,7 @@
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/security.h> 23#include <linux/security.h>
24#include <linux/delay.h>
24#include <net/sock.h> 25#include <net/sock.h>
25#include <net/netlink.h> 26#include <net/netlink.h>
26 27
@@ -30,6 +31,39 @@
30struct sock *scsi_nl_sock = NULL; 31struct sock *scsi_nl_sock = NULL;
31EXPORT_SYMBOL_GPL(scsi_nl_sock); 32EXPORT_SYMBOL_GPL(scsi_nl_sock);
32 33
34static DEFINE_SPINLOCK(scsi_nl_lock);
35static struct list_head scsi_nl_drivers;
36
37static u32 scsi_nl_state;
38#define STATE_EHANDLER_BSY 0x00000001
39
40struct scsi_nl_transport {
41 int (*msg_handler)(struct sk_buff *);
42 void (*event_handler)(struct notifier_block *, unsigned long, void *);
43 unsigned int refcnt;
44 int flags;
45};
46
47/* flags values (bit flags) */
48#define HANDLER_DELETING 0x1
49
50static struct scsi_nl_transport transports[SCSI_NL_MAX_TRANSPORTS] =
51 { {NULL, }, };
52
53
54struct scsi_nl_drvr {
55 struct list_head next;
56 int (*dmsg_handler)(struct Scsi_Host *shost, void *payload,
57 u32 len, u32 pid);
58 void (*devt_handler)(struct notifier_block *nb,
59 unsigned long event, void *notify_ptr);
60 struct scsi_host_template *hostt;
61 u64 vendor_id;
62 unsigned int refcnt;
63 int flags;
64};
65
66
33 67
34/** 68/**
35 * scsi_nl_rcv_msg - Receive message handler. 69 * scsi_nl_rcv_msg - Receive message handler.
@@ -45,8 +79,9 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
45{ 79{
46 struct nlmsghdr *nlh; 80 struct nlmsghdr *nlh;
47 struct scsi_nl_hdr *hdr; 81 struct scsi_nl_hdr *hdr;
48 uint32_t rlen; 82 unsigned long flags;
49 int err; 83 u32 rlen;
84 int err, tport;
50 85
51 while (skb->len >= NLMSG_SPACE(0)) { 86 while (skb->len >= NLMSG_SPACE(0)) {
52 err = 0; 87 err = 0;
@@ -65,7 +100,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
65 100
66 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { 101 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
67 err = -EBADMSG; 102 err = -EBADMSG;
68 return; 103 goto next_msg;
69 } 104 }
70 105
71 hdr = NLMSG_DATA(nlh); 106 hdr = NLMSG_DATA(nlh);
@@ -83,12 +118,27 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { 118 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
84 printk(KERN_WARNING "%s: discarding partial message\n", 119 printk(KERN_WARNING "%s: discarding partial message\n",
85 __func__); 120 __func__);
86 return; 121 goto next_msg;
87 } 122 }
88 123
89 /* 124 /*
90 * We currently don't support anyone sending us a message 125 * Deliver message to the appropriate transport
91 */ 126 */
127 spin_lock_irqsave(&scsi_nl_lock, flags);
128
129 tport = hdr->transport;
130 if ((tport < SCSI_NL_MAX_TRANSPORTS) &&
131 !(transports[tport].flags & HANDLER_DELETING) &&
132 (transports[tport].msg_handler)) {
133 transports[tport].refcnt++;
134 spin_unlock_irqrestore(&scsi_nl_lock, flags);
135 err = transports[tport].msg_handler(skb);
136 spin_lock_irqsave(&scsi_nl_lock, flags);
137 transports[tport].refcnt--;
138 } else
139 err = -ENOENT;
140
141 spin_unlock_irqrestore(&scsi_nl_lock, flags);
92 142
93next_msg: 143next_msg:
94 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK)) 144 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
@@ -110,14 +160,42 @@ static int
110scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr) 160scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
111{ 161{
112 struct netlink_notify *n = ptr; 162 struct netlink_notify *n = ptr;
163 struct scsi_nl_drvr *driver;
164 unsigned long flags;
165 int tport;
113 166
114 if (n->protocol != NETLINK_SCSITRANSPORT) 167 if (n->protocol != NETLINK_SCSITRANSPORT)
115 return NOTIFY_DONE; 168 return NOTIFY_DONE;
116 169
170 spin_lock_irqsave(&scsi_nl_lock, flags);
171 scsi_nl_state |= STATE_EHANDLER_BSY;
172
117 /* 173 /*
118 * Currently, we are not tracking PID's, etc. There is nothing 174 * Pass event on to any transports that may be listening
119 * to handle.
120 */ 175 */
176 for (tport = 0; tport < SCSI_NL_MAX_TRANSPORTS; tport++) {
177 if (!(transports[tport].flags & HANDLER_DELETING) &&
178 (transports[tport].event_handler)) {
179 spin_unlock_irqrestore(&scsi_nl_lock, flags);
180 transports[tport].event_handler(this, event, ptr);
181 spin_lock_irqsave(&scsi_nl_lock, flags);
182 }
183 }
184
185 /*
186 * Pass event on to any drivers that may be listening
187 */
188 list_for_each_entry(driver, &scsi_nl_drivers, next) {
189 if (!(driver->flags & HANDLER_DELETING) &&
190 (driver->devt_handler)) {
191 spin_unlock_irqrestore(&scsi_nl_lock, flags);
192 driver->devt_handler(this, event, ptr);
193 spin_lock_irqsave(&scsi_nl_lock, flags);
194 }
195 }
196
197 scsi_nl_state &= ~STATE_EHANDLER_BSY;
198 spin_unlock_irqrestore(&scsi_nl_lock, flags);
121 199
122 return NOTIFY_DONE; 200 return NOTIFY_DONE;
123} 201}
@@ -128,7 +206,281 @@ static struct notifier_block scsi_netlink_notifier = {
128 206
129 207
130/** 208/**
131 * scsi_netlink_init - Called by SCSI subsystem to intialize the SCSI transport netlink interface 209 * GENERIC SCSI transport receive and event handlers
210 **/
211
212/**
213 * scsi_generic_msg_handler - receive message handler for GENERIC transport
214 * messages
215 *
216 * @skb: socket receive buffer
217 *
218 **/
219static int
220scsi_generic_msg_handler(struct sk_buff *skb)
221{
222 struct nlmsghdr *nlh = nlmsg_hdr(skb);
223 struct scsi_nl_hdr *snlh = NLMSG_DATA(nlh);
224 struct scsi_nl_drvr *driver;
225 struct Scsi_Host *shost;
226 unsigned long flags;
227 int err = 0, match, pid;
228
229 pid = NETLINK_CREDS(skb)->pid;
230
231 switch (snlh->msgtype) {
232 case SCSI_NL_SHOST_VENDOR:
233 {
234 struct scsi_nl_host_vendor_msg *msg = NLMSG_DATA(nlh);
235
236 /* Locate the driver that corresponds to the message */
237 spin_lock_irqsave(&scsi_nl_lock, flags);
238 match = 0;
239 list_for_each_entry(driver, &scsi_nl_drivers, next) {
240 if (driver->vendor_id == msg->vendor_id) {
241 match = 1;
242 break;
243 }
244 }
245
246 if ((!match) || (!driver->dmsg_handler)) {
247 spin_unlock_irqrestore(&scsi_nl_lock, flags);
248 err = -ESRCH;
249 goto rcv_exit;
250 }
251
252 if (driver->flags & HANDLER_DELETING) {
253 spin_unlock_irqrestore(&scsi_nl_lock, flags);
254 err = -ESHUTDOWN;
255 goto rcv_exit;
256 }
257
258 driver->refcnt++;
259 spin_unlock_irqrestore(&scsi_nl_lock, flags);
260
261
262 /* if successful, scsi_host_lookup takes a shost reference */
263 shost = scsi_host_lookup(msg->host_no);
264 if (!shost) {
265 err = -ENODEV;
266 goto driver_exit;
267 }
268
269 /* is this host owned by the vendor ? */
270 if (shost->hostt != driver->hostt) {
271 err = -EINVAL;
272 goto vendormsg_put;
273 }
274
275 /* pass message on to the driver */
276 err = driver->dmsg_handler(shost, (void *)&msg[1],
277 msg->vmsg_datalen, pid);
278
279vendormsg_put:
280 /* release reference by scsi_host_lookup */
281 scsi_host_put(shost);
282
283driver_exit:
284 /* release our own reference on the registration object */
285 spin_lock_irqsave(&scsi_nl_lock, flags);
286 driver->refcnt--;
287 spin_unlock_irqrestore(&scsi_nl_lock, flags);
288 break;
289 }
290
291 default:
292 err = -EBADR;
293 break;
294 }
295
296rcv_exit:
297 if (err)
298 printk(KERN_WARNING "%s: Msgtype %d failed - err %d\n",
299 __func__, snlh->msgtype, err);
300 return err;
301}
302
303
304/**
305 * scsi_nl_add_transport -
306 * Registers message and event handlers for a transport. Enables
307 * receipt of netlink messages and events to a transport.
308 *
309 * @tport: transport registering handlers
310 * @msg_handler: receive message handler callback
311 * @event_handler: receive event handler callback
312 **/
313int
314scsi_nl_add_transport(u8 tport,
315 int (*msg_handler)(struct sk_buff *),
316 void (*event_handler)(struct notifier_block *, unsigned long, void *))
317{
318 unsigned long flags;
319 int err = 0;
320
321 if (tport >= SCSI_NL_MAX_TRANSPORTS)
322 return -EINVAL;
323
324 spin_lock_irqsave(&scsi_nl_lock, flags);
325
326 if (scsi_nl_state & STATE_EHANDLER_BSY) {
327 spin_unlock_irqrestore(&scsi_nl_lock, flags);
328 msleep(1);
329 spin_lock_irqsave(&scsi_nl_lock, flags);
330 }
331
332 if (transports[tport].msg_handler || transports[tport].event_handler) {
333 err = -EALREADY;
334 goto register_out;
335 }
336
337 transports[tport].msg_handler = msg_handler;
338 transports[tport].event_handler = event_handler;
339 transports[tport].flags = 0;
340 transports[tport].refcnt = 0;
341
342register_out:
343 spin_unlock_irqrestore(&scsi_nl_lock, flags);
344
345 return err;
346}
347EXPORT_SYMBOL_GPL(scsi_nl_add_transport);
348
349
350/**
351 * scsi_nl_remove_transport -
352 * Disable transport reception of messages and events
353 *
354 * @tport: transport deregistering handlers
355 *
356 **/
357void
358scsi_nl_remove_transport(u8 tport)
359{
360 unsigned long flags;
361
362 spin_lock_irqsave(&scsi_nl_lock, flags);
363 if (scsi_nl_state & STATE_EHANDLER_BSY) {
364 spin_unlock_irqrestore(&scsi_nl_lock, flags);
365 msleep(1);
366 spin_lock_irqsave(&scsi_nl_lock, flags);
367 }
368
369 if (tport < SCSI_NL_MAX_TRANSPORTS) {
370 transports[tport].flags |= HANDLER_DELETING;
371
372 while (transports[tport].refcnt != 0) {
373 spin_unlock_irqrestore(&scsi_nl_lock, flags);
374 schedule_timeout_uninterruptible(HZ/4);
375 spin_lock_irqsave(&scsi_nl_lock, flags);
376 }
377 transports[tport].msg_handler = NULL;
378 transports[tport].event_handler = NULL;
379 transports[tport].flags = 0;
380 }
381
382 spin_unlock_irqrestore(&scsi_nl_lock, flags);
383
384 return;
385}
386EXPORT_SYMBOL_GPL(scsi_nl_remove_transport);
387
388
389/**
390 * scsi_nl_add_driver -
391 * A driver is registering its interfaces for SCSI netlink messages
392 *
393 * @vendor_id: A unique identification value for the driver.
394 * @hostt: address of the driver's host template. Used
395 * to verify an shost is bound to the driver
396 * @nlmsg_handler: receive message handler callback
397 * @nlevt_handler: receive event handler callback
398 *
399 * Returns:
400 * 0 on Success
401 * error result otherwise
402 **/
403int
404scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
405 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
406 u32 len, u32 pid),
407 void (*nlevt_handler)(struct notifier_block *nb,
408 unsigned long event, void *notify_ptr))
409{
410 struct scsi_nl_drvr *driver;
411 unsigned long flags;
412
413 driver = kzalloc(sizeof(*driver), GFP_KERNEL);
414 if (unlikely(!driver)) {
415 printk(KERN_ERR "%s: allocation failure\n", __func__);
416 return -ENOMEM;
417 }
418
419 driver->dmsg_handler = nlmsg_handler;
420 driver->devt_handler = nlevt_handler;
421 driver->hostt = hostt;
422 driver->vendor_id = vendor_id;
423
424 spin_lock_irqsave(&scsi_nl_lock, flags);
425 if (scsi_nl_state & STATE_EHANDLER_BSY) {
426 spin_unlock_irqrestore(&scsi_nl_lock, flags);
427 msleep(1);
428 spin_lock_irqsave(&scsi_nl_lock, flags);
429 }
430 list_add_tail(&driver->next, &scsi_nl_drivers);
431 spin_unlock_irqrestore(&scsi_nl_lock, flags);
432
433 return 0;
434}
435EXPORT_SYMBOL_GPL(scsi_nl_add_driver);
436
437
438/**
439 * scsi_nl_remove_driver -
440 * A driver is unregistering its interfaces for SCSI netlink messages
441 *
442 * @vendor_id: The unique identification value for the driver.
443 **/
444void
445scsi_nl_remove_driver(u64 vendor_id)
446{
447 struct scsi_nl_drvr *driver;
448 unsigned long flags;
449
450 spin_lock_irqsave(&scsi_nl_lock, flags);
451 if (scsi_nl_state & STATE_EHANDLER_BSY) {
452 spin_unlock_irqrestore(&scsi_nl_lock, flags);
453 msleep(1);
454 spin_lock_irqsave(&scsi_nl_lock, flags);
455 }
456
457 list_for_each_entry(driver, &scsi_nl_drivers, next) {
458 if (driver->vendor_id == vendor_id) {
459 driver->flags |= HANDLER_DELETING;
460 while (driver->refcnt != 0) {
461 spin_unlock_irqrestore(&scsi_nl_lock, flags);
462 schedule_timeout_uninterruptible(HZ/4);
463 spin_lock_irqsave(&scsi_nl_lock, flags);
464 }
465 list_del(&driver->next);
466 kfree(driver);
467 spin_unlock_irqrestore(&scsi_nl_lock, flags);
468 return;
469 }
470 }
471
472 spin_unlock_irqrestore(&scsi_nl_lock, flags);
473
474 printk(KERN_ERR "%s: removal of driver failed - vendor_id 0x%llx\n",
475 __func__, (unsigned long long)vendor_id);
476 return;
477}
478EXPORT_SYMBOL_GPL(scsi_nl_remove_driver);
479
480
481/**
482 * scsi_netlink_init - Called by SCSI subsystem to initialize
483 * the SCSI transport netlink interface
132 * 484 *
133 **/ 485 **/
134void 486void
@@ -136,6 +488,8 @@ scsi_netlink_init(void)
136{ 488{
137 int error; 489 int error;
138 490
491 INIT_LIST_HEAD(&scsi_nl_drivers);
492
139 error = netlink_register_notifier(&scsi_netlink_notifier); 493 error = netlink_register_notifier(&scsi_netlink_notifier);
140 if (error) { 494 if (error) {
141 printk(KERN_ERR "%s: register of event handler failed - %d\n", 495 printk(KERN_ERR "%s: register of event handler failed - %d\n",
@@ -150,8 +504,15 @@ scsi_netlink_init(void)
150 printk(KERN_ERR "%s: register of recieve handler failed\n", 504 printk(KERN_ERR "%s: register of recieve handler failed\n",
151 __func__); 505 __func__);
152 netlink_unregister_notifier(&scsi_netlink_notifier); 506 netlink_unregister_notifier(&scsi_netlink_notifier);
507 return;
153 } 508 }
154 509
510 /* Register the entry points for the generic SCSI transport */
511 error = scsi_nl_add_transport(SCSI_NL_TRANSPORT,
512 scsi_generic_msg_handler, NULL);
513 if (error)
514 printk(KERN_ERR "%s: register of GENERIC transport handler"
515 " failed - %d\n", __func__, error);
155 return; 516 return;
156} 517}
157 518
@@ -163,6 +524,8 @@ scsi_netlink_init(void)
163void 524void
164scsi_netlink_exit(void) 525scsi_netlink_exit(void)
165{ 526{
527 scsi_nl_remove_transport(SCSI_NL_TRANSPORT);
528
166 if (scsi_nl_sock) { 529 if (scsi_nl_sock) {
167 netlink_kernel_release(scsi_nl_sock); 530 netlink_kernel_release(scsi_nl_sock);
168 netlink_unregister_notifier(&scsi_netlink_notifier); 531 netlink_unregister_notifier(&scsi_netlink_notifier);
@@ -172,3 +535,147 @@ scsi_netlink_exit(void)
172} 535}
173 536
174 537
538/*
539 * Exported Interfaces
540 */
541
542/**
543 * scsi_nl_send_transport_msg -
544 * Generic function to send a single message from a SCSI transport to
545 * a single process
546 *
547 * @pid: receiving pid
548 * @hdr: message payload
549 *
550 **/
551void
552scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr)
553{
554 struct sk_buff *skb;
555 struct nlmsghdr *nlh;
556 const char *fn;
557 char *datab;
558 u32 len, skblen;
559 int err;
560
561 if (!scsi_nl_sock) {
562 err = -ENOENT;
563 fn = "netlink socket";
564 goto msg_fail;
565 }
566
567 len = NLMSG_SPACE(hdr->msglen);
568 skblen = NLMSG_SPACE(len);
569
570 skb = alloc_skb(skblen, GFP_KERNEL);
571 if (!skb) {
572 err = -ENOBUFS;
573 fn = "alloc_skb";
574 goto msg_fail;
575 }
576
577 nlh = nlmsg_put(skb, pid, 0, SCSI_TRANSPORT_MSG, len - sizeof(*nlh), 0);
578 if (!nlh) {
579 err = -ENOBUFS;
580 fn = "nlmsg_put";
581 goto msg_fail_skb;
582 }
583 datab = NLMSG_DATA(nlh);
584 memcpy(datab, hdr, hdr->msglen);
585
586 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
587 if (err < 0) {
588 fn = "nlmsg_unicast";
589 /* nlmsg_unicast already kfree_skb'd */
590 goto msg_fail;
591 }
592
593 return;
594
595msg_fail_skb:
596 kfree_skb(skb);
597msg_fail:
598 printk(KERN_WARNING
599 "%s: Dropped Message : pid %d Transport %d, msgtype x%x, "
600 "msglen %d: %s : err %d\n",
601 __func__, pid, hdr->transport, hdr->msgtype, hdr->msglen,
602 fn, err);
603 return;
604}
605EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg);
606
607
608/**
609 * scsi_nl_send_vendor_msg - called to send a shost vendor unique message
610 * to a specific process id.
611 *
612 * @pid: process id of the receiver
613 * @host_no: host # sending the message
614 * @vendor_id: unique identifier for the driver's vendor
615 * @data_len: amount, in bytes, of vendor unique payload data
616 * @data_buf: pointer to vendor unique data buffer
617 *
618 * Returns:
619 * 0 on successful return
620 * otherwise, failing error code
621 *
622 * Notes:
623 * This routine assumes no locks are held on entry.
624 */
625int
626scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
627 char *data_buf, u32 data_len)
628{
629 struct sk_buff *skb;
630 struct nlmsghdr *nlh;
631 struct scsi_nl_host_vendor_msg *msg;
632 u32 len, skblen;
633 int err;
634
635 if (!scsi_nl_sock) {
636 err = -ENOENT;
637 goto send_vendor_fail;
638 }
639
640 len = SCSI_NL_MSGALIGN(sizeof(*msg) + data_len);
641 skblen = NLMSG_SPACE(len);
642
643 skb = alloc_skb(skblen, GFP_KERNEL);
644 if (!skb) {
645 err = -ENOBUFS;
646 goto send_vendor_fail;
647 }
648
649 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
650 skblen - sizeof(*nlh), 0);
651 if (!nlh) {
652 err = -ENOBUFS;
653 goto send_vendor_fail_skb;
654 }
655 msg = NLMSG_DATA(nlh);
656
657 INIT_SCSI_NL_HDR(&msg->snlh, SCSI_NL_TRANSPORT,
658 SCSI_NL_SHOST_VENDOR, len);
659 msg->vendor_id = vendor_id;
660 msg->host_no = host_no;
661 msg->vmsg_datalen = data_len; /* bytes */
662 memcpy(&msg[1], data_buf, data_len);
663
664 err = nlmsg_unicast(scsi_nl_sock, skb, pid);
665 if (err)
666 /* nlmsg_unicast already kfree_skb'd */
667 goto send_vendor_fail;
668
669 return 0;
670
671send_vendor_fail_skb:
672 kfree_skb(skb);
673send_vendor_fail:
674 printk(KERN_WARNING
675 "%s: Dropped SCSI Msg : host %d vendor_unique - err %d\n",
676 __func__, host_no, err);
677 return err;
678}
679EXPORT_SYMBOL(scsi_nl_send_vendor_msg);
680
681
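For an LLDD the new surface is small: register once with a vendor id plus the driver's host template, field vendor-unique messages in the callback, and send replies or events with scsi_nl_send_vendor_msg(). A minimal sketch against the signatures added above; EXAMPLE_VENDOR_ID and example_template are placeholders, not real identifiers:

#define EXAMPLE_VENDOR_ID	0x12345678ULL		/* placeholder value */

static struct scsi_host_template example_template;	/* the LLDD's template */

/* Called for SCSI_NL_SHOST_VENDOR messages whose vendor_id matches ours. */
static int example_nl_msg(struct Scsi_Host *shost, void *payload,
			  u32 len, u32 pid)
{
	/* Echo the payload back to the sending process. */
	return scsi_nl_send_vendor_msg(pid, shost->host_no,
				       EXAMPLE_VENDOR_ID, payload, len);
}

static int __init example_init(void)
{
	return scsi_nl_add_driver(EXAMPLE_VENDOR_ID, &example_template,
				  example_nl_msg, NULL);
}

static void __exit example_exit(void)
{
	scsi_nl_remove_driver(EXAMPLE_VENDOR_ID);
}
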
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 79f0f7511204..6cddd5dd323c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
4#include <linux/device.h> 4#include <linux/device.h>
5 5
6struct request_queue; 6struct request_queue;
7struct request;
7struct scsi_cmnd; 8struct scsi_cmnd;
8struct scsi_device; 9struct scsi_device;
9struct scsi_host_template; 10struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
27extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); 28extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
28extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 29extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
29extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 30extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
30extern void __scsi_done(struct scsi_cmnd *cmd);
31#ifdef CONFIG_SCSI_LOGGING 31#ifdef CONFIG_SCSI_LOGGING
32void scsi_log_send(struct scsi_cmnd *cmd); 32void scsi_log_send(struct scsi_cmnd *cmd);
33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition); 33void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void);
49extern void scsi_exit_devinfo(void); 49extern void scsi_exit_devinfo(void);
50 50
51/* scsi_error.c */ 51/* scsi_error.c */
52extern void scsi_add_timer(struct scsi_cmnd *, int, 52extern enum blk_eh_timer_return scsi_times_out(struct request *req);
53 void (*)(struct scsi_cmnd *));
54extern int scsi_delete_timer(struct scsi_cmnd *);
55extern void scsi_times_out(struct scsi_cmnd *cmd);
56extern int scsi_error_handler(void *host); 53extern int scsi_error_handler(void *host);
57extern int scsi_decide_disposition(struct scsi_cmnd *cmd); 54extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
58extern void scsi_eh_wakeup(struct Scsi_Host *shost); 55extern void scsi_eh_wakeup(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index c6a904a45bf9..82f7b2dd08a2 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -259,8 +259,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
259 int error = -ENXIO; 259 int error = -ENXIO;
260 260
261 shost = scsi_host_lookup(host); 261 shost = scsi_host_lookup(host);
262 if (IS_ERR(shost)) 262 if (!shost)
263 return PTR_ERR(shost); 263 return error;
264 264
265 if (shost->transportt->user_scan) 265 if (shost->transportt->user_scan)
266 error = shost->transportt->user_scan(shost, channel, id, lun); 266 error = shost->transportt->user_scan(shost, channel, id, lun);
@@ -287,8 +287,8 @@ static int scsi_remove_single_device(uint host, uint channel, uint id, uint lun)
287 int error = -ENXIO; 287 int error = -ENXIO;
288 288
289 shost = scsi_host_lookup(host); 289 shost = scsi_host_lookup(host);
290 if (IS_ERR(shost)) 290 if (!shost)
291 return PTR_ERR(shost); 291 return error;
292 sdev = scsi_device_lookup(shost, channel, id, lun); 292 sdev = scsi_device_lookup(shost, channel, id, lun);
293 if (sdev) { 293 if (sdev) {
294 scsi_remove_device(sdev); 294 scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 34d0de6cd511..334862e26a1b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -730,6 +730,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
730static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 730static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
731 int *bflags, int async) 731 int *bflags, int async)
732{ 732{
733 int ret;
734
733 /* 735 /*
734 * XXX do not save the inquiry, since it can change underneath us, 736 * XXX do not save the inquiry, since it can change underneath us,
735 * save just vendor/model/rev. 737 * save just vendor/model/rev.
@@ -885,7 +887,17 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
885 887
886 /* set the device running here so that slave configure 888 /* set the device running here so that slave configure
887 * may do I/O */ 889 * may do I/O */
888 scsi_device_set_state(sdev, SDEV_RUNNING); 890 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
891 if (ret) {
892 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
893
894 if (ret) {
895 sdev_printk(KERN_ERR, sdev,
896 "in wrong state %s to complete scan\n",
897 scsi_device_state_name(sdev->sdev_state));
898 return SCSI_SCAN_NO_RESPONSE;
899 }
900 }
889 901
890 if (*bflags & BLIST_MS_192_BYTES_FOR_3F) 902 if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
891 sdev->use_192_bytes_for_3f = 1; 903 sdev->use_192_bytes_for_3f = 1;
@@ -899,7 +911,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
899 transport_configure_device(&sdev->sdev_gendev); 911 transport_configure_device(&sdev->sdev_gendev);
900 912
901 if (sdev->host->hostt->slave_configure) { 913 if (sdev->host->hostt->slave_configure) {
902 int ret = sdev->host->hostt->slave_configure(sdev); 914 ret = sdev->host->hostt->slave_configure(sdev);
903 if (ret) { 915 if (ret) {
904 /* 916 /*
905 * if LLDD reports slave not present, don't clutter 917 * if LLDD reports slave not present, don't clutter
@@ -994,7 +1006,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
994 */ 1006 */
995 sdev = scsi_device_lookup_by_target(starget, lun); 1007 sdev = scsi_device_lookup_by_target(starget, lun);
996 if (sdev) { 1008 if (sdev) {
997 if (rescan || sdev->sdev_state != SDEV_CREATED) { 1009 if (rescan || !scsi_device_created(sdev)) {
998 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1010 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
999 "scsi scan: device exists on %s\n", 1011 "scsi scan: device exists on %s\n",
1000 sdev->sdev_gendev.bus_id)); 1012 sdev->sdev_gendev.bus_id));
@@ -1467,7 +1479,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1467 kfree(lun_data); 1479 kfree(lun_data);
1468 out: 1480 out:
1469 scsi_device_put(sdev); 1481 scsi_device_put(sdev);
1470 if (sdev->sdev_state == SDEV_CREATED) 1482 if (scsi_device_created(sdev))
1471 /* 1483 /*
1472 * the sdev we used didn't appear in the report luns scan 1484 * the sdev we used didn't appear in the report luns scan
1473 */ 1485 */
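The scan paths above switch from an open-coded SDEV_CREATED test to scsi_device_created(), so a device parked in the new SDEV_CREATED_BLOCK state still counts as not yet scanned. The helper's definition is not in these hunks; presumably it is an inline of this shape:

/* Sketch of the assumed helper: "device is still in a freshly created state". */
static inline int scsi_device_created(struct scsi_device *sdev)
{
	return sdev->sdev_state == SDEV_CREATED ||
	       sdev->sdev_state == SDEV_CREATED_BLOCK;
}
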
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ab3c71869be5..93c28f30bbd7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -34,6 +34,7 @@ static const struct {
34 { SDEV_QUIESCE, "quiesce" }, 34 { SDEV_QUIESCE, "quiesce" },
35 { SDEV_OFFLINE, "offline" }, 35 { SDEV_OFFLINE, "offline" },
36 { SDEV_BLOCK, "blocked" }, 36 { SDEV_BLOCK, "blocked" },
37 { SDEV_CREATED_BLOCK, "created-blocked" },
37}; 38};
38 39
39const char *scsi_device_state_name(enum scsi_device_state state) 40const char *scsi_device_state_name(enum scsi_device_state state)
@@ -560,12 +561,15 @@ sdev_rd_attr (vendor, "%.8s\n");
560sdev_rd_attr (model, "%.16s\n"); 561sdev_rd_attr (model, "%.16s\n");
561sdev_rd_attr (rev, "%.4s\n"); 562sdev_rd_attr (rev, "%.4s\n");
562 563
564/*
565 * TODO: can we make these symlinks to the block layer ones?
566 */
563static ssize_t 567static ssize_t
564sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) 568sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
565{ 569{
566 struct scsi_device *sdev; 570 struct scsi_device *sdev;
567 sdev = to_scsi_device(dev); 571 sdev = to_scsi_device(dev);
568 return snprintf (buf, 20, "%d\n", sdev->timeout / HZ); 572 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
569} 573}
570 574
571static ssize_t 575static ssize_t
@@ -576,7 +580,7 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr,
576 int timeout; 580 int timeout;
577 sdev = to_scsi_device(dev); 581 sdev = to_scsi_device(dev);
578 sscanf (buf, "%d\n", &timeout); 582 sscanf (buf, "%d\n", &timeout);
579 sdev->timeout = timeout * HZ; 583 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
580 return count; 584 return count;
581} 585}
582static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); 586static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
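With the timeout attribute now backed by the request queue, a driver that wants a non-default command timeout sets it on the queue rather than on sdev->timeout. A small sketch of doing so from slave_configure(); the 60-second value is only an example:

static int example_slave_configure(struct scsi_device *sdev)
{
	/* Replaces the old "sdev->timeout = 60 * HZ" style assignment. */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
	return 0;
}
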
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 257e097c39af..48ba413f7f6a 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
362 int err; 362 int err;
363 363
364 dprintk("%lx %u\n", uaddr, len); 364 dprintk("%lx %u\n", uaddr, len);
365 err = blk_rq_map_user(q, rq, (void *)uaddr, len); 365 err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
366 if (err) { 366 if (err) {
367 /* 367 /*
368 * TODO: need to fixup sg_tablesize, max_segment_size, 368 * TODO: need to fixup sg_tablesize, max_segment_size,
@@ -460,7 +460,7 @@ int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
460 460
461 /* TODO: replace with a O(1) alg */ 461 /* TODO: replace with a O(1) alg */
462 shost = scsi_host_lookup(host_no); 462 shost = scsi_host_lookup(host_no);
463 if (IS_ERR(shost)) { 463 if (!shost) {
464 printk(KERN_ERR "Could not find host no %d\n", host_no); 464 printk(KERN_ERR "Could not find host no %d\n", host_no);
465 return -EINVAL; 465 return -EINVAL;
466 } 466 }
@@ -550,7 +550,7 @@ int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid); 550 dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
551 551
552 shost = scsi_host_lookup(host_no); 552 shost = scsi_host_lookup(host_no);
553 if (IS_ERR(shost)) { 553 if (!shost) {
554 printk(KERN_ERR "Could not find host no %d\n", host_no); 554 printk(KERN_ERR "Could not find host no %d\n", host_no);
555 return err; 555 return err;
556 } 556 }
@@ -603,7 +603,7 @@ int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id); 603 dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
604 604
605 shost = scsi_host_lookup(host_no); 605 shost = scsi_host_lookup(host_no);
606 if (IS_ERR(shost)) { 606 if (!shost) {
607 printk(KERN_ERR "Could not find host no %d\n", host_no); 607 printk(KERN_ERR "Could not find host no %d\n", host_no);
608 return err; 608 return err;
609 } 609 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 56823fd1fb84..d5f7653bb94b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -40,31 +40,7 @@
40 40
41static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 41static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
42static void fc_vport_sched_delete(struct work_struct *work); 42static void fc_vport_sched_delete(struct work_struct *work);
43 43static int fc_vport_setup(struct Scsi_Host *shost, int channel,
44/*
45 * This is a temporary carrier for creating a vport. It will eventually
46 * be replaced by a real message definition for sgio or netlink.
47 *
48 * fc_vport_identifiers: This set of data contains all elements
49 * to uniquely identify and instantiate a FC virtual port.
50 *
51 * Notes:
52 * symbolic_name: The driver is to append the symbolic_name string data
53 * to the symbolic_node_name data that it generates by default.
54 * the resulting combination should then be registered with the switch.
55 * It is expected that things like Xen may stuff a VM title into
56 * this field.
57 */
58struct fc_vport_identifiers {
59 u64 node_name;
60 u64 port_name;
61 u32 roles;
62 bool disable;
63 enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
64 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
65};
66
67static int fc_vport_create(struct Scsi_Host *shost, int channel,
68 struct device *pdev, struct fc_vport_identifiers *ids, 44 struct device *pdev, struct fc_vport_identifiers *ids,
69 struct fc_vport **vport); 45 struct fc_vport **vport);
70 46
@@ -1760,7 +1736,7 @@ store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1760 vid.disable = false; /* always enabled */ 1736 vid.disable = false; /* always enabled */
1761 1737
1762 /* we only allow support on Channel 0 !!! */ 1738 /* we only allow support on Channel 0 !!! */
1763 stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport); 1739 stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1764 return stat ? stat : count; 1740 return stat ? stat : count;
1765} 1741}
1766static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL, 1742static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
@@ -1950,15 +1926,15 @@ static int fc_vport_match(struct attribute_container *cont,
1950 * Notes: 1926 * Notes:
1951 * This routine assumes no locks are held on entry. 1927 * This routine assumes no locks are held on entry.
1952 */ 1928 */
1953static enum scsi_eh_timer_return 1929static enum blk_eh_timer_return
1954fc_timed_out(struct scsi_cmnd *scmd) 1930fc_timed_out(struct scsi_cmnd *scmd)
1955{ 1931{
1956 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); 1932 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
1957 1933
1958 if (rport->port_state == FC_PORTSTATE_BLOCKED) 1934 if (rport->port_state == FC_PORTSTATE_BLOCKED)
1959 return EH_RESET_TIMER; 1935 return BLK_EH_RESET_TIMER;
1960 1936
1961 return EH_NOT_HANDLED; 1937 return BLK_EH_NOT_HANDLED;
1962} 1938}
1963 1939
1964/* 1940/*
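fc_timed_out() above switches from the SCSI-private EH_* values to the block layer's enum blk_eh_timer_return, which the unified request-timeout code consumes directly. A hedged sketch of the shape such a transport timeout handler takes under the new scheme (the port-blocked test below is a stand-in for driver-specific state; BLK_EH_HANDLED, the third enum value, is simply not needed here):

static enum blk_eh_timer_return example_timed_out(struct scsi_cmnd *scmd)
{
	if (port_is_blocked(scmd))		/* assumption: driver-specific check */
		return BLK_EH_RESET_TIMER;	/* re-arm the request timer */

	return BLK_EH_NOT_HANDLED;		/* fall through to normal error handling */
}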
@@ -3103,7 +3079,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3103 3079
3104 3080
3105/** 3081/**
3106 * fc_vport_create - allocates and creates a FC virtual port. 3082 * fc_vport_setup - allocates and creates a FC virtual port.
3107 * @shost: scsi host the virtual port is connected to. 3083 * @shost: scsi host the virtual port is connected to.
3108 * @channel: Channel on shost port connected to. 3084 * @channel: Channel on shost port connected to.
3109 * @pdev: parent device for vport 3085 * @pdev: parent device for vport
@@ -3118,7 +3094,7 @@ fc_scsi_scan_rport(struct work_struct *work)
3118 * This routine assumes no locks are held on entry. 3094 * This routine assumes no locks are held on entry.
3119 */ 3095 */
3120static int 3096static int
3121fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev, 3097fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3122 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport) 3098 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3123{ 3099{
3124 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 3100 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
@@ -3231,6 +3207,28 @@ delete_vport:
3231 return error; 3207 return error;
3232} 3208}
3233 3209
3210/**
3211 * fc_vport_create - Admin App or LLDD requests creation of a vport
3212 * @shost: scsi host the virtual port is connected to.
3213 * @channel: channel on shost port connected to.
3214 * @ids: The world wide names, FC4 port roles, etc for
3215 * the virtual port.
3216 *
3217 * Notes:
3218 * This routine assumes no locks are held on entry.
3219 */
3220struct fc_vport *
3221fc_vport_create(struct Scsi_Host *shost, int channel,
3222 struct fc_vport_identifiers *ids)
3223{
3224 int stat;
3225 struct fc_vport *vport;
3226
3227 stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3228 ids, &vport);
3229 return stat ? NULL : vport;
3230}
3231EXPORT_SYMBOL(fc_vport_create);
3234 3232
3235/** 3233/**
3236 * fc_vport_terminate - Admin App or LLDD requests termination of a vport 3234 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
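With the rename above, the old name is reused for a new exported entry point: fc_vport_create() is now a thin wrapper around fc_vport_setup(), so an LLDD (or NPIV-aware virtualization code) can request a vport directly instead of going through the sysfs attribute. A rough usage sketch, assuming the caller already has a Scsi_Host and the world-wide names; the role constant and the error handling here are illustrative, not part of this hunk:

	struct fc_vport_identifiers vid = {
		.vport_type	= FC_PORTTYPE_NPIV,	/* only NPIV is allowed */
		.node_name	= wwnn,
		.port_name	= wwpn,
		.roles		= FC_PORT_ROLE_FCP_INITIATOR,
		.disable	= false,
	};
	struct fc_vport *vport;

	vport = fc_vport_create(shost, 0, &vid);	/* channel 0, as in the sysfs path */
	if (!vport)
		return -ENOMEM;		/* the wrapper folds every setup error into NULL */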
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 043c3921164f..0ce5f7cdfe2a 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1361,7 +1361,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
1361 return -EINVAL; 1361 return -EINVAL;
1362 1362
1363 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); 1363 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
1364 if (IS_ERR(shost)) { 1364 if (!shost) {
1365 printk(KERN_ERR "target discovery could not find host no %u\n", 1365 printk(KERN_ERR "target discovery could not find host no %u\n",
1366 ev->u.tgt_dscvr.host_no); 1366 ev->u.tgt_dscvr.host_no);
1367 return -ENODEV; 1367 return -ENODEV;
@@ -1387,7 +1387,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
1387 return -ENOSYS; 1387 return -ENOSYS;
1388 1388
1389 shost = scsi_host_lookup(ev->u.set_host_param.host_no); 1389 shost = scsi_host_lookup(ev->u.set_host_param.host_no);
1390 if (IS_ERR(shost)) { 1390 if (!shost) {
1391 printk(KERN_ERR "set_host_param could not find host no %u\n", 1391 printk(KERN_ERR "set_host_param could not find host no %u\n",
1392 ev->u.set_host_param.host_no); 1392 ev->u.set_host_param.host_no);
1393 return -ENODEV; 1393 return -ENODEV;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d7856454..a7b53be63367 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
47#include <linux/blkpg.h> 47#include <linux/blkpg.h>
48#include <linux/delay.h> 48#include <linux/delay.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/string_helpers.h>
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51 52
52#include <scsi/scsi.h> 53#include <scsi/scsi.h>
@@ -86,6 +87,12 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
86MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); 87MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
87MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); 88MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
88 89
90#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
91#define SD_MINORS 16
92#else
93#define SD_MINORS 0
94#endif
95
89static int sd_revalidate_disk(struct gendisk *); 96static int sd_revalidate_disk(struct gendisk *);
90static int sd_probe(struct device *); 97static int sd_probe(struct device *);
91static int sd_remove(struct device *); 98static int sd_remove(struct device *);
@@ -159,7 +166,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
159 sd_print_sense_hdr(sdkp, &sshdr); 166 sd_print_sense_hdr(sdkp, &sshdr);
160 return -EINVAL; 167 return -EINVAL;
161 } 168 }
162 sd_revalidate_disk(sdkp->disk); 169 revalidate_disk(sdkp->disk);
163 return count; 170 return count;
164} 171}
165 172
@@ -377,7 +384,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
377 sector_t block = rq->sector; 384 sector_t block = rq->sector;
378 sector_t threshold; 385 sector_t threshold;
379 unsigned int this_count = rq->nr_sectors; 386 unsigned int this_count = rq->nr_sectors;
380 unsigned int timeout = sdp->timeout;
381 int ret; 387 int ret;
382 388
383 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 389 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -578,7 +584,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
578 SCpnt->transfersize = sdp->sector_size; 584 SCpnt->transfersize = sdp->sector_size;
579 SCpnt->underflow = this_count << 9; 585 SCpnt->underflow = this_count << 9;
580 SCpnt->allowed = SD_MAX_RETRIES; 586 SCpnt->allowed = SD_MAX_RETRIES;
581 SCpnt->timeout_per_command = timeout;
582 587
583 /* 588 /*
584 * This indicates that the command is ready from our end to be 589 * This indicates that the command is ready from our end to be
@@ -910,7 +915,7 @@ static void sd_rescan(struct device *dev)
910 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); 915 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
911 916
912 if (sdkp) { 917 if (sdkp) {
913 sd_revalidate_disk(sdkp->disk); 918 revalidate_disk(sdkp->disk);
914 scsi_disk_put(sdkp); 919 scsi_disk_put(sdkp);
915 } 920 }
916} 921}
@@ -1429,27 +1434,21 @@ got_data:
1429 */ 1434 */
1430 sector_size = 512; 1435 sector_size = 512;
1431 } 1436 }
1437 blk_queue_hardsect_size(sdp->request_queue, sector_size);
1438
1432 { 1439 {
1433 /* 1440 char cap_str_2[10], cap_str_10[10];
1434 * The msdos fs needs to know the hardware sector size 1441 u64 sz = sdkp->capacity << ffz(~sector_size);
1435 * So I have created this table. See ll_rw_blk.c
1436 * Jacques Gelinas (Jacques@solucorp.qc.ca)
1437 */
1438 int hard_sector = sector_size;
1439 sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
1440 struct request_queue *queue = sdp->request_queue;
1441 sector_t mb = sz;
1442 1442
1443 blk_queue_hardsect_size(queue, hard_sector); 1443 string_get_size(sz, STRING_UNITS_2, cap_str_2,
1444 /* avoid 64-bit division on 32-bit platforms */ 1444 sizeof(cap_str_2));
1445 sector_div(sz, 625); 1445 string_get_size(sz, STRING_UNITS_10, cap_str_10,
1446 mb -= sz - 974; 1446 sizeof(cap_str_10));
1447 sector_div(mb, 1950);
1448 1447
1449 sd_printk(KERN_NOTICE, sdkp, 1448 sd_printk(KERN_NOTICE, sdkp,
1450 "%llu %d-byte hardware sectors (%llu MB)\n", 1449 "%llu %d-byte hardware sectors: (%s/%s)\n",
1451 (unsigned long long)sdkp->capacity, 1450 (unsigned long long)sdkp->capacity,
1452 hard_sector, (unsigned long long)mb); 1451 sector_size, cap_str_10, cap_str_2);
1453 } 1452 }
1454 1453
1455 /* Rescale capacity to 512-byte units */ 1454 /* Rescale capacity to 512-byte units */
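The capacity report above drops the open-coded megabyte arithmetic in favour of string_get_size() from the newly included linux/string_helpers.h. The byte count is built as sdkp->capacity << ffz(~sector_size): for a power-of-two sector size, ffz(~n) is log2(n), so the shift multiplies the sector count by the sector size without any 64-bit division. A rough worked example with assumed values, a disk of 3907029168 512-byte sectors (about 2 TB):

	char cap_str_2[10], cap_str_10[10];
	u64 bytes = 3907029168ULL << ffz(~512);	/* == 3907029168 * 512 = 2000398934016 */

	string_get_size(bytes, STRING_UNITS_2,  cap_str_2,  sizeof(cap_str_2));
	string_get_size(bytes, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
	/* cap_str_2 comes out around "1.81 TiB" and cap_str_10 around "2.00 TB";
	 * the exact rounding is up to string_get_size() */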
@@ -1764,6 +1763,52 @@ static int sd_revalidate_disk(struct gendisk *disk)
1764} 1763}
1765 1764
1766/** 1765/**
1766 * sd_format_disk_name - format disk name
1767 * @prefix: name prefix - ie. "sd" for SCSI disks
1768 * @index: index of the disk to format name for
1769 * @buf: output buffer
1770 * @buflen: length of the output buffer
1771 *
 1772 * SCSI disk names start at sda. The 26th device is sdz and the
 1773 * 27th is sdaa. The last two-letter suffix is sdzz,
 1774 * which is followed by sdaaa.
 1775 *
 1776 * This is basically base-26 counting with one extra 'nil' entry
 1777 * at the beginning from the second digit on, and it can be
 1778 * computed with a method similar to base-26 conversion, with the
 1779 * index shifted down by 1 after each digit is computed.
1780 *
1781 * CONTEXT:
1782 * Don't care.
1783 *
1784 * RETURNS:
1785 * 0 on success, -errno on failure.
1786 */
1787static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
1788{
1789 const int base = 'z' - 'a' + 1;
1790 char *begin = buf + strlen(prefix);
1791 char *end = buf + buflen;
1792 char *p;
1793 int unit;
1794
1795 p = end - 1;
1796 *p = '\0';
1797 unit = base;
1798 do {
1799 if (p == begin)
1800 return -EINVAL;
1801 *--p = 'a' + (index % unit);
1802 index = (index / unit) - 1;
1803 } while (index >= 0);
1804
1805 memmove(begin, p, end - p);
1806 memcpy(buf, prefix, strlen(prefix));
1807
1808 return 0;
1809}
1810
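sd_format_disk_name() above replaces the three hard-coded sprintf() branches that used to live in sd_probe() and, unlike them, works for arbitrarily long suffixes. A small userspace re-implementation, given only to illustrate the shifted base-26 scheme the comment describes; the reuse of the name and the main() harness are not part of the patch:

#include <stdio.h>
#include <string.h>

static int format_disk_name(const char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p = end - 1;

	*p = '\0';
	do {
		if (p == begin)
			return -1;			/* buffer too small */
		*--p = 'a' + (index % base);
		index = (index / base) - 1;		/* the -1 makes 26 map to "aa", not "ba" */
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));
	return 0;
}

int main(void)
{
	static const int samples[] = { 0, 25, 26, 701, 702 };
	char name[32];
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		format_disk_name("sd", samples[i], name, sizeof(name));
		printf("index %4d -> %s\n", samples[i], name);	/* sda, sdz, sdaa, sdzz, sdaaa */
	}
	return 0;
}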
1811/**
1767 * sd_probe - called during driver initialization and whenever a 1812 * sd_probe - called during driver initialization and whenever a
1768 * new scsi device is attached to the system. It is called once 1813 * new scsi device is attached to the system. It is called once
1769 * for each scsi device (not just disks) present. 1814 * for each scsi device (not just disks) present.
@@ -1801,7 +1846,7 @@ static int sd_probe(struct device *dev)
1801 if (!sdkp) 1846 if (!sdkp)
1802 goto out; 1847 goto out;
1803 1848
1804 gd = alloc_disk(16); 1849 gd = alloc_disk(SD_MINORS);
1805 if (!gd) 1850 if (!gd)
1806 goto out_free; 1851 goto out_free;
1807 1852
@@ -1815,8 +1860,8 @@ static int sd_probe(struct device *dev)
1815 if (error) 1860 if (error)
1816 goto out_put; 1861 goto out_put;
1817 1862
1818 error = -EBUSY; 1863 error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
1819 if (index >= SD_MAX_DISKS) 1864 if (error)
1820 goto out_free_index; 1865 goto out_free_index;
1821 1866
1822 sdkp->device = sdp; 1867 sdkp->device = sdp;
@@ -1826,11 +1871,12 @@ static int sd_probe(struct device *dev)
1826 sdkp->openers = 0; 1871 sdkp->openers = 0;
1827 sdkp->previous_state = 1; 1872 sdkp->previous_state = 1;
1828 1873
1829 if (!sdp->timeout) { 1874 if (!sdp->request_queue->rq_timeout) {
1830 if (sdp->type != TYPE_MOD) 1875 if (sdp->type != TYPE_MOD)
1831 sdp->timeout = SD_TIMEOUT; 1876 blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1832 else 1877 else
1833 sdp->timeout = SD_MOD_TIMEOUT; 1878 blk_queue_rq_timeout(sdp->request_queue,
1879 SD_MOD_TIMEOUT);
1834 } 1880 }
1835 1881
1836 device_initialize(&sdkp->dev); 1882 device_initialize(&sdkp->dev);
@@ -1843,24 +1889,12 @@ static int sd_probe(struct device *dev)
1843 1889
1844 get_device(&sdp->sdev_gendev); 1890 get_device(&sdp->sdev_gendev);
1845 1891
1846 gd->major = sd_major((index & 0xf0) >> 4); 1892 if (index < SD_MAX_DISKS) {
1847 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1893 gd->major = sd_major((index & 0xf0) >> 4);
1848 gd->minors = 16; 1894 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
1849 gd->fops = &sd_fops; 1895 gd->minors = SD_MINORS;
1850
1851 if (index < 26) {
1852 sprintf(gd->disk_name, "sd%c", 'a' + index % 26);
1853 } else if (index < (26 + 1) * 26) {
1854 sprintf(gd->disk_name, "sd%c%c",
1855 'a' + index / 26 - 1,'a' + index % 26);
1856 } else {
1857 const unsigned int m1 = (index / 26 - 1) / 26 - 1;
1858 const unsigned int m2 = (index / 26 - 1) % 26;
1859 const unsigned int m3 = index % 26;
1860 sprintf(gd->disk_name, "sd%c%c%c",
1861 'a' + m1, 'a' + m2, 'a' + m3);
1862 } 1896 }
1863 1897 gd->fops = &sd_fops;
1864 gd->private_data = &sdkp->driver; 1898 gd->private_data = &sdkp->driver;
1865 gd->queue = sdkp->device->request_queue; 1899 gd->queue = sdkp->device->request_queue;
1866 1900
@@ -1869,7 +1903,7 @@ static int sd_probe(struct device *dev)
1869 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 1903 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
1870 1904
1871 gd->driverfs_dev = &sdp->sdev_gendev; 1905 gd->driverfs_dev = &sdp->sdev_gendev;
1872 gd->flags = GENHD_FL_DRIVERFS; 1906 gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;
1873 if (sdp->removable) 1907 if (sdp->removable)
1874 gd->flags |= GENHD_FL_REMOVABLE; 1908 gd->flags |= GENHD_FL_REMOVABLE;
1875 1909
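Taken together, the sd_probe() changes above hook sd into the extended-devt work elsewhere in this series: SD_MINORS stays 16 unless CONFIG_DEBUG_BLOCK_EXT_DEVT forces it to 0, an index beyond SD_MAX_DISKS no longer fails with -EBUSY but simply gets no static major/minor mapping, and GENHD_FL_EXT_DEVT lets the block layer assign such disks dynamically numbered minors. A condensed restatement of the resulting probe-time logic, with error handling and unrelated setup omitted:

	gd = alloc_disk(SD_MINORS);		/* may be alloc_disk(0) under EXT_DEVT debugging */
	if (!gd)
		goto out_free;

	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
	if (error)
		goto out_free_index;		/* name space exhausted */

	if (index < SD_MAX_DISKS) {
		/* classic static dev_t layout: 16 minors per disk */
		gd->major = sd_major((index & 0xf0) >> 4);
		gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
		gd->minors = SD_MINORS;
	}
	/* disks past SD_MAX_DISKS rely entirely on extended devt numbers */
	gd->flags = GENHD_FL_EXT_DEVT | GENHD_FL_DRIVERFS;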
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 661f9f21650a..ba9b9bbd4e73 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -47,7 +47,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */
47#include <linux/seq_file.h> 47#include <linux/seq_file.h>
48#include <linux/blkdev.h> 48#include <linux/blkdev.h>
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/scatterlist.h>
51#include <linux/blktrace_api.h> 50#include <linux/blktrace_api.h>
52#include <linux/smp_lock.h> 51#include <linux/smp_lock.h>
53 52
@@ -69,7 +68,6 @@ static void sg_proc_cleanup(void);
69#endif 68#endif
70 69
71#define SG_ALLOW_DIO_DEF 0 70#define SG_ALLOW_DIO_DEF 0
72#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
73 71
74#define SG_MAX_DEVS 32768 72#define SG_MAX_DEVS 32768
75 73
@@ -118,8 +116,8 @@ typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
118 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ 116 unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
119 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ 117 unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
120 unsigned bufflen; /* Size of (aggregate) data buffer */ 118 unsigned bufflen; /* Size of (aggregate) data buffer */
121 unsigned b_malloc_len; /* actual len malloc'ed in buffer */ 119 struct page **pages;
122 struct scatterlist *buffer;/* scatter list */ 120 int page_order;
123 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ 121 char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
124 unsigned char cmd_opcode; /* first byte of command */ 122 unsigned char cmd_opcode; /* first byte of command */
125} Sg_scatter_hold; 123} Sg_scatter_hold;
@@ -137,6 +135,8 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
137 char orphan; /* 1 -> drop on sight, 0 -> normal */ 135 char orphan; /* 1 -> drop on sight, 0 -> normal */
138 char sg_io_owned; /* 1 -> packet belongs to SG_IO */ 136 char sg_io_owned; /* 1 -> packet belongs to SG_IO */
139 volatile char done; /* 0->before bh, 1->before read, 2->read */ 137 volatile char done; /* 0->before bh, 1->before read, 2->read */
138 struct request *rq;
139 struct bio *bio;
140} Sg_request; 140} Sg_request;
141 141
142typedef struct sg_fd { /* holds the state of a file descriptor */ 142typedef struct sg_fd { /* holds the state of a file descriptor */
@@ -175,8 +175,8 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
175 175
176static int sg_fasync(int fd, struct file *filp, int mode); 176static int sg_fasync(int fd, struct file *filp, int mode);
177/* tasklet or soft irq callback */ 177/* tasklet or soft irq callback */
178static void sg_cmd_done(void *data, char *sense, int result, int resid); 178static void sg_rq_end_io(struct request *rq, int uptodate);
179static int sg_start_req(Sg_request * srp); 179static int sg_start_req(Sg_request *srp, unsigned char *cmd);
180static void sg_finish_rem_req(Sg_request * srp); 180static void sg_finish_rem_req(Sg_request * srp);
181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); 181static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, 182static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
@@ -188,17 +188,11 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
188 int read_only, Sg_request **o_srp); 188 int read_only, Sg_request **o_srp);
189static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 189static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
190 unsigned char *cmnd, int timeout, int blocking); 190 unsigned char *cmnd, int timeout, int blocking);
191static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
192 int wr_xf, int *countp, unsigned char __user **up);
193static int sg_write_xfer(Sg_request * srp);
194static int sg_read_xfer(Sg_request * srp);
195static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 191static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
196static void sg_remove_scat(Sg_scatter_hold * schp); 192static void sg_remove_scat(Sg_scatter_hold * schp);
197static void sg_build_reserve(Sg_fd * sfp, int req_size); 193static void sg_build_reserve(Sg_fd * sfp, int req_size);
198static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 194static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
199static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 195static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
200static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
201static void sg_page_free(struct page *page, int size);
202static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 196static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
203static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 197static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
204static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 198static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
@@ -206,7 +200,6 @@ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
206static Sg_request *sg_add_request(Sg_fd * sfp); 200static Sg_request *sg_add_request(Sg_fd * sfp);
207static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 201static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
208static int sg_res_in_use(Sg_fd * sfp); 202static int sg_res_in_use(Sg_fd * sfp);
209static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
210static Sg_device *sg_get_dev(int dev); 203static Sg_device *sg_get_dev(int dev);
211#ifdef CONFIG_SCSI_PROC_FS 204#ifdef CONFIG_SCSI_PROC_FS
212static int sg_last_dev(void); 205static int sg_last_dev(void);
@@ -529,8 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
529 err = -EFAULT; 522 err = -EFAULT;
530 goto err_out; 523 goto err_out;
531 } 524 }
532 err = sg_read_xfer(srp); 525err_out:
533 err_out:
534 sg_finish_rem_req(srp); 526 sg_finish_rem_req(srp);
535 return (0 == err) ? count : err; 527 return (0 == err) ? count : err;
536} 528}
@@ -612,7 +604,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
612 else 604 else
613 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; 605 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
614 hp->dxfer_len = mxsize; 606 hp->dxfer_len = mxsize;
615 hp->dxferp = (char __user *)buf + cmd_size; 607 if (hp->dxfer_direction == SG_DXFER_TO_DEV)
608 hp->dxferp = (char __user *)buf + cmd_size;
609 else
610 hp->dxferp = NULL;
616 hp->sbp = NULL; 611 hp->sbp = NULL;
617 hp->timeout = old_hdr.reply_len; /* structure abuse ... */ 612 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
618 hp->flags = input_size; /* structure abuse ... */ 613 hp->flags = input_size; /* structure abuse ... */
@@ -732,16 +727,12 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
732 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 727 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
733 (int) cmnd[0], (int) hp->cmd_len)); 728 (int) cmnd[0], (int) hp->cmd_len));
734 729
735 if ((k = sg_start_req(srp))) { 730 k = sg_start_req(srp, cmnd);
731 if (k) {
736 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); 732 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
737 sg_finish_rem_req(srp); 733 sg_finish_rem_req(srp);
738 return k; /* probably out of space --> ENOMEM */ 734 return k; /* probably out of space --> ENOMEM */
739 } 735 }
740 if ((k = sg_write_xfer(srp))) {
741 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
742 sg_finish_rem_req(srp);
743 return k;
744 }
745 if (sdp->detached) { 736 if (sdp->detached) {
746 sg_finish_rem_req(srp); 737 sg_finish_rem_req(srp);
747 return -ENODEV; 738 return -ENODEV;
@@ -763,20 +754,11 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
763 break; 754 break;
764 } 755 }
765 hp->duration = jiffies_to_msecs(jiffies); 756 hp->duration = jiffies_to_msecs(jiffies);
766/* Now send everything of to mid-level. The next time we hear about this 757
767 packet is when sg_cmd_done() is called (i.e. a callback). */ 758 srp->rq->timeout = timeout;
768 if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer, 759 blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
769 hp->dxfer_len, srp->data.k_use_sg, timeout, 760 srp->rq, 1, sg_rq_end_io);
770 SG_DEFAULT_RETRIES, srp, sg_cmd_done, 761 return 0;
771 GFP_ATOMIC)) {
772 SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
773 /*
774 * most likely out of mem, but could also be a bad map
775 */
776 sg_finish_rem_req(srp);
777 return -ENOMEM;
778 } else
779 return 0;
780} 762}
781 763
782static int 764static int
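The sg_common_write() hunk above is the heart of the sg conversion: the prepared struct request is handed straight to the block layer with blk_execute_rq_nowait() instead of going through scsi_execute_async(), and completion is reported through the request's end_io callback rather than a private done() hook. The essential submission step, extracted from the hunk as a sketch:

	srp->rq->timeout = timeout;			/* per-request timeout, in jiffies */
	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
			      srp->rq, 1 /* at_head */, sg_rq_end_io);
	/* sg_rq_end_io() later reads rq->errors, rq->sense and rq->data_len,
	 * the fields sg_cmd_done() used to receive as arguments */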
@@ -1192,8 +1174,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1192 Sg_fd *sfp; 1174 Sg_fd *sfp;
1193 unsigned long offset, len, sa; 1175 unsigned long offset, len, sa;
1194 Sg_scatter_hold *rsv_schp; 1176 Sg_scatter_hold *rsv_schp;
1195 struct scatterlist *sg; 1177 int k, length;
1196 int k;
1197 1178
1198 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) 1179 if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1199 return VM_FAULT_SIGBUS; 1180 return VM_FAULT_SIGBUS;
@@ -1203,15 +1184,14 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1203 return VM_FAULT_SIGBUS; 1184 return VM_FAULT_SIGBUS;
1204 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n", 1185 SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
1205 offset, rsv_schp->k_use_sg)); 1186 offset, rsv_schp->k_use_sg));
1206 sg = rsv_schp->buffer;
1207 sa = vma->vm_start; 1187 sa = vma->vm_start;
1208 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1188 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1209 ++k, sg = sg_next(sg)) { 1189 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1210 len = vma->vm_end - sa; 1190 len = vma->vm_end - sa;
1211 len = (len < sg->length) ? len : sg->length; 1191 len = (len < length) ? len : length;
1212 if (offset < len) { 1192 if (offset < len) {
1213 struct page *page; 1193 struct page *page = nth_page(rsv_schp->pages[k],
1214 page = virt_to_page(page_address(sg_page(sg)) + offset); 1194 offset >> PAGE_SHIFT);
1215 get_page(page); /* increment page count */ 1195 get_page(page); /* increment page count */
1216 vmf->page = page; 1196 vmf->page = page;
1217 return 0; /* success */ 1197 return 0; /* success */
@@ -1233,8 +1213,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1233 Sg_fd *sfp; 1213 Sg_fd *sfp;
1234 unsigned long req_sz, len, sa; 1214 unsigned long req_sz, len, sa;
1235 Sg_scatter_hold *rsv_schp; 1215 Sg_scatter_hold *rsv_schp;
1236 int k; 1216 int k, length;
1237 struct scatterlist *sg;
1238 1217
1239 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) 1218 if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1240 return -ENXIO; 1219 return -ENXIO;
@@ -1248,11 +1227,10 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1248 return -ENOMEM; /* cannot map more than reserved buffer */ 1227 return -ENOMEM; /* cannot map more than reserved buffer */
1249 1228
1250 sa = vma->vm_start; 1229 sa = vma->vm_start;
1251 sg = rsv_schp->buffer; 1230 length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1252 for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end); 1231 for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1253 ++k, sg = sg_next(sg)) {
1254 len = vma->vm_end - sa; 1232 len = vma->vm_end - sa;
1255 len = (len < sg->length) ? len : sg->length; 1233 len = (len < length) ? len : length;
1256 sa += len; 1234 sa += len;
1257 } 1235 }
1258 1236
@@ -1263,16 +1241,19 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
1263 return 0; 1241 return 0;
1264} 1242}
1265 1243
1266/* This function is a "bottom half" handler that is called by the 1244/*
1267 * mid level when a command is completed (or has failed). */ 1245 * This function is a "bottom half" handler that is called by the mid
1268static void 1246 * level when a command is completed (or has failed).
1269sg_cmd_done(void *data, char *sense, int result, int resid) 1247 */
1248static void sg_rq_end_io(struct request *rq, int uptodate)
1270{ 1249{
1271 Sg_request *srp = data; 1250 struct sg_request *srp = rq->end_io_data;
1272 Sg_device *sdp = NULL; 1251 Sg_device *sdp = NULL;
1273 Sg_fd *sfp; 1252 Sg_fd *sfp;
1274 unsigned long iflags; 1253 unsigned long iflags;
1275 unsigned int ms; 1254 unsigned int ms;
1255 char *sense;
1256 int result, resid;
1276 1257
1277 if (NULL == srp) { 1258 if (NULL == srp) {
1278 printk(KERN_ERR "sg_cmd_done: NULL request\n"); 1259 printk(KERN_ERR "sg_cmd_done: NULL request\n");
@@ -1286,6 +1267,9 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1286 return; 1267 return;
1287 } 1268 }
1288 1269
1270 sense = rq->sense;
1271 result = rq->errors;
1272 resid = rq->data_len;
1289 1273
1290 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1274 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1291 sdp->disk->disk_name, srp->header.pack_id, result)); 1275 sdp->disk->disk_name, srp->header.pack_id, result));
@@ -1296,7 +1280,6 @@ sg_cmd_done(void *data, char *sense, int result, int resid)
1296 if (0 != result) { 1280 if (0 != result) {
1297 struct scsi_sense_hdr sshdr; 1281 struct scsi_sense_hdr sshdr;
1298 1282
1299 memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
1300 srp->header.status = 0xff & result; 1283 srp->header.status = 0xff & result;
1301 srp->header.masked_status = status_byte(result); 1284 srp->header.masked_status = status_byte(result);
1302 srp->header.msg_status = msg_byte(result); 1285 srp->header.msg_status = msg_byte(result);
@@ -1634,37 +1617,79 @@ exit_sg(void)
1634 idr_destroy(&sg_index_idr); 1617 idr_destroy(&sg_index_idr);
1635} 1618}
1636 1619
1637static int 1620static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1638sg_start_req(Sg_request * srp)
1639{ 1621{
1640 int res; 1622 int res;
1623 struct request *rq;
1641 Sg_fd *sfp = srp->parentfp; 1624 Sg_fd *sfp = srp->parentfp;
1642 sg_io_hdr_t *hp = &srp->header; 1625 sg_io_hdr_t *hp = &srp->header;
1643 int dxfer_len = (int) hp->dxfer_len; 1626 int dxfer_len = (int) hp->dxfer_len;
1644 int dxfer_dir = hp->dxfer_direction; 1627 int dxfer_dir = hp->dxfer_direction;
1628 unsigned int iov_count = hp->iovec_count;
1645 Sg_scatter_hold *req_schp = &srp->data; 1629 Sg_scatter_hold *req_schp = &srp->data;
1646 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1630 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1631 struct request_queue *q = sfp->parentdp->device->request_queue;
1632 struct rq_map_data *md, map_data;
1633 int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1634
1635 SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
1636 dxfer_len));
1637
1638 rq = blk_get_request(q, rw, GFP_ATOMIC);
1639 if (!rq)
1640 return -ENOMEM;
1641
1642 memcpy(rq->cmd, cmd, hp->cmd_len);
1643
1644 rq->cmd_len = hp->cmd_len;
1645 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1646
1647 srp->rq = rq;
1648 rq->end_io_data = srp;
1649 rq->sense = srp->sense_b;
1650 rq->retries = SG_DEFAULT_RETRIES;
1647 1651
1648 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1649 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1652 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1650 return 0; 1653 return 0;
1651 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && 1654
1652 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && 1655 if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1653 (!sfp->parentdp->device->host->unchecked_isa_dma)) { 1656 dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1654 res = sg_build_direct(srp, sfp, dxfer_len); 1657 !sfp->parentdp->device->host->unchecked_isa_dma &&
1655 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ 1658 blk_rq_aligned(q, hp->dxferp, dxfer_len))
1656 return res; 1659 md = NULL;
1657 } 1660 else
1658 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) 1661 md = &map_data;
1659 sg_link_reserve(sfp, srp, dxfer_len); 1662
1660 else { 1663 if (md) {
1661 res = sg_build_indirect(req_schp, sfp, dxfer_len); 1664 if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1662 if (res) { 1665 sg_link_reserve(sfp, srp, dxfer_len);
1663 sg_remove_scat(req_schp); 1666 else {
1664 return res; 1667 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1668 if (res)
1669 return res;
1665 } 1670 }
1671
1672 md->pages = req_schp->pages;
1673 md->page_order = req_schp->page_order;
1674 md->nr_entries = req_schp->k_use_sg;
1666 } 1675 }
1667 return 0; 1676
1677 if (iov_count)
1678 res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
1679 hp->dxfer_len, GFP_ATOMIC);
1680 else
1681 res = blk_rq_map_user(q, rq, md, hp->dxferp,
1682 hp->dxfer_len, GFP_ATOMIC);
1683
1684 if (!res) {
1685 srp->bio = rq->bio;
1686
1687 if (!md) {
1688 req_schp->dio_in_use = 1;
1689 hp->info |= SG_INFO_DIRECT_IO;
1690 }
1691 }
1692 return res;
1668} 1693}
1669 1694
1670static void 1695static void
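The rewritten sg_start_req() above chooses between two mapping strategies before calling blk_rq_map_user()/blk_rq_map_user_iov(): passing md == NULL lets the block layer pin the user pages itself (direct I/O), while a populated struct rq_map_data makes it copy through the driver-owned reserve or indirect buffer pages. A compressed sketch of that decision and the mapping call, equivalent in substance to the hunk but with the direct-I/O condition abbreviated and the reserve-buffer bookkeeping left out:

	struct rq_map_data map_data, *md;

	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
	    blk_rq_aligned(q, hp->dxferp, dxfer_len))
		md = NULL;				/* block layer pins the user pages */
	else {
		md = &map_data;
		md->pages = req_schp->pages;		/* driver-allocated bounce pages */
		md->page_order = req_schp->page_order;
		md->nr_entries = req_schp->k_use_sg;
	}

	res = blk_rq_map_user(q, rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC);
	if (!res)
		srp->bio = rq->bio;			/* saved for blk_rq_unmap_user() later */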
@@ -1678,186 +1703,37 @@ sg_finish_rem_req(Sg_request * srp)
1678 sg_unlink_reserve(sfp, srp); 1703 sg_unlink_reserve(sfp, srp);
1679 else 1704 else
1680 sg_remove_scat(req_schp); 1705 sg_remove_scat(req_schp);
1706
1707 if (srp->rq) {
1708 if (srp->bio)
1709 blk_rq_unmap_user(srp->bio);
1710
1711 blk_put_request(srp->rq);
1712 }
1713
1681 sg_remove_request(sfp, srp); 1714 sg_remove_request(sfp, srp);
1682} 1715}
1683 1716
1684static int 1717static int
1685sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) 1718sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1686{ 1719{
1687 int sg_bufflen = tablesize * sizeof(struct scatterlist); 1720 int sg_bufflen = tablesize * sizeof(struct page *);
1688 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 1721 gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1689 1722
1690 /* 1723 schp->pages = kzalloc(sg_bufflen, gfp_flags);
1691 * TODO: test without low_dma, we should not need it since 1724 if (!schp->pages)
1692 * the block layer will bounce the buffer for us
1693 *
1694 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1695 */
1696 if (sfp->low_dma)
1697 gfp_flags |= GFP_DMA;
1698 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1699 if (!schp->buffer)
1700 return -ENOMEM; 1725 return -ENOMEM;
1701 sg_init_table(schp->buffer, tablesize);
1702 schp->sglist_len = sg_bufflen; 1726 schp->sglist_len = sg_bufflen;
1703 return tablesize; /* number of scat_gath elements allocated */ 1727 return tablesize; /* number of scat_gath elements allocated */
1704} 1728}
1705 1729
1706#ifdef SG_ALLOW_DIO_CODE
1707/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1708 /* TODO: hopefully we can use the generic block layer code */
1709
1710/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1711 - mapping of all pages not successful
1712 (i.e., either completely successful or fails)
1713*/
1714static int
1715st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
1716 unsigned long uaddr, size_t count, int rw)
1717{
1718 unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
1719 unsigned long start = uaddr >> PAGE_SHIFT;
1720 const int nr_pages = end - start;
1721 int res, i, j;
1722 struct page **pages;
1723
1724 /* User attempted Overflow! */
1725 if ((uaddr + count) < uaddr)
1726 return -EINVAL;
1727
1728 /* Too big */
1729 if (nr_pages > max_pages)
1730 return -ENOMEM;
1731
1732 /* Hmm? */
1733 if (count == 0)
1734 return 0;
1735
1736 if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
1737 return -ENOMEM;
1738
1739 /* Try to fault in all of the necessary pages */
1740 down_read(&current->mm->mmap_sem);
1741 /* rw==READ means read from drive, write into memory area */
1742 res = get_user_pages(
1743 current,
1744 current->mm,
1745 uaddr,
1746 nr_pages,
1747 rw == READ,
1748 0, /* don't force */
1749 pages,
1750 NULL);
1751 up_read(&current->mm->mmap_sem);
1752
1753 /* Errors and no page mapped should return here */
1754 if (res < nr_pages)
1755 goto out_unmap;
1756
1757 for (i=0; i < nr_pages; i++) {
1758 /* FIXME: flush superflous for rw==READ,
1759 * probably wrong function for rw==WRITE
1760 */
1761 flush_dcache_page(pages[i]);
1762 /* ?? Is locking needed? I don't think so */
1763 /* if (!trylock_page(pages[i]))
1764 goto out_unlock; */
1765 }
1766
1767 sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
1768 if (nr_pages > 1) {
1769 sgl[0].length = PAGE_SIZE - sgl[0].offset;
1770 count -= sgl[0].length;
1771 for (i=1; i < nr_pages ; i++)
1772 sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
1773 }
1774 else {
1775 sgl[0].length = count;
1776 }
1777
1778 kfree(pages);
1779 return nr_pages;
1780
1781 out_unmap:
1782 if (res > 0) {
1783 for (j=0; j < res; j++)
1784 page_cache_release(pages[j]);
1785 res = 0;
1786 }
1787 kfree(pages);
1788 return res;
1789}
1790
1791
1792/* And unmap them... */
1793static int
1794st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1795 int dirtied)
1796{
1797 int i;
1798
1799 for (i=0; i < nr_pages; i++) {
1800 struct page *page = sg_page(&sgl[i]);
1801
1802 if (dirtied)
1803 SetPageDirty(page);
1804 /* unlock_page(page); */
1805 /* FIXME: cache flush missing for rw==READ
1806 * FIXME: call the correct reference counting function
1807 */
1808 page_cache_release(page);
1809 }
1810
1811 return 0;
1812}
1813
1814/* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1815#endif
1816
1817
1818/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1819static int
1820sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1821{
1822#ifdef SG_ALLOW_DIO_CODE
1823 sg_io_hdr_t *hp = &srp->header;
1824 Sg_scatter_hold *schp = &srp->data;
1825 int sg_tablesize = sfp->parentdp->sg_tablesize;
1826 int mx_sc_elems, res;
1827 struct scsi_device *sdev = sfp->parentdp->device;
1828
1829 if (((unsigned long)hp->dxferp &
1830 queue_dma_alignment(sdev->request_queue)) != 0)
1831 return 1;
1832
1833 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1834 if (mx_sc_elems <= 0) {
1835 return 1;
1836 }
1837 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1838 (unsigned long)hp->dxferp, dxfer_len,
1839 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1840 if (res <= 0) {
1841 sg_remove_scat(schp);
1842 return 1;
1843 }
1844 schp->k_use_sg = res;
1845 schp->dio_in_use = 1;
1846 hp->info |= SG_INFO_DIRECT_IO;
1847 return 0;
1848#else
1849 return 1;
1850#endif
1851}
1852
1853static int 1730static int
1854sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) 1731sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1855{ 1732{
1856 struct scatterlist *sg; 1733 int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1857 int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
1858 int sg_tablesize = sfp->parentdp->sg_tablesize; 1734 int sg_tablesize = sfp->parentdp->sg_tablesize;
1859 int blk_size = buff_size; 1735 int blk_size = buff_size, order;
1860 struct page *p = NULL; 1736 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
1861 1737
1862 if (blk_size < 0) 1738 if (blk_size < 0)
1863 return -EFAULT; 1739 return -EFAULT;
@@ -1881,15 +1757,26 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1881 } else 1757 } else
1882 scatter_elem_sz_prev = num; 1758 scatter_elem_sz_prev = num;
1883 } 1759 }
1884 for (k = 0, sg = schp->buffer, rem_sz = blk_size; 1760
1885 (rem_sz > 0) && (k < mx_sc_elems); 1761 if (sfp->low_dma)
1886 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) { 1762 gfp_mask |= GFP_DMA;
1887 1763
1764 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
1765 gfp_mask |= __GFP_ZERO;
1766
1767 order = get_order(num);
1768retry:
1769 ret_sz = 1 << (PAGE_SHIFT + order);
1770
1771 for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1772 k++, rem_sz -= ret_sz) {
1773
1888 num = (rem_sz > scatter_elem_sz_prev) ? 1774 num = (rem_sz > scatter_elem_sz_prev) ?
1889 scatter_elem_sz_prev : rem_sz; 1775 scatter_elem_sz_prev : rem_sz;
1890 p = sg_page_malloc(num, sfp->low_dma, &ret_sz); 1776
1891 if (!p) 1777 schp->pages[k] = alloc_pages(gfp_mask, order);
1892 return -ENOMEM; 1778 if (!schp->pages[k])
1779 goto out;
1893 1780
1894 if (num == scatter_elem_sz_prev) { 1781 if (num == scatter_elem_sz_prev) {
1895 if (unlikely(ret_sz > scatter_elem_sz_prev)) { 1782 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
@@ -1897,12 +1784,12 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1897 scatter_elem_sz_prev = ret_sz; 1784 scatter_elem_sz_prev = ret_sz;
1898 } 1785 }
1899 } 1786 }
1900 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
1901 1787
1902 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " 1788 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1903 "ret_sz=%d\n", k, num, ret_sz)); 1789 "ret_sz=%d\n", k, num, ret_sz));
1904 } /* end of for loop */ 1790 } /* end of for loop */
1905 1791
1792 schp->page_order = order;
1906 schp->k_use_sg = k; 1793 schp->k_use_sg = k;
1907 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " 1794 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1908 "rem_sz=%d\n", k, rem_sz)); 1795 "rem_sz=%d\n", k, rem_sz));
@@ -1910,223 +1797,42 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1910 schp->bufflen = blk_size; 1797 schp->bufflen = blk_size;
1911 if (rem_sz > 0) /* must have failed */ 1798 if (rem_sz > 0) /* must have failed */
1912 return -ENOMEM; 1799 return -ENOMEM;
1913
1914 return 0; 1800 return 0;
1915} 1801out:
1916 1802 for (i = 0; i < k; i++)
 1917static int 1803 __free_pages(schp->pages[i], order);
1918sg_write_xfer(Sg_request * srp)
1919{
1920 sg_io_hdr_t *hp = &srp->header;
1921 Sg_scatter_hold *schp = &srp->data;
1922 struct scatterlist *sg = schp->buffer;
1923 int num_xfer = 0;
1924 int j, k, onum, usglen, ksglen, res;
1925 int iovec_count = (int) hp->iovec_count;
1926 int dxfer_dir = hp->dxfer_direction;
1927 unsigned char *p;
1928 unsigned char __user *up;
1929 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1930
1931 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1932 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1933 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1934 if (schp->bufflen < num_xfer)
1935 num_xfer = schp->bufflen;
1936 }
1937 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1938 (new_interface
1939 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1940 return 0;
1941
1942 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1943 num_xfer, iovec_count, schp->k_use_sg));
1944 if (iovec_count) {
1945 onum = iovec_count;
1946 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1947 return -EFAULT;
1948 } else
1949 onum = 1;
1950
1951 ksglen = sg->length;
1952 p = page_address(sg_page(sg));
1953 for (j = 0, k = 0; j < onum; ++j) {
1954 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1955 if (res)
1956 return res;
1957
1958 for (; p; sg = sg_next(sg), ksglen = sg->length,
1959 p = page_address(sg_page(sg))) {
1960 if (usglen <= 0)
1961 break;
1962 if (ksglen > usglen) {
1963 if (usglen >= num_xfer) {
1964 if (__copy_from_user(p, up, num_xfer))
1965 return -EFAULT;
1966 return 0;
1967 }
1968 if (__copy_from_user(p, up, usglen))
1969 return -EFAULT;
1970 p += usglen;
1971 ksglen -= usglen;
1972 break;
1973 } else {
1974 if (ksglen >= num_xfer) {
1975 if (__copy_from_user(p, up, num_xfer))
1976 return -EFAULT;
1977 return 0;
1978 }
1979 if (__copy_from_user(p, up, ksglen))
1980 return -EFAULT;
1981 up += ksglen;
1982 usglen -= ksglen;
1983 }
1984 ++k;
1985 if (k >= schp->k_use_sg)
1986 return 0;
1987 }
1988 }
1989
1990 return 0;
1991}
1992 1804
1993static int 1805 if (--order >= 0)
1994sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1806 goto retry;
1995 int wr_xf, int *countp, unsigned char __user **up)
1996{
1997 int num_xfer = (int) hp->dxfer_len;
1998 unsigned char __user *p = hp->dxferp;
1999 int count;
2000 1807
2001 if (0 == sg_num) { 1808 return -ENOMEM;
2002 if (wr_xf && ('\0' == hp->interface_id))
2003 count = (int) hp->flags; /* holds "old" input_size */
2004 else
2005 count = num_xfer;
2006 } else {
2007 sg_iovec_t iovec;
2008 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
2009 return -EFAULT;
2010 p = iovec.iov_base;
2011 count = (int) iovec.iov_len;
2012 }
2013 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2014 return -EFAULT;
2015 if (up)
2016 *up = p;
2017 if (countp)
2018 *countp = count;
2019 return 0;
2020} 1809}
2021 1810
2022static void 1811static void
2023sg_remove_scat(Sg_scatter_hold * schp) 1812sg_remove_scat(Sg_scatter_hold * schp)
2024{ 1813{
2025 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 1814 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2026 if (schp->buffer && (schp->sglist_len > 0)) { 1815 if (schp->pages && schp->sglist_len > 0) {
2027 struct scatterlist *sg = schp->buffer; 1816 if (!schp->dio_in_use) {
2028
2029 if (schp->dio_in_use) {
2030#ifdef SG_ALLOW_DIO_CODE
2031 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2032#endif
2033 } else {
2034 int k; 1817 int k;
2035 1818
2036 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); 1819 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2037 ++k, sg = sg_next(sg)) {
2038 SCSI_LOG_TIMEOUT(5, printk( 1820 SCSI_LOG_TIMEOUT(5, printk(
2039 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n", 1821 "sg_remove_scat: k=%d, pg=0x%p\n",
2040 k, sg_page(sg), sg->length)); 1822 k, schp->pages[k]));
2041 sg_page_free(sg_page(sg), sg->length); 1823 __free_pages(schp->pages[k], schp->page_order);
2042 } 1824 }
2043 }
2044 kfree(schp->buffer);
2045 }
2046 memset(schp, 0, sizeof (*schp));
2047}
2048 1825
2049static int 1826 kfree(schp->pages);
2050sg_read_xfer(Sg_request * srp)
2051{
2052 sg_io_hdr_t *hp = &srp->header;
2053 Sg_scatter_hold *schp = &srp->data;
2054 struct scatterlist *sg = schp->buffer;
2055 int num_xfer = 0;
2056 int j, k, onum, usglen, ksglen, res;
2057 int iovec_count = (int) hp->iovec_count;
2058 int dxfer_dir = hp->dxfer_direction;
2059 unsigned char *p;
2060 unsigned char __user *up;
2061 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2062
2063 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2064 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2065 num_xfer = hp->dxfer_len;
2066 if (schp->bufflen < num_xfer)
2067 num_xfer = schp->bufflen;
2068 }
2069 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2070 (new_interface
2071 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2072 return 0;
2073
2074 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2075 num_xfer, iovec_count, schp->k_use_sg));
2076 if (iovec_count) {
2077 onum = iovec_count;
2078 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2079 return -EFAULT;
2080 } else
2081 onum = 1;
2082
2083 p = page_address(sg_page(sg));
2084 ksglen = sg->length;
2085 for (j = 0, k = 0; j < onum; ++j) {
2086 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2087 if (res)
2088 return res;
2089
2090 for (; p; sg = sg_next(sg), ksglen = sg->length,
2091 p = page_address(sg_page(sg))) {
2092 if (usglen <= 0)
2093 break;
2094 if (ksglen > usglen) {
2095 if (usglen >= num_xfer) {
2096 if (__copy_to_user(up, p, num_xfer))
2097 return -EFAULT;
2098 return 0;
2099 }
2100 if (__copy_to_user(up, p, usglen))
2101 return -EFAULT;
2102 p += usglen;
2103 ksglen -= usglen;
2104 break;
2105 } else {
2106 if (ksglen >= num_xfer) {
2107 if (__copy_to_user(up, p, num_xfer))
2108 return -EFAULT;
2109 return 0;
2110 }
2111 if (__copy_to_user(up, p, ksglen))
2112 return -EFAULT;
2113 up += ksglen;
2114 usglen -= ksglen;
2115 }
2116 ++k;
2117 if (k >= schp->k_use_sg)
2118 return 0;
2119 } 1827 }
2120 } 1828 }
2121 1829 memset(schp, 0, sizeof (*schp));
2122 return 0;
2123} 1830}
2124 1831
2125static int 1832static int
2126sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 1833sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2127{ 1834{
2128 Sg_scatter_hold *schp = &srp->data; 1835 Sg_scatter_hold *schp = &srp->data;
2129 struct scatterlist *sg = schp->buffer;
2130 int k, num; 1836 int k, num;
2131 1837
2132 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 1838 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
@@ -2134,15 +1840,15 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2134 if ((!outp) || (num_read_xfer <= 0)) 1840 if ((!outp) || (num_read_xfer <= 0))
2135 return 0; 1841 return 0;
2136 1842
2137 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) { 1843 num = 1 << (PAGE_SHIFT + schp->page_order);
2138 num = sg->length; 1844 for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
2139 if (num > num_read_xfer) { 1845 if (num > num_read_xfer) {
2140 if (__copy_to_user(outp, page_address(sg_page(sg)), 1846 if (__copy_to_user(outp, page_address(schp->pages[k]),
2141 num_read_xfer)) 1847 num_read_xfer))
2142 return -EFAULT; 1848 return -EFAULT;
2143 break; 1849 break;
2144 } else { 1850 } else {
2145 if (__copy_to_user(outp, page_address(sg_page(sg)), 1851 if (__copy_to_user(outp, page_address(schp->pages[k]),
2146 num)) 1852 num))
2147 return -EFAULT; 1853 return -EFAULT;
2148 num_read_xfer -= num; 1854 num_read_xfer -= num;
@@ -2177,24 +1883,21 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2177{ 1883{
2178 Sg_scatter_hold *req_schp = &srp->data; 1884 Sg_scatter_hold *req_schp = &srp->data;
2179 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1885 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2180 struct scatterlist *sg = rsv_schp->buffer;
2181 int k, num, rem; 1886 int k, num, rem;
2182 1887
2183 srp->res_used = 1; 1888 srp->res_used = 1;
2184 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size)); 1889 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2185 rem = size; 1890 rem = size;
2186 1891
2187 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) { 1892 num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
2188 num = sg->length; 1893 for (k = 0; k < rsv_schp->k_use_sg; k++) {
2189 if (rem <= num) { 1894 if (rem <= num) {
2190 sfp->save_scat_len = num;
2191 sg->length = rem;
2192 req_schp->k_use_sg = k + 1; 1895 req_schp->k_use_sg = k + 1;
2193 req_schp->sglist_len = rsv_schp->sglist_len; 1896 req_schp->sglist_len = rsv_schp->sglist_len;
2194 req_schp->buffer = rsv_schp->buffer; 1897 req_schp->pages = rsv_schp->pages;
2195 1898
2196 req_schp->bufflen = size; 1899 req_schp->bufflen = size;
2197 req_schp->b_malloc_len = rsv_schp->b_malloc_len; 1900 req_schp->page_order = rsv_schp->page_order;
2198 break; 1901 break;
2199 } else 1902 } else
2200 rem -= num; 1903 rem -= num;
@@ -2208,22 +1911,13 @@ static void
2208sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 1911sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2209{ 1912{
2210 Sg_scatter_hold *req_schp = &srp->data; 1913 Sg_scatter_hold *req_schp = &srp->data;
2211 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2212 1914
2213 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 1915 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2214 (int) req_schp->k_use_sg)); 1916 (int) req_schp->k_use_sg));
2215 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2216 struct scatterlist *sg = rsv_schp->buffer;
2217
2218 if (sfp->save_scat_len > 0)
2219 (sg + (req_schp->k_use_sg - 1))->length =
2220 (unsigned) sfp->save_scat_len;
2221 else
2222 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2223 }
2224 req_schp->k_use_sg = 0; 1917 req_schp->k_use_sg = 0;
2225 req_schp->bufflen = 0; 1918 req_schp->bufflen = 0;
2226 req_schp->buffer = NULL; 1919 req_schp->pages = NULL;
1920 req_schp->page_order = 0;
2227 req_schp->sglist_len = 0; 1921 req_schp->sglist_len = 0;
2228 sfp->save_scat_len = 0; 1922 sfp->save_scat_len = 0;
2229 srp->res_used = 0; 1923 srp->res_used = 0;
@@ -2481,53 +2175,6 @@ sg_res_in_use(Sg_fd * sfp)
2481 return srp ? 1 : 0; 2175 return srp ? 1 : 0;
2482} 2176}
2483 2177
2484/* The size fetched (value output via retSzp) set when non-NULL return */
2485static struct page *
2486sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2487{
2488 struct page *resp = NULL;
2489 gfp_t page_mask;
2490 int order, a_size;
2491 int resSz;
2492
2493 if ((rqSz <= 0) || (NULL == retSzp))
2494 return resp;
2495
2496 if (lowDma)
2497 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
2498 else
2499 page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
2500
2501 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2502 order++, a_size <<= 1) ;
2503 resSz = a_size; /* rounded up if necessary */
2504 resp = alloc_pages(page_mask, order);
2505 while ((!resp) && order) {
2506 --order;
2507 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2508 resp = alloc_pages(page_mask, order); /* try half */
2509 resSz = a_size;
2510 }
2511 if (resp) {
2512 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2513 memset(page_address(resp), 0, resSz);
2514 *retSzp = resSz;
2515 }
2516 return resp;
2517}
2518
2519static void
2520sg_page_free(struct page *page, int size)
2521{
2522 int order, a_size;
2523
2524 if (!page)
2525 return;
2526 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2527 order++, a_size <<= 1) ;
2528 __free_pages(page, order);
2529}
2530
2531#ifdef CONFIG_SCSI_PROC_FS 2178#ifdef CONFIG_SCSI_PROC_FS
2532static int 2179static int
2533sg_idr_max_id(int id, void *p, void *data) 2180sg_idr_max_id(int id, void *p, void *data)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def3..0f17009c99d2 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
331 331
332static int sr_prep_fn(struct request_queue *q, struct request *rq) 332static int sr_prep_fn(struct request_queue *q, struct request *rq)
333{ 333{
334 int block=0, this_count, s_size, timeout = SR_TIMEOUT; 334 int block = 0, this_count, s_size;
335 struct scsi_cd *cd; 335 struct scsi_cd *cd;
336 struct scsi_cmnd *SCpnt; 336 struct scsi_cmnd *SCpnt;
337 struct scsi_device *sdp = q->queuedata; 337 struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
461 SCpnt->transfersize = cd->device->sector_size; 461 SCpnt->transfersize = cd->device->sector_size;
462 SCpnt->underflow = this_count << 9; 462 SCpnt->underflow = this_count << 9;
463 SCpnt->allowed = MAX_RETRIES; 463 SCpnt->allowed = MAX_RETRIES;
464 SCpnt->timeout_per_command = timeout;
465 464
466 /* 465 /*
467 * This indicates that the command is ready from our end to be 466 * This indicates that the command is ready from our end to be
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
620 disk->fops = &sr_bdops; 619 disk->fops = &sr_bdops;
621 disk->flags = GENHD_FL_CD; 620 disk->flags = GENHD_FL_CD;
622 621
622 blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
623
623 cd->device = sdev; 624 cd->device = sdev;
624 cd->disk = disk; 625 cd->disk = disk;
625 cd->driver = &sr_template; 626 cd->driver = &sr_template;
@@ -878,7 +879,7 @@ static void sr_kref_release(struct kref *kref)
878 struct gendisk *disk = cd->disk; 879 struct gendisk *disk = cd->disk;
879 880
880 spin_lock(&sr_index_lock); 881 spin_lock(&sr_index_lock);
881 clear_bit(disk->first_minor, sr_index_bits); 882 clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
882 spin_unlock(&sr_index_lock); 883 spin_unlock(&sr_index_lock);
883 884
884 unregister_cdrom(&cd->cdi); 885 unregister_cdrom(&cd->cdi);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d39107b7669b..f4e6cde1fd0d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd,
519 * Shorten our settle_time if needed for 519 * Shorten our settle_time if needed for
520 * this command not to time out. 520 * this command not to time out.
521 */ 521 */
522 if (np->s.settle_time_valid && cmd->timeout_per_command) { 522 if (np->s.settle_time_valid && cmd->request->timeout) {
523 unsigned long tlimit = jiffies + cmd->timeout_per_command; 523 unsigned long tlimit = jiffies + cmd->request->timeout;
524 tlimit -= SYM_CONF_TIMER_INTERVAL*2; 524 tlimit -= SYM_CONF_TIMER_INTERVAL*2;
525 if (time_after(np->s.settle_time, tlimit)) { 525 if (time_after(np->s.settle_time, tlimit)) {
526 np->s.settle_time = tlimit; 526 np->s.settle_time = tlimit;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 1723d71cbf3f..69ac6e590f1d 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2573,8 +2573,8 @@ static struct pci_driver dc390_driver = {
2573static int __init dc390_module_init(void) 2573static int __init dc390_module_init(void)
2574{ 2574{
2575 if (!disable_clustering) 2575 if (!disable_clustering)
2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n" 2576 printk(KERN_INFO "DC390: clustering now enabled by default. If you get problems load\n");
2577 "\twith \"disable_clustering=1\" and report to maintainers\n"); 2577 printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n");
2578 2578
2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) { 2579 if (tmscsim[0] == -1 || tmscsim[0] > 15) {
2580 tmscsim[0] = 7; 2580 tmscsim[0] = 7;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index c3e174b35fe6..19caf7c962ac 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -107,7 +107,8 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
107 BUG_ON(bip == NULL); 107 BUG_ON(bip == NULL);
108 108
109 /* A cloned bio doesn't own the integrity metadata */ 109 /* A cloned bio doesn't own the integrity metadata */
110 if (!bio_flagged(bio, BIO_CLONED) && bip->bip_buf != NULL) 110 if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY)
111 && bip->bip_buf != NULL)
111 kfree(bip->bip_buf); 112 kfree(bip->bip_buf);
112 113
113 mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]); 114 mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
@@ -150,6 +151,24 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
150} 151}
151EXPORT_SYMBOL(bio_integrity_add_page); 152EXPORT_SYMBOL(bio_integrity_add_page);
152 153
154static int bdev_integrity_enabled(struct block_device *bdev, int rw)
155{
156 struct blk_integrity *bi = bdev_get_integrity(bdev);
157
158 if (bi == NULL)
159 return 0;
160
161 if (rw == READ && bi->verify_fn != NULL &&
162 (bi->flags & INTEGRITY_FLAG_READ))
163 return 1;
164
165 if (rw == WRITE && bi->generate_fn != NULL &&
166 (bi->flags & INTEGRITY_FLAG_WRITE))
167 return 1;
168
169 return 0;
170}
171
153/** 172/**
154 * bio_integrity_enabled - Check whether integrity can be passed 173 * bio_integrity_enabled - Check whether integrity can be passed
155 * @bio: bio to check 174 * @bio: bio to check
@@ -313,6 +332,14 @@ static void bio_integrity_generate(struct bio *bio)
313 } 332 }
314} 333}
315 334
335static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
336{
337 if (bi)
338 return bi->tuple_size;
339
340 return 0;
341}
342
316/** 343/**
317 * bio_integrity_prep - Prepare bio for integrity I/O 344 * bio_integrity_prep - Prepare bio for integrity I/O
318 * @bio: bio to prepare 345 * @bio: bio to prepare
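A hedged sketch of how the new bdev_integrity_enabled() helper could be consumed by bio_integrity_enabled(); the wrapper name example_integrity_enabled is invented and the real caller is not shown in this hunk:

    #include <linux/bio.h>

    /* illustrative: route the per-direction check through the bio's
     * own block device, using bio_data_dir() as the rw argument */
    static int example_integrity_enabled(struct bio *bio)
    {
            return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
    }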
diff --git a/fs/bio.c b/fs/bio.c
index 3cba7ae34d75..77a55bcceedb 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
30 30
31static struct kmem_cache *bio_slab __read_mostly; 31static struct kmem_cache *bio_slab __read_mostly;
32 32
33mempool_t *bio_split_pool __read_mostly; 33static mempool_t *bio_split_pool __read_mostly;
34 34
35/* 35/*
36 * if you change this list, also change bvec_alloc or things will 36 * if you change this list, also change bvec_alloc or things will
@@ -60,25 +60,46 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
60 struct bio_vec *bvl; 60 struct bio_vec *bvl;
61 61
62 /* 62 /*
63 * see comment near bvec_array define! 63 * If 'bs' is given, lookup the pool and do the mempool alloc.
64 * If not, this is a bio_kmalloc() allocation and just do a
65 * kzalloc() for the exact number of vecs right away.
64 */ 66 */
65 switch (nr) { 67 if (bs) {
66 case 1 : *idx = 0; break; 68 /*
67 case 2 ... 4: *idx = 1; break; 69 * see comment near bvec_array define!
68 case 5 ... 16: *idx = 2; break; 70 */
69 case 17 ... 64: *idx = 3; break; 71 switch (nr) {
70 case 65 ... 128: *idx = 4; break; 72 case 1:
71 case 129 ... BIO_MAX_PAGES: *idx = 5; break; 73 *idx = 0;
74 break;
75 case 2 ... 4:
76 *idx = 1;
77 break;
78 case 5 ... 16:
79 *idx = 2;
80 break;
81 case 17 ... 64:
82 *idx = 3;
83 break;
84 case 65 ... 128:
85 *idx = 4;
86 break;
87 case 129 ... BIO_MAX_PAGES:
88 *idx = 5;
89 break;
72 default: 90 default:
73 return NULL; 91 return NULL;
74 } 92 }
75 /*
76 * idx now points to the pool we want to allocate from
77 */
78 93
79 bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask); 94 /*
80 if (bvl) 95 * idx now points to the pool we want to allocate from
81 memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec)); 96 */
97 bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
98 if (bvl)
99 memset(bvl, 0,
100 bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
101 } else
102 bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
82 103
83 return bvl; 104 return bvl;
84} 105}
@@ -107,10 +128,17 @@ static void bio_fs_destructor(struct bio *bio)
107 bio_free(bio, fs_bio_set); 128 bio_free(bio, fs_bio_set);
108} 129}
109 130
131static void bio_kmalloc_destructor(struct bio *bio)
132{
133 kfree(bio->bi_io_vec);
134 kfree(bio);
135}
136
110void bio_init(struct bio *bio) 137void bio_init(struct bio *bio)
111{ 138{
112 memset(bio, 0, sizeof(*bio)); 139 memset(bio, 0, sizeof(*bio));
113 bio->bi_flags = 1 << BIO_UPTODATE; 140 bio->bi_flags = 1 << BIO_UPTODATE;
141 bio->bi_comp_cpu = -1;
114 atomic_set(&bio->bi_cnt, 1); 142 atomic_set(&bio->bi_cnt, 1);
115} 143}
116 144
@@ -118,19 +146,25 @@ void bio_init(struct bio *bio)
118 * bio_alloc_bioset - allocate a bio for I/O 146 * bio_alloc_bioset - allocate a bio for I/O
119 * @gfp_mask: the GFP_ mask given to the slab allocator 147 * @gfp_mask: the GFP_ mask given to the slab allocator
120 * @nr_iovecs: number of iovecs to pre-allocate 148 * @nr_iovecs: number of iovecs to pre-allocate
121 * @bs: the bio_set to allocate from 149 * @bs: the bio_set to allocate from. If %NULL, just use kmalloc
122 * 150 *
123 * Description: 151 * Description:
124 * bio_alloc_bioset will first try it's on mempool to satisfy the allocation. 152 * bio_alloc_bioset will first try its own mempool to satisfy the allocation.
125 * If %__GFP_WAIT is set then we will block on the internal pool waiting 153 * If %__GFP_WAIT is set then we will block on the internal pool waiting
126 * for a &struct bio to become free. 154 * for a &struct bio to become free. If a %NULL @bs is passed in, we will
155 * fall back to just using @kmalloc to allocate the required memory.
127 * 156 *
128 * allocate bio and iovecs from the memory pools specified by the 157 * allocate bio and iovecs from the memory pools specified by the
129 * bio_set structure. 158 * bio_set structure, or @kmalloc if none given.
130 **/ 159 **/
131struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) 160struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
132{ 161{
133 struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); 162 struct bio *bio;
163
164 if (bs)
165 bio = mempool_alloc(bs->bio_pool, gfp_mask);
166 else
167 bio = kmalloc(sizeof(*bio), gfp_mask);
134 168
135 if (likely(bio)) { 169 if (likely(bio)) {
136 struct bio_vec *bvl = NULL; 170 struct bio_vec *bvl = NULL;
@@ -141,7 +175,10 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
141 175
142 bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); 176 bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
143 if (unlikely(!bvl)) { 177 if (unlikely(!bvl)) {
144 mempool_free(bio, bs->bio_pool); 178 if (bs)
179 mempool_free(bio, bs->bio_pool);
180 else
181 kfree(bio);
145 bio = NULL; 182 bio = NULL;
146 goto out; 183 goto out;
147 } 184 }
@@ -164,6 +201,23 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
164 return bio; 201 return bio;
165} 202}
166 203
204/*
205 * Like bio_alloc(), but doesn't use a mempool backing. This means that
206 * it CAN fail, but while bio_alloc() can only be used for allocations
207 * that have a short (finite) life span, bio_kmalloc() should be used
208 * for more permanent bio allocations (like allocating some bio's for
209 * initalization or setup purposes).
210 */
211struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
212{
213 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
214
215 if (bio)
216 bio->bi_destructor = bio_kmalloc_destructor;
217
218 return bio;
219}
220
167void zero_fill_bio(struct bio *bio) 221void zero_fill_bio(struct bio *bio)
168{ 222{
169 unsigned long flags; 223 unsigned long flags;
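A minimal usage sketch for the new bio_kmalloc(): with no mempool behind it the allocation can fail, so the NULL check is mandatory, and bio_put() ends up in bio_kmalloc_destructor(). The function name and iovec count below are made up:

    #include <linux/bio.h>

    static struct bio *example_setup_bio(void)
    {
            struct bio *bio = bio_kmalloc(GFP_KERNEL, 4);

            if (!bio)
                    return NULL;    /* no mempool to wait on */

            /* fill in bi_io_vec, bi_end_io, bi_private, ... */
            return bio;             /* later released with bio_put() */
    }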
@@ -208,14 +262,6 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
208 return bio->bi_phys_segments; 262 return bio->bi_phys_segments;
209} 263}
210 264
211inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
212{
213 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
214 blk_recount_segments(q, bio);
215
216 return bio->bi_hw_segments;
217}
218
219/** 265/**
220 * __bio_clone - clone a bio 266 * __bio_clone - clone a bio
221 * @bio: destination bio 267 * @bio: destination bio
@@ -350,8 +396,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
350 */ 396 */
351 397
352 while (bio->bi_phys_segments >= q->max_phys_segments 398 while (bio->bi_phys_segments >= q->max_phys_segments
353 || bio->bi_hw_segments >= q->max_hw_segments 399 || bio->bi_phys_segments >= q->max_hw_segments) {
354 || BIOVEC_VIRT_OVERSIZE(bio->bi_size)) {
355 400
356 if (retried_segments) 401 if (retried_segments)
357 return 0; 402 return 0;
@@ -395,13 +440,11 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
395 } 440 }
396 441
397 /* If we may be able to merge these biovecs, force a recount */ 442 /* If we may be able to merge these biovecs, force a recount */
398 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec) || 443 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
399 BIOVEC_VIRT_MERGEABLE(bvec-1, bvec)))
400 bio->bi_flags &= ~(1 << BIO_SEG_VALID); 444 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
401 445
402 bio->bi_vcnt++; 446 bio->bi_vcnt++;
403 bio->bi_phys_segments++; 447 bio->bi_phys_segments++;
404 bio->bi_hw_segments++;
405 done: 448 done:
406 bio->bi_size += len; 449 bio->bi_size += len;
407 return len; 450 return len;
@@ -449,16 +492,19 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
449 492
450struct bio_map_data { 493struct bio_map_data {
451 struct bio_vec *iovecs; 494 struct bio_vec *iovecs;
452 int nr_sgvecs;
453 struct sg_iovec *sgvecs; 495 struct sg_iovec *sgvecs;
496 int nr_sgvecs;
497 int is_our_pages;
454}; 498};
455 499
456static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, 500static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
457 struct sg_iovec *iov, int iov_count) 501 struct sg_iovec *iov, int iov_count,
502 int is_our_pages)
458{ 503{
459 memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt); 504 memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
460 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count); 505 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
461 bmd->nr_sgvecs = iov_count; 506 bmd->nr_sgvecs = iov_count;
507 bmd->is_our_pages = is_our_pages;
462 bio->bi_private = bmd; 508 bio->bi_private = bmd;
463} 509}
464 510
@@ -493,7 +539,8 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
493} 539}
494 540
495static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, 541static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
496 struct sg_iovec *iov, int iov_count, int uncopy) 542 struct sg_iovec *iov, int iov_count, int uncopy,
543 int do_free_page)
497{ 544{
498 int ret = 0, i; 545 int ret = 0, i;
499 struct bio_vec *bvec; 546 struct bio_vec *bvec;
@@ -536,7 +583,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
536 } 583 }
537 } 584 }
538 585
539 if (uncopy) 586 if (do_free_page)
540 __free_page(bvec->bv_page); 587 __free_page(bvec->bv_page);
541 } 588 }
542 589
@@ -553,10 +600,11 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
553int bio_uncopy_user(struct bio *bio) 600int bio_uncopy_user(struct bio *bio)
554{ 601{
555 struct bio_map_data *bmd = bio->bi_private; 602 struct bio_map_data *bmd = bio->bi_private;
556 int ret; 603 int ret = 0;
557
558 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, bmd->nr_sgvecs, 1);
559 604
605 if (!bio_flagged(bio, BIO_NULL_MAPPED))
606 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
607 bmd->nr_sgvecs, 1, bmd->is_our_pages);
560 bio_free_map_data(bmd); 608 bio_free_map_data(bmd);
561 bio_put(bio); 609 bio_put(bio);
562 return ret; 610 return ret;
@@ -565,16 +613,20 @@ int bio_uncopy_user(struct bio *bio)
565/** 613/**
566 * bio_copy_user_iov - copy user data to bio 614 * bio_copy_user_iov - copy user data to bio
567 * @q: destination block queue 615 * @q: destination block queue
616 * @map_data: pointer to the rq_map_data holding pages (if necessary)
568 * @iov: the iovec. 617 * @iov: the iovec.
569 * @iov_count: number of elements in the iovec 618 * @iov_count: number of elements in the iovec
570 * @write_to_vm: bool indicating writing to pages or not 619 * @write_to_vm: bool indicating writing to pages or not
620 * @gfp_mask: memory allocation flags
571 * 621 *
572 * Prepares and returns a bio for indirect user io, bouncing data 622 * Prepares and returns a bio for indirect user io, bouncing data
573 * to/from kernel pages as necessary. Must be paired with 623 * to/from kernel pages as necessary. Must be paired with
574 * call bio_uncopy_user() on io completion. 624 * call bio_uncopy_user() on io completion.
575 */ 625 */
576struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov, 626struct bio *bio_copy_user_iov(struct request_queue *q,
577 int iov_count, int write_to_vm) 627 struct rq_map_data *map_data,
628 struct sg_iovec *iov, int iov_count,
629 int write_to_vm, gfp_t gfp_mask)
578{ 630{
579 struct bio_map_data *bmd; 631 struct bio_map_data *bmd;
580 struct bio_vec *bvec; 632 struct bio_vec *bvec;
@@ -597,25 +649,38 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
597 len += iov[i].iov_len; 649 len += iov[i].iov_len;
598 } 650 }
599 651
600 bmd = bio_alloc_map_data(nr_pages, iov_count, GFP_KERNEL); 652 bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
601 if (!bmd) 653 if (!bmd)
602 return ERR_PTR(-ENOMEM); 654 return ERR_PTR(-ENOMEM);
603 655
604 ret = -ENOMEM; 656 ret = -ENOMEM;
605 bio = bio_alloc(GFP_KERNEL, nr_pages); 657 bio = bio_alloc(gfp_mask, nr_pages);
606 if (!bio) 658 if (!bio)
607 goto out_bmd; 659 goto out_bmd;
608 660
609 bio->bi_rw |= (!write_to_vm << BIO_RW); 661 bio->bi_rw |= (!write_to_vm << BIO_RW);
610 662
611 ret = 0; 663 ret = 0;
664 i = 0;
612 while (len) { 665 while (len) {
613 unsigned int bytes = PAGE_SIZE; 666 unsigned int bytes;
667
668 if (map_data)
669 bytes = 1U << (PAGE_SHIFT + map_data->page_order);
670 else
671 bytes = PAGE_SIZE;
614 672
615 if (bytes > len) 673 if (bytes > len)
616 bytes = len; 674 bytes = len;
617 675
618 page = alloc_page(q->bounce_gfp | GFP_KERNEL); 676 if (map_data) {
677 if (i == map_data->nr_entries) {
678 ret = -ENOMEM;
679 break;
680 }
681 page = map_data->pages[i++];
682 } else
683 page = alloc_page(q->bounce_gfp | gfp_mask);
619 if (!page) { 684 if (!page) {
620 ret = -ENOMEM; 685 ret = -ENOMEM;
621 break; 686 break;
@@ -634,16 +699,17 @@ struct bio *bio_copy_user_iov(struct request_queue *q, struct sg_iovec *iov,
634 * success 699 * success
635 */ 700 */
636 if (!write_to_vm) { 701 if (!write_to_vm) {
637 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0); 702 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
638 if (ret) 703 if (ret)
639 goto cleanup; 704 goto cleanup;
640 } 705 }
641 706
642 bio_set_map_data(bmd, bio, iov, iov_count); 707 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
643 return bio; 708 return bio;
644cleanup: 709cleanup:
645 bio_for_each_segment(bvec, bio, i) 710 if (!map_data)
646 __free_page(bvec->bv_page); 711 bio_for_each_segment(bvec, bio, i)
712 __free_page(bvec->bv_page);
647 713
648 bio_put(bio); 714 bio_put(bio);
649out_bmd: 715out_bmd:
@@ -654,29 +720,32 @@ out_bmd:
654/** 720/**
655 * bio_copy_user - copy user data to bio 721 * bio_copy_user - copy user data to bio
656 * @q: destination block queue 722 * @q: destination block queue
723 * @map_data: pointer to the rq_map_data holding pages (if necessary)
657 * @uaddr: start of user address 724 * @uaddr: start of user address
658 * @len: length in bytes 725 * @len: length in bytes
659 * @write_to_vm: bool indicating writing to pages or not 726 * @write_to_vm: bool indicating writing to pages or not
727 * @gfp_mask: memory allocation flags
660 * 728 *
661 * Prepares and returns a bio for indirect user io, bouncing data 729 * Prepares and returns a bio for indirect user io, bouncing data
662 * to/from kernel pages as necessary. Must be paired with 730 * to/from kernel pages as necessary. Must be paired with
663 * call bio_uncopy_user() on io completion. 731 * call bio_uncopy_user() on io completion.
664 */ 732 */
665struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr, 733struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
666 unsigned int len, int write_to_vm) 734 unsigned long uaddr, unsigned int len,
735 int write_to_vm, gfp_t gfp_mask)
667{ 736{
668 struct sg_iovec iov; 737 struct sg_iovec iov;
669 738
670 iov.iov_base = (void __user *)uaddr; 739 iov.iov_base = (void __user *)uaddr;
671 iov.iov_len = len; 740 iov.iov_len = len;
672 741
673 return bio_copy_user_iov(q, &iov, 1, write_to_vm); 742 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
674} 743}
675 744
676static struct bio *__bio_map_user_iov(struct request_queue *q, 745static struct bio *__bio_map_user_iov(struct request_queue *q,
677 struct block_device *bdev, 746 struct block_device *bdev,
678 struct sg_iovec *iov, int iov_count, 747 struct sg_iovec *iov, int iov_count,
679 int write_to_vm) 748 int write_to_vm, gfp_t gfp_mask)
680{ 749{
681 int i, j; 750 int i, j;
682 int nr_pages = 0; 751 int nr_pages = 0;
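The copy-user interfaces above grow rq_map_data and gfp_mask parameters. A sketch of the call shape, assuming the caller already holds a request queue, a user buffer and (optionally) a pre-filled struct rq_map_data; the example_ name is invented:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static struct bio *example_bounce_read(struct request_queue *q,
                                           struct rq_map_data *md,
                                           void __user *ubuf, unsigned int len)
    {
            /* md == NULL: the bio layer allocates and later frees the
             * bounce pages (is_our_pages set in bio_set_map_data)
             * md != NULL: pages come from md->pages and the caller
             * keeps ownership */
            return bio_copy_user(q, md, (unsigned long)ubuf, len,
                                 1 /* write_to_vm */, GFP_KERNEL);
    }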
@@ -702,12 +771,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
702 if (!nr_pages) 771 if (!nr_pages)
703 return ERR_PTR(-EINVAL); 772 return ERR_PTR(-EINVAL);
704 773
705 bio = bio_alloc(GFP_KERNEL, nr_pages); 774 bio = bio_alloc(gfp_mask, nr_pages);
706 if (!bio) 775 if (!bio)
707 return ERR_PTR(-ENOMEM); 776 return ERR_PTR(-ENOMEM);
708 777
709 ret = -ENOMEM; 778 ret = -ENOMEM;
710 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); 779 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
711 if (!pages) 780 if (!pages)
712 goto out; 781 goto out;
713 782
@@ -786,19 +855,21 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
786 * @uaddr: start of user address 855 * @uaddr: start of user address
787 * @len: length in bytes 856 * @len: length in bytes
788 * @write_to_vm: bool indicating writing to pages or not 857 * @write_to_vm: bool indicating writing to pages or not
858 * @gfp_mask: memory allocation flags
789 * 859 *
790 * Map the user space address into a bio suitable for io to a block 860 * Map the user space address into a bio suitable for io to a block
791 * device. Returns an error pointer in case of error. 861 * device. Returns an error pointer in case of error.
792 */ 862 */
793struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev, 863struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
794 unsigned long uaddr, unsigned int len, int write_to_vm) 864 unsigned long uaddr, unsigned int len, int write_to_vm,
865 gfp_t gfp_mask)
795{ 866{
796 struct sg_iovec iov; 867 struct sg_iovec iov;
797 868
798 iov.iov_base = (void __user *)uaddr; 869 iov.iov_base = (void __user *)uaddr;
799 iov.iov_len = len; 870 iov.iov_len = len;
800 871
801 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm); 872 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
802} 873}
803 874
804/** 875/**
@@ -808,18 +879,19 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
808 * @iov: the iovec. 879 * @iov: the iovec.
809 * @iov_count: number of elements in the iovec 880 * @iov_count: number of elements in the iovec
810 * @write_to_vm: bool indicating writing to pages or not 881 * @write_to_vm: bool indicating writing to pages or not
882 * @gfp_mask: memory allocation flags
811 * 883 *
812 * Map the user space address into a bio suitable for io to a block 884 * Map the user space address into a bio suitable for io to a block
813 * device. Returns an error pointer in case of error. 885 * device. Returns an error pointer in case of error.
814 */ 886 */
815struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev, 887struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
816 struct sg_iovec *iov, int iov_count, 888 struct sg_iovec *iov, int iov_count,
817 int write_to_vm) 889 int write_to_vm, gfp_t gfp_mask)
818{ 890{
819 struct bio *bio; 891 struct bio *bio;
820 892
821 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm); 893 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
822 894 gfp_mask);
823 if (IS_ERR(bio)) 895 if (IS_ERR(bio))
824 return bio; 896 return bio;
825 897
@@ -976,48 +1048,13 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
976struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, 1048struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
977 gfp_t gfp_mask, int reading) 1049 gfp_t gfp_mask, int reading)
978{ 1050{
979 unsigned long kaddr = (unsigned long)data;
980 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
981 unsigned long start = kaddr >> PAGE_SHIFT;
982 const int nr_pages = end - start;
983 struct bio *bio; 1051 struct bio *bio;
984 struct bio_vec *bvec; 1052 struct bio_vec *bvec;
985 struct bio_map_data *bmd; 1053 int i;
986 int i, ret;
987 struct sg_iovec iov;
988
989 iov.iov_base = data;
990 iov.iov_len = len;
991
992 bmd = bio_alloc_map_data(nr_pages, 1, gfp_mask);
993 if (!bmd)
994 return ERR_PTR(-ENOMEM);
995
996 ret = -ENOMEM;
997 bio = bio_alloc(gfp_mask, nr_pages);
998 if (!bio)
999 goto out_bmd;
1000
1001 while (len) {
1002 struct page *page;
1003 unsigned int bytes = PAGE_SIZE;
1004
1005 if (bytes > len)
1006 bytes = len;
1007
1008 page = alloc_page(q->bounce_gfp | gfp_mask);
1009 if (!page) {
1010 ret = -ENOMEM;
1011 goto cleanup;
1012 }
1013
1014 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
1015 ret = -EINVAL;
1016 goto cleanup;
1017 }
1018 1054
1019 len -= bytes; 1055 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
1020 } 1056 if (IS_ERR(bio))
1057 return bio;
1021 1058
1022 if (!reading) { 1059 if (!reading) {
1023 void *p = data; 1060 void *p = data;
@@ -1030,20 +1067,9 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1030 } 1067 }
1031 } 1068 }
1032 1069
1033 bio->bi_private = bmd;
1034 bio->bi_end_io = bio_copy_kern_endio; 1070 bio->bi_end_io = bio_copy_kern_endio;
1035 1071
1036 bio_set_map_data(bmd, bio, &iov, 1);
1037 return bio; 1072 return bio;
1038cleanup:
1039 bio_for_each_segment(bvec, bio, i)
1040 __free_page(bvec->bv_page);
1041
1042 bio_put(bio);
1043out_bmd:
1044 bio_free_map_data(bmd);
1045
1046 return ERR_PTR(ret);
1047} 1073}
1048 1074
1049/* 1075/*
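With bio_copy_kern() now delegating to bio_copy_user(), callers are unchanged; a short reminder of the call shape, with q, buf and len assumed to come from the caller:

    #include <linux/bio.h>
    #include <linux/err.h>

    static int example_copy_kern(struct request_queue *q, void *buf,
                                 unsigned int len)
    {
            struct bio *bio = bio_copy_kern(q, buf, len, GFP_KERNEL,
                                            1 /* reading */);

            if (IS_ERR(bio))
                    return PTR_ERR(bio);
            /* submit the bio; completion runs bio_copy_kern_endio() */
            return 0;
    }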
@@ -1230,9 +1256,9 @@ static void bio_pair_end_2(struct bio *bi, int err)
1230 * split a bio - only worry about a bio with a single page 1256 * split a bio - only worry about a bio with a single page
1231 * in its iovec 1257 * in its iovec
1232 */ 1258 */
1233struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) 1259struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1234{ 1260{
1235 struct bio_pair *bp = mempool_alloc(pool, GFP_NOIO); 1261 struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
1236 1262
1237 if (!bp) 1263 if (!bp)
1238 return bp; 1264 return bp;
@@ -1266,7 +1292,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
1266 bp->bio2.bi_end_io = bio_pair_end_2; 1292 bp->bio2.bi_end_io = bio_pair_end_2;
1267 1293
1268 bp->bio1.bi_private = bi; 1294 bp->bio1.bi_private = bi;
1269 bp->bio2.bi_private = pool; 1295 bp->bio2.bi_private = bio_split_pool;
1270 1296
1271 if (bio_integrity(bi)) 1297 if (bio_integrity(bi))
1272 bio_integrity_split(bi, bp, first_sectors); 1298 bio_integrity_split(bi, bp, first_sectors);
@@ -1274,6 +1300,42 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
1274 return bp; 1300 return bp;
1275} 1301}
1276 1302
1303/**
1304 * bio_sector_offset - Find hardware sector offset in bio
1305 * @bio: bio to inspect
1306 * @index: bio_vec index
1307 * @offset: offset in bv_page
1308 *
1309 * Return the number of hardware sectors between beginning of bio
1310 * and an end point indicated by a bio_vec index and an offset
1311 * within that vector's page.
1312 */
1313sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1314 unsigned int offset)
1315{
1316 unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
1317 struct bio_vec *bv;
1318 sector_t sectors;
1319 int i;
1320
1321 sectors = 0;
1322
1323 if (index >= bio->bi_idx)
1324 index = bio->bi_vcnt - 1;
1325
1326 __bio_for_each_segment(bv, bio, i, 0) {
1327 if (i == index) {
1328 if (offset > bv->bv_offset)
1329 sectors += (offset - bv->bv_offset) / sector_sz;
1330 break;
1331 }
1332
1333 sectors += bv->bv_len / sector_sz;
1334 }
1335
1336 return sectors;
1337}
1338EXPORT_SYMBOL(bio_sector_offset);
1277 1339
1278/* 1340/*
1279 * create memory pools for biovec's in a bio_set. 1341 * create memory pools for biovec's in a bio_set.
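bio_split() above loses its mempool argument (the internal bio_split_pool is used instead) and bio_sector_offset() is new, exported for users such as the integrity split path. An illustrative split-and-submit helper in the md/raid0 style; first_sectors is assumed to be computed by the caller:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void example_split_and_submit(struct bio *bio, int first_sectors)
    {
            struct bio_pair *bp = bio_split(bio, first_sectors);

            if (!bp)
                    return;
            /* bp->bio1 covers the first first_sectors sectors,
             * bp->bio2 the remainder of the original bio */
            generic_make_request(&bp->bio1);
            generic_make_request(&bp->bio2);
            bio_pair_release(bp);
    }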
@@ -1376,6 +1438,7 @@ static int __init init_bio(void)
1376subsys_initcall(init_bio); 1438subsys_initcall(init_bio);
1377 1439
1378EXPORT_SYMBOL(bio_alloc); 1440EXPORT_SYMBOL(bio_alloc);
1441EXPORT_SYMBOL(bio_kmalloc);
1379EXPORT_SYMBOL(bio_put); 1442EXPORT_SYMBOL(bio_put);
1380EXPORT_SYMBOL(bio_free); 1443EXPORT_SYMBOL(bio_free);
1381EXPORT_SYMBOL(bio_endio); 1444EXPORT_SYMBOL(bio_endio);
@@ -1383,7 +1446,6 @@ EXPORT_SYMBOL(bio_init);
1383EXPORT_SYMBOL(__bio_clone); 1446EXPORT_SYMBOL(__bio_clone);
1384EXPORT_SYMBOL(bio_clone); 1447EXPORT_SYMBOL(bio_clone);
1385EXPORT_SYMBOL(bio_phys_segments); 1448EXPORT_SYMBOL(bio_phys_segments);
1386EXPORT_SYMBOL(bio_hw_segments);
1387EXPORT_SYMBOL(bio_add_page); 1449EXPORT_SYMBOL(bio_add_page);
1388EXPORT_SYMBOL(bio_add_pc_page); 1450EXPORT_SYMBOL(bio_add_pc_page);
1389EXPORT_SYMBOL(bio_get_nr_vecs); 1451EXPORT_SYMBOL(bio_get_nr_vecs);
@@ -1393,7 +1455,6 @@ EXPORT_SYMBOL(bio_map_kern);
1393EXPORT_SYMBOL(bio_copy_kern); 1455EXPORT_SYMBOL(bio_copy_kern);
1394EXPORT_SYMBOL(bio_pair_release); 1456EXPORT_SYMBOL(bio_pair_release);
1395EXPORT_SYMBOL(bio_split); 1457EXPORT_SYMBOL(bio_split);
1396EXPORT_SYMBOL(bio_split_pool);
1397EXPORT_SYMBOL(bio_copy_user); 1458EXPORT_SYMBOL(bio_copy_user);
1398EXPORT_SYMBOL(bio_uncopy_user); 1459EXPORT_SYMBOL(bio_uncopy_user);
1399EXPORT_SYMBOL(bioset_create); 1460EXPORT_SYMBOL(bioset_create);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index aff54219e049..d84f0469a016 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -540,22 +540,6 @@ EXPORT_SYMBOL(bd_release);
540 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 540 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
541 */ 541 */
542 542
543static struct kobject *bdev_get_kobj(struct block_device *bdev)
544{
545 if (bdev->bd_contains != bdev)
546 return kobject_get(&bdev->bd_part->dev.kobj);
547 else
548 return kobject_get(&bdev->bd_disk->dev.kobj);
549}
550
551static struct kobject *bdev_get_holder(struct block_device *bdev)
552{
553 if (bdev->bd_contains != bdev)
554 return kobject_get(bdev->bd_part->holder_dir);
555 else
556 return kobject_get(bdev->bd_disk->holder_dir);
557}
558
559static int add_symlink(struct kobject *from, struct kobject *to) 543static int add_symlink(struct kobject *from, struct kobject *to)
560{ 544{
561 if (!from || !to) 545 if (!from || !to)
@@ -604,11 +588,11 @@ static int bd_holder_grab_dirs(struct block_device *bdev,
604 if (!bo->hdev) 588 if (!bo->hdev)
605 goto fail_put_sdir; 589 goto fail_put_sdir;
606 590
607 bo->sdev = bdev_get_kobj(bdev); 591 bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
608 if (!bo->sdev) 592 if (!bo->sdev)
609 goto fail_put_hdev; 593 goto fail_put_hdev;
610 594
611 bo->hdir = bdev_get_holder(bdev); 595 bo->hdir = kobject_get(bdev->bd_part->holder_dir);
612 if (!bo->hdir) 596 if (!bo->hdir)
613 goto fail_put_sdev; 597 goto fail_put_sdev;
614 598
@@ -868,6 +852,87 @@ struct block_device *open_by_devnum(dev_t dev, unsigned mode)
868 852
869EXPORT_SYMBOL(open_by_devnum); 853EXPORT_SYMBOL(open_by_devnum);
870 854
855/**
856 * flush_disk - invalidates all buffer-cache entries on a disk
857 *
858 * @bdev: struct block device to be flushed
859 *
860 * Invalidates all buffer-cache entries on a disk. It should be called
861 * when a disk has been changed -- either by a media change or online
862 * resize.
863 */
864static void flush_disk(struct block_device *bdev)
865{
866 if (__invalidate_device(bdev)) {
867 char name[BDEVNAME_SIZE] = "";
868
869 if (bdev->bd_disk)
870 disk_name(bdev->bd_disk, 0, name);
871 printk(KERN_WARNING "VFS: busy inodes on changed media or "
872 "resized disk %s\n", name);
873 }
874
875 if (!bdev->bd_disk)
876 return;
877 if (disk_partitionable(bdev->bd_disk))
878 bdev->bd_invalidated = 1;
879}
880
881/**
882 * check_disk_size_change - checks for disk size change and adjusts bdev size.
883 * @disk: struct gendisk to check
884 * @bdev: struct bdev to adjust.
885 *
886 * This routine checks to see if the bdev size does not match the disk size
887 * and adjusts it if it differs.
888 */
889void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
890{
891 loff_t disk_size, bdev_size;
892
893 disk_size = (loff_t)get_capacity(disk) << 9;
894 bdev_size = i_size_read(bdev->bd_inode);
895 if (disk_size != bdev_size) {
896 char name[BDEVNAME_SIZE];
897
898 disk_name(disk, 0, name);
899 printk(KERN_INFO
900 "%s: detected capacity change from %lld to %lld\n",
901 name, bdev_size, disk_size);
902 i_size_write(bdev->bd_inode, disk_size);
903 flush_disk(bdev);
904 }
905}
906EXPORT_SYMBOL(check_disk_size_change);
907
908/**
909 * revalidate_disk - wrapper for lower-level driver's revalidate_disk call-back
910 * @disk: struct gendisk to be revalidated
911 *
912 * This routine is a wrapper for lower-level driver's revalidate_disk
913 * call-backs. It is used to do common pre and post operations needed
914 * for all revalidate_disk operations.
915 */
916int revalidate_disk(struct gendisk *disk)
917{
918 struct block_device *bdev;
919 int ret = 0;
920
921 if (disk->fops->revalidate_disk)
922 ret = disk->fops->revalidate_disk(disk);
923
924 bdev = bdget_disk(disk, 0);
925 if (!bdev)
926 return ret;
927
928 mutex_lock(&bdev->bd_mutex);
929 check_disk_size_change(disk, bdev);
930 mutex_unlock(&bdev->bd_mutex);
931 bdput(bdev);
932 return ret;
933}
934EXPORT_SYMBOL(revalidate_disk);
935
871/* 936/*
872 * This routine checks whether a removable media has been changed, 937 * This routine checks whether a removable media has been changed,
873 * and invalidates all buffer-cache-entries in that case. This 938 * and invalidates all buffer-cache-entries in that case. This
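A sketch of the intended driver-side use of the new revalidate_disk() wrapper after an online resize; gd and new_sectors stand in for driver state and the helper name is invented:

    #include <linux/genhd.h>

    static void example_handle_resize(struct gendisk *gd, sector_t new_sectors)
    {
            set_capacity(gd, new_sectors);
            /* re-runs the driver's ->revalidate_disk and then syncs the
             * bdev inode size via check_disk_size_change() */
            revalidate_disk(gd);
    }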
@@ -887,13 +952,9 @@ int check_disk_change(struct block_device *bdev)
887 if (!bdops->media_changed(bdev->bd_disk)) 952 if (!bdops->media_changed(bdev->bd_disk))
888 return 0; 953 return 0;
889 954
890 if (__invalidate_device(bdev)) 955 flush_disk(bdev);
891 printk("VFS: busy inodes on changed media.\n");
892
893 if (bdops->revalidate_disk) 956 if (bdops->revalidate_disk)
894 bdops->revalidate_disk(bdev->bd_disk); 957 bdops->revalidate_disk(bdev->bd_disk);
895 if (bdev->bd_disk->minors > 1)
896 bdev->bd_invalidated = 1;
897 return 1; 958 return 1;
898} 959}
899 960
@@ -927,10 +988,10 @@ static int __blkdev_put(struct block_device *bdev, int for_part);
927 988
928static int do_open(struct block_device *bdev, struct file *file, int for_part) 989static int do_open(struct block_device *bdev, struct file *file, int for_part)
929{ 990{
930 struct module *owner = NULL;
931 struct gendisk *disk; 991 struct gendisk *disk;
992 struct hd_struct *part = NULL;
932 int ret; 993 int ret;
933 int part; 994 int partno;
934 int perm = 0; 995 int perm = 0;
935 996
936 if (file->f_mode & FMODE_READ) 997 if (file->f_mode & FMODE_READ)
@@ -948,25 +1009,27 @@ static int do_open(struct block_device *bdev, struct file *file, int for_part)
948 1009
949 ret = -ENXIO; 1010 ret = -ENXIO;
950 file->f_mapping = bdev->bd_inode->i_mapping; 1011 file->f_mapping = bdev->bd_inode->i_mapping;
1012
951 lock_kernel(); 1013 lock_kernel();
952 disk = get_gendisk(bdev->bd_dev, &part); 1014
953 if (!disk) { 1015 disk = get_gendisk(bdev->bd_dev, &partno);
954 unlock_kernel(); 1016 if (!disk)
955 bdput(bdev); 1017 goto out_unlock_kernel;
956 return ret; 1018 part = disk_get_part(disk, partno);
957 } 1019 if (!part)
958 owner = disk->fops->owner; 1020 goto out_unlock_kernel;
959 1021
960 mutex_lock_nested(&bdev->bd_mutex, for_part); 1022 mutex_lock_nested(&bdev->bd_mutex, for_part);
961 if (!bdev->bd_openers) { 1023 if (!bdev->bd_openers) {
962 bdev->bd_disk = disk; 1024 bdev->bd_disk = disk;
1025 bdev->bd_part = part;
963 bdev->bd_contains = bdev; 1026 bdev->bd_contains = bdev;
964 if (!part) { 1027 if (!partno) {
965 struct backing_dev_info *bdi; 1028 struct backing_dev_info *bdi;
966 if (disk->fops->open) { 1029 if (disk->fops->open) {
967 ret = disk->fops->open(bdev->bd_inode, file); 1030 ret = disk->fops->open(bdev->bd_inode, file);
968 if (ret) 1031 if (ret)
969 goto out_first; 1032 goto out_clear;
970 } 1033 }
971 if (!bdev->bd_openers) { 1034 if (!bdev->bd_openers) {
972 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 1035 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
@@ -978,36 +1041,36 @@ static int do_open(struct block_device *bdev, struct file *file, int for_part)
978 if (bdev->bd_invalidated) 1041 if (bdev->bd_invalidated)
979 rescan_partitions(disk, bdev); 1042 rescan_partitions(disk, bdev);
980 } else { 1043 } else {
981 struct hd_struct *p;
982 struct block_device *whole; 1044 struct block_device *whole;
983 whole = bdget_disk(disk, 0); 1045 whole = bdget_disk(disk, 0);
984 ret = -ENOMEM; 1046 ret = -ENOMEM;
985 if (!whole) 1047 if (!whole)
986 goto out_first; 1048 goto out_clear;
987 BUG_ON(for_part); 1049 BUG_ON(for_part);
988 ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1); 1050 ret = __blkdev_get(whole, file->f_mode, file->f_flags, 1);
989 if (ret) 1051 if (ret)
990 goto out_first; 1052 goto out_clear;
991 bdev->bd_contains = whole; 1053 bdev->bd_contains = whole;
992 p = disk->part[part - 1];
993 bdev->bd_inode->i_data.backing_dev_info = 1054 bdev->bd_inode->i_data.backing_dev_info =
994 whole->bd_inode->i_data.backing_dev_info; 1055 whole->bd_inode->i_data.backing_dev_info;
995 if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { 1056 if (!(disk->flags & GENHD_FL_UP) ||
1057 !part || !part->nr_sects) {
996 ret = -ENXIO; 1058 ret = -ENXIO;
997 goto out_first; 1059 goto out_clear;
998 } 1060 }
999 kobject_get(&p->dev.kobj); 1061 bd_set_size(bdev, (loff_t)part->nr_sects << 9);
1000 bdev->bd_part = p;
1001 bd_set_size(bdev, (loff_t) p->nr_sects << 9);
1002 } 1062 }
1003 } else { 1063 } else {
1064 disk_put_part(part);
1004 put_disk(disk); 1065 put_disk(disk);
1005 module_put(owner); 1066 module_put(disk->fops->owner);
1067 part = NULL;
1068 disk = NULL;
1006 if (bdev->bd_contains == bdev) { 1069 if (bdev->bd_contains == bdev) {
1007 if (bdev->bd_disk->fops->open) { 1070 if (bdev->bd_disk->fops->open) {
1008 ret = bdev->bd_disk->fops->open(bdev->bd_inode, file); 1071 ret = bdev->bd_disk->fops->open(bdev->bd_inode, file);
1009 if (ret) 1072 if (ret)
1010 goto out; 1073 goto out_unlock_bdev;
1011 } 1074 }
1012 if (bdev->bd_invalidated) 1075 if (bdev->bd_invalidated)
1013 rescan_partitions(bdev->bd_disk, bdev); 1076 rescan_partitions(bdev->bd_disk, bdev);
@@ -1020,19 +1083,24 @@ static int do_open(struct block_device *bdev, struct file *file, int for_part)
1020 unlock_kernel(); 1083 unlock_kernel();
1021 return 0; 1084 return 0;
1022 1085
1023out_first: 1086 out_clear:
1024 bdev->bd_disk = NULL; 1087 bdev->bd_disk = NULL;
1088 bdev->bd_part = NULL;
1025 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1089 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1026 if (bdev != bdev->bd_contains) 1090 if (bdev != bdev->bd_contains)
1027 __blkdev_put(bdev->bd_contains, 1); 1091 __blkdev_put(bdev->bd_contains, 1);
1028 bdev->bd_contains = NULL; 1092 bdev->bd_contains = NULL;
1029 put_disk(disk); 1093 out_unlock_bdev:
1030 module_put(owner);
1031out:
1032 mutex_unlock(&bdev->bd_mutex); 1094 mutex_unlock(&bdev->bd_mutex);
1095 out_unlock_kernel:
1033 unlock_kernel(); 1096 unlock_kernel();
1034 if (ret) 1097
1035 bdput(bdev); 1098 disk_put_part(part);
1099 if (disk)
1100 module_put(disk->fops->owner);
1101 put_disk(disk);
1102 bdput(bdev);
1103
1036 return ret; 1104 return ret;
1037} 1105}
1038 1106
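The do_open() rework looks partitions up with disk_get_part()/disk_put_part() instead of indexing disk->part[] directly. The pattern in isolation, with disk and partno assumed and the helper name invented:

    #include <linux/genhd.h>

    static sector_t example_part_size(struct gendisk *disk, int partno)
    {
            struct hd_struct *part = disk_get_part(disk, partno);
            sector_t nr = 0;

            if (part) {
                    nr = part->nr_sects;
                    disk_put_part(part);    /* drop the reference we took */
            }
            return nr;
    }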
@@ -1117,11 +1185,8 @@ static int __blkdev_put(struct block_device *bdev, int for_part)
1117 1185
1118 put_disk(disk); 1186 put_disk(disk);
1119 module_put(owner); 1187 module_put(owner);
1120 1188 disk_put_part(bdev->bd_part);
1121 if (bdev->bd_contains != bdev) { 1189 bdev->bd_part = NULL;
1122 kobject_put(&bdev->bd_part->dev.kobj);
1123 bdev->bd_part = NULL;
1124 }
1125 bdev->bd_disk = NULL; 1190 bdev->bd_disk = NULL;
1126 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1191 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1127 if (bdev != bdev->bd_contains) 1192 if (bdev != bdev->bd_contains)
@@ -1197,10 +1262,9 @@ EXPORT_SYMBOL(ioctl_by_bdev);
1197 1262
1198/** 1263/**
1199 * lookup_bdev - lookup a struct block_device by name 1264 * lookup_bdev - lookup a struct block_device by name
1265 * @pathname: special file representing the block device
1200 * 1266 *
1201 * @path: special file representing the block device 1267 * Get a reference to the blockdevice at @pathname in the current
1202 *
1203 * Get a reference to the blockdevice at @path in the current
1204 * namespace if possible and return it. Return ERR_PTR(error) 1268 * namespace if possible and return it. Return ERR_PTR(error)
1205 * otherwise. 1269 * otherwise.
1206 */ 1270 */
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 89d2fb7b991a..fd9859f92fad 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -14,6 +14,9 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/configfs.h> 16#include <linux/configfs.h>
17#include <linux/in.h>
18#include <linux/in6.h>
19#include <net/ipv6.h>
17#include <net/sock.h> 20#include <net/sock.h>
18 21
19#include "config.h" 22#include "config.h"
@@ -377,24 +380,24 @@ static struct config_item_type node_type = {
377 .ct_owner = THIS_MODULE, 380 .ct_owner = THIS_MODULE,
378}; 381};
379 382
380static struct dlm_cluster *to_cluster(struct config_item *i) 383static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
381{ 384{
382 return i ? container_of(to_config_group(i), struct dlm_cluster, group) : 385 return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
383 NULL; 386 NULL;
384} 387}
385 388
386static struct dlm_space *to_space(struct config_item *i) 389static struct dlm_space *config_item_to_space(struct config_item *i)
387{ 390{
388 return i ? container_of(to_config_group(i), struct dlm_space, group) : 391 return i ? container_of(to_config_group(i), struct dlm_space, group) :
389 NULL; 392 NULL;
390} 393}
391 394
392static struct dlm_comm *to_comm(struct config_item *i) 395static struct dlm_comm *config_item_to_comm(struct config_item *i)
393{ 396{
394 return i ? container_of(i, struct dlm_comm, item) : NULL; 397 return i ? container_of(i, struct dlm_comm, item) : NULL;
395} 398}
396 399
397static struct dlm_node *to_node(struct config_item *i) 400static struct dlm_node *config_item_to_node(struct config_item *i)
398{ 401{
399 return i ? container_of(i, struct dlm_node, item) : NULL; 402 return i ? container_of(i, struct dlm_node, item) : NULL;
400} 403}
@@ -450,7 +453,7 @@ static struct config_group *make_cluster(struct config_group *g,
450 453
451static void drop_cluster(struct config_group *g, struct config_item *i) 454static void drop_cluster(struct config_group *g, struct config_item *i)
452{ 455{
453 struct dlm_cluster *cl = to_cluster(i); 456 struct dlm_cluster *cl = config_item_to_cluster(i);
454 struct config_item *tmp; 457 struct config_item *tmp;
455 int j; 458 int j;
456 459
@@ -468,7 +471,7 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
468 471
469static void release_cluster(struct config_item *i) 472static void release_cluster(struct config_item *i)
470{ 473{
471 struct dlm_cluster *cl = to_cluster(i); 474 struct dlm_cluster *cl = config_item_to_cluster(i);
472 kfree(cl->group.default_groups); 475 kfree(cl->group.default_groups);
473 kfree(cl); 476 kfree(cl);
474} 477}
@@ -507,7 +510,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
507 510
508static void drop_space(struct config_group *g, struct config_item *i) 511static void drop_space(struct config_group *g, struct config_item *i)
509{ 512{
510 struct dlm_space *sp = to_space(i); 513 struct dlm_space *sp = config_item_to_space(i);
511 struct config_item *tmp; 514 struct config_item *tmp;
512 int j; 515 int j;
513 516
@@ -524,7 +527,7 @@ static void drop_space(struct config_group *g, struct config_item *i)
524 527
525static void release_space(struct config_item *i) 528static void release_space(struct config_item *i)
526{ 529{
527 struct dlm_space *sp = to_space(i); 530 struct dlm_space *sp = config_item_to_space(i);
528 kfree(sp->group.default_groups); 531 kfree(sp->group.default_groups);
529 kfree(sp); 532 kfree(sp);
530} 533}
@@ -546,7 +549,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
546 549
547static void drop_comm(struct config_group *g, struct config_item *i) 550static void drop_comm(struct config_group *g, struct config_item *i)
548{ 551{
549 struct dlm_comm *cm = to_comm(i); 552 struct dlm_comm *cm = config_item_to_comm(i);
550 if (local_comm == cm) 553 if (local_comm == cm)
551 local_comm = NULL; 554 local_comm = NULL;
552 dlm_lowcomms_close(cm->nodeid); 555 dlm_lowcomms_close(cm->nodeid);
@@ -557,13 +560,13 @@ static void drop_comm(struct config_group *g, struct config_item *i)
557 560
558static void release_comm(struct config_item *i) 561static void release_comm(struct config_item *i)
559{ 562{
560 struct dlm_comm *cm = to_comm(i); 563 struct dlm_comm *cm = config_item_to_comm(i);
561 kfree(cm); 564 kfree(cm);
562} 565}
563 566
564static struct config_item *make_node(struct config_group *g, const char *name) 567static struct config_item *make_node(struct config_group *g, const char *name)
565{ 568{
566 struct dlm_space *sp = to_space(g->cg_item.ci_parent); 569 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
567 struct dlm_node *nd; 570 struct dlm_node *nd;
568 571
569 nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL); 572 nd = kzalloc(sizeof(struct dlm_node), GFP_KERNEL);
@@ -585,8 +588,8 @@ static struct config_item *make_node(struct config_group *g, const char *name)
585 588
586static void drop_node(struct config_group *g, struct config_item *i) 589static void drop_node(struct config_group *g, struct config_item *i)
587{ 590{
588 struct dlm_space *sp = to_space(g->cg_item.ci_parent); 591 struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
589 struct dlm_node *nd = to_node(i); 592 struct dlm_node *nd = config_item_to_node(i);
590 593
591 mutex_lock(&sp->members_lock); 594 mutex_lock(&sp->members_lock);
592 list_del(&nd->list); 595 list_del(&nd->list);
@@ -598,7 +601,7 @@ static void drop_node(struct config_group *g, struct config_item *i)
598 601
599static void release_node(struct config_item *i) 602static void release_node(struct config_item *i)
600{ 603{
601 struct dlm_node *nd = to_node(i); 604 struct dlm_node *nd = config_item_to_node(i);
602 kfree(nd); 605 kfree(nd);
603} 606}
604 607
@@ -632,7 +635,7 @@ void dlm_config_exit(void)
632static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a, 635static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
633 char *buf) 636 char *buf)
634{ 637{
635 struct dlm_cluster *cl = to_cluster(i); 638 struct dlm_cluster *cl = config_item_to_cluster(i);
636 struct cluster_attribute *cla = 639 struct cluster_attribute *cla =
637 container_of(a, struct cluster_attribute, attr); 640 container_of(a, struct cluster_attribute, attr);
638 return cla->show ? cla->show(cl, buf) : 0; 641 return cla->show ? cla->show(cl, buf) : 0;
@@ -642,7 +645,7 @@ static ssize_t store_cluster(struct config_item *i,
642 struct configfs_attribute *a, 645 struct configfs_attribute *a,
643 const char *buf, size_t len) 646 const char *buf, size_t len)
644{ 647{
645 struct dlm_cluster *cl = to_cluster(i); 648 struct dlm_cluster *cl = config_item_to_cluster(i);
646 struct cluster_attribute *cla = 649 struct cluster_attribute *cla =
647 container_of(a, struct cluster_attribute, attr); 650 container_of(a, struct cluster_attribute, attr);
648 return cla->store ? cla->store(cl, buf, len) : -EINVAL; 651 return cla->store ? cla->store(cl, buf, len) : -EINVAL;
@@ -651,7 +654,7 @@ static ssize_t store_cluster(struct config_item *i,
651static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a, 654static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
652 char *buf) 655 char *buf)
653{ 656{
654 struct dlm_comm *cm = to_comm(i); 657 struct dlm_comm *cm = config_item_to_comm(i);
655 struct comm_attribute *cma = 658 struct comm_attribute *cma =
656 container_of(a, struct comm_attribute, attr); 659 container_of(a, struct comm_attribute, attr);
657 return cma->show ? cma->show(cm, buf) : 0; 660 return cma->show ? cma->show(cm, buf) : 0;
@@ -660,7 +663,7 @@ static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
660static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a, 663static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
661 const char *buf, size_t len) 664 const char *buf, size_t len)
662{ 665{
663 struct dlm_comm *cm = to_comm(i); 666 struct dlm_comm *cm = config_item_to_comm(i);
664 struct comm_attribute *cma = 667 struct comm_attribute *cma =
665 container_of(a, struct comm_attribute, attr); 668 container_of(a, struct comm_attribute, attr);
666 return cma->store ? cma->store(cm, buf, len) : -EINVAL; 669 return cma->store ? cma->store(cm, buf, len) : -EINVAL;
@@ -714,7 +717,7 @@ static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
714static ssize_t show_node(struct config_item *i, struct configfs_attribute *a, 717static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
715 char *buf) 718 char *buf)
716{ 719{
717 struct dlm_node *nd = to_node(i); 720 struct dlm_node *nd = config_item_to_node(i);
718 struct node_attribute *nda = 721 struct node_attribute *nda =
719 container_of(a, struct node_attribute, attr); 722 container_of(a, struct node_attribute, attr);
720 return nda->show ? nda->show(nd, buf) : 0; 723 return nda->show ? nda->show(nd, buf) : 0;
@@ -723,7 +726,7 @@ static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
723static ssize_t store_node(struct config_item *i, struct configfs_attribute *a, 726static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
724 const char *buf, size_t len) 727 const char *buf, size_t len)
725{ 728{
726 struct dlm_node *nd = to_node(i); 729 struct dlm_node *nd = config_item_to_node(i);
727 struct node_attribute *nda = 730 struct node_attribute *nda =
728 container_of(a, struct node_attribute, attr); 731 container_of(a, struct node_attribute, attr);
729 return nda->store ? nda->store(nd, buf, len) : -EINVAL; 732 return nda->store ? nda->store(nd, buf, len) : -EINVAL;
@@ -768,7 +771,7 @@ static struct dlm_space *get_space(char *name)
768 i = config_group_find_item(space_list, name); 771 i = config_group_find_item(space_list, name);
769 mutex_unlock(&space_list->cg_subsys->su_mutex); 772 mutex_unlock(&space_list->cg_subsys->su_mutex);
770 773
771 return to_space(i); 774 return config_item_to_space(i);
772} 775}
773 776
774static void put_space(struct dlm_space *sp) 777static void put_space(struct dlm_space *sp)
@@ -776,6 +779,33 @@ static void put_space(struct dlm_space *sp)
776 config_item_put(&sp->group.cg_item); 779 config_item_put(&sp->group.cg_item);
777} 780}
778 781
782static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
783{
784 switch (x->ss_family) {
785 case AF_INET: {
786 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
787 struct sockaddr_in *siny = (struct sockaddr_in *)y;
788 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
789 return 0;
790 if (sinx->sin_port != siny->sin_port)
791 return 0;
792 break;
793 }
794 case AF_INET6: {
795 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
796 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
797 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
798 return 0;
799 if (sinx->sin6_port != siny->sin6_port)
800 return 0;
801 break;
802 }
803 default:
804 return 0;
805 }
806 return 1;
807}
808
779static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr) 809static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr)
780{ 810{
781 struct config_item *i; 811 struct config_item *i;
@@ -788,7 +818,7 @@ static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr)
788 mutex_lock(&clusters_root.subsys.su_mutex); 818 mutex_lock(&clusters_root.subsys.su_mutex);
789 819
790 list_for_each_entry(i, &comm_list->cg_children, ci_entry) { 820 list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
791 cm = to_comm(i); 821 cm = config_item_to_comm(i);
792 822
793 if (nodeid) { 823 if (nodeid) {
794 if (cm->nodeid != nodeid) 824 if (cm->nodeid != nodeid)
@@ -797,8 +827,7 @@ static struct dlm_comm *get_comm(int nodeid, struct sockaddr_storage *addr)
797 config_item_get(i); 827 config_item_get(i);
798 break; 828 break;
799 } else { 829 } else {
800 if (!cm->addr_count || 830 if (!cm->addr_count || !addr_compare(cm->addr[0], addr))
801 memcmp(cm->addr[0], addr, sizeof(*addr)))
802 continue; 831 continue;
803 found = 1; 832 found = 1;
804 config_item_get(i); 833 config_item_get(i);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 5a7ac33b629c..868e4c9ef127 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -441,8 +441,11 @@ struct dlm_ls {
441 uint32_t ls_global_id; /* global unique lockspace ID */ 441 uint32_t ls_global_id; /* global unique lockspace ID */
442 uint32_t ls_exflags; 442 uint32_t ls_exflags;
443 int ls_lvblen; 443 int ls_lvblen;
444 int ls_count; /* reference count */ 444 int ls_count; /* refcount of processes in
445 the dlm using this ls */
446 int ls_create_count; /* create/release refcount */
445 unsigned long ls_flags; /* LSFL_ */ 447 unsigned long ls_flags; /* LSFL_ */
448 unsigned long ls_scan_time;
446 struct kobject ls_kobj; 449 struct kobject ls_kobj;
447 450
448 struct dlm_rsbtable *ls_rsbtbl; 451 struct dlm_rsbtable *ls_rsbtbl;
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 499e16759e96..d910501de6d2 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -23,6 +23,7 @@
23#include "lock.h" 23#include "lock.h"
24#include "recover.h" 24#include "recover.h"
25#include "requestqueue.h" 25#include "requestqueue.h"
26#include "user.h"
26 27
27static int ls_count; 28static int ls_count;
28static struct mutex ls_lock; 29static struct mutex ls_lock;
@@ -211,19 +212,41 @@ void dlm_lockspace_exit(void)
211 kset_unregister(dlm_kset); 212 kset_unregister(dlm_kset);
212} 213}
213 214
215static struct dlm_ls *find_ls_to_scan(void)
216{
217 struct dlm_ls *ls;
218
219 spin_lock(&lslist_lock);
220 list_for_each_entry(ls, &lslist, ls_list) {
221 if (time_after_eq(jiffies, ls->ls_scan_time +
222 dlm_config.ci_scan_secs * HZ)) {
223 spin_unlock(&lslist_lock);
224 return ls;
225 }
226 }
227 spin_unlock(&lslist_lock);
228 return NULL;
229}
230
214static int dlm_scand(void *data) 231static int dlm_scand(void *data)
215{ 232{
216 struct dlm_ls *ls; 233 struct dlm_ls *ls;
234 int timeout_jiffies = dlm_config.ci_scan_secs * HZ;
217 235
218 while (!kthread_should_stop()) { 236 while (!kthread_should_stop()) {
219 list_for_each_entry(ls, &lslist, ls_list) { 237 ls = find_ls_to_scan();
238 if (ls) {
220 if (dlm_lock_recovery_try(ls)) { 239 if (dlm_lock_recovery_try(ls)) {
240 ls->ls_scan_time = jiffies;
221 dlm_scan_rsbs(ls); 241 dlm_scan_rsbs(ls);
222 dlm_scan_timeout(ls); 242 dlm_scan_timeout(ls);
223 dlm_unlock_recovery(ls); 243 dlm_unlock_recovery(ls);
244 } else {
245 ls->ls_scan_time += HZ;
224 } 246 }
247 } else {
248 schedule_timeout_interruptible(timeout_jiffies);
225 } 249 }
226 schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
227 } 250 }
228 return 0; 251 return 0;
229} 252}
@@ -246,23 +269,6 @@ static void dlm_scand_stop(void)
246 kthread_stop(scand_task); 269 kthread_stop(scand_task);
247} 270}
248 271
249static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
250{
251 struct dlm_ls *ls;
252
253 spin_lock(&lslist_lock);
254
255 list_for_each_entry(ls, &lslist, ls_list) {
256 if (ls->ls_namelen == namelen &&
257 memcmp(ls->ls_name, name, namelen) == 0)
258 goto out;
259 }
260 ls = NULL;
261 out:
262 spin_unlock(&lslist_lock);
263 return ls;
264}
265
266struct dlm_ls *dlm_find_lockspace_global(uint32_t id) 272struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
267{ 273{
268 struct dlm_ls *ls; 274 struct dlm_ls *ls;
@@ -327,6 +333,7 @@ static void remove_lockspace(struct dlm_ls *ls)
327 for (;;) { 333 for (;;) {
328 spin_lock(&lslist_lock); 334 spin_lock(&lslist_lock);
329 if (ls->ls_count == 0) { 335 if (ls->ls_count == 0) {
336 WARN_ON(ls->ls_create_count != 0);
330 list_del(&ls->ls_list); 337 list_del(&ls->ls_list);
331 spin_unlock(&lslist_lock); 338 spin_unlock(&lslist_lock);
332 return; 339 return;
@@ -381,7 +388,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
381 uint32_t flags, int lvblen) 388 uint32_t flags, int lvblen)
382{ 389{
383 struct dlm_ls *ls; 390 struct dlm_ls *ls;
384 int i, size, error = -ENOMEM; 391 int i, size, error;
385 int do_unreg = 0; 392 int do_unreg = 0;
386 393
387 if (namelen > DLM_LOCKSPACE_LEN) 394 if (namelen > DLM_LOCKSPACE_LEN)
@@ -393,12 +400,37 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
393 if (!try_module_get(THIS_MODULE)) 400 if (!try_module_get(THIS_MODULE))
394 return -EINVAL; 401 return -EINVAL;
395 402
396 ls = dlm_find_lockspace_name(name, namelen); 403 if (!dlm_user_daemon_available()) {
397 if (ls) { 404 module_put(THIS_MODULE);
398 *lockspace = ls; 405 return -EUNATCH;
406 }
407
408 error = 0;
409
410 spin_lock(&lslist_lock);
411 list_for_each_entry(ls, &lslist, ls_list) {
412 WARN_ON(ls->ls_create_count <= 0);
413 if (ls->ls_namelen != namelen)
414 continue;
415 if (memcmp(ls->ls_name, name, namelen))
416 continue;
417 if (flags & DLM_LSFL_NEWEXCL) {
418 error = -EEXIST;
419 break;
420 }
421 ls->ls_create_count++;
399 module_put(THIS_MODULE); 422 module_put(THIS_MODULE);
400 return -EEXIST; 423 error = 1; /* not an error, return 0 */
424 break;
401 } 425 }
426 spin_unlock(&lslist_lock);
427
428 if (error < 0)
429 goto out;
430 if (error)
431 goto ret_zero;
432
433 error = -ENOMEM;
402 434
403 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL); 435 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
404 if (!ls) 436 if (!ls)
@@ -408,6 +440,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
408 ls->ls_lvblen = lvblen; 440 ls->ls_lvblen = lvblen;
409 ls->ls_count = 0; 441 ls->ls_count = 0;
410 ls->ls_flags = 0; 442 ls->ls_flags = 0;
443 ls->ls_scan_time = jiffies;
411 444
412 if (flags & DLM_LSFL_TIMEWARN) 445 if (flags & DLM_LSFL_TIMEWARN)
413 set_bit(LSFL_TIMEWARN, &ls->ls_flags); 446 set_bit(LSFL_TIMEWARN, &ls->ls_flags);
@@ -418,8 +451,9 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
418 ls->ls_allocation = GFP_KERNEL; 451 ls->ls_allocation = GFP_KERNEL;
419 452
420 /* ls_exflags are forced to match among nodes, and we don't 453 /* ls_exflags are forced to match among nodes, and we don't
421 need to require all nodes to have TIMEWARN or FS set */ 454 need to require all nodes to have some flags set */
422 ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS)); 455 ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
456 DLM_LSFL_NEWEXCL));
423 457
424 size = dlm_config.ci_rsbtbl_size; 458 size = dlm_config.ci_rsbtbl_size;
425 ls->ls_rsbtbl_size = size; 459 ls->ls_rsbtbl_size = size;
@@ -510,6 +544,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
510 down_write(&ls->ls_in_recovery); 544 down_write(&ls->ls_in_recovery);
511 545
512 spin_lock(&lslist_lock); 546 spin_lock(&lslist_lock);
547 ls->ls_create_count = 1;
513 list_add(&ls->ls_list, &lslist); 548 list_add(&ls->ls_list, &lslist);
514 spin_unlock(&lslist_lock); 549 spin_unlock(&lslist_lock);
515 550
@@ -548,7 +583,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
548 dlm_create_debug_file(ls); 583 dlm_create_debug_file(ls);
549 584
550 log_debug(ls, "join complete"); 585 log_debug(ls, "join complete");
551 586 ret_zero:
552 *lockspace = ls; 587 *lockspace = ls;
553 return 0; 588 return 0;
554 589
@@ -635,13 +670,34 @@ static int release_lockspace(struct dlm_ls *ls, int force)
635 struct dlm_lkb *lkb; 670 struct dlm_lkb *lkb;
636 struct dlm_rsb *rsb; 671 struct dlm_rsb *rsb;
637 struct list_head *head; 672 struct list_head *head;
638 int i; 673 int i, busy, rv;
639 int busy = lockspace_busy(ls); 674
675 busy = lockspace_busy(ls);
676
677 spin_lock(&lslist_lock);
678 if (ls->ls_create_count == 1) {
679 if (busy > force)
680 rv = -EBUSY;
681 else {
682 /* remove_lockspace takes ls off lslist */
683 ls->ls_create_count = 0;
684 rv = 0;
685 }
686 } else if (ls->ls_create_count > 1) {
687 rv = --ls->ls_create_count;
688 } else {
689 rv = -EINVAL;
690 }
691 spin_unlock(&lslist_lock);
640 692
641 if (busy > force) 693 if (rv) {
642 return -EBUSY; 694 log_debug(ls, "release_lockspace no remove %d", rv);
695 return rv;
696 }
697
698 dlm_device_deregister(ls);
643 699
644 if (force < 3) 700 if (force < 3 && dlm_user_daemon_available())
645 do_uevent(ls, 0); 701 do_uevent(ls, 0);
646 702
647 dlm_recoverd_stop(ls); 703 dlm_recoverd_stop(ls);
@@ -720,15 +776,10 @@ static int release_lockspace(struct dlm_ls *ls, int force)
720 dlm_clear_members(ls); 776 dlm_clear_members(ls);
721 dlm_clear_members_gone(ls); 777 dlm_clear_members_gone(ls);
722 kfree(ls->ls_node_array); 778 kfree(ls->ls_node_array);
779 log_debug(ls, "release_lockspace final free");
723 kobject_put(&ls->ls_kobj); 780 kobject_put(&ls->ls_kobj);
724 /* The ls structure will be freed when the kobject is done with */ 781 /* The ls structure will be freed when the kobject is done with */
725 782
726 mutex_lock(&ls_lock);
727 ls_count--;
728 if (!ls_count)
729 threads_stop();
730 mutex_unlock(&ls_lock);
731
732 module_put(THIS_MODULE); 783 module_put(THIS_MODULE);
733 return 0; 784 return 0;
734} 785}
@@ -750,11 +801,38 @@ static int release_lockspace(struct dlm_ls *ls, int force)
750int dlm_release_lockspace(void *lockspace, int force) 801int dlm_release_lockspace(void *lockspace, int force)
751{ 802{
752 struct dlm_ls *ls; 803 struct dlm_ls *ls;
804 int error;
753 805
754 ls = dlm_find_lockspace_local(lockspace); 806 ls = dlm_find_lockspace_local(lockspace);
755 if (!ls) 807 if (!ls)
756 return -EINVAL; 808 return -EINVAL;
757 dlm_put_lockspace(ls); 809 dlm_put_lockspace(ls);
758 return release_lockspace(ls, force); 810
811 mutex_lock(&ls_lock);
812 error = release_lockspace(ls, force);
813 if (!error)
814 ls_count--;
815 else if (!ls_count)
816 threads_stop();
817 mutex_unlock(&ls_lock);
818
819 return error;
820}
821
822void dlm_stop_lockspaces(void)
823{
824 struct dlm_ls *ls;
825
826 restart:
827 spin_lock(&lslist_lock);
828 list_for_each_entry(ls, &lslist, ls_list) {
829 if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
830 continue;
831 spin_unlock(&lslist_lock);
832 log_error(ls, "no userland control daemon, stopping lockspace");
833 dlm_ls_stop(ls);
834 goto restart;
835 }
836 spin_unlock(&lslist_lock);
759} 837}
760 838
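
The lockspace changes above drop the separate dlm_find_lockspace_name() lookup in favour of a per-lockspace ls_create_count taken under lslist_lock: creating a name that already exists simply bumps the count (unless DLM_LSFL_NEWEXCL was passed, which returns -EEXIST), and release_lockspace() only tears the lockspace down when the count drops back to zero, returning a positive value while references remain. A minimal userspace sketch of that counting scheme, with a pthread mutex standing in for lslist_lock and hypothetical helper names (create_lockspace/release_lockspace follow the shape of the patch, not the kernel API, and the busy/force check is left out):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lockspace {
	char name[64];
	int create_count;                /* outstanding creates for this name */
	struct lockspace *next;
};

static struct lockspace *lslist;
static pthread_mutex_t lslist_lock = PTHREAD_MUTEX_INITIALIZER;

/* Create or re-reference a lockspace; excl mirrors DLM_LSFL_NEWEXCL. */
static int create_lockspace(const char *name, int excl, struct lockspace **out)
{
	struct lockspace *ls;

	pthread_mutex_lock(&lslist_lock);
	for (ls = lslist; ls; ls = ls->next) {
		if (strcmp(ls->name, name))
			continue;
		if (excl) {
			pthread_mutex_unlock(&lslist_lock);
			return -EEXIST;          /* caller wanted a fresh one */
		}
		ls->create_count++;              /* reuse the existing entry */
		pthread_mutex_unlock(&lslist_lock);
		*out = ls;
		return 0;
	}
	pthread_mutex_unlock(&lslist_lock);

	ls = calloc(1, sizeof(*ls));
	if (!ls)
		return -ENOMEM;
	snprintf(ls->name, sizeof(ls->name), "%s", name);
	ls->create_count = 1;

	pthread_mutex_lock(&lslist_lock);
	ls->next = lslist;
	lslist = ls;
	pthread_mutex_unlock(&lslist_lock);
	*out = ls;
	return 0;
}

/* Drop one create reference; only the final drop unlinks and frees it. */
static int release_lockspace(struct lockspace *ls)
{
	struct lockspace **p;
	int rv;

	pthread_mutex_lock(&lslist_lock);
	/* The kernel additionally weighs lockspace_busy() against "force" here. */
	if (ls->create_count == 1) {
		for (p = &lslist; *p; p = &(*p)->next) {
			if (*p == ls) {
				*p = ls->next;   /* unlink, like remove_lockspace */
				break;
			}
		}
		ls->create_count = 0;
		rv = 0;                          /* final release: tear down */
	} else if (ls->create_count > 1) {
		rv = --ls->create_count;         /* positive: references remain */
	} else {
		rv = -EINVAL;                    /* never created or over-released */
	}
	pthread_mutex_unlock(&lslist_lock);

	if (rv == 0)
		free(ls);
	return rv;
}

int main(void)
{
	struct lockspace *a, *b;

	create_lockspace("alpha", 0, &a);
	create_lockspace("alpha", 0, &b);        /* b == a, count is now 2 */
	printf("first release: %d\n", release_lockspace(a));   /* 1: kept */
	printf("final release: %d\n", release_lockspace(b));   /* 0: freed */
	return 0;
}

Built with -lpthread, the second create of "alpha" reuses the existing entry and the first release returns a positive count, mirroring the "not an error, return 0" handling in the patch.
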
diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h
index 891eabbdd021..f879f87901f8 100644
--- a/fs/dlm/lockspace.h
+++ b/fs/dlm/lockspace.h
@@ -20,6 +20,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id);
20struct dlm_ls *dlm_find_lockspace_local(void *id); 20struct dlm_ls *dlm_find_lockspace_local(void *id);
21struct dlm_ls *dlm_find_lockspace_device(int minor); 21struct dlm_ls *dlm_find_lockspace_device(int minor);
22void dlm_put_lockspace(struct dlm_ls *ls); 22void dlm_put_lockspace(struct dlm_ls *ls);
23void dlm_stop_lockspaces(void);
23 24
24#endif /* __LOCKSPACE_DOT_H__ */ 25#endif /* __LOCKSPACE_DOT_H__ */
25 26
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 34f14a14fb4e..b3832c67194a 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2007 Red Hat, Inc. All rights reserved. 2 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
3 * 3 *
4 * This copyrighted material is made available to anyone wishing to use, 4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions 5 * modify, copy, or redistribute it subject to the terms and conditions
@@ -15,7 +15,6 @@
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/signal.h> 16#include <linux/signal.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/smp_lock.h>
19#include <linux/dlm.h> 18#include <linux/dlm.h>
20#include <linux/dlm_device.h> 19#include <linux/dlm_device.h>
21 20
@@ -27,6 +26,8 @@
27 26
28static const char name_prefix[] = "dlm"; 27static const char name_prefix[] = "dlm";
29static const struct file_operations device_fops; 28static const struct file_operations device_fops;
29static atomic_t dlm_monitor_opened;
30static int dlm_monitor_unused = 1;
30 31
31#ifdef CONFIG_COMPAT 32#ifdef CONFIG_COMPAT
32 33
@@ -340,10 +341,15 @@ static int device_user_deadlock(struct dlm_user_proc *proc,
340 return error; 341 return error;
341} 342}
342 343
343static int create_misc_device(struct dlm_ls *ls, char *name) 344static int dlm_device_register(struct dlm_ls *ls, char *name)
344{ 345{
345 int error, len; 346 int error, len;
346 347
348 /* The device is already registered. This happens when the
349 lockspace is created multiple times from userspace. */
350 if (ls->ls_device.name)
351 return 0;
352
347 error = -ENOMEM; 353 error = -ENOMEM;
348 len = strlen(name) + strlen(name_prefix) + 2; 354 len = strlen(name) + strlen(name_prefix) + 2;
349 ls->ls_device.name = kzalloc(len, GFP_KERNEL); 355 ls->ls_device.name = kzalloc(len, GFP_KERNEL);
@@ -363,6 +369,22 @@ fail:
363 return error; 369 return error;
364} 370}
365 371
372int dlm_device_deregister(struct dlm_ls *ls)
373{
374 int error;
375
376 /* The device is not registered. This happens when the lockspace
377 was never used from userspace, or when device_create_lockspace()
378 calls dlm_release_lockspace() after the register fails. */
379 if (!ls->ls_device.name)
380 return 0;
381
382 error = misc_deregister(&ls->ls_device);
383 if (!error)
384 kfree(ls->ls_device.name);
385 return error;
386}
387
366static int device_user_purge(struct dlm_user_proc *proc, 388static int device_user_purge(struct dlm_user_proc *proc,
367 struct dlm_purge_params *params) 389 struct dlm_purge_params *params)
368{ 390{
@@ -397,7 +419,7 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
397 if (!ls) 419 if (!ls)
398 return -ENOENT; 420 return -ENOENT;
399 421
400 error = create_misc_device(ls, params->name); 422 error = dlm_device_register(ls, params->name);
401 dlm_put_lockspace(ls); 423 dlm_put_lockspace(ls);
402 424
403 if (error) 425 if (error)
@@ -421,31 +443,22 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
421 if (!ls) 443 if (!ls)
422 return -ENOENT; 444 return -ENOENT;
423 445
424 /* Deregister the misc device first, so we don't have
425 * a device that's not attached to a lockspace. If
426 * dlm_release_lockspace fails then we can recreate it
427 */
428 error = misc_deregister(&ls->ls_device);
429 if (error) {
430 dlm_put_lockspace(ls);
431 goto out;
432 }
433 kfree(ls->ls_device.name);
434
435 if (params->flags & DLM_USER_LSFLG_FORCEFREE) 446 if (params->flags & DLM_USER_LSFLG_FORCEFREE)
436 force = 2; 447 force = 2;
437 448
438 lockspace = ls->ls_local_handle; 449 lockspace = ls->ls_local_handle;
450 dlm_put_lockspace(ls);
439 451
440 /* dlm_release_lockspace waits for references to go to zero, 452 /* The final dlm_release_lockspace waits for references to go to
441 so all processes will need to close their device for the ls 453 zero, so all processes will need to close their device for the
442 before the release will procede */ 454 ls before the release will proceed. release also calls the
455 device_deregister above. Converting a positive return value
456 from release to zero means that userspace won't know when its
457 release was the final one, but it shouldn't need to know. */
443 458
444 dlm_put_lockspace(ls);
445 error = dlm_release_lockspace(lockspace, force); 459 error = dlm_release_lockspace(lockspace, force);
446 if (error) 460 if (error > 0)
447 create_misc_device(ls, ls->ls_name); 461 error = 0;
448 out:
449 return error; 462 return error;
450} 463}
451 464
@@ -623,17 +636,13 @@ static int device_open(struct inode *inode, struct file *file)
623 struct dlm_user_proc *proc; 636 struct dlm_user_proc *proc;
624 struct dlm_ls *ls; 637 struct dlm_ls *ls;
625 638
626 lock_kernel();
627 ls = dlm_find_lockspace_device(iminor(inode)); 639 ls = dlm_find_lockspace_device(iminor(inode));
628 if (!ls) { 640 if (!ls)
629 unlock_kernel();
630 return -ENOENT; 641 return -ENOENT;
631 }
632 642
633 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL); 643 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL);
634 if (!proc) { 644 if (!proc) {
635 dlm_put_lockspace(ls); 645 dlm_put_lockspace(ls);
636 unlock_kernel();
637 return -ENOMEM; 646 return -ENOMEM;
638 } 647 }
639 648
@@ -645,7 +654,6 @@ static int device_open(struct inode *inode, struct file *file)
645 spin_lock_init(&proc->locks_spin); 654 spin_lock_init(&proc->locks_spin);
646 init_waitqueue_head(&proc->wait); 655 init_waitqueue_head(&proc->wait);
647 file->private_data = proc; 656 file->private_data = proc;
648 unlock_kernel();
649 657
650 return 0; 658 return 0;
651} 659}
@@ -878,9 +886,28 @@ static unsigned int device_poll(struct file *file, poll_table *wait)
878 return 0; 886 return 0;
879} 887}
880 888
889int dlm_user_daemon_available(void)
890{
891 /* dlm_controld hasn't started (or has started, but has not
892 properly populated configfs) */
893
894 if (!dlm_our_nodeid())
895 return 0;
896
897 /* This is to deal with versions of dlm_controld that don't
898 know about the monitor device. We assume that if
899 dlm_controld was started (above) but the monitor device
900 was never opened, it's an old version. dlm_controld
901 should open the monitor device before populating configfs. */
902
903 if (dlm_monitor_unused)
904 return 1;
905
906 return atomic_read(&dlm_monitor_opened) ? 1 : 0;
907}
908
881static int ctl_device_open(struct inode *inode, struct file *file) 909static int ctl_device_open(struct inode *inode, struct file *file)
882{ 910{
883 cycle_kernel_lock();
884 file->private_data = NULL; 911 file->private_data = NULL;
885 return 0; 912 return 0;
886} 913}
@@ -890,6 +917,20 @@ static int ctl_device_close(struct inode *inode, struct file *file)
890 return 0; 917 return 0;
891} 918}
892 919
920static int monitor_device_open(struct inode *inode, struct file *file)
921{
922 atomic_inc(&dlm_monitor_opened);
923 dlm_monitor_unused = 0;
924 return 0;
925}
926
927static int monitor_device_close(struct inode *inode, struct file *file)
928{
929 if (atomic_dec_and_test(&dlm_monitor_opened))
930 dlm_stop_lockspaces();
931 return 0;
932}
933
893static const struct file_operations device_fops = { 934static const struct file_operations device_fops = {
894 .open = device_open, 935 .open = device_open,
895 .release = device_close, 936 .release = device_close,
@@ -913,19 +954,42 @@ static struct miscdevice ctl_device = {
913 .minor = MISC_DYNAMIC_MINOR, 954 .minor = MISC_DYNAMIC_MINOR,
914}; 955};
915 956
957static const struct file_operations monitor_device_fops = {
958 .open = monitor_device_open,
959 .release = monitor_device_close,
960 .owner = THIS_MODULE,
961};
962
963static struct miscdevice monitor_device = {
964 .name = "dlm-monitor",
965 .fops = &monitor_device_fops,
966 .minor = MISC_DYNAMIC_MINOR,
967};
968
916int __init dlm_user_init(void) 969int __init dlm_user_init(void)
917{ 970{
918 int error; 971 int error;
919 972
973 atomic_set(&dlm_monitor_opened, 0);
974
920 error = misc_register(&ctl_device); 975 error = misc_register(&ctl_device);
921 if (error) 976 if (error) {
922 log_print("misc_register failed for control device"); 977 log_print("misc_register failed for control device");
978 goto out;
979 }
923 980
981 error = misc_register(&monitor_device);
982 if (error) {
983 log_print("misc_register failed for monitor device");
984 misc_deregister(&ctl_device);
985 }
986 out:
924 return error; 987 return error;
925} 988}
926 989
927void dlm_user_exit(void) 990void dlm_user_exit(void)
928{ 991{
929 misc_deregister(&ctl_device); 992 misc_deregister(&ctl_device);
993 misc_deregister(&monitor_device);
930} 994}
931 995
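
The new dlm-monitor misc device is how the kernel notices that the userspace daemon has gone away: dlm_controld is expected to open it before populating configfs and hold it open, and the final close triggers dlm_stop_lockspaces(). A small sketch of the daemon side, assuming the device node appears as /dev/dlm-monitor (the actual path depends on udev rules):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Open the monitor device before touching configfs and keep the
	 * descriptor open for the lifetime of the daemon.  If the process
	 * exits or crashes, the kernel sees the final close and stops the
	 * lockspaces it was controlling. */
	int fd = open("/dev/dlm-monitor", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/dlm-monitor");
		return 1;   /* perhaps an older kernel without the device */
	}

	/* ... populate configfs, then run the normal daemon loop ... */
	pause();

	close(fd);          /* normally unreachable; exit() closes it anyway */
	return 0;
}
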
diff --git a/fs/dlm/user.h b/fs/dlm/user.h
index d38e9f3e4151..35eb6a13d616 100644
--- a/fs/dlm/user.h
+++ b/fs/dlm/user.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 2 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
3 * 3 *
4 * This copyrighted material is made available to anyone wishing to use, 4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions 5 * modify, copy, or redistribute it subject to the terms and conditions
@@ -12,5 +12,7 @@
12void dlm_user_add_ast(struct dlm_lkb *lkb, int type); 12void dlm_user_add_ast(struct dlm_lkb *lkb, int type);
13int dlm_user_init(void); 13int dlm_user_init(void);
14void dlm_user_exit(void); 14void dlm_user_exit(void);
15int dlm_device_deregister(struct dlm_ls *ls);
16int dlm_user_daemon_available(void);
15 17
16#endif 18#endif
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 302e95c4af7e..fb98b3d847ed 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -6,6 +6,7 @@
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/fs.h> 7#include <linux/fs.h>
8#include <linux/msdos_fs.h> 8#include <linux/msdos_fs.h>
9#include <linux/blkdev.h>
9 10
10struct fatent_operations { 11struct fatent_operations {
11 void (*ent_blocknr)(struct super_block *, int, int *, sector_t *); 12 void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
@@ -535,6 +536,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
535 struct fat_entry fatent; 536 struct fat_entry fatent;
536 struct buffer_head *bhs[MAX_BUF_PER_PAGE]; 537 struct buffer_head *bhs[MAX_BUF_PER_PAGE];
537 int i, err, nr_bhs; 538 int i, err, nr_bhs;
539 int first_cl = cluster;
538 540
539 nr_bhs = 0; 541 nr_bhs = 0;
540 fatent_init(&fatent); 542 fatent_init(&fatent);
@@ -551,6 +553,18 @@ int fat_free_clusters(struct inode *inode, int cluster)
551 goto error; 553 goto error;
552 } 554 }
553 555
556 /*
557 * Issue discard for the sectors we no longer care about,
558 * batching contiguous clusters into one request
559 */
560 if (cluster != fatent.entry + 1) {
561 int nr_clus = fatent.entry - first_cl + 1;
562
563 sb_issue_discard(sb, fat_clus_to_blknr(sbi, first_cl),
564 nr_clus * sbi->sec_per_clus);
565 first_cl = cluster;
566 }
567
554 ops->ent_put(&fatent, FAT_ENT_FREE); 568 ops->ent_put(&fatent, FAT_ENT_FREE);
555 if (sbi->free_clusters != -1) { 569 if (sbi->free_clusters != -1) {
556 sbi->free_clusters++; 570 sbi->free_clusters++;
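
The FAT hunk above batches runs of contiguous clusters so that one sb_issue_discard() call covers a whole run rather than issuing a request per cluster; a run ends whenever the next cluster does not immediately follow the previous one. A standalone sketch of the same batching, with flush_run() as a hypothetical stand-in for the discard call:

#include <stdio.h>

/* Hypothetical stand-in for sb_issue_discard(): handle one contiguous run. */
static void flush_run(int first, int count)
{
	printf("discard clusters %d..%d (%d)\n", first, first + count - 1, count);
}

static void free_clusters(const int *clus, int n)
{
	int first = clus[0];
	int prev = clus[0];
	int i;

	for (i = 1; i < n; i++) {
		if (clus[i] != prev + 1) {          /* run broken: flush it */
			flush_run(first, prev - first + 1);
			first = clus[i];
		}
		prev = clus[i];
	}
	flush_run(first, prev - first + 1);         /* final run */
}

int main(void)
{
	int chain[] = { 10, 11, 12, 20, 21, 40 };

	free_clusters(chain, 6);    /* three runs: 10-12, 20-21, 40 */
	return 0;
}
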
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 13391e546616..c962283d4e7f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1265,6 +1265,8 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1265 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; 1265 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1266 if (time_before(now, holdtime)) 1266 if (time_before(now, holdtime))
1267 delay = holdtime - now; 1267 delay = holdtime - now;
1268 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1269 delay = gl->gl_ops->go_min_hold_time;
1268 1270
1269 spin_lock(&gl->gl_spin); 1271 spin_lock(&gl->gl_spin);
1270 handle_callback(gl, state, 1, delay); 1272 handle_callback(gl, state, 1, delay);
@@ -1578,8 +1580,6 @@ static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1578 *p++ = 'a'; 1580 *p++ = 'a';
1579 if (flags & GL_EXACT) 1581 if (flags & GL_EXACT)
1580 *p++ = 'E'; 1582 *p++ = 'E';
1581 if (flags & GL_ATIME)
1582 *p++ = 'a';
1583 if (flags & GL_NOCACHE) 1583 if (flags & GL_NOCACHE)
1584 *p++ = 'c'; 1584 *p++ = 'c';
1585 if (test_bit(HIF_HOLDER, &iflags)) 1585 if (test_bit(HIF_HOLDER, &iflags))
@@ -1816,15 +1816,17 @@ restart:
1816 if (gl) { 1816 if (gl) {
1817 gi->gl = hlist_entry(gl->gl_list.next, 1817 gi->gl = hlist_entry(gl->gl_list.next,
1818 struct gfs2_glock, gl_list); 1818 struct gfs2_glock, gl_list);
1819 if (gi->gl) 1819 } else {
1820 gfs2_glock_hold(gi->gl); 1820 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1821 struct gfs2_glock, gl_list);
1821 } 1822 }
1823 if (gi->gl)
1824 gfs2_glock_hold(gi->gl);
1822 read_unlock(gl_lock_addr(gi->hash)); 1825 read_unlock(gl_lock_addr(gi->hash));
1823 if (gl) 1826 if (gl)
1824 gfs2_glock_put(gl); 1827 gfs2_glock_put(gl);
1825 if (gl && gi->gl == NULL)
1826 gi->hash++;
1827 while (gi->gl == NULL) { 1828 while (gi->gl == NULL) {
1829 gi->hash++;
1828 if (gi->hash >= GFS2_GL_HASH_SIZE) 1830 if (gi->hash >= GFS2_GL_HASH_SIZE)
1829 return 1; 1831 return 1;
1830 read_lock(gl_lock_addr(gi->hash)); 1832 read_lock(gl_lock_addr(gi->hash));
@@ -1833,7 +1835,6 @@ restart:
1833 if (gi->gl) 1835 if (gi->gl)
1834 gfs2_glock_hold(gi->gl); 1836 gfs2_glock_hold(gi->gl);
1835 read_unlock(gl_lock_addr(gi->hash)); 1837 read_unlock(gl_lock_addr(gi->hash));
1836 gi->hash++;
1837 } 1838 }
1838 1839
1839 if (gi->sdp != gi->gl->gl_sbd) 1840 if (gi->sdp != gi->gl->gl_sbd)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 971d92af70fc..695c6b193611 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -24,7 +24,6 @@
24#define GL_ASYNC 0x00000040 24#define GL_ASYNC 0x00000040
25#define GL_EXACT 0x00000080 25#define GL_EXACT 0x00000080
26#define GL_SKIP 0x00000100 26#define GL_SKIP 0x00000100
27#define GL_ATIME 0x00000200
28#define GL_NOCACHE 0x00000400 27#define GL_NOCACHE 0x00000400
29 28
30#define GLR_TRYFAILED 13 29#define GLR_TRYFAILED 13
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 448697a5c462..f566ec1b4e8e 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -386,20 +386,21 @@ struct gfs2_statfs_change_host {
386#define GFS2_DATA_ORDERED 2 386#define GFS2_DATA_ORDERED 2
387 387
388struct gfs2_args { 388struct gfs2_args {
389 char ar_lockproto[GFS2_LOCKNAME_LEN]; /* Name of the Lock Protocol */ 389 char ar_lockproto[GFS2_LOCKNAME_LEN]; /* Name of the Lock Protocol */
390 char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */ 390 char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */
391 char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */ 391 char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */
392 int ar_spectator; /* Don't get a journal because we're always RO */ 392 unsigned int ar_spectator:1; /* Don't get a journal */
393 int ar_ignore_local_fs; /* Don't optimize even if local_fs is 1 */ 393 unsigned int ar_ignore_local_fs:1; /* Ignore optimisations */
394 int ar_localflocks; /* Let the VFS do flock|fcntl locks for us */ 394 unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */
395 int ar_localcaching; /* Local-style caching (dangerous on multihost) */ 395 unsigned int ar_localcaching:1; /* Local caching */
396 int ar_debug; /* Oops on errors instead of trying to be graceful */ 396 unsigned int ar_debug:1; /* Oops on errors */
397 int ar_upgrade; /* Upgrade ondisk/multihost format */ 397 unsigned int ar_upgrade:1; /* Upgrade ondisk format */
398 unsigned int ar_num_glockd; /* Number of glockd threads */ 398 unsigned int ar_posix_acl:1; /* Enable posix acls */
399 int ar_posix_acl; /* Enable posix acls */ 399 unsigned int ar_quota:2; /* off/account/on */
400 int ar_quota; /* off/account/on */ 400 unsigned int ar_suiddir:1; /* suiddir support */
401 int ar_suiddir; /* suiddir support */ 401 unsigned int ar_data:2; /* ordered/writeback */
402 int ar_data; /* ordered/writeback */ 402 unsigned int ar_meta:1; /* mount metafs */
403 unsigned int ar_num_glockd; /* Number of glockd threads */
403}; 404};
404 405
405struct gfs2_tune { 406struct gfs2_tune {
@@ -419,7 +420,6 @@ struct gfs2_tune {
419 unsigned int gt_quota_scale_den; /* Denominator */ 420 unsigned int gt_quota_scale_den; /* Denominator */
420 unsigned int gt_quota_cache_secs; 421 unsigned int gt_quota_cache_secs;
421 unsigned int gt_quota_quantum; /* Secs between syncs to quota file */ 422 unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
422 unsigned int gt_atime_quantum; /* Min secs between atime updates */
423 unsigned int gt_new_files_jdata; 423 unsigned int gt_new_files_jdata;
424 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ 424 unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
425 unsigned int gt_stall_secs; /* Detects trouble! */ 425 unsigned int gt_stall_secs; /* Detects trouble! */
@@ -432,7 +432,7 @@ enum {
432 SDF_JOURNAL_CHECKED = 0, 432 SDF_JOURNAL_CHECKED = 0,
433 SDF_JOURNAL_LIVE = 1, 433 SDF_JOURNAL_LIVE = 1,
434 SDF_SHUTDOWN = 2, 434 SDF_SHUTDOWN = 2,
435 SDF_NOATIME = 3, 435 SDF_NOBARRIERS = 3,
436}; 436};
437 437
438#define GFS2_FSNAME_LEN 256 438#define GFS2_FSNAME_LEN 256
@@ -461,7 +461,6 @@ struct gfs2_sb_host {
461 461
462struct gfs2_sbd { 462struct gfs2_sbd {
463 struct super_block *sd_vfs; 463 struct super_block *sd_vfs;
464 struct super_block *sd_vfs_meta;
465 struct kobject sd_kobj; 464 struct kobject sd_kobj;
466 unsigned long sd_flags; /* SDF_... */ 465 unsigned long sd_flags; /* SDF_... */
467 struct gfs2_sb_host sd_sb; 466 struct gfs2_sb_host sd_sb;
@@ -499,7 +498,9 @@ struct gfs2_sbd {
499 498
500 /* Inode Stuff */ 499 /* Inode Stuff */
501 500
502 struct inode *sd_master_dir; 501 struct dentry *sd_master_dir;
502 struct dentry *sd_root_dir;
503
503 struct inode *sd_jindex; 504 struct inode *sd_jindex;
504 struct inode *sd_inum_inode; 505 struct inode *sd_inum_inode;
505 struct inode *sd_statfs_inode; 506 struct inode *sd_statfs_inode;
@@ -634,7 +635,6 @@ struct gfs2_sbd {
634 /* Debugging crud */ 635 /* Debugging crud */
635 636
636 unsigned long sd_last_warning; 637 unsigned long sd_last_warning;
637 struct vfsmount *sd_gfs2mnt;
638 struct dentry *debugfs_dir; /* debugfs directory */ 638 struct dentry *debugfs_dir; /* debugfs directory */
639 struct dentry *debugfs_dentry_glocks; /* for debugfs */ 639 struct dentry *debugfs_dentry_glocks; /* for debugfs */
640}; 640};
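
The gfs2_args change packs what used to be full int flags into one- and two-bit bitfields, shrinking the structure and making the on/off nature of each mount option explicit. A quick comparison of the two layouts, with shortened field names; the exact sizes printed depend on the compiler and ABI:

#include <stdio.h>

struct args_old {
	int spectator;
	int localflocks;
	int debug;
	int upgrade;
	int posix_acl;
	int quota;                  /* off/account/on */
	int suiddir;
	int data;                   /* ordered/writeback */
};

struct args_new {
	unsigned int spectator:1;
	unsigned int localflocks:1;
	unsigned int debug:1;
	unsigned int upgrade:1;
	unsigned int posix_acl:1;
	unsigned int quota:2;       /* off/account/on */
	unsigned int suiddir:1;
	unsigned int data:2;        /* ordered/writeback */
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct args_old), sizeof(struct args_new));
	return 0;
}
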
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 8b0806a32948..7cee695fa441 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -18,6 +18,7 @@
18#include <linux/crc32.h> 18#include <linux/crc32.h>
19#include <linux/lm_interface.h> 19#include <linux/lm_interface.h>
20#include <linux/security.h> 20#include <linux/security.h>
21#include <linux/time.h>
21 22
22#include "gfs2.h" 23#include "gfs2.h"
23#include "incore.h" 24#include "incore.h"
@@ -249,6 +250,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
249{ 250{
250 struct gfs2_dinode_host *di = &ip->i_di; 251 struct gfs2_dinode_host *di = &ip->i_di;
251 const struct gfs2_dinode *str = buf; 252 const struct gfs2_dinode *str = buf;
253 struct timespec atime;
252 u16 height, depth; 254 u16 height, depth;
253 255
254 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) 256 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
@@ -275,8 +277,10 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
275 di->di_size = be64_to_cpu(str->di_size); 277 di->di_size = be64_to_cpu(str->di_size);
276 i_size_write(&ip->i_inode, di->di_size); 278 i_size_write(&ip->i_inode, di->di_size);
277 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); 279 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
278 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime); 280 atime.tv_sec = be64_to_cpu(str->di_atime);
279 ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); 281 atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
282 if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
283 ip->i_inode.i_atime = atime;
280 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); 284 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
281 ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec); 285 ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
282 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); 286 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
@@ -1033,13 +1037,11 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
1033 1037
1034 if (bh) 1038 if (bh)
1035 brelse(bh); 1039 brelse(bh);
1036 if (!inode)
1037 return ERR_PTR(-ENOMEM);
1038 return inode; 1040 return inode;
1039 1041
1040fail_gunlock2: 1042fail_gunlock2:
1041 gfs2_glock_dq_uninit(ghs + 1); 1043 gfs2_glock_dq_uninit(ghs + 1);
1042 if (inode) 1044 if (inode && !IS_ERR(inode))
1043 iput(inode); 1045 iput(inode);
1044fail_gunlock: 1046fail_gunlock:
1045 gfs2_glock_dq(ghs); 1047 gfs2_glock_dq(ghs);
@@ -1140,54 +1142,6 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1140 return 0; 1142 return 0;
1141} 1143}
1142 1144
1143/*
1144 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
1145 * @this: move this
1146 * @to: to here
1147 *
1148 * Follow @to back to the root and make sure we don't encounter @this
1149 * Assumes we already hold the rename lock.
1150 *
1151 * Returns: errno
1152 */
1153
1154int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1155{
1156 struct inode *dir = &to->i_inode;
1157 struct super_block *sb = dir->i_sb;
1158 struct inode *tmp;
1159 struct qstr dotdot;
1160 int error = 0;
1161
1162 gfs2_str2qstr(&dotdot, "..");
1163
1164 igrab(dir);
1165
1166 for (;;) {
1167 if (dir == &this->i_inode) {
1168 error = -EINVAL;
1169 break;
1170 }
1171 if (dir == sb->s_root->d_inode) {
1172 error = 0;
1173 break;
1174 }
1175
1176 tmp = gfs2_lookupi(dir, &dotdot, 1);
1177 if (IS_ERR(tmp)) {
1178 error = PTR_ERR(tmp);
1179 break;
1180 }
1181
1182 iput(dir);
1183 dir = tmp;
1184 }
1185
1186 iput(dir);
1187
1188 return error;
1189}
1190
1191/** 1145/**
1192 * gfs2_readlinki - return the contents of a symlink 1146 * gfs2_readlinki - return the contents of a symlink
1193 * @ip: the symlink's inode 1147 * @ip: the symlink's inode
@@ -1207,8 +1161,8 @@ int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1207 unsigned int x; 1161 unsigned int x;
1208 int error; 1162 int error;
1209 1163
1210 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh); 1164 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
1211 error = gfs2_glock_nq_atime(&i_gh); 1165 error = gfs2_glock_nq(&i_gh);
1212 if (error) { 1166 if (error) {
1213 gfs2_holder_uninit(&i_gh); 1167 gfs2_holder_uninit(&i_gh);
1214 return error; 1168 return error;
@@ -1243,101 +1197,6 @@ out:
1243 return error; 1197 return error;
1244} 1198}
1245 1199
1246/**
1247 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
1248 * conditionally update the inode's atime
1249 * @gh: the holder to acquire
1250 *
1251 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
1252 * Update if the difference between the current time and the inode's current
1253 * atime is greater than an interval specified at mount.
1254 *
1255 * Returns: errno
1256 */
1257
1258int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1259{
1260 struct gfs2_glock *gl = gh->gh_gl;
1261 struct gfs2_sbd *sdp = gl->gl_sbd;
1262 struct gfs2_inode *ip = gl->gl_object;
1263 s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
1264 unsigned int state;
1265 int flags;
1266 int error;
1267 struct timespec tv = CURRENT_TIME;
1268
1269 if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
1270 gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
1271 gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
1272 return -EINVAL;
1273
1274 state = gh->gh_state;
1275 flags = gh->gh_flags;
1276
1277 error = gfs2_glock_nq(gh);
1278 if (error)
1279 return error;
1280
1281 if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
1282 (sdp->sd_vfs->s_flags & MS_RDONLY))
1283 return 0;
1284
1285 if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
1286 gfs2_glock_dq(gh);
1287 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1288 gh);
1289 error = gfs2_glock_nq(gh);
1290 if (error)
1291 return error;
1292
1293 /* Verify that atime hasn't been updated while we were
1294 trying to get exclusive lock. */
1295
1296 tv = CURRENT_TIME;
1297 if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
1298 struct buffer_head *dibh;
1299 struct gfs2_dinode *di;
1300
1301 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1302 if (error == -EROFS)
1303 return 0;
1304 if (error)
1305 goto fail;
1306
1307 error = gfs2_meta_inode_buffer(ip, &dibh);
1308 if (error)
1309 goto fail_end_trans;
1310
1311 ip->i_inode.i_atime = tv;
1312
1313 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1314 di = (struct gfs2_dinode *)dibh->b_data;
1315 di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1316 di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1317 brelse(dibh);
1318
1319 gfs2_trans_end(sdp);
1320 }
1321
1322 /* If someone else has asked for the glock,
1323 unlock and let them have it. Then reacquire
1324 in the original state. */
1325 if (gfs2_glock_is_blocking(gl)) {
1326 gfs2_glock_dq(gh);
1327 gfs2_holder_reinit(state, flags, gh);
1328 return gfs2_glock_nq(gh);
1329 }
1330 }
1331
1332 return 0;
1333
1334fail_end_trans:
1335 gfs2_trans_end(sdp);
1336fail:
1337 gfs2_glock_dq(gh);
1338 return error;
1339}
1340
1341static int 1200static int
1342__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) 1201__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1343{ 1202{
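
With the GL_ATIME machinery removed, gfs2_dinode_in() no longer overwrites the in-core atime unconditionally; it only adopts the on-disk value when timespec_compare() says it is newer than what the inode already holds. A small sketch of that keep-the-newest rule using plain struct timespec, with ts_compare() standing in for the kernel helper:

#include <stdio.h>
#include <time.h>

/* Same ordering as the kernel helper: negative if a is earlier than b. */
static int ts_compare(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec < b->tv_sec ? -1 : 1;
	if (a->tv_nsec != b->tv_nsec)
		return a->tv_nsec < b->tv_nsec ? -1 : 1;
	return 0;
}

/* Only let the on-disk atime move the in-core atime forward, never back. */
static void merge_atime(struct timespec *incore, const struct timespec *ondisk)
{
	if (ts_compare(incore, ondisk) < 0)
		*incore = *ondisk;
}

int main(void)
{
	struct timespec incore = { .tv_sec = 2000, .tv_nsec = 0 };
	struct timespec ondisk = { .tv_sec = 1000, .tv_nsec = 0 };

	merge_atime(&incore, &ondisk);
	printf("atime stays at %ld\n", (long)incore.tv_sec);    /* 2000 */
	return 0;
}
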
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 58f9607d6a86..2d43f69610a0 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -91,9 +91,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
91int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, 91int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
92 const struct gfs2_inode *ip); 92 const struct gfs2_inode *ip);
93int gfs2_permission(struct inode *inode, int mask); 93int gfs2_permission(struct inode *inode, int mask);
94int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
95int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len); 94int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
96int gfs2_glock_nq_atime(struct gfs2_holder *gh);
97int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); 95int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
98struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); 96struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
99void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); 97void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index 09d78c216f48..0c4cbe6c8285 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -144,7 +144,8 @@ static int gdlm_mount(char *table_name, char *host_data,
144 144
145 error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname), 145 error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname),
146 &ls->dlm_lockspace, 146 &ls->dlm_lockspace,
147 DLM_LSFL_FS | (nodir ? DLM_LSFL_NODIR : 0), 147 DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
148 (nodir ? DLM_LSFL_NODIR : 0),
148 GDLM_LVB_SIZE); 149 GDLM_LVB_SIZE);
149 if (error) { 150 if (error) {
150 log_error("dlm_new_lockspace error %d", error); 151 log_error("dlm_new_lockspace error %d", error);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 6c6af9f5e3ab..ad305854bdc6 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -18,6 +18,7 @@
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/kthread.h> 19#include <linux/kthread.h>
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/bio.h>
21 22
22#include "gfs2.h" 23#include "gfs2.h"
23#include "incore.h" 24#include "incore.h"
@@ -584,7 +585,6 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
584 memset(bh->b_data, 0, bh->b_size); 585 memset(bh->b_data, 0, bh->b_size);
585 set_buffer_uptodate(bh); 586 set_buffer_uptodate(bh);
586 clear_buffer_dirty(bh); 587 clear_buffer_dirty(bh);
587 unlock_buffer(bh);
588 588
589 gfs2_ail1_empty(sdp, 0); 589 gfs2_ail1_empty(sdp, 0);
590 tail = current_tail(sdp); 590 tail = current_tail(sdp);
@@ -601,8 +601,23 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
601 hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header)); 601 hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
602 lh->lh_hash = cpu_to_be32(hash); 602 lh->lh_hash = cpu_to_be32(hash);
603 603
604 set_buffer_dirty(bh); 604 bh->b_end_io = end_buffer_write_sync;
605 if (sync_dirty_buffer(bh)) 605 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
606 goto skip_barrier;
607 get_bh(bh);
608 submit_bh(WRITE_BARRIER | (1 << BIO_RW_META), bh);
609 wait_on_buffer(bh);
610 if (buffer_eopnotsupp(bh)) {
611 clear_buffer_eopnotsupp(bh);
612 set_buffer_uptodate(bh);
613 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
614 lock_buffer(bh);
615skip_barrier:
616 get_bh(bh);
617 submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh);
618 wait_on_buffer(bh);
619 }
620 if (!buffer_uptodate(bh))
606 gfs2_io_error_bh(sdp, bh); 621 gfs2_io_error_bh(sdp, bh);
607 brelse(bh); 622 brelse(bh);
608 623
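
The log-header write above now tries a barrier write first and, if the device reports barriers as unsupported (buffer_eopnotsupp()), sets SDF_NOBARRIERS and falls back to a plain synchronous write, so the probe happens at most once per mount. A simplified sketch of that try-once-and-remember pattern, with write_barrier()/write_sync() as hypothetical stand-ins for the two submit_bh() paths:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool no_barriers;        /* mirrors the SDF_NOBARRIERS flag */

/* Hypothetical back-ends; a real implementation would submit I/O here. */
static int write_barrier(const void *buf) { (void)buf; return -EOPNOTSUPP; }
static int write_sync(const void *buf)    { (void)buf; return 0; }

static int write_log_header(const void *buf)
{
	int err;

	if (!no_barriers) {
		err = write_barrier(buf);
		if (err != -EOPNOTSUPP)
			return err;
		/* Device can't do barriers: remember that and fall through. */
		no_barriers = true;
	}
	return write_sync(buf);
}

int main(void)
{
	char hdr[512] = { 0 };

	printf("first write: %d (falls back)\n", write_log_header(hdr));
	printf("second write: %d (skips the barrier attempt)\n",
	       write_log_header(hdr));
	return 0;
}
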
diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c
index b941f9f9f958..df48333e6f01 100644
--- a/fs/gfs2/mount.c
+++ b/fs/gfs2/mount.c
@@ -42,6 +42,7 @@ enum {
42 Opt_nosuiddir, 42 Opt_nosuiddir,
43 Opt_data_writeback, 43 Opt_data_writeback,
44 Opt_data_ordered, 44 Opt_data_ordered,
45 Opt_meta,
45 Opt_err, 46 Opt_err,
46}; 47};
47 48
@@ -66,6 +67,7 @@ static match_table_t tokens = {
66 {Opt_nosuiddir, "nosuiddir"}, 67 {Opt_nosuiddir, "nosuiddir"},
67 {Opt_data_writeback, "data=writeback"}, 68 {Opt_data_writeback, "data=writeback"},
68 {Opt_data_ordered, "data=ordered"}, 69 {Opt_data_ordered, "data=ordered"},
70 {Opt_meta, "meta"},
69 {Opt_err, NULL} 71 {Opt_err, NULL}
70}; 72};
71 73
@@ -239,6 +241,11 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
239 case Opt_data_ordered: 241 case Opt_data_ordered:
240 args->ar_data = GFS2_DATA_ORDERED; 242 args->ar_data = GFS2_DATA_ORDERED;
241 break; 243 break;
244 case Opt_meta:
245 if (remount && args->ar_meta != 1)
246 goto cant_remount;
247 args->ar_meta = 1;
248 break;
242 case Opt_err: 249 case Opt_err:
243 default: 250 default:
244 fs_info(sdp, "unknown option: %s\n", o); 251 fs_info(sdp, "unknown option: %s\n", o);
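
The mount.c hunk adds a "meta" entry to the token table and rejects turning it on at remount time. A compact userspace sketch of the same table-driven option parsing, using strtok()/strcmp() in place of the kernel's match_token():

#include <stdio.h>
#include <string.h>

enum { OPT_META, OPT_SPECTATOR, OPT_ERR };

static const struct { int token; const char *pattern; } tokens[] = {
	{ OPT_META,      "meta" },
	{ OPT_SPECTATOR, "spectator" },
	{ OPT_ERR,       NULL },
};

static int match_opt(const char *o)
{
	for (int i = 0; tokens[i].pattern; i++)
		if (!strcmp(o, tokens[i].pattern))
			return tokens[i].token;
	return OPT_ERR;
}

int main(void)
{
	char data[] = "spectator,meta,bogus";
	int meta = 0, spectator = 0;
	char *o;

	for (o = strtok(data, ","); o; o = strtok(NULL, ",")) {
		switch (match_opt(o)) {
		case OPT_META:      meta = 1;      break;
		case OPT_SPECTATOR: spectator = 1; break;
		default: fprintf(stderr, "unknown option: %s\n", o);
		}
	}
	printf("meta=%d spectator=%d\n", meta, spectator);
	return 0;
}
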
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index e64a1b04117a..27563816e1c5 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -512,8 +512,8 @@ static int gfs2_readpage(struct file *file, struct page *page)
512 int error; 512 int error;
513 513
514 unlock_page(page); 514 unlock_page(page);
515 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 515 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
516 error = gfs2_glock_nq_atime(&gh); 516 error = gfs2_glock_nq(&gh);
517 if (unlikely(error)) 517 if (unlikely(error))
518 goto out; 518 goto out;
519 error = AOP_TRUNCATED_PAGE; 519 error = AOP_TRUNCATED_PAGE;
@@ -594,8 +594,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
594 struct gfs2_holder gh; 594 struct gfs2_holder gh;
595 int ret; 595 int ret;
596 596
597 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 597 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
598 ret = gfs2_glock_nq_atime(&gh); 598 ret = gfs2_glock_nq(&gh);
599 if (unlikely(ret)) 599 if (unlikely(ret))
600 goto out_uninit; 600 goto out_uninit;
601 if (!gfs2_is_stuffed(ip)) 601 if (!gfs2_is_stuffed(ip))
@@ -636,8 +636,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
636 unsigned to = from + len; 636 unsigned to = from + len;
637 struct page *page; 637 struct page *page;
638 638
639 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh); 639 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
640 error = gfs2_glock_nq_atime(&ip->i_gh); 640 error = gfs2_glock_nq(&ip->i_gh);
641 if (unlikely(error)) 641 if (unlikely(error))
642 goto out_uninit; 642 goto out_uninit;
643 643
@@ -975,7 +975,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
975 if (gfs2_is_stuffed(ip)) 975 if (gfs2_is_stuffed(ip))
976 return 0; 976 return 0;
977 977
978 if (offset > i_size_read(&ip->i_inode)) 978 if (offset >= i_size_read(&ip->i_inode))
979 return 0; 979 return 0;
980 return 1; 980 return 1;
981} 981}
@@ -1000,8 +1000,8 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
1000 * unfortunately have the option of only flushing a range like 1000 * unfortunately have the option of only flushing a range like
1001 * the VFS does. 1001 * the VFS does.
1002 */ 1002 */
1003 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh); 1003 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
1004 rv = gfs2_glock_nq_atime(&gh); 1004 rv = gfs2_glock_nq(&gh);
1005 if (rv) 1005 if (rv)
1006 return rv; 1006 return rv;
1007 rv = gfs2_ok_for_dio(ip, rw, offset); 1007 rv = gfs2_ok_for_dio(ip, rw, offset);
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index e9a366d4411c..3a747f8e2188 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -89,8 +89,8 @@ static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
89 u64 offset = file->f_pos; 89 u64 offset = file->f_pos;
90 int error; 90 int error;
91 91
92 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh); 92 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
93 error = gfs2_glock_nq_atime(&d_gh); 93 error = gfs2_glock_nq(&d_gh);
94 if (error) { 94 if (error) {
95 gfs2_holder_uninit(&d_gh); 95 gfs2_holder_uninit(&d_gh);
96 return error; 96 return error;
@@ -153,8 +153,8 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
153 int error; 153 int error;
154 u32 fsflags; 154 u32 fsflags;
155 155
156 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 156 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
157 error = gfs2_glock_nq_atime(&gh); 157 error = gfs2_glock_nq(&gh);
158 if (error) 158 if (error)
159 return error; 159 return error;
160 160
@@ -351,8 +351,8 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
351 struct gfs2_alloc *al; 351 struct gfs2_alloc *al;
352 int ret; 352 int ret;
353 353
354 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &gh); 354 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
355 ret = gfs2_glock_nq_atime(&gh); 355 ret = gfs2_glock_nq(&gh);
356 if (ret) 356 if (ret)
357 goto out; 357 goto out;
358 358
@@ -434,8 +434,8 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
434 struct gfs2_holder i_gh; 434 struct gfs2_holder i_gh;
435 int error; 435 int error;
436 436
437 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh); 437 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
438 error = gfs2_glock_nq_atime(&i_gh); 438 error = gfs2_glock_nq(&i_gh);
439 if (error) { 439 if (error) {
440 gfs2_holder_uninit(&i_gh); 440 gfs2_holder_uninit(&i_gh);
441 return error; 441 return error;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index b4d1d6490633..b117fcf2c4f5 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -40,6 +40,44 @@
40#define DO 0 40#define DO 0
41#define UNDO 1 41#define UNDO 1
42 42
43static const u32 gfs2_old_fs_formats[] = {
44 0
45};
46
47static const u32 gfs2_old_multihost_formats[] = {
48 0
49};
50
51/**
52 * gfs2_tune_init - Fill a gfs2_tune structure with default values
53 * @gt: tune
54 *
55 */
56
57static void gfs2_tune_init(struct gfs2_tune *gt)
58{
59 spin_lock_init(&gt->gt_spin);
60
61 gt->gt_demote_secs = 300;
62 gt->gt_incore_log_blocks = 1024;
63 gt->gt_log_flush_secs = 60;
64 gt->gt_recoverd_secs = 60;
65 gt->gt_logd_secs = 1;
66 gt->gt_quotad_secs = 5;
67 gt->gt_quota_simul_sync = 64;
68 gt->gt_quota_warn_period = 10;
69 gt->gt_quota_scale_num = 1;
70 gt->gt_quota_scale_den = 1;
71 gt->gt_quota_cache_secs = 300;
72 gt->gt_quota_quantum = 60;
73 gt->gt_new_files_jdata = 0;
74 gt->gt_max_readahead = 1 << 18;
75 gt->gt_stall_secs = 600;
76 gt->gt_complain_secs = 10;
77 gt->gt_statfs_quantum = 30;
78 gt->gt_statfs_slow = 0;
79}
80
43static struct gfs2_sbd *init_sbd(struct super_block *sb) 81static struct gfs2_sbd *init_sbd(struct super_block *sb)
44{ 82{
45 struct gfs2_sbd *sdp; 83 struct gfs2_sbd *sdp;
@@ -96,21 +134,271 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
96 return sdp; 134 return sdp;
97} 135}
98 136
99static void init_vfs(struct super_block *sb, unsigned noatime) 137
138/**
139 * gfs2_check_sb - Check superblock
140 * @sdp: the filesystem
141 * @sb: The superblock
142 * @silent: Don't print a message if the check fails
143 *
144 * Checks the version code of the FS is one that we understand how to
145 * read and that the sizes of the various on-disk structures have not
146 * changed.
147 */
148
149static int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
100{ 150{
101 struct gfs2_sbd *sdp = sb->s_fs_info; 151 unsigned int x;
102 152
103 sb->s_magic = GFS2_MAGIC; 153 if (sb->sb_magic != GFS2_MAGIC ||
104 sb->s_op = &gfs2_super_ops; 154 sb->sb_type != GFS2_METATYPE_SB) {
105 sb->s_export_op = &gfs2_export_ops; 155 if (!silent)
106 sb->s_time_gran = 1; 156 printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n");
107 sb->s_maxbytes = MAX_LFS_FILESIZE; 157 return -EINVAL;
158 }
159
160 /* If format numbers match exactly, we're done. */
161
162 if (sb->sb_fs_format == GFS2_FORMAT_FS &&
163 sb->sb_multihost_format == GFS2_FORMAT_MULTI)
164 return 0;
165
166 if (sb->sb_fs_format != GFS2_FORMAT_FS) {
167 for (x = 0; gfs2_old_fs_formats[x]; x++)
168 if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
169 break;
170
171 if (!gfs2_old_fs_formats[x]) {
172 printk(KERN_WARNING
173 "GFS2: code version (%u, %u) is incompatible "
174 "with ondisk format (%u, %u)\n",
175 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
176 sb->sb_fs_format, sb->sb_multihost_format);
177 printk(KERN_WARNING
178 "GFS2: I don't know how to upgrade this FS\n");
179 return -EINVAL;
180 }
181 }
182
183 if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
184 for (x = 0; gfs2_old_multihost_formats[x]; x++)
185 if (gfs2_old_multihost_formats[x] ==
186 sb->sb_multihost_format)
187 break;
188
189 if (!gfs2_old_multihost_formats[x]) {
190 printk(KERN_WARNING
191 "GFS2: code version (%u, %u) is incompatible "
192 "with ondisk format (%u, %u)\n",
193 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
194 sb->sb_fs_format, sb->sb_multihost_format);
195 printk(KERN_WARNING
196 "GFS2: I don't know how to upgrade this FS\n");
197 return -EINVAL;
198 }
199 }
200
201 if (!sdp->sd_args.ar_upgrade) {
202 printk(KERN_WARNING
203 "GFS2: code version (%u, %u) is incompatible "
204 "with ondisk format (%u, %u)\n",
205 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
206 sb->sb_fs_format, sb->sb_multihost_format);
207 printk(KERN_INFO
208 "GFS2: Use the \"upgrade\" mount option to upgrade "
209 "the FS\n");
210 printk(KERN_INFO "GFS2: See the manual for more details\n");
211 return -EINVAL;
212 }
213
214 return 0;
215}
216
217static void end_bio_io_page(struct bio *bio, int error)
218{
219 struct page *page = bio->bi_private;
108 220
109 if (sb->s_flags & (MS_NOATIME | MS_NODIRATIME)) 221 if (!error)
110 set_bit(noatime, &sdp->sd_flags); 222 SetPageUptodate(page);
223 else
224 printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);
225 unlock_page(page);
226}
227
228static void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
229{
230 const struct gfs2_sb *str = buf;
231
232 sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
233 sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
234 sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
235 sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
236 sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
237 sb->sb_bsize = be32_to_cpu(str->sb_bsize);
238 sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
239 sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
240 sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
241 sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
242 sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);
243
244 memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
245 memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
246}
247
248/**
249 * gfs2_read_super - Read the gfs2 super block from disk
250 * @sdp: The GFS2 super block
251 * @sector: The location of the super block
252 * @error: The error code to return
253 *
254 * This uses the bio functions to read the super block from disk
255 * because we want to be 100% sure that we never read cached data.
256 * A super block is read twice only during each GFS2 mount and is
257 * never written to by the filesystem. The first time it's read, no
258 * locks are held, and the only details which are looked at are those
259 * relating to the locking protocol. Once locking is up and working,
260 * the sb is read again under the lock to establish the location of
261 * the master directory (contains pointers to journals etc) and the
262 * the master directory (which contains pointers to journals etc.) and the
263 *
264 * Returns: 0 on success or error
265 */
266
267static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
268{
269 struct super_block *sb = sdp->sd_vfs;
270 struct gfs2_sb *p;
271 struct page *page;
272 struct bio *bio;
273
274 page = alloc_page(GFP_NOFS);
275 if (unlikely(!page))
276 return -ENOBUFS;
277
278 ClearPageUptodate(page);
279 ClearPageDirty(page);
280 lock_page(page);
281
282 bio = bio_alloc(GFP_NOFS, 1);
283 if (unlikely(!bio)) {
284 __free_page(page);
285 return -ENOBUFS;
286 }
111 287
112 /* Don't let the VFS update atimes. GFS2 handles this itself. */ 288 bio->bi_sector = sector * (sb->s_blocksize >> 9);
113 sb->s_flags |= MS_NOATIME | MS_NODIRATIME; 289 bio->bi_bdev = sb->s_bdev;
290 bio_add_page(bio, page, PAGE_SIZE, 0);
291
292 bio->bi_end_io = end_bio_io_page;
293 bio->bi_private = page;
294 submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
295 wait_on_page_locked(page);
296 bio_put(bio);
297 if (!PageUptodate(page)) {
298 __free_page(page);
299 return -EIO;
300 }
301 p = kmap(page);
302 gfs2_sb_in(&sdp->sd_sb, p);
303 kunmap(page);
304 __free_page(page);
305 return 0;
306}
307/**
308 * gfs2_read_sb - Read super block
309 * @sdp: The GFS2 superblock
310 * @gl: the glock for the superblock (assumed to be held)
311 * @silent: Don't print message if mount fails
312 *
313 */
314
315static int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
316{
317 u32 hash_blocks, ind_blocks, leaf_blocks;
318 u32 tmp_blocks;
319 unsigned int x;
320 int error;
321
322 error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
323 if (error) {
324 if (!silent)
325 fs_err(sdp, "can't read superblock\n");
326 return error;
327 }
328
329 error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
330 if (error)
331 return error;
332
333 sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
334 GFS2_BASIC_BLOCK_SHIFT;
335 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
336 sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
337 sizeof(struct gfs2_dinode)) / sizeof(u64);
338 sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
339 sizeof(struct gfs2_meta_header)) / sizeof(u64);
340 sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
341 sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
342 sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
343 sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
344 sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
345 sizeof(struct gfs2_meta_header)) /
346 sizeof(struct gfs2_quota_change);
347
348 /* Compute maximum reservation required to add an entry to a directory */
349
350 hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
351 sdp->sd_jbsize);
352
353 ind_blocks = 0;
354 for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
355 tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
356 ind_blocks += tmp_blocks;
357 }
358
359 leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;
360
361 sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
362
363 sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
364 sizeof(struct gfs2_dinode);
365 sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
366 for (x = 2;; x++) {
367 u64 space, d;
368 u32 m;
369
370 space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
371 d = space;
372 m = do_div(d, sdp->sd_inptrs);
373
374 if (d != sdp->sd_heightsize[x - 1] || m)
375 break;
376 sdp->sd_heightsize[x] = space;
377 }
378 sdp->sd_max_height = x;
379 sdp->sd_heightsize[x] = ~0;
380 gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
381
382 sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
383 sizeof(struct gfs2_dinode);
384 sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
385 for (x = 2;; x++) {
386 u64 space, d;
387 u32 m;
388
389 space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
390 d = space;
391 m = do_div(d, sdp->sd_inptrs);
392
393 if (d != sdp->sd_jheightsize[x - 1] || m)
394 break;
395 sdp->sd_jheightsize[x] = space;
396 }
397 sdp->sd_max_jheight = x;
398 sdp->sd_jheightsize[x] = ~0;
399 gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);
400
401 return 0;
114} 402}
115 403
116static int init_names(struct gfs2_sbd *sdp, int silent) 404static int init_names(struct gfs2_sbd *sdp, int silent)
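
gfs2_read_sb() above derives, from the block size, how many pointers fit in a dinode (sd_diptrs) and in an indirect block (sd_inptrs), then fills sd_heightsize[] with the largest file size reachable at each metadata-tree height, stopping once the 64-bit multiply would overflow. A standalone sketch of that table for an assumed 4 KiB block size; the header sizes are placeholders, not the real on-disk values:

#include <inttypes.h>
#include <stdio.h>

#define MAX_HEIGHT 10

/* Placeholder header sizes; the real values come from gfs2_ondisk.h. */
#define DINODE_SIZE 232u
#define META_HEADER_SIZE 24u

int main(void)
{
	uint32_t bsize = 4096;                          /* assumed block size */
	uint64_t diptrs = (bsize - DINODE_SIZE) / sizeof(uint64_t);
	uint64_t inptrs = (bsize - META_HEADER_SIZE) / sizeof(uint64_t);
	uint64_t heightsize[MAX_HEIGHT];
	unsigned int x;

	heightsize[0] = bsize - DINODE_SIZE;            /* stuffed inode */
	heightsize[1] = (uint64_t)bsize * diptrs;       /* direct pointers */

	for (x = 2; x < MAX_HEIGHT; x++) {
		uint64_t space = heightsize[x - 1] * inptrs;

		/* Overflow check analogous to the kernel's do_div() test:
		 * stop once the product no longer fits in 64 bits. */
		if (space / inptrs != heightsize[x - 1])
			break;
		heightsize[x] = space;
	}

	for (unsigned int i = 0; i < x; i++)
		printf("height %u: max size %" PRIu64 " bytes\n",
		       i, heightsize[i]);
	printf("max height: %u\n", x);
	return 0;
}
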
@@ -224,51 +512,59 @@ fail:
224 return error; 512 return error;
225} 513}
226 514
227static inline struct inode *gfs2_lookup_root(struct super_block *sb, 515static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
228 u64 no_addr) 516 u64 no_addr, const char *name)
229{ 517{
230 return gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0); 518 struct gfs2_sbd *sdp = sb->s_fs_info;
519 struct dentry *dentry;
520 struct inode *inode;
521
522 inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
523 if (IS_ERR(inode)) {
524 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
525 return PTR_ERR(inode);
526 }
527 dentry = d_alloc_root(inode);
528 if (!dentry) {
529 fs_err(sdp, "can't alloc %s dentry\n", name);
530 iput(inode);
531 return -ENOMEM;
532 }
533 dentry->d_op = &gfs2_dops;
534 *dptr = dentry;
535 return 0;
231} 536}
232 537
233static int init_sb(struct gfs2_sbd *sdp, int silent, int undo) 538static int init_sb(struct gfs2_sbd *sdp, int silent)
234{ 539{
235 struct super_block *sb = sdp->sd_vfs; 540 struct super_block *sb = sdp->sd_vfs;
236 struct gfs2_holder sb_gh; 541 struct gfs2_holder sb_gh;
237 u64 no_addr; 542 u64 no_addr;
238 struct inode *inode; 543 int ret;
239 int error = 0;
240 544
241 if (undo) { 545 ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
242 if (sb->s_root) { 546 LM_ST_SHARED, 0, &sb_gh);
243 dput(sb->s_root); 547 if (ret) {
244 sb->s_root = NULL; 548 fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
245 } 549 return ret;
246 return 0;
247 } 550 }
248 551
249 error = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops, 552 ret = gfs2_read_sb(sdp, sb_gh.gh_gl, silent);
250 LM_ST_SHARED, 0, &sb_gh); 553 if (ret) {
251 if (error) { 554 fs_err(sdp, "can't read superblock: %d\n", ret);
252 fs_err(sdp, "can't acquire superblock glock: %d\n", error);
253 return error;
254 }
255
256 error = gfs2_read_sb(sdp, sb_gh.gh_gl, silent);
257 if (error) {
258 fs_err(sdp, "can't read superblock: %d\n", error);
259 goto out; 555 goto out;
260 } 556 }
261 557
262 /* Set up the buffer cache and SB for real */ 558 /* Set up the buffer cache and SB for real */
263 if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) { 559 if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) {
264 error = -EINVAL; 560 ret = -EINVAL;
265 fs_err(sdp, "FS block size (%u) is too small for device " 561 fs_err(sdp, "FS block size (%u) is too small for device "
266 "block size (%u)\n", 562 "block size (%u)\n",
267 sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev)); 563 sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev));
268 goto out; 564 goto out;
269 } 565 }
270 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) { 566 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
271 error = -EINVAL; 567 ret = -EINVAL;
272 fs_err(sdp, "FS block size (%u) is too big for machine " 568 fs_err(sdp, "FS block size (%u) is too big for machine "
273 "page size (%u)\n", 569 "page size (%u)\n",
274 sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE); 570 sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
@@ -278,26 +574,21 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
278 574
279 /* Get the root inode */ 575 /* Get the root inode */
280 no_addr = sdp->sd_sb.sb_root_dir.no_addr; 576 no_addr = sdp->sd_sb.sb_root_dir.no_addr;
281 if (sb->s_type == &gfs2meta_fs_type) 577 ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
282 no_addr = sdp->sd_sb.sb_master_dir.no_addr; 578 if (ret)
283 inode = gfs2_lookup_root(sb, no_addr);
284 if (IS_ERR(inode)) {
285 error = PTR_ERR(inode);
286 fs_err(sdp, "can't read in root inode: %d\n", error);
287 goto out; 579 goto out;
288 }
289 580
290 sb->s_root = d_alloc_root(inode); 581 /* Get the master inode */
291 if (!sb->s_root) { 582 no_addr = sdp->sd_sb.sb_master_dir.no_addr;
292 fs_err(sdp, "can't get root dentry\n"); 583 ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
293 error = -ENOMEM; 584 if (ret) {
294 iput(inode); 585 dput(sdp->sd_root_dir);
295 } else 586 goto out;
296 sb->s_root->d_op = &gfs2_dops; 587 }
297 588 sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
298out: 589out:
299 gfs2_glock_dq_uninit(&sb_gh); 590 gfs2_glock_dq_uninit(&sb_gh);
300 return error; 591 return ret;
301} 592}
302 593
303/** 594/**
@@ -372,6 +663,7 @@ static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
372 663
373static int init_journal(struct gfs2_sbd *sdp, int undo) 664static int init_journal(struct gfs2_sbd *sdp, int undo)
374{ 665{
666 struct inode *master = sdp->sd_master_dir->d_inode;
375 struct gfs2_holder ji_gh; 667 struct gfs2_holder ji_gh;
376 struct task_struct *p; 668 struct task_struct *p;
377 struct gfs2_inode *ip; 669 struct gfs2_inode *ip;
@@ -383,7 +675,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
383 goto fail_recoverd; 675 goto fail_recoverd;
384 } 676 }
385 677
386 sdp->sd_jindex = gfs2_lookup_simple(sdp->sd_master_dir, "jindex"); 678 sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
387 if (IS_ERR(sdp->sd_jindex)) { 679 if (IS_ERR(sdp->sd_jindex)) {
388 fs_err(sdp, "can't lookup journal index: %d\n", error); 680 fs_err(sdp, "can't lookup journal index: %d\n", error);
389 return PTR_ERR(sdp->sd_jindex); 681 return PTR_ERR(sdp->sd_jindex);
@@ -506,25 +798,17 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
506{ 798{
507 int error = 0; 799 int error = 0;
508 struct gfs2_inode *ip; 800 struct gfs2_inode *ip;
509 struct inode *inode; 801 struct inode *master = sdp->sd_master_dir->d_inode;
510 802
511 if (undo) 803 if (undo)
512 goto fail_qinode; 804 goto fail_qinode;
513 805
514 inode = gfs2_lookup_root(sdp->sd_vfs, sdp->sd_sb.sb_master_dir.no_addr);
515 if (IS_ERR(inode)) {
516 error = PTR_ERR(inode);
517 fs_err(sdp, "can't read in master directory: %d\n", error);
518 goto fail;
519 }
520 sdp->sd_master_dir = inode;
521
522 error = init_journal(sdp, undo); 806 error = init_journal(sdp, undo);
523 if (error) 807 if (error)
524 goto fail_master; 808 goto fail;
525 809
526 /* Read in the master inode number inode */ 810 /* Read in the master inode number inode */
527 sdp->sd_inum_inode = gfs2_lookup_simple(sdp->sd_master_dir, "inum"); 811 sdp->sd_inum_inode = gfs2_lookup_simple(master, "inum");
528 if (IS_ERR(sdp->sd_inum_inode)) { 812 if (IS_ERR(sdp->sd_inum_inode)) {
529 error = PTR_ERR(sdp->sd_inum_inode); 813 error = PTR_ERR(sdp->sd_inum_inode);
530 fs_err(sdp, "can't read in inum inode: %d\n", error); 814 fs_err(sdp, "can't read in inum inode: %d\n", error);
@@ -533,7 +817,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
533 817
534 818
535 /* Read in the master statfs inode */ 819 /* Read in the master statfs inode */
536 sdp->sd_statfs_inode = gfs2_lookup_simple(sdp->sd_master_dir, "statfs"); 820 sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
537 if (IS_ERR(sdp->sd_statfs_inode)) { 821 if (IS_ERR(sdp->sd_statfs_inode)) {
538 error = PTR_ERR(sdp->sd_statfs_inode); 822 error = PTR_ERR(sdp->sd_statfs_inode);
539 fs_err(sdp, "can't read in statfs inode: %d\n", error); 823 fs_err(sdp, "can't read in statfs inode: %d\n", error);
@@ -541,7 +825,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
541 } 825 }
542 826
543 /* Read in the resource index inode */ 827 /* Read in the resource index inode */
544 sdp->sd_rindex = gfs2_lookup_simple(sdp->sd_master_dir, "rindex"); 828 sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
545 if (IS_ERR(sdp->sd_rindex)) { 829 if (IS_ERR(sdp->sd_rindex)) {
546 error = PTR_ERR(sdp->sd_rindex); 830 error = PTR_ERR(sdp->sd_rindex);
547 fs_err(sdp, "can't get resource index inode: %d\n", error); 831 fs_err(sdp, "can't get resource index inode: %d\n", error);
@@ -552,7 +836,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
552 sdp->sd_rindex_uptodate = 0; 836 sdp->sd_rindex_uptodate = 0;
553 837
554 /* Read in the quota inode */ 838 /* Read in the quota inode */
555 sdp->sd_quota_inode = gfs2_lookup_simple(sdp->sd_master_dir, "quota"); 839 sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
556 if (IS_ERR(sdp->sd_quota_inode)) { 840 if (IS_ERR(sdp->sd_quota_inode)) {
557 error = PTR_ERR(sdp->sd_quota_inode); 841 error = PTR_ERR(sdp->sd_quota_inode);
558 fs_err(sdp, "can't get quota file inode: %d\n", error); 842 fs_err(sdp, "can't get quota file inode: %d\n", error);
@@ -571,8 +855,6 @@ fail_inum:
571 iput(sdp->sd_inum_inode); 855 iput(sdp->sd_inum_inode);
572fail_journal: 856fail_journal:
573 init_journal(sdp, UNDO); 857 init_journal(sdp, UNDO);
574fail_master:
575 iput(sdp->sd_master_dir);
576fail: 858fail:
577 return error; 859 return error;
578} 860}
@@ -583,6 +865,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
583 char buf[30]; 865 char buf[30];
584 int error = 0; 866 int error = 0;
585 struct gfs2_inode *ip; 867 struct gfs2_inode *ip;
868 struct inode *master = sdp->sd_master_dir->d_inode;
586 869
587 if (sdp->sd_args.ar_spectator) 870 if (sdp->sd_args.ar_spectator)
588 return 0; 871 return 0;
@@ -590,7 +873,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
590 if (undo) 873 if (undo)
591 goto fail_qc_gh; 874 goto fail_qc_gh;
592 875
593 pn = gfs2_lookup_simple(sdp->sd_master_dir, "per_node"); 876 pn = gfs2_lookup_simple(master, "per_node");
594 if (IS_ERR(pn)) { 877 if (IS_ERR(pn)) {
595 error = PTR_ERR(pn); 878 error = PTR_ERR(pn);
596 fs_err(sdp, "can't find per_node directory: %d\n", error); 879 fs_err(sdp, "can't find per_node directory: %d\n", error);
@@ -800,7 +1083,11 @@ static int fill_super(struct super_block *sb, void *data, int silent)
800 goto fail; 1083 goto fail;
801 } 1084 }
802 1085
803 init_vfs(sb, SDF_NOATIME); 1086 sb->s_magic = GFS2_MAGIC;
1087 sb->s_op = &gfs2_super_ops;
1088 sb->s_export_op = &gfs2_export_ops;
1089 sb->s_time_gran = 1;
1090 sb->s_maxbytes = MAX_LFS_FILESIZE;
804 1091
805 /* Set up the buffer cache and fill in some fake block size values 1092 /* Set up the buffer cache and fill in some fake block size values
806 to allow us to read-in the on-disk superblock. */ 1093 to allow us to read-in the on-disk superblock. */
@@ -828,7 +1115,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
828 if (error) 1115 if (error)
829 goto fail_lm; 1116 goto fail_lm;
830 1117
831 error = init_sb(sdp, silent, DO); 1118 error = init_sb(sdp, silent);
832 if (error) 1119 if (error)
833 goto fail_locking; 1120 goto fail_locking;
834 1121
@@ -869,7 +1156,11 @@ fail_per_node:
869fail_inodes: 1156fail_inodes:
870 init_inodes(sdp, UNDO); 1157 init_inodes(sdp, UNDO);
871fail_sb: 1158fail_sb:
872 init_sb(sdp, 0, UNDO); 1159 if (sdp->sd_root_dir)
1160 dput(sdp->sd_root_dir);
1161 if (sdp->sd_master_dir)
1162 dput(sdp->sd_master_dir);
1163 sb->s_root = NULL;
873fail_locking: 1164fail_locking:
874 init_locking(sdp, &mount_gh, UNDO); 1165 init_locking(sdp, &mount_gh, UNDO);
875fail_lm: 1166fail_lm:
@@ -887,151 +1178,63 @@ fail:
887} 1178}
888 1179
889static int gfs2_get_sb(struct file_system_type *fs_type, int flags, 1180static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
890 const char *dev_name, void *data, struct vfsmount *mnt) 1181 const char *dev_name, void *data, struct vfsmount *mnt)
891{ 1182{
892 struct super_block *sb; 1183 return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt);
893 struct gfs2_sbd *sdp;
894 int error = get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt);
895 if (error)
896 goto out;
897 sb = mnt->mnt_sb;
898 sdp = sb->s_fs_info;
899 sdp->sd_gfs2mnt = mnt;
900out:
901 return error;
902} 1184}
903 1185
904static int fill_super_meta(struct super_block *sb, struct super_block *new, 1186static struct super_block *get_gfs2_sb(const char *dev_name)
905 void *data, int silent)
906{ 1187{
907 struct gfs2_sbd *sdp = sb->s_fs_info; 1188 struct super_block *sb;
908 struct inode *inode;
909 int error = 0;
910
911 new->s_fs_info = sdp;
912 sdp->sd_vfs_meta = sb;
913
914 init_vfs(new, SDF_NOATIME);
915
916 /* Get the master inode */
917 inode = igrab(sdp->sd_master_dir);
918
919 new->s_root = d_alloc_root(inode);
920 if (!new->s_root) {
921 fs_err(sdp, "can't get root dentry\n");
922 error = -ENOMEM;
923 iput(inode);
924 } else
925 new->s_root->d_op = &gfs2_dops;
926
927 return error;
928}
929
930static int set_bdev_super(struct super_block *s, void *data)
931{
932 s->s_bdev = data;
933 s->s_dev = s->s_bdev->bd_dev;
934 return 0;
935}
936
937static int test_bdev_super(struct super_block *s, void *data)
938{
939 return s->s_bdev == data;
940}
941
942static struct super_block* get_gfs2_sb(const char *dev_name)
943{
944 struct kstat stat;
945 struct nameidata nd; 1189 struct nameidata nd;
946 struct super_block *sb = NULL, *s;
947 int error; 1190 int error;
948 1191
949 error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); 1192 error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
950 if (error) { 1193 if (error) {
951 printk(KERN_WARNING "GFS2: path_lookup on %s returned error\n", 1194 printk(KERN_WARNING "GFS2: path_lookup on %s returned error %d\n",
952 dev_name); 1195 dev_name, error);
953 goto out; 1196 return NULL;
954 }
955 error = vfs_getattr(nd.path.mnt, nd.path.dentry, &stat);
956
957 list_for_each_entry(s, &gfs2_fs_type.fs_supers, s_instances) {
958 if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
959 (S_ISDIR(stat.mode) &&
960 s == nd.path.dentry->d_inode->i_sb)) {
961 sb = s;
962 goto free_nd;
963 }
964 } 1197 }
965 1198 sb = nd.path.dentry->d_inode->i_sb;
966 printk(KERN_WARNING "GFS2: Unrecognized block device or " 1199 if (sb && (sb->s_type == &gfs2_fs_type))
967 "mount point %s\n", dev_name); 1200 atomic_inc(&sb->s_active);
968 1201 else
969free_nd: 1202 sb = NULL;
970 path_put(&nd.path); 1203 path_put(&nd.path);
971out:
972 return sb; 1204 return sb;
973} 1205}
974 1206
975static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags, 1207static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
976 const char *dev_name, void *data, struct vfsmount *mnt) 1208 const char *dev_name, void *data, struct vfsmount *mnt)
977{ 1209{
978 int error = 0; 1210 struct super_block *sb = NULL;
979 struct super_block *sb = NULL, *new;
980 struct gfs2_sbd *sdp; 1211 struct gfs2_sbd *sdp;
981 1212
982 sb = get_gfs2_sb(dev_name); 1213 sb = get_gfs2_sb(dev_name);
983 if (!sb) { 1214 if (!sb) {
984 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); 1215 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
985 error = -ENOENT; 1216 return -ENOENT;
986 goto error;
987 } 1217 }
988 sdp = sb->s_fs_info; 1218 sdp = sb->s_fs_info;
989 if (sdp->sd_vfs_meta) { 1219 mnt->mnt_sb = sb;
990 printk(KERN_WARNING "GFS2: gfs2meta mount already exists\n"); 1220 mnt->mnt_root = dget(sdp->sd_master_dir);
991 error = -EBUSY; 1221 return 0;
992 goto error;
993 }
994 down(&sb->s_bdev->bd_mount_sem);
995 new = sget(fs_type, test_bdev_super, set_bdev_super, sb->s_bdev);
996 up(&sb->s_bdev->bd_mount_sem);
997 if (IS_ERR(new)) {
998 error = PTR_ERR(new);
999 goto error;
1000 }
1001 new->s_flags = flags;
1002 strlcpy(new->s_id, sb->s_id, sizeof(new->s_id));
1003 sb_set_blocksize(new, sb->s_blocksize);
1004 error = fill_super_meta(sb, new, data, flags & MS_SILENT ? 1 : 0);
1005 if (error) {
1006 up_write(&new->s_umount);
1007 deactivate_super(new);
1008 goto error;
1009 }
1010
1011 new->s_flags |= MS_ACTIVE;
1012
1013 /* Grab a reference to the gfs2 mount point */
1014 atomic_inc(&sdp->sd_gfs2mnt->mnt_count);
1015 return simple_set_mnt(mnt, new);
1016error:
1017 return error;
1018} 1222}
1019 1223
1020static void gfs2_kill_sb(struct super_block *sb) 1224static void gfs2_kill_sb(struct super_block *sb)
1021{ 1225{
1022 if (sb->s_fs_info) { 1226 struct gfs2_sbd *sdp = sb->s_fs_info;
1023 gfs2_delete_debugfs_file(sb->s_fs_info); 1227 if (sdp) {
1024 gfs2_meta_syncfs(sb->s_fs_info); 1228 gfs2_meta_syncfs(sdp);
1229 dput(sdp->sd_root_dir);
1230 dput(sdp->sd_master_dir);
1231 sdp->sd_root_dir = NULL;
1232 sdp->sd_master_dir = NULL;
1025 } 1233 }
1234 shrink_dcache_sb(sb);
1026 kill_block_super(sb); 1235 kill_block_super(sb);
1027} 1236 if (sdp)
1028 1237 gfs2_delete_debugfs_file(sdp);
1029static void gfs2_kill_sb_meta(struct super_block *sb)
1030{
1031 struct gfs2_sbd *sdp = sb->s_fs_info;
1032 generic_shutdown_super(sb);
1033 sdp->sd_vfs_meta = NULL;
1034 atomic_dec(&sdp->sd_gfs2mnt->mnt_count);
1035} 1238}
1036 1239
1037struct file_system_type gfs2_fs_type = { 1240struct file_system_type gfs2_fs_type = {
@@ -1046,7 +1249,6 @@ struct file_system_type gfs2meta_fs_type = {
1046 .name = "gfs2meta", 1249 .name = "gfs2meta",
1047 .fs_flags = FS_REQUIRES_DEV, 1250 .fs_flags = FS_REQUIRES_DEV,
1048 .get_sb = gfs2_get_sb_meta, 1251 .get_sb = gfs2_get_sb_meta,
1049 .kill_sb = gfs2_kill_sb_meta,
1050 .owner = THIS_MODULE, 1252 .owner = THIS_MODULE,
1051}; 1253};
1052 1254
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index e2c62f73a778..534e1e2c65ca 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -159,9 +159,13 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
159 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 159 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
160 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 160 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
161 161
162 error = gfs2_glock_nq_m(2, ghs); 162 error = gfs2_glock_nq(ghs); /* parent */
163 if (error) 163 if (error)
164 goto out; 164 goto out_parent;
165
166 error = gfs2_glock_nq(ghs + 1); /* child */
167 if (error)
168 goto out_child;
165 169
166 error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC); 170 error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC);
167 if (error) 171 if (error)
@@ -245,8 +249,10 @@ out_alloc:
245 if (alloc_required) 249 if (alloc_required)
246 gfs2_alloc_put(dip); 250 gfs2_alloc_put(dip);
247out_gunlock: 251out_gunlock:
248 gfs2_glock_dq_m(2, ghs); 252 gfs2_glock_dq(ghs + 1);
249out: 253out_child:
254 gfs2_glock_dq(ghs);
255out_parent:
250 gfs2_holder_uninit(ghs); 256 gfs2_holder_uninit(ghs);
251 gfs2_holder_uninit(ghs + 1); 257 gfs2_holder_uninit(ghs + 1);
252 if (!error) { 258 if (!error) {
@@ -302,7 +308,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
302 308
303 error = gfs2_unlink_ok(dip, &dentry->d_name, ip); 309 error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
304 if (error) 310 if (error)
305 goto out_rgrp; 311 goto out_gunlock;
306 312
307 error = gfs2_trans_begin(sdp, 2*RES_DINODE + RES_LEAF + RES_RG_BIT, 0); 313 error = gfs2_trans_begin(sdp, 2*RES_DINODE + RES_LEAF + RES_RG_BIT, 0);
308 if (error) 314 if (error)
@@ -316,6 +322,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
316 322
317out_end_trans: 323out_end_trans:
318 gfs2_trans_end(sdp); 324 gfs2_trans_end(sdp);
325out_gunlock:
319 gfs2_glock_dq(ghs + 2); 326 gfs2_glock_dq(ghs + 2);
320out_rgrp: 327out_rgrp:
321 gfs2_holder_uninit(ghs + 2); 328 gfs2_holder_uninit(ghs + 2);
@@ -485,7 +492,6 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
485 struct gfs2_holder ri_gh; 492 struct gfs2_holder ri_gh;
486 int error; 493 int error;
487 494
488
489 error = gfs2_rindex_hold(sdp, &ri_gh); 495 error = gfs2_rindex_hold(sdp, &ri_gh);
490 if (error) 496 if (error)
491 return error; 497 return error;
@@ -495,9 +501,17 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
495 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); 501 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
496 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2); 502 gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
497 503
498 error = gfs2_glock_nq_m(3, ghs); 504 error = gfs2_glock_nq(ghs); /* parent */
499 if (error) 505 if (error)
500 goto out; 506 goto out_parent;
507
508 error = gfs2_glock_nq(ghs + 1); /* child */
509 if (error)
510 goto out_child;
511
512 error = gfs2_glock_nq(ghs + 2); /* rgrp */
513 if (error)
514 goto out_rgrp;
501 515
502 error = gfs2_unlink_ok(dip, &dentry->d_name, ip); 516 error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
503 if (error) 517 if (error)
@@ -523,11 +537,15 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
523 gfs2_trans_end(sdp); 537 gfs2_trans_end(sdp);
524 538
525out_gunlock: 539out_gunlock:
526 gfs2_glock_dq_m(3, ghs); 540 gfs2_glock_dq(ghs + 2);
527out: 541out_rgrp:
528 gfs2_holder_uninit(ghs);
529 gfs2_holder_uninit(ghs + 1);
530 gfs2_holder_uninit(ghs + 2); 542 gfs2_holder_uninit(ghs + 2);
543 gfs2_glock_dq(ghs + 1);
544out_child:
545 gfs2_holder_uninit(ghs + 1);
546 gfs2_glock_dq(ghs);
547out_parent:
548 gfs2_holder_uninit(ghs);
531 gfs2_glock_dq_uninit(&ri_gh); 549 gfs2_glock_dq_uninit(&ri_gh);
532 return error; 550 return error;
533} 551}
@@ -571,6 +589,54 @@ static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
571 return 0; 589 return 0;
572} 590}
573 591
592/*
593 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
594 * @this: move this
595 * @to: to here
596 *
597 * Follow @to back to the root and make sure we don't encounter @this
598 * Assumes we already hold the rename lock.
599 *
600 * Returns: errno
601 */
602
603static int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
604{
605 struct inode *dir = &to->i_inode;
606 struct super_block *sb = dir->i_sb;
607 struct inode *tmp;
608 struct qstr dotdot;
609 int error = 0;
610
611 gfs2_str2qstr(&dotdot, "..");
612
613 igrab(dir);
614
615 for (;;) {
616 if (dir == &this->i_inode) {
617 error = -EINVAL;
618 break;
619 }
620 if (dir == sb->s_root->d_inode) {
621 error = 0;
622 break;
623 }
624
625 tmp = gfs2_lookupi(dir, &dotdot, 1);
626 if (IS_ERR(tmp)) {
627 error = PTR_ERR(tmp);
628 break;
629 }
630
631 iput(dir);
632 dir = tmp;
633 }
634
635 iput(dir);
636
637 return error;
638}
639
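The gfs2_ok_to_move() hunk above walks the ".." chain from the destination directory up to the filesystem root, refusing the move if it ever reaches the directory being renamed. A minimal userspace sketch of the same ancestor walk over an in-memory tree (the struct and names below are invented for illustration, not GFS2 types):

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;	/* ".." link; the root points to itself */
};

/* Return -1 (EINVAL-like) if 'to' lies inside the subtree rooted at 'this'. */
static int ok_to_move(const struct node *this, const struct node *to)
{
	const struct node *dir = to;

	for (;;) {
		if (dir == this)
			return -1;	/* the move would create a cycle */
		if (dir == dir->parent)
			return 0;	/* reached the root, all clear */
		dir = dir->parent;	/* follow ".." one level up */
	}
}

int main(void)
{
	struct node root = { "/", &root };
	struct node a    = { "a", &root };
	struct node b    = { "b", &a };

	printf("move a under b: %d\n", ok_to_move(&a, &b));	/* -1: b is inside a */
	printf("move b under /: %d\n", ok_to_move(&b, &root));	/* 0: allowed */
	return 0;
}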
574/** 640/**
575 * gfs2_rename - Rename a file 641 * gfs2_rename - Rename a file
576 * @odir: Parent directory of old file name 642 * @odir: Parent directory of old file name
@@ -589,7 +655,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
589 struct gfs2_inode *ip = GFS2_I(odentry->d_inode); 655 struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
590 struct gfs2_inode *nip = NULL; 656 struct gfs2_inode *nip = NULL;
591 struct gfs2_sbd *sdp = GFS2_SB(odir); 657 struct gfs2_sbd *sdp = GFS2_SB(odir);
592 struct gfs2_holder ghs[5], r_gh; 658 struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
593 struct gfs2_rgrpd *nrgd; 659 struct gfs2_rgrpd *nrgd;
594 unsigned int num_gh; 660 unsigned int num_gh;
595 int dir_rename = 0; 661 int dir_rename = 0;
@@ -603,19 +669,20 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
603 return 0; 669 return 0;
604 } 670 }
605 671
606 /* Make sure we aren't trying to move a dirctory into it's subdir */
607
608 if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) {
609 dir_rename = 1;
610 672
611 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 0, 673 if (odip != ndip) {
612 &r_gh); 674 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
675 0, &r_gh);
613 if (error) 676 if (error)
614 goto out; 677 goto out;
615 678
616 error = gfs2_ok_to_move(ip, ndip); 679 if (S_ISDIR(ip->i_inode.i_mode)) {
617 if (error) 680 dir_rename = 1;
 618 goto out_gunlock_r;                                             681 /* don't move a directory into its subdir */
682 error = gfs2_ok_to_move(ip, ndip);
683 if (error)
684 goto out_gunlock_r;
685 }
619 } 686 }
620 687
621 num_gh = 1; 688 num_gh = 1;
@@ -639,9 +706,11 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
639 gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++); 706 gfs2_holder_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh++);
640 } 707 }
641 708
642 error = gfs2_glock_nq_m(num_gh, ghs); 709 for (x = 0; x < num_gh; x++) {
643 if (error) 710 error = gfs2_glock_nq(ghs + x);
644 goto out_uninit; 711 if (error)
712 goto out_gunlock;
713 }
645 714
646 /* Check out the old directory */ 715 /* Check out the old directory */
647 716
@@ -804,12 +873,12 @@ out_alloc:
804 if (alloc_required) 873 if (alloc_required)
805 gfs2_alloc_put(ndip); 874 gfs2_alloc_put(ndip);
806out_gunlock: 875out_gunlock:
807 gfs2_glock_dq_m(num_gh, ghs); 876 while (x--) {
808out_uninit: 877 gfs2_glock_dq(ghs + x);
809 for (x = 0; x < num_gh; x++)
810 gfs2_holder_uninit(ghs + x); 878 gfs2_holder_uninit(ghs + x);
879 }
811out_gunlock_r: 880out_gunlock_r:
812 if (dir_rename) 881 if (r_gh.gh_gl)
813 gfs2_glock_dq_uninit(&r_gh); 882 gfs2_glock_dq_uninit(&r_gh);
814out: 883out:
815 return error; 884 return error;
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index f66ea0f7a356..d5355d9b5926 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -20,6 +20,7 @@
20#include <linux/gfs2_ondisk.h> 20#include <linux/gfs2_ondisk.h>
21#include <linux/crc32.h> 21#include <linux/crc32.h>
22#include <linux/lm_interface.h> 22#include <linux/lm_interface.h>
23#include <linux/time.h>
23 24
24#include "gfs2.h" 25#include "gfs2.h"
25#include "incore.h" 26#include "incore.h"
@@ -38,6 +39,7 @@
38#include "dir.h" 39#include "dir.h"
39#include "eattr.h" 40#include "eattr.h"
40#include "bmap.h" 41#include "bmap.h"
42#include "meta_io.h"
41 43
42/** 44/**
43 * gfs2_write_inode - Make sure the inode is stable on the disk 45 * gfs2_write_inode - Make sure the inode is stable on the disk
@@ -50,16 +52,74 @@
50static int gfs2_write_inode(struct inode *inode, int sync) 52static int gfs2_write_inode(struct inode *inode, int sync)
51{ 53{
52 struct gfs2_inode *ip = GFS2_I(inode); 54 struct gfs2_inode *ip = GFS2_I(inode);
53 55 struct gfs2_sbd *sdp = GFS2_SB(inode);
54 /* Check this is a "normal" inode */ 56 struct gfs2_holder gh;
55 if (test_bit(GIF_USER, &ip->i_flags)) { 57 struct buffer_head *bh;
56 if (current->flags & PF_MEMALLOC) 58 struct timespec atime;
57 return 0; 59 struct gfs2_dinode *di;
58 if (sync) 60 int ret = 0;
59 gfs2_log_flush(GFS2_SB(inode), ip->i_gl); 61
62 /* Check this is a "normal" inode, etc */
63 if (!test_bit(GIF_USER, &ip->i_flags) ||
64 (current->flags & PF_MEMALLOC))
65 return 0;
66 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
67 if (ret)
68 goto do_flush;
69 ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
70 if (ret)
71 goto do_unlock;
72 ret = gfs2_meta_inode_buffer(ip, &bh);
73 if (ret == 0) {
74 di = (struct gfs2_dinode *)bh->b_data;
75 atime.tv_sec = be64_to_cpu(di->di_atime);
76 atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
77 if (timespec_compare(&inode->i_atime, &atime) > 0) {
78 gfs2_trans_add_bh(ip->i_gl, bh, 1);
79 gfs2_dinode_out(ip, bh->b_data);
80 }
81 brelse(bh);
60 } 82 }
83 gfs2_trans_end(sdp);
84do_unlock:
85 gfs2_glock_dq_uninit(&gh);
86do_flush:
87 if (sync != 0)
88 gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
89 return ret;
90}
61 91
62 return 0; 92/**
93 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
94 * @sdp: the filesystem
95 *
96 * Returns: errno
97 */
98
99static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
100{
101 struct gfs2_holder t_gh;
102 int error;
103
104 gfs2_quota_sync(sdp);
105 gfs2_statfs_sync(sdp);
106
107 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
108 &t_gh);
109 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
110 return error;
111
112 gfs2_meta_syncfs(sdp);
113 gfs2_log_shutdown(sdp);
114
115 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
116
117 if (t_gh.gh_gl)
118 gfs2_glock_dq_uninit(&t_gh);
119
120 gfs2_quota_cleanup(sdp);
121
122 return error;
63} 123}
64 124
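The rewritten gfs2_write_inode() above copies the inode back into its disk block only when the in-core atime is newer than the value already on disk, so routine writeback does not dirty metadata needlessly. A small standalone sketch of that compare-then-write step, with a hand-rolled comparison standing in for the kernel's timespec_compare() (the structs and names are illustrative):

#include <stdio.h>
#include <time.h>

/* Newer-than test: seconds first, nanoseconds as the tie-breaker. */
static int ts_newer(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec > b->tv_sec;
	return a->tv_nsec > b->tv_nsec;
}

struct fake_dinode { struct timespec di_atime; };

/* Write the inode out only when the cached atime has moved forward. */
static int write_inode(const struct timespec *cached, struct fake_dinode *on_disk)
{
	if (!ts_newer(cached, &on_disk->di_atime))
		return 0;			/* nothing newer, skip the write */
	on_disk->di_atime = *cached;		/* stands in for gfs2_dinode_out() */
	return 1;				/* caller would log/flush the block */
}

int main(void)
{
	struct fake_dinode d = { { 100, 0 } };
	struct timespec atime = { 100, 500 };

	printf("flushed: %d\n", write_inode(&atime, &d));	/* 1: atime moved on */
	printf("flushed: %d\n", write_inode(&atime, &d));	/* 0: already current */
	return 0;
}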
65/** 125/**
@@ -73,12 +133,6 @@ static void gfs2_put_super(struct super_block *sb)
73 struct gfs2_sbd *sdp = sb->s_fs_info; 133 struct gfs2_sbd *sdp = sb->s_fs_info;
74 int error; 134 int error;
75 135
76 if (!sdp)
77 return;
78
79 if (!strncmp(sb->s_type->name, "gfs2meta", 8))
80 return; /* Nothing to do */
81
82 /* Unfreeze the filesystem, if we need to */ 136 /* Unfreeze the filesystem, if we need to */
83 137
84 mutex_lock(&sdp->sd_freeze_lock); 138 mutex_lock(&sdp->sd_freeze_lock);
@@ -101,7 +155,6 @@ static void gfs2_put_super(struct super_block *sb)
101 155
102 /* Release stuff */ 156 /* Release stuff */
103 157
104 iput(sdp->sd_master_dir);
105 iput(sdp->sd_jindex); 158 iput(sdp->sd_jindex);
106 iput(sdp->sd_inum_inode); 159 iput(sdp->sd_inum_inode);
107 iput(sdp->sd_statfs_inode); 160 iput(sdp->sd_statfs_inode);
@@ -152,6 +205,7 @@ static void gfs2_write_super(struct super_block *sb)
152 * 205 *
153 * Flushes the log to disk. 206 * Flushes the log to disk.
154 */ 207 */
208
155static int gfs2_sync_fs(struct super_block *sb, int wait) 209static int gfs2_sync_fs(struct super_block *sb, int wait)
156{ 210{
157 sb->s_dirt = 0; 211 sb->s_dirt = 0;
@@ -270,14 +324,6 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
270 } 324 }
271 } 325 }
272 326
273 if (*flags & (MS_NOATIME | MS_NODIRATIME))
274 set_bit(SDF_NOATIME, &sdp->sd_flags);
275 else
276 clear_bit(SDF_NOATIME, &sdp->sd_flags);
277
278 /* Don't let the VFS update atimes. GFS2 handles this itself. */
279 *flags |= MS_NOATIME | MS_NODIRATIME;
280
281 return error; 327 return error;
282} 328}
283 329
@@ -295,6 +341,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
295 * inode's blocks, or alternatively pass the baton on to another 341 * inode's blocks, or alternatively pass the baton on to another
296 * node for later deallocation. 342 * node for later deallocation.
297 */ 343 */
344
298static void gfs2_drop_inode(struct inode *inode) 345static void gfs2_drop_inode(struct inode *inode)
299{ 346{
300 struct gfs2_inode *ip = GFS2_I(inode); 347 struct gfs2_inode *ip = GFS2_I(inode);
@@ -333,6 +380,16 @@ static void gfs2_clear_inode(struct inode *inode)
333 } 380 }
334} 381}
335 382
383static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
384{
385 do {
386 if (d1 == d2)
387 return 1;
388 d1 = d1->d_parent;
389 } while (!IS_ROOT(d1));
390 return 0;
391}
392
336/** 393/**
337 * gfs2_show_options - Show mount options for /proc/mounts 394 * gfs2_show_options - Show mount options for /proc/mounts
338 * @s: seq_file structure 395 * @s: seq_file structure
@@ -346,6 +403,8 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
346 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; 403 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
347 struct gfs2_args *args = &sdp->sd_args; 404 struct gfs2_args *args = &sdp->sd_args;
348 405
406 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
407 seq_printf(s, ",meta");
349 if (args->ar_lockproto[0]) 408 if (args->ar_lockproto[0])
350 seq_printf(s, ",lockproto=%s", args->ar_lockproto); 409 seq_printf(s, ",lockproto=%s", args->ar_lockproto);
351 if (args->ar_locktable[0]) 410 if (args->ar_locktable[0])
@@ -414,6 +473,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
414 * conversion on the iopen lock, but we can change that later. This 473 * conversion on the iopen lock, but we can change that later. This
415 * is safe, just less efficient. 474 * is safe, just less efficient.
416 */ 475 */
476
417static void gfs2_delete_inode(struct inode *inode) 477static void gfs2_delete_inode(struct inode *inode)
418{ 478{
419 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; 479 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
@@ -478,8 +538,6 @@ out:
478 clear_inode(inode); 538 clear_inode(inode);
479} 539}
480 540
481
482
483static struct inode *gfs2_alloc_inode(struct super_block *sb) 541static struct inode *gfs2_alloc_inode(struct super_block *sb)
484{ 542{
485 struct gfs2_inode *ip; 543 struct gfs2_inode *ip;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ca831991cbc2..c3ba3d9d0aac 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -33,313 +33,6 @@
33#include "trans.h" 33#include "trans.h"
34#include "util.h" 34#include "util.h"
35 35
36static const u32 gfs2_old_fs_formats[] = {
37 0
38};
39
40static const u32 gfs2_old_multihost_formats[] = {
41 0
42};
43
44/**
45 * gfs2_tune_init - Fill a gfs2_tune structure with default values
46 * @gt: tune
47 *
48 */
49
50void gfs2_tune_init(struct gfs2_tune *gt)
51{
52 spin_lock_init(&gt->gt_spin);
53
54 gt->gt_demote_secs = 300;
55 gt->gt_incore_log_blocks = 1024;
56 gt->gt_log_flush_secs = 60;
57 gt->gt_recoverd_secs = 60;
58 gt->gt_logd_secs = 1;
59 gt->gt_quotad_secs = 5;
60 gt->gt_quota_simul_sync = 64;
61 gt->gt_quota_warn_period = 10;
62 gt->gt_quota_scale_num = 1;
63 gt->gt_quota_scale_den = 1;
64 gt->gt_quota_cache_secs = 300;
65 gt->gt_quota_quantum = 60;
66 gt->gt_atime_quantum = 3600;
67 gt->gt_new_files_jdata = 0;
68 gt->gt_max_readahead = 1 << 18;
69 gt->gt_stall_secs = 600;
70 gt->gt_complain_secs = 10;
71 gt->gt_statfs_quantum = 30;
72 gt->gt_statfs_slow = 0;
73}
74
75/**
76 * gfs2_check_sb - Check superblock
77 * @sdp: the filesystem
78 * @sb: The superblock
79 * @silent: Don't print a message if the check fails
80 *
81 * Checks the version code of the FS is one that we understand how to
82 * read and that the sizes of the various on-disk structures have not
83 * changed.
84 */
85
86int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
87{
88 unsigned int x;
89
90 if (sb->sb_magic != GFS2_MAGIC ||
91 sb->sb_type != GFS2_METATYPE_SB) {
92 if (!silent)
93 printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n");
94 return -EINVAL;
95 }
96
97 /* If format numbers match exactly, we're done. */
98
99 if (sb->sb_fs_format == GFS2_FORMAT_FS &&
100 sb->sb_multihost_format == GFS2_FORMAT_MULTI)
101 return 0;
102
103 if (sb->sb_fs_format != GFS2_FORMAT_FS) {
104 for (x = 0; gfs2_old_fs_formats[x]; x++)
105 if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
106 break;
107
108 if (!gfs2_old_fs_formats[x]) {
109 printk(KERN_WARNING
110 "GFS2: code version (%u, %u) is incompatible "
111 "with ondisk format (%u, %u)\n",
112 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
113 sb->sb_fs_format, sb->sb_multihost_format);
114 printk(KERN_WARNING
115 "GFS2: I don't know how to upgrade this FS\n");
116 return -EINVAL;
117 }
118 }
119
120 if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
121 for (x = 0; gfs2_old_multihost_formats[x]; x++)
122 if (gfs2_old_multihost_formats[x] ==
123 sb->sb_multihost_format)
124 break;
125
126 if (!gfs2_old_multihost_formats[x]) {
127 printk(KERN_WARNING
128 "GFS2: code version (%u, %u) is incompatible "
129 "with ondisk format (%u, %u)\n",
130 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
131 sb->sb_fs_format, sb->sb_multihost_format);
132 printk(KERN_WARNING
133 "GFS2: I don't know how to upgrade this FS\n");
134 return -EINVAL;
135 }
136 }
137
138 if (!sdp->sd_args.ar_upgrade) {
139 printk(KERN_WARNING
140 "GFS2: code version (%u, %u) is incompatible "
141 "with ondisk format (%u, %u)\n",
142 GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
143 sb->sb_fs_format, sb->sb_multihost_format);
144 printk(KERN_INFO
145 "GFS2: Use the \"upgrade\" mount option to upgrade "
146 "the FS\n");
147 printk(KERN_INFO "GFS2: See the manual for more details\n");
148 return -EINVAL;
149 }
150
151 return 0;
152}
153
154
155static void end_bio_io_page(struct bio *bio, int error)
156{
157 struct page *page = bio->bi_private;
158
159 if (!error)
160 SetPageUptodate(page);
161 else
162 printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);
163 unlock_page(page);
164}
165
166static void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
167{
168 const struct gfs2_sb *str = buf;
169
170 sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
171 sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
172 sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
173 sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
174 sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
175 sb->sb_bsize = be32_to_cpu(str->sb_bsize);
176 sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
177 sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
178 sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
179 sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
180 sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);
181
182 memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
183 memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
184}
185
186/**
187 * gfs2_read_super - Read the gfs2 super block from disk
188 * @sdp: The GFS2 super block
189 * @sector: The location of the super block
190 * @error: The error code to return
191 *
192 * This uses the bio functions to read the super block from disk
193 * because we want to be 100% sure that we never read cached data.
194 * A super block is read twice only during each GFS2 mount and is
 195 * never written to by the filesystem. The first time it's read no
196 * locks are held, and the only details which are looked at are those
197 * relating to the locking protocol. Once locking is up and working,
198 * the sb is read again under the lock to establish the location of
199 * the master directory (contains pointers to journals etc) and the
200 * root directory.
201 *
202 * Returns: 0 on success or error
203 */
204
205int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
206{
207 struct super_block *sb = sdp->sd_vfs;
208 struct gfs2_sb *p;
209 struct page *page;
210 struct bio *bio;
211
212 page = alloc_page(GFP_NOFS);
213 if (unlikely(!page))
214 return -ENOBUFS;
215
216 ClearPageUptodate(page);
217 ClearPageDirty(page);
218 lock_page(page);
219
220 bio = bio_alloc(GFP_NOFS, 1);
221 if (unlikely(!bio)) {
222 __free_page(page);
223 return -ENOBUFS;
224 }
225
226 bio->bi_sector = sector * (sb->s_blocksize >> 9);
227 bio->bi_bdev = sb->s_bdev;
228 bio_add_page(bio, page, PAGE_SIZE, 0);
229
230 bio->bi_end_io = end_bio_io_page;
231 bio->bi_private = page;
232 submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
233 wait_on_page_locked(page);
234 bio_put(bio);
235 if (!PageUptodate(page)) {
236 __free_page(page);
237 return -EIO;
238 }
239 p = kmap(page);
240 gfs2_sb_in(&sdp->sd_sb, p);
241 kunmap(page);
242 __free_page(page);
243 return 0;
244}
245
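The comment block above explains that gfs2_read_super() reads through the bio layer specifically so the superblock is never served from cached data. A rough userspace analogue with the same goal is an O_DIRECT read of the block device into an aligned buffer; the device path and block size below are assumptions for the sketch, and the 64 KiB offset mirrors the fixed location GFS2 keeps its superblock at:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "/dev/sdb1";		/* hypothetical device */
	const size_t blksz = 4096;		/* assumed logical block size */
	const off_t sb_offset = 64 * 1024;	/* GFS2's fixed superblock offset */
	void *buf;
	int fd;

	/* O_DIRECT demands an aligned buffer and aligned offset/length. */
	if (posix_memalign(&buf, blksz, blksz))
		return 1;

	fd = open(dev, O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		free(buf);
		return 1;
	}
	if (pread(fd, buf, blksz, sb_offset) != (ssize_t)blksz) {
		perror("pread");
		close(fd);
		free(buf);
		return 1;
	}
	/* buf now holds on-disk bytes, not a page-cache copy. */
	close(fd);
	free(buf);
	return 0;
}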
246/**
247 * gfs2_read_sb - Read super block
248 * @sdp: The GFS2 superblock
249 * @gl: the glock for the superblock (assumed to be held)
250 * @silent: Don't print message if mount fails
251 *
252 */
253
254int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
255{
256 u32 hash_blocks, ind_blocks, leaf_blocks;
257 u32 tmp_blocks;
258 unsigned int x;
259 int error;
260
261 error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
262 if (error) {
263 if (!silent)
264 fs_err(sdp, "can't read superblock\n");
265 return error;
266 }
267
268 error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
269 if (error)
270 return error;
271
272 sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
273 GFS2_BASIC_BLOCK_SHIFT;
274 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
275 sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
276 sizeof(struct gfs2_dinode)) / sizeof(u64);
277 sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
278 sizeof(struct gfs2_meta_header)) / sizeof(u64);
279 sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
280 sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
281 sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
282 sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
283 sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
284 sizeof(struct gfs2_meta_header)) /
285 sizeof(struct gfs2_quota_change);
286
 287 /* Compute maximum reservation required to add an entry to a directory */
288
289 hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
290 sdp->sd_jbsize);
291
292 ind_blocks = 0;
293 for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
294 tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
295 ind_blocks += tmp_blocks;
296 }
297
298 leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;
299
300 sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
301
302 sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
303 sizeof(struct gfs2_dinode);
304 sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
305 for (x = 2;; x++) {
306 u64 space, d;
307 u32 m;
308
309 space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
310 d = space;
311 m = do_div(d, sdp->sd_inptrs);
312
313 if (d != sdp->sd_heightsize[x - 1] || m)
314 break;
315 sdp->sd_heightsize[x] = space;
316 }
317 sdp->sd_max_height = x;
318 sdp->sd_heightsize[x] = ~0;
319 gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
320
321 sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
322 sizeof(struct gfs2_dinode);
323 sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
324 for (x = 2;; x++) {
325 u64 space, d;
326 u32 m;
327
328 space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
329 d = space;
330 m = do_div(d, sdp->sd_inptrs);
331
332 if (d != sdp->sd_jheightsize[x - 1] || m)
333 break;
334 sdp->sd_jheightsize[x] = space;
335 }
336 sdp->sd_max_jheight = x;
337 sdp->sd_jheightsize[x] = ~0;
338 gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);
339
340 return 0;
341}
342
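The pair of loops being removed here size the metadata tree: level 0 is the space left in the dinode block, level 1 multiplies by the direct pointer count, and each further level multiplies by the indirect pointers per block until the 64-bit product would overflow. A standalone sketch of that calculation (the geometry passed in main() is a plausible 4 KiB-block example, not values read from disk):

#include <stdio.h>
#include <stdint.h>

#define MAX_HEIGHT 10

/* Returns the number of usable height levels for the given geometry. */
static unsigned max_height(uint64_t bsize, uint64_t hdr, uint64_t diptrs,
			   uint64_t inptrs, uint64_t *sizes)
{
	unsigned x;

	sizes[0] = bsize - hdr;		/* data that fits in the dinode block */
	sizes[1] = bsize * diptrs;	/* one level of direct pointers */
	for (x = 2; x < MAX_HEIGHT; x++) {
		uint64_t space = sizes[x - 1] * inptrs;

		/* stop once the multiplication no longer round-trips (overflow) */
		if (space / inptrs != sizes[x - 1])
			break;
		sizes[x] = space;
	}
	return x;
}

int main(void)
{
	uint64_t sizes[MAX_HEIGHT];
	/* illustrative numbers: 4 KiB blocks, 483 direct / 509 indirect pointers */
	unsigned h = max_height(4096, 232, 483, 509, sizes);

	for (unsigned i = 0; i < h; i++)
		printf("height %u: %llu bytes\n", i, (unsigned long long)sizes[i]);
	return 0;
}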
343/** 36/**
344 * gfs2_jindex_hold - Grab a lock on the jindex 37 * gfs2_jindex_hold - Grab a lock on the jindex
345 * @sdp: The GFS2 superblock 38 * @sdp: The GFS2 superblock
@@ -581,39 +274,6 @@ fail:
581 return error; 274 return error;
582} 275}
583 276
584/**
585 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
586 * @sdp: the filesystem
587 *
588 * Returns: errno
589 */
590
591int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
592{
593 struct gfs2_holder t_gh;
594 int error;
595
596 gfs2_quota_sync(sdp);
597 gfs2_statfs_sync(sdp);
598
599 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
600 &t_gh);
601 if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
602 return error;
603
604 gfs2_meta_syncfs(sdp);
605 gfs2_log_shutdown(sdp);
606
607 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
608
609 if (t_gh.gh_gl)
610 gfs2_glock_dq_uninit(&t_gh);
611
612 gfs2_quota_cleanup(sdp);
613
614 return error;
615}
616
617static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf) 277static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
618{ 278{
619 const struct gfs2_statfs_change *str = buf; 279 const struct gfs2_statfs_change *str = buf;
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 44361ecc44f7..50a4c9b1215e 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -12,11 +12,6 @@
12 12
13#include "incore.h" 13#include "incore.h"
14 14
15void gfs2_tune_init(struct gfs2_tune *gt);
16
17int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent);
18int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
19int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector);
20void gfs2_lm_unmount(struct gfs2_sbd *sdp); 15void gfs2_lm_unmount(struct gfs2_sbd *sdp);
21 16
22static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) 17static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
@@ -40,7 +35,6 @@ int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
40 struct gfs2_inode **ipp); 35 struct gfs2_inode **ipp);
41 36
42int gfs2_make_fs_rw(struct gfs2_sbd *sdp); 37int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
43int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
44 38
45int gfs2_statfs_init(struct gfs2_sbd *sdp); 39int gfs2_statfs_init(struct gfs2_sbd *sdp);
46void gfs2_statfs_change(struct gfs2_sbd *sdp, 40void gfs2_statfs_change(struct gfs2_sbd *sdp,
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 74846559fc3f..7e1879f1a02c 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -269,14 +269,6 @@ ARGS_ATTR(quota, "%u\n");
269ARGS_ATTR(suiddir, "%d\n"); 269ARGS_ATTR(suiddir, "%d\n");
270ARGS_ATTR(data, "%d\n"); 270ARGS_ATTR(data, "%d\n");
271 271
272/* one oddball doesn't fit the macro mold */
273static ssize_t noatime_show(struct gfs2_sbd *sdp, char *buf)
274{
275 return snprintf(buf, PAGE_SIZE, "%d\n",
276 !!test_bit(SDF_NOATIME, &sdp->sd_flags));
277}
278static struct args_attr args_attr_noatime = __ATTR_RO(noatime);
279
280static struct attribute *args_attrs[] = { 272static struct attribute *args_attrs[] = {
281 &args_attr_lockproto.attr, 273 &args_attr_lockproto.attr,
282 &args_attr_locktable.attr, 274 &args_attr_locktable.attr,
@@ -292,7 +284,6 @@ static struct attribute *args_attrs[] = {
292 &args_attr_quota.attr, 284 &args_attr_quota.attr,
293 &args_attr_suiddir.attr, 285 &args_attr_suiddir.attr,
294 &args_attr_data.attr, 286 &args_attr_data.attr,
295 &args_attr_noatime.attr,
296 NULL, 287 NULL,
297}; 288};
298 289
@@ -407,7 +398,6 @@ TUNE_ATTR(incore_log_blocks, 0);
407TUNE_ATTR(log_flush_secs, 0); 398TUNE_ATTR(log_flush_secs, 0);
408TUNE_ATTR(quota_warn_period, 0); 399TUNE_ATTR(quota_warn_period, 0);
409TUNE_ATTR(quota_quantum, 0); 400TUNE_ATTR(quota_quantum, 0);
410TUNE_ATTR(atime_quantum, 0);
411TUNE_ATTR(max_readahead, 0); 401TUNE_ATTR(max_readahead, 0);
412TUNE_ATTR(complain_secs, 0); 402TUNE_ATTR(complain_secs, 0);
413TUNE_ATTR(statfs_slow, 0); 403TUNE_ATTR(statfs_slow, 0);
@@ -427,7 +417,6 @@ static struct attribute *tune_attrs[] = {
427 &tune_attr_log_flush_secs.attr, 417 &tune_attr_log_flush_secs.attr,
428 &tune_attr_quota_warn_period.attr, 418 &tune_attr_quota_warn_period.attr,
429 &tune_attr_quota_quantum.attr, 419 &tune_attr_quota_quantum.attr,
430 &tune_attr_atime_quantum.attr,
431 &tune_attr_max_readahead.attr, 420 &tune_attr_max_readahead.attr,
432 &tune_attr_complain_secs.attr, 421 &tune_attr_complain_secs.attr,
433 &tune_attr_statfs_slow.attr, 422 &tune_attr_statfs_slow.attr,
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index ecc3330972e5..7408227c49c9 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -120,22 +120,21 @@ static int (*check_part[])(struct parsed_partitions *, struct block_device *) =
120 * a pointer to that same buffer (for convenience). 120 * a pointer to that same buffer (for convenience).
121 */ 121 */
122 122
123char *disk_name(struct gendisk *hd, int part, char *buf) 123char *disk_name(struct gendisk *hd, int partno, char *buf)
124{ 124{
125 if (!part) 125 if (!partno)
126 snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name); 126 snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
127 else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) 127 else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
128 snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, part); 128 snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
129 else 129 else
130 snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, part); 130 snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);
131 131
132 return buf; 132 return buf;
133} 133}
134 134
135const char *bdevname(struct block_device *bdev, char *buf) 135const char *bdevname(struct block_device *bdev, char *buf)
136{ 136{
137 int part = MINOR(bdev->bd_dev) - bdev->bd_disk->first_minor; 137 return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf);
138 return disk_name(bdev->bd_disk, part, buf);
139} 138}
140 139
141EXPORT_SYMBOL(bdevname); 140EXPORT_SYMBOL(bdevname);
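disk_name() above encodes the usual block-device naming rule: append the partition number directly, unless the disk name already ends in a digit, in which case a "p" separator is inserted. A tiny sketch of that rule:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* e.g. "sda" + 3 -> "sda3", "mmcblk0" + 3 -> "mmcblk0p3" */
static void part_name(const char *disk, int partno, char *buf, size_t len)
{
	if (partno == 0)
		snprintf(buf, len, "%s", disk);
	else if (isdigit((unsigned char)disk[strlen(disk) - 1]))
		snprintf(buf, len, "%sp%d", disk, partno);
	else
		snprintf(buf, len, "%s%d", disk, partno);
}

int main(void)
{
	char buf[64];

	part_name("sda", 3, buf, sizeof(buf));
	printf("%s\n", buf);		/* sda3 */
	part_name("mmcblk0", 3, buf, sizeof(buf));
	printf("%s\n", buf);		/* mmcblk0p3 */
	return 0;
}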
@@ -169,7 +168,7 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
169 if (isdigit(state->name[strlen(state->name)-1])) 168 if (isdigit(state->name[strlen(state->name)-1]))
170 sprintf(state->name, "p"); 169 sprintf(state->name, "p");
171 170
172 state->limit = hd->minors; 171 state->limit = disk_max_parts(hd);
173 i = res = err = 0; 172 i = res = err = 0;
174 while (!res && check_part[i]) { 173 while (!res && check_part[i]) {
175 memset(&state->parts, 0, sizeof(state->parts)); 174 memset(&state->parts, 0, sizeof(state->parts));
@@ -204,21 +203,22 @@ static ssize_t part_start_show(struct device *dev,
204 return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect); 203 return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
205} 204}
206 205
207static ssize_t part_size_show(struct device *dev, 206ssize_t part_size_show(struct device *dev,
208 struct device_attribute *attr, char *buf) 207 struct device_attribute *attr, char *buf)
209{ 208{
210 struct hd_struct *p = dev_to_part(dev); 209 struct hd_struct *p = dev_to_part(dev);
211 return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); 210 return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
212} 211}
213 212
214static ssize_t part_stat_show(struct device *dev, 213ssize_t part_stat_show(struct device *dev,
215 struct device_attribute *attr, char *buf) 214 struct device_attribute *attr, char *buf)
216{ 215{
217 struct hd_struct *p = dev_to_part(dev); 216 struct hd_struct *p = dev_to_part(dev);
217 int cpu;
218 218
219 preempt_disable(); 219 cpu = part_stat_lock();
220 part_round_stats(p); 220 part_round_stats(cpu, p);
221 preempt_enable(); 221 part_stat_unlock();
222 return sprintf(buf, 222 return sprintf(buf,
223 "%8lu %8lu %8llu %8u " 223 "%8lu %8lu %8llu %8u "
224 "%8lu %8lu %8llu %8u " 224 "%8lu %8lu %8llu %8u "
@@ -238,17 +238,17 @@ static ssize_t part_stat_show(struct device *dev,
238} 238}
239 239
240#ifdef CONFIG_FAIL_MAKE_REQUEST 240#ifdef CONFIG_FAIL_MAKE_REQUEST
241static ssize_t part_fail_show(struct device *dev, 241ssize_t part_fail_show(struct device *dev,
242 struct device_attribute *attr, char *buf) 242 struct device_attribute *attr, char *buf)
243{ 243{
244 struct hd_struct *p = dev_to_part(dev); 244 struct hd_struct *p = dev_to_part(dev);
245 245
246 return sprintf(buf, "%d\n", p->make_it_fail); 246 return sprintf(buf, "%d\n", p->make_it_fail);
247} 247}
248 248
249static ssize_t part_fail_store(struct device *dev, 249ssize_t part_fail_store(struct device *dev,
250 struct device_attribute *attr, 250 struct device_attribute *attr,
251 const char *buf, size_t count) 251 const char *buf, size_t count)
252{ 252{
253 struct hd_struct *p = dev_to_part(dev); 253 struct hd_struct *p = dev_to_part(dev);
254 int i; 254 int i;
@@ -300,40 +300,34 @@ struct device_type part_type = {
300 .release = part_release, 300 .release = part_release,
301}; 301};
302 302
303static inline void partition_sysfs_add_subdir(struct hd_struct *p) 303static void delete_partition_rcu_cb(struct rcu_head *head)
304{
305 struct kobject *k;
306
307 k = kobject_get(&p->dev.kobj);
308 p->holder_dir = kobject_create_and_add("holders", k);
309 kobject_put(k);
310}
311
312static inline void disk_sysfs_add_subdirs(struct gendisk *disk)
313{ 304{
314 struct kobject *k; 305 struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
315 306
316 k = kobject_get(&disk->dev.kobj); 307 part->start_sect = 0;
317 disk->holder_dir = kobject_create_and_add("holders", k); 308 part->nr_sects = 0;
318 disk->slave_dir = kobject_create_and_add("slaves", k); 309 part_stat_set_all(part, 0);
319 kobject_put(k); 310 put_device(part_to_dev(part));
320} 311}
321 312
322void delete_partition(struct gendisk *disk, int part) 313void delete_partition(struct gendisk *disk, int partno)
323{ 314{
324 struct hd_struct *p = disk->part[part-1]; 315 struct disk_part_tbl *ptbl = disk->part_tbl;
316 struct hd_struct *part;
325 317
326 if (!p) 318 if (partno >= ptbl->len)
327 return; 319 return;
328 if (!p->nr_sects) 320
321 part = ptbl->part[partno];
322 if (!part)
329 return; 323 return;
330 disk->part[part-1] = NULL; 324
331 p->start_sect = 0; 325 blk_free_devt(part_devt(part));
332 p->nr_sects = 0; 326 rcu_assign_pointer(ptbl->part[partno], NULL);
333 part_stat_set_all(p, 0); 327 kobject_put(part->holder_dir);
334 kobject_put(p->holder_dir); 328 device_del(part_to_dev(part));
335 device_del(&p->dev); 329
336 put_device(&p->dev); 330 call_rcu(&part->rcu_head, delete_partition_rcu_cb);
337} 331}
338 332
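delete_partition() now unpublishes the slot with rcu_assign_pointer() and defers the final put_device() to an RCU callback, so lockless readers that fetched the pointer just before removal never touch freed memory. The toy below shows the same unpublish-then-defer shape in plain single-threaded C; the deferred list and reclaim_deferred() stand in for call_rcu() and the grace period, and are illustrative only:

#include <stdio.h>
#include <stdlib.h>

/* Toy partition table: published slots that readers may dereference. */
struct part { int partno; };

#define NPARTS 8
static struct part *table[NPARTS];
static struct part *defer_list[NPARTS];	/* freed only after readers are done */
static int ndeferred;

static void delete_part(int partno)
{
	struct part *p = table[partno];

	if (!p)
		return;
	table[partno] = NULL;		/* unpublish first (rcu_assign_pointer) */
	defer_list[ndeferred++] = p;	/* stand-in for call_rcu() */
}

/* Called once no reader can still hold an old pointer (the "grace period"). */
static void reclaim_deferred(void)
{
	for (int i = 0; i < ndeferred; i++)
		free(defer_list[i]);	/* stand-in for the RCU callback */
	ndeferred = 0;
}

int main(void)
{
	struct part *p = malloc(sizeof(*p));

	p->partno = 1;
	table[1] = p;

	delete_part(1);		/* slot is NULL at once, memory still valid */
	reclaim_deferred();	/* actual free happens after the quiescent point */
	return 0;
}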
339static ssize_t whole_disk_show(struct device *dev, 333static ssize_t whole_disk_show(struct device *dev,
@@ -344,102 +338,132 @@ static ssize_t whole_disk_show(struct device *dev,
344static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, 338static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
345 whole_disk_show, NULL); 339 whole_disk_show, NULL);
346 340
347int add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags) 341int add_partition(struct gendisk *disk, int partno,
342 sector_t start, sector_t len, int flags)
348{ 343{
349 struct hd_struct *p; 344 struct hd_struct *p;
345 dev_t devt = MKDEV(0, 0);
346 struct device *ddev = disk_to_dev(disk);
347 struct device *pdev;
348 struct disk_part_tbl *ptbl;
349 const char *dname;
350 int err; 350 int err;
351 351
352 err = disk_expand_part_tbl(disk, partno);
353 if (err)
354 return err;
355 ptbl = disk->part_tbl;
356
357 if (ptbl->part[partno])
358 return -EBUSY;
359
352 p = kzalloc(sizeof(*p), GFP_KERNEL); 360 p = kzalloc(sizeof(*p), GFP_KERNEL);
353 if (!p) 361 if (!p)
354 return -ENOMEM; 362 return -ENOMEM;
355 363
356 if (!init_part_stats(p)) { 364 if (!init_part_stats(p)) {
357 err = -ENOMEM; 365 err = -ENOMEM;
358 goto out0; 366 goto out_free;
359 } 367 }
368 pdev = part_to_dev(p);
369
360 p->start_sect = start; 370 p->start_sect = start;
361 p->nr_sects = len; 371 p->nr_sects = len;
362 p->partno = part; 372 p->partno = partno;
363 p->policy = disk->policy; 373 p->policy = get_disk_ro(disk);
364 374
365 if (isdigit(disk->dev.bus_id[strlen(disk->dev.bus_id)-1])) 375 dname = dev_name(ddev);
366 snprintf(p->dev.bus_id, BUS_ID_SIZE, 376 if (isdigit(dname[strlen(dname) - 1]))
367 "%sp%d", disk->dev.bus_id, part); 377 snprintf(pdev->bus_id, BUS_ID_SIZE, "%sp%d", dname, partno);
368 else 378 else
369 snprintf(p->dev.bus_id, BUS_ID_SIZE, 379 snprintf(pdev->bus_id, BUS_ID_SIZE, "%s%d", dname, partno);
370 "%s%d", disk->dev.bus_id, part);
371 380
372 device_initialize(&p->dev); 381 device_initialize(pdev);
373 p->dev.devt = MKDEV(disk->major, disk->first_minor + part); 382 pdev->class = &block_class;
374 p->dev.class = &block_class; 383 pdev->type = &part_type;
375 p->dev.type = &part_type; 384 pdev->parent = ddev;
376 p->dev.parent = &disk->dev; 385
377 disk->part[part-1] = p; 386 err = blk_alloc_devt(p, &devt);
387 if (err)
388 goto out_free;
389 pdev->devt = devt;
378 390
379 /* delay uevent until 'holders' subdir is created */ 391 /* delay uevent until 'holders' subdir is created */
380 p->dev.uevent_suppress = 1; 392 pdev->uevent_suppress = 1;
381 err = device_add(&p->dev); 393 err = device_add(pdev);
382 if (err) 394 if (err)
383 goto out1; 395 goto out_put;
384 partition_sysfs_add_subdir(p); 396
385 p->dev.uevent_suppress = 0; 397 err = -ENOMEM;
398 p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
399 if (!p->holder_dir)
400 goto out_del;
401
402 pdev->uevent_suppress = 0;
386 if (flags & ADDPART_FLAG_WHOLEDISK) { 403 if (flags & ADDPART_FLAG_WHOLEDISK) {
387 err = device_create_file(&p->dev, &dev_attr_whole_disk); 404 err = device_create_file(pdev, &dev_attr_whole_disk);
388 if (err) 405 if (err)
389 goto out2; 406 goto out_del;
390 } 407 }
391 408
409 /* everything is up and running, commence */
410 INIT_RCU_HEAD(&p->rcu_head);
411 rcu_assign_pointer(ptbl->part[partno], p);
412
 392 /* suppress uevent if the disk suppresses it */                 413 /* suppress uevent if the disk suppresses it */
393 if (!disk->dev.uevent_suppress) 414 if (!ddev->uevent_suppress)
394 kobject_uevent(&p->dev.kobj, KOBJ_ADD); 415 kobject_uevent(&pdev->kobj, KOBJ_ADD);
395 416
396 return 0; 417 return 0;
397 418
398out2: 419out_free:
399 device_del(&p->dev);
400out1:
401 put_device(&p->dev);
402 free_part_stats(p);
403out0:
404 kfree(p); 420 kfree(p);
405 return err; 421 return err;
422out_del:
423 kobject_put(p->holder_dir);
424 device_del(pdev);
425out_put:
426 put_device(pdev);
427 blk_free_devt(devt);
428 return err;
406} 429}
407 430
408/* Not exported, helper to add_disk(). */ 431/* Not exported, helper to add_disk(). */
409void register_disk(struct gendisk *disk) 432void register_disk(struct gendisk *disk)
410{ 433{
434 struct device *ddev = disk_to_dev(disk);
411 struct block_device *bdev; 435 struct block_device *bdev;
436 struct disk_part_iter piter;
437 struct hd_struct *part;
412 char *s; 438 char *s;
413 int i;
414 struct hd_struct *p;
415 int err; 439 int err;
416 440
417 disk->dev.parent = disk->driverfs_dev; 441 ddev->parent = disk->driverfs_dev;
418 disk->dev.devt = MKDEV(disk->major, disk->first_minor);
419 442
420 strlcpy(disk->dev.bus_id, disk->disk_name, BUS_ID_SIZE); 443 strlcpy(ddev->bus_id, disk->disk_name, BUS_ID_SIZE);
421 /* ewww... some of these buggers have / in the name... */ 444 /* ewww... some of these buggers have / in the name... */
422 s = strchr(disk->dev.bus_id, '/'); 445 s = strchr(ddev->bus_id, '/');
423 if (s) 446 if (s)
424 *s = '!'; 447 *s = '!';
425 448
426 /* delay uevents, until we scanned partition table */ 449 /* delay uevents, until we scanned partition table */
427 disk->dev.uevent_suppress = 1; 450 ddev->uevent_suppress = 1;
428 451
429 if (device_add(&disk->dev)) 452 if (device_add(ddev))
430 return; 453 return;
431#ifndef CONFIG_SYSFS_DEPRECATED 454#ifndef CONFIG_SYSFS_DEPRECATED
432 err = sysfs_create_link(block_depr, &disk->dev.kobj, 455 err = sysfs_create_link(block_depr, &ddev->kobj,
433 kobject_name(&disk->dev.kobj)); 456 kobject_name(&ddev->kobj));
434 if (err) { 457 if (err) {
435 device_del(&disk->dev); 458 device_del(ddev);
436 return; 459 return;
437 } 460 }
438#endif 461#endif
439 disk_sysfs_add_subdirs(disk); 462 disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
463 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
440 464
441 /* No minors to use for partitions */ 465 /* No minors to use for partitions */
442 if (disk->minors == 1) 466 if (!disk_partitionable(disk))
443 goto exit; 467 goto exit;
444 468
445 /* No such device (e.g., media were just removed) */ 469 /* No such device (e.g., media were just removed) */
@@ -458,41 +482,57 @@ void register_disk(struct gendisk *disk)
458 482
459exit: 483exit:
460 /* announce disk after possible partitions are created */ 484 /* announce disk after possible partitions are created */
461 disk->dev.uevent_suppress = 0; 485 ddev->uevent_suppress = 0;
462 kobject_uevent(&disk->dev.kobj, KOBJ_ADD); 486 kobject_uevent(&ddev->kobj, KOBJ_ADD);
463 487
464 /* announce possible partitions */ 488 /* announce possible partitions */
465 for (i = 1; i < disk->minors; i++) { 489 disk_part_iter_init(&piter, disk, 0);
466 p = disk->part[i-1]; 490 while ((part = disk_part_iter_next(&piter)))
467 if (!p || !p->nr_sects) 491 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
468 continue; 492 disk_part_iter_exit(&piter);
469 kobject_uevent(&p->dev.kobj, KOBJ_ADD);
470 }
471} 493}
472 494
473int rescan_partitions(struct gendisk *disk, struct block_device *bdev) 495int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
474{ 496{
497 struct disk_part_iter piter;
498 struct hd_struct *part;
475 struct parsed_partitions *state; 499 struct parsed_partitions *state;
476 int p, res; 500 int p, highest, res;
477 501
478 if (bdev->bd_part_count) 502 if (bdev->bd_part_count)
479 return -EBUSY; 503 return -EBUSY;
480 res = invalidate_partition(disk, 0); 504 res = invalidate_partition(disk, 0);
481 if (res) 505 if (res)
482 return res; 506 return res;
483 bdev->bd_invalidated = 0; 507
484 for (p = 1; p < disk->minors; p++) 508 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
485 delete_partition(disk, p); 509 while ((part = disk_part_iter_next(&piter)))
510 delete_partition(disk, part->partno);
511 disk_part_iter_exit(&piter);
512
486 if (disk->fops->revalidate_disk) 513 if (disk->fops->revalidate_disk)
487 disk->fops->revalidate_disk(disk); 514 disk->fops->revalidate_disk(disk);
515 check_disk_size_change(disk, bdev);
516 bdev->bd_invalidated = 0;
488 if (!get_capacity(disk) || !(state = check_partition(disk, bdev))) 517 if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
489 return 0; 518 return 0;
490 if (IS_ERR(state)) /* I/O error reading the partition table */ 519 if (IS_ERR(state)) /* I/O error reading the partition table */
491 return -EIO; 520 return -EIO;
492 521
493 /* tell userspace that the media / partition table may have changed */ 522 /* tell userspace that the media / partition table may have changed */
494 kobject_uevent(&disk->dev.kobj, KOBJ_CHANGE); 523 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
495 524
525 /* Detect the highest partition number and preallocate
526 * disk->part_tbl. This is an optimization and not strictly
527 * necessary.
528 */
529 for (p = 1, highest = 0; p < state->limit; p++)
530 if (state->parts[p].size)
531 highest = p;
532
533 disk_expand_part_tbl(disk, highest);
534
535 /* add partitions */
496 for (p = 1; p < state->limit; p++) { 536 for (p = 1; p < state->limit; p++) {
497 sector_t size = state->parts[p].size; 537 sector_t size = state->parts[p].size;
498 sector_t from = state->parts[p].from; 538 sector_t from = state->parts[p].from;
@@ -541,25 +581,31 @@ EXPORT_SYMBOL(read_dev_sector);
541 581
542void del_gendisk(struct gendisk *disk) 582void del_gendisk(struct gendisk *disk)
543{ 583{
544 int p; 584 struct disk_part_iter piter;
585 struct hd_struct *part;
545 586
546 /* invalidate stuff */ 587 /* invalidate stuff */
547 for (p = disk->minors - 1; p > 0; p--) { 588 disk_part_iter_init(&piter, disk,
548 invalidate_partition(disk, p); 589 DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
549 delete_partition(disk, p); 590 while ((part = disk_part_iter_next(&piter))) {
591 invalidate_partition(disk, part->partno);
592 delete_partition(disk, part->partno);
550 } 593 }
594 disk_part_iter_exit(&piter);
595
551 invalidate_partition(disk, 0); 596 invalidate_partition(disk, 0);
552 disk->capacity = 0; 597 blk_free_devt(disk_to_dev(disk)->devt);
598 set_capacity(disk, 0);
553 disk->flags &= ~GENHD_FL_UP; 599 disk->flags &= ~GENHD_FL_UP;
554 unlink_gendisk(disk); 600 unlink_gendisk(disk);
555 disk_stat_set_all(disk, 0); 601 part_stat_set_all(&disk->part0, 0);
556 disk->stamp = 0; 602 disk->part0.stamp = 0;
557 603
558 kobject_put(disk->holder_dir); 604 kobject_put(disk->part0.holder_dir);
559 kobject_put(disk->slave_dir); 605 kobject_put(disk->slave_dir);
560 disk->driverfs_dev = NULL; 606 disk->driverfs_dev = NULL;
561#ifndef CONFIG_SYSFS_DEPRECATED 607#ifndef CONFIG_SYSFS_DEPRECATED
562 sysfs_remove_link(block_depr, disk->dev.bus_id); 608 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
563#endif 609#endif
564 device_del(&disk->dev); 610 device_del(disk_to_dev(disk));
565} 611}
diff --git a/fs/partitions/check.h b/fs/partitions/check.h
index 17ae8ecd9e8b..98dbe1a84528 100644
--- a/fs/partitions/check.h
+++ b/fs/partitions/check.h
@@ -5,15 +5,13 @@
5 * add_gd_partition adds a partitions details to the devices partition 5 * add_gd_partition adds a partitions details to the devices partition
6 * description. 6 * description.
7 */ 7 */
8enum { MAX_PART = 256 };
9
10struct parsed_partitions { 8struct parsed_partitions {
11 char name[BDEVNAME_SIZE]; 9 char name[BDEVNAME_SIZE];
12 struct { 10 struct {
13 sector_t from; 11 sector_t from;
14 sector_t size; 12 sector_t size;
15 int flags; 13 int flags;
16 } parts[MAX_PART]; 14 } parts[DISK_MAX_PARTS];
17 int next; 15 int next;
18 int limit; 16 int limit;
19}; 17};
diff --git a/fs/splice.c b/fs/splice.c
index 1bbc6f4bb09c..a1e701c27156 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -898,6 +898,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
898 if (unlikely(!(out->f_mode & FMODE_WRITE))) 898 if (unlikely(!(out->f_mode & FMODE_WRITE)))
899 return -EBADF; 899 return -EBADF;
900 900
901 if (unlikely(out->f_flags & O_APPEND))
902 return -EINVAL;
903
901 ret = rw_verify_area(WRITE, out, ppos, len); 904 ret = rw_verify_area(WRITE, out, ppos, len);
902 if (unlikely(ret < 0)) 905 if (unlikely(ret < 0))
903 return ret; 906 return ret;
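
The splice change above makes do_splice_from() refuse O_APPEND targets. A small user-space sketch of what callers now see, assuming the -EINVAL return in the hunk surfaces as errno == EINVAL; the file name is a placeholder.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int pfd[2], out;

        pipe(pfd);
        write(pfd[1], "hello\n", 6);

        /* placeholder path, opened in append mode */
        out = open("log.txt", O_WRONLY | O_CREAT | O_APPEND, 0644);

        /* with the check above, splicing pipe data into an O_APPEND
         * descriptor is rejected instead of written at an arbitrary offset */
        if (splice(pfd[0], NULL, out, NULL, 6, 0) < 0 && errno == EINVAL)
                fprintf(stderr, "splice into O_APPEND file rejected\n");

        close(pfd[0]);
        close(pfd[1]);
        close(out);
        return 0;
}
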
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 986061ae1b9b..36d5fcd3f593 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1001,12 +1001,13 @@ xfs_buf_iodone_work(
1001 * We can get an EOPNOTSUPP to ordered writes. Here we clear the 1001 * We can get an EOPNOTSUPP to ordered writes. Here we clear the
1002 * ordered flag and reissue them. Because we can't tell the higher 1002 * ordered flag and reissue them. Because we can't tell the higher
1003 * layers directly that they should not issue ordered I/O anymore, they 1003 * layers directly that they should not issue ordered I/O anymore, they
1004 * need to check if the ordered flag was cleared during I/O completion. 1004 * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
1005 */ 1005 */
1006 if ((bp->b_error == EOPNOTSUPP) && 1006 if ((bp->b_error == EOPNOTSUPP) &&
1007 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) { 1007 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
1008 XB_TRACE(bp, "ordered_retry", bp->b_iodone); 1008 XB_TRACE(bp, "ordered_retry", bp->b_iodone);
1009 bp->b_flags &= ~XBF_ORDERED; 1009 bp->b_flags &= ~XBF_ORDERED;
1010 bp->b_flags |= _XFS_BARRIER_FAILED;
1010 xfs_buf_iorequest(bp); 1011 xfs_buf_iorequest(bp);
1011 } else if (bp->b_iodone) 1012 } else if (bp->b_iodone)
1012 (*(bp->b_iodone))(bp); 1013 (*(bp->b_iodone))(bp);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index fe0109956656..456519a088c7 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -85,6 +85,14 @@ typedef enum {
85 * modifications being lost. 85 * modifications being lost.
86 */ 86 */
87 _XBF_PAGE_LOCKED = (1 << 22), 87 _XBF_PAGE_LOCKED = (1 << 22),
88
89 /*
90 * If we try a barrier write, but it fails we have to communicate
91 * this to the upper layers. Unfortunately b_error gets overwritten
92 * when the buffer is re-issued so we have to add another flag to
93 * keep this information.
94 */
95 _XFS_BARRIER_FAILED = (1 << 23),
88} xfs_buf_flags_t; 96} xfs_buf_flags_t;
89 97
90typedef enum { 98typedef enum {
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 503ea89e8b9a..0b02c6443551 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1033,11 +1033,12 @@ xlog_iodone(xfs_buf_t *bp)
1033 l = iclog->ic_log; 1033 l = iclog->ic_log;
1034 1034
1035 /* 1035 /*
1036 * If the ordered flag has been removed by a lower 1036 * If the _XFS_BARRIER_FAILED flag was set by a lower
1037 * layer, it means the underlyin device no longer supports 1037 * layer, it means the underlying device no longer supports
1038 * barrier I/O. Warn loudly and turn off barriers. 1038 * barrier I/O. Warn loudly and turn off barriers.
1039 */ 1039 */
1040 if ((l->l_mp->m_flags & XFS_MOUNT_BARRIER) && !XFS_BUF_ISORDERED(bp)) { 1040 if (bp->b_flags & _XFS_BARRIER_FAILED) {
1041 bp->b_flags &= ~_XFS_BARRIER_FAILED;
1041 l->l_mp->m_flags &= ~XFS_MOUNT_BARRIER; 1042 l->l_mp->m_flags &= ~XFS_MOUNT_BARRIER;
1042 xfs_fs_cmn_err(CE_WARN, l->l_mp, 1043 xfs_fs_cmn_err(CE_WARN, l->l_mp,
1043 "xlog_iodone: Barriers are no longer supported" 1044 "xlog_iodone: Barriers are no longer supported"
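
The two XFS hunks above cooperate: b_error is overwritten when the buffer is reissued, so the barrier failure is carried in a flag instead. A condensed sketch of that handshake, assuming the buffer and mount fields behave as the hunks show (b_error, b_flags, m_flags); the helper functions are illustrative, not the patch's code.

/* completion side: barrier write unsupported, retry without the barrier */
static void retry_without_barrier(xfs_buf_t *bp)
{
        if (bp->b_error == EOPNOTSUPP &&
            (bp->b_flags & (XBF_ORDERED | XBF_ASYNC)) ==
                           (XBF_ORDERED | XBF_ASYNC)) {
                bp->b_flags &= ~XBF_ORDERED;        /* stop requesting barriers */
                bp->b_flags |= _XFS_BARRIER_FAILED; /* remember why it failed */
                xfs_buf_iorequest(bp);              /* reissue the write */
        }
}

/* log I/O completion side: act on the flag once, then disable barriers */
static void note_barrier_failure(xfs_buf_t *bp, struct xfs_mount *mp)
{
        if (bp->b_flags & _XFS_BARRIER_FAILED) {
                bp->b_flags &= ~_XFS_BARRIER_FAILED;
                mp->m_flags &= ~XFS_MOUNT_BARRIER;  /* mount-wide switch-off */
        }
}
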
diff --git a/include/asm-x86/a.out-core.h b/include/asm-x86/a.out-core.h
index 714207a1c387..f5705761a37b 100644
--- a/include/asm-x86/a.out-core.h
+++ b/include/asm-x86/a.out-core.h
@@ -9,8 +9,8 @@
9 * 2 of the Licence, or (at your option) any later version. 9 * 2 of the Licence, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef _ASM_A_OUT_CORE_H 12#ifndef ASM_X86__A_OUT_CORE_H
13#define _ASM_A_OUT_CORE_H 13#define ASM_X86__A_OUT_CORE_H
14 14
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16#ifdef CONFIG_X86_32 16#ifdef CONFIG_X86_32
@@ -70,4 +70,4 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
70 70
71#endif /* CONFIG_X86_32 */ 71#endif /* CONFIG_X86_32 */
72#endif /* __KERNEL__ */ 72#endif /* __KERNEL__ */
73#endif /* _ASM_A_OUT_CORE_H */ 73#endif /* ASM_X86__A_OUT_CORE_H */
diff --git a/include/asm-x86/a.out.h b/include/asm-x86/a.out.h
index 4684f97a5bbd..0948748bc69c 100644
--- a/include/asm-x86/a.out.h
+++ b/include/asm-x86/a.out.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_A_OUT_H 1#ifndef ASM_X86__A_OUT_H
2#define _ASM_X86_A_OUT_H 2#define ASM_X86__A_OUT_H
3 3
4struct exec 4struct exec
5{ 5{
@@ -17,4 +17,4 @@ struct exec
17#define N_DRSIZE(a) ((a).a_drsize) 17#define N_DRSIZE(a) ((a).a_drsize)
18#define N_SYMSIZE(a) ((a).a_syms) 18#define N_SYMSIZE(a) ((a).a_syms)
19 19
20#endif /* _ASM_X86_A_OUT_H */ 20#endif /* ASM_X86__A_OUT_H */
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 35d1743b57ac..392e17336be1 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ACPI_H 1#ifndef ASM_X86__ACPI_H
2#define _ASM_X86_ACPI_H 2#define ASM_X86__ACPI_H
3 3
4/* 4/*
5 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
@@ -175,4 +175,4 @@ static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
175 175
176#define acpi_unlazy_tlb(x) leave_mm(x) 176#define acpi_unlazy_tlb(x) leave_mm(x)
177 177
178#endif /*__X86_ASM_ACPI_H*/ 178#endif /* ASM_X86__ACPI_H */
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index e4004a9f6a9a..3617fd4fcdf9 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_AGP_H 1#ifndef ASM_X86__AGP_H
2#define _ASM_X86_AGP_H 2#define ASM_X86__AGP_H
3 3
4#include <asm/pgtable.h> 4#include <asm/pgtable.h>
5#include <asm/cacheflush.h> 5#include <asm/cacheflush.h>
@@ -32,4 +32,4 @@
32#define free_gatt_pages(table, order) \ 32#define free_gatt_pages(table, order) \
33 free_pages((unsigned long)(table), (order)) 33 free_pages((unsigned long)(table), (order))
34 34
35#endif 35#endif /* ASM_X86__AGP_H */
diff --git a/include/asm-x86/alternative.h b/include/asm-x86/alternative.h
index f6aa18eadf71..22d3c9862bf3 100644
--- a/include/asm-x86/alternative.h
+++ b/include/asm-x86/alternative.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ALTERNATIVE_H 1#ifndef ASM_X86__ALTERNATIVE_H
2#define _ASM_X86_ALTERNATIVE_H 2#define ASM_X86__ALTERNATIVE_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
@@ -180,4 +180,4 @@ extern void add_nops(void *insns, unsigned int len);
180extern void *text_poke(void *addr, const void *opcode, size_t len); 180extern void *text_poke(void *addr, const void *opcode, size_t len);
181extern void *text_poke_early(void *addr, const void *opcode, size_t len); 181extern void *text_poke_early(void *addr, const void *opcode, size_t len);
182 182
183#endif /* _ASM_X86_ALTERNATIVE_H */ 183#endif /* ASM_X86__ALTERNATIVE_H */
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
index 30a12049353b..783f43e58052 100644
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@ -17,8 +17,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#ifndef _ASM_X86_AMD_IOMMU_H 20#ifndef ASM_X86__AMD_IOMMU_H
21#define _ASM_X86_AMD_IOMMU_H 21#define ASM_X86__AMD_IOMMU_H
22 22
23#ifdef CONFIG_AMD_IOMMU 23#ifdef CONFIG_AMD_IOMMU
24extern int amd_iommu_init(void); 24extern int amd_iommu_init(void);
@@ -29,4 +29,4 @@ static inline int amd_iommu_init(void) { return -ENODEV; }
29static inline void amd_iommu_detect(void) { } 29static inline void amd_iommu_detect(void) { }
30#endif 30#endif
31 31
32#endif 32#endif /* ASM_X86__AMD_IOMMU_H */
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index dcc812067394..1ffa4e53c989 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -17,8 +17,8 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#ifndef __AMD_IOMMU_TYPES_H__ 20#ifndef ASM_X86__AMD_IOMMU_TYPES_H
21#define __AMD_IOMMU_TYPES_H__ 21#define ASM_X86__AMD_IOMMU_TYPES_H
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/list.h> 24#include <linux/list.h>
@@ -341,4 +341,4 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
341 return (((u16)bus) << 8) | devfn; 341 return (((u16)bus) << 8) | devfn;
342} 342}
343 343
344#endif 344#endif /* ASM_X86__AMD_IOMMU_TYPES_H */
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 133c998161ca..65590c9aecd4 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_APIC_H 1#ifndef ASM_X86__APIC_H
2#define _ASM_X86_APIC_H 2#define ASM_X86__APIC_H
3 3
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/delay.h> 5#include <linux/delay.h>
@@ -54,6 +54,11 @@ extern int disable_apic;
54#endif 54#endif
55 55
56extern int is_vsmp_box(void); 56extern int is_vsmp_box(void);
57extern void xapic_wait_icr_idle(void);
58extern u32 safe_xapic_wait_icr_idle(void);
59extern u64 xapic_icr_read(void);
60extern void xapic_icr_write(u32, u32);
61extern int setup_profiling_timer(unsigned int);
57 62
58static inline void native_apic_write(unsigned long reg, u32 v) 63static inline void native_apic_write(unsigned long reg, u32 v)
59{ 64{
@@ -76,9 +81,7 @@ extern int get_physical_broadcast(void);
76static inline void ack_APIC_irq(void) 81static inline void ack_APIC_irq(void)
77{ 82{
78 /* 83 /*
79 * ack_APIC_irq() actually gets compiled as a single instruction: 84 * ack_APIC_irq() actually gets compiled as a single instruction
80 * - a single rmw on Pentium/82489DX
81 * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
82 * ... yummie. 85 * ... yummie.
83 */ 86 */
84 87
@@ -128,4 +131,4 @@ static inline void init_apic_mappings(void) { }
128 131
129#endif /* !CONFIG_X86_LOCAL_APIC */ 132#endif /* !CONFIG_X86_LOCAL_APIC */
130 133
131#endif /* __ASM_APIC_H */ 134#endif /* ASM_X86__APIC_H */
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index 6b9008c78731..c40687da20fc 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_APICDEF_H 1#ifndef ASM_X86__APICDEF_H
2#define _ASM_X86_APICDEF_H 2#define ASM_X86__APICDEF_H
3 3
4/* 4/*
5 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) 5 * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
@@ -411,4 +411,4 @@ struct local_apic {
411#else 411#else
412 #define BAD_APICID 0xFFFFu 412 #define BAD_APICID 0xFFFFu
413#endif 413#endif
414#endif 414#endif /* ASM_X86__APICDEF_H */
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h
index 8411750ceb63..72adc3a109cc 100644
--- a/include/asm-x86/arch_hooks.h
+++ b/include/asm-x86/arch_hooks.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_ARCH_HOOKS_H 1#ifndef ASM_X86__ARCH_HOOKS_H
2#define _ASM_ARCH_HOOKS_H 2#define ASM_X86__ARCH_HOOKS_H
3 3
4#include <linux/interrupt.h> 4#include <linux/interrupt.h>
5 5
@@ -25,4 +25,4 @@ extern void pre_time_init_hook(void);
25extern void time_init_hook(void); 25extern void time_init_hook(void);
26extern void mca_nmi_hook(void); 26extern void mca_nmi_hook(void);
27 27
28#endif 28#endif /* ASM_X86__ARCH_HOOKS_H */
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 97220321f39d..e1355f44d7c3 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ASM_H 1#ifndef ASM_X86__ASM_H
2#define _ASM_X86_ASM_H 2#define ASM_X86__ASM_H
3 3
4#ifdef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5# define __ASM_FORM(x) x 5# define __ASM_FORM(x) x
@@ -20,17 +20,22 @@
20 20
21#define _ASM_PTR __ASM_SEL(.long, .quad) 21#define _ASM_PTR __ASM_SEL(.long, .quad)
22#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) 22#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
23#define _ASM_MOV_UL __ASM_SIZE(mov)
24 23
24#define _ASM_MOV __ASM_SIZE(mov)
25#define _ASM_INC __ASM_SIZE(inc) 25#define _ASM_INC __ASM_SIZE(inc)
26#define _ASM_DEC __ASM_SIZE(dec) 26#define _ASM_DEC __ASM_SIZE(dec)
27#define _ASM_ADD __ASM_SIZE(add) 27#define _ASM_ADD __ASM_SIZE(add)
28#define _ASM_SUB __ASM_SIZE(sub) 28#define _ASM_SUB __ASM_SIZE(sub)
29#define _ASM_XADD __ASM_SIZE(xadd) 29#define _ASM_XADD __ASM_SIZE(xadd)
30
30#define _ASM_AX __ASM_REG(ax) 31#define _ASM_AX __ASM_REG(ax)
31#define _ASM_BX __ASM_REG(bx) 32#define _ASM_BX __ASM_REG(bx)
32#define _ASM_CX __ASM_REG(cx) 33#define _ASM_CX __ASM_REG(cx)
33#define _ASM_DX __ASM_REG(dx) 34#define _ASM_DX __ASM_REG(dx)
35#define _ASM_SP __ASM_REG(sp)
36#define _ASM_BP __ASM_REG(bp)
37#define _ASM_SI __ASM_REG(si)
38#define _ASM_DI __ASM_REG(di)
34 39
35/* Exception table entry */ 40/* Exception table entry */
36# define _ASM_EXTABLE(from,to) \ 41# define _ASM_EXTABLE(from,to) \
@@ -39,4 +44,4 @@
39 _ASM_PTR #from "," #to "\n" \ 44 _ASM_PTR #from "," #to "\n" \
40 " .previous\n" 45 " .previous\n"
41 46
42#endif /* _ASM_X86_ASM_H */ 47#endif /* ASM_X86__ASM_H */
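
The asm.h hunk above renames _ASM_MOV_UL to _ASM_MOV and adds register selectors (_ASM_SP, _ASM_BP, _ASM_SI, _ASM_DI). A hypothetical illustration of why such macros exist: one inline-asm body that assembles to the right operand size on both 32- and 64-bit builds. The helper below is not from the patch.

/* _ASM_MOV expands to " movl " on 32-bit and " movq " on 64-bit, so the
 * same asm string loads a full unsigned long either way. */
static inline unsigned long load_word(const unsigned long *p)
{
        unsigned long val;

        asm(_ASM_MOV " %1, %0" : "=r" (val) : "m" (*p));
        return val;
}
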
diff --git a/include/asm-x86/atomic_32.h b/include/asm-x86/atomic_32.h
index 21a4825148c0..14d3f0beb889 100644
--- a/include/asm-x86/atomic_32.h
+++ b/include/asm-x86/atomic_32.h
@@ -1,5 +1,5 @@
1#ifndef __ARCH_I386_ATOMIC__ 1#ifndef ASM_X86__ATOMIC_32_H
2#define __ARCH_I386_ATOMIC__ 2#define ASM_X86__ATOMIC_32_H
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
@@ -256,4 +256,4 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
256#define smp_mb__after_atomic_inc() barrier() 256#define smp_mb__after_atomic_inc() barrier()
257 257
258#include <asm-generic/atomic.h> 258#include <asm-generic/atomic.h>
259#endif 259#endif /* ASM_X86__ATOMIC_32_H */
diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 91c7d03e65bc..2cb218c4a356 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -1,5 +1,5 @@
1#ifndef __ARCH_X86_64_ATOMIC__ 1#ifndef ASM_X86__ATOMIC_64_H
2#define __ARCH_X86_64_ATOMIC__ 2#define ASM_X86__ATOMIC_64_H
3 3
4#include <asm/alternative.h> 4#include <asm/alternative.h>
5#include <asm/cmpxchg.h> 5#include <asm/cmpxchg.h>
@@ -470,4 +470,4 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
470#define smp_mb__after_atomic_inc() barrier() 470#define smp_mb__after_atomic_inc() barrier()
471 471
472#include <asm-generic/atomic.h> 472#include <asm-generic/atomic.h>
473#endif 473#endif /* ASM_X86__ATOMIC_64_H */
diff --git a/include/asm-x86/auxvec.h b/include/asm-x86/auxvec.h
index 87f5e6d5a020..12c7cac74202 100644
--- a/include/asm-x86/auxvec.h
+++ b/include/asm-x86/auxvec.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_AUXVEC_H 1#ifndef ASM_X86__AUXVEC_H
2#define _ASM_X86_AUXVEC_H 2#define ASM_X86__AUXVEC_H
3/* 3/*
4 * Architecture-neutral AT_ values in 0-17, leave some room 4 * Architecture-neutral AT_ values in 0-17, leave some room
5 * for more of them, start the x86-specific ones at 32. 5 * for more of them, start the x86-specific ones at 32.
@@ -9,4 +9,4 @@
9#endif 9#endif
10#define AT_SYSINFO_EHDR 33 10#define AT_SYSINFO_EHDR 33
11 11
12#endif 12#endif /* ASM_X86__AUXVEC_H */
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index 0033e50c13b2..ec42ed874591 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -1,5 +1,5 @@
1#ifndef _MACH_BIOS_EBDA_H 1#ifndef ASM_X86__BIOS_EBDA_H
2#define _MACH_BIOS_EBDA_H 2#define ASM_X86__BIOS_EBDA_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5 5
@@ -16,4 +16,4 @@ static inline unsigned int get_bios_ebda(void)
16 16
17void reserve_ebda_region(void); 17void reserve_ebda_region(void);
18 18
19#endif /* _MACH_BIOS_EBDA_H */ 19#endif /* ASM_X86__BIOS_EBDA_H */
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index cfb2b64f76e7..61989b93b475 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BITOPS_H 1#ifndef ASM_X86__BITOPS_H
2#define _ASM_X86_BITOPS_H 2#define ASM_X86__BITOPS_H
3 3
4/* 4/*
5 * Copyright 1992, Linus Torvalds. 5 * Copyright 1992, Linus Torvalds.
@@ -458,4 +458,4 @@ static inline void set_bit_string(unsigned long *bitmap,
458#include <asm-generic/bitops/minix.h> 458#include <asm-generic/bitops/minix.h>
459 459
460#endif /* __KERNEL__ */ 460#endif /* __KERNEL__ */
461#endif /* _ASM_X86_BITOPS_H */ 461#endif /* ASM_X86__BITOPS_H */
diff --git a/include/asm-x86/boot.h b/include/asm-x86/boot.h
index 2faed7ecb092..825de5dc867c 100644
--- a/include/asm-x86/boot.h
+++ b/include/asm-x86/boot.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_BOOT_H 1#ifndef ASM_X86__BOOT_H
2#define _ASM_BOOT_H 2#define ASM_X86__BOOT_H
3 3
4/* Don't touch these, unless you really know what you're doing. */ 4/* Don't touch these, unless you really know what you're doing. */
5#define DEF_INITSEG 0x9000 5#define DEF_INITSEG 0x9000
@@ -25,4 +25,4 @@
25#define BOOT_STACK_SIZE 0x1000 25#define BOOT_STACK_SIZE 0x1000
26#endif 26#endif
27 27
28#endif /* _ASM_BOOT_H */ 28#endif /* ASM_X86__BOOT_H */
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index ae22bdf0ab14..ccf027e2d97d 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_BOOTPARAM_H 1#ifndef ASM_X86__BOOTPARAM_H
2#define _ASM_BOOTPARAM_H 2#define ASM_X86__BOOTPARAM_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/screen_info.h> 5#include <linux/screen_info.h>
@@ -108,4 +108,4 @@ struct boot_params {
108 __u8 _pad9[276]; /* 0xeec */ 108 __u8 _pad9[276]; /* 0xeec */
109} __attribute__((packed)); 109} __attribute__((packed));
110 110
111#endif /* _ASM_BOOTPARAM_H */ 111#endif /* ASM_X86__BOOTPARAM_H */
diff --git a/include/asm-x86/bug.h b/include/asm-x86/bug.h
index b69aa64b82a4..91ad43a54c47 100644
--- a/include/asm-x86/bug.h
+++ b/include/asm-x86/bug.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BUG_H 1#ifndef ASM_X86__BUG_H
2#define _ASM_X86_BUG_H 2#define ASM_X86__BUG_H
3 3
4#ifdef CONFIG_BUG 4#ifdef CONFIG_BUG
5#define HAVE_ARCH_BUG 5#define HAVE_ARCH_BUG
@@ -36,4 +36,4 @@ do { \
36#endif /* !CONFIG_BUG */ 36#endif /* !CONFIG_BUG */
37 37
38#include <asm-generic/bug.h> 38#include <asm-generic/bug.h>
39#endif 39#endif /* ASM_X86__BUG_H */
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index 021cbdd5f258..4761c461d23a 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -1,7 +1,7 @@
1#ifndef _ASM_X86_BUGS_H 1#ifndef ASM_X86__BUGS_H
2#define _ASM_X86_BUGS_H 2#define ASM_X86__BUGS_H
3 3
4extern void check_bugs(void); 4extern void check_bugs(void);
5int ppro_with_ram_bug(void); 5int ppro_with_ram_bug(void);
6 6
7#endif /* _ASM_X86_BUGS_H */ 7#endif /* ASM_X86__BUGS_H */
diff --git a/include/asm-x86/byteorder.h b/include/asm-x86/byteorder.h
index e02ae2d89acf..722f27d68105 100644
--- a/include/asm-x86/byteorder.h
+++ b/include/asm-x86/byteorder.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BYTEORDER_H 1#ifndef ASM_X86__BYTEORDER_H
2#define _ASM_X86_BYTEORDER_H 2#define ASM_X86__BYTEORDER_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -78,4 +78,4 @@ static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
78 78
79#include <linux/byteorder/little_endian.h> 79#include <linux/byteorder/little_endian.h>
80 80
81#endif /* _ASM_X86_BYTEORDER_H */ 81#endif /* ASM_X86__BYTEORDER_H */
diff --git a/include/asm-x86/cache.h b/include/asm-x86/cache.h
index 1e0bac86f38f..ea3f1cc06a97 100644
--- a/include/asm-x86/cache.h
+++ b/include/asm-x86/cache.h
@@ -1,5 +1,5 @@
1#ifndef _ARCH_X86_CACHE_H 1#ifndef ASM_X86__CACHE_H
2#define _ARCH_X86_CACHE_H 2#define ASM_X86__CACHE_H
3 3
4/* L1 cache line size */ 4/* L1 cache line size */
5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) 5#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
@@ -17,4 +17,4 @@
17#endif 17#endif
18#endif 18#endif
19 19
20#endif 20#endif /* ASM_X86__CACHE_H */
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index f4c0ab50d2c2..59859cb28a36 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_CACHEFLUSH_H 1#ifndef ASM_X86__CACHEFLUSH_H
2#define _ASM_X86_CACHEFLUSH_H 2#define ASM_X86__CACHEFLUSH_H
3 3
4/* Keep includes the same across arches. */ 4/* Keep includes the same across arches. */
5#include <linux/mm.h> 5#include <linux/mm.h>
@@ -112,4 +112,4 @@ static inline int rodata_test(void)
112} 112}
113#endif 113#endif
114 114
115#endif 115#endif /* ASM_X86__CACHEFLUSH_H */
diff --git a/include/asm-x86/calgary.h b/include/asm-x86/calgary.h
index 67f60406e2d8..933fd272f826 100644
--- a/include/asm-x86/calgary.h
+++ b/include/asm-x86/calgary.h
@@ -21,8 +21,8 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 22 */
23 23
24#ifndef _ASM_X86_64_CALGARY_H 24#ifndef ASM_X86__CALGARY_H
25#define _ASM_X86_64_CALGARY_H 25#define ASM_X86__CALGARY_H
26 26
27#include <linux/spinlock.h> 27#include <linux/spinlock.h>
28#include <linux/device.h> 28#include <linux/device.h>
@@ -69,4 +69,4 @@ static inline int calgary_iommu_init(void) { return 1; }
69static inline void detect_calgary(void) { return; } 69static inline void detect_calgary(void) { return; }
70#endif 70#endif
71 71
72#endif /* _ASM_X86_64_CALGARY_H */ 72#endif /* ASM_X86__CALGARY_H */
diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h
index 52bbb0d8c4c1..d041e8cda227 100644
--- a/include/asm-x86/checksum_32.h
+++ b/include/asm-x86/checksum_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_CHECKSUM_H 1#ifndef ASM_X86__CHECKSUM_32_H
2#define _I386_CHECKSUM_H 2#define ASM_X86__CHECKSUM_32_H
3 3
4#include <linux/in6.h> 4#include <linux/in6.h>
5 5
@@ -186,4 +186,4 @@ static inline __wsum csum_and_copy_to_user(const void *src,
186 return (__force __wsum)-1; /* invalid checksum */ 186 return (__force __wsum)-1; /* invalid checksum */
187} 187}
188 188
189#endif 189#endif /* ASM_X86__CHECKSUM_32_H */
diff --git a/include/asm-x86/checksum_64.h b/include/asm-x86/checksum_64.h
index 8bd861cc5267..110f403beb89 100644
--- a/include/asm-x86/checksum_64.h
+++ b/include/asm-x86/checksum_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_CHECKSUM_H 1#ifndef ASM_X86__CHECKSUM_64_H
2#define _X86_64_CHECKSUM_H 2#define ASM_X86__CHECKSUM_64_H
3 3
4/* 4/*
5 * Checksums for x86-64 5 * Checksums for x86-64
@@ -188,4 +188,4 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
188 return a; 188 return a;
189} 189}
190 190
191#endif 191#endif /* ASM_X86__CHECKSUM_64_H */
diff --git a/include/asm-x86/cmpxchg_32.h b/include/asm-x86/cmpxchg_32.h
index bf5a69d1329e..0622e45cdf7c 100644
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_CMPXCHG_H 1#ifndef ASM_X86__CMPXCHG_32_H
2#define __ASM_CMPXCHG_H 2#define ASM_X86__CMPXCHG_32_H
3 3
4#include <linux/bitops.h> /* for LOCK_PREFIX */ 4#include <linux/bitops.h> /* for LOCK_PREFIX */
5 5
@@ -341,4 +341,4 @@ extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
341 341
342#endif 342#endif
343 343
344#endif 344#endif /* ASM_X86__CMPXCHG_32_H */
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index 17463ccf8166..63c1a5e61b99 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_CMPXCHG_H 1#ifndef ASM_X86__CMPXCHG_64_H
2#define __ASM_CMPXCHG_H 2#define ASM_X86__CMPXCHG_64_H
3 3
4#include <asm/alternative.h> /* Provides LOCK_PREFIX */ 4#include <asm/alternative.h> /* Provides LOCK_PREFIX */
5 5
@@ -182,4 +182,4 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
182 cmpxchg_local((ptr), (o), (n)); \ 182 cmpxchg_local((ptr), (o), (n)); \
183}) 183})
184 184
185#endif 185#endif /* ASM_X86__CMPXCHG_64_H */
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index 1793ac317a30..6732b150949e 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_COMPAT_H 1#ifndef ASM_X86__COMPAT_H
2#define _ASM_X86_64_COMPAT_H 2#define ASM_X86__COMPAT_H
3 3
4/* 4/*
5 * Architecture specific compatibility types 5 * Architecture specific compatibility types
@@ -215,4 +215,4 @@ static inline int is_compat_task(void)
215 return current_thread_info()->status & TS_COMPAT; 215 return current_thread_info()->status & TS_COMPAT;
216} 216}
217 217
218#endif /* _ASM_X86_64_COMPAT_H */ 218#endif /* ASM_X86__COMPAT_H */
diff --git a/include/asm-x86/cpu.h b/include/asm-x86/cpu.h
index 73f2ea84fd74..83a115083f0d 100644
--- a/include/asm-x86/cpu.h
+++ b/include/asm-x86/cpu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_I386_CPU_H_ 1#ifndef ASM_X86__CPU_H
2#define _ASM_I386_CPU_H_ 2#define ASM_X86__CPU_H
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/cpu.h> 5#include <linux/cpu.h>
@@ -17,4 +17,4 @@ extern void arch_unregister_cpu(int);
17#endif 17#endif
18 18
19DECLARE_PER_CPU(int, cpu_state); 19DECLARE_PER_CPU(int, cpu_state);
20#endif /* _ASM_I386_CPU_H_ */ 20#endif /* ASM_X86__CPU_H */
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index cfcfb0a806ba..250fa0cb144b 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * Defines x86 CPU feature bits 2 * Defines x86 CPU feature bits
3 */ 3 */
4#ifndef _ASM_X86_CPUFEATURE_H 4#ifndef ASM_X86__CPUFEATURE_H
5#define _ASM_X86_CPUFEATURE_H 5#define ASM_X86__CPUFEATURE_H
6 6
7#include <asm/required-features.h> 7#include <asm/required-features.h>
8 8
@@ -224,4 +224,4 @@ extern const char * const x86_power_flags[32];
224 224
225#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 225#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
226 226
227#endif /* _ASM_X86_CPUFEATURE_H */ 227#endif /* ASM_X86__CPUFEATURE_H */
diff --git a/include/asm-x86/current.h b/include/asm-x86/current.h
index 7515c19d4988..a863ead856f3 100644
--- a/include/asm-x86/current.h
+++ b/include/asm-x86/current.h
@@ -1,5 +1,5 @@
1#ifndef _X86_CURRENT_H 1#ifndef ASM_X86__CURRENT_H
2#define _X86_CURRENT_H 2#define ASM_X86__CURRENT_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -36,4 +36,4 @@ static __always_inline struct task_struct *get_current(void)
36 36
37#define current get_current() 37#define current get_current()
38 38
39#endif /* X86_CURRENT_H */ 39#endif /* ASM_X86__CURRENT_H */
diff --git a/include/asm-x86/debugreg.h b/include/asm-x86/debugreg.h
index c6344d572b03..ecb6907c3ea4 100644
--- a/include/asm-x86/debugreg.h
+++ b/include/asm-x86/debugreg.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DEBUGREG_H 1#ifndef ASM_X86__DEBUGREG_H
2#define _ASM_X86_DEBUGREG_H 2#define ASM_X86__DEBUGREG_H
3 3
4 4
5/* Indicate the register numbers for a number of the specific 5/* Indicate the register numbers for a number of the specific
@@ -67,4 +67,4 @@
67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ 67#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ 68#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
69 69
70#endif 70#endif /* ASM_X86__DEBUGREG_H */
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index 409a649204aa..8a0da95b4fc5 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DELAY_H 1#ifndef ASM_X86__DELAY_H
2#define _ASM_X86_DELAY_H 2#define ASM_X86__DELAY_H
3 3
4/* 4/*
5 * Copyright (C) 1993 Linus Torvalds 5 * Copyright (C) 1993 Linus Torvalds
@@ -28,4 +28,4 @@ extern void __delay(unsigned long loops);
28 28
29void use_tsc_delay(void); 29void use_tsc_delay(void);
30 30
31#endif /* _ASM_X86_DELAY_H */ 31#endif /* ASM_X86__DELAY_H */
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index a44c4dc70590..b73fea54def2 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_DESC_H_ 1#ifndef ASM_X86__DESC_H
2#define _ASM_DESC_H_ 2#define ASM_X86__DESC_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <asm/desc_defs.h> 5#include <asm/desc_defs.h>
@@ -397,4 +397,4 @@ static inline void set_system_gate_ist(int n, void *addr, unsigned ist)
397 397
398#endif /* __ASSEMBLY__ */ 398#endif /* __ASSEMBLY__ */
399 399
400#endif 400#endif /* ASM_X86__DESC_H */
diff --git a/include/asm-x86/desc_defs.h b/include/asm-x86/desc_defs.h
index f7bacf357dac..b881db664b46 100644
--- a/include/asm-x86/desc_defs.h
+++ b/include/asm-x86/desc_defs.h
@@ -1,6 +1,6 @@
1/* Written 2000 by Andi Kleen */ 1/* Written 2000 by Andi Kleen */
2#ifndef __ARCH_DESC_DEFS_H 2#ifndef ASM_X86__DESC_DEFS_H
3#define __ARCH_DESC_DEFS_H 3#define ASM_X86__DESC_DEFS_H
4 4
5/* 5/*
6 * Segment descriptor structure definitions, usable from both x86_64 and i386 6 * Segment descriptor structure definitions, usable from both x86_64 and i386
@@ -92,4 +92,4 @@ struct desc_ptr {
92 92
93#endif /* !__ASSEMBLY__ */ 93#endif /* !__ASSEMBLY__ */
94 94
95#endif 95#endif /* ASM_X86__DESC_DEFS_H */
diff --git a/include/asm-x86/device.h b/include/asm-x86/device.h
index 3c034f48fdb0..1bece04c7d9d 100644
--- a/include/asm-x86/device.h
+++ b/include/asm-x86/device.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DEVICE_H 1#ifndef ASM_X86__DEVICE_H
2#define _ASM_X86_DEVICE_H 2#define ASM_X86__DEVICE_H
3 3
4struct dev_archdata { 4struct dev_archdata {
5#ifdef CONFIG_ACPI 5#ifdef CONFIG_ACPI
@@ -13,4 +13,4 @@ struct dma_mapping_ops *dma_ops;
13#endif 13#endif
14}; 14};
15 15
16#endif /* _ASM_X86_DEVICE_H */ 16#endif /* ASM_X86__DEVICE_H */
diff --git a/include/asm-x86/div64.h b/include/asm-x86/div64.h
index 9a2d644c08ef..f9530f23f1d6 100644
--- a/include/asm-x86/div64.h
+++ b/include/asm-x86/div64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DIV64_H 1#ifndef ASM_X86__DIV64_H
2#define _ASM_X86_DIV64_H 2#define ASM_X86__DIV64_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
@@ -57,4 +57,4 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
57# include <asm-generic/div64.h> 57# include <asm-generic/div64.h>
58#endif /* CONFIG_X86_32 */ 58#endif /* CONFIG_X86_32 */
59 59
60#endif /* _ASM_X86_DIV64_H */ 60#endif /* ASM_X86__DIV64_H */
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index ad9cd6d49bfc..5d200e78bd81 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_DMA_MAPPING_H_ 1#ifndef ASM_X86__DMA_MAPPING_H
2#define _ASM_DMA_MAPPING_H_ 2#define ASM_X86__DMA_MAPPING_H
3 3
4/* 4/*
5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for 5 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
@@ -250,4 +250,4 @@ static inline int dma_get_cache_alignment(void)
250#define dma_is_consistent(d, h) (1) 250#define dma_is_consistent(d, h) (1)
251 251
252#include <asm-generic/dma-coherent.h> 252#include <asm-generic/dma-coherent.h>
253#endif 253#endif /* ASM_X86__DMA_MAPPING_H */
diff --git a/include/asm-x86/dma.h b/include/asm-x86/dma.h
index ca1098a7e580..c9f7a4eec555 100644
--- a/include/asm-x86/dma.h
+++ b/include/asm-x86/dma.h
@@ -5,8 +5,8 @@
5 * and John Boyd, Nov. 1992. 5 * and John Boyd, Nov. 1992.
6 */ 6 */
7 7
8#ifndef _ASM_X86_DMA_H 8#ifndef ASM_X86__DMA_H
9#define _ASM_X86_DMA_H 9#define ASM_X86__DMA_H
10 10
11#include <linux/spinlock.h> /* And spinlocks */ 11#include <linux/spinlock.h> /* And spinlocks */
12#include <asm/io.h> /* need byte IO */ 12#include <asm/io.h> /* need byte IO */
@@ -315,4 +315,4 @@ extern int isa_dma_bridge_buggy;
315#define isa_dma_bridge_buggy (0) 315#define isa_dma_bridge_buggy (0)
316#endif 316#endif
317 317
318#endif /* _ASM_X86_DMA_H */ 318#endif /* ASM_X86__DMA_H */
diff --git a/include/asm-x86/dmi.h b/include/asm-x86/dmi.h
index 58a86571fe0f..1cff6fe81fa5 100644
--- a/include/asm-x86/dmi.h
+++ b/include/asm-x86/dmi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_DMI_H 1#ifndef ASM_X86__DMI_H
2#define _ASM_X86_DMI_H 2#define ASM_X86__DMI_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5 5
@@ -23,4 +23,4 @@ static inline void *dmi_alloc(unsigned len)
23#define dmi_ioremap early_ioremap 23#define dmi_ioremap early_ioremap
24#define dmi_iounmap early_iounmap 24#define dmi_iounmap early_iounmap
25 25
26#endif 26#endif /* ASM_X86__DMI_H */
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h
index 7881368142fa..c3c953a45b21 100644
--- a/include/asm-x86/ds.h
+++ b/include/asm-x86/ds.h
@@ -2,71 +2,237 @@
2 * Debug Store (DS) support 2 * Debug Store (DS) support
3 * 3 *
4 * This provides a low-level interface to the hardware's Debug Store 4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and 5 * feature that is used for branch trace store (BTS) and
6 * precise-event based sampling (PEBS). 6 * precise-event based sampling (PEBS).
7 * 7 *
8 * Different architectures use a different DS layout/pointer size. 8 * It manages:
9 * The below functions therefore work on a void*. 9 * - per-thread and per-cpu allocation of BTS and PEBS
10 * - buffer memory allocation (optional)
11 * - buffer overflow handling
12 * - buffer access
10 * 13 *
14 * It assumes:
15 * - get_task_struct on all parameter tasks
16 * - current is allowed to trace parameter tasks
11 * 17 *
12 * Since there is no user for PEBS, yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 * 18 *
15 * 19 * Copyright (C) 2007-2008 Intel Corporation.
16 * Copyright (C) 2007 Intel Corporation. 20 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */ 21 */
19 22
20#ifndef _ASM_X86_DS_H 23#ifndef ASM_X86__DS_H
21#define _ASM_X86_DS_H 24#define ASM_X86__DS_H
25
26#ifdef CONFIG_X86_DS
22 27
23#include <linux/types.h> 28#include <linux/types.h>
24#include <linux/init.h> 29#include <linux/init.h>
25 30
26struct cpuinfo_x86;
27 31
32struct task_struct;
28 33
29/* a branch trace record entry 34/*
35 * Request BTS or PEBS
36 *
37 * Due to alignment constraints, the actual buffer may be slightly
38 * smaller than the requested or provided buffer.
30 * 39 *
31 * In order to unify the interface between various processor versions, 40 * Returns 0 on success; -Eerrno otherwise
32 * we use the below data structure for all processors. 41 *
42 * task: the task to request recording for;
43 * NULL for per-cpu recording on the current cpu
44 * base: the base pointer for the (non-pageable) buffer;
45 * NULL if buffer allocation requested
46 * size: the size of the requested or provided buffer
47 * ovfl: pointer to a function to be called on buffer overflow;
48 * NULL if cyclic buffer requested
33 */ 49 */
34enum bts_qualifier { 50typedef void (*ds_ovfl_callback_t)(struct task_struct *);
35 BTS_INVALID = 0, 51extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
36 BTS_BRANCH, 52 ds_ovfl_callback_t ovfl);
37 BTS_TASK_ARRIVES, 53extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
38 BTS_TASK_DEPARTS 54 ds_ovfl_callback_t ovfl);
39};
40 55
41struct bts_struct { 56/*
42 u64 qualifier; 57 * Release BTS or PEBS resources
43 union { 58 *
44 /* BTS_BRANCH */ 59 * Frees buffers allocated on ds_request.
45 struct { 60 *
46 u64 from_ip; 61 * Returns 0 on success; -Eerrno otherwise
47 u64 to_ip; 62 *
48 } lbr; 63 * task: the task to release resources for;
49 /* BTS_TASK_ARRIVES or 64 * NULL to release resources for the current cpu
50 BTS_TASK_DEPARTS */ 65 */
51 u64 jiffies; 66extern int ds_release_bts(struct task_struct *task);
52 } variant; 67extern int ds_release_pebs(struct task_struct *task);
68
69/*
70 * Return the (array) index of the write pointer.
71 * (assuming an array of BTS/PEBS records)
72 *
73 * Returns -Eerrno on error
74 *
75 * task: the task to access;
76 * NULL to access the current cpu
77 * pos (out): if not NULL, will hold the result
78 */
79extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
80extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
81
82/*
83 * Return the (array) index one record beyond the end of the array.
84 * (assuming an array of BTS/PEBS records)
85 *
86 * Returns -Eerrno on error
87 *
88 * task: the task to access;
89 * NULL to access the current cpu
90 * pos (out): if not NULL, will hold the result
91 */
92extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
93extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
94
95/*
96 * Provide a pointer to the BTS/PEBS record at parameter index.
97 * (assuming an array of BTS/PEBS records)
98 *
99 * The pointer points directly into the buffer. The user is
100 * responsible for copying the record.
101 *
102 * Returns the size of a single record on success; -Eerrno on error
103 *
104 * task: the task to access;
105 * NULL to access the current cpu
106 * index: the index of the requested record
107 * record (out): pointer to the requested record
108 */
109extern int ds_access_bts(struct task_struct *task,
110 size_t index, const void **record);
111extern int ds_access_pebs(struct task_struct *task,
112 size_t index, const void **record);
113
114/*
115 * Write one or more BTS/PEBS records at the write pointer index and
116 * advance the write pointer.
117 *
118 * If size is not a multiple of the record size, trailing bytes are
119 * zeroed out.
120 *
121 * May result in one or more overflow notifications.
122 *
123 * If called during overflow handling, that is, with index >=
124 * interrupt threshold, the write will wrap around.
125 *
126 * An overflow notification is given if and when the interrupt
127 * threshold is reached during or after the write.
128 *
129 * Returns the number of bytes written or -Eerrno.
130 *
131 * task: the task to access;
132 * NULL to access the current cpu
133 * buffer: the buffer to write
134 * size: the size of the buffer
135 */
136extern int ds_write_bts(struct task_struct *task,
137 const void *buffer, size_t size);
138extern int ds_write_pebs(struct task_struct *task,
139 const void *buffer, size_t size);
140
141/*
142 * Same as ds_write_bts/pebs, but omit ownership checks.
143 *
144 * This is needed to have some other task than the owner of the
145 * BTS/PEBS buffer or the parameter task itself write into the
146 * respective buffer.
147 */
148extern int ds_unchecked_write_bts(struct task_struct *task,
149 const void *buffer, size_t size);
150extern int ds_unchecked_write_pebs(struct task_struct *task,
151 const void *buffer, size_t size);
152
153/*
154 * Reset the write pointer of the BTS/PEBS buffer.
155 *
156 * Returns 0 on success; -Eerrno on error
157 *
158 * task: the task to access;
159 * NULL to access the current cpu
160 */
161extern int ds_reset_bts(struct task_struct *task);
162extern int ds_reset_pebs(struct task_struct *task);
163
164/*
165 * Clear the BTS/PEBS buffer and reset the write pointer.
166 * The entire buffer will be zeroed out.
167 *
168 * Returns 0 on success; -Eerrno on error
169 *
170 * task: the task to access;
171 * NULL to access the current cpu
172 */
173extern int ds_clear_bts(struct task_struct *task);
174extern int ds_clear_pebs(struct task_struct *task);
175
176/*
177 * Provide the PEBS counter reset value.
178 *
179 * Returns 0 on success; -Eerrno on error
180 *
181 * task: the task to access;
182 * NULL to access the current cpu
183 * value (out): the counter reset value
184 */
185extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
186
187/*
188 * Set the PEBS counter reset value.
189 *
190 * Returns 0 on success; -Eerrno on error
191 *
192 * task: the task to access;
193 * NULL to access the current cpu
194 * value: the new counter reset value
195 */
196extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
197
198/*
199 * Initialization
200 */
201struct cpuinfo_x86;
202extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
203
204
205
206/*
207 * The DS context - part of struct thread_struct.
208 */
209struct ds_context {
210 /* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
211 unsigned char *ds;
212 /* the owner of the BTS and PEBS configuration, respectively */
213 struct task_struct *owner[2];
214 /* buffer overflow notification function for BTS and PEBS */
215 ds_ovfl_callback_t callback[2];
216 /* the original buffer address */
217 void *buffer[2];
218 /* the number of allocated pages for on-request allocated buffers */
219 unsigned int pages[2];
220 /* use count */
221 unsigned long count;
222 /* a pointer to the context location inside the thread_struct
223 * or the per_cpu context array */
224 struct ds_context **this;
225 /* a pointer to the task owning this context, or NULL, if the
226 * context is owned by a cpu */
227 struct task_struct *task;
53}; 228};
54 229
55/* Overflow handling mechanisms */ 230/* called by exit_thread() to free leftover contexts */
56#define DS_O_SIGNAL 1 /* send overflow signal */ 231extern void ds_free(struct ds_context *context);
57#define DS_O_WRAP 2 /* wrap around */ 232
58 233#else /* CONFIG_X86_DS */
59extern int ds_allocate(void **, size_t); 234
60extern int ds_free(void **); 235#define ds_init_intel(config) do {} while (0)
61extern int ds_get_bts_size(void *); 236
62extern int ds_get_bts_end(void *); 237#endif /* CONFIG_X86_DS */
63extern int ds_get_bts_index(void *); 238#endif /* ASM_X86__DS_H */
64extern int ds_set_overflow(void *, int);
65extern int ds_get_overflow(void *);
66extern int ds_clear(void *);
67extern int ds_read_bts(void *, int, struct bts_struct *);
68extern int ds_write_bts(void *, const struct bts_struct *);
69extern unsigned long ds_debugctl_mask(void);
70extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c);
71
72#endif /* _ASM_X86_DS_H */
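
The rewritten ds.h above replaces the old void*-based helpers with a task/cpu oriented API. A minimal usage sketch of the BTS half, assuming CONFIG_X86_DS and only the declarations documented above (ds_request_bts, ds_get_bts_index, ds_access_bts, ds_release_bts); the buffer size and the choice of a cyclic, kernel-allocated buffer are arbitrary.

#include <asm/ds.h>

/* Trace the current cpu into a DS-allocated cyclic buffer, then walk
 * whatever records have accumulated. */
static int bts_snapshot(void)
{
        const void *record;
        size_t idx, i;
        int err;

        /* task == NULL: per-cpu tracing; base == NULL: let DS allocate;
         * ovfl == NULL: cyclic buffer, no overflow notification */
        err = ds_request_bts(NULL, NULL, 4096, NULL);
        if (err < 0)
                return err;

        /* ... run whatever should be traced ... */

        err = ds_get_bts_index(NULL, &idx);
        if (!err) {
                for (i = 0; i < idx; i++) {
                        /* ds_access_bts() returns the size of one record,
                         * or a negative errno */
                        if (ds_access_bts(NULL, i, &record) < 0)
                                break;
                        /* copy or decode 'record' here */
                }
        }

        return ds_release_bts(NULL);
}
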
diff --git a/include/asm-x86/dwarf2.h b/include/asm-x86/dwarf2.h
index 738bb9fb3e53..21d1bc32ad7c 100644
--- a/include/asm-x86/dwarf2.h
+++ b/include/asm-x86/dwarf2.h
@@ -1,5 +1,5 @@
1#ifndef _DWARF2_H 1#ifndef ASM_X86__DWARF2_H
2#define _DWARF2_H 2#define ASM_X86__DWARF2_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#warning "asm/dwarf2.h should be only included in pure assembly files" 5#warning "asm/dwarf2.h should be only included in pure assembly files"
@@ -58,4 +58,4 @@
58 58
59#endif 59#endif
60 60
61#endif 61#endif /* ASM_X86__DWARF2_H */
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 16a31e2c7c57..f52daf176bcb 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_E820_H 1#ifndef ASM_X86__E820_H
2#define __ASM_E820_H 2#define ASM_X86__E820_H
3#define E820MAP 0x2d0 /* our map */ 3#define E820MAP 0x2d0 /* our map */
4#define E820MAX 128 /* number of entries in E820MAP */ 4#define E820MAX 128 /* number of entries in E820MAP */
5 5
@@ -64,6 +64,7 @@ struct e820map {
64extern struct e820map e820; 64extern struct e820map e820;
65extern struct e820map e820_saved; 65extern struct e820map e820_saved;
66 66
67extern unsigned long pci_mem_start;
67extern int e820_any_mapped(u64 start, u64 end, unsigned type); 68extern int e820_any_mapped(u64 start, u64 end, unsigned type);
68extern int e820_all_mapped(u64 start, u64 end, unsigned type); 69extern int e820_all_mapped(u64 start, u64 end, unsigned type);
69extern void e820_add_region(u64 start, u64 size, int type); 70extern void e820_add_region(u64 start, u64 size, int type);
@@ -140,4 +141,4 @@ extern char *memory_setup(void);
140#define HIGH_MEMORY (1024*1024) 141#define HIGH_MEMORY (1024*1024)
141#endif /* __KERNEL__ */ 142#endif /* __KERNEL__ */
142 143
143#endif /* __ASM_E820_H */ 144#endif /* ASM_X86__E820_H */
diff --git a/include/asm-x86/edac.h b/include/asm-x86/edac.h
index a8088f63a30e..9493c5b27bbd 100644
--- a/include/asm-x86/edac.h
+++ b/include/asm-x86/edac.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_EDAC_H 1#ifndef ASM_X86__EDAC_H
2#define _ASM_X86_EDAC_H 2#define ASM_X86__EDAC_H
3 3
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */ 4/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5 5
@@ -15,4 +15,4 @@ static inline void atomic_scrub(void *va, u32 size)
15 asm volatile("lock; addl $0, %0"::"m" (*virt_addr)); 15 asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
16} 16}
17 17
18#endif 18#endif /* ASM_X86__EDAC_H */
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index d4f2b0abe929..ed2de22e8705 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_EFI_H 1#ifndef ASM_X86__EFI_H
2#define _ASM_X86_EFI_H 2#define ASM_X86__EFI_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5 5
@@ -94,4 +94,4 @@ extern void efi_reserve_early(void);
94extern void efi_call_phys_prelog(void); 94extern void efi_call_phys_prelog(void);
95extern void efi_call_phys_epilog(void); 95extern void efi_call_phys_epilog(void);
96 96
97#endif 97#endif /* ASM_X86__EFI_H */
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index 7be4733c793e..5c4745bec906 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_ELF_H 1#ifndef ASM_X86__ELF_H
2#define _ASM_X86_ELF_H 2#define ASM_X86__ELF_H
3 3
4/* 4/*
5 * ELF register definitions.. 5 * ELF register definitions..
@@ -148,8 +148,9 @@ do { \
148 148
149static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp) 149static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
150{ 150{
151 asm volatile("movl %0,%%fs" :: "r" (0)); 151 loadsegment(fs, 0);
152 asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS)); 152 loadsegment(ds, __USER32_DS);
153 loadsegment(es, __USER32_DS);
153 load_gs_index(0); 154 load_gs_index(0);
154 regs->ip = ip; 155 regs->ip = ip;
155 regs->sp = sp; 156 regs->sp = sp;
@@ -332,4 +333,4 @@ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
332extern unsigned long arch_randomize_brk(struct mm_struct *mm); 333extern unsigned long arch_randomize_brk(struct mm_struct *mm);
333#define arch_randomize_brk arch_randomize_brk 334#define arch_randomize_brk arch_randomize_brk
334 335
335#endif 336#endif /* ASM_X86__ELF_H */
diff --git a/include/asm-x86/emergency-restart.h b/include/asm-x86/emergency-restart.h
index 8e6aef19f8f0..190d0d8b71e3 100644
--- a/include/asm-x86/emergency-restart.h
+++ b/include/asm-x86/emergency-restart.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_EMERGENCY_RESTART_H 1#ifndef ASM_X86__EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H 2#define ASM_X86__EMERGENCY_RESTART_H
3 3
4enum reboot_type { 4enum reboot_type {
5 BOOT_TRIPLE = 't', 5 BOOT_TRIPLE = 't',
@@ -15,4 +15,4 @@ extern enum reboot_type reboot_type;
15 15
16extern void machine_emergency_restart(void); 16extern void machine_emergency_restart(void);
17 17
18#endif /* _ASM_EMERGENCY_RESTART_H */ 18#endif /* ASM_X86__EMERGENCY_RESTART_H */
diff --git a/include/asm-x86/fb.h b/include/asm-x86/fb.h
index 53018464aea6..aca38dbd9a64 100644
--- a/include/asm-x86/fb.h
+++ b/include/asm-x86/fb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FB_H 1#ifndef ASM_X86__FB_H
2#define _ASM_X86_FB_H 2#define ASM_X86__FB_H
3 3
4#include <linux/fb.h> 4#include <linux/fb.h>
5#include <linux/fs.h> 5#include <linux/fs.h>
@@ -18,4 +18,4 @@ extern int fb_is_primary_device(struct fb_info *info);
18static inline int fb_is_primary_device(struct fb_info *info) { return 0; } 18static inline int fb_is_primary_device(struct fb_info *info) { return 0; }
19#endif 19#endif
20 20
21#endif /* _ASM_X86_FB_H */ 21#endif /* ASM_X86__FB_H */
diff --git a/include/asm-x86/fixmap.h b/include/asm-x86/fixmap.h
index 44d4f8217349..78e33a1bc591 100644
--- a/include/asm-x86/fixmap.h
+++ b/include/asm-x86/fixmap.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_FIXMAP_H 1#ifndef ASM_X86__FIXMAP_H
2#define _ASM_FIXMAP_H 2#define ASM_X86__FIXMAP_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# include "fixmap_32.h" 5# include "fixmap_32.h"
@@ -65,4 +65,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 65 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
66 return __virt_to_fix(vaddr); 66 return __virt_to_fix(vaddr);
67} 67}
68#endif 68#endif /* ASM_X86__FIXMAP_H */
diff --git a/include/asm-x86/fixmap_32.h b/include/asm-x86/fixmap_32.h
index f1ac2b2167d7..784e3e759866 100644
--- a/include/asm-x86/fixmap_32.h
+++ b/include/asm-x86/fixmap_32.h
@@ -10,8 +10,8 @@
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */ 11 */
12 12
13#ifndef _ASM_FIXMAP_32_H 13#ifndef ASM_X86__FIXMAP_32_H
14#define _ASM_FIXMAP_32_H 14#define ASM_X86__FIXMAP_32_H
15 15
16 16
17/* used by vmalloc.c, vsyscall.lds.S. 17/* used by vmalloc.c, vsyscall.lds.S.
@@ -120,4 +120,4 @@ extern void reserve_top_address(unsigned long reserve);
120#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) 120#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
121 121
122#endif /* !__ASSEMBLY__ */ 122#endif /* !__ASSEMBLY__ */
123#endif 123#endif /* ASM_X86__FIXMAP_32_H */
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 00f3d74a0524..dafb24bc0424 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -8,8 +8,8 @@
8 * Copyright (C) 1998 Ingo Molnar 8 * Copyright (C) 1998 Ingo Molnar
9 */ 9 */
10 10
11#ifndef _ASM_FIXMAP_64_H 11#ifndef ASM_X86__FIXMAP_64_H
12#define _ASM_FIXMAP_64_H 12#define ASM_X86__FIXMAP_64_H
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <asm/acpi.h> 15#include <asm/acpi.h>
@@ -80,4 +80,4 @@ enum fixed_addresses {
80#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL) 80#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
81#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE) 81#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
82 82
83#endif 83#endif /* ASM_X86__FIXMAP_64_H */
diff --git a/include/asm-x86/floppy.h b/include/asm-x86/floppy.h
index dbe82a5c5eac..7d83a3a83e37 100644
--- a/include/asm-x86/floppy.h
+++ b/include/asm-x86/floppy.h
@@ -7,8 +7,8 @@
7 * 7 *
8 * Copyright (C) 1995 8 * Copyright (C) 1995
9 */ 9 */
10#ifndef _ASM_X86_FLOPPY_H 10#ifndef ASM_X86__FLOPPY_H
11#define _ASM_X86_FLOPPY_H 11#define ASM_X86__FLOPPY_H
12 12
13#include <linux/vmalloc.h> 13#include <linux/vmalloc.h>
14 14
@@ -278,4 +278,4 @@ static int FDC2 = -1;
278 278
279#define EXTRA_FLOPPY_PARAMS 279#define EXTRA_FLOPPY_PARAMS
280 280
281#endif /* _ASM_X86_FLOPPY_H */ 281#endif /* ASM_X86__FLOPPY_H */
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h
index 5c68b32ee1c8..be0e004ad148 100644
--- a/include/asm-x86/ftrace.h
+++ b/include/asm-x86/ftrace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FTRACE 1#ifndef ASM_X86__FTRACE_H
2#define _ASM_X86_FTRACE 2#define ASM_X86__FTRACE_H
3 3
4#ifdef CONFIG_FTRACE 4#ifdef CONFIG_FTRACE
5#define MCOUNT_ADDR ((long)(mcount)) 5#define MCOUNT_ADDR ((long)(mcount))
@@ -11,4 +11,4 @@ extern void mcount(void);
11 11
12#endif /* CONFIG_FTRACE */ 12#endif /* CONFIG_FTRACE */
13 13
14#endif /* _ASM_X86_FTRACE */ 14#endif /* ASM_X86__FTRACE_H */
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index e7a76b37b333..06b924ef6fa5 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_FUTEX_H 1#ifndef ASM_X86__FUTEX_H
2#define _ASM_X86_FUTEX_H 2#define ASM_X86__FUTEX_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -25,7 +25,7 @@
25 asm volatile("1:\tmovl %2, %0\n" \ 25 asm volatile("1:\tmovl %2, %0\n" \
26 "\tmovl\t%0, %3\n" \ 26 "\tmovl\t%0, %3\n" \
27 "\t" insn "\n" \ 27 "\t" insn "\n" \
28 "2:\tlock; cmpxchgl %3, %2\n" \ 28 "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
29 "\tjnz\t1b\n" \ 29 "\tjnz\t1b\n" \
30 "3:\t.section .fixup,\"ax\"\n" \ 30 "3:\t.section .fixup,\"ax\"\n" \
31 "4:\tmov\t%5, %1\n" \ 31 "4:\tmov\t%5, %1\n" \
@@ -64,7 +64,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
64 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); 64 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
65 break; 65 break;
66 case FUTEX_OP_ADD: 66 case FUTEX_OP_ADD:
67 __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval, 67 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
68 uaddr, oparg); 68 uaddr, oparg);
69 break; 69 break;
70 case FUTEX_OP_OR: 70 case FUTEX_OP_OR:
@@ -122,7 +122,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) 122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
123 return -EFAULT; 123 return -EFAULT;
124 124
125 asm volatile("1:\tlock; cmpxchgl %3, %1\n" 125 asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
126 "2:\t.section .fixup, \"ax\"\n" 126 "2:\t.section .fixup, \"ax\"\n"
127 "3:\tmov %2, %0\n" 127 "3:\tmov %2, %0\n"
128 "\tjmp 2b\n" 128 "\tjmp 2b\n"
@@ -137,4 +137,4 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
137} 137}
138 138
139#endif 139#endif
140#endif 140#endif /* ASM_X86__FUTEX_H */
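
The futex.h hunks above replace hard-coded "lock;" prefixes with LOCK_PREFIX so the prefix can be omitted on non-SMP builds and patched away at runtime when only one CPU is online. A standalone sketch of the same idiom, assuming only LOCK_PREFIX from <asm/alternative.h>; the wrapper is illustrative, not kernel API.

#include <asm/alternative.h>    /* LOCK_PREFIX */

/* Compare-and-swap on an int: the lock prefix is emitted for SMP builds
 * and can be patched away on single-CPU systems, as in the futex ops above. */
static inline int cmpxchg_int(int *ptr, int old, int new)
{
        int prev;

        asm volatile(LOCK_PREFIX "cmpxchgl %2, %1"
                     : "=a" (prev), "+m" (*ptr)
                     : "r" (new), "0" (old)
                     : "memory");
        return prev;
}
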
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 3f62a83887f3..baa54faba892 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_GART_H 1#ifndef ASM_X86__GART_H
2#define _ASM_X8664_GART_H 1 2#define ASM_X86__GART_H
3 3
4#include <asm/e820.h> 4#include <asm/e820.h>
5 5
@@ -52,15 +52,15 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
52 return 0; 52 return 0;
53 53
54 if (aper_base + aper_size > 0x100000000ULL) { 54 if (aper_base + aper_size > 0x100000000ULL) {
55 printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n"); 55 printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
56 return 0; 56 return 0;
57 } 57 }
58 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { 58 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
59 printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n"); 59 printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
60 return 0; 60 return 0;
61 } 61 }
62 if (aper_size < min_size) { 62 if (aper_size < min_size) {
63 printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n", 63 printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
64 aper_size>>20, min_size>>20); 64 aper_size>>20, min_size>>20);
65 return 0; 65 return 0;
66 } 66 }
@@ -68,4 +68,4 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
68 return 1; 68 return 1;
69} 69}
70 70
71#endif 71#endif /* ASM_X86__GART_H */
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index 754d635f90ff..34280f027664 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_GENAPIC_H 1#ifndef ASM_X86__GENAPIC_32_H
2#define _ASM_GENAPIC_H 1 2#define ASM_X86__GENAPIC_32_H
3 3
4#include <asm/mpspec.h> 4#include <asm/mpspec.h>
5 5
@@ -121,4 +121,4 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
121#define uv_system_init() do {} while (0) 121#define uv_system_init() do {} while (0)
122 122
123 123
124#endif 124#endif /* ASM_X86__GENAPIC_32_H */
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index a47d63129135..25097a8cc5ef 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_GENAPIC_H 1#ifndef ASM_X86__GENAPIC_64_H
2#define _ASM_GENAPIC_H 1 2#define ASM_X86__GENAPIC_64_H
3 3
4/* 4/*
5 * Copyright 2004 James Cleverdon, IBM. 5 * Copyright 2004 James Cleverdon, IBM.
@@ -47,4 +47,4 @@ extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
47 47
48extern void setup_apic_routing(void); 48extern void setup_apic_routing(void);
49 49
50#endif 50#endif /* ASM_X86__GENAPIC_64_H */
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 2c1cda0b8a86..3f3444be2638 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -7,8 +7,8 @@
7 * as published by the Free Software Foundation. 7 * as published by the Free Software Foundation.
8 */ 8 */
9 9
10#ifndef _ASM_GEODE_H_ 10#ifndef ASM_X86__GEODE_H
11#define _ASM_GEODE_H_ 11#define ASM_X86__GEODE_H
12 12
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <linux/io.h> 14#include <linux/io.h>
@@ -250,4 +250,4 @@ extern int __init mfgpt_timer_setup(void);
250static inline int mfgpt_timer_setup(void) { return 0; } 250static inline int mfgpt_timer_setup(void) { return 0; }
251#endif 251#endif
252 252
253#endif 253#endif /* ASM_X86__GEODE_H */
diff --git a/include/asm-x86/gpio.h b/include/asm-x86/gpio.h
index c4c91b37c104..497fb980d962 100644
--- a/include/asm-x86/gpio.h
+++ b/include/asm-x86/gpio.h
@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
53 53
54#endif /* CONFIG_GPIOLIB */ 54#endif /* CONFIG_GPIOLIB */
55 55
56#endif /* _ASM_I386_GPIO_H */ 56#endif /* ASM_X86__GPIO_H */
diff --git a/include/asm-x86/hardirq_32.h b/include/asm-x86/hardirq_32.h
index 4f85f0f4b563..700fe230d919 100644
--- a/include/asm-x86/hardirq_32.h
+++ b/include/asm-x86/hardirq_32.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_HARDIRQ_H 1#ifndef ASM_X86__HARDIRQ_32_H
2#define __ASM_HARDIRQ_H 2#define ASM_X86__HARDIRQ_32_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/irq.h> 5#include <linux/irq.h>
@@ -25,4 +25,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
25void ack_bad_irq(unsigned int irq); 25void ack_bad_irq(unsigned int irq);
26#include <linux/irq_cpustat.h> 26#include <linux/irq_cpustat.h>
27 27
28#endif /* __ASM_HARDIRQ_H */ 28#endif /* ASM_X86__HARDIRQ_32_H */
diff --git a/include/asm-x86/hardirq_64.h b/include/asm-x86/hardirq_64.h
index 95d5e090ed89..f8bd2919a8ce 100644
--- a/include/asm-x86/hardirq_64.h
+++ b/include/asm-x86/hardirq_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_HARDIRQ_H 1#ifndef ASM_X86__HARDIRQ_64_H
2#define __ASM_HARDIRQ_H 2#define ASM_X86__HARDIRQ_64_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5#include <linux/irq.h> 5#include <linux/irq.h>
@@ -20,4 +20,4 @@
20 20
21extern void ack_bad_irq(unsigned int irq); 21extern void ack_bad_irq(unsigned int irq);
22 22
23#endif /* __ASM_HARDIRQ_H */ 23#endif /* ASM_X86__HARDIRQ_64_H */
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 4514b16cc723..bc3f6a280316 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -15,8 +15,8 @@
15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> 15 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
16 */ 16 */
17 17
18#ifndef _ASM_HIGHMEM_H 18#ifndef ASM_X86__HIGHMEM_H
19#define _ASM_HIGHMEM_H 19#define ASM_X86__HIGHMEM_H
20 20
21#ifdef __KERNEL__ 21#ifdef __KERNEL__
22 22
@@ -79,4 +79,4 @@ extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
79 79
80#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
81 81
82#endif /* _ASM_HIGHMEM_H */ 82#endif /* ASM_X86__HIGHMEM_H */
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h
index 82f1ac641bd7..cbbbb6d4dd32 100644
--- a/include/asm-x86/hpet.h
+++ b/include/asm-x86/hpet.h
@@ -1,5 +1,5 @@
1#ifndef ASM_X86_HPET_H 1#ifndef ASM_X86__HPET_H
2#define ASM_X86_HPET_H 2#define ASM_X86__HPET_H
3 3
4#ifdef CONFIG_HPET_TIMER 4#ifdef CONFIG_HPET_TIMER
5 5
@@ -90,4 +90,4 @@ static inline int is_hpet_enabled(void) { return 0; }
90#define hpet_readl(a) 0 90#define hpet_readl(a) 0
91 91
92#endif 92#endif
93#endif /* ASM_X86_HPET_H */ 93#endif /* ASM_X86__HPET_H */
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index 439a9acc132d..0b7ec5dc0884 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_HUGETLB_H 1#ifndef ASM_X86__HUGETLB_H
2#define _ASM_X86_HUGETLB_H 2#define ASM_X86__HUGETLB_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5
@@ -90,4 +90,4 @@ static inline void arch_release_hugepage(struct page *page)
90{ 90{
91} 91}
92 92
93#endif /* _ASM_X86_HUGETLB_H */ 93#endif /* ASM_X86__HUGETLB_H */
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index edd0b95f14d0..65997b15d56a 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_HW_IRQ_H 1#ifndef ASM_X86__HW_IRQ_H
2#define _ASM_HW_IRQ_H 2#define ASM_X86__HW_IRQ_H
3 3
4/* 4/*
5 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar 5 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
@@ -93,6 +93,26 @@ extern asmlinkage void qic_reschedule_interrupt(void);
93extern asmlinkage void qic_enable_irq_interrupt(void); 93extern asmlinkage void qic_enable_irq_interrupt(void);
94extern asmlinkage void qic_call_function_interrupt(void); 94extern asmlinkage void qic_call_function_interrupt(void);
95 95
96/* SMP */
97extern void smp_apic_timer_interrupt(struct pt_regs *);
98#ifdef CONFIG_X86_32
99extern void smp_spurious_interrupt(struct pt_regs *);
100extern void smp_error_interrupt(struct pt_regs *);
101#else
102extern asmlinkage void smp_spurious_interrupt(void);
103extern asmlinkage void smp_error_interrupt(void);
104#endif
105#ifdef CONFIG_X86_SMP
106extern void smp_reschedule_interrupt(struct pt_regs *);
107extern void smp_call_function_interrupt(struct pt_regs *);
108extern void smp_call_function_single_interrupt(struct pt_regs *);
109#ifdef CONFIG_X86_32
110extern void smp_invalidate_interrupt(struct pt_regs *);
111#else
112extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
113#endif
114#endif
115
96#ifdef CONFIG_X86_32 116#ifdef CONFIG_X86_32
97extern void (*const interrupt[NR_IRQS])(void); 117extern void (*const interrupt[NR_IRQS])(void);
98#else 118#else
@@ -112,4 +132,4 @@ static inline void __setup_vector_irq(int cpu) {}
112 132
113#endif /* !ASSEMBLY_ */ 133#endif /* !ASSEMBLY_ */
114 134
115#endif 135#endif /* ASM_X86__HW_IRQ_H */
diff --git a/include/asm-x86/hypertransport.h b/include/asm-x86/hypertransport.h
index d2bbd238b3e1..cc011a3bc1c2 100644
--- a/include/asm-x86/hypertransport.h
+++ b/include/asm-x86/hypertransport.h
@@ -1,5 +1,5 @@
1#ifndef ASM_HYPERTRANSPORT_H 1#ifndef ASM_X86__HYPERTRANSPORT_H
2#define ASM_HYPERTRANSPORT_H 2#define ASM_X86__HYPERTRANSPORT_H
3 3
4/* 4/*
5 * Constants for x86 Hypertransport Interrupts. 5 * Constants for x86 Hypertransport Interrupts.
@@ -42,4 +42,4 @@
42#define HT_IRQ_HIGH_DEST_ID(v) \ 42#define HT_IRQ_HIGH_DEST_ID(v) \
43 ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) 43 ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
44 44
45#endif /* ASM_HYPERTRANSPORT_H */ 45#endif /* ASM_X86__HYPERTRANSPORT_H */
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 56d00e31aec0..1ecdc3ed96e4 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -7,8 +7,8 @@
7 * x86-64 work by Andi Kleen 2002 7 * x86-64 work by Andi Kleen 2002
8 */ 8 */
9 9
10#ifndef _ASM_X86_I387_H 10#ifndef ASM_X86__I387_H
11#define _ASM_X86_I387_H 11#define ASM_X86__I387_H
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
@@ -25,6 +25,7 @@ extern void mxcsr_feature_mask_init(void);
25extern int init_fpu(struct task_struct *child); 25extern int init_fpu(struct task_struct *child);
26extern asmlinkage void math_state_restore(void); 26extern asmlinkage void math_state_restore(void);
27extern void init_thread_xstate(void); 27extern void init_thread_xstate(void);
28extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
28 29
29extern user_regset_active_fn fpregs_active, xfpregs_active; 30extern user_regset_active_fn fpregs_active, xfpregs_active;
30extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; 31extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
@@ -336,4 +337,4 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
336 } 337 }
337} 338}
338 339
339#endif /* _ASM_X86_I387_H */ 340#endif /* ASM_X86__I387_H */
diff --git a/include/asm-x86/i8253.h b/include/asm-x86/i8253.h
index b51c0487fc41..15a5b530044e 100644
--- a/include/asm-x86/i8253.h
+++ b/include/asm-x86/i8253.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I8253_H__ 1#ifndef ASM_X86__I8253_H
2#define __ASM_I8253_H__ 2#define ASM_X86__I8253_H
3 3
4/* i8253A PIT registers */ 4/* i8253A PIT registers */
5#define PIT_MODE 0x43 5#define PIT_MODE 0x43
@@ -15,4 +15,4 @@ extern void setup_pit_timer(void);
15#define inb_pit inb_p 15#define inb_pit inb_p
16#define outb_pit outb_p 16#define outb_pit outb_p
17 17
18#endif /* __ASM_I8253_H__ */ 18#endif /* ASM_X86__I8253_H */
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index 2f98df91f1f2..c586559a6957 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_I8259_H__ 1#ifndef ASM_X86__I8259_H
2#define __ASM_I8259_H__ 2#define ASM_X86__I8259_H
3 3
4#include <linux/delay.h> 4#include <linux/delay.h>
5 5
@@ -57,4 +57,4 @@ static inline void outb_pic(unsigned char value, unsigned int port)
57 57
58extern struct irq_chip i8259A_chip; 58extern struct irq_chip i8259A_chip;
59 59
60#endif /* __ASM_I8259_H__ */ 60#endif /* ASM_X86__I8259_H */
diff --git a/include/asm-x86/ia32.h b/include/asm-x86/ia32.h
index 55d3abe5276f..f932f7ad51dd 100644
--- a/include/asm-x86/ia32.h
+++ b/include/asm-x86/ia32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IA32_H 1#ifndef ASM_X86__IA32_H
2#define _ASM_X86_64_IA32_H 2#define ASM_X86__IA32_H
3 3
4 4
5#ifdef CONFIG_IA32_EMULATION 5#ifdef CONFIG_IA32_EMULATION
@@ -167,4 +167,4 @@ extern void ia32_pick_mmap_layout(struct mm_struct *mm);
167 167
168#endif /* !CONFIG_IA32_SUPPORT */ 168#endif /* !CONFIG_IA32_SUPPORT */
169 169
170#endif 170#endif /* ASM_X86__IA32_H */
diff --git a/include/asm-x86/ia32_unistd.h b/include/asm-x86/ia32_unistd.h
index 61cea9e7c5c1..dbd887d8a5a5 100644
--- a/include/asm-x86/ia32_unistd.h
+++ b/include/asm-x86/ia32_unistd.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IA32_UNISTD_H_ 1#ifndef ASM_X86__IA32_UNISTD_H
2#define _ASM_X86_64_IA32_UNISTD_H_ 2#define ASM_X86__IA32_UNISTD_H
3 3
4/* 4/*
5 * This file contains the system call numbers of the ia32 port, 5 * This file contains the system call numbers of the ia32 port,
@@ -15,4 +15,4 @@
15#define __NR_ia32_sigreturn 119 15#define __NR_ia32_sigreturn 119
16#define __NR_ia32_rt_sigreturn 173 16#define __NR_ia32_rt_sigreturn 173
17 17
18#endif /* _ASM_X86_64_IA32_UNISTD_H_ */ 18#endif /* ASM_X86__IA32_UNISTD_H */
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index cbb649123612..baa3f783d27d 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_IDLE_H 1#ifndef ASM_X86__IDLE_H
2#define _ASM_X86_64_IDLE_H 1 2#define ASM_X86__IDLE_H
3 3
4#define IDLE_START 1 4#define IDLE_START 1
5#define IDLE_END 2 5#define IDLE_END 2
@@ -12,4 +12,4 @@ void exit_idle(void);
12 12
13void c1e_remove_cpu(int cpu); 13void c1e_remove_cpu(int cpu);
14 14
15#endif 15#endif /* ASM_X86__IDLE_H */
diff --git a/include/asm-x86/intel_arch_perfmon.h b/include/asm-x86/intel_arch_perfmon.h
index fa0fd068bc2e..07c03c6c9a16 100644
--- a/include/asm-x86/intel_arch_perfmon.h
+++ b/include/asm-x86/intel_arch_perfmon.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H 1#ifndef ASM_X86__INTEL_ARCH_PERFMON_H
2#define _ASM_X86_INTEL_ARCH_PERFMON_H 2#define ASM_X86__INTEL_ARCH_PERFMON_H
3 3
4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 4#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 5#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
@@ -28,4 +28,4 @@ union cpuid10_eax {
28 unsigned int full; 28 unsigned int full;
29}; 29};
30 30
31#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */ 31#endif /* ASM_X86__INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index 0f954dc89cb3..72b7719523bf 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IO_H 1#ifndef ASM_X86__IO_H
2#define _ASM_X86_IO_H 2#define ASM_X86__IO_H
3 3
4#define ARCH_HAS_IOREMAP_WC 4#define ARCH_HAS_IOREMAP_WC
5 5
@@ -73,6 +73,8 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
73#define writeq writeq 73#define writeq writeq
74#endif 74#endif
75 75
76extern int iommu_bio_merge;
77
76#ifdef CONFIG_X86_32 78#ifdef CONFIG_X86_32
77# include "io_32.h" 79# include "io_32.h"
78#else 80#else
@@ -99,4 +101,4 @@ extern void early_iounmap(void *addr, unsigned long size);
99extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); 101extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
100 102
101 103
102#endif /* _ASM_X86_IO_H */ 104#endif /* ASM_X86__IO_H */
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index e876d89ac156..4f7d878bda18 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IO_H 1#ifndef ASM_X86__IO_32_H
2#define _ASM_IO_H 2#define ASM_X86__IO_32_H
3 3
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/compiler.h> 5#include <linux/compiler.h>
@@ -281,4 +281,4 @@ BUILDIO(b, b, char)
281BUILDIO(w, w, short) 281BUILDIO(w, w, short)
282BUILDIO(l, , int) 282BUILDIO(l, , int)
283 283
284#endif 284#endif /* ASM_X86__IO_32_H */
diff --git a/include/asm-x86/io_64.h b/include/asm-x86/io_64.h
index 22995c5c5adc..64429e9431a8 100644
--- a/include/asm-x86/io_64.h
+++ b/include/asm-x86/io_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IO_H 1#ifndef ASM_X86__IO_64_H
2#define _ASM_IO_H 2#define ASM_X86__IO_64_H
3 3
4 4
5/* 5/*
@@ -235,7 +235,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
235 235
236#define flush_write_buffers() 236#define flush_write_buffers()
237 237
238extern int iommu_bio_merge;
239#define BIO_VMERGE_BOUNDARY iommu_bio_merge 238#define BIO_VMERGE_BOUNDARY iommu_bio_merge
240 239
241/* 240/*
@@ -245,4 +244,4 @@ extern int iommu_bio_merge;
245 244
246#endif /* __KERNEL__ */ 245#endif /* __KERNEL__ */
247 246
248#endif 247#endif /* ASM_X86__IO_64_H */
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 14f82bbcb5fd..be62847ab07e 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_IO_APIC_H 1#ifndef ASM_X86__IO_APIC_H
2#define __ASM_IO_APIC_H 2#define ASM_X86__IO_APIC_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/mpspec.h> 5#include <asm/mpspec.h>
@@ -189,4 +189,4 @@ static const int timer_through_8259 = 0;
189static inline void ioapic_init_mappings(void) { } 189static inline void ioapic_init_mappings(void) { }
190#endif 190#endif
191 191
192#endif 192#endif /* ASM_X86__IO_APIC_H */
diff --git a/include/asm-x86/ioctls.h b/include/asm-x86/ioctls.h
index c0c338bd4068..336603512399 100644
--- a/include/asm-x86/ioctls.h
+++ b/include/asm-x86/ioctls.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IOCTLS_H 1#ifndef ASM_X86__IOCTLS_H
2#define _ASM_X86_IOCTLS_H 2#define ASM_X86__IOCTLS_H
3 3
4#include <asm/ioctl.h> 4#include <asm/ioctl.h>
5 5
@@ -85,4 +85,4 @@
85 85
86#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ 86#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
87 87
88#endif 88#endif /* ASM_X86__IOCTLS_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 5f888cc5be49..e86f44148c66 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X8664_IOMMU_H 1#ifndef ASM_X86__IOMMU_H
2#define _ASM_X8664_IOMMU_H 1 2#define ASM_X86__IOMMU_H
3 3
4extern void pci_iommu_shutdown(void); 4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void); 5extern void no_iommu_init(void);
@@ -42,4 +42,4 @@ static inline void gart_iommu_hole_init(void)
42} 42}
43#endif 43#endif
44 44
45#endif 45#endif /* ASM_X86__IOMMU_H */
diff --git a/include/asm-x86/ipcbuf.h b/include/asm-x86/ipcbuf.h
index ee678fd51594..910304fbdc8f 100644
--- a/include/asm-x86/ipcbuf.h
+++ b/include/asm-x86/ipcbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_IPCBUF_H 1#ifndef ASM_X86__IPCBUF_H
2#define _ASM_X86_IPCBUF_H 2#define ASM_X86__IPCBUF_H
3 3
4/* 4/*
5 * The ipc64_perm structure for x86 architecture. 5 * The ipc64_perm structure for x86 architecture.
@@ -25,4 +25,4 @@ struct ipc64_perm {
25 unsigned long __unused2; 25 unsigned long __unused2;
26}; 26};
27 27
28#endif /* _ASM_X86_IPCBUF_H */ 28#endif /* ASM_X86__IPCBUF_H */
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index bb1c09f7a76c..c1b226797518 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_IPI_H 1#ifndef ASM_X86__IPI_H
2#define __ASM_IPI_H 2#define ASM_X86__IPI_H
3 3
4/* 4/*
5 * Copyright 2004 James Cleverdon, IBM. 5 * Copyright 2004 James Cleverdon, IBM.
@@ -129,4 +129,4 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
129 local_irq_restore(flags); 129 local_irq_restore(flags);
130} 130}
131 131
132#endif /* __ASM_IPI_H */ 132#endif /* ASM_X86__IPI_H */
diff --git a/include/asm-x86/irq.h b/include/asm-x86/irq.h
index 1a2925757317..1e5f2909c1db 100644
--- a/include/asm-x86/irq.h
+++ b/include/asm-x86/irq.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_H 1#ifndef ASM_X86__IRQ_H
2#define _ASM_IRQ_H 2#define ASM_X86__IRQ_H
3/* 3/*
4 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar 4 * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
5 * 5 *
@@ -47,4 +47,4 @@ extern void native_init_IRQ(void);
47/* Interrupt vector management */ 47/* Interrupt vector management */
48extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 48extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
49 49
50#endif /* _ASM_IRQ_H */ 50#endif /* ASM_X86__IRQ_H */
diff --git a/include/asm-x86/irq_regs_32.h b/include/asm-x86/irq_regs_32.h
index 3368b20c0b48..316a3b258871 100644
--- a/include/asm-x86/irq_regs_32.h
+++ b/include/asm-x86/irq_regs_32.h
@@ -4,8 +4,8 @@
4 * 4 *
5 * Jeremy Fitzhardinge <jeremy@goop.org> 5 * Jeremy Fitzhardinge <jeremy@goop.org>
6 */ 6 */
7#ifndef _ASM_I386_IRQ_REGS_H 7#ifndef ASM_X86__IRQ_REGS_32_H
8#define _ASM_I386_IRQ_REGS_H 8#define ASM_X86__IRQ_REGS_32_H
9 9
10#include <asm/percpu.h> 10#include <asm/percpu.h>
11 11
@@ -26,4 +26,4 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
26 return old_regs; 26 return old_regs;
27} 27}
28 28
29#endif /* _ASM_I386_IRQ_REGS_H */ 29#endif /* ASM_X86__IRQ_REGS_32_H */
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index a48c7f2dbdc0..c5d2d767a1f3 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_VECTORS_H 1#ifndef ASM_X86__IRQ_VECTORS_H
2#define _ASM_IRQ_VECTORS_H 2#define ASM_X86__IRQ_VECTORS_H
3 3
4#include <linux/threads.h> 4#include <linux/threads.h>
5 5
@@ -179,4 +179,4 @@
179#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) 179#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
180 180
181 181
182#endif /* _ASM_IRQ_VECTORS_H */ 182#endif /* ASM_X86__IRQ_VECTORS_H */
diff --git a/include/asm-x86/ist.h b/include/asm-x86/ist.h
index 6ec6ceed95a7..35a2fe9bc921 100644
--- a/include/asm-x86/ist.h
+++ b/include/asm-x86/ist.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IST_H 1#ifndef ASM_X86__IST_H
2#define _ASM_IST_H 2#define ASM_X86__IST_H
3 3
4/* 4/*
5 * Include file for the interface to IST BIOS 5 * Include file for the interface to IST BIOS
@@ -31,4 +31,4 @@ struct ist_info {
31extern struct ist_info ist_info; 31extern struct ist_info ist_info;
32 32
33#endif /* __KERNEL__ */ 33#endif /* __KERNEL__ */
34#endif /* _ASM_IST_H */ 34#endif /* ASM_X86__IST_H */
diff --git a/include/asm-x86/k8.h b/include/asm-x86/k8.h
index 452e2b696ff4..2bbaf4370a55 100644
--- a/include/asm-x86/k8.h
+++ b/include/asm-x86/k8.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_K8_H 1#ifndef ASM_X86__K8_H
2#define _ASM_K8_H 1 2#define ASM_X86__K8_H
3 3
4#include <linux/pci.h> 4#include <linux/pci.h>
5 5
@@ -12,4 +12,4 @@ extern int cache_k8_northbridges(void);
12extern void k8_flush_garts(void); 12extern void k8_flush_garts(void);
13extern int k8_scan_nodes(unsigned long start, unsigned long end); 13extern int k8_scan_nodes(unsigned long start, unsigned long end);
14 14
15#endif 15#endif /* ASM_X86__K8_H */
diff --git a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h
index 96651bb59ba1..5ec3ad3e825c 100644
--- a/include/asm-x86/kdebug.h
+++ b/include/asm-x86/kdebug.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_KDEBUG_H 1#ifndef ASM_X86__KDEBUG_H
2#define _ASM_X86_KDEBUG_H 2#define ASM_X86__KDEBUG_H
3 3
4#include <linux/notifier.h> 4#include <linux/notifier.h>
5 5
@@ -35,4 +35,4 @@ extern void show_regs(struct pt_regs *regs);
35extern unsigned long oops_begin(void); 35extern unsigned long oops_begin(void);
36extern void oops_end(unsigned long, struct pt_regs *, int signr); 36extern void oops_end(unsigned long, struct pt_regs *, int signr);
37 37
38#endif 38#endif /* ASM_X86__KDEBUG_H */
diff --git a/include/asm-x86/kexec.h b/include/asm-x86/kexec.h
index 4246ab7dc988..ea09600d6129 100644
--- a/include/asm-x86/kexec.h
+++ b/include/asm-x86/kexec.h
@@ -1,5 +1,5 @@
1#ifndef _KEXEC_H 1#ifndef ASM_X86__KEXEC_H
2#define _KEXEC_H 2#define ASM_X86__KEXEC_H
3 3
4#ifdef CONFIG_X86_32 4#ifdef CONFIG_X86_32
5# define PA_CONTROL_PAGE 0 5# define PA_CONTROL_PAGE 0
@@ -172,4 +172,4 @@ relocate_kernel(unsigned long indirection_page,
172 172
173#endif /* __ASSEMBLY__ */ 173#endif /* __ASSEMBLY__ */
174 174
175#endif /* _KEXEC_H */ 175#endif /* ASM_X86__KEXEC_H */
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
index 94d63db10365..d283863354de 100644
--- a/include/asm-x86/kgdb.h
+++ b/include/asm-x86/kgdb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_KGDB_H_ 1#ifndef ASM_X86__KGDB_H
2#define _ASM_KGDB_H_ 2#define ASM_X86__KGDB_H
3 3
4/* 4/*
5 * Copyright (C) 2001-2004 Amit S. Kale 5 * Copyright (C) 2001-2004 Amit S. Kale
@@ -76,4 +76,4 @@ static inline void arch_kgdb_breakpoint(void)
76#define BREAK_INSTR_SIZE 1 76#define BREAK_INSTR_SIZE 1
77#define CACHE_FLUSH_IS_SAFE 1 77#define CACHE_FLUSH_IS_SAFE 1
78 78
79#endif /* _ASM_KGDB_H_ */ 79#endif /* ASM_X86__KGDB_H */
diff --git a/include/asm-x86/kmap_types.h b/include/asm-x86/kmap_types.h
index 5f4174132a22..89f44493e643 100644
--- a/include/asm-x86/kmap_types.h
+++ b/include/asm-x86/kmap_types.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_KMAP_TYPES_H 1#ifndef ASM_X86__KMAP_TYPES_H
2#define _ASM_X86_KMAP_TYPES_H 2#define ASM_X86__KMAP_TYPES_H
3 3
4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) 4#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
5# define D(n) __KM_FENCE_##n , 5# define D(n) __KM_FENCE_##n ,
@@ -26,4 +26,4 @@ D(13) KM_TYPE_NR
26 26
27#undef D 27#undef D
28 28
29#endif 29#endif /* ASM_X86__KMAP_TYPES_H */
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
index 54980b0b3892..bd8407863c13 100644
--- a/include/asm-x86/kprobes.h
+++ b/include/asm-x86/kprobes.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_KPROBES_H 1#ifndef ASM_X86__KPROBES_H
2#define _ASM_KPROBES_H 2#define ASM_X86__KPROBES_H
3/* 3/*
4 * Kernel Probes (KProbes) 4 * Kernel Probes (KProbes)
5 * 5 *
@@ -94,4 +94,4 @@ static inline void restore_interrupts(struct pt_regs *regs)
94extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 94extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
95extern int kprobe_exceptions_notify(struct notifier_block *self, 95extern int kprobe_exceptions_notify(struct notifier_block *self,
96 unsigned long val, void *data); 96 unsigned long val, void *data);
97#endif /* _ASM_KPROBES_H */ 97#endif /* ASM_X86__KPROBES_H */
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 6f1840812e59..78e954db1e7f 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -1,5 +1,5 @@
1#ifndef __LINUX_KVM_X86_H 1#ifndef ASM_X86__KVM_H
2#define __LINUX_KVM_X86_H 2#define ASM_X86__KVM_H
3 3
4/* 4/*
5 * KVM x86 specific structures and definitions 5 * KVM x86 specific structures and definitions
@@ -230,4 +230,4 @@ struct kvm_pit_state {
230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) 230#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
231#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) 231#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
232 232
233#endif 233#endif /* ASM_X86__KVM_H */
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index c2e34c275900..69794547f514 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -1,4 +1,4 @@
1#/* 1/*
2 * Kernel-based Virtual Machine driver for Linux 2 * Kernel-based Virtual Machine driver for Linux
3 * 3 *
4 * This header defines architecture specific interfaces, x86 version 4 * This header defines architecture specific interfaces, x86 version
@@ -8,8 +8,8 @@
8 * 8 *
9 */ 9 */
10 10
11#ifndef ASM_KVM_HOST_H 11#ifndef ASM_X86__KVM_HOST_H
12#define ASM_KVM_HOST_H 12#define ASM_X86__KVM_HOST_H
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
@@ -735,4 +735,4 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
735int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 735int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
736int kvm_age_hva(struct kvm *kvm, unsigned long hva); 736int kvm_age_hva(struct kvm *kvm, unsigned long hva);
737 737
738#endif 738#endif /* ASM_X86__KVM_HOST_H */
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index 76f392146daa..30054fded4fb 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -1,5 +1,5 @@
1#ifndef __X86_KVM_PARA_H 1#ifndef ASM_X86__KVM_PARA_H
2#define __X86_KVM_PARA_H 2#define ASM_X86__KVM_PARA_H
3 3
4/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It 4/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
5 * should be used to determine that a VM is running under KVM. 5 * should be used to determine that a VM is running under KVM.
@@ -144,4 +144,4 @@ static inline unsigned int kvm_arch_para_features(void)
144 144
145#endif 145#endif
146 146
147#endif 147#endif /* ASM_X86__KVM_PARA_H */
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index 4e8c1e48d91d..e2d9b030c1ac 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -8,8 +8,8 @@
8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 8 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
9 */ 9 */
10 10
11#ifndef __X86_EMULATE_H__ 11#ifndef ASM_X86__KVM_X86_EMULATE_H
12#define __X86_EMULATE_H__ 12#define ASM_X86__KVM_X86_EMULATE_H
13 13
14struct x86_emulate_ctxt; 14struct x86_emulate_ctxt;
15 15
@@ -181,4 +181,4 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
181int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, 181int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
182 struct x86_emulate_ops *ops); 182 struct x86_emulate_ops *ops);
183 183
184#endif /* __X86_EMULATE_H__ */ 184#endif /* ASM_X86__KVM_X86_EMULATE_H */
diff --git a/include/asm-x86/ldt.h b/include/asm-x86/ldt.h
index 20c597242b53..a5228504d867 100644
--- a/include/asm-x86/ldt.h
+++ b/include/asm-x86/ldt.h
@@ -3,8 +3,8 @@
3 * 3 *
4 * Definitions of structures used with the modify_ldt system call. 4 * Definitions of structures used with the modify_ldt system call.
5 */ 5 */
6#ifndef _ASM_X86_LDT_H 6#ifndef ASM_X86__LDT_H
7#define _ASM_X86_LDT_H 7#define ASM_X86__LDT_H
8 8
9/* Maximum number of LDT entries supported. */ 9/* Maximum number of LDT entries supported. */
10#define LDT_ENTRIES 8192 10#define LDT_ENTRIES 8192
@@ -37,4 +37,4 @@ struct user_desc {
37#define MODIFY_LDT_CONTENTS_CODE 2 37#define MODIFY_LDT_CONTENTS_CODE 2
38 38
39#endif /* !__ASSEMBLY__ */ 39#endif /* !__ASSEMBLY__ */
40#endif 40#endif /* ASM_X86__LDT_H */
diff --git a/include/asm-x86/lguest.h b/include/asm-x86/lguest.h
index be4a7247fa2b..7505e947ed27 100644
--- a/include/asm-x86/lguest.h
+++ b/include/asm-x86/lguest.h
@@ -1,5 +1,5 @@
1#ifndef _X86_LGUEST_H 1#ifndef ASM_X86__LGUEST_H
2#define _X86_LGUEST_H 2#define ASM_X86__LGUEST_H
3 3
4#define GDT_ENTRY_LGUEST_CS 10 4#define GDT_ENTRY_LGUEST_CS 10
5#define GDT_ENTRY_LGUEST_DS 11 5#define GDT_ENTRY_LGUEST_DS 11
@@ -91,4 +91,4 @@ static inline void lguest_set_ts(void)
91 91
92#endif /* __ASSEMBLY__ */ 92#endif /* __ASSEMBLY__ */
93 93
94#endif 94#endif /* ASM_X86__LGUEST_H */
diff --git a/include/asm-x86/lguest_hcall.h b/include/asm-x86/lguest_hcall.h
index a3241f28e34a..8f034ba4b53e 100644
--- a/include/asm-x86/lguest_hcall.h
+++ b/include/asm-x86/lguest_hcall.h
@@ -1,6 +1,6 @@
1/* Architecture specific portion of the lguest hypercalls */ 1/* Architecture specific portion of the lguest hypercalls */
2#ifndef _X86_LGUEST_HCALL_H 2#ifndef ASM_X86__LGUEST_HCALL_H
3#define _X86_LGUEST_HCALL_H 3#define ASM_X86__LGUEST_HCALL_H
4 4
5#define LHCALL_FLUSH_ASYNC 0 5#define LHCALL_FLUSH_ASYNC 0
6#define LHCALL_LGUEST_INIT 1 6#define LHCALL_LGUEST_INIT 1
@@ -68,4 +68,4 @@ struct hcall_args {
68}; 68};
69 69
70#endif /* !__ASSEMBLY__ */ 70#endif /* !__ASSEMBLY__ */
71#endif /* _I386_LGUEST_HCALL_H */ 71#endif /* ASM_X86__LGUEST_HCALL_H */
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index 64e444f8e85b..42d8b62ee8ab 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_LINKAGE_H 1#ifndef ASM_X86__LINKAGE_H
2#define __ASM_LINKAGE_H 2#define ASM_X86__LINKAGE_H
3 3
4#undef notrace 4#undef notrace
5#define notrace __attribute__((no_instrument_function)) 5#define notrace __attribute__((no_instrument_function))
@@ -57,5 +57,5 @@
57#define __ALIGN_STR ".align 16,0x90" 57#define __ALIGN_STR ".align 16,0x90"
58#endif 58#endif
59 59
60#endif 60#endif /* ASM_X86__LINKAGE_H */
61 61
diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index 330a72496abd..ae91994fd6c9 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -1,5 +1,5 @@
1#ifndef _ARCH_LOCAL_H 1#ifndef ASM_X86__LOCAL_H
2#define _ARCH_LOCAL_H 2#define ASM_X86__LOCAL_H
3 3
4#include <linux/percpu.h> 4#include <linux/percpu.h>
5 5
@@ -232,4 +232,4 @@ static inline long local_sub_return(long i, local_t *l)
232#define __cpu_local_add(i, l) cpu_local_add((i), (l)) 232#define __cpu_local_add(i, l) cpu_local_add((i), (l))
233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) 233#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
234 234
235#endif /* _ARCH_LOCAL_H */ 235#endif /* ASM_X86__LOCAL_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/mach-bigsmp/mach_apic.h
index c3b9dc6970c9..05362d44a3ee 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/mach-bigsmp/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_BIGSMP__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_BIGSMP__MACH_APIC_H
3 3
4#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) 4#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
5#define esr_disable (1) 5#define esr_disable (1)
@@ -141,4 +141,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
141 return cpuid_apic >> index_msb; 141 return cpuid_apic >> index_msb;
142} 142}
143 143
144#endif /* __ASM_MACH_APIC_H */ 144#endif /* ASM_X86__MACH_BIGSMP__MACH_APIC_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_apicdef.h b/include/asm-x86/mach-bigsmp/mach_apicdef.h
index a58ab5a75c8c..811935d9d49b 100644
--- a/include/asm-x86/mach-bigsmp/mach_apicdef.h
+++ b/include/asm-x86/mach-bigsmp/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_BIGSMP__MACH_APICDEF_H
3 3
4#define APIC_ID_MASK (0xFF<<24) 4#define APIC_ID_MASK (0xFF<<24)
5 5
@@ -10,4 +10,4 @@ static inline unsigned get_apic_id(unsigned long x)
10 10
11#define GET_APIC_ID(x) get_apic_id(x) 11#define GET_APIC_ID(x) get_apic_id(x)
12 12
13#endif 13#endif /* ASM_X86__MACH_BIGSMP__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-bigsmp/mach_ipi.h b/include/asm-x86/mach-bigsmp/mach_ipi.h
index 9404c535b7ec..b1b0f966a009 100644
--- a/include/asm-x86/mach-bigsmp/mach_ipi.h
+++ b/include/asm-x86/mach-bigsmp/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_BIGSMP__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_BIGSMP__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* ASM_X86__MACH_BIGSMP__MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/apm.h b/include/asm-x86/mach-default/apm.h
index 989f34c37d32..2aa61b54fbd5 100644
--- a/include/asm-x86/mach-default/apm.h
+++ b/include/asm-x86/mach-default/apm.h
@@ -3,8 +3,8 @@
3 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> 3 * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5 5
6#ifndef _ASM_APM_H 6#ifndef ASM_X86__MACH_DEFAULT__APM_H
7#define _ASM_APM_H 7#define ASM_X86__MACH_DEFAULT__APM_H
8 8
9#ifdef APM_ZERO_SEGS 9#ifdef APM_ZERO_SEGS
10# define APM_DO_ZERO_SEGS \ 10# define APM_DO_ZERO_SEGS \
@@ -70,4 +70,4 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
70 return error; 70 return error;
71} 71}
72 72
73#endif /* _ASM_APM_H */ 73#endif /* ASM_X86__MACH_DEFAULT__APM_H */
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h
index f3226b9a6b82..b615f40736be 100644
--- a/include/asm-x86/mach-default/mach_apic.h
+++ b/include/asm-x86/mach-default/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_DEFAULT__MACH_APIC_H
3 3
4#ifdef CONFIG_X86_LOCAL_APIC 4#ifdef CONFIG_X86_LOCAL_APIC
5 5
@@ -138,4 +138,4 @@ static inline void enable_apic_mode(void)
138} 138}
139 139
140#endif /* CONFIG_X86_LOCAL_APIC */ 140#endif /* CONFIG_X86_LOCAL_APIC */
141#endif /* __ASM_MACH_APIC_H */ 141#endif /* ASM_X86__MACH_DEFAULT__MACH_APIC_H */
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h
index e4b29ba37de6..936704f816d6 100644
--- a/include/asm-x86/mach-default/mach_apicdef.h
+++ b/include/asm-x86/mach-default/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_DEFAULT__MACH_APICDEF_H
3 3
4#include <asm/apic.h> 4#include <asm/apic.h>
5 5
@@ -21,4 +21,4 @@ static inline unsigned get_apic_id(unsigned long x)
21#define GET_APIC_ID(x) get_apic_id(x) 21#define GET_APIC_ID(x) get_apic_id(x)
22#endif 22#endif
23 23
24#endif 24#endif /* ASM_X86__MACH_DEFAULT__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-default/mach_ipi.h b/include/asm-x86/mach-default/mach_ipi.h
index be323364e68f..674bc7e50c35 100644
--- a/include/asm-x86/mach-default/mach_ipi.h
+++ b/include/asm-x86/mach-default/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_DEFAULT__MACH_IPI_H
3 3
4/* Avoid include hell */ 4/* Avoid include hell */
5#define NMI_VECTOR 0x02 5#define NMI_VECTOR 0x02
@@ -61,4 +61,4 @@ static inline void send_IPI_all(int vector)
61} 61}
62#endif 62#endif
63 63
64#endif /* __ASM_MACH_IPI_H */ 64#endif /* ASM_X86__MACH_DEFAULT__MACH_IPI_H */
diff --git a/include/asm-x86/mach-default/mach_mpparse.h b/include/asm-x86/mach-default/mach_mpparse.h
index d14108505bb8..9c381f2815ac 100644
--- a/include/asm-x86/mach-default/mach_mpparse.h
+++ b/include/asm-x86/mach-default/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H
3 3
4static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, 4static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid) 5 char *productid)
@@ -14,4 +14,4 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
14} 14}
15 15
16 16
17#endif /* __ASM_MACH_MPPARSE_H */ 17#endif /* ASM_X86__MACH_DEFAULT__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-default/mach_mpspec.h b/include/asm-x86/mach-default/mach_mpspec.h
index 51c9a9775932..d77646f011f1 100644
--- a/include/asm-x86/mach-default/mach_mpspec.h
+++ b/include/asm-x86/mach-default/mach_mpspec.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPSPEC_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H 2#define ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H
3 3
4#define MAX_IRQ_SOURCES 256 4#define MAX_IRQ_SOURCES 256
5 5
@@ -9,4 +9,4 @@
9#define MAX_MP_BUSSES 32 9#define MAX_MP_BUSSES 32
10#endif 10#endif
11 11
12#endif /* __ASM_MACH_MPSPEC_H */ 12#endif /* ASM_X86__MACH_DEFAULT__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-default/mach_timer.h b/include/asm-x86/mach-default/mach_timer.h
index 4b76e536cd98..990b15833834 100644
--- a/include/asm-x86/mach-default/mach_timer.h
+++ b/include/asm-x86/mach-default/mach_timer.h
@@ -10,8 +10,8 @@
10 * directly because of the awkward 8-bit access mechanism of the 82C54 10 * directly because of the awkward 8-bit access mechanism of the 82C54
11 * device. 11 * device.
12 */ 12 */
13#ifndef _MACH_TIMER_H 13#ifndef ASM_X86__MACH_DEFAULT__MACH_TIMER_H
14#define _MACH_TIMER_H 14#define ASM_X86__MACH_DEFAULT__MACH_TIMER_H
15 15
16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ 16#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
17#define CALIBRATE_LATCH \ 17#define CALIBRATE_LATCH \
@@ -45,4 +45,4 @@ static inline void mach_countup(unsigned long *count_p)
45 *count_p = count; 45 *count_p = count;
46} 46}
47 47
48#endif /* !_MACH_TIMER_H */ 48#endif /* ASM_X86__MACH_DEFAULT__MACH_TIMER_H */
diff --git a/include/asm-x86/mach-default/mach_traps.h b/include/asm-x86/mach-default/mach_traps.h
index 2fe7705c0484..de9ac3f5c4ce 100644
--- a/include/asm-x86/mach-default/mach_traps.h
+++ b/include/asm-x86/mach-default/mach_traps.h
@@ -2,8 +2,8 @@
2 * Machine specific NMI handling for generic. 2 * Machine specific NMI handling for generic.
3 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> 3 * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
4 */ 4 */
5#ifndef _MACH_TRAPS_H 5#ifndef ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
6#define _MACH_TRAPS_H 6#define ASM_X86__MACH_DEFAULT__MACH_TRAPS_H
7 7
8#include <asm/mc146818rtc.h> 8#include <asm/mc146818rtc.h>
9 9
@@ -36,4 +36,4 @@ static inline void reassert_nmi(void)
36 unlock_cmos(); 36 unlock_cmos();
37} 37}
38 38
39#endif /* !_MACH_TRAPS_H */ 39#endif /* ASM_X86__MACH_DEFAULT__MACH_TRAPS_H */
diff --git a/include/asm-x86/mach-default/mach_wakecpu.h b/include/asm-x86/mach-default/mach_wakecpu.h
index 3ebb17893aa5..361b810f5160 100644
--- a/include/asm-x86/mach-default/mach_wakecpu.h
+++ b/include/asm-x86/mach-default/mach_wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H
3 3
4/* 4/*
5 * This file copes with machines that wakeup secondary CPUs by the 5 * This file copes with machines that wakeup secondary CPUs by the
@@ -39,4 +39,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
39 #define inquire_remote_apic(apicid) {} 39 #define inquire_remote_apic(apicid) {}
40#endif 40#endif
41 41
42#endif /* __ASM_MACH_WAKECPU_H */ 42#endif /* ASM_X86__MACH_DEFAULT__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/mach-es7000/mach_apic.h
index 0a3fdf930672..c1f6f682d619 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/mach-es7000/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_ES7000__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_ES7000__MACH_APIC_H
3 3
4#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) 4#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
5#define esr_disable (1) 5#define esr_disable (1)
@@ -191,4 +191,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
191 return cpuid_apic >> index_msb; 191 return cpuid_apic >> index_msb;
192} 192}
193 193
194#endif /* __ASM_MACH_APIC_H */ 194#endif /* ASM_X86__MACH_ES7000__MACH_APIC_H */
diff --git a/include/asm-x86/mach-es7000/mach_apicdef.h b/include/asm-x86/mach-es7000/mach_apicdef.h
index a58ab5a75c8c..a07e56744028 100644
--- a/include/asm-x86/mach-es7000/mach_apicdef.h
+++ b/include/asm-x86/mach-es7000/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_ES7000__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_ES7000__MACH_APICDEF_H
3 3
4#define APIC_ID_MASK (0xFF<<24) 4#define APIC_ID_MASK (0xFF<<24)
5 5
@@ -10,4 +10,4 @@ static inline unsigned get_apic_id(unsigned long x)
10 10
11#define GET_APIC_ID(x) get_apic_id(x) 11#define GET_APIC_ID(x) get_apic_id(x)
12 12
13#endif 13#endif /* ASM_X86__MACH_ES7000__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-es7000/mach_ipi.h b/include/asm-x86/mach-es7000/mach_ipi.h
index 5e61bd220b06..3a21240e03dc 100644
--- a/include/asm-x86/mach-es7000/mach_ipi.h
+++ b/include/asm-x86/mach-es7000/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_ES7000__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_ES7000__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -21,4 +21,4 @@ static inline void send_IPI_all(int vector)
21 send_IPI_mask(cpu_online_map, vector); 21 send_IPI_mask(cpu_online_map, vector);
22} 22}
23 23
24#endif /* __ASM_MACH_IPI_H */ 24#endif /* ASM_X86__MACH_ES7000__MACH_IPI_H */
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/mach-es7000/mach_mpparse.h
index ef26d3523625..befde24705b7 100644
--- a/include/asm-x86/mach-es7000/mach_mpparse.h
+++ b/include/asm-x86/mach-es7000/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_ES7000__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_ES7000__MACH_MPPARSE_H
3 3
4#include <linux/acpi.h> 4#include <linux/acpi.h>
5 5
@@ -26,4 +26,4 @@ static inline int es7000_check_dsdt(void)
26} 26}
27#endif 27#endif
28 28
29#endif /* __ASM_MACH_MPPARSE_H */ 29#endif /* ASM_X86__MACH_ES7000__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-es7000/mach_wakecpu.h b/include/asm-x86/mach-es7000/mach_wakecpu.h
index 84ff58314501..97c776ce13f2 100644
--- a/include/asm-x86/mach-es7000/mach_wakecpu.h
+++ b/include/asm-x86/mach-es7000/mach_wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef ASM_X86__MACH_ES7000__MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define ASM_X86__MACH_ES7000__MACH_WAKECPU_H
3 3
4/* 4/*
5 * This file copes with machines that wakeup secondary CPUs by the 5 * This file copes with machines that wakeup secondary CPUs by the
@@ -56,4 +56,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
56 #define inquire_remote_apic(apicid) {} 56 #define inquire_remote_apic(apicid) {}
57#endif 57#endif
58 58
59#endif /* __ASM_MACH_WAKECPU_H */ 59#endif /* ASM_X86__MACH_ES7000__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-generic/gpio.h b/include/asm-x86/mach-generic/gpio.h
index 5305dcb96df2..6ce0f7786ef8 100644
--- a/include/asm-x86/mach-generic/gpio.h
+++ b/include/asm-x86/mach-generic/gpio.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_GENERIC_GPIO_H 1#ifndef ASM_X86__MACH_GENERIC__GPIO_H
2#define __ASM_MACH_GENERIC_GPIO_H 2#define ASM_X86__MACH_GENERIC__GPIO_H
3 3
4int gpio_request(unsigned gpio, const char *label); 4int gpio_request(unsigned gpio, const char *label);
5void gpio_free(unsigned gpio); 5void gpio_free(unsigned gpio);
@@ -12,4 +12,4 @@ int irq_to_gpio(unsigned irq);
12 12
13#include <asm-generic/gpio.h> /* cansleep wrappers */ 13#include <asm-generic/gpio.h> /* cansleep wrappers */
14 14
15#endif /* __ASM_MACH_GENERIC_GPIO_H */ 15#endif /* ASM_X86__MACH_GENERIC__GPIO_H */
diff --git a/include/asm-x86/mach-generic/irq_vectors_limits.h b/include/asm-x86/mach-generic/irq_vectors_limits.h
index 890ce3f5e09a..f7870e1a220d 100644
--- a/include/asm-x86/mach-generic/irq_vectors_limits.h
+++ b/include/asm-x86/mach-generic/irq_vectors_limits.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H 1#ifndef ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H 2#define ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H
3 3
4/* 4/*
5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, 5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
@@ -11,4 +11,4 @@
11#define NR_IRQS 224 11#define NR_IRQS 224
12#define NR_IRQ_VECTORS 1024 12#define NR_IRQ_VECTORS 1024
13 13
14#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ 14#endif /* ASM_X86__MACH_GENERIC__IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-generic/mach_apic.h b/include/asm-x86/mach-generic/mach_apic.h
index 6eff343e1233..5d010c6881dd 100644
--- a/include/asm-x86/mach-generic/mach_apic.h
+++ b/include/asm-x86/mach-generic/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_GENERIC__MACH_APIC_H
3 3
4#include <asm/genapic.h> 4#include <asm/genapic.h>
5 5
@@ -29,4 +29,4 @@
29 29
30extern void generic_bigsmp_probe(void); 30extern void generic_bigsmp_probe(void);
31 31
32#endif /* __ASM_MACH_APIC_H */ 32#endif /* ASM_X86__MACH_GENERIC__MACH_APIC_H */
diff --git a/include/asm-x86/mach-generic/mach_apicdef.h b/include/asm-x86/mach-generic/mach_apicdef.h
index 28ed98972ca8..1657f38b8f27 100644
--- a/include/asm-x86/mach-generic/mach_apicdef.h
+++ b/include/asm-x86/mach-generic/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef _GENAPIC_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_APICDEF_H
2#define _GENAPIC_MACH_APICDEF_H 1 2#define ASM_X86__MACH_GENERIC__MACH_APICDEF_H
3 3
4#ifndef APIC_DEFINITION 4#ifndef APIC_DEFINITION
5#include <asm/genapic.h> 5#include <asm/genapic.h>
@@ -8,4 +8,4 @@
8#define APIC_ID_MASK (genapic->apic_id_mask) 8#define APIC_ID_MASK (genapic->apic_id_mask)
9#endif 9#endif
10 10
11#endif 11#endif /* ASM_X86__MACH_GENERIC__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-generic/mach_ipi.h b/include/asm-x86/mach-generic/mach_ipi.h
index 441b0fe3ed1d..f67433dbd65f 100644
--- a/include/asm-x86/mach-generic/mach_ipi.h
+++ b/include/asm-x86/mach-generic/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef _MACH_IPI_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_IPI_H
2#define _MACH_IPI_H 1 2#define ASM_X86__MACH_GENERIC__MACH_IPI_H
3 3
4#include <asm/genapic.h> 4#include <asm/genapic.h>
5 5
@@ -7,4 +7,4 @@
7#define send_IPI_allbutself (genapic->send_IPI_allbutself) 7#define send_IPI_allbutself (genapic->send_IPI_allbutself)
8#define send_IPI_all (genapic->send_IPI_all) 8#define send_IPI_all (genapic->send_IPI_all)
9 9
10#endif 10#endif /* ASM_X86__MACH_GENERIC__MACH_IPI_H */
diff --git a/include/asm-x86/mach-generic/mach_mpparse.h b/include/asm-x86/mach-generic/mach_mpparse.h
index 586cadbf3787..3115564e557c 100644
--- a/include/asm-x86/mach-generic/mach_mpparse.h
+++ b/include/asm-x86/mach-generic/mach_mpparse.h
@@ -1,5 +1,5 @@
1#ifndef _MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
2#define _MACH_MPPARSE_H 1 2#define ASM_X86__MACH_GENERIC__MACH_MPPARSE_H
3 3
4 4
5extern int mps_oem_check(struct mp_config_table *mpc, char *oem, 5extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
@@ -7,4 +7,4 @@ extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
7 7
8extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); 8extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
9 9
10#endif 10#endif /* ASM_X86__MACH_GENERIC__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-generic/mach_mpspec.h b/include/asm-x86/mach-generic/mach_mpspec.h
index c83c120be538..6061b153613e 100644
--- a/include/asm-x86/mach-generic/mach_mpspec.h
+++ b/include/asm-x86/mach-generic/mach_mpspec.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_MPSPEC_H 1#ifndef ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
2#define __ASM_MACH_MPSPEC_H 2#define ASM_X86__MACH_GENERIC__MACH_MPSPEC_H
3 3
4#define MAX_IRQ_SOURCES 256 4#define MAX_IRQ_SOURCES 256
5 5
@@ -9,4 +9,4 @@
9 9
10extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, 10extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
11 char *productid); 11 char *productid);
12#endif /* __ASM_MACH_MPSPEC_H */ 12#endif /* ASM_X86__MACH_GENERIC__MACH_MPSPEC_H */
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h
index d802465e026a..7a0d39edfcfa 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/mach-numaq/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_NUMAQ__MACH_APIC_H
3 3
4#include <asm/io.h> 4#include <asm/io.h>
5#include <linux/mmzone.h> 5#include <linux/mmzone.h>
@@ -135,4 +135,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
135 return cpuid_apic >> index_msb; 135 return cpuid_apic >> index_msb;
136} 136}
137 137
138#endif /* __ASM_MACH_APIC_H */ 138#endif /* ASM_X86__MACH_NUMAQ__MACH_APIC_H */
diff --git a/include/asm-x86/mach-numaq/mach_apicdef.h b/include/asm-x86/mach-numaq/mach_apicdef.h
index bf439d0690f5..f870ec5f7782 100644
--- a/include/asm-x86/mach-numaq/mach_apicdef.h
+++ b/include/asm-x86/mach-numaq/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_NUMAQ__MACH_APICDEF_H
3 3
4 4
5#define APIC_ID_MASK (0xF<<24) 5#define APIC_ID_MASK (0xF<<24)
@@ -11,4 +11,4 @@ static inline unsigned get_apic_id(unsigned long x)
11 11
12#define GET_APIC_ID(x) get_apic_id(x) 12#define GET_APIC_ID(x) get_apic_id(x)
13 13
14#endif 14#endif /* ASM_X86__MACH_NUMAQ__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-numaq/mach_ipi.h b/include/asm-x86/mach-numaq/mach_ipi.h
index c6044488e9e6..1e835823f4bc 100644
--- a/include/asm-x86/mach-numaq/mach_ipi.h
+++ b/include/asm-x86/mach-numaq/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_NUMAQ__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t, int vector); 4void send_IPI_mask_sequence(cpumask_t, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* ASM_X86__MACH_NUMAQ__MACH_IPI_H */
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h
index 626aef6b155f..74ade184920b 100644
--- a/include/asm-x86/mach-numaq/mach_mpparse.h
+++ b/include/asm-x86/mach-numaq/mach_mpparse.h
@@ -1,7 +1,7 @@
1#ifndef __ASM_MACH_MPPARSE_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H
2#define __ASM_MACH_MPPARSE_H 2#define ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H
3 3
4extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, 4extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
5 char *productid); 5 char *productid);
6 6
7#endif /* __ASM_MACH_MPPARSE_H */ 7#endif /* ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H */
diff --git a/include/asm-x86/mach-numaq/mach_wakecpu.h b/include/asm-x86/mach-numaq/mach_wakecpu.h
index 00530041a991..0db8cea643c0 100644
--- a/include/asm-x86/mach-numaq/mach_wakecpu.h
+++ b/include/asm-x86/mach-numaq/mach_wakecpu.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_WAKECPU_H 1#ifndef ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H
2#define __ASM_MACH_WAKECPU_H 2#define ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H
3 3
4/* This file copes with machines that wakeup secondary CPUs by NMIs */ 4/* This file copes with machines that wakeup secondary CPUs by NMIs */
5 5
@@ -40,4 +40,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
40 40
41#define inquire_remote_apic(apicid) {} 41#define inquire_remote_apic(apicid) {}
42 42
43#endif /* __ASM_MACH_WAKECPU_H */ 43#endif /* ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H */
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index acce0b7d397b..94b6cdf532e2 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -1,5 +1,7 @@
1#ifndef _RDC321X_GPIO_H 1#ifndef ASM_X86__MACH_RDC321X__GPIO_H
2#define _RDC321X_GPIO_H 2#define ASM_X86__MACH_RDC321X__GPIO_H
3
4#include <linux/kernel.h>
3 5
4extern int rdc_gpio_get_value(unsigned gpio); 6extern int rdc_gpio_get_value(unsigned gpio);
5extern void rdc_gpio_set_value(unsigned gpio, int value); 7extern void rdc_gpio_set_value(unsigned gpio, int value);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)
18 20
19static inline void gpio_free(unsigned gpio) 21static inline void gpio_free(unsigned gpio)
20{ 22{
23 might_sleep();
21 rdc_gpio_free(gpio); 24 rdc_gpio_free(gpio);
22} 25}
23 26
@@ -54,4 +57,4 @@ static inline int irq_to_gpio(unsigned irq)
54/* For cansleep */ 57/* For cansleep */
55#include <asm-generic/gpio.h> 58#include <asm-generic/gpio.h>
56 59
57#endif /* _RDC321X_GPIO_H_ */ 60#endif /* ASM_X86__MACH_RDC321X__GPIO_H */
diff --git a/include/asm-x86/mach-summit/irq_vectors_limits.h b/include/asm-x86/mach-summit/irq_vectors_limits.h
index 890ce3f5e09a..22f376ad68e1 100644
--- a/include/asm-x86/mach-summit/irq_vectors_limits.h
+++ b/include/asm-x86/mach-summit/irq_vectors_limits.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H 1#ifndef ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H 2#define ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H
3 3
4/* 4/*
5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, 5 * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs,
@@ -11,4 +11,4 @@
11#define NR_IRQS 224 11#define NR_IRQS 224
12#define NR_IRQ_VECTORS 1024 12#define NR_IRQ_VECTORS 1024
13 13
14#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ 14#endif /* ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/mach-summit/mach_apic.h
index c47e2ab5c5ca..7a66758d701d 100644
--- a/include/asm-x86/mach-summit/mach_apic.h
+++ b/include/asm-x86/mach-summit/mach_apic.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef ASM_X86__MACH_SUMMIT__MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define ASM_X86__MACH_SUMMIT__MACH_APIC_H
3 3
4#include <asm/smp.h> 4#include <asm/smp.h>
5 5
@@ -182,4 +182,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
182 return hard_smp_processor_id() >> index_msb; 182 return hard_smp_processor_id() >> index_msb;
183} 183}
184 184
185#endif /* __ASM_MACH_APIC_H */ 185#endif /* ASM_X86__MACH_SUMMIT__MACH_APIC_H */
diff --git a/include/asm-x86/mach-summit/mach_apicdef.h b/include/asm-x86/mach-summit/mach_apicdef.h
index a58ab5a75c8c..d4bc8590c4f6 100644
--- a/include/asm-x86/mach-summit/mach_apicdef.h
+++ b/include/asm-x86/mach-summit/mach_apicdef.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define ASM_X86__MACH_SUMMIT__MACH_APICDEF_H
3 3
4#define APIC_ID_MASK (0xFF<<24) 4#define APIC_ID_MASK (0xFF<<24)
5 5
@@ -10,4 +10,4 @@ static inline unsigned get_apic_id(unsigned long x)
10 10
11#define GET_APIC_ID(x) get_apic_id(x) 11#define GET_APIC_ID(x) get_apic_id(x)
12 12
13#endif 13#endif /* ASM_X86__MACH_SUMMIT__MACH_APICDEF_H */
diff --git a/include/asm-x86/mach-summit/mach_ipi.h b/include/asm-x86/mach-summit/mach_ipi.h
index 9404c535b7ec..a3b31c528d90 100644
--- a/include/asm-x86/mach-summit/mach_ipi.h
+++ b/include/asm-x86/mach-summit/mach_ipi.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_MACH_IPI_H 1#ifndef ASM_X86__MACH_SUMMIT__MACH_IPI_H
2#define __ASM_MACH_IPI_H 2#define ASM_X86__MACH_SUMMIT__MACH_IPI_H
3 3
4void send_IPI_mask_sequence(cpumask_t mask, int vector); 4void send_IPI_mask_sequence(cpumask_t mask, int vector);
5 5
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector)
22 send_IPI_mask(cpu_online_map, vector); 22 send_IPI_mask(cpu_online_map, vector);
23} 23}
24 24
25#endif /* __ASM_MACH_IPI_H */ 25#endif /* ASM_X86__MACH_SUMMIT__MACH_IPI_H */
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/mach-summit/mach_mpparse.h
index fdf591701339..92396f28772b 100644
--- a/include/asm-x86/mach-summit/mach_mpparse.h
+++ b/include/asm-x86/mach-summit/mach_mpparse.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_MACH_MPPARSE_H
-#define __ASM_MACH_MPPARSE_H
+#ifndef ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
+#define ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H
 
 #include <mach_apic.h>
 #include <asm/tsc.h>
@@ -107,4 +107,4 @@ static inline int is_WPEG(struct rio_detail *rio){
 		rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
 }
 
-#endif /* __ASM_MACH_MPPARSE_H */
+#endif /* ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H */
diff --git a/include/asm-x86/math_emu.h b/include/asm-x86/math_emu.h
index 9bf4ae93ab10..5768d8e95c8c 100644
--- a/include/asm-x86/math_emu.h
+++ b/include/asm-x86/math_emu.h
@@ -1,5 +1,5 @@
-#ifndef _I386_MATH_EMU_H
-#define _I386_MATH_EMU_H
+#ifndef ASM_X86__MATH_EMU_H
+#define ASM_X86__MATH_EMU_H
 
 /* This structure matches the layout of the data saved to the stack
    following a device-not-present interrupt, part of it saved
@@ -28,4 +28,4 @@ struct info {
 	long ___vm86_fs;
 	long ___vm86_gs;
 };
-#endif
+#endif /* ASM_X86__MATH_EMU_H */
diff --git a/include/asm-x86/mc146818rtc.h b/include/asm-x86/mc146818rtc.h
index daf1ccde77af..a995f33176cd 100644
--- a/include/asm-x86/mc146818rtc.h
+++ b/include/asm-x86/mc146818rtc.h
@@ -1,8 +1,8 @@
 /*
  * Machine dependent access functions for RTC registers.
  */
-#ifndef _ASM_MC146818RTC_H
-#define _ASM_MC146818RTC_H
+#ifndef ASM_X86__MC146818RTC_H
+#define ASM_X86__MC146818RTC_H
 
 #include <asm/io.h>
 #include <asm/system.h>
@@ -101,4 +101,4 @@ extern unsigned long mach_get_cmos_time(void);
 
 #define RTC_IRQ 8
 
-#endif /* _ASM_MC146818RTC_H */
+#endif /* ASM_X86__MC146818RTC_H */
diff --git a/include/asm-x86/mca.h b/include/asm-x86/mca.h
index 09adf2eac4dc..60d1ed287b13 100644
--- a/include/asm-x86/mca.h
+++ b/include/asm-x86/mca.h
@@ -1,8 +1,8 @@
 /* -*- mode: c; c-basic-offset: 8 -*- */
 
 /* Platform specific MCA defines */
-#ifndef _ASM_MCA_H
-#define _ASM_MCA_H
+#ifndef ASM_X86__MCA_H
+#define ASM_X86__MCA_H
 
 /* Maximal number of MCA slots - actually, some machines have less, but
  * they all have sufficient number of POS registers to cover 8.
@@ -40,4 +40,4 @@
  */
 #define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3)
 
-#endif
+#endif /* ASM_X86__MCA_H */
diff --git a/include/asm-x86/mca_dma.h b/include/asm-x86/mca_dma.h
index c3dca6edc6b1..49f22be237d2 100644
--- a/include/asm-x86/mca_dma.h
+++ b/include/asm-x86/mca_dma.h
@@ -1,5 +1,5 @@
-#ifndef MCA_DMA_H
-#define MCA_DMA_H
+#ifndef ASM_X86__MCA_DMA_H
+#define ASM_X86__MCA_DMA_H
 
 #include <asm/io.h>
 #include <linux/ioport.h>
@@ -198,4 +198,4 @@ static inline void mca_set_dma_mode(unsigned int dmanr, unsigned int mode)
 	outb(mode, MCA_DMA_REG_EXE);
 }
 
-#endif /* MCA_DMA_H */
+#endif /* ASM_X86__MCA_DMA_H */
diff --git a/include/asm-x86/mce.h b/include/asm-x86/mce.h
index 531eaa587455..036133eaf744 100644
--- a/include/asm-x86/mce.h
+++ b/include/asm-x86/mce.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_MCE_H
-#define _ASM_X86_MCE_H
+#ifndef ASM_X86__MCE_H
+#define ASM_X86__MCE_H
 
 #ifdef __x86_64__
 
@@ -127,4 +127,4 @@ extern void restart_mce(void);
 
 #endif /* __KERNEL__ */
 
-#endif
+#endif /* ASM_X86__MCE_H */
diff --git a/include/asm-x86/mman.h b/include/asm-x86/mman.h
index 90bc4108a4fd..4ef28e6de383 100644
--- a/include/asm-x86/mman.h
+++ b/include/asm-x86/mman.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_MMAN_H
-#define _ASM_X86_MMAN_H
+#ifndef ASM_X86__MMAN_H
+#define ASM_X86__MMAN_H
 
 #include <asm-generic/mman.h>
 
@@ -17,4 +17,4 @@
 #define MCL_CURRENT 1 /* lock all current mappings */
 #define MCL_FUTURE 2 /* lock all future mappings */
 
-#endif /* _ASM_X86_MMAN_H */
+#endif /* ASM_X86__MMAN_H */
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
index e293ab81e850..fb79b1cf5d07 100644
--- a/include/asm-x86/mmconfig.h
+++ b/include/asm-x86/mmconfig.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_MMCONFIG_H
-#define _ASM_MMCONFIG_H
+#ifndef ASM_X86__MMCONFIG_H
+#define ASM_X86__MMCONFIG_H
 
 #ifdef CONFIG_PCI_MMCONFIG
 extern void __cpuinit fam10h_check_enable_mmcfg(void);
@@ -9,4 +9,4 @@ static inline void fam10h_check_enable_mmcfg(void) { }
 static inline void check_enable_amd_mmconf_dmi(void) { }
 #endif
 
-#endif
+#endif /* ASM_X86__MMCONFIG_H */
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index 00e88679e11f..9d5aff14334a 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_MMU_H
-#define _ASM_X86_MMU_H
+#ifndef ASM_X86__MMU_H
+#define ASM_X86__MMU_H
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
@@ -7,14 +7,9 @@
 /*
  * The x86 doesn't have a mmu context, but
  * we put the segment information here.
- *
- * cpu_vm_mask is used to optimize ldt flushing.
  */
 typedef struct {
 	void *ldt;
-#ifdef CONFIG_X86_64
-	rwlock_t ldtlock;
-#endif
 	int size;
 	struct mutex lock;
 	void *vdso;
@@ -28,4 +23,4 @@ static inline void leave_mm(int cpu)
 }
 #endif
 
-#endif /* _ASM_X86_MMU_H */
+#endif /* ASM_X86__MMU_H */
diff --git a/include/asm-x86/mmu_context.h b/include/asm-x86/mmu_context.h
index fac57014e7c6..8ec940bfd079 100644
--- a/include/asm-x86/mmu_context.h
+++ b/include/asm-x86/mmu_context.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_X86_MMU_CONTEXT_H
-#define __ASM_X86_MMU_CONTEXT_H
+#ifndef ASM_X86__MMU_CONTEXT_H
+#define ASM_X86__MMU_CONTEXT_H
 
 #include <asm/desc.h>
 #include <asm/atomic.h>
@@ -34,4 +34,4 @@ do { \
 } while (0);
 
 
-#endif /* __ASM_X86_MMU_CONTEXT_H */
+#endif /* ASM_X86__MMU_CONTEXT_H */
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 824fc575c6d8..cce6f6e4afd6 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -1,5 +1,5 @@
-#ifndef __I386_SCHED_H
-#define __I386_SCHED_H
+#ifndef ASM_X86__MMU_CONTEXT_32_H
+#define ASM_X86__MMU_CONTEXT_32_H
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
@@ -53,4 +53,4 @@ static inline void switch_mm(struct mm_struct *prev,
 #define deactivate_mm(tsk, mm)			\
 	asm("movl %0,%%gs": :"r" (0));
 
-#endif
+#endif /* ASM_X86__MMU_CONTEXT_32_H */
diff --git a/include/asm-x86/mmu_context_64.h b/include/asm-x86/mmu_context_64.h
index c7000634ccae..26758673c828 100644
--- a/include/asm-x86/mmu_context_64.h
+++ b/include/asm-x86/mmu_context_64.h
@@ -1,5 +1,5 @@
-#ifndef __X86_64_MMU_CONTEXT_H
-#define __X86_64_MMU_CONTEXT_H
+#ifndef ASM_X86__MMU_CONTEXT_64_H
+#define ASM_X86__MMU_CONTEXT_64_H
 
 #include <asm/pda.h>
 
@@ -51,4 +51,4 @@ do { \
 	asm volatile("movl %0,%%fs"::"r"(0));  \
 } while (0)
 
-#endif
+#endif /* ASM_X86__MMU_CONTEXT_64_H */
diff --git a/include/asm-x86/mmx.h b/include/asm-x86/mmx.h
index 940881218ff8..2e7299bb3653 100644
--- a/include/asm-x86/mmx.h
+++ b/include/asm-x86/mmx.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_MMX_H
-#define _ASM_MMX_H
+#ifndef ASM_X86__MMX_H
+#define ASM_X86__MMX_H
 
 /*
  *	MMX 3Dnow! helper operations
@@ -11,4 +11,4 @@ extern void *_mmx_memcpy(void *to, const void *from, size_t size);
 extern void mmx_clear_page(void *page);
 extern void mmx_copy_page(void *to, void *from);
 
-#endif
+#endif /* ASM_X86__MMX_H */
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index 5862e6460658..121b65d61d86 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -3,8 +3,8 @@
  *
  */
 
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
+#ifndef ASM_X86__MMZONE_32_H
+#define ASM_X86__MMZONE_32_H
 
 #include <asm/smp.h>
 
@@ -131,4 +131,4 @@ static inline int pfn_valid(int pfn)
 })
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
-#endif /* _ASM_MMZONE_H_ */
+#endif /* ASM_X86__MMZONE_32_H */
diff --git a/include/asm-x86/mmzone_64.h b/include/asm-x86/mmzone_64.h
index 594bd0dc1d08..626b03a14875 100644
--- a/include/asm-x86/mmzone_64.h
+++ b/include/asm-x86/mmzone_64.h
@@ -1,8 +1,8 @@
 /* K8 NUMA support */
 /* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
 /* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
-#ifndef _ASM_X86_64_MMZONE_H
-#define _ASM_X86_64_MMZONE_H 1
+#ifndef ASM_X86__MMZONE_64_H
+#define ASM_X86__MMZONE_64_H
 
 
 #ifdef CONFIG_NUMA
@@ -49,4 +49,4 @@ extern int early_pfn_to_nid(unsigned long pfn);
 #endif
 
 #endif
-#endif
+#endif /* ASM_X86__MMZONE_64_H */
diff --git a/include/asm-x86/module.h b/include/asm-x86/module.h
index bfedb247871c..48dc3e0c07d9 100644
--- a/include/asm-x86/module.h
+++ b/include/asm-x86/module.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_MODULE_H
-#define _ASM_MODULE_H
+#ifndef ASM_X86__MODULE_H
+#define ASM_X86__MODULE_H
 
 /* x86_32/64 are simple */
 struct mod_arch_specific {};
@@ -79,4 +79,4 @@ struct mod_arch_specific {};
 # define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
 #endif
 
-#endif /* _ASM_MODULE_H */
+#endif /* ASM_X86__MODULE_H */
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index b6995e567fcc..118da365e371 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -1,5 +1,5 @@
-#ifndef _AM_X86_MPSPEC_H
-#define _AM_X86_MPSPEC_H
+#ifndef ASM_X86__MPSPEC_H
+#define ASM_X86__MPSPEC_H
 
 #include <linux/init.h>
 
@@ -141,4 +141,4 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
 
 extern physid_mask_t phys_cpu_present_map;
 
-#endif
+#endif /* ASM_X86__MPSPEC_H */
diff --git a/include/asm-x86/mpspec_def.h b/include/asm-x86/mpspec_def.h
index 38d1e73b49e4..79166b048012 100644
--- a/include/asm-x86/mpspec_def.h
+++ b/include/asm-x86/mpspec_def.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_MPSPEC_DEF_H
-#define __ASM_MPSPEC_DEF_H
+#ifndef ASM_X86__MPSPEC_DEF_H
+#define ASM_X86__MPSPEC_DEF_H
 
 /*
  * Structure definitions for SMP machines following the
@@ -177,4 +177,4 @@ enum mp_bustype {
 	MP_BUS_PCI,
 	MP_BUS_MCA,
 };
-#endif
+#endif /* ASM_X86__MPSPEC_DEF_H */
diff --git a/include/asm-x86/msgbuf.h b/include/asm-x86/msgbuf.h
index 7e4e9481f51c..1b538c907a3d 100644
--- a/include/asm-x86/msgbuf.h
+++ b/include/asm-x86/msgbuf.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_MSGBUF_H
-#define _ASM_X86_MSGBUF_H
+#ifndef ASM_X86__MSGBUF_H
+#define ASM_X86__MSGBUF_H
 
 /*
  * The msqid64_ds structure for i386 architecture.
@@ -36,4 +36,4 @@ struct msqid64_ds {
 	unsigned long  __unused5;
 };
 
-#endif /* _ASM_X86_MSGBUF_H */
+#endif /* ASM_X86__MSGBUF_H */
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h
index 296f29ce426d..3139666a94fa 100644
--- a/include/asm-x86/msidef.h
+++ b/include/asm-x86/msidef.h
@@ -1,5 +1,5 @@
-#ifndef ASM_MSIDEF_H
-#define ASM_MSIDEF_H
+#ifndef ASM_X86__MSIDEF_H
+#define ASM_X86__MSIDEF_H
 
 /*
  * Constants for Intel APIC based MSI messages.
@@ -48,4 +48,4 @@
 #define MSI_ADDR_DEST_ID(dest)		(((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
 					 MSI_ADDR_DEST_ID_MASK)
 
-#endif /* ASM_MSIDEF_H */
+#endif /* ASM_X86__MSIDEF_H */
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index 44bce773012e..3052f058ab06 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_MSR_INDEX_H
-#define __ASM_MSR_INDEX_H
+#ifndef ASM_X86__MSR_INDEX_H
+#define ASM_X86__MSR_INDEX_H
 
 /* CPU model specific register (MSR) numbers */
 
@@ -310,4 +310,4 @@
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0		0x00001900
 
-#endif /* __ASM_MSR_INDEX_H */
+#endif /* ASM_X86__MSR_INDEX_H */
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 2362cfda1fbc..530af1f6389e 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_X86_MSR_H_
-#define __ASM_X86_MSR_H_
+#ifndef ASM_X86__MSR_H
+#define ASM_X86__MSR_H
 
 #include <asm/msr-index.h>
 
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 	return EAX_EDX_VAL(val, low, high);
 }
 
+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+						      int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  mov %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+	return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_amd_safe(msr, &err);
+	return err;
+}
 
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
@@ -221,4 +244,4 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 #endif /* __KERNEL__ */
 
 
-#endif
+#endif /* ASM_X86__MSR_H */
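A minimal usage sketch of the rdmsrl_amd_safe() helper added above. The calling context and the MSR number are made up for illustration; real callers would live in arch/x86 code that already includes <asm/msr.h>. Like rdmsrl_safe(), the helper returns -EFAULT instead of faulting, but the RDMSR is issued with the pass-through key 0x9c5a203a loaded in %edi, as shown in the asm above.

static void example_read_amd_msr(void)
{
	unsigned long long value;
	int err;

	err = rdmsrl_amd_safe(0xc0011023, &value);	/* hypothetical MSR number */
	if (err)
		return;		/* MSR not readable on this CPU */

	/* use 'value' ... */
}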
diff --git a/include/asm-x86/mtrr.h b/include/asm-x86/mtrr.h
index a69a01a51729..23a7f83da953 100644
--- a/include/asm-x86/mtrr.h
+++ b/include/asm-x86/mtrr.h
@@ -20,8 +20,8 @@
     The postal address is:
       Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
 */
-#ifndef _ASM_X86_MTRR_H
-#define _ASM_X86_MTRR_H
+#ifndef ASM_X86__MTRR_H
+#define ASM_X86__MTRR_H
 
 #include <linux/ioctl.h>
 #include <linux/errno.h>
@@ -170,4 +170,4 @@ struct mtrr_gentry32 {
 
 #endif /* __KERNEL__ */
 
-#endif /* _ASM_X86_MTRR_H */
+#endif /* ASM_X86__MTRR_H */
diff --git a/include/asm-x86/mutex_32.h b/include/asm-x86/mutex_32.h
index 73e928ef5f03..25c16d8ba3c7 100644
--- a/include/asm-x86/mutex_32.h
+++ b/include/asm-x86/mutex_32.h
@@ -6,8 +6,8 @@
  *
  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
+#ifndef ASM_X86__MUTEX_32_H
+#define ASM_X86__MUTEX_32_H
 
 #include <asm/alternative.h>
 
@@ -122,4 +122,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
 #endif
 }
 
-#endif
+#endif /* ASM_X86__MUTEX_32_H */
diff --git a/include/asm-x86/mutex_64.h b/include/asm-x86/mutex_64.h
index f3fae9becb38..918ba21ab9d9 100644
--- a/include/asm-x86/mutex_64.h
+++ b/include/asm-x86/mutex_64.h
@@ -6,8 +6,8 @@
  *
  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
+#ifndef ASM_X86__MUTEX_64_H
+#define ASM_X86__MUTEX_64_H
 
 /**
  * __mutex_fastpath_lock - decrement and call function if negative
@@ -97,4 +97,4 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
 		return 0;
 }
 
-#endif
+#endif /* ASM_X86__MUTEX_64_H */
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 21f8d0202a82..d5e715f024dc 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_NMI_H_
-#define _ASM_X86_NMI_H_
+#ifndef ASM_X86__NMI_H
+#define ASM_X86__NMI_H
 
 #include <linux/pm.h>
 #include <asm/irq.h>
@@ -34,6 +34,7 @@ extern void stop_apic_nmi_watchdog(void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
 extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern void cpu_nmi_set_wd_enabled(void);
 
 extern atomic_t nmi_active;
 extern unsigned int nmi_watchdog;
@@ -81,4 +82,4 @@ void enable_lapic_nmi_watchdog(void);
 void stop_nmi(void);
 void restart_nmi(void);
 
-#endif
+#endif /* ASM_X86__NMI_H */
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index ad0bedd10b89..ae742721ae73 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_NOPS_H
-#define _ASM_NOPS_H 1
+#ifndef ASM_X86__NOPS_H
+#define ASM_X86__NOPS_H
 
 /* Define nops for use with alternative() */
 
@@ -115,4 +115,4 @@
 
 #define ASM_NOP_MAX 8
 
-#endif
+#endif /* ASM_X86__NOPS_H */
diff --git a/include/asm-x86/numa_32.h b/include/asm-x86/numa_32.h
index 220d7b7707a0..44cb07855c5b 100644
--- a/include/asm-x86/numa_32.h
+++ b/include/asm-x86/numa_32.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_32_NUMA_H
-#define _ASM_X86_32_NUMA_H 1
+#ifndef ASM_X86__NUMA_32_H
+#define ASM_X86__NUMA_32_H
 
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
@@ -8,4 +8,4 @@ extern void numa_remove_cpu(int cpu);
 extern void set_highmem_pages_init(void);
 #endif
 
-#endif /* _ASM_X86_32_NUMA_H */
+#endif /* ASM_X86__NUMA_32_H */
diff --git a/include/asm-x86/numa_64.h b/include/asm-x86/numa_64.h
index 3830094434a9..15c990395b02 100644
--- a/include/asm-x86/numa_64.h
+++ b/include/asm-x86/numa_64.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X8664_NUMA_H
-#define _ASM_X8664_NUMA_H 1
+#ifndef ASM_X86__NUMA_64_H
+#define ASM_X86__NUMA_64_H
 
 #include <linux/nodemask.h>
 #include <asm/apicdef.h>
@@ -40,4 +40,4 @@ static inline void numa_add_cpu(int cpu, int node)	{ }
 static inline void numa_remove_cpu(int cpu)		{ }
 #endif
 
-#endif
+#endif /* ASM_X86__NUMA_64_H */
diff --git a/include/asm-x86/numaq.h b/include/asm-x86/numaq.h
index 34b92d581fa3..124bf7d4b70a 100644
--- a/include/asm-x86/numaq.h
+++ b/include/asm-x86/numaq.h
@@ -23,8 +23,8 @@
  * Send feedback to <gone@us.ibm.com>
  */
 
-#ifndef NUMAQ_H
-#define NUMAQ_H
+#ifndef ASM_X86__NUMAQ_H
+#define ASM_X86__NUMAQ_H
 
 #ifdef CONFIG_X86_NUMAQ
 
@@ -165,5 +165,5 @@ static inline int get_memcfg_numaq(void)
 	return 0;
 }
 #endif /* CONFIG_X86_NUMAQ */
-#endif /* NUMAQ_H */
+#endif /* ASM_X86__NUMAQ_H */
 
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h
index 97d47133486f..d7328b1a05c1 100644
--- a/include/asm-x86/olpc.h
+++ b/include/asm-x86/olpc.h
@@ -1,7 +1,7 @@
 /* OLPC machine specific definitions */
 
-#ifndef ASM_OLPC_H_
-#define ASM_OLPC_H_
+#ifndef ASM_X86__OLPC_H
+#define ASM_X86__OLPC_H
 
 #include <asm/geode.h>
 
@@ -129,4 +129,4 @@ extern int olpc_ec_mask_unset(uint8_t bits);
 #define OLPC_GPIO_LID		geode_gpio(26)
 #define OLPC_GPIO_ECSCI		geode_gpio(27)
 
-#endif
+#endif /* ASM_X86__OLPC_H */
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 49982110e4d9..79544e6ffb8b 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PAGE_H
-#define _ASM_X86_PAGE_H
+#ifndef ASM_X86__PAGE_H
+#define ASM_X86__PAGE_H
 
 #include <linux/const.h>
 
@@ -199,4 +199,4 @@ static inline pteval_t native_pte_flags(pte_t pte)
 #define __HAVE_ARCH_GATE_AREA 1
 
 #endif	/* __KERNEL__ */
-#endif	/* _ASM_X86_PAGE_H */
+#endif /* ASM_X86__PAGE_H */
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index ab8528793f08..72f7305682c6 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PAGE_32_H
-#define _ASM_X86_PAGE_32_H
+#ifndef ASM_X86__PAGE_32_H
+#define ASM_X86__PAGE_32_H
 
 /*
  * This handles the memory map.
@@ -89,13 +89,11 @@ extern int nx_enabled;
 extern unsigned int __VMALLOC_RESERVE;
 extern int sysctl_legacy_va_layout;
 
-#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM			(-__PAGE_OFFSET - __VMALLOC_RESERVE)
-
 extern void find_low_pfn_range(void);
 extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 extern void initmem_init(unsigned long, unsigned long);
+extern void free_initmem(void);
 extern void setup_bootmem_allocator(void);
 
 
@@ -126,4 +124,4 @@ static inline void copy_page(void *to, void *from)
 #endif	/* CONFIG_X86_3DNOW */
 #endif	/* !__ASSEMBLY__ */
 
-#endif /* _ASM_X86_PAGE_32_H */
+#endif /* ASM_X86__PAGE_32_H */
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index c6916c83e6b1..5e64acfed0a4 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -1,5 +1,5 @@
-#ifndef _X86_64_PAGE_H
-#define _X86_64_PAGE_H
+#ifndef ASM_X86__PAGE_64_H
+#define ASM_X86__PAGE_64_H
 
 #define PAGETABLE_LEVELS	4
 
@@ -91,6 +91,7 @@ extern unsigned long init_memory_mapping(unsigned long start,
 					 unsigned long end);
 
 extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+extern void free_initmem(void);
 
 extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
 extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
@@ -102,4 +103,4 @@ extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
 #endif
 
 
-#endif /* _X86_64_PAGE_H */
+#endif /* ASM_X86__PAGE_64_H */
diff --git a/include/asm-x86/param.h b/include/asm-x86/param.h
index 6f0d0422f4ca..0009cfb11a5f 100644
--- a/include/asm-x86/param.h
+++ b/include/asm-x86/param.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PARAM_H
-#define _ASM_X86_PARAM_H
+#ifndef ASM_X86__PARAM_H
+#define ASM_X86__PARAM_H
 
 #ifdef __KERNEL__
 # define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
@@ -19,4 +19,4 @@
 
 #define MAXHOSTNAMELEN	64	/* max length of hostname */
 
-#endif /* _ASM_X86_PARAM_H */
+#endif /* ASM_X86__PARAM_H */
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index fbbde93f12d6..891971f57d35 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_PARAVIRT_H
-#define __ASM_PARAVIRT_H
+#ifndef ASM_X86__PARAVIRT_H
+#define ASM_X86__PARAVIRT_H
 /* Various instructions on x86 need to be replaced for
  * para-virtualization: those hooks are defined here. */
 
@@ -137,6 +137,7 @@ struct pv_cpu_ops {
 
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
 
@@ -257,13 +258,13 @@ struct pv_mmu_ops {
 	 * Hooks for allocating/releasing pagetable pages when they're
 	 * attached to a pagetable
 	 */
-	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
-	void (*release_pte)(u32 pfn);
-	void (*release_pmd)(u32 pfn);
-	void (*release_pud)(u32 pfn);
+	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+	void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+	void (*release_pte)(unsigned long pfn);
+	void (*release_pmd)(unsigned long pfn);
+	void (*release_pud)(unsigned long pfn);
 
 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -726,6 +727,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+}
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -771,6 +776,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr_amd(msr, &err);
+	return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
@@ -993,35 +1005,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
 }
 
-static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pte(unsigned pfn)
+static inline void paravirt_release_pte(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
-					    unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+					    unsigned long start, unsigned long count)
 {
 	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pmd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
 }
 
-static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
 }
-static inline void paravirt_release_pud(unsigned pfn)
+static inline void paravirt_release_pud(unsigned long pfn)
 {
 	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
@@ -1634,4 +1646,4 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_PARAVIRT */
-#endif /* __ASM_PARAVIRT_H */
+#endif /* ASM_X86__PARAVIRT_H */
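The pv_mmu_ops changes above only widen the pfn arguments from u32 to unsigned long, so that page-frame numbers above 2^32 (possible on 64-bit with 4K pages) are not truncated on their way into a hypervisor backend. A sketch of what a backend hook now looks like; the function name is made up and the body is illustrative only, not taken from this patch:

static void example_release_pte(unsigned long pfn)
{
	/* unsigned long carries the full pfn on 64-bit; a u32 parameter
	 * would silently drop the high bits for very large physical
	 * addresses. */
	pr_debug("releasing pte page at pfn %lx\n", pfn);
}

/* a backend would then install it with:
 *	pv_mmu_ops.release_pte = example_release_pte;
 */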
diff --git a/include/asm-x86/parport.h b/include/asm-x86/parport.h
index 3c4ffeb467e9..2e3dda4dc3d9 100644
--- a/include/asm-x86/parport.h
+++ b/include/asm-x86/parport.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PARPORT_H
-#define _ASM_X86_PARPORT_H
+#ifndef ASM_X86__PARPORT_H
+#define ASM_X86__PARPORT_H
 
 static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
 static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
@@ -7,4 +7,4 @@ static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
 	return parport_pc_find_isa_ports(autoirq, autodma);
 }
 
-#endif /* _ASM_X86_PARPORT_H */
+#endif /* ASM_X86__PARPORT_H */
diff --git a/include/asm-x86/pat.h b/include/asm-x86/pat.h
index 7edc47307217..482c3e3f9879 100644
--- a/include/asm-x86/pat.h
+++ b/include/asm-x86/pat.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_PAT_H
-#define _ASM_PAT_H
+#ifndef ASM_X86__PAT_H
+#define ASM_X86__PAT_H
 
 #include <linux/types.h>
 
@@ -19,4 +19,4 @@ extern int free_memtype(u64 start, u64 end);
 
 extern void pat_disable(char *reason);
 
-#endif
+#endif /* ASM_X86__PAT_H */
diff --git a/include/asm-x86/pci-direct.h b/include/asm-x86/pci-direct.h
index 80c775d9fe20..da42be07b690 100644
--- a/include/asm-x86/pci-direct.h
+++ b/include/asm-x86/pci-direct.h
@@ -1,5 +1,5 @@
-#ifndef ASM_PCI_DIRECT_H
-#define ASM_PCI_DIRECT_H 1
+#ifndef ASM_X86__PCI_DIRECT_H
+#define ASM_X86__PCI_DIRECT_H
 
 #include <linux/types.h>
 
@@ -18,4 +18,4 @@ extern int early_pci_allowed(void);
 extern unsigned int pci_early_dump_regs;
 extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
 extern void early_dump_pci_devices(void);
-#endif
+#endif /* ASM_X86__PCI_DIRECT_H */
diff --git a/include/asm-x86/pci.h b/include/asm-x86/pci.h
index 2db14cf17db8..602583192991 100644
--- a/include/asm-x86/pci.h
+++ b/include/asm-x86/pci.h
@@ -1,5 +1,5 @@
-#ifndef __x86_PCI_H
-#define __x86_PCI_H
+#ifndef ASM_X86__PCI_H
+#define ASM_X86__PCI_H
 
 #include <linux/mm.h> /* for struct page */
 #include <linux/types.h>
@@ -111,4 +111,4 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
 }
 #endif
 
-#endif
+#endif /* ASM_X86__PCI_H */
diff --git a/include/asm-x86/pci_32.h b/include/asm-x86/pci_32.h
index a50d46851285..3f2288207c0c 100644
--- a/include/asm-x86/pci_32.h
+++ b/include/asm-x86/pci_32.h
@@ -1,5 +1,5 @@
-#ifndef __i386_PCI_H
-#define __i386_PCI_H
+#ifndef ASM_X86__PCI_32_H
+#define ASM_X86__PCI_32_H
 
 
 #ifdef __KERNEL__
@@ -31,4 +31,4 @@ struct pci_dev;
 #endif /* __KERNEL__ */
 
 
-#endif /* __i386_PCI_H */
+#endif /* ASM_X86__PCI_32_H */
diff --git a/include/asm-x86/pci_64.h b/include/asm-x86/pci_64.h
index f330234ffa5c..f72e12d5770e 100644
--- a/include/asm-x86/pci_64.h
+++ b/include/asm-x86/pci_64.h
@@ -1,5 +1,5 @@
-#ifndef __x8664_PCI_H
-#define __x8664_PCI_H
+#ifndef ASM_X86__PCI_64_H
+#define ASM_X86__PCI_64_H
 
 #ifdef __KERNEL__
 
@@ -63,4 +63,4 @@ extern void pci_iommu_alloc(void);
 
 #endif /* __KERNEL__ */
 
-#endif /* __x8664_PCI_H */
+#endif /* ASM_X86__PCI_64_H */
diff --git a/include/asm-x86/pda.h b/include/asm-x86/pda.h
index b34e9a7cc80b..80860afffbdb 100644
--- a/include/asm-x86/pda.h
+++ b/include/asm-x86/pda.h
@@ -1,5 +1,5 @@
-#ifndef X86_64_PDA_H
-#define X86_64_PDA_H
+#ifndef ASM_X86__PDA_H
+#define ASM_X86__PDA_H
 
 #ifndef __ASSEMBLY__
 #include <linux/stddef.h>
@@ -134,4 +134,4 @@ do {								\
 
 #define PDA_STACKOFFSET (5*8)
 
-#endif
+#endif /* ASM_X86__PDA_H */
diff --git a/include/asm-x86/percpu.h b/include/asm-x86/percpu.h
index f643a3a92da0..e10a1d0678cf 100644
--- a/include/asm-x86/percpu.h
+++ b/include/asm-x86/percpu.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PERCPU_H_
-#define _ASM_X86_PERCPU_H_
+#ifndef ASM_X86__PERCPU_H
+#define ASM_X86__PERCPU_H
 
 #ifdef CONFIG_X86_64
 #include <linux/compiler.h>
@@ -215,4 +215,4 @@ do {							\
 
 #endif	/* !CONFIG_SMP */
 
-#endif /* _ASM_X86_PERCPU_H_ */
+#endif /* ASM_X86__PERCPU_H */
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index d63ea431cb3b..3cd23adedae8 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PGALLOC_H
-#define _ASM_X86_PGALLOC_H
+#ifndef ASM_X86__PGALLOC_H
+#define ASM_X86__PGALLOC_H
 
 #include <linux/threads.h>
 #include <linux/mm.h>		/* for struct page */
@@ -111,4 +111,4 @@ extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
 #endif	/* PAGETABLE_LEVELS > 3 */
 #endif	/* PAGETABLE_LEVELS > 2 */
 
-#endif	/* _ASM_X86_PGALLOC_H */
+#endif /* ASM_X86__PGALLOC_H */
diff --git a/include/asm-x86/pgtable-2level-defs.h b/include/asm-x86/pgtable-2level-defs.h
index 0f71c9f13da4..7ec48f4e5347 100644
--- a/include/asm-x86/pgtable-2level-defs.h
+++ b/include/asm-x86/pgtable-2level-defs.h
@@ -1,5 +1,5 @@
-#ifndef _I386_PGTABLE_2LEVEL_DEFS_H
-#define _I386_PGTABLE_2LEVEL_DEFS_H
+#ifndef ASM_X86__PGTABLE_2LEVEL_DEFS_H
+#define ASM_X86__PGTABLE_2LEVEL_DEFS_H
 
 #define SHARED_KERNEL_PMD	0
 
@@ -17,4 +17,4 @@
 
 #define PTRS_PER_PTE	1024
 
-#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */
+#endif /* ASM_X86__PGTABLE_2LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 46bc52c0eae1..81762081dcd8 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -1,5 +1,5 @@
-#ifndef _I386_PGTABLE_2LEVEL_H
-#define _I386_PGTABLE_2LEVEL_H
+#ifndef ASM_X86__PGTABLE_2LEVEL_H
+#define ASM_X86__PGTABLE_2LEVEL_H
 
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define pte_none(x)		(!(x).pte_low)
-#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
 
 /*
  * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
@@ -78,4 +76,4 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
 
-#endif /* _I386_PGTABLE_2LEVEL_H */
+#endif /* ASM_X86__PGTABLE_2LEVEL_H */
diff --git a/include/asm-x86/pgtable-3level-defs.h b/include/asm-x86/pgtable-3level-defs.h
index 448ac9516314..c05fe6ff3720 100644
--- a/include/asm-x86/pgtable-3level-defs.h
+++ b/include/asm-x86/pgtable-3level-defs.h
@@ -1,5 +1,5 @@
-#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
-#define _I386_PGTABLE_3LEVEL_DEFS_H
+#ifndef ASM_X86__PGTABLE_3LEVEL_DEFS_H
+#define ASM_X86__PGTABLE_3LEVEL_DEFS_H
 
 #ifdef CONFIG_PARAVIRT
 #define SHARED_KERNEL_PMD	(pv_info.shared_kernel_pmd)
@@ -25,4 +25,4 @@
  */
 #define PTRS_PER_PTE	512
 
-#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
+#endif /* ASM_X86__PGTABLE_3LEVEL_DEFS_H */
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 105057f34032..75f4276b5ddb 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -1,5 +1,5 @@
-#ifndef _I386_PGTABLE_3LEVEL_H
-#define _I386_PGTABLE_3LEVEL_H
+#ifndef ASM_X86__PGTABLE_3LEVEL_H
+#define ASM_X86__PGTABLE_3LEVEL_H
 
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
 	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
 }
 
-#define pte_page(x)	pfn_to_page(pte_pfn(x))
-
 static inline int pte_none(pte_t pte)
 {
 	return !pte.pte_low && !pte.pte_high;
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
@@ -179,4 +172,4 @@ static inline unsigned long pte_pfn(pte_t pte)
 #define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
 #define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
 
-#endif /* _I386_PGTABLE_3LEVEL_H */
+#endif /* ASM_X86__PGTABLE_3LEVEL_H */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 04caa2f544df..888add7b0882 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PGTABLE_H
-#define _ASM_X86_PGTABLE_H
+#ifndef ASM_X86__PGTABLE_H
+#define ASM_X86__PGTABLE_H
 
 #define FIRST_USER_ADDRESS	0
 
@@ -186,6 +186,13 @@ static inline int pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+static inline unsigned long pte_pfn(pte_t pte)
+{
+	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
+
 static inline int pmd_large(pmd_t pte)
 {
 	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -313,6 +320,8 @@ static inline void native_pagetable_setup_start(pgd_t *base) {}
 static inline void native_pagetable_setup_done(pgd_t *base) {}
 #endif
 
+extern int arch_report_meminfo(char *page);
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
@@ -521,4 +530,4 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 #include <asm-generic/pgtable.h>
 #endif	/* __ASSEMBLY__ */
 
-#endif	/* _ASM_X86_PGTABLE_H */
+#endif /* ASM_X86__PGTABLE_H */
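With pte_pfn() and pte_page() now defined once in pgtable.h above (and deleted from the 2-level, 3-level and 64-bit headers elsewhere in this patch), pfn extraction is uniform across paging modes. A small, purely illustrative sketch of the kind of caller this enables; the function name is hypothetical:

static struct page *example_pte_to_page(pte_t pte)
{
	/* pte_pfn() masks with PTE_PFN_MASK and shifts by PAGE_SHIFT the
	 * same way for 2-level, PAE and 64-bit page tables. */
	if (pte_none(pte))
		return NULL;
	return pte_page(pte);
}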
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 5c3b26567a95..8de702dc7d62 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -1,5 +1,5 @@
-#ifndef _I386_PGTABLE_H
-#define _I386_PGTABLE_H
+#ifndef ASM_X86__PGTABLE_32_H
+#define ASM_X86__PGTABLE_32_H
 
 
 /*
@@ -31,6 +31,7 @@ static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
 
+extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 
 /*
  * The Linux x86 paging architecture is 'compile-time dual-mode', it
@@ -56,8 +57,7 @@ void paging_init(void);
  * area for the same reason. ;)
  */
 #define VMALLOC_OFFSET	(8 * 1024 * 1024)
-#define VMALLOC_START	(((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
-			 & ~(VMALLOC_OFFSET - 1))
+#define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
 #ifdef CONFIG_X86_PAE
 #define LAST_PKMAP 512
 #else
@@ -73,6 +73,8 @@ void paging_init(void);
 # define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
 #endif
 
+#define MAXMEM	(VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
 /*
  * Define this if things work differently on an i386 and an i486:
  * it will (on an i486) warn about kernel memory accesses that are
@@ -186,4 +188,4 @@ do {						\
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#endif /* _I386_PGTABLE_H */
+#endif /* ASM_X86__PGTABLE_32_H */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 549144d03d99..fde9770e53d1 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -1,5 +1,5 @@
-#ifndef _X86_64_PGTABLE_H
-#define _X86_64_PGTABLE_H
+#ifndef ASM_X86__PGTABLE_64_H
+#define ASM_X86__PGTABLE_64_H
 
 #include <linux/const.h>
 #ifndef __ASSEMBLY__
@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
 #define pte_present(x)	(pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
 
 #define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))   /* FIXME: is this right? */
-#define pte_page(x)	pfn_to_page(pte_pfn((x)))
-#define pte_pfn(x)	((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 /*
  * Macro to mark a page protection value as "uncacheable".
@@ -284,4 +282,4 @@ extern void cleanup_highmap(void);
 #define __HAVE_ARCH_PTE_SAME
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _X86_64_PGTABLE_H */
+#endif /* ASM_X86__PGTABLE_64_H */
diff --git a/include/asm-x86/posix_types_32.h b/include/asm-x86/posix_types_32.h
index b031efda37ec..70cf2bb05939 100644
--- a/include/asm-x86/posix_types_32.h
+++ b/include/asm-x86/posix_types_32.h
@@ -1,5 +1,5 @@
-#ifndef __ARCH_I386_POSIX_TYPES_H
-#define __ARCH_I386_POSIX_TYPES_H
+#ifndef ASM_X86__POSIX_TYPES_32_H
+#define ASM_X86__POSIX_TYPES_32_H
 
 /*
  * This file is generally used by user-level software, so you need to
@@ -82,4 +82,4 @@ do {									\
 
 #endif /* defined(__KERNEL__) */
 
-#endif
+#endif /* ASM_X86__POSIX_TYPES_32_H */
diff --git a/include/asm-x86/posix_types_64.h b/include/asm-x86/posix_types_64.h
index d6624c95854a..388b4e7f4a44 100644
--- a/include/asm-x86/posix_types_64.h
+++ b/include/asm-x86/posix_types_64.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_64_POSIX_TYPES_H
-#define _ASM_X86_64_POSIX_TYPES_H
+#ifndef ASM_X86__POSIX_TYPES_64_H
+#define ASM_X86__POSIX_TYPES_64_H
 
 /*
  * This file is generally used by user-level software, so you need to
@@ -116,4 +116,4 @@ static inline void __FD_ZERO(__kernel_fd_set *p)
 
 #endif /* defined(__KERNEL__) */
 
-#endif
+#endif /* ASM_X86__POSIX_TYPES_64_H */
diff --git a/include/asm-x86/prctl.h b/include/asm-x86/prctl.h
index 52952adef1ca..e7ae34eb4103 100644
--- a/include/asm-x86/prctl.h
+++ b/include/asm-x86/prctl.h
@@ -1,5 +1,5 @@
-#ifndef X86_64_PRCTL_H
-#define X86_64_PRCTL_H 1
+#ifndef ASM_X86__PRCTL_H
+#define ASM_X86__PRCTL_H
 
 #define ARCH_SET_GS 0x1001
 #define ARCH_SET_FS 0x1002
@@ -7,4 +7,4 @@
 #define ARCH_GET_GS 0x1004
 
 
-#endif
+#endif /* ASM_X86__PRCTL_H */
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index eff2ecd7fff0..5dd79774f693 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_I386_PROCESSOR_FLAGS_H
-#define __ASM_I386_PROCESSOR_FLAGS_H
+#ifndef ASM_X86__PROCESSOR_FLAGS_H
+#define ASM_X86__PROCESSOR_FLAGS_H
 /* Various flags defined: can be included from assembler. */
 
 /*
@@ -96,4 +96,4 @@
 #endif
 #endif
 
-#endif /* __ASM_I386_PROCESSOR_FLAGS_H */
+#endif /* ASM_X86__PROCESSOR_FLAGS_H */
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 4df3e2f6fb56..5eaf9bf0a623 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_X86_PROCESSOR_H
-#define __ASM_X86_PROCESSOR_H
+#ifndef ASM_X86__PROCESSOR_H
+#define ASM_X86__PROCESSOR_H
 
 #include <asm/processor-flags.h>
 
@@ -20,6 +20,7 @@ struct mm_struct;
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
+#include <asm/ds.h>
 
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -140,6 +141,8 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
 #define current_cpu_data	boot_cpu_data
 #endif
 
+extern const struct seq_operations cpuinfo_op;
+
 static inline int hlt_works(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -153,6 +156,8 @@ static inline int hlt_works(int cpu)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
+extern struct pt_regs *idle_regs(struct pt_regs *);
+
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
@@ -411,9 +416,14 @@ struct thread_struct {
 	unsigned		io_bitmap_max;
 /* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set.  */
 	unsigned long	debugctlmsr;
-/* Debug Store - if not 0 points to a DS Save Area configuration;
- *		 goes into MSR_IA32_DS_AREA */
-	unsigned long	ds_area_msr;
+#ifdef CONFIG_X86_DS
+/* Debug Store context; see include/asm-x86/ds.h; goes into MSR_IA32_DS_AREA */
+	struct ds_context	*ds_ctx;
+#endif /* CONFIG_X86_DS */
+#ifdef CONFIG_X86_PTRACE_BTS
+/* the signal to send on a bts buffer overflow */
+	unsigned int	bts_ovfl_signal;
+#endif /* CONFIG_X86_PTRACE_BTS */
 };
 
 static inline unsigned long native_get_debugreg(int regno)
@@ -943,4 +953,4 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 
-#endif
+#endif /* ASM_X86__PROCESSOR_H */
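The thread_struct members added above only exist when the corresponding config options are set, so any code touching them has to be conditionalized the same way. A sketch of the pattern (the accessor name is made up; the real users live in the DS/ptrace code this series reworks):

static inline struct ds_context *example_get_ds_ctx(struct thread_struct *t)
{
#ifdef CONFIG_X86_DS
	/* present only when Debug Store support is built in */
	return t->ds_ctx;
#else
	return NULL;
#endif
}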
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index 3dd458c385c0..6e89e8b4de0e 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X8664_PROTO_H
-#define _ASM_X8664_PROTO_H 1
+#ifndef ASM_X86__PROTO_H
+#define ASM_X86__PROTO_H
 
 #include <asm/ldt.h>
 
@@ -29,4 +29,4 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
 #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))
 #define round_down(x, y) ((x) & ~((y) - 1))
 
-#endif
+#endif /* ASM_X86__PROTO_H */
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index 72e7b9db29bb..4298b8882a78 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_PTRACE_ABI_H
-#define _ASM_X86_PTRACE_ABI_H
+#ifndef ASM_X86__PTRACE_ABI_H
+#define ASM_X86__PTRACE_ABI_H
 
 #ifdef __i386__
 
@@ -80,8 +80,9 @@
 
 #define PTRACE_SINGLEBLOCK	33	/* resume execution until next branch */
 
-#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_PTRACE_BTS
 
+#ifndef __ASSEMBLY__
 #include <asm/types.h>
 
 /* configuration/status structure used in PTRACE_BTS_CONFIG and
@@ -97,20 +98,20 @@ struct ptrace_bts_config {
 	/* actual size of bts_struct in bytes */
 	__u32 bts_size;
 };
-#endif
+#endif /* __ASSEMBLY__ */
 
 #define PTRACE_BTS_O_TRACE	0x1 /* branch trace */
 #define PTRACE_BTS_O_SCHED	0x2 /* scheduling events w/ jiffies */
 #define PTRACE_BTS_O_SIGNAL     0x4 /* send SIG<signal> on buffer overflow
 				       instead of wrapping around */
-#define PTRACE_BTS_O_CUT_SIZE	0x8 /* cut requested size to max available
-				       instead of failing */
+#define PTRACE_BTS_O_ALLOC	0x8 /* (re)allocate buffer */
 
 #define PTRACE_BTS_CONFIG	40
 /* Configure branch trace recording.
    ADDR points to a struct ptrace_bts_config.
    DATA gives the size of that buffer.
-   A new buffer is allocated, iff the size changes.
+   A new buffer is allocated, if requested in the flags.
+   An overflow signal may only be requested for new buffers.
    Returns the number of bytes read.
 */
 #define PTRACE_BTS_STATUS	41
@@ -119,7 +120,7 @@ struct ptrace_bts_config {
    Returns the number of bytes written.
 */
 #define PTRACE_BTS_SIZE		42
-/* Return the number of available BTS records.
+/* Return the number of available BTS records for draining.
    DATA and ADDR are ignored.
 */
 #define PTRACE_BTS_GET		43
@@ -139,5 +140,6 @@ struct ptrace_bts_config {
    BTS records are read from oldest to newest.
    Returns number of BTS records drained.
 */
+#endif /* CONFIG_X86_PTRACE_BTS */
 
-#endif
+#endif /* ASM_X86__PTRACE_ABI_H */
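A rough user-space sketch of how the reworked BTS requests above might be driven. The helper name is made up, error handling is elided, and the constants would come from this header rather than <sys/ptrace.h>; the request cast is only there because glibc's ptrace() prototype takes an enum. Per the comment above, PTRACE_BTS_SIZE ignores both ADDR and DATA and returns the number of records available for draining.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <stddef.h>

/* hypothetical helper: ask how many BTS records the tracee has buffered */
static long example_bts_record_count(pid_t pid)
{
	return ptrace((enum __ptrace_request)PTRACE_BTS_SIZE, pid, NULL, NULL);
}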
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 8a71db803da6..d64a61097165 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PTRACE_H 1#ifndef ASM_X86__PTRACE_H
2#define _ASM_X86_PTRACE_H 2#define ASM_X86__PTRACE_H
3 3
4#include <linux/compiler.h> /* For __user */ 4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h> 5#include <asm/ptrace-abi.h>
@@ -127,14 +127,48 @@ struct pt_regs {
127#endif /* __KERNEL__ */ 127#endif /* __KERNEL__ */
128#endif /* !__i386__ */ 128#endif /* !__i386__ */
129 129
130
131#ifdef CONFIG_X86_PTRACE_BTS
132/* a branch trace record entry
133 *
134 * In order to unify the interface between various processor versions,
135 * we use the below data structure for all processors.
136 */
137enum bts_qualifier {
138 BTS_INVALID = 0,
139 BTS_BRANCH,
140 BTS_TASK_ARRIVES,
141 BTS_TASK_DEPARTS
142};
143
144struct bts_struct {
145 __u64 qualifier;
146 union {
147 /* BTS_BRANCH */
148 struct {
149 __u64 from_ip;
150 __u64 to_ip;
151 } lbr;
152 /* BTS_TASK_ARRIVES or
153 BTS_TASK_DEPARTS */
154 __u64 jiffies;
155 } variant;
156};
157#endif /* CONFIG_X86_PTRACE_BTS */
158
130#ifdef __KERNEL__ 159#ifdef __KERNEL__
131 160
132/* the DS BTS struct is used for ptrace as well */ 161#include <linux/init.h>
133#include <asm/ds.h>
134 162
163struct cpuinfo_x86;
135struct task_struct; 164struct task_struct;
136 165
166#ifdef CONFIG_X86_PTRACE_BTS
167extern void __cpuinit ptrace_bts_init_intel(struct cpuinfo_x86 *);
137extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier); 168extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
169#else
170#define ptrace_bts_init_intel(config) do {} while (0)
171#endif /* CONFIG_X86_PTRACE_BTS */
138 172
139extern unsigned long profile_pc(struct pt_regs *regs); 173extern unsigned long profile_pc(struct pt_regs *regs);
140 174
@@ -148,6 +182,9 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
148void signal_fault(struct pt_regs *regs, void __user *frame, char *where); 182void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
149#endif 183#endif
150 184
185extern long syscall_trace_enter(struct pt_regs *);
186extern void syscall_trace_leave(struct pt_regs *);
187
151static inline unsigned long regs_return_value(struct pt_regs *regs) 188static inline unsigned long regs_return_value(struct pt_regs *regs)
152{ 189{
153 return regs->ax; 190 return regs->ax;
@@ -213,6 +250,11 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
213 return regs->bp; 250 return regs->bp;
214} 251}
215 252
253static inline unsigned long user_stack_pointer(struct pt_regs *regs)
254{
255 return regs->sp;
256}
257
216/* 258/*
217 * These are defined as per linux/ptrace.h, which see. 259 * These are defined as per linux/ptrace.h, which see.
218 */ 260 */
@@ -239,4 +281,4 @@ extern int do_set_thread_area(struct task_struct *p, int idx,
239 281
240#endif /* !__ASSEMBLY__ */ 282#endif /* !__ASSEMBLY__ */
241 283
242#endif 284#endif /* ASM_X86__PTRACE_H */
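[annotation] Since bts_struct now unifies branch and scheduling records behind a qualifier, a short decoder clarifies how the union is meant to be read. The struct and enum come straight from the hunk above and are assumed to be in scope (e.g. via <asm/ptrace.h>); the output format is purely an example.

/*
 * Illustrative decoder for one drained BTS record; only the field
 * access pattern matters, the printing is made up.
 */
#include <inttypes.h>
#include <stdio.h>

static void print_bts_record(const struct bts_struct *bts)
{
	switch (bts->qualifier) {
	case BTS_BRANCH:
		printf("branch 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
		       (uint64_t)bts->variant.lbr.from_ip,
		       (uint64_t)bts->variant.lbr.to_ip);
		break;
	case BTS_TASK_ARRIVES:
	case BTS_TASK_DEPARTS:
		printf("sched event at jiffies %" PRIu64 "\n",
		       (uint64_t)bts->variant.jiffies);
		break;
	default:
		printf("invalid record\n");
	}
}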
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
index 6857f840b243..edb3b4ecfc81 100644
--- a/include/asm-x86/pvclock-abi.h
+++ b/include/asm-x86/pvclock-abi.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PVCLOCK_ABI_H_ 1#ifndef ASM_X86__PVCLOCK_ABI_H
2#define _ASM_X86_PVCLOCK_ABI_H_ 2#define ASM_X86__PVCLOCK_ABI_H
3#ifndef __ASSEMBLY__ 3#ifndef __ASSEMBLY__
4 4
5/* 5/*
@@ -39,4 +39,4 @@ struct pvclock_wall_clock {
39} __attribute__((__packed__)); 39} __attribute__((__packed__));
40 40
41#endif /* __ASSEMBLY__ */ 41#endif /* __ASSEMBLY__ */
42#endif /* _ASM_X86_PVCLOCK_ABI_H_ */ 42#endif /* ASM_X86__PVCLOCK_ABI_H */
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
index 85b1bba8e0a3..1a38f6834800 100644
--- a/include/asm-x86/pvclock.h
+++ b/include/asm-x86/pvclock.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_PVCLOCK_H_ 1#ifndef ASM_X86__PVCLOCK_H
2#define _ASM_X86_PVCLOCK_H_ 2#define ASM_X86__PVCLOCK_H
3 3
4#include <linux/clocksource.h> 4#include <linux/clocksource.h>
5#include <asm/pvclock-abi.h> 5#include <asm/pvclock-abi.h>
@@ -10,4 +10,4 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
10 struct pvclock_vcpu_time_info *vcpu, 10 struct pvclock_vcpu_time_info *vcpu,
11 struct timespec *ts); 11 struct timespec *ts);
12 12
13#endif /* _ASM_X86_PVCLOCK_H_ */ 13#endif /* ASM_X86__PVCLOCK_H */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 206f355786dc..1c2f0ce9e31e 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_REBOOT_H 1#ifndef ASM_X86__REBOOT_H
2#define _ASM_REBOOT_H 2#define ASM_X86__REBOOT_H
3 3
4struct pt_regs; 4struct pt_regs;
5 5
@@ -18,4 +18,4 @@ void native_machine_crash_shutdown(struct pt_regs *regs);
18void native_machine_shutdown(void); 18void native_machine_shutdown(void);
19void machine_real_restart(const unsigned char *code, int length); 19void machine_real_restart(const unsigned char *code, int length);
20 20
21#endif /* _ASM_REBOOT_H */ 21#endif /* ASM_X86__REBOOT_H */
diff --git a/include/asm-x86/reboot_fixups.h b/include/asm-x86/reboot_fixups.h
index 0cb7d87c2b68..2c2987d97570 100644
--- a/include/asm-x86/reboot_fixups.h
+++ b/include/asm-x86/reboot_fixups.h
@@ -1,6 +1,6 @@
1#ifndef _LINUX_REBOOT_FIXUPS_H 1#ifndef ASM_X86__REBOOT_FIXUPS_H
2#define _LINUX_REBOOT_FIXUPS_H 2#define ASM_X86__REBOOT_FIXUPS_H
3 3
4extern void mach_reboot_fixups(void); 4extern void mach_reboot_fixups(void);
5 5
6#endif /* _LINUX_REBOOT_FIXUPS_H */ 6#endif /* ASM_X86__REBOOT_FIXUPS_H */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index 5c2ff4bc2980..a01c4e376331 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_REQUIRED_FEATURES_H 1#ifndef ASM_X86__REQUIRED_FEATURES_H
2#define _ASM_REQUIRED_FEATURES_H 1 2#define ASM_X86__REQUIRED_FEATURES_H
3 3
4/* Define minimum CPUID feature set for kernel These bits are checked 4/* Define minimum CPUID feature set for kernel These bits are checked
5 really early to actually display a visible error message before the 5 really early to actually display a visible error message before the
@@ -79,4 +79,4 @@
79#define REQUIRED_MASK6 0 79#define REQUIRED_MASK6 0
80#define REQUIRED_MASK7 0 80#define REQUIRED_MASK7 0
81 81
82#endif 82#endif /* ASM_X86__REQUIRED_FEATURES_H */
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 8d9f0b41ee86..e39376d7de50 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_RESUME_TRACE_H 1#ifndef ASM_X86__RESUME_TRACE_H
2#define _ASM_X86_RESUME_TRACE_H 2#define ASM_X86__RESUME_TRACE_H
3 3
4#include <asm/asm.h> 4#include <asm/asm.h>
5 5
@@ -7,7 +7,7 @@
7do { \ 7do { \
8 if (pm_trace_enabled) { \ 8 if (pm_trace_enabled) { \
9 const void *tracedata; \ 9 const void *tracedata; \
10 asm volatile(_ASM_MOV_UL " $1f,%0\n" \ 10 asm volatile(_ASM_MOV " $1f,%0\n" \
11 ".section .tracedata,\"a\"\n" \ 11 ".section .tracedata,\"a\"\n" \
12 "1:\t.word %c1\n\t" \ 12 "1:\t.word %c1\n\t" \
13 _ASM_PTR " %c2\n" \ 13 _ASM_PTR " %c2\n" \
@@ -18,4 +18,4 @@ do { \
18 } \ 18 } \
19} while (0) 19} while (0)
20 20
21#endif 21#endif /* ASM_X86__RESUME_TRACE_H */
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index c9448bd8968f..5e1256bdee83 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -5,8 +5,8 @@
5 * Author: Laurent Vivier <Laurent.Vivier@bull.net> 5 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
6 */ 6 */
7 7
8#ifndef __ASM_RIO_H 8#ifndef ASM_X86__RIO_H
9#define __ASM_RIO_H 9#define ASM_X86__RIO_H
10 10
11#define RIO_TABLE_VERSION 3 11#define RIO_TABLE_VERSION 3
12 12
@@ -60,4 +60,4 @@ enum {
60 ALT_CALGARY = 5, /* Second Planar Calgary */ 60 ALT_CALGARY = 5, /* Second Planar Calgary */
61}; 61};
62 62
63#endif /* __ASM_RIO_H */ 63#endif /* ASM_X86__RIO_H */
diff --git a/include/asm-x86/rwlock.h b/include/asm-x86/rwlock.h
index 6a8c0d645108..48a3109e1a7d 100644
--- a/include/asm-x86/rwlock.h
+++ b/include/asm-x86/rwlock.h
@@ -1,8 +1,8 @@
1#ifndef _ASM_X86_RWLOCK_H 1#ifndef ASM_X86__RWLOCK_H
2#define _ASM_X86_RWLOCK_H 2#define ASM_X86__RWLOCK_H
3 3
4#define RW_LOCK_BIAS 0x01000000 4#define RW_LOCK_BIAS 0x01000000
5 5
6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ 6/* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */
7 7
8#endif /* _ASM_X86_RWLOCK_H */ 8#endif /* ASM_X86__RWLOCK_H */
diff --git a/include/asm-x86/rwsem.h b/include/asm-x86/rwsem.h
index 750f2a3542b3..3ff3015b71a8 100644
--- a/include/asm-x86/rwsem.h
+++ b/include/asm-x86/rwsem.h
@@ -29,8 +29,8 @@
29 * front, then they'll all be woken up, but no other readers will be. 29 * front, then they'll all be woken up, but no other readers will be.
30 */ 30 */
31 31
32#ifndef _I386_RWSEM_H 32#ifndef ASM_X86__RWSEM_H
33#define _I386_RWSEM_H 33#define ASM_X86__RWSEM_H
34 34
35#ifndef _LINUX_RWSEM_H 35#ifndef _LINUX_RWSEM_H
36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 36#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
@@ -262,4 +262,4 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
262} 262}
263 263
264#endif /* __KERNEL__ */ 264#endif /* __KERNEL__ */
265#endif /* _I386_RWSEM_H */ 265#endif /* ASM_X86__RWSEM_H */
diff --git a/include/asm-x86/scatterlist.h b/include/asm-x86/scatterlist.h
index c0432061f81a..ee48f880005d 100644
--- a/include/asm-x86/scatterlist.h
+++ b/include/asm-x86/scatterlist.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef ASM_X86__SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define ASM_X86__SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5 5
@@ -30,4 +30,4 @@ struct scatterlist {
30# define sg_dma_len(sg) ((sg)->dma_length) 30# define sg_dma_len(sg) ((sg)->dma_length)
31#endif 31#endif
32 32
33#endif 33#endif /* ASM_X86__SCATTERLIST_H */
diff --git a/include/asm-x86/seccomp_32.h b/include/asm-x86/seccomp_32.h
index 36e71c5f306f..cf9ab2dbcef1 100644
--- a/include/asm-x86/seccomp_32.h
+++ b/include/asm-x86/seccomp_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef ASM_X86__SECCOMP_32_H
2#define _ASM_SECCOMP_H 2#define ASM_X86__SECCOMP_32_H
3 3
4#include <linux/thread_info.h> 4#include <linux/thread_info.h>
5 5
@@ -14,4 +14,4 @@
14#define __NR_seccomp_exit __NR_exit 14#define __NR_seccomp_exit __NR_exit
15#define __NR_seccomp_sigreturn __NR_sigreturn 15#define __NR_seccomp_sigreturn __NR_sigreturn
16 16
17#endif /* _ASM_SECCOMP_H */ 17#endif /* ASM_X86__SECCOMP_32_H */
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 76cfe69aa63c..03274cea751f 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SECCOMP_H 1#ifndef ASM_X86__SECCOMP_64_H
2#define _ASM_SECCOMP_H 2#define ASM_X86__SECCOMP_64_H
3 3
4#include <linux/thread_info.h> 4#include <linux/thread_info.h>
5 5
@@ -22,4 +22,4 @@
22#define __NR_seccomp_exit_32 __NR_ia32_exit 22#define __NR_seccomp_exit_32 __NR_ia32_exit
23#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn 23#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
24 24
25#endif /* _ASM_SECCOMP_H */ 25#endif /* ASM_X86__SECCOMP_64_H */
diff --git a/include/asm-x86/segment.h b/include/asm-x86/segment.h
index 646452ea9ea3..ea5f0a8686f7 100644
--- a/include/asm-x86/segment.h
+++ b/include/asm-x86/segment.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SEGMENT_H_ 1#ifndef ASM_X86__SEGMENT_H
2#define _ASM_X86_SEGMENT_H_ 2#define ASM_X86__SEGMENT_H
3 3
4/* Constructor for a conventional segment GDT (or LDT) entry */ 4/* Constructor for a conventional segment GDT (or LDT) entry */
5/* This is a macro so it can be used in initializers */ 5/* This is a macro so it can be used in initializers */
@@ -212,4 +212,4 @@ extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][10];
212#endif 212#endif
213#endif 213#endif
214 214
215#endif 215#endif /* ASM_X86__SEGMENT_H */
diff --git a/include/asm-x86/sembuf.h b/include/asm-x86/sembuf.h
index ee50c801f7b7..81f06b7e5a3f 100644
--- a/include/asm-x86/sembuf.h
+++ b/include/asm-x86/sembuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SEMBUF_H 1#ifndef ASM_X86__SEMBUF_H
2#define _ASM_X86_SEMBUF_H 2#define ASM_X86__SEMBUF_H
3 3
4/* 4/*
5 * The semid64_ds structure for x86 architecture. 5 * The semid64_ds structure for x86 architecture.
@@ -21,4 +21,4 @@ struct semid64_ds {
21 unsigned long __unused4; 21 unsigned long __unused4;
22}; 22};
23 23
24#endif /* _ASM_X86_SEMBUF_H */ 24#endif /* ASM_X86__SEMBUF_H */
diff --git a/include/asm-x86/serial.h b/include/asm-x86/serial.h
index 628c801535ea..303660b671e5 100644
--- a/include/asm-x86/serial.h
+++ b/include/asm-x86/serial.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SERIAL_H 1#ifndef ASM_X86__SERIAL_H
2#define _ASM_X86_SERIAL_H 2#define ASM_X86__SERIAL_H
3 3
4/* 4/*
5 * This assumes you have a 1.8432 MHz clock for your UART. 5 * This assumes you have a 1.8432 MHz clock for your UART.
@@ -26,4 +26,4 @@
26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ 26 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ 27 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
28 28
29#endif /* _ASM_X86_SERIAL_H */ 29#endif /* ASM_X86__SERIAL_H */
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index a07c6f1c01e1..9030cb73c4d7 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SETUP_H 1#ifndef ASM_X86__SETUP_H
2#define _ASM_X86_SETUP_H 2#define ASM_X86__SETUP_H
3 3
4#define COMMAND_LINE_SIZE 2048 4#define COMMAND_LINE_SIZE 2048
5 5
@@ -41,6 +41,7 @@ struct x86_quirks {
41}; 41};
42 42
43extern struct x86_quirks *x86_quirks; 43extern struct x86_quirks *x86_quirks;
44extern unsigned long saved_video_mode;
44 45
45#ifndef CONFIG_PARAVIRT 46#ifndef CONFIG_PARAVIRT
46#define paravirt_post_allocator_init() do {} while (0) 47#define paravirt_post_allocator_init() do {} while (0)
@@ -100,4 +101,4 @@ void __init x86_64_start_reservations(char *real_mode_data);
100#endif /* __ASSEMBLY__ */ 101#endif /* __ASSEMBLY__ */
101#endif /* __KERNEL__ */ 102#endif /* __KERNEL__ */
102 103
103#endif /* _ASM_X86_SETUP_H */ 104#endif /* ASM_X86__SETUP_H */
diff --git a/include/asm-x86/shmbuf.h b/include/asm-x86/shmbuf.h
index b51413b74971..f51aec2298e9 100644
--- a/include/asm-x86/shmbuf.h
+++ b/include/asm-x86/shmbuf.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SHMBUF_H 1#ifndef ASM_X86__SHMBUF_H
2#define _ASM_X86_SHMBUF_H 2#define ASM_X86__SHMBUF_H
3 3
4/* 4/*
5 * The shmid64_ds structure for x86 architecture. 5 * The shmid64_ds structure for x86 architecture.
@@ -48,4 +48,4 @@ struct shminfo64 {
48 unsigned long __unused4; 48 unsigned long __unused4;
49}; 49};
50 50
51#endif /* _ASM_X86_SHMBUF_H */ 51#endif /* ASM_X86__SHMBUF_H */
diff --git a/include/asm-x86/shmparam.h b/include/asm-x86/shmparam.h
index 0880cf0917b9..a83a1fd96a0e 100644
--- a/include/asm-x86/shmparam.h
+++ b/include/asm-x86/shmparam.h
@@ -1,6 +1,6 @@
1#ifndef _ASM_X86_SHMPARAM_H 1#ifndef ASM_X86__SHMPARAM_H
2#define _ASM_X86_SHMPARAM_H 2#define ASM_X86__SHMPARAM_H
3 3
4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ 4#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
5 5
6#endif /* _ASM_X86_SHMPARAM_H */ 6#endif /* ASM_X86__SHMPARAM_H */
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 2f9c884d2c0f..24879c85b291 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGCONTEXT_H 1#ifndef ASM_X86__SIGCONTEXT_H
2#define _ASM_X86_SIGCONTEXT_H 2#define ASM_X86__SIGCONTEXT_H
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/types.h> 5#include <asm/types.h>
@@ -202,4 +202,4 @@ struct sigcontext {
202 202
203#endif /* !__i386__ */ 203#endif /* !__i386__ */
204 204
205#endif 205#endif /* ASM_X86__SIGCONTEXT_H */
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 57a9686fb491..4e2ec732dd01 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -1,5 +1,5 @@
1#ifndef _SIGCONTEXT32_H 1#ifndef ASM_X86__SIGCONTEXT32_H
2#define _SIGCONTEXT32_H 1 2#define ASM_X86__SIGCONTEXT32_H
3 3
4/* signal context for 32bit programs. */ 4/* signal context for 32bit programs. */
5 5
@@ -68,4 +68,4 @@ struct sigcontext_ia32 {
68 unsigned int cr2; 68 unsigned int cr2;
69}; 69};
70 70
71#endif 71#endif /* ASM_X86__SIGCONTEXT32_H */
diff --git a/include/asm-x86/siginfo.h b/include/asm-x86/siginfo.h
index a477bea0c2a1..808bdfb2958c 100644
--- a/include/asm-x86/siginfo.h
+++ b/include/asm-x86/siginfo.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGINFO_H 1#ifndef ASM_X86__SIGINFO_H
2#define _ASM_X86_SIGINFO_H 2#define ASM_X86__SIGINFO_H
3 3
4#ifdef __x86_64__ 4#ifdef __x86_64__
5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) 5# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
@@ -7,4 +7,4 @@
7 7
8#include <asm-generic/siginfo.h> 8#include <asm-generic/siginfo.h>
9 9
10#endif 10#endif /* ASM_X86__SIGINFO_H */
diff --git a/include/asm-x86/signal.h b/include/asm-x86/signal.h
index 6dac49364e95..65acc82d267a 100644
--- a/include/asm-x86/signal.h
+++ b/include/asm-x86/signal.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SIGNAL_H 1#ifndef ASM_X86__SIGNAL_H
2#define _ASM_X86_SIGNAL_H 2#define ASM_X86__SIGNAL_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#include <linux/types.h> 5#include <linux/types.h>
@@ -140,6 +140,9 @@ struct sigaction {
140struct k_sigaction { 140struct k_sigaction {
141 struct sigaction sa; 141 struct sigaction sa;
142}; 142};
143
144extern void do_notify_resume(struct pt_regs *, void *, __u32);
145
143# else /* __KERNEL__ */ 146# else /* __KERNEL__ */
144/* Here we must cater to libcs that poke about in kernel headers. */ 147/* Here we must cater to libcs that poke about in kernel headers. */
145 148
@@ -256,4 +259,4 @@ struct pt_regs;
256#endif /* __KERNEL__ */ 259#endif /* __KERNEL__ */
257#endif /* __ASSEMBLY__ */ 260#endif /* __ASSEMBLY__ */
258 261
259#endif 262#endif /* ASM_X86__SIGNAL_H */
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 3c877f74f279..04f84f4e2c8b 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SMP_H_ 1#ifndef ASM_X86__SMP_H
2#define _ASM_X86_SMP_H_ 2#define ASM_X86__SMP_H
3#ifndef __ASSEMBLY__ 3#ifndef __ASSEMBLY__
4#include <linux/cpumask.h> 4#include <linux/cpumask.h>
5#include <linux/init.h> 5#include <linux/init.h>
@@ -34,6 +34,9 @@ extern cpumask_t cpu_initialized;
34DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 34DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
35DECLARE_PER_CPU(cpumask_t, cpu_core_map); 35DECLARE_PER_CPU(cpumask_t, cpu_core_map);
36DECLARE_PER_CPU(u16, cpu_llc_id); 36DECLARE_PER_CPU(u16, cpu_llc_id);
37#ifdef CONFIG_X86_32
38DECLARE_PER_CPU(int, cpu_number);
39#endif
37 40
38DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); 41DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
39DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); 42DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
@@ -142,7 +145,6 @@ extern unsigned disabled_cpus __cpuinitdata;
142 * from the initial startup. We map APIC_BASE very early in page_setup(), 145 * from the initial startup. We map APIC_BASE very early in page_setup(),
143 * so this is correct in the x86 case. 146 * so this is correct in the x86 case.
144 */ 147 */
145DECLARE_PER_CPU(int, cpu_number);
146#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) 148#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
147extern int safe_smp_processor_id(void); 149extern int safe_smp_processor_id(void);
148 150
@@ -205,4 +207,4 @@ extern void cpu_uninit(void);
205#endif 207#endif
206 208
207#endif /* __ASSEMBLY__ */ 209#endif /* __ASSEMBLY__ */
208#endif 210#endif /* ASM_X86__SMP_H */
diff --git a/include/asm-x86/socket.h b/include/asm-x86/socket.h
index 80af9c4ccad7..db73274c83c3 100644
--- a/include/asm-x86/socket.h
+++ b/include/asm-x86/socket.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SOCKET_H 1#ifndef ASM_X86__SOCKET_H
2#define _ASM_SOCKET_H 2#define ASM_X86__SOCKET_H
3 3
4#include <asm/sockios.h> 4#include <asm/sockios.h>
5 5
@@ -54,4 +54,4 @@
54 54
55#define SO_MARK 36 55#define SO_MARK 36
56 56
57#endif /* _ASM_SOCKET_H */ 57#endif /* ASM_X86__SOCKET_H */
diff --git a/include/asm-x86/sockios.h b/include/asm-x86/sockios.h
index 49cc72b5d3c9..a006704fdc84 100644
--- a/include/asm-x86/sockios.h
+++ b/include/asm-x86/sockios.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SOCKIOS_H 1#ifndef ASM_X86__SOCKIOS_H
2#define _ASM_X86_SOCKIOS_H 2#define ASM_X86__SOCKIOS_H
3 3
4/* Socket-level I/O control calls. */ 4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901 5#define FIOSETOWN 0x8901
@@ -10,4 +10,4 @@
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ 10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ 11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12 12
13#endif /* _ASM_X86_SOCKIOS_H */ 13#endif /* ASM_X86__SOCKIOS_H */
diff --git a/include/asm-x86/sparsemem.h b/include/asm-x86/sparsemem.h
index 9bd48b0a534b..38f8e6bc3186 100644
--- a/include/asm-x86/sparsemem.h
+++ b/include/asm-x86/sparsemem.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SPARSEMEM_H 1#ifndef ASM_X86__SPARSEMEM_H
2#define _ASM_X86_SPARSEMEM_H 2#define ASM_X86__SPARSEMEM_H
3 3
4#ifdef CONFIG_SPARSEMEM 4#ifdef CONFIG_SPARSEMEM
5/* 5/*
@@ -31,4 +31,4 @@
31#endif 31#endif
32 32
33#endif /* CONFIG_SPARSEMEM */ 33#endif /* CONFIG_SPARSEMEM */
34#endif 34#endif /* ASM_X86__SPARSEMEM_H */
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e39c790dbfd2..93adae338ac6 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -1,5 +1,5 @@
1#ifndef _X86_SPINLOCK_H_ 1#ifndef ASM_X86__SPINLOCK_H
2#define _X86_SPINLOCK_H_ 2#define ASM_X86__SPINLOCK_H
3 3
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5#include <asm/rwlock.h> 5#include <asm/rwlock.h>
@@ -97,7 +97,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
97 "jne 1f\n\t" 97 "jne 1f\n\t"
98 "movw %w0,%w1\n\t" 98 "movw %w0,%w1\n\t"
99 "incb %h1\n\t" 99 "incb %h1\n\t"
100 "lock ; cmpxchgw %w1,%2\n\t" 100 LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
101 "1:" 101 "1:"
102 "sete %b1\n\t" 102 "sete %b1\n\t"
103 "movzbl %b1,%0\n\t" 103 "movzbl %b1,%0\n\t"
@@ -135,7 +135,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
135 int inc = 0x00010000; 135 int inc = 0x00010000;
136 int tmp; 136 int tmp;
137 137
138 asm volatile("lock ; xaddl %0, %1\n" 138 asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
139 "movzwl %w0, %2\n\t" 139 "movzwl %w0, %2\n\t"
140 "shrl $16, %0\n\t" 140 "shrl $16, %0\n\t"
141 "1:\t" 141 "1:\t"
@@ -162,7 +162,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
162 "cmpl %0,%1\n\t" 162 "cmpl %0,%1\n\t"
163 "jne 1f\n\t" 163 "jne 1f\n\t"
164 "addl $0x00010000, %1\n\t" 164 "addl $0x00010000, %1\n\t"
165 "lock ; cmpxchgl %1,%2\n\t" 165 LOCK_PREFIX "cmpxchgl %1,%2\n\t"
166 "1:" 166 "1:"
167 "sete %b1\n\t" 167 "sete %b1\n\t"
168 "movzbl %b1,%0\n\t" 168 "movzbl %b1,%0\n\t"
@@ -366,4 +366,4 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
366#define _raw_read_relax(lock) cpu_relax() 366#define _raw_read_relax(lock) cpu_relax()
367#define _raw_write_relax(lock) cpu_relax() 367#define _raw_write_relax(lock) cpu_relax()
368 368
369#endif 369#endif /* ASM_X86__SPINLOCK_H */
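[annotation] The hunks above only swap the literal "lock ;" for LOCK_PREFIX; the underlying ticket discipline is unchanged. For readers following the inline assembly, here is a C-level sketch of what the 32-bit-ticket variant (the xaddl with 0x00010000) encodes: the high half of the word is the next ticket to hand out, the low half is the ticket being served. Illustrative pseudocode, not the kernel implementation.

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock { _Atomic uint32_t slock; };

static void ticket_lock(struct ticket_lock *lock)
{
	/* LOCK xaddl: take a ticket and advance the next-ticket half */
	uint32_t old = atomic_fetch_add(&lock->slock, 0x00010000);
	uint16_t my_ticket = old >> 16;

	/* spin until the owner half says it is our turn (cpu_relax loop) */
	while ((uint16_t)atomic_load(&lock->slock) != my_ticket)
		;
}

static void ticket_unlock(struct ticket_lock *lock)
{
	uint32_t old = atomic_load(&lock->slock);
	uint32_t next;

	/* advance only the owner half, as the kernel's incw does */
	do {
		next = (old & 0xffff0000u) | ((old + 1) & 0x0000ffffu);
	} while (!atomic_compare_exchange_weak(&lock->slock, &old, next));
}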
diff --git a/include/asm-x86/spinlock_types.h b/include/asm-x86/spinlock_types.h
index 06c071c9eee9..6aa9b562c508 100644
--- a/include/asm-x86/spinlock_types.h
+++ b/include/asm-x86/spinlock_types.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_SPINLOCK_TYPES_H 1#ifndef ASM_X86__SPINLOCK_TYPES_H
2#define __ASM_SPINLOCK_TYPES_H 2#define ASM_X86__SPINLOCK_TYPES_H
3 3
4#ifndef __LINUX_SPINLOCK_TYPES_H 4#ifndef __LINUX_SPINLOCK_TYPES_H
5# error "please don't include this file directly" 5# error "please don't include this file directly"
@@ -17,4 +17,4 @@ typedef struct {
17 17
18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 18#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
19 19
20#endif 20#endif /* ASM_X86__SPINLOCK_TYPES_H */
diff --git a/include/asm-x86/srat.h b/include/asm-x86/srat.h
index 774c919dc232..5363e4f7e1cd 100644
--- a/include/asm-x86/srat.h
+++ b/include/asm-x86/srat.h
@@ -24,8 +24,8 @@
24 * Send feedback to Pat Gaughen <gone@us.ibm.com> 24 * Send feedback to Pat Gaughen <gone@us.ibm.com>
25 */ 25 */
26 26
27#ifndef _ASM_SRAT_H_ 27#ifndef ASM_X86__SRAT_H
28#define _ASM_SRAT_H_ 28#define ASM_X86__SRAT_H
29 29
30#ifdef CONFIG_ACPI_NUMA 30#ifdef CONFIG_ACPI_NUMA
31extern int get_memcfg_from_srat(void); 31extern int get_memcfg_from_srat(void);
@@ -36,4 +36,4 @@ static inline int get_memcfg_from_srat(void)
36} 36}
37#endif 37#endif
38 38
39#endif /* _ASM_SRAT_H_ */ 39#endif /* ASM_X86__SRAT_H */
diff --git a/include/asm-x86/stacktrace.h b/include/asm-x86/stacktrace.h
index 30f82526a8e2..f43517e28532 100644
--- a/include/asm-x86/stacktrace.h
+++ b/include/asm-x86/stacktrace.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_STACKTRACE_H 1#ifndef ASM_X86__STACKTRACE_H
2#define _ASM_STACKTRACE_H 1 2#define ASM_X86__STACKTRACE_H
3 3
4extern int kstack_depth_to_print; 4extern int kstack_depth_to_print;
5 5
@@ -18,4 +18,4 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18 unsigned long *stack, unsigned long bp, 18 unsigned long *stack, unsigned long bp,
19 const struct stacktrace_ops *ops, void *data); 19 const struct stacktrace_ops *ops, void *data);
20 20
21#endif 21#endif /* ASM_X86__STACKTRACE_H */
diff --git a/include/asm-x86/stat.h b/include/asm-x86/stat.h
index 5c22dcb5d17e..1e120f628905 100644
--- a/include/asm-x86/stat.h
+++ b/include/asm-x86/stat.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_STAT_H 1#ifndef ASM_X86__STAT_H
2#define _ASM_X86_STAT_H 2#define ASM_X86__STAT_H
3 3
4#define STAT_HAVE_NSEC 1 4#define STAT_HAVE_NSEC 1
5 5
@@ -111,4 +111,4 @@ struct __old_kernel_stat {
111#endif 111#endif
112}; 112};
113 113
114#endif 114#endif /* ASM_X86__STAT_H */
diff --git a/include/asm-x86/statfs.h b/include/asm-x86/statfs.h
index 7c651aa97252..3f005bc3aa5b 100644
--- a/include/asm-x86/statfs.h
+++ b/include/asm-x86/statfs.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_STATFS_H 1#ifndef ASM_X86__STATFS_H
2#define _ASM_X86_STATFS_H 2#define ASM_X86__STATFS_H
3 3
4#ifdef __i386__ 4#ifdef __i386__
5#include <asm-generic/statfs.h> 5#include <asm-generic/statfs.h>
@@ -60,4 +60,4 @@ struct compat_statfs64 {
60} __attribute__((packed)); 60} __attribute__((packed));
61 61
62#endif /* !__i386__ */ 62#endif /* !__i386__ */
63#endif 63#endif /* ASM_X86__STATFS_H */
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index 193578cd1fd9..487843ed245a 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_STRING_H_ 1#ifndef ASM_X86__STRING_32_H
2#define _I386_STRING_H_ 2#define ASM_X86__STRING_32_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -323,4 +323,4 @@ extern void *memscan(void *addr, int c, size_t size);
323 323
324#endif /* __KERNEL__ */ 324#endif /* __KERNEL__ */
325 325
326#endif 326#endif /* ASM_X86__STRING_32_H */
diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h
index 52b5ab383395..a2add11d3b66 100644
--- a/include/asm-x86/string_64.h
+++ b/include/asm-x86/string_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_STRING_H_ 1#ifndef ASM_X86__STRING_64_H
2#define _X86_64_STRING_H_ 2#define ASM_X86__STRING_64_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
@@ -57,4 +57,4 @@ int strcmp(const char *cs, const char *ct);
57 57
58#endif /* __KERNEL__ */ 58#endif /* __KERNEL__ */
59 59
60#endif 60#endif /* ASM_X86__STRING_64_H */
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 8675c6782a7d..acb6d4d491f4 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,8 +3,8 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_32_SUSPEND_H 6#ifndef ASM_X86__SUSPEND_32_H
7#define __ASM_X86_32_SUSPEND_H 7#define ASM_X86__SUSPEND_32_H
8 8
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
@@ -48,4 +48,4 @@ static inline void acpi_save_register_state(unsigned long return_point)
48extern int acpi_save_state_mem(void); 48extern int acpi_save_state_mem(void);
49#endif 49#endif
50 50
51#endif /* __ASM_X86_32_SUSPEND_H */ 51#endif /* ASM_X86__SUSPEND_32_H */
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
index dc3262b43072..cf821dd310e8 100644
--- a/include/asm-x86/suspend_64.h
+++ b/include/asm-x86/suspend_64.h
@@ -3,8 +3,8 @@
3 * Based on code 3 * Based on code
4 * Copyright 2001 Patrick Mochel <mochel@osdl.org> 4 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
5 */ 5 */
6#ifndef __ASM_X86_64_SUSPEND_H 6#ifndef ASM_X86__SUSPEND_64_H
7#define __ASM_X86_64_SUSPEND_H 7#define ASM_X86__SUSPEND_64_H
8 8
9#include <asm/desc.h> 9#include <asm/desc.h>
10#include <asm/i387.h> 10#include <asm/i387.h>
@@ -49,4 +49,4 @@ extern int acpi_save_state_mem(void);
49extern char core_restore_code; 49extern char core_restore_code;
50extern char restore_registers; 50extern char restore_registers;
51 51
52#endif /* __ASM_X86_64_SUSPEND_H */ 52#endif /* ASM_X86__SUSPEND_64_H */
diff --git a/include/asm-x86/swiotlb.h b/include/asm-x86/swiotlb.h
index 2730b351afcf..1e20adbcad4b 100644
--- a/include/asm-x86/swiotlb.h
+++ b/include/asm-x86/swiotlb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_SWIOTLB_H 1#ifndef ASM_X86__SWIOTLB_H
2#define _ASM_SWIOTLB_H 1 2#define ASM_X86__SWIOTLB_H
3 3
4#include <asm/dma-mapping.h> 4#include <asm/dma-mapping.h>
5 5
@@ -55,4 +55,4 @@ static inline void pci_swiotlb_init(void)
55 55
56static inline void dma_mark_clean(void *addr, size_t size) {} 56static inline void dma_mark_clean(void *addr, size_t size) {}
57 57
58#endif /* _ASM_SWIOTLB_H */ 58#endif /* ASM_X86__SWIOTLB_H */
diff --git a/include/asm-x86/sync_bitops.h b/include/asm-x86/sync_bitops.h
index b47a1d0b8a83..b689bee71104 100644
--- a/include/asm-x86/sync_bitops.h
+++ b/include/asm-x86/sync_bitops.h
@@ -1,5 +1,5 @@
1#ifndef _I386_SYNC_BITOPS_H 1#ifndef ASM_X86__SYNC_BITOPS_H
2#define _I386_SYNC_BITOPS_H 2#define ASM_X86__SYNC_BITOPS_H
3 3
4/* 4/*
5 * Copyright 1992, Linus Torvalds. 5 * Copyright 1992, Linus Torvalds.
@@ -127,4 +127,4 @@ static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
127 127
128#undef ADDR 128#undef ADDR
129 129
130#endif /* _I386_SYNC_BITOPS_H */ 130#endif /* ASM_X86__SYNC_BITOPS_H */
diff --git a/include/asm-x86/syscall.h b/include/asm-x86/syscall.h
new file mode 100644
index 000000000000..04c47dc5597c
--- /dev/null
+++ b/include/asm-x86/syscall.h
@@ -0,0 +1,211 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_SYSCALL_H
14#define _ASM_SYSCALL_H 1
15
16#include <linux/sched.h>
17#include <linux/err.h>
18
19static inline long syscall_get_nr(struct task_struct *task,
20 struct pt_regs *regs)
21{
22 /*
23 * We always sign-extend a -1 value being set here,
24 * so this is always either -1L or a syscall number.
25 */
26 return regs->orig_ax;
27}
28
29static inline void syscall_rollback(struct task_struct *task,
30 struct pt_regs *regs)
31{
32 regs->ax = regs->orig_ax;
33}
34
35static inline long syscall_get_error(struct task_struct *task,
36 struct pt_regs *regs)
37{
38 unsigned long error = regs->ax;
39#ifdef CONFIG_IA32_EMULATION
40 /*
41 * TS_COMPAT is set for 32-bit syscall entries and then
42 * remains set until we return to user mode.
43 */
44 if (task_thread_info(task)->status & TS_COMPAT)
45 /*
46 * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
47 * and will match correctly in comparisons.
48 */
49 error = (long) (int) error;
50#endif
51 return IS_ERR_VALUE(error) ? error : 0;
52}
53
54static inline long syscall_get_return_value(struct task_struct *task,
55 struct pt_regs *regs)
56{
57 return regs->ax;
58}
59
60static inline void syscall_set_return_value(struct task_struct *task,
61 struct pt_regs *regs,
62 int error, long val)
63{
64 regs->ax = (long) error ?: val;
65}
66
67#ifdef CONFIG_X86_32
68
69static inline void syscall_get_arguments(struct task_struct *task,
70 struct pt_regs *regs,
71 unsigned int i, unsigned int n,
72 unsigned long *args)
73{
74 BUG_ON(i + n > 6);
75 memcpy(args, &regs->bx + i, n * sizeof(args[0]));
76}
77
78static inline void syscall_set_arguments(struct task_struct *task,
79 struct pt_regs *regs,
80 unsigned int i, unsigned int n,
81 const unsigned long *args)
82{
83 BUG_ON(i + n > 6);
84 memcpy(&regs->bx + i, args, n * sizeof(args[0]));
85}
86
87#else /* CONFIG_X86_64 */
88
89static inline void syscall_get_arguments(struct task_struct *task,
90 struct pt_regs *regs,
91 unsigned int i, unsigned int n,
92 unsigned long *args)
93{
94# ifdef CONFIG_IA32_EMULATION
95 if (task_thread_info(task)->status & TS_COMPAT)
96 switch (i + n) {
97 case 6:
98 if (!n--) break;
99 *args++ = regs->bp;
100 case 5:
101 if (!n--) break;
102 *args++ = regs->di;
103 case 4:
104 if (!n--) break;
105 *args++ = regs->si;
106 case 3:
107 if (!n--) break;
108 *args++ = regs->dx;
109 case 2:
110 if (!n--) break;
111 *args++ = regs->cx;
112 case 1:
113 if (!n--) break;
114 *args++ = regs->bx;
115 case 0:
116 if (!n--) break;
117 default:
118 BUG();
119 break;
120 }
121 else
122# endif
123 switch (i + n) {
124 case 6:
125 if (!n--) break;
126 *args++ = regs->r9;
127 case 5:
128 if (!n--) break;
129 *args++ = regs->r8;
130 case 4:
131 if (!n--) break;
132 *args++ = regs->r10;
133 case 3:
134 if (!n--) break;
135 *args++ = regs->dx;
136 case 2:
137 if (!n--) break;
138 *args++ = regs->si;
139 case 1:
140 if (!n--) break;
141 *args++ = regs->di;
142 case 0:
143 if (!n--) break;
144 default:
145 BUG();
146 break;
147 }
148}
149
150static inline void syscall_set_arguments(struct task_struct *task,
151 struct pt_regs *regs,
152 unsigned int i, unsigned int n,
153 const unsigned long *args)
154{
155# ifdef CONFIG_IA32_EMULATION
156 if (task_thread_info(task)->status & TS_COMPAT)
157 switch (i + n) {
158 case 6:
159 if (!n--) break;
160 regs->bp = *args++;
161 case 5:
162 if (!n--) break;
163 regs->di = *args++;
164 case 4:
165 if (!n--) break;
166 regs->si = *args++;
167 case 3:
168 if (!n--) break;
169 regs->dx = *args++;
170 case 2:
171 if (!n--) break;
172 regs->cx = *args++;
173 case 1:
174 if (!n--) break;
175 regs->bx = *args++;
176 case 0:
177 if (!n--) break;
178 default:
179 BUG();
180 }
181 else
182# endif
183 switch (i + n) {
184 case 6:
185 if (!n--) break;
186 regs->r9 = *args++;
187 case 5:
188 if (!n--) break;
189 regs->r8 = *args++;
190 case 4:
191 if (!n--) break;
192 regs->r10 = *args++;
193 case 3:
194 if (!n--) break;
195 regs->dx = *args++;
196 case 2:
197 if (!n--) break;
198 regs->si = *args++;
199 case 1:
200 if (!n--) break;
201 regs->di = *args++;
202 case 0:
203 if (!n--) break;
204 default:
205 BUG();
206 }
207}
208
209#endif /* CONFIG_X86_32 */
210
211#endif /* _ASM_SYSCALL_H */
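[annotation] The new <asm-x86/syscall.h> exists so generic code can inspect syscall state without knowing the x86 register conventions (including ia32 compat tasks). A sketch of the kind of consumer these accessors serve; the function name and printk are made up for illustration.

/*
 * Hypothetical tracing/audit hook at syscall entry, built only on the
 * accessors defined above.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

static void debug_syscall_entry(struct task_struct *task,
				struct pt_regs *regs)
{
	unsigned long args[3];
	long nr = syscall_get_nr(task, regs);

	if (nr == -1L)
		return;			/* not inside a system call */

	/* first three arguments; compat (ia32) tasks are handled for us */
	syscall_get_arguments(task, regs, 0, 3, args);
	printk(KERN_DEBUG "pid %d: syscall %ld(%lx, %lx, %lx)\n",
	       task->pid, nr, args[0], args[1], args[2]);
}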
diff --git a/include/asm-x86/syscalls.h b/include/asm-x86/syscalls.h
new file mode 100644
index 000000000000..87803da44010
--- /dev/null
+++ b/include/asm-x86/syscalls.h
@@ -0,0 +1,93 @@
1/*
2 * syscalls.h - Linux syscall interfaces (arch-specific)
3 *
4 * Copyright (c) 2008 Jaswinder Singh
5 *
6 * This file is released under the GPLv2.
7 * See the file COPYING for more details.
8 */
9
10#ifndef _ASM_X86_SYSCALLS_H
11#define _ASM_X86_SYSCALLS_H
12
13#include <linux/compiler.h>
14#include <linux/linkage.h>
15#include <linux/types.h>
16#include <linux/signal.h>
17
18/* Common in X86_32 and X86_64 */
19/* kernel/ioport.c */
20asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
21
22/* X86_32 only */
23#ifdef CONFIG_X86_32
24/* kernel/process_32.c */
25asmlinkage int sys_fork(struct pt_regs);
26asmlinkage int sys_clone(struct pt_regs);
27asmlinkage int sys_vfork(struct pt_regs);
28asmlinkage int sys_execve(struct pt_regs);
29
30/* kernel/signal_32.c */
31asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
32asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
33 struct old_sigaction __user *);
34asmlinkage int sys_sigaltstack(unsigned long);
35asmlinkage unsigned long sys_sigreturn(unsigned long);
36asmlinkage int sys_rt_sigreturn(unsigned long);
37
38/* kernel/ioport.c */
39asmlinkage long sys_iopl(unsigned long);
40
41/* kernel/ldt.c */
42asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
43
44/* kernel/sys_i386_32.c */
45asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
46 unsigned long, unsigned long, unsigned long);
47struct mmap_arg_struct;
48asmlinkage int old_mmap(struct mmap_arg_struct __user *);
49struct sel_arg_struct;
50asmlinkage int old_select(struct sel_arg_struct __user *);
51asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
52struct old_utsname;
53asmlinkage int sys_uname(struct old_utsname __user *);
54struct oldold_utsname;
55asmlinkage int sys_olduname(struct oldold_utsname __user *);
56
57/* kernel/tls.c */
58asmlinkage int sys_set_thread_area(struct user_desc __user *);
59asmlinkage int sys_get_thread_area(struct user_desc __user *);
60
61/* kernel/vm86_32.c */
62asmlinkage int sys_vm86old(struct pt_regs);
63asmlinkage int sys_vm86(struct pt_regs);
64
65#else /* CONFIG_X86_32 */
66
67/* X86_64 only */
68/* kernel/process_64.c */
69asmlinkage long sys_fork(struct pt_regs *);
70asmlinkage long sys_clone(unsigned long, unsigned long,
71 void __user *, void __user *,
72 struct pt_regs *);
73asmlinkage long sys_vfork(struct pt_regs *);
74asmlinkage long sys_execve(char __user *, char __user * __user *,
75 char __user * __user *,
76 struct pt_regs *);
77
78/* kernel/ioport.c */
79asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
80
81/* kernel/signal_64.c */
82asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
83 struct pt_regs *);
84asmlinkage long sys_rt_sigreturn(struct pt_regs *);
85
86/* kernel/sys_x86_64.c */
87asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
88 unsigned long, unsigned long, unsigned long);
89struct new_utsname;
90asmlinkage long sys_uname(struct new_utsname __user *);
91
92#endif /* CONFIG_X86_32 */
93#endif /* _ASM_X86_SYSCALLS_H */
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 983ce37c491f..34505dd7b24d 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_SYSTEM_H_ 1#ifndef ASM_X86__SYSTEM_H
2#define _ASM_X86_SYSTEM_H_ 2#define ASM_X86__SYSTEM_H
3 3
4#include <asm/asm.h> 4#include <asm/asm.h>
5#include <asm/segment.h> 5#include <asm/segment.h>
@@ -419,4 +419,4 @@ static inline void rdtsc_barrier(void)
419 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); 419 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
420} 420}
421 421
422#endif 422#endif /* ASM_X86__SYSTEM_H */
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 97fa251ccb2b..5aedb8bffc5a 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_SYSTEM_H 1#ifndef ASM_X86__SYSTEM_64_H
2#define __ASM_SYSTEM_H 2#define ASM_X86__SYSTEM_64_H
3 3
4#include <asm/segment.h> 4#include <asm/segment.h>
5#include <asm/cmpxchg.h> 5#include <asm/cmpxchg.h>
@@ -19,4 +19,4 @@ static inline void write_cr8(unsigned long val)
19 19
20#include <linux/irqflags.h> 20#include <linux/irqflags.h>
21 21
22#endif 22#endif /* ASM_X86__SYSTEM_64_H */
diff --git a/include/asm-x86/tce.h b/include/asm-x86/tce.h
index b1a4ea00df78..e7932d7fbbab 100644
--- a/include/asm-x86/tce.h
+++ b/include/asm-x86/tce.h
@@ -21,8 +21,8 @@
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */ 22 */
23 23
24#ifndef _ASM_X86_64_TCE_H 24#ifndef ASM_X86__TCE_H
25#define _ASM_X86_64_TCE_H 25#define ASM_X86__TCE_H
26 26
27extern unsigned int specified_table_size; 27extern unsigned int specified_table_size;
28struct iommu_table; 28struct iommu_table;
@@ -45,4 +45,4 @@ extern void * __init alloc_tce_table(void);
45extern void __init free_tce_table(void *tbl); 45extern void __init free_tce_table(void *tbl);
46extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar); 46extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
47 47
48#endif /* _ASM_X86_64_TCE_H */ 48#endif /* ASM_X86__TCE_H */
diff --git a/include/asm-x86/termbits.h b/include/asm-x86/termbits.h
index af1b70ea440f..3d00dc5e0c71 100644
--- a/include/asm-x86/termbits.h
+++ b/include/asm-x86/termbits.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TERMBITS_H 1#ifndef ASM_X86__TERMBITS_H
2#define _ASM_X86_TERMBITS_H 2#define ASM_X86__TERMBITS_H
3 3
4#include <linux/posix_types.h> 4#include <linux/posix_types.h>
5 5
@@ -195,4 +195,4 @@ struct ktermios {
195#define TCSADRAIN 1 195#define TCSADRAIN 1
196#define TCSAFLUSH 2 196#define TCSAFLUSH 2
197 197
198#endif /* _ASM_X86_TERMBITS_H */ 198#endif /* ASM_X86__TERMBITS_H */
diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h
index f72956331c49..e235db248071 100644
--- a/include/asm-x86/termios.h
+++ b/include/asm-x86/termios.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TERMIOS_H 1#ifndef ASM_X86__TERMIOS_H
2#define _ASM_X86_TERMIOS_H 2#define ASM_X86__TERMIOS_H
3 3
4#include <asm/termbits.h> 4#include <asm/termbits.h>
5#include <asm/ioctls.h> 5#include <asm/ioctls.h>
@@ -110,4 +110,4 @@ static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
110 110
111#endif /* __KERNEL__ */ 111#endif /* __KERNEL__ */
112 112
113#endif /* _ASM_X86_TERMIOS_H */ 113#endif /* ASM_X86__TERMIOS_H */
diff --git a/include/asm-x86/therm_throt.h b/include/asm-x86/therm_throt.h
index 399bf6026b16..1c7f57b6b66e 100644
--- a/include/asm-x86/therm_throt.h
+++ b/include/asm-x86/therm_throt.h
@@ -1,9 +1,9 @@
1#ifndef __ASM_I386_THERM_THROT_H__ 1#ifndef ASM_X86__THERM_THROT_H
2#define __ASM_I386_THERM_THROT_H__ 1 2#define ASM_X86__THERM_THROT_H
3 3
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5 5
6extern atomic_t therm_throt_en; 6extern atomic_t therm_throt_en;
7int therm_throt_process(int curr); 7int therm_throt_process(int curr);
8 8
9#endif /* __ASM_I386_THERM_THROT_H__ */ 9#endif /* ASM_X86__THERM_THROT_H */
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index da0a675adf94..4db0066a3a35 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -4,8 +4,8 @@
4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller 4 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
5 */ 5 */
6 6
7#ifndef _ASM_X86_THREAD_INFO_H 7#ifndef ASM_X86__THREAD_INFO_H
8#define _ASM_X86_THREAD_INFO_H 8#define ASM_X86__THREAD_INFO_H
9 9
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <asm/page.h> 11#include <asm/page.h>
@@ -71,6 +71,7 @@ struct thread_info {
71 * Warning: layout of LSW is hardcoded in entry.S 71 * Warning: layout of LSW is hardcoded in entry.S
72 */ 72 */
73#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 73#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
74#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
74#define TIF_SIGPENDING 2 /* signal pending */ 75#define TIF_SIGPENDING 2 /* signal pending */
75#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 76#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
76#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 77#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
@@ -93,6 +94,7 @@ struct thread_info {
93#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */ 94#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
94 95
95#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 96#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
97#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
96#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 98#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
97#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 99#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
98#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 100#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -133,7 +135,7 @@ struct thread_info {
133 135
134/* Only used for 64 bit */ 136/* Only used for 64 bit */
135#define _TIF_DO_NOTIFY_MASK \ 137#define _TIF_DO_NOTIFY_MASK \
136 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY) 138 (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
137 139
138/* flags to check in __switch_to() */ 140/* flags to check in __switch_to() */
139#define _TIF_WORK_CTXSW \ 141#define _TIF_WORK_CTXSW \
@@ -258,4 +260,4 @@ extern void free_thread_info(struct thread_info *ti);
258extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 260extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
259#define arch_task_cache_init arch_task_cache_init 261#define arch_task_cache_init arch_task_cache_init
260#endif 262#endif
261#endif /* _ASM_X86_THREAD_INFO_H */ 263#endif /* ASM_X86__THREAD_INFO_H */
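[annotation] TIF_NOTIFY_RESUME is added here and folded into _TIF_DO_NOTIFY_MASK so that do_notify_resume() (declared in signal.h above) runs callbacks on the way back to user mode. A sketch of the consumer side, modeled on the generic tracehook pattern; the exact hunk lives in arch/x86/kernel/signal_*.c, outside this excerpt, so take this as an assumption about shape rather than the actual code.

	/* inside do_notify_resume(), after pending signals are handled */
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}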
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h
index a17fa473e91d..3e724eef7ac4 100644
--- a/include/asm-x86/time.h
+++ b/include/asm-x86/time.h
@@ -1,5 +1,5 @@
1#ifndef _ASMX86_TIME_H 1#ifndef ASM_X86__TIME_H
2#define _ASMX86_TIME_H 2#define ASM_X86__TIME_H
3 3
4extern void hpet_time_init(void); 4extern void hpet_time_init(void);
5 5
@@ -46,6 +46,8 @@ static inline int native_set_wallclock(unsigned long nowtime)
46 46
47#endif 47#endif
48 48
49extern void time_init(void);
50
49#ifdef CONFIG_PARAVIRT 51#ifdef CONFIG_PARAVIRT
50#include <asm/paravirt.h> 52#include <asm/paravirt.h>
51#else /* !CONFIG_PARAVIRT */ 53#else /* !CONFIG_PARAVIRT */
@@ -58,4 +60,4 @@ static inline int native_set_wallclock(unsigned long nowtime)
58 60
59extern unsigned long __init calibrate_cpu(void); 61extern unsigned long __init calibrate_cpu(void);
60 62
61#endif 63#endif /* ASM_X86__TIME_H */
diff --git a/include/asm-x86/timer.h b/include/asm-x86/timer.h
index fb2a4ddddf3d..d0babce4b47a 100644
--- a/include/asm-x86/timer.h
+++ b/include/asm-x86/timer.h
@@ -1,5 +1,5 @@
1#ifndef _ASMi386_TIMER_H 1#ifndef ASM_X86__TIMER_H
2#define _ASMi386_TIMER_H 2#define ASM_X86__TIMER_H
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/percpu.h> 5#include <linux/percpu.h>
@@ -9,9 +9,12 @@
9unsigned long long native_sched_clock(void); 9unsigned long long native_sched_clock(void);
10unsigned long native_calibrate_tsc(void); 10unsigned long native_calibrate_tsc(void);
11 11
12#ifdef CONFIG_X86_32
12extern int timer_ack; 13extern int timer_ack;
13extern int no_timer_check;
14extern int recalibrate_cpu_khz(void); 14extern int recalibrate_cpu_khz(void);
15#endif /* CONFIG_X86_32 */
16
17extern int no_timer_check;
15 18
16#ifndef CONFIG_PARAVIRT 19#ifndef CONFIG_PARAVIRT
17#define calibrate_tsc() native_calibrate_tsc() 20#define calibrate_tsc() native_calibrate_tsc()
@@ -60,4 +63,4 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
60 return ns; 63 return ns;
61} 64}
62 65
63#endif 66#endif /* ASM_X86__TIMER_H */
diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h
index 43e5a78500c5..d1ce2416a5da 100644
--- a/include/asm-x86/timex.h
+++ b/include/asm-x86/timex.h
@@ -1,6 +1,6 @@
1/* x86 architecture timex specifications */ 1/* x86 architecture timex specifications */
2#ifndef _ASM_X86_TIMEX_H 2#ifndef ASM_X86__TIMEX_H
3#define _ASM_X86_TIMEX_H 3#define ASM_X86__TIMEX_H
4 4
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/tsc.h> 6#include <asm/tsc.h>
@@ -16,4 +16,4 @@
16 16
17#define ARCH_HAS_READ_CURRENT_TIMER 17#define ARCH_HAS_READ_CURRENT_TIMER
18 18
19#endif 19#endif /* ASM_X86__TIMEX_H */
diff --git a/include/asm-x86/tlb.h b/include/asm-x86/tlb.h
index e4e9e2d07a93..db36e9e89e87 100644
--- a/include/asm-x86/tlb.h
+++ b/include/asm-x86/tlb.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TLB_H 1#ifndef ASM_X86__TLB_H
2#define _ASM_X86_TLB_H 2#define ASM_X86__TLB_H
3 3
4#define tlb_start_vma(tlb, vma) do { } while (0) 4#define tlb_start_vma(tlb, vma) do { } while (0)
5#define tlb_end_vma(tlb, vma) do { } while (0) 5#define tlb_end_vma(tlb, vma) do { } while (0)
@@ -8,4 +8,4 @@
8 8
9#include <asm-generic/tlb.h> 9#include <asm-generic/tlb.h>
10 10
11#endif 11#endif /* ASM_X86__TLB_H */
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 35c76ceb9f40..ef68b76dc3c5 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TLBFLUSH_H 1#ifndef ASM_X86__TLBFLUSH_H
2#define _ASM_X86_TLBFLUSH_H 2#define ASM_X86__TLBFLUSH_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/sched.h> 5#include <linux/sched.h>
@@ -165,4 +165,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
165 flush_tlb_all(); 165 flush_tlb_all();
166} 166}
167 167
168#endif /* _ASM_X86_TLBFLUSH_H */ 168#endif /* ASM_X86__TLBFLUSH_H */
diff --git a/include/asm-x86/topology.h b/include/asm-x86/topology.h
index 90ac7718469a..7eca9bc022b2 100644
--- a/include/asm-x86/topology.h
+++ b/include/asm-x86/topology.h
@@ -22,8 +22,8 @@
22 * 22 *
23 * Send feedback to <colpatch@us.ibm.com> 23 * Send feedback to <colpatch@us.ibm.com>
24 */ 24 */
25#ifndef _ASM_X86_TOPOLOGY_H 25#ifndef ASM_X86__TOPOLOGY_H
26#define _ASM_X86_TOPOLOGY_H 26#define ASM_X86__TOPOLOGY_H
27 27
28#ifdef CONFIG_X86_32 28#ifdef CONFIG_X86_32
29# ifdef CONFIG_X86_HT 29# ifdef CONFIG_X86_HT
@@ -255,4 +255,4 @@ static inline void set_mp_bus_to_node(int busnum, int node)
255} 255}
256#endif 256#endif
257 257
258#endif /* _ASM_X86_TOPOLOGY_H */ 258#endif /* ASM_X86__TOPOLOGY_H */
diff --git a/include/asm-x86/trampoline.h b/include/asm-x86/trampoline.h
index b156b08d0131..0406bbd898a9 100644
--- a/include/asm-x86/trampoline.h
+++ b/include/asm-x86/trampoline.h
@@ -1,5 +1,5 @@
1#ifndef __TRAMPOLINE_HEADER 1#ifndef ASM_X86__TRAMPOLINE_H
2#define __TRAMPOLINE_HEADER 2#define ASM_X86__TRAMPOLINE_H
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
@@ -18,4 +18,4 @@ extern unsigned long setup_trampoline(void);
18 18
19#endif /* __ASSEMBLY__ */ 19#endif /* __ASSEMBLY__ */
20 20
21#endif /* __TRAMPOLINE_HEADER */ 21#endif /* ASM_X86__TRAMPOLINE_H */
diff --git a/include/asm-x86/traps.h b/include/asm-x86/traps.h
index a4b65a71bd66..2ccebc6fb0b0 100644
--- a/include/asm-x86/traps.h
+++ b/include/asm-x86/traps.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TRAPS_H 1#ifndef ASM_X86__TRAPS_H
2#define _ASM_X86_TRAPS_H 2#define ASM_X86__TRAPS_H
3 3
4/* Common in X86_32 and X86_64 */ 4/* Common in X86_32 and X86_64 */
5asmlinkage void divide_error(void); 5asmlinkage void divide_error(void);
@@ -51,6 +51,8 @@ void do_spurious_interrupt_bug(struct pt_regs *, long);
51unsigned long patch_espfix_desc(unsigned long, unsigned long); 51unsigned long patch_espfix_desc(unsigned long, unsigned long);
52asmlinkage void math_emulate(long); 52asmlinkage void math_emulate(long);
53 53
54void do_page_fault(struct pt_regs *regs, unsigned long error_code);
55
54#else /* CONFIG_X86_32 */ 56#else /* CONFIG_X86_32 */
55 57
56asmlinkage void double_fault(void); 58asmlinkage void double_fault(void);
@@ -62,5 +64,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *);
62asmlinkage void do_simd_coprocessor_error(struct pt_regs *); 64asmlinkage void do_simd_coprocessor_error(struct pt_regs *);
63asmlinkage void do_spurious_interrupt_bug(struct pt_regs *); 65asmlinkage void do_spurious_interrupt_bug(struct pt_regs *);
64 66
67asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code);
68
65#endif /* CONFIG_X86_32 */ 69#endif /* CONFIG_X86_32 */
66#endif /* _ASM_X86_TRAPS_H */ 70#endif /* ASM_X86__TRAPS_H */
diff --git a/include/asm-x86/tsc.h b/include/asm-x86/tsc.h
index cb6f6ee45b8f..ad0f5c41e78c 100644
--- a/include/asm-x86/tsc.h
+++ b/include/asm-x86/tsc.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * x86 TSC related functions 2 * x86 TSC related functions
3 */ 3 */
4#ifndef _ASM_X86_TSC_H 4#ifndef ASM_X86__TSC_H
5#define _ASM_X86_TSC_H 5#define ASM_X86__TSC_H
6 6
7#include <asm/processor.h> 7#include <asm/processor.h>
8 8
@@ -59,4 +59,4 @@ extern void check_tsc_sync_target(void);
59 59
60extern int notsc_setup(char *); 60extern int notsc_setup(char *);
61 61
62#endif 62#endif /* ASM_X86__TSC_H */
diff --git a/include/asm-x86/types.h b/include/asm-x86/types.h
index 1ac80cd9acf8..e78b52e17444 100644
--- a/include/asm-x86/types.h
+++ b/include/asm-x86/types.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_TYPES_H 1#ifndef ASM_X86__TYPES_H
2#define _ASM_X86_TYPES_H 2#define ASM_X86__TYPES_H
3 3
4#include <asm-generic/int-ll64.h> 4#include <asm-generic/int-ll64.h>
5 5
@@ -33,4 +33,4 @@ typedef u32 dma_addr_t;
33#endif /* __ASSEMBLY__ */ 33#endif /* __ASSEMBLY__ */
34#endif /* __KERNEL__ */ 34#endif /* __KERNEL__ */
35 35
36#endif 36#endif /* ASM_X86__TYPES_H */
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 5f702d1d5218..48ebc0ad40ec 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_UACCES_H_ 1#ifndef ASM_X86__UACCESS_H
2#define _ASM_UACCES_H_ 2#define ASM_X86__UACCESS_H
3/* 3/*
4 * User space memory access functions 4 * User space memory access functions
5 */ 5 */
@@ -450,5 +450,5 @@ extern struct movsl_mask {
450# include "uaccess_64.h" 450# include "uaccess_64.h"
451#endif 451#endif
452 452
453#endif 453#endif /* ASM_X86__UACCESS_H */
454 454
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 6fdef39a0bcb..6b5b57d9c6d1 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -1,5 +1,5 @@
1#ifndef __i386_UACCESS_H 1#ifndef ASM_X86__UACCESS_32_H
2#define __i386_UACCESS_H 2#define ASM_X86__UACCESS_32_H
3 3
4/* 4/*
5 * User space memory access functions 5 * User space memory access functions
@@ -215,4 +215,4 @@ long strnlen_user(const char __user *str, long n);
215unsigned long __must_check clear_user(void __user *mem, unsigned long len); 215unsigned long __must_check clear_user(void __user *mem, unsigned long len);
216unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 216unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
217 217
218#endif /* __i386_UACCESS_H */ 218#endif /* ASM_X86__UACCESS_32_H */
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 45806d60bcbe..c96c1f5d07a2 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -1,5 +1,5 @@
1#ifndef __X86_64_UACCESS_H 1#ifndef ASM_X86__UACCESS_64_H
2#define __X86_64_UACCESS_H 2#define ASM_X86__UACCESS_64_H
3 3
4/* 4/*
5 * User space memory access functions 5 * User space memory access functions
@@ -199,4 +199,4 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
199unsigned long 199unsigned long
200copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); 200copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
201 201
202#endif /* __X86_64_UACCESS_H */ 202#endif /* ASM_X86__UACCESS_64_H */
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
index 50a79f7fcde9..9948dd328084 100644
--- a/include/asm-x86/ucontext.h
+++ b/include/asm-x86/ucontext.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_UCONTEXT_H 1#ifndef ASM_X86__UCONTEXT_H
2#define _ASM_X86_UCONTEXT_H 2#define ASM_X86__UCONTEXT_H
3 3
4struct ucontext { 4struct ucontext {
5 unsigned long uc_flags; 5 unsigned long uc_flags;
@@ -9,4 +9,4 @@ struct ucontext {
9 sigset_t uc_sigmask; /* mask last for extensibility */ 9 sigset_t uc_sigmask; /* mask last for extensibility */
10}; 10};
11 11
12#endif /* _ASM_X86_UCONTEXT_H */ 12#endif /* ASM_X86__UCONTEXT_H */
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h
index a7bd416b4763..59dcdec37160 100644
--- a/include/asm-x86/unaligned.h
+++ b/include/asm-x86/unaligned.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_UNALIGNED_H 1#ifndef ASM_X86__UNALIGNED_H
2#define _ASM_X86_UNALIGNED_H 2#define ASM_X86__UNALIGNED_H
3 3
4/* 4/*
5 * The x86 can do unaligned accesses itself. 5 * The x86 can do unaligned accesses itself.
@@ -11,4 +11,4 @@
11#define get_unaligned __get_unaligned_le 11#define get_unaligned __get_unaligned_le
12#define put_unaligned __put_unaligned_le 12#define put_unaligned __put_unaligned_le
13 13
14#endif /* _ASM_X86_UNALIGNED_H */ 14#endif /* ASM_X86__UNALIGNED_H */
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index d7394673b772..017f4a87c913 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_I386_UNISTD_H_ 1#ifndef ASM_X86__UNISTD_32_H
2#define _ASM_I386_UNISTD_H_ 2#define ASM_X86__UNISTD_32_H
3 3
4/* 4/*
5 * This file contains the system call numbers. 5 * This file contains the system call numbers.
@@ -376,4 +376,4 @@
376#endif 376#endif
377 377
378#endif /* __KERNEL__ */ 378#endif /* __KERNEL__ */
379#endif /* _ASM_I386_UNISTD_H_ */ 379#endif /* ASM_X86__UNISTD_32_H */
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index 3a341d791792..ace83f1f6787 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_UNISTD_H_ 1#ifndef ASM_X86__UNISTD_64_H
2#define _ASM_X86_64_UNISTD_H_ 2#define ASM_X86__UNISTD_64_H
3 3
4#ifndef __SYSCALL 4#ifndef __SYSCALL
5#define __SYSCALL(a, b) 5#define __SYSCALL(a, b)
@@ -690,4 +690,4 @@ __SYSCALL(__NR_inotify_init1, sys_inotify_init1)
690#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 690#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
691#endif /* __KERNEL__ */ 691#endif /* __KERNEL__ */
692 692
693#endif /* _ASM_X86_64_UNISTD_H_ */ 693#endif /* ASM_X86__UNISTD_64_H */
diff --git a/include/asm-x86/unwind.h b/include/asm-x86/unwind.h
index 8b064bd9c553..a2151567db44 100644
--- a/include/asm-x86/unwind.h
+++ b/include/asm-x86/unwind.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_UNWIND_H 1#ifndef ASM_X86__UNWIND_H
2#define _ASM_X86_UNWIND_H 2#define ASM_X86__UNWIND_H
3 3
4#define UNW_PC(frame) ((void)(frame), 0UL) 4#define UNW_PC(frame) ((void)(frame), 0UL)
5#define UNW_SP(frame) ((void)(frame), 0UL) 5#define UNW_SP(frame) ((void)(frame), 0UL)
@@ -10,4 +10,4 @@ static inline int arch_unw_user_mode(const void *info)
10 return 0; 10 return 0;
11} 11}
12 12
13#endif /* _ASM_X86_UNWIND_H */ 13#endif /* ASM_X86__UNWIND_H */
diff --git a/include/asm-x86/user32.h b/include/asm-x86/user32.h
index a3d910047879..aa66c1857f06 100644
--- a/include/asm-x86/user32.h
+++ b/include/asm-x86/user32.h
@@ -1,5 +1,5 @@
1#ifndef USER32_H 1#ifndef ASM_X86__USER32_H
2#define USER32_H 1 2#define ASM_X86__USER32_H
3 3
4/* IA32 compatible user structures for ptrace. 4/* IA32 compatible user structures for ptrace.
5 * These should be used for 32bit coredumps too. */ 5 * These should be used for 32bit coredumps too. */
@@ -67,4 +67,4 @@ struct user32 {
67}; 67};
68 68
69 69
70#endif 70#endif /* ASM_X86__USER32_H */
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h
index d6e51edc259d..e0fe2f55f1a6 100644
--- a/include/asm-x86/user_32.h
+++ b/include/asm-x86/user_32.h
@@ -1,5 +1,5 @@
1#ifndef _I386_USER_H 1#ifndef ASM_X86__USER_32_H
2#define _I386_USER_H 2#define ASM_X86__USER_32_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5/* Core file format: The core file is written in such a way that gdb 5/* Core file format: The core file is written in such a way that gdb
@@ -128,4 +128,4 @@ struct user{
128#define HOST_TEXT_START_ADDR (u.start_code) 128#define HOST_TEXT_START_ADDR (u.start_code)
129#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) 129#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
130 130
131#endif /* _I386_USER_H */ 131#endif /* ASM_X86__USER_32_H */
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h
index 6037b634c77f..38b5799863b4 100644
--- a/include/asm-x86/user_64.h
+++ b/include/asm-x86/user_64.h
@@ -1,5 +1,5 @@
1#ifndef _X86_64_USER_H 1#ifndef ASM_X86__USER_64_H
2#define _X86_64_USER_H 2#define ASM_X86__USER_64_H
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <asm/page.h> 5#include <asm/page.h>
@@ -134,4 +134,4 @@ struct user {
134#define HOST_TEXT_START_ADDR (u.start_code) 134#define HOST_TEXT_START_ADDR (u.start_code)
135#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) 135#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
136 136
137#endif /* _X86_64_USER_H */ 137#endif /* ASM_X86__USER_64_H */
diff --git a/include/asm-x86/uv/bios.h b/include/asm-x86/uv/bios.h
index aa73362ff5df..7cd6d7ec1308 100644
--- a/include/asm-x86/uv/bios.h
+++ b/include/asm-x86/uv/bios.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_BIOS_H 1#ifndef ASM_X86__UV__BIOS_H
2#define _ASM_X86_BIOS_H 2#define ASM_X86__UV__BIOS_H
3 3
4/* 4/*
5 * BIOS layer definitions. 5 * BIOS layer definitions.
@@ -65,4 +65,4 @@ x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
65 unsigned long *drift_info); 65 unsigned long *drift_info);
66extern const char *x86_bios_strerror(long status); 66extern const char *x86_bios_strerror(long status);
67 67
68#endif /* _ASM_X86_BIOS_H */ 68#endif /* ASM_X86__UV__BIOS_H */
diff --git a/include/asm-x86/uv/uv_bau.h b/include/asm-x86/uv/uv_bau.h
index 610b6b308e93..77153fb18f5e 100644
--- a/include/asm-x86/uv/uv_bau.h
+++ b/include/asm-x86/uv/uv_bau.h
@@ -8,8 +8,8 @@
8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef __ASM_X86_UV_BAU__ 11#ifndef ASM_X86__UV__UV_BAU_H
12#define __ASM_X86_UV_BAU__ 12#define ASM_X86__UV__UV_BAU_H
13 13
14#include <linux/bitmap.h> 14#include <linux/bitmap.h>
15#define BITSPERBYTE 8 15#define BITSPERBYTE 8
@@ -329,4 +329,4 @@ extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
329extern void uv_bau_message_intr1(void); 329extern void uv_bau_message_intr1(void);
330extern void uv_bau_timeout_intr1(void); 330extern void uv_bau_timeout_intr1(void);
331 331
332#endif /* __ASM_X86_UV_BAU__ */ 332#endif /* ASM_X86__UV__UV_BAU_H */
diff --git a/include/asm-x86/uv/uv_hub.h b/include/asm-x86/uv/uv_hub.h
index a4ef26e5850b..bdb5b01afbf5 100644
--- a/include/asm-x86/uv/uv_hub.h
+++ b/include/asm-x86/uv/uv_hub.h
@@ -8,8 +8,8 @@
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef __ASM_X86_UV_HUB_H__ 11#ifndef ASM_X86__UV__UV_HUB_H
12#define __ASM_X86_UV_HUB_H__ 12#define ASM_X86__UV__UV_HUB_H
13 13
14#include <linux/numa.h> 14#include <linux/numa.h>
15#include <linux/percpu.h> 15#include <linux/percpu.h>
@@ -350,5 +350,5 @@ static inline int uv_num_possible_blades(void)
350 return uv_possible_blades; 350 return uv_possible_blades;
351} 351}
352 352
353#endif /* __ASM_X86_UV_HUB__ */ 353#endif /* ASM_X86__UV__UV_HUB_H */
354 354
diff --git a/include/asm-x86/uv/uv_mmrs.h b/include/asm-x86/uv/uv_mmrs.h
index 151fd7fcb809..8b03d89d2459 100644
--- a/include/asm-x86/uv/uv_mmrs.h
+++ b/include/asm-x86/uv/uv_mmrs.h
@@ -8,8 +8,8 @@
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef __ASM_X86_UV_MMRS__ 11#ifndef ASM_X86__UV__UV_MMRS_H
12#define __ASM_X86_UV_MMRS__ 12#define ASM_X86__UV__UV_MMRS_H
13 13
14#define UV_MMR_ENABLE (1UL << 63) 14#define UV_MMR_ENABLE (1UL << 63)
15 15
@@ -1292,4 +1292,4 @@ union uvh_si_alias2_overlay_config_u {
1292}; 1292};
1293 1293
1294 1294
1295#endif /* __ASM_X86_UV_MMRS__ */ 1295#endif /* ASM_X86__UV__UV_MMRS_H */
diff --git a/include/asm-x86/vdso.h b/include/asm-x86/vdso.h
index 8e18fb80f5e6..4ab320913ea3 100644
--- a/include/asm-x86/vdso.h
+++ b/include/asm-x86/vdso.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_VDSO_H 1#ifndef ASM_X86__VDSO_H
2#define _ASM_X86_VDSO_H 1 2#define ASM_X86__VDSO_H
3 3
4#ifdef CONFIG_X86_64 4#ifdef CONFIG_X86_64
5extern const char VDSO64_PRELINK[]; 5extern const char VDSO64_PRELINK[];
@@ -44,4 +44,4 @@ extern const char vdso32_int80_start, vdso32_int80_end;
44extern const char vdso32_syscall_start, vdso32_syscall_end; 44extern const char vdso32_syscall_start, vdso32_syscall_end;
45extern const char vdso32_sysenter_start, vdso32_sysenter_end; 45extern const char vdso32_sysenter_start, vdso32_sysenter_end;
46 46
47#endif /* asm-x86/vdso.h */ 47#endif /* ASM_X86__VDSO_H */
diff --git a/include/asm-x86/vga.h b/include/asm-x86/vga.h
index 0ccf804377e6..b9e493d07d07 100644
--- a/include/asm-x86/vga.h
+++ b/include/asm-x86/vga.h
@@ -4,8 +4,8 @@
4 * (c) 1998 Martin Mares <mj@ucw.cz> 4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */ 5 */
6 6
7#ifndef _LINUX_ASM_VGA_H_ 7#ifndef ASM_X86__VGA_H
8#define _LINUX_ASM_VGA_H_ 8#define ASM_X86__VGA_H
9 9
10/* 10/*
11 * On the PC, we can just recalculate addresses and then 11 * On the PC, we can just recalculate addresses and then
@@ -17,4 +17,4 @@
17#define vga_readb(x) (*(x)) 17#define vga_readb(x) (*(x))
18#define vga_writeb(x, y) (*(y) = (x)) 18#define vga_writeb(x, y) (*(y) = (x))
19 19
20#endif 20#endif /* ASM_X86__VGA_H */
diff --git a/include/asm-x86/vgtod.h b/include/asm-x86/vgtod.h
index 3301f0929342..38fd13364021 100644
--- a/include/asm-x86/vgtod.h
+++ b/include/asm-x86/vgtod.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_VGTOD_H 1#ifndef ASM_X86__VGTOD_H
2#define _ASM_VGTOD_H 1 2#define ASM_X86__VGTOD_H
3 3
4#include <asm/vsyscall.h> 4#include <asm/vsyscall.h>
5#include <linux/clocksource.h> 5#include <linux/clocksource.h>
@@ -26,4 +26,4 @@ extern struct vsyscall_gtod_data __vsyscall_gtod_data
26__section_vsyscall_gtod_data; 26__section_vsyscall_gtod_data;
27extern struct vsyscall_gtod_data vsyscall_gtod_data; 27extern struct vsyscall_gtod_data vsyscall_gtod_data;
28 28
29#endif 29#endif /* ASM_X86__VGTOD_H */
diff --git a/include/asm-x86/visws/cobalt.h b/include/asm-x86/visws/cobalt.h
index 995258831b7f..9627a8fe84e9 100644
--- a/include/asm-x86/visws/cobalt.h
+++ b/include/asm-x86/visws/cobalt.h
@@ -1,5 +1,5 @@
1#ifndef __I386_SGI_COBALT_H 1#ifndef ASM_X86__VISWS__COBALT_H
2#define __I386_SGI_COBALT_H 2#define ASM_X86__VISWS__COBALT_H
3 3
4#include <asm/fixmap.h> 4#include <asm/fixmap.h>
5 5
@@ -122,4 +122,4 @@ extern char visws_board_type;
122 122
123extern char visws_board_rev; 123extern char visws_board_rev;
124 124
125#endif /* __I386_SGI_COBALT_H */ 125#endif /* ASM_X86__VISWS__COBALT_H */
diff --git a/include/asm-x86/visws/lithium.h b/include/asm-x86/visws/lithium.h
index dfcd4f07ab85..b36d3b378c63 100644
--- a/include/asm-x86/visws/lithium.h
+++ b/include/asm-x86/visws/lithium.h
@@ -1,5 +1,5 @@
1#ifndef __I386_SGI_LITHIUM_H 1#ifndef ASM_X86__VISWS__LITHIUM_H
2#define __I386_SGI_LITHIUM_H 2#define ASM_X86__VISWS__LITHIUM_H
3 3
4#include <asm/fixmap.h> 4#include <asm/fixmap.h>
5 5
@@ -49,5 +49,5 @@ static inline unsigned short li_pcib_read16(unsigned long reg)
49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); 49 return *((volatile unsigned short *)(LI_PCIB_VADDR+reg));
50} 50}
51 51
52#endif 52#endif /* ASM_X86__VISWS__LITHIUM_H */
53 53
diff --git a/include/asm-x86/visws/piix4.h b/include/asm-x86/visws/piix4.h
index 83ea4f46e419..61c938045ec9 100644
--- a/include/asm-x86/visws/piix4.h
+++ b/include/asm-x86/visws/piix4.h
@@ -1,5 +1,5 @@
1#ifndef __I386_SGI_PIIX_H 1#ifndef ASM_X86__VISWS__PIIX4_H
2#define __I386_SGI_PIIX_H 2#define ASM_X86__VISWS__PIIX4_H
3 3
4/* 4/*
5 * PIIX4 as used on SGI Visual Workstations 5 * PIIX4 as used on SGI Visual Workstations
@@ -104,4 +104,4 @@
104 */ 104 */
105#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in 105#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in
106 106
107#endif 107#endif /* ASM_X86__VISWS__PIIX4_H */
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index 5ce351325e01..998bd18eb737 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -1,5 +1,5 @@
1#ifndef _LINUX_VM86_H 1#ifndef ASM_X86__VM86_H
2#define _LINUX_VM86_H 2#define ASM_X86__VM86_H
3 3
4/* 4/*
5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how 5 * I'm guessing at the VIF/VIP flag usage, but hope that this is how
@@ -205,4 +205,4 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
205 205
206#endif /* __KERNEL__ */ 206#endif /* __KERNEL__ */
207 207
208#endif 208#endif /* ASM_X86__VM86_H */
diff --git a/include/asm-x86/vmi_time.h b/include/asm-x86/vmi_time.h
index c3118c385156..b2d39e6a08b7 100644
--- a/include/asm-x86/vmi_time.h
+++ b/include/asm-x86/vmi_time.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __VMI_TIME_H 25#ifndef ASM_X86__VMI_TIME_H
26#define __VMI_TIME_H 26#define ASM_X86__VMI_TIME_H
27 27
28/* 28/*
29 * Raw VMI call indices for timer functions 29 * Raw VMI call indices for timer functions
@@ -95,4 +95,4 @@ extern void __devinit vmi_time_ap_init(void);
95 95
96#define CONFIG_VMI_ALARM_HZ 100 96#define CONFIG_VMI_ALARM_HZ 100
97 97
98#endif 98#endif /* ASM_X86__VMI_TIME_H */
diff --git a/include/asm-x86/vsyscall.h b/include/asm-x86/vsyscall.h
index 6b66ff905af0..dcd4682413de 100644
--- a/include/asm-x86/vsyscall.h
+++ b/include/asm-x86/vsyscall.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_X86_64_VSYSCALL_H_ 1#ifndef ASM_X86__VSYSCALL_H
2#define _ASM_X86_64_VSYSCALL_H_ 2#define ASM_X86__VSYSCALL_H
3 3
4enum vsyscall_num { 4enum vsyscall_num {
5 __NR_vgettimeofday, 5 __NR_vgettimeofday,
@@ -41,4 +41,4 @@ extern void map_vsyscall(void);
41 41
42#endif /* __KERNEL__ */ 42#endif /* __KERNEL__ */
43 43
44#endif /* _ASM_X86_64_VSYSCALL_H_ */ 44#endif /* ASM_X86__VSYSCALL_H */
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 8ded74720024..8151f5b8b6cb 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -1,5 +1,5 @@
1#ifndef __XEN_EVENTS_H 1#ifndef ASM_X86__XEN__EVENTS_H
2#define __XEN_EVENTS_H 2#define ASM_X86__XEN__EVENTS_H
3 3
4enum ipi_vector { 4enum ipi_vector {
5 XEN_RESCHEDULE_VECTOR, 5 XEN_RESCHEDULE_VECTOR,
@@ -21,4 +21,4 @@ static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
21 do_IRQ(regs); 21 do_IRQ(regs);
22} 22}
23 23
24#endif /* __XEN_EVENTS_H */ 24#endif /* ASM_X86__XEN__EVENTS_H */
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h
index 2444d4593a3b..c4baab4d2b68 100644
--- a/include/asm-x86/xen/grant_table.h
+++ b/include/asm-x86/xen/grant_table.h
@@ -1,7 +1,7 @@
1#ifndef __XEN_GRANT_TABLE_H 1#ifndef ASM_X86__XEN__GRANT_TABLE_H
2#define __XEN_GRANT_TABLE_H 2#define ASM_X86__XEN__GRANT_TABLE_H
3 3
4#define xen_alloc_vm_area(size) alloc_vm_area(size) 4#define xen_alloc_vm_area(size) alloc_vm_area(size)
5#define xen_free_vm_area(area) free_vm_area(area) 5#define xen_free_vm_area(area) free_vm_area(area)
6 6
7#endif /* __XEN_GRANT_TABLE_H */ 7#endif /* ASM_X86__XEN__GRANT_TABLE_H */
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index 91cb7fd5c123..44f4259bee3f 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -30,8 +30,8 @@
30 * IN THE SOFTWARE. 30 * IN THE SOFTWARE.
31 */ 31 */
32 32
33#ifndef __HYPERCALL_H__ 33#ifndef ASM_X86__XEN__HYPERCALL_H
34#define __HYPERCALL_H__ 34#define ASM_X86__XEN__HYPERCALL_H
35 35
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/string.h> 37#include <linux/string.h>
@@ -524,4 +524,4 @@ MULTI_stack_switch(struct multicall_entry *mcl,
524 mcl->args[1] = esp; 524 mcl->args[1] = esp;
525} 525}
526 526
527#endif /* __HYPERCALL_H__ */ 527#endif /* ASM_X86__XEN__HYPERCALL_H */
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h
index 04ee0610014a..0ef3a88b869d 100644
--- a/include/asm-x86/xen/hypervisor.h
+++ b/include/asm-x86/xen/hypervisor.h
@@ -30,8 +30,8 @@
30 * IN THE SOFTWARE. 30 * IN THE SOFTWARE.
31 */ 31 */
32 32
33#ifndef __HYPERVISOR_H__ 33#ifndef ASM_X86__XEN__HYPERVISOR_H
34#define __HYPERVISOR_H__ 34#define ASM_X86__XEN__HYPERVISOR_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
@@ -69,4 +69,4 @@ u64 jiffies_to_st(unsigned long jiffies);
69 69
70#define is_running_on_xen() (xen_start_info ? 1 : 0) 70#define is_running_on_xen() (xen_start_info ? 1 : 0)
71 71
72#endif /* __HYPERVISOR_H__ */ 72#endif /* ASM_X86__XEN__HYPERVISOR_H */
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h
index 9d810f2538a2..d077bba96da9 100644
--- a/include/asm-x86/xen/interface.h
+++ b/include/asm-x86/xen/interface.h
@@ -6,8 +6,8 @@
6 * Copyright (c) 2004, K A Fraser 6 * Copyright (c) 2004, K A Fraser
7 */ 7 */
8 8
9#ifndef __ASM_X86_XEN_INTERFACE_H 9#ifndef ASM_X86__XEN__INTERFACE_H
10#define __ASM_X86_XEN_INTERFACE_H 10#define ASM_X86__XEN__INTERFACE_H
11 11
12#ifdef __XEN__ 12#ifdef __XEN__
13#define __DEFINE_GUEST_HANDLE(name, type) \ 13#define __DEFINE_GUEST_HANDLE(name, type) \
@@ -172,4 +172,4 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
172#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" 172#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
173#endif 173#endif
174 174
175#endif /* __ASM_X86_XEN_INTERFACE_H */ 175#endif /* ASM_X86__XEN__INTERFACE_H */
diff --git a/include/asm-x86/xen/interface_32.h b/include/asm-x86/xen/interface_32.h
index d8ac41d5db86..08167e19fc66 100644
--- a/include/asm-x86/xen/interface_32.h
+++ b/include/asm-x86/xen/interface_32.h
@@ -6,8 +6,8 @@
6 * Copyright (c) 2004, K A Fraser 6 * Copyright (c) 2004, K A Fraser
7 */ 7 */
8 8
9#ifndef __ASM_X86_XEN_INTERFACE_32_H 9#ifndef ASM_X86__XEN__INTERFACE_32_H
10#define __ASM_X86_XEN_INTERFACE_32_H 10#define ASM_X86__XEN__INTERFACE_32_H
11 11
12 12
13/* 13/*
@@ -94,4 +94,4 @@ typedef struct xen_callback xen_callback_t;
94#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20)) 94#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
95#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20)) 95#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
96 96
97#endif /* __ASM_X86_XEN_INTERFACE_32_H */ 97#endif /* ASM_X86__XEN__INTERFACE_32_H */
diff --git a/include/asm-x86/xen/interface_64.h b/include/asm-x86/xen/interface_64.h
index 842266ce96e6..046c0f1e01d4 100644
--- a/include/asm-x86/xen/interface_64.h
+++ b/include/asm-x86/xen/interface_64.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_X86_XEN_INTERFACE_64_H 1#ifndef ASM_X86__XEN__INTERFACE_64_H
2#define __ASM_X86_XEN_INTERFACE_64_H 2#define ASM_X86__XEN__INTERFACE_64_H
3 3
4/* 4/*
5 * 64-bit segment selectors 5 * 64-bit segment selectors
@@ -156,4 +156,4 @@ typedef unsigned long xen_callback_t;
156#endif /* !__ASSEMBLY__ */ 156#endif /* !__ASSEMBLY__ */
157 157
158 158
159#endif /* __ASM_X86_XEN_INTERFACE_64_H */ 159#endif /* ASM_X86__XEN__INTERFACE_64_H */
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index 7b3835d3b77d..c50185dccec1 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -1,5 +1,5 @@
1#ifndef __XEN_PAGE_H 1#ifndef ASM_X86__XEN__PAGE_H
2#define __XEN_PAGE_H 2#define ASM_X86__XEN__PAGE_H
3 3
4#include <linux/pfn.h> 4#include <linux/pfn.h>
5 5
@@ -162,4 +162,4 @@ xmaddr_t arbitrary_virt_to_machine(void *address);
162void make_lowmem_page_readonly(void *vaddr); 162void make_lowmem_page_readonly(void *vaddr);
163void make_lowmem_page_readwrite(void *vaddr); 163void make_lowmem_page_readwrite(void *vaddr);
164 164
165#endif /* __XEN_PAGE_H */ 165#endif /* ASM_X86__XEN__PAGE_H */
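
Every rename above follows one mechanical rule: the guard macro is derived from the header's path under include/, upper-cased, with path separators turned into double underscores. A minimal before/after sketch, using a made-up header include/asm-x86/foo/bar.h rather than any file from this patch:

/* Before: each header picked its own ad-hoc guard name */
#ifndef __FOO_BAR_H
#define __FOO_BAR_H
/* ... declarations ... */
#endif

/* After: the guard spells out the path, asm-x86/foo/bar.h -> ASM_X86__FOO__BAR_H,
 * and the closing #endif repeats it in a comment */
#ifndef ASM_X86__FOO__BAR_H
#define ASM_X86__FOO__BAR_H
/* ... declarations ... */
#endif /* ASM_X86__FOO__BAR_H */
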
diff --git a/include/crypto/internal/rng.h b/include/crypto/internal/rng.h
new file mode 100644
index 000000000000..896973369573
--- /dev/null
+++ b/include/crypto/internal/rng.h
@@ -0,0 +1,26 @@
1/*
2 * RNG: Random Number Generator algorithms under the crypto API
3 *
4 * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#ifndef _CRYPTO_INTERNAL_RNG_H
14#define _CRYPTO_INTERNAL_RNG_H
15
16#include <crypto/algapi.h>
17#include <crypto/rng.h>
18
19extern const struct crypto_type crypto_rng_type;
20
21static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
22{
23 return crypto_tfm_ctx(&tfm->base);
24}
25
26#endif
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index ccc32bad9a89..2ba42cd7d6aa 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -15,7 +15,6 @@
15 15
16#include <crypto/algapi.h> 16#include <crypto/algapi.h>
17#include <crypto/skcipher.h> 17#include <crypto/skcipher.h>
18#include <linux/init.h>
19#include <linux/types.h> 18#include <linux/types.h>
20 19
21struct rtattr; 20struct rtattr;
@@ -65,11 +64,6 @@ void skcipher_geniv_free(struct crypto_instance *inst);
65int skcipher_geniv_init(struct crypto_tfm *tfm); 64int skcipher_geniv_init(struct crypto_tfm *tfm);
66void skcipher_geniv_exit(struct crypto_tfm *tfm); 65void skcipher_geniv_exit(struct crypto_tfm *tfm);
67 66
68int __init eseqiv_module_init(void);
69void __exit eseqiv_module_exit(void);
70int __init chainiv_module_init(void);
71void chainiv_module_exit(void);
72
73static inline struct crypto_ablkcipher *skcipher_geniv_cipher( 67static inline struct crypto_ablkcipher *skcipher_geniv_cipher(
74 struct crypto_ablkcipher *geniv) 68 struct crypto_ablkcipher *geniv)
75{ 69{
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
new file mode 100644
index 000000000000..c93f9b917925
--- /dev/null
+++ b/include/crypto/rng.h
@@ -0,0 +1,75 @@
1/*
2 * RNG: Random Number Generator algorithms under the crypto API
3 *
4 * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#ifndef _CRYPTO_RNG_H
14#define _CRYPTO_RNG_H
15
16#include <linux/crypto.h>
17
18extern struct crypto_rng *crypto_default_rng;
19
20int crypto_get_default_rng(void);
21void crypto_put_default_rng(void);
22
23static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
24{
25 return (struct crypto_rng *)tfm;
26}
27
28static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
29 u32 type, u32 mask)
30{
31 type &= ~CRYPTO_ALG_TYPE_MASK;
32 type |= CRYPTO_ALG_TYPE_RNG;
33 mask |= CRYPTO_ALG_TYPE_MASK;
34
35 return __crypto_rng_cast(crypto_alloc_base(alg_name, type, mask));
36}
37
38static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
39{
40 return &tfm->base;
41}
42
43static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
44{
45 return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
46}
47
48static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
49{
50 return &crypto_rng_tfm(tfm)->crt_rng;
51}
52
53static inline void crypto_free_rng(struct crypto_rng *tfm)
54{
55 crypto_free_tfm(crypto_rng_tfm(tfm));
56}
57
58static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
59 u8 *rdata, unsigned int dlen)
60{
61 return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
62}
63
64static inline int crypto_rng_reset(struct crypto_rng *tfm,
65 u8 *seed, unsigned int slen)
66{
67 return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
68}
69
70static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
71{
72 return crypto_rng_alg(tfm)->seedsize;
73}
74
75#endif
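
The header above defines the whole caller-facing RNG surface as static inlines. A hedged sketch of how a client might drive it; the algorithm name "example-rng" and the all-zero seed are placeholders, not anything defined by this patch:

#include <linux/err.h>
#include <crypto/rng.h>

static int example_fill_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	u8 seed[32] = { 0 };	/* placeholder seed; real callers feed entropy */
	int err;

	/* "example-rng" is a hypothetical algorithm name */
	rng = crypto_alloc_rng("example-rng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_reset(rng, seed, sizeof(seed));
	if (!err)
		err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}

The crypto_get_default_rng()/crypto_put_default_rng() pair and crypto_default_rng, also declared above, point at a shared default instance for callers that do not want to manage their own transform.
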
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b68ec09399be..31474e89c59a 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -180,6 +180,7 @@ unifdef-y += audit.h
180unifdef-y += auto_fs.h 180unifdef-y += auto_fs.h
181unifdef-y += auxvec.h 181unifdef-y += auxvec.h
182unifdef-y += binfmts.h 182unifdef-y += binfmts.h
183unifdef-y += blktrace_api.h
183unifdef-y += capability.h 184unifdef-y += capability.h
184unifdef-y += capi.h 185unifdef-y += capi.h
185unifdef-y += cciss_ioctl.h 186unifdef-y += cciss_ioctl.h
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 8a12d718c169..be00973d1a8c 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -88,6 +88,7 @@ enum {
88 ATA_ID_DLF = 128, 88 ATA_ID_DLF = 128,
89 ATA_ID_CSFO = 129, 89 ATA_ID_CSFO = 129,
90 ATA_ID_CFA_POWER = 160, 90 ATA_ID_CFA_POWER = 160,
91 ATA_ID_ROT_SPEED = 217,
91 ATA_ID_PIO4 = (1 << 1), 92 ATA_ID_PIO4 = (1 << 1),
92 93
93 ATA_ID_SERNO_LEN = 20, 94 ATA_ID_SERNO_LEN = 20,
@@ -667,6 +668,15 @@ static inline int ata_id_has_dword_io(const u16 *id)
667 return 0; 668 return 0;
668} 669}
669 670
671static inline int ata_id_has_unload(const u16 *id)
672{
673 if (ata_id_major_version(id) >= 7 &&
674 (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
675 id[ATA_ID_CFSSE] & (1 << 13))
676 return 1;
677 return 0;
678}
679
670static inline int ata_id_current_chs_valid(const u16 *id) 680static inline int ata_id_current_chs_valid(const u16 *id)
671{ 681{
672 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 682 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -691,6 +701,11 @@ static inline int ata_id_is_cfa(const u16 *id)
691 return 0; 701 return 0;
692} 702}
693 703
704static inline int ata_id_is_ssd(const u16 *id)
705{
706 return id[ATA_ID_ROT_SPEED] == 0x01;
707}
708
694static inline int ata_drive_40wire(const u16 *dev_id) 709static inline int ata_drive_40wire(const u16 *dev_id)
695{ 710{
696 if (ata_id_is_sata(dev_id)) 711 if (ata_id_is_sata(dev_id))
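
The two new IDENTIFY helpers give drivers a word-level view of head-unload support and of the rotation-rate field. A hedged sketch of how a driver holding the 256-word identify buffer might consume them; queue_flag_set_unlocked() is assumed to be the existing blkdev.h helper, and QUEUE_FLAG_NONROT comes from the blkdev.h part of this same patch:

#include <linux/kernel.h>
#include <linux/ata.h>
#include <linux/blkdev.h>

static void example_apply_id_hints(struct request_queue *q, const u16 *id)
{
	/* word 217 == 0x0001 marks a non-rotating (solid state) device;
	 * telling the block layer lets schedulers skip seek heuristics */
	if (ata_id_is_ssd(id))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

	/* emergency head unload is only worth wiring up when the
	 * command set/feature words advertise it */
	if (ata_id_has_unload(id))
		pr_debug("device supports IDLE IMMEDIATE with UNLOAD\n");
}
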
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0933a14e6414..ff5b4cf9e2da 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -26,21 +26,8 @@
26 26
27#ifdef CONFIG_BLOCK 27#ifdef CONFIG_BLOCK
28 28
29/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
30#include <asm/io.h> 29#include <asm/io.h>
31 30
32#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
33#define BIOVEC_VIRT_START_SIZE(x) (bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
34#define BIOVEC_VIRT_OVERSIZE(x) ((x) > BIO_VMERGE_MAX_SIZE)
35#else
36#define BIOVEC_VIRT_START_SIZE(x) 0
37#define BIOVEC_VIRT_OVERSIZE(x) 0
38#endif
39
40#ifndef BIO_VMERGE_BOUNDARY
41#define BIO_VMERGE_BOUNDARY 0
42#endif
43
44#define BIO_DEBUG 31#define BIO_DEBUG
45 32
46#ifdef BIO_DEBUG 33#ifdef BIO_DEBUG
@@ -88,25 +75,14 @@ struct bio {
88 /* Number of segments in this BIO after 75 /* Number of segments in this BIO after
89 * physical address coalescing is performed. 76 * physical address coalescing is performed.
90 */ 77 */
91 unsigned short bi_phys_segments; 78 unsigned int bi_phys_segments;
92
93 /* Number of segments after physical and DMA remapping
94 * hardware coalescing is performed.
95 */
96 unsigned short bi_hw_segments;
97 79
98 unsigned int bi_size; /* residual I/O count */ 80 unsigned int bi_size; /* residual I/O count */
99 81
100 /*
101 * To keep track of the max hw size, we account for the
102 * sizes of the first and last virtually mergeable segments
103 * in this bio
104 */
105 unsigned int bi_hw_front_size;
106 unsigned int bi_hw_back_size;
107
108 unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ 82 unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
109 83
84 unsigned int bi_comp_cpu; /* completion CPU */
85
110 struct bio_vec *bi_io_vec; /* the actual vec list */ 86 struct bio_vec *bi_io_vec; /* the actual vec list */
111 87
112 bio_end_io_t *bi_end_io; 88 bio_end_io_t *bi_end_io;
@@ -126,11 +102,14 @@ struct bio {
126#define BIO_UPTODATE 0 /* ok after I/O completion */ 102#define BIO_UPTODATE 0 /* ok after I/O completion */
127#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ 103#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
128#define BIO_EOF 2 /* out-of-bounds error */ 104#define BIO_EOF 2 /* out-of-bounds error */
129#define BIO_SEG_VALID 3 /* nr_hw_seg valid */ 105#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
130#define BIO_CLONED 4 /* doesn't own data */ 106#define BIO_CLONED 4 /* doesn't own data */
131#define BIO_BOUNCED 5 /* bio is a bounce bio */ 107#define BIO_BOUNCED 5 /* bio is a bounce bio */
132#define BIO_USER_MAPPED 6 /* contains user pages */ 108#define BIO_USER_MAPPED 6 /* contains user pages */
133#define BIO_EOPNOTSUPP 7 /* not supported */ 109#define BIO_EOPNOTSUPP 7 /* not supported */
110#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
111#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
112#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
134#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) 113#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
135 114
136/* 115/*
@@ -144,18 +123,31 @@ struct bio {
144/* 123/*
145 * bio bi_rw flags 124 * bio bi_rw flags
146 * 125 *
147 * bit 0 -- read (not set) or write (set) 126 * bit 0 -- data direction
127 * If not set, bio is a read from device. If set, it's a write to device.
148 * bit 1 -- rw-ahead when set 128 * bit 1 -- rw-ahead when set
149 * bit 2 -- barrier 129 * bit 2 -- barrier
130 * Insert a serialization point in the IO queue, forcing previously
131 * submitted IO to be completed before this one is issued.
150 * bit 3 -- fail fast, don't want low level driver retries 132 * bit 3 -- fail fast, don't want low level driver retries
151 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately 133 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
134 * Note that this does NOT indicate that the IO itself is sync, just
135 * that the block layer will not postpone issue of this IO by plugging.
136 * bit 5 -- metadata request
137 * Used for tracing to differentiate metadata and data IO. May also
138 * get some preferential treatment in the IO scheduler
139 * bit 6 -- discard sectors
140 * Informs the lower level device that this range of sectors is no longer
141 * used by the file system and may thus be freed by the device. Used
142 * for flash based storage.
152 */ 143 */
153#define BIO_RW 0 144#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
154#define BIO_RW_AHEAD 1 145#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
155#define BIO_RW_BARRIER 2 146#define BIO_RW_BARRIER 2
156#define BIO_RW_FAILFAST 3 147#define BIO_RW_FAILFAST 3
157#define BIO_RW_SYNC 4 148#define BIO_RW_SYNC 4
158#define BIO_RW_META 5 149#define BIO_RW_META 5
150#define BIO_RW_DISCARD 6
159 151
160/* 152/*
161 * upper 16 bits of bi_rw define the io priority of this bio 153 * upper 16 bits of bi_rw define the io priority of this bio
@@ -185,14 +177,15 @@ struct bio {
185#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST)) 177#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
186#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD)) 178#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
187#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META)) 179#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
188#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size) 180#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
181#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
189 182
190static inline unsigned int bio_cur_sectors(struct bio *bio) 183static inline unsigned int bio_cur_sectors(struct bio *bio)
191{ 184{
192 if (bio->bi_vcnt) 185 if (bio->bi_vcnt)
193 return bio_iovec(bio)->bv_len >> 9; 186 return bio_iovec(bio)->bv_len >> 9;
194 187 else /* dataless requests such as discard */
195 return 0; 188 return bio->bi_size >> 9;
196} 189}
197 190
198static inline void *bio_data(struct bio *bio) 191static inline void *bio_data(struct bio *bio)
@@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio)
236 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) 229 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
237#endif 230#endif
238 231
239#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
240 ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
241#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ 232#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
242 (((addr1) | (mask)) == (((addr2) - 1) | (mask))) 233 (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
243#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 234#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -319,15 +310,14 @@ struct bio_pair {
319 atomic_t cnt; 310 atomic_t cnt;
320 int error; 311 int error;
321}; 312};
322extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, 313extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
323 int first_sectors);
324extern mempool_t *bio_split_pool;
325extern void bio_pair_release(struct bio_pair *dbio); 314extern void bio_pair_release(struct bio_pair *dbio);
326 315
327extern struct bio_set *bioset_create(int, int); 316extern struct bio_set *bioset_create(int, int);
328extern void bioset_free(struct bio_set *); 317extern void bioset_free(struct bio_set *);
329 318
330extern struct bio *bio_alloc(gfp_t, int); 319extern struct bio *bio_alloc(gfp_t, int);
320extern struct bio *bio_kmalloc(gfp_t, int);
331extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 321extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
332extern void bio_put(struct bio *); 322extern void bio_put(struct bio *);
333extern void bio_free(struct bio *, struct bio_set *); 323extern void bio_free(struct bio *, struct bio_set *);
@@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *);
335extern void bio_endio(struct bio *, int); 325extern void bio_endio(struct bio *, int);
336struct request_queue; 326struct request_queue;
337extern int bio_phys_segments(struct request_queue *, struct bio *); 327extern int bio_phys_segments(struct request_queue *, struct bio *);
338extern int bio_hw_segments(struct request_queue *, struct bio *);
339 328
340extern void __bio_clone(struct bio *, struct bio *); 329extern void __bio_clone(struct bio *, struct bio *);
341extern struct bio *bio_clone(struct bio *, gfp_t); 330extern struct bio *bio_clone(struct bio *, gfp_t);
@@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
346extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 335extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
347 unsigned int, unsigned int); 336 unsigned int, unsigned int);
348extern int bio_get_nr_vecs(struct block_device *); 337extern int bio_get_nr_vecs(struct block_device *);
338extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
349extern struct bio *bio_map_user(struct request_queue *, struct block_device *, 339extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
350 unsigned long, unsigned int, int); 340 unsigned long, unsigned int, int, gfp_t);
351struct sg_iovec; 341struct sg_iovec;
342struct rq_map_data;
352extern struct bio *bio_map_user_iov(struct request_queue *, 343extern struct bio *bio_map_user_iov(struct request_queue *,
353 struct block_device *, 344 struct block_device *,
354 struct sg_iovec *, int, int); 345 struct sg_iovec *, int, int, gfp_t);
355extern void bio_unmap_user(struct bio *); 346extern void bio_unmap_user(struct bio *);
356extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, 347extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
357 gfp_t); 348 gfp_t);
@@ -359,15 +350,25 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
359 gfp_t, int); 350 gfp_t, int);
360extern void bio_set_pages_dirty(struct bio *bio); 351extern void bio_set_pages_dirty(struct bio *bio);
361extern void bio_check_pages_dirty(struct bio *bio); 352extern void bio_check_pages_dirty(struct bio *bio);
362extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); 353extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
363extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *, 354 unsigned long, unsigned int, int, gfp_t);
364 int, int); 355extern struct bio *bio_copy_user_iov(struct request_queue *,
356 struct rq_map_data *, struct sg_iovec *,
357 int, int, gfp_t);
365extern int bio_uncopy_user(struct bio *); 358extern int bio_uncopy_user(struct bio *);
366void zero_fill_bio(struct bio *bio); 359void zero_fill_bio(struct bio *bio);
367extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); 360extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
368extern unsigned int bvec_nr_vecs(unsigned short idx); 361extern unsigned int bvec_nr_vecs(unsigned short idx);
369 362
370/* 363/*
364 * Allow queuer to specify a completion CPU for this bio
365 */
366static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
367{
368 bio->bi_comp_cpu = cpu;
369}
370
371/*
371 * bio_set is used to allow other portions of the IO system to 372 * bio_set is used to allow other portions of the IO system to
372 * allocate their own private memory pools for bio and iovec structures. 373 * allocate their own private memory pools for bio and iovec structures.
373 * These memory pools in turn all allocate from the bio_slab 374 * These memory pools in turn all allocate from the bio_slab
@@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
445 __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) 446 __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
446#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) 447#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
447 448
449/*
450 * Check whether this bio carries any data or not. A NULL bio is allowed.
451 */
452static inline int bio_has_data(struct bio *bio)
453{
454 return bio && bio->bi_io_vec != NULL;
455}
456
448#if defined(CONFIG_BLK_DEV_INTEGRITY) 457#if defined(CONFIG_BLK_DEV_INTEGRITY)
449 458
450#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) 459#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
@@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
458#define bip_for_each_vec(bvl, bip, i) \ 467#define bip_for_each_vec(bvl, bip, i) \
459 __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) 468 __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
460 469
461static inline int bio_integrity(struct bio *bio) 470#define bio_integrity(bio) (bio->bi_integrity != NULL)
462{
463#if defined(CONFIG_BLK_DEV_INTEGRITY)
464 return bio->bi_integrity != NULL;
465#else
466 return 0;
467#endif
468}
469 471
470extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); 472extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
471extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); 473extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
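
With the hw-segment and virtual-merge machinery gone, a bio either carries data pages, describes a discard range, or is an empty barrier, and bio_has_data()/bio_discard() make the split explicit. A hedged sketch of a make_request-style hook branching on the three cases; the handling shown is illustrative, not taken from any driver in this patch:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_make_request(struct request_queue *q, struct bio *bio)
{
	if (bio_empty_barrier(bio)) {
		/* no payload and no discard range: pure ordering/flush request */
		bio_endio(bio, 0);
		return 0;
	}

	if (bio_discard(bio)) {
		/* bi_sector/bi_size name a range the filesystem no longer uses;
		 * a real driver would issue its trim/erase command here */
		bio_endio(bio, 0);
		return 0;
	}

	if (bio_has_data(bio)) {
		/* ordinary read/write with a populated bi_io_vec; note that
		 * bio_cur_sectors() now falls back to bi_size for dataless bios */
	}

	return 0;
}
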
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 53ea933cf60b..a92d9e4ea96e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -16,7 +16,9 @@
16#include <linux/bio.h> 16#include <linux/bio.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/stringify.h> 18#include <linux/stringify.h>
19#include <linux/gfp.h>
19#include <linux/bsg.h> 20#include <linux/bsg.h>
21#include <linux/smp.h>
20 22
21#include <asm/scatterlist.h> 23#include <asm/scatterlist.h>
22 24
@@ -54,7 +56,6 @@ enum rq_cmd_type_bits {
54 REQ_TYPE_PM_SUSPEND, /* suspend request */ 56 REQ_TYPE_PM_SUSPEND, /* suspend request */
55 REQ_TYPE_PM_RESUME, /* resume request */ 57 REQ_TYPE_PM_RESUME, /* resume request */
56 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 58 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
57 REQ_TYPE_FLUSH, /* flush request */
58 REQ_TYPE_SPECIAL, /* driver defined type */ 59 REQ_TYPE_SPECIAL, /* driver defined type */
59 REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ 60 REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
60 /* 61 /*
@@ -76,19 +77,18 @@ enum rq_cmd_type_bits {
76 * 77 *
77 */ 78 */
78enum { 79enum {
79 /*
80 * just examples for now
81 */
82 REQ_LB_OP_EJECT = 0x40, /* eject request */ 80 REQ_LB_OP_EJECT = 0x40, /* eject request */
83 REQ_LB_OP_FLUSH = 0x41, /* flush device */ 81 REQ_LB_OP_FLUSH = 0x41, /* flush request */
82 REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
84}; 83};
85 84
86/* 85/*
87 * request type modified bits. first three bits match BIO_RW* bits, important 86 * request type modified bits. first two bits match BIO_RW* bits, important
88 */ 87 */
89enum rq_flag_bits { 88enum rq_flag_bits {
90 __REQ_RW, /* not set, read. set, write */ 89 __REQ_RW, /* not set, read. set, write */
91 __REQ_FAILFAST, /* no low level driver retries */ 90 __REQ_FAILFAST, /* no low level driver retries */
91 __REQ_DISCARD, /* request to discard sectors */
92 __REQ_SORTED, /* elevator knows about this request */ 92 __REQ_SORTED, /* elevator knows about this request */
93 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 93 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
94 __REQ_HARDBARRIER, /* may not be passed by drive either */ 94 __REQ_HARDBARRIER, /* may not be passed by drive either */
@@ -111,6 +111,7 @@ enum rq_flag_bits {
111}; 111};
112 112
113#define REQ_RW (1 << __REQ_RW) 113#define REQ_RW (1 << __REQ_RW)
114#define REQ_DISCARD (1 << __REQ_DISCARD)
114#define REQ_FAILFAST (1 << __REQ_FAILFAST) 115#define REQ_FAILFAST (1 << __REQ_FAILFAST)
115#define REQ_SORTED (1 << __REQ_SORTED) 116#define REQ_SORTED (1 << __REQ_SORTED)
116#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 117#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
@@ -140,12 +141,14 @@ enum rq_flag_bits {
140 */ 141 */
141struct request { 142struct request {
142 struct list_head queuelist; 143 struct list_head queuelist;
143 struct list_head donelist; 144 struct call_single_data csd;
145 int cpu;
144 146
145 struct request_queue *q; 147 struct request_queue *q;
146 148
147 unsigned int cmd_flags; 149 unsigned int cmd_flags;
148 enum rq_cmd_type_bits cmd_type; 150 enum rq_cmd_type_bits cmd_type;
151 unsigned long atomic_flags;
149 152
150 /* Maintain bio traversal state for part by part I/O submission. 153 /* Maintain bio traversal state for part by part I/O submission.
151 * hard_* are block layer internals, no driver should touch them! 154 * hard_* are block layer internals, no driver should touch them!
@@ -190,13 +193,6 @@ struct request {
190 */ 193 */
191 unsigned short nr_phys_segments; 194 unsigned short nr_phys_segments;
192 195
193 /* Number of scatter-gather addr+len pairs after
194 * physical and DMA remapping hardware coalescing is performed.
195 * This is the number of scatter-gather entries the driver
196 * will actually have to deal with after DMA mapping is done.
197 */
198 unsigned short nr_hw_segments;
199
200 unsigned short ioprio; 196 unsigned short ioprio;
201 197
202 void *special; 198 void *special;
@@ -220,6 +216,8 @@ struct request {
220 void *data; 216 void *data;
221 void *sense; 217 void *sense;
222 218
219 unsigned long deadline;
220 struct list_head timeout_list;
223 unsigned int timeout; 221 unsigned int timeout;
224 int retries; 222 int retries;
225 223
@@ -233,6 +231,11 @@ struct request {
233 struct request *next_rq; 231 struct request *next_rq;
234}; 232};
235 233
234static inline unsigned short req_get_ioprio(struct request *req)
235{
236 return req->ioprio;
237}
238
236/* 239/*
237 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME 240 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
238 * requests. Some step values could eventually be made generic. 241 * requests. Some step values could eventually be made generic.
@@ -252,6 +255,7 @@ typedef void (request_fn_proc) (struct request_queue *q);
252typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 255typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
253typedef int (prep_rq_fn) (struct request_queue *, struct request *); 256typedef int (prep_rq_fn) (struct request_queue *, struct request *);
254typedef void (unplug_fn) (struct request_queue *); 257typedef void (unplug_fn) (struct request_queue *);
258typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
255 259
256struct bio_vec; 260struct bio_vec;
257struct bvec_merge_data { 261struct bvec_merge_data {
@@ -265,6 +269,15 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
265typedef void (prepare_flush_fn) (struct request_queue *, struct request *); 269typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
266typedef void (softirq_done_fn)(struct request *); 270typedef void (softirq_done_fn)(struct request *);
267typedef int (dma_drain_needed_fn)(struct request *); 271typedef int (dma_drain_needed_fn)(struct request *);
272typedef int (lld_busy_fn) (struct request_queue *q);
273
274enum blk_eh_timer_return {
275 BLK_EH_NOT_HANDLED,
276 BLK_EH_HANDLED,
277 BLK_EH_RESET_TIMER,
278};
279
280typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
268 281
269enum blk_queue_state { 282enum blk_queue_state {
270 Queue_down, 283 Queue_down,
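
The blk_eh_timer_return values and the rq_timed_out_fn hook let a driver tell the block layer, per timed-out request, whether the timeout was handled, should be re-armed, or was not handled here. A hedged sketch of such a handler; the retry policy is illustrative only, and a driver would register it with blk_queue_rq_timed_out() and set the window with blk_queue_rq_timeout(), both added further down in this patch:

#include <linux/blkdev.h>

static enum blk_eh_timer_return example_rq_timed_out(struct request *req)
{
	/* give a slow command one more timeout window per remaining retry */
	if (req->retries > 0) {
		req->retries--;
		return BLK_EH_RESET_TIMER;
	}

	/* out of retries: report the timeout as not handled here */
	return BLK_EH_NOT_HANDLED;
}
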
@@ -307,10 +320,13 @@ struct request_queue
307 make_request_fn *make_request_fn; 320 make_request_fn *make_request_fn;
308 prep_rq_fn *prep_rq_fn; 321 prep_rq_fn *prep_rq_fn;
309 unplug_fn *unplug_fn; 322 unplug_fn *unplug_fn;
323 prepare_discard_fn *prepare_discard_fn;
310 merge_bvec_fn *merge_bvec_fn; 324 merge_bvec_fn *merge_bvec_fn;
311 prepare_flush_fn *prepare_flush_fn; 325 prepare_flush_fn *prepare_flush_fn;
312 softirq_done_fn *softirq_done_fn; 326 softirq_done_fn *softirq_done_fn;
327 rq_timed_out_fn *rq_timed_out_fn;
313 dma_drain_needed_fn *dma_drain_needed; 328 dma_drain_needed_fn *dma_drain_needed;
329 lld_busy_fn *lld_busy_fn;
314 330
315 /* 331 /*
316 * Dispatch queue sorting 332 * Dispatch queue sorting
@@ -385,6 +401,10 @@ struct request_queue
385 unsigned int nr_sorted; 401 unsigned int nr_sorted;
386 unsigned int in_flight; 402 unsigned int in_flight;
387 403
404 unsigned int rq_timeout;
405 struct timer_list timeout;
406 struct list_head timeout_list;
407
388 /* 408 /*
389 * sg stuff 409 * sg stuff
390 */ 410 */
@@ -421,6 +441,10 @@ struct request_queue
421#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ 441#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
422#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ 442#define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */
423#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ 443#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */
444#define QUEUE_FLAG_SAME_COMP 11 /* force complete on same CPU */
445#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
446#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
447#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
424 448
425static inline int queue_is_locked(struct request_queue *q) 449static inline int queue_is_locked(struct request_queue *q)
426{ 450{
@@ -526,7 +550,10 @@ enum {
526#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 550#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
527#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 551#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
528#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 552#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
553#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
529#define blk_queue_flushing(q) ((q)->ordseq) 554#define blk_queue_flushing(q) ((q)->ordseq)
555#define blk_queue_stackable(q) \
556 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
530 557
531#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) 558#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
532#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) 559#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -536,16 +563,18 @@ enum {
536#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) 563#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
537#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 564#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
538 565
539#define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) 566#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
540 567
541#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) 568#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
542#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) 569#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
543#define blk_pm_request(rq) \ 570#define blk_pm_request(rq) \
544 (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) 571 (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
545 572
573#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
546#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) 574#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
547#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) 575#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
548#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 576#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
577#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
549#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 578#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
550#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) 579#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
551/* rq->queuelist of dequeued request must be list_empty() */ 580/* rq->queuelist of dequeued request must be list_empty() */
@@ -592,7 +621,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
592#define RQ_NOMERGE_FLAGS \ 621#define RQ_NOMERGE_FLAGS \
593 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 622 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
594#define rq_mergeable(rq) \ 623#define rq_mergeable(rq) \
595 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) 624 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
625 (blk_discard_rq(rq) || blk_fs_request((rq))))
596 626
597/* 627/*
598 * q->prep_rq_fn return values 628 * q->prep_rq_fn return values
@@ -637,6 +667,12 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
637} 667}
638#endif /* CONFIG_MMU */ 668#endif /* CONFIG_MMU */
639 669
670struct rq_map_data {
671 struct page **pages;
672 int page_order;
673 int nr_entries;
674};
675
640struct req_iterator { 676struct req_iterator {
641 int i; 677 int i;
642 struct bio *bio; 678 struct bio *bio;
@@ -664,6 +700,10 @@ extern void __blk_put_request(struct request_queue *, struct request *);
664extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 700extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
665extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 701extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
666extern void blk_requeue_request(struct request_queue *, struct request *); 702extern void blk_requeue_request(struct request_queue *, struct request *);
703extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
704extern int blk_lld_busy(struct request_queue *q);
705extern int blk_insert_cloned_request(struct request_queue *q,
706 struct request *rq);
667extern void blk_plug_device(struct request_queue *); 707extern void blk_plug_device(struct request_queue *);
668extern void blk_plug_device_unlocked(struct request_queue *); 708extern void blk_plug_device_unlocked(struct request_queue *);
669extern int blk_remove_plug(struct request_queue *); 709extern int blk_remove_plug(struct request_queue *);
@@ -705,11 +745,14 @@ extern void __blk_stop_queue(struct request_queue *q);
705extern void __blk_run_queue(struct request_queue *); 745extern void __blk_run_queue(struct request_queue *);
706extern void blk_run_queue(struct request_queue *); 746extern void blk_run_queue(struct request_queue *);
707extern void blk_start_queueing(struct request_queue *); 747extern void blk_start_queueing(struct request_queue *);
708extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); 748extern int blk_rq_map_user(struct request_queue *, struct request *,
749 struct rq_map_data *, void __user *, unsigned long,
750 gfp_t);
709extern int blk_rq_unmap_user(struct bio *); 751extern int blk_rq_unmap_user(struct bio *);
710extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); 752extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
711extern int blk_rq_map_user_iov(struct request_queue *, struct request *, 753extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
712 struct sg_iovec *, int, unsigned int); 754 struct rq_map_data *, struct sg_iovec *, int,
755 unsigned int, gfp_t);
713extern int blk_execute_rq(struct request_queue *, struct gendisk *, 756extern int blk_execute_rq(struct request_queue *, struct gendisk *,
714 struct request *, int); 757 struct request *, int);
715extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, 758extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
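
blk_rq_map_user() and blk_rq_map_user_iov() grow a struct rq_map_data * and a gfp_t argument. A hedged sketch of the updated call shape; passing a NULL map_data is assumed to mean no caller-supplied pages, and GFP_KERNEL reflects a process-context caller, neither of which is spelled out in this header:

#include <linux/blkdev.h>

static int example_map_user_buffer(struct request_queue *q, struct request *rq,
				   void __user *ubuf, unsigned long len)
{
	/* NULL rq_map_data: let the block layer allocate bounce pages itself */
	return blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
}
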
@@ -750,12 +793,15 @@ extern int __blk_end_request(struct request *rq, int error,
750extern int blk_end_bidi_request(struct request *rq, int error, 793extern int blk_end_bidi_request(struct request *rq, int error,
751 unsigned int nr_bytes, unsigned int bidi_bytes); 794 unsigned int nr_bytes, unsigned int bidi_bytes);
752extern void end_request(struct request *, int); 795extern void end_request(struct request *, int);
753extern void end_queued_request(struct request *, int);
754extern void end_dequeued_request(struct request *, int);
755extern int blk_end_request_callback(struct request *rq, int error, 796extern int blk_end_request_callback(struct request *rq, int error,
756 unsigned int nr_bytes, 797 unsigned int nr_bytes,
757 int (drv_callback)(struct request *)); 798 int (drv_callback)(struct request *));
758extern void blk_complete_request(struct request *); 799extern void blk_complete_request(struct request *);
800extern void __blk_complete_request(struct request *);
801extern void blk_abort_request(struct request *);
802extern void blk_abort_queue(struct request_queue *);
803extern void blk_update_request(struct request *rq, int error,
804 unsigned int nr_bytes);
759 805
760/* 806/*
761 * blk_end_request() takes bytes instead of sectors as a complete size. 807 * blk_end_request() takes bytes instead of sectors as a complete size.
@@ -790,12 +836,16 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
790extern int blk_queue_dma_drain(struct request_queue *q, 836extern int blk_queue_dma_drain(struct request_queue *q,
791 dma_drain_needed_fn *dma_drain_needed, 837 dma_drain_needed_fn *dma_drain_needed,
792 void *buf, unsigned int size); 838 void *buf, unsigned int size);
839extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
793extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 840extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
794extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 841extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
795extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 842extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
796extern void blk_queue_dma_alignment(struct request_queue *, int); 843extern void blk_queue_dma_alignment(struct request_queue *, int);
797extern void blk_queue_update_dma_alignment(struct request_queue *, int); 844extern void blk_queue_update_dma_alignment(struct request_queue *, int);
798extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 845extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
846extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
847extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
848extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
799extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 849extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
800extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); 850extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
801extern int blk_do_ordered(struct request_queue *, struct request **); 851extern int blk_do_ordered(struct request_queue *, struct request **);
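
The blk_queue_rq_timed_out()/blk_queue_rq_timeout() setters added in the hunk above give drivers a per-queue request timeout. A minimal sketch of wiring them up follows; the function names are hypothetical and the blk_eh_timer_return enum is assumed from the same timeout series, so treat this as illustrative rather than a reference driver.

    #include <linux/blkdev.h>

    static enum blk_eh_timer_return example_rq_timed_out(struct request *rq)
    {
            /* Ask the block layer to re-arm the timer and keep waiting. */
            return BLK_EH_RESET_TIMER;
    }

    static void example_init_queue(struct request_queue *q)
    {
            blk_queue_rq_timed_out(q, example_rq_timed_out);
            blk_queue_rq_timeout(q, 30 * HZ);       /* 30 second timeout */
    }
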
@@ -837,6 +887,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
837} 887}
838 888
839extern int blkdev_issue_flush(struct block_device *, sector_t *); 889extern int blkdev_issue_flush(struct block_device *, sector_t *);
890extern int blkdev_issue_discard(struct block_device *,
891 sector_t sector, sector_t nr_sects, gfp_t);
892
893static inline int sb_issue_discard(struct super_block *sb,
894 sector_t block, sector_t nr_blocks)
895{
896 block <<= (sb->s_blocksize_bits - 9);
897 nr_blocks <<= (sb->s_blocksize_bits - 9);
898 return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
899}
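
The sb_issue_discard() helper above converts filesystem blocks into 512-byte sectors before calling blkdev_issue_discard(). A hedged sketch of how a filesystem might use it when releasing an extent; the function and parameter names are illustrative only.

    static void example_discard_extent(struct super_block *sb,
                                       sector_t start_blk, sector_t nr_blks)
    {
            int err = sb_issue_discard(sb, start_blk, nr_blks);

            /* Devices without discard support report -EOPNOTSUPP. */
            if (err && err != -EOPNOTSUPP)
                    printk(KERN_WARNING "discard of %llu+%llu blocks failed: %d\n",
                           (unsigned long long)start_blk,
                           (unsigned long long)nr_blks, err);
    }
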
840 900
841/* 901/*
842* command filter functions 902* command filter functions
@@ -874,6 +934,13 @@ static inline int queue_dma_alignment(struct request_queue *q)
874 return q ? q->dma_alignment : 511; 934 return q ? q->dma_alignment : 511;
875} 935}
876 936
937static inline int blk_rq_aligned(struct request_queue *q, void *addr,
938 unsigned int len)
939{
940 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
941 return !((unsigned long)addr & alignment) && !(len & alignment);
942}
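
blk_rq_aligned() folds the queue's DMA alignment and pad mask into a single check. One illustrative use, assuming a driver deciding whether a buffer can be mapped directly or needs a bounce copy (names hypothetical):

    static bool example_can_map_directly(struct request_queue *q,
                                         void *buf, unsigned int len)
    {
            /* Misaligned address or length means a bounce buffer is needed. */
            return blk_rq_aligned(q, buf, len);
    }
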
943
877/* assumes size > 256 */ 944/* assumes size > 256 */
878static inline unsigned int blksize_bits(unsigned int size) 945static inline unsigned int blksize_bits(unsigned int size)
879{ 946{
@@ -900,7 +967,7 @@ static inline void put_dev_sector(Sector p)
900} 967}
901 968
902struct work_struct; 969struct work_struct;
903int kblockd_schedule_work(struct work_struct *work); 970int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
904void kblockd_flush_work(struct work_struct *work); 971void kblockd_flush_work(struct work_struct *work);
905 972
906#define MODULE_ALIAS_BLOCKDEV(major,minor) \ 973#define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -945,49 +1012,19 @@ struct blk_integrity {
945 1012
946extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); 1013extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
947extern void blk_integrity_unregister(struct gendisk *); 1014extern void blk_integrity_unregister(struct gendisk *);
948extern int blk_integrity_compare(struct block_device *, struct block_device *); 1015extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
949extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); 1016extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
950extern int blk_rq_count_integrity_sg(struct request *); 1017extern int blk_rq_count_integrity_sg(struct request *);
951 1018
952static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) 1019static inline
953{ 1020struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
954 if (bi)
955 return bi->tuple_size;
956
957 return 0;
958}
959
960static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
961{ 1021{
962 return bdev->bd_disk->integrity; 1022 return bdev->bd_disk->integrity;
963} 1023}
964 1024
965static inline unsigned int bdev_get_tag_size(struct block_device *bdev) 1025static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
966{ 1026{
967 struct blk_integrity *bi = bdev_get_integrity(bdev); 1027 return disk->integrity;
968
969 if (bi)
970 return bi->tag_size;
971
972 return 0;
973}
974
975static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
976{
977 struct blk_integrity *bi = bdev_get_integrity(bdev);
978
979 if (bi == NULL)
980 return 0;
981
982 if (rw == READ && bi->verify_fn != NULL &&
983 (bi->flags & INTEGRITY_FLAG_READ))
984 return 1;
985
986 if (rw == WRITE && bi->generate_fn != NULL &&
987 (bi->flags & INTEGRITY_FLAG_WRITE))
988 return 1;
989
990 return 0;
991} 1028}
992 1029
993static inline int blk_integrity_rq(struct request *rq) 1030static inline int blk_integrity_rq(struct request *rq)
@@ -1004,7 +1041,7 @@ static inline int blk_integrity_rq(struct request *rq)
1004#define blk_rq_count_integrity_sg(a) (0) 1041#define blk_rq_count_integrity_sg(a) (0)
1005#define blk_rq_map_integrity_sg(a, b) (0) 1042#define blk_rq_map_integrity_sg(a, b) (0)
1006#define bdev_get_integrity(a) (0) 1043#define bdev_get_integrity(a) (0)
1007#define bdev_get_tag_size(a) (0) 1044#define blk_get_integrity(a) (0)
1008#define blk_integrity_compare(a, b) (0) 1045#define blk_integrity_compare(a, b) (0)
1009#define blk_integrity_register(a, b) (0) 1046#define blk_integrity_register(a, b) (0)
1010#define blk_integrity_unregister(a) do { } while (0); 1047#define blk_integrity_unregister(a) do { } while (0);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d084b8d227a5..3a31eb506164 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -1,8 +1,10 @@
1#ifndef BLKTRACE_H 1#ifndef BLKTRACE_H
2#define BLKTRACE_H 2#define BLKTRACE_H
3 3
4#ifdef __KERNEL__
4#include <linux/blkdev.h> 5#include <linux/blkdev.h>
5#include <linux/relay.h> 6#include <linux/relay.h>
7#endif
6 8
7/* 9/*
8 * Trace categories 10 * Trace categories
@@ -21,6 +23,7 @@ enum blktrace_cat {
21 BLK_TC_NOTIFY = 1 << 10, /* special message */ 23 BLK_TC_NOTIFY = 1 << 10, /* special message */
22 BLK_TC_AHEAD = 1 << 11, /* readahead */ 24 BLK_TC_AHEAD = 1 << 11, /* readahead */
23 BLK_TC_META = 1 << 12, /* metadata */ 25 BLK_TC_META = 1 << 12, /* metadata */
26 BLK_TC_DISCARD = 1 << 13, /* discard requests */
24 27
25 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ 28 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
26}; 29};
@@ -47,6 +50,7 @@ enum blktrace_act {
47 __BLK_TA_SPLIT, /* bio was split */ 50 __BLK_TA_SPLIT, /* bio was split */
48 __BLK_TA_BOUNCE, /* bio was bounced */ 51 __BLK_TA_BOUNCE, /* bio was bounced */
49 __BLK_TA_REMAP, /* bio was remapped */ 52 __BLK_TA_REMAP, /* bio was remapped */
53 __BLK_TA_ABORT, /* request aborted */
50}; 54};
51 55
52/* 56/*
@@ -77,6 +81,7 @@ enum blktrace_notify {
77#define BLK_TA_SPLIT (__BLK_TA_SPLIT) 81#define BLK_TA_SPLIT (__BLK_TA_SPLIT)
78#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE) 82#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
79#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE)) 83#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
84#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
80 85
81#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY)) 86#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
82#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY)) 87#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@@ -89,17 +94,17 @@ enum blktrace_notify {
89 * The trace itself 94 * The trace itself
90 */ 95 */
91struct blk_io_trace { 96struct blk_io_trace {
92 u32 magic; /* MAGIC << 8 | version */ 97 __u32 magic; /* MAGIC << 8 | version */
93 u32 sequence; /* event number */ 98 __u32 sequence; /* event number */
94 u64 time; /* in microseconds */ 99 __u64 time; /* in microseconds */
95 u64 sector; /* disk offset */ 100 __u64 sector; /* disk offset */
96 u32 bytes; /* transfer length */ 101 __u32 bytes; /* transfer length */
97 u32 action; /* what happened */ 102 __u32 action; /* what happened */
98 u32 pid; /* who did it */ 103 __u32 pid; /* who did it */
99 u32 device; /* device number */ 104 __u32 device; /* device number */
100 u32 cpu; /* on what cpu did it happen */ 105 __u32 cpu; /* on what cpu did it happen */
101 u16 error; /* completion error */ 106 __u16 error; /* completion error */
102 u16 pdu_len; /* length of data after this trace */ 107 __u16 pdu_len; /* length of data after this trace */
103}; 108};
104 109
105/* 110/*
@@ -117,6 +122,23 @@ enum {
117 Blktrace_stopped, 122 Blktrace_stopped,
118}; 123};
119 124
125#define BLKTRACE_BDEV_SIZE 32
126
127/*
128 * User setup structure passed with BLKTRACESTART
129 */
130struct blk_user_trace_setup {
131 char name[BLKTRACE_BDEV_SIZE]; /* output */
132 __u16 act_mask; /* input */
133 __u32 buf_size; /* input */
134 __u32 buf_nr; /* input */
135 __u64 start_lba;
136 __u64 end_lba;
137 __u32 pid;
138};
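
Moving struct blk_user_trace_setup outside #ifdef __KERNEL__ lets user space fill it in directly for the BLKTRACESETUP/BLKTRACESTART ioctls declared in <linux/fs.h>. A rough user-space sketch with minimal error handling; the buffer sizes are arbitrary assumptions.

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/blktrace_api.h>

    static int example_start_blktrace(const char *dev)
    {
            struct blk_user_trace_setup buts;
            int fd = open(dev, O_RDONLY | O_NONBLOCK);

            if (fd < 0)
                    return -1;

            memset(&buts, 0, sizeof(buts));
            buts.act_mask = 0xffff;         /* trace all categories */
            buts.buf_size = 512 * 1024;     /* relay sub-buffer size */
            buts.buf_nr   = 4;              /* number of sub-buffers */

            if (ioctl(fd, BLKTRACESETUP, &buts) < 0 ||
                ioctl(fd, BLKTRACESTART) < 0) {
                    close(fd);
                    return -1;
            }

            /* buts.name now holds the trace name used for the output files. */
            return fd;
    }
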
139
140#ifdef __KERNEL__
141#if defined(CONFIG_BLK_DEV_IO_TRACE)
120struct blk_trace { 142struct blk_trace {
121 int trace_state; 143 int trace_state;
122 struct rchan *rchan; 144 struct rchan *rchan;
@@ -133,21 +155,6 @@ struct blk_trace {
133 atomic_t dropped; 155 atomic_t dropped;
134}; 156};
135 157
136/*
137 * User setup structure passed with BLKTRACESTART
138 */
139struct blk_user_trace_setup {
140 char name[BDEVNAME_SIZE]; /* output */
141 u16 act_mask; /* input */
142 u32 buf_size; /* input */
143 u32 buf_nr; /* input */
144 u64 start_lba;
145 u64 end_lba;
146 u32 pid;
147};
148
149#ifdef __KERNEL__
150#if defined(CONFIG_BLK_DEV_IO_TRACE)
151extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 158extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
152extern void blk_trace_shutdown(struct request_queue *); 159extern void blk_trace_shutdown(struct request_queue *);
153extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); 160extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
@@ -195,6 +202,9 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
195 if (likely(!bt)) 202 if (likely(!bt))
196 return; 203 return;
197 204
205 if (blk_discard_rq(rq))
206 rw |= (1 << BIO_RW_DISCARD);
207
198 if (blk_pc_request(rq)) { 208 if (blk_pc_request(rq)) {
199 what |= BLK_TC_ACT(BLK_TC_PC); 209 what |= BLK_TC_ACT(BLK_TC_PC);
200 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); 210 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index c8bd2daf95ec..8322141ee480 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
190 * ACCESS_ONCE() in different C statements. 190 * ACCESS_ONCE() in different C statements.
191 * 191 *
192 * This macro does absolutely -nothing- to prevent the CPU from reordering, 192 * This macro does absolutely -nothing- to prevent the CPU from reordering,
193 * merging, or refetching absolutely anything at any time. 193 * merging, or refetching absolutely anything at any time. Its main intended
194 * use is to mediate communication between process-level code and irq/NMI
195 * handlers, all running on the same CPU.
194 */ 196 */
195#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) 197#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
196 198
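
The expanded comment above describes the intended use; a minimal sketch of that pattern, with made-up names, is a process-context loop polling a flag that an interrupt handler sets.

    static int example_irq_done;            /* written from an IRQ handler */

    static void example_wait_for_irq(void)
    {
            /*
             * Without ACCESS_ONCE() the compiler may load the flag once and
             * then spin on a stale register copy forever.
             */
            while (!ACCESS_ONCE(example_irq_done))
                    cpu_relax();
    }
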
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index c43dc47fdf75..3d2317e4af2e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -38,6 +38,7 @@
38#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 38#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
39#define CRYPTO_ALG_TYPE_HASH 0x00000009 39#define CRYPTO_ALG_TYPE_HASH 0x00000009
40#define CRYPTO_ALG_TYPE_AHASH 0x0000000a 40#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
41#define CRYPTO_ALG_TYPE_RNG 0x0000000c
41 42
42#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 43#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
43#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c 44#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -61,6 +62,14 @@
61#define CRYPTO_ALG_GENIV 0x00000200 62#define CRYPTO_ALG_GENIV 0x00000200
62 63
63/* 64/*
65 * Set if the algorithm has passed automated run-time testing. Note that
66 * if there is no run-time testing for a given algorithm it is considered
67 * to have passed.
68 */
69
70#define CRYPTO_ALG_TESTED 0x00000400
71
72/*
64 * Transform masks and values (for crt_flags). 73 * Transform masks and values (for crt_flags).
65 */ 74 */
66#define CRYPTO_TFM_REQ_MASK 0x000fff00 75#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -105,6 +114,7 @@ struct crypto_aead;
105struct crypto_blkcipher; 114struct crypto_blkcipher;
106struct crypto_hash; 115struct crypto_hash;
107struct crypto_ahash; 116struct crypto_ahash;
117struct crypto_rng;
108struct crypto_tfm; 118struct crypto_tfm;
109struct crypto_type; 119struct crypto_type;
110struct aead_givcrypt_request; 120struct aead_givcrypt_request;
@@ -290,6 +300,15 @@ struct compress_alg {
290 unsigned int slen, u8 *dst, unsigned int *dlen); 300 unsigned int slen, u8 *dst, unsigned int *dlen);
291}; 301};
292 302
303struct rng_alg {
304 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
305 unsigned int dlen);
306 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
307
308 unsigned int seedsize;
309};
310
311
293#define cra_ablkcipher cra_u.ablkcipher 312#define cra_ablkcipher cra_u.ablkcipher
294#define cra_aead cra_u.aead 313#define cra_aead cra_u.aead
295#define cra_blkcipher cra_u.blkcipher 314#define cra_blkcipher cra_u.blkcipher
@@ -298,6 +317,7 @@ struct compress_alg {
298#define cra_hash cra_u.hash 317#define cra_hash cra_u.hash
299#define cra_ahash cra_u.ahash 318#define cra_ahash cra_u.ahash
300#define cra_compress cra_u.compress 319#define cra_compress cra_u.compress
320#define cra_rng cra_u.rng
301 321
302struct crypto_alg { 322struct crypto_alg {
303 struct list_head cra_list; 323 struct list_head cra_list;
@@ -325,6 +345,7 @@ struct crypto_alg {
325 struct hash_alg hash; 345 struct hash_alg hash;
326 struct ahash_alg ahash; 346 struct ahash_alg ahash;
327 struct compress_alg compress; 347 struct compress_alg compress;
348 struct rng_alg rng;
328 } cra_u; 349 } cra_u;
329 350
330 int (*cra_init)(struct crypto_tfm *tfm); 351 int (*cra_init)(struct crypto_tfm *tfm);
@@ -430,6 +451,12 @@ struct compress_tfm {
430 u8 *dst, unsigned int *dlen); 451 u8 *dst, unsigned int *dlen);
431}; 452};
432 453
454struct rng_tfm {
455 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
456 unsigned int dlen);
457 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
458};
459
433#define crt_ablkcipher crt_u.ablkcipher 460#define crt_ablkcipher crt_u.ablkcipher
434#define crt_aead crt_u.aead 461#define crt_aead crt_u.aead
435#define crt_blkcipher crt_u.blkcipher 462#define crt_blkcipher crt_u.blkcipher
@@ -437,6 +464,7 @@ struct compress_tfm {
437#define crt_hash crt_u.hash 464#define crt_hash crt_u.hash
438#define crt_ahash crt_u.ahash 465#define crt_ahash crt_u.ahash
439#define crt_compress crt_u.compress 466#define crt_compress crt_u.compress
467#define crt_rng crt_u.rng
440 468
441struct crypto_tfm { 469struct crypto_tfm {
442 470
@@ -450,6 +478,7 @@ struct crypto_tfm {
450 struct hash_tfm hash; 478 struct hash_tfm hash;
451 struct ahash_tfm ahash; 479 struct ahash_tfm ahash;
452 struct compress_tfm compress; 480 struct compress_tfm compress;
481 struct rng_tfm rng;
453 } crt_u; 482 } crt_u;
454 483
455 struct crypto_alg *__crt_alg; 484 struct crypto_alg *__crt_alg;
@@ -481,6 +510,10 @@ struct crypto_hash {
481 struct crypto_tfm base; 510 struct crypto_tfm base;
482}; 511};
483 512
513struct crypto_rng {
514 struct crypto_tfm base;
515};
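
CRYPTO_ALG_TYPE_RNG and struct crypto_rng introduce a random-number-generator transform type. A hedged consumer sketch, assuming the companion <crypto/rng.h> helpers (crypto_alloc_rng(), crypto_rng_get_bytes(), crypto_free_rng()) from the same series; "stdrng" is the conventional default algorithm name.

    #include <crypto/rng.h>

    static int example_fill_random(u8 *buf, unsigned int len)
    {
            struct crypto_rng *rng;
            int err;

            rng = crypto_alloc_rng("stdrng", 0, 0);
            if (IS_ERR(rng))
                    return PTR_ERR(rng);

            err = crypto_rng_get_bytes(rng, buf, len);
            crypto_free_rng(rng);
            return err;
    }
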
516
484enum { 517enum {
485 CRYPTOA_UNSPEC, 518 CRYPTOA_UNSPEC,
486 CRYPTOA_ALG, 519 CRYPTOA_ALG,
@@ -515,6 +548,8 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
515struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); 548struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
516void crypto_free_tfm(struct crypto_tfm *tfm); 549void crypto_free_tfm(struct crypto_tfm *tfm);
517 550
551int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
552
518/* 553/*
519 * Transform helpers which query the underlying algorithm. 554 * Transform helpers which query the underlying algorithm.
520 */ 555 */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a90222e3297d..08d783592b73 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -13,7 +13,6 @@
13 13
14struct dm_target; 14struct dm_target;
15struct dm_table; 15struct dm_table;
16struct dm_dev;
17struct mapped_device; 16struct mapped_device;
18struct bio_vec; 17struct bio_vec;
19 18
@@ -84,6 +83,12 @@ void dm_error(const char *message);
84 */ 83 */
85void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev); 84void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
86 85
86struct dm_dev {
87 struct block_device *bdev;
88 int mode;
89 char name[16];
90};
91
87/* 92/*
88 * Constructors should call these functions to ensure destination devices 93 * Constructors should call these functions to ensure destination devices
89 * are opened/closed correctly. 94 * are opened/closed correctly.
@@ -202,6 +207,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
202struct gendisk *dm_disk(struct mapped_device *md); 207struct gendisk *dm_disk(struct mapped_device *md);
203int dm_suspended(struct mapped_device *md); 208int dm_suspended(struct mapped_device *md);
204int dm_noflush_suspending(struct dm_target *ti); 209int dm_noflush_suspending(struct dm_target *ti);
210union map_info *dm_get_mapinfo(struct bio *bio);
205 211
206/* 212/*
207 * Geometry functions. 213 * Geometry functions.
@@ -232,6 +238,11 @@ int dm_table_add_target(struct dm_table *t, const char *type,
232int dm_table_complete(struct dm_table *t); 238int dm_table_complete(struct dm_table *t);
233 239
234/* 240/*
241 * Unplug all devices in a table.
242 */
243void dm_table_unplug_all(struct dm_table *t);
244
245/*
235 * Table reference counting. 246 * Table reference counting.
236 */ 247 */
237struct dm_table *dm_get_table(struct mapped_device *md); 248struct dm_table *dm_get_table(struct mapped_device *md);
@@ -256,6 +267,11 @@ void dm_table_event(struct dm_table *t);
256 */ 267 */
257int dm_swap_table(struct mapped_device *md, struct dm_table *t); 268int dm_swap_table(struct mapped_device *md, struct dm_table *t);
258 269
270/*
271 * A wrapper around vmalloc.
272 */
273void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
274
259/*----------------------------------------------------------------- 275/*-----------------------------------------------------------------
260 * Macros. 276 * Macros.
261 *---------------------------------------------------------------*/ 277 *---------------------------------------------------------------*/
diff --git a/include/linux/device.h b/include/linux/device.h
index 4d8372d135df..246937c9cbc7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -199,6 +199,11 @@ struct class {
199 struct class_private *p; 199 struct class_private *p;
200}; 200};
201 201
202struct class_dev_iter {
203 struct klist_iter ki;
204 const struct device_type *type;
205};
206
202extern struct kobject *sysfs_dev_block_kobj; 207extern struct kobject *sysfs_dev_block_kobj;
203extern struct kobject *sysfs_dev_char_kobj; 208extern struct kobject *sysfs_dev_char_kobj;
204extern int __must_check __class_register(struct class *class, 209extern int __must_check __class_register(struct class *class,
@@ -213,6 +218,13 @@ extern void class_unregister(struct class *class);
213 __class_register(class, &__key); \ 218 __class_register(class, &__key); \
214}) 219})
215 220
221extern void class_dev_iter_init(struct class_dev_iter *iter,
222 struct class *class,
223 struct device *start,
224 const struct device_type *type);
225extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
226extern void class_dev_iter_exit(struct class_dev_iter *iter);
227
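
The class_dev_iter_init()/next()/exit() trio above replaces open-coded walking of a class's device list. An illustrative iteration; example_class and the log message are placeholders.

    static void example_walk_class(struct class *example_class)
    {
            struct class_dev_iter iter;
            struct device *dev;

            class_dev_iter_init(&iter, example_class, NULL, NULL);
            while ((dev = class_dev_iter_next(&iter)))
                    dev_info(dev, "visiting %s\n", dev_name(dev));
            class_dev_iter_exit(&iter);
    }
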
216extern int class_for_each_device(struct class *class, struct device *start, 228extern int class_for_each_device(struct class *class, struct device *start,
217 void *data, 229 void *data,
218 int (*fn)(struct device *dev, void *data)); 230 int (*fn)(struct device *dev, void *data));
@@ -396,7 +408,7 @@ struct device {
396 spinlock_t devres_lock; 408 spinlock_t devres_lock;
397 struct list_head devres_head; 409 struct list_head devres_head;
398 410
399 struct list_head node; 411 struct klist_node knode_class;
400 struct class *class; 412 struct class *class;
401 dev_t devt; /* dev_t, creates the sysfs "dev" */ 413 dev_t devt; /* dev_t, creates the sysfs "dev" */
402 struct attribute_group **groups; /* optional groups */ 414 struct attribute_group **groups; /* optional groups */
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index 203a025e30e5..b9cd38603fd8 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -65,9 +65,12 @@ struct dlm_lksb {
65 char * sb_lvbptr; 65 char * sb_lvbptr;
66}; 66};
67 67
68/* dlm_new_lockspace() flags */
69
68#define DLM_LSFL_NODIR 0x00000001 70#define DLM_LSFL_NODIR 0x00000001
69#define DLM_LSFL_TIMEWARN 0x00000002 71#define DLM_LSFL_TIMEWARN 0x00000002
70#define DLM_LSFL_FS 0x00000004 72#define DLM_LSFL_FS 0x00000004
73#define DLM_LSFL_NEWEXCL 0x00000008
71 74
72#ifdef __KERNEL__ 75#ifdef __KERNEL__
73 76
diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h
index c6034508fed9..3060783c4191 100644
--- a/include/linux/dlm_device.h
+++ b/include/linux/dlm_device.h
@@ -26,7 +26,7 @@
26/* Version of the device interface */ 26/* Version of the device interface */
27#define DLM_DEVICE_VERSION_MAJOR 6 27#define DLM_DEVICE_VERSION_MAJOR 6
28#define DLM_DEVICE_VERSION_MINOR 0 28#define DLM_DEVICE_VERSION_MINOR 0
29#define DLM_DEVICE_VERSION_PATCH 0 29#define DLM_DEVICE_VERSION_PATCH 1
30 30
31/* struct passed to the lock write */ 31/* struct passed to the lock write */
32struct dlm_lock_params { 32struct dlm_lock_params {
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 639624b55fbe..92f6f634e3e6 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -112,6 +112,7 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
112extern int elv_register_queue(struct request_queue *q); 112extern int elv_register_queue(struct request_queue *q);
113extern void elv_unregister_queue(struct request_queue *q); 113extern void elv_unregister_queue(struct request_queue *q);
114extern int elv_may_queue(struct request_queue *, int); 114extern int elv_may_queue(struct request_queue *, int);
115extern void elv_abort_queue(struct request_queue *);
115extern void elv_completed_request(struct request_queue *, struct request *); 116extern void elv_completed_request(struct request_queue *, struct request *);
116extern int elv_set_request(struct request_queue *, struct request *, gfp_t); 117extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
117extern void elv_put_request(struct request_queue *, struct request *); 118extern void elv_put_request(struct request_queue *, struct request *);
@@ -173,15 +174,15 @@ enum {
173#define rb_entry_rq(node) rb_entry((node), struct request, rb_node) 174#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
174 175
175/* 176/*
176 * Hack to reuse the donelist list_head as the fifo time holder while 177 * Hack to reuse the csd.list list_head as the fifo time holder while
177 * the request is in the io scheduler. Saves an unsigned long in rq. 178 * the request is in the io scheduler. Saves an unsigned long in rq.
178 */ 179 */
179#define rq_fifo_time(rq) ((unsigned long) (rq)->donelist.next) 180#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
180#define rq_set_fifo_time(rq,exp) ((rq)->donelist.next = (void *) (exp)) 181#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
181#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) 182#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
182#define rq_fifo_clear(rq) do { \ 183#define rq_fifo_clear(rq) do { \
183 list_del_init(&(rq)->queuelist); \ 184 list_del_init(&(rq)->queuelist); \
184 INIT_LIST_HEAD(&(rq)->donelist); \ 185 INIT_LIST_HEAD(&(rq)->csd.list); \
185 } while (0) 186 } while (0)
186 187
187/* 188/*
diff --git a/include/linux/fd.h b/include/linux/fd.h
index b6bd41d2b460..f5d194af07a8 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -15,10 +15,16 @@ struct floppy_struct {
15 sect, /* sectors per track */ 15 sect, /* sectors per track */
16 head, /* nr of heads */ 16 head, /* nr of heads */
17 track, /* nr of tracks */ 17 track, /* nr of tracks */
18 stretch; /* !=0 means double track steps */ 18 stretch; /* bit 0 !=0 means double track steps */
19 /* bit 1 != 0 means swap sides */
20 /* bits 2..9 give the first sector */
21 /* number (the LSB is flipped) */
19#define FD_STRETCH 1 22#define FD_STRETCH 1
20#define FD_SWAPSIDES 2 23#define FD_SWAPSIDES 2
21#define FD_ZEROBASED 4 24#define FD_ZEROBASED 4
25#define FD_SECTBASEMASK 0x3FC
26#define FD_MKSECTBASE(s) (((s) ^ 1) << 2)
27#define FD_SECTBASE(floppy) ((((floppy)->stretch & FD_SECTBASEMASK) >> 2) ^ 1)
22 28
23 unsigned char gap, /* gap1 size */ 29 unsigned char gap, /* gap1 size */
24 30
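
The expanded stretch comment packs the first sector number into bits 2..9 with its LSB flipped. A short worked example of the round trip, purely illustrative:

    /*
     * Encoding a zero-based disk: FD_MKSECTBASE(0) == (0 ^ 1) << 2 == 4,
     * so (stretch & FD_SECTBASEMASK) >> 2 == 1 and FD_SECTBASE() flips
     * the LSB back, recovering 0.
     */
    struct floppy_struct f = { .stretch = FD_MKSECTBASE(0) };
    /* FD_SECTBASE(&f) evaluates to 0 again. */
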
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 580b513668fe..32477e8872d5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -86,7 +86,9 @@ extern int dir_notify_enable;
86#define READ_META (READ | (1 << BIO_RW_META)) 86#define READ_META (READ | (1 << BIO_RW_META))
87#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) 87#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
88#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC)) 88#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC))
89#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) 89#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
90#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
91#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
90 92
91#define SEL_IN 1 93#define SEL_IN 1
92#define SEL_OUT 2 94#define SEL_OUT 2
@@ -222,6 +224,7 @@ extern int dir_notify_enable;
222#define BLKTRACESTART _IO(0x12,116) 224#define BLKTRACESTART _IO(0x12,116)
223#define BLKTRACESTOP _IO(0x12,117) 225#define BLKTRACESTOP _IO(0x12,117)
224#define BLKTRACETEARDOWN _IO(0x12,118) 226#define BLKTRACETEARDOWN _IO(0x12,118)
227#define BLKDISCARD _IO(0x12,119)
225 228
226#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ 229#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
227#define FIBMAP _IO(0x00,1) /* bmap access */ 230#define FIBMAP _IO(0x00,1) /* bmap access */
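
BLKDISCARD gives user space a way to discard a range of a block device. A hedged user-space sketch; this hunk does not show the argument convention, so the {start, length} pair of 64-bit byte offsets below is an assumption based on how contemporary kernels implement the ioctl.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    static int example_discard_range(int fd, uint64_t start, uint64_t len)
    {
            uint64_t range[2] = { start, len };     /* byte offsets, assumed */

            return ioctl(fd, BLKDISCARD, &range);
    }
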
@@ -1682,6 +1685,7 @@ extern void chrdev_show(struct seq_file *,off_t);
1682 1685
1683/* fs/block_dev.c */ 1686/* fs/block_dev.c */
1684#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ 1687#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
1688#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
1685 1689
1686#ifdef CONFIG_BLOCK 1690#ifdef CONFIG_BLOCK
1687#define BLKDEV_MAJOR_HASH_SIZE 255 1691#define BLKDEV_MAJOR_HASH_SIZE 255
@@ -1718,6 +1722,9 @@ extern int fs_may_remount_ro(struct super_block *);
1718 */ 1722 */
1719#define bio_data_dir(bio) ((bio)->bi_rw & 1) 1723#define bio_data_dir(bio) ((bio)->bi_rw & 1)
1720 1724
1725extern void check_disk_size_change(struct gendisk *disk,
1726 struct block_device *bdev);
1727extern int revalidate_disk(struct gendisk *);
1721extern int check_disk_change(struct block_device *); 1728extern int check_disk_change(struct block_device *);
1722extern int __invalidate_device(struct block_device *); 1729extern int __invalidate_device(struct block_device *);
1723extern int invalidate_partition(struct gendisk *, int); 1730extern int invalidate_partition(struct gendisk *, int);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index be4f5e5bfe06..206cdf96c3a7 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -11,12 +11,15 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/kdev_t.h> 13#include <linux/kdev_t.h>
14#include <linux/rcupdate.h>
14 15
15#ifdef CONFIG_BLOCK 16#ifdef CONFIG_BLOCK
16 17
17#define kobj_to_dev(k) container_of(k, struct device, kobj) 18#define kobj_to_dev(k) container_of((k), struct device, kobj)
18#define dev_to_disk(device) container_of(device, struct gendisk, dev) 19#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
19#define dev_to_part(device) container_of(device, struct hd_struct, dev) 20#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
21#define disk_to_dev(disk) (&(disk)->part0.__dev)
22#define part_to_dev(part) (&((part)->__dev))
20 23
21extern struct device_type part_type; 24extern struct device_type part_type;
22extern struct kobject *block_depr; 25extern struct kobject *block_depr;
@@ -55,6 +58,9 @@ enum {
55 UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */ 58 UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
56}; 59};
57 60
61#define DISK_MAX_PARTS 256
62#define DISK_NAME_LEN 32
63
58#include <linux/major.h> 64#include <linux/major.h>
59#include <linux/device.h> 65#include <linux/device.h>
60#include <linux/smp.h> 66#include <linux/smp.h>
@@ -87,7 +93,7 @@ struct disk_stats {
87struct hd_struct { 93struct hd_struct {
88 sector_t start_sect; 94 sector_t start_sect;
89 sector_t nr_sects; 95 sector_t nr_sects;
90 struct device dev; 96 struct device __dev;
91 struct kobject *holder_dir; 97 struct kobject *holder_dir;
92 int policy, partno; 98 int policy, partno;
93#ifdef CONFIG_FAIL_MAKE_REQUEST 99#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -100,6 +106,7 @@ struct hd_struct {
100#else 106#else
101 struct disk_stats dkstats; 107 struct disk_stats dkstats;
102#endif 108#endif
109 struct rcu_head rcu_head;
103}; 110};
104 111
105#define GENHD_FL_REMOVABLE 1 112#define GENHD_FL_REMOVABLE 1
@@ -108,100 +115,148 @@ struct hd_struct {
108#define GENHD_FL_CD 8 115#define GENHD_FL_CD 8
109#define GENHD_FL_UP 16 116#define GENHD_FL_UP 16
110#define GENHD_FL_SUPPRESS_PARTITION_INFO 32 117#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
111#define GENHD_FL_FAIL 64 118#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
119
120#define BLK_SCSI_MAX_CMDS (256)
121#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
122
123struct blk_scsi_cmd_filter {
124 unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
125 unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
126 struct kobject kobj;
127};
128
129struct disk_part_tbl {
130 struct rcu_head rcu_head;
131 int len;
132 struct hd_struct *part[];
133};
112 134
113struct gendisk { 135struct gendisk {
136 /* major, first_minor and minors are input parameters only,
137 * don't use directly. Use disk_devt() and disk_max_parts().
138 */
114 int major; /* major number of driver */ 139 int major; /* major number of driver */
115 int first_minor; 140 int first_minor;
116 int minors; /* maximum number of minors, =1 for 141 int minors; /* maximum number of minors, =1 for
117 * disks that can't be partitioned. */ 142 * disks that can't be partitioned. */
118 char disk_name[32]; /* name of major driver */ 143
119 struct hd_struct **part; /* [indexed by minor] */ 144 char disk_name[DISK_NAME_LEN]; /* name of major driver */
145
146 /* Array of pointers to partitions indexed by partno.
147 * Protected with matching bdev lock but stat and other
148 * non-critical accesses use RCU. Always access through
149 * helpers.
150 */
151 struct disk_part_tbl *part_tbl;
152 struct hd_struct part0;
153
120 struct block_device_operations *fops; 154 struct block_device_operations *fops;
121 struct request_queue *queue; 155 struct request_queue *queue;
122 void *private_data; 156 void *private_data;
123 sector_t capacity;
124 157
125 int flags; 158 int flags;
126 struct device *driverfs_dev; // FIXME: remove 159 struct device *driverfs_dev; // FIXME: remove
127 struct device dev;
128 struct kobject *holder_dir;
129 struct kobject *slave_dir; 160 struct kobject *slave_dir;
130 161
131 struct timer_rand_state *random; 162 struct timer_rand_state *random;
132 int policy;
133 163
134 atomic_t sync_io; /* RAID */ 164 atomic_t sync_io; /* RAID */
135 unsigned long stamp;
136 int in_flight;
137#ifdef CONFIG_SMP
138 struct disk_stats *dkstats;
139#else
140 struct disk_stats dkstats;
141#endif
142 struct work_struct async_notify; 165 struct work_struct async_notify;
143#ifdef CONFIG_BLK_DEV_INTEGRITY 166#ifdef CONFIG_BLK_DEV_INTEGRITY
144 struct blk_integrity *integrity; 167 struct blk_integrity *integrity;
145#endif 168#endif
169 int node_id;
146}; 170};
147 171
148/* 172static inline struct gendisk *part_to_disk(struct hd_struct *part)
149 * Macros to operate on percpu disk statistics:
150 *
151 * The __ variants should only be called in critical sections. The full
152 * variants disable/enable preemption.
153 */
154static inline struct hd_struct *get_part(struct gendisk *gendiskp,
155 sector_t sector)
156{ 173{
157 struct hd_struct *part; 174 if (likely(part)) {
158 int i; 175 if (part->partno)
159 for (i = 0; i < gendiskp->minors - 1; i++) { 176 return dev_to_disk(part_to_dev(part)->parent);
160 part = gendiskp->part[i]; 177 else
161 if (part && part->start_sect <= sector 178 return dev_to_disk(part_to_dev(part));
162 && sector < part->start_sect + part->nr_sects)
163 return part;
164 } 179 }
165 return NULL; 180 return NULL;
166} 181}
167 182
168#ifdef CONFIG_SMP 183static inline int disk_max_parts(struct gendisk *disk)
169#define __disk_stat_add(gendiskp, field, addnd) \ 184{
170 (per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd) 185 if (disk->flags & GENHD_FL_EXT_DEVT)
186 return DISK_MAX_PARTS;
187 return disk->minors;
188}
171 189
172#define disk_stat_read(gendiskp, field) \ 190static inline bool disk_partitionable(struct gendisk *disk)
173({ \ 191{
174 typeof(gendiskp->dkstats->field) res = 0; \ 192 return disk_max_parts(disk) > 1;
175 int i; \ 193}
176 for_each_possible_cpu(i) \
177 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
178 res; \
179})
180 194
181static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { 195static inline dev_t disk_devt(struct gendisk *disk)
182 int i; 196{
197 return disk_to_dev(disk)->devt;
198}
183 199
184 for_each_possible_cpu(i) 200static inline dev_t part_devt(struct hd_struct *part)
185 memset(per_cpu_ptr(gendiskp->dkstats, i), value, 201{
186 sizeof(struct disk_stats)); 202 return part_to_dev(part)->devt;
187} 203}
188 204
189#define __part_stat_add(part, field, addnd) \ 205extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
190 (per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd)
191 206
192#define __all_stat_add(gendiskp, part, field, addnd, sector) \ 207static inline void disk_put_part(struct hd_struct *part)
193({ \ 208{
194 if (part) \ 209 if (likely(part))
195 __part_stat_add(part, field, addnd); \ 210 put_device(part_to_dev(part));
196 __disk_stat_add(gendiskp, field, addnd); \ 211}
197}) 212
213/*
214 * Smarter partition iterator without context limits.
215 */
216#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
217#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
218#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
219
220struct disk_part_iter {
221 struct gendisk *disk;
222 struct hd_struct *part;
223 int idx;
224 unsigned int flags;
225};
226
227extern void disk_part_iter_init(struct disk_part_iter *piter,
228 struct gendisk *disk, unsigned int flags);
229extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
230extern void disk_part_iter_exit(struct disk_part_iter *piter);
231
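
disk_part_iter_init()/next()/exit(), declared above, provide a reference-counted walk over a disk's partition table. A sketch of dumping the partitions; the function name is made up.

    static void example_dump_partitions(struct gendisk *disk)
    {
            struct disk_part_iter piter;
            struct hd_struct *part;

            disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
            while ((part = disk_part_iter_next(&piter)))
                    printk(KERN_DEBUG "%s: partno %d, %llu sectors\n",
                           disk->disk_name, part->partno,
                           (unsigned long long)part->nr_sects);
            disk_part_iter_exit(&piter);
    }
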
232extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
233 sector_t sector);
234
235/*
236 * Macros to operate on percpu disk statistics:
237 *
238 * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
239 * and should be called between disk_stat_lock() and
240 * disk_stat_unlock().
241 *
242 * part_stat_read() can be called at any time.
243 *
244 * part_stat_{add|set_all}() and {init|free}_part_stats are for
245 * internal use only.
246 */
247#ifdef CONFIG_SMP
248#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
249#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
250
251#define __part_stat_add(cpu, part, field, addnd) \
252 (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
198 253
199#define part_stat_read(part, field) \ 254#define part_stat_read(part, field) \
200({ \ 255({ \
201 typeof(part->dkstats->field) res = 0; \ 256 typeof((part)->dkstats->field) res = 0; \
202 int i; \ 257 int i; \
203 for_each_possible_cpu(i) \ 258 for_each_possible_cpu(i) \
204 res += per_cpu_ptr(part->dkstats, i)->field; \ 259 res += per_cpu_ptr((part)->dkstats, i)->field; \
205 res; \ 260 res; \
206}) 261})
207 262
@@ -213,171 +268,107 @@ static inline void part_stat_set_all(struct hd_struct *part, int value)
213 memset(per_cpu_ptr(part->dkstats, i), value, 268 memset(per_cpu_ptr(part->dkstats, i), value,
214 sizeof(struct disk_stats)); 269 sizeof(struct disk_stats));
215} 270}
216
217#else /* !CONFIG_SMP */
218#define __disk_stat_add(gendiskp, field, addnd) \
219 (gendiskp->dkstats.field += addnd)
220#define disk_stat_read(gendiskp, field) (gendiskp->dkstats.field)
221 271
222static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) 272static inline int init_part_stats(struct hd_struct *part)
223{ 273{
224 memset(&gendiskp->dkstats, value, sizeof (struct disk_stats)); 274 part->dkstats = alloc_percpu(struct disk_stats);
275 if (!part->dkstats)
276 return 0;
277 return 1;
225} 278}
226 279
227#define __part_stat_add(part, field, addnd) \ 280static inline void free_part_stats(struct hd_struct *part)
228 (part->dkstats.field += addnd)
229
230#define __all_stat_add(gendiskp, part, field, addnd, sector) \
231({ \
232 if (part) \
233 part->dkstats.field += addnd; \
234 __disk_stat_add(gendiskp, field, addnd); \
235})
236
237#define part_stat_read(part, field) (part->dkstats.field)
238
239static inline void part_stat_set_all(struct hd_struct *part, int value)
240{ 281{
241 memset(&part->dkstats, value, sizeof(struct disk_stats)); 282 free_percpu(part->dkstats);
242} 283}
243 284
244#endif /* CONFIG_SMP */ 285#else /* !CONFIG_SMP */
286#define part_stat_lock() ({ rcu_read_lock(); 0; })
287#define part_stat_unlock() rcu_read_unlock()
245 288
246#define disk_stat_add(gendiskp, field, addnd) \ 289#define __part_stat_add(cpu, part, field, addnd) \
247 do { \ 290 ((part)->dkstats.field += addnd)
248 preempt_disable(); \ 291
249 __disk_stat_add(gendiskp, field, addnd); \ 292#define part_stat_read(part, field) ((part)->dkstats.field)
250 preempt_enable(); \
251 } while (0)
252
253#define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1)
254#define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1)
255
256#define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1)
257#define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1)
258
259#define __disk_stat_sub(gendiskp, field, subnd) \
260 __disk_stat_add(gendiskp, field, -subnd)
261#define disk_stat_sub(gendiskp, field, subnd) \
262 disk_stat_add(gendiskp, field, -subnd)
263
264#define part_stat_add(gendiskp, field, addnd) \
265 do { \
266 preempt_disable(); \
267 __part_stat_add(gendiskp, field, addnd);\
268 preempt_enable(); \
269 } while (0)
270
271#define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1)
272#define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1)
273
274#define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1)
275#define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1)
276
277#define __part_stat_sub(gendiskp, field, subnd) \
278 __part_stat_add(gendiskp, field, -subnd)
279#define part_stat_sub(gendiskp, field, subnd) \
280 part_stat_add(gendiskp, field, -subnd)
281
282#define all_stat_add(gendiskp, part, field, addnd, sector) \
283 do { \
284 preempt_disable(); \
285 __all_stat_add(gendiskp, part, field, addnd, sector); \
286 preempt_enable(); \
287 } while (0)
288
289#define __all_stat_dec(gendiskp, field, sector) \
290 __all_stat_add(gendiskp, field, -1, sector)
291#define all_stat_dec(gendiskp, field, sector) \
292 all_stat_add(gendiskp, field, -1, sector)
293
294#define __all_stat_inc(gendiskp, part, field, sector) \
295 __all_stat_add(gendiskp, part, field, 1, sector)
296#define all_stat_inc(gendiskp, part, field, sector) \
297 all_stat_add(gendiskp, part, field, 1, sector)
298
299#define __all_stat_sub(gendiskp, part, field, subnd, sector) \
300 __all_stat_add(gendiskp, part, field, -subnd, sector)
301#define all_stat_sub(gendiskp, part, field, subnd, sector) \
302 all_stat_add(gendiskp, part, field, -subnd, sector)
303
304/* Inlines to alloc and free disk stats in struct gendisk */
305#ifdef CONFIG_SMP
306static inline int init_disk_stats(struct gendisk *disk)
307{
308 disk->dkstats = alloc_percpu(struct disk_stats);
309 if (!disk->dkstats)
310 return 0;
311 return 1;
312}
313 293
314static inline void free_disk_stats(struct gendisk *disk) 294static inline void part_stat_set_all(struct hd_struct *part, int value)
315{ 295{
316 free_percpu(disk->dkstats); 296 memset(&part->dkstats, value, sizeof(struct disk_stats));
317} 297}
318 298
319static inline int init_part_stats(struct hd_struct *part) 299static inline int init_part_stats(struct hd_struct *part)
320{ 300{
321 part->dkstats = alloc_percpu(struct disk_stats);
322 if (!part->dkstats)
323 return 0;
324 return 1; 301 return 1;
325} 302}
326 303
327static inline void free_part_stats(struct hd_struct *part) 304static inline void free_part_stats(struct hd_struct *part)
328{ 305{
329 free_percpu(part->dkstats);
330}
331
332#else /* CONFIG_SMP */
333static inline int init_disk_stats(struct gendisk *disk)
334{
335 return 1;
336} 306}
337 307
338static inline void free_disk_stats(struct gendisk *disk) 308#endif /* CONFIG_SMP */
339{
340}
341 309
342static inline int init_part_stats(struct hd_struct *part) 310#define part_stat_add(cpu, part, field, addnd) do { \
311 __part_stat_add((cpu), (part), field, addnd); \
312 if ((part)->partno) \
313 __part_stat_add((cpu), &part_to_disk((part))->part0, \
314 field, addnd); \
315} while (0)
316
317#define part_stat_dec(cpu, gendiskp, field) \
318 part_stat_add(cpu, gendiskp, field, -1)
319#define part_stat_inc(cpu, gendiskp, field) \
320 part_stat_add(cpu, gendiskp, field, 1)
321#define part_stat_sub(cpu, gendiskp, field, subnd) \
322 part_stat_add(cpu, gendiskp, field, -subnd)
323
324static inline void part_inc_in_flight(struct hd_struct *part)
343{ 325{
344 return 1; 326 part->in_flight++;
327 if (part->partno)
328 part_to_disk(part)->part0.in_flight++;
345} 329}
346 330
347static inline void free_part_stats(struct hd_struct *part) 331static inline void part_dec_in_flight(struct hd_struct *part)
348{ 332{
333 part->in_flight--;
334 if (part->partno)
335 part_to_disk(part)->part0.in_flight--;
349} 336}
350#endif /* CONFIG_SMP */
351 337
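
The per-cpu statistics macros above are meant to be bracketed by part_stat_lock()/part_stat_unlock(); on SMP the lock returns the CPU to account against. A hedged sketch of the completion-time pattern, with assumed parameter names, using part_round_stats() as declared just below.

    static void example_account_read(struct hd_struct *part, unsigned int nsect)
    {
            int cpu = part_stat_lock();

            part_stat_inc(cpu, part, ios[READ]);
            part_stat_add(cpu, part, sectors[READ], nsect);
            part_round_stats(cpu, part);
            part_stat_unlock();
    }
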
352/* drivers/block/ll_rw_blk.c */ 338/* drivers/block/ll_rw_blk.c */
353extern void disk_round_stats(struct gendisk *disk); 339extern void part_round_stats(int cpu, struct hd_struct *part);
354extern void part_round_stats(struct hd_struct *part);
355 340
356/* drivers/block/genhd.c */ 341/* drivers/block/genhd.c */
357extern int get_blkdev_list(char *, int); 342extern int get_blkdev_list(char *, int);
358extern void add_disk(struct gendisk *disk); 343extern void add_disk(struct gendisk *disk);
359extern void del_gendisk(struct gendisk *gp); 344extern void del_gendisk(struct gendisk *gp);
360extern void unlink_gendisk(struct gendisk *gp); 345extern void unlink_gendisk(struct gendisk *gp);
361extern struct gendisk *get_gendisk(dev_t dev, int *part); 346extern struct gendisk *get_gendisk(dev_t dev, int *partno);
347extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
362 348
363extern void set_device_ro(struct block_device *bdev, int flag); 349extern void set_device_ro(struct block_device *bdev, int flag);
364extern void set_disk_ro(struct gendisk *disk, int flag); 350extern void set_disk_ro(struct gendisk *disk, int flag);
365 351
352static inline int get_disk_ro(struct gendisk *disk)
353{
354 return disk->part0.policy;
355}
356
366/* drivers/char/random.c */ 357/* drivers/char/random.c */
367extern void add_disk_randomness(struct gendisk *disk); 358extern void add_disk_randomness(struct gendisk *disk);
368extern void rand_initialize_disk(struct gendisk *disk); 359extern void rand_initialize_disk(struct gendisk *disk);
369 360
370static inline sector_t get_start_sect(struct block_device *bdev) 361static inline sector_t get_start_sect(struct block_device *bdev)
371{ 362{
372 return bdev->bd_contains == bdev ? 0 : bdev->bd_part->start_sect; 363 return bdev->bd_part->start_sect;
373} 364}
374static inline sector_t get_capacity(struct gendisk *disk) 365static inline sector_t get_capacity(struct gendisk *disk)
375{ 366{
376 return disk->capacity; 367 return disk->part0.nr_sects;
377} 368}
378static inline void set_capacity(struct gendisk *disk, sector_t size) 369static inline void set_capacity(struct gendisk *disk, sector_t size)
379{ 370{
380 disk->capacity = size; 371 disk->part0.nr_sects = size;
381} 372}
382 373
383#ifdef CONFIG_SOLARIS_X86_PARTITION 374#ifdef CONFIG_SOLARIS_X86_PARTITION
@@ -527,9 +518,12 @@ struct unixware_disklabel {
527#define ADDPART_FLAG_RAID 1 518#define ADDPART_FLAG_RAID 1
528#define ADDPART_FLAG_WHOLEDISK 2 519#define ADDPART_FLAG_WHOLEDISK 2
529 520
530extern dev_t blk_lookup_devt(const char *name, int part); 521extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
531extern char *disk_name (struct gendisk *hd, int part, char *buf); 522extern void blk_free_devt(dev_t devt);
523extern dev_t blk_lookup_devt(const char *name, int partno);
524extern char *disk_name (struct gendisk *hd, int partno, char *buf);
532 525
526extern int disk_expand_part_tbl(struct gendisk *disk, int target);
533extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); 527extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
534extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); 528extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int);
535extern void delete_partition(struct gendisk *, int); 529extern void delete_partition(struct gendisk *, int);
@@ -546,16 +540,23 @@ extern void blk_register_region(dev_t devt, unsigned long range,
546 void *data); 540 void *data);
547extern void blk_unregister_region(dev_t devt, unsigned long range); 541extern void blk_unregister_region(dev_t devt, unsigned long range);
548 542
549static inline struct block_device *bdget_disk(struct gendisk *disk, int index) 543extern ssize_t part_size_show(struct device *dev,
550{ 544 struct device_attribute *attr, char *buf);
551 return bdget(MKDEV(disk->major, disk->first_minor) + index); 545extern ssize_t part_stat_show(struct device *dev,
552} 546 struct device_attribute *attr, char *buf);
547#ifdef CONFIG_FAIL_MAKE_REQUEST
548extern ssize_t part_fail_show(struct device *dev,
549 struct device_attribute *attr, char *buf);
550extern ssize_t part_fail_store(struct device *dev,
551 struct device_attribute *attr,
552 const char *buf, size_t count);
553#endif /* CONFIG_FAIL_MAKE_REQUEST */
553 554
554#else /* CONFIG_BLOCK */ 555#else /* CONFIG_BLOCK */
555 556
556static inline void printk_all_partitions(void) { } 557static inline void printk_all_partitions(void) { }
557 558
558static inline dev_t blk_lookup_devt(const char *name, int part) 559static inline dev_t blk_lookup_devt(const char *name, int partno)
559{ 560{
560 dev_t devt = MKDEV(0, 0); 561 dev_t devt = MKDEV(0, 0);
561 return devt; 562 return devt;
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index c3c19f926e6f..14d0df0b5749 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -118,7 +118,11 @@ struct gfs2_sb {
118 118
119 char sb_lockproto[GFS2_LOCKNAME_LEN]; 119 char sb_lockproto[GFS2_LOCKNAME_LEN];
120 char sb_locktable[GFS2_LOCKNAME_LEN]; 120 char sb_locktable[GFS2_LOCKNAME_LEN];
121 /* In gfs1, quota and license dinodes followed */ 121
122 struct gfs2_inum __pad3; /* Was quota inode in gfs1 */
123 struct gfs2_inum __pad4; /* Was licence inode in gfs1 */
124#define GFS2_HAS_UUID 1
125 __u8 sb_uuid[16]; /* The UUID, maybe 0 for backwards compat */
122}; 126};
123 127
124/* 128/*
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 06c338ef7f1b..8ea98db223e5 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -38,7 +38,7 @@ extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
38 void (*put)(struct klist_node *)); 38 void (*put)(struct klist_node *));
39 39
40struct klist_node { 40struct klist_node {
41 struct klist *n_klist; 41 void *n_klist; /* never access directly */
42 struct list_head n_node; 42 struct list_head n_node;
43 struct kref n_ref; 43 struct kref n_ref;
44 struct completion n_removed; 44 struct completion n_removed;
@@ -57,7 +57,6 @@ extern int klist_node_attached(struct klist_node *n);
57 57
58struct klist_iter { 58struct klist_iter {
59 struct klist *i_klist; 59 struct klist *i_klist;
60 struct list_head *i_head;
61 struct klist_node *i_cur; 60 struct klist_node *i_cur;
62}; 61};
63 62
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 225bfc5bd9ec..947cf84e555d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -146,6 +146,7 @@ enum {
146 ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ 146 ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
147 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ 147 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
148 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ 148 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
149 ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
149 ATA_DFLAG_INIT_MASK = (1 << 24) - 1, 150 ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
150 151
151 ATA_DFLAG_DETACH = (1 << 24), 152 ATA_DFLAG_DETACH = (1 << 24),
@@ -244,6 +245,7 @@ enum {
244 ATA_TMOUT_BOOT = 30000, /* heuristic */ 245 ATA_TMOUT_BOOT = 30000, /* heuristic */
245 ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ 246 ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
246 ATA_TMOUT_INTERNAL_QUICK = 5000, 247 ATA_TMOUT_INTERNAL_QUICK = 5000,
248 ATA_TMOUT_MAX_PARK = 30000,
247 249
248 /* FIXME: GoVault needs 2s but we can't afford that without 250 /* FIXME: GoVault needs 2s but we can't afford that without
249 * parallel probing. 800ms is enough for iVDR disk 251 * parallel probing. 800ms is enough for iVDR disk
@@ -319,8 +321,11 @@ enum {
319 ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 321 ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
320 ATA_EH_ENABLE_LINK = (1 << 3), 322 ATA_EH_ENABLE_LINK = (1 << 3),
321 ATA_EH_LPM = (1 << 4), /* link power management action */ 323 ATA_EH_LPM = (1 << 4), /* link power management action */
324 ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
322 325
323 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE, 326 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
327 ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
328 ATA_EH_ENABLE_LINK | ATA_EH_LPM,
324 329
325 /* ata_eh_info->flags */ 330 /* ata_eh_info->flags */
326 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 331 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
@@ -452,6 +457,7 @@ enum link_pm {
452 MEDIUM_POWER, 457 MEDIUM_POWER,
453}; 458};
454extern struct device_attribute dev_attr_link_power_management_policy; 459extern struct device_attribute dev_attr_link_power_management_policy;
460extern struct device_attribute dev_attr_unload_heads;
455extern struct device_attribute dev_attr_em_message_type; 461extern struct device_attribute dev_attr_em_message_type;
456extern struct device_attribute dev_attr_em_message; 462extern struct device_attribute dev_attr_em_message;
457extern struct device_attribute dev_attr_sw_activity; 463extern struct device_attribute dev_attr_sw_activity;
@@ -554,8 +560,8 @@ struct ata_ering {
554struct ata_device { 560struct ata_device {
555 struct ata_link *link; 561 struct ata_link *link;
556 unsigned int devno; /* 0 or 1 */ 562 unsigned int devno; /* 0 or 1 */
557 unsigned long flags; /* ATA_DFLAG_xxx */
558 unsigned int horkage; /* List of broken features */ 563 unsigned int horkage; /* List of broken features */
564 unsigned long flags; /* ATA_DFLAG_xxx */
559 struct scsi_device *sdev; /* attached SCSI device */ 565 struct scsi_device *sdev; /* attached SCSI device */
560#ifdef CONFIG_ATA_ACPI 566#ifdef CONFIG_ATA_ACPI
561 acpi_handle acpi_handle; 567 acpi_handle acpi_handle;
@@ -564,6 +570,7 @@ struct ata_device {
564 /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */ 570 /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
565 u64 n_sectors; /* size of device, if ATA */ 571 u64 n_sectors; /* size of device, if ATA */
566 unsigned int class; /* ATA_DEV_xxx */ 572 unsigned int class; /* ATA_DEV_xxx */
573 unsigned long unpark_deadline;
567 574
568 u8 pio_mode; 575 u8 pio_mode;
569 u8 dma_mode; 576 u8 dma_mode;
@@ -621,6 +628,7 @@ struct ata_eh_context {
621 [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; 628 [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
622 unsigned int classes[ATA_MAX_DEVICES]; 629 unsigned int classes[ATA_MAX_DEVICES];
623 unsigned int did_probe_mask; 630 unsigned int did_probe_mask;
631 unsigned int unloaded_mask;
624 unsigned int saved_ncq_enabled; 632 unsigned int saved_ncq_enabled;
625 u8 saved_xfer_mode[ATA_MAX_DEVICES]; 633 u8 saved_xfer_mode[ATA_MAX_DEVICES];
626 /* timestamp for the last reset attempt or success */ 634 /* timestamp for the last reset attempt or success */
@@ -688,7 +696,8 @@ struct ata_port {
688 unsigned int qc_active; 696 unsigned int qc_active;
689 int nr_active_links; /* #links with active qcs */ 697 int nr_active_links; /* #links with active qcs */
690 698
691 struct ata_link link; /* host default link */ 699 struct ata_link link; /* host default link */
700 struct ata_link *slave_link; /* see ata_slave_link_init() */
692 701
693 int nr_pmp_links; /* nr of available PMP links */ 702 int nr_pmp_links; /* nr of available PMP links */
694 struct ata_link *pmp_link; /* array of PMP links */ 703 struct ata_link *pmp_link; /* array of PMP links */
@@ -709,6 +718,7 @@ struct ata_port {
709 struct list_head eh_done_q; 718 struct list_head eh_done_q;
710 wait_queue_head_t eh_wait_q; 719 wait_queue_head_t eh_wait_q;
711 int eh_tries; 720 int eh_tries;
721 struct completion park_req_pending;
712 722
713 pm_message_t pm_mesg; 723 pm_message_t pm_mesg;
714 int *pm_result; 724 int *pm_result;
@@ -772,8 +782,8 @@ struct ata_port_operations {
772 /* 782 /*
773 * Optional features 783 * Optional features
774 */ 784 */
775 int (*scr_read)(struct ata_port *ap, unsigned int sc_reg, u32 *val); 785 int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
776 int (*scr_write)(struct ata_port *ap, unsigned int sc_reg, u32 val); 786 int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
777 void (*pmp_attach)(struct ata_port *ap); 787 void (*pmp_attach)(struct ata_port *ap);
778 void (*pmp_detach)(struct ata_port *ap); 788 void (*pmp_detach)(struct ata_port *ap);
779 int (*enable_pm)(struct ata_port *ap, enum link_pm policy); 789 int (*enable_pm)(struct ata_port *ap, enum link_pm policy);
@@ -895,6 +905,7 @@ extern void ata_port_disable(struct ata_port *);
895extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); 905extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
896extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, 906extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
897 const struct ata_port_info * const * ppi, int n_ports); 907 const struct ata_port_info * const * ppi, int n_ports);
908extern int ata_slave_link_init(struct ata_port *ap);
898extern int ata_host_start(struct ata_host *host); 909extern int ata_host_start(struct ata_host *host);
899extern int ata_host_register(struct ata_host *host, 910extern int ata_host_register(struct ata_host *host,
900 struct scsi_host_template *sht); 911 struct scsi_host_template *sht);
@@ -920,8 +931,8 @@ extern int sata_scr_valid(struct ata_link *link);
920extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); 931extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
921extern int sata_scr_write(struct ata_link *link, int reg, u32 val); 932extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
922extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); 933extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
923extern int ata_link_online(struct ata_link *link); 934extern bool ata_link_online(struct ata_link *link);
924extern int ata_link_offline(struct ata_link *link); 935extern bool ata_link_offline(struct ata_link *link);
925#ifdef CONFIG_PM 936#ifdef CONFIG_PM
926extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); 937extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
927extern void ata_host_resume(struct ata_host *host); 938extern void ata_host_resume(struct ata_host *host);
@@ -1098,6 +1109,7 @@ extern void ata_std_error_handler(struct ata_port *ap);
1098 */ 1109 */
1099extern const struct ata_port_operations ata_base_port_ops; 1110extern const struct ata_port_operations ata_base_port_ops;
1100extern const struct ata_port_operations sata_port_ops; 1111extern const struct ata_port_operations sata_port_ops;
1112extern struct device_attribute *ata_common_sdev_attrs[];
1101 1113
1102#define ATA_BASE_SHT(drv_name) \ 1114#define ATA_BASE_SHT(drv_name) \
1103 .module = THIS_MODULE, \ 1115 .module = THIS_MODULE, \
@@ -1112,7 +1124,8 @@ extern const struct ata_port_operations sata_port_ops;
1112 .proc_name = drv_name, \ 1124 .proc_name = drv_name, \
1113 .slave_configure = ata_scsi_slave_config, \ 1125 .slave_configure = ata_scsi_slave_config, \
1114 .slave_destroy = ata_scsi_slave_destroy, \ 1126 .slave_destroy = ata_scsi_slave_destroy, \
1115 .bios_param = ata_std_bios_param 1127 .bios_param = ata_std_bios_param, \
1128 .sdev_attrs = ata_common_sdev_attrs
1116 1129
1117#define ATA_NCQ_SHT(drv_name) \ 1130#define ATA_NCQ_SHT(drv_name) \
1118 ATA_BASE_SHT(drv_name), \ 1131 ATA_BASE_SHT(drv_name), \
@@ -1134,7 +1147,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
1134 1147
1135static inline int ata_is_host_link(const struct ata_link *link) 1148static inline int ata_is_host_link(const struct ata_link *link)
1136{ 1149{
1137 return link == &link->ap->link; 1150 return link == &link->ap->link || link == link->ap->slave_link;
1138} 1151}
1139#else /* CONFIG_SATA_PMP */ 1152#else /* CONFIG_SATA_PMP */
1140static inline bool sata_pmp_supported(struct ata_port *ap) 1153static inline bool sata_pmp_supported(struct ata_port *ap)
@@ -1167,7 +1180,7 @@ static inline int sata_srst_pmp(struct ata_link *link)
1167 printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) 1180 printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
1168 1181
1169#define ata_link_printk(link, lv, fmt, args...) do { \ 1182#define ata_link_printk(link, lv, fmt, args...) do { \
1170 if (sata_pmp_attached((link)->ap)) \ 1183 if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \
1171 printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ 1184 printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \
1172 (link)->pmp , ##args); \ 1185 (link)->pmp , ##args); \
1173 else \ 1186 else \
@@ -1265,34 +1278,17 @@ static inline int ata_link_active(struct ata_link *link)
1265 return ata_tag_valid(link->active_tag) || link->sactive; 1278 return ata_tag_valid(link->active_tag) || link->sactive;
1266} 1279}
1267 1280
1268static inline struct ata_link *ata_port_first_link(struct ata_port *ap) 1281extern struct ata_link *__ata_port_next_link(struct ata_port *ap,
1269{ 1282 struct ata_link *link,
1270 if (sata_pmp_attached(ap)) 1283 bool dev_only);
1271 return ap->pmp_link;
1272 return &ap->link;
1273}
1274
1275static inline struct ata_link *ata_port_next_link(struct ata_link *link)
1276{
1277 struct ata_port *ap = link->ap;
1278
1279 if (ata_is_host_link(link)) {
1280 if (!sata_pmp_attached(ap))
1281 return NULL;
1282 return ap->pmp_link;
1283 }
1284
1285 if (++link < ap->nr_pmp_links + ap->pmp_link)
1286 return link;
1287 return NULL;
1288}
1289 1284
1290#define __ata_port_for_each_link(lk, ap) \ 1285#define __ata_port_for_each_link(link, ap) \
1291 for ((lk) = &(ap)->link; (lk); (lk) = ata_port_next_link(lk)) 1286 for ((link) = __ata_port_next_link((ap), NULL, false); (link); \
1287 (link) = __ata_port_next_link((ap), (link), false))
1292 1288
1293#define ata_port_for_each_link(link, ap) \ 1289#define ata_port_for_each_link(link, ap) \
1294 for ((link) = ata_port_first_link(ap); (link); \ 1290 for ((link) = __ata_port_next_link((ap), NULL, true); (link); \
1295 (link) = ata_port_next_link(link)) 1291 (link) = __ata_port_next_link((ap), (link), true))
1296 1292
1297#define ata_link_for_each_dev(dev, link) \ 1293#define ata_link_for_each_dev(dev, link) \
1298 for ((dev) = (link)->device; \ 1294 for ((dev) = (link)->device; \
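
As a point of reference (not part of the patch), a minimal sketch of how driver code might walk a port with the reworked iterators above; the helper name is invented, everything else comes from the declarations in this header.

static int my_count_enabled_devices(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	int count = 0;

	/* host link plus any PMP links; see __ata_port_next_link()
	 * for the exact traversal, including the new slave_link */
	ata_port_for_each_link(link, ap)
		ata_link_for_each_dev(dev, link)
			if (ata_dev_enabled(dev))
				count++;
	return count;
}
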
diff --git a/include/linux/major.h b/include/linux/major.h
index 53d5fafd85c3..88249452b935 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -170,4 +170,6 @@
170 170
171#define VIOTAPE_MAJOR 230 171#define VIOTAPE_MAJOR 230
172 172
173#define BLOCK_EXT_MAJOR 259
174
173#endif 175#endif
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 310e61606415..8b4aa0523db7 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -41,6 +41,8 @@ struct mtd_blktrans_ops {
41 unsigned long block, char *buffer); 41 unsigned long block, char *buffer);
42 int (*writesect)(struct mtd_blktrans_dev *dev, 42 int (*writesect)(struct mtd_blktrans_dev *dev,
43 unsigned long block, char *buffer); 43 unsigned long block, char *buffer);
44 int (*discard)(struct mtd_blktrans_dev *dev,
45 unsigned long block, unsigned nr_blocks);
44 46
45 /* Block layer ioctls */ 47 /* Block layer ioctls */
46 int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo); 48 int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);
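
A hedged sketch of what a flash translation layer might plug into the new discard hook; the my_ftl_* names are hypothetical, only the operation signature comes from the header above.

static int my_ftl_discard(struct mtd_blktrans_dev *dev,
			  unsigned long block, unsigned nr_blocks)
{
	/* a real FTL would mark these sectors unused in its own mapping;
	 * no media I/O is needed to satisfy a discard */
	return my_ftl_mark_unused(dev, block, nr_blocks);
}

static struct mtd_blktrans_ops my_ftl_tr = {
	.name		= "myftl",
	.readsect	= my_ftl_readsect,
	.writesect	= my_ftl_writesect,
	.discard	= my_ftl_discard,
	/* remaining ops as for any other blktrans driver */
};
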
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 4ab843622727..5f89b62e6983 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -40,12 +40,21 @@
40#include <linux/cpumask.h> 40#include <linux/cpumask.h>
41#include <linux/seqlock.h> 41#include <linux/seqlock.h>
42 42
43#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
44#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */
45#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
46#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
43 47
44/* Global control variables for rcupdate callback mechanism. */ 48/* Global control variables for rcupdate callback mechanism. */
45struct rcu_ctrlblk { 49struct rcu_ctrlblk {
46 long cur; /* Current batch number. */ 50 long cur; /* Current batch number. */
47 long completed; /* Number of the last completed batch */ 51 long completed; /* Number of the last completed batch */
48 int next_pending; /* Is the next batch already waiting? */ 52 long pending; /* Number of the last pending batch */
53#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
54 unsigned long gp_start; /* Time at which GP started in jiffies. */
55 unsigned long jiffies_stall;
56 /* Time at which to check for CPU stalls. */
57#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
49 58
50 int signaled; 59 int signaled;
51 60
@@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
66 return (a - b) > 0; 75 return (a - b) > 0;
67} 76}
68 77
 69/* 78/* Per-CPU data for Read-Copy Update. */
70 * Per-CPU data for Read-Copy UPdate.
71 * nxtlist - new callbacks are added here
72 * curlist - current batch for which quiescent cycle started if any
73 */
74struct rcu_data { 79struct rcu_data {
75 /* 1) quiescent state handling : */ 80 /* 1) quiescent state handling : */
76 long quiescbatch; /* Batch # for grace period */ 81 long quiescbatch; /* Batch # for grace period */
@@ -78,12 +83,24 @@ struct rcu_data {
78 int qs_pending; /* core waits for quiesc state */ 83 int qs_pending; /* core waits for quiesc state */
79 84
80 /* 2) batch handling */ 85 /* 2) batch handling */
81 long batch; /* Batch # for current RCU batch */ 86 /*
87 * if nxtlist is not NULL, then:
88 * batch:
89 * The batch # for the last entry of nxtlist
90 * [*nxttail[1], NULL = *nxttail[2]):
91 * Entries that batch # <= batch
92 * [*nxttail[0], *nxttail[1]):
93 * Entries that batch # <= batch - 1
94 * [nxtlist, *nxttail[0]):
95 * Entries that batch # <= batch - 2
96 * The grace period for these entries has completed, and
97 * the other grace-period-completed entries may be moved
98 * here temporarily in rcu_process_callbacks().
99 */
100 long batch;
82 struct rcu_head *nxtlist; 101 struct rcu_head *nxtlist;
83 struct rcu_head **nxttail; 102 struct rcu_head **nxttail[3];
84 long qlen; /* # of queued callbacks */ 103 long qlen; /* # of queued callbacks */
85 struct rcu_head *curlist;
86 struct rcu_head **curtail;
87 struct rcu_head *donelist; 104 struct rcu_head *donelist;
88 struct rcu_head **donetail; 105 struct rcu_head **donetail;
89 long blimit; /* Upper limit on a processed batch */ 106 long blimit; /* Upper limit on a processed batch */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index eb4443c7e05b..e649bd3f2c97 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
198 at->prev = last; 198 at->prev = last;
199} 199}
200 200
201/**
202 * list_for_each_rcu - iterate over an rcu-protected list
203 * @pos: the &struct list_head to use as a loop cursor.
204 * @head: the head for your list.
205 *
206 * This list-traversal primitive may safely run concurrently with
207 * the _rcu list-mutation primitives such as list_add_rcu()
208 * as long as the traversal is guarded by rcu_read_lock().
209 */
210#define list_for_each_rcu(pos, head) \
211 for (pos = rcu_dereference((head)->next); \
212 prefetch(pos->next), pos != (head); \
213 pos = rcu_dereference(pos->next))
214
215#define __list_for_each_rcu(pos, head) \ 201#define __list_for_each_rcu(pos, head) \
216 for (pos = rcu_dereference((head)->next); \ 202 for (pos = rcu_dereference((head)->next); \
217 pos != (head); \ 203 pos != (head); \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index e8b4039cfb2f..86f1f5e43e33 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -133,6 +133,26 @@ struct rcu_head {
133#define rcu_read_unlock_bh() __rcu_read_unlock_bh() 133#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
134 134
135/** 135/**
 136 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
137 *
138 * Should be used with either
139 * - synchronize_sched()
140 * or
141 * - call_rcu_sched() and rcu_barrier_sched()
 142 * on the write-side to ensure proper synchronization.
143 */
144#define rcu_read_lock_sched() preempt_disable()
145
146/*
 147 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
148 *
149 * See rcu_read_lock_sched for more information.
150 */
151#define rcu_read_unlock_sched() preempt_enable()
152
153
154
155/**
136 * rcu_dereference - fetch an RCU-protected pointer in an 156 * rcu_dereference - fetch an RCU-protected pointer in an
137 * RCU read-side critical section. This pointer may later 157 * RCU read-side critical section. This pointer may later
138 * be safely dereferenced. 158 * be safely dereferenced.
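
A usage sketch (not from this patch) pairing the new rcu_read_lock_sched()/rcu_read_unlock_sched() markers with synchronize_sched() on the update side; struct my_cfg and the surrounding functions are invented for illustration.

static struct my_cfg {
	int threshold;
} *my_cfg_ptr;

static int my_read_threshold(void)
{
	struct my_cfg *cfg;
	int val = -1;

	rcu_read_lock_sched();		/* i.e. preempt_disable() */
	cfg = rcu_dereference(my_cfg_ptr);
	if (cfg)
		val = cfg->threshold;
	rcu_read_unlock_sched();	/* i.e. preempt_enable() */
	return val;
}

static void my_replace_cfg(struct my_cfg *new_cfg)
{
	struct my_cfg *old = my_cfg_ptr;

	rcu_assign_pointer(my_cfg_ptr, new_cfg);
	synchronize_sched();		/* wait out all sched readers */
	kfree(old);
}
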
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 0967f03b0705..3e05c09b54a2 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
57 rdssp->sched_qs++; 57 rdssp->sched_qs++;
58} 58}
59#define rcu_bh_qsctr_inc(cpu) 59#define rcu_bh_qsctr_inc(cpu)
60#define call_rcu_bh(head, rcu) call_rcu(head, rcu) 60
61/*
62 * Someone might want to pass call_rcu_bh as a function pointer.
63 * So this needs to just be a rename and not a macro function.
64 * (no parentheses)
65 */
66#define call_rcu_bh call_rcu
61 67
62/** 68/**
63 * call_rcu_sched - Queue RCU callback for invocation after sched grace period. 69 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
111struct softirq_action; 117struct softirq_action;
112 118
113#ifdef CONFIG_NO_HZ 119#ifdef CONFIG_NO_HZ
114DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
115 120
116static inline void rcu_enter_nohz(void) 121static inline void rcu_enter_nohz(void)
117{ 122{
@@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
126{ 131{
127 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); 132 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
128 133
129 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
130 __get_cpu_var(rcu_dyntick_sched).dynticks++; 134 __get_cpu_var(rcu_dyntick_sched).dynticks++;
135 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
131 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), 136 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
132 &rs); 137 &rs);
133} 138}
diff --git a/include/linux/security.h b/include/linux/security.h
index 80c4d002864c..f5c4a51eb42e 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1560,11 +1560,6 @@ struct security_operations {
1560extern int security_init(void); 1560extern int security_init(void);
1561extern int security_module_enable(struct security_operations *ops); 1561extern int security_module_enable(struct security_operations *ops);
1562extern int register_security(struct security_operations *ops); 1562extern int register_security(struct security_operations *ops);
1563extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
1564 struct dentry *parent, void *data,
1565 const struct file_operations *fops);
1566extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
1567extern void securityfs_remove(struct dentry *dentry);
1568 1563
1569/* Security operations */ 1564/* Security operations */
1570int security_ptrace_may_access(struct task_struct *child, unsigned int mode); 1565int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
@@ -2424,25 +2419,6 @@ static inline int security_netlink_recv(struct sk_buff *skb, int cap)
2424 return cap_netlink_recv(skb, cap); 2419 return cap_netlink_recv(skb, cap);
2425} 2420}
2426 2421
2427static inline struct dentry *securityfs_create_dir(const char *name,
2428 struct dentry *parent)
2429{
2430 return ERR_PTR(-ENODEV);
2431}
2432
2433static inline struct dentry *securityfs_create_file(const char *name,
2434 mode_t mode,
2435 struct dentry *parent,
2436 void *data,
2437 const struct file_operations *fops)
2438{
2439 return ERR_PTR(-ENODEV);
2440}
2441
2442static inline void securityfs_remove(struct dentry *dentry)
2443{
2444}
2445
2446static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 2422static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
2447{ 2423{
2448 return -EOPNOTSUPP; 2424 return -EOPNOTSUPP;
@@ -2806,5 +2782,35 @@ static inline void security_audit_rule_free(void *lsmrule)
2806#endif /* CONFIG_SECURITY */ 2782#endif /* CONFIG_SECURITY */
2807#endif /* CONFIG_AUDIT */ 2783#endif /* CONFIG_AUDIT */
2808 2784
2785#ifdef CONFIG_SECURITYFS
2786
2787extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
2788 struct dentry *parent, void *data,
2789 const struct file_operations *fops);
2790extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
2791extern void securityfs_remove(struct dentry *dentry);
2792
2793#else /* CONFIG_SECURITYFS */
2794
2795static inline struct dentry *securityfs_create_dir(const char *name,
2796 struct dentry *parent)
2797{
2798 return ERR_PTR(-ENODEV);
2799}
2800
2801static inline struct dentry *securityfs_create_file(const char *name,
2802 mode_t mode,
2803 struct dentry *parent,
2804 void *data,
2805 const struct file_operations *fops)
2806{
2807 return ERR_PTR(-ENODEV);
2808}
2809
2810static inline void securityfs_remove(struct dentry *dentry)
2811{}
2812
2813#endif
2814
2809#endif /* ! __LINUX_SECURITY_H */ 2815#endif /* ! __LINUX_SECURITY_H */
2810 2816
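
An illustrative consumer of the securityfs interface declared above, roughly what an LSM's init path looks like when CONFIG_SECURITYFS is enabled; "my_lsm" and my_policy_fops are placeholders.

static struct dentry *my_lsm_dir, *my_policy;

static int __init my_lsm_fs_init(void)
{
	my_lsm_dir = securityfs_create_dir("my_lsm", NULL);
	if (IS_ERR(my_lsm_dir))
		return PTR_ERR(my_lsm_dir);

	my_policy = securityfs_create_file("policy", 0600, my_lsm_dir,
					   NULL, &my_policy_fops);
	if (IS_ERR(my_policy)) {
		securityfs_remove(my_lsm_dir);
		return PTR_ERR(my_policy);
	}
	return 0;
}

With CONFIG_SECURITYFS disabled the inline stubs return ERR_PTR(-ENODEV), so the same caller compiles and fails cleanly at runtime.
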
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
new file mode 100644
index 000000000000..a3eb2f65b656
--- /dev/null
+++ b/include/linux/string_helpers.h
@@ -0,0 +1,16 @@
1#ifndef _LINUX_STRING_HELPERS_H_
2#define _LINUX_STRING_HELPERS_H_
3
4#include <linux/types.h>
5
6/* Descriptions of the types of units to
7 * print in */
8enum string_size_units {
9 STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
10 STRING_UNITS_2, /* use binary powers of 2^10 */
11};
12
13int string_get_size(u64 size, enum string_size_units units,
14 char *buf, int len);
15
16#endif
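
Presumed usage of the new helper, formatting the same byte count in both unit systems; the exact output strings depend on the lib/string_helpers.c implementation and are only indicative.

char buf[16];
u64 bytes = 512ULL * 1024 * 1024 * 1024;	/* 549,755,813,888 bytes */

string_get_size(bytes, STRING_UNITS_10, buf, sizeof(buf));
/* buf now holds something like "549 GB" (powers of 10^3) */

string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
/* buf now holds something like "512 GiB" (powers of 2^10) */
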
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index f9f6e793575c..855bf95963e7 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -75,7 +75,6 @@ struct scsi_cmnd {
75 75
76 int retries; 76 int retries;
77 int allowed; 77 int allowed;
78 int timeout_per_command;
79 78
80 unsigned char prot_op; 79 unsigned char prot_op;
81 unsigned char prot_type; 80 unsigned char prot_type;
@@ -86,7 +85,6 @@ struct scsi_cmnd {
86 /* These elements define the operation we are about to perform */ 85 /* These elements define the operation we are about to perform */
87 unsigned char *cmnd; 86 unsigned char *cmnd;
88 87
89 struct timer_list eh_timeout; /* Used to time out the command. */
90 88
91 /* These elements define the operation we ultimately want to perform */ 89 /* These elements define the operation we ultimately want to perform */
92 struct scsi_data_buffer sdb; 90 struct scsi_data_buffer sdb;
@@ -139,7 +137,6 @@ extern void scsi_put_command(struct scsi_cmnd *);
139extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *, 137extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
140 struct device *); 138 struct device *);
141extern void scsi_finish_command(struct scsi_cmnd *cmd); 139extern void scsi_finish_command(struct scsi_cmnd *cmd);
142extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
143 140
144extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, 141extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
145 size_t *offset, size_t *len); 142 size_t *offset, size_t *len);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 80b2e93c2936..b49e725be039 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -42,9 +42,11 @@ enum scsi_device_state {
42 * originate in the mid-layer) */ 42 * originate in the mid-layer) */
43 SDEV_OFFLINE, /* Device offlined (by error handling or 43 SDEV_OFFLINE, /* Device offlined (by error handling or
44 * user request */ 44 * user request */
45 SDEV_BLOCK, /* Device blocked by scsi lld. No scsi 45 SDEV_BLOCK, /* Device blocked by scsi lld. No
46 * commands from user or midlayer should be issued 46 * scsi commands from user or midlayer
47 * to the scsi lld. */ 47 * should be issued to the scsi
48 * lld. */
49 SDEV_CREATED_BLOCK, /* same as above but for created devices */
48}; 50};
49 51
50enum scsi_device_event { 52enum scsi_device_event {
@@ -384,10 +386,23 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
384#define scmd_id(scmd) sdev_id((scmd)->device) 386#define scmd_id(scmd) sdev_id((scmd)->device)
385#define scmd_channel(scmd) sdev_channel((scmd)->device) 387#define scmd_channel(scmd) sdev_channel((scmd)->device)
386 388
389/*
390 * checks for positions of the SCSI state machine
391 */
387static inline int scsi_device_online(struct scsi_device *sdev) 392static inline int scsi_device_online(struct scsi_device *sdev)
388{ 393{
389 return sdev->sdev_state != SDEV_OFFLINE; 394 return sdev->sdev_state != SDEV_OFFLINE;
390} 395}
396static inline int scsi_device_blocked(struct scsi_device *sdev)
397{
398 return sdev->sdev_state == SDEV_BLOCK ||
399 sdev->sdev_state == SDEV_CREATED_BLOCK;
400}
401static inline int scsi_device_created(struct scsi_device *sdev)
402{
403 return sdev->sdev_state == SDEV_CREATED ||
404 sdev->sdev_state == SDEV_CREATED_BLOCK;
405}
391 406
392/* accessor functions for the SCSI parameters */ 407/* accessor functions for the SCSI parameters */
393static inline int scsi_device_sync(struct scsi_device *sdev) 408static inline int scsi_device_sync(struct scsi_device *sdev)
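
A small, hypothetical use of the new state helpers; the function itself is not kernel code, only scsi_device_online/blocked/created are.

static int my_sdev_ready_for_io(struct scsi_device *sdev)
{
	if (!scsi_device_online(sdev))
		return 0;	/* offlined by EH or the user */
	if (scsi_device_blocked(sdev))
		return 0;	/* SDEV_BLOCK or SDEV_CREATED_BLOCK */
	if (scsi_device_created(sdev))
		return 0;	/* still being probed/configured */
	return 1;
}
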
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 44a55d1bf530..d123ca84e732 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -43,13 +43,6 @@ struct blk_queue_tags;
43#define DISABLE_CLUSTERING 0 43#define DISABLE_CLUSTERING 0
44#define ENABLE_CLUSTERING 1 44#define ENABLE_CLUSTERING 1
45 45
46enum scsi_eh_timer_return {
47 EH_NOT_HANDLED,
48 EH_HANDLED,
49 EH_RESET_TIMER,
50};
51
52
53struct scsi_host_template { 46struct scsi_host_template {
54 struct module *module; 47 struct module *module;
55 const char *name; 48 const char *name;
@@ -347,7 +340,7 @@ struct scsi_host_template {
347 * 340 *
348 * Status: OPTIONAL 341 * Status: OPTIONAL
349 */ 342 */
350 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 343 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
351 344
352 /* 345 /*
353 * Name of proc directory 346 * Name of proc directory
diff --git a/include/scsi/scsi_netlink.h b/include/scsi/scsi_netlink.h
index 8c1470cc8209..536752c40d41 100644
--- a/include/scsi/scsi_netlink.h
+++ b/include/scsi/scsi_netlink.h
@@ -22,6 +22,9 @@
22#ifndef SCSI_NETLINK_H 22#ifndef SCSI_NETLINK_H
23#define SCSI_NETLINK_H 23#define SCSI_NETLINK_H
24 24
25#include <linux/netlink.h>
26
27
25/* 28/*
26 * This file intended to be included by both kernel and user space 29 * This file intended to be included by both kernel and user space
27 */ 30 */
@@ -55,7 +58,41 @@ struct scsi_nl_hdr {
55#define SCSI_NL_TRANSPORT_FC 1 58#define SCSI_NL_TRANSPORT_FC 1
56#define SCSI_NL_MAX_TRANSPORTS 2 59#define SCSI_NL_MAX_TRANSPORTS 2
57 60
58/* scsi_nl_hdr->msgtype values are defined in each transport */ 61/* Transport-based scsi_nl_hdr->msgtype values are defined in each transport */
62
63/*
64 * GENERIC SCSI scsi_nl_hdr->msgtype Values
65 */
66 /* kernel -> user */
67#define SCSI_NL_SHOST_VENDOR 0x0001
68 /* user -> kernel */
69/* SCSI_NL_SHOST_VENDOR msgtype is kernel->user and user->kernel */
70
71
72/*
73 * Message Structures :
74 */
75
76/* macro to round up message lengths to 8byte boundary */
77#define SCSI_NL_MSGALIGN(len) (((len) + 7) & ~7)
78
79
80/*
81 * SCSI HOST Vendor Unique messages :
82 * SCSI_NL_SHOST_VENDOR
83 *
84 * Note: The Vendor Unique message payload will begin directly after
85 * this structure, with the length of the payload per vmsg_datalen.
86 *
87 * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
88 * formatting requirements specified below
89 */
90struct scsi_nl_host_vendor_msg {
91 struct scsi_nl_hdr snlh; /* must be 1st element ! */
92 uint64_t vendor_id;
93 uint16_t host_no;
94 uint16_t vmsg_datalen;
95} __attribute__((aligned(sizeof(uint64_t))));
59 96
60 97
61/* 98/*
@@ -83,5 +120,28 @@ struct scsi_nl_hdr {
83 } 120 }
84 121
85 122
123#ifdef __KERNEL__
124
125#include <scsi/scsi_host.h>
126
127/* Exported Kernel Interfaces */
128int scsi_nl_add_transport(u8 tport,
129 int (*msg_handler)(struct sk_buff *),
130 void (*event_handler)(struct notifier_block *, unsigned long, void *));
131void scsi_nl_remove_transport(u8 tport);
132
133int scsi_nl_add_driver(u64 vendor_id, struct scsi_host_template *hostt,
134 int (*nlmsg_handler)(struct Scsi_Host *shost, void *payload,
135 u32 len, u32 pid),
136 void (*nlevt_handler)(struct notifier_block *nb,
137 unsigned long event, void *notify_ptr));
138void scsi_nl_remove_driver(u64 vendor_id);
139
140void scsi_nl_send_transport_msg(u32 pid, struct scsi_nl_hdr *hdr);
141int scsi_nl_send_vendor_msg(u32 pid, unsigned short host_no, u64 vendor_id,
142 char *data_buf, u32 data_len);
143
144#endif /* __KERNEL__ */
145
86#endif /* SCSI_NETLINK_H */ 146#endif /* SCSI_NETLINK_H */
87 147
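
A sketch of an LLD posting a vendor-unique event through the exported interface above; MY_VENDOR_ID, the pid and the payload are placeholders, and the vendor_id formatting rules referenced in the comments still apply.

#define MY_VENDOR_ID	0x0000AB0123450000ULL	/* made-up value */

static void my_lld_post_event(struct Scsi_Host *shost, u32 pid,
			      char *payload, u32 len)
{
	int err;

	err = scsi_nl_send_vendor_msg(pid, shost->host_no, MY_VENDOR_ID,
				      payload, len);
	if (err)
		printk(KERN_WARNING "my_lld: vendor msg not sent (%d)\n", err);
}
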
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index 490bd13a634c..0de32cd4e8a7 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -21,6 +21,7 @@
21#define SCSI_TRANSPORT_H 21#define SCSI_TRANSPORT_H
22 22
23#include <linux/transport_class.h> 23#include <linux/transport_class.h>
24#include <linux/blkdev.h>
24#include <scsi/scsi_host.h> 25#include <scsi/scsi_host.h>
25#include <scsi/scsi_device.h> 26#include <scsi/scsi_device.h>
26 27
@@ -64,7 +65,7 @@ struct scsi_transport_template {
64 * begin counting again 65 * begin counting again
65 * EH_NOT_HANDLED Begin normal error recovery 66 * EH_NOT_HANDLED Begin normal error recovery
66 */ 67 */
67 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 68 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
68 69
69 /* 70 /*
70 * Used as callback for the completion of i_t_nexus request 71 * Used as callback for the completion of i_t_nexus request
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 878373c32ef7..21018a4df452 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -167,6 +167,26 @@ enum fc_tgtid_binding_type {
167struct device_attribute dev_attr_vport_##_name = \ 167struct device_attribute dev_attr_vport_##_name = \
168 __ATTR(_name,_mode,_show,_store) 168 __ATTR(_name,_mode,_show,_store)
169 169
170/*
171 * fc_vport_identifiers: This set of data contains all elements
172 * to uniquely identify and instantiate a FC virtual port.
173 *
174 * Notes:
175 * symbolic_name: The driver is to append the symbolic_name string data
176 * to the symbolic_node_name data that it generates by default.
177 * the resulting combination should then be registered with the switch.
178 * It is expected that things like Xen may stuff a VM title into
179 * this field.
180 */
181#define FC_VPORT_SYMBOLIC_NAMELEN 64
182struct fc_vport_identifiers {
183 u64 node_name;
184 u64 port_name;
185 u32 roles;
186 bool disable;
187 enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
188 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
189};
170 190
171/* 191/*
172 * FC Virtual Port Attributes 192 * FC Virtual Port Attributes
@@ -197,7 +217,6 @@ struct device_attribute dev_attr_vport_##_name = \
197 * managed by the transport w/o driver interaction. 217 * managed by the transport w/o driver interaction.
198 */ 218 */
199 219
200#define FC_VPORT_SYMBOLIC_NAMELEN 64
201struct fc_vport { 220struct fc_vport {
202 /* Fixed Attributes */ 221 /* Fixed Attributes */
203 222
@@ -732,6 +751,8 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
732 * be sure to read the Vendor Type and ID formatting requirements 751 * be sure to read the Vendor Type and ID formatting requirements
733 * specified in scsi_netlink.h 752 * specified in scsi_netlink.h
734 */ 753 */
754struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
755 struct fc_vport_identifiers *);
735int fc_vport_terminate(struct fc_vport *vport); 756int fc_vport_terminate(struct fc_vport *vport);
736 757
737#endif /* SCSI_TRANSPORT_FC_H */ 758#endif /* SCSI_TRANSPORT_FC_H */
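
For illustration, how a management path might instantiate an NPIV vport through the new fc_vport_create() entry point; the WWNN/WWPN values and the symbolic name are placeholders.

static struct fc_vport *my_create_npiv_vport(struct Scsi_Host *shost)
{
	struct fc_vport_identifiers vid = {
		.node_name	= 0x2000000000000001ULL,	/* placeholder WWNN */
		.port_name	= 0x2100000000000001ULL,	/* placeholder WWPN */
		.roles		= FC_PORT_ROLE_FCP_INITIATOR,
		.disable	= false,
		.vport_type	= FC_PORTTYPE_NPIV,	/* only type allowed */
	};

	strlcpy(vid.symbolic_name, "vm42", sizeof(vid.symbolic_name));
	return fc_vport_create(shost, 0 /* channel */, &vid);
}
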
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 3715feb8446d..d055b1914c3d 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -263,6 +263,10 @@ retry:
263 printk("Please append a correct \"root=\" boot option; here are the available partitions:\n"); 263 printk("Please append a correct \"root=\" boot option; here are the available partitions:\n");
264 264
265 printk_all_partitions(); 265 printk_all_partitions();
266#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
267 printk("DEBUG_BLOCK_EXT_DEVT is enabled, you need to specify "
268 "explicit textual name for \"root=\" boot option.\n");
269#endif
266 panic("VFS: Unable to mount root fs on %s", b); 270 panic("VFS: Unable to mount root fs on %s", b);
267 } 271 }
268 272
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index aad93cdc9f68..37f72e551542 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -47,6 +47,7 @@
47#include <linux/notifier.h> 47#include <linux/notifier.h>
48#include <linux/cpu.h> 48#include <linux/cpu.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/time.h>
50 51
51#ifdef CONFIG_DEBUG_LOCK_ALLOC 52#ifdef CONFIG_DEBUG_LOCK_ALLOC
52static struct lock_class_key rcu_lock_key; 53static struct lock_class_key rcu_lock_key;
@@ -60,12 +61,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
60static struct rcu_ctrlblk rcu_ctrlblk = { 61static struct rcu_ctrlblk rcu_ctrlblk = {
61 .cur = -300, 62 .cur = -300,
62 .completed = -300, 63 .completed = -300,
64 .pending = -300,
63 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
64 .cpumask = CPU_MASK_NONE, 66 .cpumask = CPU_MASK_NONE,
65}; 67};
66static struct rcu_ctrlblk rcu_bh_ctrlblk = { 68static struct rcu_ctrlblk rcu_bh_ctrlblk = {
67 .cur = -300, 69 .cur = -300,
68 .completed = -300, 70 .completed = -300,
71 .pending = -300,
69 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
70 .cpumask = CPU_MASK_NONE, 73 .cpumask = CPU_MASK_NONE,
71}; 74};
@@ -83,7 +86,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
83{ 86{
84 int cpu; 87 int cpu;
85 cpumask_t cpumask; 88 cpumask_t cpumask;
89 unsigned long flags;
90
86 set_need_resched(); 91 set_need_resched();
92 spin_lock_irqsave(&rcp->lock, flags);
87 if (unlikely(!rcp->signaled)) { 93 if (unlikely(!rcp->signaled)) {
88 rcp->signaled = 1; 94 rcp->signaled = 1;
89 /* 95 /*
@@ -109,6 +115,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
109 for_each_cpu_mask_nr(cpu, cpumask) 115 for_each_cpu_mask_nr(cpu, cpumask)
110 smp_send_reschedule(cpu); 116 smp_send_reschedule(cpu);
111 } 117 }
118 spin_unlock_irqrestore(&rcp->lock, flags);
112} 119}
113#else 120#else
114static inline void force_quiescent_state(struct rcu_data *rdp, 121static inline void force_quiescent_state(struct rcu_data *rdp,
@@ -118,6 +125,126 @@ static inline void force_quiescent_state(struct rcu_data *rdp,
118} 125}
119#endif 126#endif
120 127
128static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
129 struct rcu_data *rdp)
130{
131 long batch;
132
133 head->next = NULL;
134 smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
135
136 /*
137 * Determine the batch number of this callback.
138 *
139 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
140 * local variable "batch" and emits codes like this:
141 * 1) rdp->batch = rcp->cur + 1 # gets old value
142 * ......
143 * 2)rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
144 * then [*nxttail[0], *nxttail[1]) may contain callbacks
145 * that batch# = rdp->batch, see the comment of struct rcu_data.
146 */
147 batch = ACCESS_ONCE(rcp->cur) + 1;
148
149 if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
150 /* process callbacks */
151 rdp->nxttail[0] = rdp->nxttail[1];
152 rdp->nxttail[1] = rdp->nxttail[2];
153 if (rcu_batch_after(batch - 1, rdp->batch))
154 rdp->nxttail[0] = rdp->nxttail[2];
155 }
156
157 rdp->batch = batch;
158 *rdp->nxttail[2] = head;
159 rdp->nxttail[2] = &head->next;
160
161 if (unlikely(++rdp->qlen > qhimark)) {
162 rdp->blimit = INT_MAX;
163 force_quiescent_state(rdp, &rcu_ctrlblk);
164 }
165}
166
167#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
168
169static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
170{
171 rcp->gp_start = jiffies;
172 rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
173}
174
175static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
176{
177 int cpu;
178 long delta;
179 unsigned long flags;
180
181 /* Only let one CPU complain about others per time interval. */
182
183 spin_lock_irqsave(&rcp->lock, flags);
184 delta = jiffies - rcp->jiffies_stall;
185 if (delta < 2 || rcp->cur != rcp->completed) {
186 spin_unlock_irqrestore(&rcp->lock, flags);
187 return;
188 }
189 rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
190 spin_unlock_irqrestore(&rcp->lock, flags);
191
192 /* OK, time to rat on our buddy... */
193
194 printk(KERN_ERR "RCU detected CPU stalls:");
195 for_each_possible_cpu(cpu) {
196 if (cpu_isset(cpu, rcp->cpumask))
197 printk(" %d", cpu);
198 }
199 printk(" (detected by %d, t=%ld jiffies)\n",
200 smp_processor_id(), (long)(jiffies - rcp->gp_start));
201}
202
203static void print_cpu_stall(struct rcu_ctrlblk *rcp)
204{
205 unsigned long flags;
206
207 printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
208 smp_processor_id(), jiffies,
209 jiffies - rcp->gp_start);
210 dump_stack();
211 spin_lock_irqsave(&rcp->lock, flags);
212 if ((long)(jiffies - rcp->jiffies_stall) >= 0)
213 rcp->jiffies_stall =
214 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
215 spin_unlock_irqrestore(&rcp->lock, flags);
216 set_need_resched(); /* kick ourselves to get things going. */
217}
218
219static void check_cpu_stall(struct rcu_ctrlblk *rcp)
220{
221 long delta;
222
223 delta = jiffies - rcp->jiffies_stall;
224 if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) {
225
226 /* We haven't checked in, so go dump stack. */
227 print_cpu_stall(rcp);
228
229 } else if (rcp->cur != rcp->completed && delta >= 2) {
230
231 /* They had two seconds to dump stack, so complain. */
232 print_other_cpu_stall(rcp);
233 }
234}
235
236#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
237
238static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
239{
240}
241
242static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
243{
244}
245
246#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
247
121/** 248/**
122 * call_rcu - Queue an RCU callback for invocation after a grace period. 249 * call_rcu - Queue an RCU callback for invocation after a grace period.
123 * @head: structure to be used for queueing the RCU updates. 250 * @head: structure to be used for queueing the RCU updates.
@@ -133,18 +260,10 @@ void call_rcu(struct rcu_head *head,
133 void (*func)(struct rcu_head *rcu)) 260 void (*func)(struct rcu_head *rcu))
134{ 261{
135 unsigned long flags; 262 unsigned long flags;
136 struct rcu_data *rdp;
137 263
138 head->func = func; 264 head->func = func;
139 head->next = NULL;
140 local_irq_save(flags); 265 local_irq_save(flags);
141 rdp = &__get_cpu_var(rcu_data); 266 __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
142 *rdp->nxttail = head;
143 rdp->nxttail = &head->next;
144 if (unlikely(++rdp->qlen > qhimark)) {
145 rdp->blimit = INT_MAX;
146 force_quiescent_state(rdp, &rcu_ctrlblk);
147 }
148 local_irq_restore(flags); 267 local_irq_restore(flags);
149} 268}
150EXPORT_SYMBOL_GPL(call_rcu); 269EXPORT_SYMBOL_GPL(call_rcu);
@@ -169,20 +288,10 @@ void call_rcu_bh(struct rcu_head *head,
169 void (*func)(struct rcu_head *rcu)) 288 void (*func)(struct rcu_head *rcu))
170{ 289{
171 unsigned long flags; 290 unsigned long flags;
172 struct rcu_data *rdp;
173 291
174 head->func = func; 292 head->func = func;
175 head->next = NULL;
176 local_irq_save(flags); 293 local_irq_save(flags);
177 rdp = &__get_cpu_var(rcu_bh_data); 294 __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
178 *rdp->nxttail = head;
179 rdp->nxttail = &head->next;
180
181 if (unlikely(++rdp->qlen > qhimark)) {
182 rdp->blimit = INT_MAX;
183 force_quiescent_state(rdp, &rcu_bh_ctrlblk);
184 }
185
186 local_irq_restore(flags); 295 local_irq_restore(flags);
187} 296}
188EXPORT_SYMBOL_GPL(call_rcu_bh); 297EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -211,12 +320,6 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
211static inline void raise_rcu_softirq(void) 320static inline void raise_rcu_softirq(void)
212{ 321{
213 raise_softirq(RCU_SOFTIRQ); 322 raise_softirq(RCU_SOFTIRQ);
214 /*
215 * The smp_mb() here is required to ensure that this cpu's
216 * __rcu_process_callbacks() reads the most recently updated
217 * value of rcu->cur.
218 */
219 smp_mb();
220} 323}
221 324
222/* 325/*
@@ -225,6 +328,7 @@ static inline void raise_rcu_softirq(void)
225 */ 328 */
226static void rcu_do_batch(struct rcu_data *rdp) 329static void rcu_do_batch(struct rcu_data *rdp)
227{ 330{
331 unsigned long flags;
228 struct rcu_head *next, *list; 332 struct rcu_head *next, *list;
229 int count = 0; 333 int count = 0;
230 334
@@ -239,9 +343,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
239 } 343 }
240 rdp->donelist = list; 344 rdp->donelist = list;
241 345
242 local_irq_disable(); 346 local_irq_save(flags);
243 rdp->qlen -= count; 347 rdp->qlen -= count;
244 local_irq_enable(); 348 local_irq_restore(flags);
245 if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) 349 if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
246 rdp->blimit = blimit; 350 rdp->blimit = blimit;
247 351
@@ -269,6 +373,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
269 * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace 373 * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
270 * period (if necessary). 374 * period (if necessary).
271 */ 375 */
376
272/* 377/*
273 * Register a new batch of callbacks, and start it up if there is currently no 378 * Register a new batch of callbacks, and start it up if there is currently no
274 * active batch and the batch to be registered has not already occurred. 379 * active batch and the batch to be registered has not already occurred.
@@ -276,15 +381,10 @@ static void rcu_do_batch(struct rcu_data *rdp)
276 */ 381 */
277static void rcu_start_batch(struct rcu_ctrlblk *rcp) 382static void rcu_start_batch(struct rcu_ctrlblk *rcp)
278{ 383{
279 if (rcp->next_pending && 384 if (rcp->cur != rcp->pending &&
280 rcp->completed == rcp->cur) { 385 rcp->completed == rcp->cur) {
281 rcp->next_pending = 0;
282 /*
283 * next_pending == 0 must be visible in
284 * __rcu_process_callbacks() before it can see new value of cur.
285 */
286 smp_wmb();
287 rcp->cur++; 386 rcp->cur++;
387 record_gp_stall_check_time(rcp);
288 388
289 /* 389 /*
290 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a 390 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
@@ -322,6 +422,8 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
322static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, 422static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
323 struct rcu_data *rdp) 423 struct rcu_data *rdp)
324{ 424{
425 unsigned long flags;
426
325 if (rdp->quiescbatch != rcp->cur) { 427 if (rdp->quiescbatch != rcp->cur) {
326 /* start new grace period: */ 428 /* start new grace period: */
327 rdp->qs_pending = 1; 429 rdp->qs_pending = 1;
@@ -345,7 +447,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
345 return; 447 return;
346 rdp->qs_pending = 0; 448 rdp->qs_pending = 0;
347 449
348 spin_lock(&rcp->lock); 450 spin_lock_irqsave(&rcp->lock, flags);
349 /* 451 /*
350 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync 452 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
351 * during cpu startup. Ignore the quiescent state. 453 * during cpu startup. Ignore the quiescent state.
@@ -353,7 +455,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
353 if (likely(rdp->quiescbatch == rcp->cur)) 455 if (likely(rdp->quiescbatch == rcp->cur))
354 cpu_quiet(rdp->cpu, rcp); 456 cpu_quiet(rdp->cpu, rcp);
355 457
356 spin_unlock(&rcp->lock); 458 spin_unlock_irqrestore(&rcp->lock, flags);
357} 459}
358 460
359 461
@@ -364,33 +466,38 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
364 * which is dead and hence not processing interrupts. 466 * which is dead and hence not processing interrupts.
365 */ 467 */
366static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, 468static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
367 struct rcu_head **tail) 469 struct rcu_head **tail, long batch)
368{ 470{
369 local_irq_disable(); 471 unsigned long flags;
370 *this_rdp->nxttail = list; 472
371 if (list) 473 if (list) {
372 this_rdp->nxttail = tail; 474 local_irq_save(flags);
373 local_irq_enable(); 475 this_rdp->batch = batch;
476 *this_rdp->nxttail[2] = list;
477 this_rdp->nxttail[2] = tail;
478 local_irq_restore(flags);
479 }
374} 480}
375 481
376static void __rcu_offline_cpu(struct rcu_data *this_rdp, 482static void __rcu_offline_cpu(struct rcu_data *this_rdp,
377 struct rcu_ctrlblk *rcp, struct rcu_data *rdp) 483 struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
378{ 484{
379 /* if the cpu going offline owns the grace period 485 unsigned long flags;
486
487 /*
488 * if the cpu going offline owns the grace period
380 * we can block indefinitely waiting for it, so flush 489 * we can block indefinitely waiting for it, so flush
381 * it here 490 * it here
382 */ 491 */
383 spin_lock_bh(&rcp->lock); 492 spin_lock_irqsave(&rcp->lock, flags);
384 if (rcp->cur != rcp->completed) 493 if (rcp->cur != rcp->completed)
385 cpu_quiet(rdp->cpu, rcp); 494 cpu_quiet(rdp->cpu, rcp);
386 spin_unlock_bh(&rcp->lock); 495 rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
387 rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); 496 rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
388 rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); 497 spin_unlock(&rcp->lock);
389 rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
390 498
391 local_irq_disable();
392 this_rdp->qlen += rdp->qlen; 499 this_rdp->qlen += rdp->qlen;
393 local_irq_enable(); 500 local_irq_restore(flags);
394} 501}
395 502
396static void rcu_offline_cpu(int cpu) 503static void rcu_offline_cpu(int cpu)
@@ -420,38 +527,52 @@ static void rcu_offline_cpu(int cpu)
420static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, 527static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
421 struct rcu_data *rdp) 528 struct rcu_data *rdp)
422{ 529{
423 if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { 530 unsigned long flags;
424 *rdp->donetail = rdp->curlist; 531 long completed_snap;
425 rdp->donetail = rdp->curtail;
426 rdp->curlist = NULL;
427 rdp->curtail = &rdp->curlist;
428 }
429 532
430 if (rdp->nxtlist && !rdp->curlist) { 533 if (rdp->nxtlist) {
431 local_irq_disable(); 534 local_irq_save(flags);
432 rdp->curlist = rdp->nxtlist; 535 completed_snap = ACCESS_ONCE(rcp->completed);
433 rdp->curtail = rdp->nxttail;
434 rdp->nxtlist = NULL;
435 rdp->nxttail = &rdp->nxtlist;
436 local_irq_enable();
437 536
438 /* 537 /*
439 * start the next batch of callbacks 538 * move the other grace-period-completed entries to
539 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
440 */ 540 */
541 if (!rcu_batch_before(completed_snap, rdp->batch))
542 rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
543 else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
544 rdp->nxttail[0] = rdp->nxttail[1];
441 545
442 /* determine batch number */ 546 /*
443 rdp->batch = rcp->cur + 1; 547 * the grace period for entries in
444 /* see the comment and corresponding wmb() in 548 * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
445 * the rcu_start_batch() 549 * move these entries to donelist
446 */ 550 */
447 smp_rmb(); 551 if (rdp->nxttail[0] != &rdp->nxtlist) {
552 *rdp->donetail = rdp->nxtlist;
553 rdp->donetail = rdp->nxttail[0];
554 rdp->nxtlist = *rdp->nxttail[0];
555 *rdp->donetail = NULL;
556
557 if (rdp->nxttail[1] == rdp->nxttail[0])
558 rdp->nxttail[1] = &rdp->nxtlist;
559 if (rdp->nxttail[2] == rdp->nxttail[0])
560 rdp->nxttail[2] = &rdp->nxtlist;
561 rdp->nxttail[0] = &rdp->nxtlist;
562 }
563
564 local_irq_restore(flags);
565
566 if (rcu_batch_after(rdp->batch, rcp->pending)) {
567 unsigned long flags2;
448 568
449 if (!rcp->next_pending) {
450 /* and start it/schedule start if it's a new batch */ 569 /* and start it/schedule start if it's a new batch */
451 spin_lock(&rcp->lock); 570 spin_lock_irqsave(&rcp->lock, flags2);
452 rcp->next_pending = 1; 571 if (rcu_batch_after(rdp->batch, rcp->pending)) {
453 rcu_start_batch(rcp); 572 rcp->pending = rdp->batch;
454 spin_unlock(&rcp->lock); 573 rcu_start_batch(rcp);
574 }
575 spin_unlock_irqrestore(&rcp->lock, flags2);
455 } 576 }
456 } 577 }
457 578
@@ -462,21 +583,53 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
462 583
463static void rcu_process_callbacks(struct softirq_action *unused) 584static void rcu_process_callbacks(struct softirq_action *unused)
464{ 585{
586 /*
587 * Memory references from any prior RCU read-side critical sections
588 * executed by the interrupted code must be see before any RCU
589 * grace-period manupulations below.
590 */
591
592 smp_mb(); /* See above block comment. */
593
465 __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); 594 __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
466 __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); 595 __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
596
597 /*
598 * Memory references from any later RCU read-side critical sections
 599 * executed by the interrupted code must be seen after any RCU
 600 * grace-period manipulations above.
601 */
602
603 smp_mb(); /* See above block comment. */
467} 604}
468 605
469static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) 606static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
470{ 607{
471 /* This cpu has pending rcu entries and the grace period 608 /* Check for CPU stalls, if enabled. */
472 * for them has completed. 609 check_cpu_stall(rcp);
473 */
474 if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
475 return 1;
476 610
477 /* This cpu has no pending entries, but there are new entries */ 611 if (rdp->nxtlist) {
478 if (!rdp->curlist && rdp->nxtlist) 612 long completed_snap = ACCESS_ONCE(rcp->completed);
479 return 1; 613
614 /*
615 * This cpu has pending rcu entries and the grace period
616 * for them has completed.
617 */
618 if (!rcu_batch_before(completed_snap, rdp->batch))
619 return 1;
620 if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
621 rdp->nxttail[0] != rdp->nxttail[1])
622 return 1;
623 if (rdp->nxttail[0] != &rdp->nxtlist)
624 return 1;
625
626 /*
627 * This cpu has pending rcu entries and the new batch
628 * for then hasn't been started nor scheduled start
629 */
630 if (rcu_batch_after(rdp->batch, rcp->pending))
631 return 1;
632 }
480 633
481 /* This cpu has finished callbacks to invoke */ 634 /* This cpu has finished callbacks to invoke */
482 if (rdp->donelist) 635 if (rdp->donelist)
@@ -512,9 +665,15 @@ int rcu_needs_cpu(int cpu)
512 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 665 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
513 struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); 666 struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
514 667
515 return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); 668 return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
516} 669}
517 670
671/*
672 * Top-level function driving RCU grace-period detection, normally
673 * invoked from the scheduler-clock interrupt. This function simply
674 * increments counters that are read only from softirq by this same
675 * CPU, so there are no memory barriers required.
676 */
518void rcu_check_callbacks(int cpu, int user) 677void rcu_check_callbacks(int cpu, int user)
519{ 678{
520 if (user || 679 if (user ||
@@ -558,14 +717,17 @@ void rcu_check_callbacks(int cpu, int user)
558static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, 717static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
559 struct rcu_data *rdp) 718 struct rcu_data *rdp)
560{ 719{
720 unsigned long flags;
721
722 spin_lock_irqsave(&rcp->lock, flags);
561 memset(rdp, 0, sizeof(*rdp)); 723 memset(rdp, 0, sizeof(*rdp));
562 rdp->curtail = &rdp->curlist; 724 rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
563 rdp->nxttail = &rdp->nxtlist;
564 rdp->donetail = &rdp->donelist; 725 rdp->donetail = &rdp->donelist;
565 rdp->quiescbatch = rcp->completed; 726 rdp->quiescbatch = rcp->completed;
566 rdp->qs_pending = 0; 727 rdp->qs_pending = 0;
567 rdp->cpu = cpu; 728 rdp->cpu = cpu;
568 rdp->blimit = blimit; 729 rdp->blimit = blimit;
730 spin_unlock_irqrestore(&rcp->lock, flags);
569} 731}
570 732
571static void __cpuinit rcu_online_cpu(int cpu) 733static void __cpuinit rcu_online_cpu(int cpu)
@@ -610,6 +772,9 @@ static struct notifier_block __cpuinitdata rcu_nb = {
610 */ 772 */
611void __init __rcu_init(void) 773void __init __rcu_init(void)
612{ 774{
775#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
776 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
777#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
613 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, 778 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
614 (void *)(long)smp_processor_id()); 779 (void *)(long)smp_processor_id());
615 /* Register notifier for non-boot CPUs */ 780 /* Register notifier for non-boot CPUs */
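
For orientation, the caller-side pattern that the reworked __call_rcu()/rcu_do_batch() machinery above is servicing; this is ordinary call_rcu() usage, not code from the patch.

struct my_node {
	int key;
	struct rcu_head rcu;
};

static void my_node_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_node, rcu));
}

static void my_node_release(struct my_node *node)
{
	/* queued on the per-CPU nxttail[2] segment; freed by rcu_do_batch()
	 * once the callback's grace period has completed */
	call_rcu(&node->rcu, my_node_free_rcu);
}
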
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 27827931ca0d..ca4bbbe04aa4 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -59,14 +59,6 @@
59#include <linux/rcupreempt_trace.h> 59#include <linux/rcupreempt_trace.h>
60 60
61/* 61/*
62 * Macro that prevents the compiler from reordering accesses, but does
63 * absolutely -nothing- to prevent CPUs from reordering. This is used
64 * only to mediate communication between mainline code and hardware
65 * interrupt and NMI handlers.
66 */
67#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
68
69/*
70 * PREEMPT_RCU data structures. 62 * PREEMPT_RCU data structures.
71 */ 63 */
72 64
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 5edf82c34bbc..35c2d3360ecf 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -308,11 +308,16 @@ out:
308 308
309static int __init rcupreempt_trace_init(void) 309static int __init rcupreempt_trace_init(void)
310{ 310{
311 int ret;
312
311 mutex_init(&rcupreempt_trace_mutex); 313 mutex_init(&rcupreempt_trace_mutex);
312 rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); 314 rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
313 if (!rcupreempt_trace_buf) 315 if (!rcupreempt_trace_buf)
314 return 1; 316 return 1;
315 return rcupreempt_debugfs_init(); 317 ret = rcupreempt_debugfs_init();
318 if (ret)
319 kfree(rcupreempt_trace_buf);
320 return ret;
316} 321}
317 322
318static void __exit rcupreempt_trace_cleanup(void) 323static void __exit rcupreempt_trace_cleanup(void)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0b504814e378..ce697e0b319e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -597,6 +597,19 @@ config RCU_TORTURE_TEST_RUNNABLE
597 Say N here if you want the RCU torture tests to start only 597 Say N here if you want the RCU torture tests to start only
598 after being manually enabled via /proc. 598 after being manually enabled via /proc.
599 599
600config RCU_CPU_STALL_DETECTOR
601 bool "Check for stalled CPUs delaying RCU grace periods"
602 depends on CLASSIC_RCU
603 default n
604 help
605 This option causes RCU to printk information on which
606 CPUs are delaying the current grace period, but only when
607 the grace period extends for excessive time periods.
608
609 Say Y if you want RCU to perform such checks.
610
611 Say N if you are unsure.
612
600config KPROBES_SANITY_TEST 613config KPROBES_SANITY_TEST
601 bool "Kprobes sanity tests" 614 bool "Kprobes sanity tests"
602 depends on DEBUG_KERNEL 615 depends on DEBUG_KERNEL
@@ -624,6 +637,28 @@ config BACKTRACE_SELF_TEST
624 637
625 Say N if you are unsure. 638 Say N if you are unsure.
626 639
640config DEBUG_BLOCK_EXT_DEVT
641 bool "Force extended block device numbers and spread them"
642 depends on DEBUG_KERNEL
643 depends on BLOCK
644 default n
645 help
 646	  Conventionally, block device numbers are allocated from a
 647	  predetermined contiguous area. However, the extended block area
648 may introduce non-contiguous block device numbers. This
649 option forces most block device numbers to be allocated from
650 the extended space and spreads them to discover kernel or
651 userland code paths which assume predetermined contiguous
652 device number allocation.
653
654 Note that turning on this debug option shuffles all the
655 device numbers for all IDE and SCSI devices including libata
656 ones, so root partition specified using device number
657 directly (via rdev or root=MAJ:MIN) won't work anymore.
658 Textual device names (root=/dev/sdXn) will continue to work.
659
660 Say N if you are unsure.
661
627config LKDTM 662config LKDTM
628 tristate "Linux Kernel Dump Test Tool Module" 663 tristate "Linux Kernel Dump Test Tool Module"
629 depends on DEBUG_KERNEL 664 depends on DEBUG_KERNEL
@@ -661,10 +696,21 @@ config FAIL_PAGE_ALLOC
661 696
662config FAIL_MAKE_REQUEST 697config FAIL_MAKE_REQUEST
663 bool "Fault-injection capability for disk IO" 698 bool "Fault-injection capability for disk IO"
664 depends on FAULT_INJECTION 699 depends on FAULT_INJECTION && BLOCK
665 help 700 help
666 Provide fault-injection capability for disk IO. 701 Provide fault-injection capability for disk IO.
667 702
703config FAIL_IO_TIMEOUT
704 bool "Faul-injection capability for faking disk interrupts"
705 depends on FAULT_INJECTION && BLOCK
706 help
707 Provide fault-injection capability on end IO handling. This
708 will make the block layer "forget" an interrupt as configured,
709 thus exercising the error handling.
710
711 Only works with drivers that use the generic timeout handling,
 712	  for others it won't do anything.
713
668config FAULT_INJECTION_DEBUG_FS 714config FAULT_INJECTION_DEBUG_FS
669 bool "Debugfs entries for fault-injection capabilities" 715 bool "Debugfs entries for fault-injection capabilities"
670 depends on FAULT_INJECTION && SYSFS && DEBUG_FS 716 depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index 3b1f94bbe9de..44001af76a7d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o
19lib-y += kobject.o kref.o klist.o 19lib-y += kobject.o kref.o klist.o
20 20
21obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 21obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
22 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o 22 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
23 string_helpers.o
23 24
24ifeq ($(CONFIG_DEBUG_KOBJECT),y) 25ifeq ($(CONFIG_DEBUG_KOBJECT),y)
25CFLAGS_kobject.o += -DDEBUG 26CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa2..bbdd3015c2c7 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
37#include <linux/klist.h> 37#include <linux/klist.h>
38#include <linux/module.h> 38#include <linux/module.h>
39 39
40/*
41 * Use the lowest bit of n_klist to mark deleted nodes and exclude
42 * dead ones from iteration.
43 */
44#define KNODE_DEAD 1LU
45#define KNODE_KLIST_MASK ~KNODE_DEAD
46
47static struct klist *knode_klist(struct klist_node *knode)
48{
49 return (struct klist *)
50 ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
51}
52
53static bool knode_dead(struct klist_node *knode)
54{
55 return (unsigned long)knode->n_klist & KNODE_DEAD;
56}
57
58static void knode_set_klist(struct klist_node *knode, struct klist *klist)
59{
60 knode->n_klist = klist;
61 /* no knode deserves to start its life dead */
62 WARN_ON(knode_dead(knode));
63}
64
65static void knode_kill(struct klist_node *knode)
66{
67 /* and no knode should die twice ever either, see we're very humane */
68 WARN_ON(knode_dead(knode));
69 *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
70}
40 71
41/** 72/**
42 * klist_init - Initialize a klist structure. 73 * klist_init - Initialize a klist structure.
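
A usage sketch (not part of the patch) of the iterator API that the KNODE_DEAD marking above protects; my_inspect() is a stand-in for whatever per-node work a caller does.

static void my_walk(struct klist *k)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(k, &iter);
	while ((n = klist_next(&iter))) {
		/* n is pinned by a reference; a concurrent klist_del()
		 * only marks it KNODE_DEAD until we move past it */
		my_inspect(n);
	}
	klist_iter_exit(&iter);
}
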
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
79 INIT_LIST_HEAD(&n->n_node); 110 INIT_LIST_HEAD(&n->n_node);
80 init_completion(&n->n_removed); 111 init_completion(&n->n_removed);
81 kref_init(&n->n_ref); 112 kref_init(&n->n_ref);
82 n->n_klist = k; 113 knode_set_klist(n, k);
83 if (k->get) 114 if (k->get)
84 k->get(n); 115 k->get(n);
85} 116}
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
115 */ 146 */
116void klist_add_after(struct klist_node *n, struct klist_node *pos) 147void klist_add_after(struct klist_node *n, struct klist_node *pos)
117{ 148{
118 struct klist *k = pos->n_klist; 149 struct klist *k = knode_klist(pos);
119 150
120 klist_node_init(k, n); 151 klist_node_init(k, n);
121 spin_lock(&k->k_lock); 152 spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
131 */ 162 */
132void klist_add_before(struct klist_node *n, struct klist_node *pos) 163void klist_add_before(struct klist_node *n, struct klist_node *pos)
133{ 164{
134 struct klist *k = pos->n_klist; 165 struct klist *k = knode_klist(pos);
135 166
136 klist_node_init(k, n); 167 klist_node_init(k, n);
137 spin_lock(&k->k_lock); 168 spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
144{ 175{
145 struct klist_node *n = container_of(kref, struct klist_node, n_ref); 176 struct klist_node *n = container_of(kref, struct klist_node, n_ref);
146 177
178 WARN_ON(!knode_dead(n));
147 list_del(&n->n_node); 179 list_del(&n->n_node);
148 complete(&n->n_removed); 180 complete(&n->n_removed);
149 n->n_klist = NULL; 181 knode_set_klist(n, NULL);
150} 182}
151 183
152static int klist_dec_and_del(struct klist_node *n) 184static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
154 return kref_put(&n->n_ref, klist_release); 186 return kref_put(&n->n_ref, klist_release);
155} 187}
156 188
157/** 189static void klist_put(struct klist_node *n, bool kill)
158 * klist_del - Decrement the reference count of node and try to remove.
159 * @n: node we're deleting.
160 */
161void klist_del(struct klist_node *n)
162{ 190{
163 struct klist *k = n->n_klist; 191 struct klist *k = knode_klist(n);
164 void (*put)(struct klist_node *) = k->put; 192 void (*put)(struct klist_node *) = k->put;
165 193
166 spin_lock(&k->k_lock); 194 spin_lock(&k->k_lock);
195 if (kill)
196 knode_kill(n);
167 if (!klist_dec_and_del(n)) 197 if (!klist_dec_and_del(n))
168 put = NULL; 198 put = NULL;
169 spin_unlock(&k->k_lock); 199 spin_unlock(&k->k_lock);
170 if (put) 200 if (put)
171 put(n); 201 put(n);
172} 202}
203
204/**
205 * klist_del - Decrement the reference count of node and try to remove.
206 * @n: node we're deleting.
207 */
208void klist_del(struct klist_node *n)
209{
210 klist_put(n, true);
211}
173EXPORT_SYMBOL_GPL(klist_del); 212EXPORT_SYMBOL_GPL(klist_del);
174 213
175/** 214/**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
206 struct klist_node *n) 245 struct klist_node *n)
207{ 246{
208 i->i_klist = k; 247 i->i_klist = k;
209 i->i_head = &k->k_list;
210 i->i_cur = n; 248 i->i_cur = n;
211 if (n) 249 if (n)
212 kref_get(&n->n_ref); 250 kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
237void klist_iter_exit(struct klist_iter *i) 275void klist_iter_exit(struct klist_iter *i)
238{ 276{
239 if (i->i_cur) { 277 if (i->i_cur) {
240 klist_del(i->i_cur); 278 klist_put(i->i_cur, false);
241 i->i_cur = NULL; 279 i->i_cur = NULL;
242 } 280 }
243} 281}
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
258 */ 296 */
259struct klist_node *klist_next(struct klist_iter *i) 297struct klist_node *klist_next(struct klist_iter *i)
260{ 298{
261 struct list_head *next;
262 struct klist_node *lnode = i->i_cur;
263 struct klist_node *knode = NULL;
264 void (*put)(struct klist_node *) = i->i_klist->put; 299 void (*put)(struct klist_node *) = i->i_klist->put;
300 struct klist_node *last = i->i_cur;
301 struct klist_node *next;
265 302
266 spin_lock(&i->i_klist->k_lock); 303 spin_lock(&i->i_klist->k_lock);
267 if (lnode) { 304
268 next = lnode->n_node.next; 305 if (last) {
269 if (!klist_dec_and_del(lnode)) 306 next = to_klist_node(last->n_node.next);
307 if (!klist_dec_and_del(last))
270 put = NULL; 308 put = NULL;
271 } else 309 } else
272 next = i->i_head->next; 310 next = to_klist_node(i->i_klist->k_list.next);
273 311
274 if (next != i->i_head) { 312 i->i_cur = NULL;
275 knode = to_klist_node(next); 313 while (next != to_klist_node(&i->i_klist->k_list)) {
276 kref_get(&knode->n_ref); 314 if (likely(!knode_dead(next))) {
315 kref_get(&next->n_ref);
316 i->i_cur = next;
317 break;
318 }
319 next = to_klist_node(next->n_node.next);
277 } 320 }
278 i->i_cur = knode; 321
279 spin_unlock(&i->i_klist->k_lock); 322 spin_unlock(&i->i_klist->k_lock);
280 if (put && lnode) 323
281 put(lnode); 324 if (put && last)
282 return knode; 325 put(last);
326 return i->i_cur;
283} 327}
284EXPORT_SYMBOL_GPL(klist_next); 328EXPORT_SYMBOL_GPL(klist_next);
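For context, a hedged sketch of the iterator protocol the klist changes above protect (standard <linux/klist.h> API, not new code from this patch): klist_next() now skips nodes whose KNODE_DEAD bit is set, so a walker never gets handed a node that klist_del() has already killed.

#include <linux/klist.h>

static void walk_klist(struct klist *k)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(k, &iter);
	while ((n = klist_next(&iter)) != NULL) {
		/* use n here; the iterator holds a reference on it */
	}
	klist_iter_exit(&iter);	/* drops the reference on the last node, if any */
}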
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 000000000000..8347925030ff
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,64 @@
1/*
2 * Helpers for formatting and printing strings
3 *
4 * Copyright 31 August 2008 James Bottomley
5 */
6#include <linux/kernel.h>
7#include <linux/math64.h>
8#include <linux/module.h>
9#include <linux/string_helpers.h>
10
11/**
12 * string_get_size - get the size in the specified units
13 * @size: The size to be converted
14 * @units: units to use (powers of 1000 or 1024)
15 * @buf: buffer to format to
16 * @len: length of buffer
17 *
18 * This function returns a string formatted to 3 significant figures
19 * giving the size in the required units. Returns 0 on success or
20 * error on failure. @buf is always zero terminated.
21 *
22 */
23int string_get_size(u64 size, const enum string_size_units units,
24 char *buf, int len)
25{
26 const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
27 "EB", "ZB", "YB", NULL};
28 const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
29 "EiB", "ZiB", "YiB", NULL };
30 const char **units_str[] = {
31 [STRING_UNITS_10] = units_10,
32 [STRING_UNITS_2] = units_2,
33 };
34 const int divisor[] = {
35 [STRING_UNITS_10] = 1000,
36 [STRING_UNITS_2] = 1024,
37 };
38 int i, j;
39 u64 remainder = 0, sf_cap;
40 char tmp[8];
41
42 tmp[0] = '\0';
43
44 for (i = 0; size > divisor[units] && units_str[units][i]; i++)
45 remainder = do_div(size, divisor[units]);
46
47 sf_cap = size;
48 for (j = 0; sf_cap*10 < 1000; j++)
49 sf_cap *= 10;
50
51 if (j) {
52 remainder *= 1000;
53 do_div(remainder, divisor[units]);
54 snprintf(tmp, sizeof(tmp), ".%03lld",
55 (unsigned long long)remainder);
56 tmp[j+1] = '\0';
57 }
58
59 snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
60 tmp, units_str[units][i]);
61
62 return 0;
63}
64EXPORT_SYMBOL(string_get_size);
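A hedged usage sketch for the new helper (the expected strings are worked out by hand from the loop above rather than taken from a test run):

#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void report_capacity(u64 bytes)
{
	char buf[16];

	/* e.g. bytes = 10500: "10.2KiB" with STRING_UNITS_2, "10.5KB" with STRING_UNITS_10 */
	string_get_size(bytes, STRING_UNITS_2, buf, sizeof(buf));
	printk(KERN_INFO "capacity: %s\n", buf);
}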
diff --git a/mm/bounce.c b/mm/bounce.c
index b6d2d0f1019b..06722c403058 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -267,7 +267,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
267 /* 267 /*
268 * Data-less bio, nothing to bounce 268 * Data-less bio, nothing to bounce
269 */ 269 */
270 if (bio_empty_barrier(*bio_orig)) 270 if (!bio_has_data(*bio_orig))
271 return; 271 return;
272 272
273 /* 273 /*
diff --git a/mm/slob.c b/mm/slob.c
index 62b679dc660f..cb675d126791 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -514,9 +514,11 @@ size_t ksize(const void *block)
514 return 0; 514 return 0;
515 515
516 sp = (struct slob_page *)virt_to_page(block); 516 sp = (struct slob_page *)virt_to_page(block);
517 if (slob_page(sp)) 517 if (slob_page(sp)) {
518 return (((slob_t *)block - 1)->units - 1) * SLOB_UNIT; 518 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
519 else 519 unsigned int *m = (unsigned int *)(block - align);
520 return SLOB_UNITS(*m) * SLOB_UNIT;
521 } else
520 return sp->page.private; 522 return sp->page.private;
521} 523}
522 524
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 01c83e2a4c19..28c71574a781 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -317,6 +317,9 @@ void ax25_destroy_socket(ax25_cb *ax25)
317 /* Queue the unaccepted socket for death */ 317 /* Queue the unaccepted socket for death */
318 sock_orphan(skb->sk); 318 sock_orphan(skb->sk);
319 319
320 /* 9A4GL: hack to release unaccepted sockets */
321 skb->sk->sk_state = TCP_LISTEN;
322
320 ax25_start_heartbeat(sax25); 323 ax25_start_heartbeat(sax25);
321 sax25->state = AX25_STATE_0; 324 sax25->state = AX25_STATE_0;
322 } 325 }
diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
index cdc7e751ef36..96e4b9273250 100644
--- a/net/ax25/ax25_std_timer.c
+++ b/net/ax25/ax25_std_timer.c
@@ -39,9 +39,11 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
39 39
40 switch (ax25->state) { 40 switch (ax25->state) {
41 case AX25_STATE_0: 41 case AX25_STATE_0:
42 if (!sk || 42 /* Magic here: If we listen() and a new link dies before it
43 sock_flag(sk, SOCK_DESTROY) || 43 is accepted() it isn't 'dead' so doesn't get removed. */
44 sock_flag(sk, SOCK_DEAD)) { 44 if (!sk || sock_flag(sk, SOCK_DESTROY) ||
45 (sk->sk_state == TCP_LISTEN &&
46 sock_flag(sk, SOCK_DEAD))) {
45 if (sk) { 47 if (sk) {
46 sock_hold(sk); 48 sock_hold(sk);
47 ax25_destroy_socket(ax25); 49 ax25_destroy_socket(ax25);
diff --git a/net/core/dev.c b/net/core/dev.c
index e8eb2b478344..0ae08d3f57e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2918,6 +2918,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
2918 return 0; 2918 return 0;
2919} 2919}
2920 2920
2921static void dev_change_rx_flags(struct net_device *dev, int flags)
2922{
2923 if (dev->flags & IFF_UP && dev->change_rx_flags)
2924 dev->change_rx_flags(dev, flags);
2925}
2926
2921static int __dev_set_promiscuity(struct net_device *dev, int inc) 2927static int __dev_set_promiscuity(struct net_device *dev, int inc)
2922{ 2928{
2923 unsigned short old_flags = dev->flags; 2929 unsigned short old_flags = dev->flags;
@@ -2955,8 +2961,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
2955 current->uid, current->gid, 2961 current->uid, current->gid,
2956 audit_get_sessionid(current)); 2962 audit_get_sessionid(current));
2957 2963
2958 if (dev->change_rx_flags) 2964 dev_change_rx_flags(dev, IFF_PROMISC);
2959 dev->change_rx_flags(dev, IFF_PROMISC);
2960 } 2965 }
2961 return 0; 2966 return 0;
2962} 2967}
@@ -3022,8 +3027,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
3022 } 3027 }
3023 } 3028 }
3024 if (dev->flags ^ old_flags) { 3029 if (dev->flags ^ old_flags) {
3025 if (dev->change_rx_flags) 3030 dev_change_rx_flags(dev, IFF_ALLMULTI);
3026 dev->change_rx_flags(dev, IFF_ALLMULTI);
3027 dev_set_rx_mode(dev); 3031 dev_set_rx_mode(dev);
3028 } 3032 }
3029 return 0; 3033 return 0;
@@ -3347,8 +3351,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
3347 * Load in the correct multicast list now the flags have changed. 3351 * Load in the correct multicast list now the flags have changed.
3348 */ 3352 */
3349 3353
3350 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST) 3354 if ((old_flags ^ flags) & IFF_MULTICAST)
3351 dev->change_rx_flags(dev, IFF_MULTICAST); 3355 dev_change_rx_flags(dev, IFF_MULTICAST);
3352 3356
3353 dev_set_rx_mode(dev); 3357 dev_set_rx_mode(dev);
3354 3358
@@ -3808,14 +3812,11 @@ static int dev_new_index(struct net *net)
3808} 3812}
3809 3813
3810/* Delayed registration/unregistration */ 3814/* Delayed registration/unregistration */
3811static DEFINE_SPINLOCK(net_todo_list_lock);
3812static LIST_HEAD(net_todo_list); 3815static LIST_HEAD(net_todo_list);
3813 3816
3814static void net_set_todo(struct net_device *dev) 3817static void net_set_todo(struct net_device *dev)
3815{ 3818{
3816 spin_lock(&net_todo_list_lock);
3817 list_add_tail(&dev->todo_list, &net_todo_list); 3819 list_add_tail(&dev->todo_list, &net_todo_list);
3818 spin_unlock(&net_todo_list_lock);
3819} 3820}
3820 3821
3821static void rollback_registered(struct net_device *dev) 3822static void rollback_registered(struct net_device *dev)
@@ -4142,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
4142 * free_netdev(y1); 4143 * free_netdev(y1);
4143 * free_netdev(y2); 4144 * free_netdev(y2);
4144 * 4145 *
4145 * We are invoked by rtnl_unlock() after it drops the semaphore. 4146 * We are invoked by rtnl_unlock().
4146 * This allows us to deal with problems: 4147 * This allows us to deal with problems:
4147 * 1) We can delete sysfs objects which invoke hotplug 4148 * 1) We can delete sysfs objects which invoke hotplug
4148 * without deadlocking with linkwatch via keventd. 4149 * without deadlocking with linkwatch via keventd.
4149 * 2) Since we run with the RTNL semaphore not held, we can sleep 4150 * 2) Since we run with the RTNL semaphore not held, we can sleep
4150 * safely in order to wait for the netdev refcnt to drop to zero. 4151 * safely in order to wait for the netdev refcnt to drop to zero.
4152 *
4153 * We must not return until all unregister events added during
4154 * the interval the lock was held have been completed.
4151 */ 4155 */
4152static DEFINE_MUTEX(net_todo_run_mutex);
4153void netdev_run_todo(void) 4156void netdev_run_todo(void)
4154{ 4157{
4155 struct list_head list; 4158 struct list_head list;
4156 4159
4157 /* Need to guard against multiple cpu's getting out of order. */
4158 mutex_lock(&net_todo_run_mutex);
4159
4160 /* Not safe to do outside the semaphore. We must not return
4161 * until all unregister events invoked by the local processor
4162 * have been completed (either by this todo run, or one on
4163 * another cpu).
4164 */
4165 if (list_empty(&net_todo_list))
4166 goto out;
4167
4168 /* Snapshot list, allow later requests */ 4160 /* Snapshot list, allow later requests */
4169 spin_lock(&net_todo_list_lock);
4170 list_replace_init(&net_todo_list, &list); 4161 list_replace_init(&net_todo_list, &list);
4171 spin_unlock(&net_todo_list_lock); 4162
4163 __rtnl_unlock();
4172 4164
4173 while (!list_empty(&list)) { 4165 while (!list_empty(&list)) {
4174 struct net_device *dev 4166 struct net_device *dev
@@ -4200,9 +4192,6 @@ void netdev_run_todo(void)
4200 /* Free network device */ 4192 /* Free network device */
4201 kobject_put(&dev->dev.kobj); 4193 kobject_put(&dev->dev.kobj);
4202 } 4194 }
4203
4204out:
4205 mutex_unlock(&net_todo_run_mutex);
4206} 4195}
4207 4196
4208static struct net_device_stats *internal_stats(struct net_device *dev) 4197static struct net_device_stats *internal_stats(struct net_device *dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b36341..d6381c2a4693 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -73,7 +73,7 @@ void __rtnl_unlock(void)
73 73
74void rtnl_unlock(void) 74void rtnl_unlock(void)
75{ 75{
76 mutex_unlock(&rtnl_mutex); 76 /* This fellow will unlock it for us. */
77 netdev_run_todo(); 77 netdev_run_todo();
78} 78}
79 79
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index bfcbd148a89d..c209e054a634 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -150,7 +150,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
150 ca->snd_cwnd_cents -= 128; 150 ca->snd_cwnd_cents -= 128;
151 tp->snd_cwnd_cnt = 0; 151 tp->snd_cwnd_cnt = 0;
152 } 152 }
153 153 /* check when cwnd has not been incremented for a while */
154 if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
155 tp->snd_cwnd++;
156 tp->snd_cwnd_cnt = 0;
157 }
154 /* clamp down slowstart cwnd to ssthresh value. */ 158 /* clamp down slowstart cwnd to ssthresh value. */
155 if (is_slowstart) 159 if (is_slowstart)
156 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 160 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67ccce2a96bd..7abc6b80d47d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4879,7 +4879,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4879 goto no_ack; 4879 goto no_ack;
4880 } 4880 }
4881 4881
4882 __tcp_ack_snd_check(sk, 0); 4882 if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
4883 __tcp_ack_snd_check(sk, 0);
4883no_ack: 4884no_ack:
4884#ifdef CONFIG_NET_DMA 4885#ifdef CONFIG_NET_DMA
4885 if (copied_early) 4886 if (copied_early)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 532e4faa29f7..9f1ea4a27b35 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -525,6 +525,7 @@ static int nr_release(struct socket *sock)
525 if (sk == NULL) return 0; 525 if (sk == NULL) return 0;
526 526
527 sock_hold(sk); 527 sock_hold(sk);
528 sock_orphan(sk);
528 lock_sock(sk); 529 lock_sock(sk);
529 nr = nr_sk(sk); 530 nr = nr_sk(sk);
530 531
@@ -548,7 +549,6 @@ static int nr_release(struct socket *sock)
548 sk->sk_state = TCP_CLOSE; 549 sk->sk_state = TCP_CLOSE;
549 sk->sk_shutdown |= SEND_SHUTDOWN; 550 sk->sk_shutdown |= SEND_SHUTDOWN;
550 sk->sk_state_change(sk); 551 sk->sk_state_change(sk);
551 sock_orphan(sk);
552 sock_set_flag(sk, SOCK_DESTROY); 552 sock_set_flag(sk, SOCK_DESTROY);
553 break; 553 break;
554 554
diff --git a/scripts/Makefile b/scripts/Makefile
index 1c73c5aea66b..aafdf064feef 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -20,6 +20,7 @@ hostprogs-y += unifdef
20 20
21subdir-$(CONFIG_MODVERSIONS) += genksyms 21subdir-$(CONFIG_MODVERSIONS) += genksyms
22subdir-y += mod 22subdir-y += mod
23subdir-$(CONFIG_SECURITY_SELINUX) += selinux
23 24
24# Let clean descend into subdirs 25# Let clean descend into subdirs
25subdir- += basic kconfig package 26subdir- += basic kconfig package selinux
diff --git a/scripts/selinux/Makefile b/scripts/selinux/Makefile
new file mode 100644
index 000000000000..ca4b1ec01822
--- /dev/null
+++ b/scripts/selinux/Makefile
@@ -0,0 +1,2 @@
1subdir-y := mdp
2subdir- += mdp
diff --git a/scripts/selinux/README b/scripts/selinux/README
new file mode 100644
index 000000000000..a936315ba2c8
--- /dev/null
+++ b/scripts/selinux/README
@@ -0,0 +1,2 @@
1Please see Documentation/SELinux.txt for information on
2installing a dummy SELinux policy.
diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh
new file mode 100644
index 000000000000..7b9ccf61f8f9
--- /dev/null
+++ b/scripts/selinux/install_policy.sh
@@ -0,0 +1,69 @@
1#!/bin/sh
2if [ `id -u` -ne 0 ]; then
3 echo "$0: must be root to install the selinux policy"
4 exit 1
5fi
6SF=`which setfiles`
7if [ $? -eq 1 ]; then
8 if [ -f /sbin/setfiles ]; then
9 SF="/sbin/setfiles"
10 else
11 echo "no selinux tools installed: setfiles"
12 exit 1
13 fi
14fi
15
16cd mdp
17
18CP=`which checkpolicy`
19VERS=`$CP -V | awk '{print $1}'`
20
21./mdp policy.conf file_contexts
22$CP -o policy.$VERS policy.conf
23
24mkdir -p /etc/selinux/dummy/policy
25mkdir -p /etc/selinux/dummy/contexts/files
26
27cp file_contexts /etc/selinux/dummy/contexts/files
28cp dbus_contexts /etc/selinux/dummy/contexts
29cp policy.$VERS /etc/selinux/dummy/policy
30FC_FILE=/etc/selinux/dummy/contexts/files/file_contexts
31
32if [ ! -d /etc/selinux ]; then
33 mkdir -p /etc/selinux
34fi
35if [ ! -f /etc/selinux/config ]; then
36 cat > /etc/selinux/config << EOF
37SELINUX=enforcing
38SELINUXTYPE=dummy
39EOF
40else
41 TYPE=`cat /etc/selinux/config | grep "^SELINUXTYPE" | tail -1 | awk -F= '{ print $2 }'`
42 if [ "eq$TYPE" != "eqdummy" ]; then
43 selinuxenabled
44 if [ $? -eq 0 ]; then
45 echo "SELinux already enabled with a non-dummy policy."
46 echo "Exiting. Please install policy by hand if that"
47 echo "is what you REALLY want."
48 exit 1
49 fi
50 mv /etc/selinux/config /etc/selinux/config.mdpbak
51 grep -v "^SELINUXTYPE" /etc/selinux/config.mdpbak >> /etc/selinux/config
52 echo "SELINUXTYPE=dummy" >> /etc/selinux/config
53 fi
54fi
55
56cd /etc/selinux/dummy/contexts/files
57$SF file_contexts /
58
59 mounts=`cat /proc/$$/mounts | egrep "ext2|ext3|xfs|jfs|ext4|ext4dev|gfs2" | awk '{ print $2 }'`
60$SF file_contexts $mounts
61
62
63dodev=`cat /proc/$$/mounts | grep "/dev "`
64if [ "eq$dodev" != "eq" ]; then
65 mount --move /dev /mnt
66 $SF file_contexts /dev
67 mount --move /mnt /dev
68fi
69
diff --git a/scripts/selinux/mdp/.gitignore b/scripts/selinux/mdp/.gitignore
new file mode 100644
index 000000000000..654546d8dffd
--- /dev/null
+++ b/scripts/selinux/mdp/.gitignore
@@ -0,0 +1,2 @@
1# Generated file
2mdp
diff --git a/scripts/selinux/mdp/Makefile b/scripts/selinux/mdp/Makefile
new file mode 100644
index 000000000000..eb365b333441
--- /dev/null
+++ b/scripts/selinux/mdp/Makefile
@@ -0,0 +1,5 @@
1hostprogs-y := mdp
2HOST_EXTRACFLAGS += -Isecurity/selinux/include
3
4always := $(hostprogs-y)
5clean-files := $(hostprogs-y) policy.* file_contexts
diff --git a/scripts/selinux/mdp/dbus_contexts b/scripts/selinux/mdp/dbus_contexts
new file mode 100644
index 000000000000..116e684f9fc1
--- /dev/null
+++ b/scripts/selinux/mdp/dbus_contexts
@@ -0,0 +1,6 @@
1<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
2 "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
3<busconfig>
4 <selinux>
5 </selinux>
6</busconfig>
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
new file mode 100644
index 000000000000..ca757d486187
--- /dev/null
+++ b/scripts/selinux/mdp/mdp.c
@@ -0,0 +1,242 @@
1/*
2 *
3 * mdp - make dummy policy
4 *
5 * When pointed at a kernel tree, builds a dummy policy for that kernel
6 * with exactly one type with full rights to itself.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * Copyright (C) IBM Corporation, 2006
23 *
24 * Authors: Serge E. Hallyn <serue@us.ibm.com>
25 */
26
27#include <stdio.h>
28#include <stdlib.h>
29#include <unistd.h>
30#include <string.h>
31
32#include "flask.h"
33
34void usage(char *name)
35{
36 printf("usage: %s [-m] policy_file context_file\n", name);
37 exit(1);
38}
39
40void find_common_name(char *cname, char *dest, int len)
41{
42 char *start, *end;
43
44 start = strchr(cname, '_');
45 end = start ? strchr(++start, '_') : NULL;
46 if (!start || !end || start-cname > len || end-start > len) {
47 printf("Error with commons defines\n");
48 exit(1);
49 }
50 strncpy(dest, start, end-start);
51 dest[end-start] = '\0';
52}
53
54#define S_(x) x,
55static char *classlist[] = {
56#include "class_to_string.h"
57 NULL
58};
59#undef S_
60
61#include "initial_sid_to_string.h"
62
63#define TB_(x) char *x[] = {
64#define TE_(x) NULL };
65#define S_(x) x,
66#include "common_perm_to_string.h"
67#undef TB_
68#undef TE_
69#undef S_
70
71struct common {
72 char *cname;
73 char **perms;
74};
75struct common common[] = {
76#define TB_(x) { #x, x },
77#define S_(x)
78#define TE_(x)
79#include "common_perm_to_string.h"
80#undef TB_
81#undef TE_
82#undef S_
83};
84
85#define S_(x, y, z) {x, #y},
86struct av_inherit {
87 int class;
88 char *common;
89};
90struct av_inherit av_inherit[] = {
91#include "av_inherit.h"
92};
93#undef S_
94
95#include "av_permissions.h"
96#define S_(x, y, z) {x, y, z},
97struct av_perms {
98 int class;
99 int perm_i;
100 char *perm_s;
101};
102struct av_perms av_perms[] = {
103#include "av_perm_to_string.h"
104};
105#undef S_
106
107int main(int argc, char *argv[])
108{
109 int i, j, mls = 0;
110 char **arg, *polout, *ctxout;
111 int classlist_len, initial_sid_to_string_len;
112 FILE *fout;
113
114 if (argc < 3)
115 usage(argv[0]);
116 arg = argv+1;
117 if (argc==4 && strcmp(argv[1], "-m") == 0) {
118 mls = 1;
119 arg++;
120 }
121 polout = *arg++;
122 ctxout = *arg;
123
124 fout = fopen(polout, "w");
125 if (!fout) {
126 printf("Could not open %s for writing\n", polout);
127 usage(argv[0]);
128 }
129
130 classlist_len = sizeof(classlist) / sizeof(char *);
131 /* print out the classes */
132 for (i=1; i < classlist_len; i++) {
133 if(classlist[i])
134 fprintf(fout, "class %s\n", classlist[i]);
135 else
136 fprintf(fout, "class user%d\n", i);
137 }
138 fprintf(fout, "\n");
139
140 initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *);
141 /* print out the sids */
142 for (i=1; i < initial_sid_to_string_len; i++)
143 fprintf(fout, "sid %s\n", initial_sid_to_string[i]);
144 fprintf(fout, "\n");
145
146 /* print out the commons */
147 for (i=0; i< sizeof(common)/sizeof(struct common); i++) {
148 char cname[101];
149 find_common_name(common[i].cname, cname, 100);
150 cname[100] = '\0';
151 fprintf(fout, "common %s\n{\n", cname);
152 for (j=0; common[i].perms[j]; j++)
153 fprintf(fout, "\t%s\n", common[i].perms[j]);
154 fprintf(fout, "}\n\n");
155 }
156 fprintf(fout, "\n");
157
158 /* print out the class permissions */
159 for (i=1; i < classlist_len; i++) {
160 if (classlist[i]) {
161 int firstperm = -1, numperms = 0;
162
163 fprintf(fout, "class %s\n", classlist[i]);
164 /* does it inherit from a common? */
165 for (j=0; j < sizeof(av_inherit)/sizeof(struct av_inherit); j++)
166 if (av_inherit[j].class == i)
167 fprintf(fout, "inherits %s\n", av_inherit[j].common);
168
169 for (j=0; j < sizeof(av_perms)/sizeof(struct av_perms); j++) {
170 if (av_perms[j].class == i) {
171 if (firstperm == -1)
172 firstperm = j;
173 numperms++;
174 }
175 }
176 if (!numperms) {
177 fprintf(fout, "\n");
178 continue;
179 }
180
181 fprintf(fout, "{\n");
182 /* print out the av_perms */
183 for (j=0; j < numperms; j++) {
184 fprintf(fout, "\t%s\n", av_perms[firstperm+j].perm_s);
185 }
186 fprintf(fout, "}\n\n");
187 }
188 }
189 fprintf(fout, "\n");
190
191 /* NOW PRINT OUT MLS STUFF */
192 if (mls) {
193 printf("MLS not yet implemented\n");
194 exit(1);
195 }
196
197 /* types, roles, and allows */
198 fprintf(fout, "type base_t;\n");
199 fprintf(fout, "role base_r types { base_t };\n");
200 for (i=1; i < classlist_len; i++) {
201 if (classlist[i])
202 fprintf(fout, "allow base_t base_t:%s *;\n", classlist[i]);
203 else
204 fprintf(fout, "allow base_t base_t:user%d *;\n", i);
205 }
206 fprintf(fout, "user user_u roles { base_r };\n");
207 fprintf(fout, "\n");
208
209 /* default sids */
210 for (i=1; i < initial_sid_to_string_len; i++)
211 fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]);
212 fprintf(fout, "\n");
213
214
215 fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n");
216 fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n");
217 fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n");
218 fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n");
219 fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n");
220
221 fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n");
222 fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n");
223
224 fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n");
225 fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n");
226 fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n");
227
228 fprintf(fout, "genfscon proc / user_u:base_r:base_t\n");
229
230 fclose(fout);
231
232 fout = fopen(ctxout, "w");
233 if (!fout) {
234 printf("Wrote policy, but cannot open %s for writing\n", ctxout);
235 usage(argv[0]);
236 }
237 fprintf(fout, "/ user_u:base_r:base_t\n");
238 fprintf(fout, "/.* user_u:base_r:base_t\n");
239 fclose(fout);
240
241 return 0;
242}
diff --git a/security/Kconfig b/security/Kconfig
index 559293922a47..d9f47ce7e207 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -51,6 +51,14 @@ config SECURITY
51 51
52 If you are unsure how to answer this question, answer N. 52 If you are unsure how to answer this question, answer N.
53 53
54config SECURITYFS
55 bool "Enable the securityfs filesystem"
56 help
57 This will build the securityfs filesystem. It is currently used by
58 the TPM bios character driver. It is not used by SELinux or SMACK.
59
60 If you are unsure how to answer this question, answer N.
61
54config SECURITY_NETWORK 62config SECURITY_NETWORK
55 bool "Socket and Networking Security Hooks" 63 bool "Socket and Networking Security Hooks"
56 depends on SECURITY 64 depends on SECURITY
diff --git a/security/Makefile b/security/Makefile
index f65426099aa6..c05c127fff9a 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -10,7 +10,8 @@ subdir-$(CONFIG_SECURITY_SMACK) += smack
10obj-y += commoncap.o 10obj-y += commoncap.o
11 11
12# Object file lists 12# Object file lists
13obj-$(CONFIG_SECURITY) += security.o capability.o inode.o 13obj-$(CONFIG_SECURITY) += security.o capability.o
14obj-$(CONFIG_SECURITYFS) += inode.o
14# Must precede capability.o in order to stack properly. 15# Must precede capability.o in order to stack properly.
15obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o 16obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
16obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o 17obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
diff --git a/security/commoncap.c b/security/commoncap.c
index e4c4b3fc0c04..399bfdb9e2da 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -541,7 +541,7 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid,
541 * yet with increased caps. 541 * yet with increased caps.
542 * So we check for increased caps on the target process. 542 * So we check for increased caps on the target process.
543 */ 543 */
544static inline int cap_safe_nice(struct task_struct *p) 544static int cap_safe_nice(struct task_struct *p)
545{ 545{
546 if (!cap_issubset(p->cap_permitted, current->cap_permitted) && 546 if (!cap_issubset(p->cap_permitted, current->cap_permitted) &&
547 !capable(CAP_SYS_NICE)) 547 !capable(CAP_SYS_NICE))
diff --git a/security/inode.c b/security/inode.c
index acc6cf0d7900..ca4958ebad8d 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -190,7 +190,7 @@ static int create_by_name(const char *name, mode_t mode,
190 * @name: a pointer to a string containing the name of the file to create. 190 * @name: a pointer to a string containing the name of the file to create.
191 * @mode: the permission that the file should have 191 * @mode: the permission that the file should have
192 * @parent: a pointer to the parent dentry for this file. This should be a 192 * @parent: a pointer to the parent dentry for this file. This should be a
193 * directory dentry if set. If this paramater is NULL, then the 193 * directory dentry if set. If this parameter is %NULL, then the
194 * file will be created in the root of the securityfs filesystem. 194 * file will be created in the root of the securityfs filesystem.
195 * @data: a pointer to something that the caller will want to get to later 195 * @data: a pointer to something that the caller will want to get to later
196 * on. The inode.i_private pointer will point to this value on 196 * on. The inode.i_private pointer will point to this value on
@@ -199,18 +199,18 @@ static int create_by_name(const char *name, mode_t mode,
199 * this file. 199 * this file.
200 * 200 *
201 * This is the basic "create a file" function for securityfs. It allows for a 201 * This is the basic "create a file" function for securityfs. It allows for a
202 * wide range of flexibility in createing a file, or a directory (if you 202 * wide range of flexibility in creating a file, or a directory (if you
203 * want to create a directory, the securityfs_create_dir() function is 203 * want to create a directory, the securityfs_create_dir() function is
204 * recommended to be used instead.) 204 * recommended to be used instead).
205 * 205 *
206 * This function will return a pointer to a dentry if it succeeds. This 206 * This function returns a pointer to a dentry if it succeeds. This
207 * pointer must be passed to the securityfs_remove() function when the file is 207 * pointer must be passed to the securityfs_remove() function when the file is
208 * to be removed (no automatic cleanup happens if your module is unloaded, 208 * to be removed (no automatic cleanup happens if your module is unloaded,
209 * you are responsible here.) If an error occurs, NULL will be returned. 209 * you are responsible here). If an error occurs, %NULL is returned.
210 * 210 *
211 * If securityfs is not enabled in the kernel, the value -ENODEV will be 211 * If securityfs is not enabled in the kernel, the value %-ENODEV is
212 * returned. It is not wise to check for this value, but rather, check for 212 * returned. It is not wise to check for this value, but rather, check for
213 * NULL or !NULL instead as to eliminate the need for #ifdef in the calling 213 * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
214 * code. 214 * code.
215 */ 215 */
216struct dentry *securityfs_create_file(const char *name, mode_t mode, 216struct dentry *securityfs_create_file(const char *name, mode_t mode,
@@ -252,19 +252,19 @@ EXPORT_SYMBOL_GPL(securityfs_create_file);
252 * @name: a pointer to a string containing the name of the directory to 252 * @name: a pointer to a string containing the name of the directory to
253 * create. 253 * create.
254 * @parent: a pointer to the parent dentry for this file. This should be a 254 * @parent: a pointer to the parent dentry for this file. This should be a
255 * directory dentry if set. If this paramater is NULL, then the 255 * directory dentry if set. If this parameter is %NULL, then the
256 * directory will be created in the root of the securityfs filesystem. 256 * directory will be created in the root of the securityfs filesystem.
257 * 257 *
258 * This function creates a directory in securityfs with the given name. 258 * This function creates a directory in securityfs with the given @name.
259 * 259 *
260 * This function will return a pointer to a dentry if it succeeds. This 260 * This function returns a pointer to a dentry if it succeeds. This
261 * pointer must be passed to the securityfs_remove() function when the file is 261 * pointer must be passed to the securityfs_remove() function when the file is
262 * to be removed (no automatic cleanup happens if your module is unloaded, 262 * to be removed (no automatic cleanup happens if your module is unloaded,
263 * you are responsible here.) If an error occurs, NULL will be returned. 263 * you are responsible here). If an error occurs, %NULL will be returned.
264 * 264 *
265 * If securityfs is not enabled in the kernel, the value -ENODEV will be 265 * If securityfs is not enabled in the kernel, the value %-ENODEV is
266 * returned. It is not wise to check for this value, but rather, check for 266 * returned. It is not wise to check for this value, but rather, check for
267 * NULL or !NULL instead as to eliminate the need for #ifdef in the calling 267 * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling
268 * code. 268 * code.
269 */ 269 */
270struct dentry *securityfs_create_dir(const char *name, struct dentry *parent) 270struct dentry *securityfs_create_dir(const char *name, struct dentry *parent)
@@ -278,16 +278,15 @@ EXPORT_SYMBOL_GPL(securityfs_create_dir);
278/** 278/**
279 * securityfs_remove - removes a file or directory from the securityfs filesystem 279 * securityfs_remove - removes a file or directory from the securityfs filesystem
280 * 280 *
281 * @dentry: a pointer to a the dentry of the file or directory to be 281 * @dentry: a pointer to a the dentry of the file or directory to be removed.
282 * removed.
283 * 282 *
284 * This function removes a file or directory in securityfs that was previously 283 * This function removes a file or directory in securityfs that was previously
285 * created with a call to another securityfs function (like 284 * created with a call to another securityfs function (like
286 * securityfs_create_file() or variants thereof.) 285 * securityfs_create_file() or variants thereof.)
287 * 286 *
288 * This function is required to be called in order for the file to be 287 * This function is required to be called in order for the file to be
289 * removed, no automatic cleanup of files will happen when a module is 288 * removed. No automatic cleanup of files will happen when a module is
290 * removed, you are responsible here. 289 * removed; you are responsible here.
291 */ 290 */
292void securityfs_remove(struct dentry *dentry) 291void securityfs_remove(struct dentry *dentry)
293{ 292{
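Since the kerneldoc above spells out the lifetime rules only in prose, a hedged usage sketch may help; the names my_subsys, my_fops and the file layout are placeholders, not something this patch introduces:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>

static const struct file_operations my_fops;	/* placeholder fops */
static struct dentry *my_dir, *my_file;

static int __init my_securityfs_init(void)
{
	my_dir = securityfs_create_dir("my_subsys", NULL);
	if (!my_dir || IS_ERR(my_dir))	/* NULL on error, ERR_PTR(-ENODEV) if securityfs is off */
		return 0;		/* degrade gracefully, as the kerneldoc recommends */

	my_file = securityfs_create_file("state", 0600, my_dir, NULL, &my_fops);
	if (!my_file || IS_ERR(my_file)) {
		securityfs_remove(my_dir);
		my_dir = NULL;
	}
	return 0;
}

static void __exit my_securityfs_exit(void)
{
	/* no automatic cleanup happens on unload; remove both entries by hand */
	if (my_file && !IS_ERR(my_file))
		securityfs_remove(my_file);
	if (my_dir && !IS_ERR(my_dir))
		securityfs_remove(my_dir);
}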
diff --git a/security/security.c b/security/security.c
index 3a4b4f55b33f..255b08559b2b 100644
--- a/security/security.c
+++ b/security/security.c
@@ -82,8 +82,8 @@ __setup("security=", choose_lsm);
82 * 82 *
83 * Return true if: 83 * Return true if:
84 * -The passed LSM is the one chosen by user at boot time, 84 * -The passed LSM is the one chosen by user at boot time,
85 * -or user didsn't specify a specific LSM and we're the first to ask 85 * -or user didn't specify a specific LSM and we're the first to ask
86 * for registeration permissoin, 86 * for registration permission,
87 * -or the passed LSM is currently loaded. 87 * -or the passed LSM is currently loaded.
88 * Otherwise, return false. 88 * Otherwise, return false.
89 */ 89 */
@@ -101,13 +101,13 @@ int __init security_module_enable(struct security_operations *ops)
101 * register_security - registers a security framework with the kernel 101 * register_security - registers a security framework with the kernel
102 * @ops: a pointer to the struct security_options that is to be registered 102 * @ops: a pointer to the struct security_options that is to be registered
103 * 103 *
104 * This function is to allow a security module to register itself with the 104 * This function allows a security module to register itself with the
105 * kernel security subsystem. Some rudimentary checking is done on the @ops 105 * kernel security subsystem. Some rudimentary checking is done on the @ops
106 * value passed to this function. You'll need to check first if your LSM 106 * value passed to this function. You'll need to check first if your LSM
107 * is allowed to register its @ops by calling security_module_enable(@ops). 107 * is allowed to register its @ops by calling security_module_enable(@ops).
108 * 108 *
109 * If there is already a security module registered with the kernel, 109 * If there is already a security module registered with the kernel,
110 * an error will be returned. Otherwise 0 is returned on success. 110 * an error will be returned. Otherwise %0 is returned on success.
111 */ 111 */
112int register_security(struct security_operations *ops) 112int register_security(struct security_operations *ops)
113{ 113{
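The two kerneldoc blocks above describe a two-step protocol, so here is a hedged sketch of how an LSM init path typically strings them together (placeholder names; modelled on, but not copied from, the in-tree users):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>

static struct security_operations my_ops = {
	.name = "my_lsm",	/* matched against the security= boot parameter */
	/* hook pointers would be filled in here */
};

static int __init my_lsm_init(void)
{
	/* step 1: were we chosen at boot (or are we the first to ask)? */
	if (!security_module_enable(&my_ops))
		return 0;	/* another LSM was selected; back off quietly */

	/* step 2: register; an error here means someone beat us to it */
	if (register_security(&my_ops))
		panic("my_lsm: unable to register with the kernel\n");

	return 0;
}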
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index a436d1cfa88b..26301dd651d3 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -6,9 +6,6 @@ config SECURITY_SELINUX
6 help 6 help
7 This selects NSA Security-Enhanced Linux (SELinux). 7 This selects NSA Security-Enhanced Linux (SELinux).
8 You will also need a policy configuration and a labeled filesystem. 8 You will also need a policy configuration and a labeled filesystem.
9 You can obtain the policy compiler (checkpolicy), the utility for
10 labeling filesystems (setfiles), and an example policy configuration
11 from <http://www.nsa.gov/selinux/>.
12 If you are unsure how to answer this question, answer N. 9 If you are unsure how to answer this question, answer N.
13 10
14config SECURITY_SELINUX_BOOTPARAM 11config SECURITY_SELINUX_BOOTPARAM
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 114b4b4c97b2..cb30c7e350b3 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -136,7 +136,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
136 * @tclass: target security class 136 * @tclass: target security class
137 * @av: access vector 137 * @av: access vector
138 */ 138 */
139static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) 139void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
140{ 140{
141 const char **common_pts = NULL; 141 const char **common_pts = NULL;
142 u32 common_base = 0; 142 u32 common_base = 0;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 03fc6a81ae32..4a7374c12d9c 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -957,7 +957,8 @@ out_err:
957 return rc; 957 return rc;
958} 958}
959 959
960void selinux_write_opts(struct seq_file *m, struct security_mnt_opts *opts) 960static void selinux_write_opts(struct seq_file *m,
961 struct security_mnt_opts *opts)
961{ 962{
962 int i; 963 int i;
963 char *prefix; 964 char *prefix;
@@ -1290,7 +1291,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
1290 /* Default to the fs superblock SID. */ 1291 /* Default to the fs superblock SID. */
1291 isec->sid = sbsec->sid; 1292 isec->sid = sbsec->sid;
1292 1293
1293 if (sbsec->proc) { 1294 if (sbsec->proc && !S_ISLNK(inode->i_mode)) {
1294 struct proc_inode *proci = PROC_I(inode); 1295 struct proc_inode *proci = PROC_I(inode);
1295 if (proci->pde) { 1296 if (proci->pde) {
1296 isec->sclass = inode_mode_to_security_class(inode->i_mode); 1297 isec->sclass = inode_mode_to_security_class(inode->i_mode);
@@ -3548,38 +3549,44 @@ out:
3548#endif /* IPV6 */ 3549#endif /* IPV6 */
3549 3550
3550static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad, 3551static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
3551 char **addrp, int src, u8 *proto) 3552 char **_addrp, int src, u8 *proto)
3552{ 3553{
3553 int ret = 0; 3554 char *addrp;
3555 int ret;
3554 3556
3555 switch (ad->u.net.family) { 3557 switch (ad->u.net.family) {
3556 case PF_INET: 3558 case PF_INET:
3557 ret = selinux_parse_skb_ipv4(skb, ad, proto); 3559 ret = selinux_parse_skb_ipv4(skb, ad, proto);
3558 if (ret || !addrp) 3560 if (ret)
3559 break; 3561 goto parse_error;
3560 *addrp = (char *)(src ? &ad->u.net.v4info.saddr : 3562 addrp = (char *)(src ? &ad->u.net.v4info.saddr :
3561 &ad->u.net.v4info.daddr); 3563 &ad->u.net.v4info.daddr);
3562 break; 3564 goto okay;
3563 3565
3564#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 3566#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
3565 case PF_INET6: 3567 case PF_INET6:
3566 ret = selinux_parse_skb_ipv6(skb, ad, proto); 3568 ret = selinux_parse_skb_ipv6(skb, ad, proto);
3567 if (ret || !addrp) 3569 if (ret)
3568 break; 3570 goto parse_error;
3569 *addrp = (char *)(src ? &ad->u.net.v6info.saddr : 3571 addrp = (char *)(src ? &ad->u.net.v6info.saddr :
3570 &ad->u.net.v6info.daddr); 3572 &ad->u.net.v6info.daddr);
3571 break; 3573 goto okay;
3572#endif /* IPV6 */ 3574#endif /* IPV6 */
3573 default: 3575 default:
3574 break; 3576 addrp = NULL;
3577 goto okay;
3575 } 3578 }
3576 3579
3577 if (unlikely(ret)) 3580parse_error:
3578 printk(KERN_WARNING 3581 printk(KERN_WARNING
3579 "SELinux: failure in selinux_parse_skb()," 3582 "SELinux: failure in selinux_parse_skb(),"
3580 " unable to parse packet\n"); 3583 " unable to parse packet\n");
3581
3582 return ret; 3584 return ret;
3585
3586okay:
3587 if (_addrp)
3588 *_addrp = addrp;
3589 return 0;
3583} 3590}
3584 3591
3585/** 3592/**
@@ -5219,8 +5226,12 @@ static int selinux_setprocattr(struct task_struct *p,
5219 5226
5220 if (sid == 0) 5227 if (sid == 0)
5221 return -EINVAL; 5228 return -EINVAL;
5222 5229 /*
5223 /* Only allow single threaded processes to change context */ 5230 * SELinux only allows the context to be changed in the following cases:
 5231 * - single-threaded processes;
 5232 * - multi-threaded processes that intend to change their context into
 5233 * a more restricted domain (defined by a TYPEBOUNDS statement).
5234 */
5224 if (atomic_read(&p->mm->mm_users) != 1) { 5235 if (atomic_read(&p->mm->mm_users) != 1) {
5225 struct task_struct *g, *t; 5236 struct task_struct *g, *t;
5226 struct mm_struct *mm = p->mm; 5237 struct mm_struct *mm = p->mm;
@@ -5228,11 +5239,16 @@ static int selinux_setprocattr(struct task_struct *p,
5228 do_each_thread(g, t) { 5239 do_each_thread(g, t) {
5229 if (t->mm == mm && t != p) { 5240 if (t->mm == mm && t != p) {
5230 read_unlock(&tasklist_lock); 5241 read_unlock(&tasklist_lock);
5231 return -EPERM; 5242 error = security_bounded_transition(tsec->sid, sid);
5243 if (!error)
5244 goto boundary_ok;
5245
5246 return error;
5232 } 5247 }
5233 } while_each_thread(g, t); 5248 } while_each_thread(g, t);
5234 read_unlock(&tasklist_lock); 5249 read_unlock(&tasklist_lock);
5235 } 5250 }
5251boundary_ok:
5236 5252
5237 /* Check permissions for the transition. */ 5253 /* Check permissions for the transition. */
5238 error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS, 5254 error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS,
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 7b9769f5e775..d12ff1a9c0aa 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -12,6 +12,7 @@
12#include <linux/kdev_t.h> 12#include <linux/kdev_t.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/audit.h>
15#include <linux/in6.h> 16#include <linux/in6.h>
16#include <linux/path.h> 17#include <linux/path.h>
17#include <asm/system.h> 18#include <asm/system.h>
@@ -126,6 +127,9 @@ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
126 u32 events, u32 ssid, u32 tsid, 127 u32 events, u32 ssid, u32 tsid,
127 u16 tclass, u32 perms); 128 u16 tclass, u32 perms);
128 129
130/* Shows permissions in human-readable form */
131void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av);
132
129/* Exported to selinuxfs */ 133/* Exported to selinuxfs */
130int avc_get_hash_stats(char *page); 134int avc_get_hash_stats(char *page);
131extern unsigned int avc_cache_threshold; 135extern unsigned int avc_cache_threshold;
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 7c543003d653..72447370bc95 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -27,13 +27,14 @@
27#define POLICYDB_VERSION_RANGETRANS 21 27#define POLICYDB_VERSION_RANGETRANS 21
28#define POLICYDB_VERSION_POLCAP 22 28#define POLICYDB_VERSION_POLCAP 22
29#define POLICYDB_VERSION_PERMISSIVE 23 29#define POLICYDB_VERSION_PERMISSIVE 23
30#define POLICYDB_VERSION_BOUNDARY 24
30 31
31/* Range of policy versions we understand*/ 32/* Range of policy versions we understand*/
32#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE 33#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
33#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX 34#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
34#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE 35#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
35#else 36#else
36#define POLICYDB_VERSION_MAX POLICYDB_VERSION_PERMISSIVE 37#define POLICYDB_VERSION_MAX POLICYDB_VERSION_BOUNDARY
37#endif 38#endif
38 39
39#define CONTEXT_MNT 0x01 40#define CONTEXT_MNT 0x01
@@ -62,6 +63,16 @@ enum {
62extern int selinux_policycap_netpeer; 63extern int selinux_policycap_netpeer;
63extern int selinux_policycap_openperm; 64extern int selinux_policycap_openperm;
64 65
66/*
67 * type_datum properties
68 * available when the kernel policy version is >= POLICYDB_VERSION_BOUNDARY
69 */
70#define TYPEDATUM_PROPERTY_PRIMARY 0x0001
71#define TYPEDATUM_PROPERTY_ATTRIBUTE 0x0002
72
73/* limitation of boundary depth */
74#define POLICYDB_BOUNDS_MAXDEPTH 4
75
65int security_load_policy(void *data, size_t len); 76int security_load_policy(void *data, size_t len);
66 77
67int security_policycap_supported(unsigned int req_cap); 78int security_policycap_supported(unsigned int req_cap);
@@ -117,6 +128,8 @@ int security_node_sid(u16 domain, void *addr, u32 addrlen,
117int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, 128int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
118 u16 tclass); 129 u16 tclass);
119 130
131int security_bounded_transition(u32 oldsid, u32 newsid);
132
120int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid); 133int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
121 134
122int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type, 135int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index a1be97f8beea..1215b8e47dba 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -98,7 +98,7 @@ struct avtab_node *
98avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum) 98avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum)
99{ 99{
100 int hvalue; 100 int hvalue;
101 struct avtab_node *prev, *cur, *newnode; 101 struct avtab_node *prev, *cur;
102 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); 102 u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
103 103
104 if (!h || !h->htable) 104 if (!h || !h->htable)
@@ -122,9 +122,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
122 key->target_class < cur->key.target_class) 122 key->target_class < cur->key.target_class)
123 break; 123 break;
124 } 124 }
125 newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum); 125 return avtab_insert_node(h, hvalue, prev, cur, key, datum);
126
127 return newnode;
128} 126}
129 127
130struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key) 128struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
@@ -231,7 +229,7 @@ void avtab_destroy(struct avtab *h)
231 229
232 for (i = 0; i < h->nslot; i++) { 230 for (i = 0; i < h->nslot; i++) {
233 cur = h->htable[i]; 231 cur = h->htable[i];
234 while (cur != NULL) { 232 while (cur) {
235 temp = cur; 233 temp = cur;
236 cur = cur->next; 234 cur = cur->next;
237 kmem_cache_free(avtab_node_cachep, temp); 235 kmem_cache_free(avtab_node_cachep, temp);
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index fb4efe4f4bc8..4a4e35cac22b 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -29,7 +29,7 @@ static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
29 int s[COND_EXPR_MAXDEPTH]; 29 int s[COND_EXPR_MAXDEPTH];
30 int sp = -1; 30 int sp = -1;
31 31
32 for (cur = expr; cur != NULL; cur = cur->next) { 32 for (cur = expr; cur; cur = cur->next) {
33 switch (cur->expr_type) { 33 switch (cur->expr_type) {
34 case COND_BOOL: 34 case COND_BOOL:
35 if (sp == (COND_EXPR_MAXDEPTH - 1)) 35 if (sp == (COND_EXPR_MAXDEPTH - 1))
@@ -97,14 +97,14 @@ int evaluate_cond_node(struct policydb *p, struct cond_node *node)
97 if (new_state == -1) 97 if (new_state == -1)
98 printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n"); 98 printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
99 /* turn the rules on or off */ 99 /* turn the rules on or off */
100 for (cur = node->true_list; cur != NULL; cur = cur->next) { 100 for (cur = node->true_list; cur; cur = cur->next) {
101 if (new_state <= 0) 101 if (new_state <= 0)
102 cur->node->key.specified &= ~AVTAB_ENABLED; 102 cur->node->key.specified &= ~AVTAB_ENABLED;
103 else 103 else
104 cur->node->key.specified |= AVTAB_ENABLED; 104 cur->node->key.specified |= AVTAB_ENABLED;
105 } 105 }
106 106
107 for (cur = node->false_list; cur != NULL; cur = cur->next) { 107 for (cur = node->false_list; cur; cur = cur->next) {
108 /* -1 or 1 */ 108 /* -1 or 1 */
109 if (new_state) 109 if (new_state)
110 cur->node->key.specified &= ~AVTAB_ENABLED; 110 cur->node->key.specified &= ~AVTAB_ENABLED;
@@ -128,7 +128,7 @@ int cond_policydb_init(struct policydb *p)
128static void cond_av_list_destroy(struct cond_av_list *list) 128static void cond_av_list_destroy(struct cond_av_list *list)
129{ 129{
130 struct cond_av_list *cur, *next; 130 struct cond_av_list *cur, *next;
131 for (cur = list; cur != NULL; cur = next) { 131 for (cur = list; cur; cur = next) {
132 next = cur->next; 132 next = cur->next;
133 /* the avtab_ptr_t node is destroy by the avtab */ 133 /* the avtab_ptr_t node is destroy by the avtab */
134 kfree(cur); 134 kfree(cur);
@@ -139,7 +139,7 @@ static void cond_node_destroy(struct cond_node *node)
139{ 139{
140 struct cond_expr *cur_expr, *next_expr; 140 struct cond_expr *cur_expr, *next_expr;
141 141
142 for (cur_expr = node->expr; cur_expr != NULL; cur_expr = next_expr) { 142 for (cur_expr = node->expr; cur_expr; cur_expr = next_expr) {
143 next_expr = cur_expr->next; 143 next_expr = cur_expr->next;
144 kfree(cur_expr); 144 kfree(cur_expr);
145 } 145 }
@@ -155,7 +155,7 @@ static void cond_list_destroy(struct cond_node *list)
155 if (list == NULL) 155 if (list == NULL)
156 return; 156 return;
157 157
158 for (cur = list; cur != NULL; cur = next) { 158 for (cur = list; cur; cur = next) {
159 next = cur->next; 159 next = cur->next;
160 cond_node_destroy(cur); 160 cond_node_destroy(cur);
161 } 161 }
@@ -239,7 +239,7 @@ int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
239 rc = next_entry(key, fp, len); 239 rc = next_entry(key, fp, len);
240 if (rc < 0) 240 if (rc < 0)
241 goto err; 241 goto err;
242 key[len] = 0; 242 key[len] = '\0';
243 if (hashtab_insert(h, key, booldatum)) 243 if (hashtab_insert(h, key, booldatum))
244 goto err; 244 goto err;
245 245
@@ -291,7 +291,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
291 goto err; 291 goto err;
292 } 292 }
293 found = 0; 293 found = 0;
294 for (cur = other; cur != NULL; cur = cur->next) { 294 for (cur = other; cur; cur = cur->next) {
295 if (cur->node == node_ptr) { 295 if (cur->node == node_ptr) {
296 found = 1; 296 found = 1;
297 break; 297 break;
@@ -485,7 +485,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decisi
485 if (!ctab || !key || !avd) 485 if (!ctab || !key || !avd)
486 return; 486 return;
487 487
488 for (node = avtab_search_node(ctab, key); node != NULL; 488 for (node = avtab_search_node(ctab, key); node;
489 node = avtab_search_node_next(node, key->specified)) { 489 node = avtab_search_node_next(node, key->specified)) {
490 if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) == 490 if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
491 (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED))) 491 (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 65b9f8366e9c..53ddb013ae57 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -28,7 +28,7 @@ struct cond_expr {
28#define COND_XOR 5 /* bool ^ bool */ 28#define COND_XOR 5 /* bool ^ bool */
29#define COND_EQ 6 /* bool == bool */ 29#define COND_EQ 6 /* bool == bool */
30#define COND_NEQ 7 /* bool != bool */ 30#define COND_NEQ 7 /* bool != bool */
31#define COND_LAST 8 31#define COND_LAST COND_NEQ
32 __u32 expr_type; 32 __u32 expr_type;
33 __u32 bool; 33 __u32 bool;
34 struct cond_expr *next; 34 struct cond_expr *next;
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index ddc275490af8..68c7348d1acc 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -109,7 +109,7 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
109 *catmap = c_iter; 109 *catmap = c_iter;
110 c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1); 110 c_iter->startbit = e_iter->startbit & ~(NETLBL_CATMAP_SIZE - 1);
111 111
112 while (e_iter != NULL) { 112 while (e_iter) {
113 for (i = 0; i < EBITMAP_UNIT_NUMS; i++) { 113 for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
114 unsigned int delta, e_startbit, c_endbit; 114 unsigned int delta, e_startbit, c_endbit;
115 115
@@ -197,7 +197,7 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
197 } 197 }
198 } 198 }
199 c_iter = c_iter->next; 199 c_iter = c_iter->next;
200 } while (c_iter != NULL); 200 } while (c_iter);
201 if (e_iter != NULL) 201 if (e_iter != NULL)
202 ebmap->highbit = e_iter->startbit + EBITMAP_SIZE; 202 ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
203 else 203 else
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 2e7788e13213..933e735bb185 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -81,7 +81,7 @@ void *hashtab_search(struct hashtab *h, const void *key)
 
 	hvalue = h->hash_value(h, key);
 	cur = h->htable[hvalue];
-	while (cur != NULL && h->keycmp(h, key, cur->key) > 0)
+	while (cur && h->keycmp(h, key, cur->key) > 0)
 		cur = cur->next;
 
 	if (cur == NULL || (h->keycmp(h, key, cur->key) != 0))
@@ -100,7 +100,7 @@ void hashtab_destroy(struct hashtab *h)
 
 	for (i = 0; i < h->size; i++) {
 		cur = h->htable[i];
-		while (cur != NULL) {
+		while (cur) {
 			temp = cur;
 			cur = cur->next;
 			kfree(temp);
@@ -127,7 +127,7 @@ int hashtab_map(struct hashtab *h,
 
 	for (i = 0; i < h->size; i++) {
 		cur = h->htable[i];
-		while (cur != NULL) {
+		while (cur) {
 			ret = apply(cur->key, cur->datum, args);
 			if (ret)
 				return ret;
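Note: hashtab_map() applies a callback to every (key, datum) pair and stops as soon as the callback returns non-zero; the bounds sanity checks added to policydb.c later in this patch rely on exactly that early-exit behaviour. A small sketch of a callback in that style, assuming kernel context; the function is illustrative only and not part of the patch:

/* Illustrative callback only; hashtab_map() stops at the first
 * non-zero return value, which is how the bounds checks bail out. */
static int demo_check(void *key, void *datum, void *args)
{
	struct type_datum *t = datum;
	struct policydb *p = args;

	if (t->bounds > p->p_types.nprim)
		return -EINVAL;		/* aborts the whole hashtab_map() */
	return 0;
}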
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 77d745da48bb..b5407f16c2a4 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -283,8 +283,8 @@ int mls_context_to_sid(struct policydb *pol,
 		p++;
 
 	delim = *p;
-	if (delim != 0)
-		*p++ = 0;
+	if (delim != '\0')
+		*p++ = '\0';
 
 	for (l = 0; l < 2; l++) {
 		levdatum = hashtab_search(pol->p_levels.table, scontextp);
@@ -302,14 +302,14 @@ int mls_context_to_sid(struct policydb *pol,
 				while (*p && *p != ',' && *p != '-')
 					p++;
 				delim = *p;
-				if (delim != 0)
-					*p++ = 0;
+				if (delim != '\0')
+					*p++ = '\0';
 
 				/* Separate into range if exists */
 				rngptr = strchr(scontextp, '.');
 				if (rngptr != NULL) {
 					/* Remove '.' */
-					*rngptr++ = 0;
+					*rngptr++ = '\0';
 				}
 
 				catdatum = hashtab_search(pol->p_cats.table,
@@ -357,8 +357,8 @@ int mls_context_to_sid(struct policydb *pol,
 				p++;
 
 			delim = *p;
-			if (delim != 0)
-				*p++ = 0;
+			if (delim != '\0')
+				*p++ = '\0';
 		} else
 			break;
 	}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 2391761ae422..72e4a54973aa 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/audit.h>
 #include "security.h"
 
 #include "policydb.h"
@@ -116,7 +117,12 @@ static struct policydb_compat_info policydb_compat[] = {
 		.version	= POLICYDB_VERSION_PERMISSIVE,
 		.sym_num	= SYM_NUM,
 		.ocon_num	= OCON_NUM,
-	}
+	},
+	{
+		.version	= POLICYDB_VERSION_BOUNDARY,
+		.sym_num	= SYM_NUM,
+		.ocon_num	= OCON_NUM,
+	},
 };
 
 static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -254,7 +260,9 @@ static int role_index(void *key, void *datum, void *datap)
 
 	role = datum;
 	p = datap;
-	if (!role->value || role->value > p->p_roles.nprim)
+	if (!role->value
+	    || role->value > p->p_roles.nprim
+	    || role->bounds > p->p_roles.nprim)
 		return -EINVAL;
 	p->p_role_val_to_name[role->value - 1] = key;
 	p->role_val_to_struct[role->value - 1] = role;
@@ -270,9 +278,12 @@ static int type_index(void *key, void *datum, void *datap)
 	p = datap;
 
 	if (typdatum->primary) {
-		if (!typdatum->value || typdatum->value > p->p_types.nprim)
+		if (!typdatum->value
+		    || typdatum->value > p->p_types.nprim
+		    || typdatum->bounds > p->p_types.nprim)
 			return -EINVAL;
 		p->p_type_val_to_name[typdatum->value - 1] = key;
+		p->type_val_to_struct[typdatum->value - 1] = typdatum;
 	}
 
 	return 0;
@@ -285,7 +296,9 @@ static int user_index(void *key, void *datum, void *datap)
 
 	usrdatum = datum;
 	p = datap;
-	if (!usrdatum->value || usrdatum->value > p->p_users.nprim)
+	if (!usrdatum->value
+	    || usrdatum->value > p->p_users.nprim
+	    || usrdatum->bounds > p->p_users.nprim)
 		return -EINVAL;
 	p->p_user_val_to_name[usrdatum->value - 1] = key;
 	p->user_val_to_struct[usrdatum->value - 1] = usrdatum;
@@ -438,6 +451,14 @@ static int policydb_index_others(struct policydb *p)
 		goto out;
 	}
 
+	p->type_val_to_struct =
+		kmalloc(p->p_types.nprim * sizeof(*(p->type_val_to_struct)),
+			GFP_KERNEL);
+	if (!p->type_val_to_struct) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
 	if (cond_init_bool_indexes(p)) {
 		rc = -ENOMEM;
 		goto out;
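Note: the new type_val_to_struct array mirrors the existing role_val_to_struct and user_val_to_struct indexes and is sized by the number of primary types. An equivalent allocation could use kcalloc() so the slots start out zeroed; a sketch of that alternative, which is not what the patch actually does:

	p->type_val_to_struct = kcalloc(p->p_types.nprim,
					sizeof(*p->type_val_to_struct),
					GFP_KERNEL);
	if (!p->type_val_to_struct) {
		rc = -ENOMEM;
		goto out;
	}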
@@ -625,6 +646,7 @@ void policydb_destroy(struct policydb *p)
 	kfree(p->class_val_to_struct);
 	kfree(p->role_val_to_struct);
 	kfree(p->user_val_to_struct);
+	kfree(p->type_val_to_struct);
 
 	avtab_destroy(&p->te_avtab);
 
@@ -932,7 +954,7 @@ static int perm_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	rc = hashtab_insert(h, key, perdatum);
 	if (rc)
@@ -979,7 +1001,7 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	for (i = 0; i < nel; i++) {
 		rc = perm_read(p, comdatum->permissions.table, fp);
@@ -1117,7 +1139,7 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	if (len2) {
 		cladatum->comkey = kmalloc(len2 + 1, GFP_KERNEL);
@@ -1128,7 +1150,7 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
 		rc = next_entry(cladatum->comkey, fp, len2);
 		if (rc < 0)
 			goto bad;
-		cladatum->comkey[len2] = 0;
+		cladatum->comkey[len2] = '\0';
 
 		cladatum->comdatum = hashtab_search(p->p_commons.table,
 						    cladatum->comkey);
@@ -1176,8 +1198,8 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
 {
 	char *key = NULL;
 	struct role_datum *role;
-	int rc;
-	__le32 buf[2];
+	int rc, to_read = 2;
+	__le32 buf[3];
 	u32 len;
 
 	role = kzalloc(sizeof(*role), GFP_KERNEL);
@@ -1186,12 +1208,17 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
 		goto out;
 	}
 
-	rc = next_entry(buf, fp, sizeof buf);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		to_read = 3;
+
+	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
 	if (rc < 0)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
 	role->value = le32_to_cpu(buf[1]);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		role->bounds = le32_to_cpu(buf[2]);
 
 	key = kmalloc(len + 1, GFP_KERNEL);
 	if (!key) {
@@ -1201,7 +1228,7 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	rc = ebitmap_read(&role->dominates, fp);
 	if (rc)
@@ -1236,8 +1263,8 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp)
 {
 	char *key = NULL;
 	struct type_datum *typdatum;
-	int rc;
-	__le32 buf[3];
+	int rc, to_read = 3;
+	__le32 buf[4];
 	u32 len;
 
 	typdatum = kzalloc(sizeof(*typdatum), GFP_KERNEL);
@@ -1246,13 +1273,27 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp)
 		return rc;
 	}
 
-	rc = next_entry(buf, fp, sizeof buf);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		to_read = 4;
+
+	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
 	if (rc < 0)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
 	typdatum->value = le32_to_cpu(buf[1]);
-	typdatum->primary = le32_to_cpu(buf[2]);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+		u32 prop = le32_to_cpu(buf[2]);
+
+		if (prop & TYPEDATUM_PROPERTY_PRIMARY)
+			typdatum->primary = 1;
+		if (prop & TYPEDATUM_PROPERTY_ATTRIBUTE)
+			typdatum->attribute = 1;
+
+		typdatum->bounds = le32_to_cpu(buf[3]);
+	} else {
+		typdatum->primary = le32_to_cpu(buf[2]);
+	}
 
 	key = kmalloc(len + 1, GFP_KERNEL);
 	if (!key) {
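Note: in POLICYDB_VERSION_BOUNDARY policies the third word of a type record is a property bitmask rather than a bare primary flag. A standalone sketch of the decode; the numeric values of the two property flags are assumptions for illustration (the real definitions live in policydb.h, not in this hunk):

#include <stdio.h>

/* Assumed values for illustration; see security/selinux/ss/policydb.h. */
#define TYPEDATUM_PROPERTY_PRIMARY	0x0001
#define TYPEDATUM_PROPERTY_ATTRIBUTE	0x0002

int main(void)
{
	unsigned int prop = 0x0003;	/* example: both bits set */

	printf("primary=%d attribute=%d\n",
	       !!(prop & TYPEDATUM_PROPERTY_PRIMARY),
	       !!(prop & TYPEDATUM_PROPERTY_ATTRIBUTE));
	return 0;
}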
@@ -1262,7 +1303,7 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	rc = hashtab_insert(h, key, typdatum);
 	if (rc)
@@ -1309,8 +1350,8 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp)
 {
 	char *key = NULL;
 	struct user_datum *usrdatum;
-	int rc;
-	__le32 buf[2];
+	int rc, to_read = 2;
+	__le32 buf[3];
 	u32 len;
 
 	usrdatum = kzalloc(sizeof(*usrdatum), GFP_KERNEL);
@@ -1319,12 +1360,17 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp)
 		goto out;
 	}
 
-	rc = next_entry(buf, fp, sizeof buf);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		to_read = 3;
+
+	rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
 	if (rc < 0)
 		goto bad;
 
 	len = le32_to_cpu(buf[0]);
 	usrdatum->value = le32_to_cpu(buf[1]);
+	if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+		usrdatum->bounds = le32_to_cpu(buf[2]);
 
 	key = kmalloc(len + 1, GFP_KERNEL);
 	if (!key) {
@@ -1334,7 +1380,7 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	rc = ebitmap_read(&usrdatum->roles, fp);
 	if (rc)
@@ -1388,7 +1434,7 @@ static int sens_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	levdatum->level = kmalloc(sizeof(struct mls_level), GFP_ATOMIC);
 	if (!levdatum->level) {
@@ -1440,7 +1486,7 @@ static int cat_read(struct policydb *p, struct hashtab *h, void *fp)
 	rc = next_entry(key, fp, len);
 	if (rc < 0)
 		goto bad;
-	key[len] = 0;
+	key[len] = '\0';
 
 	rc = hashtab_insert(h, key, catdatum);
 	if (rc)
@@ -1465,6 +1511,133 @@ static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp)
 	cat_read,
 };
 
+static int user_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+	struct user_datum *upper, *user;
+	struct policydb *p = datap;
+	int depth = 0;
+
+	upper = user = datum;
+	while (upper->bounds) {
+		struct ebitmap_node *node;
+		unsigned long bit;
+
+		if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+			printk(KERN_ERR "SELinux: user %s: "
+			       "too deep or looped boundary",
+			       (char *) key);
+			return -EINVAL;
+		}
+
+		upper = p->user_val_to_struct[upper->bounds - 1];
+		ebitmap_for_each_positive_bit(&user->roles, node, bit) {
+			if (ebitmap_get_bit(&upper->roles, bit))
+				continue;
+
+			printk(KERN_ERR
+			       "SELinux: boundary violated policy: "
+			       "user=%s role=%s bounds=%s\n",
+			       p->p_user_val_to_name[user->value - 1],
+			       p->p_role_val_to_name[bit],
+			       p->p_user_val_to_name[upper->value - 1]);
+
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int role_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+	struct role_datum *upper, *role;
+	struct policydb *p = datap;
+	int depth = 0;
+
+	upper = role = datum;
+	while (upper->bounds) {
+		struct ebitmap_node *node;
+		unsigned long bit;
+
+		if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+			printk(KERN_ERR "SELinux: role %s: "
+			       "too deep or looped bounds\n",
+			       (char *) key);
+			return -EINVAL;
+		}
+
+		upper = p->role_val_to_struct[upper->bounds - 1];
+		ebitmap_for_each_positive_bit(&role->types, node, bit) {
+			if (ebitmap_get_bit(&upper->types, bit))
+				continue;
+
+			printk(KERN_ERR
+			       "SELinux: boundary violated policy: "
+			       "role=%s type=%s bounds=%s\n",
+			       p->p_role_val_to_name[role->value - 1],
+			       p->p_type_val_to_name[bit],
+			       p->p_role_val_to_name[upper->value - 1]);
+
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int type_bounds_sanity_check(void *key, void *datum, void *datap)
+{
+	struct type_datum *upper, *type;
+	struct policydb *p = datap;
+	int depth = 0;
+
+	upper = type = datum;
+	while (upper->bounds) {
+		if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
+			printk(KERN_ERR "SELinux: type %s: "
+			       "too deep or looped boundary\n",
+			       (char *) key);
+			return -EINVAL;
+		}
+
+		upper = p->type_val_to_struct[upper->bounds - 1];
+		if (upper->attribute) {
+			printk(KERN_ERR "SELinux: type %s: "
+			       "bounded by attribute %s",
+			       (char *) key,
+			       p->p_type_val_to_name[upper->value - 1]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int policydb_bounds_sanity_check(struct policydb *p)
+{
+	int rc;
+
+	if (p->policyvers < POLICYDB_VERSION_BOUNDARY)
+		return 0;
+
+	rc = hashtab_map(p->p_users.table,
+			 user_bounds_sanity_check, p);
+	if (rc)
+		return rc;
+
+	rc = hashtab_map(p->p_roles.table,
+			 role_bounds_sanity_check, p);
+	if (rc)
+		return rc;
+
+	rc = hashtab_map(p->p_types.table,
+			 type_bounds_sanity_check, p);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
 extern int ss_initialized;
 
 /*
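Note: each of the three sanity checks walks the bounds chain (datum -> bounds -> bounds ...) and gives up after POLICYDB_BOUNDS_MAXDEPTH hops, so a cycle in a malformed policy cannot loop forever. A standalone sketch of the same depth-capped walk over a plain array; the names and the depth constant value are assumptions for illustration:

#include <stdio.h>

#define BOUNDS_MAXDEPTH 8	/* stand-in for POLICYDB_BOUNDS_MAXDEPTH */

struct demo_type {
	unsigned int value;	/* 1-based, as in the policydb */
	unsigned int bounds;	/* 0 = unbounded, else value of the parent */
};

/* Returns 0 if the chain terminates within the depth limit, -1 otherwise. */
static int check_chain(struct demo_type *types, unsigned int value)
{
	struct demo_type *upper = &types[value - 1];
	int depth = 0;

	while (upper->bounds) {
		if (++depth == BOUNDS_MAXDEPTH)
			return -1;	/* too deep or looped boundary */
		upper = &types[upper->bounds - 1];
	}
	return 0;
}

int main(void)
{
	struct demo_type types[] = {
		{ .value = 1, .bounds = 0 },	/* top of the hierarchy */
		{ .value = 2, .bounds = 1 },	/* bounded by type 1 */
		{ .value = 3, .bounds = 3 },	/* self-loop: must be rejected */
	};

	printf("type 2: %d\n", check_chain(types, 2));	/* prints 0  */
	printf("type 3: %d\n", check_chain(types, 3));	/* prints -1 */
	return 0;
}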
@@ -1523,7 +1696,7 @@ int policydb_read(struct policydb *p, void *fp)
 		kfree(policydb_str);
 		goto bad;
 	}
-	policydb_str[len] = 0;
+	policydb_str[len] = '\0';
 	if (strcmp(policydb_str, POLICYDB_STRING)) {
 		printk(KERN_ERR "SELinux: policydb string %s does not match "
 		       "my string %s\n", policydb_str, POLICYDB_STRING);
@@ -1961,6 +2134,10 @@ int policydb_read(struct policydb *p, void *fp)
 			goto bad;
 	}
 
+	rc = policydb_bounds_sanity_check(p);
+	if (rc)
+		goto bad;
+
 	rc = 0;
 out:
 	return rc;
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 4253370fda6a..55152d498b53 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -61,6 +61,7 @@ struct class_datum {
 /* Role attributes */
 struct role_datum {
 	u32 value;			/* internal role value */
+	u32 bounds;			/* boundary of role */
 	struct ebitmap dominates;	/* set of roles dominated by this role */
 	struct ebitmap types;		/* set of authorized types for role */
 };
@@ -81,12 +82,15 @@ struct role_allow {
 /* Type attributes */
 struct type_datum {
 	u32 value;		/* internal type value */
+	u32 bounds;		/* boundary of type */
 	unsigned char primary;	/* primary name? */
+	unsigned char attribute;/* attribute? */
 };
 
 /* User attributes */
 struct user_datum {
 	u32 value;			/* internal user value */
+	u32 bounds;			/* bounds of user */
 	struct ebitmap roles;		/* set of authorized roles for user */
 	struct mls_range range;		/* MLS range (min - max) for user */
 	struct mls_level dfltlevel;	/* default login MLS level for user */
@@ -209,6 +213,7 @@ struct policydb {
 	struct class_datum **class_val_to_struct;
 	struct role_datum **role_val_to_struct;
 	struct user_datum **user_val_to_struct;
+	struct type_datum **type_val_to_struct;
 
 	/* type enforcement access vectors and transitions */
 	struct avtab te_avtab;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 8551952ef329..ab0cc0c7b944 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -88,6 +88,11 @@ static u32 latest_granting;
 static int context_struct_to_string(struct context *context, char **scontext,
 				    u32 *scontext_len);
 
+static int context_struct_compute_av(struct context *scontext,
+				     struct context *tcontext,
+				     u16 tclass,
+				     u32 requested,
+				     struct av_decision *avd);
 /*
  * Return the boolean value of a constraint expression
  * when it is applied to the specified source and target
@@ -274,6 +279,100 @@ mls_ops:
 }
 
 /*
+ * type_attribute_bounds_av - drops permissions that violate
+ * a boundary constraint.
+ */
+static void type_attribute_bounds_av(struct context *scontext,
+				     struct context *tcontext,
+				     u16 tclass,
+				     u32 requested,
+				     struct av_decision *avd)
+{
+	struct context lo_scontext;
+	struct context lo_tcontext;
+	struct av_decision lo_avd;
+	struct type_datum *source
+		= policydb.type_val_to_struct[scontext->type - 1];
+	struct type_datum *target
+		= policydb.type_val_to_struct[tcontext->type - 1];
+	u32 masked = 0;
+
+	if (source->bounds) {
+		memset(&lo_avd, 0, sizeof(lo_avd));
+
+		memcpy(&lo_scontext, scontext, sizeof(lo_scontext));
+		lo_scontext.type = source->bounds;
+
+		context_struct_compute_av(&lo_scontext,
+					  tcontext,
+					  tclass,
+					  requested,
+					  &lo_avd);
+		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
+			return;		/* no masked permission */
+		masked = ~lo_avd.allowed & avd->allowed;
+	}
+
+	if (target->bounds) {
+		memset(&lo_avd, 0, sizeof(lo_avd));
+
+		memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext));
+		lo_tcontext.type = target->bounds;
+
+		context_struct_compute_av(scontext,
+					  &lo_tcontext,
+					  tclass,
+					  requested,
+					  &lo_avd);
+		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
+			return;		/* no masked permission */
+		masked = ~lo_avd.allowed & avd->allowed;
+	}
+
+	if (source->bounds && target->bounds) {
+		memset(&lo_avd, 0, sizeof(lo_avd));
+		/*
+		 * lo_scontext and lo_tcontext are already
+		 * set up.
+		 */
+
+		context_struct_compute_av(&lo_scontext,
+					  &lo_tcontext,
+					  tclass,
+					  requested,
+					  &lo_avd);
+		if ((lo_avd.allowed & avd->allowed) == avd->allowed)
+			return;		/* no masked permission */
+		masked = ~lo_avd.allowed & avd->allowed;
+	}
+
+	if (masked) {
+		struct audit_buffer *ab;
+		char *stype_name
+			= policydb.p_type_val_to_name[source->value - 1];
+		char *ttype_name
+			= policydb.p_type_val_to_name[target->value - 1];
+		char *tclass_name
+			= policydb.p_class_val_to_name[tclass - 1];
+
+		/* mask violated permissions */
+		avd->allowed &= ~masked;
+
+		/* notify userspace via audit message */
+		ab = audit_log_start(current->audit_context,
+				     GFP_ATOMIC, AUDIT_SELINUX_ERR);
+		if (!ab)
+			return;
+
+		audit_log_format(ab, "av boundary violation: "
+				 "source=%s target=%s tclass=%s",
+				 stype_name, ttype_name, tclass_name);
+		avc_dump_av(ab, tclass, masked);
+		audit_log_end(ab);
+	}
+}
+
+/*
  * Compute access vectors based on a context structure pair for
  * the permissions in a particular class.
  */
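Note: the masking step in type_attribute_bounds_av() is a pure bit operation: anything the bounded (lower) context was granted but its bounds parent is not allowed gets stripped. A standalone worked example of the same arithmetic; the values are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int allowed    = 0x0000003f;	/* what the child was granted  */
	unsigned int lo_allowed = 0x00000033;	/* what its bounds parent gets */
	unsigned int masked;

	masked = ~lo_allowed & allowed;		/* bits only the child has */
	allowed &= ~masked;			/* drop the violating bits */

	printf("masked=0x%08x allowed=0x%08x\n", masked, allowed);
	/* prints masked=0x0000000c allowed=0x00000033 */
	return 0;
}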
@@ -356,7 +455,7 @@ static int context_struct_compute_av(struct context *scontext,
 		avkey.source_type = i + 1;
 		avkey.target_type = j + 1;
 		for (node = avtab_search_node(&policydb.te_avtab, &avkey);
-		     node != NULL;
+		     node;
 		     node = avtab_search_node_next(node, avkey.specified)) {
 			if (node->key.specified == AVTAB_ALLOWED)
 				avd->allowed |= node->datum.data;
@@ -404,6 +503,14 @@ static int context_struct_compute_av(struct context *scontext,
 					PROCESS__DYNTRANSITION);
 	}
 
+	/*
+	 * If the given source and target types have a boundary
+	 * constraint, these lazy checks have to mask any violated
+	 * permission and report it to userspace via audit.
+	 */
+	type_attribute_bounds_av(scontext, tcontext,
+				 tclass, requested, avd);
+
 	return 0;
 
 inval_class:
@@ -549,6 +656,69 @@ out:
 	return rc;
 }
 
+/*
+ * security_bounded_transition - check whether the given
+ * transition is directed to a bounded type, or not.
+ * It returns 0 if @newsid is bounded by @oldsid;
+ * otherwise, it returns an error code.
+ *
+ * @oldsid : current security identifier
+ * @newsid : destination security identifier
+ */
+int security_bounded_transition(u32 old_sid, u32 new_sid)
+{
+	struct context *old_context, *new_context;
+	struct type_datum *type;
+	int index;
+	int rc = -EINVAL;
+
+	read_lock(&policy_rwlock);
+
+	old_context = sidtab_search(&sidtab, old_sid);
+	if (!old_context) {
+		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+		       __func__, old_sid);
+		goto out;
+	}
+
+	new_context = sidtab_search(&sidtab, new_sid);
+	if (!new_context) {
+		printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
+		       __func__, new_sid);
+		goto out;
+	}
+
+	/* type/domain unchanged */
+	if (old_context->type == new_context->type) {
+		rc = 0;
+		goto out;
+	}
+
+	index = new_context->type;
+	while (true) {
+		type = policydb.type_val_to_struct[index - 1];
+		BUG_ON(!type);
+
+		/* not bounded anymore */
+		if (!type->bounds) {
+			rc = -EPERM;
+			break;
+		}
+
+		/* @newsid is bounded by @oldsid */
+		if (type->bounds == old_context->type) {
+			rc = 0;
+			break;
+		}
+		index = type->bounds;
+	}
+out:
+	read_unlock(&policy_rwlock);
+
+	return rc;
+}
+
+
 /**
  * security_compute_av - Compute access vector decisions.
  * @ssid: source security identifier
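Note: security_bounded_transition() returns 0 only when @new_sid's type is reachable from @old_sid's type by following the bounds chain (or the type is unchanged), and -EPERM once the chain runs out. A hedged sketch of how a caller might use it to gate an otherwise unprivileged context change; the surrounding function is hypothetical and not part of this hunk:

/* Hypothetical caller: only allow the relabel if the target is bounded. */
static int example_setcurrent(u32 old_sid, u32 new_sid, int privileged)
{
	int rc;

	if (privileged)
		return 0;	/* e.g. an AVC check already succeeded */

	rc = security_bounded_transition(old_sid, new_sid);
	if (rc)
		return rc;	/* -EPERM: new_sid is not bounded by old_sid */

	return 0;
}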
@@ -794,7 +964,7 @@ static int string_to_context_struct(struct policydb *pol,
 		*p++ = 0;
 
 	typdatum = hashtab_search(pol->p_types.table, scontextp);
-	if (!typdatum)
+	if (!typdatum || typdatum->attribute)
 		goto out;
 
 	ctx->type = typdatum->value;
@@ -1037,7 +1207,7 @@ static int security_compute_sid(u32 ssid,
 	/* If no permanent rule, also check for enabled conditional rules */
 	if (!avdatum) {
 		node = avtab_search_node(&policydb.te_cond_avtab, &avkey);
-		for (; node != NULL; node = avtab_search_node_next(node, specified)) {
+		for (; node; node = avtab_search_node_next(node, specified)) {
 			if (node->key.specified & AVTAB_ENABLED) {
 				avdatum = &node->datum;
 				break;
@@ -2050,7 +2220,7 @@ int security_set_bools(int len, int *values)
 			policydb.bool_val_to_struct[i]->state = 0;
 	}
 
-	for (cur = policydb.cond_list; cur != NULL; cur = cur->next) {
+	for (cur = policydb.cond_list; cur; cur = cur->next) {
 		rc = evaluate_cond_node(&policydb, cur);
 		if (rc)
 			goto out;
@@ -2102,7 +2272,7 @@ static int security_preserve_bools(struct policydb *p)
 		if (booldatum)
 			booldatum->state = bvalues[i];
 	}
-	for (cur = p->cond_list; cur != NULL; cur = cur->next) {
+	for (cur = p->cond_list; cur; cur = cur->next) {
 		rc = evaluate_cond_node(p, cur);
 		if (rc)
 			goto out;
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index a81ded104129..e817989764cd 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -43,7 +43,7 @@ int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
 	hvalue = SIDTAB_HASH(sid);
 	prev = NULL;
 	cur = s->htable[hvalue];
-	while (cur != NULL && sid > cur->sid) {
+	while (cur && sid > cur->sid) {
 		prev = cur;
 		cur = cur->next;
 	}
@@ -92,7 +92,7 @@ static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
 
 	hvalue = SIDTAB_HASH(sid);
 	cur = s->htable[hvalue];
-	while (cur != NULL && sid > cur->sid)
+	while (cur && sid > cur->sid)
 		cur = cur->next;
 
 	if (force && cur && sid == cur->sid && cur->context.len)
@@ -103,7 +103,7 @@ static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
 		sid = SECINITSID_UNLABELED;
 		hvalue = SIDTAB_HASH(sid);
 		cur = s->htable[hvalue];
-		while (cur != NULL && sid > cur->sid)
+		while (cur && sid > cur->sid)
 			cur = cur->next;
 		if (!cur || sid != cur->sid)
 			return NULL;
@@ -136,7 +136,7 @@ int sidtab_map(struct sidtab *s,
 
 	for (i = 0; i < SIDTAB_SIZE; i++) {
 		cur = s->htable[i];
-		while (cur != NULL) {
+		while (cur) {
 			rc = apply(cur->sid, &cur->context, args);
 			if (rc)
 				goto out;
@@ -155,7 +155,7 @@ static inline u32 sidtab_search_context(struct sidtab *s,
 
 	for (i = 0; i < SIDTAB_SIZE; i++) {
 		cur = s->htable[i];
-		while (cur != NULL) {
+		while (cur) {
 			if (context_cmp(&cur->context, context))
 				return cur->sid;
 			cur = cur->next;
@@ -242,7 +242,7 @@ void sidtab_destroy(struct sidtab *s)
 
 	for (i = 0; i < SIDTAB_SIZE; i++) {
 		cur = s->htable[i];
-		while (cur != NULL) {
+		while (cur) {
 			temp = cur;
 			cur = cur->next;
 			context_destroy(&temp->context);
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 4a4477f5afdc..31dce559595a 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -178,6 +178,7 @@ u32 smack_to_secid(const char *);
 extern int smack_cipso_direct;
 extern int smack_net_nltype;
 extern char *smack_net_ambient;
+extern char *smack_onlycap;
 
 extern struct smack_known *smack_known;
 extern struct smack_known smack_known_floor;
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index f6b5f6eed6dd..79ff21ed4c3b 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -157,7 +157,7 @@ int smk_access(char *subject_label, char *object_label, int request)
  *
  * This function checks the current subject label/object label pair
  * in the access rule list and returns 0 if the access is permitted,
- * non zero otherwise. It allows that current my have the capability
+ * non zero otherwise. It allows that current may have the capability
  * to override the rules.
  */
 int smk_curacc(char *obj_label, u32 mode)
@@ -168,6 +168,14 @@ int smk_curacc(char *obj_label, u32 mode)
 	if (rc == 0)
 		return 0;
 
+	/*
+	 * Return if a specific label has been designated as the
+	 * only one that gets privilege and current does not
+	 * have that label.
+	 */
+	if (smack_onlycap != NULL && smack_onlycap != current->security)
+		return rc;
+
 	if (capable(CAP_MAC_OVERRIDE))
 		return 0;
 
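Note: the onlycap test sits after the rule-list check and before the CAP_MAC_OVERRIDE escape, so a label mismatch turns an otherwise capability-overridable denial into a hard denial. The same ordering, reduced to a standalone decision function (labels are compared by pointer, as Smack does with its interned label strings; the names here are illustrative):

#include <stdio.h>

/* Illustrative reduction of the smk_curacc() decision order. */
static int demo_curacc(int rule_rc, const char *onlycap,
		       const char *subject, int has_mac_override)
{
	if (rule_rc == 0)
		return 0;			/* rule list already allows it */
	if (onlycap != NULL && onlycap != subject)
		return rule_rc;			/* capability cannot help here */
	if (has_mac_override)
		return 0;			/* CAP_MAC_OVERRIDE wins */
	return rule_rc;
}

int main(void)
{
	static const char hat[] = "^";
	static const char other[] = "_";

	printf("%d\n", demo_curacc(-13, hat, other, 1));	/* -13: blocked */
	printf("%d\n", demo_curacc(-13, hat, hat, 1));		/* 0: allowed  */
	return 0;
}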
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 271a835fbbe3..e7c642458ec9 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -39,6 +39,7 @@ enum smk_inos {
 	SMK_DIRECT	= 6,	/* CIPSO level indicating direct label */
 	SMK_AMBIENT	= 7,	/* internet ambient label */
 	SMK_NLTYPE	= 8,	/* label scheme to use by default */
+	SMK_ONLYCAP	= 9,	/* the only "capable" label */
 };
 
 /*
@@ -68,6 +69,16 @@ int smack_net_nltype = NETLBL_NLTYPE_CIPSOV4;
  */
 int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
 
+/*
+ * Unless a process is running with this label even
+ * having CAP_MAC_OVERRIDE isn't enough to grant
+ * privilege to violate MAC policy. If no label is
+ * designated (the NULL case) capabilities apply to
+ * everyone. It is expected that the hat (^) label
+ * will be used if any label is used.
+ */
+char *smack_onlycap;
+
 static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
 struct smk_list_entry *smack_list;
 
@@ -787,6 +798,85 @@ static const struct file_operations smk_ambient_ops = {
 	.write		= smk_write_ambient,
 };
 
+/**
+ * smk_read_onlycap - read() for /smack/onlycap
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
+				size_t cn, loff_t *ppos)
+{
+	char *smack = "";
+	ssize_t rc = -EINVAL;
+	int asize;
+
+	if (*ppos != 0)
+		return 0;
+
+	if (smack_onlycap != NULL)
+		smack = smack_onlycap;
+
+	asize = strlen(smack) + 1;
+
+	if (cn >= asize)
+		rc = simple_read_from_buffer(buf, cn, ppos, smack, asize);
+
+	return rc;
+}
+
+/**
+ * smk_write_onlycap - write() for /smack/onlycap
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	char in[SMK_LABELLEN];
+	char *sp = current->security;
+
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	/*
+	 * This can be done using smk_access() but is done
+	 * explicitly for clarity. The smk_access() implementation
+	 * would use smk_access(smack_onlycap, MAY_WRITE)
+	 */
+	if (smack_onlycap != NULL && smack_onlycap != sp)
+		return -EPERM;
+
+	if (count >= SMK_LABELLEN)
+		return -EINVAL;
+
+	if (copy_from_user(in, buf, count) != 0)
+		return -EFAULT;
+
+	/*
+	 * Should the null string be passed in, unset the onlycap value.
+	 * This needs some care, as smk_import() normally returns NULL
+	 * only for errors. A null string or "\n" would usually be a bad
+	 * thing to pass to smk_import(), but here it is actually useful.
+	 */
+	smack_onlycap = smk_import(in, count);
+
+	return count;
+}
+
+static const struct file_operations smk_onlycap_ops = {
+	.read		= smk_read_onlycap,
+	.write		= smk_write_onlycap,
+};
+
 struct option_names {
 	int	o_number;
 	char	*o_name;
@@ -919,6 +1009,8 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
 		{"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
 	[SMK_NLTYPE] =
 		{"nltype", &smk_nltype_ops, S_IRUGO|S_IWUSR},
+	[SMK_ONLYCAP] =
+		{"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
 	/* last one */ {""}
 	};
 
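Note: with the [SMK_ONLYCAP] entry registered, the feature is driven entirely through the new /smack/onlycap file: a CAP_MAC_ADMIN process running with the designated label (or with onlycap still unset) writes the label that should monopolize MAC override. A minimal userspace sketch, assuming smackfs is mounted at /smack and using the hat label the in-code comment suggests:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *label = "^";	/* the hat label, as the comment suggests */
	int fd = open("/smack/onlycap", O_WRONLY);

	if (fd < 0) {
		perror("open /smack/onlycap");
		return 1;
	}
	if (write(fd, label, strlen(label)) < 0)
		perror("write");
	close(fd);
	return 0;
}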