-rw-r--r--Documentation/s390/Debugging390.txt2
-rw-r--r--MAINTAINERS8
-rw-r--r--Makefile17
-rw-r--r--arch/ia64/Kconfig9
-rw-r--r--arch/ia64/hp/common/hwsw_iommu.c4
-rw-r--r--arch/ia64/kernel/crash.c16
-rw-r--r--arch/ia64/kernel/crash_dump.c3
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/iosapic.c5
-rw-r--r--arch/ia64/kernel/machine_kexec.c15
-rw-r--r--arch/ia64/kernel/process.c16
-rw-r--r--arch/ia64/kernel/ptrace.c14
-rw-r--r--arch/ia64/kernel/setup.c31
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/mm/contig.c76
-rw-r--r--arch/ia64/mm/discontig.c46
-rw-r--r--arch/ia64/mm/init.c38
-rw-r--r--arch/ia64/sn/kernel/huberror.c16
-rw-r--r--arch/mips/Kconfig103
-rw-r--r--arch/mips/Kconfig.debug8
-rw-r--r--arch/mips/arc/identify.c2
-rw-r--r--arch/mips/arc/memory.c18
-rw-r--r--arch/mips/au1000/common/irq.c8
-rw-r--r--arch/mips/au1000/common/pci.c18
-rw-r--r--arch/mips/au1000/common/prom.c3
-rw-r--r--arch/mips/au1000/common/setup.c19
-rw-r--r--arch/mips/au1000/pb1100/board_setup.c93
-rw-r--r--arch/mips/au1000/pb1200/irqmap.c32
-rw-r--r--arch/mips/basler/excite/excite_irq.c6
-rw-r--r--arch/mips/cobalt/irq.c2
-rw-r--r--arch/mips/cobalt/setup.c3
-rw-r--r--arch/mips/ddb5xxx/common/prom.c3
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq.c9
-rw-r--r--arch/mips/ddb5xxx/ddb5477/irq_5477.c2
-rw-r--r--arch/mips/dec/ioasic-irq.c4
-rw-r--r--arch/mips/dec/kn02-irq.c2
-rw-r--r--arch/mips/dec/prom/memory.c17
-rw-r--r--arch/mips/dec/setup.c12
-rw-r--r--arch/mips/emma2rh/common/irq_emma2rh.c2
-rw-r--r--arch/mips/emma2rh/markeins/irq.c2
-rw-r--r--arch/mips/emma2rh/markeins/irq_markeins.c4
-rw-r--r--arch/mips/gt64120/ev64120/irq.c2
-rw-r--r--arch/mips/gt64120/ev64120/setup.c3
-rw-r--r--arch/mips/gt64120/momenco_ocelot/dbg_io.c4
-rw-r--r--arch/mips/gt64120/momenco_ocelot/irq.c4
-rw-r--r--arch/mips/gt64120/momenco_ocelot/prom.c3
-rw-r--r--arch/mips/gt64120/wrppmc/irq.c2
-rw-r--r--arch/mips/gt64120/wrppmc/setup.c3
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/jmr3927/common/prom.c3
-rw-r--r--arch/mips/jmr3927/rbhma3100/irq.c2
-rw-r--r--arch/mips/jmr3927/rbhma3100/setup.c2
-rw-r--r--arch/mips/kernel/asm-offsets.c4
-rw-r--r--arch/mips/kernel/cpu-probe.c2
-rw-r--r--arch/mips/kernel/gdb-stub.c6
-rw-r--r--arch/mips/kernel/head.S25
-rw-r--r--arch/mips/kernel/i8259.c24
-rw-r--r--arch/mips/kernel/irixelf.c331
-rw-r--r--arch/mips/kernel/irq-msc01.c4
-rw-r--r--arch/mips/kernel/irq-mv6434x.c14
-rw-r--r--arch/mips/kernel/irq-rm7000.c13
-rw-r--r--arch/mips/kernel/irq-rm9000.c24
-rw-r--r--arch/mips/kernel/irq_cpu.c21
-rw-r--r--arch/mips/kernel/linux32.c28
-rw-r--r--arch/mips/kernel/mips-mt.c9
-rw-r--r--arch/mips/kernel/proc.c8
-rw-r--r--arch/mips/kernel/process.c6
-rw-r--r--arch/mips/kernel/r4k_fpu.S19
-rw-r--r--arch/mips/kernel/rtlx.c4
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c47
-rw-r--r--arch/mips/kernel/signal.c6
-rw-r--r--arch/mips/kernel/signal_n32.c4
-rw-r--r--arch/mips/kernel/smp-mt.c9
-rw-r--r--arch/mips/kernel/smtc.c54
-rw-r--r--arch/mips/kernel/sysirix.c4
-rw-r--r--arch/mips/kernel/vpe.c36
-rw-r--r--arch/mips/lasat/interrupt.c2
-rw-r--r--arch/mips/lasat/prom.c3
-rw-r--r--arch/mips/lib-32/Makefile2
-rw-r--r--arch/mips/lib-64/Makefile2
-rw-r--r--arch/mips/lib-64/memset.S142
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/memset.S (renamed from arch/mips/lib-32/memset.S)35
-rw-r--r--arch/mips/lib/uncached.c4
-rw-r--r--arch/mips/mips-boards/atlas/atlas_int.c9
-rw-r--r--arch/mips/mips-boards/generic/memory.c18
-rw-r--r--arch/mips/mips-boards/malta/malta_int.c7
-rw-r--r--arch/mips/mips-boards/sead/sead_int.c2
-rw-r--r--arch/mips/mips-boards/sim/sim_int.c6
-rw-r--r--arch/mips/mips-boards/sim/sim_mem.c16
-rw-r--r--arch/mips/mm/init.c50
-rw-r--r--arch/mips/momentum/jaguar_atx/Makefile2
-rw-r--r--arch/mips/momentum/jaguar_atx/irq.c4
-rw-r--r--arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h6
-rw-r--r--arch/mips/momentum/jaguar_atx/platform.c235
-rw-r--r--arch/mips/momentum/jaguar_atx/prom.c58
-rw-r--r--arch/mips/momentum/ocelot_3/irq.c2
-rw-r--r--arch/mips/momentum/ocelot_3/prom.c3
-rw-r--r--arch/mips/momentum/ocelot_c/cpci-irq.c2
-rw-r--r--arch/mips/momentum/ocelot_c/dbg_io.c4
-rw-r--r--arch/mips/momentum/ocelot_c/irq.c2
-rw-r--r--arch/mips/momentum/ocelot_c/prom.c3
-rw-r--r--arch/mips/momentum/ocelot_c/uart-irq.c2
-rw-r--r--arch/mips/momentum/ocelot_g/dbg_io.c4
-rw-r--r--arch/mips/momentum/ocelot_g/irq.c4
-rw-r--r--arch/mips/momentum/ocelot_g/prom.c3
-rw-r--r--arch/mips/oprofile/Kconfig2
-rw-r--r--arch/mips/pci/fixup-vr4133.c16
-rw-r--r--arch/mips/philips/pnx8550/common/int.c2
-rw-r--r--arch/mips/philips/pnx8550/common/prom.c3
-rw-r--r--arch/mips/pmc-sierra/yosemite/dbg_io.c2
-rw-r--r--arch/mips/pmc-sierra/yosemite/irq.c6
-rw-r--r--arch/mips/pmc-sierra/yosemite/prom.c3
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c2
-rw-r--r--arch/mips/qemu/q-mem.c3
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c13
-rw-r--r--arch/mips/sgi-ip22/ip22-mc.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c3
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c10
-rw-r--r--arch/mips/sgi-ip32/ip32-memory.c3
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c2
-rw-r--r--arch/mips/sibyte/cfe/setup.c3
-rw-r--r--arch/mips/sibyte/sb1250/irq.c2
-rw-r--r--arch/mips/sibyte/sb1250/prom.c3
-rw-r--r--arch/mips/sni/irq.c2
-rw-r--r--arch/mips/sni/sniprom.c3
-rw-r--r--arch/mips/tx4927/common/tx4927_irq.c4
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c12
-rw-r--r--arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c3
-rw-r--r--arch/mips/tx4938/common/irq.c4
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/irq.c2
-rw-r--r--arch/mips/tx4938/toshiba_rbtx4938/prom.c3
-rw-r--r--arch/mips/vr41xx/common/icu.c31
-rw-r--r--arch/mips/vr41xx/common/init.c3
-rw-r--r--arch/mips/vr41xx/common/irq.c18
-rw-r--r--arch/mips/vr41xx/nec-cmbvr4133/irq.c53
-rw-r--r--arch/s390/Kconfig29
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/appldata/appldata_mem.c2
-rw-r--r--arch/s390/appldata/appldata_net_sum.c2
-rw-r--r--arch/s390/crypto/Kconfig60
-rw-r--r--arch/s390/crypto/Makefile3
-rw-r--r--arch/s390/crypto/aes_s390.c47
-rw-r--r--arch/s390/crypto/crypt_s390.h281
-rw-r--r--arch/s390/crypto/crypt_s390_query.c129
-rw-r--r--arch/s390/crypto/des_check_key.c6
-rw-r--r--arch/s390/crypto/des_s390.c8
-rw-r--r--arch/s390/crypto/prng.c213
-rw-r--r--arch/s390/crypto/sha1_s390.c83
-rw-r--r--arch/s390/crypto/sha256_s390.c11
-rw-r--r--arch/s390/defconfig12
-rw-r--r--arch/s390/hypfs/Makefile2
-rw-r--r--arch/s390/hypfs/hypfs.h9
-rw-r--r--arch/s390/hypfs/hypfs_diag.h16
-rw-r--r--arch/s390/hypfs/hypfs_vm.c231
-rw-r--r--arch/s390/hypfs/inode.c31
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/base.S150
-rw-r--r--arch/s390/kernel/binfmt_elf32.c2
-rw-r--r--arch/s390/kernel/compat_exec_domain.c5
-rw-r--r--arch/s390/kernel/compat_linux.c24
-rw-r--r--arch/s390/kernel/compat_linux.h31
-rw-r--r--arch/s390/kernel/compat_signal.c8
-rw-r--r--arch/s390/kernel/cpcmd.c14
-rw-r--r--arch/s390/kernel/crash.c1
-rw-r--r--arch/s390/kernel/debug.c18
-rw-r--r--arch/s390/kernel/early.c306
-rw-r--r--arch/s390/kernel/ebcdic.c1
-rw-r--r--arch/s390/kernel/head31.S194
-rw-r--r--arch/s390/kernel/head64.S193
-rw-r--r--arch/s390/kernel/ipl.c106
-rw-r--r--arch/s390/kernel/irq.c15
-rw-r--r--arch/s390/kernel/kprobes.c32
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/module.c5
-rw-r--r--arch/s390/kernel/process.c4
-rw-r--r--arch/s390/kernel/profile.c20
-rw-r--r--arch/s390/kernel/ptrace.c46
-rw-r--r--arch/s390/kernel/reset.S90
-rw-r--r--arch/s390/kernel/s390_ext.c8
-rw-r--r--arch/s390/kernel/setup.c155
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c32
-rw-r--r--arch/s390/kernel/stacktrace.c10
-rw-r--r--arch/s390/kernel/time.c1185
-rw-r--r--arch/s390/kernel/traps.c24
-rw-r--r--arch/s390/kernel/vmlinux.lds.S13
-rw-r--r--arch/s390/kernel/vtime.c10
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/delay.c48
-rw-r--r--arch/s390/lib/qrnnd.S77
-rw-r--r--arch/s390/lib/uaccess.h23
-rw-r--r--arch/s390/lib/uaccess_mvcos.c78
-rw-r--r--arch/s390/lib/uaccess_pt.c329
-rw-r--r--arch/s390/lib/uaccess_std.c23
-rw-r--r--arch/s390/math-emu/Makefile2
-rw-r--r--arch/s390/math-emu/math.c2
-rw-r--r--arch/s390/math-emu/qrnnd.S77
-rw-r--r--arch/s390/mm/cmm.c4
-rw-r--r--arch/s390/mm/extmem.c66
-rw-r--r--arch/s390/mm/fault.c93
-rw-r--r--arch/s390/mm/init.c20
-rw-r--r--arch/s390/mm/vmem.c14
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c2
-rw-r--r--crypto/Kconfig49
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/hid/Kconfig14
-rw-r--r--drivers/hid/Makefile11
-rw-r--r--drivers/hid/hid-core.c8
-rw-r--r--drivers/hid/hid-debug.c764
-rw-r--r--drivers/hid/hid-input.c35
-rw-r--r--drivers/infiniband/core/addr.c3
-rw-r--r--drivers/infiniband/core/mad.c11
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h29
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c65
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c78
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c395
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c7
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h1
-rw-r--r--drivers/misc/tifm_7xx1.c402
-rw-r--r--drivers/misc/tifm_core.c65
-rw-r--r--drivers/mmc/at91_mci.c3
-rw-r--r--drivers/mmc/au1xmmc.c13
-rw-r--r--drivers/mmc/imxmmc.c4
-rw-r--r--drivers/mmc/mmc.c182
-rw-r--r--drivers/mmc/mmc_block.c15
-rw-r--r--drivers/mmc/mmc_queue.c2
-rw-r--r--drivers/mmc/mmc_sysfs.c2
-rw-r--r--drivers/mmc/mmci.c15
-rw-r--r--drivers/mmc/omap.c6
-rw-r--r--drivers/mmc/pxamci.c10
-rw-r--r--drivers/mmc/sdhci.c91
-rw-r--r--drivers/mmc/sdhci.h2
-rw-r--r--drivers/mmc/tifm_sd.c487
-rw-r--r--drivers/mmc/wbsd.c102
-rw-r--r--drivers/mmc/wbsd.h1
-rw-r--r--drivers/s390/Kconfig8
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dasd.c33
-rw-r--r--drivers/s390/block/dasd_3990_erp.c5
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/block/dasd_diag.c8
-rw-r--r--drivers/s390/block/dasd_eckd.c95
-rw-r--r--drivers/s390/block/dasd_eer.c24
-rw-r--r--drivers/s390/block/dasd_erp.c80
-rw-r--r--drivers/s390/block/dasd_fba.c4
-rw-r--r--drivers/s390/block/dasd_genhd.c2
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/dasd_proc.c8
-rw-r--r--drivers/s390/block/dcssblk.c6
-rw-r--r--drivers/s390/char/Makefile4
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/s390/char/con3270.c3
-rw-r--r--drivers/s390/char/defkeymap.c2
-rw-r--r--drivers/s390/char/fs3270.c4
-rw-r--r--drivers/s390/char/keyboard.c2
-rw-r--r--drivers/s390/char/monwriter.c4
-rw-r--r--drivers/s390/char/raw3270.c4
-rw-r--r--drivers/s390/char/sclp.c93
-rw-r--r--drivers/s390/char/sclp.h18
-rw-r--r--drivers/s390/char/sclp_con.c2
-rw-r--r--drivers/s390/char/sclp_cpi.c2
-rw-r--r--drivers/s390/char/sclp_info.c57
-rw-r--r--drivers/s390/char/sclp_rw.c2
-rw-r--r--drivers/s390/char/sclp_tty.c2
-rw-r--r--drivers/s390/char/sclp_vt220.c4
-rw-r--r--drivers/s390/char/tape.h22
-rw-r--r--drivers/s390/char/tape_3590.c479
-rw-r--r--drivers/s390/char/tape_3590.h53
-rw-r--r--drivers/s390/char/tape_block.c4
-rw-r--r--drivers/s390/char/tape_char.c27
-rw-r--r--drivers/s390/char/tape_core.c69
-rw-r--r--drivers/s390/char/tty3270.c13
-rw-r--r--drivers/s390/char/vmlogrdr.c5
-rw-r--r--drivers/s390/cio/blacklist.c10
-rw-r--r--drivers/s390/cio/ccwgroup.c6
-rw-r--r--drivers/s390/cio/chsc.c270
-rw-r--r--drivers/s390/cio/chsc.h11
-rw-r--r--drivers/s390/cio/cio.c37
-rw-r--r--drivers/s390/cio/cmf.c4
-rw-r--r--drivers/s390/cio/css.c13
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c12
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_ops.c2
-rw-r--r--drivers/s390/cio/device_status.c8
-rw-r--r--drivers/s390/cio/qdio.c77
-rw-r--r--drivers/s390/crypto/ap_bus.c8
-rw-r--r--drivers/s390/crypto/zcrypt_api.c20
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c8
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c3
-rw-r--r--drivers/s390/net/claw.c16
-rw-r--r--drivers/s390/net/ctcmain.c8
-rw-r--r--drivers/s390/net/cu3088.c2
-rw-r--r--drivers/s390/net/lcs.c6
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_eddp.c28
-rw-r--r--drivers/s390/net/qeth_main.c92
-rw-r--r--drivers/s390/net/qeth_sys.c30
-rw-r--r--drivers/s390/s390mach.c37
-rw-r--r--drivers/s390/s390mach.h3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c25
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c44
-rw-r--r--drivers/s390/scsi/zfcp_erp.c7
-rw-r--r--drivers/s390/scsi/zfcp_ext.h4
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c38
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c18
-rw-r--r--drivers/s390/sysinfo.c63
-rw-r--r--drivers/usb/input/Kconfig8
-rw-r--r--drivers/usb/input/Makefile3
-rw-r--r--drivers/usb/input/hid-core.c81
-rw-r--r--drivers/usb/input/hid-ff.c3
-rw-r--r--drivers/usb/input/hid-plff.c129
-rw-r--r--fs/cifs/CHANGES2
-rw-r--r--fs/cifs/cifsfs.c10
-rw-r--r--fs/cifs/file.c12
-rw-r--r--fs/cifs/readdir.c6
-rw-r--r--fs/cifs/smbdes.c10
-rw-r--r--fs/ocfs2/journal.h4
-rw-r--r--include/asm-ia64/dma.h2
-rw-r--r--include/asm-ia64/esi.h1
-rw-r--r--include/asm-ia64/meminit.h3
-rw-r--r--include/asm-ia64/pgalloc.h3
-rw-r--r--include/asm-ia64/swiotlb.h9
-rw-r--r--include/asm-ia64/thread_info.h4
-rw-r--r--include/asm-ia64/unistd.h4
-rw-r--r--include/asm-mips/bootinfo.h4
-rw-r--r--include/asm-mips/ddb5xxx/ddb5477.h41
-rw-r--r--include/asm-mips/dec/interrupts.h3
-rw-r--r--include/asm-mips/dma.h1
-rw-r--r--include/asm-mips/emma2rh/emma2rh.h5
-rw-r--r--include/asm-mips/emma2rh/markeins.h1
-rw-r--r--include/asm-mips/i8259.h3
-rw-r--r--include/asm-mips/io.h4
-rw-r--r--include/asm-mips/irq.h2
-rw-r--r--include/asm-mips/irq_cpu.h6
-rw-r--r--include/asm-mips/mach-au1x00/au1000.h1
-rw-r--r--include/asm-mips/mach-cobalt/cobalt.h4
-rw-r--r--include/asm-mips/mach-emma2rh/irq.h2
-rw-r--r--include/asm-mips/mach-generic/irq.h32
-rw-r--r--include/asm-mips/mach-mips/irq.h2
-rw-r--r--include/asm-mips/mach-vr41xx/irq.h11
-rw-r--r--include/asm-mips/mips-boards/atlasint.h4
-rw-r--r--include/asm-mips/mips-boards/maltaint.h4
-rw-r--r--include/asm-mips/mips-boards/prom.h1
-rw-r--r--include/asm-mips/mips-boards/seadint.h4
-rw-r--r--include/asm-mips/mips-boards/simint.h3
-rw-r--r--include/asm-mips/mipsmtregs.h2
-rw-r--r--include/asm-mips/page.h25
-rw-r--r--include/asm-mips/rtlx.h3
-rw-r--r--include/asm-mips/sections.h2
-rw-r--r--include/asm-mips/sgi/ip22.h13
-rw-r--r--include/asm-mips/smtc_ipi.h3
-rw-r--r--include/asm-mips/uaccess.h3
-rw-r--r--include/asm-mips/vr41xx/cmbvr4133.h5
-rw-r--r--include/asm-s390/compat.h28
-rw-r--r--include/asm-s390/etr.h219
-rw-r--r--include/asm-s390/hardirq.h2
-rw-r--r--include/asm-s390/io.h4
-rw-r--r--include/asm-s390/kdebug.h3
-rw-r--r--include/asm-s390/lowcore.h6
-rw-r--r--include/asm-s390/mmu_context.h50
-rw-r--r--include/asm-s390/pgalloc.h85
-rw-r--r--include/asm-s390/pgtable.h147
-rw-r--r--include/asm-s390/processor.h27
-rw-r--r--include/asm-s390/ptrace.h11
-rw-r--r--include/asm-s390/reset.h3
-rw-r--r--include/asm-s390/sclp.h39
-rw-r--r--include/asm-s390/sections.h2
-rw-r--r--include/asm-s390/setup.h23
-rw-r--r--include/asm-s390/sfp-util.h (renamed from arch/s390/math-emu/sfp-util.h)6
-rw-r--r--include/asm-s390/smp.h6
-rw-r--r--include/asm-s390/system.h4
-rw-r--r--include/asm-s390/tape390.h72
-rw-r--r--include/asm-s390/timer.h3
-rw-r--r--include/asm-s390/timex.h50
-rw-r--r--include/asm-s390/tlbflush.h9
-rw-r--r--include/asm-s390/uaccess.h2
-rw-r--r--include/asm-x86_64/swiotlb.h8
-rw-r--r--include/linux/hid-debug.h749
-rw-r--r--include/linux/hid.h17
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/host.h10
-rw-r--r--include/linux/mmc/mmc.h1
-rw-r--r--include/linux/mmc/protocol.h13
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/tifm.h35
-rw-r--r--include/rdma/ib_user_mad.h2
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--lib/swiotlb.c290
-rw-r--r--scripts/Kbuild.include96
-rw-r--r--scripts/gen_initramfs_list.sh43
-rwxr-xr-xscripts/makelst34
-rw-r--r--security/keys/key.c33
414 files changed, 9323 insertions, 5358 deletions
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 3f9ddbc23b27..0993969609cf 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -480,7 +480,7 @@ r2 argument 0 / return value 0 call-clobbered
 r3 argument 1 / return value 1 (if long long) call-clobbered
 r4 argument 2 call-clobbered
 r5 argument 3 call-clobbered
-r6 argument 5 saved
+r6 argument 4 saved
 r7 pointer-to arguments 5 to ... saved
 r8 this & that saved
 r9 this & that saved
diff --git a/MAINTAINERS b/MAINTAINERS
index 0ad8803a0c75..16c0e15b5785 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2791,7 +2791,7 @@ M: schwidefsky@de.ibm.com
 P:	Heiko Carstens
 M:	heiko.carstens@de.ibm.com
 M:	linux390@de.ibm.com
-L:	linux-390@vm.marist.edu
+L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 
@@ -2799,7 +2799,7 @@ S390 NETWORK DRIVERS
 P:	Frank Pavlic
 M:	fpavlic@de.ibm.com
 M:	linux390@de.ibm.com
-L:	linux-390@vm.marist.edu
+L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 
@@ -2807,7 +2807,7 @@ S390 ZFCP DRIVER
 P:	Swen Schillig
 M:	swen@vnet.ibm.com
 M:	linux390@de.ibm.com
-L:	linux-390@vm.marist.edu
+L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
 
@@ -3647,7 +3647,7 @@ S: Maintained
 W83L51xD SD/MMC CARD INTERFACE DRIVER
 P:	Pierre Ossman
 M:	drzeus-wbsd@drzeus.cx
-L:	wbsd-devel@list.drzeus.cx
+L:	linux-kernel@vger.kernel.org
 W:	http://projects.drzeus.cx/wbsd
 S:	Maintained
 
diff --git a/Makefile b/Makefile
index 7e2750f4ca70..cdeda68cf2aa 100644
--- a/Makefile
+++ b/Makefile
@@ -776,7 +776,7 @@ $(vmlinux-dirs): prepare scripts
 # $(EXTRAVERSION) eg, -rc6
 # $(localver-full)
 # $(localver)
-#   localversion* (all localversion* files)
+#   localversion* (files without backups, containing '~')
 #   $(CONFIG_LOCALVERSION) (from kernel config setting)
 # $(localver-auto) (only if CONFIG_LOCALVERSION_AUTO is set)
 #   ./scripts/setlocalversion (SCM tag, if one exists)
@@ -787,17 +787,12 @@ $(vmlinux-dirs): prepare scripts
 # moment, only git is supported but other SCMs can edit the script
 # scripts/setlocalversion and add the appropriate checks as needed.
 
-nullstring :=
-space := $(nullstring) # end of line
+pattern = ".*/localversion[^~]*"
+string = $(shell cat /dev/null \
+	 `find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort`)
 
-___localver = $(objtree)/localversion* $(srctree)/localversion*
-__localver = $(sort $(wildcard $(___localver)))
-# skip backup files (containing '~')
-_localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
-
-localver = $(subst $(space),, \
-	   $(shell cat /dev/null $(_localver)) \
-	   $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
+localver = $(subst $(space),, $(string) \
+	   $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
 
 # If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called
 # and if the SCM is know a tag from the SCM is appended.
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcacfe291b9b..f1d2899e9a62 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,6 +11,8 @@ menu "Processor type and features"
 
 config IA64
 	bool
+	select PCI if (!IA64_HP_SIM)
+	select ACPI if (!IA64_HP_SIM)
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -28,7 +30,6 @@ config MMU
 
 config SWIOTLB
 	bool
-	default y
 
 config RWSEM_XCHGADD_ALGORITHM
 	bool
@@ -84,10 +85,9 @@ choice
 
 config IA64_GENERIC
 	bool "generic"
-	select ACPI
-	select PCI
 	select NUMA
 	select ACPI_NUMA
+	select SWIOTLB
 	help
 	  This selects the system type of your hardware. A "generic" kernel
 	  will run on any supported IA-64 system. However, if you configure
@@ -104,6 +104,7 @@ config IA64_GENERIC
 
 config IA64_DIG
 	bool "DIG-compliant"
+	select SWIOTLB
 
 config IA64_HP_ZX1
 	bool "HP-zx1/sx1000"
@@ -113,6 +114,7 @@ config IA64_HP_ZX1
 
 config IA64_HP_ZX1_SWIOTLB
 	bool "HP-zx1/sx1000 with software I/O TLB"
+	select SWIOTLB
 	help
 	  Build a kernel that runs on HP zx1 and sx1000 systems even when they
 	  have broken PCI devices which cannot DMA to full 32 bits. Apart
@@ -131,6 +133,7 @@ config IA64_SGI_SN2
 
 config IA64_HP_SIM
 	bool "Ski-simulator"
+	select SWIOTLB
 
 endchoice
 
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index a5a5637507be..2153bcacbe6c 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
+EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_single_for_device);
+EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_sg_for_device);
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bc2f64d72244..9d92097ce96d 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -79,6 +79,7 @@ crash_save_this_cpu()
 	final_note(buf);
 }
 
+#ifdef CONFIG_SMP
 static int
 kdump_wait_cpu_freeze(void)
 {
@@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
 	}
 	return 1;
 }
+#endif
 
 void
 machine_crash_shutdown(struct pt_regs *pt)
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	if (!ia64_kimage) {
+		printk(KERN_NOTICE "machine_kdump_on_init(): "
+				"kdump not configured\n");
+		return;
+	}
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
 	atomic_inc(&kdump_cpu_freezed);
 	kdump_status[cpuid] = 1;
 	mb();
-	if (cpuid == 0) {
-		for (;;)
-			cpu_relax();
-	} else
-		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpuid != 0)
+		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+#endif
+	for (;;)
+		cpu_relax();
 }
 
 static int
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 83b8c91c1408..da60e90eeeb1 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -9,7 +9,8 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 
-#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
 
 /**
  * copy_oldmem_page - copy one page from "oldmem"
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0b25a7d4e1e4..6c03928544c2 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -380,7 +380,7 @@ efi_get_pal_addr (void)
 #endif
 		return __va(md->phys_addr);
 	}
-	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
 	       __FUNCTION__);
 	return NULL;
 }
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 15234ed3a341..e7873eeae448 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1610,5 +1610,7 @@ sys_call_table:
 	data8 sys_sync_file_range		// 1300
 	data8 sys_tee
 	data8 sys_vmsplice
+	data8 sys_ni_syscall			// reserved for move_pages
+	data8 sys_getcpu
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 0fc5fb7865cf..d6aab40c6416 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
 	/* Clear the interrupt controller descriptor */
 	idesc->chip = &no_irq_type;
 
+#ifdef CONFIG_SMP
+	/* Clear affinity */
+	cpus_setall(idesc->affinity);
+#endif
+
 	/* Clear the interrupt information */
 	memset(&iosapic_intr_info[vector], 0,
 	       sizeof(struct iosapic_intr_info));
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index e2ccc9f660c5..4f0f3b8c1ee2 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
 #include <linux/kexec.h>
 #include <linux/cpu.h>
 #include <linux/irq.h>
+#include <linux/efi.h>
 #include <asm/mmu_context.h>
 #include <asm/setup.h>
 #include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
 {
 }
 
-void machine_shutdown(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		if (cpu != smp_processor_id())
-			cpu_down(cpu);
-	}
-	kexec_disable_iosapic();
-}
-
 /*
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-extern void *efi_get_pal_addr(void);
 static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 {
 	struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	unsigned long vector;
 	int ii;
 
+	BUG_ON(!image);
 	if (image->type == KEXEC_TYPE_CRASH) {
 		crash_save_this_cpu();
 		current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 
 void machine_kexec(struct kimage *image)
 {
+	BUG_ON(!image);
 	unw_init_running(ia64_machine_kexec, image);
 	for(;;);
 }
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 17685abaf496..ae96d4176995 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -34,6 +34,7 @@
 #include <asm/ia32.h>
 #include <asm/irq.h>
 #include <asm/kdebug.h>
+#include <asm/kexec.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
 	ia64_pal_halt(min_power_state);
 }
 
+void machine_shutdown(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			cpu_down(cpu);
+	}
+#endif
+#ifdef CONFIG_KEXEC
+	kexec_disable_iosapic();
+#endif
+}
+
 void
 machine_restart (char *restart_cmd)
 {
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index aa705e46b974..3f8918782e0c 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 	 */
 	list_for_each_safe(this, next, &current->children) {
 		p = list_entry(this, struct task_struct, sibling);
-		if (p->mm != mm)
+		if (p->tgid != child->tgid)
 			continue;
 		if (thread_matches(p, addr)) {
 			child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
 	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
 	/* make sure the single step/taken-branch trap bits are not set: */
+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 	child_psr->ss = 0;
 	child_psr->tb = 0;
 }
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 		 * Make sure the single step/taken-branch trap bits
 		 * are not set:
 		 */
+		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 		ia64_psr(pt)->ss = 0;
 		ia64_psr(pt)->tb = 0;
 
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 			goto out_tsk;
 
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		set_tsk_thread_flag(child, TIF_SINGLESTEP);
 		if (request == PTRACE_SINGLESTEP) {
 			ia64_psr(pt)->ss = 1;
 		} else {
@@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 }
 
 
-void
+static void
 syscall_trace (void)
 {
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return;
-	if (!(current->ptrace & PT_PTRACED))
-		return;
 	/*
 	 * The 0x80 provides a way for the tracing parent to
 	 * distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		audit_syscall_exit(success, result);
 	}
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
+	if ((test_thread_flag(TIF_SYSCALL_TRACE)
+	    || test_thread_flag(TIF_SINGLESTEP))
 	    && (current->ptrace & PT_PTRACED))
 		syscall_trace();
 }
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad567b8d432e..83c2629e1c4c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
 		{ 1UL << 1, "spontaneous deferral"},
 		{ 1UL << 2, "16-byte atomic ops" }
 	};
-	char features[128], *cp, sep;
+	char features[128], *cp, *sep;
 	struct cpuinfo_ia64 *c = v;
 	unsigned long mask;
 	unsigned long proc_freq;
-	int i;
+	int i, size;
 
 	mask = c->features;
 
 	/* build the feature string: */
-	memcpy(features, " standard", 10);
+	memcpy(features, "standard", 9);
 	cp = features;
-	sep = 0;
-	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
+	size = sizeof(features);
+	sep = "";
+	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
 		if (mask & feature_bits[i].mask) {
-			if (sep)
-				*cp++ = sep;
-			sep = ',';
-			*cp++ = ' ';
-			strcpy(cp, feature_bits[i].feature_name);
-			cp += strlen(feature_bits[i].feature_name);
+			cp += snprintf(cp, size, "%s%s", sep,
+				       feature_bits[i].feature_name),
+			sep = ", ";
 			mask &= ~feature_bits[i].mask;
+			size = sizeof(features) - (cp - features);
 		}
 	}
-	if (mask) {
-		/* print unknown features as a hex value: */
-		if (sep)
-			*cp++ = sep;
-		sprintf(cp, " 0x%lx", mask);
+	if (mask && size > 1) {
+		/* print unknown features as a hex value */
+		snprintf(cp, size, "%s0x%lx", sep, mask);
 	}
 
 	proc_freq = cpufreq_quick_get(cpunum);
@@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   "model name : %s\n"
 		   "revision : %u\n"
 		   "archrev : %u\n"
-		   "features :%s\n"	/* don't change this---it _is_ right! */
+		   "features : %s\n"
 		   "cpu number : %lu\n"
 		   "cpu regs : %u\n"
 		   "cpu MHz : %lu.%06lu\n"
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d6083a0936f4..8f3d0066f446 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -157,6 +157,7 @@ SECTIONS
 	}
 #endif
 
+  . = ALIGN(8);
   __con_initcall_start = .;
   .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
 	{ *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b9..63e6d49c5813 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,47 +30,69 @@ static unsigned long max_gap;
 #endif
 
 /**
- * show_mem - display a memory statistics summary
+ * show_mem - give short summary of memory stats
  *
- * Just walks the pages in the system and describes where they're allocated.
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
  */
-void
-show_mem (void)
+void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
+	int i, total_reserved = 0;
+	int total_shared = 0, total_cached = 0;
+	unsigned long total_present = 0;
+	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
-
 	printk(KERN_INFO "Free swap: %6ldkB\n",
 	       nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	for (i = 0; i < max_mapnr; i++) {
-		if (!pfn_valid(i)) {
+	printk(KERN_INFO "Node memory in pages:\n");
+	for_each_online_pgdat(pgdat) {
+		unsigned long present;
+		unsigned long flags;
+		int shared = 0, cached = 0, reserved = 0;
+
+		pgdat_resize_lock(pgdat, &flags);
+		present = pgdat->node_present_pages;
+		for(i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page;
+			if (pfn_valid(pgdat->node_start_pfn + i))
+				page = pfn_to_page(pgdat->node_start_pfn + i);
+			else {
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 				if (max_gap < LARGE_GAP)
 					continue;
-			i = vmemmap_find_next_valid_pfn(0, i) - 1;
 #endif
-			continue;
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
+				continue;
+			}
+			if (PageReserved(page))
+				reserved++;
+			else if (PageSwapCache(page))
+				cached++;
+			else if (page_count(page))
+				shared += page_count(page)-1;
 		}
-		total++;
-		if (PageReserved(mem_map+i))
-			reserved++;
-		else if (PageSwapCache(mem_map+i))
-			cached++;
-		else if (page_count(mem_map + i))
-			shared += page_count(mem_map + i) - 1;
+		pgdat_resize_unlock(pgdat, &flags);
+		total_present += present;
+		total_reserved += reserved;
+		total_cached += cached;
+		total_shared += shared;
+		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
+		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+		       present, reserved, shared, cached);
 	}
-	printk(KERN_INFO "%d pages of RAM\n", total);
-	printk(KERN_INFO "%d reserved pages\n", reserved);
-	printk(KERN_INFO "%d pages shared\n", shared);
-	printk(KERN_INFO "%d pages swap cached\n", cached);
-	printk(KERN_INFO "%ld pages in page table cache\n",
+	printk(KERN_INFO "%ld pages of RAM\n", total_present);
+	printk(KERN_INFO "%d reserved pages\n", total_reserved);
+	printk(KERN_INFO "%d pages shared\n", total_shared);
+	printk(KERN_INFO "%d pages swap cached\n", total_cached);
+	printk(KERN_INFO "Total of %ld pages in page table cache\n",
 	       pgtable_quicklist_total_size());
+	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
+
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
@@ -177,7 +199,7 @@ find_memory (void)
 
#ifdef CONFIG_CRASH_DUMP
 	/* If we are doing a crash dump, we still need to know the real mem
-	 * size before original memory map is * reset. */
+	 * size before original memory map is reset. */
 	saved_max_pfn = max_pfn;
#endif
 }
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49d..6eae596c509d 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
 	return;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
-	void *arg)
-{
-	int nid;
-
-	start = __pa(start) >> PAGE_SHIFT;
-	end = __pa(end) >> PAGE_SHIFT;
-	nid = early_pfn_to_nid(start);
-	memory_present(nid, start, end);
-
-	return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
-	efi_memmap_walk(register_sparse_mem, NULL);
-	sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -473,6 +442,9 @@ void __init find_memory(void)
 		node_clear(node, memory_less_mask);
 		mem_data[node].min_pfn = ~0UL;
 	}
+
+	efi_memmap_walk(register_active_ranges, NULL);
+
 	/*
 	 * Initialize the boot memory maps in reverse order since that's
 	 * what the bootmem allocator expects
@@ -506,6 +478,12 @@ void __init find_memory(void)
 	max_pfn = max_low_pfn;
 
 	find_initrd();
+
+#ifdef CONFIG_CRASH_DUMP
+	/* If we are doing a crash dump, we still need to know the real mem
+	 * size before original memory map is reset. */
+	saved_max_pfn = max_pfn;
+#endif
 }
 
 #ifdef CONFIG_SMP
@@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
-	add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 	mem_data[node].num_physpages += len >> PAGE_SHIFT;
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
@@ -686,10 +663,11 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-	arch_sparse_init();
-
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/a.out.h>
 #include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	return 0;
 }
 
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
 int __init
 register_active_ranges(u64 start, u64 end, void *arg)
 {
-	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
+	int nid = paddr_to_nid(__pa(start));
+
+	if (nid < 0)
+		nid = 0;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+			__pa(end) >> PAGE_SHIFT);
 	return 0;
 }
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
 count_reserved_pages (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index abca6bd7962f..fcf7f93c4b61 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
  */
 
 #include <linux/types.h>
@@ -38,12 +38,20 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
 				(u64) nasid, 0, 0, 0, 0, 0, 0);
 
 		if ((int)ret_stuff.v0)
-			panic("hubii_eint_handler(): Fatal TIO Error");
+			panic("%s: Fatal %s Error", __FUNCTION__,
+				((nasid & 1) ? "TIO" : "HUBII"));
 
 		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
 			(void)hubiio_crb_error_handler(hubdev_info);
 	} else
-		bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
+		if (nasid & 1) {	/* TIO errors */
+			SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+				(u64) nasid, 0, 0, 0, 0, 0, 0);
+
+			if ((int)ret_stuff.v0)
+				panic("%s: Fatal TIO Error", __FUNCTION__);
+		} else
+			bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
 
 	return IRQ_HANDLED;
 }
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bbd386f572d9..44a0224c32dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -575,6 +575,7 @@ config SGI_IP27
 	select DMA_IP27
 	select EARLY_PRINTK
 	select HW_HAS_PCI
+	select NR_CPUS_DEFAULT_64
 	select PCI_DOMAINS
 	select SYS_HAS_CPU_R10000
 	select SYS_SUPPORTS_64BIT_KERNEL
@@ -612,6 +613,7 @@ config SIBYTE_BIGSUR
 	bool "Sibyte BCM91480B-BigSur"
 	select BOOT_ELF32
 	select DMA_COHERENT
+	select NR_CPUS_DEFAULT_4
 	select PCI_DOMAINS
 	select SIBYTE_BCM1x80
 	select SWAP_IO_SPACE
@@ -623,6 +625,7 @@ config SIBYTE_SWARM
 	bool "Sibyte BCM91250A-SWARM"
 	select BOOT_ELF32
 	select DMA_COHERENT
+	select NR_CPUS_DEFAULT_2
 	select SIBYTE_SB1250
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_SB1
@@ -635,6 +638,7 @@ config SIBYTE_SENTOSA
 	depends on EXPERIMENTAL
 	select BOOT_ELF32
 	select DMA_COHERENT
+	select NR_CPUS_DEFAULT_2
 	select SIBYTE_SB1250
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_SB1
@@ -668,6 +672,7 @@ config SIBYTE_PTSWARM
 	depends on EXPERIMENTAL
 	select BOOT_ELF32
 	select DMA_COHERENT
+	select NR_CPUS_DEFAULT_2
 	select SIBYTE_SB1250
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_SB1
@@ -680,6 +685,7 @@ config SIBYTE_LITTLESUR
 	depends on EXPERIMENTAL
 	select BOOT_ELF32
 	select DMA_COHERENT
+	select NR_CPUS_DEFAULT_2
 	select SIBYTE_SB1250
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_SB1
@@ -790,23 +796,6 @@ config TOSHIBA_RBTX4938
 
 endchoice
 
-config KEXEC
-	bool "Kexec system call (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
-	help
-	  kexec is a system call that implements the ability to shutdown your
-	  current kernel, and to start another kernel. It is like a reboot
-	  but it is indepedent of the system firmware. And like a reboot
-	  you can start any kernel with it, not just Linux.
-
-	  The name comes from the similiarity to the exec system call.
-
-	  It is an ongoing process to be certain the hardware in a machine
-	  is properly shutdown, so do not be surprised if this code does not
-	  initially work for you. It may help to enable device hotplugging
-	  support. As of this writing the exact hardware interface is
-	  strongly in flux, so no good recommendation can be made.
-
 source "arch/mips/ddb5xxx/Kconfig"
 source "arch/mips/gt64120/ev64120/Kconfig"
 source "arch/mips/jazz/Kconfig"
@@ -1541,6 +1530,8 @@ config MIPS_MT_SMTC
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_SRS
 	select MIPS_MT
+	select NR_CPUS_DEFAULT_2
+	select NR_CPUS_DEFAULT_8
 	select SMP
 	select SYS_SUPPORTS_SMP
 	help
@@ -1756,13 +1747,34 @@ config SMP
 config SYS_SUPPORTS_SMP
 	bool
 
+config NR_CPUS_DEFAULT_2
+	bool
+
+config NR_CPUS_DEFAULT_4
+	bool
+
+config NR_CPUS_DEFAULT_8
+	bool
+
+config NR_CPUS_DEFAULT_16
+	bool
+
+config NR_CPUS_DEFAULT_32
+	bool
+
+config NR_CPUS_DEFAULT_64
+	bool
+
 config NR_CPUS
 	int "Maximum number of CPUs (2-64)"
 	range 2 64
 	depends on SMP
-	default "64" if SGI_IP27
-	default "2"
-	default "8" if MIPS_MT_SMTC
+	default "2" if NR_CPUS_DEFAULT_2
+	default "4" if NR_CPUS_DEFAULT_4
+	default "8" if NR_CPUS_DEFAULT_8
+	default "16" if NR_CPUS_DEFAULT_16
+	default "32" if NR_CPUS_DEFAULT_32
+	default "64" if NR_CPUS_DEFAULT_64
 	help
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support. The maximum supported value is 32 for 32-bit
@@ -1859,6 +1871,40 @@ config MIPS_INSANE_LARGE
 	  This will result in additional memory usage, so it is not
 	  recommended for normal users.
 
+config KEXEC
+	bool "Kexec system call (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel. It is like a reboot
+	  but it is indepedent of the system firmware. And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+	  The name comes from the similiarity to the exec system call.
+
+	  It is an ongoing process to be certain the hardware in a machine
+	  is properly shutdown, so do not be surprised if this code does not
+	  initially work for you. It may help to enable device hotplugging
+	  support. As of this writing the exact hardware interface is
+	  strongly in flux, so no good recommendation can be made.
+
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS && BROKEN
+	default y
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via /proc/<pid>/seccomp, it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+	  If unsure, say Y. Only embedded should say N here.
+
 endmenu
 
 config RWSEM_GENERIC_SPINLOCK
@@ -2025,23 +2071,6 @@ config BINFMT_ELF32
 	bool
 	default y if MIPS32_O32 || MIPS32_N32
 
-config SECCOMP
-	bool "Enable seccomp to safely compute untrusted bytecode"
-	depends on PROC_FS && BROKEN
-	default y
-	help
-	  This kernel feature is useful for number crunching applications
-	  that may need to compute untrusted bytecode during their
-	  execution. By using pipes or other transports made available to
-	  the process as file descriptors supporting the read/write
-	  syscalls, it's possible to isolate those applications in
-	  their own address space using seccomp. Once seccomp is
-	  enabled via /proc/<pid>/seccomp, it cannot be disabled
-	  and the task is only allowed to execute a few safe syscalls
-	  defined by each seccomp mode.
-
-	  If unsure, say Y. Only embedded should say N here.
-
 config PM
 	bool "Power Management support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && SOC_AU1X00
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 5d6afb52d904..9351f1c04a9d 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -22,10 +22,10 @@ config CMDLINE
22 string "Default kernel command string" 22 string "Default kernel command string"
23 default "" 23 default ""
24 help 24 help
25 On some platforms, there is currently no way for the boot loader to 25 On some platforms, there is currently no way for the boot loader to
26 pass arguments to the kernel. For these platforms, you can supply 26 pass arguments to the kernel. For these platforms, you can supply
27 some command-line options at build time by entering them here. In 27 some command-line options at build time by entering them here. In
28 other cases you can specify kernel args so that you don't have 28 other cases you can specify kernel args so that you don't have
29 to set them up in board prom initialization routines. 29 to set them up in board prom initialization routines.
30 30
31config DEBUG_STACK_USAGE 31config DEBUG_STACK_USAGE
diff --git a/arch/mips/arc/identify.c b/arch/mips/arc/identify.c
index 3ba7c47f9f23..4b907369b0f9 100644
--- a/arch/mips/arc/identify.c
+++ b/arch/mips/arc/identify.c
@@ -77,7 +77,7 @@ static struct smatch * __init string_to_mach(const char *s)
77{ 77{
78 int i; 78 int i;
79 79
80 for (i = 0; i < (sizeof(mach_table) / sizeof (mach_table[0])); i++) { 80 for (i = 0; i < ARRAY_SIZE(mach_table); i++) {
81 if (!strcmp(s, mach_table[i].arcname)) 81 if (!strcmp(s, mach_table[i].arcname))
82 return &mach_table[i]; 82 return &mach_table[i];
83 } 83 }
diff --git a/arch/mips/arc/memory.c b/arch/mips/arc/memory.c
index 8a9ef58cc399..456cb81a32d9 100644
--- a/arch/mips/arc/memory.c
+++ b/arch/mips/arc/memory.c
@@ -141,30 +141,20 @@ void __init prom_meminit(void)
141 } 141 }
142} 142}
143 143
144unsigned long __init prom_free_prom_memory(void) 144void __init prom_free_prom_memory(void)
145{ 145{
146 unsigned long freed = 0;
147 unsigned long addr; 146 unsigned long addr;
148 int i; 147 int i;
149 148
150 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP) 149 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
151 return 0; 150 return;
152 151
153 for (i = 0; i < boot_mem_map.nr_map; i++) { 152 for (i = 0; i < boot_mem_map.nr_map; i++) {
154 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 153 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
155 continue; 154 continue;
156 155
157 addr = boot_mem_map.map[i].addr; 156 addr = boot_mem_map.map[i].addr;
158 while (addr < boot_mem_map.map[i].addr 157 free_init_pages("prom memory",
159 + boot_mem_map.map[i].size) { 158 addr, addr + boot_mem_map.map[i].size);
160 ClearPageReserved(virt_to_page(__va(addr)));
161 init_page_count(virt_to_page(__va(addr)));
162 free_page((unsigned long)__va(addr));
163 addr += PAGE_SIZE;
164 freed += PAGE_SIZE;
165 }
166 } 159 }
167 printk(KERN_INFO "Freeing prom memory: %ldkb freed\n", freed >> 10);
168
169 return freed;
170} 160}
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 9cf7b6715836..ea6e99fbe2f7 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -233,7 +233,7 @@ void restore_local_and_enable(int controller, unsigned long mask)
233 233
234 234
235static struct irq_chip rise_edge_irq_type = { 235static struct irq_chip rise_edge_irq_type = {
236 .typename = "Au1000 Rise Edge", 236 .name = "Au1000 Rise Edge",
237 .ack = mask_and_ack_rise_edge_irq, 237 .ack = mask_and_ack_rise_edge_irq,
238 .mask = local_disable_irq, 238 .mask = local_disable_irq,
239 .mask_ack = mask_and_ack_rise_edge_irq, 239 .mask_ack = mask_and_ack_rise_edge_irq,
@@ -242,7 +242,7 @@ static struct irq_chip rise_edge_irq_type = {
242}; 242};
243 243
244static struct irq_chip fall_edge_irq_type = { 244static struct irq_chip fall_edge_irq_type = {
245 .typename = "Au1000 Fall Edge", 245 .name = "Au1000 Fall Edge",
246 .ack = mask_and_ack_fall_edge_irq, 246 .ack = mask_and_ack_fall_edge_irq,
247 .mask = local_disable_irq, 247 .mask = local_disable_irq,
248 .mask_ack = mask_and_ack_fall_edge_irq, 248 .mask_ack = mask_and_ack_fall_edge_irq,
@@ -251,7 +251,7 @@ static struct irq_chip fall_edge_irq_type = {
251}; 251};
252 252
253static struct irq_chip either_edge_irq_type = { 253static struct irq_chip either_edge_irq_type = {
254 .typename = "Au1000 Rise or Fall Edge", 254 .name = "Au1000 Rise or Fall Edge",
255 .ack = mask_and_ack_either_edge_irq, 255 .ack = mask_and_ack_either_edge_irq,
256 .mask = local_disable_irq, 256 .mask = local_disable_irq,
257 .mask_ack = mask_and_ack_either_edge_irq, 257 .mask_ack = mask_and_ack_either_edge_irq,
@@ -260,7 +260,7 @@ static struct irq_chip either_edge_irq_type = {
260}; 260};
261 261
262static struct irq_chip level_irq_type = { 262static struct irq_chip level_irq_type = {
263 .typename = "Au1000 Level", 263 .name = "Au1000 Level",
264 .ack = mask_and_ack_level_irq, 264 .ack = mask_and_ack_level_irq,
265 .mask = local_disable_irq, 265 .mask = local_disable_irq,
266 .mask_ack = mask_and_ack_level_irq, 266 .mask_ack = mask_and_ack_level_irq,
diff --git a/arch/mips/au1000/common/pci.c b/arch/mips/au1000/common/pci.c
index 9f8ce08e173b..6c25e6c09f78 100644
--- a/arch/mips/au1000/common/pci.c
+++ b/arch/mips/au1000/common/pci.c
@@ -76,13 +76,17 @@ static int __init au1x_pci_setup(void)
76 } 76 }
77 77
78#ifdef CONFIG_DMA_NONCOHERENT 78#ifdef CONFIG_DMA_NONCOHERENT
79 /* 79 {
80 * Set the NC bit in controller for Au1500 pre-AC silicon 80 /*
81 */ 81 * Set the NC bit in controller for Au1500 pre-AC silicon
82 u32 prid = read_c0_prid(); 82 */
83 if ( (prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) { 83 u32 prid = read_c0_prid();
84 au_writel( 1<<16 | au_readl(Au1500_PCI_CFG), Au1500_PCI_CFG); 84
85 printk("Non-coherent PCI accesses enabled\n"); 85 if ((prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
86 au_writel((1 << 16) | au_readl(Au1500_PCI_CFG),
87 Au1500_PCI_CFG);
88 printk("Non-coherent PCI accesses enabled\n");
89 }
86 } 90 }
87#endif 91#endif
88 92
diff --git a/arch/mips/au1000/common/prom.c b/arch/mips/au1000/common/prom.c
index 6fce60af005d..a8637cdb5b4b 100644
--- a/arch/mips/au1000/common/prom.c
+++ b/arch/mips/au1000/common/prom.c
@@ -149,9 +149,8 @@ int get_ethernet_addr(char *ethernet_addr)
149 return 0; 149 return 0;
150} 150}
151 151
152unsigned long __init prom_free_prom_memory(void) 152void __init prom_free_prom_memory(void)
153{ 153{
154 return 0;
155} 154}
156 155
157EXPORT_SYMBOL(prom_getcmdline); 156EXPORT_SYMBOL(prom_getcmdline);
diff --git a/arch/mips/au1000/common/setup.c b/arch/mips/au1000/common/setup.c
index 919172db560c..13fe187f35d6 100644
--- a/arch/mips/au1000/common/setup.c
+++ b/arch/mips/au1000/common/setup.c
@@ -141,17 +141,20 @@ void __init plat_mem_setup(void)
141/* This routine should be valid for all Au1x based boards */ 141/* This routine should be valid for all Au1x based boards */
142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) 142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
143{ 143{
144 u32 start, end;
145
146 /* Don't fixup 36 bit addresses */ 144 /* Don't fixup 36 bit addresses */
147 if ((phys_addr >> 32) != 0) return phys_addr; 145 if ((phys_addr >> 32) != 0)
146 return phys_addr;
148 147
149#ifdef CONFIG_PCI 148#ifdef CONFIG_PCI
150 start = (u32)Au1500_PCI_MEM_START; 149 {
151 end = (u32)Au1500_PCI_MEM_END; 150 u32 start, end;
152 /* check for pci memory window */ 151
153 if ((phys_addr >= start) && ((phys_addr + size) < end)) { 152 start = (u32)Au1500_PCI_MEM_START;
154 return (phys_t)((phys_addr - start) + Au1500_PCI_MEM_START); 153 end = (u32)Au1500_PCI_MEM_END;
154 /* check for pci memory window */
155 if ((phys_addr >= start) && ((phys_addr + size) < end))
156 return (phys_t)
157 ((phys_addr - start) + Au1500_PCI_MEM_START);
155 } 158 }
156#endif 159#endif
157 160
diff --git a/arch/mips/au1000/pb1100/board_setup.c b/arch/mips/au1000/pb1100/board_setup.c
index 2d1533f116c0..6bc1f8e1b608 100644
--- a/arch/mips/au1000/pb1100/board_setup.c
+++ b/arch/mips/au1000/pb1100/board_setup.c
@@ -47,8 +47,7 @@ void board_reset (void)
47 47
48void __init board_setup(void) 48void __init board_setup(void)
49{ 49{
50 u32 pin_func; 50 volatile void __iomem * base = (volatile void __iomem *) 0xac000000UL;
51 u32 sys_freqctrl, sys_clksrc;
52 51
53 // set AUX clock to 12MHz * 8 = 96 MHz 52 // set AUX clock to 12MHz * 8 = 96 MHz
54 au_writel(8, SYS_AUXPLL); 53 au_writel(8, SYS_AUXPLL);
@@ -56,58 +55,62 @@ void __init board_setup(void)
56 udelay(100); 55 udelay(100);
57 56
58#ifdef CONFIG_USB_OHCI 57#ifdef CONFIG_USB_OHCI
59 // configure pins GPIO[14:9] as GPIO 58 {
60 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80); 59 u32 pin_func, sys_freqctrl, sys_clksrc;
61 60
62 /* zero and disable FREQ2 */ 61 // configure pins GPIO[14:9] as GPIO
63 sys_freqctrl = au_readl(SYS_FREQCTRL0); 62 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80);
64 sys_freqctrl &= ~0xFFF00000; 63
65 au_writel(sys_freqctrl, SYS_FREQCTRL0); 64 /* zero and disable FREQ2 */
66 65 sys_freqctrl = au_readl(SYS_FREQCTRL0);
67 /* zero and disable USBH/USBD/IrDA clock */ 66 sys_freqctrl &= ~0xFFF00000;
68 sys_clksrc = au_readl(SYS_CLKSRC); 67 au_writel(sys_freqctrl, SYS_FREQCTRL0);
69 sys_clksrc &= ~0x0000001F; 68
70 au_writel(sys_clksrc, SYS_CLKSRC); 69 /* zero and disable USBH/USBD/IrDA clock */
71 70 sys_clksrc = au_readl(SYS_CLKSRC);
72 sys_freqctrl = au_readl(SYS_FREQCTRL0); 71 sys_clksrc &= ~0x0000001F;
73 sys_freqctrl &= ~0xFFF00000; 72 au_writel(sys_clksrc, SYS_CLKSRC);
74 73
75 sys_clksrc = au_readl(SYS_CLKSRC); 74 sys_freqctrl = au_readl(SYS_FREQCTRL0);
76 sys_clksrc &= ~0x0000001F; 75 sys_freqctrl &= ~0xFFF00000;
77 76
78 // FREQ2 = aux/2 = 48 MHz 77 sys_clksrc = au_readl(SYS_CLKSRC);
79 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20)); 78 sys_clksrc &= ~0x0000001F;
80 au_writel(sys_freqctrl, SYS_FREQCTRL0); 79
81 80 // FREQ2 = aux/2 = 48 MHz
82 /* 81 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20));
83 * Route 48MHz FREQ2 into USBH/USBD/IrDA 82 au_writel(sys_freqctrl, SYS_FREQCTRL0);
84 */ 83
85 sys_clksrc |= ((4<<2) | (0<<1) | 0 ); 84 /*
86 au_writel(sys_clksrc, SYS_CLKSRC); 85 * Route 48MHz FREQ2 into USBH/USBD/IrDA
87 86 */
88 /* setup the static bus controller */ 87 sys_clksrc |= ((4<<2) | (0<<1) | 0 );
89 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */ 88 au_writel(sys_clksrc, SYS_CLKSRC);
90 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */ 89
91 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */ 90 /* setup the static bus controller */
92 91 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */
93 // get USB Functionality pin state (device vs host drive pins) 92 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
94 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000); 93 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
95 // 2nd USB port is USB host 94
96 pin_func |= 0x8000; 95 // get USB Functionality pin state (device vs host drive pins)
97 au_writel(pin_func, SYS_PINFUNC); 96 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000);
97 // 2nd USB port is USB host
98 pin_func |= 0x8000;
99 au_writel(pin_func, SYS_PINFUNC);
100 }
98#endif // defined (CONFIG_USB_OHCI) 101#endif // defined (CONFIG_USB_OHCI)
99 102
100 /* Enable sys bus clock divider when IDLE state or no bus activity. */ 103 /* Enable sys bus clock divider when IDLE state or no bus activity. */
101 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL); 104 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
102 105
103 // Enable the RTC if not already enabled 106 // Enable the RTC if not already enabled
104 if (!(readb(0xac000028) & 0x20)) { 107 if (!(readb(base + 0x28) & 0x20)) {
105 writeb(readb(0xac000028) | 0x20, 0xac000028); 108 writeb(readb(base + 0x28) | 0x20, base + 0x28);
106 au_sync(); 109 au_sync();
107 } 110 }
108 // Put the clock in BCD mode 111 // Put the clock in BCD mode
109 if (readb(0xac00002C) & 0x4) { /* reg B */ 112 if (readb(base + 0x2C) & 0x4) { /* reg B */
110 writeb(readb(0xac00002c) & ~0x4, 0xac00002c); 113 writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
111 au_sync(); 114 au_sync();
112 } 115 }
113} 116}
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index 91983ba407c4..b73b2d18bf56 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -137,33 +137,20 @@ static void pb1200_shutdown_irq( unsigned int irq_nr )
137 return; 137 return;
138} 138}
139 139
140static inline void pb1200_mask_and_ack_irq(unsigned int irq_nr)
141{
142 pb1200_disable_irq( irq_nr );
143}
144
145static void pb1200_end_irq(unsigned int irq_nr)
146{
147 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
148 pb1200_enable_irq(irq_nr);
149 }
150}
151
152static struct irq_chip external_irq_type = 140static struct irq_chip external_irq_type =
153{ 141{
154#ifdef CONFIG_MIPS_PB1200 142#ifdef CONFIG_MIPS_PB1200
155 "Pb1200 Ext", 143 .name = "Pb1200 Ext",
156#endif 144#endif
157#ifdef CONFIG_MIPS_DB1200 145#ifdef CONFIG_MIPS_DB1200
158 "Db1200 Ext", 146 .name = "Db1200 Ext",
159#endif 147#endif
160 pb1200_startup_irq, 148 .startup = pb1200_startup_irq,
161 pb1200_shutdown_irq, 149 .shutdown = pb1200_shutdown_irq,
162 pb1200_enable_irq, 150 .ack = pb1200_disable_irq,
163 pb1200_disable_irq, 151 .mask = pb1200_disable_irq,
164 pb1200_mask_and_ack_irq, 152 .mask_ack = pb1200_disable_irq,
165 pb1200_end_irq, 153 .unmask = pb1200_enable_irq,
166 NULL
167}; 154};
168 155
169void _board_init_irq(void) 156void _board_init_irq(void)
@@ -172,7 +159,8 @@ void _board_init_irq(void)
172 159
173 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++) 160 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++)
174 { 161 {
175 irq_desc[irq_nr].chip = &external_irq_type; 162 set_irq_chip_and_handler(irq_nr, &external_irq_type,
163 handle_level_irq);
176 pb1200_disable_irq(irq_nr); 164 pb1200_disable_irq(irq_nr);
177 } 165 }
178 166
diff --git a/arch/mips/basler/excite/excite_irq.c b/arch/mips/basler/excite/excite_irq.c
index 2e2061a286c5..1ecab6350421 100644
--- a/arch/mips/basler/excite/excite_irq.c
+++ b/arch/mips/basler/excite/excite_irq.c
@@ -47,9 +47,9 @@ extern asmlinkage void excite_handle_int(void);
47 */ 47 */
48void __init arch_init_irq(void) 48void __init arch_init_irq(void)
49{ 49{
50 mips_cpu_irq_init(0); 50 mips_cpu_irq_init();
51 rm7k_cpu_irq_init(8); 51 rm7k_cpu_irq_init();
52 rm9k_cpu_irq_init(12); 52 rm9k_cpu_irq_init();
53 53
54#ifdef CONFIG_KGDB 54#ifdef CONFIG_KGDB
55 excite_kgdb_init(); 55 excite_kgdb_init();
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index 4c46f0e73783..fe93b846923b 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -104,7 +104,7 @@ void __init arch_init_irq(void)
104 GT_WRITE(GT_INTRMASK_OFS, 0); 104 GT_WRITE(GT_INTRMASK_OFS, 0);
105 105
106 init_i8259_irqs(); /* 0 ... 15 */ 106 init_i8259_irqs(); /* 0 ... 15 */
107 mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */ 107 mips_cpu_irq_init(); /* 16 ... 23 */
108 108
109 /* 109 /*
110 * Mask all cpu interrupts 110 * Mask all cpu interrupts
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index e8f0f20b852d..a4b69b543bd9 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -204,8 +204,7 @@ void __init prom_init(void)
204 add_memory_region(0x0, memsz, BOOT_MEM_RAM); 204 add_memory_region(0x0, memsz, BOOT_MEM_RAM);
205} 205}
206 206
207unsigned long __init prom_free_prom_memory(void) 207void __init prom_free_prom_memory(void)
208{ 208{
209 /* Nothing to do! */ 209 /* Nothing to do! */
210 return 0;
211} 210}
diff --git a/arch/mips/ddb5xxx/common/prom.c b/arch/mips/ddb5xxx/common/prom.c
index efef0f57ce1e..54a857b5e3ba 100644
--- a/arch/mips/ddb5xxx/common/prom.c
+++ b/arch/mips/ddb5xxx/common/prom.c
@@ -59,9 +59,8 @@ void __init prom_init(void)
59#endif 59#endif
60} 60}
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67#if defined(CONFIG_DDB5477) 66#if defined(CONFIG_DDB5477)
diff --git a/arch/mips/ddb5xxx/ddb5477/irq.c b/arch/mips/ddb5xxx/ddb5477/irq.c
index a8bd2e66705c..2b23234a5b95 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq.c
@@ -17,6 +17,7 @@
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18 18
19#include <asm/i8259.h> 19#include <asm/i8259.h>
20#include <asm/irq_cpu.h>
20#include <asm/system.h> 21#include <asm/system.h>
21#include <asm/mipsregs.h> 22#include <asm/mipsregs.h>
22#include <asm/debug.h> 23#include <asm/debug.h>
@@ -73,7 +74,6 @@ set_pci_int_attr(u32 pci, u32 intn, u32 active, u32 trigger)
73} 74}
74 75
75extern void vrc5477_irq_init(u32 base); 76extern void vrc5477_irq_init(u32 base);
76extern void mips_cpu_irq_init(u32 base);
77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL }; 77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
78 78
79void __init arch_init_irq(void) 79void __init arch_init_irq(void)
@@ -125,7 +125,7 @@ void __init arch_init_irq(void)
125 125
126 /* init all controllers */ 126 /* init all controllers */
127 init_i8259_irqs(); 127 init_i8259_irqs();
128 mips_cpu_irq_init(CPU_IRQ_BASE); 128 mips_cpu_irq_init();
129 vrc5477_irq_init(VRC5477_IRQ_BASE); 129 vrc5477_irq_init(VRC5477_IRQ_BASE);
130 130
131 131
@@ -146,8 +146,7 @@ u8 i8259_interrupt_ack(void)
146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE); 146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE);
147 ddb_out32(DDB_PCIINIT10, reg); 147 ddb_out32(DDB_PCIINIT10, reg);
148 148
149 /* i8259.c set the base vector to be 0x0 */ 149 return irq;
150 return irq + I8259_IRQ_BASE;
151} 150}
152/* 151/*
153 * the first level int-handler will jump here if it is a vrc5477 irq 152 * the first level int-handler will jump here if it is a vrc5477 irq
@@ -177,7 +176,7 @@ static void vrc5477_irq_dispatch(void)
177 /* check for i8259 interrupts */ 176 /* check for i8259 interrupts */
178 if (intStatus & (1 << VRC5477_I8259_CASCADE)) { 177 if (intStatus & (1 << VRC5477_I8259_CASCADE)) {
179 int i8259_irq = i8259_interrupt_ack(); 178 int i8259_irq = i8259_interrupt_ack();
180 do_IRQ(I8259_IRQ_BASE + i8259_irq); 179 do_IRQ(i8259_irq);
181 return; 180 return;
182 } 181 }
183 } 182 }
diff --git a/arch/mips/ddb5xxx/ddb5477/irq_5477.c b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
index 96249aa5df5d..98c3b15eb369 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq_5477.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
@@ -82,7 +82,7 @@ vrc5477_irq_end(unsigned int irq)
82} 82}
83 83
84struct irq_chip vrc5477_irq_controller = { 84struct irq_chip vrc5477_irq_controller = {
85 .typename = "vrc5477_irq", 85 .name = "vrc5477_irq",
86 .ack = vrc5477_irq_ack, 86 .ack = vrc5477_irq_ack,
87 .mask = vrc5477_irq_disable, 87 .mask = vrc5477_irq_disable,
88 .mask_ack = vrc5477_irq_ack, 88 .mask_ack = vrc5477_irq_ack,
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 4c7cb4048d35..3acb133668dc 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -62,7 +62,7 @@ static inline void end_ioasic_irq(unsigned int irq)
62} 62}
63 63
64static struct irq_chip ioasic_irq_type = { 64static struct irq_chip ioasic_irq_type = {
65 .typename = "IO-ASIC", 65 .name = "IO-ASIC",
66 .ack = ack_ioasic_irq, 66 .ack = ack_ioasic_irq,
67 .mask = mask_ioasic_irq, 67 .mask = mask_ioasic_irq,
68 .mask_ack = ack_ioasic_irq, 68 .mask_ack = ack_ioasic_irq,
@@ -84,7 +84,7 @@ static inline void end_ioasic_dma_irq(unsigned int irq)
84} 84}
85 85
86static struct irq_chip ioasic_dma_irq_type = { 86static struct irq_chip ioasic_dma_irq_type = {
87 .typename = "IO-ASIC-DMA", 87 .name = "IO-ASIC-DMA",
88 .ack = ack_ioasic_dma_irq, 88 .ack = ack_ioasic_dma_irq,
89 .mask = mask_ioasic_dma_irq, 89 .mask = mask_ioasic_dma_irq,
90 .mask_ack = ack_ioasic_dma_irq, 90 .mask_ack = ack_ioasic_dma_irq,
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 916e46b8ccd8..02439dc0ba83 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -58,7 +58,7 @@ static void ack_kn02_irq(unsigned int irq)
58} 58}
59 59
60static struct irq_chip kn02_irq_type = { 60static struct irq_chip kn02_irq_type = {
61 .typename = "KN02-CSR", 61 .name = "KN02-CSR",
62 .ack = ack_kn02_irq, 62 .ack = ack_kn02_irq,
63 .mask = mask_kn02_irq, 63 .mask = mask_kn02_irq,
64 .mask_ack = ack_kn02_irq, 64 .mask_ack = ack_kn02_irq,
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 3aa01d268f2d..5a557e268f78 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -92,9 +92,9 @@ void __init prom_meminit(u32 magic)
92 rex_setup_memory_region(); 92 rex_setup_memory_region();
93} 93}
94 94
95unsigned long __init prom_free_prom_memory(void) 95void __init prom_free_prom_memory(void)
96{ 96{
97 unsigned long addr, end; 97 unsigned long end;
98 98
99 /* 99 /*
100 * Free everything below the kernel itself but leave 100 * Free everything below the kernel itself but leave
@@ -114,16 +114,5 @@ unsigned long __init prom_free_prom_memory(void)
114#endif 114#endif
115 end = __pa(&_text); 115 end = __pa(&_text);
116 116
117 addr = PAGE_SIZE; 117 free_init_pages("unused PROM memory", PAGE_SIZE, end);
118 while (addr < end) {
119 ClearPageReserved(virt_to_page(__va(addr)));
120 init_page_count(virt_to_page(__va(addr)));
121 free_page((unsigned long)__va(addr));
122 addr += PAGE_SIZE;
123 }
124
125 printk("Freeing unused PROM memory: %ldkb freed\n",
126 (end - PAGE_SIZE) >> 10);
127
128 return end - PAGE_SIZE;
129} 118}
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index d34032ac492a..1058e2f409bb 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -234,7 +234,7 @@ static void __init dec_init_kn01(void)
234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl, 234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl,
235 sizeof(kn01_cpu_mask_nr_tbl)); 235 sizeof(kn01_cpu_mask_nr_tbl));
236 236
237 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 237 mips_cpu_irq_init();
238 238
239} /* dec_init_kn01 */ 239} /* dec_init_kn01 */
240 240
@@ -309,7 +309,7 @@ static void __init dec_init_kn230(void)
309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl, 309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl,
310 sizeof(kn230_cpu_mask_nr_tbl)); 310 sizeof(kn230_cpu_mask_nr_tbl));
311 311
312 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 312 mips_cpu_irq_init();
313 313
314} /* dec_init_kn230 */ 314} /* dec_init_kn230 */
315 315
@@ -403,7 +403,7 @@ static void __init dec_init_kn02(void)
403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl, 403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl,
404 sizeof(kn02_asic_mask_nr_tbl)); 404 sizeof(kn02_asic_mask_nr_tbl));
405 405
406 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 406 mips_cpu_irq_init();
407 init_kn02_irqs(KN02_IRQ_BASE); 407 init_kn02_irqs(KN02_IRQ_BASE);
408 408
409} /* dec_init_kn02 */ 409} /* dec_init_kn02 */
@@ -504,7 +504,7 @@ static void __init dec_init_kn02ba(void)
504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl, 504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl,
505 sizeof(kn02ba_asic_mask_nr_tbl)); 505 sizeof(kn02ba_asic_mask_nr_tbl));
506 506
507 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 507 mips_cpu_irq_init();
508 init_ioasic_irqs(IO_IRQ_BASE); 508 init_ioasic_irqs(IO_IRQ_BASE);
509 509
510} /* dec_init_kn02ba */ 510} /* dec_init_kn02ba */
@@ -601,7 +601,7 @@ static void __init dec_init_kn02ca(void)
601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl, 601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl,
602 sizeof(kn02ca_asic_mask_nr_tbl)); 602 sizeof(kn02ca_asic_mask_nr_tbl));
603 603
604 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 604 mips_cpu_irq_init();
605 init_ioasic_irqs(IO_IRQ_BASE); 605 init_ioasic_irqs(IO_IRQ_BASE);
606 606
607} /* dec_init_kn02ca */ 607} /* dec_init_kn02ca */
@@ -702,7 +702,7 @@ static void __init dec_init_kn03(void)
702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl, 702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl,
703 sizeof(kn03_asic_mask_nr_tbl)); 703 sizeof(kn03_asic_mask_nr_tbl));
704 704
705 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 705 mips_cpu_irq_init();
706 init_ioasic_irqs(IO_IRQ_BASE); 706 init_ioasic_irqs(IO_IRQ_BASE);
707 707
708} /* dec_init_kn03 */ 708} /* dec_init_kn03 */
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 8d880f0b06ec..96df37b77759 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -57,7 +57,7 @@ static void emma2rh_irq_disable(unsigned int irq)
57} 57}
58 58
59struct irq_chip emma2rh_irq_controller = { 59struct irq_chip emma2rh_irq_controller = {
60 .typename = "emma2rh_irq", 60 .name = "emma2rh_irq",
61 .ack = emma2rh_irq_disable, 61 .ack = emma2rh_irq_disable,
62 .mask = emma2rh_irq_disable, 62 .mask = emma2rh_irq_disable,
63 .mask_ack = emma2rh_irq_disable, 63 .mask_ack = emma2rh_irq_disable,
diff --git a/arch/mips/emma2rh/markeins/irq.c b/arch/mips/emma2rh/markeins/irq.c
index c93369cb4115..3299b6dfe764 100644
--- a/arch/mips/emma2rh/markeins/irq.c
+++ b/arch/mips/emma2rh/markeins/irq.c
@@ -106,7 +106,7 @@ void __init arch_init_irq(void)
106 emma2rh_irq_init(EMMA2RH_IRQ_BASE); 106 emma2rh_irq_init(EMMA2RH_IRQ_BASE);
107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE); 107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE);
108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE); 108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE);
109 mips_cpu_irq_init(CPU_IRQ_BASE); 109 mips_cpu_irq_init();
110 110
111 /* setup cascade interrupts */ 111 /* setup cascade interrupts */
112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade); 112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade);
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 2116d9be5fa9..fba5c156f472 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -49,7 +49,7 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
49} 49}
50 50
51struct irq_chip emma2rh_sw_irq_controller = { 51struct irq_chip emma2rh_sw_irq_controller = {
52 .typename = "emma2rh_sw_irq", 52 .name = "emma2rh_sw_irq",
53 .ack = emma2rh_sw_irq_disable, 53 .ack = emma2rh_sw_irq_disable,
54 .mask = emma2rh_sw_irq_disable, 54 .mask = emma2rh_sw_irq_disable,
55 .mask_ack = emma2rh_sw_irq_disable, 55 .mask_ack = emma2rh_sw_irq_disable,
@@ -115,7 +115,7 @@ static void emma2rh_gpio_irq_end(unsigned int irq)
115} 115}
116 116
117struct irq_chip emma2rh_gpio_irq_controller = { 117struct irq_chip emma2rh_gpio_irq_controller = {
118 .typename = "emma2rh_gpio_irq", 118 .name = "emma2rh_gpio_irq",
119 .ack = emma2rh_gpio_irq_ack, 119 .ack = emma2rh_gpio_irq_ack,
120 .mask = emma2rh_gpio_irq_disable, 120 .mask = emma2rh_gpio_irq_disable,
121 .mask_ack = emma2rh_gpio_irq_ack, 121 .mask_ack = emma2rh_gpio_irq_ack,
diff --git a/arch/mips/gt64120/ev64120/irq.c b/arch/mips/gt64120/ev64120/irq.c
index b3e5796c81d7..04572b9c9642 100644
--- a/arch/mips/gt64120/ev64120/irq.c
+++ b/arch/mips/gt64120/ev64120/irq.c
@@ -88,7 +88,7 @@ static void end_ev64120_irq(unsigned int irq)
88} 88}
89 89
90static struct irq_chip ev64120_irq_type = { 90static struct irq_chip ev64120_irq_type = {
91 .typename = "EV64120", 91 .name = "EV64120",
92 .ack = disable_ev64120_irq, 92 .ack = disable_ev64120_irq,
93 .mask = disable_ev64120_irq, 93 .mask = disable_ev64120_irq,
94 .mask_ack = disable_ev64120_irq, 94 .mask_ack = disable_ev64120_irq,
diff --git a/arch/mips/gt64120/ev64120/setup.c b/arch/mips/gt64120/ev64120/setup.c
index 99c8d42212e2..477848c22a2c 100644
--- a/arch/mips/gt64120/ev64120/setup.c
+++ b/arch/mips/gt64120/ev64120/setup.c
@@ -59,9 +59,8 @@ extern void galileo_machine_power_off(void);
59 */ 59 */
60extern struct pci_ops galileo_pci_ops; 60extern struct pci_ops galileo_pci_ops;
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67/* 66/*
diff --git a/arch/mips/gt64120/momenco_ocelot/dbg_io.c b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/gt64120/momenco_ocelot/dbg_io.c
+++ b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/gt64120/momenco_ocelot/irq.c b/arch/mips/gt64120/momenco_ocelot/irq.c
index d9294401ccb0..2585d9dbda33 100644
--- a/arch/mips/gt64120/momenco_ocelot/irq.c
+++ b/arch/mips/gt64120/momenco_ocelot/irq.c
@@ -90,6 +90,6 @@ void __init arch_init_irq(void)
90 clear_c0_status(ST0_IM); 90 clear_c0_status(ST0_IM);
91 local_irq_disable(); 91 local_irq_disable();
92 92
93 mips_cpu_irq_init(0); 93 mips_cpu_irq_init();
94 rm7k_cpu_irq_init(8); 94 rm7k_cpu_irq_init();
95} 95}
diff --git a/arch/mips/gt64120/momenco_ocelot/prom.c b/arch/mips/gt64120/momenco_ocelot/prom.c
index 8677b6d3ada7..78f393b2afd9 100644
--- a/arch/mips/gt64120/momenco_ocelot/prom.c
+++ b/arch/mips/gt64120/momenco_ocelot/prom.c
@@ -67,7 +67,6 @@ void __init prom_init(void)
67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM); 67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM);
68} 68}
69 69
70unsigned long __init prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
diff --git a/arch/mips/gt64120/wrppmc/irq.c b/arch/mips/gt64120/wrppmc/irq.c
index eedfc24e1eae..d3d96591780e 100644
--- a/arch/mips/gt64120/wrppmc/irq.c
+++ b/arch/mips/gt64120/wrppmc/irq.c
@@ -63,7 +63,7 @@ void gt64120_init_pic(void)
63void __init arch_init_irq(void) 63void __init arch_init_irq(void)
64{ 64{
65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */ 65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */
66 mips_cpu_irq_init(0); 66 mips_cpu_irq_init();
67 67
68 gt64120_init_pic(); 68 gt64120_init_pic();
69} 69}
diff --git a/arch/mips/gt64120/wrppmc/setup.c b/arch/mips/gt64120/wrppmc/setup.c
index 429afc400cb4..121188d5ec4a 100644
--- a/arch/mips/gt64120/wrppmc/setup.c
+++ b/arch/mips/gt64120/wrppmc/setup.c
@@ -93,9 +93,8 @@ void __init wrppmc_early_printk(const char *fmt, ...)
93} 93}
94#endif /* WRPPMC_EARLY_DEBUG */ 94#endif /* WRPPMC_EARLY_DEBUG */
95 95
96unsigned long __init prom_free_prom_memory(void) 96void __init prom_free_prom_memory(void)
97{ 97{
98 return 0;
99} 98}
100 99
101#ifdef CONFIG_SERIAL_8250 100#ifdef CONFIG_SERIAL_8250
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index f8d417b5c2bb..295892e4ce53 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -40,7 +40,7 @@ void disable_r4030_irq(unsigned int irq)
40} 40}
41 41
42static struct irq_chip r4030_irq_type = { 42static struct irq_chip r4030_irq_type = {
43 .typename = "R4030", 43 .name = "R4030",
44 .ack = disable_r4030_irq, 44 .ack = disable_r4030_irq,
45 .mask = disable_r4030_irq, 45 .mask = disable_r4030_irq,
46 .mask_ack = disable_r4030_irq, 46 .mask_ack = disable_r4030_irq,
diff --git a/arch/mips/jmr3927/common/prom.c b/arch/mips/jmr3927/common/prom.c
index 5d5838f41d23..aa481b774c42 100644
--- a/arch/mips/jmr3927/common/prom.c
+++ b/arch/mips/jmr3927/common/prom.c
@@ -75,7 +75,6 @@ void __init prom_init_cmdline(void)
75 *cp = '\0'; 75 *cp = '\0';
76} 76}
77 77
78unsigned long __init prom_free_prom_memory(void) 78void __init prom_free_prom_memory(void)
79{ 79{
80 return 0;
81} 80}
diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c
index 3da49c5aaf49..7d2c203cb406 100644
--- a/arch/mips/jmr3927/rbhma3100/irq.c
+++ b/arch/mips/jmr3927/rbhma3100/irq.c
@@ -439,7 +439,7 @@ void __init arch_init_irq(void)
439} 439}
440 440
441static struct irq_chip jmr3927_irq_controller = { 441static struct irq_chip jmr3927_irq_controller = {
442 .typename = "jmr3927_irq", 442 .name = "jmr3927_irq",
443 .ack = jmr3927_irq_ack, 443 .ack = jmr3927_irq_ack,
444 .mask = jmr3927_irq_disable, 444 .mask = jmr3927_irq_disable,
445 .mask_ack = jmr3927_irq_ack, 445 .mask_ack = jmr3927_irq_ack,
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index 138f25efe38a..7ca3d6d07b34 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -434,7 +434,7 @@ void __init tx3927_setup(void)
434 434
435 /* DMA */ 435 /* DMA */
436 tx3927_dmaptr->mcr = 0; 436 tx3927_dmaptr->mcr = 0;
437 for (i = 0; i < sizeof(tx3927_dmaptr->ch) / sizeof(tx3927_dmaptr->ch[0]); i++) { 437 for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
438 /* reset channel */ 438 /* reset channel */
439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST; 439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
440 tx3927_dmaptr->ch[i].ccr = 0; 440 tx3927_dmaptr->ch[i].ccr = 0;
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ff88b06f89df..ea7df4b8da33 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -234,10 +234,6 @@ void output_mm_defines(void)
234 constant("#define _PMD_SHIFT ", PMD_SHIFT); 234 constant("#define _PMD_SHIFT ", PMD_SHIFT);
235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT); 235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
236 linefeed; 236 linefeed;
237 constant("#define _PGD_ORDER ", PGD_ORDER);
238 constant("#define _PMD_ORDER ", PMD_ORDER);
239 constant("#define _PTE_ORDER ", PTE_ORDER);
240 linefeed;
241 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD); 237 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
242 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD); 238 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
243 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE); 239 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 442839e9578c..f59ef271d247 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -565,7 +565,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
565 if (config3 & MIPS_CONF3_VEIC) 565 if (config3 & MIPS_CONF3_VEIC)
566 c->options |= MIPS_CPU_VEIC; 566 c->options |= MIPS_CPU_VEIC;
567 if (config3 & MIPS_CONF3_MT) 567 if (config3 & MIPS_CONF3_MT)
568 c->ases |= MIPS_ASE_MIPSMT; 568 c->ases |= MIPS_ASE_MIPSMT;
569 569
570 return config3 & MIPS_CONF_M; 570 return config3 & MIPS_CONF_M;
571} 571}
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 719d26968cb2..7bc882049269 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -505,13 +505,13 @@ void show_gdbregs(struct gdb_regs * regs)
505 */ 505 */
506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
507 regs->reg0, regs->reg1, regs->reg2, regs->reg3, 507 regs->reg0, regs->reg1, regs->reg2, regs->reg3,
508 regs->reg4, regs->reg5, regs->reg6, regs->reg7); 508 regs->reg4, regs->reg5, regs->reg6, regs->reg7);
509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
510 regs->reg8, regs->reg9, regs->reg10, regs->reg11, 510 regs->reg8, regs->reg9, regs->reg10, regs->reg11,
511 regs->reg12, regs->reg13, regs->reg14, regs->reg15); 511 regs->reg12, regs->reg13, regs->reg14, regs->reg15);
512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
513 regs->reg16, regs->reg17, regs->reg18, regs->reg19, 513 regs->reg16, regs->reg17, regs->reg18, regs->reg19,
514 regs->reg20, regs->reg21, regs->reg22, regs->reg23); 514 regs->reg20, regs->reg21, regs->reg22, regs->reg23);
515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
516 regs->reg24, regs->reg25, regs->reg26, regs->reg27, 516 regs->reg24, regs->reg25, regs->reg26, regs->reg27,
517 regs->reg28, regs->reg29, regs->reg30, regs->reg31); 517 regs->reg28, regs->reg29, regs->reg30, regs->reg31);
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 9a7811d13db2..6f57ca44291f 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -231,28 +231,3 @@ NESTED(smp_bootstrap, 16, sp)
231#endif /* CONFIG_SMP */ 231#endif /* CONFIG_SMP */
232 232
233 __FINIT 233 __FINIT
234
235 .comm kernelsp, NR_CPUS * 8, 8
236 .comm pgd_current, NR_CPUS * 8, 8
237
238 .comm fw_arg0, SZREG, SZREG # firmware arguments
239 .comm fw_arg1, SZREG, SZREG
240 .comm fw_arg2, SZREG, SZREG
241 .comm fw_arg3, SZREG, SZREG
242
243 .macro page name, order
244 .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
245 .endm
246
247 /*
248 * On 64-bit we've got three-level pagetables with a slightly
249 * different layout ...
250 */
251 page swapper_pg_dir, _PGD_ORDER
252#ifdef CONFIG_64BIT
253#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
254 page module_pg_dir, _PGD_ORDER
255#endif
256 page invalid_pmd_table, _PMD_ORDER
257#endif
258 page invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b59a676c6d0e..b33ba6cd7f5b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -54,9 +54,11 @@ static unsigned int cached_irq_mask = 0xffff;
54 54
55void disable_8259A_irq(unsigned int irq) 55void disable_8259A_irq(unsigned int irq)
56{ 56{
57 unsigned int mask = 1 << irq; 57 unsigned int mask;
58 unsigned long flags; 58 unsigned long flags;
59 59
60 irq -= I8259A_IRQ_BASE;
61 mask = 1 << irq;
60 spin_lock_irqsave(&i8259A_lock, flags); 62 spin_lock_irqsave(&i8259A_lock, flags);
61 cached_irq_mask |= mask; 63 cached_irq_mask |= mask;
62 if (irq & 8) 64 if (irq & 8)
@@ -68,9 +70,11 @@ void disable_8259A_irq(unsigned int irq)
68 70
69void enable_8259A_irq(unsigned int irq) 71void enable_8259A_irq(unsigned int irq)
70{ 72{
71 unsigned int mask = ~(1 << irq); 73 unsigned int mask;
72 unsigned long flags; 74 unsigned long flags;
73 75
76 irq -= I8259A_IRQ_BASE;
77 mask = ~(1 << irq);
74 spin_lock_irqsave(&i8259A_lock, flags); 78 spin_lock_irqsave(&i8259A_lock, flags);
75 cached_irq_mask &= mask; 79 cached_irq_mask &= mask;
76 if (irq & 8) 80 if (irq & 8)
@@ -82,10 +86,12 @@ void enable_8259A_irq(unsigned int irq)
82 86
83int i8259A_irq_pending(unsigned int irq) 87int i8259A_irq_pending(unsigned int irq)
84{ 88{
85 unsigned int mask = 1 << irq; 89 unsigned int mask;
86 unsigned long flags; 90 unsigned long flags;
87 int ret; 91 int ret;
88 92
93 irq -= I8259A_IRQ_BASE;
94 mask = 1 << irq;
89 spin_lock_irqsave(&i8259A_lock, flags); 95 spin_lock_irqsave(&i8259A_lock, flags);
90 if (irq < 8) 96 if (irq < 8)
91 ret = inb(PIC_MASTER_CMD) & mask; 97 ret = inb(PIC_MASTER_CMD) & mask;
@@ -134,9 +140,11 @@ static inline int i8259A_irq_real(unsigned int irq)
134 */ 140 */
135void mask_and_ack_8259A(unsigned int irq) 141void mask_and_ack_8259A(unsigned int irq)
136{ 142{
137 unsigned int irqmask = 1 << irq; 143 unsigned int irqmask;
138 unsigned long flags; 144 unsigned long flags;
139 145
146 irq -= I8259A_IRQ_BASE;
147 irqmask = 1 << irq;
140 spin_lock_irqsave(&i8259A_lock, flags); 148 spin_lock_irqsave(&i8259A_lock, flags);
141 /* 149 /*
142 * Lightweight spurious IRQ detection. We do not want 150 * Lightweight spurious IRQ detection. We do not want
@@ -169,8 +177,8 @@ handle_real_irq:
169 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ 177 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
170 } 178 }
171#ifdef CONFIG_MIPS_MT_SMTC 179#ifdef CONFIG_MIPS_MT_SMTC
172 if (irq_hwmask[irq] & ST0_IM) 180 if (irq_hwmask[irq] & ST0_IM)
173 set_c0_status(irq_hwmask[irq] & ST0_IM); 181 set_c0_status(irq_hwmask[irq] & ST0_IM);
174#endif /* CONFIG_MIPS_MT_SMTC */ 182#endif /* CONFIG_MIPS_MT_SMTC */
175 spin_unlock_irqrestore(&i8259A_lock, flags); 183 spin_unlock_irqrestore(&i8259A_lock, flags);
176 return; 184 return;
@@ -322,8 +330,8 @@ void __init init_i8259_irqs (void)
322 330
323 init_8259A(0); 331 init_8259A(0);
324 332
325 for (i = 0; i < 16; i++) 333 for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++)
326 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); 334 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
327 335
328 setup_irq(PIC_CASCADE_IR, &irq2); 336 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
329} 337}
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 37cad5de515c..3cc25c05d367 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -10,6 +10,8 @@
10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> 10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com> 11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
12 */ 12 */
13#undef DEBUG
14
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
15#include <linux/stat.h> 17#include <linux/stat.h>
@@ -40,8 +42,6 @@
40 42
41#include <linux/elf.h> 43#include <linux/elf.h>
42 44
43#undef DEBUG
44
45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); 45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
46static int load_irix_library(struct file *); 46static int load_irix_library(struct file *);
47static int irix_core_dump(long signr, struct pt_regs * regs, 47static int irix_core_dump(long signr, struct pt_regs * regs,
@@ -52,72 +52,102 @@ static struct linux_binfmt irix_format = {
52 irix_core_dump, PAGE_SIZE 52 irix_core_dump, PAGE_SIZE
53}; 53};
54 54
55#ifdef DEBUG
56/* Debugging routines. */ 55/* Debugging routines. */
57static char *get_elf_p_type(Elf32_Word p_type) 56static char *get_elf_p_type(Elf32_Word p_type)
58{ 57{
59 int i = (int) p_type; 58#ifdef DEBUG
60 59 switch (p_type) {
61 switch(i) { 60 case PT_NULL:
62 case PT_NULL: return("PT_NULL"); break; 61 return "PT_NULL";
63 case PT_LOAD: return("PT_LOAD"); break; 62 break;
64 case PT_DYNAMIC: return("PT_DYNAMIC"); break; 63
65 case PT_INTERP: return("PT_INTERP"); break; 64 case PT_LOAD:
66 case PT_NOTE: return("PT_NOTE"); break; 65 return "PT_LOAD";
67 case PT_SHLIB: return("PT_SHLIB"); break; 66 break;
68 case PT_PHDR: return("PT_PHDR"); break; 67
69 case PT_LOPROC: return("PT_LOPROC/REGINFO"); break; 68 case PT_DYNAMIC:
70 case PT_HIPROC: return("PT_HIPROC"); break; 69 return "PT_DYNAMIC";
71 default: return("PT_BOGUS"); break; 70 break;
71
72 case PT_INTERP:
73 return "PT_INTERP";
74 break;
75
76 case PT_NOTE:
77 return "PT_NOTE";
78 break;
79
80 case PT_SHLIB:
81 return "PT_SHLIB";
82 break;
83
84 case PT_PHDR:
85 return "PT_PHDR";
86 break;
87
88 case PT_LOPROC:
89 return "PT_LOPROC/REGINFO";
90 break;
91
92 case PT_HIPROC:
93 return "PT_HIPROC";
94 break;
95
96 default:
97 return "PT_BOGUS";
98 break;
72 } 99 }
100#endif
73} 101}
74 102
75static void print_elfhdr(struct elfhdr *ehp) 103static void print_elfhdr(struct elfhdr *ehp)
76{ 104{
77 int i; 105 int i;
78 106
79 printk("ELFHDR: e_ident<"); 107 pr_debug("ELFHDR: e_ident<");
80 for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]); 108 for (i = 0; i < (EI_NIDENT - 1); i++)
81 printk("%x>\n", ehp->e_ident[i]); 109 pr_debug("%x ", ehp->e_ident[i]);
82 printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n", 110 pr_debug("%x>\n", ehp->e_ident[i]);
83 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine, 111 pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
84 (unsigned long) ehp->e_version); 112 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
85 printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] " 113 (unsigned long) ehp->e_version);
86 "e_flags[%08lx]\n", 114 pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
87 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff, 115 "e_flags[%08lx]\n",
88 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags); 116 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
89 printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n", 117 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
90 (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize, 118 pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
91 (unsigned short) ehp->e_phnum); 119 (unsigned short) ehp->e_ehsize,
92 printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n", 120 (unsigned short) ehp->e_phentsize,
93 (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum, 121 (unsigned short) ehp->e_phnum);
94 (unsigned short) ehp->e_shstrndx); 122 pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
123 (unsigned short) ehp->e_shentsize,
124 (unsigned short) ehp->e_shnum,
125 (unsigned short) ehp->e_shstrndx);
95} 126}
96 127
97static void print_phdr(int i, struct elf_phdr *ep) 128static void print_phdr(int i, struct elf_phdr *ep)
98{ 129{
99 printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] " 130 pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
100 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type), 131 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
101 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr, 132 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
102 (unsigned long) ep->p_paddr); 133 (unsigned long) ep->p_paddr);
103 printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] " 134 pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
104 "p_align[%08lx]\n", (unsigned long) ep->p_filesz, 135 "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
105 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags, 136 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
106 (unsigned long) ep->p_align); 137 (unsigned long) ep->p_align);
107} 138}
108 139
109static void dump_phdrs(struct elf_phdr *ep, int pnum) 140static void dump_phdrs(struct elf_phdr *ep, int pnum)
110{ 141{
111 int i; 142 int i;
112 143
113 for(i = 0; i < pnum; i++, ep++) { 144 for (i = 0; i < pnum; i++, ep++) {
114 if((ep->p_type == PT_LOAD) || 145 if ((ep->p_type == PT_LOAD) ||
115 (ep->p_type == PT_INTERP) || 146 (ep->p_type == PT_INTERP) ||
116 (ep->p_type == PT_PHDR)) 147 (ep->p_type == PT_PHDR))
117 print_phdr(i, ep); 148 print_phdr(i, ep);
118 } 149 }
119} 150}
120#endif /* DEBUG */
121 151
122static void set_brk(unsigned long start, unsigned long end) 152static void set_brk(unsigned long start, unsigned long end)
123{ 153{
@@ -156,11 +186,10 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
156 elf_addr_t *envp; 186 elf_addr_t *envp;
157 elf_addr_t *sp, *csp; 187 elf_addr_t *sp, *csp;
158 188
159#ifdef DEBUG 189 pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
160 printk("create_irix_tables: p[%p] argc[%d] envc[%d] " 190 "load_addr[%08x] interp_load_addr[%08x]\n",
161 "load_addr[%08x] interp_load_addr[%08x]\n", 191 p, argc, envc, load_addr, interp_load_addr);
162 p, argc, envc, load_addr, interp_load_addr); 192
163#endif
164 sp = (elf_addr_t *) (~15UL & (unsigned long) p); 193 sp = (elf_addr_t *) (~15UL & (unsigned long) p);
165 csp = sp; 194 csp = sp;
166 csp -= exec ? DLINFO_ITEMS*2 : 2; 195 csp -= exec ? DLINFO_ITEMS*2 : 2;
@@ -181,7 +210,7 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
181 sp -= 2; 210 sp -= 2;
182 NEW_AUX_ENT(0, AT_NULL, 0); 211 NEW_AUX_ENT(0, AT_NULL, 0);
183 212
184 if(exec) { 213 if (exec) {
185 sp -= 11*2; 214 sp -= 11*2;
186 215
187 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff); 216 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
@@ -245,9 +274,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
245 last_bss = 0; 274 last_bss = 0;
246 error = load_addr = 0; 275 error = load_addr = 0;
247 276
248#ifdef DEBUG
249 print_elfhdr(interp_elf_ex); 277 print_elfhdr(interp_elf_ex);
250#endif
251 278
252 /* First of all, some simple consistency checks */ 279 /* First of all, some simple consistency checks */
253 if ((interp_elf_ex->e_type != ET_EXEC && 280 if ((interp_elf_ex->e_type != ET_EXEC &&
@@ -258,7 +285,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
258 } 285 }
259 286
260 /* Now read in all of the header information */ 287 /* Now read in all of the header information */
261 if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) { 288 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
262 printk("IRIX interp header bigger than a page (%d)\n", 289 printk("IRIX interp header bigger than a page (%d)\n",
263 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum)); 290 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
264 return 0xffffffff; 291 return 0xffffffff;
@@ -267,15 +294,15 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
267 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, 294 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
268 GFP_KERNEL); 295 GFP_KERNEL);
269 296
270 if(!elf_phdata) { 297 if (!elf_phdata) {
271 printk("Cannot kmalloc phdata for IRIX interp.\n"); 298 printk("Cannot kmalloc phdata for IRIX interp.\n");
272 return 0xffffffff; 299 return 0xffffffff;
273 } 300 }
274 301
275 /* If the size of this structure has changed, then punt, since 302 /* If the size of this structure has changed, then punt, since
276 * we will be doing the wrong thing. 303 * we will be doing the wrong thing.
277 */ 304 */
278 if(interp_elf_ex->e_phentsize != 32) { 305 if (interp_elf_ex->e_phentsize != 32) {
279 printk("IRIX interp e_phentsize == %d != 32 ", 306 printk("IRIX interp e_phentsize == %d != 32 ",
280 interp_elf_ex->e_phentsize); 307 interp_elf_ex->e_phentsize);
281 kfree(elf_phdata); 308 kfree(elf_phdata);
@@ -286,61 +313,71 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
286 (char *) elf_phdata, 313 (char *) elf_phdata,
287 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); 314 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
288 315
289#ifdef DEBUG
290 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); 316 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
291#endif
292 317
293 eppnt = elf_phdata; 318 eppnt = elf_phdata;
294 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) { 319 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
295 if(eppnt->p_type == PT_LOAD) { 320 if (eppnt->p_type == PT_LOAD) {
296 int elf_type = MAP_PRIVATE | MAP_DENYWRITE; 321 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
297 int elf_prot = 0; 322 int elf_prot = 0;
298 unsigned long vaddr = 0; 323 unsigned long vaddr = 0;
299 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; 324 if (eppnt->p_flags & PF_R)
300 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 325 elf_prot = PROT_READ;
301 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 326 if (eppnt->p_flags & PF_W)
302 elf_type |= MAP_FIXED; 327 elf_prot |= PROT_WRITE;
303 vaddr = eppnt->p_vaddr; 328 if (eppnt->p_flags & PF_X)
304 329 elf_prot |= PROT_EXEC;
305 pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", 330 elf_type |= MAP_FIXED;
306 interpreter, vaddr, 331 vaddr = eppnt->p_vaddr;
307 (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), 332
308 (unsigned long) elf_prot, (unsigned long) elf_type, 333 pr_debug("INTERP do_mmap"
309 (unsigned long) (eppnt->p_offset & 0xfffff000)); 334 "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
310 down_write(&current->mm->mmap_sem); 335 interpreter, vaddr,
311 error = do_mmap(interpreter, vaddr, 336 (unsigned long)
312 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), 337 (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
313 elf_prot, elf_type, 338 (unsigned long)
314 eppnt->p_offset & 0xfffff000); 339 elf_prot, (unsigned long) elf_type,
315 up_write(&current->mm->mmap_sem); 340 (unsigned long)
316 341 (eppnt->p_offset & 0xfffff000));
317 if(error < 0 && error > -1024) { 342
318 printk("Aieee IRIX interp mmap error=%d\n", error); 343 down_write(&current->mm->mmap_sem);
319 break; /* Real error */ 344 error = do_mmap(interpreter, vaddr,
320 } 345 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
321 pr_debug("error=%08lx ", (unsigned long) error); 346 elf_prot, elf_type,
322 if(!load_addr && interp_elf_ex->e_type == ET_DYN) { 347 eppnt->p_offset & 0xfffff000);
323 load_addr = error; 348 up_write(&current->mm->mmap_sem);
324 pr_debug("load_addr = error "); 349
325 } 350 if (error < 0 && error > -1024) {
326 351 printk("Aieee IRIX interp mmap error=%d\n",
327 /* Find the end of the file mapping for this phdr, and keep 352 error);
328 * track of the largest address we see for this. 353 break; /* Real error */
329 */ 354 }
330 k = eppnt->p_vaddr + eppnt->p_filesz; 355 pr_debug("error=%08lx ", (unsigned long) error);
331 if(k > elf_bss) elf_bss = k; 356 if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
332 357 load_addr = error;
333 /* Do the same thing for the memory mapping - between 358 pr_debug("load_addr = error ");
334 * elf_bss and last_bss is the bss section. 359 }
335 */ 360
336 k = eppnt->p_memsz + eppnt->p_vaddr; 361 /*
337 if(k > last_bss) last_bss = k; 362 * Find the end of the file mapping for this phdr, and
338 pr_debug("\n"); 363 * keep track of the largest address we see for this.
339 } 364 */
365 k = eppnt->p_vaddr + eppnt->p_filesz;
366 if (k > elf_bss)
367 elf_bss = k;
368
369 /* Do the same thing for the memory mapping - between
370 * elf_bss and last_bss is the bss section.
371 */
372 k = eppnt->p_memsz + eppnt->p_vaddr;
373 if (k > last_bss)
374 last_bss = k;
375 pr_debug("\n");
376 }
340 } 377 }
341 378
342 /* Now use mmap to map the library into memory. */ 379 /* Now use mmap to map the library into memory. */
343 if(error < 0 && error > -1024) { 380 if (error < 0 && error > -1024) {
344 pr_debug("got error %d\n", error); 381 pr_debug("got error %d\n", error);
345 kfree(elf_phdata); 382 kfree(elf_phdata);
346 return 0xffffffff; 383 return 0xffffffff;
@@ -377,7 +414,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
377 return -ENOEXEC; 414 return -ENOEXEC;
378 415
379 /* First of all, some simple consistency checks */ 416 /* First of all, some simple consistency checks */
380 if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || 417 if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
381 !bprm->file->f_op->mmap) { 418 !bprm->file->f_op->mmap) {
382 return -ENOEXEC; 419 return -ENOEXEC;
383 } 420 }
@@ -388,7 +425,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
388 * XXX all registers as 64bits on cpu's capable of this at 425 * XXX all registers as 64bits on cpu's capable of this at
389 * XXX exception time plus frob the XTLB exception vector. 426 * XXX exception time plus frob the XTLB exception vector.
390 */ 427 */
391 if((ehp->e_flags & EF_MIPS_ABI2)) 428 if ((ehp->e_flags & EF_MIPS_ABI2))
392 return -ENOEXEC; 429 return -ENOEXEC;
393 430
394 return 0; 431 return 0;
@@ -410,7 +447,7 @@ static inline int look_for_irix_interpreter(char **name,
410 struct file *file = NULL; 447 struct file *file = NULL;
411 448
412 *name = NULL; 449 *name = NULL;
413 for(i = 0; i < pnum; i++, epp++) { 450 for (i = 0; i < pnum; i++, epp++) {
414 if (epp->p_type != PT_INTERP) 451 if (epp->p_type != PT_INTERP)
415 continue; 452 continue;
416 453
@@ -467,8 +504,8 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
467 unsigned int tmp; 504 unsigned int tmp;
468 int i, prot; 505 int i, prot;
469 506
470 for(i = 0; i < pnum; i++, epp++) { 507 for (i = 0; i < pnum; i++, epp++) {
471 if(epp->p_type != PT_LOAD) 508 if (epp->p_type != PT_LOAD)
472 continue; 509 continue;
473 510
474 /* Map it. */ 511 /* Map it. */
@@ -483,23 +520,23 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
483 up_write(&current->mm->mmap_sem); 520 up_write(&current->mm->mmap_sem);
484 521
485 /* Fixup location tracking vars. */ 522 /* Fixup location tracking vars. */
486 if((epp->p_vaddr & 0xfffff000) < *estack) 523 if ((epp->p_vaddr & 0xfffff000) < *estack)
487 *estack = (epp->p_vaddr & 0xfffff000); 524 *estack = (epp->p_vaddr & 0xfffff000);
488 if(!*laddr) 525 if (!*laddr)
489 *laddr = epp->p_vaddr - epp->p_offset; 526 *laddr = epp->p_vaddr - epp->p_offset;
490 if(epp->p_vaddr < *scode) 527 if (epp->p_vaddr < *scode)
491 *scode = epp->p_vaddr; 528 *scode = epp->p_vaddr;
492 529
493 tmp = epp->p_vaddr + epp->p_filesz; 530 tmp = epp->p_vaddr + epp->p_filesz;
494 if(tmp > *ebss) 531 if (tmp > *ebss)
495 *ebss = tmp; 532 *ebss = tmp;
496 if((epp->p_flags & PF_X) && *ecode < tmp) 533 if ((epp->p_flags & PF_X) && *ecode < tmp)
497 *ecode = tmp; 534 *ecode = tmp;
498 if(*edata < tmp) 535 if (*edata < tmp)
499 *edata = tmp; 536 *edata = tmp;
500 537
501 tmp = epp->p_vaddr + epp->p_memsz; 538 tmp = epp->p_vaddr + epp->p_memsz;
502 if(tmp > *ebrk) 539 if (tmp > *ebrk)
503 *ebrk = tmp; 540 *ebrk = tmp;
504 } 541 }
505 542
@@ -513,12 +550,12 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
513 int i; 550 int i;
514 551
515 *eentry = 0xffffffff; 552 *eentry = 0xffffffff;
516 for(i = 0; i < pnum; i++, epp++) { 553 for (i = 0; i < pnum; i++, epp++) {
517 if(epp->p_type != PT_INTERP) 554 if (epp->p_type != PT_INTERP)
518 continue; 555 continue;
519 556
520 /* We should have fielded this error elsewhere... */ 557 /* We should have fielded this error elsewhere... */
521 if(*eentry != 0xffffffff) 558 if (*eentry != 0xffffffff)
522 return -1; 559 return -1;
523 560
524 set_fs(old_fs); 561 set_fs(old_fs);
@@ -604,9 +641,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
604 if (elf_ex.e_shnum > 20) 641 if (elf_ex.e_shnum > 20)
605 goto out; 642 goto out;
606 643
607#ifdef DEBUG
608 print_elfhdr(&elf_ex); 644 print_elfhdr(&elf_ex);
609#endif
610 645
611 /* Now read in all of the header information */ 646 /* Now read in all of the header information */
612 size = elf_ex.e_phentsize * elf_ex.e_phnum; 647 size = elf_ex.e_phentsize * elf_ex.e_phnum;
@@ -622,13 +657,11 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
622 if (retval < 0) 657 if (retval < 0)
623 goto out_free_ph; 658 goto out_free_ph;
624 659
625#ifdef DEBUG
626 dump_phdrs(elf_phdata, elf_ex.e_phnum); 660 dump_phdrs(elf_phdata, elf_ex.e_phnum);
627#endif
628 661
629 /* Set some things for later. */ 662 /* Set some things for later. */
630 for(i = 0; i < elf_ex.e_phnum; i++) { 663 for (i = 0; i < elf_ex.e_phnum; i++) {
631 switch(elf_phdata[i].p_type) { 664 switch (elf_phdata[i].p_type) {
632 case PT_INTERP: 665 case PT_INTERP:
633 has_interp = 1; 666 has_interp = 1;
634 elf_ihdr = &elf_phdata[i]; 667 elf_ihdr = &elf_phdata[i];
@@ -667,7 +700,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
667 700
668 if (elf_interpreter) { 701 if (elf_interpreter) {
669 retval = verify_irix_interpreter(&interp_elf_ex); 702 retval = verify_irix_interpreter(&interp_elf_ex);
670 if(retval) 703 if (retval)
671 goto out_free_interp; 704 goto out_free_interp;
672 } 705 }
673 706
@@ -706,12 +739,12 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
706 &load_addr, &start_code, &elf_bss, &end_code, 739 &load_addr, &start_code, &elf_bss, &end_code,
707 &end_data, &elf_brk); 740 &end_data, &elf_brk);
708 741
709 if(elf_interpreter) { 742 if (elf_interpreter) {
710 retval = map_interpreter(elf_phdata, &interp_elf_ex, 743 retval = map_interpreter(elf_phdata, &interp_elf_ex,
711 interpreter, &interp_load_addr, 744 interpreter, &interp_load_addr,
712 elf_ex.e_phnum, old_fs, &elf_entry); 745 elf_ex.e_phnum, old_fs, &elf_entry);
713 kfree(elf_interpreter); 746 kfree(elf_interpreter);
714 if(retval) { 747 if (retval) {
715 set_fs(old_fs); 748 set_fs(old_fs);
716 printk("Unable to load IRIX ELF interpreter\n"); 749 printk("Unable to load IRIX ELF interpreter\n");
717 send_sig(SIGSEGV, current, 0); 750 send_sig(SIGSEGV, current, 0);
@@ -809,12 +842,12 @@ static int load_irix_library(struct file *file)
809 return -ENOEXEC; 842 return -ENOEXEC;
810 843
811 /* First of all, some simple consistency checks. */ 844 /* First of all, some simple consistency checks. */
812 if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || 845 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
813 !file->f_op->mmap) 846 !file->f_op->mmap)
814 return -ENOEXEC; 847 return -ENOEXEC;
815 848
816 /* Now read in all of the header information. */ 849 /* Now read in all of the header information. */
817 if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) 850 if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
818 return -ENOEXEC; 851 return -ENOEXEC;
819 852
820 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); 853 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
@@ -825,15 +858,15 @@ static int load_irix_library(struct file *file)
825 sizeof(struct elf_phdr) * elf_ex.e_phnum); 858 sizeof(struct elf_phdr) * elf_ex.e_phnum);
826 859
827 j = 0; 860 j = 0;
828 for(i=0; i<elf_ex.e_phnum; i++) 861 for (i=0; i<elf_ex.e_phnum; i++)
829 if((elf_phdata + i)->p_type == PT_LOAD) j++; 862 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
830 863
831 if(j != 1) { 864 if (j != 1) {
832 kfree(elf_phdata); 865 kfree(elf_phdata);
833 return -ENOEXEC; 866 return -ENOEXEC;
834 } 867 }
835 868
836 while(elf_phdata->p_type != PT_LOAD) elf_phdata++; 869 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
837 870
838 /* Now use mmap to map the library into memory. */ 871 /* Now use mmap to map the library into memory. */
839 down_write(&current->mm->mmap_sem); 872 down_write(&current->mm->mmap_sem);
@@ -889,9 +922,7 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
889 return -EFAULT; 922 return -EFAULT;
890 } 923 }
891 924
892#ifdef DEBUG
893 dump_phdrs(user_phdrp, cnt); 925 dump_phdrs(user_phdrp, cnt);
894#endif
895 926
896 for (i = 0; i < cnt; i++, hp++) { 927 for (i = 0; i < cnt; i++, hp++) {
897 if (__get_user(type, &hp->p_type)) 928 if (__get_user(type, &hp->p_type))
@@ -905,14 +936,14 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
905 filp = fget(fd); 936 filp = fget(fd);
906 if (!filp) 937 if (!filp)
907 return -EACCES; 938 return -EACCES;
908 if(!filp->f_op) { 939 if (!filp->f_op) {
909 printk("irix_mapelf: Bogon filp!\n"); 940 printk("irix_mapelf: Bogon filp!\n");
910 fput(filp); 941 fput(filp);
911 return -EACCES; 942 return -EACCES;
912 } 943 }
913 944
914 hp = user_phdrp; 945 hp = user_phdrp;
915 for(i = 0; i < cnt; i++, hp++) { 946 for (i = 0; i < cnt; i++, hp++) {
916 int prot; 947 int prot;
917 948
918 retval = __get_user(vaddr, &hp->p_vaddr); 949 retval = __get_user(vaddr, &hp->p_vaddr);
@@ -1015,8 +1046,6 @@ static int notesize(struct memelfnote *en)
1015 return sz; 1046 return sz;
1016} 1047}
1017 1048
1018/* #define DEBUG */
1019
1020#define DUMP_WRITE(addr, nr) \ 1049#define DUMP_WRITE(addr, nr) \
1021 if (!dump_write(file, (addr), (nr))) \ 1050 if (!dump_write(file, (addr), (nr))) \
1022 goto end_coredump; 1051 goto end_coredump;
@@ -1093,9 +1122,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1093 1122
1094 segs++; 1123 segs++;
1095 } 1124 }
1096#ifdef DEBUG 1125 pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1097 printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1098#endif
1099 1126
1100 /* Set up header. */ 1127 /* Set up header. */
1101 memcpy(elf.e_ident, ELFMAG, SELFMAG); 1128 memcpy(elf.e_ident, ELFMAG, SELFMAG);
@@ -1221,7 +1248,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1221 struct elf_phdr phdr; 1248 struct elf_phdr phdr;
1222 int sz = 0; 1249 int sz = 0;
1223 1250
1224 for(i = 0; i < numnote; i++) 1251 for (i = 0; i < numnote; i++)
1225 sz += notesize(&notes[i]); 1252 sz += notesize(&notes[i]);
1226 1253
1227 phdr.p_type = PT_NOTE; 1254 phdr.p_type = PT_NOTE;
@@ -1241,7 +1268,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1241 dataoff = offset = roundup(offset, PAGE_SIZE); 1268 dataoff = offset = roundup(offset, PAGE_SIZE);
1242 1269
1243 /* Write program headers for segments dump. */ 1270 /* Write program headers for segments dump. */
1244 for(vma = current->mm->mmap, i = 0; 1271 for (vma = current->mm->mmap, i = 0;
1245 i < segs && vma != NULL; vma = vma->vm_next) { 1272 i < segs && vma != NULL; vma = vma->vm_next) {
1246 struct elf_phdr phdr; 1273 struct elf_phdr phdr;
1247 size_t sz; 1274 size_t sz;
@@ -1267,7 +1294,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1267 DUMP_WRITE(&phdr, sizeof(phdr)); 1294 DUMP_WRITE(&phdr, sizeof(phdr));
1268 } 1295 }
1269 1296
1270 for(i = 0; i < numnote; i++) 1297 for (i = 0; i < numnote; i++)
1271 if (!writenote(&notes[i], file)) 1298 if (!writenote(&notes[i], file))
1272 goto end_coredump; 1299 goto end_coredump;
1273 1300
@@ -1275,7 +1302,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1275 1302
1276 DUMP_SEEK(dataoff); 1303 DUMP_SEEK(dataoff);
1277 1304
1278 for(i = 0, vma = current->mm->mmap; 1305 for (i = 0, vma = current->mm->mmap;
1279 i < segs && vma != NULL; 1306 i < segs && vma != NULL;
1280 vma = vma->vm_next) { 1307 vma = vma->vm_next) {
1281 unsigned long addr = vma->vm_start; 1308 unsigned long addr = vma->vm_start;
@@ -1284,9 +1311,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1284 if (!maydump(vma)) 1311 if (!maydump(vma))
1285 continue; 1312 continue;
1286 i++; 1313 i++;
1287#ifdef DEBUG 1314 pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);
1288 printk("elf_core_dump: writing %08lx %lx\n", addr, len);
1289#endif
1290 DUMP_WRITE((void __user *)addr, len); 1315 DUMP_WRITE((void __user *)addr, len);
1291 } 1316 }
1292 1317
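
Note on the irixelf.c hunks above: most of them are mechanical cleanups — the open-coded #ifdef DEBUG / printk() pairs become plain pr_debug() calls (print_elfhdr() and dump_phdrs() are left unguarded, presumably because they are debug-only themselves), and "if("/"for(" spacing is normalised to kernel style. A minimal standalone sketch of why dropping the #ifdef guard is safe, using a local stand-in for pr_debug() rather than the kernel's real macro:

    /* Standalone sketch (not the kernel's real macro): pr_debug() expands to
     * nothing unless DEBUG is defined, so wrapping every call site in
     * #ifdef DEBUG, as the old code did, is redundant. */
    #include <stdio.h>

    #ifdef DEBUG
    #define pr_debug(...) printf(__VA_ARGS__)
    #else
    #define pr_debug(...) do { } while (0)   /* compiled away entirely */
    #endif

    static void dump_segment(unsigned long vaddr, unsigned long filesz)
    {
            /* Old style:
             *   #ifdef DEBUG
             *   printk("seg %08lx len %lx\n", vaddr, filesz);
             *   #endif
             * New style, same effect: */
            pr_debug("seg %08lx len %lx\n", vaddr, filesz);
    }

    int main(void)
    {
            dump_segment(0x400000UL, 0x1000UL);  /* prints only when built with -DDEBUG */
            return 0;
    }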
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index bcaad6696082..2967537221e2 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -112,7 +112,7 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
112} 112}
113 113
114struct irq_chip msc_levelirq_type = { 114struct irq_chip msc_levelirq_type = {
115 .typename = "SOC-it-Level", 115 .name = "SOC-it-Level",
116 .ack = level_mask_and_ack_msc_irq, 116 .ack = level_mask_and_ack_msc_irq,
117 .mask = mask_msc_irq, 117 .mask = mask_msc_irq,
118 .mask_ack = level_mask_and_ack_msc_irq, 118 .mask_ack = level_mask_and_ack_msc_irq,
@@ -122,7 +122,7 @@ struct irq_chip msc_levelirq_type = {
122}; 122};
123 123
124struct irq_chip msc_edgeirq_type = { 124struct irq_chip msc_edgeirq_type = {
125 .typename = "SOC-it-Edge", 125 .name = "SOC-it-Edge",
126 .ack = edge_mask_and_ack_msc_irq, 126 .ack = edge_mask_and_ack_msc_irq,
127 .mask = mask_msc_irq, 127 .mask = mask_msc_irq,
128 .mask_ack = edge_mask_and_ack_msc_irq, 128 .mask_ack = edge_mask_and_ack_msc_irq,
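
This file and the other interrupt-controller files below carry the same one-line change: the struct irq_chip member formerly called typename is now name, so every designated initialiser is updated. A trimmed, illustrative sketch of the shape of the change (the cut-down struct and the handler names here are stand-ins, not the full kernel definition):

    /* Illustrative only: a cut-down irq_chip showing the renamed member. */
    struct irq_chip {
            const char *name;                    /* was: const char *typename; */
            void (*ack)(unsigned int irq);
            void (*mask)(unsigned int irq);
            void (*mask_ack)(unsigned int irq);
            void (*unmask)(unsigned int irq);
    };

    static void mask_soc_irq(unsigned int irq)   { (void)irq; /* ... */ }
    static void unmask_soc_irq(unsigned int irq) { (void)irq; /* ... */ }

    static struct irq_chip soc_levelirq_type = {
            .name     = "SOC-it-Level",          /* only the field name changes */
            .ack      = mask_soc_irq,
            .mask     = mask_soc_irq,
            .mask_ack = mask_soc_irq,
            .unmask   = unmask_soc_irq,
    };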
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index efbd219845b5..3dd561832e4c 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -23,13 +23,13 @@ static unsigned int irq_base;
23 23
24static inline int ls1bit32(unsigned int x) 24static inline int ls1bit32(unsigned int x)
25{ 25{
26 int b = 31, s; 26 int b = 31, s;
27 27
28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s; 28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s; 29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s; 30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s; 31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
32 s = 1; if (x << 1 == 0) s = 0; b -= s; 32 s = 1; if (x << 1 == 0) s = 0; b -= s;
33 33
34 return b; 34 return b;
35} 35}
@@ -92,7 +92,7 @@ void ll_mv64340_irq(void)
92} 92}
93 93
94struct irq_chip mv64340_irq_type = { 94struct irq_chip mv64340_irq_type = {
95 .typename = "MV-64340", 95 .name = "MV-64340",
96 .ack = mask_mv64340_irq, 96 .ack = mask_mv64340_irq,
97 .mask = mask_mv64340_irq, 97 .mask = mask_mv64340_irq,
98 .mask_ack = mask_mv64340_irq, 98 .mask_ack = mask_mv64340_irq,
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 123324ba8c14..250732883488 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -17,28 +17,27 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/system.h> 18#include <asm/system.h>
19 19
20static int irq_base;
21
22static inline void unmask_rm7k_irq(unsigned int irq) 20static inline void unmask_rm7k_irq(unsigned int irq)
23{ 21{
24 set_c0_intcontrol(0x100 << (irq - irq_base)); 22 set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
25} 23}
26 24
27static inline void mask_rm7k_irq(unsigned int irq) 25static inline void mask_rm7k_irq(unsigned int irq)
28{ 26{
29 clear_c0_intcontrol(0x100 << (irq - irq_base)); 27 clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
30} 28}
31 29
32static struct irq_chip rm7k_irq_controller = { 30static struct irq_chip rm7k_irq_controller = {
33 .typename = "RM7000", 31 .name = "RM7000",
34 .ack = mask_rm7k_irq, 32 .ack = mask_rm7k_irq,
35 .mask = mask_rm7k_irq, 33 .mask = mask_rm7k_irq,
36 .mask_ack = mask_rm7k_irq, 34 .mask_ack = mask_rm7k_irq,
37 .unmask = unmask_rm7k_irq, 35 .unmask = unmask_rm7k_irq,
38}; 36};
39 37
40void __init rm7k_cpu_irq_init(int base) 38void __init rm7k_cpu_irq_init(void)
41{ 39{
40 int base = RM7K_CPU_IRQ_BASE;
42 int i; 41 int i;
43 42
44 clear_c0_intcontrol(0x00000f00); /* Mask all */ 43 clear_c0_intcontrol(0x00000f00); /* Mask all */
@@ -46,6 +45,4 @@ void __init rm7k_cpu_irq_init(int base)
46 for (i = base; i < base + 4; i++) 45 for (i = base; i < base + 4; i++)
47 set_irq_chip_and_handler(i, &rm7k_irq_controller, 46 set_irq_chip_and_handler(i, &rm7k_irq_controller,
48 handle_level_irq); 47 handle_level_irq);
49
50 irq_base = base;
51} 48}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 0e6f4c5349d2..ae83d2df6f31 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -18,16 +18,14 @@
18#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
19#include <asm/system.h> 19#include <asm/system.h>
20 20
21static int irq_base;
22
23static inline void unmask_rm9k_irq(unsigned int irq) 21static inline void unmask_rm9k_irq(unsigned int irq)
24{ 22{
25 set_c0_intcontrol(0x1000 << (irq - irq_base)); 23 set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
26} 24}
27 25
28static inline void mask_rm9k_irq(unsigned int irq) 26static inline void mask_rm9k_irq(unsigned int irq)
29{ 27{
30 clear_c0_intcontrol(0x1000 << (irq - irq_base)); 28 clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
31} 29}
32 30
33static inline void rm9k_cpu_irq_enable(unsigned int irq) 31static inline void rm9k_cpu_irq_enable(unsigned int irq)
@@ -39,15 +37,6 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
39 local_irq_restore(flags); 37 local_irq_restore(flags);
40} 38}
41 39
42static void rm9k_cpu_irq_disable(unsigned int irq)
43{
44 unsigned long flags;
45
46 local_irq_save(flags);
47 mask_rm9k_irq(irq);
48 local_irq_restore(flags);
49}
50
51/* 40/*
52 * Performance counter interrupts are global on all processors. 41 * Performance counter interrupts are global on all processors.
53 */ 42 */
@@ -81,7 +70,7 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
81} 70}
82 71
83static struct irq_chip rm9k_irq_controller = { 72static struct irq_chip rm9k_irq_controller = {
84 .typename = "RM9000", 73 .name = "RM9000",
85 .ack = mask_rm9k_irq, 74 .ack = mask_rm9k_irq,
86 .mask = mask_rm9k_irq, 75 .mask = mask_rm9k_irq,
87 .mask_ack = mask_rm9k_irq, 76 .mask_ack = mask_rm9k_irq,
@@ -89,7 +78,7 @@ static struct irq_chip rm9k_irq_controller = {
89}; 78};
90 79
91static struct irq_chip rm9k_perfcounter_irq = { 80static struct irq_chip rm9k_perfcounter_irq = {
92 .typename = "RM9000", 81 .name = "RM9000",
93 .startup = rm9k_perfcounter_irq_startup, 82 .startup = rm9k_perfcounter_irq_startup,
94 .shutdown = rm9k_perfcounter_irq_shutdown, 83 .shutdown = rm9k_perfcounter_irq_shutdown,
95 .ack = mask_rm9k_irq, 84 .ack = mask_rm9k_irq,
@@ -102,8 +91,9 @@ unsigned int rm9000_perfcount_irq;
102 91
103EXPORT_SYMBOL(rm9000_perfcount_irq); 92EXPORT_SYMBOL(rm9000_perfcount_irq);
104 93
105void __init rm9k_cpu_irq_init(int base) 94void __init rm9k_cpu_irq_init(void)
106{ 95{
96 int base = RM9K_CPU_IRQ_BASE;
107 int i; 97 int i;
108 98
109 clear_c0_intcontrol(0x0000f000); /* Mask all */ 99 clear_c0_intcontrol(0x0000f000); /* Mask all */
@@ -115,6 +105,4 @@ void __init rm9k_cpu_irq_init(int base)
115 rm9000_perfcount_irq = base + 1; 105 rm9000_perfcount_irq = base + 1;
116 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 106 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
117 handle_level_irq); 107 handle_level_irq);
118
119 irq_base = base;
120} 108}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index fcc86b96ccf6..7b66e03b5899 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -25,7 +25,7 @@
25 * Don't even think about using this on SMP. You have been warned. 25 * Don't even think about using this on SMP. You have been warned.
26 * 26 *
27 * This file exports one global function: 27 * This file exports one global function:
28 * void mips_cpu_irq_init(int irq_base); 28 * void mips_cpu_irq_init(void);
29 */ 29 */
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
@@ -36,22 +36,20 @@
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/system.h> 37#include <asm/system.h>
38 38
39static int mips_cpu_irq_base;
40
41static inline void unmask_mips_irq(unsigned int irq) 39static inline void unmask_mips_irq(unsigned int irq)
42{ 40{
43 set_c0_status(0x100 << (irq - mips_cpu_irq_base)); 41 set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
44 irq_enable_hazard(); 42 irq_enable_hazard();
45} 43}
46 44
47static inline void mask_mips_irq(unsigned int irq) 45static inline void mask_mips_irq(unsigned int irq)
48{ 46{
49 clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); 47 clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
50 irq_disable_hazard(); 48 irq_disable_hazard();
51} 49}
52 50
53static struct irq_chip mips_cpu_irq_controller = { 51static struct irq_chip mips_cpu_irq_controller = {
54 .typename = "MIPS", 52 .name = "MIPS",
55 .ack = mask_mips_irq, 53 .ack = mask_mips_irq,
56 .mask = mask_mips_irq, 54 .mask = mask_mips_irq,
57 .mask_ack = mask_mips_irq, 55 .mask_ack = mask_mips_irq,
@@ -70,7 +68,7 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
70{ 68{
71 unsigned int vpflags = dvpe(); 69 unsigned int vpflags = dvpe();
72 70
73 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 71 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
74 evpe(vpflags); 72 evpe(vpflags);
75 unmask_mips_mt_irq(irq); 73 unmask_mips_mt_irq(irq);
76 74
@@ -84,13 +82,13 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
84static void mips_mt_cpu_irq_ack(unsigned int irq) 82static void mips_mt_cpu_irq_ack(unsigned int irq)
85{ 83{
86 unsigned int vpflags = dvpe(); 84 unsigned int vpflags = dvpe();
87 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 85 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
88 evpe(vpflags); 86 evpe(vpflags);
89 mask_mips_mt_irq(irq); 87 mask_mips_mt_irq(irq);
90} 88}
91 89
92static struct irq_chip mips_mt_cpu_irq_controller = { 90static struct irq_chip mips_mt_cpu_irq_controller = {
93 .typename = "MIPS", 91 .name = "MIPS",
94 .startup = mips_mt_cpu_irq_startup, 92 .startup = mips_mt_cpu_irq_startup,
95 .ack = mips_mt_cpu_irq_ack, 93 .ack = mips_mt_cpu_irq_ack,
96 .mask = mask_mips_mt_irq, 94 .mask = mask_mips_mt_irq,
@@ -99,8 +97,9 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
99 .eoi = unmask_mips_mt_irq, 97 .eoi = unmask_mips_mt_irq,
100}; 98};
101 99
102void __init mips_cpu_irq_init(int irq_base) 100void __init mips_cpu_irq_init(void)
103{ 101{
102 int irq_base = MIPS_CPU_IRQ_BASE;
104 int i; 103 int i;
105 104
106 /* Mask interrupts. */ 105 /* Mask interrupts. */
@@ -118,6 +117,4 @@ void __init mips_cpu_irq_init(int irq_base)
118 for (i = irq_base + 2; i < irq_base + 8; i++) 117 for (i = irq_base + 2; i < irq_base + 8; i++)
119 set_irq_chip_and_handler(i, &mips_cpu_irq_controller, 118 set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
120 handle_level_irq); 119 handle_level_irq);
121
122 mips_cpu_irq_base = irq_base;
123} 120}
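
The rm7000, rm9000 and generic CPU interrupt files all make the same structural change: the runtime irq_base / mips_cpu_irq_base variable disappears in favour of the compile-time constants RM7K_CPU_IRQ_BASE, RM9K_CPU_IRQ_BASE and MIPS_CPU_IRQ_BASE, so the *_irq_init() functions no longer take an argument and the mask computations fold at compile time. A self-contained sketch of the pattern, with userspace stand-ins for the coprocessor-0 accessors and a placeholder base value:

    /* Sketch only: stand-ins, not the real CP0 helpers or the real base. */
    #include <stdio.h>

    #define MIPS_CPU_IRQ_BASE 0                  /* placeholder; the platform defines it */

    static unsigned int c0_status;               /* pretend CP0 Status register */

    static void set_c0_status(unsigned int m)    { c0_status |=  m; }
    static void clear_c0_status(unsigned int m)  { c0_status &= ~m; }

    /* Before: (irq - mips_cpu_irq_base) with a global set by the init call.
     * After:  the base is a constant, so no global and no init argument. */
    static void unmask_mips_irq(unsigned int irq)
    {
            set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
    }

    static void mask_mips_irq(unsigned int irq)
    {
            clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
    }

    int main(void)
    {
            unmask_mips_irq(MIPS_CPU_IRQ_BASE + 2);
            printf("IM bits: %#x\n", c0_status); /* 0x400: mask bit for IRQ 2 */
            mask_mips_irq(MIPS_CPU_IRQ_BASE + 2);
            return 0;
    }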
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index de3fae260ff8..0b8ce59429a8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -194,15 +194,15 @@ sysn32_waitid(int which, compat_pid_t pid,
194} 194}
195 195
196struct sysinfo32 { 196struct sysinfo32 {
197 s32 uptime; 197 s32 uptime;
198 u32 loads[3]; 198 u32 loads[3];
199 u32 totalram; 199 u32 totalram;
200 u32 freeram; 200 u32 freeram;
201 u32 sharedram; 201 u32 sharedram;
202 u32 bufferram; 202 u32 bufferram;
203 u32 totalswap; 203 u32 totalswap;
204 u32 freeswap; 204 u32 freeswap;
205 u16 procs; 205 u16 procs;
206 u32 totalhigh; 206 u32 totalhigh;
207 u32 freehigh; 207 u32 freehigh;
208 u32 mem_unit; 208 u32 mem_unit;
@@ -558,7 +558,7 @@ extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) 558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
559{ 559{
560 int err; 560 int err;
561 struct ustat tmp; 561 struct ustat tmp;
562 struct ustat32 tmp32; 562 struct ustat32 tmp32;
563 mm_segment_t old_fs = get_fs(); 563 mm_segment_t old_fs = get_fs();
564 564
@@ -569,11 +569,11 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
569 if (err) 569 if (err)
570 goto out; 570 goto out;
571 571
572 memset(&tmp32,0,sizeof(struct ustat32)); 572 memset(&tmp32,0,sizeof(struct ustat32));
573 tmp32.f_tfree = tmp.f_tfree; 573 tmp32.f_tfree = tmp.f_tfree;
574 tmp32.f_tinode = tmp.f_tinode; 574 tmp32.f_tinode = tmp.f_tinode;
575 575
576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0; 576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
577 577
578out: 578out:
579 return err; 579 return err;
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c1373a6e668b..a32f6797353a 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -96,6 +96,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
96 goto out_unlock; 96 goto out_unlock;
97 } 97 }
98 98
99 retval = security_task_setscheduler(p, 0, NULL);
100 if (retval)
101 goto out_unlock;
102
99 /* Record new user-specified CPU set for future reference */ 103 /* Record new user-specified CPU set for future reference */
100 p->thread.user_cpus_allowed = new_mask; 104 p->thread.user_cpus_allowed = new_mask;
101 105
@@ -141,8 +145,9 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
141 p = find_process_by_pid(pid); 145 p = find_process_by_pid(pid);
142 if (!p) 146 if (!p)
143 goto out_unlock; 147 goto out_unlock;
144 148 retval = security_task_getscheduler(p);
145 retval = 0; 149 if (retval)
150 goto out_unlock;
146 151
147 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); 152 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
148 153
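
The mips-mt.c hunks add LSM permission checks to the MT-specific affinity syscalls so they behave like the generic sched_setaffinity()/sched_getaffinity() paths: security_task_setscheduler() before modifying a task and security_task_getscheduler() before reporting its mask, bailing out with the hook's error code. A stubbed sketch of the control flow the change introduces (the locking and CPU-mask handling of the real syscall are omitted, and the stub stands in for the real hook):

    /* Sketch with a stub in place of the real LSM hook: check first,
     * touch the task only if the hook says yes. */
    #include <stdio.h>

    struct task_struct { int pid; unsigned long user_cpus_allowed; };

    static int security_task_setscheduler_stub(struct task_struct *p)
    {
            (void)p;
            return 0;                            /* or -EPERM from a security module */
    }

    static long mt_set_affinity(struct task_struct *p, unsigned long new_mask)
    {
            long retval = security_task_setscheduler_stub(p);

            if (retval)
                    return retval;               /* denied: task left untouched */

            p->user_cpus_allowed = new_mask;     /* record for future reference */
            return 0;
    }

    int main(void)
    {
            struct task_struct t = { .pid = 42, .user_cpus_allowed = 0 };

            printf("ret=%ld mask=%#lx\n",
                   mt_set_affinity(&t, 0x3UL), t.user_cpus_allowed);
            return 0;
    }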
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4ed37ba19731..5ddc2e9deecf 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -31,13 +31,13 @@ static const char *cpu_name[] = {
31 [CPU_R4000PC] = "R4000PC", 31 [CPU_R4000PC] = "R4000PC",
32 [CPU_R4000SC] = "R4000SC", 32 [CPU_R4000SC] = "R4000SC",
33 [CPU_R4000MC] = "R4000MC", 33 [CPU_R4000MC] = "R4000MC",
34 [CPU_R4200] = "R4200", 34 [CPU_R4200] = "R4200",
35 [CPU_R4400PC] = "R4400PC", 35 [CPU_R4400PC] = "R4400PC",
36 [CPU_R4400SC] = "R4400SC", 36 [CPU_R4400SC] = "R4400SC",
37 [CPU_R4400MC] = "R4400MC", 37 [CPU_R4400MC] = "R4400MC",
38 [CPU_R4600] = "R4600", 38 [CPU_R4600] = "R4600",
39 [CPU_R6000] = "R6000", 39 [CPU_R6000] = "R6000",
40 [CPU_R6000A] = "R6000A", 40 [CPU_R6000A] = "R6000A",
41 [CPU_R8000] = "R8000", 41 [CPU_R8000] = "R8000",
42 [CPU_R10000] = "R10000", 42 [CPU_R10000] = "R10000",
43 [CPU_R12000] = "R12000", 43 [CPU_R12000] = "R12000",
@@ -46,14 +46,14 @@ static const char *cpu_name[] = {
46 [CPU_R4650] = "R4650", 46 [CPU_R4650] = "R4650",
47 [CPU_R4700] = "R4700", 47 [CPU_R4700] = "R4700",
48 [CPU_R5000] = "R5000", 48 [CPU_R5000] = "R5000",
49 [CPU_R5000A] = "R5000A", 49 [CPU_R5000A] = "R5000A",
50 [CPU_R4640] = "R4640", 50 [CPU_R4640] = "R4640",
51 [CPU_NEVADA] = "Nevada", 51 [CPU_NEVADA] = "Nevada",
52 [CPU_RM7000] = "RM7000", 52 [CPU_RM7000] = "RM7000",
53 [CPU_RM9000] = "RM9000", 53 [CPU_RM9000] = "RM9000",
54 [CPU_R5432] = "R5432", 54 [CPU_R5432] = "R5432",
55 [CPU_4KC] = "MIPS 4Kc", 55 [CPU_4KC] = "MIPS 4Kc",
56 [CPU_5KC] = "MIPS 5Kc", 56 [CPU_5KC] = "MIPS 5Kc",
57 [CPU_R4310] = "R4310", 57 [CPU_R4310] = "R4310",
58 [CPU_SB1] = "SiByte SB1", 58 [CPU_SB1] = "SiByte SB1",
59 [CPU_SB1A] = "SiByte SB1A", 59 [CPU_SB1A] = "SiByte SB1A",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ec8209f3a0c6..04e5b38d327d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,10 +41,6 @@
41#include <asm/isadep.h> 41#include <asm/isadep.h>
42#include <asm/inst.h> 42#include <asm/inst.h>
43#include <asm/stacktrace.h> 43#include <asm/stacktrace.h>
44#ifdef CONFIG_MIPS_MT_SMTC
45#include <asm/mipsmtregs.h>
46extern void smtc_idle_loop_hook(void);
47#endif /* CONFIG_MIPS_MT_SMTC */
48 44
49/* 45/*
50 * The idle thread. There's no useful work to be done, so just try to conserve 46 * The idle thread. There's no useful work to be done, so just try to conserve
@@ -57,6 +53,8 @@ ATTRIB_NORET void cpu_idle(void)
57 while (1) { 53 while (1) {
58 while (!need_resched()) { 54 while (!need_resched()) {
59#ifdef CONFIG_MIPS_MT_SMTC 55#ifdef CONFIG_MIPS_MT_SMTC
56 extern void smtc_idle_loop_hook(void);
57
60 smtc_idle_loop_hook(); 58 smtc_idle_loop_hook();
61#endif /* CONFIG_MIPS_MT_SMTC */ 59#endif /* CONFIG_MIPS_MT_SMTC */
62 if (cpu_wait) 60 if (cpu_wait)
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 880fa6e841ee..59c1577ecbb3 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -114,6 +114,14 @@ LEAF(_save_fp_context32)
114 */ 114 */
115LEAF(_restore_fp_context) 115LEAF(_restore_fp_context)
116 EX lw t0, SC_FPC_CSR(a0) 116 EX lw t0, SC_FPC_CSR(a0)
117
118 /* Fail if the CSR has exceptions pending */
119 srl t1, t0, 5
120 and t1, t0
121 andi t1, 0x1f << 7
122 bnez t1, fault
123 nop
124
117#ifdef CONFIG_64BIT 125#ifdef CONFIG_64BIT
118 EX ldc1 $f1, SC_FPREGS+8(a0) 126 EX ldc1 $f1, SC_FPREGS+8(a0)
119 EX ldc1 $f3, SC_FPREGS+24(a0) 127 EX ldc1 $f3, SC_FPREGS+24(a0)
@@ -157,6 +165,14 @@ LEAF(_restore_fp_context)
157LEAF(_restore_fp_context32) 165LEAF(_restore_fp_context32)
158 /* Restore an o32 sigcontext. */ 166 /* Restore an o32 sigcontext. */
159 EX lw t0, SC32_FPC_CSR(a0) 167 EX lw t0, SC32_FPC_CSR(a0)
168
169 /* Fail if the CSR has exceptions pending */
170 srl t1, t0, 5
171 and t1, t0
172 andi t1, 0x1f << 7
173 bnez t1, fault
174 nop
175
160 EX ldc1 $f0, SC32_FPREGS+0(a0) 176 EX ldc1 $f0, SC32_FPREGS+0(a0)
161 EX ldc1 $f2, SC32_FPREGS+16(a0) 177 EX ldc1 $f2, SC32_FPREGS+16(a0)
162 EX ldc1 $f4, SC32_FPREGS+32(a0) 178 EX ldc1 $f4, SC32_FPREGS+32(a0)
@@ -177,9 +193,10 @@ LEAF(_restore_fp_context32)
177 jr ra 193 jr ra
178 li v0, 0 # success 194 li v0, 0 # success
179 END(_restore_fp_context32) 195 END(_restore_fp_context32)
180 .set reorder
181#endif 196#endif
182 197
198 .set reorder
199
183 .type fault@function 200 .type fault@function
184 .ent fault 201 .ent fault
185fault: li v0, -EFAULT # failure 202fault: li v0, -EFAULT # failure
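
The new assembly at the top of _restore_fp_context and _restore_fp_context32 rejects a saved FP control/status word whose cause bits overlap its enable bits, because writing such a value back to the FPU would raise the exception immediately; the restore instead fails through the existing fault path with -EFAULT. A C rendering of the three-instruction check (FCSR layout assumed: cause bits 12-16, enable bits 7-11, so a shift of 5 lines them up):

    /* C equivalent of "srl t1, t0, 5; and t1, t0; andi t1, 0x1f << 7". */
    #include <stdint.h>
    #include <stdio.h>

    static int fcsr_would_trap(uint32_t fcsr)
    {
            /* Shift the cause field down onto the enable field; any bit set
             * in both means "exception enabled and already pending". */
            return ((fcsr >> 5) & fcsr & (0x1fu << 7)) != 0;
    }

    int main(void)
    {
            uint32_t armed   = 1u << 7;                 /* one exception enabled, cause clear */
            uint32_t pending = (1u << 7) | (1u << 12);  /* same exception enabled and pending */

            printf("%d %d\n", fcsr_would_trap(armed), fcsr_would_trap(pending));  /* 0 1 */
            return 0;
    }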
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 5a99e3e0c96d..8610f4a925e9 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -63,7 +63,7 @@ extern void *vpe_get_shared(int index);
63 63
64static void rtlx_dispatch(void) 64static void rtlx_dispatch(void)
65{ 65{
66 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ); 66 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
67} 67}
68 68
69 69
@@ -491,7 +491,7 @@ static struct irqaction rtlx_irq = {
491 .name = "RTLX", 491 .name = "RTLX",
492}; 492};
493 493
494static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; 494static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
495 495
496static char register_chrdev_failed[] __initdata = 496static char register_chrdev_failed[] __initdata =
497 KERN_ERR "rtlx_module_init: unable to register device\n"; 497 KERN_ERR "rtlx_module_init: unable to register device\n";
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a7bff2a54723..39add2341aa2 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -384,7 +384,7 @@ EXPORT(sysn32_call_table)
384 PTR sys_readlinkat 384 PTR sys_readlinkat
385 PTR sys_fchmodat 385 PTR sys_fchmodat
386 PTR sys_faccessat 386 PTR sys_faccessat
387 PTR sys_pselect6 387 PTR compat_sys_pselect6
388 PTR sys_ppoll /* 6265 */ 388 PTR sys_ppoll /* 6265 */
389 PTR sys_unshare 389 PTR sys_unshare
390 PTR sys_splice 390 PTR sys_splice
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e91379c1be1d..c58b8e0105ea 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -506,7 +506,7 @@ sys_call_table:
506 PTR sys_readlinkat 506 PTR sys_readlinkat
507 PTR sys_fchmodat 507 PTR sys_fchmodat
508 PTR sys_faccessat /* 4300 */ 508 PTR sys_faccessat /* 4300 */
509 PTR sys_pselect6 509 PTR compat_sys_pselect6
510 PTR sys_ppoll 510 PTR sys_ppoll
511 PTR sys_unshare 511 PTR sys_unshare
512 PTR sys_splice 512 PTR sys_splice
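
Both 64-bit syscall tables now route the 32-bit ABIs' pselect6 to compat_sys_pselect6. The likely reason is that pselect6's trailing argument points at user data laid out with 32-bit pointers and sizes (and the timeout uses a 32-bit timespec), which the native 64-bit handler would misread. A hedged sketch of the layout mismatch, with illustrative structure names rather than the kernel's exact ones:

    /* Illustrative layouts only. The sixth pselect6 argument points at a
     * {sigmask pointer, sigset size} pair; on a 32-bit ABI both slots are
     * 4 bytes, so a compat entry point must widen them before use. */
    #include <stdint.h>
    #include <stdio.h>

    struct sigset_argpack32 {
            uint32_t sigmask_ptr;                /* 32-bit user pointer */
            uint32_t sigsetsize;
    };

    struct sigset_argpack64 {
            uint64_t sigmask_ptr;                /* 64-bit user pointer */
            uint64_t sigsetsize;
    };

    int main(void)
    {
            printf("32-bit pack: %zu bytes, 64-bit pack: %zu bytes\n",
                   sizeof(struct sigset_argpack32), sizeof(struct sigset_argpack64));
            return 0;
    }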
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 89440a0d8528..d2e01e7167b8 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -271,8 +271,7 @@ static void __init bootmem_init(void)
271static void __init bootmem_init(void) 271static void __init bootmem_init(void)
272{ 272{
273 unsigned long reserved_end; 273 unsigned long reserved_end;
274 unsigned long highest = 0; 274 unsigned long mapstart = ~0UL;
275 unsigned long mapstart = -1UL;
276 unsigned long bootmap_size; 275 unsigned long bootmap_size;
277 int i; 276 int i;
278 277
@@ -284,6 +283,13 @@ static void __init bootmem_init(void)
284 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end))); 283 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
285 284
286 /* 285 /*
286 * max_low_pfn is not a number of pages. The number of pages
287 * of the system is given by 'max_low_pfn - min_low_pfn'.
288 */
289 min_low_pfn = ~0UL;
290 max_low_pfn = 0;
291
292 /*
287 * Find the highest page frame number we have available. 293 * Find the highest page frame number we have available.
288 */ 294 */
289 for (i = 0; i < boot_mem_map.nr_map; i++) { 295 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -296,8 +302,10 @@ static void __init bootmem_init(void)
296 end = PFN_DOWN(boot_mem_map.map[i].addr 302 end = PFN_DOWN(boot_mem_map.map[i].addr
297 + boot_mem_map.map[i].size); 303 + boot_mem_map.map[i].size);
298 304
299 if (end > highest) 305 if (end > max_low_pfn)
300 highest = end; 306 max_low_pfn = end;
307 if (start < min_low_pfn)
308 min_low_pfn = start;
301 if (end <= reserved_end) 309 if (end <= reserved_end)
302 continue; 310 continue;
303 if (start >= mapstart) 311 if (start >= mapstart)
@@ -305,22 +313,36 @@ static void __init bootmem_init(void)
305 mapstart = max(reserved_end, start); 313 mapstart = max(reserved_end, start);
306 } 314 }
307 315
316 if (min_low_pfn >= max_low_pfn)
317 panic("Incorrect memory mapping !!!");
318 if (min_low_pfn > ARCH_PFN_OFFSET) {
319 printk(KERN_INFO
320 "Wasting %lu bytes for tracking %lu unused pages\n",
321 (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
322 min_low_pfn - ARCH_PFN_OFFSET);
323 } else if (min_low_pfn < ARCH_PFN_OFFSET) {
324 printk(KERN_INFO
325 "%lu free pages won't be used\n",
326 ARCH_PFN_OFFSET - min_low_pfn);
327 }
328 min_low_pfn = ARCH_PFN_OFFSET;
329
308 /* 330 /*
309 * Determine low and high memory ranges 331 * Determine low and high memory ranges
310 */ 332 */
311 if (highest > PFN_DOWN(HIGHMEM_START)) { 333 if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
312#ifdef CONFIG_HIGHMEM 334#ifdef CONFIG_HIGHMEM
313 highstart_pfn = PFN_DOWN(HIGHMEM_START); 335 highstart_pfn = PFN_DOWN(HIGHMEM_START);
314 highend_pfn = highest; 336 highend_pfn = max_low_pfn;
315#endif 337#endif
316 highest = PFN_DOWN(HIGHMEM_START); 338 max_low_pfn = PFN_DOWN(HIGHMEM_START);
317 } 339 }
318 340
319 /* 341 /*
320 * Initialize the boot-time allocator with low memory only. 342 * Initialize the boot-time allocator with low memory only.
321 */ 343 */
322 bootmap_size = init_bootmem(mapstart, highest); 344 bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
323 345 min_low_pfn, max_low_pfn);
324 /* 346 /*
325 * Register fully available low RAM pages with the bootmem allocator. 347 * Register fully available low RAM pages with the bootmem allocator.
326 */ 348 */
@@ -507,9 +529,9 @@ void __init setup_arch(char **cmdline_p)
507 529
508#if defined(CONFIG_VT) 530#if defined(CONFIG_VT)
509#if defined(CONFIG_VGA_CONSOLE) 531#if defined(CONFIG_VGA_CONSOLE)
510 conswitchp = &vga_con; 532 conswitchp = &vga_con;
511#elif defined(CONFIG_DUMMY_CONSOLE) 533#elif defined(CONFIG_DUMMY_CONSOLE)
512 conswitchp = &dummy_con; 534 conswitchp = &dummy_con;
513#endif 535#endif
514#endif 536#endif
515 537
@@ -541,3 +563,6 @@ int __init dsp_disable(char *s)
541} 563}
542 564
543__setup("nodsp", dsp_disable); 565__setup("nodsp", dsp_disable);
566
567unsigned long kernelsp[NR_CPUS];
568unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
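
The bootmem_init() rework makes the allocator aware of memory that does not start at physical address zero: one pass over the boot memory map now records both the lowest and the highest page frame number, min_low_pfn is reconciled against ARCH_PFN_OFFSET with a diagnostic in either direction, and the resulting range is handed to init_bootmem_node() rather than init_bootmem(). A standalone sketch of the scan and clamp, with simplified types and a made-up memory map:

    /* Sketch of the min/max PFN scan; constants and map contents are
     * placeholders, not a real platform layout. */
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PFN_UP(x)       (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
    #define ARCH_PFN_OFFSET 0UL                  /* placeholder */

    struct mem_region { unsigned long addr, size; };

    int main(void)
    {
            const struct mem_region map[] = {    /* fictional regions */
                    { 0x00100000UL, 0x07f00000UL },
                    { 0x10000000UL, 0x10000000UL },
            };
            unsigned long min_low_pfn = ~0UL, max_low_pfn = 0, i;

            for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                    unsigned long start = PFN_UP(map[i].addr);
                    unsigned long end   = PFN_DOWN(map[i].addr + map[i].size);

                    if (end > max_low_pfn)
                            max_low_pfn = end;
                    if (start < min_low_pfn)
                            min_low_pfn = start;
            }

            if (min_low_pfn >= max_low_pfn)
                    return 1;                    /* the real code panics here */
            if (min_low_pfn > ARCH_PFN_OFFSET)
                    printf("tracking %lu unused page frames below the map\n",
                           min_low_pfn - ARCH_PFN_OFFSET);
            else if (min_low_pfn < ARCH_PFN_OFFSET)
                    printf("%lu free pages won't be used\n",
                           ARCH_PFN_OFFSET - min_low_pfn);
            min_low_pfn = ARCH_PFN_OFFSET;

            printf("bootmem range: [%lu, %lu)\n", min_low_pfn, max_low_pfn);
            return 0;
    }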
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b9d358e05214..9a44053cd9f1 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -89,7 +89,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
89 spin_lock_irq(&current->sighand->siglock); 89 spin_lock_irq(&current->sighand->siglock);
90 current->saved_sigmask = current->blocked; 90 current->saved_sigmask = current->blocked;
91 current->blocked = newset; 91 current->blocked = newset;
92 recalc_sigpending(); 92 recalc_sigpending();
93 spin_unlock_irq(&current->sighand->siglock); 93 spin_unlock_irq(&current->sighand->siglock);
94 94
95 current->state = TASK_INTERRUPTIBLE; 95 current->state = TASK_INTERRUPTIBLE;
@@ -124,7 +124,7 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
124 124
125 if (!ret && oact) { 125 if (!ret && oact) {
126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) 126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
127 return -EFAULT; 127 return -EFAULT;
128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); 129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); 130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
@@ -304,7 +304,7 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
304 current->comm, current->pid, 304 current->comm, current->pid,
305 frame, regs->cp0_epc, frame->regs[31]); 305 frame, regs->cp0_epc, frame->regs[31]);
306#endif 306#endif
307 return 0; 307 return 0;
308 308
309give_sigsegv: 309give_sigsegv:
310 force_sigsegv(signr, current); 310 force_sigsegv(signr, current);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index a67c18555ed3..b28646b3ceae 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -105,7 +105,7 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
105 spin_lock_irq(&current->sighand->siglock); 105 spin_lock_irq(&current->sighand->siglock);
106 current->saved_sigmask = current->blocked; 106 current->saved_sigmask = current->blocked;
107 current->blocked = newset; 107 current->blocked = newset;
108 recalc_sigpending(); 108 recalc_sigpending();
109 spin_unlock_irq(&current->sighand->siglock); 109 spin_unlock_irq(&current->sighand->siglock);
110 110
111 current->state = TASK_INTERRUPTIBLE; 111 current->state = TASK_INTERRUPTIBLE;
@@ -184,7 +184,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
184 /* Create the ucontext. */ 184 /* Create the ucontext. */
185 err |= __put_user(0, &frame->rs_uc.uc_flags); 185 err |= __put_user(0, &frame->rs_uc.uc_flags);
186 err |= __put_user(0, &frame->rs_uc.uc_link); 186 err |= __put_user(0, &frame->rs_uc.uc_link);
187 sp = (int) (long) current->sas_ss_sp; 187 sp = (int) (long) current->sas_ss_sp;
188 err |= __put_user(sp, 188 err |= __put_user(sp,
189 &frame->rs_uc.uc_stack.ss_sp); 189 &frame->rs_uc.uc_stack.ss_sp);
190 err |= __put_user(sas_ss_flags(regs->regs[29]), 190 err |= __put_user(sas_ss_flags(regs->regs[29]),
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 1ee689c0e0c9..64b62bdfb4f6 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -35,7 +35,6 @@
35#include <asm/mipsregs.h> 35#include <asm/mipsregs.h>
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/mips_mt.h> 37#include <asm/mips_mt.h>
38#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
39 38
40#define MIPS_CPU_IPI_RESCHED_IRQ 0 39#define MIPS_CPU_IPI_RESCHED_IRQ 0
41#define MIPS_CPU_IPI_CALL_IRQ 1 40#define MIPS_CPU_IPI_CALL_IRQ 1
@@ -108,12 +107,12 @@ void __init sanitize_tlb_entries(void)
108 107
109static void ipi_resched_dispatch(void) 108static void ipi_resched_dispatch(void)
110{ 109{
111 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ); 110 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
112} 111}
113 112
114static void ipi_call_dispatch(void) 113static void ipi_call_dispatch(void)
115{ 114{
116 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ); 115 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
117} 116}
118 117
119static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 118static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
@@ -270,8 +269,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
270 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); 269 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
271 } 270 }
272 271
273 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; 272 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
274 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; 273 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
275 274
276 setup_irq(cpu_ipi_resched_irq, &irq_resched); 275 setup_irq(cpu_ipi_resched_irq, &irq_resched);
277 setup_irq(cpu_ipi_call_irq, &irq_call); 276 setup_irq(cpu_ipi_call_irq, &irq_call);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 6a857bf030b0..9251ea824937 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -26,16 +26,6 @@
26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. 26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
27 */ 27 */
28 28
29/*
30 * MIPSCPU_INT_BASE is identically defined in both
31 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
32 * but as yet there's no properly organized include structure that
33 * will ensure that the right *int.h file will be included for a
34 * given platform build.
35 */
36
37#define MIPSCPU_INT_BASE 16
38
39#define MIPS_CPU_IPI_IRQ 1 29#define MIPS_CPU_IPI_IRQ 1
40 30
41#define LOCK_MT_PRA() \ 31#define LOCK_MT_PRA() \
@@ -77,15 +67,15 @@ unsigned int ipi_timer_latch[NR_CPUS];
77 67
78#define IPIBUF_PER_CPU 4 68#define IPIBUF_PER_CPU 4
79 69
80struct smtc_ipi_q IPIQ[NR_CPUS]; 70static struct smtc_ipi_q IPIQ[NR_CPUS];
81struct smtc_ipi_q freeIPIq; 71static struct smtc_ipi_q freeIPIq;
82 72
83 73
84/* Forward declarations */ 74/* Forward declarations */
85 75
86void ipi_decode(struct smtc_ipi *); 76void ipi_decode(struct smtc_ipi *);
87void post_direct_ipi(int cpu, struct smtc_ipi *pipi); 77static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
88void setup_cross_vpe_interrupts(void); 78static void setup_cross_vpe_interrupts(void);
89void init_smtc_stats(void); 79void init_smtc_stats(void);
90 80
91/* Global SMTC Status */ 81/* Global SMTC Status */
@@ -200,7 +190,7 @@ void __init sanitize_tlb_entries(void)
200 * Configure shared TLB - VPC configuration bit must be set by caller 190 * Configure shared TLB - VPC configuration bit must be set by caller
201 */ 191 */
202 192
203void smtc_configure_tlb(void) 193static void smtc_configure_tlb(void)
204{ 194{
205 int i,tlbsiz,vpes; 195 int i,tlbsiz,vpes;
206 unsigned long mvpconf0; 196 unsigned long mvpconf0;
@@ -648,7 +638,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
648 * the VPE. 638 * the VPE.
649 */ 639 */
650 640
651void smtc_ipi_qdump(void) 641static void smtc_ipi_qdump(void)
652{ 642{
653 int i; 643 int i;
654 644
@@ -686,28 +676,6 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
686 return result; 676 return result;
687} 677}
688 678
689/* No longer used in IPI dispatch, but retained for future recycling */
690
691static __inline__ int atomic_postclear(unsigned int *pv)
692{
693 unsigned long result;
694
695 unsigned long temp;
696
697 __asm__ __volatile__(
698 "1: ll %0, %2 \n"
699 " or %1, $0, $0 \n"
700 " sc %1, %2 \n"
701 " beqz %1, 1b \n"
702 " sync \n"
703 : "=&r" (result), "=&r" (temp), "=m" (*pv)
704 : "m" (*pv)
705 : "memory");
706
707 return result;
708}
709
710
711void smtc_send_ipi(int cpu, int type, unsigned int action) 679void smtc_send_ipi(int cpu, int type, unsigned int action)
712{ 680{
713 int tcstatus; 681 int tcstatus;
@@ -781,7 +749,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
781/* 749/*
782 * Send IPI message to Halted TC, TargTC/TargVPE already having been set 750 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
783 */ 751 */
784void post_direct_ipi(int cpu, struct smtc_ipi *pipi) 752static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
785{ 753{
786 struct pt_regs *kstack; 754 struct pt_regs *kstack;
787 unsigned long tcstatus; 755 unsigned long tcstatus;
@@ -921,7 +889,7 @@ void smtc_timer_broadcast(int vpe)
921 * interrupts. 889 * interrupts.
922 */ 890 */
923 891
924static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ; 892static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
925 893
926static irqreturn_t ipi_interrupt(int irq, void *dev_idm) 894static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
927{ 895{
@@ -1000,7 +968,7 @@ static void ipi_irq_dispatch(void)
1000 968
1001static struct irqaction irq_ipi; 969static struct irqaction irq_ipi;
1002 970
1003void setup_cross_vpe_interrupts(void) 971static void setup_cross_vpe_interrupts(void)
1004{ 972{
1005 if (!cpu_has_vint) 973 if (!cpu_has_vint)
1006 panic("SMTC Kernel requires Vectored Interupt support"); 974 panic("SMTC Kernel requires Vectored Interupt support");
@@ -1191,7 +1159,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1191 * It would be nice to be able to use a spinlock here, 1159 * It would be nice to be able to use a spinlock here,
1192 * but this is invoked from within TLB flush routines 1160 * but this is invoked from within TLB flush routines
1193 * that protect themselves with DVPE, so if a lock is 1161 * that protect themselves with DVPE, so if a lock is
1194 * held by another TC, it'll never be freed. 1162 * held by another TC, it'll never be freed.
1195 * 1163 *
1196 * DVPE/DMT must not be done with interrupts enabled, 1164 * DVPE/DMT must not be done with interrupts enabled,
1197 * so even so most callers will already have disabled 1165 * so even so most callers will already have disabled
@@ -1296,7 +1264,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1296 * Support for single-threading cache flush operations. 1264 * Support for single-threading cache flush operations.
1297 */ 1265 */
1298 1266
1299int halt_state_save[NR_CPUS]; 1267static int halt_state_save[NR_CPUS];
1300 1268
1301/* 1269/*
1302 * To really, really be sure that nothing is being done 1270 * To really, really be sure that nothing is being done
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 6c2406a93f2b..93a148486f88 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -669,7 +669,7 @@ asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name,
669 669
670struct irix_statfs { 670struct irix_statfs {
671 short f_type; 671 short f_type;
672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree; 672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
673 char f_fname[6], f_fpack[6]; 673 char f_fname[6], f_fpack[6];
674}; 674};
675 675
@@ -959,7 +959,7 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin)
959 959
960 fn = default_llseek; 960 fn = default_llseek;
961 if (file->f_op && file->f_op->llseek) 961 if (file->f_op && file->f_op->llseek)
962 fn = file->f_op->llseek; 962 fn = file->f_op->llseek;
963 lock_kernel(); 963 lock_kernel();
964 retval = fn(file, offset, origin); 964 retval = fn(file, offset, origin);
965 unlock_kernel(); 965 unlock_kernel();
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 458fccf87c54..459624969c99 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -522,7 +522,7 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
522}; 522};
523 523
524static char *rstrs[] = { 524static char *rstrs[] = {
525 [R_MIPS_NONE] = "MIPS_NONE", 525 [R_MIPS_NONE] = "MIPS_NONE",
526 [R_MIPS_32] = "MIPS_32", 526 [R_MIPS_32] = "MIPS_32",
527 [R_MIPS_26] = "MIPS_26", 527 [R_MIPS_26] = "MIPS_26",
528 [R_MIPS_HI16] = "MIPS_HI16", 528 [R_MIPS_HI16] = "MIPS_HI16",
@@ -695,7 +695,7 @@ static void dump_tclist(void)
695} 695}
696 696
697/* We are prepared so configure and start the VPE... */ 697/* We are prepared so configure and start the VPE... */
698int vpe_run(struct vpe * v) 698static int vpe_run(struct vpe * v)
699{ 699{
700 struct vpe_notifications *n; 700 struct vpe_notifications *n;
701 unsigned long val, dmt_flag; 701 unsigned long val, dmt_flag;
@@ -713,16 +713,16 @@ int vpe_run(struct vpe * v)
713 dvpe(); 713 dvpe();
714 714
715 if (!list_empty(&v->tc)) { 715 if (!list_empty(&v->tc)) {
716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { 716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", 717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
718 t->index); 718 t->index);
719 return -ENOEXEC; 719 return -ENOEXEC;
720 } 720 }
721 } else { 721 } else {
722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", 722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
723 v->minor); 723 v->minor);
724 return -ENOEXEC; 724 return -ENOEXEC;
725 } 725 }
726 726
727 /* Put MVPE's into 'configuration state' */ 727 /* Put MVPE's into 'configuration state' */
728 set_c0_mvpcontrol(MVPCONTROL_VPC); 728 set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -775,14 +775,14 @@ int vpe_run(struct vpe * v)
775 775
776 back_to_back_c0_hazard(); 776 back_to_back_c0_hazard();
777 777
778 /* Set up the XTC bit in vpeconf0 to point at our tc */ 778 /* Set up the XTC bit in vpeconf0 to point at our tc */
779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) 779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
780 | (t->index << VPECONF0_XTC_SHIFT)); 780 | (t->index << VPECONF0_XTC_SHIFT));
781 781
782 back_to_back_c0_hazard(); 782 back_to_back_c0_hazard();
783 783
784 /* enable this VPE */ 784 /* enable this VPE */
785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
786 786
787 /* clear out any left overs from a previous program */ 787 /* clear out any left overs from a previous program */
788 write_vpe_c0_status(0); 788 write_vpe_c0_status(0);
@@ -832,7 +832,7 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
832 * contents of the program (p)buffer performing relocatations/etc, free's it 832 * contents of the program (p)buffer performing relocatations/etc, free's it
833 * when finished. 833 * when finished.
834 */ 834 */
835int vpe_elfload(struct vpe * v) 835static int vpe_elfload(struct vpe * v)
836{ 836{
837 Elf_Ehdr *hdr; 837 Elf_Ehdr *hdr;
838 Elf_Shdr *sechdrs; 838 Elf_Shdr *sechdrs;
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index 2affa5ff171c..9a622b9a1051 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -45,7 +45,7 @@ void enable_lasat_irq(unsigned int irq_nr)
45} 45}
46 46
47static struct irq_chip lasat_irq_type = { 47static struct irq_chip lasat_irq_type = {
48 .typename = "Lasat", 48 .name = "Lasat",
49 .ack = disable_lasat_irq, 49 .ack = disable_lasat_irq,
50 .mask = disable_lasat_irq, 50 .mask = disable_lasat_irq,
51 .mask_ack = disable_lasat_irq, 51 .mask_ack = disable_lasat_irq,
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 88c7ab871ec4..d47692f73a26 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM); 132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM);
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140const char *get_system_type(void) 139const char *get_system_type(void)
diff --git a/arch/mips/lib-32/Makefile b/arch/mips/lib-32/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-32/Makefile
+++ b/arch/mips/lib-32/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-64/Makefile
+++ b/arch/mips/lib-64/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/memset.S b/arch/mips/lib-64/memset.S
deleted file mode 100644
index e2c42c85113b..000000000000
--- a/arch/mips/lib-64/memset.S
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#include <asm/asm.h>
10#include <asm/asm-offsets.h>
11#include <asm/regdef.h>
12
13#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \
15 .section __ex_table,"a"; \
16 PTR 9b, handler; \
17 .previous
18
19 .macro f_fill64 dst, offset, val, fixup
20 EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
21 EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
22 EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
23 EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
24 EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
28 .endm
29
30/*
31 * memset(void *s, int c, size_t n)
32 *
33 * a0: start of area to clear
34 * a1: char to fill with
35 * a2: size of area to clear
36 */
37 .set noreorder
38 .align 5
39LEAF(memset)
40 beqz a1, 1f
41 move v0, a0 /* result */
42
43 andi a1, 0xff /* spread fillword */
44 dsll t1, a1, 8
45 or a1, t1
46 dsll t1, a1, 16
47 or a1, t1
48 dsll t1, a1, 32
49 or a1, t1
501:
51
52FEXPORT(__bzero)
53 sltiu t0, a2, LONGSIZE /* very small region? */
54 bnez t0, small_memset
55 andi t0, a0, LONGMASK /* aligned? */
56
57 beqz t0, 1f
58 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
59
60#ifdef __MIPSEB__
61 EX(sdl, a1, (a0), first_fixup) /* make dword aligned */
62#endif
63#ifdef __MIPSEL__
64 EX(sdr, a1, (a0), first_fixup) /* make dword aligned */
65#endif
66 PTR_SUBU a0, t0 /* long align ptr */
67 PTR_ADDU a2, t0 /* correct size */
68
691: ori t1, a2, 0x3f /* # of full blocks */
70 xori t1, 0x3f
71 beqz t1, memset_partial /* no block to fill */
72 andi t0, a2, 0x38
73
74 PTR_ADDU t1, a0 /* end address */
75 .set reorder
761: PTR_ADDIU a0, 64
77 f_fill64 a0, -64, a1, fwd_fixup
78 bne t1, a0, 1b
79 .set noreorder
80
81memset_partial:
82 PTR_LA t1, 2f /* where to start */
83 .set noat
84 dsrl AT, t0, 1
85 PTR_SUBU t1, AT
86 .set noat
87 jr t1
88 PTR_ADDU a0, t0 /* dest ptr */
89
90 .set push
91 .set noreorder
92 .set nomacro
93 f_fill64 a0, -64, a1, partial_fixup /* ... but first do longs ... */
942: .set pop
95 andi a2, LONGMASK /* At most one long to go */
96
97 beqz a2, 1f
98 PTR_ADDU a0, a2 /* What's left */
99#ifdef __MIPSEB__
100 EX(sdr, a1, -1(a0), last_fixup)
101#endif
102#ifdef __MIPSEL__
103 EX(sdl, a1, -1(a0), last_fixup)
104#endif
1051: jr ra
106 move a2, zero
107
108small_memset:
109 beqz a2, 2f
110 PTR_ADDU t1, a0, a2
111
1121: PTR_ADDIU a0, 1 /* fill bytewise */
113 bne t1, a0, 1b
114 sb a1, -1(a0)
115
1162: jr ra /* done */
117 move a2, zero
118 END(memset)
119
120first_fixup:
121 jr ra
122 nop
123
124fwd_fixup:
125 PTR_L t0, TI_TASK($28)
126 LONG_L t0, THREAD_BUADDR(t0)
127 andi a2, 0x3f
128 LONG_ADDU a2, t1
129 jr ra
130 LONG_SUBU a2, t0
131
132partial_fixup:
133 PTR_L t0, TI_TASK($28)
134 LONG_L t0, THREAD_BUADDR(t0)
135 andi a2, LONGMASK
136 LONG_ADDU a2, t1
137 jr ra
138 LONG_SUBU a2, t0
139
140last_fixup:
141 jr ra
142 andi v1, a2, LONGMASK
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 989c900b8b14..5ad501b30b43 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += csum_partial.o memcpy.o promlib.o \ 5lib-y += csum_partial.o memcpy.o memset.o promlib.o \
6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o 6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o
7 7
8obj-y += iomap.o 8obj-y += iomap.o
diff --git a/arch/mips/lib-32/memset.S b/arch/mips/lib/memset.S
index 1981485bd48b..3f8b8b3d0b23 100644
--- a/arch/mips/lib-32/memset.S
+++ b/arch/mips/lib/memset.S
@@ -10,6 +10,14 @@
10#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
11#include <asm/regdef.h> 11#include <asm/regdef.h>
12 12
13#if LONGSIZE == 4
14#define LONG_S_L swl
15#define LONG_S_R swr
16#else
17#define LONG_S_L sdl
18#define LONG_S_R sdr
19#endif
20
13#define EX(insn,reg,addr,handler) \ 21#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \ 229: insn reg, addr; \
15 .section __ex_table,"a"; \ 23 .section __ex_table,"a"; \
@@ -25,6 +33,7 @@
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup) 33 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup) 34 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup) 35 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
36#if LONGSIZE == 4
28 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup) 37 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup)
29 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup) 38 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup)
30 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup) 39 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
@@ -33,6 +42,7 @@
33 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup) 42 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
34 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup) 43 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
35 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup) 44 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
45#endif
36 .endm 46 .endm
37 47
38/* 48/*
@@ -49,9 +59,13 @@ LEAF(memset)
49 move v0, a0 /* result */ 59 move v0, a0 /* result */
50 60
51 andi a1, 0xff /* spread fillword */ 61 andi a1, 0xff /* spread fillword */
52 sll t1, a1, 8 62 LONG_SLL t1, a1, 8
53 or a1, t1 63 or a1, t1
54 sll t1, a1, 16 64 LONG_SLL t1, a1, 16
65#if LONGSIZE == 8
66 or a1, t1
67 LONG_SLL t1, a1, 32
68#endif
55 or a1, t1 69 or a1, t1
561: 701:
57 71
@@ -64,10 +78,10 @@ FEXPORT(__bzero)
64 PTR_SUBU t0, LONGSIZE /* alignment in bytes */ 78 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
65 79
66#ifdef __MIPSEB__ 80#ifdef __MIPSEB__
67 EX(swl, a1, (a0), first_fixup) /* make word aligned */ 81 EX(LONG_S_L, a1, (a0), first_fixup) /* make word/dword aligned */
68#endif 82#endif
69#ifdef __MIPSEL__ 83#ifdef __MIPSEL__
70 EX(swr, a1, (a0), first_fixup) /* make word aligned */ 84 EX(LONG_S_R, a1, (a0), first_fixup) /* make word/dword aligned */
71#endif 85#endif
72 PTR_SUBU a0, t0 /* long align ptr */ 86 PTR_SUBU a0, t0 /* long align ptr */
73 PTR_ADDU a2, t0 /* correct size */ 87 PTR_ADDU a2, t0 /* correct size */
@@ -75,7 +89,7 @@ FEXPORT(__bzero)
751: ori t1, a2, 0x3f /* # of full blocks */ 891: ori t1, a2, 0x3f /* # of full blocks */
76 xori t1, 0x3f 90 xori t1, 0x3f
77 beqz t1, memset_partial /* no block to fill */ 91 beqz t1, memset_partial /* no block to fill */
78 andi t0, a2, 0x3c 92 andi t0, a2, 0x40-LONGSIZE
79 93
80 PTR_ADDU t1, a0 /* end address */ 94 PTR_ADDU t1, a0 /* end address */
81 .set reorder 95 .set reorder
@@ -86,7 +100,14 @@ FEXPORT(__bzero)
86 100
87memset_partial: 101memset_partial:
88 PTR_LA t1, 2f /* where to start */ 102 PTR_LA t1, 2f /* where to start */
103#if LONGSIZE == 4
89 PTR_SUBU t1, t0 104 PTR_SUBU t1, t0
105#else
106 .set noat
107 LONG_SRL AT, t0, 1
108 PTR_SUBU t1, AT
109 .set at
110#endif
90 jr t1 111 jr t1
91 PTR_ADDU a0, t0 /* dest ptr */ 112 PTR_ADDU a0, t0 /* dest ptr */
92 113
@@ -100,10 +121,10 @@ memset_partial:
100 beqz a2, 1f 121 beqz a2, 1f
101 PTR_ADDU a0, a2 /* What's left */ 122 PTR_ADDU a0, a2 /* What's left */
102#ifdef __MIPSEB__ 123#ifdef __MIPSEB__
103 EX(swr, a1, -1(a0), last_fixup) 124 EX(LONG_S_R, a1, -1(a0), last_fixup)
104#endif 125#endif
105#ifdef __MIPSEL__ 126#ifdef __MIPSEL__
106 EX(swl, a1, -1(a0), last_fixup) 127 EX(LONG_S_L, a1, -1(a0), last_fixup)
107#endif 128#endif
1081: jr ra 1291: jr ra
109 move a2, zero 130 move a2, zero
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 98ce89f8068b..2388f7f3ffde 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -44,20 +44,24 @@ unsigned long __init run_uncached(void *func)
44 44
45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2) 45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
46 usp = CKSEG1ADDR(sp); 46 usp = CKSEG1ADDR(sp);
47#ifdef CONFIG_64BIT
47 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 48 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
48 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0)) 49 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0))
49 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 50 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
50 XKPHYS_TO_PHYS((long long)sp)); 51 XKPHYS_TO_PHYS((long long)sp));
52#endif
51 else { 53 else {
52 BUG(); 54 BUG();
53 usp = sp; 55 usp = sp;
54 } 56 }
55 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2) 57 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
56 ufunc = CKSEG1ADDR(lfunc); 58 ufunc = CKSEG1ADDR(lfunc);
59#ifdef CONFIG_64BIT
57 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 60 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
58 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0)) 61 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0))
59 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 62 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
60 XKPHYS_TO_PHYS((long long)lfunc)); 63 XKPHYS_TO_PHYS((long long)lfunc));
64#endif
61 else { 65 else {
62 BUG(); 66 BUG();
63 ufunc = lfunc; 67 ufunc = lfunc;
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index 43dba6ce6603..dfa0acbd7fc2 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/kernel_stat.h> 34#include <linux/kernel_stat.h>
35#include <linux/kernel.h>
35 36
36#include <asm/gdb-stub.h> 37#include <asm/gdb-stub.h>
37#include <asm/io.h> 38#include <asm/io.h>
@@ -69,7 +70,7 @@ static void end_atlas_irq(unsigned int irq)
69} 70}
70 71
71static struct irq_chip atlas_irq_type = { 72static struct irq_chip atlas_irq_type = {
72 .typename = "Atlas", 73 .name = "Atlas",
73 .ack = disable_atlas_irq, 74 .ack = disable_atlas_irq,
74 .mask = disable_atlas_irq, 75 .mask = disable_atlas_irq,
75 .mask_ack = disable_atlas_irq, 76 .mask_ack = disable_atlas_irq,
@@ -220,7 +221,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
220 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 221 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
221 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 222 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
222}; 223};
223int __initdata msc_nr_irqs = sizeof(msc_irqmap) / sizeof(*msc_irqmap); 224int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
224 225
225msc_irqmap_t __initdata msc_eicirqmap[] = { 226msc_irqmap_t __initdata msc_eicirqmap[] = {
226 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 227 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -231,14 +232,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
231 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 232 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
232 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 233 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
233}; 234};
234int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap) / sizeof(*msc_eicirqmap); 235int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
235 236
236void __init arch_init_irq(void) 237void __init arch_init_irq(void)
237{ 238{
238 init_atlas_irqs(ATLAS_INT_BASE); 239 init_atlas_irqs(ATLAS_INT_BASE);
239 240
240 if (!cpu_has_veic) 241 if (!cpu_has_veic)
241 mips_cpu_irq_init(MIPSCPU_INT_BASE); 242 mips_cpu_irq_init();
242 243
243 switch(mips_revision_corid) { 244 switch(mips_revision_corid) {
244 case MIPS_REVISION_CORID_CORE_MSC: 245 case MIPS_REVISION_CORID_CORE_MSC:
diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c
index eeed944e0f83..ebf0e16c5a0d 100644
--- a/arch/mips/mips-boards/generic/memory.c
+++ b/arch/mips/mips-boards/generic/memory.c
@@ -166,9 +166,8 @@ void __init prom_meminit(void)
166 } 166 }
167} 167}
168 168
169unsigned long __init prom_free_prom_memory(void) 169void __init prom_free_prom_memory(void)
170{ 170{
171 unsigned long freed = 0;
172 unsigned long addr; 171 unsigned long addr;
173 int i; 172 int i;
174 173
@@ -176,17 +175,8 @@ unsigned long __init prom_free_prom_memory(void)
176 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 175 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
177 continue; 176 continue;
178 177
179 addr = PAGE_ALIGN(boot_mem_map.map[i].addr); 178 addr = boot_mem_map.map[i].addr;
180 while (addr < boot_mem_map.map[i].addr 179 free_init_pages("prom memory",
181 + boot_mem_map.map[i].size) { 180 addr, addr + boot_mem_map.map[i].size);
182 ClearPageReserved(virt_to_page(__va(addr)));
183 init_page_count(virt_to_page(__va(addr)));
184 free_page((unsigned long)__va(addr));
185 addr += PAGE_SIZE;
186 freed += PAGE_SIZE;
187 }
188 } 181 }
189 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
190
191 return freed;
192} 182}
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index 90ad5bf3e2f1..3c206bb17160 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/kernel_stat.h> 29#include <linux/kernel_stat.h>
30#include <linux/kernel.h>
30#include <linux/random.h> 31#include <linux/random.h>
31 32
32#include <asm/i8259.h> 33#include <asm/i8259.h>
@@ -289,7 +290,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
289 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 290 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
290 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 291 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
291}; 292};
292int __initdata msc_nr_irqs = sizeof(msc_irqmap)/sizeof(msc_irqmap_t); 293int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
293 294
294msc_irqmap_t __initdata msc_eicirqmap[] = { 295msc_irqmap_t __initdata msc_eicirqmap[] = {
295 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 296 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -303,14 +304,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
303 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 304 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
304 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 305 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
305}; 306};
306int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap)/sizeof(msc_irqmap_t); 307int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
307 308
308void __init arch_init_irq(void) 309void __init arch_init_irq(void)
309{ 310{
310 init_i8259_irqs(); 311 init_i8259_irqs();
311 312
312 if (!cpu_has_veic) 313 if (!cpu_has_veic)
313 mips_cpu_irq_init (MIPSCPU_INT_BASE); 314 mips_cpu_irq_init();
314 315
315 switch(mips_revision_corid) { 316 switch(mips_revision_corid) {
316 case MIPS_REVISION_CORID_CORE_MSC: 317 case MIPS_REVISION_CORID_CORE_MSC:
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c
index 874ccb0066b8..c4b9de3a7f27 100644
--- a/arch/mips/mips-boards/sead/sead_int.c
+++ b/arch/mips/mips-boards/sead/sead_int.c
@@ -113,5 +113,5 @@ asmlinkage void plat_irq_dispatch(void)
113 113
114void __init arch_init_irq(void) 114void __init arch_init_irq(void)
115{ 115{
116 mips_cpu_irq_init(MIPSCPU_INT_BASE); 116 mips_cpu_irq_init();
117} 117}
diff --git a/arch/mips/mips-boards/sim/sim_int.c b/arch/mips/mips-boards/sim/sim_int.c
index 2ce449dce6f2..15ac0655c1ff 100644
--- a/arch/mips/mips-boards/sim/sim_int.c
+++ b/arch/mips/mips-boards/sim/sim_int.c
@@ -21,9 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel_stat.h> 22#include <linux/kernel_stat.h>
23#include <asm/mips-boards/simint.h> 23#include <asm/mips-boards/simint.h>
24 24#include <asm/irq_cpu.h>
25
26extern void mips_cpu_irq_init(int);
27 25
28static inline int clz(unsigned long x) 26static inline int clz(unsigned long x)
29{ 27{
@@ -86,5 +84,5 @@ asmlinkage void plat_irq_dispatch(void)
86 84
87void __init arch_init_irq(void) 85void __init arch_init_irq(void)
88{ 86{
89 mips_cpu_irq_init(MIPSCPU_INT_BASE); 87 mips_cpu_irq_init();
90} 88}
diff --git a/arch/mips/mips-boards/sim/sim_mem.c b/arch/mips/mips-boards/sim/sim_mem.c
index f7ce76983328..46bc16f8b15d 100644
--- a/arch/mips/mips-boards/sim/sim_mem.c
+++ b/arch/mips/mips-boards/sim/sim_mem.c
@@ -99,10 +99,9 @@ void __init prom_meminit(void)
99 } 99 }
100} 100}
101 101
102unsigned long __init prom_free_prom_memory(void) 102void __init prom_free_prom_memory(void)
103{ 103{
104 int i; 104 int i;
105 unsigned long freed = 0;
106 unsigned long addr; 105 unsigned long addr;
107 106
108 for (i = 0; i < boot_mem_map.nr_map; i++) { 107 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -110,16 +109,7 @@ unsigned long __init prom_free_prom_memory(void)
110 continue; 109 continue;
111 110
112 addr = boot_mem_map.map[i].addr; 111 addr = boot_mem_map.map[i].addr;
113 while (addr < boot_mem_map.map[i].addr 112 free_init_pages("prom memory",
114 + boot_mem_map.map[i].size) { 113 addr, addr + boot_mem_map.map[i].size);
115 ClearPageReserved(virt_to_page(__va(addr)));
116 init_page_count(virt_to_page(__va(addr)));
117 free_page((unsigned long)__va(addr));
118 addr += PAGE_SIZE;
119 freed += PAGE_SIZE;
120 }
121 } 114 }
122 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
123
124 return freed;
125} 115}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 49065c133ebf..125a4a85ec05 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -341,7 +341,6 @@ static int __init page_is_ram(unsigned long pagenr)
341void __init paging_init(void) 341void __init paging_init(void)
342{ 342{
343 unsigned long zones_size[MAX_NR_ZONES] = { 0, }; 343 unsigned long zones_size[MAX_NR_ZONES] = { 0, };
344 unsigned long max_dma, low;
345#ifndef CONFIG_FLATMEM 344#ifndef CONFIG_FLATMEM
346 unsigned long zholes_size[MAX_NR_ZONES] = { 0, }; 345 unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
347 unsigned long i, j, pfn; 346 unsigned long i, j, pfn;
@@ -354,19 +353,19 @@ void __init paging_init(void)
354#endif 353#endif
355 kmap_coherent_init(); 354 kmap_coherent_init();
356 355
357 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
358 low = max_low_pfn;
359
360#ifdef CONFIG_ISA 356#ifdef CONFIG_ISA
361 if (low < max_dma) 357 if (max_low_pfn >= MAX_DMA_PFN)
362 zones_size[ZONE_DMA] = low; 358 if (min_low_pfn >= MAX_DMA_PFN) {
363 else { 359 zones_size[ZONE_DMA] = 0;
364 zones_size[ZONE_DMA] = max_dma; 360 zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
365 zones_size[ZONE_NORMAL] = low - max_dma; 361 } else {
366 } 362 zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
367#else 363 zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
368 zones_size[ZONE_DMA] = low; 364 }
365 else
369#endif 366#endif
367 zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
368
370#ifdef CONFIG_HIGHMEM 369#ifdef CONFIG_HIGHMEM
371 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn; 370 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
372 371
@@ -467,7 +466,7 @@ void __init mem_init(void)
467} 466}
468#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 467#endif /* !CONFIG_NEED_MULTIPLE_NODES */
469 468
470static void free_init_pages(char *what, unsigned long begin, unsigned long end) 469void free_init_pages(const char *what, unsigned long begin, unsigned long end)
471{ 470{
472 unsigned long pfn; 471 unsigned long pfn;
473 472
@@ -493,18 +492,25 @@ void free_initrd_mem(unsigned long start, unsigned long end)
493} 492}
494#endif 493#endif
495 494
496extern unsigned long prom_free_prom_memory(void);
497
498void free_initmem(void) 495void free_initmem(void)
499{ 496{
500 unsigned long freed; 497 prom_free_prom_memory();
501
502 freed = prom_free_prom_memory();
503 if (freed)
504 printk(KERN_INFO "Freeing firmware memory: %ldkb freed\n",
505 freed >> 10);
506
507 free_init_pages("unused kernel memory", 498 free_init_pages("unused kernel memory",
508 __pa_symbol(&__init_begin), 499 __pa_symbol(&__init_begin),
509 __pa_symbol(&__init_end)); 500 __pa_symbol(&__init_end));
510} 501}
502
503unsigned long pgd_current[NR_CPUS];
504/*
505 * On 64-bit we've got three-level pagetables with a slightly
506 * different layout ...
507 */
508#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
509pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
510#ifdef CONFIG_64BIT
511#ifdef MODULE_START
512pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
513#endif
514pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
515#endif
516pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
diff --git a/arch/mips/momentum/jaguar_atx/Makefile b/arch/mips/momentum/jaguar_atx/Makefile
index 67372f3f9654..2e8cebd49bc0 100644
--- a/arch/mips/momentum/jaguar_atx/Makefile
+++ b/arch/mips/momentum/jaguar_atx/Makefile
@@ -6,7 +6,7 @@
6# unless it's something special (ie not a .c file). 6# unless it's something special (ie not a .c file).
7# 7#
8 8
9obj-y += irq.o prom.o reset.o setup.o 9obj-y += irq.o platform.o prom.o reset.o setup.o
10 10
11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o 11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o
12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o 12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o
diff --git a/arch/mips/momentum/jaguar_atx/irq.c b/arch/mips/momentum/jaguar_atx/irq.c
index 2efb25aa1aed..f2b432585df2 100644
--- a/arch/mips/momentum/jaguar_atx/irq.c
+++ b/arch/mips/momentum/jaguar_atx/irq.c
@@ -82,8 +82,8 @@ void __init arch_init_irq(void)
82 */ 82 */
83 clear_c0_status(ST0_IM); 83 clear_c0_status(ST0_IM);
84 84
85 mips_cpu_irq_init(0); 85 mips_cpu_irq_init();
86 rm7k_cpu_irq_init(8); 86 rm7k_cpu_irq_init();
87 87
88 /* set up the cascading interrupts */ 88 /* set up the cascading interrupts */
89 setup_irq(8, &cascade_mv64340); 89 setup_irq(8, &cascade_mv64340);
diff --git a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
index 6978654c712b..022f6974b76e 100644
--- a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
+++ b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
@@ -46,7 +46,9 @@
46 46
47extern unsigned long ja_fpga_base; 47extern unsigned long ja_fpga_base;
48 48
49#define JAGUAR_FPGA_WRITE(x,y) writeb(x, ja_fpga_base + JAGUAR_ATX_REG_##y) 49#define __FPGA_REG_TO_ADDR(reg) \
50#define JAGUAR_FPGA_READ(x) readb(ja_fpga_base + JAGUAR_ATX_REG_##x) 50 ((void *) ja_fpga_base + JAGUAR_ATX_REG_##reg)
51#define JAGUAR_FPGA_WRITE(x, reg) writeb(x, __FPGA_REG_TO_ADDR(reg))
52#define JAGUAR_FPGA_READ(reg) readb(__FPGA_REG_TO_ADDR(reg))
51 53
52#endif 54#endif
diff --git a/arch/mips/momentum/jaguar_atx/platform.c b/arch/mips/momentum/jaguar_atx/platform.c
new file mode 100644
index 000000000000..035ea5137c71
--- /dev/null
+++ b/arch/mips/momentum/jaguar_atx/platform.c
@@ -0,0 +1,235 @@
1#include <linux/delay.h>
2#include <linux/if_ether.h>
3#include <linux/ioport.h>
4#include <linux/mv643xx.h>
5#include <linux/platform_device.h>
6
7#include "jaguar_atx_fpga.h"
8
9#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
10
11static struct resource mv643xx_eth_shared_resources[] = {
12 [0] = {
13 .name = "ethernet shared base",
14 .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
15 .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
16 MV643XX_ETH_SHARED_REGS_SIZE - 1,
17 .flags = IORESOURCE_MEM,
18 },
19};
20
21static struct platform_device mv643xx_eth_shared_device = {
22 .name = MV643XX_ETH_SHARED_NAME,
23 .id = 0,
24 .num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources),
25 .resource = mv643xx_eth_shared_resources,
26};
27
28#define MV_SRAM_BASE 0xfe000000UL
29#define MV_SRAM_SIZE (256 * 1024)
30
31#define MV_SRAM_RXRING_SIZE (MV_SRAM_SIZE / 4)
32#define MV_SRAM_TXRING_SIZE (MV_SRAM_SIZE / 4)
33
34#define MV_SRAM_BASE_ETH0 MV_SRAM_BASE
35#define MV_SRAM_BASE_ETH1 (MV_SRAM_BASE + (MV_SRAM_SIZE / 2))
36
37#define MV64x60_IRQ_ETH_0 48
38#define MV64x60_IRQ_ETH_1 49
39#define MV64x60_IRQ_ETH_2 50
40
41#ifdef CONFIG_MV643XX_ETH_0
42
43static struct resource mv64x60_eth0_resources[] = {
44 [0] = {
45 .name = "eth0 irq",
46 .start = MV64x60_IRQ_ETH_0,
47 .end = MV64x60_IRQ_ETH_0,
48 .flags = IORESOURCE_IRQ,
49 },
50};
51
52static char eth0_mac_addr[ETH_ALEN];
53
54static struct mv643xx_eth_platform_data eth0_pd = {
55 .mac_addr = eth0_mac_addr,
56
57 .tx_sram_addr = MV_SRAM_BASE_ETH0,
58 .tx_sram_size = MV_SRAM_TXRING_SIZE,
59 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
60
61 .rx_sram_addr = MV_SRAM_BASE_ETH0 + MV_SRAM_TXRING_SIZE,
62 .rx_sram_size = MV_SRAM_RXRING_SIZE,
63 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
64};
65
66static struct platform_device eth0_device = {
67 .name = MV643XX_ETH_NAME,
68 .id = 0,
69 .num_resources = ARRAY_SIZE(mv64x60_eth0_resources),
70 .resource = mv64x60_eth0_resources,
71 .dev = {
72 .platform_data = &eth0_pd,
73 },
74};
75#endif /* CONFIG_MV643XX_ETH_0 */
76
77#ifdef CONFIG_MV643XX_ETH_1
78
79static struct resource mv64x60_eth1_resources[] = {
80 [0] = {
81 .name = "eth1 irq",
82 .start = MV64x60_IRQ_ETH_1,
83 .end = MV64x60_IRQ_ETH_1,
84 .flags = IORESOURCE_IRQ,
85 },
86};
87
88static char eth1_mac_addr[ETH_ALEN];
89
90static struct mv643xx_eth_platform_data eth1_pd = {
91 .mac_addr = eth1_mac_addr,
92
93 .tx_sram_addr = MV_SRAM_BASE_ETH1,
94 .tx_sram_size = MV_SRAM_TXRING_SIZE,
95 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
96
97 .rx_sram_addr = MV_SRAM_BASE_ETH1 + MV_SRAM_TXRING_SIZE,
98 .rx_sram_size = MV_SRAM_RXRING_SIZE,
99 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
100};
101
102static struct platform_device eth1_device = {
103 .name = MV643XX_ETH_NAME,
104 .id = 1,
105 .num_resources = ARRAY_SIZE(mv64x60_eth1_resources),
106 .resource = mv64x60_eth1_resources,
107 .dev = {
108 .platform_data = &eth1_pd,
109 },
110};
111#endif /* CONFIG_MV643XX_ETH_1 */
112
113#ifdef CONFIG_MV643XX_ETH_2
114
115static struct resource mv64x60_eth2_resources[] = {
116 [0] = {
117 .name = "eth2 irq",
118 .start = MV64x60_IRQ_ETH_2,
119 .end = MV64x60_IRQ_ETH_2,
120 .flags = IORESOURCE_IRQ,
121 },
122};
123
124static char eth2_mac_addr[ETH_ALEN];
125
126static struct mv643xx_eth_platform_data eth2_pd = {
127 .mac_addr = eth2_mac_addr,
128};
129
130static struct platform_device eth2_device = {
131 .name = MV643XX_ETH_NAME,
132 .id = 2,
133 .num_resources = ARRAY_SIZE(mv64x60_eth2_resources),
134 .resource = mv64x60_eth2_resources,
135 .dev = {
136 .platform_data = &eth2_pd,
137 },
138};
139#endif /* CONFIG_MV643XX_ETH_2 */
140
141static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
142 &mv643xx_eth_shared_device,
143#ifdef CONFIG_MV643XX_ETH_0
144 &eth0_device,
145#endif
146#ifdef CONFIG_MV643XX_ETH_1
147 &eth1_device,
148#endif
149#ifdef CONFIG_MV643XX_ETH_2
150 &eth2_device,
151#endif
152};
153
154static u8 __init exchange_bit(u8 val, u8 cs)
155{
156 /* place the data */
157 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
158 udelay(1);
159
160 /* turn the clock on */
161 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
162 udelay(1);
163
164 /* turn the clock off and read-strobe */
165 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
166
167 /* return the data */
168 return (JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1;
169}
170
171static void __init get_mac(char dest[6])
172{
173 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
174 int i,j;
175
176 for (i = 0; i < 12; i++)
177 exchange_bit(read_opcode[i], 1);
178
179 for (j = 0; j < 6; j++) {
180 dest[j] = 0;
181 for (i = 0; i < 8; i++) {
182 dest[j] <<= 1;
183 dest[j] |= exchange_bit(0, 1);
184 }
185 }
186
187 /* turn off CS */
188 exchange_bit(0,0);
189}
190
191/*
192 * Copy and increment ethernet MAC address by a small value.
193 *
194 * This is useful for systems where only one MAC address is stored in
195 * non-volatile memory for multiple ports.
196 */
197static inline void eth_mac_add(unsigned char *dst, unsigned char *src,
198 unsigned int add)
199{
200 int i;
201
202 BUG_ON(add >= 256);
203
204 for (i = ETH_ALEN - 1; i >= 0; i--) {
205 dst[i] = src[i] + add;
206 add = dst[i] < src[i]; /* compute carry */
207 }
208
209 WARN_ON(add);
210}
211
212static int __init mv643xx_eth_add_pds(void)
213{
214 unsigned char mac[ETH_ALEN];
215 int ret;
216
217 get_mac(mac);
218#ifdef CONFIG_MV643XX_ETH_0
219 eth_mac_add(eth0_mac_addr, mac, 0);
220#endif
221#ifdef CONFIG_MV643XX_ETH_1
222 eth_mac_add(eth1_mac_addr, mac, 1);
223#endif
224#ifdef CONFIG_MV643XX_ETH_2
225 eth_mac_add(eth2_mac_addr, mac, 2);
226#endif
227 ret = platform_add_devices(mv643xx_eth_pd_devs,
228 ARRAY_SIZE(mv643xx_eth_pd_devs));
229
230 return ret;
231}
232
233device_initcall(mv643xx_eth_add_pds);
234
235#endif /* defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE) */
diff --git a/arch/mips/momentum/jaguar_atx/prom.c b/arch/mips/momentum/jaguar_atx/prom.c
index 3d2712929293..5dd154ee58f6 100644
--- a/arch/mips/momentum/jaguar_atx/prom.c
+++ b/arch/mips/momentum/jaguar_atx/prom.c
@@ -39,56 +39,6 @@ const char *get_system_type(void)
39 return "Momentum Jaguar-ATX"; 39 return "Momentum Jaguar-ATX";
40} 40}
41 41
42#ifdef CONFIG_MV643XX_ETH
43extern unsigned char prom_mac_addr_base[6];
44
45static void burn_clocks(void)
46{
47 int i;
48
49 /* this loop should burn at least 1us -- this should be plenty */
50 for (i = 0; i < 0x10000; i++)
51 ;
52}
53
54static u8 exchange_bit(u8 val, u8 cs)
55{
56 /* place the data */
57 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
58 burn_clocks();
59
60 /* turn the clock on */
61 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
62 burn_clocks();
63
64 /* turn the clock off and read-strobe */
65 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
66
67 /* return the data */
68 return ((JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1);
69}
70
71void get_mac(char dest[6])
72{
73 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
74 int i,j;
75
76 for (i = 0; i < 12; i++)
77 exchange_bit(read_opcode[i], 1);
78
79 for (j = 0; j < 6; j++) {
80 dest[j] = 0;
81 for (i = 0; i < 8; i++) {
82 dest[j] <<= 1;
83 dest[j] |= exchange_bit(0, 1);
84 }
85 }
86
87 /* turn off CS */
88 exchange_bit(0,0);
89}
90#endif
91
92#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
93 43
94unsigned long signext(unsigned long addr) 44unsigned long signext(unsigned long addr)
@@ -228,16 +178,10 @@ void __init prom_init(void)
228#endif /* CONFIG_64BIT */ 178#endif /* CONFIG_64BIT */
229 mips_machgroup = MACH_GROUP_MOMENCO; 179 mips_machgroup = MACH_GROUP_MOMENCO;
230 mips_machtype = MACH_MOMENCO_JAGUAR_ATX; 180 mips_machtype = MACH_MOMENCO_JAGUAR_ATX;
231
232#ifdef CONFIG_MV643XX_ETH
233 /* get the base MAC address for on-board ethernet ports */
234 get_mac(prom_mac_addr_base);
235#endif
236} 181}
237 182
238unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
239{ 184{
240 return 0;
241} 185}
242 186
243void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_3/irq.c b/arch/mips/momentum/ocelot_3/irq.c
index cea0e5deb80e..3862d1d1add4 100644
--- a/arch/mips/momentum/ocelot_3/irq.c
+++ b/arch/mips/momentum/ocelot_3/irq.c
@@ -65,7 +65,7 @@ void __init arch_init_irq(void)
65 */ 65 */
66 clear_c0_status(ST0_IM | ST0_BEV); 66 clear_c0_status(ST0_IM | ST0_BEV);
67 67
68 rm7k_cpu_irq_init(8); 68 rm7k_cpu_irq_init();
69 69
70 /* set up the cascading interrupts */ 70 /* set up the cascading interrupts */
71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */ 71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */
diff --git a/arch/mips/momentum/ocelot_3/prom.c b/arch/mips/momentum/ocelot_3/prom.c
index 6ce9b7fdb824..8e02df63578a 100644
--- a/arch/mips/momentum/ocelot_3/prom.c
+++ b/arch/mips/momentum/ocelot_3/prom.c
@@ -180,9 +180,8 @@ void __init prom_init(void)
180#endif 180#endif
181} 181}
182 182
183unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
184{ 184{
185 return 0;
186} 185}
187 186
188void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index bb11fef08472..186a140fd2a9 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -84,7 +84,7 @@ void ll_cpci_irq(void)
84} 84}
85 85
86struct irq_chip cpci_irq_type = { 86struct irq_chip cpci_irq_type = {
87 .typename = "CPCI/FPGA", 87 .name = "CPCI/FPGA",
88 .ack = mask_cpci_irq, 88 .ack = mask_cpci_irq,
89 .mask = mask_cpci_irq, 89 .mask = mask_cpci_irq,
90 .mask_ack = mask_cpci_irq, 90 .mask_ack = mask_cpci_irq,
diff --git a/arch/mips/momentum/ocelot_c/dbg_io.c b/arch/mips/momentum/ocelot_c/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_c/dbg_io.c
+++ b/arch/mips/momentum/ocelot_c/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_c/irq.c b/arch/mips/momentum/ocelot_c/irq.c
index ea65223a6d2c..40472f7944d7 100644
--- a/arch/mips/momentum/ocelot_c/irq.c
+++ b/arch/mips/momentum/ocelot_c/irq.c
@@ -94,7 +94,7 @@ void __init arch_init_irq(void)
94 */ 94 */
95 clear_c0_status(ST0_IM); 95 clear_c0_status(ST0_IM);
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 98
99 /* set up the cascading interrupts */ 99 /* set up the cascading interrupts */
100 setup_irq(3, &cascade_fpga); 100 setup_irq(3, &cascade_fpga);
diff --git a/arch/mips/momentum/ocelot_c/prom.c b/arch/mips/momentum/ocelot_c/prom.c
index d0b77e101d74..b689ceea8cfb 100644
--- a/arch/mips/momentum/ocelot_c/prom.c
+++ b/arch/mips/momentum/ocelot_c/prom.c
@@ -178,7 +178,6 @@ void __init prom_init(void)
178#endif 178#endif
179} 179}
180 180
181unsigned long __init prom_free_prom_memory(void) 181void __init prom_free_prom_memory(void)
182{ 182{
183 return 0;
184} 183}
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index a7a80c0da569..de1a31ee52f3 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -77,7 +77,7 @@ void ll_uart_irq(void)
77} 77}
78 78
79struct irq_chip uart_irq_type = { 79struct irq_chip uart_irq_type = {
80 .typename = "UART/FPGA", 80 .name = "UART/FPGA",
81 .ack = mask_uart_irq, 81 .ack = mask_uart_irq,
82 .mask = mask_uart_irq, 82 .mask = mask_uart_irq,
83 .mask_ack = mask_uart_irq, 83 .mask_ack = mask_uart_irq,
diff --git a/arch/mips/momentum/ocelot_g/dbg_io.c b/arch/mips/momentum/ocelot_g/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_g/dbg_io.c
+++ b/arch/mips/momentum/ocelot_g/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_g/irq.c b/arch/mips/momentum/ocelot_g/irq.c
index da46524e87cb..273541fe7087 100644
--- a/arch/mips/momentum/ocelot_g/irq.c
+++ b/arch/mips/momentum/ocelot_g/irq.c
@@ -94,8 +94,8 @@ void __init arch_init_irq(void)
94 clear_c0_status(ST0_IM); 94 clear_c0_status(ST0_IM);
95 local_irq_disable(); 95 local_irq_disable();
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 rm7k_cpu_irq_init(8); 98 rm7k_cpu_irq_init();
99 99
100 gt64240_irq_init(); 100 gt64240_irq_init();
101} 101}
diff --git a/arch/mips/momentum/ocelot_g/prom.c b/arch/mips/momentum/ocelot_g/prom.c
index 2f75c6b91ec5..836d0830720d 100644
--- a/arch/mips/momentum/ocelot_g/prom.c
+++ b/arch/mips/momentum/ocelot_g/prom.c
@@ -79,7 +79,6 @@ void __init prom_init(void)
79 } 79 }
80} 80}
81 81
82unsigned long __init prom_free_prom_memory(void) 82void __init prom_free_prom_memory(void)
83{ 83{
84 return 0;
85} 84}
diff --git a/arch/mips/oprofile/Kconfig b/arch/mips/oprofile/Kconfig
index 55feaf798596..ca395ef06d4e 100644
--- a/arch/mips/oprofile/Kconfig
+++ b/arch/mips/oprofile/Kconfig
@@ -11,7 +11,7 @@ config PROFILING
11 11
12config OPROFILE 12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)" 13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING && EXPERIMENTAL 14 depends on PROFILING && !MIPS_MT_SMTC && EXPERIMENTAL
15 help 15 help
16 OProfile is a profiling system capable of profiling the 16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries, 17 whole system, include the kernel, kernel modules, libraries,
diff --git a/arch/mips/pci/fixup-vr4133.c b/arch/mips/pci/fixup-vr4133.c
index 597b89764ba1..a8d9d22b13df 100644
--- a/arch/mips/pci/fixup-vr4133.c
+++ b/arch/mips/pci/fixup-vr4133.c
@@ -17,8 +17,10 @@
17 */ 17 */
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/kernel.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/i8259.h>
22#include <asm/vr41xx/cmbvr4133.h> 24#include <asm/vr41xx/cmbvr4133.h>
23 25
24extern int vr4133_rockhopper; 26extern int vr4133_rockhopper;
@@ -142,7 +144,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
142 if (bus == NULL) 144 if (bus == NULL)
143 return -1; 145 return -1;
144 146
145 for (i = 0; i < sizeof (int_map) / sizeof (int_map[0]); i++) { 147 for (i = 0; i < ARRAY_SIZE(int_map); i++) {
146 if (int_map[i].bus == bus->number && int_map[i].slot == slot) { 148 if (int_map[i].bus == bus->number && int_map[i].slot == slot) {
147 int line; 149 int line;
148 for (line = 0; line < 4; line++) 150 for (line = 0; line < 4; line++)
@@ -160,17 +162,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
160#ifdef CONFIG_ROCKHOPPER 162#ifdef CONFIG_ROCKHOPPER
161void i8259_init(void) 163void i8259_init(void)
162{ 164{
163 outb(0x11, 0x20); /* Master ICW1 */ 165 init_i8259_irqs();
164 outb(I8259_IRQ_BASE, 0x21); /* Master ICW2 */
165 outb(0x04, 0x21); /* Master ICW3 */
166 outb(0x01, 0x21); /* Master ICW4 */
167 outb(0xff, 0x21); /* Master IMW */
168
169 outb(0x11, 0xa0); /* Slave ICW1 */
170 outb(I8259_IRQ_BASE + 8, 0xa1); /* Slave ICW2 */
171 outb(0x02, 0xa1); /* Slave ICW3 */
172 outb(0x01, 0xa1); /* Slave ICW4 */
173 outb(0xff, 0xa1); /* Slave IMW */
174 166
175 outb(0x00, 0x4d0); 167 outb(0x00, 0x4d0);
176 outb(0x02, 0x4d1); /* USB IRQ9 is level */ 168 outb(0x02, 0x4d1); /* USB IRQ9 is level */
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 2c36c108c4d6..d48665ebd33c 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -159,7 +159,7 @@ int pnx8550_set_gic_priority(int irq, int priority)
159} 159}
160 160
161static struct irq_chip level_irq_type = { 161static struct irq_chip level_irq_type = {
162 .typename = "PNX Level IRQ", 162 .name = "PNX Level IRQ",
163 .ack = mask_irq, 163 .ack = mask_irq,
164 .mask = mask_irq, 164 .mask = mask_irq,
165 .mask_ack = mask_irq, 165 .mask_ack = mask_irq,
diff --git a/arch/mips/philips/pnx8550/common/prom.c b/arch/mips/philips/pnx8550/common/prom.c
index eb6ec11fef07..8aeed6c2b8c3 100644
--- a/arch/mips/philips/pnx8550/common/prom.c
+++ b/arch/mips/philips/pnx8550/common/prom.c
@@ -106,9 +106,8 @@ int get_ethernet_addr(char *ethernet_addr)
106 return 0; 106 return 0;
107} 107}
108 108
109unsigned long __init prom_free_prom_memory(void) 109void __init prom_free_prom_memory(void)
110{ 110{
111 return 0;
112} 111}
113 112
114extern int pnx8550_console_port; 113extern int pnx8550_console_port;
diff --git a/arch/mips/pmc-sierra/yosemite/dbg_io.c b/arch/mips/pmc-sierra/yosemite/dbg_io.c
index 0f659c9106ac..6362c702e389 100644
--- a/arch/mips/pmc-sierra/yosemite/dbg_io.c
+++ b/arch/mips/pmc-sierra/yosemite/dbg_io.c
@@ -93,7 +93,7 @@
93 * Functions to READ and WRITE to serial port 1 93 * Functions to READ and WRITE to serial port 1
94 */ 94 */
95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \ 95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \
96 (TITAN_SERIAL_BASE_1 + ofs) 96 (TITAN_SERIAL_BASE_1 + ofs)))
97 97
98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \ 98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \
99 (TITAN_SERIAL_BASE_1 + ofs))) = val) 99 (TITAN_SERIAL_BASE_1 + ofs))) = val)
diff --git a/arch/mips/pmc-sierra/yosemite/irq.c b/arch/mips/pmc-sierra/yosemite/irq.c
index adb048527e76..428d1f45a287 100644
--- a/arch/mips/pmc-sierra/yosemite/irq.c
+++ b/arch/mips/pmc-sierra/yosemite/irq.c
@@ -148,9 +148,9 @@ void __init arch_init_irq(void)
148{ 148{
149 clear_c0_status(ST0_IM); 149 clear_c0_status(ST0_IM);
150 150
151 mips_cpu_irq_init(0); 151 mips_cpu_irq_init();
152 rm7k_cpu_irq_init(8); 152 rm7k_cpu_irq_init();
153 rm9k_cpu_irq_init(12); 153 rm9k_cpu_irq_init();
154 154
155#ifdef CONFIG_KGDB 155#ifdef CONFIG_KGDB
156 /* At this point, initialize the second serial port */ 156 /* At this point, initialize the second serial port */
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 9fe4973377c3..1e1685e415a4 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 prom_grab_secondary(); 132 prom_grab_secondary();
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 139void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 1b9b0d396d3e..6a6e15e40009 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -171,6 +171,7 @@ static void __init py_map_ocd(void)
171 171
172static void __init py_uart_setup(void) 172static void __init py_uart_setup(void)
173{ 173{
174#ifdef CONFIG_SERIAL_8250
174 struct uart_port up; 175 struct uart_port up;
175 176
176 /* 177 /*
@@ -188,6 +189,7 @@ static void __init py_uart_setup(void)
188 189
189 if (early_serial_setup(&up)) 190 if (early_serial_setup(&up))
190 printk(KERN_ERR "Early serial init of port 0 failed\n"); 191 printk(KERN_ERR "Early serial init of port 0 failed\n");
192#endif /* CONFIG_SERIAL_8250 */
191} 193}
192 194
193static void __init py_rtc_setup(void) 195static void __init py_rtc_setup(void)
diff --git a/arch/mips/qemu/q-mem.c b/arch/mips/qemu/q-mem.c
index d174fac43031..dae39b59de15 100644
--- a/arch/mips/qemu/q-mem.c
+++ b/arch/mips/qemu/q-mem.c
@@ -1,6 +1,5 @@
1#include <linux/init.h> 1#include <linux/init.h>
2 2
3unsigned long __init prom_free_prom_memory(void) 3void __init prom_free_prom_memory(void)
4{ 4{
5 return 0UL;
6} 5}
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index a1a9af6da7bf..6b6e97b90c6e 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -139,7 +139,7 @@ static void end_eisa1_irq(unsigned int irq)
139} 139}
140 140
141static struct irq_chip ip22_eisa1_irq_type = { 141static struct irq_chip ip22_eisa1_irq_type = {
142 .typename = "IP22 EISA", 142 .name = "IP22 EISA",
143 .startup = startup_eisa1_irq, 143 .startup = startup_eisa1_irq,
144 .ack = mask_and_ack_eisa1_irq, 144 .ack = mask_and_ack_eisa1_irq,
145 .mask = disable_eisa1_irq, 145 .mask = disable_eisa1_irq,
@@ -194,7 +194,7 @@ static void end_eisa2_irq(unsigned int irq)
194} 194}
195 195
196static struct irq_chip ip22_eisa2_irq_type = { 196static struct irq_chip ip22_eisa2_irq_type = {
197 .typename = "IP22 EISA", 197 .name = "IP22 EISA",
198 .startup = startup_eisa2_irq, 198 .startup = startup_eisa2_irq,
199 .ack = mask_and_ack_eisa2_irq, 199 .ack = mask_and_ack_eisa2_irq,
200 .mask = disable_eisa2_irq, 200 .mask = disable_eisa2_irq,
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index c44f8be0644f..b454924aeb56 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/mipsregs.h> 20#include <asm/mipsregs.h>
21#include <asm/addrspace.h> 21#include <asm/addrspace.h>
22#include <asm/irq_cpu.h>
22 23
23#include <asm/sgi/ioc.h> 24#include <asm/sgi/ioc.h>
24#include <asm/sgi/hpc3.h> 25#include <asm/sgi/hpc3.h>
@@ -52,7 +53,7 @@ static void disable_local0_irq(unsigned int irq)
52} 53}
53 54
54static struct irq_chip ip22_local0_irq_type = { 55static struct irq_chip ip22_local0_irq_type = {
55 .typename = "IP22 local 0", 56 .name = "IP22 local 0",
56 .ack = disable_local0_irq, 57 .ack = disable_local0_irq,
57 .mask = disable_local0_irq, 58 .mask = disable_local0_irq,
58 .mask_ack = disable_local0_irq, 59 .mask_ack = disable_local0_irq,
@@ -73,7 +74,7 @@ void disable_local1_irq(unsigned int irq)
73} 74}
74 75
75static struct irq_chip ip22_local1_irq_type = { 76static struct irq_chip ip22_local1_irq_type = {
76 .typename = "IP22 local 1", 77 .name = "IP22 local 1",
77 .ack = disable_local1_irq, 78 .ack = disable_local1_irq,
78 .mask = disable_local1_irq, 79 .mask = disable_local1_irq,
79 .mask_ack = disable_local1_irq, 80 .mask_ack = disable_local1_irq,
@@ -94,7 +95,7 @@ void disable_local2_irq(unsigned int irq)
94} 95}
95 96
96static struct irq_chip ip22_local2_irq_type = { 97static struct irq_chip ip22_local2_irq_type = {
97 .typename = "IP22 local 2", 98 .name = "IP22 local 2",
98 .ack = disable_local2_irq, 99 .ack = disable_local2_irq,
99 .mask = disable_local2_irq, 100 .mask = disable_local2_irq,
100 .mask_ack = disable_local2_irq, 101 .mask_ack = disable_local2_irq,
@@ -115,7 +116,7 @@ void disable_local3_irq(unsigned int irq)
115} 116}
116 117
117static struct irq_chip ip22_local3_irq_type = { 118static struct irq_chip ip22_local3_irq_type = {
118 .typename = "IP22 local 3", 119 .name = "IP22 local 3",
119 .ack = disable_local3_irq, 120 .ack = disable_local3_irq,
120 .mask = disable_local3_irq, 121 .mask = disable_local3_irq,
121 .mask_ack = disable_local3_irq, 122 .mask_ack = disable_local3_irq,
@@ -253,8 +254,6 @@ asmlinkage void plat_irq_dispatch(void)
253 indy_8254timer_irq(); 254 indy_8254timer_irq();
254} 255}
255 256
256extern void mips_cpu_irq_init(unsigned int irq_base);
257
258void __init arch_init_irq(void) 257void __init arch_init_irq(void)
259{ 258{
260 int i; 259 int i;
@@ -316,7 +315,7 @@ void __init arch_init_irq(void)
316 sgint->cmeimask1 = 0; 315 sgint->cmeimask1 = 0;
317 316
318 /* init CPU irqs */ 317 /* init CPU irqs */
319 mips_cpu_irq_init(SGINT_CPU); 318 mips_cpu_irq_init();
320 319
321 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) { 320 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) {
322 struct irq_chip *handler; 321 struct irq_chip *handler;
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index b58bd522262b..ddb6506d8341 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -202,7 +202,6 @@ void __init sgimc_init(void)
202} 202}
203 203
204void __init prom_meminit(void) {} 204void __init prom_meminit(void) {}
205unsigned long __init prom_free_prom_memory(void) 205void __init prom_free_prom_memory(void)
206{ 206{
207 return 0;
208} 207}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 319f8803ef6f..60ade7690e09 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -333,7 +333,7 @@ static inline void disable_bridge_irq(unsigned int irq)
333} 333}
334 334
335static struct irq_chip bridge_irq_type = { 335static struct irq_chip bridge_irq_type = {
336 .typename = "bridge", 336 .name = "bridge",
337 .startup = startup_bridge_irq, 337 .startup = startup_bridge_irq,
338 .shutdown = shutdown_bridge_irq, 338 .shutdown = shutdown_bridge_irq,
339 .ack = disable_bridge_irq, 339 .ack = disable_bridge_irq,
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 16e5682b01f1..0e3d535e9f43 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -498,10 +498,9 @@ void __init prom_meminit(void)
498 } 498 }
499} 499}
500 500
501unsigned long __init prom_free_prom_memory(void) 501void __init prom_free_prom_memory(void)
502{ 502{
503 /* We got nothing to free here ... */ 503 /* We got nothing to free here ... */
504 return 0;
505} 504}
506 505
507extern void pagetable_init(void); 506extern void pagetable_init(void);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index c20e9899b34b..9ce513629b14 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -181,7 +181,7 @@ static void disable_rt_irq(unsigned int irq)
181} 181}
182 182
183static struct irq_chip rt_irq_type = { 183static struct irq_chip rt_irq_type = {
184 .typename = "SN HUB RT timer", 184 .name = "SN HUB RT timer",
185 .ack = disable_rt_irq, 185 .ack = disable_rt_irq,
186 .mask = disable_rt_irq, 186 .mask = disable_rt_irq,
187 .mask_ack = disable_rt_irq, 187 .mask_ack = disable_rt_irq,
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index ae063864c026..8c450d9e8696 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -144,7 +144,7 @@ static void end_cpu_irq(unsigned int irq)
144} 144}
145 145
146static struct irq_chip ip32_cpu_interrupt = { 146static struct irq_chip ip32_cpu_interrupt = {
147 .typename = "IP32 CPU", 147 .name = "IP32 CPU",
148 .ack = disable_cpu_irq, 148 .ack = disable_cpu_irq,
149 .mask = disable_cpu_irq, 149 .mask = disable_cpu_irq,
150 .mask_ack = disable_cpu_irq, 150 .mask_ack = disable_cpu_irq,
@@ -193,7 +193,7 @@ static void end_crime_irq(unsigned int irq)
193} 193}
194 194
195static struct irq_chip ip32_crime_interrupt = { 195static struct irq_chip ip32_crime_interrupt = {
196 .typename = "IP32 CRIME", 196 .name = "IP32 CRIME",
197 .ack = mask_and_ack_crime_irq, 197 .ack = mask_and_ack_crime_irq,
198 .mask = disable_crime_irq, 198 .mask = disable_crime_irq,
199 .mask_ack = mask_and_ack_crime_irq, 199 .mask_ack = mask_and_ack_crime_irq,
@@ -234,7 +234,7 @@ static void end_macepci_irq(unsigned int irq)
234} 234}
235 235
236static struct irq_chip ip32_macepci_interrupt = { 236static struct irq_chip ip32_macepci_interrupt = {
237 .typename = "IP32 MACE PCI", 237 .name = "IP32 MACE PCI",
238 .ack = disable_macepci_irq, 238 .ack = disable_macepci_irq,
239 .mask = disable_macepci_irq, 239 .mask = disable_macepci_irq,
240 .mask_ack = disable_macepci_irq, 240 .mask_ack = disable_macepci_irq,
@@ -347,7 +347,7 @@ static void end_maceisa_irq(unsigned irq)
347} 347}
348 348
349static struct irq_chip ip32_maceisa_interrupt = { 349static struct irq_chip ip32_maceisa_interrupt = {
350 .typename = "IP32 MACE ISA", 350 .name = "IP32 MACE ISA",
351 .ack = mask_and_ack_maceisa_irq, 351 .ack = mask_and_ack_maceisa_irq,
352 .mask = disable_maceisa_irq, 352 .mask = disable_maceisa_irq,
353 .mask_ack = mask_and_ack_maceisa_irq, 353 .mask_ack = mask_and_ack_maceisa_irq,
@@ -379,7 +379,7 @@ static void end_mace_irq(unsigned int irq)
379} 379}
380 380
381static struct irq_chip ip32_mace_interrupt = { 381static struct irq_chip ip32_mace_interrupt = {
382 .typename = "IP32 MACE", 382 .name = "IP32 MACE",
383 .ack = disable_mace_irq, 383 .ack = disable_mace_irq,
384 .mask = disable_mace_irq, 384 .mask = disable_mace_irq,
385 .mask_ack = disable_mace_irq, 385 .mask_ack = disable_mace_irq,
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index d37d40a3cdae..849d392a0013 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -43,7 +43,6 @@ void __init prom_meminit (void)
43} 43}
44 44
45 45
46unsigned long __init prom_free_prom_memory (void) 46void __init prom_free_prom_memory(void)
47{ 47{
48 return 0;
49} 48}
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 2e8f6b2e2420..1dc5d05d8962 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -82,7 +82,7 @@ extern char sb1250_duart_present[];
82#endif 82#endif
83 83
84static struct irq_chip bcm1480_irq_type = { 84static struct irq_chip bcm1480_irq_type = {
85 .typename = "BCM1480-IMR", 85 .name = "BCM1480-IMR",
86 .ack = ack_bcm1480_irq, 86 .ack = ack_bcm1480_irq,
87 .mask = disable_bcm1480_irq, 87 .mask = disable_bcm1480_irq,
88 .mask_ack = ack_bcm1480_irq, 88 .mask_ack = ack_bcm1480_irq,
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 6e8952da6e2a..9e6099e69622 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -343,10 +343,9 @@ void __init prom_init(void)
343 prom_meminit(); 343 prom_meminit();
344} 344}
345 345
346unsigned long __init prom_free_prom_memory(void) 346void __init prom_free_prom_memory(void)
347{ 347{
348 /* Not sure what I'm supposed to do here. Nothing, I think */ 348 /* Not sure what I'm supposed to do here. Nothing, I think */
349 return 0;
350} 349}
351 350
352void prom_putchar(char c) 351void prom_putchar(char c)
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 82ce7533053f..148239446e6e 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -67,7 +67,7 @@ extern char sb1250_duart_present[];
67#endif 67#endif
68 68
69static struct irq_chip sb1250_irq_type = { 69static struct irq_chip sb1250_irq_type = {
70 .typename = "SB1250-IMR", 70 .name = "SB1250-IMR",
71 .ack = ack_sb1250_irq, 71 .ack = ack_sb1250_irq,
72 .mask = disable_sb1250_irq, 72 .mask = disable_sb1250_irq,
73 .mask_ack = ack_sb1250_irq, 73 .mask_ack = ack_sb1250_irq,
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index 3c33a4517bc3..257c4e674353 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -87,10 +87,9 @@ void __init prom_init(void)
87 prom_meminit(); 87 prom_meminit();
88} 88}
89 89
90unsigned long __init prom_free_prom_memory(void) 90void __init prom_free_prom_memory(void)
91{ 91{
92 /* Not sure what I'm supposed to do here. Nothing, I think */ 92 /* Not sure what I'm supposed to do here. Nothing, I think */
93 return 0;
94} 93}
95 94
96void prom_putchar(char c) 95void prom_putchar(char c)
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 8511bcc6d99d..039e8e540508 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -37,7 +37,7 @@ static void end_pciasic_irq(unsigned int irq)
37} 37}
38 38
39static struct irq_chip pciasic_irq_type = { 39static struct irq_chip pciasic_irq_type = {
40 .typename = "ASIC-PCI", 40 .name = "ASIC-PCI",
41 .ack = disable_pciasic_irq, 41 .ack = disable_pciasic_irq,
42 .mask = disable_pciasic_irq, 42 .mask = disable_pciasic_irq,
43 .mask_ack = disable_pciasic_irq, 43 .mask_ack = disable_pciasic_irq,
diff --git a/arch/mips/sni/sniprom.c b/arch/mips/sni/sniprom.c
index d1d0f1f493b4..1213d166f22e 100644
--- a/arch/mips/sni/sniprom.c
+++ b/arch/mips/sni/sniprom.c
@@ -67,9 +67,8 @@ void prom_printf(char *fmt, ...)
67 va_end(args); 67 va_end(args);
68} 68}
69 69
70unsigned long prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
74 73
75/* 74/*
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index ed4a19adf361..e7f3e5b84dcf 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -120,7 +120,7 @@ static void tx4927_irq_pic_disable(unsigned int irq);
120 120
121#define TX4927_CP0_NAME "TX4927-CP0" 121#define TX4927_CP0_NAME "TX4927-CP0"
122static struct irq_chip tx4927_irq_cp0_type = { 122static struct irq_chip tx4927_irq_cp0_type = {
123 .typename = TX4927_CP0_NAME, 123 .name = TX4927_CP0_NAME,
124 .ack = tx4927_irq_cp0_disable, 124 .ack = tx4927_irq_cp0_disable,
125 .mask = tx4927_irq_cp0_disable, 125 .mask = tx4927_irq_cp0_disable,
126 .mask_ack = tx4927_irq_cp0_disable, 126 .mask_ack = tx4927_irq_cp0_disable,
@@ -129,7 +129,7 @@ static struct irq_chip tx4927_irq_cp0_type = {
129 129
130#define TX4927_PIC_NAME "TX4927-PIC" 130#define TX4927_PIC_NAME "TX4927-PIC"
131static struct irq_chip tx4927_irq_pic_type = { 131static struct irq_chip tx4927_irq_pic_type = {
132 .typename = TX4927_PIC_NAME, 132 .name = TX4927_PIC_NAME,
133 .ack = tx4927_irq_pic_disable, 133 .ack = tx4927_irq_pic_disable,
134 .mask = tx4927_irq_pic_disable, 134 .mask = tx4927_irq_pic_disable,
135 .mask_ack = tx4927_irq_pic_disable, 135 .mask_ack = tx4927_irq_pic_disable,
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index b54b529a29f9..dcce88f403c9 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -228,7 +228,7 @@ static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq);
228 228
229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC" 229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = { 230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
231 .typename = TOSHIBA_RBTX4927_IOC_NAME, 231 .name = TOSHIBA_RBTX4927_IOC_NAME,
232 .ack = toshiba_rbtx4927_irq_ioc_disable, 232 .ack = toshiba_rbtx4927_irq_ioc_disable,
233 .mask = toshiba_rbtx4927_irq_ioc_disable, 233 .mask = toshiba_rbtx4927_irq_ioc_disable,
234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable, 234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable,
@@ -241,7 +241,7 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
241#ifdef CONFIG_TOSHIBA_FPCIB0 241#ifdef CONFIG_TOSHIBA_FPCIB0
242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA" 242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA"
243static struct irq_chip toshiba_rbtx4927_irq_isa_type = { 243static struct irq_chip toshiba_rbtx4927_irq_isa_type = {
244 .typename = TOSHIBA_RBTX4927_ISA_NAME, 244 .name = TOSHIBA_RBTX4927_ISA_NAME,
245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
246 .mask = toshiba_rbtx4927_irq_isa_disable, 246 .mask = toshiba_rbtx4927_irq_isa_disable,
247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
@@ -490,13 +490,13 @@ void toshiba_rbtx4927_irq_dump(char *key)
490 { 490 {
491 u32 i, j = 0; 491 u32 i, j = 0;
492 for (i = 0; i < NR_IRQS; i++) { 492 for (i = 0; i < NR_IRQS; i++) {
493 if (strcmp(irq_desc[i].chip->typename, "none") 493 if (strcmp(irq_desc[i].chip->name, "none")
494 == 0) 494 == 0)
495 continue; 495 continue;
496 496
497 if ((i >= 1) 497 if ((i >= 1)
498 && (irq_desc[i - 1].chip->typename == 498 && (irq_desc[i - 1].chip->name ==
499 irq_desc[i].chip->typename)) { 499 irq_desc[i].chip->name)) {
500 j++; 500 j++;
501 } else { 501 } else {
502 j = 0; 502 j = 0;
@@ -510,7 +510,7 @@ void toshiba_rbtx4927_irq_dump(char *key)
510 (u32) (irq_desc[i].action ? irq_desc[i]. 510 (u32) (irq_desc[i].action ? irq_desc[i].
511 action->handler : 0), 511 action->handler : 0),
512 irq_desc[i].depth, 512 irq_desc[i].depth,
513 irq_desc[i].chip->typename, j); 513 irq_desc[i].chip->name, j);
514 } 514 }
515 } 515 }
516#endif 516#endif
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
index efe50562f0ce..9a3a5babd1fb 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
@@ -80,9 +80,8 @@ void __init prom_init(void)
80 add_memory_region(0, msize << 20, BOOT_MEM_RAM); 80 add_memory_region(0, msize << 20, BOOT_MEM_RAM);
81} 81}
82 82
83unsigned long __init prom_free_prom_memory(void) 83void __init prom_free_prom_memory(void)
84{ 84{
85 return 0;
86} 85}
87 86
88const char *get_system_type(void) 87const char *get_system_type(void)
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index a347b424d91c..3a2dbfc25014 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -49,7 +49,7 @@ static void tx4938_irq_pic_disable(unsigned int irq);
49 49
50#define TX4938_CP0_NAME "TX4938-CP0" 50#define TX4938_CP0_NAME "TX4938-CP0"
51static struct irq_chip tx4938_irq_cp0_type = { 51static struct irq_chip tx4938_irq_cp0_type = {
52 .typename = TX4938_CP0_NAME, 52 .name = TX4938_CP0_NAME,
53 .ack = tx4938_irq_cp0_disable, 53 .ack = tx4938_irq_cp0_disable,
54 .mask = tx4938_irq_cp0_disable, 54 .mask = tx4938_irq_cp0_disable,
55 .mask_ack = tx4938_irq_cp0_disable, 55 .mask_ack = tx4938_irq_cp0_disable,
@@ -58,7 +58,7 @@ static struct irq_chip tx4938_irq_cp0_type = {
58 58
59#define TX4938_PIC_NAME "TX4938-PIC" 59#define TX4938_PIC_NAME "TX4938-PIC"
60static struct irq_chip tx4938_irq_pic_type = { 60static struct irq_chip tx4938_irq_pic_type = {
61 .typename = TX4938_PIC_NAME, 61 .name = TX4938_PIC_NAME,
62 .ack = tx4938_irq_pic_disable, 62 .ack = tx4938_irq_pic_disable,
63 .mask = tx4938_irq_pic_disable, 63 .mask = tx4938_irq_pic_disable,
64 .mask_ack = tx4938_irq_pic_disable, 64 .mask_ack = tx4938_irq_pic_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index b6f363d08011..2e96dbb248b1 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -92,7 +92,7 @@ static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
92 92
93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC" 93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { 94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
95 .typename = TOSHIBA_RBTX4938_IOC_NAME, 95 .name = TOSHIBA_RBTX4938_IOC_NAME,
96 .ack = toshiba_rbtx4938_irq_ioc_disable, 96 .ack = toshiba_rbtx4938_irq_ioc_disable,
97 .mask = toshiba_rbtx4938_irq_ioc_disable, 97 .mask = toshiba_rbtx4938_irq_ioc_disable,
98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable, 98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/prom.c b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
index e44daf30a7c1..7dc6a0aae21c 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/prom.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
@@ -56,9 +56,8 @@ void __init prom_init(void)
56 return; 56 return;
57} 57}
58 58
59unsigned long __init prom_free_prom_memory(void) 59void __init prom_free_prom_memory(void)
60{ 60{
61 return 0;
62} 61}
63 62
64void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 63void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index c075261976c5..adabc6bad440 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2001-2002 MontaVista Software Inc. 4 * Copyright (C) 2001-2002 MontaVista Software Inc.
5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com> 5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
6 * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 6 * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -68,6 +68,7 @@ static unsigned char sysint2_assign[16] = {
68#define MPIUINTREG 0x0e 68#define MPIUINTREG 0x0e
69#define MAIUINTREG 0x10 69#define MAIUINTREG 0x10
70#define MKIUINTREG 0x12 70#define MKIUINTREG 0x12
71#define MMACINTREG 0x12
71#define MGIUINTLREG 0x14 72#define MGIUINTLREG 0x14
72#define MDSIUINTREG 0x16 73#define MDSIUINTREG 0x16
73#define NMIREG 0x18 74#define NMIREG 0x18
@@ -241,6 +242,30 @@ void vr41xx_disable_kiuint(uint16_t mask)
241 242
242EXPORT_SYMBOL(vr41xx_disable_kiuint); 243EXPORT_SYMBOL(vr41xx_disable_kiuint);
243 244
245void vr41xx_enable_macint(uint16_t mask)
246{
247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
248 unsigned long flags;
249
250 spin_lock_irqsave(&desc->lock, flags);
251 icu1_set(MMACINTREG, mask);
252 spin_unlock_irqrestore(&desc->lock, flags);
253}
254
255EXPORT_SYMBOL(vr41xx_enable_macint);
256
257void vr41xx_disable_macint(uint16_t mask)
258{
259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
260 unsigned long flags;
261
262 spin_lock_irqsave(&desc->lock, flags);
263 icu1_clear(MMACINTREG, mask);
264 spin_unlock_irqrestore(&desc->lock, flags);
265}
266
267EXPORT_SYMBOL(vr41xx_disable_macint);
268
244void vr41xx_enable_dsiuint(uint16_t mask) 269void vr41xx_enable_dsiuint(uint16_t mask)
245{ 270{
246 struct irq_desc *desc = irq_desc + DSIU_IRQ; 271 struct irq_desc *desc = irq_desc + DSIU_IRQ;
@@ -428,7 +453,7 @@ static void enable_sysint1_irq(unsigned int irq)
428} 453}
429 454
430static struct irq_chip sysint1_irq_type = { 455static struct irq_chip sysint1_irq_type = {
431 .typename = "SYSINT1", 456 .name = "SYSINT1",
432 .ack = disable_sysint1_irq, 457 .ack = disable_sysint1_irq,
433 .mask = disable_sysint1_irq, 458 .mask = disable_sysint1_irq,
434 .mask_ack = disable_sysint1_irq, 459 .mask_ack = disable_sysint1_irq,
@@ -446,7 +471,7 @@ static void enable_sysint2_irq(unsigned int irq)
446} 471}
447 472
448static struct irq_chip sysint2_irq_type = { 473static struct irq_chip sysint2_irq_type = {
449 .typename = "SYSINT2", 474 .name = "SYSINT2",
450 .ack = disable_sysint2_irq, 475 .ack = disable_sysint2_irq,
451 .mask = disable_sysint2_irq, 476 .mask = disable_sysint2_irq,
452 .mask_ack = disable_sysint2_irq, 477 .mask_ack = disable_sysint2_irq,
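The two newly exported helpers, vr41xx_enable_macint() and vr41xx_disable_macint(), let an on-chip MAC driver (built-in or modular, hence the EXPORT_SYMBOLs) mask and unmask its interrupt sources in ICU1 under the ETHERNET_IRQ descriptor lock. A minimal consumer sketch; the header location and the all-ones mask are illustrative assumptions, only the two helper calls come from this patch:

	#include <asm/vr41xx/vr41xx.h>	/* assumed to declare the macint helpers */

	#define MAC_INT_ALL	0xffff	/* illustrative: unmask every MAC source */

	static void vr41xx_mac_irq_on(void)
	{
		vr41xx_enable_macint(MAC_INT_ALL);
	}

	static void vr41xx_mac_irq_off(void)
	{
		vr41xx_disable_macint(MAC_INT_ALL);
	}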
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index a2e285c1d4d5..4f97e0ba9e24 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -81,7 +81,6 @@ void __init prom_init(void)
81 } 81 }
82} 82}
83 83
84unsigned long __init prom_free_prom_memory (void) 84void __init prom_free_prom_memory(void)
85{ 85{
86 return 0UL;
87} 86}
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 16decf4ac2f4..cba36a247e32 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -95,27 +95,27 @@ asmlinkage void plat_irq_dispatch(void)
95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; 95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
96 96
97 if (pending & CAUSEF_IP7) 97 if (pending & CAUSEF_IP7)
98 do_IRQ(7); 98 do_IRQ(TIMER_IRQ);
99 else if (pending & 0x7800) { 99 else if (pending & 0x7800) {
100 if (pending & CAUSEF_IP3) 100 if (pending & CAUSEF_IP3)
101 irq_dispatch(3); 101 irq_dispatch(INT1_IRQ);
102 else if (pending & CAUSEF_IP4) 102 else if (pending & CAUSEF_IP4)
103 irq_dispatch(4); 103 irq_dispatch(INT2_IRQ);
104 else if (pending & CAUSEF_IP5) 104 else if (pending & CAUSEF_IP5)
105 irq_dispatch(5); 105 irq_dispatch(INT3_IRQ);
106 else if (pending & CAUSEF_IP6) 106 else if (pending & CAUSEF_IP6)
107 irq_dispatch(6); 107 irq_dispatch(INT4_IRQ);
108 } else if (pending & CAUSEF_IP2) 108 } else if (pending & CAUSEF_IP2)
109 irq_dispatch(2); 109 irq_dispatch(INT0_IRQ);
110 else if (pending & CAUSEF_IP0) 110 else if (pending & CAUSEF_IP0)
111 do_IRQ(0); 111 do_IRQ(MIPS_SOFTINT0_IRQ);
112 else if (pending & CAUSEF_IP1) 112 else if (pending & CAUSEF_IP1)
113 do_IRQ(1); 113 do_IRQ(MIPS_SOFTINT1_IRQ);
114 else 114 else
115 spurious_interrupt(); 115 spurious_interrupt();
116} 116}
117 117
118void __init arch_init_irq(void) 118void __init arch_init_irq(void)
119{ 119{
120 mips_cpu_irq_init(MIPS_CPU_IRQ_BASE); 120 mips_cpu_irq_init();
121} 121}
diff --git a/arch/mips/vr41xx/nec-cmbvr4133/irq.c b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
index 128ed8d6f111..7d2d076b0f54 100644
--- a/arch/mips/vr41xx/nec-cmbvr4133/irq.c
+++ b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
@@ -21,60 +21,16 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22 22
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/i8259.h>
24#include <asm/vr41xx/cmbvr4133.h> 25#include <asm/vr41xx/cmbvr4133.h>
25 26
26extern void enable_8259A_irq(unsigned int irq);
27extern void disable_8259A_irq(unsigned int irq);
28extern void mask_and_ack_8259A(unsigned int irq);
29extern void init_8259A(int hoge);
30
31extern int vr4133_rockhopper; 27extern int vr4133_rockhopper;
32 28
33static void enable_i8259_irq(unsigned int irq)
34{
35 enable_8259A_irq(irq - I8259_IRQ_BASE);
36}
37
38static void disable_i8259_irq(unsigned int irq)
39{
40 disable_8259A_irq(irq - I8259_IRQ_BASE);
41}
42
43static void ack_i8259_irq(unsigned int irq)
44{
45 mask_and_ack_8259A(irq - I8259_IRQ_BASE);
46}
47
48static struct irq_chip i8259_irq_type = {
49 .typename = "XT-PIC",
50 .ack = ack_i8259_irq,
51 .mask = disable_i8259_irq,
52 .mask_ack = ack_i8259_irq,
53 .unmask = enable_i8259_irq,
54};
55
56static int i8259_get_irq_number(int irq) 29static int i8259_get_irq_number(int irq)
57{ 30{
58 unsigned long isr; 31 return i8259_irq();
59
60 isr = inb(0x20);
61 irq = ffz(~isr);
62 if (irq == 2) {
63 isr = inb(0xa0);
64 irq = 8 + ffz(~isr);
65 }
66
67 if (irq < 0 || irq > 15)
68 return -EINVAL;
69
70 return I8259_IRQ_BASE + irq;
71} 32}
72 33
73static struct irqaction i8259_slave_cascade = {
74 .handler = &no_action,
75 .name = "cascade",
76};
77
78void __init rockhopper_init_irq(void) 34void __init rockhopper_init_irq(void)
79{ 35{
80 int i; 36 int i;
@@ -84,11 +40,6 @@ void __init rockhopper_init_irq(void)
84 return; 40 return;
85 } 41 }
86 42
87 for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
88 set_irq_chip_and_handler(i, &i8259_irq_type, handle_level_irq);
89
90 setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);
91
92 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH); 43 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH);
93 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH); 44 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH);
94 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number); 45 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 12272361c018..eaed402ad346 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HWEIGHT
34 bool 34 bool
35 default y 35 default y
36 36
37config GENERIC_CALIBRATE_DELAY
38 bool
39 default y
40
41config GENERIC_TIME 37config GENERIC_TIME
42 def_bool y 38 def_bool y
43 39
@@ -134,6 +130,31 @@ config AUDIT_ARCH
134 bool 130 bool
135 default y 131 default y
136 132
133config S390_SWITCH_AMODE
134 bool "Switch kernel/user addressing modes"
135 help
 136	  This option allows switching the addressing modes of kernel and user
 137	  space. The kernel parameter switch_amode=on enables this feature;
 138	  it is disabled by default. Enabling this (via kernel parameter) on
 139	  machines earlier than IBM System z9-109 EC/BC will reduce system performance.
140
141 Note that this option will also be selected by selecting the execute
142 protection option below. Enabling the execute protection via the
143 noexec kernel parameter will also switch the addressing modes,
144 independent of the switch_amode kernel parameter.
145
146
147config S390_EXEC_PROTECT
148 bool "Data execute protection"
149 select S390_SWITCH_AMODE
150 help
 151	  This option enables buffer overflow protection for user space
 152	  programs and also selects the addressing mode option above.
 153	  The kernel parameter noexec=on will enable this feature and also
 154	  switch the addressing modes; it is disabled by default. Enabling
 155	  this (via kernel parameter) on machines earlier than IBM System
 156	  z9-109 EC/BC will reduce system performance.
157
137comment "Code generation options" 158comment "Code generation options"
138 159
139choice 160choice
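Both new options only build the support in; the features stay off until the matching kernel parameter is passed at boot, as the help texts above state. An illustrative parameter line (the root= value is a placeholder, not taken from this patch):

	root=/dev/dasda1 switch_amode=on noexec=on

Note that noexec=on implies the address-mode switch even when switch_amode=on is absent, matching the select of S390_SWITCH_AMODE by S390_EXEC_PROTECT above.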
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index b8c237290263..c9da7d16145e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -81,7 +81,7 @@ static struct ctl_table appldata_dir_table[] = {
81/* 81/*
82 * Timer 82 * Timer
83 */ 83 */
84DEFINE_PER_CPU(struct vtimer_list, appldata_timer); 84static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
85static atomic_t appldata_expire_count = ATOMIC_INIT(0); 85static atomic_t appldata_expire_count = ATOMIC_INIT(0);
86 86
87static DEFINE_SPINLOCK(appldata_timer_lock); 87static DEFINE_SPINLOCK(appldata_timer_lock);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 8aea3698a77b..4ca615788702 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -36,7 +36,7 @@
36 * book: 36 * book:
37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
38 */ 38 */
39struct appldata_mem_data { 39static struct appldata_mem_data {
40 u64 timestamp; 40 u64 timestamp;
41 u32 sync_count_1; /* after VM collected the record data, */ 41 u32 sync_count_1; /* after VM collected the record data, */
42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 075e619bf37d..f64b8c867ae2 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -34,7 +34,7 @@
34 * book: 34 * book:
35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
36 */ 36 */
37struct appldata_net_sum_data { 37static struct appldata_net_sum_data {
38 u64 timestamp; 38 u64 timestamp;
39 u32 sync_count_1; /* after VM collected the record data, */ 39 u32 sync_count_1; /* after VM collected the record data, */
40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
new file mode 100644
index 000000000000..99ff9f08e4d7
--- /dev/null
+++ b/arch/s390/crypto/Kconfig
@@ -0,0 +1,60 @@
1config CRYPTO_SHA1_S390
2 tristate "SHA1 digest algorithm"
3 depends on S390
4 select CRYPTO_ALGAPI
5 help
6 This is the s390 hardware accelerated implementation of the
7 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
8
9config CRYPTO_SHA256_S390
10 tristate "SHA256 digest algorithm"
11 depends on S390
12 select CRYPTO_ALGAPI
13 help
14 This is the s390 hardware accelerated implementation of the
15 SHA256 secure hash standard (DFIPS 180-2).
16
17 This version of SHA implements a 256 bit hash with 128 bits of
18 security against collision attacks.
19
20config CRYPTO_DES_S390
21 tristate "DES and Triple DES cipher algorithms"
22 depends on S390
23 select CRYPTO_ALGAPI
24 select CRYPTO_BLKCIPHER
25 help
 26	  This is the s390 hardware accelerated implementation of the
 27	  DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
28
29config CRYPTO_AES_S390
30 tristate "AES cipher algorithms"
31 depends on S390
32 select CRYPTO_ALGAPI
33 select CRYPTO_BLKCIPHER
34 help
35 This is the s390 hardware accelerated implementation of the
36 AES cipher algorithms (FIPS-197). AES uses the Rijndael
37 algorithm.
38
39 Rijndael appears to be consistently a very good performer in
40 both hardware and software across a wide range of computing
41 environments regardless of its use in feedback or non-feedback
42 modes. Its key setup time is excellent, and its key agility is
43 good. Rijndael's very low memory requirements make it very well
44 suited for restricted-space environments, in which it also
45 demonstrates excellent performance. Rijndael's operations are
46 among the easiest to defend against power and timing attacks.
47
48 On s390 the System z9-109 currently only supports the key size
49 of 128 bit.
50
51config S390_PRNG
52 tristate "Pseudo random number generator device driver"
53 depends on S390
54 default "m"
55 help
56 Select this option if you want to use the s390 pseudo random number
 57	  generator. The PRNG is part of the cryptographic processor functions
 58	  and uses triple-DES to generate secure random numbers following the
 59	  ANSI X9.17 standard. The PRNG is usable via the char device
60 /dev/prandom.
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index bfe2541dc5cf..14e552c5cc43 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o 6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o 7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o 8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
9 9obj-$(CONFIG_S390_PRNG) += prng.o
10obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 15c9eec02928..91636353f6f0 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the AES Cipher Algorithm. 4 * s390 implementation of the AES Cipher Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/aes.c" 10 * Derived from "crypto/aes.c"
@@ -27,9 +27,11 @@
27/* data block size for all key lengths */ 27/* data block size for all key lengths */
28#define AES_BLOCK_SIZE 16 28#define AES_BLOCK_SIZE 16
29 29
30int has_aes_128 = 0; 30#define AES_KEYLEN_128 1
31int has_aes_192 = 0; 31#define AES_KEYLEN_192 2
32int has_aes_256 = 0; 32#define AES_KEYLEN_256 4
33
34static char keylen_flag = 0;
33 35
34struct s390_aes_ctx { 36struct s390_aes_ctx {
35 u8 iv[AES_BLOCK_SIZE]; 37 u8 iv[AES_BLOCK_SIZE];
@@ -47,20 +49,19 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
47 49
48 switch (key_len) { 50 switch (key_len) {
49 case 16: 51 case 16:
50 if (!has_aes_128) 52 if (!(keylen_flag & AES_KEYLEN_128))
51 goto fail; 53 goto fail;
52 break; 54 break;
53 case 24: 55 case 24:
54 if (!has_aes_192) 56 if (!(keylen_flag & AES_KEYLEN_192))
55 goto fail; 57 goto fail;
56 58
57 break; 59 break;
58 case 32: 60 case 32:
59 if (!has_aes_256) 61 if (!(keylen_flag & AES_KEYLEN_256))
60 goto fail; 62 goto fail;
61 break; 63 break;
62 default: 64 default:
63 /* invalid key length */
64 goto fail; 65 goto fail;
65 break; 66 break;
66 } 67 }
@@ -322,34 +323,32 @@ static int __init aes_init(void)
322 int ret; 323 int ret;
323 324
324 if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) 325 if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
325 has_aes_128 = 1; 326 keylen_flag |= AES_KEYLEN_128;
326 if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) 327 if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
327 has_aes_192 = 1; 328 keylen_flag |= AES_KEYLEN_192;
328 if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) 329 if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
329 has_aes_256 = 1; 330 keylen_flag |= AES_KEYLEN_256;
331
332 if (!keylen_flag)
333 return -EOPNOTSUPP;
330 334
331 if (!has_aes_128 && !has_aes_192 && !has_aes_256) 335 /* z9 109 and z9 BC/EC only support 128 bit key length */
332 return -ENOSYS; 336 if (keylen_flag == AES_KEYLEN_128)
337 printk(KERN_INFO
 338		printk(KERN_INFO
 339		       "aes_s390: hardware acceleration only available for "
 340		       "128 bit keys\n");
333 340
334 ret = crypto_register_alg(&aes_alg); 341 ret = crypto_register_alg(&aes_alg);
335 if (ret != 0) { 342 if (ret)
336 printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n");
337 goto aes_err; 343 goto aes_err;
338 }
339 344
340 ret = crypto_register_alg(&ecb_aes_alg); 345 ret = crypto_register_alg(&ecb_aes_alg);
341 if (ret != 0) { 346 if (ret)
342 printk(KERN_INFO
343 "crypt_s390: ecb-aes-s390 couldn't be loaded.\n");
344 goto ecb_aes_err; 347 goto ecb_aes_err;
345 }
346 348
347 ret = crypto_register_alg(&cbc_aes_alg); 349 ret = crypto_register_alg(&cbc_aes_alg);
348 if (ret != 0) { 350 if (ret)
349 printk(KERN_INFO
350 "crypt_s390: cbc-aes-s390 couldn't be loaded.\n");
351 goto cbc_aes_err; 351 goto cbc_aes_err;
352 }
353 352
354out: 353out:
355 return ret; 354 return ret;
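The three has_aes_* integers are folded into a single keylen_flag bitmask, so aes_init() can detect "no AES at all" with one test and aes_set_key() can check each key length with one AND. A standalone user-space sketch of just that flag logic; the hard-coded keylen_flag value stands in for the crypt_s390_func_available() probes and is an assumption:

	#include <stdio.h>

	#define AES_KEYLEN_128 1
	#define AES_KEYLEN_192 2
	#define AES_KEYLEN_256 4

	static int key_len_supported(unsigned char flags, unsigned int key_len)
	{
		switch (key_len) {
		case 16: return flags & AES_KEYLEN_128;
		case 24: return flags & AES_KEYLEN_192;
		case 32: return flags & AES_KEYLEN_256;
		default: return 0;
		}
	}

	int main(void)
	{
		/* pretend we run on a z9, which only accelerates 128 bit keys */
		unsigned char keylen_flag = AES_KEYLEN_128;

		printf("16 byte key: %s\n", key_len_supported(keylen_flag, 16) ? "ok" : "unsupported");
		printf("32 byte key: %s\n", key_len_supported(keylen_flag, 32) ? "ok" : "unsupported");
		return 0;
	}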
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 2b137089f625..2775d2618332 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,8 +3,9 @@
3 * 3 *
4 * Support for s390 cryptographic instructions. 4 * Support for s390 cryptographic instructions.
5 * 5 *
6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * Jan Glauber (jan.glauber@de.ibm.com)
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
@@ -32,7 +33,8 @@ enum crypt_s390_operations {
32 CRYPT_S390_KMAC = 0x0500 33 CRYPT_S390_KMAC = 0x0500
33}; 34};
34 35
35/* function codes for KM (CIPHER MESSAGE) instruction 36/*
37 * function codes for KM (CIPHER MESSAGE) instruction
36 * 0x80 is the decipher modifier bit 38 * 0x80 is the decipher modifier bit
37 */ 39 */
38enum crypt_s390_km_func { 40enum crypt_s390_km_func {
@@ -51,7 +53,8 @@ enum crypt_s390_km_func {
51 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, 53 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
52}; 54};
53 55
54/* function codes for KMC (CIPHER MESSAGE WITH CHAINING) 56/*
57 * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
55 * instruction 58 * instruction
56 */ 59 */
57enum crypt_s390_kmc_func { 60enum crypt_s390_kmc_func {
@@ -68,9 +71,11 @@ enum crypt_s390_kmc_func {
68 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, 71 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
69 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, 72 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
70 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, 73 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
74 KMC_PRNG = CRYPT_S390_KMC | 0x43,
71}; 75};
72 76
73/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) 77/*
78 * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
74 * instruction 79 * instruction
75 */ 80 */
76enum crypt_s390_kimd_func { 81enum crypt_s390_kimd_func {
@@ -79,7 +84,8 @@ enum crypt_s390_kimd_func {
79 KIMD_SHA_256 = CRYPT_S390_KIMD | 2, 84 KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
80}; 85};
81 86
82/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) 87/*
88 * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
83 * instruction 89 * instruction
84 */ 90 */
85enum crypt_s390_klmd_func { 91enum crypt_s390_klmd_func {
@@ -88,7 +94,8 @@ enum crypt_s390_klmd_func {
88 KLMD_SHA_256 = CRYPT_S390_KLMD | 2, 94 KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
89}; 95};
90 96
91/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) 97/*
98 * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
92 * instruction 99 * instruction
93 */ 100 */
94enum crypt_s390_kmac_func { 101enum crypt_s390_kmac_func {
@@ -98,229 +105,219 @@ enum crypt_s390_kmac_func {
98 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 105 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
99}; 106};
100 107
101/* status word for s390 crypto instructions' QUERY functions */ 108/**
102struct crypt_s390_query_status { 109 * crypt_s390_km:
103 u64 high; 110 * @func: the function code passed to KM; see crypt_s390_km_func
104 u64 low; 111 * @param: address of parameter block; see POP for details on each func
105}; 112 * @dest: address of destination memory area
106 113 * @src: address of source memory area
107/* 114 * @src_len: length of src operand in bytes
115 *
108 * Executes the KM (CIPHER MESSAGE) operation of the CPU. 116 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
109 * @param func: the function code passed to KM; see crypt_s390_km_func 117 *
110 * @param param: address of parameter block; see POP for details on each func 118 * Returns -1 for failure, 0 for the query func, number of processed
111 * @param dest: address of destination memory area 119 * bytes for encryption/decryption funcs
112 * @param src: address of source memory area
113 * @param src_len: length of src operand in bytes
114 * @returns < zero for failure, 0 for the query func, number of processed bytes
115 * for encryption/decryption funcs
116 */ 120 */
117static inline int 121static inline int crypt_s390_km(long func, void *param,
118crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len) 122 u8 *dest, const u8 *src, long src_len)
119{ 123{
120 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 124 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
121 register void* __param asm("1") = param; 125 register void *__param asm("1") = param;
122 register const u8* __src asm("2") = src; 126 register const u8 *__src asm("2") = src;
123 register long __src_len asm("3") = src_len; 127 register long __src_len asm("3") = src_len;
124 register u8* __dest asm("4") = dest; 128 register u8 *__dest asm("4") = dest;
125 int ret; 129 int ret;
126 130
127 asm volatile( 131 asm volatile(
128 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ 132 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
129 "1: brc 1,0b \n" /* handle partial completion */ 133 "1: brc 1,0b \n" /* handle partial completion */
130 " ahi %0,%h7\n" 134 " la %0,0\n"
131 "2: ahi %0,%h8\n" 135 "2:\n"
132 "3:\n" 136 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
133 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
134 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 137 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
135 : "d" (__func), "a" (__param), "0" (-EFAULT), 138 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
136 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
137 if (ret < 0) 139 if (ret < 0)
138 return ret; 140 return ret;
139 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 141 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
140} 142}
141 143
142/* 144/**
145 * crypt_s390_kmc:
146 * @func: the function code passed to KM; see crypt_s390_kmc_func
147 * @param: address of parameter block; see POP for details on each func
148 * @dest: address of destination memory area
149 * @src: address of source memory area
150 * @src_len: length of src operand in bytes
151 *
143 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. 152 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
144 * @param func: the function code passed to KM; see crypt_s390_kmc_func 153 *
145 * @param param: address of parameter block; see POP for details on each func 154 * Returns -1 for failure, 0 for the query func, number of processed
146 * @param dest: address of destination memory area 155 * bytes for encryption/decryption funcs
147 * @param src: address of source memory area
148 * @param src_len: length of src operand in bytes
149 * @returns < zero for failure, 0 for the query func, number of processed bytes
150 * for encryption/decryption funcs
151 */ 156 */
152static inline int 157static inline int crypt_s390_kmc(long func, void *param,
153crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len) 158 u8 *dest, const u8 *src, long src_len)
154{ 159{
155 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 160 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
156 register void* __param asm("1") = param; 161 register void *__param asm("1") = param;
157 register const u8* __src asm("2") = src; 162 register const u8 *__src asm("2") = src;
158 register long __src_len asm("3") = src_len; 163 register long __src_len asm("3") = src_len;
159 register u8* __dest asm("4") = dest; 164 register u8 *__dest asm("4") = dest;
160 int ret; 165 int ret;
161 166
162 asm volatile( 167 asm volatile(
163 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ 168 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
164 "1: brc 1,0b \n" /* handle partial completion */ 169 "1: brc 1,0b \n" /* handle partial completion */
165 " ahi %0,%h7\n" 170 " la %0,0\n"
166 "2: ahi %0,%h8\n" 171 "2:\n"
167 "3:\n" 172 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
168 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
169 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 173 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
170 : "d" (__func), "a" (__param), "0" (-EFAULT), 174 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
171 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
172 if (ret < 0) 175 if (ret < 0)
173 return ret; 176 return ret;
174 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 177 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
175} 178}
176 179
177/* 180/**
181 * crypt_s390_kimd:
182 * @func: the function code passed to KM; see crypt_s390_kimd_func
183 * @param: address of parameter block; see POP for details on each func
184 * @src: address of source memory area
185 * @src_len: length of src operand in bytes
186 *
178 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation 187 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
179 * of the CPU. 188 * of the CPU.
180 * @param func: the function code passed to KM; see crypt_s390_kimd_func 189 *
181 * @param param: address of parameter block; see POP for details on each func 190 * Returns -1 for failure, 0 for the query func, number of processed
182 * @param src: address of source memory area 191 * bytes for digest funcs
183 * @param src_len: length of src operand in bytes
184 * @returns < zero for failure, 0 for the query func, number of processed bytes
185 * for digest funcs
186 */ 192 */
187static inline int 193static inline int crypt_s390_kimd(long func, void *param,
188crypt_s390_kimd(long func, void* param, const u8* src, long src_len) 194 const u8 *src, long src_len)
189{ 195{
190 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 196 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
191 register void* __param asm("1") = param; 197 register void *__param asm("1") = param;
192 register const u8* __src asm("2") = src; 198 register const u8 *__src asm("2") = src;
193 register long __src_len asm("3") = src_len; 199 register long __src_len asm("3") = src_len;
194 int ret; 200 int ret;
195 201
196 asm volatile( 202 asm volatile(
197 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ 203 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
198 "1: brc 1,0b \n" /* handle partial completion */ 204 "1: brc 1,0b \n" /* handle partial completion */
199 " ahi %0,%h6\n" 205 " la %0,0\n"
200 "2: ahi %0,%h7\n" 206 "2:\n"
201 "3:\n" 207 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
202 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
203 : "=d" (ret), "+a" (__src), "+d" (__src_len) 208 : "=d" (ret), "+a" (__src), "+d" (__src_len)
204 : "d" (__func), "a" (__param), "0" (-EFAULT), 209 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
205 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
206 if (ret < 0) 210 if (ret < 0)
207 return ret; 211 return ret;
208 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 212 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
209} 213}
210 214
211/* 215/**
216 * crypt_s390_klmd:
217 * @func: the function code passed to KM; see crypt_s390_klmd_func
218 * @param: address of parameter block; see POP for details on each func
219 * @src: address of source memory area
220 * @src_len: length of src operand in bytes
221 *
212 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. 222 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
213 * @param func: the function code passed to KM; see crypt_s390_klmd_func 223 *
214 * @param param: address of parameter block; see POP for details on each func 224 * Returns -1 for failure, 0 for the query func, number of processed
215 * @param src: address of source memory area 225 * bytes for digest funcs
216 * @param src_len: length of src operand in bytes
217 * @returns < zero for failure, 0 for the query func, number of processed bytes
218 * for digest funcs
219 */ 226 */
220static inline int 227static inline int crypt_s390_klmd(long func, void *param,
221crypt_s390_klmd(long func, void* param, const u8* src, long src_len) 228 const u8 *src, long src_len)
222{ 229{
223 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 230 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
224 register void* __param asm("1") = param; 231 register void *__param asm("1") = param;
225 register const u8* __src asm("2") = src; 232 register const u8 *__src asm("2") = src;
226 register long __src_len asm("3") = src_len; 233 register long __src_len asm("3") = src_len;
227 int ret; 234 int ret;
228 235
229 asm volatile( 236 asm volatile(
230 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ 237 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
231 "1: brc 1,0b \n" /* handle partial completion */ 238 "1: brc 1,0b \n" /* handle partial completion */
232 " ahi %0,%h6\n" 239 " la %0,0\n"
233 "2: ahi %0,%h7\n" 240 "2:\n"
234 "3:\n" 241 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
235 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
236 : "=d" (ret), "+a" (__src), "+d" (__src_len) 242 : "=d" (ret), "+a" (__src), "+d" (__src_len)
237 : "d" (__func), "a" (__param), "0" (-EFAULT), 243 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
238 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
239 if (ret < 0) 244 if (ret < 0)
240 return ret; 245 return ret;
241 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 246 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
242} 247}
243 248
244/* 249/**
250 * crypt_s390_kmac:
 251 * @func: the function code passed to KMAC; see crypt_s390_kmac_func
252 * @param: address of parameter block; see POP for details on each func
253 * @src: address of source memory area
254 * @src_len: length of src operand in bytes
255 *
245 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation 256 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
246 * of the CPU. 257 * of the CPU.
247 * @param func: the function code passed to KM; see crypt_s390_klmd_func 258 *
248 * @param param: address of parameter block; see POP for details on each func 259 * Returns -1 for failure, 0 for the query func, number of processed
249 * @param src: address of source memory area 260 * bytes for digest funcs
250 * @param src_len: length of src operand in bytes
251 * @returns < zero for failure, 0 for the query func, number of processed bytes
252 * for digest funcs
253 */ 261 */
254static inline int 262static inline int crypt_s390_kmac(long func, void *param,
255crypt_s390_kmac(long func, void* param, const u8* src, long src_len) 263 const u8 *src, long src_len)
256{ 264{
257 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 265 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
258 register void* __param asm("1") = param; 266 register void *__param asm("1") = param;
259 register const u8* __src asm("2") = src; 267 register const u8 *__src asm("2") = src;
260 register long __src_len asm("3") = src_len; 268 register long __src_len asm("3") = src_len;
261 int ret; 269 int ret;
262 270
263 asm volatile( 271 asm volatile(
264 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ 272 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
265 "1: brc 1,0b \n" /* handle partial completion */ 273 "1: brc 1,0b \n" /* handle partial completion */
266 " ahi %0,%h6\n" 274 " la %0,0\n"
267 "2: ahi %0,%h7\n" 275 "2:\n"
268 "3:\n" 276 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
269 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
270 : "=d" (ret), "+a" (__src), "+d" (__src_len) 277 : "=d" (ret), "+a" (__src), "+d" (__src_len)
271 : "d" (__func), "a" (__param), "0" (-EFAULT), 278 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
272 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
273 if (ret < 0) 279 if (ret < 0)
274 return ret; 280 return ret;
275 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 281 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
276} 282}
277 283
278/** 284/**
285 * crypt_s390_func_available:
286 * @func: the function code of the specific function; 0 if op in general
287 *
279 * Tests if a specific crypto function is implemented on the machine. 288 * Tests if a specific crypto function is implemented on the machine.
280 * @param func: the function code of the specific function; 0 if op in general 289 *
281 * @return 1 if func available; 0 if func or op in general not available 290 * Returns 1 if func available; 0 if func or op in general not available
282 */ 291 */
283static inline int 292static inline int crypt_s390_func_available(int func)
284crypt_s390_func_available(int func)
285{ 293{
294 unsigned char status[16];
286 int ret; 295 int ret;
287 296
288 struct crypt_s390_query_status status = { 297 switch (func & CRYPT_S390_OP_MASK) {
289 .high = 0, 298 case CRYPT_S390_KM:
290 .low = 0 299 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
291 }; 300 break;
292 switch (func & CRYPT_S390_OP_MASK){ 301 case CRYPT_S390_KMC:
293 case CRYPT_S390_KM: 302 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
294 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); 303 break;
295 break; 304 case CRYPT_S390_KIMD:
296 case CRYPT_S390_KMC: 305 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
297 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); 306 break;
298 break; 307 case CRYPT_S390_KLMD:
299 case CRYPT_S390_KIMD: 308 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
300 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); 309 break;
301 break; 310 case CRYPT_S390_KMAC:
302 case CRYPT_S390_KLMD: 311 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
303 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); 312 break;
304 break; 313 default:
305 case CRYPT_S390_KMAC: 314 return 0;
306 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
307 break;
308 default:
309 ret = 0;
310 return ret;
311 }
312 if (ret >= 0){
313 func &= CRYPT_S390_FUNC_MASK;
314 func &= 0x7f; //mask modifier bit
315 if (func < 64){
316 ret = (status.high >> (64 - func - 1)) & 0x1;
317 } else {
318 ret = (status.low >> (128 - func - 1)) & 0x1;
319 }
320 } else {
321 ret = 0;
322 } 315 }
323 return ret; 316 if (ret < 0)
317 return 0;
318 func &= CRYPT_S390_FUNC_MASK;
319 func &= 0x7f; /* mask modifier bit */
320 return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
324} 321}
325 322
326#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H 323#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
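The query status block is now a plain 16 byte array instead of a struct with two u64 words, and crypt_s390_func_available() tests one bit per function code, most significant bit first. A user-space sketch of just that bit test with a fabricated status block; the real block is filled by the *_QUERY instruction calls above:

	#include <stdio.h>
	#include <string.h>

	static int func_bit_set(const unsigned char status[16], unsigned int func)
	{
		func &= 0x7f;	/* mask the decipher modifier bit */
		return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
	}

	int main(void)
	{
		unsigned char status[16];

		memset(status, 0, sizeof(status));
		status[0] = 0x80 | 0x40;	/* pretend functions 0 (query) and 1 are available */

		printf("func 1:  %d\n", func_bit_set(status, 1));	/* prints 1 */
		printf("func 18: %d\n", func_bit_set(status, 18));	/* prints 0 */
		return 0;
	}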
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
deleted file mode 100644
index 54fb11d7fadd..000000000000
--- a/arch/s390/crypto/crypt_s390_query.c
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for s390 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_s390.h"
21
22static void query_available_functions(void)
23{
24 printk(KERN_INFO "#####################\n");
25
26 /* query available KM functions */
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_s390_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_s390_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_s390_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_s390_func_available(KM_TDEA_192_ENCRYPT));
35 printk(KERN_INFO "KM_AES_128: %d\n",
36 crypt_s390_func_available(KM_AES_128_ENCRYPT));
37 printk(KERN_INFO "KM_AES_192: %d\n",
38 crypt_s390_func_available(KM_AES_192_ENCRYPT));
39 printk(KERN_INFO "KM_AES_256: %d\n",
40 crypt_s390_func_available(KM_AES_256_ENCRYPT));
41
42 /* query available KMC functions */
43 printk(KERN_INFO "KMC_QUERY: %d\n",
44 crypt_s390_func_available(KMC_QUERY));
45 printk(KERN_INFO "KMC_DEA: %d\n",
46 crypt_s390_func_available(KMC_DEA_ENCRYPT));
47 printk(KERN_INFO "KMC_TDEA_128: %d\n",
48 crypt_s390_func_available(KMC_TDEA_128_ENCRYPT));
49 printk(KERN_INFO "KMC_TDEA_192: %d\n",
50 crypt_s390_func_available(KMC_TDEA_192_ENCRYPT));
51 printk(KERN_INFO "KMC_AES_128: %d\n",
52 crypt_s390_func_available(KMC_AES_128_ENCRYPT));
53 printk(KERN_INFO "KMC_AES_192: %d\n",
54 crypt_s390_func_available(KMC_AES_192_ENCRYPT));
55 printk(KERN_INFO "KMC_AES_256: %d\n",
56 crypt_s390_func_available(KMC_AES_256_ENCRYPT));
57
58 /* query available KIMD functions */
59 printk(KERN_INFO "KIMD_QUERY: %d\n",
60 crypt_s390_func_available(KIMD_QUERY));
61 printk(KERN_INFO "KIMD_SHA_1: %d\n",
62 crypt_s390_func_available(KIMD_SHA_1));
63 printk(KERN_INFO "KIMD_SHA_256: %d\n",
64 crypt_s390_func_available(KIMD_SHA_256));
65
66 /* query available KLMD functions */
67 printk(KERN_INFO "KLMD_QUERY: %d\n",
68 crypt_s390_func_available(KLMD_QUERY));
69 printk(KERN_INFO "KLMD_SHA_1: %d\n",
70 crypt_s390_func_available(KLMD_SHA_1));
71 printk(KERN_INFO "KLMD_SHA_256: %d\n",
72 crypt_s390_func_available(KLMD_SHA_256));
73
74 /* query available KMAC functions */
75 printk(KERN_INFO "KMAC_QUERY: %d\n",
76 crypt_s390_func_available(KMAC_QUERY));
77 printk(KERN_INFO "KMAC_DEA: %d\n",
78 crypt_s390_func_available(KMAC_DEA));
79 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
80 crypt_s390_func_available(KMAC_TDEA_128));
81 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
82 crypt_s390_func_available(KMAC_TDEA_192));
83}
84
85static int init(void)
86{
87 struct crypt_s390_query_status status = {
88 .high = 0,
89 .low = 0
90 };
91
92 printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
93 crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
94 printk(KERN_INFO "KM:\t%016llx %016llx\n",
95 (unsigned long long) status.high,
96 (unsigned long long) status.low);
97 status.high = status.low = 0;
98 crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
99 printk(KERN_INFO "KMC:\t%016llx %016llx\n",
100 (unsigned long long) status.high,
101 (unsigned long long) status.low);
102 status.high = status.low = 0;
103 crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
104 printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
105 (unsigned long long) status.high,
106 (unsigned long long) status.low);
107 status.high = status.low = 0;
108 crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
109 printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
110 (unsigned long long) status.high,
111 (unsigned long long) status.low);
112 status.high = status.low = 0;
113 crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
114 printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
115 (unsigned long long) status.high,
116 (unsigned long long) status.low);
117
118 query_available_functions();
119 return -ECANCELED;
120}
121
122static void __exit cleanup(void)
123{
124}
125
126module_init(init);
127module_exit(cleanup);
128
129MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
index e3f5c5f238fe..5706af266442 100644
--- a/arch/s390/crypto/des_check_key.c
+++ b/arch/s390/crypto/des_check_key.c
@@ -10,8 +10,9 @@
10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. 10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL.
11 * 11 *
12 * s390 Version: 12 * s390 Version:
13 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 13 * Copyright IBM Corp. 2003
14 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 14 * Author(s): Thomas Spatzier
15 * Jan Glauber (jan.glauber@de.ibm.com)
15 * 16 *
16 * Derived from "crypto/des.c" 17 * Derived from "crypto/des.c"
17 * Copyright (c) 1992 Dana L. How. 18 * Copyright (c) 1992 Dana L. How.
@@ -30,6 +31,7 @@
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/crypto.h> 33#include <linux/crypto.h>
34#include "crypto_des.h"
33 35
34#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) 36#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o))
35 37
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 2aba04852fe3..ea22707f435f 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,9 +3,9 @@
3 * 3 *
4 * s390 implementation of the DES Cipher Algorithm. 4 * s390 implementation of the DES Cipher Algorithm.
5 * 5 *
6 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * 8 * Jan Glauber (jan.glauber@de.ibm.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -557,7 +557,7 @@ static int init(void)
557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || 557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || 558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) 559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
560 return -ENOSYS; 560 return -EOPNOTSUPP;
561 561
562 ret = crypto_register_alg(&des_alg); 562 ret = crypto_register_alg(&des_alg);
563 if (ret) 563 if (ret)
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
new file mode 100644
index 000000000000..8eb3a1aedc22
--- /dev/null
+++ b/arch/s390/crypto/prng.c
@@ -0,0 +1,213 @@
1/*
2 * Copyright IBM Corp. 2006,2007
3 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
4 * Driver for the s390 pseudo random number generator
5 */
6#include <linux/fs.h>
7#include <linux/init.h>
8#include <linux/kernel.h>
9#include <linux/miscdevice.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/random.h>
13#include <asm/debug.h>
14#include <asm/uaccess.h>
15
16#include "crypt_s390.h"
17
18MODULE_LICENSE("GPL");
19MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>");
20MODULE_DESCRIPTION("s390 PRNG interface");
21
22static int prng_chunk_size = 256;
23module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH);
24MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
25
26static int prng_entropy_limit = 4096;
27module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
28MODULE_PARM_DESC(prng_entropy_limit,
 29		 "PRNG add entropy after that many bytes were produced");
30
31/*
32 * Any one who considers arithmetical methods of producing random digits is,
33 * of course, in a state of sin. -- John von Neumann
34 */
35
36struct s390_prng_data {
37 unsigned long count; /* how many bytes were produced */
38 char *buf;
39};
40
41static struct s390_prng_data *p;
42
43/* copied from libica, use a non-zero initial parameter block */
44static unsigned char parm_block[32] = {
450x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4,
460x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0,
47};
48
49static int prng_open(struct inode *inode, struct file *file)
50{
51 return nonseekable_open(inode, file);
52}
53
54static void prng_add_entropy(void)
55{
56 __u64 entropy[4];
57 unsigned int i;
58 int ret;
59
60 for (i = 0; i < 16; i++) {
61 ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy,
62 (char *)entropy, sizeof(entropy));
63 BUG_ON(ret < 0 || ret != sizeof(entropy));
64 memcpy(parm_block, entropy, sizeof(entropy));
65 }
66}
67
68static void prng_seed(int nbytes)
69{
70 char buf[16];
71 int i = 0;
72
73 BUG_ON(nbytes > 16);
74 get_random_bytes(buf, nbytes);
75
76 /* Add the entropy */
77 while (nbytes >= 8) {
 78		*((__u64 *)parm_block) ^= *((__u64 *)(buf + i));
79 prng_add_entropy();
80 i += 8;
81 nbytes -= 8;
82 }
83 prng_add_entropy();
84}
85
86static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
87 loff_t *ppos)
88{
89 int chunk, n;
90 int ret = 0;
91 int tmp;
92
 93	/* nbytes can be arbitrarily long, we split it into chunks */
94 while (nbytes) {
95 /* same as in extract_entropy_user in random.c */
96 if (need_resched()) {
97 if (signal_pending(current)) {
98 if (ret == 0)
99 ret = -ERESTARTSYS;
100 break;
101 }
102 schedule();
103 }
104
105 /*
106 * we lose some random bytes if an attacker issues
107 * reads < 8 bytes, but we don't care
108 */
109 chunk = min_t(int, nbytes, prng_chunk_size);
110
111 /* PRNG only likes multiples of 8 bytes */
112 n = (chunk + 7) & -8;
113
114 if (p->count > prng_entropy_limit)
115 prng_seed(8);
116
117 /* if the CPU supports PRNG stckf is present too */
118 asm volatile(".insn s,0xb27c0000,%0"
119 : "=m" (*((unsigned long long *)p->buf)) : : "cc");
120
121 /*
122 * Beside the STCKF the input for the TDES-EDE is the output
123 * of the last operation. We differ here from X9.17 since we
124 * only store one timestamp into the buffer. Padding the whole
125 * buffer with timestamps does not improve security, since
126 * successive stckf have nearly constant offsets.
127 * If an attacker knows the first timestamp it would be
128 * trivial to guess the additional values. One timestamp
129 * is therefore enough and still guarantees unique input values.
130 *
131 * Note: you can still get strict X9.17 conformity by setting
132 * prng_chunk_size to 8 bytes.
133 */
134 tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n);
135 BUG_ON((tmp < 0) || (tmp != n));
136
137 p->count += n;
138
139 if (copy_to_user(ubuf, p->buf, chunk))
140 return -EFAULT;
141
142 nbytes -= chunk;
143 ret += chunk;
144 ubuf += chunk;
145 }
146 return ret;
147}
148
149static struct file_operations prng_fops = {
150 .owner = THIS_MODULE,
151 .open = &prng_open,
152 .release = NULL,
153 .read = &prng_read,
154};
155
156static struct miscdevice prng_dev = {
157 .name = "prandom",
158 .minor = MISC_DYNAMIC_MINOR,
159 .fops = &prng_fops,
160};
161
162static int __init prng_init(void)
163{
164 int ret;
165
166 /* check if the CPU has a PRNG */
167 if (!crypt_s390_func_available(KMC_PRNG))
168 return -EOPNOTSUPP;
169
170 if (prng_chunk_size < 8)
171 return -EINVAL;
172
173 p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL);
174 if (!p)
175 return -ENOMEM;
176 p->count = 0;
177
178 p->buf = kmalloc(prng_chunk_size, GFP_KERNEL);
179 if (!p->buf) {
180 ret = -ENOMEM;
181 goto out_free;
182 }
183
184 /* initialize the PRNG, add 128 bits of entropy */
185 prng_seed(16);
186
187 ret = misc_register(&prng_dev);
188 if (ret) {
189 printk(KERN_WARNING
190 "Could not register misc device for PRNG.\n");
191 goto out_buf;
192 }
193 return 0;
194
195out_buf:
196 kfree(p->buf);
197out_free:
198 kfree(p);
199 return ret;
200}
201
202static void __exit prng_exit(void)
203{
204 /* wipe me */
205 memset(p->buf, 0, prng_chunk_size);
206 kfree(p->buf);
207 kfree(p);
208
209 misc_deregister(&prng_dev);
210}
211
212module_init(prng_init);
213module_exit(prng_exit);
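prng_read() serves arbitrarily large requests in prng_chunk_size pieces, reseeding from get_random_bytes() every prng_entropy_limit bytes. A hedged user-space sketch of a consumer of the new /dev/prandom node; the 32 byte request size is arbitrary:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[32];
		ssize_t i, n;
		int fd = open("/dev/prandom", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/prandom");
			return 1;
		}
		n = read(fd, buf, sizeof(buf));
		for (i = 0; i < n; i++)
			printf("%02x", buf[i]);
		printf("\n");
		close(fd);
		return 0;
	}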
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 49ca8690ee39..969639f31977 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -8,8 +8,9 @@
8 * implementation written by Steve Reid. 8 * implementation written by Steve Reid.
9 * 9 *
10 * s390 Version: 10 * s390 Version:
11 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 11 * Copyright IBM Corp. 2003,2007
12 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 12 * Author(s): Thomas Spatzier
13 * Jan Glauber (jan.glauber@de.ibm.com)
13 * 14 *
14 * Derived from "crypto/sha1.c" 15 * Derived from "crypto/sha1.c"
15 * Copyright (c) Alan Smithee. 16 * Copyright (c) Alan Smithee.
@@ -43,16 +44,14 @@ struct crypt_s390_sha1_ctx {
43static void sha1_init(struct crypto_tfm *tfm) 44static void sha1_init(struct crypto_tfm *tfm)
44{ 45{
45 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); 46 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
46 static const u32 initstate[5] = { 47
47 0x67452301, 48 ctx->state[0] = 0x67452301;
48 0xEFCDAB89, 49 ctx->state[1] = 0xEFCDAB89;
49 0x98BADCFE, 50 ctx->state[2] = 0x98BADCFE;
50 0x10325476, 51 ctx->state[3] = 0x10325476;
51 0xC3D2E1F0 52 ctx->state[4] = 0xC3D2E1F0;
52 };
53 53
54 ctx->count = 0; 54 ctx->count = 0;
55 memcpy(ctx->state, &initstate, sizeof(initstate));
56 ctx->buf_len = 0; 55 ctx->buf_len = 0;
57} 56}
58 57
@@ -63,13 +62,13 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
63 long imd_len; 62 long imd_len;
64 63
65 sctx = crypto_tfm_ctx(tfm); 64 sctx = crypto_tfm_ctx(tfm);
66 sctx->count += len * 8; //message bit length 65 sctx->count += len * 8; /* message bit length */
67 66
68 //anything in buffer yet? -> must be completed 67 /* anything in buffer yet? -> must be completed */
69 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { 68 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
70 //complete full block and hash 69 /* complete full block and hash */
71 memcpy(sctx->buffer + sctx->buf_len, data, 70 memcpy(sctx->buffer + sctx->buf_len, data,
72 SHA1_BLOCK_SIZE - sctx->buf_len); 71 SHA1_BLOCK_SIZE - sctx->buf_len);
73 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, 72 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
74 SHA1_BLOCK_SIZE); 73 SHA1_BLOCK_SIZE);
75 data += SHA1_BLOCK_SIZE - sctx->buf_len; 74 data += SHA1_BLOCK_SIZE - sctx->buf_len;
@@ -77,37 +76,36 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
77 sctx->buf_len = 0; 76 sctx->buf_len = 0;
78 } 77 }
79 78
80 //rest of data contains full blocks? 79 /* rest of data contains full blocks? */
81 imd_len = len & ~0x3ful; 80 imd_len = len & ~0x3ful;
82 if (imd_len){ 81 if (imd_len) {
83 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); 82 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
84 data += imd_len; 83 data += imd_len;
85 len -= imd_len; 84 len -= imd_len;
86 } 85 }
87 //anything left? store in buffer 86 /* anything left? store in buffer */
88 if (len){ 87 if (len) {
89 memcpy(sctx->buffer + sctx->buf_len , data, len); 88 memcpy(sctx->buffer + sctx->buf_len , data, len);
90 sctx->buf_len += len; 89 sctx->buf_len += len;
91 } 90 }
92} 91}
93 92
94 93
95static void 94static void pad_message(struct crypt_s390_sha1_ctx* sctx)
96pad_message(struct crypt_s390_sha1_ctx* sctx)
97{ 95{
98 int index; 96 int index;
99 97
100 index = sctx->buf_len; 98 index = sctx->buf_len;
101 sctx->buf_len = (sctx->buf_len < 56)? 99 sctx->buf_len = (sctx->buf_len < 56) ?
102 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; 100 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
103 //start pad with 1 101 /* start pad with 1 */
104 sctx->buffer[index] = 0x80; 102 sctx->buffer[index] = 0x80;
105 //pad with zeros 103 /* pad with zeros */
106 index++; 104 index++;
107 memset(sctx->buffer + index, 0x00, sctx->buf_len - index); 105 memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
108 //append length 106 /* append length */
109 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, 107 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
110 sizeof sctx->count); 108 sizeof sctx->count);
111} 109}
112 110
113/* Add padding and return the message digest. */ 111/* Add padding and return the message digest. */
@@ -115,47 +113,40 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
115{ 113{
116 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 114 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
117 115
118 //must perform manual padding 116 /* must perform manual padding */
119 pad_message(sctx); 117 pad_message(sctx);
120 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); 118 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
121 //copy digest to out 119 /* copy digest to out */
122 memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
123 /* Wipe context */ 121 /* wipe context */
124 memset(sctx, 0, sizeof *sctx); 122 memset(sctx, 0, sizeof *sctx);
125} 123}
126 124
127static struct crypto_alg alg = { 125static struct crypto_alg alg = {
128 .cra_name = "sha1", 126 .cra_name = "sha1",
129 .cra_driver_name = "sha1-s390", 127 .cra_driver_name= "sha1-s390",
130 .cra_priority = CRYPT_S390_PRIORITY, 128 .cra_priority = CRYPT_S390_PRIORITY,
131 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
132 .cra_blocksize = SHA1_BLOCK_SIZE, 130 .cra_blocksize = SHA1_BLOCK_SIZE,
133 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), 131 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
134 .cra_module = THIS_MODULE, 132 .cra_module = THIS_MODULE,
135 .cra_list = LIST_HEAD_INIT(alg.cra_list), 133 .cra_list = LIST_HEAD_INIT(alg.cra_list),
136 .cra_u = { .digest = { 134 .cra_u = { .digest = {
137 .dia_digestsize = SHA1_DIGEST_SIZE, 135 .dia_digestsize = SHA1_DIGEST_SIZE,
138 .dia_init = sha1_init, 136 .dia_init = sha1_init,
139 .dia_update = sha1_update, 137 .dia_update = sha1_update,
140 .dia_final = sha1_final } } 138 .dia_final = sha1_final } }
141}; 139};
142 140
143static int 141static int __init init(void)
144init(void)
145{ 142{
146 int ret = -ENOSYS; 143 if (!crypt_s390_func_available(KIMD_SHA_1))
144 return -EOPNOTSUPP;
147 145
148 if (crypt_s390_func_available(KIMD_SHA_1)){ 146 return crypto_register_alg(&alg);
149 ret = crypto_register_alg(&alg);
150 if (ret == 0){
151 printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n");
152 }
153 }
154 return ret;
155} 147}
156 148
157static void __exit 149static void __exit fini(void)
158fini(void)
159{ 150{
160 crypto_unregister_alg(&alg); 151 crypto_unregister_alg(&alg);
161} 152}
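pad_message() appends the 0x80 marker, zero fill and the 64 bit message length, as FIPS 180-1 requires, before the final KIMD call. A user-space sketch of the resulting layout for a 3 byte message; note the kernel code memcpy()s the count in host byte order, which is the required big-endian on s390 but would need swapping on a little-endian test machine:

	#include <stdio.h>
	#include <string.h>

	#define SHA1_BLOCK_SIZE 64

	int main(void)
	{
		unsigned char buffer[2 * SHA1_BLOCK_SIZE];
		unsigned long long bit_count = 3 * 8;	/* a 3 byte message */
		int index = 3;				/* bytes already buffered */
		int buf_len = (index < 56) ? SHA1_BLOCK_SIZE : 2 * SHA1_BLOCK_SIZE;

		memset(buffer, 0, sizeof(buffer));
		buffer[index] = 0x80;				/* one 1 bit, then zeros */
		memcpy(buffer + buf_len - 8, &bit_count, 8);	/* 64 bit length field */
		printf("padded block length: %d bytes\n", buf_len);	/* prints 64 */
		return 0;
	}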
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 8e4e67503fe7..78436c696d37 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the SHA256 Secure Hash Algorithm. 4 * s390 implementation of the SHA256 Secure Hash Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/sha256.c" 10 * Derived from "crypto/sha256.c"
@@ -143,15 +143,10 @@ static struct crypto_alg alg = {
143 143
144static int init(void) 144static int init(void)
145{ 145{
146 int ret;
147
148 if (!crypt_s390_func_available(KIMD_SHA_256)) 146 if (!crypt_s390_func_available(KIMD_SHA_256))
149 return -ENOSYS; 147 return -EOPNOTSUPP;
150 148
151 ret = crypto_register_alg(&alg); 149 return crypto_register_alg(&alg);
152 if (ret != 0)
153 printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded.");
154 return ret;
155} 150}
156 151
157static void __exit fini(void) 152static void __exit fini(void)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 5368cf4a350e..7c621b8ef683 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -108,6 +108,8 @@ CONFIG_DEFAULT_MIGRATION_COST=1000000
108CONFIG_COMPAT=y 108CONFIG_COMPAT=y
109CONFIG_SYSVIPC_COMPAT=y 109CONFIG_SYSVIPC_COMPAT=y
110CONFIG_AUDIT_ARCH=y 110CONFIG_AUDIT_ARCH=y
111CONFIG_S390_SWITCH_AMODE=y
112CONFIG_S390_EXEC_PROTECT=y
111 113
112# 114#
113# Code generation options 115# Code generation options
@@ -431,7 +433,6 @@ CONFIG_TN3270_CONSOLE=y
431CONFIG_TN3215=y 433CONFIG_TN3215=y
432CONFIG_TN3215_CONSOLE=y 434CONFIG_TN3215_CONSOLE=y
433CONFIG_CCW_CONSOLE=y 435CONFIG_CCW_CONSOLE=y
434CONFIG_SCLP=y
435CONFIG_SCLP_TTY=y 436CONFIG_SCLP_TTY=y
436CONFIG_SCLP_CONSOLE=y 437CONFIG_SCLP_CONSOLE=y
437CONFIG_SCLP_VT220_TTY=y 438CONFIG_SCLP_VT220_TTY=y
@@ -724,9 +725,7 @@ CONFIG_CRYPTO_MANAGER=y
724# CONFIG_CRYPTO_MD4 is not set 725# CONFIG_CRYPTO_MD4 is not set
725# CONFIG_CRYPTO_MD5 is not set 726# CONFIG_CRYPTO_MD5 is not set
726# CONFIG_CRYPTO_SHA1 is not set 727# CONFIG_CRYPTO_SHA1 is not set
727# CONFIG_CRYPTO_SHA1_S390 is not set
728# CONFIG_CRYPTO_SHA256 is not set 728# CONFIG_CRYPTO_SHA256 is not set
729# CONFIG_CRYPTO_SHA256_S390 is not set
730# CONFIG_CRYPTO_SHA512 is not set 729# CONFIG_CRYPTO_SHA512 is not set
731# CONFIG_CRYPTO_WP512 is not set 730# CONFIG_CRYPTO_WP512 is not set
732# CONFIG_CRYPTO_TGR192 is not set 731# CONFIG_CRYPTO_TGR192 is not set
@@ -735,12 +734,10 @@ CONFIG_CRYPTO_ECB=m
735CONFIG_CRYPTO_CBC=y 734CONFIG_CRYPTO_CBC=y
736# CONFIG_CRYPTO_LRW is not set 735# CONFIG_CRYPTO_LRW is not set
737# CONFIG_CRYPTO_DES is not set 736# CONFIG_CRYPTO_DES is not set
738# CONFIG_CRYPTO_DES_S390 is not set
739# CONFIG_CRYPTO_BLOWFISH is not set 737# CONFIG_CRYPTO_BLOWFISH is not set
740# CONFIG_CRYPTO_TWOFISH is not set 738# CONFIG_CRYPTO_TWOFISH is not set
741# CONFIG_CRYPTO_SERPENT is not set 739# CONFIG_CRYPTO_SERPENT is not set
742# CONFIG_CRYPTO_AES is not set 740# CONFIG_CRYPTO_AES is not set
743# CONFIG_CRYPTO_AES_S390 is not set
744# CONFIG_CRYPTO_CAST5 is not set 741# CONFIG_CRYPTO_CAST5 is not set
745# CONFIG_CRYPTO_CAST6 is not set 742# CONFIG_CRYPTO_CAST6 is not set
746# CONFIG_CRYPTO_TEA is not set 743# CONFIG_CRYPTO_TEA is not set
@@ -755,6 +752,11 @@ CONFIG_CRYPTO_CBC=y
755# 752#
756# Hardware crypto devices 753# Hardware crypto devices
757# 754#
755# CONFIG_CRYPTO_SHA1_S390 is not set
756# CONFIG_CRYPTO_SHA256_S390 is not set
757# CONFIG_CRYPTO_DES_S390 is not set
758# CONFIG_CRYPTO_AES_S390 is not set
759CONFIG_S390_PRNG=m
758 760
759# 761#
760# Library routines 762# Library routines
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index f4b00cd81f7c..b08d2abf6178 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o 5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
6 6
7s390_hypfs-objs := inode.o hypfs_diag.o 7s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index f3dbd91965c6..aea572009d60 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -27,4 +27,13 @@ extern struct dentry *hypfs_create_str(struct super_block *sb,
27 struct dentry *dir, const char *name, 27 struct dentry *dir, const char *name,
28 char *string); 28 char *string);
29 29
30/* LPAR Hypervisor */
31extern int hypfs_diag_init(void);
32extern void hypfs_diag_exit(void);
33extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
34
35/* VM Hypervisor */
36extern int hypfs_vm_init(void);
37extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
38
30#endif /* _HYPFS_H_ */ 39#endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
deleted file mode 100644
index 256b384aebe1..000000000000
--- a/arch/s390/hypfs/hypfs_diag.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * arch/s390/hypfs_diag.h
3 * Hypervisor filesystem for Linux on s390.
4 *
5 * Copyright (C) IBM Corp. 2006
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#ifndef _HYPFS_DIAG_H_
10#define _HYPFS_DIAG_H_
11
12extern int hypfs_diag_init(void);
13extern void hypfs_diag_exit(void);
14extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
15
16#endif /* _HYPFS_DIAG_H_ */
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
new file mode 100644
index 000000000000..d01fc8f799f0
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -0,0 +1,231 @@
1/*
2 * Hypervisor filesystem for Linux on s390. z/VM implementation.
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/vmalloc.h>
12#include <asm/ebcdic.h>
13#include "hypfs.h"
14
15#define NAME_LEN 8
16
17static char local_guest[] = " ";
18static char all_guests[] = "* ";
19static char *guest_query;
20
21struct diag2fc_data {
22 __u32 version;
23 __u32 flags;
24 __u64 used_cpu;
25 __u64 el_time;
26 __u64 mem_min_kb;
27 __u64 mem_max_kb;
28 __u64 mem_share_kb;
29 __u64 mem_used_kb;
30 __u32 pcpus;
31 __u32 lcpus;
32 __u32 vcpus;
33 __u32 cpu_min;
34 __u32 cpu_max;
35 __u32 cpu_shares;
36 __u32 cpu_use_samp;
37 __u32 cpu_delay_samp;
38 __u32 page_wait_samp;
39 __u32 idle_samp;
40 __u32 other_samp;
41 __u32 total_samp;
42 char guest_name[NAME_LEN];
43};
44
45struct diag2fc_parm_list {
46 char userid[NAME_LEN];
47 char aci_grp[NAME_LEN];
48 __u64 addr;
49 __u32 size;
50 __u32 fmt;
51};
52
53static int diag2fc(int size, char* query, void *addr)
54{
55 unsigned long residual_cnt;
56 unsigned long rc;
57 struct diag2fc_parm_list parm_list;
58
59 memcpy(parm_list.userid, query, NAME_LEN);
60 ASCEBC(parm_list.userid, NAME_LEN);
61 parm_list.addr = (unsigned long) addr ;
62 parm_list.size = size;
63 parm_list.fmt = 0x02;
64 memset(parm_list.aci_grp, 0x40, NAME_LEN);
65 rc = -1;
66
67 asm volatile(
68 " diag %0,%1,0x2fc\n"
69 "0:\n"
70 EX_TABLE(0b,0b)
71 : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory");
72
73 if ((rc != 0 ) && (rc != -2))
74 return rc;
75 else
76 return -residual_cnt;
77}
78
79static struct diag2fc_data *diag2fc_store(char *query, int *count)
80{
81 int size;
82 struct diag2fc_data *data;
83
84 do {
85 size = diag2fc(0, query, NULL);
86 if (size < 0)
87 return ERR_PTR(-EACCES);
88 data = vmalloc(size);
89 if (!data)
90 return ERR_PTR(-ENOMEM);
91 if (diag2fc(size, query, data) == 0)
92 break;
93 vfree(data);
94 } while (1);
95 *count = (size / sizeof(*data));
96
97 return data;
98}
99
100static void diag2fc_free(void *data)
101{
102 vfree(data);
103}
104
105#define ATTRIBUTE(sb, dir, name, member) \
106do { \
107 void *rc; \
108 rc = hypfs_create_u64(sb, dir, name, member); \
109 if (IS_ERR(rc)) \
110 return PTR_ERR(rc); \
111} while(0)
112
113static int hpyfs_vm_create_guest(struct super_block *sb,
114 struct dentry *systems_dir,
115 struct diag2fc_data *data)
116{
117 char guest_name[NAME_LEN + 1] = {};
118 struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
119 int dedicated_flag, capped_value;
120
121 capped_value = (data->flags & 0x00000006) >> 1;
122 dedicated_flag = (data->flags & 0x00000008) >> 3;
123
124 /* guest dir */
125 memcpy(guest_name, data->guest_name, NAME_LEN);
126 EBCASC(guest_name, NAME_LEN);
127 strstrip(guest_name);
128 guest_dir = hypfs_mkdir(sb, systems_dir, guest_name);
129 if (IS_ERR(guest_dir))
130 return PTR_ERR(guest_dir);
131 ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time);
132
133 /* logical cpu information */
134 cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus");
135 if (IS_ERR(cpus_dir))
136 return PTR_ERR(cpus_dir);
137 ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu);
138 ATTRIBUTE(sb, cpus_dir, "capped", capped_value);
139 ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag);
140 ATTRIBUTE(sb, cpus_dir, "count", data->vcpus);
141 ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min);
142 ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max);
143 ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares);
144
145 /* memory information */
146 mem_dir = hypfs_mkdir(sb, guest_dir, "mem");
147 if (IS_ERR(mem_dir))
148 return PTR_ERR(mem_dir);
149 ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb);
150 ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb);
151 ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb);
152 ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb);
153
154 /* samples */
155 samples_dir = hypfs_mkdir(sb, guest_dir, "samples");
156 if (IS_ERR(samples_dir))
157 return PTR_ERR(samples_dir);
158 ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp);
159 ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp);
160 ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp);
161 ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp);
162 ATTRIBUTE(sb, samples_dir, "other", data->other_samp);
163 ATTRIBUTE(sb, samples_dir, "total", data->total_samp);
164 return 0;
165}
166
167int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
168{
169 struct dentry *dir, *file;
170 struct diag2fc_data *data;
171 int rc, i, count = 0;
172
173 data = diag2fc_store(guest_query, &count);
174 if (IS_ERR(data))
175 return PTR_ERR(data);
176
 177	/* Hypervisor Info */

178 dir = hypfs_mkdir(sb, root, "hyp");
179 if (IS_ERR(dir)) {
180 rc = PTR_ERR(dir);
181 goto failed;
182 }
183 file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor");
184 if (IS_ERR(file)) {
185 rc = PTR_ERR(file);
186 goto failed;
187 }
188
189 /* physical cpus */
190 dir = hypfs_mkdir(sb, root, "cpus");
191 if (IS_ERR(dir)) {
192 rc = PTR_ERR(dir);
193 goto failed;
194 }
195 file = hypfs_create_u64(sb, dir, "count", data->lcpus);
196 if (IS_ERR(file)) {
197 rc = PTR_ERR(file);
198 goto failed;
199 }
200
201 /* guests */
202 dir = hypfs_mkdir(sb, root, "systems");
203 if (IS_ERR(dir)) {
204 rc = PTR_ERR(dir);
205 goto failed;
206 }
207
208 for (i = 0; i < count; i++) {
209 rc = hpyfs_vm_create_guest(sb, dir, &(data[i]));
210 if (rc)
211 goto failed;
212 }
213 diag2fc_free(data);
214 return 0;
215
216failed:
217 diag2fc_free(data);
218 return rc;
219}
220
221int hypfs_vm_init(void)
222{
223 if (diag2fc(0, all_guests, NULL) > 0)
224 guest_query = all_guests;
225 else if (diag2fc(0, local_guest, NULL) > 0)
226 guest_query = local_guest;
227 else
228 return -EACCES;
229
230 return 0;
231}
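Editor's note: a small illustration (not part of the patch) of how the diag2fc_store()/diag2fc_free() pair above is meant to be consumed: one sizing call, then a walk over the returned guest array. The helper name count_dedicated_guests is hypothetical; the flag mask matches the decoding done in hpyfs_vm_create_guest():

	static int count_dedicated_guests(void)
	{
		struct diag2fc_data *data;
		int i, count, dedicated = 0;

		data = diag2fc_store(guest_query, &count);
		if (IS_ERR(data))
			return PTR_ERR(data);		/* -EACCES or -ENOMEM */

		for (i = 0; i < count; i++)
			if (data[i].flags & 0x00000008)	/* dedicated bit, bit 3 */
				dedicated++;

		diag2fc_free(data);
		return dedicated;
	}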
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index b6716c4b9934..a4fda7b53640 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <asm/ebcdic.h> 20#include <asm/ebcdic.h>
21#include "hypfs.h" 21#include "hypfs.h"
22#include "hypfs_diag.h"
23 22
24#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ 23#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
25#define TMP_SIZE 64 /* size of temporary buffers */ 24#define TMP_SIZE 64 /* size of temporary buffers */
@@ -192,7 +191,10 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
192 goto out; 191 goto out;
193 } 192 }
194 hypfs_delete_tree(sb->s_root); 193 hypfs_delete_tree(sb->s_root);
195 rc = hypfs_diag_create_files(sb, sb->s_root); 194 if (MACHINE_IS_VM)
195 rc = hypfs_vm_create_files(sb, sb->s_root);
196 else
197 rc = hypfs_diag_create_files(sb, sb->s_root);
196 if (rc) { 198 if (rc) {
197 printk(KERN_ERR "hypfs: Update failed\n"); 199 printk(KERN_ERR "hypfs: Update failed\n");
198 hypfs_delete_tree(sb->s_root); 200 hypfs_delete_tree(sb->s_root);
@@ -289,7 +291,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
289 rc = -ENOMEM; 291 rc = -ENOMEM;
290 goto err_alloc; 292 goto err_alloc;
291 } 293 }
292 rc = hypfs_diag_create_files(sb, root_dentry); 294 if (MACHINE_IS_VM)
295 rc = hypfs_vm_create_files(sb, root_dentry);
296 else
297 rc = hypfs_diag_create_files(sb, root_dentry);
293 if (rc) 298 if (rc)
294 goto err_tree; 299 goto err_tree;
295 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 300 sbi->update_file = hypfs_create_update_file(sb, root_dentry);
@@ -462,11 +467,15 @@ static int __init hypfs_init(void)
462{ 467{
463 int rc; 468 int rc;
464 469
465 if (MACHINE_IS_VM) 470 if (MACHINE_IS_VM) {
466 return -ENODATA; 471 if (hypfs_vm_init())
467 if (hypfs_diag_init()) { 472 /* no diag 2fc, just exit */
468 rc = -ENODATA; 473 return -ENODATA;
469 goto fail_diag; 474 } else {
475 if (hypfs_diag_init()) {
476 rc = -ENODATA;
477 goto fail_diag;
478 }
470 } 479 }
471 kset_set_kset_s(&s390_subsys, hypervisor_subsys); 480 kset_set_kset_s(&s390_subsys, hypervisor_subsys);
472 rc = subsystem_register(&s390_subsys); 481 rc = subsystem_register(&s390_subsys);
@@ -480,7 +489,8 @@ static int __init hypfs_init(void)
480fail_filesystem: 489fail_filesystem:
481 subsystem_unregister(&s390_subsys); 490 subsystem_unregister(&s390_subsys);
482fail_sysfs: 491fail_sysfs:
483 hypfs_diag_exit(); 492 if (!MACHINE_IS_VM)
493 hypfs_diag_exit();
484fail_diag: 494fail_diag:
485 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); 495 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
486 return rc; 496 return rc;
@@ -488,7 +498,8 @@ fail_diag:
488 498
489static void __exit hypfs_exit(void) 499static void __exit hypfs_exit(void)
490{ 500{
491 hypfs_diag_exit(); 501 if (!MACHINE_IS_VM)
502 hypfs_diag_exit();
492 unregister_filesystem(&hypfs_type); 503 unregister_filesystem(&hypfs_type);
493 subsystem_unregister(&s390_subsys); 504 subsystem_unregister(&s390_subsys);
494} 505}
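Editor's note: the inode.c hunks open-code the same backend choice in two places. A minimal sketch of that dispatch, assuming only MACHINE_IS_VM and the two create_files functions declared in hypfs.h; the helper itself is hypothetical and only shown to make the split explicit:

	static int hypfs_create_files(struct super_block *sb, struct dentry *root)
	{
		if (MACHINE_IS_VM)
			return hypfs_vm_create_files(sb, root);		/* z/VM guest: diag 2fc data */
		return hypfs_diag_create_files(sb, root);		/* LPAR: diag 204 data */
	}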
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a81881c9b297..5492d25d7d69 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,9 +4,9 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7obj-y := bitmap.o traps.o time.o process.o reset.o \ 7obj-y := bitmap.o traps.o time.o process.o base.o early.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o 9 semaphore.o s390_ext.o debug.o irq.o ipl.o
10 10
11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
new file mode 100644
index 000000000000..dc7e5259770f
--- /dev/null
+++ b/arch/s390/kernel/base.S
@@ -0,0 +1,150 @@
1/*
2 * arch/s390/kernel/base.S
3 *
4 * Copyright IBM Corp. 2006,2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11
12#ifdef CONFIG_64BIT
13
14 .globl s390_base_mcck_handler
15s390_base_mcck_handler:
16 basr %r13,0
170: lg %r15,__LC_PANIC_STACK # load panic stack
18 aghi %r15,-STACK_FRAME_OVERHEAD
19 larl %r1,s390_base_mcck_handler_fn
20 lg %r1,0(%r1)
21 ltgr %r1,%r1
22 jz 1f
23 basr %r14,%r1
241: la %r1,4095
25 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
26 lpswe __LC_MCK_OLD_PSW
27
28 .section .bss
29 .globl s390_base_mcck_handler_fn
30s390_base_mcck_handler_fn:
31 .quad 0
32 .previous
33
34 .globl s390_base_ext_handler
35s390_base_ext_handler:
36 stmg %r0,%r15,__LC_SAVE_AREA
37 basr %r13,0
380: aghi %r15,-STACK_FRAME_OVERHEAD
39 larl %r1,s390_base_ext_handler_fn
40 lg %r1,0(%r1)
41 ltgr %r1,%r1
42 jz 1f
43 basr %r14,%r1
441: lmg %r0,%r15,__LC_SAVE_AREA
45 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
46 lpswe __LC_EXT_OLD_PSW
47
48 .section .bss
49 .globl s390_base_ext_handler_fn
50s390_base_ext_handler_fn:
51 .quad 0
52 .previous
53
54 .globl s390_base_pgm_handler
55s390_base_pgm_handler:
56 stmg %r0,%r15,__LC_SAVE_AREA
57 basr %r13,0
580: aghi %r15,-STACK_FRAME_OVERHEAD
59 larl %r1,s390_base_pgm_handler_fn
60 lg %r1,0(%r1)
61 ltgr %r1,%r1
62 jz 1f
63 basr %r14,%r1
64 lmg %r0,%r15,__LC_SAVE_AREA
65 lpswe __LC_PGM_OLD_PSW
661: lpswe disabled_wait_psw-0b(%r13)
67
68 .align 8
69disabled_wait_psw:
70 .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
71
72 .section .bss
73 .globl s390_base_pgm_handler_fn
74s390_base_pgm_handler_fn:
75 .quad 0
76 .previous
77
78#else /* CONFIG_64BIT */
79
80 .globl s390_base_mcck_handler
81s390_base_mcck_handler:
82 basr %r13,0
830: l %r15,__LC_PANIC_STACK # load panic stack
84 ahi %r15,-STACK_FRAME_OVERHEAD
85 l %r1,2f-0b(%r13)
86 l %r1,0(%r1)
87 ltr %r1,%r1
88 jz 1f
89 basr %r14,%r1
901: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
91 lpsw __LC_MCK_OLD_PSW
92
932: .long s390_base_mcck_handler_fn
94
95 .section .bss
96 .globl s390_base_mcck_handler_fn
97s390_base_mcck_handler_fn:
98 .long 0
99 .previous
100
101 .globl s390_base_ext_handler
102s390_base_ext_handler:
103 stm %r0,%r15,__LC_SAVE_AREA
104 basr %r13,0
1050: ahi %r15,-STACK_FRAME_OVERHEAD
106 l %r1,2f-0b(%r13)
107 l %r1,0(%r1)
108 ltr %r1,%r1
109 jz 1f
110 basr %r14,%r1
1111: lm %r0,%r15,__LC_SAVE_AREA
112 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
113 lpsw __LC_EXT_OLD_PSW
114
1152: .long s390_base_ext_handler_fn
116
117 .section .bss
118 .globl s390_base_ext_handler_fn
119s390_base_ext_handler_fn:
120 .long 0
121 .previous
122
123 .globl s390_base_pgm_handler
124s390_base_pgm_handler:
125 stm %r0,%r15,__LC_SAVE_AREA
126 basr %r13,0
1270: ahi %r15,-STACK_FRAME_OVERHEAD
128 l %r1,2f-0b(%r13)
129 l %r1,0(%r1)
130 ltr %r1,%r1
131 jz 1f
132 basr %r14,%r1
133 lm %r0,%r15,__LC_SAVE_AREA
134 lpsw __LC_PGM_OLD_PSW
135
1361: lpsw disabled_wait_psw-0b(%r13)
137
1382: .long s390_base_pgm_handler_fn
139
140disabled_wait_psw:
141 .align 8
142 .long 0x000a0000,0x00000000 + s390_base_pgm_handler
143
144 .section .bss
145 .globl s390_base_pgm_handler_fn
146s390_base_pgm_handler_fn:
147 .long 0
148 .previous
149
150#endif /* CONFIG_64BIT */
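Editor's note: a sketch of how the new base.S stubs are meant to be driven from C: install a callback in the matching *_fn pointer and point the lowcore new PSW at the assembler stub. This mirrors what setup_lowcore_early() in early.c (added later in this patch) does for the program check handler; the helper name install_early_pgm_handler is hypothetical, the other symbols are real ones from this patch:

	extern void s390_base_pgm_handler(void);
	extern void (*s390_base_pgm_handler_fn)(void);

	static void __init install_early_pgm_handler(void (*fn)(void))
	{
		psw_t psw;

		psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
		psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
		S390_lowcore.program_new_psw = psw;
		s390_base_pgm_handler_fn = fn;	/* the stub calls fn, then resumes via the old PSW */
	}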
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 5c46054195cb..f1e40ca00d8d 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -192,7 +192,7 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
192 192
193#undef cputime_to_timeval 193#undef cputime_to_timeval
194#define cputime_to_timeval cputime_to_compat_timeval 194#define cputime_to_timeval cputime_to_compat_timeval
195static __inline__ void 195static inline void
196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) 196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
197{ 197{
198 value->tv_usec = cputime % 1000000; 198 value->tv_usec = cputime % 1000000;
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 71d27c493568..914d49444f92 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -12,10 +12,9 @@
12#include <linux/personality.h> 12#include <linux/personality.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14 14
15struct exec_domain s390_exec_domain; 15static struct exec_domain s390_exec_domain;
16 16
17static int __init 17static int __init s390_init (void)
18s390_init (void)
19{ 18{
20 s390_exec_domain.name = "Linux/s390"; 19 s390_exec_domain.name = "Linux/s390";
21 s390_exec_domain.handler = NULL; 20 s390_exec_domain.handler = NULL;
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 5b33f823863a..666bb6daa148 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -69,6 +69,12 @@
69 69
70#include "compat_linux.h" 70#include "compat_linux.h"
71 71
72long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
73 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
74 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
75long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
76 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
77 PSW32_MASK_PSTATE);
72 78
73/* For this source file, we want overflow handling. */ 79/* For this source file, we want overflow handling. */
74 80
@@ -416,7 +422,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
416 mm_segment_t old_fs = get_fs (); 422 mm_segment_t old_fs = get_fs ();
417 423
418 set_fs (KERNEL_DS); 424 set_fs (KERNEL_DS);
419 ret = sys_sysinfo((struct sysinfo __user *) &s); 425 ret = sys_sysinfo((struct sysinfo __force __user *) &s);
420 set_fs (old_fs); 426 set_fs (old_fs);
421 err = put_user (s.uptime, &info->uptime); 427 err = put_user (s.uptime, &info->uptime);
422 err |= __put_user (s.loads[0], &info->loads[0]); 428 err |= __put_user (s.loads[0], &info->loads[0]);
@@ -445,7 +451,8 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
445 mm_segment_t old_fs = get_fs (); 451 mm_segment_t old_fs = get_fs ();
446 452
447 set_fs (KERNEL_DS); 453 set_fs (KERNEL_DS);
448 ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); 454 ret = sys_sched_rr_get_interval(pid,
455 (struct timespec __force __user *) &t);
449 set_fs (old_fs); 456 set_fs (old_fs);
450 if (put_compat_timespec(&t, interval)) 457 if (put_compat_timespec(&t, interval))
451 return -EFAULT; 458 return -EFAULT;
@@ -472,8 +479,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
472 } 479 }
473 set_fs (KERNEL_DS); 480 set_fs (KERNEL_DS);
474 ret = sys_rt_sigprocmask(how, 481 ret = sys_rt_sigprocmask(how,
475 set ? (sigset_t __user *) &s : NULL, 482 set ? (sigset_t __force __user *) &s : NULL,
476 oset ? (sigset_t __user *) &s : NULL, 483 oset ? (sigset_t __force __user *) &s : NULL,
477 sigsetsize); 484 sigsetsize);
478 set_fs (old_fs); 485 set_fs (old_fs);
479 if (ret) return ret; 486 if (ret) return ret;
@@ -499,7 +506,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
499 mm_segment_t old_fs = get_fs(); 506 mm_segment_t old_fs = get_fs();
500 507
501 set_fs (KERNEL_DS); 508 set_fs (KERNEL_DS);
502 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); 509 ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
503 set_fs (old_fs); 510 set_fs (old_fs);
504 if (!ret) { 511 if (!ret) {
505 switch (_NSIG_WORDS) { 512 switch (_NSIG_WORDS) {
@@ -524,7 +531,7 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
524 if (copy_siginfo_from_user32(&info, uinfo)) 531 if (copy_siginfo_from_user32(&info, uinfo))
525 return -EFAULT; 532 return -EFAULT;
526 set_fs (KERNEL_DS); 533 set_fs (KERNEL_DS);
527 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); 534 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
528 set_fs (old_fs); 535 set_fs (old_fs);
529 return ret; 536 return ret;
530} 537}
@@ -682,7 +689,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offse
682 689
683 set_fs(KERNEL_DS); 690 set_fs(KERNEL_DS);
684 ret = sys_sendfile(out_fd, in_fd, 691 ret = sys_sendfile(out_fd, in_fd,
685 offset ? (off_t __user *) &of : NULL, count); 692 offset ? (off_t __force __user *) &of : NULL, count);
686 set_fs(old_fs); 693 set_fs(old_fs);
687 694
688 if (offset && put_user(of, offset)) 695 if (offset && put_user(of, offset))
@@ -703,7 +710,8 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
703 710
704 set_fs(KERNEL_DS); 711 set_fs(KERNEL_DS);
705 ret = sys_sendfile64(out_fd, in_fd, 712 ret = sys_sendfile64(out_fd, in_fd,
706 offset ? (loff_t __user *) &lof : NULL, count); 713 offset ? (loff_t __force __user *) &lof : NULL,
714 count);
707 set_fs(old_fs); 715 set_fs(old_fs);
708 716
709 if (offset && put_user(lof, offset)) 717 if (offset && put_user(lof, offset))
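Editor's note: the casts added throughout this file follow one sparse-annotation pattern: after set_fs(KERNEL_DS) a kernel buffer is legitimately handed to a syscall that expects a __user pointer, so the cast carries __force to suppress the address-space warning without hiding it globally. A hedged, self-contained illustration (the wrapper name is hypothetical; the syscall and uaccess helpers are the ones already used above):

	static long compat_wrapper_example(pid_t pid)
	{
		struct timespec t;
		mm_segment_t old_fs = get_fs();
		long ret;

		set_fs(KERNEL_DS);	/* uaccess now accepts kernel-space pointers */
		ret = sys_sched_rr_get_interval(pid,
				(struct timespec __force __user *) &t);
		set_fs(old_fs);
		return ret;
	}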
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 1a18e29668ef..e89f8c0c42a0 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -115,37 +115,6 @@ typedef struct
115 __u32 addr; 115 __u32 addr;
116} _psw_t32 __attribute__ ((aligned(8))); 116} _psw_t32 __attribute__ ((aligned(8)));
117 117
118#define PSW32_MASK_PER 0x40000000UL
119#define PSW32_MASK_DAT 0x04000000UL
120#define PSW32_MASK_IO 0x02000000UL
121#define PSW32_MASK_EXT 0x01000000UL
122#define PSW32_MASK_KEY 0x00F00000UL
123#define PSW32_MASK_MCHECK 0x00040000UL
124#define PSW32_MASK_WAIT 0x00020000UL
125#define PSW32_MASK_PSTATE 0x00010000UL
126#define PSW32_MASK_ASC 0x0000C000UL
127#define PSW32_MASK_CC 0x00003000UL
128#define PSW32_MASK_PM 0x00000f00UL
129
130#define PSW32_ADDR_AMODE31 0x80000000UL
131#define PSW32_ADDR_INSN 0x7FFFFFFFUL
132
133#define PSW32_BASE_BITS 0x00080000UL
134
135#define PSW32_ASC_PRIMARY 0x00000000UL
136#define PSW32_ASC_ACCREG 0x00004000UL
137#define PSW32_ASC_SECONDARY 0x00008000UL
138#define PSW32_ASC_HOME 0x0000C000UL
139
140#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
141 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
142 PSW32_MASK_PSTATE)
143
144#define PSW32_MASK_MERGE(CURRENT,NEW) \
145 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
146 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
147
148
149typedef struct 118typedef struct
150{ 119{
151 _psw_t32 psw; 120 _psw_t32 psw;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 861888ab8c13..887a9881d0d0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -275,8 +275,8 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
275 } 275 }
276 276
277 set_fs (KERNEL_DS); 277 set_fs (KERNEL_DS);
278 ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL), 278 ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL),
279 (stack_t __user *) (uoss ? &koss : NULL), 279 (stack_t __force __user *) (uoss ? &koss : NULL),
280 regs->gprs[15]); 280 regs->gprs[15]);
281 set_fs (old_fs); 281 set_fs (old_fs);
282 282
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
298 _s390_regs_common32 regs32; 298 _s390_regs_common32 regs32;
299 int err, i; 299 int err, i;
300 300
301 regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS, 301 regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
302 (__u32)(regs->psw.mask >> 32)); 302 (__u32)(regs->psw.mask >> 32));
303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; 303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
304 for (i = 0; i < NUM_GPRS; i++) 304 for (i = 0; i < NUM_GPRS; i++)
@@ -401,7 +401,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
401 goto badframe; 401 goto badframe;
402 402
403 set_fs (KERNEL_DS); 403 set_fs (KERNEL_DS);
404 do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); 404 do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
405 set_fs (old_fs); 405 set_fs (old_fs);
406 406
407 return regs->gprs[2]; 407 return regs->gprs[2];
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index a5972f1541fe..6c89f30c8e31 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -16,6 +16,7 @@
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17#include <asm/cpcmd.h> 17#include <asm/cpcmd.h>
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/io.h>
19 20
20static DEFINE_SPINLOCK(cpcmd_lock); 21static DEFINE_SPINLOCK(cpcmd_lock);
21static char cpcmd_buf[241]; 22static char cpcmd_buf[241];
@@ -88,13 +89,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
88 int len; 89 int len;
89 unsigned long flags; 90 unsigned long flags;
90 91
91 if ((rlen == 0) || (response == NULL) 92 if ((virt_to_phys(response) != (unsigned long) response) ||
92 || !((unsigned long)response >> 31)) { 93 (((unsigned long)response + rlen) >> 31)) {
93 spin_lock_irqsave(&cpcmd_lock, flags);
94 len = __cpcmd(cmd, response, rlen, response_code);
95 spin_unlock_irqrestore(&cpcmd_lock, flags);
96 }
97 else {
98 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); 94 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
99 if (!lowbuf) { 95 if (!lowbuf) {
100 printk(KERN_WARNING 96 printk(KERN_WARNING
@@ -106,6 +102,10 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
106 spin_unlock_irqrestore(&cpcmd_lock, flags); 102 spin_unlock_irqrestore(&cpcmd_lock, flags);
107 memcpy(response, lowbuf, rlen); 103 memcpy(response, lowbuf, rlen);
108 kfree(lowbuf); 104 kfree(lowbuf);
105 } else {
106 spin_lock_irqsave(&cpcmd_lock, flags);
107 len = __cpcmd(cmd, response, rlen, response_code);
108 spin_unlock_irqrestore(&cpcmd_lock, flags);
109 } 109 }
110 return len; 110 return len;
111} 111}
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 926cceeae0fa..8cc7c9fa64f5 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/threads.h> 10#include <linux/threads.h>
11#include <linux/kexec.h> 11#include <linux/kexec.h>
12#include <linux/reboot.h>
12 13
13void machine_crash_shutdown(struct pt_regs *regs) 14void machine_crash_shutdown(struct pt_regs *regs)
14{ 15{
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb57bc0e3fc8..f4b62df02aa2 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -120,7 +120,7 @@ struct debug_view debug_hex_ascii_view = {
120 NULL 120 NULL
121}; 121};
122 122
123struct debug_view debug_level_view = { 123static struct debug_view debug_level_view = {
124 "level", 124 "level",
125 &debug_prolog_level_fn, 125 &debug_prolog_level_fn,
126 NULL, 126 NULL,
@@ -129,7 +129,7 @@ struct debug_view debug_level_view = {
129 NULL 129 NULL
130}; 130};
131 131
132struct debug_view debug_pages_view = { 132static struct debug_view debug_pages_view = {
133 "pages", 133 "pages",
134 &debug_prolog_pages_fn, 134 &debug_prolog_pages_fn,
135 NULL, 135 NULL,
@@ -138,7 +138,7 @@ struct debug_view debug_pages_view = {
138 NULL 138 NULL
139}; 139};
140 140
141struct debug_view debug_flush_view = { 141static struct debug_view debug_flush_view = {
142 "flush", 142 "flush",
143 NULL, 143 NULL,
144 NULL, 144 NULL,
@@ -156,14 +156,14 @@ struct debug_view debug_sprintf_view = {
156 NULL 156 NULL
157}; 157};
158 158
159 159/* used by dump analysis tools to determine version of debug feature */
160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; 160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
161 161
162/* static globals */ 162/* static globals */
163 163
164static debug_info_t *debug_area_first = NULL; 164static debug_info_t *debug_area_first = NULL;
165static debug_info_t *debug_area_last = NULL; 165static debug_info_t *debug_area_last = NULL;
166DECLARE_MUTEX(debug_lock); 166static DECLARE_MUTEX(debug_lock);
167 167
168static int initialized; 168static int initialized;
169 169
@@ -905,7 +905,7 @@ static struct ctl_table s390dbf_dir_table[] = {
905 { .ctl_name = 0 } 905 { .ctl_name = 0 }
906}; 906};
907 907
908struct ctl_table_header *s390dbf_sysctl_header; 908static struct ctl_table_header *s390dbf_sysctl_header;
909 909
910void 910void
911debug_stop_all(void) 911debug_stop_all(void)
@@ -1300,8 +1300,7 @@ out:
1300 * flushes debug areas 1300 * flushes debug areas
1301 */ 1301 */
1302 1302
1303void 1303static void debug_flush(debug_info_t* id, int area)
1304debug_flush(debug_info_t* id, int area)
1305{ 1304{
1306 unsigned long flags; 1305 unsigned long flags;
1307 int i,j; 1306 int i,j;
@@ -1511,8 +1510,7 @@ out:
1511/* 1510/*
1512 * clean up module 1511 * clean up module
1513 */ 1512 */
1514void 1513static void __exit debug_exit(void)
1515__exit debug_exit(void)
1516{ 1514{
1517 debugfs_remove(debug_debugfs_root_entry); 1515 debugfs_remove(debug_debugfs_root_entry);
1518 unregister_sysctl_table(s390dbf_sysctl_header); 1516 unregister_sysctl_table(s390dbf_sysctl_header);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
new file mode 100644
index 000000000000..e518dd53eff5
--- /dev/null
+++ b/arch/s390/kernel/early.c
@@ -0,0 +1,306 @@
1/*
2 * arch/s390/kernel/early.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
6 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/string.h>
12#include <linux/ctype.h>
13#include <linux/lockdep.h>
14#include <linux/module.h>
15#include <linux/pfn.h>
16#include <linux/uaccess.h>
17#include <asm/lowcore.h>
18#include <asm/processor.h>
19#include <asm/sections.h>
20#include <asm/setup.h>
21#include <asm/cpcmd.h>
22#include <asm/sclp.h>
23
24/*
25 * Create a Kernel NSS if the SAVESYS= parameter is defined
26 */
27#define DEFSYS_CMD_SIZE 96
28#define SAVESYS_CMD_SIZE 32
29
30char kernel_nss_name[NSS_NAME_SIZE + 1];
31
32#ifdef CONFIG_SHARED_KERNEL
33static noinline __init void create_kernel_nss(void)
34{
35 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
36#ifdef CONFIG_BLK_DEV_INITRD
37 unsigned int sinitrd_pfn, einitrd_pfn;
38#endif
39 int response;
40 char *savesys_ptr;
41 char upper_command_line[COMMAND_LINE_SIZE];
42 char defsys_cmd[DEFSYS_CMD_SIZE];
43 char savesys_cmd[SAVESYS_CMD_SIZE];
44
45 /* Do nothing if we are not running under VM */
46 if (!MACHINE_IS_VM)
47 return;
48
49 /* Convert COMMAND_LINE to upper case */
50 for (i = 0; i < strlen(COMMAND_LINE); i++)
51 upper_command_line[i] = toupper(COMMAND_LINE[i]);
52
53 savesys_ptr = strstr(upper_command_line, "SAVESYS=");
54
55 if (!savesys_ptr)
56 return;
57
58 savesys_ptr += 8; /* Point to the beginning of the NSS name */
59 for (i = 0; i < NSS_NAME_SIZE; i++) {
60 if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
61 break;
62 kernel_nss_name[i] = savesys_ptr[i];
63 }
64
65 stext_pfn = PFN_DOWN(__pa(&_stext));
66 eshared_pfn = PFN_DOWN(__pa(&_eshared));
67 end_pfn = PFN_UP(__pa(&_end));
68 min_size = end_pfn << 2;
69
70 sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
71 kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
72 eshared_pfn, end_pfn);
73
74#ifdef CONFIG_BLK_DEV_INITRD
75 if (INITRD_START && INITRD_SIZE) {
76 sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
77 einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
78 min_size = einitrd_pfn << 2;
79 sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
80 sinitrd_pfn, einitrd_pfn);
81 }
82#endif
83
84 sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
85 sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
86 kernel_nss_name, kernel_nss_name);
87
88 __cpcmd(defsys_cmd, NULL, 0, &response);
89
90 if (response != 0)
91 return;
92
93 __cpcmd(savesys_cmd, NULL, 0, &response);
94
95 if (response != strlen(savesys_cmd))
96 return;
97
98 ipl_flags = IPL_NSS_VALID;
99}
100
101#else /* CONFIG_SHARED_KERNEL */
102
103static inline void create_kernel_nss(void) { }
104
105#endif /* CONFIG_SHARED_KERNEL */
106
107/*
108 * Clear bss memory
109 */
110static noinline __init void clear_bss_section(void)
111{
112 memset(__bss_start, 0, _end - __bss_start);
113}
114
115/*
116 * Initialize storage key for kernel pages
117 */
118static noinline __init void init_kernel_storage_key(void)
119{
120 unsigned long end_pfn, init_pfn;
121
122 end_pfn = PFN_UP(__pa(&_end));
123
124 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
125 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
126}
127
128static noinline __init void detect_machine_type(void)
129{
130 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
131
132 asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id));
133
134 /* Running under z/VM ? */
135 if (cpuinfo->cpu_id.version == 0xff)
136 machine_flags |= 1;
137
138 /* Running on a P/390 ? */
139 if (cpuinfo->cpu_id.machine == 0x7490)
140 machine_flags |= 4;
141}
142
143static noinline __init int memory_fast_detect(void)
144{
145
146 unsigned long val0 = 0;
147 unsigned long val1 = 0xc;
148 int ret = -ENOSYS;
149
150 if (ipl_flags & IPL_NSS_VALID)
151 return -ENOSYS;
152
153 asm volatile(
154 " diag %1,%2,0x260\n"
155 "0: lhi %0,0\n"
156 "1:\n"
157 EX_TABLE(0b,1b)
158 : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
159
160 if (ret || val0 != val1)
161 return -ENOSYS;
162
163 memory_chunk[0].size = val0;
164 return 0;
165}
166
167#define ADDR2G (1UL << 31)
168
169static noinline __init unsigned long sclp_memory_detect(void)
170{
171 struct sclp_readinfo_sccb *sccb;
172 unsigned long long memsize;
173
174 sccb = &s390_readinfo_sccb;
175
176 if (sccb->header.response_code != 0x10)
177 return 0;
178
179 if (sccb->rnsize)
180 memsize = sccb->rnsize << 20;
181 else
182 memsize = sccb->rnsize2 << 20;
183 if (sccb->rnmax)
184 memsize *= sccb->rnmax;
185 else
186 memsize *= sccb->rnmax2;
187#ifndef CONFIG_64BIT
188 /*
189 * Can't deal with more than 2G in 31 bit addressing mode, so
190 * limit the value in order to avoid strange side effects.
191 */
192 if (memsize > ADDR2G)
193 memsize = ADDR2G;
194#endif
195 return (unsigned long) memsize;
196}
197
198static inline __init unsigned long __tprot(unsigned long addr)
199{
200 int cc = -1;
201
202 asm volatile(
203 " tprot 0(%1),0\n"
204 "0: ipm %0\n"
205 " srl %0,28\n"
206 "1:\n"
207 EX_TABLE(0b,1b)
208 : "+d" (cc) : "a" (addr) : "cc");
209 return (unsigned long)cc;
210}
211
212/* Checking memory in 128KB increments. */
213#define CHUNK_INCR (1UL << 17)
214
215static noinline __init void find_memory_chunks(unsigned long memsize)
216{
217 unsigned long addr = 0, old_addr = 0;
218 unsigned long old_cc = CHUNK_READ_WRITE;
219 unsigned long cc;
220 int chunk = 0;
221
222 while (chunk < MEMORY_CHUNKS) {
223 cc = __tprot(addr);
224 while (cc == old_cc) {
225 addr += CHUNK_INCR;
226 cc = __tprot(addr);
227#ifndef CONFIG_64BIT
228 if (addr == ADDR2G)
229 break;
230#endif
231 }
232
233 if (old_addr != addr &&
234 (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
235 memory_chunk[chunk].addr = old_addr;
236 memory_chunk[chunk].size = addr - old_addr;
237 memory_chunk[chunk].type = old_cc;
238 chunk++;
239 }
240
241 old_addr = addr;
242 old_cc = cc;
243
244#ifndef CONFIG_64BIT
245 if (addr == ADDR2G)
246 break;
247#endif
248 /*
249 * Finish memory detection at the first hole, unless
250 * - we reached the hsa -> skip it.
251 * - we know there must be more.
252 */
253 if (cc == -1UL && !memsize && old_addr != ADDR2G)
254 break;
255 if (memsize && addr >= memsize)
256 break;
257 }
258}
259
260static __init void early_pgm_check_handler(void)
261{
262 unsigned long addr;
263 const struct exception_table_entry *fixup;
264
265 addr = S390_lowcore.program_old_psw.addr;
266 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
267 if (!fixup)
268 disabled_wait(0);
269 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
270}
271
272static noinline __init void setup_lowcore_early(void)
273{
274 psw_t psw;
275
276 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
277 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
278 S390_lowcore.external_new_psw = psw;
279 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
280 S390_lowcore.program_new_psw = psw;
281 s390_base_pgm_handler_fn = early_pgm_check_handler;
282}
283
284/*
285 * Save ipl parameters, clear bss memory, initialize storage keys
286 * and create a kernel NSS at startup if the SAVESYS= parm is defined
287 */
288void __init startup_init(void)
289{
290 unsigned long memsize;
291
292 ipl_save_parameters();
293 clear_bss_section();
294 init_kernel_storage_key();
295 lockdep_init();
296 lockdep_off();
297 detect_machine_type();
298 create_kernel_nss();
299 sort_main_extable();
300 setup_lowcore_early();
301 sclp_readinfo_early();
302 memsize = sclp_memory_detect();
303 if (memory_fast_detect() < 0)
304 find_memory_chunks(memsize);
305 lockdep_on();
306}
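Editor's note: after startup_init() has run, the memory layout detected by memory_fast_detect()/find_memory_chunks() sits in memory_chunk[]. An illustration (hypothetical helper; the real consumers live in setup.c) of walking that array, assuming MEMORY_CHUNKS, CHUNK_READ_WRITE and CHUNK_READ_ONLY from <asm/setup.h>:

	static unsigned long __init detected_memory_size(void)
	{
		unsigned long size = 0;
		int i;

		for (i = 0; i < MEMORY_CHUNKS; i++) {
			if (!memory_chunk[i].size)
				continue;	/* unused slot */
			if (memory_chunk[i].type == CHUNK_READ_WRITE ||
			    memory_chunk[i].type == CHUNK_READ_ONLY)
				size += memory_chunk[i].size;
		}
		return size;
	}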
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index bb0f973137f0..cc0dc609d738 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/types.h> 13#include <asm/types.h>
14#include <asm/ebcdic.h>
14 15
15/* 16/*
16 * ASCII (IBM PC 437) -> EBCDIC 037 17 * ASCII (IBM PC 437) -> EBCDIC 037
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index eca507050e47..453fd3b4edea 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -51,176 +51,15 @@ startup_continue:
51 st %r15,__LC_KERNEL_STACK # set end of kernel stack 51 st %r15,__LC_KERNEL_STACK # set end of kernel stack
52 ahi %r15,-96 52 ahi %r15,-96
53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
54
55 l %r14,.Lipl_save_parameters-.LPG1(%r13)
56 basr %r14,%r14
57# 54#
58# clear bss memory 55# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
56# and create a kernel NSS if the SAVESYS= parm is defined
59# 57#
60 l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss 58 l %r14,.Lstartup_init-.LPG1(%r13)
61 l %r3,.Lbss_end-.LPG1(%r13) # end of bss 59 basr %r14,%r14
62 sr %r3,%r2 # length of bss
63 sr %r4,%r4
64 sr %r5,%r5 # set src,length and pad to zero
65 sr %r0,%r0
66 mvcle %r2,%r4,0 # clear mem
67 jo .-4 # branch back, if not finish
68
69 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
70.Lservicecall:
71 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
72
73 stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
74 la %r1,0x200 # set bit 22
75 o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
76 st %r1,.Lcr-.LPG1(%r13)
77 lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
78
79 mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
80 la %r1, .Lsclph-.LPG1(%r13)
81 a %r1,__LC_EXT_NEW_PSW+4 # set handler
82 st %r1,__LC_EXT_NEW_PSW+4
83
84 l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff
85 lr %r1,%r4 # our sccb
86 .insn rre,0xb2200000,%r2,%r1 # service call
87 ipm %r1
88 srl %r1,28 # get cc code
89 xr %r3, %r3
90 chi %r1,3
91 be .Lfchunk-.LPG1(%r13) # leave
92 chi %r1,2
93 be .Lservicecall-.LPG1(%r13)
94 lpsw .Lwaitsclp-.LPG1(%r13)
95.Lsclph:
96 lh %r1,.Lsccbr-.Lsccb(%r4)
97 chi %r1,0x10 # 0x0010 is the sucess code
98 je .Lprocsccb # let's process the sccb
99 chi %r1,0x1f0
100 bne .Lfchunk-.LPG1(%r13) # unhandled error code
101 c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
102 bne .Lfchunk-.LPG1(%r13) # if no, give up
103 l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
104 b .Lservicecall-.LPG1(%r13)
105.Lprocsccb:
106 lhi %r1,0
107 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
108 jnz .Lscnd
109 lhi %r1,0x800 # otherwise report 2GB
110.Lscnd:
111 lhi %r3,0x800 # limit reported memory size to 2GB
112 cr %r1,%r3
113 jl .Lno2gb
114 lr %r1,%r3
115.Lno2gb:
116 xr %r3,%r3 # same logic
117 ic %r3,.Lscpa1-.Lsccb(%r4)
118 chi %r3,0x00
119 jne .Lcompmem
120 l %r3,.Lscpa2-.Lsccb(%r4)
121.Lcompmem:
122 mr %r2,%r1 # mem in MB on 128-bit
123 l %r1,.Lonemb-.LPG1(%r13)
124 mr %r2,%r1 # mem size in bytes in %r3
125 b .Lfchunk-.LPG1(%r13)
126
127 .align 4
128.Lipl_save_parameters:
129 .long ipl_save_parameters
130.Linittu:
131 .long init_thread_union
132.Lpmask:
133 .byte 0
134 .align 8
135.Lpcext:.long 0x00080000,0x80000000
136.Lcr:
137 .long 0x00 # place holder for cr0
138 .align 8
139.Lwaitsclp:
140 .long 0x010a0000,0x80000000 + .Lsclph
141.Lrcp:
142 .int 0x00120001 # Read SCP forced code
143.Lrcp2:
144 .int 0x00020001 # Read SCP code
145.Lonemb:
146 .int 0x100000
147.Lfchunk:
148 60
149#
150# find memory chunks.
151#
152 lr %r9,%r3 # end of mem
153 mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
154 la %r1,1 # test in increments of 128KB
155 sll %r1,17
156 l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
157 slr %r4,%r4 # set start of chunk to zero
158 slr %r5,%r5 # set end of chunk to zero
159 slr %r6,%r6 # set access code to zero
160 la %r10,MEMORY_CHUNKS # number of chunks
161.Lloop:
162 tprot 0(%r5),0 # test protection of first byte
163 ipm %r7
164 srl %r7,28
165 clr %r6,%r7 # compare cc with last access code
166 be .Lsame-.LPG1(%r13)
167 lhi %r8,0 # no program checks
168 b .Lsavchk-.LPG1(%r13)
169.Lsame:
170 ar %r5,%r1 # add 128KB to end of chunk
171 bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
172.Lchkmem: # > 2GB or tprot got a program check
173 lhi %r8,1 # set program check flag
174.Lsavchk:
175 clr %r4,%r5 # chunk size > 0?
176 be .Lchkloop-.LPG1(%r13)
177 st %r4,0(%r3) # store start address of chunk
178 lr %r0,%r5
179 slr %r0,%r4
180 st %r0,4(%r3) # store size of chunk
181 st %r6,8(%r3) # store type of chunk
182 la %r3,12(%r3)
183 ahi %r10,-1 # update chunk number
184.Lchkloop:
185 lr %r6,%r7 # set access code to last cc
186 # we got an exception or we're starting a new
187 # chunk , we must check if we should
188 # still try to find valid memory (if we detected
189 # the amount of available storage), and if we
190 # have chunks left
191 xr %r0,%r0
192 clr %r0,%r9 # did we detect memory?
193 je .Ldonemem # if not, leave
194 chi %r10,0 # do we have chunks left?
195 je .Ldonemem
196 chi %r8,1 # program check ?
197 je .Lpgmchk
198 lr %r4,%r5 # potential new chunk
199 alr %r5,%r1 # add 128KB to end of chunk
200 j .Llpcnt
201.Lpgmchk:
202 alr %r5,%r1 # add 128KB to end of chunk
203 lr %r4,%r5 # potential new chunk
204.Llpcnt:
205 clr %r5,%r9 # should we go on?
206 jl .Lloop
207.Ldonemem:
208 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags 61 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
209# 62#
210# find out if we are running under VM
211#
212 stidp __LC_CPUID # store cpuid
213 tm __LC_CPUID,0xff # running under VM ?
214 bno .Lnovm-.LPG1(%r13)
215 oi 3(%r12),1 # set VM flag
216.Lnovm:
217 lh %r0,__LC_CPUID+4 # get cpu version
218 chi %r0,0x7490 # running on a P/390 ?
219 bne .Lnop390-.LPG1(%r13)
220 oi 3(%r12),4 # set P/390 flag
221.Lnop390:
222
223#
224# find out if we have an IEEE fpu 63# find out if we have an IEEE fpu
225# 64#
226 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) 65 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
@@ -295,7 +134,6 @@ startup_continue:
295 .long 0 # cr15: linkage stack operations 134 .long 0 # cr15: linkage stack operations
296.Lduct: .long 0,0,0,0,0,0,0,0 135.Lduct: .long 0,0,0,0,0,0,0,0
297 .long 0,0,0,0,0,0,0,0 136 .long 0,0,0,0,0,0,0,0
298.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
299.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu 137.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
300.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp 138.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
301.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg 139.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -306,7 +144,9 @@ startup_continue:
306.Lbss_bgn: .long __bss_start 144.Lbss_bgn: .long __bss_start
307.Lbss_end: .long _end 145.Lbss_end: .long _end
308.Lparmaddr: .long PARMAREA 146.Lparmaddr: .long PARMAREA
309.Lsccbaddr: .long .Lsccb 147.Linittu: .long init_thread_union
148.Lstartup_init:
149 .long startup_init
310 150
311 .globl ipl_schib 151 .globl ipl_schib
312ipl_schib: 152ipl_schib:
@@ -322,26 +162,6 @@ ipl_devno:
322 .word 0 162 .word 0
323 163
324 .org 0x12000 164 .org 0x12000
325.globl s390_readinfo_sccb
326s390_readinfo_sccb:
327.Lsccb:
328 .hword 0x1000 # length, one page
329 .byte 0x00,0x00,0x00
330 .byte 0x80 # variable response bit set
331.Lsccbr:
332 .hword 0x00 # response code
333.Lscpincr1:
334 .hword 0x00
335.Lscpa1:
336 .byte 0x00
337 .fill 89,1,0
338.Lscpa2:
339 .int 0x00
340.Lscpincr2:
341 .quad 0x00
342 .fill 3984,1,0
343 .org 0x13000
344
345#ifdef CONFIG_SHARED_KERNEL 165#ifdef CONFIG_SHARED_KERNEL
346 .org 0x100000 166 .org 0x100000
347#endif 167#endif
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6ba3f4512dd1..b8fec4e5c5d4 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -58,183 +58,15 @@ startup_continue:
58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack 58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
59 aghi %r15,-160 59 aghi %r15,-160
60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
61
62 brasl %r14,ipl_save_parameters
63# 61#
64# clear bss memory 62# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
63# and create a kernel NSS if the SAVESYS= parm is defined
65# 64#
66 larl %r2,__bss_start # start of bss segment 65 brasl %r14,startup_init
67 larl %r3,_end # end of bss segment
68 sgr %r3,%r2 # length of bss
69 sgr %r4,%r4 #
70 sgr %r5,%r5 # set src,length and pad to zero
71 mvcle %r2,%r4,0 # clear mem
72 jo .-4 # branch back, if not finish
73 # set program check new psw mask 66 # set program check new psw mask
74 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) 67 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
75 larl %r1,.Lslowmemdetect # set program check address
76 stg %r1,__LC_PGM_NEW_PSW+8
77 lghi %r1,0xc
78 diag %r0,%r1,0x260 # get memory size of virtual machine
79 cgr %r0,%r1 # different? -> old detection routine
80 jne .Lslowmemdetect
81 aghi %r1,1 # size is one more than end
82 larl %r2,memory_chunk
83 stg %r1,8(%r2) # store size of chunk
84 j .Ldonemem
85
86.Lslowmemdetect:
87 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
88.Lservicecall:
89 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
90
91 stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
92 la %r1,0x200 # set bit 22
93 og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
94 stg %r1,.Lcr-.LPG1(%r13)
95 lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
96
97 mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
98 larl %r1,.Lsclph
99 stg %r1,__LC_EXT_NEW_PSW+8 # set handler
100
101 larl %r4,.Lsccb # %r4 is our index for sccb stuff
102 lgr %r1,%r4 # our sccb
103 .insn rre,0xb2200000,%r2,%r1 # service call
104 ipm %r1
105 srl %r1,28 # get cc code
106 xr %r3,%r3
107 chi %r1,3
108 be .Lfchunk-.LPG1(%r13) # leave
109 chi %r1,2
110 be .Lservicecall-.LPG1(%r13)
111 lpswe .Lwaitsclp-.LPG1(%r13)
112.Lsclph:
113 lh %r1,.Lsccbr-.Lsccb(%r4)
114 chi %r1,0x10 # 0x0010 is the sucess code
115 je .Lprocsccb # let's process the sccb
116 chi %r1,0x1f0
117 bne .Lfchunk-.LPG1(%r13) # unhandled error code
118 c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
119 bne .Lfchunk-.LPG1(%r13) # if no, give up
120 l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
121 b .Lservicecall-.LPG1(%r13)
122.Lprocsccb:
123 lghi %r1,0
124 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
125 jnz .Lscnd
126 lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
127.Lscnd:
128 xr %r3,%r3 # same logic
129 ic %r3,.Lscpa1-.Lsccb(%r4)
130 chi %r3,0x00
131 jne .Lcompmem
132 l %r3,.Lscpa2-.Lsccb(%r4)
133.Lcompmem:
134 mlgr %r2,%r1 # mem in MB on 128-bit
135 l %r1,.Lonemb-.LPG1(%r13)
136 mlgr %r2,%r1 # mem size in bytes in %r3
137 b .Lfchunk-.LPG1(%r13)
138
139 .align 4
140.Lpmask:
141 .byte 0
142 .align 8
143.Lcr:
144 .quad 0x00 # place holder for cr0
145.Lwaitsclp:
146 .quad 0x0102000180000000,.Lsclph
147.Lrcp:
148 .int 0x00120001 # Read SCP forced code
149.Lrcp2:
150 .int 0x00020001 # Read SCP code
151.Lonemb:
152 .int 0x100000
153
154.Lfchunk:
155
156#
157# find memory chunks.
158#
159 lgr %r9,%r3 # end of mem
160 larl %r1,.Lchkmem # set program check address
161 stg %r1,__LC_PGM_NEW_PSW+8
162 la %r1,1 # test in increments of 128KB
163 sllg %r1,%r1,17
164 larl %r3,memory_chunk
165 slgr %r4,%r4 # set start of chunk to zero
166 slgr %r5,%r5 # set end of chunk to zero
167 slr %r6,%r6 # set access code to zero
168 la %r10,MEMORY_CHUNKS # number of chunks
169.Lloop:
170 tprot 0(%r5),0 # test protection of first byte
171 ipm %r7
172 srl %r7,28
173 clr %r6,%r7 # compare cc with last access code
174 je .Lsame
175 lghi %r8,0 # no program checks
176 j .Lsavchk
177.Lsame:
178 algr %r5,%r1 # add 128KB to end of chunk
179 # no need to check here,
180 brc 12,.Lloop # this is the same chunk
181.Lchkmem: # > 16EB or tprot got a program check
182 lghi %r8,1 # set program check flag
183.Lsavchk:
184 clgr %r4,%r5 # chunk size > 0?
185 je .Lchkloop
186 stg %r4,0(%r3) # store start address of chunk
187 lgr %r0,%r5
188 slgr %r0,%r4
189 stg %r0,8(%r3) # store size of chunk
190 st %r6,20(%r3) # store type of chunk
191 la %r3,24(%r3)
192 ahi %r10,-1 # update chunk number
193.Lchkloop:
194 lr %r6,%r7 # set access code to last cc
195 # we got an exception or we're starting a new
196 # chunk , we must check if we should
197 # still try to find valid memory (if we detected
198 # the amount of available storage), and if we
199 # have chunks left
200 lghi %r4,1
201 sllg %r4,%r4,31
202 clgr %r5,%r4
203 je .Lhsaskip
204 xr %r0, %r0
205 clgr %r0, %r9 # did we detect memory?
206 je .Ldonemem # if not, leave
207 chi %r10, 0 # do we have chunks left?
208 je .Ldonemem
209.Lhsaskip:
210 chi %r8,1 # program check ?
211 je .Lpgmchk
212 lgr %r4,%r5 # potential new chunk
213 algr %r5,%r1 # add 128KB to end of chunk
214 j .Llpcnt
215.Lpgmchk:
216 algr %r5,%r1 # add 128KB to end of chunk
217 lgr %r4,%r5 # potential new chunk
218.Llpcnt:
219 clgr %r5,%r9 # should we go on?
220 jl .Lloop
221.Ldonemem:
222
223 larl %r12,machine_flags 68 larl %r12,machine_flags
224# 69#
225# find out if we are running under VM
226#
227 stidp __LC_CPUID # store cpuid
228 tm __LC_CPUID,0xff # running under VM ?
229 bno 0f-.LPG1(%r13)
230 oi 7(%r12),1 # set VM flag
2310: lh %r0,__LC_CPUID+4 # get cpu version
232 chi %r0,0x7490 # running on a P/390 ?
233 bne 1f-.LPG1(%r13)
234 oi 7(%r12),4 # set P/390 flag
2351:
236
237#
238# find out if we have the MVPG instruction 70# find out if we have the MVPG instruction
239# 71#
240 la %r1,0f-.LPG1(%r13) # set program check address 72 la %r1,0f-.LPG1(%r13) # set program check address
@@ -336,25 +168,6 @@ ipl_devno:
336 .word 0 168 .word 0
337 169
338 .org 0x12000 170 .org 0x12000
339.globl s390_readinfo_sccb
340s390_readinfo_sccb:
341.Lsccb:
342 .hword 0x1000 # length, one page
343 .byte 0x00,0x00,0x00
344 .byte 0x80 # variable response bit set
345.Lsccbr:
346 .hword 0x00 # response code
347.Lscpincr1:
348 .hword 0x00
349.Lscpa1:
350 .byte 0x00
351 .fill 89,1,0
352.Lscpa2:
353 .int 0x00
354.Lscpincr2:
355 .quad 0x00
356 .fill 3984,1,0
357 .org 0x13000
358 171
359#ifdef CONFIG_SHARED_KERNEL 172#ifdef CONFIG_SHARED_KERNEL
360 .org 0x100000 173 .org 0x100000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 9e9972e8a52b..052259530651 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -20,26 +20,27 @@
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
22#include <asm/reset.h> 22#include <asm/reset.h>
23#include <asm/sclp.h>
23 24
24#define IPL_PARM_BLOCK_VERSION 0 25#define IPL_PARM_BLOCK_VERSION 0
25#define LOADPARM_LEN 8
26 26
27extern char s390_readinfo_sccb[]; 27#define SCCB_VALID (s390_readinfo_sccb.header.response_code == 0x10)
28#define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010) 28#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
29#define SCCB_LOADPARM (&s390_readinfo_sccb[24]) 29#define SCCB_FLAG (s390_readinfo_sccb.flags)
30#define SCCB_FLAG (s390_readinfo_sccb[91])
31 30
32enum ipl_type { 31enum ipl_type {
33 IPL_TYPE_NONE = 1, 32 IPL_TYPE_NONE = 1,
34 IPL_TYPE_UNKNOWN = 2, 33 IPL_TYPE_UNKNOWN = 2,
35 IPL_TYPE_CCW = 4, 34 IPL_TYPE_CCW = 4,
36 IPL_TYPE_FCP = 8, 35 IPL_TYPE_FCP = 8,
36 IPL_TYPE_NSS = 16,
37}; 37};
38 38
39#define IPL_NONE_STR "none" 39#define IPL_NONE_STR "none"
40#define IPL_UNKNOWN_STR "unknown" 40#define IPL_UNKNOWN_STR "unknown"
41#define IPL_CCW_STR "ccw" 41#define IPL_CCW_STR "ccw"
42#define IPL_FCP_STR "fcp" 42#define IPL_FCP_STR "fcp"
43#define IPL_NSS_STR "nss"
43 44
44static char *ipl_type_str(enum ipl_type type) 45static char *ipl_type_str(enum ipl_type type)
45{ 46{
@@ -50,6 +51,8 @@ static char *ipl_type_str(enum ipl_type type)
50 return IPL_CCW_STR; 51 return IPL_CCW_STR;
51 case IPL_TYPE_FCP: 52 case IPL_TYPE_FCP:
52 return IPL_FCP_STR; 53 return IPL_FCP_STR;
54 case IPL_TYPE_NSS:
55 return IPL_NSS_STR;
53 case IPL_TYPE_UNKNOWN: 56 case IPL_TYPE_UNKNOWN:
54 default: 57 default:
55 return IPL_UNKNOWN_STR; 58 return IPL_UNKNOWN_STR;
@@ -64,6 +67,7 @@ enum ipl_method {
64 IPL_METHOD_FCP_RO_DIAG, 67 IPL_METHOD_FCP_RO_DIAG,
65 IPL_METHOD_FCP_RW_DIAG, 68 IPL_METHOD_FCP_RW_DIAG,
66 IPL_METHOD_FCP_RO_VM, 69 IPL_METHOD_FCP_RO_VM,
70 IPL_METHOD_NSS,
67}; 71};
68 72
69enum shutdown_action { 73enum shutdown_action {
@@ -114,11 +118,14 @@ enum diag308_rc {
114static int diag308_set_works = 0; 118static int diag308_set_works = 0;
115 119
116static int reipl_capabilities = IPL_TYPE_UNKNOWN; 120static int reipl_capabilities = IPL_TYPE_UNKNOWN;
121
117static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 122static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
118static enum ipl_method reipl_method = IPL_METHOD_NONE; 123static enum ipl_method reipl_method = IPL_METHOD_NONE;
119static struct ipl_parameter_block *reipl_block_fcp; 124static struct ipl_parameter_block *reipl_block_fcp;
120static struct ipl_parameter_block *reipl_block_ccw; 125static struct ipl_parameter_block *reipl_block_ccw;
121 126
127static char reipl_nss_name[NSS_NAME_SIZE + 1];
128
122static int dump_capabilities = IPL_TYPE_NONE; 129static int dump_capabilities = IPL_TYPE_NONE;
123static enum ipl_type dump_type = IPL_TYPE_NONE; 130static enum ipl_type dump_type = IPL_TYPE_NONE;
124static enum ipl_method dump_method = IPL_METHOD_NONE; 131static enum ipl_method dump_method = IPL_METHOD_NONE;
@@ -173,6 +180,24 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
173 sys_##_prefix##_##_name##_show, \ 180 sys_##_prefix##_##_name##_show, \
174 sys_##_prefix##_##_name##_store); 181 sys_##_prefix##_##_name##_store);
175 182
183#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
184static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
185 char *page) \
186{ \
187 return sprintf(page, _fmt_out, _value); \
188} \
189static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
190 const char *buf, size_t len) \
191{ \
192 if (sscanf(buf, _fmt_in, _value) != 1) \
193 return -EINVAL; \
194 return len; \
195} \
196static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
197 __ATTR(_name,(S_IRUGO | S_IWUSR), \
198 sys_##_prefix##_##_name##_show, \
199 sys_##_prefix##_##_name##_store);
200
176static void make_attrs_ro(struct attribute **attrs) 201static void make_attrs_ro(struct attribute **attrs)
177{ 202{
178 while (*attrs) { 203 while (*attrs) {
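For illustration (not part of the patch): substituting the DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name) invocation added further down into the macro above gives roughly the following show/store pair (formatting simplified):

    static ssize_t sys_reipl_nss_name_show(struct subsystem *subsys, char *page)
    {
            return sprintf(page, "%s\n", reipl_nss_name);
    }
    static ssize_t sys_reipl_nss_name_store(struct subsystem *subsys,
                                            const char *buf, size_t len)
    {
            if (sscanf(buf, "%s\n", reipl_nss_name) != 1)
                    return -EINVAL;
            return len;
    }
    static struct subsys_attribute sys_reipl_nss_name_attr =
            __ATTR(name, (S_IRUGO | S_IWUSR),
                   sys_reipl_nss_name_show, sys_reipl_nss_name_store);

Together with the "nss" attribute group registered below, this presumably surfaces as a writable name file in the reipl directory of sysfs.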
@@ -189,6 +214,8 @@ static enum ipl_type ipl_get_type(void)
189{ 214{
190 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 215 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
191 216
217 if (ipl_flags & IPL_NSS_VALID)
218 return IPL_TYPE_NSS;
192 if (!(ipl_flags & IPL_DEVNO_VALID)) 219 if (!(ipl_flags & IPL_DEVNO_VALID))
193 return IPL_TYPE_UNKNOWN; 220 return IPL_TYPE_UNKNOWN;
194 if (!(ipl_flags & IPL_PARMBLOCK_VALID)) 221 if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -324,6 +351,20 @@ static struct attribute_group ipl_ccw_attr_group = {
324 .attrs = ipl_ccw_attrs, 351 .attrs = ipl_ccw_attrs,
325}; 352};
326 353
354/* NSS ipl device attributes */
355
356DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
357
358static struct attribute *ipl_nss_attrs[] = {
359 &sys_ipl_type_attr.attr,
360 &sys_ipl_nss_name_attr.attr,
361 NULL,
362};
363
364static struct attribute_group ipl_nss_attr_group = {
365 .attrs = ipl_nss_attrs,
366};
367
327/* UNKNOWN ipl device attributes */ 368/* UNKNOWN ipl device attributes */
328 369
329static struct attribute *ipl_unknown_attrs[] = { 370static struct attribute *ipl_unknown_attrs[] = {
@@ -432,6 +473,21 @@ static struct attribute_group reipl_ccw_attr_group = {
432 .attrs = reipl_ccw_attrs, 473 .attrs = reipl_ccw_attrs,
433}; 474};
434 475
476
477/* NSS reipl device attributes */
478
479DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
480
481static struct attribute *reipl_nss_attrs[] = {
482 &sys_reipl_nss_name_attr.attr,
483 NULL,
484};
485
486static struct attribute_group reipl_nss_attr_group = {
487 .name = IPL_NSS_STR,
488 .attrs = reipl_nss_attrs,
489};
490
435/* reipl type */ 491/* reipl type */
436 492
437static int reipl_set_type(enum ipl_type type) 493static int reipl_set_type(enum ipl_type type)
@@ -454,6 +510,9 @@ static int reipl_set_type(enum ipl_type type)
454 else 510 else
455 reipl_method = IPL_METHOD_FCP_RO_DIAG; 511 reipl_method = IPL_METHOD_FCP_RO_DIAG;
456 break; 512 break;
513 case IPL_TYPE_NSS:
514 reipl_method = IPL_METHOD_NSS;
515 break;
457 default: 516 default:
458 reipl_method = IPL_METHOD_NONE; 517 reipl_method = IPL_METHOD_NONE;
459 } 518 }
@@ -475,6 +534,8 @@ static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
475 rc = reipl_set_type(IPL_TYPE_CCW); 534 rc = reipl_set_type(IPL_TYPE_CCW);
476 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) 535 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
477 rc = reipl_set_type(IPL_TYPE_FCP); 536 rc = reipl_set_type(IPL_TYPE_FCP);
537 else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
538 rc = reipl_set_type(IPL_TYPE_NSS);
478 return (rc != 0) ? rc : len; 539 return (rc != 0) ? rc : len;
479} 540}
480 541
@@ -647,6 +708,10 @@ void do_reipl(void)
647 case IPL_METHOD_FCP_RO_VM: 708 case IPL_METHOD_FCP_RO_VM:
648 __cpcmd("IPL", NULL, 0, NULL); 709 __cpcmd("IPL", NULL, 0, NULL);
649 break; 710 break;
711 case IPL_METHOD_NSS:
712 sprintf(buf, "IPL %s", reipl_nss_name);
713 __cpcmd(buf, NULL, 0, NULL);
714 break;
650 case IPL_METHOD_NONE: 715 case IPL_METHOD_NONE:
651 default: 716 default:
652 if (MACHINE_IS_VM) 717 if (MACHINE_IS_VM)
@@ -733,6 +798,10 @@ static int __init ipl_init(void)
733 case IPL_TYPE_FCP: 798 case IPL_TYPE_FCP:
734 rc = ipl_register_fcp_files(); 799 rc = ipl_register_fcp_files();
735 break; 800 break;
801 case IPL_TYPE_NSS:
802 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
803 &ipl_nss_attr_group);
804 break;
736 default: 805 default:
737 rc = sysfs_create_group(&ipl_subsys.kset.kobj, 806 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
738 &ipl_unknown_attr_group); 807 &ipl_unknown_attr_group);
@@ -755,6 +824,20 @@ static void __init reipl_probe(void)
755 free_page((unsigned long)buffer); 824 free_page((unsigned long)buffer);
756} 825}
757 826
827static int __init reipl_nss_init(void)
828{
829 int rc;
830
831 if (!MACHINE_IS_VM)
832 return 0;
833 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_nss_attr_group);
834 if (rc)
835 return rc;
836 strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
837 reipl_capabilities |= IPL_TYPE_NSS;
838 return 0;
839}
840
758static int __init reipl_ccw_init(void) 841static int __init reipl_ccw_init(void)
759{ 842{
760 int rc; 843 int rc;
@@ -837,6 +920,9 @@ static int __init reipl_init(void)
837 rc = reipl_fcp_init(); 920 rc = reipl_fcp_init();
838 if (rc) 921 if (rc)
839 return rc; 922 return rc;
923 rc = reipl_nss_init();
924 if (rc)
925 return rc;
840 rc = reipl_set_type(ipl_get_type()); 926 rc = reipl_set_type(ipl_get_type());
841 if (rc) 927 if (rc)
842 return rc; 928 return rc;
@@ -993,8 +1079,6 @@ static void do_reset_calls(void)
993 reset->fn(); 1079 reset->fn();
994} 1080}
995 1081
996extern void reset_mcck_handler(void);
997extern void reset_pgm_handler(void);
998extern __u32 dump_prefix_page; 1082extern __u32 dump_prefix_page;
999 1083
1000void s390_reset_system(void) 1084void s390_reset_system(void)
@@ -1016,14 +1100,14 @@ void s390_reset_system(void)
1016 __ctl_clear_bit(0,28); 1100 __ctl_clear_bit(0,28);
1017 1101
1018 /* Set new machine check handler */ 1102 /* Set new machine check handler */
1019 S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1103 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1020 S390_lowcore.mcck_new_psw.addr = 1104 S390_lowcore.mcck_new_psw.addr =
1021 PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler; 1105 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
1022 1106
1023 /* Set new program check handler */ 1107 /* Set new program check handler */
1024 S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1108 S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1025 S390_lowcore.program_new_psw.addr = 1109 S390_lowcore.program_new_psw.addr =
1026 PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler; 1110 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
1027 1111
1028 do_reset_calls(); 1112 do_reset_calls();
1029} 1113}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1eef50918615..8f0cbca31203 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * arch/s390/kernel/irq.c 2 * arch/s390/kernel/irq.c
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 2004,2007
5 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Thomas Spatzier (tspat@de.ibm.com)
7 * 7 *
8 * This file contains interrupt related functions. 8 * This file contains interrupt related functions.
9 */ 9 */
@@ -14,6 +14,8 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/proc_fs.h>
18#include <linux/profile.h>
17 19
18/* 20/*
19 * show_interrupts is needed by /proc/interrupts. 21 * show_interrupts is needed by /proc/interrupts.
@@ -93,5 +95,12 @@ asmlinkage void do_softirq(void)
93 95
94 local_irq_restore(flags); 96 local_irq_restore(flags);
95} 97}
96
97EXPORT_SYMBOL(do_softirq); 98EXPORT_SYMBOL(do_softirq);
99
100void init_irq_proc(void)
101{
102 struct proc_dir_entry *root_irq_dir;
103
104 root_irq_dir = proc_mkdir("irq", NULL);
105 create_prof_cpu_mask(root_irq_dir);
106}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 576368c4f605..a466bab6677e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
155static int __kprobes swap_instruction(void *aref) 155static int __kprobes swap_instruction(void *aref)
156{ 156{
157 struct ins_replace_args *args = aref; 157 struct ins_replace_args *args = aref;
158 u32 *addr;
159 u32 instr;
158 int err = -EFAULT; 160 int err = -EFAULT;
159 161
162 /*
163 * Text segment is read-only, hence we use stura to bypass dynamic
164 * address translation to exchange the instruction. Since stura
165 * always operates on four bytes but we only want to exchange two
166 * bytes, do some calculations to get things right. In addition we
167 * shall not cross any page boundaries (vmalloc area!) when writing
168 * the new instruction.
169 */
170 addr = (u32 *)ALIGN((unsigned long)args->ptr, 4);
171 if ((unsigned long)args->ptr & 2)
172 instr = ((*addr) & 0xffff0000) | args->new;
173 else
174 instr = ((*addr) & 0x0000ffff) | args->new << 16;
175
160 asm volatile( 176 asm volatile(
161 "0: mvc 0(2,%2),0(%3)\n" 177 " lra %1,0(%1)\n"
162 "1: la %0,0\n" 178 "0: stura %2,%1\n"
179 "1: la %0,0\n"
163 "2:\n" 180 "2:\n"
164 EX_TABLE(0b,2b) 181 EX_TABLE(0b,2b)
165 : "+d" (err), "=m" (*args->ptr) 182 : "+d" (err)
166 : "a" (args->ptr), "a" (&args->new), "m" (args->new)); 183 : "a" (addr), "d" (instr)
184 : "memory", "cc");
185
167 return err; 186 return err;
168} 187}
169 188
@@ -356,7 +375,7 @@ no_kprobe:
356 * - When the probed function returns, this probe 375 * - When the probed function returns, this probe
357 * causes the handlers to fire 376 * causes the handlers to fire
358 */ 377 */
359void __kprobes kretprobe_trampoline_holder(void) 378void kretprobe_trampoline_holder(void)
360{ 379{
361 asm volatile(".global kretprobe_trampoline\n" 380 asm volatile(".global kretprobe_trampoline\n"
362 "kretprobe_trampoline: bcr 0,0\n"); 381 "kretprobe_trampoline: bcr 0,0\n");
@@ -365,7 +384,8 @@ void __kprobes kretprobe_trampoline_holder(void)
365/* 384/*
366 * Called when the probe at kretprobe trampoline is hit 385 * Called when the probe at kretprobe trampoline is hit
367 */ 386 */
368int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 387static int __kprobes trampoline_probe_handler(struct kprobe *p,
388 struct pt_regs *regs)
369{ 389{
370 struct kretprobe_instance *ri = NULL; 390 struct kretprobe_instance *ri = NULL;
371 struct hlist_head *head, empty_rp; 391 struct hlist_head *head, empty_rp;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index f6d9bcc0f75b..52f57af252b4 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/kexec.h> 12#include <linux/kexec.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/reboot.h>
14#include <asm/cio.h> 15#include <asm/cio.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index d989ed45a7aa..39d1dd752529 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -30,6 +30,7 @@
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/moduleloader.h>
33 34
34#if 0 35#if 0
35#define DEBUGP printk 36#define DEBUGP printk
@@ -58,7 +59,7 @@ void module_free(struct module *mod, void *module_region)
58 table entries. */ 59 table entries. */
59} 60}
60 61
61static inline void 62static void
62check_rela(Elf_Rela *rela, struct module *me) 63check_rela(Elf_Rela *rela, struct module *me)
63{ 64{
64 struct mod_arch_syminfo *info; 65 struct mod_arch_syminfo *info;
@@ -181,7 +182,7 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
181 return -ENOEXEC; 182 return -ENOEXEC;
182} 183}
183 184
184static inline int 185static int
185apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 186apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
186 struct module *me) 187 struct module *me)
187{ 188{
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6603fbb41d07..5acfac654f9d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -144,7 +144,7 @@ static void default_idle(void)
144 144
145 trace_hardirqs_on(); 145 trace_hardirqs_on();
146 /* Wait for external, I/O or machine check interrupt. */ 146 /* Wait for external, I/O or machine check interrupt. */
147 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | 147 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
148 PSW_MASK_IO | PSW_MASK_EXT); 148 PSW_MASK_IO | PSW_MASK_EXT);
149} 149}
150 150
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
190 struct pt_regs regs; 190 struct pt_regs regs;
191 191
192 memset(&regs, 0, sizeof(regs)); 192 memset(&regs, 0, sizeof(regs));
193 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; 193 regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; 194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
195 regs.gprs[9] = (unsigned long) fn; 195 regs.gprs[9] = (unsigned long) fn;
196 regs.gprs[10] = (unsigned long) arg; 196 regs.gprs[10] = (unsigned long) arg;
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
deleted file mode 100644
index b81aa1f569ca..000000000000
--- a/arch/s390/kernel/profile.c
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * arch/s390/kernel/profile.c
3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
6 *
7 */
8#include <linux/proc_fs.h>
9#include <linux/profile.h>
10
11static struct proc_dir_entry * root_irq_dir;
12
13void init_irq_proc(void)
14{
15 /* create /proc/irq */
16 root_irq_dir = proc_mkdir("irq", NULL);
17
18 /* create /proc/irq/prof_cpu_mask */
19 create_prof_cpu_mask(root_irq_dir);
20}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8f36504075ed..2a8f0872ea8b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,15 +86,13 @@ FixPerRegisters(struct task_struct *task)
86 per_info->control_regs.bits.storage_alt_space_ctl = 0; 86 per_info->control_regs.bits.storage_alt_space_ctl = 0;
87} 87}
88 88
89void 89static void set_single_step(struct task_struct *task)
90set_single_step(struct task_struct *task)
91{ 90{
92 task->thread.per_info.single_step = 1; 91 task->thread.per_info.single_step = 1;
93 FixPerRegisters(task); 92 FixPerRegisters(task);
94} 93}
95 94
96void 95static void clear_single_step(struct task_struct *task)
97clear_single_step(struct task_struct *task)
98{ 96{
99 task->thread.per_info.single_step = 0; 97 task->thread.per_info.single_step = 0;
100 FixPerRegisters(task); 98 FixPerRegisters(task);
@@ -232,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
232 */ 230 */
233 if (addr == (addr_t) &dummy->regs.psw.mask && 231 if (addr == (addr_t) &dummy->regs.psw.mask &&
234#ifdef CONFIG_COMPAT 232#ifdef CONFIG_COMPAT
235 data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && 233 data != PSW_MASK_MERGE(psw_user32_bits, data) &&
236#endif 234#endif
237 data != PSW_MASK_MERGE(PSW_USER_BITS, data)) 235 data != PSW_MASK_MERGE(psw_user_bits, data))
238 /* Invalid psw mask. */ 236 /* Invalid psw mask. */
239 return -EINVAL; 237 return -EINVAL;
240#ifndef CONFIG_64BIT 238#ifndef CONFIG_64BIT
@@ -309,7 +307,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
309 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 307 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
310 if (copied != sizeof(tmp)) 308 if (copied != sizeof(tmp))
311 return -EIO; 309 return -EIO;
312 return put_user(tmp, (unsigned long __user *) data); 310 return put_user(tmp, (unsigned long __force __user *) data);
313 311
314 case PTRACE_PEEKUSR: 312 case PTRACE_PEEKUSR:
315 /* read the word at location addr in the USER area. */ 313 /* read the word at location addr in the USER area. */
@@ -331,7 +329,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
331 329
332 case PTRACE_PEEKUSR_AREA: 330 case PTRACE_PEEKUSR_AREA:
333 case PTRACE_POKEUSR_AREA: 331 case PTRACE_POKEUSR_AREA:
334 if (copy_from_user(&parea, (void __user *) addr, 332 if (copy_from_user(&parea, (void __force __user *) addr,
335 sizeof(parea))) 333 sizeof(parea)))
336 return -EFAULT; 334 return -EFAULT;
337 addr = parea.kernel_addr; 335 addr = parea.kernel_addr;
@@ -341,10 +339,11 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
341 if (request == PTRACE_PEEKUSR_AREA) 339 if (request == PTRACE_PEEKUSR_AREA)
342 ret = peek_user(child, addr, data); 340 ret = peek_user(child, addr, data);
343 else { 341 else {
344 addr_t tmp; 342 addr_t utmp;
345 if (get_user (tmp, (addr_t __user *) data)) 343 if (get_user(utmp,
344 (addr_t __force __user *) data))
346 return -EFAULT; 345 return -EFAULT;
347 ret = poke_user(child, addr, tmp); 346 ret = poke_user(child, addr, utmp);
348 } 347 }
349 if (ret) 348 if (ret)
350 return ret; 349 return ret;
@@ -394,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
394 if (addr == (addr_t) &dummy32->regs.psw.mask) { 393 if (addr == (addr_t) &dummy32->regs.psw.mask) {
395 /* Fake a 31 bit psw mask. */ 394 /* Fake a 31 bit psw mask. */
396 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); 395 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
397 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); 396 tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
398 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 397 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
399 /* Fake a 31 bit psw address. */ 398 /* Fake a 31 bit psw address. */
400 tmp = (__u32) task_pt_regs(child)->psw.addr | 399 tmp = (__u32) task_pt_regs(child)->psw.addr |
@@ -469,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
469 */ 468 */
470 if (addr == (addr_t) &dummy32->regs.psw.mask) { 469 if (addr == (addr_t) &dummy32->regs.psw.mask) {
471 /* Build a 64 bit psw mask from 31 bit mask. */ 470 /* Build a 64 bit psw mask from 31 bit mask. */
472 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) 471 if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
473 /* Invalid psw mask. */ 472 /* Invalid psw mask. */
474 return -EINVAL; 473 return -EINVAL;
475 task_pt_regs(child)->psw.mask = 474 task_pt_regs(child)->psw.mask =
476 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); 475 PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
477 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 476 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
478 /* Build a 64 bit psw address from 31 bit address. */ 477 /* Build a 64 bit psw address from 31 bit address. */
479 task_pt_regs(child)->psw.addr = 478 task_pt_regs(child)->psw.addr =
@@ -550,7 +549,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
550 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 549 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
551 if (copied != sizeof(tmp)) 550 if (copied != sizeof(tmp))
552 return -EIO; 551 return -EIO;
553 return put_user(tmp, (unsigned int __user *) data); 552 return put_user(tmp, (unsigned int __force __user *) data);
554 553
555 case PTRACE_PEEKUSR: 554 case PTRACE_PEEKUSR:
556 /* read the word at location addr in the USER area. */ 555 /* read the word at location addr in the USER area. */
@@ -571,7 +570,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
571 570
572 case PTRACE_PEEKUSR_AREA: 571 case PTRACE_PEEKUSR_AREA:
573 case PTRACE_POKEUSR_AREA: 572 case PTRACE_POKEUSR_AREA:
574 if (copy_from_user(&parea, (void __user *) addr, 573 if (copy_from_user(&parea, (void __force __user *) addr,
575 sizeof(parea))) 574 sizeof(parea)))
576 return -EFAULT; 575 return -EFAULT;
577 addr = parea.kernel_addr; 576 addr = parea.kernel_addr;
@@ -581,10 +580,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
581 if (request == PTRACE_PEEKUSR_AREA) 580 if (request == PTRACE_PEEKUSR_AREA)
582 ret = peek_user_emu31(child, addr, data); 581 ret = peek_user_emu31(child, addr, data);
583 else { 582 else {
584 __u32 tmp; 583 __u32 utmp;
585 if (get_user (tmp, (__u32 __user *) data)) 584 if (get_user(utmp,
585 (__u32 __force __user *) data))
586 return -EFAULT; 586 return -EFAULT;
587 ret = poke_user_emu31(child, addr, tmp); 587 ret = poke_user_emu31(child, addr, utmp);
588 } 588 }
589 if (ret) 589 if (ret)
590 return ret; 590 return ret;
@@ -595,17 +595,19 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
595 return 0; 595 return 0;
596 case PTRACE_GETEVENTMSG: 596 case PTRACE_GETEVENTMSG:
597 return put_user((__u32) child->ptrace_message, 597 return put_user((__u32) child->ptrace_message,
598 (unsigned int __user *) data); 598 (unsigned int __force __user *) data);
599 case PTRACE_GETSIGINFO: 599 case PTRACE_GETSIGINFO:
600 if (child->last_siginfo == NULL) 600 if (child->last_siginfo == NULL)
601 return -EINVAL; 601 return -EINVAL;
602 return copy_siginfo_to_user32((compat_siginfo_t __user *) data, 602 return copy_siginfo_to_user32((compat_siginfo_t
603 __force __user *) data,
603 child->last_siginfo); 604 child->last_siginfo);
604 case PTRACE_SETSIGINFO: 605 case PTRACE_SETSIGINFO:
605 if (child->last_siginfo == NULL) 606 if (child->last_siginfo == NULL)
606 return -EINVAL; 607 return -EINVAL;
607 return copy_siginfo_from_user32(child->last_siginfo, 608 return copy_siginfo_from_user32(child->last_siginfo,
608 (compat_siginfo_t __user *) data); 609 (compat_siginfo_t
610 __force __user *) data);
609 } 611 }
610 return ptrace_request(child, request, addr, data); 612 return ptrace_request(child, request, addr, data);
611} 613}
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S
deleted file mode 100644
index 8a87355161fa..000000000000
--- a/arch/s390/kernel/reset.S
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * arch/s390/kernel/reset.S
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11
12#ifdef CONFIG_64BIT
13
14 .globl reset_mcck_handler
15reset_mcck_handler:
16 basr %r13,0
170: lg %r15,__LC_PANIC_STACK # load panic stack
18 aghi %r15,-STACK_FRAME_OVERHEAD
19 lg %r1,s390_reset_mcck_handler-0b(%r13)
20 ltgr %r1,%r1
21 jz 1f
22 basr %r14,%r1
231: la %r1,4095
24 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
25 lpswe __LC_MCK_OLD_PSW
26
27 .globl s390_reset_mcck_handler
28s390_reset_mcck_handler:
29 .quad 0
30
31 .globl reset_pgm_handler
32reset_pgm_handler:
33 stmg %r0,%r15,__LC_SAVE_AREA
34 basr %r13,0
350: lg %r15,__LC_PANIC_STACK # load panic stack
36 aghi %r15,-STACK_FRAME_OVERHEAD
37 lg %r1,s390_reset_pgm_handler-0b(%r13)
38 ltgr %r1,%r1
39 jz 1f
40 basr %r14,%r1
41 lmg %r0,%r15,__LC_SAVE_AREA
42 lpswe __LC_PGM_OLD_PSW
431: lpswe disabled_wait_psw-0b(%r13)
44 .globl s390_reset_pgm_handler
45s390_reset_pgm_handler:
46 .quad 0
47 .align 8
48disabled_wait_psw:
49 .quad 0x0002000180000000,0x0000000000000000 + reset_pgm_handler
50
51#else /* CONFIG_64BIT */
52
53 .globl reset_mcck_handler
54reset_mcck_handler:
55 basr %r13,0
560: l %r15,__LC_PANIC_STACK # load panic stack
57 ahi %r15,-STACK_FRAME_OVERHEAD
58 l %r1,s390_reset_mcck_handler-0b(%r13)
59 ltr %r1,%r1
60 jz 1f
61 basr %r14,%r1
621: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
63 lpsw __LC_MCK_OLD_PSW
64
65 .globl s390_reset_mcck_handler
66s390_reset_mcck_handler:
67 .long 0
68
69 .globl reset_pgm_handler
70reset_pgm_handler:
71 stm %r0,%r15,__LC_SAVE_AREA
72 basr %r13,0
730: l %r15,__LC_PANIC_STACK # load panic stack
74 ahi %r15,-STACK_FRAME_OVERHEAD
75 l %r1,s390_reset_pgm_handler-0b(%r13)
76 ltr %r1,%r1
77 jz 1f
78 basr %r14,%r1
79 lm %r0,%r15,__LC_SAVE_AREA
80 lpsw __LC_PGM_OLD_PSW
81
821: lpsw disabled_wait_psw-0b(%r13)
83 .globl s390_reset_pgm_handler
84s390_reset_pgm_handler:
85 .long 0
86disabled_wait_psw:
87 .align 8
88 .long 0x000a0000,0x00000000 + reset_pgm_handler
89
90#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bc5beaa8f98e..acf93dba7727 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -125,14 +125,12 @@ void do_extint(struct pt_regs *regs, unsigned short code)
125 * Make sure that the i/o interrupt did not "overtake" 125 * Make sure that the i/o interrupt did not "overtake"
126 * the last HZ timer interrupt. 126 * the last HZ timer interrupt.
127 */ 127 */
128 account_ticks(); 128 account_ticks(S390_lowcore.int_clock);
129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 129 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
130 index = ext_hash(code); 130 index = ext_hash(code);
131 for (p = ext_int_hash[index]; p; p = p->next) { 131 for (p = ext_int_hash[index]; p; p = p->next) {
132 if (likely(p->code == code)) { 132 if (likely(p->code == code))
133 if (likely(p->handler)) 133 p->handler(code);
134 p->handler(code);
135 }
136 } 134 }
137 irq_exit(); 135 irq_exit();
138 set_irq_regs(old_regs); 136 set_irq_regs(old_regs);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5d8ee3baac14..03739813d3bf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -38,6 +38,8 @@
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/notifier.h> 39#include <linux/notifier.h>
40#include <linux/pfn.h> 40#include <linux/pfn.h>
41#include <linux/ctype.h>
42#include <linux/reboot.h>
41 43
42#include <asm/uaccess.h> 44#include <asm/uaccess.h>
43#include <asm/system.h> 45#include <asm/system.h>
@@ -49,6 +51,14 @@
49#include <asm/page.h> 51#include <asm/page.h>
50#include <asm/ptrace.h> 52#include <asm/ptrace.h>
51#include <asm/sections.h> 53#include <asm/sections.h>
54#include <asm/ebcdic.h>
55#include <asm/compat.h>
56
57long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
58 PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
59long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
60 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
61 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
52 62
53/* 63/*
54 * User copy operations. 64 * User copy operations.
@@ -117,9 +127,9 @@ void __devinit cpu_init (void)
117 */ 127 */
118char vmhalt_cmd[128] = ""; 128char vmhalt_cmd[128] = "";
119char vmpoff_cmd[128] = ""; 129char vmpoff_cmd[128] = "";
120char vmpanic_cmd[128] = ""; 130static char vmpanic_cmd[128] = "";
121 131
122static inline void strncpy_skip_quote(char *dst, char *src, int n) 132static void strncpy_skip_quote(char *dst, char *src, int n)
123{ 133{
124 int sx, dx; 134 int sx, dx;
125 135
@@ -275,10 +285,6 @@ static void __init conmode_default(void)
275} 285}
276 286
277#ifdef CONFIG_SMP 287#ifdef CONFIG_SMP
278extern void machine_restart_smp(char *);
279extern void machine_halt_smp(void);
280extern void machine_power_off_smp(void);
281
282void (*_machine_restart)(char *command) = machine_restart_smp; 288void (*_machine_restart)(char *command) = machine_restart_smp;
283void (*_machine_halt)(void) = machine_halt_smp; 289void (*_machine_halt)(void) = machine_halt_smp;
284void (*_machine_power_off)(void) = machine_power_off_smp; 290void (*_machine_power_off)(void) = machine_power_off_smp;
@@ -386,6 +392,84 @@ static int __init early_parse_ipldelay(char *p)
386} 392}
387early_param("ipldelay", early_parse_ipldelay); 393early_param("ipldelay", early_parse_ipldelay);
388 394
395#ifdef CONFIG_S390_SWITCH_AMODE
396unsigned int switch_amode = 0;
397EXPORT_SYMBOL_GPL(switch_amode);
398
399static void set_amode_and_uaccess(unsigned long user_amode,
400 unsigned long user32_amode)
401{
402 psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
403 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
404 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
405#ifdef CONFIG_COMPAT
406 psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
407 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
408 PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
409 psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
410 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
411 PSW32_MASK_PSTATE;
412#endif
413 psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
414 PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
415
416 if (MACHINE_HAS_MVCOS) {
417 printk("mvcos available.\n");
418 memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
419 } else {
420 printk("mvcos not available.\n");
421 memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
422 }
423}
424
425/*
426 * Switch kernel/user addressing modes?
427 */
428static int __init early_parse_switch_amode(char *p)
429{
430 switch_amode = 1;
431 return 0;
432}
433early_param("switch_amode", early_parse_switch_amode);
434
435#else /* CONFIG_S390_SWITCH_AMODE */
436static inline void set_amode_and_uaccess(unsigned long user_amode,
437 unsigned long user32_amode)
438{
439}
440#endif /* CONFIG_S390_SWITCH_AMODE */
441
442#ifdef CONFIG_S390_EXEC_PROTECT
443unsigned int s390_noexec = 0;
444EXPORT_SYMBOL_GPL(s390_noexec);
445
446/*
447 * Enable execute protection?
448 */
449static int __init early_parse_noexec(char *p)
450{
451 if (!strncmp(p, "off", 3))
452 return 0;
453 switch_amode = 1;
454 s390_noexec = 1;
455 return 0;
456}
457early_param("noexec", early_parse_noexec);
458#endif /* CONFIG_S390_EXEC_PROTECT */
459
460static void setup_addressing_mode(void)
461{
462 if (s390_noexec) {
463 printk("S390 execute protection active, ");
464 set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
465 return;
466 }
467 if (switch_amode) {
468 printk("S390 address spaces switched, ");
469 set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
470 }
471}
472
389static void __init 473static void __init
390setup_lowcore(void) 474setup_lowcore(void)
391{ 475{
@@ -402,19 +486,21 @@ setup_lowcore(void)
402 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; 486 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
403 lc->restart_psw.addr = 487 lc->restart_psw.addr =
404 PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 488 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
405 lc->external_new_psw.mask = PSW_KERNEL_BITS; 489 if (switch_amode)
490 lc->restart_psw.mask |= PSW_ASC_HOME;
491 lc->external_new_psw.mask = psw_kernel_bits;
406 lc->external_new_psw.addr = 492 lc->external_new_psw.addr =
407 PSW_ADDR_AMODE | (unsigned long) ext_int_handler; 493 PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
408 lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; 494 lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
409 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; 495 lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
410 lc->program_new_psw.mask = PSW_KERNEL_BITS; 496 lc->program_new_psw.mask = psw_kernel_bits;
411 lc->program_new_psw.addr = 497 lc->program_new_psw.addr =
412 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; 498 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
413 lc->mcck_new_psw.mask = 499 lc->mcck_new_psw.mask =
414 PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; 500 psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
415 lc->mcck_new_psw.addr = 501 lc->mcck_new_psw.addr =
416 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 502 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
417 lc->io_new_psw.mask = PSW_KERNEL_BITS; 503 lc->io_new_psw.mask = psw_kernel_bits;
418 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 504 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
419 lc->ipl_device = S390_lowcore.ipl_device; 505 lc->ipl_device = S390_lowcore.ipl_device;
420 lc->jiffy_timer = -1LL; 506 lc->jiffy_timer = -1LL;
@@ -439,7 +525,7 @@ setup_lowcore(void)
439static void __init 525static void __init
440setup_resources(void) 526setup_resources(void)
441{ 527{
442 struct resource *res; 528 struct resource *res, *sub_res;
443 int i; 529 int i;
444 530
445 code_resource.start = (unsigned long) &_text; 531 code_resource.start = (unsigned long) &_text;
@@ -464,8 +550,38 @@ setup_resources(void)
464 res->start = memory_chunk[i].addr; 550 res->start = memory_chunk[i].addr;
465 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; 551 res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
466 request_resource(&iomem_resource, res); 552 request_resource(&iomem_resource, res);
467 request_resource(res, &code_resource); 553
468 request_resource(res, &data_resource); 554 if (code_resource.start >= res->start &&
555 code_resource.start <= res->end &&
556 code_resource.end > res->end) {
557 sub_res = alloc_bootmem_low(sizeof(struct resource));
558 memcpy(sub_res, &code_resource,
559 sizeof(struct resource));
560 sub_res->end = res->end;
561 code_resource.start = res->end + 1;
562 request_resource(res, sub_res);
563 }
564
565 if (code_resource.start >= res->start &&
566 code_resource.start <= res->end &&
567 code_resource.end <= res->end)
568 request_resource(res, &code_resource);
569
570 if (data_resource.start >= res->start &&
571 data_resource.start <= res->end &&
572 data_resource.end > res->end) {
573 sub_res = alloc_bootmem_low(sizeof(struct resource));
574 memcpy(sub_res, &data_resource,
575 sizeof(struct resource));
576 sub_res->end = res->end;
577 data_resource.start = res->end + 1;
578 request_resource(res, sub_res);
579 }
580
581 if (data_resource.start >= res->start &&
582 data_resource.start <= res->end &&
583 data_resource.end <= res->end)
584 request_resource(res, &data_resource);
469 } 585 }
470} 586}
471 587
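For illustration (not part of the patch), a hypothetical layout shows what the splitting above does. Assume memory chunk 0 covers 0x00000000-0x0fffffff, chunk 1 covers 0x10000000-0x1fffffff, and code_resource spans 0x00100000-0x101fffff:

    chunk 0: code_resource crosses the chunk end, so a bootmem copy
             (sub_res) covering 0x00100000-0x0fffffff is requested under
             chunk 0 and code_resource.start is advanced to 0x10000000.
    chunk 1: code_resource now fits completely, so the remaining range
             0x10000000-0x101fffff is requested under chunk 1.

data_resource is handled the same way by the second pair of checks.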
@@ -495,16 +611,13 @@ static void __init setup_memory_end(void)
495 } 611 }
496 if (!memory_end) 612 if (!memory_end)
497 memory_end = memory_size; 613 memory_end = memory_size;
498 if (real_size > memory_end)
499 printk("More memory detected than supported. Unused: %luk\n",
500 (real_size - memory_end) >> 10);
501} 614}
502 615
503static void __init 616static void __init
504setup_memory(void) 617setup_memory(void)
505{ 618{
506 unsigned long bootmap_size; 619 unsigned long bootmap_size;
507 unsigned long start_pfn, end_pfn, init_pfn; 620 unsigned long start_pfn, end_pfn;
508 int i; 621 int i;
509 622
510 /* 623 /*
@@ -514,10 +627,6 @@ setup_memory(void)
514 start_pfn = PFN_UP(__pa(&_end)); 627 start_pfn = PFN_UP(__pa(&_end));
515 end_pfn = max_pfn = PFN_DOWN(memory_end); 628 end_pfn = max_pfn = PFN_DOWN(memory_end);
516 629
517 /* Initialize storage key for kernel pages */
518 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
519 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
520
521#ifdef CONFIG_BLK_DEV_INITRD 630#ifdef CONFIG_BLK_DEV_INITRD
522 /* 631 /*
523 * Move the initrd in case the bitmap of the bootmem allocater 632 * Move the initrd in case the bitmap of the bootmem allocater
@@ -651,6 +760,7 @@ setup_arch(char **cmdline_p)
651 parse_early_param(); 760 parse_early_param();
652 761
653 setup_memory_end(); 762 setup_memory_end();
763 setup_addressing_mode();
654 setup_memory(); 764 setup_memory();
655 setup_resources(); 765 setup_resources();
656 setup_lowcore(); 766 setup_lowcore();
@@ -694,6 +804,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
694 struct cpuinfo_S390 *cpuinfo; 804 struct cpuinfo_S390 *cpuinfo;
695 unsigned long n = (unsigned long) v - 1; 805 unsigned long n = (unsigned long) v - 1;
696 806
807 s390_adjust_jiffies();
697 preempt_disable(); 808 preempt_disable();
698 if (!n) { 809 if (!n) {
699 seq_printf(m, "vendor_id : IBM/S390\n" 810 seq_printf(m, "vendor_id : IBM/S390\n"
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4c8a7954ef48..554f9cf7499c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
119 119
120 /* Copy a 'clean' PSW mask to the user to avoid leaking 120 /* Copy a 'clean' PSW mask to the user to avoid leaking
121 information about whether PER is currently on. */ 121 information about whether PER is currently on. */
122 user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); 122 user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
123 user_sregs.regs.psw.addr = regs->psw.addr; 123 user_sregs.regs.psw.addr = regs->psw.addr;
124 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs)); 124 memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
125 memcpy(&user_sregs.regs.acrs, current->thread.acrs, 125 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c0cd255fddbd..65b52320d145 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -22,23 +22,23 @@
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/init.h> 24#include <linux/init.h>
25
26#include <linux/mm.h> 25#include <linux/mm.h>
27#include <linux/spinlock.h> 26#include <linux/spinlock.h>
28#include <linux/kernel_stat.h> 27#include <linux/kernel_stat.h>
29#include <linux/smp_lock.h> 28#include <linux/smp_lock.h>
30
31#include <linux/delay.h> 29#include <linux/delay.h>
32#include <linux/cache.h> 30#include <linux/cache.h>
33#include <linux/interrupt.h> 31#include <linux/interrupt.h>
34#include <linux/cpu.h> 32#include <linux/cpu.h>
35 33#include <linux/timex.h>
34#include <asm/setup.h>
36#include <asm/sigp.h> 35#include <asm/sigp.h>
37#include <asm/pgalloc.h> 36#include <asm/pgalloc.h>
38#include <asm/irq.h> 37#include <asm/irq.h>
39#include <asm/s390_ext.h> 38#include <asm/s390_ext.h>
40#include <asm/cpcmd.h> 39#include <asm/cpcmd.h>
41#include <asm/tlbflush.h> 40#include <asm/tlbflush.h>
41#include <asm/timer.h>
42 42
43extern volatile int __cpu_logical_map[]; 43extern volatile int __cpu_logical_map[];
44 44
@@ -53,12 +53,6 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
53 53
54static struct task_struct *current_set[NR_CPUS]; 54static struct task_struct *current_set[NR_CPUS];
55 55
56/*
57 * Reboot, halt and power_off routines for SMP.
58 */
59extern char vmhalt_cmd[];
60extern char vmpoff_cmd[];
61
62static void smp_ext_bitcall(int, ec_bit_sig); 56static void smp_ext_bitcall(int, ec_bit_sig);
63static void smp_ext_bitcall_others(ec_bit_sig); 57static void smp_ext_bitcall_others(ec_bit_sig);
64 58
@@ -200,7 +194,7 @@ int smp_call_function_on(void (*func) (void *info), void *info,
200} 194}
201EXPORT_SYMBOL(smp_call_function_on); 195EXPORT_SYMBOL(smp_call_function_on);
202 196
203static inline void do_send_stop(void) 197static void do_send_stop(void)
204{ 198{
205 int cpu, rc; 199 int cpu, rc;
206 200
@@ -214,7 +208,7 @@ static inline void do_send_stop(void)
214 } 208 }
215} 209}
216 210
217static inline void do_store_status(void) 211static void do_store_status(void)
218{ 212{
219 int cpu, rc; 213 int cpu, rc;
220 214
@@ -230,7 +224,7 @@ static inline void do_store_status(void)
230 } 224 }
231} 225}
232 226
233static inline void do_wait_for_stop(void) 227static void do_wait_for_stop(void)
234{ 228{
235 int cpu; 229 int cpu;
236 230
@@ -250,7 +244,7 @@ static inline void do_wait_for_stop(void)
250void smp_send_stop(void) 244void smp_send_stop(void)
251{ 245{
252 /* Disable all interrupts/machine checks */ 246 /* Disable all interrupts/machine checks */
253 __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); 247 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
254 248
255 /* write magic number to zero page (absolute 0) */ 249 /* write magic number to zero page (absolute 0) */
256 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; 250 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
@@ -298,7 +292,7 @@ void machine_power_off_smp(void)
298 * cpus are handled. 292 * cpus are handled.
299 */ 293 */
300 294
301void do_ext_call_interrupt(__u16 code) 295static void do_ext_call_interrupt(__u16 code)
302{ 296{
303 unsigned long bits; 297 unsigned long bits;
304 298
@@ -385,7 +379,7 @@ struct ec_creg_mask_parms {
385/* 379/*
386 * callback for setting/clearing control bits 380 * callback for setting/clearing control bits
387 */ 381 */
388void smp_ctl_bit_callback(void *info) { 382static void smp_ctl_bit_callback(void *info) {
389 struct ec_creg_mask_parms *pp = info; 383 struct ec_creg_mask_parms *pp = info;
390 unsigned long cregs[16]; 384 unsigned long cregs[16];
391 int i; 385 int i;
@@ -458,17 +452,15 @@ __init smp_count_cpus(void)
458/* 452/*
459 * Activate a secondary processor. 453 * Activate a secondary processor.
460 */ 454 */
461extern void init_cpu_timer(void);
462extern void init_cpu_vtimer(void);
463
464int __devinit start_secondary(void *cpuvoid) 455int __devinit start_secondary(void *cpuvoid)
465{ 456{
466 /* Setup the cpu */ 457 /* Setup the cpu */
467 cpu_init(); 458 cpu_init();
468 preempt_disable(); 459 preempt_disable();
469 /* init per CPU timer */ 460 /* Enable TOD clock interrupts on the secondary cpu. */
470 init_cpu_timer(); 461 init_cpu_timer();
471#ifdef CONFIG_VIRT_TIMER 462#ifdef CONFIG_VIRT_TIMER
463 /* Enable cpu timer interrupts on the secondary cpu. */
472 init_cpu_vtimer(); 464 init_cpu_vtimer();
473#endif 465#endif
474 /* Enable pfault pseudo page faults on this cpu. */ 466 /* Enable pfault pseudo page faults on this cpu. */
@@ -542,7 +534,7 @@ smp_put_cpu(int cpu)
542 spin_unlock_irqrestore(&smp_reserve_lock, flags); 534 spin_unlock_irqrestore(&smp_reserve_lock, flags);
543} 535}
544 536
545static inline int 537static int
546cpu_stopped(int cpu) 538cpu_stopped(int cpu)
547{ 539{
548 __u32 status; 540 __u32 status;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 0d14a4789bf2..2e5c65a1863e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,11 +11,11 @@
11#include <linux/stacktrace.h> 11#include <linux/stacktrace.h>
12#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
13 13
14static inline unsigned long save_context_stack(struct stack_trace *trace, 14static unsigned long save_context_stack(struct stack_trace *trace,
15 unsigned int *skip, 15 unsigned int *skip,
16 unsigned long sp, 16 unsigned long sp,
17 unsigned long low, 17 unsigned long low,
18 unsigned long high) 18 unsigned long high)
19{ 19{
20 struct stack_frame *sf; 20 struct stack_frame *sf;
21 struct pt_regs *regs; 21 struct pt_regs *regs;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 6cceed4df73e..3b91f27ab202 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -37,11 +37,15 @@
37#include <asm/irq.h> 37#include <asm/irq.h>
38#include <asm/irq_regs.h> 38#include <asm/irq_regs.h>
39#include <asm/timer.h> 39#include <asm/timer.h>
40#include <asm/etr.h>
40 41
41/* change this if you have some constant time drift */ 42/* change this if you have some constant time drift */
42#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 43#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
43#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) 44#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
44 45
46/* The value of the TOD clock for 1.1.1970. */
47#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
48
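A note on the units (not part of the patch): bit 51 of the z/Architecture TOD clock has a period of one microsecond, so one microsecond corresponds to 2^12 = 4096 TOD units; that is why CLK_TICKS_PER_JIFFY shifts the microseconds per jiffy left by 12. The epoch constant follows from the TOD epoch of 1900-01-01 00:00 UTC:

    70 years x 365 days + 17 leap days = 25,567 days = 2,208,988,800 seconds
    2,208,988,800 s * 1,000,000 us/s * 4,096 TOD units/us
        = 9,048,018,124,800,000,000 = 0x7d91048bca000000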
45/* 49/*
46 * Create a small time difference between the timer interrupts 50 * Create a small time difference between the timer interrupts
47 * on the different cpus to avoid lock contention. 51 * on the different cpus to avoid lock contention.
@@ -51,6 +55,7 @@
51#define TICK_SIZE tick 55#define TICK_SIZE tick
52 56
53static ext_int_info_t ext_int_info_cc; 57static ext_int_info_t ext_int_info_cc;
58static ext_int_info_t ext_int_etr_cc;
54static u64 init_timer_cc; 59static u64 init_timer_cc;
55static u64 jiffies_timer_cc; 60static u64 jiffies_timer_cc;
56static u64 xtime_cc; 61static u64 xtime_cc;
@@ -89,29 +94,21 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
89#define s390_do_profile() do { ; } while(0) 94#define s390_do_profile() do { ; } while(0)
90#endif /* CONFIG_PROFILING */ 95#endif /* CONFIG_PROFILING */
91 96
92
93/* 97/*
94 * timer_interrupt() needs to keep up the real-time clock, 98 * Advance the per cpu tick counter up to the time given with the
95 * as well as call the "do_timer()" routine every clocktick 99 * "time" argument. The per cpu update consists of accounting
100 * the virtual cpu time, calling update_process_times and calling
101 * the profiling hook. If xtime is before time it is advanced as well.
96 */ 102 */
97void account_ticks(void) 103void account_ticks(u64 time)
98{ 104{
99 __u64 tmp;
100 __u32 ticks; 105 __u32 ticks;
106 __u64 tmp;
101 107
102 /* Calculate how many ticks have passed. */ 108 /* Calculate how many ticks have passed. */
103 if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) { 109 if (time < S390_lowcore.jiffy_timer)
104 /*
105 * We have to program the clock comparator even if
106 * no tick has passed. That happens if e.g. an i/o
107 * interrupt wakes up an idle processor that has
108 * switched off its hz timer.
109 */
110 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
111 asm volatile ("SCKC %0" : : "m" (tmp));
112 return; 110 return;
113 } 111 tmp = time - S390_lowcore.jiffy_timer;
114 tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
115 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ 112 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
116 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; 113 ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
117 S390_lowcore.jiffy_timer += 114 S390_lowcore.jiffy_timer +=
@@ -124,10 +121,6 @@ void account_ticks(void)
124 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; 121 S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
125 } 122 }
126 123
127 /* set clock comparator for next tick */
128 tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
129 asm volatile ("SCKC %0" : : "m" (tmp));
130
131#ifdef CONFIG_SMP 124#ifdef CONFIG_SMP
132 /* 125 /*
133 * Do not rely on the boot cpu to do the calls to do_timer. 126 * Do not rely on the boot cpu to do the calls to do_timer.
@@ -173,7 +166,7 @@ int sysctl_hz_timer = 1;
173 * Stop the HZ tick on the current CPU. 166 * Stop the HZ tick on the current CPU.
174 * Only cpu_idle may call this function. 167 * Only cpu_idle may call this function.
175 */ 168 */
176static inline void stop_hz_timer(void) 169static void stop_hz_timer(void)
177{ 170{
178 unsigned long flags; 171 unsigned long flags;
179 unsigned long seq, next; 172 unsigned long seq, next;
@@ -210,20 +203,21 @@ static inline void stop_hz_timer(void)
210 if (timer >= jiffies_timer_cc) 203 if (timer >= jiffies_timer_cc)
211 todval = timer; 204 todval = timer;
212 } 205 }
213 asm volatile ("SCKC %0" : : "m" (todval)); 206 set_clock_comparator(todval);
214} 207}
215 208
216/* 209/*
217 * Start the HZ tick on the current CPU. 210 * Start the HZ tick on the current CPU.
218 * Only cpu_idle may call this function. 211 * Only cpu_idle may call this function.
219 */ 212 */
220static inline void start_hz_timer(void) 213static void start_hz_timer(void)
221{ 214{
222 BUG_ON(!in_interrupt()); 215 BUG_ON(!in_interrupt());
223 216
224 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) 217 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
225 return; 218 return;
226 account_ticks(); 219 account_ticks(get_clock());
220 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
227 cpu_clear(smp_processor_id(), nohz_cpu_mask); 221 cpu_clear(smp_processor_id(), nohz_cpu_mask);
228} 222}
229 223
@@ -245,7 +239,7 @@ static struct notifier_block nohz_idle_nb = {
245 .notifier_call = nohz_idle_notify, 239 .notifier_call = nohz_idle_notify,
246}; 240};
247 241
248void __init nohz_init(void) 242static void __init nohz_init(void)
249{ 243{
250 if (register_idle_notifier(&nohz_idle_nb)) 244 if (register_idle_notifier(&nohz_idle_nb))
251 panic("Couldn't register idle notifier"); 245 panic("Couldn't register idle notifier");
@@ -254,24 +248,57 @@ void __init nohz_init(void)
254#endif 248#endif
255 249
256/* 250/*
257 * Start the clock comparator on the current CPU. 251 * Set up per cpu jiffy timer and set the clock comparator.
252 */
253static void setup_jiffy_timer(void)
254{
255 /* Set up clock comparator to next jiffy. */
256 S390_lowcore.jiffy_timer =
257 jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
258 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
259}
260
261/*
262 * Set up lowcore and control register of the current cpu to
263 * enable TOD clock and clock comparator interrupts.
258 */ 264 */
259void init_cpu_timer(void) 265void init_cpu_timer(void)
260{ 266{
261 unsigned long cr0; 267 setup_jiffy_timer();
262 __u64 timer;
263 268
264 timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY; 269 /* Enable clock comparator timer interrupt. */
265 S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY; 270 __ctl_set_bit(0,11);
266 timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION; 271
267 asm volatile ("SCKC %0" : : "m" (timer)); 272 /* Always allow ETR external interrupts, even without an ETR. */
268 /* allow clock comparator timer interrupt */ 273 __ctl_set_bit(0, 4);
269 __ctl_store(cr0, 0, 0);
270 cr0 |= 0x800;
271 __ctl_load(cr0, 0, 0);
272} 274}
273 275
274extern void vtime_init(void); 276static void clock_comparator_interrupt(__u16 code)
277{
278 /* set clock comparator for next tick */
279 set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
280}
281
282static void etr_reset(void);
283static void etr_init(void);
284static void etr_ext_handler(__u16);
285
286/*
287 * Get the TOD clock running.
288 */
289static u64 __init reset_tod_clock(void)
290{
291 u64 time;
292
293 etr_reset();
294 if (store_clock(&time) == 0)
295 return time;
296 /* TOD clock not running. Set the clock to Unix Epoch. */
297 if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
298 panic("TOD clock not operational.");
299
300 return TOD_UNIX_EPOCH;
301}
275 302
276static cycle_t read_tod_clock(void) 303static cycle_t read_tod_clock(void)
277{ 304{
@@ -295,48 +322,31 @@ static struct clocksource clocksource_tod = {
295 */ 322 */
296void __init time_init(void) 323void __init time_init(void)
297{ 324{
298 __u64 set_time_cc; 325 init_timer_cc = reset_tod_clock();
299 int cc; 326 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
300
301 /* kick the TOD clock */
302 asm volatile(
303 " stck 0(%2)\n"
304 " ipm %0\n"
305 " srl %0,28"
306 : "=d" (cc), "=m" (init_timer_cc)
307 : "a" (&init_timer_cc) : "cc");
308 switch (cc) {
309 case 0: /* clock in set state: all is fine */
310 break;
311 case 1: /* clock in non-set state: FIXME */
312 printk("time_init: TOD clock in non-set state\n");
313 break;
314 case 2: /* clock in error state: FIXME */
315 printk("time_init: TOD clock in error state\n");
316 break;
317 case 3: /* clock in stopped or not-operational state: FIXME */
318 printk("time_init: TOD clock stopped/non-operational\n");
319 break;
320 }
321 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; 327 jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
322 328
323 /* set xtime */ 329 /* set xtime */
324 xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; 330 tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime);
325 set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
326 (0x3c26700LL*1000000*4096);
327 tod_to_timeval(set_time_cc, &xtime);
328 set_normalized_timespec(&wall_to_monotonic, 331 set_normalized_timespec(&wall_to_monotonic,
329 -xtime.tv_sec, -xtime.tv_nsec); 332 -xtime.tv_sec, -xtime.tv_nsec);
330 333
331 /* request the clock comparator external interrupt */ 334 /* request the clock comparator external interrupt */
332 if (register_early_external_interrupt(0x1004, NULL, 335 if (register_early_external_interrupt(0x1004,
336 clock_comparator_interrupt,
333 &ext_int_info_cc) != 0) 337 &ext_int_info_cc) != 0)
334 panic("Couldn't request external interrupt 0x1004"); 338 panic("Couldn't request external interrupt 0x1004");
335 339
336 if (clocksource_register(&clocksource_tod) != 0) 340 if (clocksource_register(&clocksource_tod) != 0)
337 panic("Could not register TOD clock source"); 341 panic("Could not register TOD clock source");
338 342
339 init_cpu_timer(); 343 /* request the etr external interrupt */
344 if (register_early_external_interrupt(0x1406, etr_ext_handler,
345 &ext_int_etr_cc) != 0)
346 panic("Couldn't request external interrupt 0x1406");
347
348 /* Enable TOD clock interrupts on the boot cpu. */
349 init_cpu_timer();
340 350
341#ifdef CONFIG_NO_IDLE_HZ 351#ifdef CONFIG_NO_IDLE_HZ
342 nohz_init(); 352 nohz_init();
@@ -345,5 +355,1048 @@ void __init time_init(void)
345#ifdef CONFIG_VIRT_TIMER 355#ifdef CONFIG_VIRT_TIMER
346 vtime_init(); 356 vtime_init();
347#endif 357#endif
358 etr_init();
359}
360
361/*
362 * External Time Reference (ETR) code.
363 */
364static int etr_port0_online;
365static int etr_port1_online;
366
367static int __init early_parse_etr(char *p)
368{
369 if (strncmp(p, "off", 3) == 0)
370 etr_port0_online = etr_port1_online = 0;
371 else if (strncmp(p, "port0", 5) == 0)
372 etr_port0_online = 1;
373 else if (strncmp(p, "port1", 5) == 0)
374 etr_port1_online = 1;
375 else if (strncmp(p, "on", 2) == 0)
376 etr_port0_online = etr_port1_online = 1;
377 return 0;
378}
379early_param("etr", early_parse_etr);
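For illustration (not part of the patch), the parser above accepts the following values on the kernel command line:

    etr=off      both ETR ports stay offline (same as the default)
    etr=port0    bring only port 0 online
    etr=port1    bring only port 1 online
    etr=on       bring both ports online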
380
381enum etr_event {
382 ETR_EVENT_PORT0_CHANGE,
383 ETR_EVENT_PORT1_CHANGE,
384 ETR_EVENT_PORT_ALERT,
385 ETR_EVENT_SYNC_CHECK,
386 ETR_EVENT_SWITCH_LOCAL,
387 ETR_EVENT_UPDATE,
388};
389
390enum etr_flags {
391 ETR_FLAG_ENOSYS,
392 ETR_FLAG_EACCES,
393 ETR_FLAG_STEAI,
394};
395
396/*
397 * Valid bit combinations of the eacr register are (x = don't care):
398 * e0 e1 dp p0 p1 ea es sl
399 * 0 0 x 0 0 0 0 0 initial, disabled state
400 * 0 0 x 0 1 1 0 0 port 1 online
401 * 0 0 x 1 0 1 0 0 port 0 online
402 * 0 0 x 1 1 1 0 0 both ports online
403 * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode
404 * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode
405 * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync
406 * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync
407 * 0 1 x 1 1 1 0 0 both ports online, port 1 usable
408 * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync
409 * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync
410 * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode
411 * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode
412 * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync
413 * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync
414 * 1 0 x 1 1 1 0 0 both ports online, port 0 usable
415 * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync
416 * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync
417 * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync
418 * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync
419 */
420static struct etr_eacr etr_eacr;
421static u64 etr_tolec; /* time of last eacr update */
422static unsigned long etr_flags;
423static struct etr_aib etr_port0;
424static int etr_port0_uptodate;
425static struct etr_aib etr_port1;
426static int etr_port1_uptodate;
427static unsigned long etr_events;
428static struct timer_list etr_timer;
429static struct tasklet_struct etr_tasklet;
430static DEFINE_PER_CPU(atomic_t, etr_sync_word);
431
432static void etr_timeout(unsigned long dummy);
433static void etr_tasklet_fn(unsigned long dummy);
434
435/*
436 * The etr get_clock function. It will write the current clock value
437 * to the clock pointer and return 0 if the clock is in sync with the
 438 * external time source. In local mode it returns -ENOSYS, -EACCES if
 439 * no port is usable and -EAGAIN if the clock is not yet in sync with
 440 * the external reference. This function is what ETR is all about..
441 */
442int get_sync_clock(unsigned long long *clock)
443{
444 atomic_t *sw_ptr;
445 unsigned int sw0, sw1;
446
447 sw_ptr = &get_cpu_var(etr_sync_word);
448 sw0 = atomic_read(sw_ptr);
449 *clock = get_clock();
450 sw1 = atomic_read(sw_ptr);
 451	put_cpu_var(etr_sync_word);
452 if (sw0 == sw1 && (sw0 & 0x80000000U))
453 /* Success: time is in sync. */
454 return 0;
455 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
456 return -ENOSYS;
457 if (test_bit(ETR_FLAG_EACCES, &etr_flags))
458 return -EACCES;
459 return -EAGAIN;
460}
461EXPORT_SYMBOL(get_sync_clock);
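
A minimal caller sketch for the interface exported above; the wrapper name and the
retry policy are illustrative assumptions, only get_sync_clock() itself comes from
this patch:

	/* Hypothetical caller: retry while a resynchronization is in progress. */
	static int read_synced_clock(unsigned long long *tod)
	{
		int rc;

		do {
			rc = get_sync_clock(tod);	/* 0 means in sync */
			if (rc == -EAGAIN)
				cpu_relax();		/* sync in progress */
		} while (rc == -EAGAIN);
		return rc;	/* 0, or -ENOSYS / -EACCES when unusable */
	}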
462
463/*
464 * Make get_sync_clock return -EAGAIN.
465 */
466static void etr_disable_sync_clock(void *dummy)
467{
468 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
469 /*
470 * Clear the in-sync bit 2^31. All get_sync_clock calls will
471 * fail until the sync bit is turned back on. In addition
472 * increase the "sequence" counter to avoid the race of an
473 * etr event and the complete recovery against get_sync_clock.
474 */
475 atomic_clear_mask(0x80000000, sw_ptr);
476 atomic_inc(sw_ptr);
477}
478
479/*
480 * Make get_sync_clock return 0 again.
481 * Needs to be called from a context disabled for preemption.
482 */
483static void etr_enable_sync_clock(void)
484{
485 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
486 atomic_set_mask(0x80000000, sw_ptr);
487}
488
489/*
490 * Reset ETR attachment.
491 */
492static void etr_reset(void)
493{
494 etr_eacr = (struct etr_eacr) {
495 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
496 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
497 .es = 0, .sl = 0 };
498 if (etr_setr(&etr_eacr) == 0)
499 etr_tolec = get_clock();
500 else {
501 set_bit(ETR_FLAG_ENOSYS, &etr_flags);
502 if (etr_port0_online || etr_port1_online) {
503 printk(KERN_WARNING "Running on non ETR capable "
504 "machine, only local mode available.\n");
505 etr_port0_online = etr_port1_online = 0;
506 }
507 }
508}
509
510static void etr_init(void)
511{
512 struct etr_aib aib;
513
514 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
515 return;
516 /* Check if this machine has the steai instruction. */
517 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
518 set_bit(ETR_FLAG_STEAI, &etr_flags);
519 setup_timer(&etr_timer, etr_timeout, 0UL);
520 tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
521 if (!etr_port0_online && !etr_port1_online)
522 set_bit(ETR_FLAG_EACCES, &etr_flags);
523 if (etr_port0_online) {
524 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
525 tasklet_hi_schedule(&etr_tasklet);
526 }
527 if (etr_port1_online) {
528 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
529 tasklet_hi_schedule(&etr_tasklet);
530 }
531}
532
533/*
534 * Two sorts of ETR machine checks. The architecture reads:
 535 * "When a machine-check interruption occurs and if a switch-to-local or
536 * ETR-sync-check interrupt request is pending but disabled, this pending
537 * disabled interruption request is indicated and is cleared".
538 * Which means that we can get etr_switch_to_local events from the machine
539 * check handler although the interruption condition is disabled. Lovely..
540 */
541
542/*
543 * Switch to local machine check. This is called when the last usable
544 * ETR port goes inactive. After switch to local the clock is not in sync.
545 */
546void etr_switch_to_local(void)
547{
548 if (!etr_eacr.sl)
549 return;
550 etr_disable_sync_clock(NULL);
551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
552 tasklet_hi_schedule(&etr_tasklet);
553}
554
555/*
556 * ETR sync check machine check. This is called when the ETR OTE and the
557 * local clock OTE are farther apart than the ETR sync check tolerance.
 558 * After an ETR sync check the clock is not in sync. The machine check
 559 * is broadcast to all cpus at the same time.
560 */
561void etr_sync_check(void)
562{
563 if (!etr_eacr.es)
564 return;
565 etr_disable_sync_clock(NULL);
566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
567 tasklet_hi_schedule(&etr_tasklet);
568}
569
570/*
571 * ETR external interrupt. There are two causes:
572 * 1) port state change, check the usability of the port
573 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
574 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
575 * or ETR-data word 4 (edf4) has changed.
576 */
577static void etr_ext_handler(__u16 code)
578{
579 struct etr_interruption_parameter *intparm =
580 (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
581
582 if (intparm->pc0)
583 /* ETR port 0 state change. */
584 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
585 if (intparm->pc1)
586 /* ETR port 1 state change. */
587 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
588 if (intparm->eai)
589 /*
590 * ETR port alert on either port 0, 1 or both.
591 * Both ports are not up-to-date now.
592 */
593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
594 tasklet_hi_schedule(&etr_tasklet);
595}
596
597static void etr_timeout(unsigned long dummy)
598{
599 set_bit(ETR_EVENT_UPDATE, &etr_events);
600 tasklet_hi_schedule(&etr_tasklet);
601}
602
603/*
 604 * Check if the etr mode is pps.
605 */
606static inline int etr_mode_is_pps(struct etr_eacr eacr)
607{
608 return eacr.es && !eacr.sl;
609}
610
611/*
612 * Check if the etr mode is etr.
613 */
614static inline int etr_mode_is_etr(struct etr_eacr eacr)
615{
616 return eacr.es && eacr.sl;
617}
618
619/*
620 * Check if the port can be used for TOD synchronization.
621 * For PPS mode the port has to receive OTEs. For ETR mode
622 * the port has to receive OTEs, the ETR stepping bit has to
623 * be zero and the validity bits for data frame 1, 2, and 3
624 * have to be 1.
625 */
626static int etr_port_valid(struct etr_aib *aib, int port)
627{
628 unsigned int psc;
629
630 /* Check that this port is receiving OTEs. */
631 if (aib->tsp == 0)
632 return 0;
633
634 psc = port ? aib->esw.psc1 : aib->esw.psc0;
635 if (psc == etr_lpsc_pps_mode)
636 return 1;
637 if (psc == etr_lpsc_operational_step)
638 return !aib->esw.y && aib->slsw.v1 &&
639 aib->slsw.v2 && aib->slsw.v3;
640 return 0;
641}
642
643/*
644 * Check if two ports are on the same network.
645 */
646static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
647{
648 // FIXME: any other fields we have to compare?
649 return aib1->edf1.net_id == aib2->edf1.net_id;
650}
651
652/*
 653 * Wrapper for etr_steai that converts physical port states
654 * to logical port states to be consistent with the output
655 * of stetr (see etr_psc vs. etr_lpsc).
656 */
657static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
658{
659 BUG_ON(etr_steai(aib, func) != 0);
660 /* Convert port state to logical port state. */
661 if (aib->esw.psc0 == 1)
662 aib->esw.psc0 = 2;
663 else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
664 aib->esw.psc0 = 1;
665 if (aib->esw.psc1 == 1)
666 aib->esw.psc1 = 2;
667 else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
668 aib->esw.psc1 = 1;
669}
670
671/*
672 * Check if the aib a2 is still connected to the same attachment as
673 * aib a1, the etv values differ by one and a2 is valid.
674 */
675static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
676{
677 int state_a1, state_a2;
678
679 /* Paranoia check: e0/e1 should better be the same. */
680 if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
681 a1->esw.eacr.e1 != a2->esw.eacr.e1)
682 return 0;
683
684 /* Still connected to the same etr ? */
685 state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
686 state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
687 if (state_a1 == etr_lpsc_operational_step) {
688 if (state_a2 != etr_lpsc_operational_step ||
689 a1->edf1.net_id != a2->edf1.net_id ||
690 a1->edf1.etr_id != a2->edf1.etr_id ||
691 a1->edf1.etr_pn != a2->edf1.etr_pn)
692 return 0;
693 } else if (state_a2 != etr_lpsc_pps_mode)
694 return 0;
695
696 /* The ETV value of a2 needs to be ETV of a1 + 1. */
697 if (a1->edf2.etv + 1 != a2->edf2.etv)
698 return 0;
699
700 if (!etr_port_valid(a2, p))
701 return 0;
702
703 return 1;
704}
705
706/*
707 * The time is "clock". xtime is what we think the time is.
708 * Adjust the value by a multiple of jiffies and add the delta to ntp.
 709 * "delay" is an approximation of how long the synchronization took. If
710 * the time correction is positive, then "delay" is subtracted from
711 * the time difference and only the remaining part is passed to ntp.
712 */
713static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
714{
715 unsigned long long delta, ticks;
716 struct timex adjust;
717
718 /*
719 * We don't have to take the xtime lock because the cpu
720 * executing etr_adjust_time is running disabled in
721 * tasklet context and all other cpus are looping in
722 * etr_sync_cpu_start.
723 */
724 if (clock > xtime_cc) {
725 /* It is later than we thought. */
726 delta = ticks = clock - xtime_cc;
727 delta = ticks = (delta < delay) ? 0 : delta - delay;
728 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
729 init_timer_cc = init_timer_cc + delta;
730 jiffies_timer_cc = jiffies_timer_cc + delta;
731 xtime_cc = xtime_cc + delta;
732 adjust.offset = ticks * (1000000 / HZ);
733 } else {
734 /* It is earlier than we thought. */
735 delta = ticks = xtime_cc - clock;
736 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
737 init_timer_cc = init_timer_cc - delta;
738 jiffies_timer_cc = jiffies_timer_cc - delta;
739 xtime_cc = xtime_cc - delta;
740 adjust.offset = -ticks * (1000000 / HZ);
741 }
742 if (adjust.offset != 0) {
743 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
744 adjust.offset);
745 adjust.modes = ADJ_OFFSET_SINGLESHOT;
746 do_adjtimex(&adjust);
747 }
748}
749
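A worked example of the adjustment above, assuming HZ=100 so that
CLK_TICKS_PER_JIFFY is (1000000/HZ) << 12 = 40,960,000 TOD units (one TOD unit
being 2^-12 microseconds); the numbers are illustrative:

	/*
	 * clock - xtime_cc = 100,000,000 TOD units (about 24.4 ms late)
	 * do_div(ticks, CLK_TICKS_PER_JIFFY): ticks = 2, remainder = 18,080,000
	 * delta -= remainder        -> delta = 81,920,000 (exactly 2 jiffies)
	 * xtime_cc, init_timer_cc and jiffies_timer_cc advance by delta
	 * adjust.offset = 2 * (1000000 / HZ) = 20,000 microseconds handed to ntp
	 */
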
750static void etr_sync_cpu_start(void *dummy)
751{
752 int *in_sync = dummy;
753
754 etr_enable_sync_clock();
755 /*
 756	 * This looks like a busy wait loop but it isn't. etr_sync_cpu_start
 757	 * is called on all other cpus while the TOD clock is stopped.
758 * __udelay will stop the cpu on an enabled wait psw until the
759 * TOD is running again.
760 */
761 while (*in_sync == 0)
762 __udelay(1);
763 if (*in_sync != 1)
764 /* Didn't work. Clear per-cpu in sync bit again. */
765 etr_disable_sync_clock(NULL);
766 /*
767 * This round of TOD syncing is done. Set the clock comparator
768 * to the next tick and let the processor continue.
769 */
770 setup_jiffy_timer();
771}
772
773static void etr_sync_cpu_end(void *dummy)
774{
775}
776
777/*
 778 * Sync the TOD clock using the port referred to by aib. This port
779 * has to be enabled and the other port has to be disabled. The
780 * last eacr update has to be more than 1.6 seconds in the past.
781 */
782static int etr_sync_clock(struct etr_aib *aib, int port)
783{
784 struct etr_aib *sync_port;
785 unsigned long long clock, delay;
786 int in_sync, follows;
787 int rc;
788
789 /* Check if the current aib is adjacent to the sync port aib. */
790 sync_port = (port == 0) ? &etr_port0 : &etr_port1;
791 follows = etr_aib_follows(sync_port, aib, port);
792 memcpy(sync_port, aib, sizeof(*aib));
793 if (!follows)
794 return -EAGAIN;
795
796 /*
797 * Catch all other cpus and make them wait until we have
798 * successfully synced the clock. smp_call_function will
799 * return after all other cpus are in etr_sync_cpu_start.
800 */
801 in_sync = 0;
802 preempt_disable();
 803	smp_call_function(etr_sync_cpu_start, &in_sync, 0, 0);
804 local_irq_disable();
805 etr_enable_sync_clock();
806
807 /* Set clock to next OTE. */
808 __ctl_set_bit(14, 21);
809 __ctl_set_bit(0, 29);
810 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
811 if (set_clock(clock) == 0) {
812 __udelay(1); /* Wait for the clock to start. */
813 __ctl_clear_bit(0, 29);
814 __ctl_clear_bit(14, 21);
815 etr_stetr(aib);
816 /* Adjust Linux timing variables. */
817 delay = (unsigned long long)
818 (aib->edf2.etv - sync_port->edf2.etv) << 32;
819 etr_adjust_time(clock, delay);
820 setup_jiffy_timer();
821 /* Verify that the clock is properly set. */
822 if (!etr_aib_follows(sync_port, aib, port)) {
823 /* Didn't work. */
824 etr_disable_sync_clock(NULL);
825 in_sync = -EAGAIN;
826 rc = -EAGAIN;
827 } else {
828 in_sync = 1;
829 rc = 0;
830 }
831 } else {
832 /* Could not set the clock ?!? */
833 __ctl_clear_bit(0, 29);
834 __ctl_clear_bit(14, 21);
835 etr_disable_sync_clock(NULL);
836 in_sync = -EAGAIN;
837 rc = -EAGAIN;
838 }
839 local_irq_enable();
 840	smp_call_function(etr_sync_cpu_end, NULL, 0, 0);
841 preempt_enable();
842 return rc;
843}
844
845/*
846 * Handle the immediate effects of the different events.
847 * The port change event is used for online/offline changes.
848 */
849static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
850{
851 if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
852 eacr.es = 0;
853 if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
854 eacr.es = eacr.sl = 0;
855 if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
856 etr_port0_uptodate = etr_port1_uptodate = 0;
857
858 if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
859 if (eacr.e0)
860 /*
861 * Port change of an enabled port. We have to
 862			 * assume that this can have caused a stepping
863 * port switch.
864 */
865 etr_tolec = get_clock();
866 eacr.p0 = etr_port0_online;
867 if (!eacr.p0)
868 eacr.e0 = 0;
869 etr_port0_uptodate = 0;
870 }
871 if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
872 if (eacr.e1)
873 /*
874 * Port change of an enabled port. We have to
 875			 * assume that this can have caused a stepping
876 * port switch.
877 */
878 etr_tolec = get_clock();
879 eacr.p1 = etr_port1_online;
880 if (!eacr.p1)
881 eacr.e1 = 0;
882 etr_port1_uptodate = 0;
883 }
884 clear_bit(ETR_EVENT_UPDATE, &etr_events);
885 return eacr;
886}
887
888/*
 889 * Set up a timer that expires 1.6 seconds after etr_tolec if
890 * one of the ports needs an update.
891 */
892static void etr_set_tolec_timeout(unsigned long long now)
893{
894 unsigned long micros;
895
896 if ((!etr_eacr.p0 || etr_port0_uptodate) &&
897 (!etr_eacr.p1 || etr_port1_uptodate))
898 return;
899 micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
900 micros = (micros > 1600000) ? 0 : 1600000 - micros;
901 mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
902}
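
The shift by 12 above relies on the s390 TOD clock format, where 4096 TOD units
correspond to one microsecond; two helpers make the conversion explicit (a sketch,
the helper names are not part of this patch):

	static inline unsigned long long tod_to_usecs(unsigned long long tod)
	{
		return tod >> 12;	/* 4096 TOD units per microsecond */
	}

	static inline unsigned long long usecs_to_tod(unsigned long long usecs)
	{
		return usecs << 12;	/* 1600000 usecs -> 1.6 second timeout */
	}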
903
904/*
 905 * Set up a timer that expires after 1/2 second.
906 */
907static void etr_set_sync_timeout(void)
908{
909 mod_timer(&etr_timer, jiffies + HZ/2);
910}
911
912/*
913 * Update the aib information for one or both ports.
914 */
915static struct etr_eacr etr_handle_update(struct etr_aib *aib,
916 struct etr_eacr eacr)
917{
918 /* With both ports disabled the aib information is useless. */
919 if (!eacr.e0 && !eacr.e1)
920 return eacr;
921
922 /* Update port0 or port1 with aib stored in etr_tasklet_fn. */
923 if (aib->esw.q == 0) {
924 /* Information for port 0 stored. */
925 if (eacr.p0 && !etr_port0_uptodate) {
926 etr_port0 = *aib;
927 if (etr_port0_online)
928 etr_port0_uptodate = 1;
929 }
930 } else {
931 /* Information for port 1 stored. */
932 if (eacr.p1 && !etr_port1_uptodate) {
933 etr_port1 = *aib;
 934			if (etr_port1_online)
935 etr_port1_uptodate = 1;
936 }
937 }
938
939 /*
940 * Do not try to get the alternate port aib if the clock
941 * is not in sync yet.
942 */
943 if (!eacr.es)
944 return eacr;
945
946 /*
947 * If steai is available we can get the information about
948 * the other port immediately. If only stetr is available the
949 * data-port bit toggle has to be used.
950 */
951 if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
952 if (eacr.p0 && !etr_port0_uptodate) {
953 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
954 etr_port0_uptodate = 1;
955 }
956 if (eacr.p1 && !etr_port1_uptodate) {
957 etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
958 etr_port1_uptodate = 1;
959 }
960 } else {
961 /*
962 * One port was updated above, if the other
963 * port is not uptodate toggle dp bit.
964 */
965 if ((eacr.p0 && !etr_port0_uptodate) ||
966 (eacr.p1 && !etr_port1_uptodate))
967 eacr.dp ^= 1;
968 else
969 eacr.dp = 0;
970 }
971 return eacr;
972}
973
974/*
975 * Write new etr control register if it differs from the current one.
 976 * Update etr_tolec as well if the write may affect the data port.
977 */
978static void etr_update_eacr(struct etr_eacr eacr)
979{
980 int dp_changed;
981
982 if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
983 /* No change, return. */
984 return;
985 /*
 986	 * The disable of an active port or the change of the data port
987 * bit can/will cause a change in the data port.
988 */
989 dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
990 (etr_eacr.dp ^ eacr.dp) != 0;
991 etr_eacr = eacr;
992 etr_setr(&etr_eacr);
993 if (dp_changed)
994 etr_tolec = get_clock();
995}
996
997/*
998 * ETR tasklet. In this function you'll find the main logic. In
 999 * particular, this is the only function that calls etr_update_eacr();
1000 * it "controls" the etr control register.
1001 */
1002static void etr_tasklet_fn(unsigned long dummy)
1003{
1004 unsigned long long now;
1005 struct etr_eacr eacr;
1006 struct etr_aib aib;
1007 int sync_port;
1008
1009 /* Create working copy of etr_eacr. */
1010 eacr = etr_eacr;
1011
1012 /* Check for the different events and their immediate effects. */
1013 eacr = etr_handle_events(eacr);
1014
1015 /* Check if ETR is supposed to be active. */
1016 eacr.ea = eacr.p0 || eacr.p1;
1017 if (!eacr.ea) {
1018 /* Both ports offline. Reset everything. */
1019 eacr.dp = eacr.es = eacr.sl = 0;
1020 on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
1021 del_timer_sync(&etr_timer);
1022 etr_update_eacr(eacr);
1023 set_bit(ETR_FLAG_EACCES, &etr_flags);
1024 return;
1025 }
1026
1027 /* Store aib to get the current ETR status word. */
1028 BUG_ON(etr_stetr(&aib) != 0);
1029 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
1030 now = get_clock();
1031
1032 /*
1033 * Update the port information if the last stepping port change
1034 * or data port change is older than 1.6 seconds.
1035 */
1036 if (now >= etr_tolec + (1600000 << 12))
1037 eacr = etr_handle_update(&aib, eacr);
1038
1039 /*
1040	 * Select ports to enable. The preferred synchronization mode is PPS.
1041	 * Whether a port can be enabled depends on a number of things:
1042 * 1) The port needs to be online and uptodate. A port is not
1043 * disabled just because it is not uptodate, but it is only
1044 * enabled if it is uptodate.
1045 * 2) The port needs to have the same mode (pps / etr).
1046 * 3) The port needs to be usable -> etr_port_valid() == 1
1047 * 4) To enable the second port the clock needs to be in sync.
1048	 * 5) If both ports are usable and are ETR ports, the network id
1049 * has to be the same.
1050 * The eacr.sl bit is used to indicate etr mode vs. pps mode.
1051 */
1052 if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
1053 eacr.sl = 0;
1054 eacr.e0 = 1;
1055 if (!etr_mode_is_pps(etr_eacr))
1056 eacr.es = 0;
1057 if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
1058 eacr.e1 = 0;
1059 // FIXME: uptodate checks ?
1060 else if (etr_port0_uptodate && etr_port1_uptodate)
1061 eacr.e1 = 1;
1062 sync_port = (etr_port0_uptodate &&
1063 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1064 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1065 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
1066 eacr.sl = 0;
1067 eacr.e0 = 0;
1068 eacr.e1 = 1;
1069 if (!etr_mode_is_pps(etr_eacr))
1070 eacr.es = 0;
1071 sync_port = (etr_port1_uptodate &&
1072 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1073 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1074 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
1075 eacr.sl = 1;
1076 eacr.e0 = 1;
1077 if (!etr_mode_is_etr(etr_eacr))
1078 eacr.es = 0;
1079 if (!eacr.es || !eacr.p1 ||
1080 aib.esw.psc1 != etr_lpsc_operational_alt)
1081 eacr.e1 = 0;
1082 else if (etr_port0_uptodate && etr_port1_uptodate &&
1083 etr_compare_network(&etr_port0, &etr_port1))
1084 eacr.e1 = 1;
1085 sync_port = (etr_port0_uptodate &&
1086 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1087 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1088 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
1089 eacr.sl = 1;
1090 eacr.e0 = 0;
1091 eacr.e1 = 1;
1092 if (!etr_mode_is_etr(etr_eacr))
1093 eacr.es = 0;
1094 sync_port = (etr_port1_uptodate &&
1095 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1096 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1097 } else {
1098 /* Both ports not usable. */
1099 eacr.es = eacr.sl = 0;
1100 sync_port = -1;
1101 set_bit(ETR_FLAG_EACCES, &etr_flags);
1102 }
1103
1104 /*
1105 * If the clock is in sync just update the eacr and return.
1106 * If there is no valid sync port wait for a port update.
1107 */
1108 if (eacr.es || sync_port < 0) {
1109 etr_update_eacr(eacr);
1110 etr_set_tolec_timeout(now);
1111 return;
1112 }
1113
1114 /*
1115 * Prepare control register for clock syncing
1116	 * (reset data port bit, set sync check control).
1117 */
1118 eacr.dp = 0;
1119 eacr.es = 1;
1120
1121 /*
1122 * Update eacr and try to synchronize the clock. If the update
1123 * of eacr caused a stepping port switch (or if we have to
1124	 * assume that a stepping port switch has occurred) or the
1125	 * clock syncing failed, reset the sync check control bit
1126	 * and set up a timer to try again after 0.5 seconds.
1127 */
1128 etr_update_eacr(eacr);
1129 if (now < etr_tolec + (1600000 << 12) ||
1130 etr_sync_clock(&aib, sync_port) != 0) {
1131 /* Sync failed. Try again in 1/2 second. */
1132 eacr.es = 0;
1133 etr_update_eacr(eacr);
1134 etr_set_sync_timeout();
1135 } else
1136 etr_set_tolec_timeout(now);
1137}
1138
1139/*
1140 * Sysfs interface functions
1141 */
1142static struct sysdev_class etr_sysclass = {
1143 set_kset_name("etr")
1144};
1145
1146static struct sys_device etr_port0_dev = {
1147 .id = 0,
1148 .cls = &etr_sysclass,
1149};
1150
1151static struct sys_device etr_port1_dev = {
1152 .id = 1,
1153 .cls = &etr_sysclass,
1154};
1155
1156/*
1157 * ETR class attributes
1158 */
1159static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf)
1160{
1161 return sprintf(buf, "%i\n", etr_port0.esw.p);
1162}
1163
1164static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1165
1166static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf)
1167{
1168 char *mode_str;
1169
1170 if (etr_mode_is_pps(etr_eacr))
1171 mode_str = "pps";
1172 else if (etr_mode_is_etr(etr_eacr))
1173 mode_str = "etr";
1174 else
1175 mode_str = "local";
1176 return sprintf(buf, "%s\n", mode_str);
1177}
1178
1179static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
1180
1181/*
1182 * ETR port attributes
1183 */
1184static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1185{
1186 if (dev == &etr_port0_dev)
1187 return etr_port0_online ? &etr_port0 : NULL;
1188 else
1189 return etr_port1_online ? &etr_port1 : NULL;
1190}
1191
1192static ssize_t etr_online_show(struct sys_device *dev, char *buf)
1193{
1194 unsigned int online;
1195
1196 online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
1197 return sprintf(buf, "%i\n", online);
1198}
1199
1200static ssize_t etr_online_store(struct sys_device *dev,
1201 const char *buf, size_t count)
1202{
1203 unsigned int value;
1204
1205 value = simple_strtoul(buf, NULL, 0);
1206 if (value != 0 && value != 1)
1207 return -EINVAL;
1208 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
1209 return -ENOSYS;
1210 if (dev == &etr_port0_dev) {
1211 if (etr_port0_online == value)
1212 return count; /* Nothing to do. */
1213 etr_port0_online = value;
1214 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1215 tasklet_hi_schedule(&etr_tasklet);
1216 } else {
1217 if (etr_port1_online == value)
1218 return count; /* Nothing to do. */
1219 etr_port1_online = value;
1220 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1221 tasklet_hi_schedule(&etr_tasklet);
1222 }
1223 return count;
1224}
1225
1226static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1227
1228static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf)
1229{
1230 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1231 etr_eacr.e0 : etr_eacr.e1);
1232}
1233
1234static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1235
1236static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf)
1237{
1238 if (!etr_port0_online && !etr_port1_online)
1239 /* Status word is not uptodate if both ports are offline. */
1240 return -ENODATA;
1241 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1242 etr_port0.esw.psc0 : etr_port0.esw.psc1);
1243}
1244
1245static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1246
1247static ssize_t etr_untuned_show(struct sys_device *dev, char *buf)
1248{
1249 struct etr_aib *aib = etr_aib_from_dev(dev);
1250
1251 if (!aib || !aib->slsw.v1)
1252 return -ENODATA;
1253 return sprintf(buf, "%i\n", aib->edf1.u);
1254}
1255
1256static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1257
1258static ssize_t etr_network_id_show(struct sys_device *dev, char *buf)
1259{
1260 struct etr_aib *aib = etr_aib_from_dev(dev);
1261
1262 if (!aib || !aib->slsw.v1)
1263 return -ENODATA;
1264 return sprintf(buf, "%i\n", aib->edf1.net_id);
1265}
1266
1267static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1268
1269static ssize_t etr_id_show(struct sys_device *dev, char *buf)
1270{
1271 struct etr_aib *aib = etr_aib_from_dev(dev);
1272
1273 if (!aib || !aib->slsw.v1)
1274 return -ENODATA;
1275 return sprintf(buf, "%i\n", aib->edf1.etr_id);
1276}
1277
1278static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1279
1280static ssize_t etr_port_number_show(struct sys_device *dev, char *buf)
1281{
1282 struct etr_aib *aib = etr_aib_from_dev(dev);
1283
1284 if (!aib || !aib->slsw.v1)
1285 return -ENODATA;
1286 return sprintf(buf, "%i\n", aib->edf1.etr_pn);
1287}
1288
1289static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1290
1291static ssize_t etr_coupled_show(struct sys_device *dev, char *buf)
1292{
1293 struct etr_aib *aib = etr_aib_from_dev(dev);
1294
1295 if (!aib || !aib->slsw.v3)
1296 return -ENODATA;
1297 return sprintf(buf, "%i\n", aib->edf3.c);
1298}
1299
1300static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1301
1302static ssize_t etr_local_time_show(struct sys_device *dev, char *buf)
1303{
1304 struct etr_aib *aib = etr_aib_from_dev(dev);
1305
1306 if (!aib || !aib->slsw.v3)
1307 return -ENODATA;
1308 return sprintf(buf, "%i\n", aib->edf3.blto);
1309}
1310
1311static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1312
1313static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf)
1314{
1315 struct etr_aib *aib = etr_aib_from_dev(dev);
1316
1317 if (!aib || !aib->slsw.v3)
1318 return -ENODATA;
1319 return sprintf(buf, "%i\n", aib->edf3.buo);
1320}
1321
1322static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
1323
1324static struct sysdev_attribute *etr_port_attributes[] = {
1325 &attr_online,
1326 &attr_stepping_control,
1327 &attr_state_code,
1328 &attr_untuned,
1329 &attr_network,
1330 &attr_id,
1331 &attr_port,
1332 &attr_coupled,
1333 &attr_local_time,
1334 &attr_utc_offset,
1335 NULL
1336};
1337
1338static int __init etr_register_port(struct sys_device *dev)
1339{
1340 struct sysdev_attribute **attr;
1341 int rc;
1342
1343 rc = sysdev_register(dev);
1344 if (rc)
1345 goto out;
1346 for (attr = etr_port_attributes; *attr; attr++) {
1347 rc = sysdev_create_file(dev, *attr);
1348 if (rc)
1349 goto out_unreg;
1350 }
1351 return 0;
1352out_unreg:
1353 for (; attr >= etr_port_attributes; attr--)
1354 sysdev_remove_file(dev, *attr);
1355 sysdev_unregister(dev);
1356out:
1357 return rc;
1358}
1359
1360static void __init etr_unregister_port(struct sys_device *dev)
1361{
1362 struct sysdev_attribute **attr;
1363
1364 for (attr = etr_port_attributes; *attr; attr++)
1365 sysdev_remove_file(dev, *attr);
1366 sysdev_unregister(dev);
1367}
1368
1369static int __init etr_init_sysfs(void)
1370{
1371 int rc;
1372
1373 rc = sysdev_class_register(&etr_sysclass);
1374 if (rc)
1375 goto out;
1376 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
1377 if (rc)
1378 goto out_unreg_class;
1379 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
1380 if (rc)
1381 goto out_remove_stepping_port;
1382 rc = etr_register_port(&etr_port0_dev);
1383 if (rc)
1384 goto out_remove_stepping_mode;
1385 rc = etr_register_port(&etr_port1_dev);
1386 if (rc)
1387 goto out_remove_port0;
1388 return 0;
1389
1390out_remove_port0:
1391 etr_unregister_port(&etr_port0_dev);
1392out_remove_stepping_mode:
1393 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
1394out_remove_stepping_port:
1395 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
1396out_unreg_class:
1397 sysdev_class_unregister(&etr_sysclass);
1398out:
1399 return rc;
348} 1400}
349 1401
1402device_initcall(etr_init_sysfs);
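
For illustration, a small userspace sketch that reads and sets the per-port online
attribute created above; the /sys/devices/system/etr/etr0/ path is assumed from the
usual sysdev class layout (class "etr", device id 0) and is not spelled out in this
patch:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/system/etr/etr0/online", "r+");
		char buf[8];

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("port0 online: %s", buf);
		rewind(f);
		fputs("1\n", f);  /* triggers ETR_EVENT_PORT0_CHANGE via etr_online_store */
		return fclose(f) ? 1 : 0;
	}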
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3cbb0dcf1f1d..f0e5a320e2ec 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -283,7 +283,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
283 return buffer; 283 return buffer;
284} 284}
285 285
286DEFINE_SPINLOCK(die_lock); 286static DEFINE_SPINLOCK(die_lock);
287 287
288void die(const char * str, struct pt_regs * regs, long err) 288void die(const char * str, struct pt_regs * regs, long err)
289{ 289{
@@ -364,8 +364,7 @@ void __kprobes do_single_step(struct pt_regs *regs)
364 force_sig(SIGTRAP, current); 364 force_sig(SIGTRAP, current);
365} 365}
366 366
367asmlinkage void 367static void default_trap_handler(struct pt_regs * regs, long interruption_code)
368default_trap_handler(struct pt_regs * regs, long interruption_code)
369{ 368{
370 if (regs->psw.mask & PSW_MASK_PSTATE) { 369 if (regs->psw.mask & PSW_MASK_PSTATE) {
371 local_irq_enable(); 370 local_irq_enable();
@@ -376,7 +375,7 @@ default_trap_handler(struct pt_regs * regs, long interruption_code)
376} 375}
377 376
378#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 377#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
379asmlinkage void name(struct pt_regs * regs, long interruption_code) \ 378static void name(struct pt_regs * regs, long interruption_code) \
380{ \ 379{ \
381 siginfo_t info; \ 380 siginfo_t info; \
382 info.si_signo = signr; \ 381 info.si_signo = signr; \
@@ -442,7 +441,7 @@ do_fp_trap(struct pt_regs *regs, void __user *location,
442 "floating point exception", regs, &si); 441 "floating point exception", regs, &si);
443} 442}
444 443
445asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) 444static void illegal_op(struct pt_regs * regs, long interruption_code)
446{ 445{
447 siginfo_t info; 446 siginfo_t info;
448 __u8 opcode[6]; 447 __u8 opcode[6];
@@ -491,8 +490,15 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
491#endif 490#endif
492 } else 491 } else
493 signal = SIGILL; 492 signal = SIGILL;
494 } else 493 } else {
495 signal = SIGILL; 494 /*
495 * If we get an illegal op in kernel mode, send it through the
496 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
497 */
498 if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
499 3, SIGTRAP) != NOTIFY_STOP)
500 signal = SIGILL;
501 }
496 502
497#ifdef CONFIG_MATHEMU 503#ifdef CONFIG_MATHEMU
498 if (signal == SIGFPE) 504 if (signal == SIGFPE)
@@ -585,7 +591,7 @@ DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
585 ILL_ILLOPN, get_check_address(regs)); 591 ILL_ILLOPN, get_check_address(regs));
586#endif 592#endif
587 593
588asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) 594static void data_exception(struct pt_regs * regs, long interruption_code)
589{ 595{
590 __u16 __user *location; 596 __u16 __user *location;
591 int signal = 0; 597 int signal = 0;
@@ -675,7 +681,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
675 } 681 }
676} 682}
677 683
678asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code) 684static void space_switch_exception(struct pt_regs * regs, long int_code)
679{ 685{
680 siginfo_t info; 686 siginfo_t info;
681 687
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index fe0f2e97ba7b..a48907392522 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,18 +31,19 @@ SECTIONS
31 31
32 _etext = .; /* End of text section */ 32 _etext = .; /* End of text section */
33 33
34 . = ALIGN(16); /* Exception table */
35 __start___ex_table = .;
36 __ex_table : { *(__ex_table) }
37 __stop___ex_table = .;
38
39 RODATA 34 RODATA
40 35
41#ifdef CONFIG_SHARED_KERNEL 36#ifdef CONFIG_SHARED_KERNEL
42 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ 37 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */
38#endif
43 39
40 . = ALIGN(4096);
44 _eshared = .; /* End of shareable data */ 41 _eshared = .; /* End of shareable data */
45#endif 42
43 . = ALIGN(16); /* Exception table */
44 __start___ex_table = .;
45 __ex_table : { *(__ex_table) }
46 __stop___ex_table = .;
46 47
47 .data : { /* Data */ 48 .data : { /* Data */
48 *(.data) 49 *(.data)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 21baaf5496d6..9d5b02801b46 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@
25#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
26 26
27static ext_int_info_t ext_int_info_timer; 27static ext_int_info_t ext_int_info_timer;
28DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 28static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
29 29
30#ifdef CONFIG_VIRT_CPU_ACCOUNTING 30#ifdef CONFIG_VIRT_CPU_ACCOUNTING
31/* 31/*
@@ -524,16 +524,15 @@ EXPORT_SYMBOL(del_virt_timer);
524void init_cpu_vtimer(void) 524void init_cpu_vtimer(void)
525{ 525{
526 struct vtimer_queue *vt_list; 526 struct vtimer_queue *vt_list;
527 unsigned long cr0;
528 527
529 /* kick the virtual timer */ 528 /* kick the virtual timer */
530 S390_lowcore.exit_timer = VTIMER_MAX_SLICE; 529 S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
531 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; 530 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
532 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 531 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
533 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); 532 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
534 __ctl_store(cr0, 0, 0); 533
535 cr0 |= 0x400; 534 /* enable cpu timer interrupts */
536 __ctl_load(cr0, 0, 0); 535 __ctl_set_bit(0,10);
537 536
538 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 537 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
539 INIT_LIST_HEAD(&vt_list->list); 538 INIT_LIST_HEAD(&vt_list->list);
@@ -572,6 +571,7 @@ void __init vtime_init(void)
572 if (register_idle_notifier(&vtimer_idle_nb)) 571 if (register_idle_notifier(&vtimer_idle_nb))
573 panic("Couldn't register idle notifier"); 572 panic("Couldn't register idle notifier");
574 573
574 /* Enable cpu timer interrupts on the boot cpu. */
575 init_cpu_vtimer(); 575 init_cpu_vtimer();
576} 576}
577 577
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b5f94cf3bde8..7a44fed21b35 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o
8lib-$(CONFIG_32BIT) += div64.o 8lib-$(CONFIG_32BIT) += div64.o
9lib-$(CONFIG_64BIT) += uaccess_mvcos.o 9lib-$(CONFIG_64BIT) += uaccess_mvcos.o
10lib-$(CONFIG_SMP) += spinlock.o 10lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 027c4742a001..02854449b74b 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/s390/kernel/delay.c 2 * arch/s390/lib/delay.c
3 * Precise Delay Loops for S390 3 * Precise Delay Loops for S390
4 * 4 *
5 * S390 version 5 * S390 version
@@ -13,10 +13,8 @@
13 13
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16 16#include <linux/timex.h>
17#ifdef CONFIG_SMP 17#include <linux/irqflags.h>
18#include <asm/smp.h>
19#endif
20 18
21void __delay(unsigned long loops) 19void __delay(unsigned long loops)
22{ 20{
@@ -31,17 +29,39 @@ void __delay(unsigned long loops)
31} 29}
32 30
33/* 31/*
34 * Waits for 'usecs' microseconds using the tod clock, giving up the time slice 32 * Waits for 'usecs' microseconds using the TOD clock comparator.
35 * of the virtual PU inbetween to avoid congestion.
36 */ 33 */
37void __udelay(unsigned long usecs) 34void __udelay(unsigned long usecs)
38{ 35{
39 uint64_t start_cc; 36 u64 end, time, jiffy_timer = 0;
37 unsigned long flags, cr0, mask, dummy;
38
39 local_irq_save(flags);
40 if (raw_irqs_disabled_flags(flags)) {
41 jiffy_timer = S390_lowcore.jiffy_timer;
42 S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
43 __ctl_store(cr0, 0, 0);
44 dummy = (cr0 & 0xffff00e0) | 0x00000800;
 45		__ctl_load(dummy, 0, 0);
46 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
47 } else
48 mask = psw_kernel_bits | PSW_MASK_WAIT |
49 PSW_MASK_EXT | PSW_MASK_IO;
50
51 end = get_clock() + ((u64) usecs << 12);
52 do {
53 time = end < S390_lowcore.jiffy_timer ?
54 end : S390_lowcore.jiffy_timer;
55 set_clock_comparator(time);
56 trace_hardirqs_on();
57 __load_psw_mask(mask);
58 local_irq_disable();
59 } while (get_clock() < end);
40 60
41 if (usecs == 0) 61 if (raw_irqs_disabled_flags(flags)) {
42 return; 62 __ctl_load(cr0, 0, 0);
43 start_cc = get_clock(); 63 S390_lowcore.jiffy_timer = jiffy_timer;
44 do { 64 }
45 cpu_relax(); 65 set_clock_comparator(S390_lowcore.jiffy_timer);
46 } while (((get_clock() - start_cc)/4096) < usecs); 66 local_irq_restore(flags);
47} 67}
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
new file mode 100644
index 000000000000..eb1df632e749
--- /dev/null
+++ b/arch/s390/lib/qrnnd.S
@@ -0,0 +1,77 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
 7# the remainder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
 13	st %r2,24(%r15) # store pointer to remainder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
22 brc 3,0f # (n/4) div (n/2) can overflow by 1
23 ahi %r0,-1 # trick: subtract n/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
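
As a plain C model of what this routine computes (a sketch; like the usual
GMP-style callers it assumes the upper half of n is less than d, so the quotient
fits in 32 bits):

	unsigned int udiv_qrnnd_model(unsigned int *r, unsigned int n_hi,
				      unsigned int n_lo, unsigned int d)
	{
		unsigned long long n = ((unsigned long long) n_hi << 32) | n_lo;

		*r = (unsigned int) (n % d);	/* remainder stored through &__r */
		return (unsigned int) (n / d);	/* quotient returned in %r2 */
	}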
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
new file mode 100644
index 000000000000..126011df14f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.h
@@ -0,0 +1,23 @@
1/*
 2 * arch/s390/lib/uaccess.h
3 *
4 * Copyright IBM Corp. 2007
5 *
6 */
7
8#ifndef __ARCH_S390_LIB_UACCESS_H
9#define __ARCH_S390_LIB_UACCESS_H
10
11extern size_t copy_from_user_std(size_t, const void __user *, void *);
12extern size_t copy_to_user_std(size_t, void __user *, const void *);
13extern size_t strnlen_user_std(size_t, const char __user *);
14extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
15extern int futex_atomic_cmpxchg_std(int __user *, int, int);
16extern int futex_atomic_op_std(int, int __user *, int, int *);
17
18extern size_t copy_from_user_pt(size_t, const void __user *, void *);
19extern size_t copy_to_user_pt(size_t, void __user *, const void *);
20extern int futex_atomic_op_pt(int, int __user *, int, int *);
21extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
22
23#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index f9a23d57eb79..6d8772339d76 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -12,6 +12,7 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/futex.h> 14#include <asm/futex.h>
15#include "uaccess.h"
15 16
16#ifndef __s390x__ 17#ifndef __s390x__
17#define AHI "ahi" 18#define AHI "ahi"
@@ -27,10 +28,7 @@
27#define SLR "slgr" 28#define SLR "slgr"
28#endif 29#endif
29 30
30extern size_t copy_from_user_std(size_t, const void __user *, void *); 31static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
31extern size_t copy_to_user_std(size_t, void __user *, const void *);
32
33size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
34{ 32{
35 register unsigned long reg0 asm("0") = 0x81UL; 33 register unsigned long reg0 asm("0") = 0x81UL;
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -69,14 +67,14 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
69 return size; 67 return size;
70} 68}
71 69
72size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) 70static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
73{ 71{
74 if (size <= 256) 72 if (size <= 256)
75 return copy_from_user_std(size, ptr, x); 73 return copy_from_user_std(size, ptr, x);
76 return copy_from_user_mvcos(size, ptr, x); 74 return copy_from_user_mvcos(size, ptr, x);
77} 75}
78 76
79size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) 77static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
80{ 78{
81 register unsigned long reg0 asm("0") = 0x810000UL; 79 register unsigned long reg0 asm("0") = 0x810000UL;
82 unsigned long tmp1, tmp2; 80 unsigned long tmp1, tmp2;
@@ -105,14 +103,16 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
105 return size; 103 return size;
106} 104}
107 105
108size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) 106static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
107 const void *x)
109{ 108{
110 if (size <= 256) 109 if (size <= 256)
111 return copy_to_user_std(size, ptr, x); 110 return copy_to_user_std(size, ptr, x);
112 return copy_to_user_mvcos(size, ptr, x); 111 return copy_to_user_mvcos(size, ptr, x);
113} 112}
114 113
115size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) 114static size_t copy_in_user_mvcos(size_t size, void __user *to,
115 const void __user *from)
116{ 116{
117 register unsigned long reg0 asm("0") = 0x810081UL; 117 register unsigned long reg0 asm("0") = 0x810081UL;
118 unsigned long tmp1, tmp2; 118 unsigned long tmp1, tmp2;
@@ -134,7 +134,7 @@ size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
134 return size; 134 return size;
135} 135}
136 136
137size_t clear_user_mvcos(size_t size, void __user *to) 137static size_t clear_user_mvcos(size_t size, void __user *to)
138{ 138{
139 register unsigned long reg0 asm("0") = 0x810000UL; 139 register unsigned long reg0 asm("0") = 0x810000UL;
140 unsigned long tmp1, tmp2; 140 unsigned long tmp1, tmp2;
@@ -162,10 +162,43 @@ size_t clear_user_mvcos(size_t size, void __user *to)
162 return size; 162 return size;
163} 163}
164 164
165extern size_t strnlen_user_std(size_t, const char __user *); 165static size_t strnlen_user_mvcos(size_t count, const char __user *src)
166extern size_t strncpy_from_user_std(size_t, const char __user *, char *); 166{
167extern int futex_atomic_op(int, int __user *, int, int *); 167 char buf[256];
168extern int futex_atomic_cmpxchg(int __user *, int, int); 168 int rc;
169 size_t done, len, len_str;
170
171 done = 0;
172 do {
173 len = min(count - done, (size_t) 256);
174 rc = uaccess.copy_from_user(len, src + done, buf);
175 if (unlikely(rc == len))
176 return 0;
177 len -= rc;
178 len_str = strnlen(buf, len);
179 done += len_str;
180 } while ((len_str == len) && (done < count));
181 return done + 1;
182}
183
184static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
185 char *dst)
186{
187 int rc;
188 size_t done, len, len_str;
189
190 done = 0;
191 do {
192 len = min(count - done, (size_t) 4096);
193 rc = uaccess.copy_from_user(len, src + done, dst);
194 if (unlikely(rc == len))
195 return -EFAULT;
196 len -= rc;
197 len_str = strnlen(dst, len);
198 done += len_str;
199 } while ((len_str == len) && (done < count));
200 return done;
201}
169 202
170struct uaccess_ops uaccess_mvcos = { 203struct uaccess_ops uaccess_mvcos = {
171 .copy_from_user = copy_from_user_mvcos_check, 204 .copy_from_user = copy_from_user_mvcos_check,
@@ -176,6 +209,21 @@ struct uaccess_ops uaccess_mvcos = {
176 .clear_user = clear_user_mvcos, 209 .clear_user = clear_user_mvcos,
177 .strnlen_user = strnlen_user_std, 210 .strnlen_user = strnlen_user_std,
178 .strncpy_from_user = strncpy_from_user_std, 211 .strncpy_from_user = strncpy_from_user_std,
179 .futex_atomic_op = futex_atomic_op, 212 .futex_atomic_op = futex_atomic_op_std,
180 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 213 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
214};
215
216#ifdef CONFIG_S390_SWITCH_AMODE
217struct uaccess_ops uaccess_mvcos_switch = {
218 .copy_from_user = copy_from_user_mvcos,
219 .copy_from_user_small = copy_from_user_mvcos,
220 .copy_to_user = copy_to_user_mvcos,
221 .copy_to_user_small = copy_to_user_mvcos,
222 .copy_in_user = copy_in_user_mvcos,
223 .clear_user = clear_user_mvcos,
224 .strnlen_user = strnlen_user_mvcos,
225 .strncpy_from_user = strncpy_from_user_mvcos,
226 .futex_atomic_op = futex_atomic_op_pt,
227 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
181}; 228};
229#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 49c3e46b4065..63181671e3e3 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * arch/s390/lib/uaccess_pt.c 2 * arch/s390/lib/uaccess_pt.c
3 * 3 *
4 * User access functions based on page table walks. 4 * User access functions based on page table walks for enhanced
5 * system layout without hardware support.
5 * 6 *
6 * Copyright IBM Corp. 2006 7 * Copyright IBM Corp. 2006
7 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) 8 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
@@ -12,9 +13,10 @@
12#include <linux/mm.h> 13#include <linux/mm.h>
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
15 17
16static inline int __handle_fault(struct mm_struct *mm, unsigned long address, 18static int __handle_fault(struct mm_struct *mm, unsigned long address,
17 int write_access) 19 int write_access)
18{ 20{
19 struct vm_area_struct *vma; 21 struct vm_area_struct *vma;
20 int ret = -EFAULT; 22 int ret = -EFAULT;
@@ -79,8 +81,8 @@ out_sigbus:
79 return ret; 81 return ret;
80} 82}
81 83
82static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, 84static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
83 size_t n, int write_user) 85 size_t n, int write_user)
84{ 86{
85 struct mm_struct *mm = current->mm; 87 struct mm_struct *mm = current->mm;
86 unsigned long offset, pfn, done, size; 88 unsigned long offset, pfn, done, size;
@@ -133,6 +135,49 @@ fault:
133 goto retry; 135 goto retry;
134} 136}
135 137
138/*
139 * Do DAT for user address by page table walk, return kernel address.
140 * This function needs to be called with current->mm->page_table_lock held.
141 */
142static unsigned long __dat_user_addr(unsigned long uaddr)
143{
144 struct mm_struct *mm = current->mm;
145 unsigned long pfn, ret;
146 pgd_t *pgd;
147 pmd_t *pmd;
148 pte_t *pte;
149 int rc;
150
151 ret = 0;
152retry:
153 pgd = pgd_offset(mm, uaddr);
154 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
155 goto fault;
156
157 pmd = pmd_offset(pgd, uaddr);
158 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
159 goto fault;
160
161 pte = pte_offset_map(pmd, uaddr);
162 if (!pte || !pte_present(*pte))
163 goto fault;
164
165 pfn = pte_pfn(*pte);
166 if (!pfn_valid(pfn))
167 goto out;
168
169 ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
170out:
171 return ret;
172fault:
173 spin_unlock(&mm->page_table_lock);
174 rc = __handle_fault(mm, uaddr, 0);
175 spin_lock(&mm->page_table_lock);
176 if (rc)
177 goto out;
178 goto retry;
179}
180
136size_t copy_from_user_pt(size_t n, const void __user *from, void *to) 181size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
137{ 182{
138 size_t rc; 183 size_t rc;
@@ -155,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
155 } 200 }
156 return __user_copy_pt((unsigned long) to, (void *) from, n, 1); 201 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
157} 202}
203
204static size_t clear_user_pt(size_t n, void __user *to)
205{
206 long done, size, ret;
207
208 if (segment_eq(get_fs(), KERNEL_DS)) {
209 memset((void __kernel __force *) to, 0, n);
210 return 0;
211 }
212 done = 0;
213 do {
214 if (n - done > PAGE_SIZE)
215 size = PAGE_SIZE;
216 else
217 size = n - done;
218 ret = __user_copy_pt((unsigned long) to + done,
219 &empty_zero_page, size, 1);
220 done += size;
221 if (ret)
222 return ret + n - done;
223 } while (done < n);
224 return 0;
225}
226
227static size_t strnlen_user_pt(size_t count, const char __user *src)
228{
229 char *addr;
230 unsigned long uaddr = (unsigned long) src;
231 struct mm_struct *mm = current->mm;
232 unsigned long offset, pfn, done, len;
233 pgd_t *pgd;
234 pmd_t *pmd;
235 pte_t *pte;
236 size_t len_str;
237
238 if (segment_eq(get_fs(), KERNEL_DS))
239 return strnlen((const char __kernel __force *) src, count) + 1;
240 done = 0;
241retry:
242 spin_lock(&mm->page_table_lock);
243 do {
244 pgd = pgd_offset(mm, uaddr);
245 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
246 goto fault;
247
248 pmd = pmd_offset(pgd, uaddr);
249 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
250 goto fault;
251
252 pte = pte_offset_map(pmd, uaddr);
253 if (!pte || !pte_present(*pte))
254 goto fault;
255
256 pfn = pte_pfn(*pte);
257 if (!pfn_valid(pfn)) {
258 done = -1;
259 goto out;
260 }
261
262 offset = uaddr & (PAGE_SIZE-1);
263 addr = (char *)(pfn << PAGE_SHIFT) + offset;
264 len = min(count - done, PAGE_SIZE - offset);
265 len_str = strnlen(addr, len);
266 done += len_str;
267 uaddr += len_str;
268 } while ((len_str == len) && (done < count));
269out:
270 spin_unlock(&mm->page_table_lock);
271 return done + 1;
272fault:
273 spin_unlock(&mm->page_table_lock);
274 if (__handle_fault(mm, uaddr, 0)) {
275 return 0;
276 }
277 goto retry;
278}
279
280static size_t strncpy_from_user_pt(size_t count, const char __user *src,
281 char *dst)
282{
283 size_t n = strnlen_user_pt(count, src);
284
285 if (!n)
286 return -EFAULT;
287 if (n > count)
288 n = count;
289 if (segment_eq(get_fs(), KERNEL_DS)) {
290 memcpy(dst, (const char __kernel __force *) src, n);
291 if (dst[n-1] == '\0')
292 return n-1;
293 else
294 return n;
295 }
296 if (__user_copy_pt((unsigned long) src, dst, n, 0))
297 return -EFAULT;
298 if (dst[n-1] == '\0')
299 return n-1;
300 else
301 return n;
302}
303
304static size_t copy_in_user_pt(size_t n, void __user *to,
305 const void __user *from)
306{
307 struct mm_struct *mm = current->mm;
308 unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
309 uaddr, done, size;
310 unsigned long uaddr_from = (unsigned long) from;
311 unsigned long uaddr_to = (unsigned long) to;
312 pgd_t *pgd_from, *pgd_to;
313 pmd_t *pmd_from, *pmd_to;
314 pte_t *pte_from, *pte_to;
315 int write_user;
316
317 done = 0;
318retry:
319 spin_lock(&mm->page_table_lock);
320 do {
321 pgd_from = pgd_offset(mm, uaddr_from);
322 if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
323 uaddr = uaddr_from;
324 write_user = 0;
325 goto fault;
326 }
327 pgd_to = pgd_offset(mm, uaddr_to);
328 if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
329 uaddr = uaddr_to;
330 write_user = 1;
331 goto fault;
332 }
333
334 pmd_from = pmd_offset(pgd_from, uaddr_from);
335 if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
336 uaddr = uaddr_from;
337 write_user = 0;
338 goto fault;
339 }
340 pmd_to = pmd_offset(pgd_to, uaddr_to);
341 if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
342 uaddr = uaddr_to;
343 write_user = 1;
344 goto fault;
345 }
346
347 pte_from = pte_offset_map(pmd_from, uaddr_from);
348 if (!pte_from || !pte_present(*pte_from)) {
349 uaddr = uaddr_from;
350 write_user = 0;
351 goto fault;
352 }
353 pte_to = pte_offset_map(pmd_to, uaddr_to);
354 if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
355 uaddr = uaddr_to;
356 write_user = 1;
357 goto fault;
358 }
359
360 pfn_from = pte_pfn(*pte_from);
361 if (!pfn_valid(pfn_from))
362 goto out;
363 pfn_to = pte_pfn(*pte_to);
364 if (!pfn_valid(pfn_to))
365 goto out;
366
367 offset_from = uaddr_from & (PAGE_SIZE-1);
 368		offset_to = uaddr_to & (PAGE_SIZE-1);
369 offset_max = max(offset_from, offset_to);
370 size = min(n - done, PAGE_SIZE - offset_max);
371
372 memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
373 (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
374 done += size;
375 uaddr_from += size;
376 uaddr_to += size;
377 } while (done < n);
378out:
379 spin_unlock(&mm->page_table_lock);
380 return n - done;
381fault:
382 spin_unlock(&mm->page_table_lock);
383 if (__handle_fault(mm, uaddr, write_user))
384 return n - done;
385 goto retry;
386}
387
388#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
389 asm volatile("0: l %1,0(%6)\n" \
390 "1: " insn \
391 "2: cs %1,%2,0(%6)\n" \
392 "3: jl 1b\n" \
393 " lhi %0,0\n" \
394 "4:\n" \
395 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
396 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
397 "=m" (*uaddr) \
398 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
399 "m" (*uaddr) : "cc" );
400
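Conceptually the macro above expands to the following load / operate /
compare-and-swap retry loop; the C model below is only illustrative
(futex_op_model and the function pointer are not part of the kernel code):

	static int futex_op_model(int *uval, int oparg, int (*op)(int, int))
	{
		int oldval, newval;

		do {
			oldval = *uval;			/* 0: l   %1,0(%6)   */
			newval = op(oldval, oparg);	/* 1: lr/ar/or/nr/xr */
		} while (__sync_val_compare_and_swap(uval, oldval, newval)
			 != oldval);			/* 2: cs ... 3: jl 1b */
		return 0;				/*    lhi %0,0       */
	}
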
401int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
402{
403 int oldval = 0, newval, ret;
404
405 spin_lock(&current->mm->page_table_lock);
406 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
407 if (!uaddr) {
408 spin_unlock(&current->mm->page_table_lock);
409 return -EFAULT;
410 }
411 get_page(virt_to_page(uaddr));
412 spin_unlock(&current->mm->page_table_lock);
413 switch (op) {
414 case FUTEX_OP_SET:
415 __futex_atomic_op("lr %2,%5\n",
416 ret, oldval, newval, uaddr, oparg);
417 break;
418 case FUTEX_OP_ADD:
419 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
420 ret, oldval, newval, uaddr, oparg);
421 break;
422 case FUTEX_OP_OR:
423 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
424 ret, oldval, newval, uaddr, oparg);
425 break;
426 case FUTEX_OP_ANDN:
427 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
428 ret, oldval, newval, uaddr, oparg);
429 break;
430 case FUTEX_OP_XOR:
431 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
432 ret, oldval, newval, uaddr, oparg);
433 break;
434 default:
435 ret = -ENOSYS;
436 }
437 put_page(virt_to_page(uaddr));
438 *old = oldval;
439 return ret;
440}
441
442int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
443{
444 int ret;
445
446 spin_lock(&current->mm->page_table_lock);
447 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
448 if (!uaddr) {
449 spin_unlock(&current->mm->page_table_lock);
450 return -EFAULT;
451 }
452 get_page(virt_to_page(uaddr));
453 spin_unlock(&current->mm->page_table_lock);
454 asm volatile(" cs %1,%4,0(%5)\n"
455 "0: lr %0,%1\n"
456 "1:\n"
457 EX_TABLE(0b,1b)
458 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
459 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
460 : "cc", "memory" );
461 put_page(virt_to_page(uaddr));
462 return ret;
463}
464
465struct uaccess_ops uaccess_pt = {
466 .copy_from_user = copy_from_user_pt,
467 .copy_from_user_small = copy_from_user_pt,
468 .copy_to_user = copy_to_user_pt,
469 .copy_to_user_small = copy_to_user_pt,
470 .copy_in_user = copy_in_user_pt,
471 .clear_user = clear_user_pt,
472 .strnlen_user = strnlen_user_pt,
473 .strncpy_from_user = strncpy_from_user_pt,
474 .futex_atomic_op = futex_atomic_op_pt,
475 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
476};
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 56a0214e9928..28c4500a58d0 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -13,6 +13,7 @@
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
16 17
17#ifndef __s390x__ 18#ifndef __s390x__
18#define AHI "ahi" 19#define AHI "ahi"
@@ -28,9 +29,6 @@
28#define SLR "slgr" 29#define SLR "slgr"
29#endif 30#endif
30 31
31extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
32extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
33
34size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) 32size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
35{ 33{
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -72,7 +70,8 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
72 return size; 70 return size;
73} 71}
74 72
75size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x) 73static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
74 void *x)
76{ 75{
77 if (size <= 1024) 76 if (size <= 1024)
78 return copy_from_user_std(size, ptr, x); 77 return copy_from_user_std(size, ptr, x);
@@ -110,14 +109,16 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
110 return size; 109 return size;
111} 110}
112 111
113size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x) 112static size_t copy_to_user_std_check(size_t size, void __user *ptr,
113 const void *x)
114{ 114{
115 if (size <= 1024) 115 if (size <= 1024)
116 return copy_to_user_std(size, ptr, x); 116 return copy_to_user_std(size, ptr, x);
117 return copy_to_user_pt(size, ptr, x); 117 return copy_to_user_pt(size, ptr, x);
118} 118}
119 119
120size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) 120static size_t copy_in_user_std(size_t size, void __user *to,
121 const void __user *from)
121{ 122{
122 unsigned long tmp1; 123 unsigned long tmp1;
123 124
@@ -148,7 +149,7 @@ size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
148 return size; 149 return size;
149} 150}
150 151
151size_t clear_user_std(size_t size, void __user *to) 152static size_t clear_user_std(size_t size, void __user *to)
152{ 153{
153 unsigned long tmp1, tmp2; 154 unsigned long tmp1, tmp2;
154 155
@@ -254,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
254 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 255 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
255 "m" (*uaddr) : "cc"); 256 "m" (*uaddr) : "cc");
256 257
257int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) 258int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
258{ 259{
259 int oldval = 0, newval, ret; 260 int oldval = 0, newval, ret;
260 261
@@ -286,7 +287,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
286 return ret; 287 return ret;
287} 288}
288 289
289int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) 290int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
290{ 291{
291 int ret; 292 int ret;
292 293
@@ -311,6 +312,6 @@ struct uaccess_ops uaccess_std = {
311 .clear_user = clear_user_std, 312 .clear_user = clear_user_std,
312 .strnlen_user = strnlen_user_std, 313 .strnlen_user = strnlen_user_std,
313 .strncpy_from_user = strncpy_from_user_std, 314 .strncpy_from_user = strncpy_from_user_std,
314 .futex_atomic_op = futex_atomic_op, 315 .futex_atomic_op = futex_atomic_op_std,
315 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 316 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
316}; 317};
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
index c10df144f2ab..73b3e72efc46 100644
--- a/arch/s390/math-emu/Makefile
+++ b/arch/s390/math-emu/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the FPU instruction emulation. 2# Makefile for the FPU instruction emulation.
3# 3#
4 4
5obj-$(CONFIG_MATHEMU) := math.o qrnnd.o 5obj-$(CONFIG_MATHEMU) := math.o
6 6
7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w 7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w
8EXTRA_AFLAGS := -traditional 8EXTRA_AFLAGS := -traditional
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index 6b9aec5a2c18..3ee78ccb617d 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -15,7 +15,7 @@
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <asm/lowcore.h> 16#include <asm/lowcore.h>
17 17
18#include "sfp-util.h" 18#include <asm/sfp-util.h>
19#include <math-emu/soft-fp.h> 19#include <math-emu/soft-fp.h>
20#include <math-emu/single.h> 20#include <math-emu/single.h>
21#include <math-emu/double.h> 21#include <math-emu/double.h>
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S
deleted file mode 100644
index b01c2b648e22..000000000000
--- a/arch/s390/math-emu/qrnnd.S
+++ /dev/null
@@ -1,77 +0,0 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
 7# the remainder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
 13 st %r2,24(%r15) # store pointer to remainder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
 22 brc 3,0f # (n/4) div (d/2) can overflow by 1
23 ahi %r0,-1 # trick: subtract n/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
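
The header comment of the removed qrnnd.S spells out the __udiv_qrnnd contract: divide the 64-bit value n (upper half in r3, lower half in r4) by the 32-bit divisor in r5, store the remainder through the pointer passed in r2 and return the quotient. A minimal C sketch of that contract, for orientation only; the function name is illustrative, and the classic udiv_qrnnd convention also assumes the upper half of n is smaller than d so the quotient fits in 32 bits:

	/* Illustrative C equivalent of the removed __udiv_qrnnd routine. */
	static unsigned int udiv_qrnnd_sketch(unsigned int *r,
					      unsigned int n1, unsigned int n0,
					      unsigned int d)
	{
		unsigned long long n = ((unsigned long long) n1 << 32) | n0;

		*r = n % d;	/* remainder is stored through the pointer */
		return n / d;	/* quotient is the return value */
	}

The hand-written assembly can go because the Makefile hunk above drops qrnnd.o and math.c now includes <asm/sfp-util.h> instead of the local sfp-util.h, so the division helper presumably comes from there.
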
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 607f50ead1fd..f93a056869bc 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -245,7 +245,7 @@ cmm_set_timeout(long nr, long seconds)
245 cmm_set_timer(); 245 cmm_set_timer();
246} 246}
247 247
248static inline int 248static int
249cmm_skip_blanks(char *cp, char **endp) 249cmm_skip_blanks(char *cp, char **endp)
250{ 250{
251 char *str; 251 char *str;
@@ -414,7 +414,7 @@ cmm_smsg_target(char *from, char *msg)
414} 414}
415#endif 415#endif
416 416
417struct ctl_table_header *cmm_sysctl_header; 417static struct ctl_table_header *cmm_sysctl_header;
418 418
419static int 419static int
420cmm_init (void) 420cmm_init (void)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 775bf19e742b..394980b05e6f 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/ioport.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
20#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
@@ -70,6 +71,7 @@ struct qin64 {
70struct dcss_segment { 71struct dcss_segment {
71 struct list_head list; 72 struct list_head list;
72 char dcss_name[8]; 73 char dcss_name[8];
74 char res_name[15];
73 unsigned long start_addr; 75 unsigned long start_addr;
74 unsigned long end; 76 unsigned long end;
75 atomic_t ref_count; 77 atomic_t ref_count;
@@ -77,6 +79,7 @@ struct dcss_segment {
77 unsigned int vm_segtype; 79 unsigned int vm_segtype;
78 struct qrange range[6]; 80 struct qrange range[6];
79 int segcnt; 81 int segcnt;
82 struct resource *res;
80}; 83};
81 84
82static DEFINE_MUTEX(dcss_lock); 85static DEFINE_MUTEX(dcss_lock);
@@ -88,7 +91,7 @@ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
88 * Create the 8 bytes, ebcdic VM segment name from 91 * Create the 8 bytes, ebcdic VM segment name from
89 * an ascii name. 92 * an ascii name.
90 */ 93 */
91static void inline 94static void
92dcss_mkname(char *name, char *dcss_name) 95dcss_mkname(char *name, char *dcss_name)
93{ 96{
94 int i; 97 int i;
@@ -303,6 +306,29 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
303 goto out_free; 306 goto out_free;
304 } 307 }
305 308
309 seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
310 if (seg->res == NULL) {
311 rc = -ENOMEM;
312 goto out_shared;
313 }
314 seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
315 seg->res->start = seg->start_addr;
316 seg->res->end = seg->end;
317 memcpy(&seg->res_name, seg->dcss_name, 8);
318 EBCASC(seg->res_name, 8);
319 seg->res_name[8] = '\0';
320 strncat(seg->res_name, " (DCSS)", 7);
321 seg->res->name = seg->res_name;
322 rc = seg->vm_segtype;
323 if (rc == SEG_TYPE_SC ||
324 ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
325 seg->res->flags |= IORESOURCE_READONLY;
326 if (request_resource(&iomem_resource, seg->res)) {
327 rc = -EBUSY;
328 kfree(seg->res);
329 goto out_shared;
330 }
331
306 if (do_nonshared) 332 if (do_nonshared)
307 dcss_command = DCSS_LOADNSR; 333 dcss_command = DCSS_LOADNSR;
308 else 334 else
@@ -316,12 +342,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
316 rc = dcss_diag_translate_rc (seg->end); 342 rc = dcss_diag_translate_rc (seg->end);
317 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 343 dcss_diag(DCSS_PURGESEG, seg->dcss_name,
318 &seg->start_addr, &seg->end); 344 &seg->start_addr, &seg->end);
319 goto out_shared; 345 goto out_resource;
320 } 346 }
321 seg->do_nonshared = do_nonshared; 347 seg->do_nonshared = do_nonshared;
322 atomic_set(&seg->ref_count, 1); 348 atomic_set(&seg->ref_count, 1);
323 list_add(&seg->list, &dcss_list); 349 list_add(&seg->list, &dcss_list);
324 rc = seg->vm_segtype;
325 *addr = seg->start_addr; 350 *addr = seg->start_addr;
326 *end = seg->end; 351 *end = seg->end;
327 if (do_nonshared) 352 if (do_nonshared)
@@ -329,12 +354,16 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
329 "type %s in non-shared mode\n", name, 354 "type %s in non-shared mode\n", name,
330 (void*)seg->start_addr, (void*)seg->end, 355 (void*)seg->start_addr, (void*)seg->end,
331 segtype_string[seg->vm_segtype]); 356 segtype_string[seg->vm_segtype]);
332 else 357 else {
333 PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " 358 PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
334 "type %s in shared mode\n", name, 359 "type %s in shared mode\n", name,
335 (void*)seg->start_addr, (void*)seg->end, 360 (void*)seg->start_addr, (void*)seg->end,
336 segtype_string[seg->vm_segtype]); 361 segtype_string[seg->vm_segtype]);
362 }
337 goto out; 363 goto out;
364 out_resource:
365 release_resource(seg->res);
366 kfree(seg->res);
338 out_shared: 367 out_shared:
339 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 368 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
340 out_free: 369 out_free:
@@ -401,6 +430,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
401 * -ENOENT : no such segment (segment gone!) 430 * -ENOENT : no such segment (segment gone!)
402 * -EAGAIN : segment is in use by other exploiters, try later 431 * -EAGAIN : segment is in use by other exploiters, try later
403 * -EINVAL : no segment with the given name is currently loaded - name invalid 432 * -EINVAL : no segment with the given name is currently loaded - name invalid
 433 * -EBUSY : segment is temporarily unusable (overlaps with dcss)
404 * 0 : operation succeeded 434 * 0 : operation succeeded
405 */ 435 */
406int 436int
@@ -428,12 +458,24 @@ segment_modify_shared (char *name, int do_nonshared)
428 rc = -EAGAIN; 458 rc = -EAGAIN;
429 goto out_unlock; 459 goto out_unlock;
430 } 460 }
431 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 461 release_resource(seg->res);
432 &dummy, &dummy); 462 if (do_nonshared) {
433 if (do_nonshared)
434 dcss_command = DCSS_LOADNSR; 463 dcss_command = DCSS_LOADNSR;
435 else 464 seg->res->flags &= ~IORESOURCE_READONLY;
436 dcss_command = DCSS_LOADNOLY; 465 } else {
466 dcss_command = DCSS_LOADNOLY;
467 if (seg->vm_segtype == SEG_TYPE_SR ||
468 seg->vm_segtype == SEG_TYPE_ER)
469 seg->res->flags |= IORESOURCE_READONLY;
470 }
471 if (request_resource(&iomem_resource, seg->res)) {
472 PRINT_WARN("segment_modify_shared: could not reload segment %s"
473 " - overlapping resources\n", name);
474 rc = -EBUSY;
475 kfree(seg->res);
476 goto out_del;
477 }
478 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
437 diag_cc = dcss_diag(dcss_command, seg->dcss_name, 479 diag_cc = dcss_diag(dcss_command, seg->dcss_name,
438 &seg->start_addr, &seg->end); 480 &seg->start_addr, &seg->end);
439 if (diag_cc > 1) { 481 if (diag_cc > 1) {
@@ -446,9 +488,9 @@ segment_modify_shared (char *name, int do_nonshared)
446 rc = 0; 488 rc = 0;
447 goto out_unlock; 489 goto out_unlock;
448 out_del: 490 out_del:
491 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
449 list_del(&seg->list); 492 list_del(&seg->list);
450 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 493 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
451 &dummy, &dummy);
452 kfree(seg); 494 kfree(seg);
453 out_unlock: 495 out_unlock:
454 mutex_unlock(&dcss_lock); 496 mutex_unlock(&dcss_lock);
@@ -478,6 +520,8 @@ segment_unload(char *name)
478 } 520 }
479 if (atomic_dec_return(&seg->ref_count) != 0) 521 if (atomic_dec_return(&seg->ref_count) != 0)
480 goto out_unlock; 522 goto out_unlock;
523 release_resource(seg->res);
524 kfree(seg->res);
481 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); 525 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
482 list_del(&seg->list); 526 list_del(&seg->list);
483 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); 527 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
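
The return-code comment extended above gains -EBUSY because __segment_load() now registers the DCSS address range with request_resource() and backs out when it overlaps an existing iomem resource. A hedged sketch of how a DCSS exploiter might call segment_load() and distinguish the codes; the segment name and the wrapper function are hypothetical, and the declarations are assumed to come from <asm/extmem.h>:

	static int load_my_dcss(void)		/* hypothetical caller */
	{
		unsigned long addr, end;
		int rc;

		rc = segment_load("MYDCSS", 0 /* shared mode */, &addr, &end);
		if (rc < 0) {
			if (rc == -EBUSY)	/* new: overlaps an iomem resource */
				printk(KERN_WARNING "DCSS range busy, not loaded\n");
			return rc;
		}
		/* rc >= 0 is the segment type (SEG_TYPE_*) */
		printk(KERN_INFO "DCSS at %lx-%lx, type %d\n", addr, end, rc);
		return 0;
	}
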
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index cd85e34d8703..9ff143e87746 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -52,7 +52,7 @@ extern int sysctl_userprocess_debug;
52extern void die(const char *,struct pt_regs *,long); 52extern void die(const char *,struct pt_regs *,long);
53 53
54#ifdef CONFIG_KPROBES 54#ifdef CONFIG_KPROBES
55ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); 55static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
56int register_page_fault_notifier(struct notifier_block *nb) 56int register_page_fault_notifier(struct notifier_block *nb)
57{ 57{
58 return atomic_notifier_chain_register(&notify_page_fault_chain, nb); 58 return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
@@ -137,7 +137,9 @@ static int __check_access_register(struct pt_regs *regs, int error_code)
137 137
138/* 138/*
139 * Check which address space the address belongs to. 139 * Check which address space the address belongs to.
140 * Returns 1 for user space and 0 for kernel space. 140 * May return 1 or 2 for user space and 0 for kernel space.
141 * Returns 2 for user space in primary addressing mode with
142 * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
141 */ 143 */
142static inline int check_user_space(struct pt_regs *regs, int error_code) 144static inline int check_user_space(struct pt_regs *regs, int error_code)
143{ 145{
@@ -154,7 +156,7 @@ static inline int check_user_space(struct pt_regs *regs, int error_code)
154 return __check_access_register(regs, error_code); 156 return __check_access_register(regs, error_code);
155 if (descriptor == 2) 157 if (descriptor == 2)
156 return current->thread.mm_segment.ar4; 158 return current->thread.mm_segment.ar4;
157 return descriptor != 0; 159 return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
158} 160}
159 161
160/* 162/*
@@ -183,6 +185,77 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
183 force_sig_info(SIGSEGV, &si, current); 185 force_sig_info(SIGSEGV, &si, current);
184} 186}
185 187
188#ifdef CONFIG_S390_EXEC_PROTECT
189extern long sys_sigreturn(struct pt_regs *regs);
190extern long sys_rt_sigreturn(struct pt_regs *regs);
191extern long sys32_sigreturn(struct pt_regs *regs);
192extern long sys32_rt_sigreturn(struct pt_regs *regs);
193
194static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
195 int rt)
196{
197 up_read(&mm->mmap_sem);
198 clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
199#ifdef CONFIG_COMPAT
200 if (test_tsk_thread_flag(current, TIF_31BIT)) {
201 if (rt)
202 sys32_rt_sigreturn(regs);
203 else
204 sys32_sigreturn(regs);
205 return;
206 }
207#endif /* CONFIG_COMPAT */
208 if (rt)
209 sys_rt_sigreturn(regs);
210 else
211 sys_sigreturn(regs);
212 return;
213}
214
215static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
216 unsigned long address, unsigned long error_code)
217{
218 pgd_t *pgd;
219 pmd_t *pmd;
220 pte_t *pte;
221 u16 *instruction;
222 unsigned long pfn, uaddr = regs->psw.addr;
223
224 spin_lock(&mm->page_table_lock);
225 pgd = pgd_offset(mm, uaddr);
226 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
227 goto out_fault;
228 pmd = pmd_offset(pgd, uaddr);
229 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
230 goto out_fault;
 231 pte = pte_offset_map(pmd, uaddr);
232 if (!pte || !pte_present(*pte))
233 goto out_fault;
234 pfn = pte_pfn(*pte);
235 if (!pfn_valid(pfn))
236 goto out_fault;
237 spin_unlock(&mm->page_table_lock);
238
239 instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
240 if (*instruction == 0x0a77)
241 do_sigreturn(mm, regs, 0);
242 else if (*instruction == 0x0aad)
243 do_sigreturn(mm, regs, 1);
244 else {
245 printk("- XXX - do_exception: task = %s, primary, NO EXEC "
246 "-> SIGSEGV\n", current->comm);
247 up_read(&mm->mmap_sem);
248 current->thread.prot_addr = address;
249 current->thread.trap_no = error_code;
250 do_sigsegv(regs, error_code, SEGV_MAPERR, address);
251 }
252 return 0;
253out_fault:
254 spin_unlock(&mm->page_table_lock);
255 return -EFAULT;
256}
257#endif /* CONFIG_S390_EXEC_PROTECT */
258
186/* 259/*
187 * This routine handles page faults. It determines the address, 260 * This routine handles page faults. It determines the address,
188 * and the problem, and then passes it off to one of the appropriate 261 * and the problem, and then passes it off to one of the appropriate
@@ -260,6 +333,17 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
260 vma = find_vma(mm, address); 333 vma = find_vma(mm, address);
261 if (!vma) 334 if (!vma)
262 goto bad_area; 335 goto bad_area;
336
337#ifdef CONFIG_S390_EXEC_PROTECT
338 if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
339 if (!signal_return(mm, regs, address, error_code))
340 /*
341 * signal_return() has done an up_read(&mm->mmap_sem)
342 * if it returns 0.
343 */
344 return;
345#endif
346
263 if (vma->vm_start <= address) 347 if (vma->vm_start <= address)
264 goto good_area; 348 goto good_area;
265 if (!(vma->vm_flags & VM_GROWSDOWN)) 349 if (!(vma->vm_flags & VM_GROWSDOWN))
@@ -452,8 +536,7 @@ void pfault_fini(void)
452 : : "a" (&refbk), "m" (refbk) : "cc"); 536 : : "a" (&refbk), "m" (refbk) : "cc");
453} 537}
454 538
455asmlinkage void 539static void pfault_interrupt(__u16 error_code)
456pfault_interrupt(__u16 error_code)
457{ 540{
458 struct task_struct *tsk; 541 struct task_struct *tsk;
459 __u16 subcode; 542 __u16 subcode;
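
The reworked check_user_space() earlier in this file packs three inputs into its return value: ((descriptor != 0) ^ switch_amode) << s390_noexec. A short sketch, assuming switch_amode and s390_noexec behave as 0/1 flags (the function name below is illustrative): the XOR decides whether the fault lies in user space, with switch_amode inverting which descriptor value counts as user, and the shift turns a user-space hit into 2 when noexec is active, which is exactly the value do_exception() tests before enforcing the VM_EXEC check via signal_return().

	/* Illustrative only; mirrors the expression in check_user_space(). */
	static int user_space_class(int descriptor, int switch_amode, int s390_noexec)
	{
		/*
		 * (descriptor != 0) ^ switch_amode : 0 = kernel space, 1 = user space
		 * << s390_noexec                   : a user-space result becomes 2
		 *                                    when exec protection is active
		 */
		return ((descriptor != 0) ^ switch_amode) << s390_noexec;
	}

	/* e.g. descriptor = 1, switch_amode = 0, s390_noexec = 1  ->  2 */
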
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 4bb21be3b007..b3e7c45efb63 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -25,7 +25,7 @@
25#include <linux/bootmem.h> 25#include <linux/bootmem.h>
26#include <linux/pfn.h> 26#include <linux/pfn.h>
27#include <linux/poison.h> 27#include <linux/poison.h>
28 28#include <linux/initrd.h>
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
@@ -95,20 +95,18 @@ static void __init setup_ro_region(void)
95 pte_t new_pte; 95 pte_t new_pte;
96 unsigned long address, end; 96 unsigned long address, end;
97 97
98 address = ((unsigned long)&__start_rodata) & PAGE_MASK; 98 address = ((unsigned long)&_stext) & PAGE_MASK;
99 end = PFN_ALIGN((unsigned long)&__end_rodata); 99 end = PFN_ALIGN((unsigned long)&_eshared);
100 100
101 for (; address < end; address += PAGE_SIZE) { 101 for (; address < end; address += PAGE_SIZE) {
102 pgd = pgd_offset_k(address); 102 pgd = pgd_offset_k(address);
103 pmd = pmd_offset(pgd, address); 103 pmd = pmd_offset(pgd, address);
104 pte = pte_offset_kernel(pmd, address); 104 pte = pte_offset_kernel(pmd, address);
105 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); 105 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
106 set_pte(pte, new_pte); 106 *pte = new_pte;
107 } 107 }
108} 108}
109 109
110extern void vmem_map_init(void);
111
112/* 110/*
113 * paging_init() sets up the page tables 111 * paging_init() sets up the page tables
114 */ 112 */
@@ -125,11 +123,11 @@ void __init paging_init(void)
125#ifdef CONFIG_64BIT 123#ifdef CONFIG_64BIT
126 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; 124 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
127 for (i = 0; i < PTRS_PER_PGD; i++) 125 for (i = 0; i < PTRS_PER_PGD; i++)
128 pgd_clear(pg_dir + i); 126 pgd_clear_kernel(pg_dir + i);
129#else 127#else
130 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; 128 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
131 for (i = 0; i < PTRS_PER_PGD; i++) 129 for (i = 0; i < PTRS_PER_PGD; i++)
132 pmd_clear((pmd_t *)(pg_dir + i)); 130 pmd_clear_kernel((pmd_t *)(pg_dir + i));
133#endif 131#endif
134 vmem_map_init(); 132 vmem_map_init();
135 setup_ro_region(); 133 setup_ro_region();
@@ -174,10 +172,8 @@ void __init mem_init(void)
174 datasize >>10, 172 datasize >>10,
175 initsize >> 10); 173 initsize >> 10);
176 printk("Write protected kernel read-only data: %#lx - %#lx\n", 174 printk("Write protected kernel read-only data: %#lx - %#lx\n",
177 (unsigned long)&__start_rodata, 175 (unsigned long)&_stext,
178 PFN_ALIGN((unsigned long)&__end_rodata) - 1); 176 PFN_ALIGN((unsigned long)&_eshared) - 1);
179 printk("Virtual memmap size: %ldk\n",
180 (max_pfn * sizeof(struct page)) >> 10);
181} 177}
182 178
183void free_initmem(void) 179void free_initmem(void)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index cd3d93e8c211..92a565190028 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -82,7 +82,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
82 if (!pmd) 82 if (!pmd)
83 return NULL; 83 return NULL;
84 for (i = 0; i < PTRS_PER_PMD; i++) 84 for (i = 0; i < PTRS_PER_PMD; i++)
85 pmd_clear(pmd + i); 85 pmd_clear_kernel(pmd + i);
86 return pmd; 86 return pmd;
87} 87}
88 88
@@ -97,7 +97,7 @@ static inline pte_t *vmem_pte_alloc(void)
97 return NULL; 97 return NULL;
98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY; 98 pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
99 for (i = 0; i < PTRS_PER_PTE; i++) 99 for (i = 0; i < PTRS_PER_PTE; i++)
100 set_pte(pte + i, empty_pte); 100 pte[i] = empty_pte;
101 return pte; 101 return pte;
102} 102}
103 103
@@ -119,7 +119,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
119 pm_dir = vmem_pmd_alloc(); 119 pm_dir = vmem_pmd_alloc();
120 if (!pm_dir) 120 if (!pm_dir)
121 goto out; 121 goto out;
122 pgd_populate(&init_mm, pg_dir, pm_dir); 122 pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
123 } 123 }
124 124
125 pm_dir = pmd_offset(pg_dir, address); 125 pm_dir = pmd_offset(pg_dir, address);
@@ -132,7 +132,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
132 132
133 pt_dir = pte_offset_kernel(pm_dir, address); 133 pt_dir = pte_offset_kernel(pm_dir, address);
134 pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); 134 pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
135 set_pte(pt_dir, pte); 135 *pt_dir = pte;
136 } 136 }
137 ret = 0; 137 ret = 0;
138out: 138out:
@@ -161,7 +161,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
161 if (pmd_none(*pm_dir)) 161 if (pmd_none(*pm_dir))
162 continue; 162 continue;
163 pt_dir = pte_offset_kernel(pm_dir, address); 163 pt_dir = pte_offset_kernel(pm_dir, address);
164 set_pte(pt_dir, pte); 164 *pt_dir = pte;
165 } 165 }
166 flush_tlb_kernel_range(start, start + size); 166 flush_tlb_kernel_range(start, start + size);
167} 167}
@@ -191,7 +191,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
191 pm_dir = vmem_pmd_alloc(); 191 pm_dir = vmem_pmd_alloc();
192 if (!pm_dir) 192 if (!pm_dir)
193 goto out; 193 goto out;
194 pgd_populate(&init_mm, pg_dir, pm_dir); 194 pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
195 } 195 }
196 196
197 pm_dir = pmd_offset(pg_dir, address); 197 pm_dir = pmd_offset(pg_dir, address);
@@ -210,7 +210,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
210 if (!new_page) 210 if (!new_page)
211 goto out; 211 goto out;
212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); 212 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
213 set_pte(pt_dir, pte); 213 *pt_dir = pte;
214 } 214 }
215 } 215 }
216 ret = 0; 216 ret = 0;
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa794b9..eb18be5a6569 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
29 .dma_supported = NULL, 29 .dma_supported = NULL,
30}; 30};
31 31
32void pci_swiotlb_init(void) 32void __init pci_swiotlb_init(void)
33{ 33{
34 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 34 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
35 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN) 35 if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 92ba249f3a5b..918b4d845f93 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -74,14 +74,6 @@ config CRYPTO_SHA1
74 help 74 help
75 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 75 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
76 76
77config CRYPTO_SHA1_S390
78 tristate "SHA1 digest algorithm (s390)"
79 depends on S390
80 select CRYPTO_ALGAPI
81 help
82 This is the s390 hardware accelerated implementation of the
83 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
84
85config CRYPTO_SHA256 77config CRYPTO_SHA256
86 tristate "SHA256 digest algorithm" 78 tristate "SHA256 digest algorithm"
87 select CRYPTO_ALGAPI 79 select CRYPTO_ALGAPI
@@ -91,17 +83,6 @@ config CRYPTO_SHA256
91 This version of SHA implements a 256 bit hash with 128 bits of 83 This version of SHA implements a 256 bit hash with 128 bits of
92 security against collision attacks. 84 security against collision attacks.
93 85
94config CRYPTO_SHA256_S390
95 tristate "SHA256 digest algorithm (s390)"
96 depends on S390
97 select CRYPTO_ALGAPI
98 help
99 This is the s390 hardware accelerated implementation of the
100 SHA256 secure hash standard (DFIPS 180-2).
101
102 This version of SHA implements a 256 bit hash with 128 bits of
103 security against collision attacks.
104
105config CRYPTO_SHA512 86config CRYPTO_SHA512
106 tristate "SHA384 and SHA512 digest algorithms" 87 tristate "SHA384 and SHA512 digest algorithms"
107 select CRYPTO_ALGAPI 88 select CRYPTO_ALGAPI
@@ -187,14 +168,6 @@ config CRYPTO_DES
187 help 168 help
188 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 169 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
189 170
190config CRYPTO_DES_S390
191 tristate "DES and Triple DES cipher algorithms (s390)"
192 depends on S390
193 select CRYPTO_ALGAPI
194 select CRYPTO_BLKCIPHER
195 help
196 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
197
198config CRYPTO_BLOWFISH 171config CRYPTO_BLOWFISH
199 tristate "Blowfish cipher algorithm" 172 tristate "Blowfish cipher algorithm"
200 select CRYPTO_ALGAPI 173 select CRYPTO_ALGAPI
@@ -336,28 +309,6 @@ config CRYPTO_AES_X86_64
336 309
337 See <http://csrc.nist.gov/encryption/aes/> for more information. 310 See <http://csrc.nist.gov/encryption/aes/> for more information.
338 311
339config CRYPTO_AES_S390
340 tristate "AES cipher algorithms (s390)"
341 depends on S390
342 select CRYPTO_ALGAPI
343 select CRYPTO_BLKCIPHER
344 help
345 This is the s390 hardware accelerated implementation of the
346 AES cipher algorithms (FIPS-197). AES uses the Rijndael
347 algorithm.
348
349 Rijndael appears to be consistently a very good performer in
350 both hardware and software across a wide range of computing
351 environments regardless of its use in feedback or non-feedback
352 modes. Its key setup time is excellent, and its key agility is
353 good. Rijndael's very low memory requirements make it very well
354 suited for restricted-space environments, in which it also
355 demonstrates excellent performance. Rijndael's operations are
356 among the easiest to defend against power and timing attacks.
357
358 On s390 the System z9-109 currently only supports the key size
359 of 128 bit.
360
361config CRYPTO_CAST5 312config CRYPTO_CAST5
362 tristate "CAST5 (CAST-128) cipher algorithm" 313 tristate "CAST5 (CAST-128) cipher algorithm"
363 select CRYPTO_ALGAPI 314 select CRYPTO_ALGAPI
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 879250d3d069..ff8c4beaace4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,6 +51,8 @@ config CRYPTO_DEV_PADLOCK_SHA
51 If unsure say M. The compiled module will be 51 If unsure say M. The compiled module will be
52 called padlock-sha.ko 52 called padlock-sha.ko
53 53
54source "arch/s390/crypto/Kconfig"
55
54config CRYPTO_DEV_GEODE 56config CRYPTO_DEV_GEODE
55 tristate "Support for the Geode LX AES engine" 57 tristate "Support for the Geode LX AES engine"
56 depends on CRYPTO && X86_32 && PCI 58 depends on CRYPTO && X86_32 && PCI
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ec796ad087df..850788f4dd2e 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -22,5 +22,19 @@ config HID
22 22
23 If unsure, say Y 23 If unsure, say Y
24 24
25config HID_DEBUG
26 bool "HID debugging support"
27 depends on HID
28 ---help---
29 This option lets the HID layer output diagnostics about its internal
30 state, resolve HID usages, dump HID fields, etc. Individual HID drivers
31 use this debugging facility to output information about individual HID
32 devices, etc.
33
 34 This feature is useful for those who are debugging either the HID parser
 35 or any HID hardware device.
36
37 If unsure, say N
38
25endmenu 39endmenu
26 40
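
The new HID_DEBUG help text above is deliberately general; what the patch actually adds are the exported helpers in hid-debug.c further below (hid_resolv_usage(), hid_dump_field(), hid_dump_device(), hid_dump_input()). A hedged sketch of how a HID driver might call them when CONFIG_HID_DEBUG is set; the surrounding driver function is hypothetical and the declarations are assumed to come from <linux/hid-debug.h>, which hid-core.c now includes:

	#include <linux/hid.h>
	#include <linux/hid-debug.h>

	static void my_dump_event(struct hid_device *hdev,
				  struct hid_usage *usage, __s32 value)
	{
	#ifdef CONFIG_HID_DEBUG
		hid_dump_device(hdev);		/* dump the device's report layout */
		hid_dump_input(usage, value);	/* e.g. "hid-debug: input GenericDesktop.X = 5" */
	#endif
	}
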
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6432392110bf..52e97d8f3c95 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -1,15 +1,8 @@
1# 1#
2# Makefile for the HID driver 2# Makefile for the HID driver
3# 3#
4 4hid-objs := hid-core.o hid-input.o
5# Multipart objects.
6hid-objs := hid-core.o hid-input.o
7
8# Optional parts of multipart objects.
9 5
10obj-$(CONFIG_HID) += hid.o 6obj-$(CONFIG_HID) += hid.o
11 7hid-$(CONFIG_HID_DEBUG) += hid-debug.o
12ifeq ($(CONFIG_INPUT_DEBUG),y)
13EXTRA_CFLAGS += -DDEBUG
14endif
15 8
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 49f18f5b2514..8c7d48eff7b7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -28,11 +28,9 @@
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/wait.h> 29#include <linux/wait.h>
30 30
31#undef DEBUG
32#undef DEBUG_DATA
33
34#include <linux/hid.h> 31#include <linux/hid.h>
35#include <linux/hiddev.h> 32#include <linux/hiddev.h>
33#include <linux/hid-debug.h>
36 34
37/* 35/*
38 * Version Information 36 * Version Information
@@ -951,7 +949,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
951 return -1; 949 return -1;
952 } 950 }
953 951
954#ifdef DEBUG_DATA 952#ifdef CONFIG_HID_DEBUG
955 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un"); 953 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
956#endif 954#endif
957 955
@@ -961,7 +959,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
961 size--; 959 size--;
962 } 960 }
963 961
964#ifdef DEBUG_DATA 962#ifdef CONFIG_HID_DEBUG
965 { 963 {
966 int i; 964 int i;
967 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size); 965 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
new file mode 100644
index 000000000000..89241be4ec9b
--- /dev/null
+++ b/drivers/hid/hid-debug.c
@@ -0,0 +1,764 @@
1/*
2 * $Id: hid-debug.h,v 1.8 2001/09/25 09:37:57 vojtech Exp $
3 *
4 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
5 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
6 * (c) 2007 Jiri Kosina
7 *
8 * Some debug stuff for the HID parser.
9 */
10
11/*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * Should you need to contact me, the author, you can do so either by
27 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
28 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
29 */
30
31#include <linux/hid.h>
32
33struct hid_usage_entry {
34 unsigned page;
35 unsigned usage;
36 char *description;
37};
38
39static const struct hid_usage_entry hid_usage_table[] = {
40 { 0, 0, "Undefined" },
41 { 1, 0, "GenericDesktop" },
42 {0, 0x01, "Pointer"},
43 {0, 0x02, "Mouse"},
44 {0, 0x04, "Joystick"},
45 {0, 0x05, "GamePad"},
46 {0, 0x06, "Keyboard"},
47 {0, 0x07, "Keypad"},
48 {0, 0x08, "MultiAxis"},
49 {0, 0x30, "X"},
50 {0, 0x31, "Y"},
51 {0, 0x32, "Z"},
52 {0, 0x33, "Rx"},
53 {0, 0x34, "Ry"},
54 {0, 0x35, "Rz"},
55 {0, 0x36, "Slider"},
56 {0, 0x37, "Dial"},
57 {0, 0x38, "Wheel"},
58 {0, 0x39, "HatSwitch"},
59 {0, 0x3a, "CountedBuffer"},
60 {0, 0x3b, "ByteCount"},
61 {0, 0x3c, "MotionWakeup"},
62 {0, 0x3d, "Start"},
63 {0, 0x3e, "Select"},
64 {0, 0x40, "Vx"},
65 {0, 0x41, "Vy"},
66 {0, 0x42, "Vz"},
67 {0, 0x43, "Vbrx"},
68 {0, 0x44, "Vbry"},
69 {0, 0x45, "Vbrz"},
70 {0, 0x46, "Vno"},
71 {0, 0x80, "SystemControl"},
72 {0, 0x81, "SystemPowerDown"},
73 {0, 0x82, "SystemSleep"},
74 {0, 0x83, "SystemWakeUp"},
75 {0, 0x84, "SystemContextMenu"},
76 {0, 0x85, "SystemMainMenu"},
77 {0, 0x86, "SystemAppMenu"},
78 {0, 0x87, "SystemMenuHelp"},
79 {0, 0x88, "SystemMenuExit"},
80 {0, 0x89, "SystemMenuSelect"},
81 {0, 0x8a, "SystemMenuRight"},
82 {0, 0x8b, "SystemMenuLeft"},
83 {0, 0x8c, "SystemMenuUp"},
84 {0, 0x8d, "SystemMenuDown"},
85 {0, 0x90, "D-PadUp"},
86 {0, 0x91, "D-PadDown"},
87 {0, 0x92, "D-PadRight"},
88 {0, 0x93, "D-PadLeft"},
89 { 2, 0, "Simulation" },
90 {0, 0xb0, "Aileron"},
91 {0, 0xb1, "AileronTrim"},
92 {0, 0xb2, "Anti-Torque"},
93 {0, 0xb3, "Autopilot"},
94 {0, 0xb4, "Chaff"},
95 {0, 0xb5, "Collective"},
96 {0, 0xb6, "DiveBrake"},
97 {0, 0xb7, "ElectronicCountermeasures"},
98 {0, 0xb8, "Elevator"},
99 {0, 0xb9, "ElevatorTrim"},
100 {0, 0xba, "Rudder"},
101 {0, 0xbb, "Throttle"},
102 {0, 0xbc, "FlightCommunications"},
103 {0, 0xbd, "FlareRelease"},
104 {0, 0xbe, "LandingGear"},
105 {0, 0xbf, "ToeBrake"},
106 { 7, 0, "Keyboard" },
107 { 8, 0, "LED" },
108 {0, 0x01, "NumLock"},
109 {0, 0x02, "CapsLock"},
110 {0, 0x03, "ScrollLock"},
111 {0, 0x04, "Compose"},
112 {0, 0x05, "Kana"},
113 {0, 0x4b, "GenericIndicator"},
114 { 9, 0, "Button" },
115 { 10, 0, "Ordinal" },
116 { 12, 0, "Consumer" },
117 {0, 0x238, "HorizontalWheel"},
118 { 13, 0, "Digitizers" },
119 {0, 0x01, "Digitizer"},
120 {0, 0x02, "Pen"},
121 {0, 0x03, "LightPen"},
122 {0, 0x04, "TouchScreen"},
123 {0, 0x05, "TouchPad"},
124 {0, 0x20, "Stylus"},
125 {0, 0x21, "Puck"},
126 {0, 0x22, "Finger"},
127 {0, 0x30, "TipPressure"},
128 {0, 0x31, "BarrelPressure"},
129 {0, 0x32, "InRange"},
130 {0, 0x33, "Touch"},
131 {0, 0x34, "UnTouch"},
132 {0, 0x35, "Tap"},
133 {0, 0x39, "TabletFunctionKey"},
134 {0, 0x3a, "ProgramChangeKey"},
135 {0, 0x3c, "Invert"},
136 {0, 0x42, "TipSwitch"},
137 {0, 0x43, "SecondaryTipSwitch"},
138 {0, 0x44, "BarrelSwitch"},
139 {0, 0x45, "Eraser"},
140 {0, 0x46, "TabletPick"},
141 { 15, 0, "PhysicalInterfaceDevice" },
142 {0, 0x00, "Undefined"},
143 {0, 0x01, "Physical_Interface_Device"},
144 {0, 0x20, "Normal"},
145 {0, 0x21, "Set_Effect_Report"},
146 {0, 0x22, "Effect_Block_Index"},
147 {0, 0x23, "Parameter_Block_Offset"},
148 {0, 0x24, "ROM_Flag"},
149 {0, 0x25, "Effect_Type"},
150 {0, 0x26, "ET_Constant_Force"},
151 {0, 0x27, "ET_Ramp"},
152 {0, 0x28, "ET_Custom_Force_Data"},
153 {0, 0x30, "ET_Square"},
154 {0, 0x31, "ET_Sine"},
155 {0, 0x32, "ET_Triangle"},
156 {0, 0x33, "ET_Sawtooth_Up"},
157 {0, 0x34, "ET_Sawtooth_Down"},
158 {0, 0x40, "ET_Spring"},
159 {0, 0x41, "ET_Damper"},
160 {0, 0x42, "ET_Inertia"},
161 {0, 0x43, "ET_Friction"},
162 {0, 0x50, "Duration"},
163 {0, 0x51, "Sample_Period"},
164 {0, 0x52, "Gain"},
165 {0, 0x53, "Trigger_Button"},
166 {0, 0x54, "Trigger_Repeat_Interval"},
167 {0, 0x55, "Axes_Enable"},
168 {0, 0x56, "Direction_Enable"},
169 {0, 0x57, "Direction"},
170 {0, 0x58, "Type_Specific_Block_Offset"},
171 {0, 0x59, "Block_Type"},
172 {0, 0x5A, "Set_Envelope_Report"},
173 {0, 0x5B, "Attack_Level"},
174 {0, 0x5C, "Attack_Time"},
175 {0, 0x5D, "Fade_Level"},
176 {0, 0x5E, "Fade_Time"},
177 {0, 0x5F, "Set_Condition_Report"},
178 {0, 0x60, "CP_Offset"},
179 {0, 0x61, "Positive_Coefficient"},
180 {0, 0x62, "Negative_Coefficient"},
181 {0, 0x63, "Positive_Saturation"},
182 {0, 0x64, "Negative_Saturation"},
183 {0, 0x65, "Dead_Band"},
184 {0, 0x66, "Download_Force_Sample"},
185 {0, 0x67, "Isoch_Custom_Force_Enable"},
186 {0, 0x68, "Custom_Force_Data_Report"},
187 {0, 0x69, "Custom_Force_Data"},
188 {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
189 {0, 0x6B, "Set_Custom_Force_Report"},
190 {0, 0x6C, "Custom_Force_Data_Offset"},
191 {0, 0x6D, "Sample_Count"},
192 {0, 0x6E, "Set_Periodic_Report"},
193 {0, 0x6F, "Offset"},
194 {0, 0x70, "Magnitude"},
195 {0, 0x71, "Phase"},
196 {0, 0x72, "Period"},
197 {0, 0x73, "Set_Constant_Force_Report"},
198 {0, 0x74, "Set_Ramp_Force_Report"},
199 {0, 0x75, "Ramp_Start"},
200 {0, 0x76, "Ramp_End"},
201 {0, 0x77, "Effect_Operation_Report"},
202 {0, 0x78, "Effect_Operation"},
203 {0, 0x79, "Op_Effect_Start"},
204 {0, 0x7A, "Op_Effect_Start_Solo"},
205 {0, 0x7B, "Op_Effect_Stop"},
206 {0, 0x7C, "Loop_Count"},
207 {0, 0x7D, "Device_Gain_Report"},
208 {0, 0x7E, "Device_Gain"},
209 {0, 0x7F, "PID_Pool_Report"},
210 {0, 0x80, "RAM_Pool_Size"},
211 {0, 0x81, "ROM_Pool_Size"},
212 {0, 0x82, "ROM_Effect_Block_Count"},
213 {0, 0x83, "Simultaneous_Effects_Max"},
214 {0, 0x84, "Pool_Alignment"},
215 {0, 0x85, "PID_Pool_Move_Report"},
216 {0, 0x86, "Move_Source"},
217 {0, 0x87, "Move_Destination"},
218 {0, 0x88, "Move_Length"},
219 {0, 0x89, "PID_Block_Load_Report"},
220 {0, 0x8B, "Block_Load_Status"},
221 {0, 0x8C, "Block_Load_Success"},
222 {0, 0x8D, "Block_Load_Full"},
223 {0, 0x8E, "Block_Load_Error"},
224 {0, 0x8F, "Block_Handle"},
225 {0, 0x90, "PID_Block_Free_Report"},
226 {0, 0x91, "Type_Specific_Block_Handle"},
227 {0, 0x92, "PID_State_Report"},
228 {0, 0x94, "Effect_Playing"},
229 {0, 0x95, "PID_Device_Control_Report"},
230 {0, 0x96, "PID_Device_Control"},
231 {0, 0x97, "DC_Enable_Actuators"},
232 {0, 0x98, "DC_Disable_Actuators"},
233 {0, 0x99, "DC_Stop_All_Effects"},
234 {0, 0x9A, "DC_Device_Reset"},
235 {0, 0x9B, "DC_Device_Pause"},
236 {0, 0x9C, "DC_Device_Continue"},
237 {0, 0x9F, "Device_Paused"},
238 {0, 0xA0, "Actuators_Enabled"},
239 {0, 0xA4, "Safety_Switch"},
240 {0, 0xA5, "Actuator_Override_Switch"},
241 {0, 0xA6, "Actuator_Power"},
242 {0, 0xA7, "Start_Delay"},
243 {0, 0xA8, "Parameter_Block_Size"},
244 {0, 0xA9, "Device_Managed_Pool"},
245 {0, 0xAA, "Shared_Parameter_Blocks"},
246 {0, 0xAB, "Create_New_Effect_Report"},
247 {0, 0xAC, "RAM_Pool_Available"},
248 { 0x84, 0, "Power Device" },
249 { 0x84, 0x02, "PresentStatus" },
250 { 0x84, 0x03, "ChangeStatus" },
251 { 0x84, 0x04, "UPS" },
252 { 0x84, 0x05, "PowerSupply" },
253 { 0x84, 0x10, "BatterySystem" },
254 { 0x84, 0x11, "BatterySystemID" },
255 { 0x84, 0x12, "Battery" },
256 { 0x84, 0x13, "BatteryID" },
257 { 0x84, 0x14, "Charger" },
258 { 0x84, 0x15, "ChargerID" },
259 { 0x84, 0x16, "PowerConverter" },
260 { 0x84, 0x17, "PowerConverterID" },
261 { 0x84, 0x18, "OutletSystem" },
262 { 0x84, 0x19, "OutletSystemID" },
263 { 0x84, 0x1a, "Input" },
264 { 0x84, 0x1b, "InputID" },
265 { 0x84, 0x1c, "Output" },
266 { 0x84, 0x1d, "OutputID" },
267 { 0x84, 0x1e, "Flow" },
268 { 0x84, 0x1f, "FlowID" },
269 { 0x84, 0x20, "Outlet" },
270 { 0x84, 0x21, "OutletID" },
271 { 0x84, 0x22, "Gang" },
272 { 0x84, 0x24, "PowerSummary" },
273 { 0x84, 0x25, "PowerSummaryID" },
274 { 0x84, 0x30, "Voltage" },
275 { 0x84, 0x31, "Current" },
276 { 0x84, 0x32, "Frequency" },
277 { 0x84, 0x33, "ApparentPower" },
278 { 0x84, 0x35, "PercentLoad" },
279 { 0x84, 0x40, "ConfigVoltage" },
280 { 0x84, 0x41, "ConfigCurrent" },
281 { 0x84, 0x43, "ConfigApparentPower" },
282 { 0x84, 0x53, "LowVoltageTransfer" },
283 { 0x84, 0x54, "HighVoltageTransfer" },
284 { 0x84, 0x56, "DelayBeforeStartup" },
285 { 0x84, 0x57, "DelayBeforeShutdown" },
286 { 0x84, 0x58, "Test" },
287 { 0x84, 0x5a, "AudibleAlarmControl" },
288 { 0x84, 0x60, "Present" },
289 { 0x84, 0x61, "Good" },
290 { 0x84, 0x62, "InternalFailure" },
291 { 0x84, 0x65, "Overload" },
292 { 0x84, 0x66, "OverCharged" },
293 { 0x84, 0x67, "OverTemperature" },
294 { 0x84, 0x68, "ShutdownRequested" },
295 { 0x84, 0x69, "ShutdownImminent" },
296 { 0x84, 0x6b, "SwitchOn/Off" },
297 { 0x84, 0x6c, "Switchable" },
298 { 0x84, 0x6d, "Used" },
299 { 0x84, 0x6e, "Boost" },
300 { 0x84, 0x73, "CommunicationLost" },
301 { 0x84, 0xfd, "iManufacturer" },
302 { 0x84, 0xfe, "iProduct" },
303 { 0x84, 0xff, "iSerialNumber" },
304 { 0x85, 0, "Battery System" },
305 { 0x85, 0x01, "SMBBatteryMode" },
306 { 0x85, 0x02, "SMBBatteryStatus" },
307 { 0x85, 0x03, "SMBAlarmWarning" },
308 { 0x85, 0x04, "SMBChargerMode" },
309 { 0x85, 0x05, "SMBChargerStatus" },
310 { 0x85, 0x06, "SMBChargerSpecInfo" },
311 { 0x85, 0x07, "SMBSelectorState" },
312 { 0x85, 0x08, "SMBSelectorPresets" },
313 { 0x85, 0x09, "SMBSelectorInfo" },
314 { 0x85, 0x29, "RemainingCapacityLimit" },
315 { 0x85, 0x2c, "CapacityMode" },
316 { 0x85, 0x42, "BelowRemainingCapacityLimit" },
317 { 0x85, 0x44, "Charging" },
318 { 0x85, 0x45, "Discharging" },
319 { 0x85, 0x4b, "NeedReplacement" },
320 { 0x85, 0x66, "RemainingCapacity" },
321 { 0x85, 0x68, "RunTimeToEmpty" },
322 { 0x85, 0x6a, "AverageTimeToFull" },
323 { 0x85, 0x83, "DesignCapacity" },
324 { 0x85, 0x85, "ManufacturerDate" },
325 { 0x85, 0x89, "iDeviceChemistry" },
326 { 0x85, 0x8b, "Rechargable" },
327 { 0x85, 0x8f, "iOEMInformation" },
328 { 0x85, 0x8d, "CapacityGranularity1" },
329 { 0x85, 0xd0, "ACPresent" },
330 /* pages 0xff00 to 0xffff are vendor-specific */
331 { 0xffff, 0, "Vendor-specific-FF" },
332 { 0, 0, NULL }
333};
334
335static void resolv_usage_page(unsigned page) {
336 const struct hid_usage_entry *p;
337
338 for (p = hid_usage_table; p->description; p++)
339 if (p->page == page) {
340 printk("%s", p->description);
341 return;
342 }
343 printk("%04x", page);
344}
345
346void hid_resolv_usage(unsigned usage) {
347 const struct hid_usage_entry *p;
348
349 resolv_usage_page(usage >> 16);
350 printk(".");
351 for (p = hid_usage_table; p->description; p++)
352 if (p->page == (usage >> 16)) {
353 for(++p; p->description && p->usage != 0; p++)
354 if (p->usage == (usage & 0xffff)) {
355 printk("%s", p->description);
356 return;
357 }
358 break;
359 }
360 printk("%04x", usage & 0xffff);
361}
362EXPORT_SYMBOL_GPL(hid_resolv_usage);
363
364__inline__ static void tab(int n) {
365 while (n--) printk(" ");
366}
367
368void hid_dump_field(struct hid_field *field, int n) {
369 int j;
370
371 if (field->physical) {
372 tab(n);
373 printk("Physical(");
374 hid_resolv_usage(field->physical); printk(")\n");
375 }
376 if (field->logical) {
377 tab(n);
378 printk("Logical(");
379 hid_resolv_usage(field->logical); printk(")\n");
380 }
381 tab(n); printk("Usage(%d)\n", field->maxusage);
382 for (j = 0; j < field->maxusage; j++) {
383 tab(n+2); hid_resolv_usage(field->usage[j].hid); printk("\n");
384 }
385 if (field->logical_minimum != field->logical_maximum) {
386 tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
387 tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
388 }
389 if (field->physical_minimum != field->physical_maximum) {
390 tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
391 tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
392 }
393 if (field->unit_exponent) {
394 tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
395 }
396 if (field->unit) {
397 char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
398 char *units[5][8] = {
399 { "None", "None", "None", "None", "None", "None", "None", "None" },
400 { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
401 { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
402 { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
403 { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
404 };
405
406 int i;
407 int sys;
408 __u32 data = field->unit;
409
410 /* First nibble tells us which system we're in. */
411 sys = data & 0xf;
412 data >>= 4;
413
414 if(sys > 4) {
415 tab(n); printk("Unit(Invalid)\n");
416 }
417 else {
418 int earlier_unit = 0;
419
420 tab(n); printk("Unit(%s : ", systems[sys]);
421
422 for (i=1 ; i<sizeof(__u32)*2 ; i++) {
423 char nibble = data & 0xf;
424 data >>= 4;
425 if (nibble != 0) {
426 if(earlier_unit++ > 0)
427 printk("*");
428 printk("%s", units[sys][i]);
429 if(nibble != 1) {
430 /* This is a _signed_ nibble(!) */
431
432 int val = nibble & 0x7;
433 if(nibble & 0x08)
434 val = -((0x7 & ~val) +1);
435 printk("^%d", val);
436 }
437 }
438 }
439 printk(")\n");
440 }
441 }
442 tab(n); printk("Report Size(%u)\n", field->report_size);
443 tab(n); printk("Report Count(%u)\n", field->report_count);
444 tab(n); printk("Report Offset(%u)\n", field->report_offset);
445
446 tab(n); printk("Flags( ");
447 j = field->flags;
448 printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
449 printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
450 printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
451 printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
452 printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
453 printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPrefferedState " : "");
454 printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
455 printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
456 printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
457 printk(")\n");
458}
459EXPORT_SYMBOL_GPL(hid_dump_field);
460
461void hid_dump_device(struct hid_device *device) {
462 struct hid_report_enum *report_enum;
463 struct hid_report *report;
464 struct list_head *list;
465 unsigned i,k;
466 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
467
468 for (i = 0; i < HID_REPORT_TYPES; i++) {
469 report_enum = device->report_enum + i;
470 list = report_enum->report_list.next;
471 while (list != &report_enum->report_list) {
472 report = (struct hid_report *) list;
473 tab(2);
474 printk("%s", table[i]);
475 if (report->id)
476 printk("(%d)", report->id);
477 printk("[%s]", table[report->type]);
478 printk("\n");
479 for (k = 0; k < report->maxfield; k++) {
480 tab(4);
481 printk("Field(%d)\n", k);
482 hid_dump_field(report->field[k], 6);
483 }
484 list = list->next;
485 }
486 }
487}
488EXPORT_SYMBOL_GPL(hid_dump_device);
489
490void hid_dump_input(struct hid_usage *usage, __s32 value) {
491 printk("hid-debug: input ");
492 hid_resolv_usage(usage->hid);
493 printk(" = %d\n", value);
494}
495EXPORT_SYMBOL_GPL(hid_dump_input);
496
497static char *events[EV_MAX + 1] = {
498 [EV_SYN] = "Sync", [EV_KEY] = "Key",
499 [EV_REL] = "Relative", [EV_ABS] = "Absolute",
500 [EV_MSC] = "Misc", [EV_LED] = "LED",
501 [EV_SND] = "Sound", [EV_REP] = "Repeat",
502 [EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
503 [EV_FF_STATUS] = "ForceFeedbackStatus",
504};
505
506static char *syncs[2] = {
507 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
508};
509static char *keys[KEY_MAX + 1] = {
510 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
511 [KEY_1] = "1", [KEY_2] = "2",
512 [KEY_3] = "3", [KEY_4] = "4",
513 [KEY_5] = "5", [KEY_6] = "6",
514 [KEY_7] = "7", [KEY_8] = "8",
515 [KEY_9] = "9", [KEY_0] = "0",
516 [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal",
517 [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab",
518 [KEY_Q] = "Q", [KEY_W] = "W",
519 [KEY_E] = "E", [KEY_R] = "R",
520 [KEY_T] = "T", [KEY_Y] = "Y",
521 [KEY_U] = "U", [KEY_I] = "I",
522 [KEY_O] = "O", [KEY_P] = "P",
523 [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace",
524 [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl",
525 [KEY_A] = "A", [KEY_S] = "S",
526 [KEY_D] = "D", [KEY_F] = "F",
527 [KEY_G] = "G", [KEY_H] = "H",
528 [KEY_J] = "J", [KEY_K] = "K",
529 [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon",
530 [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave",
531 [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash",
532 [KEY_Z] = "Z", [KEY_X] = "X",
533 [KEY_C] = "C", [KEY_V] = "V",
534 [KEY_B] = "B", [KEY_N] = "N",
535 [KEY_M] = "M", [KEY_COMMA] = "Comma",
536 [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash",
537 [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk",
538 [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space",
539 [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1",
540 [KEY_F2] = "F2", [KEY_F3] = "F3",
541 [KEY_F4] = "F4", [KEY_F5] = "F5",
542 [KEY_F6] = "F6", [KEY_F7] = "F7",
543 [KEY_F8] = "F8", [KEY_F9] = "F9",
544 [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock",
545 [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7",
546 [KEY_KP8] = "KP8", [KEY_KP9] = "KP9",
547 [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4",
548 [KEY_KP5] = "KP5", [KEY_KP6] = "KP6",
549 [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1",
550 [KEY_KP2] = "KP2", [KEY_KP3] = "KP3",
551 [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot",
552 [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd",
553 [KEY_F11] = "F11", [KEY_F12] = "F12",
554 [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana",
555 [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan",
556 [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan",
557 [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter",
558 [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash",
559 [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt",
560 [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home",
561 [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp",
562 [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right",
563 [KEY_END] = "End", [KEY_DOWN] = "Down",
564 [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert",
565 [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro",
566 [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown",
567 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
568 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
569 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
570 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
571 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
572 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
573 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
574 [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo",
575 [KEY_FRONT] = "Front", [KEY_COPY] = "Copy",
576 [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste",
577 [KEY_FIND] = "Find", [KEY_CUT] = "Cut",
578 [KEY_HELP] = "Help", [KEY_MENU] = "Menu",
579 [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup",
580 [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp",
581 [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile",
582 [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
583 [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
584 [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
585 [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
586 [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
587 [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
588 [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
589 [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD",
590 [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong",
591 [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong",
592 [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record",
593 [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone",
594 [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config",
595 [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh",
596 [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move",
597 [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp",
598 [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
599 [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
600 [KEY_REDO] = "Redo", [KEY_F13] = "F13",
601 [KEY_F14] = "F14", [KEY_F15] = "F15",
602 [KEY_F16] = "F16", [KEY_F17] = "F17",
603 [KEY_F18] = "F18", [KEY_F19] = "F19",
604 [KEY_F20] = "F20", [KEY_F21] = "F21",
605 [KEY_F22] = "F22", [KEY_F23] = "F23",
606 [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
607 [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
608 [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
609 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
610 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
611 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
612 [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound",
613 [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email",
614 [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search",
615 [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance",
616 [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop",
617 [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
618 [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
619 [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
620 [BTN_0] = "Btn0", [BTN_1] = "Btn1",
621 [BTN_2] = "Btn2", [BTN_3] = "Btn3",
622 [BTN_4] = "Btn4", [BTN_5] = "Btn5",
623 [BTN_6] = "Btn6", [BTN_7] = "Btn7",
624 [BTN_8] = "Btn8", [BTN_9] = "Btn9",
625 [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn",
626 [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn",
627 [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn",
628 [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn",
629 [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn",
630 [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn",
631 [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn",
632 [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2",
633 [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4",
634 [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6",
635 [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA",
636 [BTN_B] = "BtnB", [BTN_C] = "BtnC",
637 [BTN_X] = "BtnX", [BTN_Y] = "BtnY",
638 [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL",
639 [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2",
640 [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
641 [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
642 [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
643 [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
644 [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
645 [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
646 [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
647 [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
648 [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
649 [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
650 [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
651 [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
652 [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
653 [KEY_OPTION] = "Option", [KEY_INFO] = "Info",
654 [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor",
655 [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program",
656 [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites",
657 [KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
658 [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
659 [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
660 [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
661 [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
662 [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
663 [KEY_TV] = "TV", [KEY_TV2] = "TV2",
664 [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
665 [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
666 [KEY_CD] = "CD", [KEY_TAPE] = "Tape",
667 [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner",
668 [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text",
669 [KEY_DVD] = "DVD", [KEY_AUX] = "Aux",
670 [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio",
671 [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory",
672 [KEY_LIST] = "List", [KEY_MEMO] = "Memo",
673 [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red",
674 [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow",
675 [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp",
676 [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First",
677 [KEY_LAST] = "Last", [KEY_AB] = "AB",
678 [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart",
679 [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle",
680 [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous",
681 [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN",
682 [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL",
683 [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine",
684 [KEY_DEL_LINE] = "DeleteLine",
685 [KEY_SEND] = "Send", [KEY_REPLY] = "Reply",
686 [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save",
687 [KEY_DOCUMENTS] = "Documents",
688 [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC",
689 [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2",
690 [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D",
691 [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F",
692 [KEY_FN_S] = "Fn+S",
693 [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2",
694 [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4",
695 [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6",
696 [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8",
697 [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10",
698 [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12",
699 [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle",
700 [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
701 [KEY_KBDILLUMUP] = "KbdIlluminationUp",
702 [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
703};
704
705static char *relatives[REL_MAX + 1] = {
706 [REL_X] = "X", [REL_Y] = "Y",
707 [REL_Z] = "Z", [REL_RX] = "Rx",
708 [REL_RY] = "Ry", [REL_RZ] = "Rz",
709 [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
710 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
711};
712
713static char *absolutes[ABS_MAX + 1] = {
714 [ABS_X] = "X", [ABS_Y] = "Y",
715 [ABS_Z] = "Z", [ABS_RX] = "Rx",
716 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
717 [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder",
718 [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas",
719 [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X",
720 [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X",
721 [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X",
722 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
723 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
724 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
725 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
726 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
727};
728
729static char *misc[MSC_MAX + 1] = {
730 [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled",
731 [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData"
732};
733
734static char *leds[LED_MAX + 1] = {
735 [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock",
736 [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose",
737 [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep",
738 [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute",
739 [LED_MISC] = "Misc",
740};
741
742static char *repeats[REP_MAX + 1] = {
743 [REP_DELAY] = "Delay", [REP_PERIOD] = "Period"
744};
745
746static char *sounds[SND_MAX + 1] = {
747 [SND_CLICK] = "Click", [SND_BELL] = "Bell",
748 [SND_TONE] = "Tone"
749};
750
751static char **names[EV_MAX + 1] = {
752 [EV_SYN] = syncs, [EV_KEY] = keys,
753 [EV_REL] = relatives, [EV_ABS] = absolutes,
754 [EV_MSC] = misc, [EV_LED] = leds,
755 [EV_SND] = sounds, [EV_REP] = repeats,
756};
757
758void hid_resolv_event(__u8 type, __u16 code) {
759
760 printk("%s.%s", events[type] ? events[type] : "?",
761 names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
762}
763EXPORT_SYMBOL_GPL(hid_resolv_event);
764
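The resolver above is driven entirely by designated-initializer string tables: names[] maps an event type to its per-type table, the event code indexes into that table, and NULL entries fall back to "?". A minimal user-space sketch of the same two-level lookup, assuming nothing beyond standard C (the type/code values and table entries here are illustrative, not the full kernel tables):

#include <stdio.h>

#define EV_KEY	0x01
#define EV_REL	0x02
#define EV_MAX	0x1f
#define KEY_MAX	0x1ff
#define REL_MAX	0x0f

/* Sparse tables: anything not listed stays NULL and prints as "?" */
static const char *keys[KEY_MAX + 1] = {
	[28] = "Enter", [57] = "Space",
};
static const char *relatives[REL_MAX + 1] = {
	[0] = "X", [1] = "Y", [8] = "Wheel",
};
static const char *events[EV_MAX + 1] = {
	[EV_KEY] = "Key", [EV_REL] = "Relative",
};
static const char **names[EV_MAX + 1] = {
	[EV_KEY] = keys, [EV_REL] = relatives,
};

/* Same fallback logic as hid_resolv_event() above */
static void resolv_event(unsigned type, unsigned code)
{
	printf("%s.%s\n", events[type] ? events[type] : "?",
	       names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
}

int main(void)
{
	resolv_event(EV_KEY, 28);	/* Key.Enter */
	resolv_event(EV_REL, 8);	/* Relative.Wheel */
	resolv_event(EV_REL, 5);	/* Relative.? */
	return 0;
}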
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c7a6833f6821..25d180a24fc4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -31,9 +31,8 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33 33
34#undef DEBUG
35
36#include <linux/hid.h> 34#include <linux/hid.h>
35#include <linux/hid-debug.h>
37 36
38static int hid_pb_fnmode = 1; 37static int hid_pb_fnmode = 1;
39module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644); 38module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644);
@@ -252,9 +251,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
252 251
253 field->hidinput = hidinput; 252 field->hidinput = hidinput;
254 253
255#ifdef DEBUG 254#ifdef CONFIG_HID_DEBUG
256 printk(KERN_DEBUG "Mapping: "); 255 printk(KERN_DEBUG "Mapping: ");
257 resolv_usage(usage->hid); 256 hid_resolv_usage(usage->hid);
258 printk(" ---> "); 257 printk(" ---> ");
259#endif 258#endif
260 259
@@ -682,14 +681,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
682 field->dpad = usage->code; 681 field->dpad = usage->code;
683 } 682 }
684 683
685#ifdef DEBUG 684 hid_resolv_event(usage->type, usage->code);
686 resolv_event(usage->type, usage->code); 685#ifdef CONFIG_HID_DEBUG
687 printk("\n"); 686 printk("\n");
688#endif 687#endif
689 return; 688 return;
690 689
691ignore: 690ignore:
692#ifdef DEBUG 691#ifdef CONFIG_HID_DEBUG
693 printk("IGNORED\n"); 692 printk("IGNORED\n");
694#endif 693#endif
695 return; 694 return;
@@ -804,6 +803,18 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
804} 803}
805EXPORT_SYMBOL_GPL(hidinput_find_field); 804EXPORT_SYMBOL_GPL(hidinput_find_field);
806 805
806static int hidinput_open(struct input_dev *dev)
807{
808 struct hid_device *hid = dev->private;
809 return hid->hid_open(hid);
810}
811
812static void hidinput_close(struct input_dev *dev)
813{
814 struct hid_device *hid = dev->private;
815 hid->hid_close(hid);
816}
817
807/* 818/*
808 * Register the input device; print a message. 819 * Register the input device; print a message.
809 * Configure the input layer interface 820 * Configure the input layer interface
@@ -816,6 +827,7 @@ int hidinput_connect(struct hid_device *hid)
816 struct hid_input *hidinput = NULL; 827 struct hid_input *hidinput = NULL;
817 struct input_dev *input_dev; 828 struct input_dev *input_dev;
818 int i, j, k; 829 int i, j, k;
830 int max_report_type = HID_OUTPUT_REPORT;
819 831
820 INIT_LIST_HEAD(&hid->inputs); 832 INIT_LIST_HEAD(&hid->inputs);
821 833
@@ -828,7 +840,10 @@ int hidinput_connect(struct hid_device *hid)
828 if (i == hid->maxcollection) 840 if (i == hid->maxcollection)
829 return -1; 841 return -1;
830 842
831 for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) 843 if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
844 max_report_type = HID_INPUT_REPORT;
845
846 for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
832 list_for_each_entry(report, &hid->report_enum[k].report_list, list) { 847 list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
833 848
834 if (!report->maxfield) 849 if (!report->maxfield)
@@ -846,8 +861,8 @@ int hidinput_connect(struct hid_device *hid)
846 861
847 input_dev->private = hid; 862 input_dev->private = hid;
848 input_dev->event = hid->hidinput_input_event; 863 input_dev->event = hid->hidinput_input_event;
849 input_dev->open = hid->hidinput_open; 864 input_dev->open = hidinput_open;
850 input_dev->close = hid->hidinput_close; 865 input_dev->close = hidinput_close;
851 866
852 input_dev->name = hid->name; 867 input_dev->name = hid->name;
853 input_dev->phys = hid->phys; 868 input_dev->phys = hid->phys;
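In the hid-input hunk the input core's open/close hooks stop pointing at transport-specific functions directly; small thunks recover the owning hid_device from dev->private and forward to hid->hid_open()/hid->hid_close(). A reduced stand-alone sketch of that forwarding pattern follows (the struct names and fields are invented for illustration, not the real hid/input definitions):

#include <stdio.h>

/* Illustrative stand-ins for struct hid_device / struct input_dev */
struct fake_hid {
	int (*ll_open)(struct fake_hid *hid);
	void (*ll_close)(struct fake_hid *hid);
};

struct fake_input {
	void *private;				/* points back to the owning fake_hid */
	int (*open)(struct fake_input *dev);
	void (*close)(struct fake_input *dev);
};

/* Thunks: recover the owner from ->private and forward, as hidinput_open() does */
static int input_open_thunk(struct fake_input *dev)
{
	struct fake_hid *hid = dev->private;
	return hid->ll_open(hid);
}

static void input_close_thunk(struct fake_input *dev)
{
	struct fake_hid *hid = dev->private;
	hid->ll_close(hid);
}

static int demo_open(struct fake_hid *hid)
{
	(void)hid;				/* a real driver would start I/O here */
	puts("transport open");
	return 0;
}

static void demo_close(struct fake_hid *hid)
{
	(void)hid;
	puts("transport close");
}

int main(void)
{
	struct fake_hid hid = { .ll_open = demo_open, .ll_close = demo_close };
	struct fake_input idev = { .private = &hid,
				   .open = input_open_thunk,
				   .close = input_close_thunk };

	idev.open(&idev);
	idev.close(&idev);
	return 0;
}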
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index af939796750d..d2bb5a9a303f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -360,8 +360,7 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;
 
-		if (neigh->dev->type == ARPHRD_INFINIBAND &&
-		    (neigh->nud_state & NUD_VALID)) {
+		if (neigh->nud_state & NUD_VALID) {
 			set_timeout(jiffies);
 		}
 	}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed141ebd1c8..13efd4170349 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -642,7 +642,8 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
643} 643}
644 644
645static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, 645static void build_smp_wc(struct ib_qp *qp,
646 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
646 struct ib_wc *wc) 647 struct ib_wc *wc)
647{ 648{
648 memset(wc, 0, sizeof *wc); 649 memset(wc, 0, sizeof *wc);
@@ -652,7 +653,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
652 wc->pkey_index = pkey_index; 653 wc->pkey_index = pkey_index;
653 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); 654 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
654 wc->src_qp = IB_QP0; 655 wc->src_qp = IB_QP0;
655 wc->qp_num = IB_QP0; 656 wc->qp = qp;
656 wc->slid = slid; 657 wc->slid = slid;
657 wc->sl = 0; 658 wc->sl = 0;
658 wc->dlid_path_bits = 0; 659 wc->dlid_path_bits = 0;
@@ -713,7 +714,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
713 goto out; 714 goto out;
714 } 715 }
715 716
716 build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid), 717 build_smp_wc(mad_agent_priv->agent.qp,
718 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
717 send_wr->wr.ud.pkey_index, 719 send_wr->wr.ud.pkey_index,
718 send_wr->wr.ud.port_num, &mad_wc); 720 send_wr->wr.ud.port_num, &mad_wc);
719 721
@@ -2355,7 +2357,8 @@ static void local_completions(struct work_struct *work)
2355 * Defined behavior is to complete response 2357 * Defined behavior is to complete response
2356 * before request 2358 * before request
2357 */ 2359 */
2358 build_smp_wc((unsigned long) local->mad_send_wr, 2360 build_smp_wc(recv_mad_agent->agent.qp,
2361 (unsigned long) local->mad_send_wr,
2359 be16_to_cpu(IB_LID_PERMISSIVE), 2362 be16_to_cpu(IB_LID_PERMISSIVE),
2360 0, recv_mad_agent->agent.port_num, &wc); 2363 0, recv_mad_agent->agent.port_num, &wc);
2361 2364
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 743247ec065e..df1efbc10882 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -933,7 +933,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
933 resp->wc[i].vendor_err = wc[i].vendor_err; 933 resp->wc[i].vendor_err = wc[i].vendor_err;
934 resp->wc[i].byte_len = wc[i].byte_len; 934 resp->wc[i].byte_len = wc[i].byte_len;
935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data; 935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
936 resp->wc[i].qp_num = wc[i].qp_num; 936 resp->wc[i].qp_num = wc[i].qp->qp_num;
937 resp->wc[i].src_qp = wc[i].src_qp; 937 resp->wc[i].src_qp = wc[i].src_qp;
938 resp->wc[i].wc_flags = wc[i].wc_flags; 938 resp->wc[i].wc_flags = wc[i].wc_flags;
939 resp->wc[i].pkey_index = wc[i].pkey_index; 939 resp->wc[i].pkey_index = wc[i].pkey_index;
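The mad.c and uverbs_cmd.c hunks are two halves of one interface change: the work completion stops carrying a bare qp_num and instead carries a pointer to the QP that produced it, so producers store the pointer and consumers that still need the number dereference wc->qp->qp_num. A small stand-alone sketch of that shape, with reduced stand-in structures rather than the real ib_verbs types:

#include <stdio.h>
#include <string.h>

struct fake_qp { unsigned qp_num; };

struct fake_wc {
	unsigned long wr_id;
	struct fake_qp *qp;	/* was: unsigned qp_num */
	unsigned src_qp;
};

/* Provider fills the pointer when building a completion... */
static void fill_wc(struct fake_wc *wc, struct fake_qp *qp, unsigned long wr_id)
{
	memset(wc, 0, sizeof(*wc));
	wc->wr_id = wr_id;
	wc->qp = qp;		/* was: wc->qp_num = qp->qp_num */
}

/* ...and consumers that still need the number dereference it */
int main(void)
{
	struct fake_qp qp = { .qp_num = 42 };
	struct fake_wc wc;

	fill_wc(&wc, &qp, 1);
	printf("completion on QP %u\n", wc.qp->qp_num);
	return 0;
}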
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 05c9154d46f4..5175c99ee586 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -153,7 +153,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
153 153
154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce)); 154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
155 entry->wr_id = ce->hdr.context; 155 entry->wr_id = ce->hdr.context;
156 entry->qp_num = ce->handle; 156 entry->qp = &qp->ibqp;
157 entry->wc_flags = 0; 157 entry->wc_flags = 0;
158 entry->slid = 0; 158 entry->slid = 0;
159 entry->sl = 0; 159 entry->sl = 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1c722032319c..cf95ee474b0f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -119,13 +119,14 @@ struct ehca_qp {
119 struct ipz_qp_handle ipz_qp_handle; 119 struct ipz_qp_handle ipz_qp_handle;
120 struct ehca_pfqp pf; 120 struct ehca_pfqp pf;
121 struct ib_qp_init_attr init_attr; 121 struct ib_qp_init_attr init_attr;
122 u64 uspace_squeue;
123 u64 uspace_rqueue;
124 u64 uspace_fwh;
125 struct ehca_cq *send_cq; 122 struct ehca_cq *send_cq;
126 struct ehca_cq *recv_cq; 123 struct ehca_cq *recv_cq;
127 unsigned int sqerr_purgeflag; 124 unsigned int sqerr_purgeflag;
128 struct hlist_node list_entries; 125 struct hlist_node list_entries;
126 /* mmap counter for resources mapped into user space */
127 u32 mm_count_squeue;
128 u32 mm_count_rqueue;
129 u32 mm_count_galpa;
129}; 130};
130 131
131/* must be power of 2 */ 132/* must be power of 2 */
@@ -142,13 +143,14 @@ struct ehca_cq {
142 struct ipz_cq_handle ipz_cq_handle; 143 struct ipz_cq_handle ipz_cq_handle;
143 struct ehca_pfcq pf; 144 struct ehca_pfcq pf;
144 spinlock_t cb_lock; 145 spinlock_t cb_lock;
145 u64 uspace_queue;
146 u64 uspace_fwh;
147 struct hlist_head qp_hashtab[QP_HASHTAB_LEN]; 146 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
148 struct list_head entry; 147 struct list_head entry;
149 u32 nr_callbacks; 148 u32 nr_callbacks;
150 spinlock_t task_lock; 149 spinlock_t task_lock;
151 u32 ownpid; 150 u32 ownpid;
151 /* mmap counter for resources mapped into user space */
152 u32 mm_count_queue;
153 u32 mm_count_galpa;
152}; 154};
153 155
154enum ehca_mr_flag { 156enum ehca_mr_flag {
@@ -248,20 +250,6 @@ struct ehca_ucontext {
248 struct ib_ucontext ib_ucontext; 250 struct ib_ucontext ib_ucontext;
249}; 251};
250 252
251struct ehca_module *ehca_module_new(void);
252
253int ehca_module_delete(struct ehca_module *me);
254
255int ehca_eq_ctor(struct ehca_eq *eq);
256
257int ehca_eq_dtor(struct ehca_eq *eq);
258
259struct ehca_shca *ehca_shca_new(void);
260
261int ehca_shca_delete(struct ehca_shca *me);
262
263struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
264
265int ehca_init_pd_cache(void); 253int ehca_init_pd_cache(void);
266void ehca_cleanup_pd_cache(void); 254void ehca_cleanup_pd_cache(void);
267int ehca_init_cq_cache(void); 255int ehca_init_cq_cache(void);
@@ -283,7 +271,6 @@ extern int ehca_port_act_time;
283extern int ehca_use_hp_mr; 271extern int ehca_use_hp_mr;
284 272
285struct ipzu_queue_resp { 273struct ipzu_queue_resp {
286 u64 queue; /* points to first queue entry */
287 u32 qe_size; /* queue entry size */ 274 u32 qe_size; /* queue entry size */
288 u32 act_nr_of_sg; 275 u32 act_nr_of_sg;
289 u32 queue_length; /* queue length allocated in bytes */ 276 u32 queue_length; /* queue length allocated in bytes */
@@ -296,7 +283,6 @@ struct ehca_create_cq_resp {
296 u32 cq_number; 283 u32 cq_number;
297 u32 token; 284 u32 token;
298 struct ipzu_queue_resp ipz_queue; 285 struct ipzu_queue_resp ipz_queue;
299 struct h_galpas galpas;
300}; 286};
301 287
302struct ehca_create_qp_resp { 288struct ehca_create_qp_resp {
@@ -309,7 +295,6 @@ struct ehca_create_qp_resp {
309 u32 dummy; /* padding for 8 byte alignment */ 295 u32 dummy; /* padding for 8 byte alignment */
310 struct ipzu_queue_resp ipz_squeue; 296 struct ipzu_queue_resp ipz_squeue;
311 struct ipzu_queue_resp ipz_rqueue; 297 struct ipzu_queue_resp ipz_rqueue;
312 struct h_galpas galpas;
313}; 298};
314 299
315struct ehca_alloc_cq_parms { 300struct ehca_alloc_cq_parms {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6074c897f51c..9291a86ca053 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
267 if (context) { 267 if (context) {
268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue; 268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
269 struct ehca_create_cq_resp resp; 269 struct ehca_create_cq_resp resp;
270 struct vm_area_struct *vma;
271 memset(&resp, 0, sizeof(resp)); 270 memset(&resp, 0, sizeof(resp));
272 resp.cq_number = my_cq->cq_number; 271 resp.cq_number = my_cq->cq_number;
273 resp.token = my_cq->token; 272 resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
276 resp.ipz_queue.queue_length = ipz_queue->queue_length; 275 resp.ipz_queue.queue_length = ipz_queue->queue_length;
277 resp.ipz_queue.pagesize = ipz_queue->pagesize; 276 resp.ipz_queue.pagesize = ipz_queue->pagesize;
278 resp.ipz_queue.toggle_state = ipz_queue->toggle_state; 277 resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
279 ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
280 ipz_queue->queue_length,
281 (void**)&resp.ipz_queue.queue,
282 &vma);
283 if (ret) {
284 ehca_err(device, "Could not mmap queue pages");
285 cq = ERR_PTR(ret);
286 goto create_cq_exit4;
287 }
288 my_cq->uspace_queue = resp.ipz_queue.queue;
289 resp.galpas = my_cq->galpas;
290 ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
291 (void**)&resp.galpas.kernel.fw_handle,
292 &vma);
293 if (ret) {
294 ehca_err(device, "Could not mmap fw_handle");
295 cq = ERR_PTR(ret);
296 goto create_cq_exit5;
297 }
298 my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
299 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 278 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
300 ehca_err(device, "Copy to udata failed."); 279 ehca_err(device, "Copy to udata failed.");
301 goto create_cq_exit6; 280 goto create_cq_exit4;
302 } 281 }
303 } 282 }
304 283
305 return cq; 284 return cq;
306 285
307create_cq_exit6:
308 ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
309
310create_cq_exit5:
311 ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
312
313create_cq_exit4: 286create_cq_exit4:
314 ipz_queue_dtor(&my_cq->ipz_queue); 287 ipz_queue_dtor(&my_cq->ipz_queue);
315 288
@@ -333,7 +306,6 @@ create_cq_exit1:
333int ehca_destroy_cq(struct ib_cq *cq) 306int ehca_destroy_cq(struct ib_cq *cq)
334{ 307{
335 u64 h_ret; 308 u64 h_ret;
336 int ret;
337 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 309 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
338 int cq_num = my_cq->cq_number; 310 int cq_num = my_cq->cq_number;
339 struct ib_device *device = cq->device; 311 struct ib_device *device = cq->device;
@@ -343,6 +315,20 @@ int ehca_destroy_cq(struct ib_cq *cq)
343 u32 cur_pid = current->tgid; 315 u32 cur_pid = current->tgid;
344 unsigned long flags; 316 unsigned long flags;
345 317
318 if (cq->uobject) {
319 if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
320 ehca_err(device, "Resources still referenced in "
321 "user space cq_num=%x", my_cq->cq_number);
322 return -EINVAL;
323 }
324 if (my_cq->ownpid != cur_pid) {
325 ehca_err(device, "Invalid caller pid=%x ownpid=%x "
326 "cq_num=%x",
327 cur_pid, my_cq->ownpid, my_cq->cq_number);
328 return -EINVAL;
329 }
330 }
331
346 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 332 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
347 while (my_cq->nr_callbacks) { 333 while (my_cq->nr_callbacks) {
348 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 334 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -353,25 +339,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
353 idr_remove(&ehca_cq_idr, my_cq->token); 339 idr_remove(&ehca_cq_idr, my_cq->token);
354 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 340 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
355 341
356 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
357 ehca_err(device, "Invalid caller pid=%x ownpid=%x",
358 cur_pid, my_cq->ownpid);
359 return -EINVAL;
360 }
361
362 /* un-mmap if vma alloc */
363 if (my_cq->uspace_queue ) {
364 ret = ehca_munmap(my_cq->uspace_queue,
365 my_cq->ipz_queue.queue_length);
366 if (ret)
367 ehca_err(device, "Could not munmap queue ehca_cq=%p "
368 "cq_num=%x", my_cq, cq_num);
369 ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
370 if (ret)
371 ehca_err(device, "Could not munmap fwh ehca_cq=%p "
372 "cq_num=%x", my_cq, cq_num);
373 }
374
375 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 342 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
376 if (h_ret == H_R_STATE) { 343 if (h_ret == H_R_STATE) {
377 /* cq in err: read err data and destroy it forcibly */ 344 /* cq in err: read err data and destroy it forcibly */
@@ -400,7 +367,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
400 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 367 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
401 u32 cur_pid = current->tgid; 368 u32 cur_pid = current->tgid;
402 369
403 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { 370 if (cq->uobject && my_cq->ownpid != cur_pid) {
404 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x", 371 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
405 cur_pid, my_cq->ownpid); 372 cur_pid, my_cq->ownpid);
406 return -EINVAL; 373 return -EINVAL;
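With the mmap bookkeeping moved into mm_count_* counters, ehca_destroy_cq() can now refuse to tear down a CQ whose queue or firmware window is still mapped in user space, and the pid ownership check applies only to user-created CQs. A stand-alone sketch of that guard, assuming invented structure and field names rather than the ehca ones:

#include <stdio.h>
#include <errno.h>

struct fake_cq {
	int is_user_object;		/* stands in for cq->uobject */
	unsigned mm_count_queue;	/* live mmaps of the queue pages */
	unsigned mm_count_galpa;	/* live mmaps of the firmware window */
};

static int destroy_cq(struct fake_cq *cq)
{
	if (cq->is_user_object &&
	    (cq->mm_count_queue || cq->mm_count_galpa)) {
		fprintf(stderr, "resources still referenced in user space\n");
		return -EINVAL;		/* caller must munmap first */
	}
	/* ...the real driver would issue hipz_h_destroy_cq() here... */
	puts("cq destroyed");
	return 0;
}

int main(void)
{
	struct fake_cq cq = { .is_user_object = 1, .mm_count_queue = 1 };

	if (destroy_cq(&cq))		/* refused: mapping still live */
		cq.mm_count_queue = 0;	/* pretend user space unmapped it */
	return destroy_cq(&cq);		/* now succeeds */
}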
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index cd7789f0d08e..95fd59fb4528 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -171,14 +171,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
171 171
172void ehca_poll_eqs(unsigned long data); 172void ehca_poll_eqs(unsigned long data);
173 173
174int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
175 struct vm_area_struct **vma);
176
177int ehca_mmap_register(u64 physical,void **mapped,
178 struct vm_area_struct **vma);
179
180int ehca_munmap(unsigned long addr, size_t len);
181
182#ifdef CONFIG_PPC_64K_PAGES 174#ifdef CONFIG_PPC_64K_PAGES
183void *ehca_alloc_fw_ctrlblock(gfp_t flags); 175void *ehca_alloc_fw_ctrlblock(gfp_t flags);
184void ehca_free_fw_ctrlblock(void *ptr); 176void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 6574fbbaead5..1155bcf48212 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55MODULE_VERSION("SVNEHCA_0019"); 55MODULE_VERSION("SVNEHCA_0020");
56 56
57int ehca_open_aqp1 = 0; 57int ehca_open_aqp1 = 0;
58int ehca_debug_level = 0; 58int ehca_debug_level = 0;
@@ -288,7 +288,7 @@ int ehca_init_device(struct ehca_shca *shca)
288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); 288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
289 shca->ib_device.owner = THIS_MODULE; 289 shca->ib_device.owner = THIS_MODULE;
290 290
291 shca->ib_device.uverbs_abi_ver = 5; 291 shca->ib_device.uverbs_abi_ver = 6;
292 shca->ib_device.uverbs_cmd_mask = 292 shca->ib_device.uverbs_cmd_mask =
293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
790 int ret; 790 int ret;
791 791
792 printk(KERN_INFO "eHCA Infiniband Device Driver " 792 printk(KERN_INFO "eHCA Infiniband Device Driver "
793 "(Rel.: SVNEHCA_0019)\n"); 793 "(Rel.: SVNEHCA_0020)\n");
794 idr_init(&ehca_qp_idr); 794 idr_init(&ehca_qp_idr);
795 idr_init(&ehca_cq_idr); 795 idr_init(&ehca_cq_idr);
796 spin_lock_init(&ehca_qp_idr_lock); 796 spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 34b85556d01e..95efef921f1d 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -637,7 +637,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue; 637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue; 638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
639 struct ehca_create_qp_resp resp; 639 struct ehca_create_qp_resp resp;
640 struct vm_area_struct * vma;
641 memset(&resp, 0, sizeof(resp)); 640 memset(&resp, 0, sizeof(resp));
642 641
643 resp.qp_num = my_qp->real_qp_num; 642 resp.qp_num = my_qp->real_qp_num;
@@ -651,59 +650,21 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
651 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length; 650 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
652 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize; 651 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
653 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state; 652 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
654 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
655 ipz_rqueue->queue_length,
656 (void**)&resp.ipz_rqueue.queue,
657 &vma);
658 if (ret) {
659 ehca_err(pd->device, "Could not mmap rqueue pages");
660 goto create_qp_exit3;
661 }
662 my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
663 /* squeue properties */ 653 /* squeue properties */
664 resp.ipz_squeue.qe_size = ipz_squeue->qe_size; 654 resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
665 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg; 655 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
666 resp.ipz_squeue.queue_length = ipz_squeue->queue_length; 656 resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
667 resp.ipz_squeue.pagesize = ipz_squeue->pagesize; 657 resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
668 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state; 658 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
669 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
670 ipz_squeue->queue_length,
671 (void**)&resp.ipz_squeue.queue,
672 &vma);
673 if (ret) {
674 ehca_err(pd->device, "Could not mmap squeue pages");
675 goto create_qp_exit4;
676 }
677 my_qp->uspace_squeue = resp.ipz_squeue.queue;
678 /* fw_handle */
679 resp.galpas = my_qp->galpas;
680 ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
681 (void**)&resp.galpas.kernel.fw_handle,
682 &vma);
683 if (ret) {
684 ehca_err(pd->device, "Could not mmap fw_handle");
685 goto create_qp_exit5;
686 }
687 my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
688
689 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 659 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
690 ehca_err(pd->device, "Copy to udata failed"); 660 ehca_err(pd->device, "Copy to udata failed");
691 ret = -EINVAL; 661 ret = -EINVAL;
692 goto create_qp_exit6; 662 goto create_qp_exit3;
693 } 663 }
694 } 664 }
695 665
696 return &my_qp->ib_qp; 666 return &my_qp->ib_qp;
697 667
698create_qp_exit6:
699 ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
700
701create_qp_exit5:
702 ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
703
704create_qp_exit4:
705 ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
706
707create_qp_exit3: 668create_qp_exit3:
708 ipz_queue_dtor(&my_qp->ipz_rqueue); 669 ipz_queue_dtor(&my_qp->ipz_rqueue);
709 ipz_queue_dtor(&my_qp->ipz_squeue); 670 ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -931,7 +892,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
931 my_qp->qp_type == IB_QPT_SMI) && 892 my_qp->qp_type == IB_QPT_SMI) &&
932 statetrans == IB_QPST_SQE2RTS) { 893 statetrans == IB_QPST_SQE2RTS) {
933 /* mark next free wqe if kernel */ 894 /* mark next free wqe if kernel */
934 if (my_qp->uspace_squeue == 0) { 895 if (!ibqp->uobject) {
935 struct ehca_wqe *wqe; 896 struct ehca_wqe *wqe;
936 /* lock send queue */ 897 /* lock send queue */
937 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); 898 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -1417,11 +1378,18 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1417 enum ib_qp_type qp_type; 1378 enum ib_qp_type qp_type;
1418 unsigned long flags; 1379 unsigned long flags;
1419 1380
1420 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && 1381 if (ibqp->uobject) {
1421 my_pd->ownpid != cur_pid) { 1382 if (my_qp->mm_count_galpa ||
1422 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x", 1383 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
1423 cur_pid, my_pd->ownpid); 1384 ehca_err(ibqp->device, "Resources still referenced in "
1424 return -EINVAL; 1385 "user space qp_num=%x", ibqp->qp_num);
1386 return -EINVAL;
1387 }
1388 if (my_pd->ownpid != cur_pid) {
1389 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
1390 cur_pid, my_pd->ownpid);
1391 return -EINVAL;
1392 }
1425 } 1393 }
1426 1394
1427 if (my_qp->send_cq) { 1395 if (my_qp->send_cq) {
@@ -1439,24 +1407,6 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1439 idr_remove(&ehca_qp_idr, my_qp->token); 1407 idr_remove(&ehca_qp_idr, my_qp->token);
1440 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 1408 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
1441 1409
1442 /* un-mmap if vma alloc */
1443 if (my_qp->uspace_rqueue) {
1444 ret = ehca_munmap(my_qp->uspace_rqueue,
1445 my_qp->ipz_rqueue.queue_length);
1446 if (ret)
1447 ehca_err(ibqp->device, "Could not munmap rqueue "
1448 "qp_num=%x", qp_num);
1449 ret = ehca_munmap(my_qp->uspace_squeue,
1450 my_qp->ipz_squeue.queue_length);
1451 if (ret)
1452 ehca_err(ibqp->device, "Could not munmap squeue "
1453 "qp_num=%x", qp_num);
1454 ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
1455 if (ret)
1456 ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
1457 qp_num);
1458 }
1459
1460 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 1410 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
1461 if (h_ret != H_SUCCESS) { 1411 if (h_ret != H_SUCCESS) {
1462 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx " 1412 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index b46bda1bf85d..08d3f892d9f3 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -579,7 +579,7 @@ poll_cq_one_read_cqe:
579 } else 579 } else
580 wc->status = IB_WC_SUCCESS; 580 wc->status = IB_WC_SUCCESS;
581 581
582 wc->qp_num = cqe->local_qp_number; 582 wc->qp = NULL;
583 wc->byte_len = cqe->nr_bytes_transferred; 583 wc->byte_len = cqe->nr_bytes_transferred;
584 wc->pkey_index = cqe->pkey_index; 584 wc->pkey_index = cqe->pkey_index;
585 wc->slid = cqe->rlid; 585 wc->slid = cqe->rlid;
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e08764e4aef2..73db920b6945 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -68,105 +68,183 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
68 return 0; 68 return 0;
69} 69}
70 70
71struct page *ehca_nopage(struct vm_area_struct *vma, 71static void ehca_mm_open(struct vm_area_struct *vma)
72 unsigned long address, int *type)
73{ 72{
74 struct page *mypage = NULL; 73 u32 *count = (u32*)vma->vm_private_data;
75 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 74 if (!count) {
76 u32 idr_handle = fileoffset >> 32; 75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
77 u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */ 76 vma->vm_start, vma->vm_end);
78 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 77 return;
79 u32 cur_pid = current->tgid; 78 }
80 unsigned long flags; 79 (*count)++;
81 struct ehca_cq *cq; 80 if (!(*count))
82 struct ehca_qp *qp; 81 ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
83 struct ehca_pd *pd; 82 vma->vm_start, vma->vm_end);
84 u64 offset; 83 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
85 void *vaddr; 84 vma->vm_start, vma->vm_end, *count);
85}
86 86
87 switch (q_type) { 87static void ehca_mm_close(struct vm_area_struct *vma)
88 case 1: /* CQ */ 88{
89 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 89 u32 *count = (u32*)vma->vm_private_data;
90 cq = idr_find(&ehca_cq_idr, idr_handle); 90 if (!count) {
91 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
92 vma->vm_start, vma->vm_end);
93 return;
94 }
95 (*count)--;
96 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
97 vma->vm_start, vma->vm_end, *count);
98}
92 99
93 /* make sure this mmap really belongs to the authorized user */ 100static struct vm_operations_struct vm_ops = {
94 if (!cq) { 101 .open = ehca_mm_open,
95 ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS"); 102 .close = ehca_mm_close,
96 return NOPAGE_SIGBUS; 103};
104
105static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
106 u32 *mm_count)
107{
108 int ret;
109 u64 vsize, physical;
110
111 vsize = vma->vm_end - vma->vm_start;
112 if (vsize != EHCA_PAGESIZE) {
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
114 return -EINVAL;
115 }
116
117 physical = galpas->user.fw_handle;
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119 ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
120 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
121 ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
122 vsize, vma->vm_page_prot);
123 if (unlikely(ret)) {
124 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
125 return -ENOMEM;
126 }
127
128 vma->vm_private_data = mm_count;
129 (*mm_count)++;
130 vma->vm_ops = &vm_ops;
131
132 return 0;
133}
134
135static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
136 u32 *mm_count)
137{
138 int ret;
139 u64 start, ofs;
140 struct page *page;
141
142 vma->vm_flags |= VM_RESERVED;
143 start = vma->vm_start;
144 for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
145 u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
146 page = virt_to_page(virt_addr);
147 ret = vm_insert_page(vma, start, page);
148 if (unlikely(ret)) {
149 ehca_gen_err("vm_insert_page() failed rc=%x", ret);
150 return ret;
97 } 151 }
152 start += PAGE_SIZE;
153 }
154 vma->vm_private_data = mm_count;
155 (*mm_count)++;
156 vma->vm_ops = &vm_ops;
98 157
99 if (cq->ownpid != cur_pid) { 158 return 0;
159}
160
161static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
162 u32 rsrc_type)
163{
164 int ret;
165
166 switch (rsrc_type) {
167 case 1: /* galpa fw handle */
168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
170 if (unlikely(ret)) {
100 ehca_err(cq->ib_cq.device, 171 ehca_err(cq->ib_cq.device,
101 "Invalid caller pid=%x ownpid=%x", 172 "ehca_mmap_fw() failed rc=%x cq_num=%x",
102 cur_pid, cq->ownpid); 173 ret, cq->cq_number);
103 return NOPAGE_SIGBUS; 174 return ret;
104 } 175 }
176 break;
105 177
106 if (rsrc_type == 2) { 178 case 2: /* cq queue_addr */
107 ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq); 179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
108 offset = address - vma->vm_start; 180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
109 vaddr = ipz_qeit_calc(&cq->ipz_queue, offset); 181 if (unlikely(ret)) {
110 ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p", 182 ehca_err(cq->ib_cq.device,
111 offset, vaddr); 183 "ehca_mmap_queue() failed rc=%x cq_num=%x",
112 mypage = virt_to_page(vaddr); 184 ret, cq->cq_number);
185 return ret;
113 } 186 }
114 break; 187 break;
115 188
116 case 2: /* QP */ 189 default:
117 spin_lock_irqsave(&ehca_qp_idr_lock, flags); 190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
118 qp = idr_find(&ehca_qp_idr, idr_handle); 191 rsrc_type, cq->cq_number);
119 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 192 return -EINVAL;
193 }
120 194
121 /* make sure this mmap really belongs to the authorized user */ 195 return 0;
122 if (!qp) { 196}
123 ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS"); 197
124 return NOPAGE_SIGBUS; 198static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
199 u32 rsrc_type)
200{
201 int ret;
202
203 switch (rsrc_type) {
204 case 1: /* galpa fw handle */
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
207 if (unlikely(ret)) {
208 ehca_err(qp->ib_qp.device,
209 "remap_pfn_range() failed ret=%x qp_num=%x",
210 ret, qp->ib_qp.qp_num);
211 return -ENOMEM;
125 } 212 }
213 break;
126 214
127 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd); 215 case 2: /* qp rqueue_addr */
128 if (pd->ownpid != cur_pid) { 216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
217 qp->ib_qp.qp_num);
218 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
219 if (unlikely(ret)) {
129 ehca_err(qp->ib_qp.device, 220 ehca_err(qp->ib_qp.device,
130 "Invalid caller pid=%x ownpid=%x", 221 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
131 cur_pid, pd->ownpid); 222 ret, qp->ib_qp.qp_num);
132 return NOPAGE_SIGBUS; 223 return ret;
133 } 224 }
225 break;
134 226
135 if (rsrc_type == 2) { /* rqueue */ 227 case 3: /* qp squeue_addr */
136 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp); 228 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
137 offset = address - vma->vm_start; 229 qp->ib_qp.qp_num);
138 vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset); 230 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
139 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p", 231 if (unlikely(ret)) {
140 offset, vaddr); 232 ehca_err(qp->ib_qp.device,
141 mypage = virt_to_page(vaddr); 233 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
142 } else if (rsrc_type == 3) { /* squeue */ 234 ret, qp->ib_qp.qp_num);
143 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp); 235 return ret;
144 offset = address - vma->vm_start;
145 vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
146 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
147 offset, vaddr);
148 mypage = virt_to_page(vaddr);
149 } 236 }
150 break; 237 break;
151 238
152 default: 239 default:
153 ehca_gen_err("bad queue type %x", q_type); 240 ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
154 return NOPAGE_SIGBUS; 241 rsrc_type, qp->ib_qp.qp_num);
155 } 242 return -EINVAL;
156
157 if (!mypage) {
158 ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
159 return NOPAGE_SIGBUS;
160 } 243 }
161 get_page(mypage);
162 244
163 return mypage; 245 return 0;
164} 246}
165 247
166static struct vm_operations_struct ehcau_vm_ops = {
167 .nopage = ehca_nopage,
168};
169
170int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 248int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
171{ 249{
172 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 250 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
@@ -175,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
175 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 253 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
176 u32 cur_pid = current->tgid; 254 u32 cur_pid = current->tgid;
177 u32 ret; 255 u32 ret;
178 u64 vsize, physical;
179 unsigned long flags; 256 unsigned long flags;
180 struct ehca_cq *cq; 257 struct ehca_cq *cq;
181 struct ehca_qp *qp; 258 struct ehca_qp *qp;
@@ -201,44 +278,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
201 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) 278 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
202 return -EINVAL; 279 return -EINVAL;
203 280
204 switch (rsrc_type) { 281 ret = ehca_mmap_cq(vma, cq, rsrc_type);
205 case 1: /* galpa fw handle */ 282 if (unlikely(ret)) {
206 ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq); 283 ehca_err(cq->ib_cq.device,
207 vma->vm_flags |= VM_RESERVED; 284 "ehca_mmap_cq() failed rc=%x cq_num=%x",
208 vsize = vma->vm_end - vma->vm_start; 285 ret, cq->cq_number);
209 if (vsize != EHCA_PAGESIZE) { 286 return ret;
210 ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
211 vma->vm_end - vma->vm_start);
212 return -EINVAL;
213 }
214
215 physical = cq->galpas.user.fw_handle;
216 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
217 vma->vm_flags |= VM_IO | VM_RESERVED;
218
219 ehca_dbg(cq->ib_cq.device,
220 "vsize=%lx physical=%lx", vsize, physical);
221 ret = remap_pfn_range(vma, vma->vm_start,
222 physical >> PAGE_SHIFT, vsize,
223 vma->vm_page_prot);
224 if (ret) {
225 ehca_err(cq->ib_cq.device,
226 "remap_pfn_range() failed ret=%x",
227 ret);
228 return -ENOMEM;
229 }
230 break;
231
232 case 2: /* cq queue_addr */
233 ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
234 vma->vm_flags |= VM_RESERVED;
235 vma->vm_ops = &ehcau_vm_ops;
236 break;
237
238 default:
239 ehca_err(cq->ib_cq.device, "bad resource type %x",
240 rsrc_type);
241 return -EINVAL;
242 } 287 }
243 break; 288 break;
244 289
@@ -262,50 +307,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
262 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context) 307 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
263 return -EINVAL; 308 return -EINVAL;
264 309
265 switch (rsrc_type) { 310 ret = ehca_mmap_qp(vma, qp, rsrc_type);
266 case 1: /* galpa fw handle */ 311 if (unlikely(ret)) {
267 ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp); 312 ehca_err(qp->ib_qp.device,
268 vma->vm_flags |= VM_RESERVED; 313 "ehca_mmap_qp() failed rc=%x qp_num=%x",
269 vsize = vma->vm_end - vma->vm_start; 314 ret, qp->ib_qp.qp_num);
270 if (vsize != EHCA_PAGESIZE) { 315 return ret;
271 ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
272 vma->vm_end - vma->vm_start);
273 return -EINVAL;
274 }
275
276 physical = qp->galpas.user.fw_handle;
277 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
278 vma->vm_flags |= VM_IO | VM_RESERVED;
279
280 ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
281 vsize, physical);
282 ret = remap_pfn_range(vma, vma->vm_start,
283 physical >> PAGE_SHIFT, vsize,
284 vma->vm_page_prot);
285 if (ret) {
286 ehca_err(qp->ib_qp.device,
287 "remap_pfn_range() failed ret=%x",
288 ret);
289 return -ENOMEM;
290 }
291 break;
292
293 case 2: /* qp rqueue_addr */
294 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
295 vma->vm_flags |= VM_RESERVED;
296 vma->vm_ops = &ehcau_vm_ops;
297 break;
298
299 case 3: /* qp squeue_addr */
300 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
301 vma->vm_flags |= VM_RESERVED;
302 vma->vm_ops = &ehcau_vm_ops;
303 break;
304
305 default:
306 ehca_err(qp->ib_qp.device, "bad resource type %x",
307 rsrc_type);
308 return -EINVAL;
309 } 316 }
310 break; 317 break;
311 318
@@ -316,77 +323,3 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
316 323
317 return 0; 324 return 0;
318} 325}
319
320int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
321 struct vm_area_struct **vma)
322{
323 down_write(&current->mm->mmap_sem);
324 *mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
325 MAP_SHARED | MAP_ANONYMOUS,
326 foffset);
327 up_write(&current->mm->mmap_sem);
328 if (!(*mapped)) {
329 ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
330 foffset, length);
331 return -EINVAL;
332 }
333
334 *vma = find_vma(current->mm, (u64)*mapped);
335 if (!(*vma)) {
336 down_write(&current->mm->mmap_sem);
337 do_munmap(current->mm, 0, length);
338 up_write(&current->mm->mmap_sem);
339 ehca_gen_err("couldn't find vma queue=%p", *mapped);
340 return -EINVAL;
341 }
342 (*vma)->vm_flags |= VM_RESERVED;
343 (*vma)->vm_ops = &ehcau_vm_ops;
344
345 return 0;
346}
347
348int ehca_mmap_register(u64 physical, void **mapped,
349 struct vm_area_struct **vma)
350{
351 int ret;
352 unsigned long vsize;
353 /* ehca hw supports only 4k page */
354 ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
355 if (ret) {
356 ehca_gen_err("could'nt mmap physical=%lx", physical);
357 return ret;
358 }
359
360 (*vma)->vm_flags |= VM_RESERVED;
361 vsize = (*vma)->vm_end - (*vma)->vm_start;
362 if (vsize != EHCA_PAGESIZE) {
363 ehca_gen_err("invalid vsize=%lx",
364 (*vma)->vm_end - (*vma)->vm_start);
365 return -EINVAL;
366 }
367
368 (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
369 (*vma)->vm_flags |= VM_IO | VM_RESERVED;
370
371 ret = remap_pfn_range((*vma), (*vma)->vm_start,
372 physical >> PAGE_SHIFT, vsize,
373 (*vma)->vm_page_prot);
374 if (ret) {
375 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
376 return -ENOMEM;
377 }
378
379 return 0;
380
381}
382
383int ehca_munmap(unsigned long addr, size_t len) {
384 int ret = 0;
385 struct mm_struct *mm = current->mm;
386 if (mm) {
387 down_write(&mm->mmap_sem);
388 ret = do_munmap(mm, addr, len);
389 up_write(&mm->mmap_sem);
390 }
391 return ret;
392}
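The ehca_uverbs.c rewrite replaces the old do_mmap()/nopage scheme with mappings established directly in ehca_mmap(): firmware windows go through remap_pfn_range() on an uncached page, queue memory is handed out page by page with vm_insert_page(), and a shared vm_operations open/close pair keeps the mm_count_* counters in step with live VMAs. The sketch below shows that pattern in isolation, for roughly this kernel generation; it is not a complete driver and the helper, structure, and field names are invented:

/*
 * Kernel-style sketch only: map a register page with remap_pfn_range(),
 * map kernel queue pages with vm_insert_page(), and count live mappings
 * through vm_operations open/close so teardown can tell whether user
 * space still holds a reference.
 */
#include <linux/mm.h>
#include <linux/errno.h>

struct mapped_resource {
	u32 mm_count;			/* live VMAs referencing this resource */
	unsigned long reg_phys;		/* physical address of a register page */
	void *queue_buf;		/* kernel memory backing the queue */
	unsigned long queue_len;	/* multiple of PAGE_SIZE */
};

static void res_vm_open(struct vm_area_struct *vma)
{
	u32 *count = vma->vm_private_data;
	(*count)++;
}

static void res_vm_close(struct vm_area_struct *vma)
{
	u32 *count = vma->vm_private_data;
	(*count)--;
}

static struct vm_operations_struct res_vm_ops = {
	.open  = res_vm_open,
	.close = res_vm_close,
};

/* One uncached page of registers: remap_pfn_range() covers the whole range */
static int map_registers(struct vm_area_struct *vma, struct mapped_resource *res)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;

	if (vsize != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vma->vm_start, res->reg_phys >> PAGE_SHIFT,
			    vsize, vma->vm_page_prot))
		return -EAGAIN;

	vma->vm_private_data = &res->mm_count;
	res->mm_count++;
	vma->vm_ops = &res_vm_ops;
	return 0;
}

/* Normal kernel pages backing a queue: insert them one at a time */
static int map_queue(struct vm_area_struct *vma, struct mapped_resource *res)
{
	unsigned long ofs, start = vma->vm_start;
	int ret;

	vma->vm_flags |= VM_RESERVED;
	for (ofs = 0; ofs < res->queue_len; ofs += PAGE_SIZE) {
		ret = vm_insert_page(vma, start,
				     virt_to_page(res->queue_buf + ofs));
		if (ret)
			return ret;
		start += PAGE_SIZE;
	}

	vma->vm_private_data = &res->mm_count;
	res->mm_count++;
	vma->vm_ops = &res_vm_ops;
	return 0;
}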
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 46c1c89bf6ae..64f07b19349f 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -379,7 +379,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
379 wc.vendor_err = 0; 379 wc.vendor_err = 0;
380 wc.byte_len = 0; 380 wc.byte_len = 0;
381 wc.imm_data = 0; 381 wc.imm_data = 0;
382 wc.qp_num = qp->ibqp.qp_num; 382 wc.qp = &qp->ibqp;
383 wc.src_qp = 0; 383 wc.src_qp = 0;
384 wc.wc_flags = 0; 384 wc.wc_flags = 0;
385 wc.pkey_index = 0; 385 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index ce6038743c5c..5ff20cb04494 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -702,7 +702,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
703 wc->vendor_err = 0; 703 wc->vendor_err = 0;
704 wc->byte_len = 0; 704 wc->byte_len = 0;
705 wc->qp_num = qp->ibqp.qp_num; 705 wc->qp = &qp->ibqp;
706 wc->src_qp = qp->remote_qpn; 706 wc->src_qp = qp->remote_qpn;
707 wc->pkey_index = 0; 707 wc->pkey_index = 0;
708 wc->slid = qp->remote_ah_attr.dlid; 708 wc->slid = qp->remote_ah_attr.dlid;
@@ -836,7 +836,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
837 wc.vendor_err = 0; 837 wc.vendor_err = 0;
838 wc.byte_len = wqe->length; 838 wc.byte_len = wqe->length;
839 wc.qp_num = qp->ibqp.qp_num; 839 wc.qp = &qp->ibqp;
840 wc.src_qp = qp->remote_qpn; 840 wc.src_qp = qp->remote_qpn;
841 wc.pkey_index = 0; 841 wc.pkey_index = 0;
842 wc.slid = qp->remote_ah_attr.dlid; 842 wc.slid = qp->remote_ah_attr.dlid;
@@ -951,7 +951,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
952 wc.vendor_err = 0; 952 wc.vendor_err = 0;
953 wc.byte_len = 0; 953 wc.byte_len = 0;
954 wc.qp_num = qp->ibqp.qp_num; 954 wc.qp = &qp->ibqp;
955 wc.src_qp = qp->remote_qpn; 955 wc.src_qp = qp->remote_qpn;
956 wc.pkey_index = 0; 956 wc.pkey_index = 0;
957 wc.slid = qp->remote_ah_attr.dlid; 957 wc.slid = qp->remote_ah_attr.dlid;
@@ -1511,7 +1511,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1511 wc.status = IB_WC_SUCCESS; 1511 wc.status = IB_WC_SUCCESS;
1512 wc.opcode = IB_WC_RECV; 1512 wc.opcode = IB_WC_RECV;
1513 wc.vendor_err = 0; 1513 wc.vendor_err = 0;
1514 wc.qp_num = qp->ibqp.qp_num; 1514 wc.qp = &qp->ibqp;
1515 wc.src_qp = qp->remote_qpn; 1515 wc.src_qp = qp->remote_qpn;
1516 wc.pkey_index = 0; 1516 wc.pkey_index = 0;
1517 wc.slid = qp->remote_ah_attr.dlid; 1517 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f7530512045d..e86cb171872e 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -137,7 +137,7 @@ bad_lkey:
137 wc.vendor_err = 0; 137 wc.vendor_err = 0;
138 wc.byte_len = 0; 138 wc.byte_len = 0;
139 wc.imm_data = 0; 139 wc.imm_data = 0;
140 wc.qp_num = qp->ibqp.qp_num; 140 wc.qp = &qp->ibqp;
141 wc.src_qp = 0; 141 wc.src_qp = 0;
142 wc.wc_flags = 0; 142 wc.wc_flags = 0;
143 wc.pkey_index = 0; 143 wc.pkey_index = 0;
@@ -336,7 +336,7 @@ again:
336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
337 wc.vendor_err = 0; 337 wc.vendor_err = 0;
338 wc.byte_len = 0; 338 wc.byte_len = 0;
339 wc.qp_num = sqp->ibqp.qp_num; 339 wc.qp = &sqp->ibqp;
340 wc.src_qp = sqp->remote_qpn; 340 wc.src_qp = sqp->remote_qpn;
341 wc.pkey_index = 0; 341 wc.pkey_index = 0;
342 wc.slid = sqp->remote_ah_attr.dlid; 342 wc.slid = sqp->remote_ah_attr.dlid;
@@ -426,7 +426,7 @@ again:
426 wc.status = IB_WC_SUCCESS; 426 wc.status = IB_WC_SUCCESS;
427 wc.vendor_err = 0; 427 wc.vendor_err = 0;
428 wc.byte_len = wqe->length; 428 wc.byte_len = wqe->length;
429 wc.qp_num = qp->ibqp.qp_num; 429 wc.qp = &qp->ibqp;
430 wc.src_qp = qp->remote_qpn; 430 wc.src_qp = qp->remote_qpn;
431 /* XXX do we know which pkey matched? Only needed for GSI. */ 431 /* XXX do we know which pkey matched? Only needed for GSI. */
432 wc.pkey_index = 0; 432 wc.pkey_index = 0;
@@ -447,7 +447,7 @@ send_comp:
447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
448 wc.vendor_err = 0; 448 wc.vendor_err = 0;
449 wc.byte_len = wqe->length; 449 wc.byte_len = wqe->length;
450 wc.qp_num = sqp->ibqp.qp_num; 450 wc.qp = &sqp->ibqp;
451 wc.src_qp = 0; 451 wc.src_qp = 0;
452 wc.pkey_index = 0; 452 wc.pkey_index = 0;
453 wc.slid = 0; 453 wc.slid = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index e636cfd67a82..325d6634ff53 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -49,7 +49,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
50 wc->vendor_err = 0; 50 wc->vendor_err = 0;
51 wc->byte_len = wqe->length; 51 wc->byte_len = wqe->length;
52 wc->qp_num = qp->ibqp.qp_num; 52 wc->qp = &qp->ibqp;
53 wc->src_qp = qp->remote_qpn; 53 wc->src_qp = qp->remote_qpn;
54 wc->pkey_index = 0; 54 wc->pkey_index = 0;
55 wc->slid = qp->remote_ah_attr.dlid; 55 wc->slid = qp->remote_ah_attr.dlid;
@@ -411,7 +411,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
411 wc.status = IB_WC_SUCCESS; 411 wc.status = IB_WC_SUCCESS;
412 wc.opcode = IB_WC_RECV; 412 wc.opcode = IB_WC_RECV;
413 wc.vendor_err = 0; 413 wc.vendor_err = 0;
414 wc.qp_num = qp->ibqp.qp_num; 414 wc.qp = &qp->ibqp;
415 wc.src_qp = qp->remote_qpn; 415 wc.src_qp = qp->remote_qpn;
416 wc.pkey_index = 0; 416 wc.pkey_index = 0;
417 wc.slid = qp->remote_ah_attr.dlid; 417 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 49f1102af8b3..9a3e54664ee4 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -66,7 +66,7 @@ bad_lkey:
66 wc.vendor_err = 0; 66 wc.vendor_err = 0;
67 wc.byte_len = 0; 67 wc.byte_len = 0;
68 wc.imm_data = 0; 68 wc.imm_data = 0;
69 wc.qp_num = qp->ibqp.qp_num; 69 wc.qp = &qp->ibqp;
70 wc.src_qp = 0; 70 wc.src_qp = 0;
71 wc.wc_flags = 0; 71 wc.wc_flags = 0;
72 wc.pkey_index = 0; 72 wc.pkey_index = 0;
@@ -255,7 +255,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
255 wc->status = IB_WC_SUCCESS; 255 wc->status = IB_WC_SUCCESS;
256 wc->opcode = IB_WC_RECV; 256 wc->opcode = IB_WC_RECV;
257 wc->vendor_err = 0; 257 wc->vendor_err = 0;
258 wc->qp_num = qp->ibqp.qp_num; 258 wc->qp = &qp->ibqp;
259 wc->src_qp = sqp->ibqp.qp_num; 259 wc->src_qp = sqp->ibqp.qp_num;
260 /* XXX do we know which pkey matched? Only needed for GSI. */ 260 /* XXX do we know which pkey matched? Only needed for GSI. */
261 wc->pkey_index = 0; 261 wc->pkey_index = 0;
@@ -474,7 +474,7 @@ done:
474 wc.vendor_err = 0; 474 wc.vendor_err = 0;
475 wc.opcode = IB_WC_SEND; 475 wc.opcode = IB_WC_SEND;
476 wc.byte_len = len; 476 wc.byte_len = len;
477 wc.qp_num = qp->ibqp.qp_num; 477 wc.qp = &qp->ibqp;
478 wc.src_qp = 0; 478 wc.src_qp = 0;
479 wc.wc_flags = 0; 479 wc.wc_flags = 0;
480 /* XXX initialize other fields? */ 480 /* XXX initialize other fields? */
@@ -651,7 +651,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
651 wc.status = IB_WC_SUCCESS; 651 wc.status = IB_WC_SUCCESS;
652 wc.opcode = IB_WC_RECV; 652 wc.opcode = IB_WC_RECV;
653 wc.vendor_err = 0; 653 wc.vendor_err = 0;
654 wc.qp_num = qp->ibqp.qp_num; 654 wc.qp = &qp->ibqp;
655 wc.src_qp = src_qp; 655 wc.src_qp = src_qp;
656 /* XXX do we know which pkey matched? Only needed for GSI. */ 656 /* XXX do we know which pkey matched? Only needed for GSI. */
657 wc.pkey_index = 0; 657 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 768df7265b81..968d1519761c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1854,7 +1854,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1854 1854
1855 memset(inbox + 256, 0, 256); 1855 memset(inbox + 256, 0, 256);
1856 1856
1857 MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); 1857 MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); 1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
1859 1859
1860 val = in_wc->sl << 4; 1860 val = in_wc->sl << 4;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1159c8a0f2c5..efd79ef109a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -534,7 +534,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
534 } 534 }
535 } 535 }
536 536
537 entry->qp_num = (*cur_qp)->qpn; 537 entry->qp = &(*cur_qp)->ibqp;
538 538
539 if (is_send) { 539 if (is_send) {
540 wq = &(*cur_qp)->sq; 540 wq = &(*cur_qp)->sq;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 72611fd15103..5e8ac577f0ad 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -548,6 +548,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
548 target->tx_head = 0; 548 target->tx_head = 0;
549 target->tx_tail = 0; 549 target->tx_tail = 0;
550 550
551 target->qp_in_error = 0;
551 ret = srp_connect_target(target); 552 ret = srp_connect_target(target);
552 if (ret) 553 if (ret)
553 goto err; 554 goto err;
@@ -878,6 +879,7 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
878 printk(KERN_ERR PFX "failed %s status %d\n", 879 printk(KERN_ERR PFX "failed %s status %d\n",
879 wc.wr_id & SRP_OP_RECV ? "receive" : "send", 880 wc.wr_id & SRP_OP_RECV ? "receive" : "send",
880 wc.status); 881 wc.status);
882 target->qp_in_error = 1;
881 break; 883 break;
882 } 884 }
883 885
@@ -1337,6 +1339,8 @@ static int srp_abort(struct scsi_cmnd *scmnd)
1337 1339
1338 printk(KERN_ERR "SRP abort called\n"); 1340 printk(KERN_ERR "SRP abort called\n");
1339 1341
1342 if (target->qp_in_error)
1343 return FAILED;
1340 if (srp_find_req(target, scmnd, &req)) 1344 if (srp_find_req(target, scmnd, &req))
1341 return FAILED; 1345 return FAILED;
1342 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) 1346 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
@@ -1365,6 +1369,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1365 1369
1366 printk(KERN_ERR "SRP reset_device called\n"); 1370 printk(KERN_ERR "SRP reset_device called\n");
1367 1371
1372 if (target->qp_in_error)
1373 return FAILED;
1368 if (srp_find_req(target, scmnd, &req)) 1374 if (srp_find_req(target, scmnd, &req))
1369 return FAILED; 1375 return FAILED;
1370 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) 1376 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
@@ -1801,6 +1807,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
1801 goto err_free; 1807 goto err_free;
1802 } 1808 }
1803 1809
1810 target->qp_in_error = 0;
1804 ret = srp_connect_target(target); 1811 ret = srp_connect_target(target);
1805 if (ret) { 1812 if (ret) {
1806 printk(KERN_ERR PFX "Connection failed\n"); 1813 printk(KERN_ERR PFX "Connection failed\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index c21772317b86..2f3319c719a5 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -158,6 +158,7 @@ struct srp_target_port {
158 struct completion done; 158 struct completion done;
159 int status; 159 int status;
160 enum srp_target_state state; 160 enum srp_target_state state;
161 int qp_in_error;
161}; 162};
162 163
163struct srp_iu { 164struct srp_iu {
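
The ib_srp hunks add a qp_in_error flag to srp_target_port: it is cleared before every connect and reconnect, set from the completion handler as soon as a work completion reports an error, and checked at the top of the SCSI abort and reset-device handlers so task management is not attempted over a QP that is already dead. A rough standalone sketch of that state machine follows; fake_target, fake_completion and fake_abort are hypothetical names, and the return codes merely stand in for the SCSI midlayer's SUCCESS/FAILED values.

#include <stdio.h>

/* Illustrative return codes, not the real SCSI midlayer constants. */
enum eh_result { EH_SUCCESS, EH_FAILED };

struct fake_target { int qp_in_error; };

/* Completion-handler path: any failed work completion poisons the QP. */
static void fake_completion(struct fake_target *t, int wc_status)
{
        if (wc_status != 0) {
                fprintf(stderr, "failed completion, status %d\n", wc_status);
                t->qp_in_error = 1;
        }
}

/* Error-handler path: give up immediately once the QP is known bad,
 * instead of posting task-management work on a dead connection. */
static enum eh_result fake_abort(struct fake_target *t)
{
        if (t->qp_in_error)
                return EH_FAILED;
        /* ... the real driver would send SRP_TSK_ABORT_TASK here ... */
        return EH_SUCCESS;
}

int main(void)
{
        struct fake_target t = { .qp_in_error = 0 };  /* cleared on (re)connect */

        fake_completion(&t, 5);                       /* simulated error status */
        printf("abort %s\n", fake_abort(&t) == EH_FAILED ? "FAILED" : "ok");
        return 0;
}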
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 2ab7add78f94..e21e490fedb0 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -11,66 +11,25 @@
11 11
12#include <linux/tifm.h> 12#include <linux/tifm.h>
13#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
14#include <linux/freezer.h>
14 15
15#define DRIVER_NAME "tifm_7xx1" 16#define DRIVER_NAME "tifm_7xx1"
16#define DRIVER_VERSION "0.6" 17#define DRIVER_VERSION "0.7"
17 18
18static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) 19static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
19{ 20{
20 int cnt;
21 unsigned long flags;
22
23 spin_lock_irqsave(&fm->lock, flags);
24 if (!fm->inhibit_new_cards) {
25 for (cnt = 0; cnt < fm->max_sockets; cnt++) {
26 if (fm->sockets[cnt] == sock) {
27 fm->remove_mask |= (1 << cnt);
28 queue_work(fm->wq, &fm->media_remover);
29 break;
30 }
31 }
32 }
33 spin_unlock_irqrestore(&fm->lock, flags);
34}
35
36static void tifm_7xx1_remove_media(struct work_struct *work)
37{
38 struct tifm_adapter *fm =
39 container_of(work, struct tifm_adapter, media_remover);
40 unsigned long flags; 21 unsigned long flags;
41 int cnt;
42 struct tifm_dev *sock;
43 22
44 if (!class_device_get(&fm->cdev))
45 return;
46 spin_lock_irqsave(&fm->lock, flags); 23 spin_lock_irqsave(&fm->lock, flags);
47 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 24 fm->socket_change_set |= 1 << sock->socket_id;
48 if (fm->sockets[cnt] && (fm->remove_mask & (1 << cnt))) { 25 wake_up_all(&fm->change_set_notify);
49 printk(KERN_INFO DRIVER_NAME
50 ": demand removing card from socket %d\n", cnt);
51 sock = fm->sockets[cnt];
52 fm->sockets[cnt] = NULL;
53 fm->remove_mask &= ~(1 << cnt);
54
55 writel(0x0e00, sock->addr + SOCK_CONTROL);
56
57 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
58 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
59 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
60 fm->addr + FM_SET_INTERRUPT_ENABLE);
61
62 spin_unlock_irqrestore(&fm->lock, flags);
63 device_unregister(&sock->dev);
64 spin_lock_irqsave(&fm->lock, flags);
65 }
66 }
67 spin_unlock_irqrestore(&fm->lock, flags); 26 spin_unlock_irqrestore(&fm->lock, flags);
68 class_device_put(&fm->cdev);
69} 27}
70 28
71static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id) 29static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
72{ 30{
73 struct tifm_adapter *fm = dev_id; 31 struct tifm_adapter *fm = dev_id;
32 struct tifm_dev *sock;
74 unsigned int irq_status; 33 unsigned int irq_status;
75 unsigned int sock_irq_status, cnt; 34 unsigned int sock_irq_status, cnt;
76 35
@@ -84,42 +43,32 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
84 if (irq_status & TIFM_IRQ_ENABLE) { 43 if (irq_status & TIFM_IRQ_ENABLE) {
85 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 44 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
86 45
87 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 46 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
88 sock_irq_status = (irq_status >> cnt) & 47 sock = fm->sockets[cnt];
89 (TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK); 48 sock_irq_status = (irq_status >> cnt)
90 49 & (TIFM_IRQ_FIFOMASK(1)
91 if (fm->sockets[cnt]) { 50 | TIFM_IRQ_CARDMASK(1));
92 if (sock_irq_status &&
93 fm->sockets[cnt]->signal_irq)
94 sock_irq_status = fm->sockets[cnt]->
95 signal_irq(fm->sockets[cnt],
96 sock_irq_status);
97 51
98 if (irq_status & (1 << cnt)) 52 if (sock && sock_irq_status)
99 fm->remove_mask |= 1 << cnt; 53 sock->signal_irq(sock, sock_irq_status);
100 } else {
101 if (irq_status & (1 << cnt))
102 fm->insert_mask |= 1 << cnt;
103 }
104 } 54 }
55
56 fm->socket_change_set |= irq_status
57 & ((1 << fm->num_sockets) - 1);
105 } 58 }
106 writel(irq_status, fm->addr + FM_INTERRUPT_STATUS); 59 writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
107 60
108 if (!fm->inhibit_new_cards) { 61 if (!fm->socket_change_set)
109 if (!fm->remove_mask && !fm->insert_mask) { 62 writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
110 writel(TIFM_IRQ_ENABLE, 63 else
111 fm->addr + FM_SET_INTERRUPT_ENABLE); 64 wake_up_all(&fm->change_set_notify);
112 } else {
113 queue_work(fm->wq, &fm->media_remover);
114 queue_work(fm->wq, &fm->media_inserter);
115 }
116 }
117 65
118 spin_unlock(&fm->lock); 66 spin_unlock(&fm->lock);
119 return IRQ_HANDLED; 67 return IRQ_HANDLED;
120} 68}
121 69
122static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is_x2) 70static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr,
71 int is_x2)
123{ 72{
124 unsigned int s_state; 73 unsigned int s_state;
125 int cnt; 74 int cnt;
@@ -127,8 +76,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
127 writel(0x0e00, sock_addr + SOCK_CONTROL); 76 writel(0x0e00, sock_addr + SOCK_CONTROL);
128 77
129 for (cnt = 0; cnt < 100; cnt++) { 78 for (cnt = 0; cnt < 100; cnt++) {
130 if (!(TIFM_SOCK_STATE_POWERED & 79 if (!(TIFM_SOCK_STATE_POWERED
131 readl(sock_addr + SOCK_PRESENT_STATE))) 80 & readl(sock_addr + SOCK_PRESENT_STATE)))
132 break; 81 break;
133 msleep(10); 82 msleep(10);
134 } 83 }
@@ -151,8 +100,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
151 } 100 }
152 101
153 for (cnt = 0; cnt < 100; cnt++) { 102 for (cnt = 0; cnt < 100; cnt++) {
154 if ((TIFM_SOCK_STATE_POWERED & 103 if ((TIFM_SOCK_STATE_POWERED
155 readl(sock_addr + SOCK_PRESENT_STATE))) 104 & readl(sock_addr + SOCK_PRESENT_STATE)))
156 break; 105 break;
157 msleep(10); 106 msleep(10);
158 } 107 }
@@ -170,130 +119,209 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
170 return base_addr + ((sock_num + 1) << 10); 119 return base_addr + ((sock_num + 1) << 10);
171} 120}
172 121
173static void tifm_7xx1_insert_media(struct work_struct *work) 122static int tifm_7xx1_switch_media(void *data)
174{ 123{
175 struct tifm_adapter *fm = 124 struct tifm_adapter *fm = data;
176 container_of(work, struct tifm_adapter, media_inserter);
177 unsigned long flags; 125 unsigned long flags;
178 tifm_media_id media_id; 126 tifm_media_id media_id;
179 char *card_name = "xx"; 127 char *card_name = "xx";
180 int cnt, ok_to_register; 128 int cnt, rc;
181 unsigned int insert_mask; 129 struct tifm_dev *sock;
182 struct tifm_dev *new_sock = NULL; 130 unsigned int socket_change_set;
183 131
184 if (!class_device_get(&fm->cdev)) 132 while (1) {
185 return; 133 rc = wait_event_interruptible(fm->change_set_notify,
186 spin_lock_irqsave(&fm->lock, flags); 134 fm->socket_change_set);
187 insert_mask = fm->insert_mask; 135 if (rc == -ERESTARTSYS)
188 fm->insert_mask = 0; 136 try_to_freeze();
189 if (fm->inhibit_new_cards) { 137
138 spin_lock_irqsave(&fm->lock, flags);
139 socket_change_set = fm->socket_change_set;
140 fm->socket_change_set = 0;
141
142 dev_dbg(fm->dev, "checking media set %x\n",
143 socket_change_set);
144
145 if (kthread_should_stop())
146 socket_change_set = (1 << fm->num_sockets) - 1;
190 spin_unlock_irqrestore(&fm->lock, flags); 147 spin_unlock_irqrestore(&fm->lock, flags);
191 class_device_put(&fm->cdev);
192 return;
193 }
194 spin_unlock_irqrestore(&fm->lock, flags);
195 148
196 for (cnt = 0; cnt < fm->max_sockets; cnt++) { 149 if (!socket_change_set)
197 if (!(insert_mask & (1 << cnt)))
198 continue; 150 continue;
199 151
200 media_id = tifm_7xx1_toggle_sock_power(tifm_7xx1_sock_addr(fm->addr, cnt), 152 spin_lock_irqsave(&fm->lock, flags);
201 fm->max_sockets == 2); 153 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
202 if (media_id) { 154 if (!(socket_change_set & (1 << cnt)))
203 ok_to_register = 0; 155 continue;
204 new_sock = tifm_alloc_device(fm, cnt); 156 sock = fm->sockets[cnt];
205 if (new_sock) { 157 if (sock) {
206 new_sock->addr = tifm_7xx1_sock_addr(fm->addr,
207 cnt);
208 new_sock->media_id = media_id;
209 switch (media_id) {
210 case 1:
211 card_name = "xd";
212 break;
213 case 2:
214 card_name = "ms";
215 break;
216 case 3:
217 card_name = "sd";
218 break;
219 default:
220 break;
221 }
222 snprintf(new_sock->dev.bus_id, BUS_ID_SIZE,
223 "tifm_%s%u:%u", card_name, fm->id, cnt);
224 printk(KERN_INFO DRIVER_NAME 158 printk(KERN_INFO DRIVER_NAME
225 ": %s card detected in socket %d\n", 159 ": demand removing card from socket %d\n",
226 card_name, cnt); 160 cnt);
161 fm->sockets[cnt] = NULL;
162 spin_unlock_irqrestore(&fm->lock, flags);
163 device_unregister(&sock->dev);
227 spin_lock_irqsave(&fm->lock, flags); 164 spin_lock_irqsave(&fm->lock, flags);
228 if (!fm->sockets[cnt]) { 165 writel(0x0e00,
229 fm->sockets[cnt] = new_sock; 166 tifm_7xx1_sock_addr(fm->addr, cnt)
230 ok_to_register = 1; 167 + SOCK_CONTROL);
168 }
169 if (kthread_should_stop())
170 continue;
171
172 spin_unlock_irqrestore(&fm->lock, flags);
173 media_id = tifm_7xx1_toggle_sock_power(
174 tifm_7xx1_sock_addr(fm->addr, cnt),
175 fm->num_sockets == 2);
176 if (media_id) {
177 sock = tifm_alloc_device(fm);
178 if (sock) {
179 sock->addr = tifm_7xx1_sock_addr(fm->addr,
180 cnt);
181 sock->media_id = media_id;
182 sock->socket_id = cnt;
183 switch (media_id) {
184 case 1:
185 card_name = "xd";
186 break;
187 case 2:
188 card_name = "ms";
189 break;
190 case 3:
191 card_name = "sd";
192 break;
193 default:
194 tifm_free_device(&sock->dev);
195 spin_lock_irqsave(&fm->lock, flags);
196 continue;
197 }
198 snprintf(sock->dev.bus_id, BUS_ID_SIZE,
199 "tifm_%s%u:%u", card_name,
200 fm->id, cnt);
201 printk(KERN_INFO DRIVER_NAME
202 ": %s card detected in socket %d\n",
203 card_name, cnt);
204 if (!device_register(&sock->dev)) {
205 spin_lock_irqsave(&fm->lock, flags);
206 if (!fm->sockets[cnt]) {
207 fm->sockets[cnt] = sock;
208 sock = NULL;
209 }
210 spin_unlock_irqrestore(&fm->lock, flags);
211 }
212 if (sock)
213 tifm_free_device(&sock->dev);
231 } 214 }
215 spin_lock_irqsave(&fm->lock, flags);
216 }
217 }
218
219 if (!kthread_should_stop()) {
220 writel(TIFM_IRQ_FIFOMASK(socket_change_set)
221 | TIFM_IRQ_CARDMASK(socket_change_set),
222 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
223 writel(TIFM_IRQ_FIFOMASK(socket_change_set)
224 | TIFM_IRQ_CARDMASK(socket_change_set),
225 fm->addr + FM_SET_INTERRUPT_ENABLE);
226 writel(TIFM_IRQ_ENABLE,
227 fm->addr + FM_SET_INTERRUPT_ENABLE);
228 spin_unlock_irqrestore(&fm->lock, flags);
229 } else {
230 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
231 if (fm->sockets[cnt])
232 fm->socket_change_set |= 1 << cnt;
233 }
234 if (!fm->socket_change_set) {
235 spin_unlock_irqrestore(&fm->lock, flags);
236 return 0;
237 } else {
232 spin_unlock_irqrestore(&fm->lock, flags); 238 spin_unlock_irqrestore(&fm->lock, flags);
233 if (!ok_to_register ||
234 device_register(&new_sock->dev)) {
235 spin_lock_irqsave(&fm->lock, flags);
236 fm->sockets[cnt] = NULL;
237 spin_unlock_irqrestore(&fm->lock,
238 flags);
239 tifm_free_device(&new_sock->dev);
240 }
241 } 239 }
242 } 240 }
243 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
244 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
245 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
246 fm->addr + FM_SET_INTERRUPT_ENABLE);
247 } 241 }
248 242 return 0;
249 writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
250 class_device_put(&fm->cdev);
251} 243}
252 244
245#ifdef CONFIG_PM
246
253static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) 247static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
254{ 248{
255 struct tifm_adapter *fm = pci_get_drvdata(dev); 249 dev_dbg(&dev->dev, "suspending host\n");
256 unsigned long flags;
257 250
258 spin_lock_irqsave(&fm->lock, flags); 251 pci_save_state(dev);
259 fm->inhibit_new_cards = 1; 252 pci_enable_wake(dev, pci_choose_state(dev, state), 0);
260 fm->remove_mask = 0xf; 253 pci_disable_device(dev);
261 fm->insert_mask = 0; 254 pci_set_power_state(dev, pci_choose_state(dev, state));
262 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
263 spin_unlock_irqrestore(&fm->lock, flags);
264 flush_workqueue(fm->wq);
265
266 tifm_7xx1_remove_media(&fm->media_remover);
267
268 pci_set_power_state(dev, PCI_D3hot);
269 pci_disable_device(dev);
270 pci_save_state(dev);
271 return 0; 255 return 0;
272} 256}
273 257
274static int tifm_7xx1_resume(struct pci_dev *dev) 258static int tifm_7xx1_resume(struct pci_dev *dev)
275{ 259{
276 struct tifm_adapter *fm = pci_get_drvdata(dev); 260 struct tifm_adapter *fm = pci_get_drvdata(dev);
261 int cnt, rc;
277 unsigned long flags; 262 unsigned long flags;
263 tifm_media_id new_ids[fm->num_sockets];
278 264
265 pci_set_power_state(dev, PCI_D0);
279 pci_restore_state(dev); 266 pci_restore_state(dev);
280 pci_enable_device(dev); 267 rc = pci_enable_device(dev);
281 pci_set_power_state(dev, PCI_D0); 268 if (rc)
282 pci_set_master(dev); 269 return rc;
270 pci_set_master(dev);
283 271
272 dev_dbg(&dev->dev, "resuming host\n");
273
274 for (cnt = 0; cnt < fm->num_sockets; cnt++)
275 new_ids[cnt] = tifm_7xx1_toggle_sock_power(
276 tifm_7xx1_sock_addr(fm->addr, cnt),
277 fm->num_sockets == 2);
284 spin_lock_irqsave(&fm->lock, flags); 278 spin_lock_irqsave(&fm->lock, flags);
285 fm->inhibit_new_cards = 0; 279 fm->socket_change_set = 0;
286 writel(TIFM_IRQ_SETALL, fm->addr + FM_INTERRUPT_STATUS); 280 for (cnt = 0; cnt < fm->num_sockets; cnt++) {
287 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 281 if (fm->sockets[cnt]) {
288 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, 282 if (fm->sockets[cnt]->media_id == new_ids[cnt])
289 fm->addr + FM_SET_INTERRUPT_ENABLE); 283 fm->socket_change_set |= 1 << cnt;
290 fm->insert_mask = 0xf; 284
285 fm->sockets[cnt]->media_id = new_ids[cnt];
286 }
287 }
288
289 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
290 fm->addr + FM_SET_INTERRUPT_ENABLE);
291 if (!fm->socket_change_set) {
292 spin_unlock_irqrestore(&fm->lock, flags);
293 return 0;
294 } else {
295 fm->socket_change_set = 0;
296 spin_unlock_irqrestore(&fm->lock, flags);
297 }
298
299 wait_event_timeout(fm->change_set_notify, fm->socket_change_set, HZ);
300
301 spin_lock_irqsave(&fm->lock, flags);
302 writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
303 | TIFM_IRQ_CARDMASK(fm->socket_change_set),
304 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
305 writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
306 | TIFM_IRQ_CARDMASK(fm->socket_change_set),
307 fm->addr + FM_SET_INTERRUPT_ENABLE);
308 writel(TIFM_IRQ_ENABLE,
309 fm->addr + FM_SET_INTERRUPT_ENABLE);
310 fm->socket_change_set = 0;
311
291 spin_unlock_irqrestore(&fm->lock, flags); 312 spin_unlock_irqrestore(&fm->lock, flags);
292 return 0; 313 return 0;
293} 314}
294 315
316#else
317
318#define tifm_7xx1_suspend NULL
319#define tifm_7xx1_resume NULL
320
321#endif /* CONFIG_PM */
322
295static int tifm_7xx1_probe(struct pci_dev *dev, 323static int tifm_7xx1_probe(struct pci_dev *dev,
296 const struct pci_device_id *dev_id) 324 const struct pci_device_id *dev_id)
297{ 325{
298 struct tifm_adapter *fm; 326 struct tifm_adapter *fm;
299 int pci_dev_busy = 0; 327 int pci_dev_busy = 0;
@@ -324,19 +352,18 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
324 } 352 }
325 353
326 fm->dev = &dev->dev; 354 fm->dev = &dev->dev;
327 fm->max_sockets = (dev->device == 0x803B) ? 2 : 4; 355 fm->num_sockets = (dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM)
328 fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->max_sockets, 356 ? 4 : 2;
329 GFP_KERNEL); 357 fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->num_sockets,
358 GFP_KERNEL);
330 if (!fm->sockets) 359 if (!fm->sockets)
331 goto err_out_free; 360 goto err_out_free;
332 361
333 INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
334 INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
335 fm->eject = tifm_7xx1_eject; 362 fm->eject = tifm_7xx1_eject;
336 pci_set_drvdata(dev, fm); 363 pci_set_drvdata(dev, fm);
337 364
338 fm->addr = ioremap(pci_resource_start(dev, 0), 365 fm->addr = ioremap(pci_resource_start(dev, 0),
339 pci_resource_len(dev, 0)); 366 pci_resource_len(dev, 0));
340 if (!fm->addr) 367 if (!fm->addr)
341 goto err_out_free; 368 goto err_out_free;
342 369
@@ -344,16 +371,15 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
344 if (rc) 371 if (rc)
345 goto err_out_unmap; 372 goto err_out_unmap;
346 373
347 rc = tifm_add_adapter(fm); 374 init_waitqueue_head(&fm->change_set_notify);
375 rc = tifm_add_adapter(fm, tifm_7xx1_switch_media);
348 if (rc) 376 if (rc)
349 goto err_out_irq; 377 goto err_out_irq;
350 378
351 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); 379 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
352 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK, 380 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
353 fm->addr + FM_SET_INTERRUPT_ENABLE); 381 fm->addr + FM_SET_INTERRUPT_ENABLE);
354 382 wake_up_process(fm->media_switcher);
355 fm->insert_mask = 0xf;
356
357 return 0; 383 return 0;
358 384
359err_out_irq: 385err_out_irq:
@@ -377,19 +403,15 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
377 struct tifm_adapter *fm = pci_get_drvdata(dev); 403 struct tifm_adapter *fm = pci_get_drvdata(dev);
378 unsigned long flags; 404 unsigned long flags;
379 405
406 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
407 mmiowb();
408 free_irq(dev->irq, fm);
409
380 spin_lock_irqsave(&fm->lock, flags); 410 spin_lock_irqsave(&fm->lock, flags);
381 fm->inhibit_new_cards = 1; 411 fm->socket_change_set = (1 << fm->num_sockets) - 1;
382 fm->remove_mask = 0xf;
383 fm->insert_mask = 0;
384 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
385 spin_unlock_irqrestore(&fm->lock, flags); 412 spin_unlock_irqrestore(&fm->lock, flags);
386 413
387 flush_workqueue(fm->wq); 414 kthread_stop(fm->media_switcher);
388
389 tifm_7xx1_remove_media(&fm->media_remover);
390
391 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
392 free_irq(dev->irq, fm);
393 415
394 tifm_remove_adapter(fm); 416 tifm_remove_adapter(fm);
395 417
@@ -404,10 +426,12 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
404} 426}
405 427
406static struct pci_device_id tifm_7xx1_pci_tbl [] = { 428static struct pci_device_id tifm_7xx1_pci_tbl [] = {
407 { PCI_VENDOR_ID_TI, 0x8033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 429 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
408 0 }, /* xx21 - the one I have */ 430 PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
409 { PCI_VENDOR_ID_TI, 0x803B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 431 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
410 0 }, /* xx12 - should be also supported */ 432 PCI_ANY_ID, 0, 0, 0 },
433 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
434 PCI_ANY_ID, 0, 0, 0 },
411 { } 435 { }
412}; 436};
413 437
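
The tifm_7xx1 rewrite above drops the insert/remove workqueue pair in favour of a single media-switcher kernel thread: the ISR only records pending sockets in socket_change_set and wakes change_set_notify, the thread drains that set under the adapter lock and registers or unregisters sockets outside it, and kthread_stop() in the remove path forces one final pass that tears every socket down. The sketch below is a user-space pthread analogue of that wait-and-drain loop, written under the assumption that this captures the intent; fake_adapter and the pthread primitives stand in for the driver's spinlock, wait_event_interruptible() and kthread_should_stop().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Crude stand-in for the adapter: a change mask guarded by a lock, plus
 * a condition variable playing the role of change_set_notify. */
struct fake_adapter {
        pthread_mutex_t lock;
        pthread_cond_t  change_set_notify;
        unsigned int    socket_change_set;
        bool            stopping;       /* kthread_should_stop() analogue */
};

static void *media_switcher(void *data)
{
        struct fake_adapter *fm = data;
        unsigned int set;
        bool stop;

        for (;;) {
                pthread_mutex_lock(&fm->lock);
                while (!fm->socket_change_set && !fm->stopping)
                        pthread_cond_wait(&fm->change_set_notify, &fm->lock);

                set = fm->socket_change_set;    /* grab and clear pending set */
                fm->socket_change_set = 0;
                stop = fm->stopping;
                pthread_mutex_unlock(&fm->lock);

                printf("servicing change set 0x%x\n", set);
                /* ... sockets named in 'set' would be probed/removed here ... */

                if (stop)
                        return NULL;            /* final pass done, exit */
        }
}

int main(void)
{
        struct fake_adapter fm = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .change_set_notify = PTHREAD_COND_INITIALIZER,
        };
        pthread_t thr;

        pthread_create(&thr, NULL, media_switcher, &fm);

        pthread_mutex_lock(&fm.lock);   /* what the ISR does on a card event */
        fm.socket_change_set |= 1 << 0;
        pthread_cond_broadcast(&fm.change_set_notify);
        pthread_mutex_unlock(&fm.lock);

        pthread_mutex_lock(&fm.lock);   /* what tifm_7xx1_remove() does */
        fm.stopping = true;
        pthread_cond_broadcast(&fm.change_set_notify);
        pthread_mutex_unlock(&fm.lock);

        pthread_join(thr, NULL);
        return 0;
}

Compile with -pthread; the broadcast in main() plays the role of the ISR's wake_up_all().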
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index d61df5c3ac36..6b10ebe9d936 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -14,7 +14,7 @@
14#include <linux/idr.h> 14#include <linux/idr.h>
15 15
16#define DRIVER_NAME "tifm_core" 16#define DRIVER_NAME "tifm_core"
17#define DRIVER_VERSION "0.6" 17#define DRIVER_VERSION "0.7"
18 18
19static DEFINE_IDR(tifm_adapter_idr); 19static DEFINE_IDR(tifm_adapter_idr);
20static DEFINE_SPINLOCK(tifm_adapter_lock); 20static DEFINE_SPINLOCK(tifm_adapter_lock);
@@ -60,10 +60,41 @@ static int tifm_uevent(struct device *dev, char **envp, int num_envp,
60 return 0; 60 return 0;
61} 61}
62 62
63#ifdef CONFIG_PM
64
65static int tifm_device_suspend(struct device *dev, pm_message_t state)
66{
67 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
68 struct tifm_driver *drv = fm_dev->drv;
69
70 if (drv && drv->suspend)
71 return drv->suspend(fm_dev, state);
72 return 0;
73}
74
75static int tifm_device_resume(struct device *dev)
76{
77 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
78 struct tifm_driver *drv = fm_dev->drv;
79
80 if (drv && drv->resume)
81 return drv->resume(fm_dev);
82 return 0;
83}
84
85#else
86
87#define tifm_device_suspend NULL
88#define tifm_device_resume NULL
89
90#endif /* CONFIG_PM */
91
63static struct bus_type tifm_bus_type = { 92static struct bus_type tifm_bus_type = {
64 .name = "tifm", 93 .name = "tifm",
65 .match = tifm_match, 94 .match = tifm_match,
66 .uevent = tifm_uevent, 95 .uevent = tifm_uevent,
96 .suspend = tifm_device_suspend,
97 .resume = tifm_device_resume
67}; 98};
68 99
69static void tifm_free(struct class_device *cdev) 100static void tifm_free(struct class_device *cdev)
@@ -71,8 +102,6 @@ static void tifm_free(struct class_device *cdev)
71 struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev); 102 struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev);
72 103
73 kfree(fm->sockets); 104 kfree(fm->sockets);
74 if (fm->wq)
75 destroy_workqueue(fm->wq);
76 kfree(fm); 105 kfree(fm);
77} 106}
78 107
@@ -101,7 +130,8 @@ void tifm_free_adapter(struct tifm_adapter *fm)
101} 130}
102EXPORT_SYMBOL(tifm_free_adapter); 131EXPORT_SYMBOL(tifm_free_adapter);
103 132
104int tifm_add_adapter(struct tifm_adapter *fm) 133int tifm_add_adapter(struct tifm_adapter *fm,
134 int (*mediathreadfn)(void *data))
105{ 135{
106 int rc; 136 int rc;
107 137
@@ -113,10 +143,10 @@ int tifm_add_adapter(struct tifm_adapter *fm)
113 spin_unlock(&tifm_adapter_lock); 143 spin_unlock(&tifm_adapter_lock);
114 if (!rc) { 144 if (!rc) {
115 snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id); 145 snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id);
116 strncpy(fm->wq_name, fm->cdev.class_id, KOBJ_NAME_LEN); 146 fm->media_switcher = kthread_create(mediathreadfn,
147 fm, "tifm/%u", fm->id);
117 148
118 fm->wq = create_singlethread_workqueue(fm->wq_name); 149 if (!IS_ERR(fm->media_switcher))
119 if (fm->wq)
120 return class_device_add(&fm->cdev); 150 return class_device_add(&fm->cdev);
121 151
122 spin_lock(&tifm_adapter_lock); 152 spin_lock(&tifm_adapter_lock);
@@ -141,27 +171,27 @@ EXPORT_SYMBOL(tifm_remove_adapter);
141void tifm_free_device(struct device *dev) 171void tifm_free_device(struct device *dev)
142{ 172{
143 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev); 173 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
144 if (fm_dev->wq)
145 destroy_workqueue(fm_dev->wq);
146 kfree(fm_dev); 174 kfree(fm_dev);
147} 175}
148EXPORT_SYMBOL(tifm_free_device); 176EXPORT_SYMBOL(tifm_free_device);
149 177
150struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id) 178static void tifm_dummy_signal_irq(struct tifm_dev *sock,
179 unsigned int sock_irq_status)
180{
181 return;
182}
183
184struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm)
151{ 185{
152 struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL); 186 struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
153 187
154 if (dev) { 188 if (dev) {
155 spin_lock_init(&dev->lock); 189 spin_lock_init(&dev->lock);
156 snprintf(dev->wq_name, KOBJ_NAME_LEN, "tifm%u:%u", fm->id, id); 190
157 dev->wq = create_singlethread_workqueue(dev->wq_name);
158 if (!dev->wq) {
159 kfree(dev);
160 return NULL;
161 }
162 dev->dev.parent = fm->dev; 191 dev->dev.parent = fm->dev;
163 dev->dev.bus = &tifm_bus_type; 192 dev->dev.bus = &tifm_bus_type;
164 dev->dev.release = tifm_free_device; 193 dev->dev.release = tifm_free_device;
194 dev->signal_irq = tifm_dummy_signal_irq;
165 } 195 }
166 return dev; 196 return dev;
167} 197}
@@ -219,6 +249,7 @@ static int tifm_device_remove(struct device *dev)
219 struct tifm_driver *drv = fm_dev->drv; 249 struct tifm_driver *drv = fm_dev->drv;
220 250
221 if (drv) { 251 if (drv) {
252 fm_dev->signal_irq = tifm_dummy_signal_irq;
222 if (drv->remove) 253 if (drv->remove)
223 drv->remove(fm_dev); 254 drv->remove(fm_dev);
224 fm_dev->drv = NULL; 255 fm_dev->drv = NULL;
@@ -233,6 +264,8 @@ int tifm_register_driver(struct tifm_driver *drv)
233 drv->driver.bus = &tifm_bus_type; 264 drv->driver.bus = &tifm_bus_type;
234 drv->driver.probe = tifm_device_probe; 265 drv->driver.probe = tifm_device_probe;
235 drv->driver.remove = tifm_device_remove; 266 drv->driver.remove = tifm_device_remove;
267 drv->driver.suspend = tifm_device_suspend;
268 drv->driver.resume = tifm_device_resume;
236 269
237 return driver_register(&drv->driver); 270 return driver_register(&drv->driver);
238} 271}
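
tifm_core now routes power management through both the bus_type and each registered driver structure, and in every case the bus-level callback simply forwards to the tifm driver's own suspend/resume hook when one is present, returning success otherwise. A minimal standalone sketch of that forwarding pattern; stub_dev and stub_driver are hypothetical stand-ins, not the kernel's types.

#include <stdio.h>

struct stub_dev;

/* Optional per-driver PM hooks, playing the role of struct tifm_driver. */
struct stub_driver {
        int (*suspend)(struct stub_dev *dev);
        int (*resume)(struct stub_dev *dev);
};

struct stub_dev {
        struct stub_driver *drv;
};

/* Bus-level callback: forward to the bound driver if it has a hook. */
static int bus_suspend(struct stub_dev *dev)
{
        if (dev->drv && dev->drv->suspend)
                return dev->drv->suspend(dev);
        return 0;               /* no hook bound: nothing to quiesce */
}

static int card_suspend(struct stub_dev *dev)
{
        (void)dev;
        printf("card driver quiescing\n");
        return 0;
}

int main(void)
{
        struct stub_driver drv = { .suspend = card_suspend };
        struct stub_dev dev = { .drv = &drv };

        return bus_suspend(&dev);
}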
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index aa152f31851e..2ce50f38e3c7 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -823,6 +823,9 @@ static int __init at91_mci_probe(struct platform_device *pdev)
823 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 823 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
824 mmc->caps = MMC_CAP_BYTEBLOCK; 824 mmc->caps = MMC_CAP_BYTEBLOCK;
825 825
826 mmc->max_blk_size = 4095;
827 mmc->max_blk_count = mmc->max_req_size;
828
826 host = mmc_priv(mmc); 829 host = mmc_priv(mmc);
827 host->mmc = mmc; 830 host->mmc = mmc;
828 host->buffer = NULL; 831 host->buffer = NULL;
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 800527cf40d5..b834be261ab7 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -152,8 +152,9 @@ static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
152 ? 1 : 0; 152 ? 1 : 0;
153} 153}
154 154
155static inline int au1xmmc_card_readonly(struct au1xmmc_host *host) 155static int au1xmmc_card_readonly(struct mmc_host *mmc)
156{ 156{
157 struct au1xmmc_host *host = mmc_priv(mmc);
157 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus) 158 return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
158 ? 1 : 0; 159 ? 1 : 0;
159} 160}
@@ -193,6 +194,8 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
193 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); 194 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
194 195
195 switch (mmc_resp_type(cmd)) { 196 switch (mmc_resp_type(cmd)) {
197 case MMC_RSP_NONE:
198 break;
196 case MMC_RSP_R1: 199 case MMC_RSP_R1:
197 mmccmd |= SD_CMD_RT_1; 200 mmccmd |= SD_CMD_RT_1;
198 break; 201 break;
@@ -205,6 +208,10 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
205 case MMC_RSP_R3: 208 case MMC_RSP_R3:
206 mmccmd |= SD_CMD_RT_3; 209 mmccmd |= SD_CMD_RT_3;
207 break; 210 break;
211 default:
212 printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
213 mmc_resp_type(cmd));
214 return MMC_ERR_INVALID;
208 } 215 }
209 216
210 switch(cmd->opcode) { 217 switch(cmd->opcode) {
@@ -878,6 +885,7 @@ static void au1xmmc_init_dma(struct au1xmmc_host *host)
878static const struct mmc_host_ops au1xmmc_ops = { 885static const struct mmc_host_ops au1xmmc_ops = {
879 .request = au1xmmc_request, 886 .request = au1xmmc_request,
880 .set_ios = au1xmmc_set_ios, 887 .set_ios = au1xmmc_set_ios,
888 .get_ro = au1xmmc_card_readonly,
881}; 889};
882 890
883static int __devinit au1xmmc_probe(struct platform_device *pdev) 891static int __devinit au1xmmc_probe(struct platform_device *pdev)
@@ -914,6 +922,9 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
914 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 922 mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
915 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT; 923 mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
916 924
925 mmc->max_blk_size = 2048;
926 mmc->max_blk_count = 512;
927
917 mmc->ocr_avail = AU1XMMC_OCR; 928 mmc->ocr_avail = AU1XMMC_OCR;
918 929
919 host = mmc_priv(mmc); 930 host = mmc_priv(mmc);
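
The au1xmmc hunks make the response-type mapping explicit: MMC_RSP_NONE becomes an accepted case, anything the controller cannot encode is rejected with MMC_ERR_INVALID instead of falling through silently, and the write-protect check is exported through the host ops' get_ro hook. A small standalone sketch of the same map-or-reject pattern; the resp_type enum and register encodings below are hypothetical, not the driver's real MMC_RSP_* flags or SD_CMD_RT_* bits.

#include <stdio.h>

/* Hypothetical response types and encodings for illustration only. */
enum resp_type { RSP_NONE, RSP_R1, RSP_R2, RSP_R3, RSP_R6 };

static int encode_resp(enum resp_type r, unsigned int *cmdreg)
{
        switch (r) {
        case RSP_NONE:
                break;          /* nothing to program, but still valid */
        case RSP_R1:
                *cmdreg |= 0x1;
                break;
        case RSP_R2:
                *cmdreg |= 0x2;
                break;
        case RSP_R3:
                *cmdreg |= 0x3;
                break;
        default:
                fprintf(stderr, "unhandled response type %d\n", r);
                return -1;      /* reject rather than program garbage */
        }
        return 0;
}

int main(void)
{
        unsigned int cmd = 0;

        if (encode_resp(RSP_R6, &cmd))  /* unsupported here, so rejected */
                printf("command rejected\n");
        return 0;
}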
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index bfb9ff693208..b060d4bfba29 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -958,8 +958,10 @@ static int imxmci_probe(struct platform_device *pdev)
958 /* MMC core transfer sizes tunable parameters */ 958 /* MMC core transfer sizes tunable parameters */
959 mmc->max_hw_segs = 64; 959 mmc->max_hw_segs = 64;
960 mmc->max_phys_segs = 64; 960 mmc->max_phys_segs = 64;
961 mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */
962 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ 961 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
962 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
963 mmc->max_blk_size = 2048;
964 mmc->max_blk_count = 65535;
963 965
964 host = mmc_priv(mmc); 966 host = mmc_priv(mmc);
965 host->mmc = mmc; 967 host->mmc = mmc;
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 6f2a282e2b97..5046a1661342 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -103,11 +103,16 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
103 mmc_hostname(host), mrq->cmd->opcode, 103 mmc_hostname(host), mrq->cmd->opcode,
104 mrq->cmd->arg, mrq->cmd->flags); 104 mrq->cmd->arg, mrq->cmd->flags);
105 105
106 WARN_ON(host->card_busy == NULL); 106 WARN_ON(!host->claimed);
107 107
108 mrq->cmd->error = 0; 108 mrq->cmd->error = 0;
109 mrq->cmd->mrq = mrq; 109 mrq->cmd->mrq = mrq;
110 if (mrq->data) { 110 if (mrq->data) {
111 BUG_ON(mrq->data->blksz > host->max_blk_size);
112 BUG_ON(mrq->data->blocks > host->max_blk_count);
113 BUG_ON(mrq->data->blocks * mrq->data->blksz >
114 host->max_req_size);
115
111 mrq->cmd->data = mrq->data; 116 mrq->cmd->data = mrq->data;
112 mrq->data->error = 0; 117 mrq->data->error = 0;
113 mrq->data->mrq = mrq; 118 mrq->data->mrq = mrq;
@@ -157,7 +162,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
157{ 162{
158 struct mmc_request mrq; 163 struct mmc_request mrq;
159 164
160 BUG_ON(host->card_busy == NULL); 165 BUG_ON(!host->claimed);
161 166
162 memset(&mrq, 0, sizeof(struct mmc_request)); 167 memset(&mrq, 0, sizeof(struct mmc_request));
163 168
@@ -195,7 +200,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
195 200
196 int i, err; 201 int i, err;
197 202
198 BUG_ON(host->card_busy == NULL); 203 BUG_ON(!host->claimed);
199 BUG_ON(retries < 0); 204 BUG_ON(retries < 0);
200 205
201 err = MMC_ERR_INVALID; 206 err = MMC_ERR_INVALID;
@@ -289,7 +294,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
289 else 294 else
290 limit_us = 100000; 295 limit_us = 100000;
291 296
292 if (timeout_us > limit_us) { 297 /*
298 * SDHC cards always use these fixed values.
299 */
300 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
293 data->timeout_ns = limit_us * 1000; 301 data->timeout_ns = limit_us * 1000;
294 data->timeout_clks = 0; 302 data->timeout_clks = 0;
295 } 303 }
@@ -320,14 +328,14 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
320 spin_lock_irqsave(&host->lock, flags); 328 spin_lock_irqsave(&host->lock, flags);
321 while (1) { 329 while (1) {
322 set_current_state(TASK_UNINTERRUPTIBLE); 330 set_current_state(TASK_UNINTERRUPTIBLE);
323 if (host->card_busy == NULL) 331 if (!host->claimed)
324 break; 332 break;
325 spin_unlock_irqrestore(&host->lock, flags); 333 spin_unlock_irqrestore(&host->lock, flags);
326 schedule(); 334 schedule();
327 spin_lock_irqsave(&host->lock, flags); 335 spin_lock_irqsave(&host->lock, flags);
328 } 336 }
329 set_current_state(TASK_RUNNING); 337 set_current_state(TASK_RUNNING);
330 host->card_busy = card; 338 host->claimed = 1;
331 spin_unlock_irqrestore(&host->lock, flags); 339 spin_unlock_irqrestore(&host->lock, flags);
332 remove_wait_queue(&host->wq, &wait); 340 remove_wait_queue(&host->wq, &wait);
333 341
@@ -353,10 +361,10 @@ void mmc_release_host(struct mmc_host *host)
353{ 361{
354 unsigned long flags; 362 unsigned long flags;
355 363
356 BUG_ON(host->card_busy == NULL); 364 BUG_ON(!host->claimed);
357 365
358 spin_lock_irqsave(&host->lock, flags); 366 spin_lock_irqsave(&host->lock, flags);
359 host->card_busy = NULL; 367 host->claimed = 0;
360 spin_unlock_irqrestore(&host->lock, flags); 368 spin_unlock_irqrestore(&host->lock, flags);
361 369
362 wake_up(&host->wq); 370 wake_up(&host->wq);
@@ -372,7 +380,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
372 mmc_hostname(host), ios->clock, ios->bus_mode, 380 mmc_hostname(host), ios->clock, ios->bus_mode,
373 ios->power_mode, ios->chip_select, ios->vdd, 381 ios->power_mode, ios->chip_select, ios->vdd,
374 ios->bus_width); 382 ios->bus_width);
375 383
376 host->ops->set_ios(host, ios); 384 host->ops->set_ios(host, ios);
377} 385}
378 386
@@ -381,7 +389,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
381 int err; 389 int err;
382 struct mmc_command cmd; 390 struct mmc_command cmd;
383 391
384 BUG_ON(host->card_busy == NULL); 392 BUG_ON(!host->claimed);
385 393
386 if (host->card_selected == card) 394 if (host->card_selected == card)
387 return MMC_ERR_NONE; 395 return MMC_ERR_NONE;
@@ -588,34 +596,65 @@ static void mmc_decode_csd(struct mmc_card *card)
588 596
589 if (mmc_card_sd(card)) { 597 if (mmc_card_sd(card)) {
590 csd_struct = UNSTUFF_BITS(resp, 126, 2); 598 csd_struct = UNSTUFF_BITS(resp, 126, 2);
591 if (csd_struct != 0) { 599
600 switch (csd_struct) {
601 case 0:
602 m = UNSTUFF_BITS(resp, 115, 4);
603 e = UNSTUFF_BITS(resp, 112, 3);
604 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
605 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
606
607 m = UNSTUFF_BITS(resp, 99, 4);
608 e = UNSTUFF_BITS(resp, 96, 3);
609 csd->max_dtr = tran_exp[e] * tran_mant[m];
610 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
611
612 e = UNSTUFF_BITS(resp, 47, 3);
613 m = UNSTUFF_BITS(resp, 62, 12);
614 csd->capacity = (1 + m) << (e + 2);
615
616 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
617 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
618 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
619 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
620 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
621 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
622 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
623 break;
624 case 1:
625 /*
626 * This is a block-addressed SDHC card. Most
627 * interesting fields are unused and have fixed
628 * values. To avoid getting tripped by buggy cards,
629 * we assume those fixed values ourselves.
630 */
631 mmc_card_set_blockaddr(card);
632
633 csd->tacc_ns = 0; /* Unused */
634 csd->tacc_clks = 0; /* Unused */
635
636 m = UNSTUFF_BITS(resp, 99, 4);
637 e = UNSTUFF_BITS(resp, 96, 3);
638 csd->max_dtr = tran_exp[e] * tran_mant[m];
639 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
640
641 m = UNSTUFF_BITS(resp, 48, 22);
642 csd->capacity = (1 + m) << 10;
643
644 csd->read_blkbits = 9;
645 csd->read_partial = 0;
646 csd->write_misalign = 0;
647 csd->read_misalign = 0;
648 csd->r2w_factor = 4; /* Unused */
649 csd->write_blkbits = 9;
650 csd->write_partial = 0;
651 break;
652 default:
592 printk("%s: unrecognised CSD structure version %d\n", 653 printk("%s: unrecognised CSD structure version %d\n",
593 mmc_hostname(card->host), csd_struct); 654 mmc_hostname(card->host), csd_struct);
594 mmc_card_set_bad(card); 655 mmc_card_set_bad(card);
595 return; 656 return;
596 } 657 }
597
598 m = UNSTUFF_BITS(resp, 115, 4);
599 e = UNSTUFF_BITS(resp, 112, 3);
600 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
601 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
602
603 m = UNSTUFF_BITS(resp, 99, 4);
604 e = UNSTUFF_BITS(resp, 96, 3);
605 csd->max_dtr = tran_exp[e] * tran_mant[m];
606 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
607
608 e = UNSTUFF_BITS(resp, 47, 3);
609 m = UNSTUFF_BITS(resp, 62, 12);
610 csd->capacity = (1 + m) << (e + 2);
611
612 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
613 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
614 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
615 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
616 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
617 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
618 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
619 } else { 658 } else {
620 /* 659 /*
621 * We only understand CSD structure v1.1 and v1.2. 660 * We only understand CSD structure v1.1 and v1.2.
@@ -848,6 +887,41 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
848 return err; 887 return err;
849} 888}
850 889
890static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2)
891{
892 struct mmc_command cmd;
893 int err, sd2;
894 static const u8 test_pattern = 0xAA;
895
896 /*
897 * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
898 * before SD_APP_OP_COND. This command will harmlessly fail for
899 * SD 1.0 cards.
900 */
901 cmd.opcode = SD_SEND_IF_COND;
902 cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
903 cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR;
904
905 err = mmc_wait_for_cmd(host, &cmd, 0);
906 if (err == MMC_ERR_NONE) {
907 if ((cmd.resp[0] & 0xFF) == test_pattern) {
908 sd2 = 1;
909 } else {
910 sd2 = 0;
911 err = MMC_ERR_FAILED;
912 }
913 } else {
914 /*
915 * Treat errors as SD 1.0 card.
916 */
917 sd2 = 0;
918 err = MMC_ERR_NONE;
919 }
920 if (rsd2)
921 *rsd2 = sd2;
922 return err;
923}
924
851/* 925/*
852 * Discover cards by requesting their CID. If this command 926 * Discover cards by requesting their CID. If this command
853 * times out, it is not an error; there are no further cards 927 * times out, it is not an error; there are no further cards
@@ -1018,7 +1092,8 @@ static void mmc_process_ext_csds(struct mmc_host *host)
1018 mmc_wait_for_req(host, &mrq); 1092 mmc_wait_for_req(host, &mrq);
1019 1093
1020 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1094 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
1021 mmc_card_set_dead(card); 1095 printk("%s: unable to read EXT_CSD, performance "
1096 "might suffer.\n", mmc_hostname(card->host));
1022 continue; 1097 continue;
1023 } 1098 }
1024 1099
@@ -1034,7 +1109,6 @@ static void mmc_process_ext_csds(struct mmc_host *host)
1034 printk("%s: card is mmc v4 but doesn't support " 1109 printk("%s: card is mmc v4 but doesn't support "
1035 "any high-speed modes.\n", 1110 "any high-speed modes.\n",
1036 mmc_hostname(card->host)); 1111 mmc_hostname(card->host));
1037 mmc_card_set_bad(card);
1038 continue; 1112 continue;
1039 } 1113 }
1040 1114
@@ -1215,7 +1289,9 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1215 mmc_wait_for_req(host, &mrq); 1289 mmc_wait_for_req(host, &mrq);
1216 1290
1217 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1291 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
1218 mmc_card_set_dead(card); 1292 printk("%s: unable to read switch capabilities, "
1293 "performance might suffer.\n",
1294 mmc_hostname(card->host));
1219 continue; 1295 continue;
1220 } 1296 }
1221 1297
@@ -1247,12 +1323,8 @@ static void mmc_read_switch_caps(struct mmc_host *host)
1247 1323
1248 mmc_wait_for_req(host, &mrq); 1324 mmc_wait_for_req(host, &mrq);
1249 1325
1250 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) { 1326 if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE ||
1251 mmc_card_set_dead(card); 1327 (status[16] & 0xF) != 1) {
1252 continue;
1253 }
1254
1255 if ((status[16] & 0xF) != 1) {
1256 printk(KERN_WARNING "%s: Problem switching card " 1328 printk(KERN_WARNING "%s: Problem switching card "
1257 "into high-speed mode!\n", 1329 "into high-speed mode!\n",
1258 mmc_hostname(host)); 1330 mmc_hostname(host));
@@ -1334,6 +1406,10 @@ static void mmc_setup(struct mmc_host *host)
1334 mmc_power_up(host); 1406 mmc_power_up(host);
1335 mmc_idle_cards(host); 1407 mmc_idle_cards(host);
1336 1408
1409 err = mmc_send_if_cond(host, host->ocr_avail, NULL);
1410 if (err != MMC_ERR_NONE) {
1411 return;
1412 }
1337 err = mmc_send_app_op_cond(host, 0, &ocr); 1413 err = mmc_send_app_op_cond(host, 0, &ocr);
1338 1414
1339 /* 1415 /*
@@ -1386,10 +1462,21 @@ static void mmc_setup(struct mmc_host *host)
1386 * all get the idea that they should be ready for CMD2. 1462 * all get the idea that they should be ready for CMD2.
1387 * (My SanDisk card seems to need this.) 1463 * (My SanDisk card seems to need this.)
1388 */ 1464 */
1389 if (host->mode == MMC_MODE_SD) 1465 if (host->mode == MMC_MODE_SD) {
1390 mmc_send_app_op_cond(host, host->ocr, NULL); 1466 int err, sd2;
1391 else 1467 err = mmc_send_if_cond(host, host->ocr, &sd2);
1468 if (err == MMC_ERR_NONE) {
1469 /*
1470 * If SD_SEND_IF_COND indicates an SD 2.0
 1471 * compliant card, we should set bit 30
1472 * of the ocr to indicate that we can handle
1473 * block-addressed SDHC cards.
1474 */
1475 mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL);
1476 }
1477 } else {
1392 mmc_send_op_cond(host, host->ocr, NULL); 1478 mmc_send_op_cond(host, host->ocr, NULL);
1479 }
1393 1480
1394 mmc_discover_cards(host); 1481 mmc_discover_cards(host);
1395 1482
@@ -1519,8 +1606,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
1519 */ 1606 */
1520 host->max_hw_segs = 1; 1607 host->max_hw_segs = 1;
1521 host->max_phys_segs = 1; 1608 host->max_phys_segs = 1;
1522 host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
1523 host->max_seg_size = PAGE_CACHE_SIZE; 1609 host->max_seg_size = PAGE_CACHE_SIZE;
1610
1611 host->max_req_size = PAGE_CACHE_SIZE;
1612 host->max_blk_size = 512;
1613 host->max_blk_count = PAGE_CACHE_SIZE / 512;
1524 } 1614 }
1525 1615
1526 return host; 1616 return host;
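
The mmc.c changes above add the SD 2.0 handshake needed for SDHC: SD_SEND_IF_COND (CMD8) is issued with a supported-voltage bit and an 0xAA check pattern before SD_APP_OP_COND, a matching echo marks the card as 2.0 capable, bit 30 of the ACMD41 argument (the host-capacity bit) is then set so block-addressed cards may answer, and a CSD structure version 1 reply is decoded with the fixed SDHC values, where capacity works out to (C_SIZE + 1) << 10 blocks of 512 bytes. Below is a standalone worked example of those calculations; it follows the formulas visible in the hunks rather than reusing kernel code.

#include <stdint.h>
#include <stdio.h>

/* CMD8 argument: a supported-voltage flag plus the 0xAA check pattern. */
static unsigned int if_cond_arg(unsigned int ocr)
{
        const unsigned int pattern = 0xAA;

        return (((ocr & 0xFF8000) != 0) << 8) | pattern;
}

/* ACMD41 argument: the OCR with bit 30 (HCS) set for 2.0-capable hosts. */
static unsigned int acmd41_arg(unsigned int ocr, int sd2)
{
        return ocr | ((unsigned int)sd2 << 30);
}

/* CSD v1 (SDHC): capacity = (C_SIZE + 1) * 1024 blocks of 512 bytes. */
static uint64_t sdhc_bytes(unsigned int c_size)
{
        return ((uint64_t)(c_size + 1) << 10) << 9;
}

int main(void)
{
        printf("CMD8 arg          : 0x%x\n", if_cond_arg(0x00FF8000));
        printf("ACMD41 arg (SD2.0): 0x%x\n", acmd41_arg(0x00FF8000, 1));
        printf("C_SIZE 8191       : %llu bytes\n",
               (unsigned long long)sdhc_bytes(8191));
        return 0;
}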
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 87713572293f..05ba8ace70e7 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -237,13 +237,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
237 brq.mrq.cmd = &brq.cmd; 237 brq.mrq.cmd = &brq.cmd;
238 brq.mrq.data = &brq.data; 238 brq.mrq.data = &brq.data;
239 239
240 brq.cmd.arg = req->sector << 9; 240 brq.cmd.arg = req->sector;
241 if (!mmc_card_blockaddr(card))
242 brq.cmd.arg <<= 9;
241 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 243 brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
242 brq.data.blksz = 1 << md->block_bits; 244 brq.data.blksz = 1 << md->block_bits;
243 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
244 brq.stop.opcode = MMC_STOP_TRANSMISSION; 245 brq.stop.opcode = MMC_STOP_TRANSMISSION;
245 brq.stop.arg = 0; 246 brq.stop.arg = 0;
246 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC; 247 brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
248 brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
249 if (brq.data.blocks > card->host->max_blk_count)
250 brq.data.blocks = card->host->max_blk_count;
247 251
248 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ); 252 mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
249 253
@@ -375,9 +379,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
375 spin_unlock_irq(&md->lock); 379 spin_unlock_irq(&md->lock);
376 } 380 }
377 381
382flush_queue:
383
378 mmc_card_release_host(card); 384 mmc_card_release_host(card);
379 385
380flush_queue:
381 spin_lock_irq(&md->lock); 386 spin_lock_irq(&md->lock);
382 while (ret) { 387 while (ret) {
383 ret = end_that_request_chunk(req, 0, 388 ret = end_that_request_chunk(req, 0,
@@ -494,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
494 struct mmc_command cmd; 499 struct mmc_command cmd;
495 int err; 500 int err;
496 501
502 /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
503 if (mmc_card_blockaddr(card))
504 return 0;
505
497 mmc_card_claim_host(card); 506 mmc_card_claim_host(card);
498 cmd.opcode = MMC_SET_BLOCKLEN; 507 cmd.opcode = MMC_SET_BLOCKLEN;
499 cmd.arg = 1 << md->block_bits; 508 cmd.arg = 1 << md->block_bits;
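
mmc_block now forms the data command argument as a sector number for block-addressed (SDHC) cards and only shifts it into a byte offset for byte-addressed ones, clamps the per-request block count to the host's new max_blk_count, and skips MMC_SET_BLOCKLEN entirely since block-addressed cards ignore it. A tiny standalone illustration of the argument calculation and the clamp; it is deliberately simplified and does not touch the real block layer.

#include <stdio.h>

/* Byte-addressed cards take a byte offset in the command argument;
 * block-addressed (SDHC) cards take the sector number directly. */
static unsigned int rw_arg(unsigned int sector, int blockaddr)
{
        return blockaddr ? sector : sector << 9;
}

/* A request may never ask for more blocks than the host advertises. */
static unsigned int clamp_blocks(unsigned int blocks, unsigned int max_blk_count)
{
        return blocks > max_blk_count ? max_blk_count : blocks;
}

int main(void)
{
        printf("arg for sector 100, SDHC  : %u\n", rw_arg(100, 1));
        printf("arg for sector 100, SD 1.x: %u\n", rw_arg(100, 0));
        printf("blocks after clamp        : %u\n", clamp_blocks(2048, 1024));
        return 0;
}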
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 3e35a43819fb..c27e42645cdb 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -147,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
147 147
148 blk_queue_prep_rq(mq->queue, mmc_prep_request); 148 blk_queue_prep_rq(mq->queue, mmc_prep_request);
149 blk_queue_bounce_limit(mq->queue, limit); 149 blk_queue_bounce_limit(mq->queue, limit);
150 blk_queue_max_sectors(mq->queue, host->max_sectors); 150 blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); 151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); 152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
153 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 153 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index e334acd045bc..d32698b02d7f 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -199,7 +199,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
199 memset(card, 0, sizeof(struct mmc_card)); 199 memset(card, 0, sizeof(struct mmc_card));
200 card->host = host; 200 card->host = host;
201 device_initialize(&card->dev); 201 device_initialize(&card->dev);
202 card->dev.parent = mmc_dev(host); 202 card->dev.parent = mmc_classdev(host);
203 card->dev.bus = &mmc_bus_type; 203 card->dev.bus = &mmc_bus_type;
204 card->dev.release = mmc_release_card; 204 card->dev.release = mmc_release_card;
205} 205}
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index ccfe6561be24..5941dd951e82 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -524,15 +524,24 @@ static int mmci_probe(struct amba_device *dev, void *id)
524 /* 524 /*
525 * Since we only have a 16-bit data length register, we must 525 * Since we only have a 16-bit data length register, we must
526 * ensure that we don't exceed 2^16-1 bytes in a single request. 526 * ensure that we don't exceed 2^16-1 bytes in a single request.
527 * Choose 64 (512-byte) sectors as the limit.
528 */ 527 */
529 mmc->max_sectors = 64; 528 mmc->max_req_size = 65535;
530 529
531 /* 530 /*
532 * Set the maximum segment size. Since we aren't doing DMA 531 * Set the maximum segment size. Since we aren't doing DMA
533 * (yet) we are only limited by the data length register. 532 * (yet) we are only limited by the data length register.
534 */ 533 */
535 mmc->max_seg_size = mmc->max_sectors << 9; 534 mmc->max_seg_size = mmc->max_req_size;
535
536 /*
537 * Block size can be up to 2048 bytes, but must be a power of two.
538 */
539 mmc->max_blk_size = 2048;
540
541 /*
542 * No limit on the number of blocks transferred.
543 */
544 mmc->max_blk_count = mmc->max_req_size;
536 545
537 spin_lock_init(&host->lock); 546 spin_lock_init(&host->lock);
538 547
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index d30540b27614..1e96a2f65022 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -1099,8 +1099,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1099 */ 1099 */
1100 mmc->max_phys_segs = 32; 1100 mmc->max_phys_segs = 32;
1101 mmc->max_hw_segs = 32; 1101 mmc->max_hw_segs = 32;
1102 mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */ 1102 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1103 mmc->max_seg_size = mmc->max_sectors * 512; 1103 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1104 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1105 mmc->max_seg_size = mmc->max_req_size;
1104 1106
1105 if (host->power_pin >= 0) { 1107 if (host->power_pin >= 0) {
1106 if ((ret = omap_request_gpio(host->power_pin)) != 0) { 1108 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index 6073d998b11f..9774fc68b61a 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -450,6 +450,16 @@ static int pxamci_probe(struct platform_device *pdev)
450 */ 450 */
451 mmc->max_seg_size = PAGE_SIZE; 451 mmc->max_seg_size = PAGE_SIZE;
452 452
453 /*
454 * Block length register is 10 bits.
455 */
456 mmc->max_blk_size = 1023;
457
458 /*
459 * Block count register is 16 bits.
460 */
461 mmc->max_blk_count = 65535;
462
453 host = mmc_priv(mmc); 463 host = mmc_priv(mmc);
454 host->mmc = mmc; 464 host->mmc = mmc;
455 host->dma = -1; 465 host->dma = -1;
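
The host-driver hunks above (at91_mci, au1xmmc, imxmmc, mmci, omap, pxamci) all follow the same interface change: the single max_sectors limit is replaced by three explicit limits, max_blk_size, max_blk_count and max_req_size, and mmc_queue.c caps the block queue with max_req_size / 512. The standalone arithmetic below shows how an effective sector limit falls out of the new fields; the struct and numbers are illustrative, with the example values loosely modelled on the MMCI case.

#include <stdio.h>

/* Illustrative copy of the three new per-host limits. */
struct host_limits {
        unsigned int max_blk_size;      /* largest single block, bytes */
        unsigned int max_blk_count;     /* most blocks in one request  */
        unsigned int max_req_size;      /* most bytes in one request   */
};

/* Largest number of 512-byte sectors one request can carry. */
static unsigned int max_sectors(const struct host_limits *l)
{
        unsigned int by_count = l->max_blk_count;
        unsigned int by_bytes = l->max_req_size / 512;

        return by_count < by_bytes ? by_count : by_bytes;
}

int main(void)
{
        /* Loosely modelled on MMCI and its 16-bit data length register. */
        struct host_limits mmci = { 2048, 65535, 65535 };

        printf("up to %u sectors per request\n", max_sectors(&mmci));
        return 0;
}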
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index c2d13d7e9911..4bf1fea5e2c4 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -37,6 +37,7 @@ static unsigned int debug_quirks = 0;
37#define SDHCI_QUIRK_FORCE_DMA (1<<1) 37#define SDHCI_QUIRK_FORCE_DMA (1<<1)
38/* Controller doesn't like some resets when there is no card inserted. */ 38/* Controller doesn't like some resets when there is no card inserted. */
39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) 39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
40#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
40 41
41static const struct pci_device_id pci_ids[] __devinitdata = { 42static const struct pci_device_id pci_ids[] __devinitdata = {
42 { 43 {
@@ -65,6 +66,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
65 .driver_data = SDHCI_QUIRK_FORCE_DMA, 66 .driver_data = SDHCI_QUIRK_FORCE_DMA,
66 }, 67 },
67 68
69 {
70 .vendor = PCI_VENDOR_ID_ENE,
71 .device = PCI_DEVICE_ID_ENE_CB712_SD,
72 .subvendor = PCI_ANY_ID,
73 .subdevice = PCI_ANY_ID,
74 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE,
75 },
76
68 { /* Generic SD host controller */ 77 { /* Generic SD host controller */
69 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 78 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
70 }, 79 },
@@ -197,15 +206,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
197 * * 206 * *
198\*****************************************************************************/ 207\*****************************************************************************/
199 208
200static inline char* sdhci_kmap_sg(struct sdhci_host* host) 209static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
201{ 210{
202 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ); 211 return page_address(host->cur_sg->page) + host->cur_sg->offset;
203 return host->mapped_sg + host->cur_sg->offset;
204}
205
206static inline void sdhci_kunmap_sg(struct sdhci_host* host)
207{
208 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
209} 212}
210 213
211static inline int sdhci_next_sg(struct sdhci_host* host) 214static inline int sdhci_next_sg(struct sdhci_host* host)
@@ -240,7 +243,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
240 chunk_remain = 0; 243 chunk_remain = 0;
241 data = 0; 244 data = 0;
242 245
243 buffer = sdhci_kmap_sg(host) + host->offset; 246 buffer = sdhci_sg_to_buffer(host) + host->offset;
244 247
245 while (blksize) { 248 while (blksize) {
246 if (chunk_remain == 0) { 249 if (chunk_remain == 0) {
@@ -264,16 +267,13 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
264 } 267 }
265 268
266 if (host->remain == 0) { 269 if (host->remain == 0) {
267 sdhci_kunmap_sg(host);
268 if (sdhci_next_sg(host) == 0) { 270 if (sdhci_next_sg(host) == 0) {
269 BUG_ON(blksize != 0); 271 BUG_ON(blksize != 0);
270 return; 272 return;
271 } 273 }
272 buffer = sdhci_kmap_sg(host); 274 buffer = sdhci_sg_to_buffer(host);
273 } 275 }
274 } 276 }
275
276 sdhci_kunmap_sg(host);
277} 277}
278 278
279static void sdhci_write_block_pio(struct sdhci_host *host) 279static void sdhci_write_block_pio(struct sdhci_host *host)
@@ -290,7 +290,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
290 data = 0; 290 data = 0;
291 291
292 bytes = 0; 292 bytes = 0;
293 buffer = sdhci_kmap_sg(host) + host->offset; 293 buffer = sdhci_sg_to_buffer(host) + host->offset;
294 294
295 while (blksize) { 295 while (blksize) {
296 size = min(host->size, host->remain); 296 size = min(host->size, host->remain);
@@ -314,16 +314,13 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
314 } 314 }
315 315
316 if (host->remain == 0) { 316 if (host->remain == 0) {
317 sdhci_kunmap_sg(host);
318 if (sdhci_next_sg(host) == 0) { 317 if (sdhci_next_sg(host) == 0) {
319 BUG_ON(blksize != 0); 318 BUG_ON(blksize != 0);
320 return; 319 return;
321 } 320 }
322 buffer = sdhci_kmap_sg(host); 321 buffer = sdhci_sg_to_buffer(host);
323 } 322 }
324 } 323 }
325
326 sdhci_kunmap_sg(host);
327} 324}
328 325
329static void sdhci_transfer_pio(struct sdhci_host *host) 326static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -372,7 +369,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
372 369
373 /* Sanity checks */ 370 /* Sanity checks */
374 BUG_ON(data->blksz * data->blocks > 524288); 371 BUG_ON(data->blksz * data->blocks > 524288);
375 BUG_ON(data->blksz > host->max_block); 372 BUG_ON(data->blksz > host->mmc->max_blk_size);
376 BUG_ON(data->blocks > 65535); 373 BUG_ON(data->blocks > 65535);
377 374
378 /* timeout in us */ 375 /* timeout in us */
@@ -674,10 +671,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
674 if (host->power == power) 671 if (host->power == power)
675 return; 672 return;
676 673
677 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 674 if (power == (unsigned short)-1) {
678 675 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
679 if (power == (unsigned short)-1)
680 goto out; 676 goto out;
677 }
678
679 /*
680 * Spec says that we should clear the power reg before setting
681 * a new value. Some controllers don't seem to like this though.
682 */
683 if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
684 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
681 685
682 pwr = SDHCI_POWER_ON; 686 pwr = SDHCI_POWER_ON;
683 687
@@ -1109,7 +1113,9 @@ static int sdhci_resume (struct pci_dev *pdev)
1109 1113
1110 pci_set_power_state(pdev, PCI_D0); 1114 pci_set_power_state(pdev, PCI_D0);
1111 pci_restore_state(pdev); 1115 pci_restore_state(pdev);
1112 pci_enable_device(pdev); 1116 ret = pci_enable_device(pdev);
1117 if (ret)
1118 return ret;
1113 1119
1114 for (i = 0;i < chip->num_slots;i++) { 1120 for (i = 0;i < chip->num_slots;i++) {
1115 if (!chip->hosts[i]) 1121 if (!chip->hosts[i])
@@ -1274,15 +1280,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1274 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1280 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1275 host->timeout_clk *= 1000; 1281 host->timeout_clk *= 1000;
1276 1282
1277 host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1278 if (host->max_block >= 3) {
1279 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1280 host->slot_descr);
1281 ret = -ENODEV;
1282 goto unmap;
1283 }
1284 host->max_block = 512 << host->max_block;
1285
1286 /* 1283 /*
1287 * Set host parameters. 1284 * Set host parameters.
1288 */ 1285 */
@@ -1294,9 +1291,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1294 mmc->ocr_avail = 0; 1291 mmc->ocr_avail = 0;
1295 if (caps & SDHCI_CAN_VDD_330) 1292 if (caps & SDHCI_CAN_VDD_330)
1296 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1293 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
1297 else if (caps & SDHCI_CAN_VDD_300) 1294 if (caps & SDHCI_CAN_VDD_300)
1298 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 1295 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
1299 else if (caps & SDHCI_CAN_VDD_180) 1296 if (caps & SDHCI_CAN_VDD_180)
1300 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; 1297 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
1301 1298
1302 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { 1299 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
@@ -1326,15 +1323,33 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1326 1323
1327 /* 1324 /*
1328 * Maximum number of sectors in one transfer. Limited by DMA boundary 1325 * Maximum number of sectors in one transfer. Limited by DMA boundary
1329 * size (512KiB), which means (512 KiB/512=) 1024 entries. 1326 * size (512KiB).
1330 */ 1327 */
1331 mmc->max_sectors = 1024; 1328 mmc->max_req_size = 524288;
1332 1329
1333 /* 1330 /*
1334 * Maximum segment size. Could be one segment with the maximum number 1331 * Maximum segment size. Could be one segment with the maximum number
1335 * of sectors. 1332 * of bytes.
1333 */
1334 mmc->max_seg_size = mmc->max_req_size;
1335
1336 /*
1337 * Maximum block size. This varies from controller to controller and
1338 * is specified in the capabilities register.
1339 */
1340 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1341 if (mmc->max_blk_size >= 3) {
1342 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1343 host->slot_descr);
1344 ret = -ENODEV;
1345 goto unmap;
1346 }
1347 mmc->max_blk_size = 512 << mmc->max_blk_size;
1348
1349 /*
1350 * Maximum block count.
1336 */ 1351 */
1337 mmc->max_seg_size = mmc->max_sectors * 512; 1352 mmc->max_blk_count = 65535;
1338 1353
1339 /* 1354 /*
1340 * Init tasklets. 1355 * Init tasklets.
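A standalone sketch of the capability decoding moved into the block above (mask and shift values assumed for illustration): the 2-bit field selects 512, 1024 or 2048 bytes, and the value 3 is treated as an invalid encoding, matching the -ENODEV path.

#include <stdio.h>

#define MAX_BLOCK_MASK   0x00030000u    /* assumed field position */
#define MAX_BLOCK_SHIFT  16

static int caps_to_max_blk_size(unsigned int caps)
{
        unsigned int field = (caps & MAX_BLOCK_MASK) >> MAX_BLOCK_SHIFT;

        if (field >= 3)
                return -1;              /* reserved encoding -> reject the slot */
        return 512 << field;            /* 0, 1, 2 -> 512, 1024, 2048 */
}

int main(void)
{
        printf("%d %d %d\n",
               caps_to_max_blk_size(0x00000000),        /*  512 */
               caps_to_max_blk_size(0x00020000),        /* 2048 */
               caps_to_max_blk_size(0x00030000));       /*   -1 */
        return 0;
}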
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h
index f9d1a0a6f03a..e324f0a623dc 100644
--- a/drivers/mmc/sdhci.h
+++ b/drivers/mmc/sdhci.h
@@ -174,7 +174,6 @@ struct sdhci_host {
174 174
175 unsigned int max_clk; /* Max possible freq (MHz) */ 175 unsigned int max_clk; /* Max possible freq (MHz) */
176 unsigned int timeout_clk; /* Timeout freq (KHz) */ 176 unsigned int timeout_clk; /* Timeout freq (KHz) */
177 unsigned int max_block; /* Max block size (bytes) */
178 177
179 unsigned int clock; /* Current clock (MHz) */ 178 unsigned int clock; /* Current clock (MHz) */
180 unsigned short power; /* Current voltage */ 179 unsigned short power; /* Current voltage */
@@ -184,7 +183,6 @@ struct sdhci_host {
184 struct mmc_data *data; /* Current data request */ 183 struct mmc_data *data; /* Current data request */
185 184
186 struct scatterlist *cur_sg; /* We're working on this */ 185 struct scatterlist *cur_sg; /* We're working on this */
187 char *mapped_sg; /* This is where it's mapped */
188 int num_sg; /* Entries left */ 186 int num_sg; /* Entries left */
189 int offset; /* Offset into current sg */ 187 int offset; /* Offset into current sg */
190 int remain; /* Bytes left in current */ 188 int remain; /* Bytes left in current */
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index fa4a52886b97..e65f8a0a9349 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -17,7 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19#define DRIVER_NAME "tifm_sd" 19#define DRIVER_NAME "tifm_sd"
20#define DRIVER_VERSION "0.6" 20#define DRIVER_VERSION "0.7"
21 21
22static int no_dma = 0; 22static int no_dma = 0;
23static int fixed_timeout = 0; 23static int fixed_timeout = 0;
@@ -79,7 +79,6 @@ typedef enum {
79 79
80enum { 80enum {
81 FIFO_RDY = 0x0001, /* hardware dependent value */ 81 FIFO_RDY = 0x0001, /* hardware dependent value */
82 HOST_REG = 0x0002,
83 EJECT = 0x0004, 82 EJECT = 0x0004,
84 EJECT_DONE = 0x0008, 83 EJECT_DONE = 0x0008,
85 CARD_BUSY = 0x0010, 84 CARD_BUSY = 0x0010,
@@ -95,46 +94,53 @@ struct tifm_sd {
95 card_state_t state; 94 card_state_t state;
96 unsigned int clk_freq; 95 unsigned int clk_freq;
97 unsigned int clk_div; 96 unsigned int clk_div;
98 unsigned long timeout_jiffies; // software timeout - 2 sec 97 unsigned long timeout_jiffies;
99 98
99 struct tasklet_struct finish_tasklet;
100 struct timer_list timer;
100 struct mmc_request *req; 101 struct mmc_request *req;
101 struct work_struct cmd_handler; 102 wait_queue_head_t notify;
102 struct delayed_work abort_handler;
103 wait_queue_head_t can_eject;
104 103
105 size_t written_blocks; 104 size_t written_blocks;
106 char *buffer;
107 size_t buffer_size; 105 size_t buffer_size;
108 size_t buffer_pos; 106 size_t buffer_pos;
109 107
110}; 108};
111 109
110static char* tifm_sd_data_buffer(struct mmc_data *data)
111{
112 return page_address(data->sg->page) + data->sg->offset;
113}
114
112static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, 115static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
113 unsigned int host_status) 116 unsigned int host_status)
114{ 117{
115 struct mmc_command *cmd = host->req->cmd; 118 struct mmc_command *cmd = host->req->cmd;
116 unsigned int t_val = 0, cnt = 0; 119 unsigned int t_val = 0, cnt = 0;
120 char *buffer;
117 121
118 if (host_status & TIFM_MMCSD_BRS) { 122 if (host_status & TIFM_MMCSD_BRS) {
119 /* in non-dma rx mode BRS fires when fifo is still not empty */ 123 /* in non-dma rx mode BRS fires when fifo is still not empty */
120 if (host->buffer && (cmd->data->flags & MMC_DATA_READ)) { 124 if (no_dma && (cmd->data->flags & MMC_DATA_READ)) {
125 buffer = tifm_sd_data_buffer(host->req->data);
121 while (host->buffer_size > host->buffer_pos) { 126 while (host->buffer_size > host->buffer_pos) {
122 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 127 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
123 host->buffer[host->buffer_pos++] = t_val & 0xff; 128 buffer[host->buffer_pos++] = t_val & 0xff;
124 host->buffer[host->buffer_pos++] = 129 buffer[host->buffer_pos++] =
125 (t_val >> 8) & 0xff; 130 (t_val >> 8) & 0xff;
126 } 131 }
127 } 132 }
128 return 1; 133 return 1;
129 } else if (host->buffer) { 134 } else if (no_dma) {
135 buffer = tifm_sd_data_buffer(host->req->data);
130 if ((cmd->data->flags & MMC_DATA_READ) && 136 if ((cmd->data->flags & MMC_DATA_READ) &&
131 (host_status & TIFM_MMCSD_AF)) { 137 (host_status & TIFM_MMCSD_AF)) {
132 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 138 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
133 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 139 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
134 if (host->buffer_size > host->buffer_pos) { 140 if (host->buffer_size > host->buffer_pos) {
135 host->buffer[host->buffer_pos++] = 141 buffer[host->buffer_pos++] =
136 t_val & 0xff; 142 t_val & 0xff;
137 host->buffer[host->buffer_pos++] = 143 buffer[host->buffer_pos++] =
138 (t_val >> 8) & 0xff; 144 (t_val >> 8) & 0xff;
139 } 145 }
140 } 146 }
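A small standalone sketch of the byte packing visible in the PIO paths above (no hardware access, names invented): the data FIFO is moved as 16-bit words, low byte first, which is why the buffer position advances by two per register access.

#include <stdio.h>

static unsigned int pack_word(const unsigned char *buf, unsigned int pos)
{
        return buf[pos] | (buf[pos + 1] << 8);          /* write direction */
}

static void unpack_word(unsigned char *buf, unsigned int pos, unsigned int val)
{
        buf[pos] = val & 0xff;                          /* read direction */
        buf[pos + 1] = (val >> 8) & 0xff;
}

int main(void)
{
        unsigned char in[4] = { 0x11, 0x22, 0x33, 0x44 };
        unsigned char out[4] = { 0 };
        unsigned int pos;

        for (pos = 0; pos < sizeof(in); pos += 2)
                unpack_word(out, pos, pack_word(in, pos));

        printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
        return 0;
}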
@@ -142,11 +148,12 @@ static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
142 && (host_status & TIFM_MMCSD_AE)) { 148 && (host_status & TIFM_MMCSD_AE)) {
143 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 149 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
144 if (host->buffer_size > host->buffer_pos) { 150 if (host->buffer_size > host->buffer_pos) {
145 t_val = host->buffer[host->buffer_pos++] & 0x00ff; 151 t_val = buffer[host->buffer_pos++]
146 t_val |= ((host->buffer[host->buffer_pos++]) << 8) 152 & 0x00ff;
147 & 0xff00; 153 t_val |= ((buffer[host->buffer_pos++])
154 << 8) & 0xff00;
148 writel(t_val, 155 writel(t_val,
149 sock->addr + SOCK_MMCSD_DATA); 156 sock->addr + SOCK_MMCSD_DATA);
150 } 157 }
151 } 158 }
152 } 159 }
@@ -206,7 +213,7 @@ static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
206 cmd_mask |= TIFM_MMCSD_READ; 213 cmd_mask |= TIFM_MMCSD_READ;
207 214
208 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", 215 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
209 cmd->opcode, cmd->arg, cmd_mask); 216 cmd->opcode, cmd->arg, cmd_mask);
210 217
211 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); 218 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
212 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); 219 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
@@ -239,65 +246,78 @@ change_state:
239 tifm_sd_fetch_resp(cmd, sock); 246 tifm_sd_fetch_resp(cmd, sock);
240 if (cmd->data) { 247 if (cmd->data) {
241 host->state = BRS; 248 host->state = BRS;
242 } else 249 } else {
243 host->state = READY; 250 host->state = READY;
251 }
244 goto change_state; 252 goto change_state;
245 } 253 }
246 break; 254 break;
247 case BRS: 255 case BRS:
248 if (tifm_sd_transfer_data(sock, host, host_status)) { 256 if (tifm_sd_transfer_data(sock, host, host_status)) {
249 if (!host->req->stop) { 257 if (cmd->data->flags & MMC_DATA_WRITE) {
250 if (cmd->data->flags & MMC_DATA_WRITE) { 258 host->state = CARD;
251 host->state = CARD; 259 } else {
260 if (no_dma) {
261 if (host->req->stop) {
262 tifm_sd_exec(host, host->req->stop);
263 host->state = SCMD;
264 } else {
265 host->state = READY;
266 }
252 } else { 267 } else {
253 host->state = 268 host->state = FIFO;
254 host->buffer ? READY : FIFO;
255 } 269 }
256 goto change_state;
257 } 270 }
258 tifm_sd_exec(host, host->req->stop); 271 goto change_state;
259 host->state = SCMD;
260 } 272 }
261 break; 273 break;
262 case SCMD: 274 case SCMD:
263 if (host_status & TIFM_MMCSD_EOC) { 275 if (host_status & TIFM_MMCSD_EOC) {
264 tifm_sd_fetch_resp(host->req->stop, sock); 276 tifm_sd_fetch_resp(host->req->stop, sock);
265 if (cmd->error) { 277 host->state = READY;
266 host->state = READY;
267 } else if (cmd->data->flags & MMC_DATA_WRITE) {
268 host->state = CARD;
269 } else {
270 host->state = host->buffer ? READY : FIFO;
271 }
272 goto change_state; 278 goto change_state;
273 } 279 }
274 break; 280 break;
275 case CARD: 281 case CARD:
282 dev_dbg(&sock->dev, "waiting for CARD, have %zd blocks\n",
283 host->written_blocks);
276 if (!(host->flags & CARD_BUSY) 284 if (!(host->flags & CARD_BUSY)
277 && (host->written_blocks == cmd->data->blocks)) { 285 && (host->written_blocks == cmd->data->blocks)) {
278 host->state = host->buffer ? READY : FIFO; 286 if (no_dma) {
287 if (host->req->stop) {
288 tifm_sd_exec(host, host->req->stop);
289 host->state = SCMD;
290 } else {
291 host->state = READY;
292 }
293 } else {
294 host->state = FIFO;
295 }
279 goto change_state; 296 goto change_state;
280 } 297 }
281 break; 298 break;
282 case FIFO: 299 case FIFO:
283 if (host->flags & FIFO_RDY) { 300 if (host->flags & FIFO_RDY) {
284 host->state = READY;
285 host->flags &= ~FIFO_RDY; 301 host->flags &= ~FIFO_RDY;
302 if (host->req->stop) {
303 tifm_sd_exec(host, host->req->stop);
304 host->state = SCMD;
305 } else {
306 host->state = READY;
307 }
286 goto change_state; 308 goto change_state;
287 } 309 }
288 break; 310 break;
289 case READY: 311 case READY:
290 queue_work(sock->wq, &host->cmd_handler); 312 tasklet_schedule(&host->finish_tasklet);
291 return; 313 return;
292 } 314 }
293 315
294 queue_delayed_work(sock->wq, &host->abort_handler,
295 host->timeout_jiffies);
296} 316}
297 317
298/* Called from interrupt handler */ 318/* Called from interrupt handler */
299static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, 319static void tifm_sd_signal_irq(struct tifm_dev *sock,
300 unsigned int sock_irq_status) 320 unsigned int sock_irq_status)
301{ 321{
302 struct tifm_sd *host; 322 struct tifm_sd *host;
303 unsigned int host_status = 0, fifo_status = 0; 323 unsigned int host_status = 0, fifo_status = 0;
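A standalone sketch of the next-state decision taken by the reworked BRS case above (simplified, enum names invented): writes always wait for the card, PIO reads either issue the stop command or finish, and DMA reads wait for the FIFO to drain.

#include <stdio.h>

enum state { BRS, SCMD, CARD, FIFO, READY };

static enum state next_after_brs(int is_write, int no_dma, int has_stop)
{
        if (is_write)
                return CARD;                            /* wait for programming */
        if (no_dma)
                return has_stop ? SCMD : READY;         /* PIO read path */
        return FIFO;                                    /* DMA read path */
}

int main(void)
{
        printf("%d %d %d\n",
               next_after_brs(1, 0, 1),         /* CARD */
               next_after_brs(0, 1, 1),         /* SCMD */
               next_after_brs(0, 0, 0));        /* FIFO */
        return 0;
}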
@@ -305,7 +325,6 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
305 325
306 spin_lock(&sock->lock); 326 spin_lock(&sock->lock);
307 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); 327 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
308 cancel_delayed_work(&host->abort_handler);
309 328
310 if (sock_irq_status & FIFO_EVENT) { 329 if (sock_irq_status & FIFO_EVENT) {
311 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); 330 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
@@ -318,19 +337,17 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
318 host_status = readl(sock->addr + SOCK_MMCSD_STATUS); 337 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
319 writel(host_status, sock->addr + SOCK_MMCSD_STATUS); 338 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
320 339
321 if (!(host->flags & HOST_REG))
322 queue_work(sock->wq, &host->cmd_handler);
323 if (!host->req) 340 if (!host->req)
324 goto done; 341 goto done;
325 342
326 if (host_status & TIFM_MMCSD_ERRMASK) { 343 if (host_status & TIFM_MMCSD_ERRMASK) {
327 if (host_status & TIFM_MMCSD_CERR) 344 if (host_status & TIFM_MMCSD_CERR)
328 error_code = MMC_ERR_FAILED; 345 error_code = MMC_ERR_FAILED;
329 else if (host_status & 346 else if (host_status
330 (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) 347 & (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO))
331 error_code = MMC_ERR_TIMEOUT; 348 error_code = MMC_ERR_TIMEOUT;
332 else if (host_status & 349 else if (host_status
333 (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) 350 & (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC))
334 error_code = MMC_ERR_BADCRC; 351 error_code = MMC_ERR_BADCRC;
335 352
336 writel(TIFM_FIFO_INT_SETALL, 353 writel(TIFM_FIFO_INT_SETALL,
@@ -340,12 +357,11 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
340 if (host->req->stop) { 357 if (host->req->stop) {
341 if (host->state == SCMD) { 358 if (host->state == SCMD) {
342 host->req->stop->error = error_code; 359 host->req->stop->error = error_code;
343 } else if(host->state == BRS) { 360 } else if (host->state == BRS
361 || host->state == CARD
362 || host->state == FIFO) {
344 host->req->cmd->error = error_code; 363 host->req->cmd->error = error_code;
345 tifm_sd_exec(host, host->req->stop); 364 tifm_sd_exec(host, host->req->stop);
346 queue_delayed_work(sock->wq,
347 &host->abort_handler,
348 host->timeout_jiffies);
349 host->state = SCMD; 365 host->state = SCMD;
350 goto done; 366 goto done;
351 } else { 367 } else {
@@ -359,8 +375,8 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
359 375
360 if (host_status & TIFM_MMCSD_CB) 376 if (host_status & TIFM_MMCSD_CB)
361 host->flags |= CARD_BUSY; 377 host->flags |= CARD_BUSY;
362 if ((host_status & TIFM_MMCSD_EOFB) && 378 if ((host_status & TIFM_MMCSD_EOFB)
363 (host->flags & CARD_BUSY)) { 379 && (host->flags & CARD_BUSY)) {
364 host->written_blocks++; 380 host->written_blocks++;
365 host->flags &= ~CARD_BUSY; 381 host->flags &= ~CARD_BUSY;
366 } 382 }
@@ -370,22 +386,22 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
370 tifm_sd_process_cmd(sock, host, host_status); 386 tifm_sd_process_cmd(sock, host, host_status);
371done: 387done:
372 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", 388 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n",
373 host_status, fifo_status); 389 host_status, fifo_status);
374 spin_unlock(&sock->lock); 390 spin_unlock(&sock->lock);
375 return sock_irq_status;
376} 391}
377 392
378static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) 393static void tifm_sd_prepare_data(struct tifm_sd *host, struct mmc_command *cmd)
379{ 394{
380 struct tifm_dev *sock = card->dev; 395 struct tifm_dev *sock = host->dev;
381 unsigned int dest_cnt; 396 unsigned int dest_cnt;
382 397
383 /* DMA style IO */ 398 /* DMA style IO */
384 399 dev_dbg(&sock->dev, "setting dma for %d blocks\n",
400 cmd->data->blocks);
385 writel(TIFM_FIFO_INT_SETALL, 401 writel(TIFM_FIFO_INT_SETALL,
386 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); 402 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
387 writel(ilog2(cmd->data->blksz) - 2, 403 writel(ilog2(cmd->data->blksz) - 2,
388 sock->addr + SOCK_FIFO_PAGE_SIZE); 404 sock->addr + SOCK_FIFO_PAGE_SIZE);
389 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); 405 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL);
390 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); 406 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
391 407
@@ -399,7 +415,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
399 if (cmd->data->flags & MMC_DATA_WRITE) { 415 if (cmd->data->flags & MMC_DATA_WRITE) {
400 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 416 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
401 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, 417 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN,
402 sock->addr + SOCK_DMA_CONTROL); 418 sock->addr + SOCK_DMA_CONTROL);
403 } else { 419 } else {
404 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 420 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
405 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); 421 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL);
@@ -407,7 +423,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
407} 423}
408 424
409static void tifm_sd_set_data_timeout(struct tifm_sd *host, 425static void tifm_sd_set_data_timeout(struct tifm_sd *host,
410 struct mmc_data *data) 426 struct mmc_data *data)
411{ 427{
412 struct tifm_dev *sock = host->dev; 428 struct tifm_dev *sock = host->dev;
413 unsigned int data_timeout = data->timeout_clks; 429 unsigned int data_timeout = data->timeout_clks;
@@ -416,22 +432,21 @@ static void tifm_sd_set_data_timeout(struct tifm_sd *host,
416 return; 432 return;
417 433
418 data_timeout += data->timeout_ns / 434 data_timeout += data->timeout_ns /
419 ((1000000000 / host->clk_freq) * host->clk_div); 435 ((1000000000UL / host->clk_freq) * host->clk_div);
420 data_timeout *= 10; // call it fudge factor for now
421 436
422 if (data_timeout < 0xffff) { 437 if (data_timeout < 0xffff) {
423 writel((~TIFM_MMCSD_DPE) &
424 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
425 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
426 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 438 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
439 writel((~TIFM_MMCSD_DPE)
440 & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
441 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
427 } else { 442 } else {
428 writel(TIFM_MMCSD_DPE |
429 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
430 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
431 data_timeout = (data_timeout >> 10) + 1; 443 data_timeout = (data_timeout >> 10) + 1;
432 if(data_timeout > 0xffff) 444 if (data_timeout > 0xffff)
433 data_timeout = 0; /* set to unlimited */ 445 data_timeout = 0; /* set to unlimited */
434 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 446 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
447 writel(TIFM_MMCSD_DPE
448 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
449 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
435 } 450 }
436} 451}
437 452
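A standalone sketch of the timeout arithmetic above (plain C, invented names): nanoseconds are converted to divided-clock ticks, and once the result no longer fits the 16-bit register the /1024 prescaler path (the DPE bit) is used, with 0 meaning "unlimited".

#include <stdio.h>

struct to_setting {
        unsigned int reg;       /* value for the data timeout register */
        int prescaled;          /* nonzero if the prescaler bit must be set */
};

static struct to_setting data_timeout(unsigned int timeout_clks,
                                      unsigned int timeout_ns,
                                      unsigned int clk_freq,
                                      unsigned int clk_div)
{
        struct to_setting s = { 0, 0 };
        unsigned int ticks = timeout_clks
                + timeout_ns / ((1000000000UL / clk_freq) * clk_div);

        if (ticks < 0xffff) {
                s.reg = ticks;
        } else {
                s.prescaled = 1;
                s.reg = (ticks >> 10) + 1;
                if (s.reg > 0xffff)
                        s.reg = 0;      /* 0 means "unlimited" */
        }
        return s;
}

int main(void)
{
        /* 300 ms at 20 MHz with divider 61: 98360 ticks, needs the prescaler */
        struct to_setting s = data_timeout(0, 300000000, 20000000, 61);

        printf("reg=%u prescaled=%d\n", s.reg, s.prescaled);    /* reg=97 prescaled=1 */
        return 0;
}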
@@ -474,11 +489,10 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
474 } 489 }
475 490
476 host->req = mrq; 491 host->req = mrq;
492 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
477 host->state = CMD; 493 host->state = CMD;
478 queue_delayed_work(sock->wq, &host->abort_handler,
479 host->timeout_jiffies);
480 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 494 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
481 sock->addr + SOCK_CONTROL); 495 sock->addr + SOCK_CONTROL);
482 tifm_sd_exec(host, mrq->cmd); 496 tifm_sd_exec(host, mrq->cmd);
483 spin_unlock_irqrestore(&sock->lock, flags); 497 spin_unlock_irqrestore(&sock->lock, flags);
484 return; 498 return;
@@ -493,9 +507,9 @@ err_out:
493 mmc_request_done(mmc, mrq); 507 mmc_request_done(mmc, mrq);
494} 508}
495 509
496static void tifm_sd_end_cmd(struct work_struct *work) 510static void tifm_sd_end_cmd(unsigned long data)
497{ 511{
498 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 512 struct tifm_sd *host = (struct tifm_sd*)data;
499 struct tifm_dev *sock = host->dev; 513 struct tifm_dev *sock = host->dev;
500 struct mmc_host *mmc = tifm_get_drvdata(sock); 514 struct mmc_host *mmc = tifm_get_drvdata(sock);
501 struct mmc_request *mrq; 515 struct mmc_request *mrq;
@@ -504,6 +518,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
504 518
505 spin_lock_irqsave(&sock->lock, flags); 519 spin_lock_irqsave(&sock->lock, flags);
506 520
521 del_timer(&host->timer);
507 mrq = host->req; 522 mrq = host->req;
508 host->req = NULL; 523 host->req = NULL;
509 host->state = IDLE; 524 host->state = IDLE;
@@ -517,8 +532,8 @@ static void tifm_sd_end_cmd(struct work_struct *work)
517 r_data = mrq->cmd->data; 532 r_data = mrq->cmd->data;
518 if (r_data) { 533 if (r_data) {
519 if (r_data->flags & MMC_DATA_WRITE) { 534 if (r_data->flags & MMC_DATA_WRITE) {
520 r_data->bytes_xfered = host->written_blocks * 535 r_data->bytes_xfered = host->written_blocks
521 r_data->blksz; 536 * r_data->blksz;
522 } else { 537 } else {
523 r_data->bytes_xfered = r_data->blocks - 538 r_data->bytes_xfered = r_data->blocks -
524 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 539 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -532,7 +547,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
532 } 547 }
533 548
534 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 549 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
535 sock->addr + SOCK_CONTROL); 550 sock->addr + SOCK_CONTROL);
536 551
537 spin_unlock_irqrestore(&sock->lock, flags); 552 spin_unlock_irqrestore(&sock->lock, flags);
538 mmc_request_done(mmc, mrq); 553 mmc_request_done(mmc, mrq);
@@ -544,15 +559,6 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
544 struct tifm_dev *sock = host->dev; 559 struct tifm_dev *sock = host->dev;
545 unsigned long flags; 560 unsigned long flags;
546 struct mmc_data *r_data = mrq->cmd->data; 561 struct mmc_data *r_data = mrq->cmd->data;
547 char *t_buffer = NULL;
548
549 if (r_data) {
550 t_buffer = kmap(r_data->sg->page);
551 if (!t_buffer) {
552 printk(KERN_ERR DRIVER_NAME ": kmap failed\n");
553 goto err_out;
554 }
555 }
556 562
557 spin_lock_irqsave(&sock->lock, flags); 563 spin_lock_irqsave(&sock->lock, flags);
558 if (host->flags & EJECT) { 564 if (host->flags & EJECT) {
@@ -569,15 +575,14 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
569 if (r_data) { 575 if (r_data) {
570 tifm_sd_set_data_timeout(host, r_data); 576 tifm_sd_set_data_timeout(host, r_data);
571 577
572 host->buffer = t_buffer + r_data->sg->offset; 578 host->buffer_size = mrq->cmd->data->blocks
573 host->buffer_size = mrq->cmd->data->blocks * 579 * mrq->cmd->data->blksz;
574 mrq->cmd->data->blksz;
575 580
576 writel(TIFM_MMCSD_BUFINT | 581 writel(TIFM_MMCSD_BUFINT
577 readl(sock->addr + SOCK_MMCSD_INT_ENABLE), 582 | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
578 sock->addr + SOCK_MMCSD_INT_ENABLE); 583 sock->addr + SOCK_MMCSD_INT_ENABLE);
579 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) | 584 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
580 (TIFM_MMCSD_FIFO_SIZE - 1), 585 | (TIFM_MMCSD_FIFO_SIZE - 1),
581 sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 586 sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
582 587
583 host->written_blocks = 0; 588 host->written_blocks = 0;
@@ -588,26 +593,22 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
588 } 593 }
589 594
590 host->req = mrq; 595 host->req = mrq;
596 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
591 host->state = CMD; 597 host->state = CMD;
592 queue_delayed_work(sock->wq, &host->abort_handler,
593 host->timeout_jiffies);
594 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 598 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
595 sock->addr + SOCK_CONTROL); 599 sock->addr + SOCK_CONTROL);
596 tifm_sd_exec(host, mrq->cmd); 600 tifm_sd_exec(host, mrq->cmd);
597 spin_unlock_irqrestore(&sock->lock, flags); 601 spin_unlock_irqrestore(&sock->lock, flags);
598 return; 602 return;
599 603
600err_out: 604err_out:
601 if (t_buffer)
602 kunmap(r_data->sg->page);
603
604 mrq->cmd->error = MMC_ERR_TIMEOUT; 605 mrq->cmd->error = MMC_ERR_TIMEOUT;
605 mmc_request_done(mmc, mrq); 606 mmc_request_done(mmc, mrq);
606} 607}
607 608
608static void tifm_sd_end_cmd_nodma(struct work_struct *work) 609static void tifm_sd_end_cmd_nodma(unsigned long data)
609{ 610{
610 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 611 struct tifm_sd *host = (struct tifm_sd*)data;
611 struct tifm_dev *sock = host->dev; 612 struct tifm_dev *sock = host->dev;
612 struct mmc_host *mmc = tifm_get_drvdata(sock); 613 struct mmc_host *mmc = tifm_get_drvdata(sock);
613 struct mmc_request *mrq; 614 struct mmc_request *mrq;
@@ -616,6 +617,7 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
616 617
617 spin_lock_irqsave(&sock->lock, flags); 618 spin_lock_irqsave(&sock->lock, flags);
618 619
620 del_timer(&host->timer);
619 mrq = host->req; 621 mrq = host->req;
620 host->req = NULL; 622 host->req = NULL;
621 host->state = IDLE; 623 host->state = IDLE;
@@ -633,8 +635,8 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
633 sock->addr + SOCK_MMCSD_INT_ENABLE); 635 sock->addr + SOCK_MMCSD_INT_ENABLE);
634 636
635 if (r_data->flags & MMC_DATA_WRITE) { 637 if (r_data->flags & MMC_DATA_WRITE) {
636 r_data->bytes_xfered = host->written_blocks * 638 r_data->bytes_xfered = host->written_blocks
637 r_data->blksz; 639 * r_data->blksz;
638 } else { 640 } else {
639 r_data->bytes_xfered = r_data->blocks - 641 r_data->bytes_xfered = r_data->blocks -
640 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 642 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -642,29 +644,44 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
642 r_data->bytes_xfered += r_data->blksz - 644 r_data->bytes_xfered += r_data->blksz -
643 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; 645 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
644 } 646 }
645 host->buffer = NULL;
646 host->buffer_pos = 0; 647 host->buffer_pos = 0;
647 host->buffer_size = 0; 648 host->buffer_size = 0;
648 } 649 }
649 650
650 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 651 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
651 sock->addr + SOCK_CONTROL); 652 sock->addr + SOCK_CONTROL);
652 653
653 spin_unlock_irqrestore(&sock->lock, flags); 654 spin_unlock_irqrestore(&sock->lock, flags);
654 655
655 if (r_data)
656 kunmap(r_data->sg->page);
657
658 mmc_request_done(mmc, mrq); 656 mmc_request_done(mmc, mrq);
659} 657}
660 658
661static void tifm_sd_abort(struct work_struct *work) 659static void tifm_sd_terminate(struct tifm_sd *host)
660{
661 struct tifm_dev *sock = host->dev;
662 unsigned long flags;
663
664 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
665 mmiowb();
666 spin_lock_irqsave(&sock->lock, flags);
667 host->flags |= EJECT;
668 if (host->req) {
669 writel(TIFM_FIFO_INT_SETALL,
670 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
671 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
672 tasklet_schedule(&host->finish_tasklet);
673 }
674 spin_unlock_irqrestore(&sock->lock, flags);
675}
676
677static void tifm_sd_abort(unsigned long data)
662{ 678{
663 struct tifm_sd *host = 679 struct tifm_sd *host = (struct tifm_sd*)data;
664 container_of(work, struct tifm_sd, abort_handler.work);
665 680
666 printk(KERN_ERR DRIVER_NAME 681 printk(KERN_ERR DRIVER_NAME
667 ": card failed to respond for a long period of time"); 682 ": card failed to respond for a long period of time");
683
684 tifm_sd_terminate(host);
668 tifm_eject(host->dev); 685 tifm_eject(host->dev);
669} 686}
670 687
@@ -683,9 +700,9 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
683 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), 700 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
684 sock->addr + SOCK_MMCSD_CONFIG); 701 sock->addr + SOCK_MMCSD_CONFIG);
685 } else { 702 } else {
686 writel((~TIFM_MMCSD_4BBUS) & 703 writel((~TIFM_MMCSD_4BBUS)
687 readl(sock->addr + SOCK_MMCSD_CONFIG), 704 & readl(sock->addr + SOCK_MMCSD_CONFIG),
688 sock->addr + SOCK_MMCSD_CONFIG); 705 sock->addr + SOCK_MMCSD_CONFIG);
689 } 706 }
690 707
691 if (ios->clock) { 708 if (ios->clock) {
@@ -704,23 +721,24 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
704 if ((20000000 / clk_div1) > (24000000 / clk_div2)) { 721 if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
705 host->clk_freq = 20000000; 722 host->clk_freq = 20000000;
706 host->clk_div = clk_div1; 723 host->clk_div = clk_div1;
707 writel((~TIFM_CTRL_FAST_CLK) & 724 writel((~TIFM_CTRL_FAST_CLK)
708 readl(sock->addr + SOCK_CONTROL), 725 & readl(sock->addr + SOCK_CONTROL),
709 sock->addr + SOCK_CONTROL); 726 sock->addr + SOCK_CONTROL);
710 } else { 727 } else {
711 host->clk_freq = 24000000; 728 host->clk_freq = 24000000;
712 host->clk_div = clk_div2; 729 host->clk_div = clk_div2;
713 writel(TIFM_CTRL_FAST_CLK | 730 writel(TIFM_CTRL_FAST_CLK
714 readl(sock->addr + SOCK_CONTROL), 731 | readl(sock->addr + SOCK_CONTROL),
715 sock->addr + SOCK_CONTROL); 732 sock->addr + SOCK_CONTROL);
716 } 733 }
717 } else { 734 } else {
718 host->clk_div = 0; 735 host->clk_div = 0;
719 } 736 }
720 host->clk_div &= TIFM_MMCSD_CLKMASK; 737 host->clk_div &= TIFM_MMCSD_CLKMASK;
721 writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) & 738 writel(host->clk_div
722 readl(sock->addr + SOCK_MMCSD_CONFIG)), 739 | ((~TIFM_MMCSD_CLKMASK)
723 sock->addr + SOCK_MMCSD_CONFIG); 740 & readl(sock->addr + SOCK_MMCSD_CONFIG)),
741 sock->addr + SOCK_MMCSD_CONFIG);
724 742
725 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 743 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
726 host->flags |= OPENDRAIN; 744 host->flags |= OPENDRAIN;
@@ -734,7 +752,7 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
734 // allow removal. 752 // allow removal.
735 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { 753 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) {
736 host->flags |= EJECT_DONE; 754 host->flags |= EJECT_DONE;
737 wake_up_all(&host->can_eject); 755 wake_up_all(&host->notify);
738 } 756 }
739 757
740 spin_unlock_irqrestore(&sock->lock, flags); 758 spin_unlock_irqrestore(&sock->lock, flags);
@@ -762,20 +780,67 @@ static struct mmc_host_ops tifm_sd_ops = {
762 .get_ro = tifm_sd_ro 780 .get_ro = tifm_sd_ro
763}; 781};
764 782
765static void tifm_sd_register_host(struct work_struct *work) 783static int tifm_sd_initialize_host(struct tifm_sd *host)
766{ 784{
767 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 785 int rc;
786 unsigned int host_status = 0;
768 struct tifm_dev *sock = host->dev; 787 struct tifm_dev *sock = host->dev;
769 struct mmc_host *mmc = tifm_get_drvdata(sock);
770 unsigned long flags;
771 788
772 spin_lock_irqsave(&sock->lock, flags); 789 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
773 host->flags |= HOST_REG; 790 mmiowb();
774 PREPARE_WORK(&host->cmd_handler, 791 host->clk_div = 61;
775 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); 792 host->clk_freq = 20000000;
776 spin_unlock_irqrestore(&sock->lock, flags); 793 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
777 dev_dbg(&sock->dev, "adding host\n"); 794 writel(host->clk_div | TIFM_MMCSD_POWER,
778 mmc_add_host(mmc); 795 sock->addr + SOCK_MMCSD_CONFIG);
796
797 /* wait up to 0.51 sec for reset */
798 for (rc = 2; rc <= 256; rc <<= 1) {
799 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
800 rc = 0;
801 break;
802 }
803 msleep(rc);
804 }
805
806 if (rc) {
807 printk(KERN_ERR DRIVER_NAME
808 ": controller failed to reset\n");
809 return -ENODEV;
810 }
811
812 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
813 writel(host->clk_div | TIFM_MMCSD_POWER,
814 sock->addr + SOCK_MMCSD_CONFIG);
815 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
816
817 // command timeout fixed to 64 clocks for now
818 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
819 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
820
821 /* INAB should take much less than reset */
822 for (rc = 1; rc <= 16; rc <<= 1) {
823 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
824 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
825 if (!(host_status & TIFM_MMCSD_ERRMASK)
826 && (host_status & TIFM_MMCSD_EOC)) {
827 rc = 0;
828 break;
829 }
830 msleep(rc);
831 }
832
833 if (rc) {
834 printk(KERN_ERR DRIVER_NAME
835 ": card not ready - probe failed on initialization\n");
836 return -ENODEV;
837 }
838
839 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
840 sock->addr + SOCK_MMCSD_INT_ENABLE);
841 mmiowb();
842
843 return 0;
779} 844}
780 845
781static int tifm_sd_probe(struct tifm_dev *sock) 846static int tifm_sd_probe(struct tifm_dev *sock)
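A standalone sketch of the doubling poll loop in tifm_sd_initialize_host() above (poll_ready() is a made-up stand-in for the status register read): sleeping 2, 4, ..., 256 ms between checks adds up to 510 ms, which is where the "0.51 sec" comment comes from.

#include <stdio.h>

static int poll_ready(int attempts_needed, int attempt)
{
        return attempt >= attempts_needed;      /* pretend hardware readiness */
}

static int wait_for_reset(int attempts_needed)
{
        int delay, attempt = 0, waited = 0;

        for (delay = 2; delay <= 256; delay <<= 1) {
                if (poll_ready(attempts_needed, attempt++))
                        return waited;          /* ready: report ms spent so far */
                waited += delay;                /* the driver would msleep(delay) */
        }
        return -1;                              /* gave up after 2+4+...+256 = 510 ms */
}

int main(void)
{
        printf("ready after %d ms\n", wait_for_reset(3));       /* 2+4+8 = 14 ms */
        printf("timed out: %d\n", wait_for_reset(100));         /* -1 */
        return 0;
}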
@@ -784,8 +849,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
784 struct tifm_sd *host; 849 struct tifm_sd *host;
785 int rc = -EIO; 850 int rc = -EIO;
786 851
787 if (!(TIFM_SOCK_STATE_OCCUPIED & 852 if (!(TIFM_SOCK_STATE_OCCUPIED
788 readl(sock->addr + SOCK_PRESENT_STATE))) { 853 & readl(sock->addr + SOCK_PRESENT_STATE))) {
789 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); 854 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n");
790 return rc; 855 return rc;
791 } 856 }
@@ -795,109 +860,99 @@ static int tifm_sd_probe(struct tifm_dev *sock)
795 return -ENOMEM; 860 return -ENOMEM;
796 861
797 host = mmc_priv(mmc); 862 host = mmc_priv(mmc);
798 host->dev = sock;
799 host->clk_div = 61;
800 init_waitqueue_head(&host->can_eject);
801 INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
802 INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
803
804 tifm_set_drvdata(sock, mmc); 863 tifm_set_drvdata(sock, mmc);
805 sock->signal_irq = tifm_sd_signal_irq; 864 host->dev = sock;
806
807 host->clk_freq = 20000000;
808 host->timeout_jiffies = msecs_to_jiffies(1000); 865 host->timeout_jiffies = msecs_to_jiffies(1000);
809 866
867 init_waitqueue_head(&host->notify);
868 tasklet_init(&host->finish_tasklet,
869 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
870 (unsigned long)host);
871 setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
872
810 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; 873 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request;
811 mmc->ops = &tifm_sd_ops; 874 mmc->ops = &tifm_sd_ops;
812 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 875 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
813 mmc->caps = MMC_CAP_4_BIT_DATA; 876 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE;
814 mmc->f_min = 20000000 / 60; 877 mmc->f_min = 20000000 / 60;
815 mmc->f_max = 24000000; 878 mmc->f_max = 24000000;
816 mmc->max_hw_segs = 1; 879 mmc->max_hw_segs = 1;
817 mmc->max_phys_segs = 1; 880 mmc->max_phys_segs = 1;
818 mmc->max_sectors = 127; 881 // limited by DMA counter - it's safer to stick with
819 mmc->max_seg_size = mmc->max_sectors << 11; //2k maximum hw block length 882 // block counter has 11 bits though
820 883 mmc->max_blk_count = 256;
821 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); 884 // 2k maximum hw block length
822 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); 885 mmc->max_blk_size = 2048;
823 writel(host->clk_div | TIFM_MMCSD_POWER, 886 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
824 sock->addr + SOCK_MMCSD_CONFIG); 887 mmc->max_seg_size = mmc->max_req_size;
888 sock->signal_irq = tifm_sd_signal_irq;
889 rc = tifm_sd_initialize_host(host);
825 890
826 for (rc = 0; rc < 50; rc++) { 891 if (!rc)
827 /* Wait for reset ack */ 892 rc = mmc_add_host(mmc);
828 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { 893 if (rc)
829 rc = 0; 894 goto out_free_mmc;
830 break;
831 }
832 msleep(10);
833 }
834 895
835 if (rc) { 896 return 0;
836 printk(KERN_ERR DRIVER_NAME 897out_free_mmc:
837 ": card not ready - probe failed\n"); 898 mmc_free_host(mmc);
838 mmc_free_host(mmc); 899 return rc;
839 return -ENODEV; 900}
840 }
841 901
842 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); 902static void tifm_sd_remove(struct tifm_dev *sock)
843 writel(host->clk_div | TIFM_MMCSD_POWER, 903{
844 sock->addr + SOCK_MMCSD_CONFIG); 904 struct mmc_host *mmc = tifm_get_drvdata(sock);
845 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 905 struct tifm_sd *host = mmc_priv(mmc);
846 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
847 sock->addr + SOCK_MMCSD_INT_ENABLE);
848 906
849 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); // command timeout 64 clocks for now 907 del_timer_sync(&host->timer);
850 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); 908 tifm_sd_terminate(host);
851 writel(host->clk_div | TIFM_MMCSD_POWER, 909 wait_event_timeout(host->notify, host->flags & EJECT_DONE,
852 sock->addr + SOCK_MMCSD_CONFIG); 910 host->timeout_jiffies);
911 tasklet_kill(&host->finish_tasklet);
912 mmc_remove_host(mmc);
853 913
854 queue_delayed_work(sock->wq, &host->abort_handler, 914 /* The meaning of the bit majority in this constant is unknown. */
855 host->timeout_jiffies); 915 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
916 sock->addr + SOCK_CONTROL);
856 917
857 return 0; 918 tifm_set_drvdata(sock, NULL);
919 mmc_free_host(mmc);
858} 920}
859 921
860static int tifm_sd_host_is_down(struct tifm_dev *sock) 922#ifdef CONFIG_PM
923
924static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
861{ 925{
862 struct mmc_host *mmc = tifm_get_drvdata(sock); 926 struct mmc_host *mmc = tifm_get_drvdata(sock);
863 struct tifm_sd *host = mmc_priv(mmc); 927 int rc;
864 unsigned long flags;
865 int rc = 0;
866 928
867 spin_lock_irqsave(&sock->lock, flags); 929 rc = mmc_suspend_host(mmc, state);
868 rc = (host->flags & EJECT_DONE); 930 /* The meaning of the bit majority in this constant is unknown. */
869 spin_unlock_irqrestore(&sock->lock, flags); 931 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
932 sock->addr + SOCK_CONTROL);
870 return rc; 933 return rc;
871} 934}
872 935
873static void tifm_sd_remove(struct tifm_dev *sock) 936static int tifm_sd_resume(struct tifm_dev *sock)
874{ 937{
875 struct mmc_host *mmc = tifm_get_drvdata(sock); 938 struct mmc_host *mmc = tifm_get_drvdata(sock);
876 struct tifm_sd *host = mmc_priv(mmc); 939 struct tifm_sd *host = mmc_priv(mmc);
877 unsigned long flags;
878 940
879 spin_lock_irqsave(&sock->lock, flags); 941 if (sock->media_id != FM_SD
880 host->flags |= EJECT; 942 || tifm_sd_initialize_host(host)) {
881 if (host->req) 943 tifm_eject(sock);
882 queue_work(sock->wq, &host->cmd_handler); 944 return 0;
883 spin_unlock_irqrestore(&sock->lock, flags); 945 } else {
884 wait_event_timeout(host->can_eject, tifm_sd_host_is_down(sock), 946 return mmc_resume_host(mmc);
885 host->timeout_jiffies); 947 }
948}
886 949
887 if (host->flags & HOST_REG) 950#else
888 mmc_remove_host(mmc);
889 951
890 /* The meaning of the bit majority in this constant is unknown. */ 952#define tifm_sd_suspend NULL
891 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), 953#define tifm_sd_resume NULL
892 sock->addr + SOCK_CONTROL);
893 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
894 writel(TIFM_FIFO_INT_SETALL,
895 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
896 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
897 954
898 tifm_set_drvdata(sock, NULL); 955#endif /* CONFIG_PM */
899 mmc_free_host(mmc);
900}
901 956
902static tifm_media_id tifm_sd_id_tbl[] = { 957static tifm_media_id tifm_sd_id_tbl[] = {
903 FM_SD, 0 958 FM_SD, 0
@@ -910,7 +965,9 @@ static struct tifm_driver tifm_sd_driver = {
910 }, 965 },
911 .id_table = tifm_sd_id_tbl, 966 .id_table = tifm_sd_id_tbl,
912 .probe = tifm_sd_probe, 967 .probe = tifm_sd_probe,
913 .remove = tifm_sd_remove 968 .remove = tifm_sd_remove,
969 .suspend = tifm_sd_suspend,
970 .resume = tifm_sd_resume
914}; 971};
915 972
916static int __init tifm_sd_init(void) 973static int __init tifm_sd_init(void)
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 7a282672f8e9..a44d8777ab9f 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver 2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 * 3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved. 4 * Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
272 return host->num_sg; 272 return host->num_sg;
273} 273}
274 274
275static inline char *wbsd_kmap_sg(struct wbsd_host *host) 275static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
276{ 276{
277 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) + 277 return page_address(host->cur_sg->page) + host->cur_sg->offset;
278 host->cur_sg->offset;
279 return host->mapped_sg;
280}
281
282static inline void wbsd_kunmap_sg(struct wbsd_host *host)
283{
284 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
285} 278}
286 279
287static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) 280static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
302 * we do not transfer too much. 295 * we do not transfer too much.
303 */ 296 */
304 for (i = 0; i < len; i++) { 297 for (i = 0; i < len; i++) {
305 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 298 sgbuf = page_address(sg[i].page) + sg[i].offset;
306 if (size < sg[i].length) 299 if (size < sg[i].length)
307 memcpy(dmabuf, sgbuf, size); 300 memcpy(dmabuf, sgbuf, size);
308 else 301 else
309 memcpy(dmabuf, sgbuf, sg[i].length); 302 memcpy(dmabuf, sgbuf, sg[i].length);
310 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
311 dmabuf += sg[i].length; 303 dmabuf += sg[i].length;
312 304
313 if (size < sg[i].length) 305 if (size < sg[i].length)
@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
347 * we do not transfer too much. 339 * we do not transfer too much.
348 */ 340 */
349 for (i = 0; i < len; i++) { 341 for (i = 0; i < len; i++) {
350 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 342 sgbuf = page_address(sg[i].page) + sg[i].offset;
351 if (size < sg[i].length) 343 if (size < sg[i].length)
352 memcpy(sgbuf, dmabuf, size); 344 memcpy(sgbuf, dmabuf, size);
353 else 345 else
@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
497 if (data->bytes_xfered == host->size) 489 if (data->bytes_xfered == host->size)
498 return; 490 return;
499 491
500 buffer = wbsd_kmap_sg(host) + host->offset; 492 buffer = wbsd_sg_to_buffer(host) + host->offset;
501 493
502 /* 494 /*
503 * Drain the fifo. This has a tendency to loop longer 495 * Drain the fifo. This has a tendency to loop longer
@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
526 /* 518 /*
527 * Transfer done? 519 * Transfer done?
528 */ 520 */
529 if (data->bytes_xfered == host->size) { 521 if (data->bytes_xfered == host->size)
530 wbsd_kunmap_sg(host);
531 return; 522 return;
532 }
533 523
534 /* 524 /*
535 * End of scatter list entry? 525 * End of scatter list entry?
536 */ 526 */
537 if (host->remain == 0) { 527 if (host->remain == 0) {
538 wbsd_kunmap_sg(host);
539
540 /* 528 /*
541 * Get next entry. Check if last. 529 * Get next entry. Check if last.
542 */ 530 */
@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
554 return; 542 return;
555 } 543 }
556 544
557 buffer = wbsd_kmap_sg(host); 545 buffer = wbsd_sg_to_buffer(host);
558 } 546 }
559 } 547 }
560 } 548 }
561 549
562 wbsd_kunmap_sg(host);
563
564 /* 550 /*
565 * This is a very dirty hack to solve a 551 * This is a very dirty hack to solve a
566 * hardware problem. The chip doesn't trigger 552 * hardware problem. The chip doesn't trigger
@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
583 if (data->bytes_xfered == host->size) 569 if (data->bytes_xfered == host->size)
584 return; 570 return;
585 571
586 buffer = wbsd_kmap_sg(host) + host->offset; 572 buffer = wbsd_sg_to_buffer(host) + host->offset;
587 573
588 /* 574 /*
589 * Fill the fifo. This has a tendency to loop longer 575 * Fill the fifo. This has a tendency to loop longer
@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
612 /* 598 /*
613 * Transfer done? 599 * Transfer done?
614 */ 600 */
615 if (data->bytes_xfered == host->size) { 601 if (data->bytes_xfered == host->size)
616 wbsd_kunmap_sg(host);
617 return; 602 return;
618 }
619 603
620 /* 604 /*
621 * End of scatter list entry? 605 * End of scatter list entry?
622 */ 606 */
623 if (host->remain == 0) { 607 if (host->remain == 0) {
624 wbsd_kunmap_sg(host);
625
626 /* 608 /*
627 * Get next entry. Check if last. 609 * Get next entry. Check if last.
628 */ 610 */
@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
640 return; 622 return;
641 } 623 }
642 624
643 buffer = wbsd_kmap_sg(host); 625 buffer = wbsd_sg_to_buffer(host);
644 } 626 }
645 } 627 }
646 } 628 }
647 629
648 wbsd_kunmap_sg(host);
649
650 /* 630 /*
651 * The controller stops sending interrupts for 631 * The controller stops sending interrupts for
652 * 'FIFO empty' under certain conditions. So we 632 * 'FIFO empty' under certain conditions. So we
@@ -910,6 +890,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
910 */ 890 */
911 if (cmd->data && (cmd->error == MMC_ERR_NONE)) { 891 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
912 /* 892 /*
893 * The hardware is so delightfully stupid that it has a list
894 * of "data" commands. If a command isn't on this list, it'll
895 * just go back to the idle state and won't send any data
896 * interrupts.
897 */
898 switch (cmd->opcode) {
899 case 11:
900 case 17:
901 case 18:
902 case 20:
903 case 24:
904 case 25:
905 case 26:
906 case 27:
907 case 30:
908 case 42:
909 case 56:
910 break;
911
912 /* ACMDs. We don't keep track of state, so we just treat them
913 * like any other command. */
914 case 51:
915 break;
916
917 default:
918#ifdef CONFIG_MMC_DEBUG
919 printk(KERN_WARNING "%s: Data command %d is not "
920 "supported by this controller.\n",
921 mmc_hostname(host->mmc), cmd->opcode);
922#endif
923 cmd->data->error = MMC_ERR_INVALID;
924
925 if (cmd->data->stop)
926 wbsd_send_command(host, cmd->data->stop);
927
928 goto done;
929 };
930
931 /*
913 * Dirty fix for hardware bug. 932 * Dirty fix for hardware bug.
914 */ 933 */
915 if (host->dma == -1) 934 if (host->dma == -1)
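A standalone sketch of the opcode screening added above (helper name invented): the controller only raises data interrupts for a fixed set of commands, so a data request with any other opcode has to be failed up front.

#include <stdio.h>

static int is_supported_data_cmd(unsigned int opcode)
{
        static const unsigned int ok[] =
                { 11, 17, 18, 20, 24, 25, 26, 27, 30, 42, 51, 56 };
        unsigned int i;

        for (i = 0; i < sizeof(ok) / sizeof(ok[0]); i++)
                if (ok[i] == opcode)
                        return 1;
        return 0;
}

int main(void)
{
        printf("CMD18 (READ_MULTIPLE_BLOCK): %d\n", is_supported_data_cmd(18)); /* 1 */
        printf("CMD6 (SWITCH, has data):     %d\n", is_supported_data_cmd(6));  /* 0 */
        return 0;
}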
@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1343 mmc->max_phys_segs = 128; 1362 mmc->max_phys_segs = 128;
1344 1363
1345 /* 1364 /*
1346 * Maximum number of sectors in one transfer. Also limited by 64kB 1365 * Maximum request size. Also limited by 64KiB buffer.
1347 * buffer.
1348 */ 1366 */
1349 mmc->max_sectors = 128; 1367 mmc->max_req_size = 65536;
1350 1368
1351 /* 1369 /*
1352 * Maximum segment size. Could be one segment with the maximum number 1370 * Maximum segment size. Could be one segment with the maximum number
1353 * of segments. 1371 * of bytes.
1372 */
1373 mmc->max_seg_size = mmc->max_req_size;
1374
1375 /*
1376 * Maximum block size. We have 12 bits (= 4095) but have to subtract
1377 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
1378 */
1379 mmc->max_blk_size = 4087;
1380
1381 /*
1382 * Maximum block count. There is no real limit so the maximum
1383 * request size will be the only restriction.
1354 */ 1384 */
1355 mmc->max_seg_size = mmc->max_sectors * 512; 1385 mmc->max_blk_count = mmc->max_req_size;
1356 1386
1357 dev_set_drvdata(dev, mmc); 1387 dev_set_drvdata(dev, mmc);
1358 1388
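A standalone arithmetic sketch of the 4087-byte limit derived above: the block length register holds 12 bits (4095 max) and, with four data lines each carrying a 16-bit CRC, eight CRC bytes have to fit inside that value.

#include <stdio.h>

int main(void)
{
        unsigned int reg_max   = (1u << 12) - 1;        /* 12-bit register: 4095 */
        unsigned int crc_bytes = 4 * 2;                 /* 4 data lines x 2 CRC bytes */

        printf("max data block = %u bytes\n", reg_max - crc_bytes);     /* 4087 */
        return 0;
}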
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
index 6072993f01e3..d06718b0e2ab 100644
--- a/drivers/mmc/wbsd.h
+++ b/drivers/mmc/wbsd.h
@@ -154,7 +154,6 @@ struct wbsd_host
154 154
155 struct scatterlist* cur_sg; /* Current SG entry */ 155 struct scatterlist* cur_sg; /* Current SG entry */
156 unsigned int num_sg; /* Number of entries left */ 156 unsigned int num_sg; /* Number of entries left */
157 void* mapped_sg; /* vaddr of mapped sg */
158 157
159 unsigned int offset; /* Offset into current entry */ 158 unsigned int offset; /* Offset into current entry */
160 unsigned int remain; /* Data left in current entry */ 159 unsigned int remain; /* Data left in current entry */
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index ae89b9b88743..165af398fdea 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -103,14 +103,8 @@ config CCW_CONSOLE
103 depends on TN3215_CONSOLE || TN3270_CONSOLE 103 depends on TN3215_CONSOLE || TN3270_CONSOLE
104 default y 104 default y
105 105
106config SCLP
107 bool "Support for SCLP"
108 help
109 Include support for the SCLP interface to the service element.
110
111config SCLP_TTY 106config SCLP_TTY
112 bool "Support for SCLP line mode terminal" 107 bool "Support for SCLP line mode terminal"
113 depends on SCLP
114 help 108 help
115 Include support for IBM SCLP line-mode terminals. 109 Include support for IBM SCLP line-mode terminals.
116 110
@@ -123,7 +117,6 @@ config SCLP_CONSOLE
123 117
124config SCLP_VT220_TTY 118config SCLP_VT220_TTY
125 bool "Support for SCLP VT220-compatible terminal" 119 bool "Support for SCLP VT220-compatible terminal"
126 depends on SCLP
127 help 120 help
128 Include support for an IBM SCLP VT220-compatible terminal. 121 Include support for an IBM SCLP VT220-compatible terminal.
129 122
@@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE
136 129
137config SCLP_CPI 130config SCLP_CPI
138 tristate "Control-Program Identification" 131 tristate "Control-Program Identification"
139 depends on SCLP
140 help 132 help
141 This option enables the hardware console interface for system 133 This option enables the hardware console interface for system
142 identification. This is commonly used for workload management and 134 identification. This is commonly used for workload management and
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 9803c9352d78..5a888704a8d0 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
6
5obj-y += s390mach.o sysinfo.o s390_rdev.o 7obj-y += s390mach.o sysinfo.o s390_rdev.o
6obj-y += cio/ block/ char/ crypto/ net/ scsi/ 8obj-y += cio/ block/ char/ crypto/ net/ scsi/
7 9
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 492b68bcd7cc..eb5dc62f0d9c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,7 @@
37 */ 37 */
38debug_info_t *dasd_debug_area; 38debug_info_t *dasd_debug_area;
39struct dasd_discipline *dasd_diag_discipline_pointer; 39struct dasd_discipline *dasd_diag_discipline_pointer;
40void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
40 41
41MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 42MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
42MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 43MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
@@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device);
51static void dasd_setup_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_device * device);
52static void dasd_free_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_device * device);
53static void dasd_flush_request_queue(struct dasd_device *); 54static void dasd_flush_request_queue(struct dasd_device *);
54static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
55static int dasd_flush_ccw_queue(struct dasd_device *, int); 55static int dasd_flush_ccw_queue(struct dasd_device *, int);
56static void dasd_tasklet(struct dasd_device *); 56static void dasd_tasklet(struct dasd_device *);
57static void do_kick_device(struct work_struct *); 57static void do_kick_device(struct work_struct *);
@@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
483/* 483/*
484 * Add profiling information for cqr before execution. 484 * Add profiling information for cqr before execution.
485 */ 485 */
486static inline void 486static void
487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
488 struct request *req) 488 struct request *req)
489{ 489{
@@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
505/* 505/*
506 * Add profiling information for cqr after execution. 506 * Add profiling information for cqr after execution.
507 */ 507 */
508static inline void 508static void
509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, 509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
510 struct request *req) 510 struct request *req)
511{ 511{
@@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1022 irb->scsw.cstat == 0 && 1022 irb->scsw.cstat == 0 &&
1023 !irb->esw.esw0.erw.cons) 1023 !irb->esw.esw0.erw.cons)
1024 era = dasd_era_none; 1024 era = dasd_era_none;
1025 else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
1026 era = dasd_era_fatal; /* don't recover this request */
1027 else if (irb->esw.esw0.erw.cons) 1025 else if (irb->esw.esw0.erw.cons)
1028 era = device->discipline->examine_error(cqr, irb); 1026 era = device->discipline->examine_error(cqr, irb);
1029 else 1027 else
@@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1104/* 1102/*
1105 * Process ccw request queue. 1103 * Process ccw request queue.
1106 */ 1104 */
1107static inline void 1105static void
1108__dasd_process_ccw_queue(struct dasd_device * device, 1106__dasd_process_ccw_queue(struct dasd_device * device,
1109 struct list_head *final_queue) 1107 struct list_head *final_queue)
1110{ 1108{
@@ -1127,7 +1125,9 @@ restart:
1127 cqr->status = DASD_CQR_FAILED; 1125 cqr->status = DASD_CQR_FAILED;
1128 cqr->stopclk = get_clock(); 1126 cqr->stopclk = get_clock();
1129 } else { 1127 } else {
1130 if (cqr->irb.esw.esw0.erw.cons) { 1128 if (cqr->irb.esw.esw0.erw.cons &&
1129 test_bit(DASD_CQR_FLAGS_USE_ERP,
1130 &cqr->flags)) {
1131 erp_fn = device->discipline-> 1131 erp_fn = device->discipline->
1132 erp_action(cqr); 1132 erp_action(cqr);
1133 erp_fn(cqr); 1133 erp_fn(cqr);
@@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1181/* 1181/*
1182 * Fetch requests from the block device queue. 1182 * Fetch requests from the block device queue.
1183 */ 1183 */
1184static inline void 1184static void
1185__dasd_process_blk_queue(struct dasd_device * device) 1185__dasd_process_blk_queue(struct dasd_device * device)
1186{ 1186{
1187 request_queue_t *queue; 1187 request_queue_t *queue;
@@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device)
1232 if (IS_ERR(cqr)) { 1232 if (IS_ERR(cqr)) {
1233 if (PTR_ERR(cqr) == -ENOMEM) 1233 if (PTR_ERR(cqr) == -ENOMEM)
1234 break; /* terminate request queue loop */ 1234 break; /* terminate request queue loop */
1235 if (PTR_ERR(cqr) == -EAGAIN) {
1236 /*
1237 * The current request cannot be build right
1238 * now, we have to try later. If this request
1239 * is the head-of-queue we stop the device
1240 * for 1/2 second.
1241 */
1242 if (!list_empty(&device->ccw_queue))
1243 break;
1244 device->stopped |= DASD_STOPPED_PENDING;
1245 dasd_set_timer(device, HZ/2);
1246 break;
1247 }
1235 DBF_DEV_EVENT(DBF_ERR, device, 1248 DBF_DEV_EVENT(DBF_ERR, device,
1236 "CCW creation failed (rc=%ld) " 1249 "CCW creation failed (rc=%ld) "
1237 "on request %p", 1250 "on request %p",
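A standalone sketch of the -EAGAIN decision added above (enum names invented): a request that cannot be built yet only pauses the device, for half a second, when it sits at the head of an otherwise empty CCW queue; if other requests are in flight the builder just stops for this pass.

#include <stdio.h>

enum eagain_action { STOP_BUILD_LOOP, PAUSE_DEVICE_HALF_SECOND };

static enum eagain_action on_build_eagain(int ccw_queue_empty)
{
        if (!ccw_queue_empty)
                return STOP_BUILD_LOOP;         /* other work pending: just break */
        return PAUSE_DEVICE_HALF_SECOND;        /* head of queue: retry after HZ/2 */
}

int main(void)
{
        printf("%d %d\n", on_build_eagain(0), on_build_eagain(1));      /* 0 1 */
        return 0;
}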
@@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device)
1254 * Take a look at the first request on the ccw queue and check 1267 * Take a look at the first request on the ccw queue and check
1255 * if it reached its expire time. If so, terminate the IO. 1268 * if it reached its expire time. If so, terminate the IO.
1256 */ 1269 */
1257static inline void 1270static void
1258__dasd_check_expire(struct dasd_device * device) 1271__dasd_check_expire(struct dasd_device * device)
1259{ 1272{
1260 struct dasd_ccw_req *cqr; 1273 struct dasd_ccw_req *cqr;
@@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device)
1285 * Take a look at the first request on the ccw queue and check 1298 * Take a look at the first request on the ccw queue and check
1286 * if it needs to be started. 1299 * if it needs to be started.
1287 */ 1300 */
1288static inline void 1301static void
1289__dasd_start_head(struct dasd_device * device) 1302__dasd_start_head(struct dasd_device * device)
1290{ 1303{
1291 struct dasd_ccw_req *cqr; 1304 struct dasd_ccw_req *cqr;
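
A recurring cleanup in this series is dropping explicit inline from static functions, here in dasd.c and again in dasd_devmap.c, dasd_eckd.c, dasd_fba.c, dasd_proc.c, dcssblk.c and several sclp files below. Within a single translation unit the compiler is free to inline a static function anyway, so the keyword mostly just hides the symbol from backtraces and can bloat the image when the function is large. Minimal before/after illustration; the helpers are hypothetical:

    /* before: inlining forced by hand */
    static inline void dasd_helper_old(struct dasd_device *device) { /* ... */ }

    /* after: let the compiler decide; an out-of-line copy can then appear in backtraces */
    static void dasd_helper_new(struct dasd_device *device) { /* ... */ }
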
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4d01040c2c63..8b9d68f6e016 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
170 /* log the erp chain if fatal error occurred */ 170 /* log the erp chain if fatal error occurred */
171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { 171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
172 dasd_log_sense(cqr, irb); 172 dasd_log_sense(cqr, irb);
173 dasd_log_ccw(cqr, 0, irb->scsw.cpa);
174 } 173 }
175 174
176 return era; 175 return era;
@@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2640 2639
2641 struct dasd_ccw_req *erp = NULL; 2640 struct dasd_ccw_req *erp = NULL;
2642 struct dasd_device *device = cqr->device; 2641 struct dasd_device *device = cqr->device;
2643 __u32 cpa = cqr->irb.scsw.cpa;
2644 struct dasd_ccw_req *temp_erp = NULL; 2642 struct dasd_ccw_req *temp_erp = NULL;
2645 2643
2646 if (device->features & DASD_FEATURE_ERPLOG) { 2644 if (device->features & DASD_FEATURE_ERPLOG) {
@@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2706 } 2704 }
2707 } 2705 }
2708 2706
2709 if (erp->status == DASD_CQR_FAILED)
2710 dasd_log_ccw(erp, 1, cpa);
2711
2712 /* enqueue added ERP request */ 2707 /* enqueue added ERP request */
2713 if (erp->status == DASD_CQR_FILLED) { 2708 if (erp->status == DASD_CQR_FILLED) {
2714 erp->status = DASD_CQR_QUEUED; 2709 erp->status = DASD_CQR_QUEUED;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 5943266152f5..ed70852cc915 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup);
136/* 136/*
137 * Read a device busid/devno from a string. 137 * Read a device busid/devno from a string.
138 */ 138 */
139static inline int 139static int
140dasd_busid(char **str, int *id0, int *id1, int *devno) 140dasd_busid(char **str, int *id0, int *id1, int *devno)
141{ 141{
142 int val, old_style; 142 int val, old_style;
@@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
182 * only one: "ro" for read-only devices. The default feature set 182 * only one: "ro" for read-only devices. The default feature set
183 * is empty (value 0). 183 * is empty (value 0).
184 */ 184 */
185static inline int 185static int
186dasd_feature_list(char *str, char **endp) 186dasd_feature_list(char *str, char **endp)
187{ 187{
188 int features, len, rc; 188 int features, len, rc;
@@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) {
341 return ERR_PTR(-EINVAL); 341 return ERR_PTR(-EINVAL);
342} 342}
343 343
344static inline char * 344static char *
345dasd_parse_next_element( char *parsestring ) { 345dasd_parse_next_element( char *parsestring ) {
346 char * residual_str; 346 char * residual_str;
347 residual_str = dasd_parse_keyword(parsestring); 347 residual_str = dasd_parse_keyword(parsestring);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 53db58a68617..ab782bb46ac1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
43#define DIAG_MAX_RETRIES 32 43#define DIAG_MAX_RETRIES 32
44#define DIAG_TIMEOUT 50 * HZ 44#define DIAG_TIMEOUT 50 * HZ
45 45
46struct dasd_discipline dasd_diag_discipline; 46static struct dasd_discipline dasd_diag_discipline;
47 47
48struct dasd_diag_private { 48struct dasd_diag_private {
49 struct dasd_diag_characteristics rdc_data; 49 struct dasd_diag_characteristics rdc_data;
@@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd)
90 * block offset. On success, return zero and set end_block to contain the 90 * block offset. On success, return zero and set end_block to contain the
91 * number of blocks on the device minus the specified offset. Return non-zero 91 * number of blocks on the device minus the specified offset. Return non-zero
92 * otherwise. */ 92 * otherwise. */
93static __inline__ int 93static inline int
94mdsk_init_io(struct dasd_device *device, unsigned int blocksize, 94mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
95 blocknum_t offset, blocknum_t *end_block) 95 blocknum_t offset, blocknum_t *end_block)
96{ 96{
@@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
117 117
118/* Remove block I/O environment for device. Return zero on success, non-zero 118/* Remove block I/O environment for device. Return zero on success, non-zero
119 * otherwise. */ 119 * otherwise. */
120static __inline__ int 120static inline int
121mdsk_term_io(struct dasd_device * device) 121mdsk_term_io(struct dasd_device * device)
122{ 122{
123 struct dasd_diag_private *private; 123 struct dasd_diag_private *private;
@@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
576 "dump sense not available for DIAG data"); 576 "dump sense not available for DIAG data");
577} 577}
578 578
579struct dasd_discipline dasd_diag_discipline = { 579static struct dasd_discipline dasd_diag_discipline = {
580 .owner = THIS_MODULE, 580 .owner = THIS_MODULE,
581 .name = "DIAG", 581 .name = "DIAG",
582 .ebcname = "DIAG", 582 .ebcname = "DIAG",
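
Making dasd_diag_discipline static requires the forward declaration near the top of the file to become static as well, otherwise the two declarations would disagree on linkage. The same pattern shows up again for fs3270_fn further down. Generic illustration; the struct and the names are made up for the example:

    struct disc { const char *name; };

    static struct disc my_disc;             /* tentative definition, internal linkage */

    static const char *disc_name(void)      /* may reference it before the full definition */
    {
            return my_disc.name;
    }

    static struct disc my_disc = {          /* the real definition further down the file */
            .name = "DIAG",
    };
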
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fdaa471e845f..cecab2274a6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2)
134 return (d1 + (d2 - 1)) / d2; 134 return (d1 + (d2 - 1)) / d2;
135} 135}
136 136
137static inline int 137static unsigned int
138bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
139{
140 unsigned int fl1, fl2, int1, int2;
141 int bpr;
142
143 switch (rdc->formula) {
144 case 0x01:
145 fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
146 fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
147 ECKD_F1(rdc));
148 bpr = fl1 + fl2;
149 break;
150 case 0x02:
151 int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
152 int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
153 fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
154 ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
155 ECKD_F1(rdc));
156 fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
157 ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
158 ECKD_F1(rdc));
159 bpr = fl1 + fl2;
160 break;
161 default:
162 bpr = 0;
163 break;
164 }
165 return bpr;
166}
167
168static inline unsigned int
169bytes_per_track(struct dasd_eckd_characteristics *rdc)
170{
171 return *(unsigned int *) (rdc->byte_per_track) >> 8;
172}
173
174static inline unsigned int
175recs_per_track(struct dasd_eckd_characteristics * rdc, 138recs_per_track(struct dasd_eckd_characteristics * rdc,
176 unsigned int kl, unsigned int dl) 139 unsigned int kl, unsigned int dl)
177{ 140{
@@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc,
204 return 0; 167 return 0;
205} 168}
206 169
207static inline void 170static int
208check_XRC (struct ccw1 *de_ccw, 171check_XRC (struct ccw1 *de_ccw,
209 struct DE_eckd_data *data, 172 struct DE_eckd_data *data,
210 struct dasd_device *device) 173 struct dasd_device *device)
211{ 174{
212 struct dasd_eckd_private *private; 175 struct dasd_eckd_private *private;
176 int rc;
213 177
214 private = (struct dasd_eckd_private *) device->private; 178 private = (struct dasd_eckd_private *) device->private;
179 if (!private->rdc_data.facilities.XRC_supported)
180 return 0;
215 181
216 /* switch on System Time Stamp - needed for XRC Support */ 182 /* switch on System Time Stamp - needed for XRC Support */
217 if (private->rdc_data.facilities.XRC_supported) { 183 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
218 184 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
219 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
220 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
221
222 data->ep_sys_time = get_clock ();
223
224 de_ccw->count = sizeof (struct DE_eckd_data);
225 de_ccw->flags |= CCW_FLAG_SLI;
226 }
227 185
228 return; 186 rc = get_sync_clock(&data->ep_sys_time);
187 /* Ignore return code if sync clock is switched off. */
188 if (rc == -ENOSYS || rc == -EACCES)
189 rc = 0;
229 190
230} /* end check_XRC */ 191 de_ccw->count = sizeof (struct DE_eckd_data);
192 de_ccw->flags |= CCW_FLAG_SLI;
193 return rc;
194}
231 195
232static inline void 196static int
233define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, 197define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
234 int totrk, int cmd, struct dasd_device * device) 198 int totrk, int cmd, struct dasd_device * device)
235{ 199{
236 struct dasd_eckd_private *private; 200 struct dasd_eckd_private *private;
237 struct ch_t geo, beg, end; 201 struct ch_t geo, beg, end;
202 int rc = 0;
238 203
239 private = (struct dasd_eckd_private *) device->private; 204 private = (struct dasd_eckd_private *) device->private;
240 205
@@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
263 case DASD_ECKD_CCW_WRITE_KD_MT: 228 case DASD_ECKD_CCW_WRITE_KD_MT:
264 data->mask.perm = 0x02; 229 data->mask.perm = 0x02;
265 data->attributes.operation = private->attrib.operation; 230 data->attributes.operation = private->attrib.operation;
266 check_XRC (ccw, data, device); 231 rc = check_XRC (ccw, data, device);
267 break; 232 break;
268 case DASD_ECKD_CCW_WRITE_CKD: 233 case DASD_ECKD_CCW_WRITE_CKD:
269 case DASD_ECKD_CCW_WRITE_CKD_MT: 234 case DASD_ECKD_CCW_WRITE_CKD_MT:
270 data->attributes.operation = DASD_BYPASS_CACHE; 235 data->attributes.operation = DASD_BYPASS_CACHE;
271 check_XRC (ccw, data, device); 236 rc = check_XRC (ccw, data, device);
272 break; 237 break;
273 case DASD_ECKD_CCW_ERASE: 238 case DASD_ECKD_CCW_ERASE:
274 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 239 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
276 data->mask.perm = 0x3; 241 data->mask.perm = 0x3;
277 data->mask.auth = 0x1; 242 data->mask.auth = 0x1;
278 data->attributes.operation = DASD_BYPASS_CACHE; 243 data->attributes.operation = DASD_BYPASS_CACHE;
279 check_XRC (ccw, data, device); 244 rc = check_XRC (ccw, data, device);
280 break; 245 break;
281 default: 246 default:
282 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); 247 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
@@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
312 data->beg_ext.head = beg.head; 277 data->beg_ext.head = beg.head;
313 data->end_ext.cyl = end.cyl; 278 data->end_ext.cyl = end.cyl;
314 data->end_ext.head = end.head; 279 data->end_ext.head = end.head;
280 return rc;
315} 281}
316 282
317static inline void 283static void
318locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, 284locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
319 int rec_on_trk, int no_rec, int cmd, 285 int rec_on_trk, int no_rec, int cmd,
320 struct dasd_device * device, int reclen) 286 struct dasd_device * device, int reclen)
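
check_XRC() is reworked from a void helper into one that reports whether a usable timestamp could be obtained: it returns 0 immediately when the device does not support XRC, otherwise it fetches the time via get_sync_clock() and treats -ENOSYS/-EACCES (clock synchronization switched off) as success, while any other error is passed up through define_extent(). Condensed flow, taken from the hunks above:

    if (!private->rdc_data.facilities.XRC_supported)
            return 0;                               /* no XRC - nothing to do */

    data->ga_extended |= 0x08 | 0x02;               /* time stamp valid + extended parameter */

    rc = get_sync_clock(&data->ep_sys_time);
    if (rc == -ENOSYS || rc == -EACCES)
            rc = 0;                                 /* clock sync disabled: not an error */

    de_ccw->count = sizeof(struct DE_eckd_data);
    de_ccw->flags |= CCW_FLAG_SLI;
    return rc;                                      /* e.g. -EAGAIN reaches define_extent()'s caller */
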
@@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device)
548/* 514/*
549 * Build CP for Perform Subsystem Function - SSC. 515 * Build CP for Perform Subsystem Function - SSC.
550 */ 516 */
551struct dasd_ccw_req * 517static struct dasd_ccw_req *
552dasd_eckd_build_psf_ssc(struct dasd_device *device) 518dasd_eckd_build_psf_ssc(struct dasd_device *device)
553{ 519{
554 struct dasd_ccw_req *cqr; 520 struct dasd_ccw_req *cqr;
@@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1200 return cqr; 1166 return cqr;
1201 ccw = cqr->cpaddr; 1167 ccw = cqr->cpaddr;
1202 /* First ccw is define extent. */ 1168 /* First ccw is define extent. */
1203 define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); 1169 if (define_extent(ccw++, cqr->data, first_trk,
1170 last_trk, cmd, device) == -EAGAIN) {
1171 /* Clock not in sync and XRC is enabled. Try again later. */
1172 dasd_sfree_request(cqr, device);
1173 return ERR_PTR(-EAGAIN);
1174 }
1204 /* Build locate_record+read/write/ccws. */ 1175 /* Build locate_record+read/write/ccws. */
1205 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); 1176 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1206 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 1177 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device)
1380 cqr->device = device; 1351 cqr->device = device;
1381 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1352 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1382 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1353 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1383 cqr->retries = 0; 1354 cqr->retries = 2; /* set retry counter to enable basic ERP */
1384 cqr->expires = 2 * HZ; 1355 cqr->expires = 2 * HZ;
1385 cqr->buildclk = get_clock(); 1356 cqr->buildclk = get_clock();
1386 cqr->status = DASD_CQR_FILLED; 1357 cqr->status = DASD_CQR_FILLED;
@@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device)
1420 cqr->device = device; 1391 cqr->device = device;
1421 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1392 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1422 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1393 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1423 cqr->retries = 0; 1394 cqr->retries = 2; /* set retry counter to enable basic ERP */
1424 cqr->expires = 2 * HZ; 1395 cqr->expires = 2 * HZ;
1425 cqr->buildclk = get_clock(); 1396 cqr->buildclk = get_clock();
1426 cqr->status = DASD_CQR_FILLED; 1397 cqr->status = DASD_CQR_FILLED;
@@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1459 cqr->device = device; 1430 cqr->device = device;
1460 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1431 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1461 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1432 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1462 cqr->retries = 0; 1433 cqr->retries = 2; /* set retry counter to enable basic ERP */
1463 cqr->expires = 2 * HZ; 1434 cqr->expires = 2 * HZ;
1464 cqr->buildclk = get_clock(); 1435 cqr->buildclk = get_clock();
1465 cqr->status = DASD_CQR_FILLED; 1436 cqr->status = DASD_CQR_FILLED;
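
dasd_eckd_release(), _reserve() and _steal_lock() previously built their requests with retries = 0, so a single unit check ended them immediately; raising the counter to 2 lets the basic ERP retry them even though the full ERP chain stays disabled for these requests (DASD_CQR_FLAGS_USE_ERP is still cleared and FAILFAST is still set, as the surrounding context lines show). The relevant request setup now reads:

    clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); /* no full ERP chain */
    set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
    cqr->retries = 2;               /* but allow basic ERP to retry */
    cqr->expires = 2 * HZ;
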
@@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
1609 * Dump the range of CCWs into 'page' buffer 1580 * Dump the range of CCWs into 'page' buffer
1610 * and return number of printed chars. 1581 * and return number of printed chars.
1611 */ 1582 */
1612static inline int 1583static int
1613dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 1584dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1614{ 1585{
1615 int len, count; 1586 int len, count;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index e0bf30ebb215..6cedc914077e 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = {
658 .owner = THIS_MODULE, 658 .owner = THIS_MODULE,
659}; 659};
660 660
661static struct miscdevice dasd_eer_dev = { 661static struct miscdevice *dasd_eer_dev = NULL;
662 .minor = MISC_DYNAMIC_MINOR,
663 .name = "dasd_eer",
664 .fops = &dasd_eer_fops,
665};
666 662
667int __init dasd_eer_init(void) 663int __init dasd_eer_init(void)
668{ 664{
669 int rc; 665 int rc;
670 666
671 rc = misc_register(&dasd_eer_dev); 667 dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
668 if (!dasd_eer_dev)
669 return -ENOMEM;
670
671 dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
672 dasd_eer_dev->name = "dasd_eer";
673 dasd_eer_dev->fops = &dasd_eer_fops;
674
675 rc = misc_register(dasd_eer_dev);
672 if (rc) { 676 if (rc) {
677 kfree(dasd_eer_dev);
678 dasd_eer_dev = NULL;
673 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " 679 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
674 "register misc device"); 680 "register misc device");
675 return rc; 681 return rc;
@@ -680,5 +686,9 @@ int __init dasd_eer_init(void)
680 686
681void dasd_eer_exit(void) 687void dasd_eer_exit(void)
682{ 688{
683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0); 689 if (dasd_eer_dev) {
690 WARN_ON(misc_deregister(dasd_eer_dev) != 0);
691 kfree(dasd_eer_dev);
692 dasd_eer_dev = NULL;
693 }
684} 694}
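
dasd_eer_dev changes from a statically initialized struct miscdevice into one that is allocated in dasd_eer_init() and freed again in dasd_eer_exit(); the NULL check in the exit path also makes it safe to call when initialization never happened or failed. The lifecycle, condensed from the two hunks above (error message omitted):

    static struct miscdevice *dasd_eer_dev;         /* NULL until init succeeds */

    int __init dasd_eer_init(void)
    {
            int rc;

            dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
            if (!dasd_eer_dev)
                    return -ENOMEM;
            dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
            dasd_eer_dev->name  = "dasd_eer";
            dasd_eer_dev->fops  = &dasd_eer_fops;

            rc = misc_register(dasd_eer_dev);
            if (rc) {                               /* registration failed: roll back */
                    kfree(dasd_eer_dev);
                    dasd_eer_dev = NULL;
            }
            return rc;
    }

    void dasd_eer_exit(void)
    {
            if (!dasd_eer_dev)
                    return;
            misc_deregister(dasd_eer_dev);
            kfree(dasd_eer_dev);
            dasd_eer_dev = NULL;
    }
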
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 58a65097922b..caa5d91420f8 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
152 152
153} /* end default_erp_postaction */ 153} /* end default_erp_postaction */
154 154
155/*
156 * Print the hex dump of the memory used by a request. This includes
157 * all error recovery ccws that have been chained in from of the
158 * real request.
159 */
160static inline void
161hex_dump_memory(struct dasd_device *device, void *data, int len)
162{
163 int *pint;
164
165 pint = (int *) data;
166 while (len > 0) {
167 DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
168 pint, pint[0], pint[1], pint[2], pint[3]);
169 pint += 4;
170 len -= 16;
171 }
172}
173
174void 155void
175dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) 156dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
176{ 157{
@@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
182 device->discipline->dump_sense(device, cqr, irb); 163 device->discipline->dump_sense(device, cqr, irb);
183} 164}
184 165
185void
186dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
187{
188 struct dasd_device *device;
189 struct dasd_ccw_req *lcqr;
190 struct ccw1 *ccw;
191 int cplength;
192
193 device = cqr->device;
194 /* log the channel program */
195 for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
196 DEV_MESSAGE(KERN_ERR, device,
197 "(%s) ERP chain report for req: %p",
198 caller == 0 ? "EXAMINE" : "ACTION", lcqr);
199 hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));
200
201 cplength = 1;
202 ccw = lcqr->cpaddr;
203 while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
204 cplength++;
205
206 if (cplength > 40) { /* log only parts of the CP */
207 DEV_MESSAGE(KERN_ERR, device, "%s",
208 "Start of channel program:");
209 hex_dump_memory(device, lcqr->cpaddr,
210 40*sizeof(struct ccw1));
211
212 DEV_MESSAGE(KERN_ERR, device, "%s",
213 "End of channel program:");
214 hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
215 10*sizeof(struct ccw1));
216 } else { /* log the whole CP */
217 DEV_MESSAGE(KERN_ERR, device, "%s",
218 "Channel program (complete):");
219 hex_dump_memory(device, lcqr->cpaddr,
220 cplength*sizeof(struct ccw1));
221 }
222
223 if (lcqr != cqr)
224 continue;
225
226 /*
227 * Log bytes arround failed CCW but only if we did
228 * not log the whole CP of the CCW is outside the
229 * logged CP.
230 */
231 if (cplength > 40 ||
232 ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
233 (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
234
235 DEV_MESSAGE(KERN_ERR, device,
236 "Failed CCW (%p) (area):",
237 (void *) (long) cpa);
238 hex_dump_memory(device, cqr->cpaddr - 10,
239 20*sizeof(struct ccw1));
240 }
241 }
242
243} /* end log_erp_chain */
244
245EXPORT_SYMBOL(dasd_default_erp_action); 166EXPORT_SYMBOL(dasd_default_erp_action);
246EXPORT_SYMBOL(dasd_default_erp_postaction); 167EXPORT_SYMBOL(dasd_default_erp_postaction);
247EXPORT_SYMBOL(dasd_alloc_erp_request); 168EXPORT_SYMBOL(dasd_alloc_erp_request);
248EXPORT_SYMBOL(dasd_free_erp_request); 169EXPORT_SYMBOL(dasd_free_erp_request);
249EXPORT_SYMBOL(dasd_log_sense); 170EXPORT_SYMBOL(dasd_log_sense);
250EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index b857fd5893fd..be0909e39226 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = {
75 .notify = dasd_generic_notify, 75 .notify = dasd_generic_notify,
76}; 76};
77 77
78static inline void 78static void
79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, 79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
80 int blksize, int beg, int nr) 80 int blksize, int beg, int nr)
81{ 81{
@@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
95 data->ext_end = nr - 1; 95 data->ext_end = nr - 1;
96} 96}
97 97
98static inline void 98static void
99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, 99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
100 int block_nr, int block_ct) 100 int block_nr, int block_ct)
101{ 101{
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d163632101d2..47ba4462708d 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device)
147 */ 147 */
148 memset(&bpart, 0, sizeof(struct blkpg_partition)); 148 memset(&bpart, 0, sizeof(struct blkpg_partition));
149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); 149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
150 barg.data = (void __user *) &bpart; 150 barg.data = (void __force __user *) &bpart;
151 barg.op = BLKPG_DEL_PARTITION; 151 barg.op = BLKPG_DEL_PARTITION;
152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) 152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
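
barg.data has type void __user * because BLKPG arguments normally come from user space, but here the ioctl is issued from kernel code through ioctl_by_bdev() with an on-stack buffer, so the cast gains a __force to tell sparse that crossing the address-space annotation is intentional rather than a bug:

    struct blkpg_partition bpart;
    struct blkpg_ioctl_arg barg;

    /* kernel pointer deliberately stored in a __user-annotated field;
     * __force suppresses the sparse address-space warning for this one cast */
    barg.data = (void __force __user *) &bpart;
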
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fb725e3b08fe..a2cc69e11410 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
559 struct dasd_device *); 559 struct dasd_device *);
560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); 560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
561void dasd_log_sense(struct dasd_ccw_req *, struct irb *); 561void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
562void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
563 562
564/* externals in dasd_3370_erp.c */ 563/* externals in dasd_3370_erp.c */
565dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); 564dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bfa010f6dab2..8b7e11815d70 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL;
28static struct proc_dir_entry *dasd_devices_entry = NULL; 28static struct proc_dir_entry *dasd_devices_entry = NULL;
29static struct proc_dir_entry *dasd_statistics_entry = NULL; 29static struct proc_dir_entry *dasd_statistics_entry = NULL;
30 30
31static inline char * 31static char *
32dasd_get_user_string(const char __user *user_buf, size_t user_len) 32dasd_get_user_string(const char __user *user_buf, size_t user_len)
33{ 33{
34 char *buffer; 34 char *buffer;
@@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = {
154 .release = seq_release, 154 .release = seq_release,
155}; 155};
156 156
157static inline int 157static int
158dasd_calc_metrics(char *page, char **start, off_t off, 158dasd_calc_metrics(char *page, char **start, off_t off,
159 int count, int *eof, int len) 159 int count, int *eof, int len)
160{ 160{
@@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off,
167 return len; 167 return len;
168} 168}
169 169
170static inline char * 170static char *
171dasd_statistics_array(char *str, int *array, int shift) 171dasd_statistics_array(char *str, unsigned int *array, int shift)
172{ 172{
173 int i; 173 int i;
174 174
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index be9b05347b4f..1340451ea408 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev)
102 * device needs to be enqueued before the semaphore is 102 * device needs to be enqueued before the semaphore is
103 * freed. 103 * freed.
104 */ 104 */
105static inline int 105static int
106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) 106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
107{ 107{
108 int minor, found; 108 int minor, found;
@@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
230 SEGMENT_SHARED); 230 SEGMENT_SHARED);
231 if (rc < 0) { 231 if (rc < 0) {
232 BUG_ON(rc == -EINVAL); 232 BUG_ON(rc == -EINVAL);
233 if (rc == -EIO || rc == -ENOENT) 233 if (rc != -EAGAIN)
234 goto removeseg; 234 goto removeseg;
235 } else { 235 } else {
236 dev_info->is_shared = 1; 236 dev_info->is_shared = 1;
@@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
253 SEGMENT_EXCLUSIVE); 253 SEGMENT_EXCLUSIVE);
254 if (rc < 0) { 254 if (rc < 0) {
255 BUG_ON(rc == -EINVAL); 255 BUG_ON(rc == -EINVAL);
256 if (rc == -EIO || rc == -ENOENT) 256 if (rc != -EAGAIN)
257 goto removeseg; 257 goto removeseg;
258 } else { 258 } else {
259 dev_info->is_shared = 0; 259 dev_info->is_shared = 0;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c3e97b4fc186..293e667b50f2 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,7 +2,8 @@
2# S/390 character devices 2# S/390 character devices
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o
6 7
7obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
8obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o
11 12
12obj-$(CONFIG_TN3215) += con3215.o 13obj-$(CONFIG_TN3215) += con3215.o
13 14
14obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o 15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o 16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o 17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 25b5d7a66417..9a328f14a641 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = {
1121 * 3215 tty registration code called from tty_init(). 1121 * 3215 tty registration code called from tty_init().
 1122 * Most kernel services (incl. kmalloc) are available at this point. 1122 * Most kernel services (incl. kmalloc) are available at this point.
1123 */ 1123 */
1124int __init 1124static int __init
1125tty3215_init(void) 1125tty3215_init(void)
1126{ 1126{
1127 struct tty_driver *driver; 1127 struct tty_driver *driver;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 7566be890688..8e7f2d7633d6 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *);
69/* 69/*
70 * Setup timeout for a device. On timeout trigger an update. 70 * Setup timeout for a device. On timeout trigger an update.
71 */ 71 */
72void 72static void con3270_set_timer(struct con3270 *cp, int expires)
73con3270_set_timer(struct con3270 *cp, int expires)
74{ 73{
75 if (expires == 0) { 74 if (expires == 0) {
76 if (timer_pending(&cp->timer)) 75 if (timer_pending(&cp->timer))
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 17027d918cf7..564baca01b7c 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -5,6 +5,8 @@
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/keyboard.h> 6#include <linux/keyboard.h>
7#include <linux/kd.h> 7#include <linux/kd.h>
8#include <linux/kbd_kern.h>
9#include <linux/kbd_diacr.h>
8 10
9u_short plain_map[NR_KEYS] = { 11u_short plain_map[NR_KEYS] = {
10 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 12 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0893d306ae80..e1a746269c4c 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -23,7 +23,7 @@
23#include "raw3270.h" 23#include "raw3270.h"
24#include "ctrlchar.h" 24#include "ctrlchar.h"
25 25
26struct raw3270_fn fs3270_fn; 26static struct raw3270_fn fs3270_fn;
27 27
28struct fs3270 { 28struct fs3270 {
29 struct raw3270_view view; 29 struct raw3270_view view;
@@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view)
401} 401}
402 402
403/* View to a 3270 device. Can be console, tty or fullscreen. */ 403/* View to a 3270 device. Can be console, tty or fullscreen. */
404struct raw3270_fn fs3270_fn = { 404static struct raw3270_fn fs3270_fn = {
405 .activate = fs3270_activate, 405 .activate = fs3270_activate,
406 .deactivate = fs3270_deactivate, 406 .deactivate = fs3270_deactivate,
407 .intv = (void *) fs3270_irq, 407 .intv = (void *) fs3270_irq,
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 3e86fd1756e5..f62f9a4e8950 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
148 } 148 }
149} 149}
150 150
151#if 0
151/* 152/*
152 * Generate ebcdic -> ascii translation table from kbd_data. 153 * Generate ebcdic -> ascii translation table from kbd_data.
153 */ 154 */
@@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
173 } 174 }
174 } 175 }
175} 176}
177#endif
176 178
177/* 179/*
178 * We have a combining character DIACR here, followed by the character CH. 180 * We have a combining character DIACR here, followed by the character CH.
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index cdb24f528112..9e451acc6491 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
67 return -EINVAL; 67 return -EINVAL;
68} 68}
69 69
70static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, 70static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
71 struct monwrite_hdr *monhdr) 71 struct monwrite_hdr *monhdr)
72{ 72{
73 struct mon_buf *entry, *next; 73 struct mon_buf *entry, *next;
74 74
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7a84014f2037..8facd14adb7c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31 31
32struct class *class3270; 32static struct class *class3270;
33 33
34/* The main 3270 data structure. */ 34/* The main 3270 data structure. */
35struct raw3270 { 35struct raw3270 {
@@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
86/* 86/*
87 * Encode array for 12 bit 3270 addresses. 87 * Encode array for 12 bit 3270 addresses.
88 */ 88 */
89unsigned char raw3270_ebcgraf[64] = { 89static unsigned char raw3270_ebcgraf[64] = {
90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8a056df09d6b..f171de3b0b11 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
59/* Internal state: is a request active at the sclp? */ 59/* Internal state: is a request active at the sclp? */
60static volatile enum sclp_running_state_t { 60static volatile enum sclp_running_state_t {
61 sclp_running_state_idle, 61 sclp_running_state_idle,
62 sclp_running_state_running 62 sclp_running_state_running,
63 sclp_running_state_reset_pending
63} sclp_running_state = sclp_running_state_idle; 64} sclp_running_state = sclp_running_state_idle;
64 65
65/* Internal state: is a read request pending? */ 66/* Internal state: is a read request pending? */
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t {
88 89
89/* Timeout intervals in seconds.*/ 90/* Timeout intervals in seconds.*/
90#define SCLP_BUSY_INTERVAL 10 91#define SCLP_BUSY_INTERVAL 10
91#define SCLP_RETRY_INTERVAL 15 92#define SCLP_RETRY_INTERVAL 30
92 93
93static void sclp_process_queue(void); 94static void sclp_process_queue(void);
94static int sclp_init_mask(int calculate); 95static int sclp_init_mask(int calculate);
95static int sclp_init(void); 96static int sclp_init(void);
96 97
97/* Perform service call. Return 0 on success, non-zero otherwise. */ 98/* Perform service call. Return 0 on success, non-zero otherwise. */
98static int 99int
99service_call(sclp_cmdw_t command, void *sccb) 100sclp_service_call(sclp_cmdw_t command, void *sccb)
100{ 101{
101 int cc; 102 int cc;
102 103
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
113 return 0; 114 return 0;
114} 115}
115 116
116/* Request timeout handler. Restart the request queue. If DATA is non-zero, 117static inline void __sclp_make_read_req(void);
117 * force restart of running request. */ 118
118static void 119static void
119sclp_request_timeout(unsigned long data) 120__sclp_queue_read_req(void)
120{ 121{
121 unsigned long flags; 122 if (sclp_reading_state == sclp_reading_state_idle) {
122 123 sclp_reading_state = sclp_reading_state_reading;
123 if (data) { 124 __sclp_make_read_req();
124 spin_lock_irqsave(&sclp_lock, flags); 125 /* Add request to head of queue */
125 sclp_running_state = sclp_running_state_idle; 126 list_add(&sclp_read_req.list, &sclp_req_queue);
126 spin_unlock_irqrestore(&sclp_lock, flags);
127 } 127 }
128 sclp_process_queue();
129} 128}
130 129
131/* Set up request retry timer. Called while sclp_lock is locked. */ 130/* Set up request retry timer. Called while sclp_lock is locked. */
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
140 add_timer(&sclp_request_timer); 139 add_timer(&sclp_request_timer);
141} 140}
142 141
142/* Request timeout handler. Restart the request queue. If DATA is non-zero,
143 * force restart of running request. */
144static void
145sclp_request_timeout(unsigned long data)
146{
147 unsigned long flags;
148
149 spin_lock_irqsave(&sclp_lock, flags);
150 if (data) {
151 if (sclp_running_state == sclp_running_state_running) {
152 /* Break running state and queue NOP read event request
153 * to get a defined interface state. */
154 __sclp_queue_read_req();
155 sclp_running_state = sclp_running_state_idle;
156 }
157 } else {
158 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
159 sclp_request_timeout, 0);
160 }
161 spin_unlock_irqrestore(&sclp_lock, flags);
162 sclp_process_queue();
163}
164
143/* Try to start a request. Return zero if the request was successfully 165/* Try to start a request. Return zero if the request was successfully
144 * started or if it will be started at a later time. Return non-zero otherwise. 166 * started or if it will be started at a later time. Return non-zero otherwise.
145 * Called while sclp_lock is locked. */ 167 * Called while sclp_lock is locked. */
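
Moving sclp_request_timeout() below __sclp_set_request_timer() lets the handler re-arm the timer itself, and its behaviour changes: on a forced restart (data != 0) of a request that is still marked running, a NOP read event request is queued through the new __sclp_queue_read_req() helper so the interface reaches a defined state before the queue is restarted; on a plain busy timeout the handler simply re-arms the busy interval. Condensed control flow, identifiers as in the hunk above:

    spin_lock_irqsave(&sclp_lock, flags);
    if (data) {                                     /* forced restart */
            if (sclp_running_state == sclp_running_state_running) {
                    __sclp_queue_read_req();        /* NOP read to resync the interface */
                    sclp_running_state = sclp_running_state_idle;
            }
    } else {                                        /* still busy: try again later */
            __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                     sclp_request_timeout, 0);
    }
    spin_unlock_irqrestore(&sclp_lock, flags);
    sclp_process_queue();
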
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req)
151 if (sclp_running_state != sclp_running_state_idle) 173 if (sclp_running_state != sclp_running_state_idle)
152 return 0; 174 return 0;
153 del_timer(&sclp_request_timer); 175 del_timer(&sclp_request_timer);
154 rc = service_call(req->command, req->sccb); 176 rc = sclp_service_call(req->command, req->sccb);
155 req->start_count++; 177 req->start_count++;
156 178
157 if (rc == 0) { 179 if (rc == 0) {
@@ -191,7 +213,15 @@ sclp_process_queue(void)
191 rc = __sclp_start_request(req); 213 rc = __sclp_start_request(req);
192 if (rc == 0) 214 if (rc == 0)
193 break; 215 break;
194 /* Request failed. */ 216 /* Request failed */
217 if (req->start_count > 1) {
218 /* Cannot abort already submitted request - could still
219 * be active at the SCLP */
220 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
221 sclp_request_timeout, 0);
222 break;
223 }
224 /* Post-processing for aborted request */
195 list_del(&req->list); 225 list_del(&req->list);
196 if (req->callback) { 226 if (req->callback) {
197 spin_unlock_irqrestore(&sclp_lock, flags); 227 spin_unlock_irqrestore(&sclp_lock, flags);
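
sclp_process_queue() now distinguishes a request that was never accepted by the SCLP from one that has already been started at least once (start_count > 1): the latter may still be in flight at the SCLP, so it must not be removed from the queue; instead the busy timer is armed and the scan stops. Only a request that failed on its very first start is deleted and completed through its callback. The added policy, in isolation:

    if (rc == 0)
            break;                                  /* request accepted, wait for the interrupt */
    if (req->start_count > 1) {
            /* already submitted once - cannot abort, give the SCLP more time */
            __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                     sclp_request_timeout, 0);
            break;
    }
    list_del(&req->list);                           /* never accepted: fail it now */
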
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
221 list_add_tail(&req->list, &sclp_req_queue); 251 list_add_tail(&req->list, &sclp_req_queue);
222 rc = 0; 252 rc = 0;
223 /* Start if request is first in list */ 253 /* Start if request is first in list */
224 if (req->list.prev == &sclp_req_queue) { 254 if (sclp_running_state == sclp_running_state_idle &&
255 req->list.prev == &sclp_req_queue) {
225 rc = __sclp_start_request(req); 256 rc = __sclp_start_request(req);
226 if (rc) 257 if (rc)
227 list_del(&req->list); 258 list_del(&req->list);
@@ -294,7 +325,7 @@ __sclp_make_read_req(void)
294 sccb = (struct sccb_header *) sclp_read_sccb; 325 sccb = (struct sccb_header *) sclp_read_sccb;
295 clear_page(sccb); 326 clear_page(sccb);
296 memset(&sclp_read_req, 0, sizeof(struct sclp_req)); 327 memset(&sclp_read_req, 0, sizeof(struct sclp_req));
297 sclp_read_req.command = SCLP_CMDW_READDATA; 328 sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
298 sclp_read_req.status = SCLP_REQ_QUEUED; 329 sclp_read_req.status = SCLP_REQ_QUEUED;
299 sclp_read_req.start_count = 0; 330 sclp_read_req.start_count = 0;
300 sclp_read_req.callback = sclp_read_cb; 331 sclp_read_req.callback = sclp_read_cb;
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code)
334 finished_sccb = S390_lowcore.ext_params & 0xfffffff8; 365 finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
335 evbuf_pending = S390_lowcore.ext_params & 0x3; 366 evbuf_pending = S390_lowcore.ext_params & 0x3;
336 if (finished_sccb) { 367 if (finished_sccb) {
368 del_timer(&sclp_request_timer);
369 sclp_running_state = sclp_running_state_reset_pending;
337 req = __sclp_find_req(finished_sccb); 370 req = __sclp_find_req(finished_sccb);
338 if (req) { 371 if (req) {
339 /* Request post-processing */ 372 /* Request post-processing */
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code)
348 sclp_running_state = sclp_running_state_idle; 381 sclp_running_state = sclp_running_state_idle;
349 } 382 }
350 if (evbuf_pending && sclp_receive_mask != 0 && 383 if (evbuf_pending && sclp_receive_mask != 0 &&
351 sclp_reading_state == sclp_reading_state_idle && 384 sclp_activation_state == sclp_activation_state_active)
352 sclp_activation_state == sclp_activation_state_active ) { 385 __sclp_queue_read_req();
353 sclp_reading_state = sclp_reading_state_reading;
354 __sclp_make_read_req();
355 /* Add request to head of queue */
356 list_add(&sclp_read_req.list, &sclp_req_queue);
357 }
358 spin_unlock(&sclp_lock); 386 spin_unlock(&sclp_lock);
359 sclp_process_queue(); 387 sclp_process_queue();
360} 388}
@@ -374,6 +402,7 @@ sclp_sync_wait(void)
374 unsigned long flags; 402 unsigned long flags;
375 unsigned long cr0, cr0_sync; 403 unsigned long cr0, cr0_sync;
376 u64 timeout; 404 u64 timeout;
405 int irq_context;
377 406
378 /* We'll be disabling timer interrupts, so we need a custom timeout 407 /* We'll be disabling timer interrupts, so we need a custom timeout
379 * mechanism */ 408 * mechanism */
@@ -386,7 +415,9 @@ sclp_sync_wait(void)
386 } 415 }
387 local_irq_save(flags); 416 local_irq_save(flags);
388 /* Prevent bottom half from executing once we force interrupts open */ 417 /* Prevent bottom half from executing once we force interrupts open */
389 local_bh_disable(); 418 irq_context = in_interrupt();
419 if (!irq_context)
420 local_bh_disable();
390 /* Enable service-signal interruption, disable timer interrupts */ 421 /* Enable service-signal interruption, disable timer interrupts */
391 trace_hardirqs_on(); 422 trace_hardirqs_on();
392 __ctl_store(cr0, 0, 0); 423 __ctl_store(cr0, 0, 0);
@@ -402,19 +433,19 @@ sclp_sync_wait(void)
402 get_clock() > timeout && 433 get_clock() > timeout &&
403 del_timer(&sclp_request_timer)) 434 del_timer(&sclp_request_timer))
404 sclp_request_timer.function(sclp_request_timer.data); 435 sclp_request_timer.function(sclp_request_timer.data);
405 barrier();
406 cpu_relax(); 436 cpu_relax();
407 } 437 }
408 local_irq_disable(); 438 local_irq_disable();
409 __ctl_load(cr0, 0, 0); 439 __ctl_load(cr0, 0, 0);
410 _local_bh_enable(); 440 if (!irq_context)
441 _local_bh_enable();
411 local_irq_restore(flags); 442 local_irq_restore(flags);
412} 443}
413 444
414EXPORT_SYMBOL(sclp_sync_wait); 445EXPORT_SYMBOL(sclp_sync_wait);
415 446
416/* Dispatch changes in send and receive mask to registered listeners. */ 447/* Dispatch changes in send and receive mask to registered listeners. */
417static inline void 448static void
418sclp_dispatch_state_change(void) 449sclp_dispatch_state_change(void)
419{ 450{
420 struct list_head *l; 451 struct list_head *l;
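
sclp_sync_wait() can now be entered from interrupt context as well, where the local_bh_disable()/_local_bh_enable() pair is neither needed nor permitted; the function therefore records in_interrupt() on entry and skips the bottom-half bracketing in that case. The essential shape of the change:

    int irq_context = in_interrupt();

    local_irq_save(flags);
    if (!irq_context)
            local_bh_disable();     /* keep the bottom half away while irqs are forced open */
    /* ... enable the service-signal interrupt and busy-wait for completion ... */
    local_irq_disable();
    if (!irq_context)
            _local_bh_enable();
    local_irq_restore(flags);
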
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
597 sccb = (struct init_sccb *) sclp_init_sccb; 628 sccb = (struct init_sccb *) sclp_init_sccb;
598 clear_page(sccb); 629 clear_page(sccb);
599 memset(&sclp_init_req, 0, sizeof(struct sclp_req)); 630 memset(&sclp_init_req, 0, sizeof(struct sclp_req));
600 sclp_init_req.command = SCLP_CMDW_WRITEMASK; 631 sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
601 sclp_init_req.status = SCLP_REQ_FILLED; 632 sclp_init_req.status = SCLP_REQ_FILLED;
602 sclp_init_req.start_count = 0; 633 sclp_init_req.start_count = 0;
603 sclp_init_req.callback = NULL; 634 sclp_init_req.callback = NULL;
@@ -800,7 +831,7 @@ sclp_check_interface(void)
800 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { 831 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
801 __sclp_make_init_req(0, 0); 832 __sclp_make_init_req(0, 0);
802 sccb = (struct init_sccb *) sclp_init_req.sccb; 833 sccb = (struct init_sccb *) sclp_init_req.sccb;
803 rc = service_call(sclp_init_req.command, sccb); 834 rc = sclp_service_call(sclp_init_req.command, sccb);
804 if (rc == -EIO) 835 if (rc == -EIO)
805 break; 836 break;
806 sclp_init_req.status = SCLP_REQ_RUNNING; 837 sclp_init_req.status = SCLP_REQ_RUNNING;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 2c71d6ee7b5b..7d29ab45a6ed 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -12,7 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/list.h> 14#include <linux/list.h>
15 15#include <asm/sclp.h>
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17 17
18/* maximum number of pages concerning our own memory management */ 18/* maximum number of pages concerning our own memory management */
@@ -49,9 +49,11 @@
49 49
50typedef unsigned int sclp_cmdw_t; 50typedef unsigned int sclp_cmdw_t;
51 51
52#define SCLP_CMDW_READDATA 0x00770005 52#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
53#define SCLP_CMDW_WRITEDATA 0x00760005 53#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
54#define SCLP_CMDW_WRITEMASK 0x00780005 54#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
55#define SCLP_CMDW_READ_SCP_INFO 0x00020001
56#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
55 57
56#define GDS_ID_MDSMU 0x1310 58#define GDS_ID_MDSMU 0x1310
57#define GDS_ID_MDSRouteInfo 0x1311 59#define GDS_ID_MDSRouteInfo 0x1311
@@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t;
66 68
67typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 69typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
68 70
69struct sccb_header {
70 u16 length;
71 u8 function_code;
72 u8 control_mask[3];
73 u16 response_code;
74} __attribute__((packed));
75
76struct gds_subvector { 71struct gds_subvector {
77 u8 length; 72 u8 length;
78 u8 key; 73 u8 key;
@@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg);
131int sclp_remove_processed(struct sccb_header *sccb); 126int sclp_remove_processed(struct sccb_header *sccb);
132int sclp_deactivate(void); 127int sclp_deactivate(void);
133int sclp_reactivate(void); 128int sclp_reactivate(void);
129int sclp_service_call(sclp_cmdw_t command, void *sccb);
134 130
135/* useful inlines */ 131/* useful inlines */
136 132
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 86864f641716..ead1043d788e 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); 66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
67} 67}
68 68
69static inline void 69static void
70sclp_conbuf_emit(void) 70sclp_conbuf_emit(void)
71{ 71{
72 struct sclp_buffer* buffer; 72 struct sclp_buffer* buffer;
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 4f873ae148b7..65aa2c85737f 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -169,7 +169,7 @@ cpi_prepare_req(void)
169 } 169 }
170 170
171 /* prepare request data structure presented to SCLP driver */ 171 /* prepare request data structure presented to SCLP driver */
172 req->command = SCLP_CMDW_WRITEDATA; 172 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
173 req->sccb = sccb; 173 req->sccb = sccb;
174 req->status = SCLP_REQ_FILLED; 174 req->status = SCLP_REQ_FILLED;
175 req->callback = cpi_callback; 175 req->callback = cpi_callback;
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
new file mode 100644
index 000000000000..7bcbe643b087
--- /dev/null
+++ b/drivers/s390/char/sclp_info.c
@@ -0,0 +1,57 @@
1/*
2 * drivers/s390/char/sclp_info.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <asm/sclp.h>
12#include "sclp.h"
13
14struct sclp_readinfo_sccb s390_readinfo_sccb;
15
16void __init sclp_readinfo_early(void)
17{
18 sclp_cmdw_t command;
19 struct sccb_header *sccb;
20 int ret;
21
22 __ctl_set_bit(0, 9); /* enable service signal subclass mask */
23
24 sccb = &s390_readinfo_sccb.header;
25 command = SCLP_CMDW_READ_SCP_INFO_FORCED;
26 while (1) {
27 u16 response;
28
29 memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb));
30 sccb->length = sizeof(s390_readinfo_sccb);
31 sccb->control_mask[2] = 0x80;
32
33 ret = sclp_service_call(command, &s390_readinfo_sccb);
34
35 if (ret == -EIO)
36 goto out;
37 if (ret == -EBUSY)
38 continue;
39
40 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
41 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
42 local_irq_disable();
43 barrier();
44
45 response = sccb->response_code;
46
47 if (response == 0x10)
48 break;
49
50 if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO)
51 break;
52
53 command = SCLP_CMDW_READ_SCP_INFO;
54 }
55out:
56 __ctl_clear_bit(0, 9); /* disable service signal subclass mask */
57}
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 0c92d3909cca..2486783ea58e 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EvTyp_PMsgCmd;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITEDATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
464 buffer->request.status = SCLP_REQ_FILLED; 464 buffer->request.status = SCLP_REQ_FILLED;
465 buffer->request.callback = sclp_writedata_callback; 465 buffer->request.callback = sclp_writedata_callback;
466 buffer->request.callback_data = buffer; 466 buffer->request.callback_data = buffer;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2d173e5c8a09..90536f60bf50 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = {
721 .ioctl = sclp_tty_ioctl, 721 .ioctl = sclp_tty_ioctl,
722}; 722};
723 723
724int __init 724static int __init
725sclp_tty_init(void) 725sclp_tty_init(void)
726{ 726{
727 struct tty_driver *driver; 727 struct tty_driver *driver;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 723bf4191bfe..544f137d70d7 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
207 request->sclp_req.status = SCLP_REQ_FAILED; 207 request->sclp_req.status = SCLP_REQ_FAILED;
208 return -EIO; 208 return -EIO;
209 } 209 }
210 request->sclp_req.command = SCLP_CMDW_WRITEDATA; 210 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
211 request->sclp_req.status = SCLP_REQ_FILLED; 211 request->sclp_req.status = SCLP_REQ_FILLED;
212 request->sclp_req.callback = sclp_vt220_callback; 212 request->sclp_req.callback = sclp_vt220_callback;
213 request->sclp_req.callback_data = (void *) request; 213 request->sclp_req.callback_data = (void *) request;
@@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = {
669/* 669/*
670 * Register driver with SCLP and Linux and initialize internal tty structures. 670 * Register driver with SCLP and Linux and initialize internal tty structures.
671 */ 671 */
672int __init 672static int __init
673sclp_vt220_tty_init(void) 673sclp_vt220_tty_init(void)
674{ 674{
675 struct tty_driver *driver; 675 struct tty_driver *driver;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index c9f1c4c8bb13..bb4ff537729d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,7 +3,7 @@
3 * tape device driver for 3480/3490E/3590 tapes. 3 * tape device driver for 3480/3490E/3590 tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -99,7 +99,11 @@ enum tape_op {
99 TO_DIS, /* Tape display */ 99 TO_DIS, /* Tape display */
100 TO_ASSIGN, /* Assign tape to channel path */ 100 TO_ASSIGN, /* Assign tape to channel path */
101 TO_UNASSIGN, /* Unassign tape from channel path */ 101 TO_UNASSIGN, /* Unassign tape from channel path */
 102 TO_SIZE /* #entries in tape_op_t */ 102 TO_CRYPT_ON, /* Enable encryption */
 103 TO_CRYPT_OFF, /* Disable encryption */
104 TO_KEKL_SET, /* Set KEK label */
105 TO_KEKL_QUERY, /* Query KEK label */
106 TO_SIZE, /* #entries in tape_op_t */
103}; 107};
104 108
105/* Forward declaration */ 109/* Forward declaration */
@@ -112,6 +116,7 @@ enum tape_request_status {
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */ 116 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */ 117 TAPE_REQUEST_DONE, /* request is completed. */
114 TAPE_REQUEST_CANCEL, /* request should be canceled. */ 118 TAPE_REQUEST_CANCEL, /* request should be canceled. */
119 TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
115}; 120};
116 121
117/* Tape CCW request */ 122/* Tape CCW request */
@@ -164,10 +169,11 @@ struct tape_discipline {
164 * The discipline irq function either returns an error code (<0) which 169 * The discipline irq function either returns an error code (<0) which
165 * means that the request has failed with an error or one of the following: 170 * means that the request has failed with an error or one of the following:
166 */ 171 */
167#define TAPE_IO_SUCCESS 0 /* request successful */ 172#define TAPE_IO_SUCCESS 0 /* request successful */
168#define TAPE_IO_PENDING 1 /* request still running */ 173#define TAPE_IO_PENDING 1 /* request still running */
169#define TAPE_IO_RETRY 2 /* retry to current request */ 174#define TAPE_IO_RETRY 2 /* retry to current request */
170#define TAPE_IO_STOP 3 /* stop the running request */ 175#define TAPE_IO_STOP 3 /* stop the running request */
176#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
171 177
172/* Char Frontend Data */ 178/* Char Frontend Data */
173struct tape_char_data { 179struct tape_char_data {
@@ -242,6 +248,10 @@ struct tape_device {
242 248
243 /* Function to start or stop the next request later. */ 249 /* Function to start or stop the next request later. */
244 struct delayed_work tape_dnr; 250 struct delayed_work tape_dnr;
251
252 /* Timer for long busy */
253 struct timer_list lb_timeout;
254
245}; 255};
246 256
247/* Externals from tape_core.c */ 257/* Externals from tape_core.c */
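
tape.h introduces the plumbing that the tape_3590.c hunks below build on: four new channel-program operations for encryption and KEK-label handling, a TAPE_REQUEST_LONG_BUSY request state, a TAPE_IO_LONG_BUSY return value for the discipline's interrupt routine, and a per-device lb_timeout timer. A rough sketch of how a discipline could use the long-busy pieces; the helper name and the timeout value are illustrative, not from the patch:

    /* rough sketch only: react to a 'long busy' indication from the drive */
    static int handle_long_busy(struct tape_device *device)
    {
            /* park the device; the timer handler restarts the request later */
            mod_timer(&device->lb_timeout, jiffies + 180 * HZ);
            return TAPE_IO_LONG_BUSY;       /* tape core keeps the request pending */
    }
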
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 9df912f63188..50f5edab83d7 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.c 2 * drivers/s390/char/tape_3590.c
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <asm/ebcdic.h>
14 15
15#define TAPE_DBF_AREA tape_3590_dbf 16#define TAPE_DBF_AREA tape_3590_dbf
16 17
@@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
30 * - Read Device (buffered) log: BRA 31 * - Read Device (buffered) log: BRA
31 * - Read Library log: BRA 32 * - Read Library log: BRA
32 * - Swap Devices: BRA 33 * - Swap Devices: BRA
33 * - Long Busy: BRA 34 * - Long Busy: implemented
34 * - Special Intercept: BRA 35 * - Special Intercept: BRA
35 * - Read Alternate: implemented 36 * - Read Alternate: implemented
36 *******************************************************************/ 37 *******************************************************************/
@@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
94 [0xae] = "Subsystem environmental alert", 95 [0xae] = "Subsystem environmental alert",
95}; 96};
96 97
98static int crypt_supported(struct tape_device *device)
99{
100 return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
101}
102
103static int crypt_enabled(struct tape_device *device)
104{
105 return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
106}
107
108static void ext_to_int_kekl(struct tape390_kekl *in,
109 struct tape3592_kekl *out)
110{
111 int i;
112
113 memset(out, 0, sizeof(*out));
114 if (in->type == TAPE390_KEKL_TYPE_HASH)
115 out->flags |= 0x40;
116 if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
117 out->flags |= 0x80;
118 strncpy(out->label, in->label, 64);
119 for (i = strlen(in->label); i < sizeof(out->label); i++)
120 out->label[i] = ' ';
121 ASCEBC(out->label, sizeof(out->label));
122}
123
124static void int_to_ext_kekl(struct tape3592_kekl *in,
125 struct tape390_kekl *out)
126{
127 memset(out, 0, sizeof(*out));
 128 if (in->flags & 0x40)
129 out->type = TAPE390_KEKL_TYPE_HASH;
130 else
131 out->type = TAPE390_KEKL_TYPE_LABEL;
 132 if (in->flags & 0x80)
133 out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
134 else
135 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
136 memcpy(out->label, in->label, sizeof(in->label));
137 EBCASC(out->label, sizeof(in->label));
138 strstrip(out->label);
139}
140
141static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
142 struct tape390_kekl_pair *out)
143{
144 if (in->count == 0) {
145 out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
146 out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
147 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
148 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
149 } else if (in->count == 1) {
150 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
151 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
152 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
153 } else if (in->count == 2) {
154 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
155 int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
156 } else {
157 printk("Invalid KEKL number: %d\n", in->count);
158 BUG();
159 }
160}
161
162static int check_ext_kekl(struct tape390_kekl *kekl)
163{
164 if (kekl->type == TAPE390_KEKL_TYPE_NONE)
165 goto invalid;
166 if (kekl->type > TAPE390_KEKL_TYPE_HASH)
167 goto invalid;
168 if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
169 goto invalid;
170 if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
171 goto invalid;
172 if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
173 (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
174 goto invalid;
175
176 return 0;
177invalid:
178 return -EINVAL;
179}
180
181static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
182{
183 if (check_ext_kekl(&kekls->kekl[0]))
184 goto invalid;
185 if (check_ext_kekl(&kekls->kekl[1]))
186 goto invalid;
187
188 return 0;
189invalid:
190 return -EINVAL;
191}
192
193/*
194 * Query KEKLs
195 */
196static int tape_3592_kekl_query(struct tape_device *device,
197 struct tape390_kekl_pair *ext_kekls)
198{
199 struct tape_request *request;
200 struct tape3592_kekl_query_order *order;
201 struct tape3592_kekl_query_data *int_kekls;
202 int rc;
203
204 DBF_EVENT(6, "tape3592_kekl_query\n");
205 int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
206 if (!int_kekls)
207 return -ENOMEM;
208 request = tape_alloc_request(2, sizeof(*order));
209 if (IS_ERR(request)) {
210 rc = PTR_ERR(request);
211 goto fail_malloc;
212 }
213 order = request->cpdata;
214 memset(order,0,sizeof(*order));
215 order->code = 0xe2;
216 order->max_count = 2;
217 request->op = TO_KEKL_QUERY;
218 tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
219 tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
220 int_kekls);
221 rc = tape_do_io(device, request);
222 if (rc)
223 goto fail_request;
224 int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
225
226 rc = 0;
227fail_request:
228 tape_free_request(request);
229fail_malloc:
230 kfree(int_kekls);
231 return rc;
232}
233
234/*
235 * IOCTL: Query KEKLs
236 */
237static int tape_3592_ioctl_kekl_query(struct tape_device *device,
238 unsigned long arg)
239{
240 int rc;
241 struct tape390_kekl_pair *ext_kekls;
242
243 DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
244 if (!crypt_supported(device))
245 return -ENOSYS;
246 if (!crypt_enabled(device))
247 return -EUNATCH;
248 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
249 if (!ext_kekls)
250 return -ENOMEM;
251 rc = tape_3592_kekl_query(device, ext_kekls);
252 if (rc != 0)
253 goto fail;
254 if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
255 rc = -EFAULT;
256 goto fail;
257 }
258 rc = 0;
259fail:
260 kfree(ext_kekls);
261 return rc;
262}
263
264static int tape_3590_mttell(struct tape_device *device, int mt_count);
265
266/*
267 * Set KEKLs
268 */
269static int tape_3592_kekl_set(struct tape_device *device,
270 struct tape390_kekl_pair *ext_kekls)
271{
272 struct tape_request *request;
273 struct tape3592_kekl_set_order *order;
274
275 DBF_EVENT(6, "tape3592_kekl_set\n");
276 if (check_ext_kekl_pair(ext_kekls)) {
277 DBF_EVENT(6, "invalid kekls\n");
278 return -EINVAL;
279 }
280 if (tape_3590_mttell(device, 0) != 0)
281 return -EBADSLT;
282 request = tape_alloc_request(1, sizeof(*order));
283 if (IS_ERR(request))
284 return PTR_ERR(request);
285 order = request->cpdata;
286 memset(order, 0, sizeof(*order));
287 order->code = 0xe3;
288 order->kekls.count = 2;
289 ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
290 ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
291 request->op = TO_KEKL_SET;
292 tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
293
294 return tape_do_io_free(device, request);
295}
296
297/*
298 * IOCTL: Set KEKLs
299 */
300static int tape_3592_ioctl_kekl_set(struct tape_device *device,
301 unsigned long arg)
302{
303 int rc;
304 struct tape390_kekl_pair *ext_kekls;
305
306 DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
307 if (!crypt_supported(device))
308 return -ENOSYS;
309 if (!crypt_enabled(device))
310 return -EUNATCH;
311 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
312 if (!ext_kekls)
313 return -ENOMEM;
314 if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
315 rc = -EFAULT;
316 goto out;
317 }
318 rc = tape_3592_kekl_set(device, ext_kekls);
319out:
320 kfree(ext_kekls);
321 return rc;
322}
323
324/*
325 * Enable encryption
326 */
327static int tape_3592_enable_crypt(struct tape_device *device)
328{
329 struct tape_request *request;
330 char *data;
331
332 DBF_EVENT(6, "tape_3592_enable_crypt\n");
333 if (!crypt_supported(device))
334 return -ENOSYS;
335 request = tape_alloc_request(2, 72);
336 if (IS_ERR(request))
337 return PTR_ERR(request);
338 data = request->cpdata;
339 memset(data,0,72);
340
341 data[0] = 0x05;
342 data[36 + 0] = 0x03;
343 data[36 + 1] = 0x03;
344 data[36 + 4] = 0x40;
345 data[36 + 6] = 0x01;
346 data[36 + 14] = 0x2f;
347 data[36 + 18] = 0xc3;
348 data[36 + 35] = 0x72;
349 request->op = TO_CRYPT_ON;
350 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
351 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
352 return tape_do_io_free(device, request);
353}
354
355/*
356 * Disable encryption
357 */
358static int tape_3592_disable_crypt(struct tape_device *device)
359{
360 struct tape_request *request;
361 char *data;
362
363 DBF_EVENT(6, "tape_3592_disable_crypt\n");
364 if (!crypt_supported(device))
365 return -ENOSYS;
366 request = tape_alloc_request(2, 72);
367 if (IS_ERR(request))
368 return PTR_ERR(request);
369 data = request->cpdata;
370 memset(data,0,72);
371
372 data[0] = 0x05;
373 data[36 + 0] = 0x03;
374 data[36 + 1] = 0x03;
375 data[36 + 35] = 0x32;
376
377 request->op = TO_CRYPT_OFF;
378 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
379 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
380
381 return tape_do_io_free(device, request);
382}
383
384/*
385 * IOCTL: Set encryption status
386 */
387static int tape_3592_ioctl_crypt_set(struct tape_device *device,
388 unsigned long arg)
389{
390 struct tape390_crypt_info info;
391
392 DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
393 if (!crypt_supported(device))
394 return -ENOSYS;
395 if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
396 return -EFAULT;
397 if (info.status & ~TAPE390_CRYPT_ON_MASK)
398 return -EINVAL;
399 if (info.status & TAPE390_CRYPT_ON_MASK)
400 return tape_3592_enable_crypt(device);
401 else
402 return tape_3592_disable_crypt(device);
403}
404
405static int tape_3590_sense_medium(struct tape_device *device);
406
407/*
408 * IOCTL: Query encryption status
409 */
410static int tape_3592_ioctl_crypt_query(struct tape_device *device,
411 unsigned long arg)
412{
413 DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
414 if (!crypt_supported(device))
415 return -ENOSYS;
416 tape_3590_sense_medium(device);
417 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
418 sizeof(TAPE_3590_CRYPT_INFO(device))))
419 return -EFAULT;
420 else
421 return 0;
422}
423
97/* 424/*
98 * 3590 IOCTL Overload 425 * 3590 IOCTL Overload
99 */ 426 */
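
The conversion helpers in the hunk above encode the key-encrypting-key label (KEKL) types into a single flags byte: 0x40 marks the label itself as a hash, 0x80 marks the label as stored on tape as a hash, and the label text is space-padded to 64 bytes and translated ASCII<->EBCDIC on the way in. A small stand-alone re-creation of just that flag encoding (illustrative only, with local enum names instead of the TAPE390_KEKL_TYPE_* constants):

#include <assert.h>
#include <stdint.h>

enum kekl_type { KEKL_NONE, KEKL_LABEL, KEKL_HASH };

/* mirror of ext_to_int_kekl(): fold the two type fields into the flags byte */
static uint8_t kekl_types_to_flags(enum kekl_type type, enum kekl_type on_tape)
{
	uint8_t flags = 0;

	if (type == KEKL_HASH)
		flags |= 0x40;
	if (on_tape == KEKL_HASH)
		flags |= 0x80;
	return flags;
}

/* mirror of int_to_ext_kekl(): recover the two type fields from the flags */
static void kekl_flags_to_types(uint8_t flags, enum kekl_type *type,
				enum kekl_type *on_tape)
{
	*type = (flags & 0x40) ? KEKL_HASH : KEKL_LABEL;
	*on_tape = (flags & 0x80) ? KEKL_HASH : KEKL_LABEL;
}

int main(void)
{
	enum kekl_type t, ot;

	kekl_flags_to_types(kekl_types_to_flags(KEKL_HASH, KEKL_HASH), &t, &ot);
	assert(t == KEKL_HASH && ot == KEKL_HASH);
	assert(kekl_types_to_flags(KEKL_LABEL, KEKL_LABEL) == 0x00);
	return 0;
}
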
@@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
109 436
110 return tape_std_display(device, &disp); 437 return tape_std_display(device, &disp);
111 } 438 }
439 case TAPE390_KEKL_SET:
440 return tape_3592_ioctl_kekl_set(device, arg);
441 case TAPE390_KEKL_QUERY:
442 return tape_3592_ioctl_kekl_query(device, arg);
443 case TAPE390_CRYPT_SET:
444 return tape_3592_ioctl_crypt_set(device, arg);
445 case TAPE390_CRYPT_QUERY:
446 return tape_3592_ioctl_crypt_query(device, arg);
112 default: 447 default:
113 return -EINVAL; /* no additional ioctls */ 448 return -EINVAL; /* no additional ioctls */
114 } 449 }
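
With the four new cases wired into tape_3590_ioctl(), the encryption state and the key labels become reachable from user space. The sketch below is not part of the patch: the ioctl names, structure names and the status/label fields are taken from the hunks above, but the header that exports them and the device node are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/tape390.h>	/* assumed location of the TAPE390_* definitions */

int main(void)
{
	struct tape390_crypt_info info;
	struct tape390_kekl_pair kekls;
	int fd;

	fd = open("/dev/ntibm0", O_RDONLY);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&info, 0, sizeof(info));
	if (ioctl(fd, TAPE390_CRYPT_QUERY, &info) < 0)
		perror("TAPE390_CRYPT_QUERY");	/* ENOSYS: drive has no crypto support */
	else
		printf("crypt status 0x%x, medium status 0x%x\n",
		       (unsigned int) info.status,
		       (unsigned int) info.medium_status);
	memset(&kekls, 0, sizeof(kekls));
	if (ioctl(fd, TAPE390_KEKL_QUERY, &kekls) < 0)
		perror("TAPE390_KEKL_QUERY");	/* EUNATCH: encryption not enabled */
	else
		printf("kekl1 '%.64s', kekl2 '%.64s'\n",
		       kekls.kekl[0].label, kekls.kekl[1].label);
	close(fd);
	return 0;
}
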
@@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work)
248 case TO_READ_ATTMSG: 583 case TO_READ_ATTMSG:
249 tape_3590_read_attmsg(p->device); 584 tape_3590_read_attmsg(p->device);
250 break; 585 break;
586 case TO_CRYPT_ON:
587 tape_3592_enable_crypt(p->device);
588 break;
589 case TO_CRYPT_OFF:
590 tape_3592_disable_crypt(p->device);
591 break;
251 default: 592 default:
252 DBF_EVENT(3, "T3590: work handler undefined for " 593 DBF_EVENT(3, "T3590: work handler undefined for "
253 "operation 0x%02x\n", p->op); 594 "operation 0x%02x\n", p->op);
@@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
365} 706}
366#endif 707#endif
367 708
709static void tape_3590_med_state_set(struct tape_device *device,
710 struct tape_3590_med_sense *sense)
711{
712 struct tape390_crypt_info *c_info;
713
714 c_info = &TAPE_3590_CRYPT_INFO(device);
715
716 if (sense->masst == MSENSE_UNASSOCIATED) {
717 tape_med_state_set(device, MS_UNLOADED);
718 TAPE_3590_CRYPT_INFO(device).medium_status = 0;
719 return;
720 }
721 if (sense->masst != MSENSE_ASSOCIATED_MOUNT) {
722 PRINT_ERR("Unknown medium state: %x\n", sense->masst);
723 return;
724 }
725 tape_med_state_set(device, MS_LOADED);
726 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
727 if (sense->flags & MSENSE_CRYPT_MASK) {
728 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
729 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
730 } else {
731 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
732 c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
733 }
734}
735
368/* 736/*
369 * The done handler is called at device/channel end and wakes up the sleeping 737 * The done handler is called at device/channel end and wakes up the sleeping
370 * process 738 * process
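
tape_3590_med_state_set() above folds the medium-sense data into both the generic medium state and the crypt info: masst 0x00 means unloaded, 0x01 means a mounted medium, and bit 0x10 of the flag word marks it as encrypted. A stand-alone restatement of that decision (illustrative only; the MSENSE_* values come from the tape_3590.h hunk further down):

#include <assert.h>
#include <stdint.h>

#define MSENSE_UNASSOCIATED	0x00
#define MSENSE_ASSOCIATED_MOUNT	0x01
#define MSENSE_CRYPT_MASK	0x00000010

enum med_state { MED_UNKNOWN, MED_UNLOADED, MED_LOADED, MED_LOADED_ENCRYPTED };

static enum med_state decode_med_sense(uint8_t masst, uint32_t flags)
{
	if (masst == MSENSE_UNASSOCIATED)
		return MED_UNLOADED;
	if (masst != MSENSE_ASSOCIATED_MOUNT)
		return MED_UNKNOWN;	/* the driver logs "Unknown medium state" */
	return (flags & MSENSE_CRYPT_MASK) ? MED_LOADED_ENCRYPTED : MED_LOADED;
}

int main(void)
{
	assert(decode_med_sense(0x00, 0x00) == MED_UNLOADED);
	assert(decode_med_sense(0x01, 0x10) == MED_LOADED_ENCRYPTED);
	assert(decode_med_sense(0x01, 0x00) == MED_LOADED);
	return 0;
}
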
@@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
372static int 740static int
373tape_3590_done(struct tape_device *device, struct tape_request *request) 741tape_3590_done(struct tape_device *device, struct tape_request *request)
374{ 742{
375 struct tape_3590_med_sense *sense; 743 struct tape_3590_disc_data *disc_data;
376 744
377 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 745 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
746 disc_data = device->discdata;
378 747
379 switch (request->op) { 748 switch (request->op) {
380 case TO_BSB: 749 case TO_BSB:
@@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
394 break; 763 break;
395 case TO_RUN: 764 case TO_RUN:
396 tape_med_state_set(device, MS_UNLOADED); 765 tape_med_state_set(device, MS_UNLOADED);
766 tape_3590_schedule_work(device, TO_CRYPT_OFF);
397 break; 767 break;
398 case TO_MSEN: 768 case TO_MSEN:
399 sense = (struct tape_3590_med_sense *) request->cpdata; 769 tape_3590_med_state_set(device, request->cpdata);
400 if (sense->masst == MSENSE_UNASSOCIATED) 770 break;
401 tape_med_state_set(device, MS_UNLOADED); 771 case TO_CRYPT_ON:
402 if (sense->masst == MSENSE_ASSOCIATED_MOUNT) 772 TAPE_3590_CRYPT_INFO(device).status
403 tape_med_state_set(device, MS_LOADED); 773 |= TAPE390_CRYPT_ON_MASK;
774 *(device->modeset_byte) |= 0x03;
775 break;
776 case TO_CRYPT_OFF:
777 TAPE_3590_CRYPT_INFO(device).status
778 &= ~TAPE390_CRYPT_ON_MASK;
779 *(device->modeset_byte) &= ~0x03;
404 break; 780 break;
405 case TO_RBI: /* RBI seems to succeed even without medium loaded. */ 781 case TO_RBI: /* RBI seems to succeed even without medium loaded. */
406 case TO_NOP: /* Same to NOP. */ 782 case TO_NOP: /* Same to NOP. */
@@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
409 case TO_DIS: 785 case TO_DIS:
410 case TO_ASSIGN: 786 case TO_ASSIGN:
411 case TO_UNASSIGN: 787 case TO_UNASSIGN:
412 break;
413 case TO_SIZE: 788 case TO_SIZE:
789 case TO_KEKL_SET:
790 case TO_KEKL_QUERY:
414 break; 791 break;
415 } 792 }
416 return TAPE_IO_SUCCESS; 793 return TAPE_IO_SUCCESS;
@@ -540,10 +917,8 @@ static int
540tape_3590_erp_long_busy(struct tape_device *device, 917tape_3590_erp_long_busy(struct tape_device *device,
541 struct tape_request *request, struct irb *irb) 918 struct tape_request *request, struct irb *irb)
542{ 919{
543 /* FIXME: how about WAITING for a minute ? */ 920 DBF_EVENT(6, "Device is busy\n");
544 PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", 921 return TAPE_IO_LONG_BUSY;
545 device->cdev->dev.bus_id);
546 return tape_3590_erp_basic(device, request, irb, -EBUSY);
547} 922}
548 923
549/* 924/*
@@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
951 device->cdev->dev.bus_id, sense->mc); 1326 device->cdev->dev.bus_id, sense->mc);
952} 1327}
953 1328
1329static int tape_3590_crypt_error(struct tape_device *device,
1330 struct tape_request *request, struct irb *irb)
1331{
1332 u8 cu_rc, ekm_rc1;
1333 u16 ekm_rc2;
1334 u32 drv_rc;
1335 char *bus_id, *sense;
1336
1337 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1338 bus_id = device->cdev->dev.bus_id;
1339 cu_rc = sense[0];
1340 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1341 ekm_rc1 = sense[9];
1342 ekm_rc2 = *((u16*) &sense[10]);
1343 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1344 /* key not defined on EKM */
1345 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
1346 if ((cu_rc == 1) || (cu_rc == 2))
1347 /* No connection to EKM */
1348 return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
1349
1350 PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id);
1351 PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc,
1352 drv_rc, ekm_rc1, ekm_rc2);
1353
1354 return tape_3590_erp_basic(device, request, irb, -ENOKEY);
1355}
1356
954/* 1357/*
955 * 3590 error Recovery routine: 1358 * 3590 error Recovery routine:
956 * If possible, it tries to recover from the error. If this is not possible, 1359 * If possible, it tries to recover from the error. If this is not possible,
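
tape_3590_crypt_error() above translates the control-unit and EKM return codes taken from the sense data into errno values for the new RC-QRC 0x2240. A host-side restatement of just that mapping (illustrative only; the byte offsets and the big-endian extraction of drv_rc are left out):

#include <assert.h>
#include <errno.h>
#include <stdint.h>

static int crypt_sense_to_errno(uint8_t cu_rc, uint16_t ekm_rc2)
{
	if (cu_rc == 0 && ekm_rc2 == 0xee31)
		return -EKEYREJECTED;	/* key not defined on the EKM */
	if (cu_rc == 1 || cu_rc == 2)
		return -ENOTCONN;	/* no connection to the EKM */
	return -ENOKEY;			/* otherwise: no usable key */
}

int main(void)
{
	assert(crypt_sense_to_errno(0, 0xee31) == -EKEYREJECTED);
	assert(crypt_sense_to_errno(2, 0) == -ENOTCONN);
	assert(crypt_sense_to_errno(0, 0) == -ENOKEY);
	return 0;
}
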
@@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
979 1382
980 sense = (struct tape_3590_sense *) irb->ecw; 1383 sense = (struct tape_3590_sense *) irb->ecw;
981 1384
1385 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
1386
982 /* 1387 /*
983 * First check all RC-QRCs where we want to do something special 1388 * First check all RC-QRCs where we want to do something special
984 * - "break": basic error recovery is done 1389 * - "break": basic error recovery is done
@@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
999 case 0x2231: 1404 case 0x2231:
1000 tape_3590_print_era_msg(device, irb); 1405 tape_3590_print_era_msg(device, irb);
1001 return tape_3590_erp_special_interrupt(device, request, irb); 1406 return tape_3590_erp_special_interrupt(device, request, irb);
1407 case 0x2240:
1408 return tape_3590_crypt_error(device, request, irb);
1002 1409
1003 case 0x3010: 1410 case 0x3010:
1004 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", 1411 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
@@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1020 DBF_EVENT(2, "(%08x): Rewind Unload complete\n", 1427 DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
1021 device->cdev_id); 1428 device->cdev_id);
1022 tape_med_state_set(device, MS_UNLOADED); 1429 tape_med_state_set(device, MS_UNLOADED);
1430 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1023 return tape_3590_erp_basic(device, request, irb, 0); 1431 return tape_3590_erp_basic(device, request, irb, 0);
1024 1432
1025 case 0x4010: 1433 case 0x4010:
@@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1030 PRINT_WARN("(%s): Tape operation when medium not loaded\n", 1438 PRINT_WARN("(%s): Tape operation when medium not loaded\n",
1031 device->cdev->dev.bus_id); 1439 device->cdev->dev.bus_id);
1032 tape_med_state_set(device, MS_UNLOADED); 1440 tape_med_state_set(device, MS_UNLOADED);
1441 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1033 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1442 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1034 case 0x4012: /* Device Long Busy */ 1443 case 0x4012: /* Device Long Busy */
1444 /* XXX: Also use long busy handling here? */
1445 DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
1035 tape_3590_print_era_msg(device, irb); 1446 tape_3590_print_era_msg(device, irb);
1447 return tape_3590_erp_basic(device, request, irb, -EBUSY);
1448 case 0x4014:
1449 DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
1036 return tape_3590_erp_long_busy(device, request, irb); 1450 return tape_3590_erp_long_busy(device, request, irb);
1037 1451
1038 case 0x5010: 1452 case 0x5010:
@@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1064 case 0x5120: 1478 case 0x5120:
1065 case 0x1120: 1479 case 0x1120:
1066 tape_med_state_set(device, MS_UNLOADED); 1480 tape_med_state_set(device, MS_UNLOADED);
1481 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1067 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1482 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1068 1483
1069 case 0x6020: 1484 case 0x6020:
@@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device)
1142{ 1557{
1143 int rc; 1558 int rc;
1144 struct tape_3590_disc_data *data; 1559 struct tape_3590_disc_data *data;
1560 char *rdc_data;
1145 1561
1146 DBF_EVENT(6, "3590 device setup\n"); 1562 DBF_EVENT(6, "3590 device setup\n");
1147 data = kmalloc(sizeof(struct tape_3590_disc_data), 1563 data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
1148 GFP_KERNEL | GFP_DMA);
1149 if (data == NULL) 1564 if (data == NULL)
1150 return -ENOMEM; 1565 return -ENOMEM;
1151 data->read_back_op = READ_PREVIOUS; 1566 data->read_back_op = READ_PREVIOUS;
1152 device->discdata = data; 1567 device->discdata = data;
1153 1568
1154 if ((rc = tape_std_assign(device)) == 0) { 1569 rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
1155 /* Try to find out if medium is loaded */ 1570 if (!rdc_data) {
1156 if ((rc = tape_3590_sense_medium(device)) != 0) 1571 rc = -ENOMEM;
1157 DBF_LH(3, "3590 medium sense returned %d\n", rc); 1572 goto fail_kmalloc;
1573 }
1574 rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64);
1575 if (rc) {
1576 DBF_LH(3, "Read device characteristics failed!\n");
1577 goto fail_kmalloc;
1578 }
1579 rc = tape_std_assign(device);
1580 if (rc)
1581 goto fail_rdc_data;
1582 if (rdc_data[31] == 0x13) {
1583 PRINT_INFO("Device has crypto support\n");
1584 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
1585 tape_3592_disable_crypt(device);
1586 } else {
1587 DBF_EVENT(6, "Device has NO crypto support\n");
1158 } 1588 }
1589 /* Try to find out if medium is loaded */
1590 rc = tape_3590_sense_medium(device);
1591 if (rc) {
1592 DBF_LH(3, "3590 medium sense returned %d\n", rc);
1593 goto fail_rdc_data;
1594 }
1595 return 0;
1159 1596
1597fail_rdc_data:
1598 kfree(rdc_data);
1599fail_kmalloc:
1600 kfree(data);
1160 return rc; 1601 return rc;
1161} 1602}
1162 1603
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index cf274b9445a6..aa5138807af1 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.h 2 * drivers/s390/char/tape_3590.h
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -38,16 +38,22 @@
38#define MSENSE_UNASSOCIATED 0x00 38#define MSENSE_UNASSOCIATED 0x00
39#define MSENSE_ASSOCIATED_MOUNT 0x01 39#define MSENSE_ASSOCIATED_MOUNT 0x01
40#define MSENSE_ASSOCIATED_UMOUNT 0x02 40#define MSENSE_ASSOCIATED_UMOUNT 0x02
41#define MSENSE_CRYPT_MASK 0x00000010
41 42
42#define TAPE_3590_MAX_MSG 0xb0 43#define TAPE_3590_MAX_MSG 0xb0
43 44
44/* Datatypes */ 45/* Datatypes */
45 46
46struct tape_3590_disc_data { 47struct tape_3590_disc_data {
47 unsigned char modeset_byte; 48 struct tape390_crypt_info crypt_info;
48 int read_back_op; 49 int read_back_op;
49}; 50};
50 51
52#define TAPE_3590_CRYPT_INFO(device) \
53 ((struct tape_3590_disc_data*)(device->discdata))->crypt_info
54#define TAPE_3590_READ_BACK_OP(device) \
55 ((struct tape_3590_disc_data*)(device->discdata))->read_back_op
56
51struct tape_3590_sense { 57struct tape_3590_sense {
52 58
53 unsigned int command_rej:1; 59 unsigned int command_rej:1;
@@ -118,7 +124,48 @@ struct tape_3590_sense {
118struct tape_3590_med_sense { 124struct tape_3590_med_sense {
119 unsigned int macst:4; 125 unsigned int macst:4;
120 unsigned int masst:4; 126 unsigned int masst:4;
121 char pad[127]; 127 char pad1[7];
128 unsigned int flags;
129 char pad2[116];
130} __attribute__ ((packed));
131
132/* Datastructures for 3592 encryption support */
133
134struct tape3592_kekl {
135 __u8 flags;
136 char label[64];
137} __attribute__ ((packed));
138
139struct tape3592_kekl_pair {
140 __u8 count;
141 struct tape3592_kekl kekl[2];
142} __attribute__ ((packed));
143
144struct tape3592_kekl_query_data {
145 __u16 len;
146 __u8 fmt;
147 __u8 mc;
148 __u32 id;
149 __u8 flags;
150 struct tape3592_kekl_pair kekls;
151 char reserved[116];
152} __attribute__ ((packed));
153
154struct tape3592_kekl_query_order {
155 __u8 code;
156 __u8 flags;
157 char reserved1[2];
158 __u8 max_count;
159 char reserved2[35];
160} __attribute__ ((packed));
161
162struct tape3592_kekl_set_order {
163 __u8 code;
164 __u8 flags;
165 char reserved1[2];
166 __u8 op;
167 struct tape3592_kekl_pair kekls;
168 char reserved2[120];
122} __attribute__ ((packed)); 169} __attribute__ ((packed));
123 170
124#endif /* _TAPE_3590_H */ 171#endif /* _TAPE_3590_H */
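
Because every 3592 structure above is declared packed, the wire sizes follow directly from the field lists: a KEKL is 65 bytes, a pair 131 bytes, the query reply and the set order both come out at 256 bytes, and the query order at 40; the reshaped tape_3590_med_sense keeps its 128-byte size (1 + 7 + 4 + 116). A host-side sanity check, re-declared with <stdint.h> types (a sketch assuming GCC packed semantics, not the kernel definitions themselves):

#include <stdint.h>

struct kekl        { uint8_t flags; char label[64]; } __attribute__ ((packed));
struct kekl_pair   { uint8_t count; struct kekl kekl[2]; } __attribute__ ((packed));
struct query_data  { uint16_t len; uint8_t fmt; uint8_t mc; uint32_t id;
		     uint8_t flags; struct kekl_pair kekls;
		     char reserved[116]; } __attribute__ ((packed));
struct query_order { uint8_t code; uint8_t flags; char reserved1[2];
		     uint8_t max_count; char reserved2[35]; } __attribute__ ((packed));
struct set_order   { uint8_t code; uint8_t flags; char reserved1[2];
		     uint8_t op; struct kekl_pair kekls;
		     char reserved2[120]; } __attribute__ ((packed));

_Static_assert(sizeof(struct kekl) == 65, "flags byte + 64-byte label");
_Static_assert(sizeof(struct kekl_pair) == 131, "count byte + two KEKLs");
_Static_assert(sizeof(struct query_data) == 256, "query reply");
_Static_assert(sizeof(struct query_order) == 40, "query order");
_Static_assert(sizeof(struct set_order) == 256, "set order");

int main(void) { return 0; }
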
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index c8a89b3b87d4..dd0ecaed592e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device)
73/* 73/*
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static inline void 76static void
77tapeblock_end_request(struct request *req, int uptodate) 77tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
@@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
108/* 108/*
109 * Feed the tape device CCW queue with requests supplied in a list. 109 * Feed the tape device CCW queue with requests supplied in a list.
110 */ 110 */
111static inline int 111static int
112tapeblock_start_request(struct tape_device *device, struct request *req) 112tapeblock_start_request(struct tape_device *device, struct request *req)
113{ 113{
114 struct tape_request * ccw_req; 114 struct tape_request * ccw_req;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31198c8f2718..9faea04e11e9 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -3,7 +3,7 @@
3 * character device frontend for tape device driver 3 * character device frontend for tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device)
89 device->nt = NULL; 89 device->nt = NULL;
90} 90}
91 91
92/* 92static int
93 * Terminate write command (we write two TMs and skip backward over last)
94 * This ensures that the tape is always correctly terminated.
95 * When the user writes afterwards a new file, he will overwrite the
96 * second TM and therefore one TM will remain to separate the
97 * two files on the tape...
98 */
99static inline void
100tapechar_terminate_write(struct tape_device *device)
101{
102 if (tape_mtop(device, MTWEOF, 1) == 0 &&
103 tape_mtop(device, MTWEOF, 1) == 0)
104 tape_mtop(device, MTBSR, 1);
105}
106
107static inline int
108tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) 93tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
109{ 94{
110 struct idal_buffer *new; 95 struct idal_buffer *new;
@@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
137/* 122/*
138 * Tape device read function 123 * Tape device read function
139 */ 124 */
140ssize_t 125static ssize_t
141tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 126tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
142{ 127{
143 struct tape_device *device; 128 struct tape_device *device;
@@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
201/* 186/*
202 * Tape device write function 187 * Tape device write function
203 */ 188 */
204ssize_t 189static ssize_t
205tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) 190tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
206{ 191{
207 struct tape_device *device; 192 struct tape_device *device;
@@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
291/* 276/*
292 * Character frontend tape device open function. 277 * Character frontend tape device open function.
293 */ 278 */
294int 279static int
295tapechar_open (struct inode *inode, struct file *filp) 280tapechar_open (struct inode *inode, struct file *filp)
296{ 281{
297 struct tape_device *device; 282 struct tape_device *device;
@@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp)
326 * Character frontend tape device release function. 311 * Character frontend tape device release function.
327 */ 312 */
328 313
329int 314static int
330tapechar_release(struct inode *inode, struct file *filp) 315tapechar_release(struct inode *inode, struct file *filp)
331{ 316{
332 struct tape_device *device; 317 struct tape_device *device;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c6c2e918b990..e2a8a1a04bab 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,7 +3,7 @@
3 * basic function of the tape device driver 3 * basic function of the tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -26,9 +26,11 @@
26#include "tape_std.h" 26#include "tape_std.h"
27 27
28#define PRINTK_HEADER "TAPE_CORE: " 28#define PRINTK_HEADER "TAPE_CORE: "
29#define LONG_BUSY_TIMEOUT 180 /* seconds */
29 30
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void tape_delayed_next_request(struct work_struct *); 32static void tape_delayed_next_request(struct work_struct *);
33static void tape_long_busy_timeout(unsigned long data);
32 34
33/* 35/*
34 * One list to contain all tape devices of all disciplines, so 36 * One list to contain all tape devices of all disciplines, so
@@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] =
69 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", 71 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
70 [TO_READ_ATTMSG] = "RAT", 72 [TO_READ_ATTMSG] = "RAT",
71 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", 73 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
72 [TO_UNASSIGN] = "UAS" 74 [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON",
75 [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
76 [TO_KEKL_QUERY] = "KLQ",
73}; 77};
74 78
75static inline int 79static int
76busid_to_int(char *bus_id) 80busid_to_int(char *bus_id)
77{ 81{
78 int dec; 82 int dec;
@@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
252/* 256/*
253 * Stop running ccw. Has to be called with the device lock held. 257 * Stop running ccw. Has to be called with the device lock held.
254 */ 258 */
255static inline int 259static int
256__tape_cancel_io(struct tape_device *device, struct tape_request *request) 260__tape_cancel_io(struct tape_device *device, struct tape_request *request)
257{ 261{
258 int retries; 262 int retries;
@@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device,
346 return -EINVAL; 350 return -EINVAL;
347 } 351 }
348 352
353 init_timer(&device->lb_timeout);
354 device->lb_timeout.function = tape_long_busy_timeout;
355
349 /* Let the discipline have a go at the device. */ 356 /* Let the discipline have a go at the device. */
350 device->discipline = discipline; 357 device->discipline = discipline;
351 if (!try_module_get(discipline->owner)) { 358 if (!try_module_get(discipline->owner)) {
@@ -385,7 +392,7 @@ out:
385 return rc; 392 return rc;
386} 393}
387 394
388static inline void 395static void
389tape_cleanup_device(struct tape_device *device) 396tape_cleanup_device(struct tape_device *device)
390{ 397{
391 tapeblock_cleanup_device(device); 398 tapeblock_cleanup_device(device);
@@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev)
563 return ret; 570 return ret;
564} 571}
565 572
566static inline void 573static void
567__tape_discard_requests(struct tape_device *device) 574__tape_discard_requests(struct tape_device *device)
568{ 575{
569 struct tape_request * request; 576 struct tape_request * request;
@@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request)
703 kfree(request); 710 kfree(request);
704} 711}
705 712
706static inline int 713static int
707__tape_start_io(struct tape_device *device, struct tape_request *request) 714__tape_start_io(struct tape_device *device, struct tape_request *request)
708{ 715{
709 int rc; 716 int rc;
@@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
733 return rc; 740 return rc;
734} 741}
735 742
736static inline void 743static void
737__tape_start_next_request(struct tape_device *device) 744__tape_start_next_request(struct tape_device *device)
738{ 745{
739 struct list_head *l, *n; 746 struct list_head *l, *n;
@@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work)
801 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 808 spin_unlock_irq(get_ccwdev_lock(device->cdev));
802} 809}
803 810
804static inline void 811static void tape_long_busy_timeout(unsigned long data)
812{
813 struct tape_request *request;
814 struct tape_device *device;
815
816 device = (struct tape_device *) data;
817 spin_lock_irq(get_ccwdev_lock(device->cdev));
818 request = list_entry(device->req_queue.next, struct tape_request, list);
819 if (request->status != TAPE_REQUEST_LONG_BUSY)
820 BUG();
821 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
822 __tape_start_next_request(device);
823 device->lb_timeout.data = (unsigned long) tape_put_device(device);
824 spin_unlock_irq(get_ccwdev_lock(device->cdev));
825}
826
827static void
805__tape_end_request( 828__tape_end_request(
806 struct tape_device * device, 829 struct tape_device * device,
807 struct tape_request * request, 830 struct tape_request * request,
@@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
878 * and starts it if the tape is idle. Has to be called with 901 * and starts it if the tape is idle. Has to be called with
879 * the device lock held. 902 * the device lock held.
880 */ 903 */
881static inline int 904static int
882__tape_start_request(struct tape_device *device, struct tape_request *request) 905__tape_start_request(struct tape_device *device, struct tape_request *request)
883{ 906{
884 int rc; 907 int rc;
@@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1094 /* May be an unsolicited irq */ 1117 /* May be an unsolicited irq */
1095 if(request != NULL) 1118 if(request != NULL)
1096 request->rescnt = irb->scsw.count; 1119 request->rescnt = irb->scsw.count;
1097 1120 else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
1121 !list_empty(&device->req_queue)) {
1122 /* Not Ready to Ready after long busy ? */
1123 struct tape_request *req;
1124 req = list_entry(device->req_queue.next,
1125 struct tape_request, list);
1126 if (req->status == TAPE_REQUEST_LONG_BUSY) {
1127 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
1128 if (del_timer(&device->lb_timeout)) {
1129 device->lb_timeout.data = (unsigned long)
1130 tape_put_device(device);
1131 __tape_start_next_request(device);
1132 }
1133 return;
1134 }
1135 }
1098 if (irb->scsw.dstat != 0x0c) { 1136 if (irb->scsw.dstat != 0x0c) {
1099 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1137 /* Set the 'ONLINE' flag depending on sense byte 1 */
1100 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1138 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
@@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1142 break; 1180 break;
1143 case TAPE_IO_PENDING: 1181 case TAPE_IO_PENDING:
1144 break; 1182 break;
1183 case TAPE_IO_LONG_BUSY:
1184 device->lb_timeout.data =
1185 (unsigned long)tape_get_device_reference(device);
1186 device->lb_timeout.expires = jiffies +
1187 LONG_BUSY_TIMEOUT * HZ;
1188 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
1189 add_timer(&device->lb_timeout);
1190 request->status = TAPE_REQUEST_LONG_BUSY;
1191 break;
1145 case TAPE_IO_RETRY: 1192 case TAPE_IO_RETRY:
1146 rc = __tape_start_io(device, request); 1193 rc = __tape_start_io(device, request);
1147 if (rc) 1194 if (rc)
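
The tape_core.c hunks above implement the long-busy flow end to end: a discipline returns TAPE_IO_LONG_BUSY, __tape_do_irq() takes a device reference and arms lb_timeout for LONG_BUSY_TIMEOUT (180 s), and the queue is restarted either when the not-ready-to-ready interrupt deletes the timer or when tape_long_busy_timeout() fires; both paths drop the reference taken when arming. A minimal sketch of the underlying pre-timer_setup() timer pattern (assumes nothing beyond the 2.6-era timer API already used above):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct toy_dev {
	struct timer_list lb_timeout;
};

/* runs in timer (softirq) context, like tape_long_busy_timeout() */
static void toy_long_busy_timeout(unsigned long data)
{
	struct toy_dev *dev = (struct toy_dev *) data;

	/* deadline hit: restart whatever was parked as "long busy" */
	(void) dev;
}

static void toy_arm(struct toy_dev *dev, unsigned int seconds)
{
	init_timer(&dev->lb_timeout);
	dev->lb_timeout.function = toy_long_busy_timeout;
	dev->lb_timeout.data = (unsigned long) dev;
	dev->lb_timeout.expires = jiffies + seconds * HZ;
	add_timer(&dev->lb_timeout);
}

static void toy_device_ready_again(struct toy_dev *dev)
{
	/* a nonzero return means the timer was still pending, so this path
	 * won the race and owns the restart, mirroring the del_timer() check
	 * in __tape_do_irq() */
	if (del_timer(&dev->lb_timeout)) {
		/* restart the parked request here */
	}
}
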
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 09844621edc0..bc33068b9ce2 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -36,7 +36,7 @@
36struct tty_driver *tty3270_driver; 36struct tty_driver *tty3270_driver;
37static int tty3270_max_index; 37static int tty3270_max_index;
38 38
39struct raw3270_fn tty3270_fn; 39static struct raw3270_fn tty3270_fn;
40 40
41struct tty3270_cell { 41struct tty3270_cell {
42 unsigned char character; 42 unsigned char character;
@@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *);
119/* 119/*
120 * Setup timeout for a device. On timeout trigger an update. 120 * Setup timeout for a device. On timeout trigger an update.
121 */ 121 */
122void 122static void tty3270_set_timer(struct tty3270 *tp, int expires)
123tty3270_set_timer(struct tty3270 *tp, int expires)
124{ 123{
125 if (expires == 0) { 124 if (expires == 0) {
126 if (timer_pending(&tp->timer) && del_timer(&tp->timer)) 125 if (timer_pending(&tp->timer) && del_timer(&tp->timer))
@@ -841,7 +840,7 @@ tty3270_del_views(void)
841 } 840 }
842} 841}
843 842
844struct raw3270_fn tty3270_fn = { 843static struct raw3270_fn tty3270_fn = {
845 .activate = tty3270_activate, 844 .activate = tty3270_activate,
846 .deactivate = tty3270_deactivate, 845 .deactivate = tty3270_deactivate,
847 .intv = (void *) tty3270_irq, 846 .intv = (void *) tty3270_irq,
@@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = {
1754 .set_termios = tty3270_set_termios 1753 .set_termios = tty3270_set_termios
1755}; 1754};
1756 1755
1757void 1756static void tty3270_notifier(int index, int active)
1758tty3270_notifier(int index, int active)
1759{ 1757{
1760 if (active) 1758 if (active)
1761 tty_register_device(tty3270_driver, index, NULL); 1759 tty_register_device(tty3270_driver, index, NULL);
@@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active)
1767 * 3270 tty registration code called from tty_init(). 1765 * 3270 tty registration code called from tty_init().
1768 * Most kernel services (incl. kmalloc) are available at this point. 1766
1769 */ 1767 */
1770int __init 1768static int __init tty3270_init(void)
1771tty3270_init(void)
1772{ 1769{
1773 struct tty_driver *driver; 1770 struct tty_driver *driver;
1774 int ret; 1771 int ret;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6cb23040954b..4f894dc2373b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -128,9 +128,8 @@ static iucv_interrupt_ops_t vmlogrdr_iucvops = {
128 .MessagePending = vmlogrdr_iucv_MessagePending, 128 .MessagePending = vmlogrdr_iucv_MessagePending,
129}; 129};
130 130
131 131static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
132DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); 132static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
133DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
134 133
135/* 134/*
136 * pointer to system service private structure 135 * pointer to system service private structure
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 12c2d6b746e6..aa65df4dfced 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -43,7 +43,7 @@ typedef enum {add, free} range_action;
43 * Function: blacklist_range 43 * Function: blacklist_range
44 * (Un-)blacklist the devices from-to 44 * (Un-)blacklist the devices from-to
45 */ 45 */
46static inline void 46static void
47blacklist_range (range_action action, unsigned int from, unsigned int to, 47blacklist_range (range_action action, unsigned int from, unsigned int to,
48 unsigned int ssid) 48 unsigned int ssid)
49{ 49{
@@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to,
69 * Get devno/busid from given string. 69 * Get devno/busid from given string.
70 * Shamelessly grabbed from dasd_devmap.c. 70 * Shamelessly grabbed from dasd_devmap.c.
71 */ 71 */
72static inline int 72static int
73blacklist_busid(char **str, int *id0, int *ssid, int *devno) 73blacklist_busid(char **str, int *id0, int *ssid, int *devno)
74{ 74{
75 int val, old_style; 75 int val, old_style;
@@ -123,10 +123,10 @@ confused:
123 return 1; 123 return 1;
124} 124}
125 125
126static inline int 126static int
127blacklist_parse_parameters (char *str, range_action action) 127blacklist_parse_parameters (char *str, range_action action)
128{ 128{
129 unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; 129 int from, to, from_id0, to_id0, from_ssid, to_ssid;
130 130
131 while (*str != 0 && *str != '\n') { 131 while (*str != 0 && *str != '\n') {
132 range_action ra = action; 132 range_action ra = action;
@@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno)
227 * Function: blacklist_parse_proc_parameters 227 * Function: blacklist_parse_proc_parameters
228 * parse the stuff which is piped to /proc/cio_ignore 228 * parse the stuff which is piped to /proc/cio_ignore
229 */ 229 */
230static inline void 230static void
231blacklist_parse_proc_parameters (char *buf) 231blacklist_parse_proc_parameters (char *buf)
232{ 232{
233 if (strncmp (buf, "free ", 5) == 0) { 233 if (strncmp (buf, "free ", 5) == 0) {
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 38954f5cd14c..d48e3ca4752c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
53 53
54static struct bus_type ccwgroup_bus_type; 54static struct bus_type ccwgroup_bus_type;
55 55
56static inline void 56static void
57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
58{ 58{
59 int i; 59 int i;
@@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev)
104 kfree(gdev); 104 kfree(gdev);
105} 105}
106 106
107static inline int 107static int
108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
109{ 109{
110 char str[8]; 110 char str[8];
@@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev)
424 return 0; 424 return 0;
425} 425}
426 426
427static inline struct ccwgroup_device * 427static struct ccwgroup_device *
428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) 428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
429{ 429{
430 struct ccwgroup_device *gdev; 430 struct ccwgroup_device *gdev;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index cbab8d2ce5cf..6f05a44e3817 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
93 u16 sch; /* subchannel */ 93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */ 94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */ 95 u16 fla[8]; /* full link addresses 0-7 */
96 } *ssd_area; 96 } __attribute__ ((packed)) *ssd_area;
97 97
98 ssd_area = page; 98 ssd_area = page;
99 99
@@ -277,7 +277,7 @@ out_unreg:
277 return 0; 277 return 0;
278} 278}
279 279
280static inline void 280static void
281s390_set_chpid_offline( __u8 chpid) 281s390_set_chpid_offline( __u8 chpid)
282{ 282{
283 char dbf_txt[15]; 283 char dbf_txt[15];
@@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
338 return 0x80 >> chp; 338 return 0x80 >> chp;
339} 339}
340 340
341static inline int 341static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 342s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 343{
344 struct schib schib; 344 struct schib schib;
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data)
444 u32 andesc[28]; 444 u32 andesc[28];
445 /* incident-specific information */ 445 /* incident-specific information */
446 u32 isinfo[28]; 446 u32 isinfo[28];
447 } *lir; 447 } __attribute__ ((packed)) *lir;
448 448
449 lir = data; 449 lir = data;
450 if (!(lir->iq&0x80)) 450 if (!(lir->iq&0x80))
@@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data)
461 return (u16) (lir->indesc[0]&0x000000ff); 461 return (u16) (lir->indesc[0]&0x000000ff);
462} 462}
463 463
464int 464struct chsc_sei_area {
465chsc_process_crw(void) 465 struct chsc_header request;
466 u32 reserved1;
467 u32 reserved2;
468 u32 reserved3;
469 struct chsc_header response;
470 u32 reserved4;
471 u8 flags;
472 u8 vf; /* validity flags */
473 u8 rs; /* reporting source */
474 u8 cc; /* content code */
475 u16 fla; /* full link address */
476 u16 rsid; /* reporting source id */
477 u32 reserved5;
478 u32 reserved6;
479 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
480 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed));
482
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{
485 int chpid;
486
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4)
490 return 0;
491 chpid = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else
495 s390_set_chpid_offline(chpid);
496
497 return 0;
498}
499
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
466{ 501{
467 int chpid, ret;
468 struct res_acc_data res_data; 502 struct res_acc_data res_data;
469 struct { 503 struct device *dev;
470 struct chsc_header request; 504 int status;
471 u32 reserved1; 505 int rc;
472 u32 reserved2; 506
473 u32 reserved3; 507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
474 struct chsc_header response; 508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
475 u32 reserved4; 509 if (sei_area->rs != 4)
476 u8 flags; 510 return 0;
477 u8 vf; /* validity flags */ 511 /* allocate a new channel path structure, if needed */
478 u8 rs; /* reporting source */ 512 status = get_chp_status(sei_area->rsid);
479 u8 cc; /* content code */ 513 if (status < 0)
480 u16 fla; /* full link address */ 514 new_channel_path(sei_area->rsid);
481 u16 rsid; /* reporting source id */ 515 else if (!status)
482 u32 reserved5; 516 return 0;
483 u32 reserved6; 517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
484 u32 ccdf[96]; /* content-code dependent field */ 518 memset(&res_data, 0, sizeof(struct res_acc_data));
485 /* ccdf has to be big enough for a link-incident record */ 519 res_data.chp = to_channelpath(dev);
486 } *sei_area; 520 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0)
523 /* full link address */
524 res_data.fla_mask = 0xffff;
525 else
526 /* link address */
527 res_data.fla_mask = 0xff00;
528 }
529 rc = s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533}
534
535static int chsc_process_sei(struct chsc_sei_area *sei_area)
536{
537 int rc;
538
539 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40)
541 CIO_CRW_EVENT(2, "chsc: event overflow\n");
542 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) {
545 case 1: /* link incident*/
546 rc = chsc_process_sei_link_incident(sei_area);
547 break;
548 case 2: /* i/o resource accessibility */
549 rc = chsc_process_sei_res_acc(sei_area);
550 break;
551 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc);
554 break;
555 }
556
557 return rc;
558}
559
560int chsc_process_crw(void)
561{
562 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
487 565
488 if (!sei_page) 566 if (!sei_page)
489 return 0; 567 return 0;
490 /* 568 /* Access to sei_page is serialized through machine check handler
491 * build the chsc request block for store event information 569 * thread, so no need for locking. */
492 * and do the call
493 * This function is only called by the machine check handler thread,
494 * so we don't need locking for the sei_page.
495 */
496 sei_area = sei_page; 570 sei_area = sei_page;
497 571
498 CIO_TRACE_EVENT( 2, "prcss"); 572 CIO_TRACE_EVENT( 2, "prcss");
499 ret = 0; 573 ret = 0;
500 do { 574 do {
501 int ccode, status;
502 struct device *dev;
503 memset(sei_area, 0, sizeof(*sei_area)); 575 memset(sei_area, 0, sizeof(*sei_area));
504 memset(&res_data, 0, sizeof(struct res_acc_data));
505 sei_area->request.length = 0x0010; 576 sei_area->request.length = 0x0010;
506 sei_area->request.code = 0x000e; 577 sei_area->request.code = 0x000e;
578 if (chsc(sei_area))
579 break;
507 580
508 ccode = chsc(sei_area); 581 if (sei_area->response.code == 0x0001) {
509 if (ccode > 0) 582 CIO_CRW_EVENT(4, "chsc: sei successful\n");
510 return 0; 583 rc = chsc_process_sei(sei_area);
511 584 if (rc)
512 switch (sei_area->response.code) { 585 ret = rc;
513 /* for debug purposes, check for problems */ 586 } else {
514 case 0x0001: 587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
515 CIO_CRW_EVENT(4, "chsc_process_crw: event information "
516 "successfully stored\n");
517 break; /* everything ok */
518 case 0x0002:
519 CIO_CRW_EVENT(2,
520 "chsc_process_crw: invalid command!\n");
521 return 0;
522 case 0x0003:
523 CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
524 "request block!\n");
525 return 0;
526 case 0x0005:
527 CIO_CRW_EVENT(2, "chsc_process_crw: no event "
528 "information stored\n");
529 return 0;
530 default:
531 CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
532 sei_area->response.code); 588 sei_area->response.code);
533 return 0; 589 ret = 0;
534 }
535
536 /* Check if we might have lost some information. */
537 if (sei_area->flags & 0x40)
538 CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
539 "has been lost due to overflow!\n");
540
541 if (sei_area->rs != 4) {
542 CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
543 "(%04X) isn't a chpid!\n",
544 sei_area->rsid);
545 continue;
546 }
547
548 /* which kind of information was stored? */
549 switch (sei_area->cc) {
550 case 1: /* link incident*/
551 CIO_CRW_EVENT(4, "chsc_process_crw: "
552 "channel subsystem reports link incident,"
553 " reporting source is chpid %x\n",
554 sei_area->rsid);
555 chpid = __get_chpid_from_lir(sei_area->ccdf);
556 if (chpid < 0)
557 CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
558 __FUNCTION__);
559 else
560 s390_set_chpid_offline(chpid);
561 break;
562
563 case 2: /* i/o resource accessibility */
564 CIO_CRW_EVENT(4, "chsc_process_crw: "
565 "channel subsystem reports some I/O "
566 "devices may have become accessible\n");
567 pr_debug("Data received after sei: \n");
568 pr_debug("Validity flags: %x\n", sei_area->vf);
569
570 /* allocate a new channel path structure, if needed */
571 status = get_chp_status(sei_area->rsid);
572 if (status < 0)
573 new_channel_path(sei_area->rsid);
574 else if (!status)
575 break;
576 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
577 res_data.chp = to_channelpath(dev);
578 pr_debug("chpid: %x", sei_area->rsid);
579 if ((sei_area->vf & 0xc0) != 0) {
580 res_data.fla = sei_area->fla;
581 if ((sei_area->vf & 0xc0) == 0xc0) {
582 pr_debug(" full link addr: %x",
583 sei_area->fla);
584 res_data.fla_mask = 0xffff;
585 } else {
586 pr_debug(" link addr: %x",
587 sei_area->fla);
588 res_data.fla_mask = 0xff00;
589 }
590 }
591 ret = s390_process_res_acc(&res_data);
592 pr_debug("\n\n");
593 put_device(dev);
594 break;
595
596 default: /* other stuff */
597 CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
598 sei_area->cc);
599 break; 590 break;
600 } 591 }
601 } while (sei_area->flags & 0x80); 592 } while (sei_area->flags & 0x80);
593
602 return ret; 594 return ret;
603} 595}
604 596
605static inline int 597static int
606__chp_add_new_sch(struct subchannel_id schid) 598__chp_add_new_sch(struct subchannel_id schid)
607{ 599{
608 struct schib schib; 600 struct schib schib;
609 int ret; 601 int ret;
610 602
611 if (stsch(schid, &schib)) 603 if (stsch_err(schid, &schib))
612 /* We're through */ 604 /* We're through */
613 return need_rescan ? -EAGAIN : -ENXIO; 605 return need_rescan ? -EAGAIN : -ENXIO;
614 606
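
The rewrite above turns the monolithic chsc_process_crw() into a fetch/dispatch loop: chsc_process_sei() inspects the flags byte (0x40 = event overflow, 0x80 = more event information pending) and hands content code 1 to the link-incident handler and content code 2 to the resource-accessibility handler. The same loop shape, reduced to a stand-alone sketch (the fetch step is a stub standing in for building and issuing the CHSC request block):

#include <stdint.h>
#include <stdio.h>

struct sei {
	uint8_t flags;
	uint8_t cc;	/* content code */
};

static int fetch_sei(struct sei *s)
{
	/* stub for chsc(): pretend one resource-accessibility event arrived */
	s->flags = 0x00;
	s->cc = 2;
	return 0;
}

static void process_sei(const struct sei *s)
{
	if (s->flags & 0x40)
		fprintf(stderr, "chsc: event overflow\n");
	switch (s->cc) {
	case 1:
		puts("link incident");
		break;
	case 2:
		puts("i/o resource accessibility");
		break;
	default:
		printf("unhandled sei content code %d\n", s->cc);
		break;
	}
}

int main(void)
{
	struct sei s;

	do {	/* same termination condition as chsc_process_crw() above */
		if (fetch_sei(&s))
			break;
		process_sei(&s);
	} while (s.flags & 0x80);
	return 0;
}
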
@@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on)
709 return chp_add(chpid); 701 return chp_add(chpid);
710} 702}
711 703
712static inline int check_for_io_on_path(struct subchannel *sch, int index) 704static int check_for_io_on_path(struct subchannel *sch, int index)
713{ 705{
714 int cc; 706 int cc;
715 707
@@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch)
741 sch->driver->termination(&sch->dev); 733 sch->driver->termination(&sch->dev);
742} 734}
743 735
744static inline void 736static void
745__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) 737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
746{ 738{
747 int chp, old_lpm; 739 int chp, old_lpm;
@@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = {
967static void 959static void
968chsc_remove_chp_cmg_attr(struct channel_path *chp) 960chsc_remove_chp_cmg_attr(struct channel_path *chp)
969{ 961{
970 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); 962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
971 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); 963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
972} 964}
973 965
974static int 966static int
@@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp)
976{ 968{
977 int ret; 969 int ret;
978 970
979 ret = sysfs_create_bin_file(&chp->dev.kobj, 971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
980 &chp_measurement_chars_attr);
981 if (ret) 972 if (ret)
982 return ret; 973 return ret;
983 ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); 974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
984 if (ret) 975 if (ret)
985 sysfs_remove_bin_file(&chp->dev.kobj, 976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
986 &chp_measurement_chars_attr);
987 return ret; 977 return ret;
988} 978}
989 979
@@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1042 u32 : 4; 1032 u32 : 4;
1043 u32 fmt : 4; 1033 u32 fmt : 4;
1044 u32 : 16; 1034 u32 : 16;
1045 } *secm_area; 1035 } __attribute__ ((packed)) *secm_area;
1046 int ret, ccode; 1036 int ret, ccode;
1047 1037
1048 secm_area = page; 1038 secm_area = page;
@@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid,
1253 struct chsc_header response; 1243 struct chsc_header response;
1254 u32 zeroes2; 1244 u32 zeroes2;
1255 struct channel_path_desc desc; 1245 struct channel_path_desc desc;
1256 } *scpd_area; 1246 } __attribute__ ((packed)) *scpd_area;
1257 1247
1258 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1248 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1259 if (!scpd_area) 1249 if (!scpd_area)
@@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1350 u32 cmg : 8; 1340 u32 cmg : 8;
1351 u32 zeroes3; 1341 u32 zeroes3;
1352 u32 data[NR_MEASUREMENT_CHARS]; 1342 u32 data[NR_MEASUREMENT_CHARS];
1353 } *scmc_area; 1343 } __attribute__ ((packed)) *scmc_area;
1354 1344
1355 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1345 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1356 if (!scmc_area) 1346 if (!scmc_area)
@@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code)
1517 u32 reserved5:4; 1507 u32 reserved5:4;
1518 u32 format2:4; 1508 u32 format2:4;
1519 u32 reserved6:24; 1509 u32 reserved6:24;
1520 } *sda_area; 1510 } __attribute__ ((packed)) *sda_area;
1521 1511
1522 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); 1512 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1523 if (!sda_area) 1513 if (!sda_area)
@@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void)
1569 u32 reserved4; 1559 u32 reserved4;
1570 u32 general_char[510]; 1560 u32 general_char[510];
1571 u32 chsc_char[518]; 1561 u32 chsc_char[518];
1572 } *scsc_area; 1562 } __attribute__ ((packed)) *scsc_area;
1573 1563
1574 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1564 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1575 if (!scsc_area) { 1565 if (!scsc_area) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index a259245780ae..0fb2b024208f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -10,17 +10,17 @@
10struct chsc_header { 10struct chsc_header {
11 u16 length; 11 u16 length;
12 u16 code; 12 u16 code;
13}; 13} __attribute__ ((packed));
14 14
15#define NR_MEASUREMENT_CHARS 5 15#define NR_MEASUREMENT_CHARS 5
16struct cmg_chars { 16struct cmg_chars {
17 u32 values[NR_MEASUREMENT_CHARS]; 17 u32 values[NR_MEASUREMENT_CHARS];
18}; 18} __attribute__ ((packed));
19 19
20#define NR_MEASUREMENT_ENTRIES 8 20#define NR_MEASUREMENT_ENTRIES 8
21struct cmg_entry { 21struct cmg_entry {
22 u32 values[NR_MEASUREMENT_ENTRIES]; 22 u32 values[NR_MEASUREMENT_ENTRIES];
23}; 23} __attribute__ ((packed));
24 24
25struct channel_path_desc { 25struct channel_path_desc {
26 u8 flags; 26 u8 flags;
@@ -31,7 +31,7 @@ struct channel_path_desc {
31 u8 zeroes; 31 u8 zeroes;
32 u8 chla; 32 u8 chla;
33 u8 chpp; 33 u8 chpp;
34}; 34} __attribute__ ((packed));
35 35
36struct channel_path { 36struct channel_path {
37 int id; 37 int id;
@@ -47,6 +47,9 @@ struct channel_path {
47extern void s390_process_css( void ); 47extern void s390_process_css( void );
48extern void chsc_validate_chpids(struct subchannel *); 48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int); 49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
50 53
51struct css_general_char { 54struct css_general_char {
52 u64 : 41; 55 u64 : 41;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ae1bf231d089..b3a56dc5f68a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch)
122 * Use tpi to get a pending interrupt, call the interrupt handler and 122 * Use tpi to get a pending interrupt, call the interrupt handler and
123 * return a pointer to the subchannel structure. 123 * return a pointer to the subchannel structure.
124 */ 124 */
125static inline int 125static int
126cio_tpi(void) 126cio_tpi(void)
127{ 127{
128 struct tpi_info *tpi_info; 128 struct tpi_info *tpi_info;
@@ -152,7 +152,7 @@ cio_tpi(void)
152 return 1; 152 return 1;
153} 153}
154 154
155static inline int 155static int
156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
157{ 157{
158 char dbf_text[15]; 158 char dbf_text[15];
@@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
585 * This device must not be known to Linux. So we simply 585 * This device must not be known to Linux. So we simply
586 * say that there is no device and return ENODEV. 586 * say that there is no device and return ENODEV.
587 */ 587 */
588 CIO_MSG_EVENT(0, "Blacklisted device detected " 588 CIO_MSG_EVENT(4, "Blacklisted device detected "
589 "at devno %04X, subchannel set %x\n", 589 "at devno %04X, subchannel set %x\n",
590 sch->schib.pmcw.dev, sch->schid.ssid); 590 sch->schib.pmcw.dev, sch->schid.ssid);
591 err = -ENODEV; 591 err = -ENODEV;
@@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs)
646 * Make sure that the i/o interrupt did not "overtake" 646 * Make sure that the i/o interrupt did not "overtake"
647 * the last HZ timer interrupt. 647 * the last HZ timer interrupt.
648 */ 648 */
649 account_ticks(); 649 account_ticks(S390_lowcore.int_clock);
650 /* 650 /*
651 * Get interrupt information from lowcore 651 * Get interrupt information from lowcore
652 */ 652 */
@@ -832,7 +832,7 @@ cio_get_console_subchannel(void)
832} 832}
833 833
834#endif 834#endif
835static inline int 835static int
836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
837{ 837{
838 int retry, cc; 838 int retry, cc;
@@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
850 return -EBUSY; /* uhm... */ 850 return -EBUSY; /* uhm... */
851} 851}
852 852
853static inline int 853/* we can't use the normal udelay here, since it enables external interrupts */
854
855static void udelay_reset(unsigned long usecs)
856{
857 uint64_t start_cc, end_cc;
858
859 asm volatile ("STCK %0" : "=m" (start_cc));
860 do {
861 cpu_relax();
862 asm volatile ("STCK %0" : "=m" (end_cc));
863 } while (((end_cc - start_cc)/4096) < usecs);
864}
865
866static int
854__clear_subchannel_easy(struct subchannel_id schid) 867__clear_subchannel_easy(struct subchannel_id schid)
855{ 868{
856 int retry; 869 int retry;
@@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid)
865 if (schid_equal(&ti.schid, &schid)) 878 if (schid_equal(&ti.schid, &schid))
866 return 0; 879 return 0;
867 } 880 }
868 udelay(100); 881 udelay_reset(100);
869 } 882 }
870 return -EBUSY; 883 return -EBUSY;
871} 884}
@@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
882 int rc; 895 int rc;
883 896
884 pgm_check_occured = 0; 897 pgm_check_occured = 0;
885 s390_reset_pgm_handler = cio_reset_pgm_check_handler; 898 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
886 rc = stsch(schid, addr); 899 rc = stsch(schid, addr);
887 s390_reset_pgm_handler = NULL; 900 s390_base_pgm_handler_fn = NULL;
888 901
889 /* The program check handler could have changed pgm_check_occured */ 902 /* The program check handler could have changed pgm_check_occured. */
890 barrier(); 903 barrier();
891 904
892 if (pgm_check_occured) 905 if (pgm_check_occured)
@@ -944,7 +957,7 @@ static void css_reset(void)
944 /* Reset subchannels. */ 957 /* Reset subchannels. */
945 for_each_subchannel(__shutdown_subchannel_easy, NULL); 958 for_each_subchannel(__shutdown_subchannel_easy, NULL);
946 /* Reset channel paths. */ 959 /* Reset channel paths. */
947 s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; 960 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
948 /* Enable channel report machine checks. */ 961 /* Enable channel report machine checks. */
949 __ctl_set_bit(14, 28); 962 __ctl_set_bit(14, 28);
950 /* Temporarily reenable machine checks. */ 963 /* Temporarily reenable machine checks. */
@@ -969,7 +982,7 @@ static void css_reset(void)
969 local_mcck_disable(); 982 local_mcck_disable();
970 /* Disable channel report machine checks. */ 983 /* Disable channel report machine checks. */
971 __ctl_clear_bit(14, 28); 984 __ctl_clear_bit(14, 28);
972 s390_reset_mcck_handler = NULL; 985 s390_base_mcck_handler_fn = NULL;
973} 986}
974 987
975static struct reset_call css_reset_call = { 988static struct reset_call css_reset_call = {
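
About the udelay_reset() helper added to cio.c above: on the reset path external interrupts must stay disabled, so the regular udelay() cannot be used; instead the code spins on the TOD clock read by STCK, and because the TOD value advances by 4096 units per microsecond the delta is divided by 4096. A rough portable sketch of the same busy-wait pattern (hypothetical name busy_udelay, user-space C, not kernel code):

#include <time.h>

/* Spin on a monotonic clock until the requested number of microseconds has
 * elapsed; no sleeping and no interrupt delivery is required. */
static void busy_udelay(unsigned long usecs)
{
	struct timespec start, now;
	long long elapsed_ns;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		/* a real implementation would also relax the CPU here */
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ns = (long long)(now.tv_sec - start.tv_sec) * 1000000000LL
			   + (now.tv_nsec - start.tv_nsec);
	} while (elapsed_ns < (long long)usecs * 1000LL);
}
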
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 828b2d334f0a..90b22faabbf7 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -519,8 +519,8 @@ struct cmb {
519/* insert a single device into the cmb_area list 519/* insert a single device into the cmb_area list
520 * called with cmb_area.lock held from alloc_cmb 520 * called with cmb_area.lock held from alloc_cmb
521 */ 521 */
522static inline int alloc_cmb_single (struct ccw_device *cdev, 522static int alloc_cmb_single(struct ccw_device *cdev,
523 struct cmb_data *cmb_data) 523 struct cmb_data *cmb_data)
524{ 524{
525 struct cmb *cmb; 525 struct cmb *cmb;
526 struct ccw_device_private *node; 526 struct ccw_device_private *node;
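
The change above is representative of the bulk of this series: dropping the explicit inline keyword from functions that are static within a single .c file. The usual rationale for such clean-ups is that the compiler already sees every caller of a static function and inlines it when that pays off, so the keyword adds little and can cause code bloat when a large function is inlined into many call sites. A trivial illustration of the resulting style:

/* Local helper: no "inline" keyword, the compiler decides on its own whether
 * inlining each call is worthwhile.  (Illustrative only, arbitrary constant.) */
static int add_header_len(int payload_len)
{
	return payload_len + 16;
}
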
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9d6c02446863..fe0ace7aece8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1];
30 30
31int css_characteristics_avail = 0; 31int css_characteristics_avail = 0;
32 32
33inline int 33int
34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
35{ 35{
36 struct subchannel_id schid; 36 struct subchannel_id schid;
@@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev)
108 } 108 }
109} 109}
110 110
111extern int css_get_ssd_info(struct subchannel *sch);
112
113
114int css_sch_device_register(struct subchannel *sch) 111int css_sch_device_register(struct subchannel *sch)
115{ 112{
116 int ret; 113 int ret;
@@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
187 return dev ? to_subchannel(dev) : NULL; 184 return dev ? to_subchannel(dev) : NULL;
188} 185}
189 186
190static inline int css_get_subchannel_status(struct subchannel *sch) 187static int css_get_subchannel_status(struct subchannel *sch)
191{ 188{
192 struct schib schib; 189 struct schib schib;
193 190
@@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
299 /* Will be done on the slow path. */ 296 /* Will be done on the slow path. */
300 return -EAGAIN; 297 return -EAGAIN;
301 } 298 }
302 if (stsch(schid, &schib) || !schib.pmcw.dnv) { 299 if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
303 /* Unusable - ignore. */ 300 /* Unusable - ignore. */
304 return 0; 301 return 0;
305 } 302 }
@@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused)
417 need_reprobe); 414 need_reprobe);
418} 415}
419 416
420DECLARE_WORK(css_reprobe_work, reprobe_all); 417static DECLARE_WORK(css_reprobe_work, reprobe_all);
421 418
422/* Schedule reprobing of all unregistered subchannels. */ 419/* Schedule reprobing of all unregistered subchannels. */
423void css_schedule_reprobe(void) 420void css_schedule_reprobe(void)
@@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
578 575
579static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); 576static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
580 577
581static inline int __init setup_css(int nr) 578static int __init setup_css(int nr)
582{ 579{
583 u32 tod_high; 580 u32 tod_high;
584 int ret; 581 int ret;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 3464c5b875c4..ca2bab932a8a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 144extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int);
147extern void css_reiterate_subchannels(void);
146 148
147#define __MAX_SUBCHANNEL 65535 149#define __MAX_SUBCHANNEL 65535
148#define __MAX_SSID 3 150#define __MAX_SSID 3
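
These header additions pair with the removal of in-.c-file extern declarations elsewhere in the series (for example the extern int css_get_ssd_info(...) line dropped from css.c above): a function shared between compilation units gets exactly one prototype, in a header included by both the defining and the using file, so the compiler can check that declaration and definition agree. A minimal sketch of the pattern with hypothetical names, shown as three tiny files:

/* widget.h — the single, shared prototype */
int widget_count(void);

/* widget.c — defines the function; including the header lets the compiler
 * verify that the definition matches the declaration. */
#include "widget.h"
int widget_count(void)
{
	return 42;
}

/* user.c — calls the function through the same header instead of repeating
 * an extern declaration locally. */
#include "widget.h"
int twice_the_widgets(void)
{
	return 2 * widget_count();
}
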
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 803579053c2f..e322111fb369 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -138,7 +138,6 @@ struct bus_type ccw_bus_type;
138 138
139static int io_subchannel_probe (struct subchannel *); 139static int io_subchannel_probe (struct subchannel *);
140static int io_subchannel_remove (struct subchannel *); 140static int io_subchannel_remove (struct subchannel *);
141void io_subchannel_irq (struct device *);
142static int io_subchannel_notify(struct device *, int); 141static int io_subchannel_notify(struct device *, int);
143static void io_subchannel_verify(struct device *); 142static void io_subchannel_verify(struct device *);
144static void io_subchannel_ioterm(struct device *); 143static void io_subchannel_ioterm(struct device *);
@@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
235 ssize_t ret = 0; 234 ssize_t ret = 0;
236 int chp; 235 int chp;
237 236
238 if (ssd) 237 for (chp = 0; chp < 8; chp++)
239 for (chp = 0; chp < 8; chp++) 238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
240 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
241 else
242 ret += sprintf (buf, "n/a");
243 ret += sprintf (buf+ret, "\n"); 239 ret += sprintf (buf+ret, "\n");
244 return min((ssize_t)PAGE_SIZE, ret); 240 return min((ssize_t)PAGE_SIZE, ret);
245} 241}
@@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = {
552 .attrs = ccwdev_attrs, 548 .attrs = ccwdev_attrs,
553}; 549};
554 550
555static inline int 551static int
556device_add_files (struct device *dev) 552device_add_files (struct device *dev)
557{ 553{
558 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
559} 555}
560 556
561static inline void 557static void
562device_remove_files(struct device *dev) 558device_remove_files(struct device *dev)
563{ 559{
564 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); 560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 29db6341d632..b66338b76579 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work;
74extern wait_queue_head_t ccw_device_init_wq; 74extern wait_queue_head_t ccw_device_init_wq;
75extern atomic_t ccw_device_init_count; 75extern atomic_t ccw_device_init_count;
76 76
77void io_subchannel_irq (struct device *pdev);
77void io_subchannel_recog_done(struct ccw_device *cdev); 78void io_subchannel_recog_done(struct ccw_device *cdev);
78 79
79int ccw_device_cancel_halt_clear(struct ccw_device *); 80int ccw_device_cancel_halt_clear(struct ccw_device *);
@@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *);
118/* qdio needs this. */ 119/* qdio needs this. */
119void ccw_device_set_timeout(struct ccw_device *, int); 120void ccw_device_set_timeout(struct ccw_device *, int);
120extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 121extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
122extern struct bus_type ccw_bus_type;
121 123
122/* Channel measurement facility related */ 124/* Channel measurement facility related */
123void retry_set_schib(struct ccw_device *cdev); 125void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index eed14572fc3b..51238e7555bb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
206 * been varied online on the SE so we have to find out by magic (i. e. driving 206 * been varied online on the SE so we have to find out by magic (i. e. driving
207 * the channel subsystem to device selection and updating our path masks). 207 * the channel subsystem to device selection and updating our path masks).
208 */ 208 */
209static inline void 209static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 210__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 211{
212 int mask, i; 212 int mask, i;
@@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
387 put_device (&cdev->dev); 387 put_device (&cdev->dev);
388} 388}
389 389
390static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) 390static int cmp_pgid(struct pgid *p1, struct pgid *p2)
391{ 391{
392 char *c1; 392 char *c1;
393 char *c2; 393 char *c2;
@@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
842call_handler_unsol: 842call_handler_unsol:
843 if (cdev->handler) 843 if (cdev->handler)
844 cdev->handler (cdev, 0, irb); 844 cdev->handler (cdev, 0, irb);
845 if (cdev->private->flags.doverify)
846 ccw_device_online_verify(cdev, 0);
845 return; 847 return;
846 } 848 }
847 /* Accumulate status and find out if a basic sense is needed. */ 849 /* Accumulate status and find out if a basic sense is needed. */
@@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
892/* 894/*
893 * Got an interrupt for a basic sense. 895 * Got an interrupt for a basic sense.
894 */ 896 */
895void 897static void
896ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) 898ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
897{ 899{
898 struct irb *irb; 900 struct irb *irb;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d269607336ec..d7b25b8f71d2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
302 wake_up(&cdev->private->wait_q); 302 wake_up(&cdev->private->wait_q);
303} 303}
304 304
305static inline int 305static int
306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) 306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
307{ 307{
308 int ret; 308 int ret;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index bdcf930f7beb..6b1caea622ea 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -25,7 +25,7 @@
25 * Check for any kind of channel or interface control check but don't 25 * Check for any kind of channel or interface control check but don't
26 * issue the message for the console device 26 * issue the message for the console device
27 */ 27 */
28static inline void 28static void
29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
30{ 30{
31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
@@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
72/* 72/*
73 * Copy valid bits from the extended control word to device irb. 73 * Copy valid bits from the extended control word to device irb.
74 */ 74 */
75static inline void 75static void
76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) 76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
77{ 77{
78 /* 78 /*
@@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
94/* 94/*
95 * Check if extended status word is valid. 95 * Check if extended status word is valid.
96 */ 96 */
97static inline int 97static int
98ccw_device_accumulate_esw_valid(struct irb *irb) 98ccw_device_accumulate_esw_valid(struct irb *irb)
99{ 99{
100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
@@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb)
109/* 109/*
110 * Copy valid bits from the extended status word to device irb. 110 * Copy valid bits from the extended status word to device irb.
111 */ 111 */
112static inline void 112static void
113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) 113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
114{ 114{
115 struct irb *cdev_irb; 115 struct irb *cdev_irb;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 6fd1940842eb..d726cd5777de 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -66,7 +66,6 @@ MODULE_LICENSE("GPL");
66/******************** HERE WE GO ***********************************/ 66/******************** HERE WE GO ***********************************/
67 67
68static const char version[] = "QDIO base support version 2"; 68static const char version[] = "QDIO base support version 2";
69extern struct bus_type ccw_bus_type;
70 69
71static int qdio_performance_stats = 0; 70static int qdio_performance_stats = 0;
72static int proc_perf_file_registration; 71static int proc_perf_file_registration;
@@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q)
138} 137}
139 138
140/*check ccq */ 139/*check ccq */
141static inline int 140static int
142qdio_check_ccq(struct qdio_q *q, unsigned int ccq) 141qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
143{ 142{
144 char dbf_text[15]; 143 char dbf_text[15];
@@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
153 return -EIO; 152 return -EIO;
154} 153}
155/* EQBS: extract buffer states */ 154/* EQBS: extract buffer states */
156static inline int 155static int
157qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 156qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
158 unsigned int *start, unsigned int *cnt) 157 unsigned int *start, unsigned int *cnt)
159{ 158{
@@ -188,7 +187,7 @@ again:
188} 187}
189 188
190/* SQBS: set buffer states */ 189/* SQBS: set buffer states */
191static inline int 190static int
192qdio_do_sqbs(struct qdio_q *q, unsigned char state, 191qdio_do_sqbs(struct qdio_q *q, unsigned char state,
193 unsigned int *start, unsigned int *cnt) 192 unsigned int *start, unsigned int *cnt)
194{ 193{
@@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns 314 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
316 * an access exception 315 * an access exception
317 */ 316 */
318static inline int 317static int
319qdio_siga_output(struct qdio_q *q) 318qdio_siga_output(struct qdio_q *q)
320{ 319{
321 int cc; 320 int cc;
@@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q)
349 return cc; 348 return cc;
350} 349}
351 350
352static inline int 351static int
353qdio_siga_input(struct qdio_q *q) 352qdio_siga_input(struct qdio_q *q)
354{ 353{
355 int cc; 354 int cc;
@@ -421,7 +420,7 @@ tiqdio_sched_tl(void)
421 tasklet_hi_schedule(&tiqdio_tasklet); 420 tasklet_hi_schedule(&tiqdio_tasklet);
422} 421}
423 422
424static inline void 423static void
425qdio_mark_tiq(struct qdio_q *q) 424qdio_mark_tiq(struct qdio_q *q)
426{ 425{
427 unsigned long flags; 426 unsigned long flags;
@@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q)
471 tasklet_schedule(&q->tasklet); 470 tasklet_schedule(&q->tasklet);
472} 471}
473 472
474static inline int 473static int
475qdio_stop_polling(struct qdio_q *q) 474qdio_stop_polling(struct qdio_q *q)
476{ 475{
477#ifdef QDIO_USE_PROCESSING_STATE 476#ifdef QDIO_USE_PROCESSING_STATE
@@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q)
525 * sophisticated locking outside of unmark_q, so that we don't need to 524 * sophisticated locking outside of unmark_q, so that we don't need to
526 * disable the interrupts :-) 525 * disable the interrupts :-)
527*/ 526*/
528static inline void 527static void
529qdio_unmark_q(struct qdio_q *q) 528qdio_unmark_q(struct qdio_q *q)
530{ 529{
531 unsigned long flags; 530 unsigned long flags;
@@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
691 return q->first_to_check; 690 return q->first_to_check;
692} 691}
693 692
694static inline int 693static int
695qdio_get_outbound_buffer_frontier(struct qdio_q *q) 694qdio_get_outbound_buffer_frontier(struct qdio_q *q)
696{ 695{
697 struct qdio_irq *irq; 696 struct qdio_irq *irq;
@@ -774,7 +773,7 @@ out:
774} 773}
775 774
776/* all buffers are processed */ 775/* all buffers are processed */
777static inline int 776static int
778qdio_is_outbound_q_done(struct qdio_q *q) 777qdio_is_outbound_q_done(struct qdio_q *q)
779{ 778{
780 int no_used; 779 int no_used;
@@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q)
796 return (no_used==0); 795 return (no_used==0);
797} 796}
798 797
799static inline int 798static int
800qdio_has_outbound_q_moved(struct qdio_q *q) 799qdio_has_outbound_q_moved(struct qdio_q *q)
801{ 800{
802 int i; 801 int i;
@@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q)
816 } 815 }
817} 816}
818 817
819static inline void 818static void
820qdio_kick_outbound_q(struct qdio_q *q) 819qdio_kick_outbound_q(struct qdio_q *q)
821{ 820{
822 int result; 821 int result;
@@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q)
905 } 904 }
906} 905}
907 906
908static inline void 907static void
909qdio_kick_outbound_handler(struct qdio_q *q) 908qdio_kick_outbound_handler(struct qdio_q *q)
910{ 909{
911 int start, end, real_end, count; 910 int start, end, real_end, count;
@@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q)
942 q->error_status_flags=0; 941 q->error_status_flags=0;
943} 942}
944 943
945static inline void 944static void
946__qdio_outbound_processing(struct qdio_q *q) 945__qdio_outbound_processing(struct qdio_q *q)
947{ 946{
948 int siga_attempts; 947 int siga_attempts;
@@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q)
1002/************************* INBOUND ROUTINES *******************************/ 1001/************************* INBOUND ROUTINES *******************************/
1003 1002
1004 1003
1005static inline int 1004static int
1006qdio_get_inbound_buffer_frontier(struct qdio_q *q) 1005qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1007{ 1006{
1008 struct qdio_irq *irq; 1007 struct qdio_irq *irq;
@@ -1133,7 +1132,7 @@ out:
1133 return q->first_to_check; 1132 return q->first_to_check;
1134} 1133}
1135 1134
1136static inline int 1135static int
1137qdio_has_inbound_q_moved(struct qdio_q *q) 1136qdio_has_inbound_q_moved(struct qdio_q *q)
1138{ 1137{
1139 int i; 1138 int i;
@@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
1167} 1166}
1168 1167
1169/* means, no more buffers to be filled */ 1168/* means, no more buffers to be filled */
1170static inline int 1169static int
1171tiqdio_is_inbound_q_done(struct qdio_q *q) 1170tiqdio_is_inbound_q_done(struct qdio_q *q)
1172{ 1171{
1173 int no_used; 1172 int no_used;
@@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
1228 return 0; 1227 return 0;
1229} 1228}
1230 1229
1231static inline int 1230static int
1232qdio_is_inbound_q_done(struct qdio_q *q) 1231qdio_is_inbound_q_done(struct qdio_q *q)
1233{ 1232{
1234 int no_used; 1233 int no_used;
@@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q)
1296 } 1295 }
1297} 1296}
1298 1297
1299static inline void 1298static void
1300qdio_kick_inbound_handler(struct qdio_q *q) 1299qdio_kick_inbound_handler(struct qdio_q *q)
1301{ 1300{
1302 int count, start, end, real_end, i; 1301 int count, start, end, real_end, i;
@@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
1343 } 1342 }
1344} 1343}
1345 1344
1346static inline void 1345static void
1347__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) 1346__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1348{ 1347{
1349 struct qdio_irq *irq_ptr; 1348 struct qdio_irq *irq_ptr;
@@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q)
1442 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); 1441 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1443} 1442}
1444 1443
1445static inline void 1444static void
1446__qdio_inbound_processing(struct qdio_q *q) 1445__qdio_inbound_processing(struct qdio_q *q)
1447{ 1446{
1448 int q_laps=0; 1447 int q_laps=0;
@@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q)
1493/************************* MAIN ROUTINES *******************************/ 1492/************************* MAIN ROUTINES *******************************/
1494 1493
1495#ifdef QDIO_USE_PROCESSING_STATE 1494#ifdef QDIO_USE_PROCESSING_STATE
1496static inline int 1495static int
1497tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) 1496tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1498{ 1497{
1499 if (!q) { 1498 if (!q) {
@@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1545} 1544}
1546#endif /* QDIO_USE_PROCESSING_STATE */ 1545#endif /* QDIO_USE_PROCESSING_STATE */
1547 1546
1548static inline void 1547static void
1549tiqdio_inbound_checks(void) 1548tiqdio_inbound_checks(void)
1550{ 1549{
1551 struct qdio_q *q; 1550 struct qdio_q *q;
@@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1949 mb(); 1948 mb();
1950} 1949}
1951 1950
1952static inline void 1951static void
1953qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) 1952qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1954{ 1953{
1955 char dbf_text[15]; 1954 char dbf_text[15];
@@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1966 1965
1967} 1966}
1968 1967
1969static inline void 1968static void
1970qdio_handle_pci(struct qdio_irq *irq_ptr) 1969qdio_handle_pci(struct qdio_irq *irq_ptr)
1971{ 1970{
1972 int i; 1971 int i;
@@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
2002 2001
2003static void qdio_establish_handle_irq(struct ccw_device*, int, int); 2002static void qdio_establish_handle_irq(struct ccw_device*, int, int);
2004 2003
2005static inline void 2004static void
2006qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, 2005qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
2007 int cstat, int dstat) 2006 int cstat, int dstat)
2008{ 2007{
@@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2229 return cc; 2228 return cc;
2230} 2229}
2231 2230
2232static inline void 2231static void
2233qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, 2232qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2234 unsigned long token) 2233 unsigned long token)
2235{ 2234{
@@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev)
2740 return 0; 2739 return 0;
2741} 2740}
2742 2741
2743static inline void 2742static void
2744qdio_allocate_do_dbf(struct qdio_initialize *init_data) 2743qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2745{ 2744{
2746 char dbf_text[20]; /* if a printf printed out more than 8 chars */ 2745 char dbf_text[20]; /* if a printf printed out more than 8 chars */
@@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2773 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); 2772 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2774} 2773}
2775 2774
2776static inline void 2775static void
2777qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) 2776qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2778{ 2777{
2779 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; 2778 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
@@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2792 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; 2791 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2793} 2792}
2794 2793
2795static inline void 2794static void
2796qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, 2795qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2797 int j, int iqfmt) 2796 int j, int iqfmt)
2798{ 2797{
@@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2813} 2812}
2814 2813
2815 2814
2816static inline void 2815static void
2817qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) 2816qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2818{ 2817{
2819 int i; 2818 int i;
@@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2839 } 2838 }
2840} 2839}
2841 2840
2842static inline void 2841static void
2843qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) 2842qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2844{ 2843{
2845 int i; 2844 int i;
@@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2865 } 2864 }
2866} 2865}
2867 2866
2868static inline int 2867static int
2869qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, 2868qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2870 int dstat) 2869 int dstat)
2871{ 2870{
@@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data)
3014 return 0; 3013 return 0;
3015} 3014}
3016 3015
3017int qdio_fill_irq(struct qdio_initialize *init_data) 3016static int qdio_fill_irq(struct qdio_initialize *init_data)
3018{ 3017{
3019 int i; 3018 int i;
3020 char dbf_text[15]; 3019 char dbf_text[15];
@@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
3367} 3366}
3368 3367
3369/* buffers filled forwards again to make Rick happy */ 3368/* buffers filled forwards again to make Rick happy */
3370static inline void 3369static void
3371qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, 3370qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3372 unsigned int count, struct qdio_buffer *buffers) 3371 unsigned int count, struct qdio_buffer *buffers)
3373{ 3372{
@@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3386 } 3385 }
3387} 3386}
3388 3387
3389static inline void 3388static void
3390qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, 3389qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3391 unsigned int count, struct qdio_buffer *buffers) 3390 unsigned int count, struct qdio_buffer *buffers)
3392{ 3391{
@@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3407 } 3406 }
3408} 3407}
3409 3408
3410static inline void 3409static void
3411do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, 3410do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3412 unsigned int qidx, unsigned int count, 3411 unsigned int qidx, unsigned int count,
3413 struct qdio_buffer *buffers) 3412 struct qdio_buffer *buffers)
@@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3443 qdio_mark_q(q); 3442 qdio_mark_q(q);
3444} 3443}
3445 3444
3446static inline void 3445static void
3447do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, 3446do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3448 unsigned int qidx, unsigned int count, 3447 unsigned int qidx, unsigned int count,
3449 struct qdio_buffer *buffers) 3448 struct qdio_buffer *buffers)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 81b5899f4010..c7d1355237b6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev)
465 * Flush all requests from the request/pending queue of an AP device. 465 * Flush all requests from the request/pending queue of an AP device.
466 * @ap_dev: pointer to the AP device. 466 * @ap_dev: pointer to the AP device.
467 */ 467 */
468static inline void __ap_flush_queue(struct ap_device *ap_dev) 468static void __ap_flush_queue(struct ap_device *ap_dev)
469{ 469{
470 struct ap_message *ap_msg, *next; 470 struct ap_message *ap_msg, *next;
471 471
@@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
587/** 587/**
588 * Pick one of the 16 ap domains. 588 * Pick one of the 16 ap domains.
589 */ 589 */
590static inline int ap_select_domain(void) 590static int ap_select_domain(void)
591{ 591{
592 int queue_depth, device_type, count, max_count, best_domain; 592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j; 593 int rc, i, j;
@@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void)
825 * required, bit 2^1 is set if the poll timer needs to get armed 825 * required, bit 2^1 is set if the poll timer needs to get armed
826 * Returns 0 if the device is still present, -ENODEV if not. 826 * Returns 0 if the device is still present, -ENODEV if not.
827 */ 827 */
828static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 828static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
829{ 829{
830 struct ap_queue_status status; 830 struct ap_queue_status status;
831 struct ap_message *ap_msg; 831 struct ap_message *ap_msg;
@@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
872 * required, bit 2^1 is set if the poll timer needs to get armed 872 * required, bit 2^1 is set if the poll timer needs to get armed
873 * Returns 0 if the device is still present, -ENODEV if not. 873 * Returns 0 if the device is still present, -ENODEV if not.
874 */ 874 */
875static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 875static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
876{ 876{
877 struct ap_queue_status status; 877 struct ap_queue_status status;
878 struct ap_message *ap_msg; 878 struct ap_message *ap_msg;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 1edc10a7a6f2..b9e59bc9435a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
791 return rc; 791 return rc;
792} 792}
793 793
794long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 794static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
795 unsigned long arg) 795 unsigned long arg)
796{ 796{
797 if (cmd == ICARSAMODEXPO) 797 if (cmd == ICARSAMODEXPO)
@@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = {
833 */ 833 */
834static struct proc_dir_entry *zcrypt_entry; 834static struct proc_dir_entry *zcrypt_entry;
835 835
836static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, 836static int sprintcl(unsigned char *outaddr, unsigned char *addr,
837 unsigned int len) 837 unsigned int len)
838{ 838{
839 int hl, i; 839 int hl, i;
840 840
@@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
845 return hl; 845 return hl;
846} 846}
847 847
848static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, 848static int sprintrw(unsigned char *outaddr, unsigned char *addr,
849 unsigned int len) 849 unsigned int len)
850{ 850{
851 int hl, inl, c, cx; 851 int hl, inl, c, cx;
852 852
@@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
865 return hl; 865 return hl;
866} 866}
867 867
868static inline int sprinthx(unsigned char *title, unsigned char *outaddr, 868static int sprinthx(unsigned char *title, unsigned char *outaddr,
869 unsigned char *addr, unsigned int len) 869 unsigned char *addr, unsigned int len)
870{ 870{
871 int hl, inl, r, rx; 871 int hl, inl, r, rx;
872 872
@@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
885 return hl; 885 return hl;
886} 886}
887 887
888static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, 888static int sprinthx4(unsigned char *title, unsigned char *outaddr,
889 unsigned int *array, unsigned int len) 889 unsigned int *array, unsigned int len)
890{ 890{
891 int hl, r; 891 int hl, r;
892 892
@@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
943 zcrypt_qdepth_mask(workarea); 943 zcrypt_qdepth_mask(workarea);
944 len += sprinthx("Waiting work element counts", 944 len += sprinthx("Waiting work element counts",
945 resp_buff+len, workarea, AP_DEVICES); 945 resp_buff+len, workarea, AP_DEVICES);
946 zcrypt_perdev_reqcnt((unsigned int *) workarea); 946 zcrypt_perdev_reqcnt((int *) workarea);
947 len += sprinthx4("Per-device successfully completed request counts", 947 len += sprinthx4("Per-device successfully completed request counts",
948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES); 948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
949 *eof = 1; 949 *eof = 1;
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 32e37014345c..818ffe05ac00 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
191 * 191 *
192 * Returns 0 on success or -EFAULT. 192 * Returns 0 on success or -EFAULT.
193 */ 193 */
194static inline int convert_type84(struct zcrypt_device *zdev, 194static int convert_type84(struct zcrypt_device *zdev,
195 struct ap_message *reply, 195 struct ap_message *reply,
196 char __user *outputdata, 196 char __user *outputdata,
197 unsigned int outputdatalength) 197 unsigned int outputdatalength)
198{ 198{
199 struct type84_hdr *t84h = reply->message; 199 struct type84_hdr *t84h = reply->message;
200 char *data; 200 char *data;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index b7153c1e15cd..252443b6bd1b 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -709,7 +709,8 @@ out_free:
709 * PCIXCC/CEX2C device to the request distributor 709 * PCIXCC/CEX2C device to the request distributor
710 * @xcRB: pointer to the send_cprb request buffer 710 * @xcRB: pointer to the send_cprb request buffer
711 */ 711 */
712long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) 712static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
713 struct ica_xcRB *xcRB)
713{ 714{
714 struct ap_message ap_msg; 715 struct ap_message ap_msg;
715 struct response_type resp_type = { 716 struct response_type resp_type = {
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 95f4e105cb96..7809a79feec7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -121,7 +121,7 @@ MODULE_LICENSE("GPL");
121#define DEBUG 121#define DEBUG
122#endif 122#endif
123 123
124 char debug_buffer[255]; 124static char debug_buffer[255];
125/** 125/**
126 * Debug Facility Stuff 126 * Debug Facility Stuff
127 */ 127 */
@@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch );
223/* Functions */ 223/* Functions */
224static int add_claw_reads(struct net_device *dev, 224static int add_claw_reads(struct net_device *dev,
225 struct ccwbk* p_first, struct ccwbk* p_last); 225 struct ccwbk* p_first, struct ccwbk* p_last);
226static void inline ccw_check_return_code (struct ccw_device *cdev, 226static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
227 int return_code); 227static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
228static void inline ccw_check_unit_check (struct chbk * p_ch,
229 unsigned char sense );
230static int find_link(struct net_device *dev, char *host_name, char *ws_name ); 228static int find_link(struct net_device *dev, char *host_name, char *ws_name );
231static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); 229static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
232static int init_ccw_bk(struct net_device *dev); 230static int init_ccw_bk(struct net_device *dev);
233static void probe_error( struct ccwgroup_device *cgdev); 231static void probe_error( struct ccwgroup_device *cgdev);
234static struct net_device_stats *claw_stats(struct net_device *dev); 232static struct net_device_stats *claw_stats(struct net_device *dev);
235static int inline pages_to_order_of_mag(int num_of_pages); 233static int pages_to_order_of_mag(int num_of_pages);
236static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); 234static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
237#ifdef DEBUG 235#ifdef DEBUG
238static void dumpit (char *buf, int len); 236static void dumpit (char *buf, int len);
@@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch )
1310* of magnitude get_free_pages() has an upper order of 9 * 1308* of magnitude get_free_pages() has an upper order of 9 *
1311*--------------------------------------------------------------------*/ 1309*--------------------------------------------------------------------*/
1312 1310
1313static int inline 1311static int
1314pages_to_order_of_mag(int num_of_pages) 1312pages_to_order_of_mag(int num_of_pages)
1315{ 1313{
1316 int order_of_mag=1; /* assume 2 pages */ 1314 int order_of_mag=1; /* assume 2 pages */
@@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1482 * * 1480 * *
1483 *-------------------------------------------------------------------*/ 1481 *-------------------------------------------------------------------*/
1484 1482
1485static void inline 1483static void
1486ccw_check_return_code(struct ccw_device *cdev, int return_code) 1484ccw_check_return_code(struct ccw_device *cdev, int return_code)
1487{ 1485{
1488#ifdef FUNCTRACE 1486#ifdef FUNCTRACE
@@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1529* ccw_check_unit_check * 1527* ccw_check_unit_check *
1530*--------------------------------------------------------------------*/ 1528*--------------------------------------------------------------------*/
1531 1529
1532static void inline 1530static void
1533ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) 1531ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1534{ 1532{
1535 struct net_device *dev = p_ch->ndev; 1533 struct net_device *dev = p_ch->ndev;
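
pages_to_order_of_mag(), touched above, maps a page count onto the order argument that __get_free_pages() expects: the smallest power-of-two number of pages that covers the request, capped at order 9 as the driver's own comment notes. A stand-alone sketch of that mapping (hypothetical helper, not the driver's code):

/* Return the smallest order such that (1 << order) pages >= num_of_pages,
 * capped at 9, the usual upper limit for __get_free_pages() allocations. */
static int pages_to_order(int num_of_pages)
{
	int order = 0;

	while ((1 << order) < num_of_pages && order < 9)
		order++;
	return order;
}

/* Example: 1 page -> order 0, 2 pages -> order 1, 3..4 pages -> order 2,
 * anything above 512 pages -> capped at order 9. */
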
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 03cc263fe0da..5a84fbbc6611 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset)
369 * @param ch The channel where this skb has been received. 369 * @param ch The channel where this skb has been received.
370 * @param pskb The received skb. 370 * @param pskb The received skb.
371 */ 371 */
372static __inline__ void 372static void
373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) 373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
374{ 374{
375 struct net_device *dev = ch->netdev; 375 struct net_device *dev = ch->netdev;
@@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
512 * @param ch The channel, the error belongs to. 512 * @param ch The channel, the error belongs to.
513 * @param return_code The error code to inspect. 513 * @param return_code The error code to inspect.
514 */ 514 */
515static void inline 515static void
516ccw_check_return_code(struct channel *ch, int return_code, char *msg) 516ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517{ 517{
518 DBF_TEXT(trace, 5, __FUNCTION__); 518 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg)
547 * @param ch The channel, the sense code belongs to. 547 * @param ch The channel, the sense code belongs to.
548 * @param sense The sense code to inspect. 548 * @param sense The sense code to inspect.
549 */ 549 */
550static void inline 550static void
551ccw_unit_check(struct channel *ch, unsigned char sense) 551ccw_unit_check(struct channel *ch, unsigned char sense)
552{ 552{
553 DBF_TEXT(trace, 5, __FUNCTION__); 553 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q)
603 } 603 }
604} 604}
605 605
606static __inline__ int 606static int
607ctc_checkalloc_buffer(struct channel *ch, int warn) 607ctc_checkalloc_buffer(struct channel *ch, int warn)
608{ 608{
609 DBF_TEXT(trace, 5, __FUNCTION__); 609 DBF_TEXT(trace, 5, __FUNCTION__);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index e965f03a7291..76728ae4b843 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = {
57 57
58static struct ccw_driver cu3088_driver; 58static struct ccw_driver cu3088_driver;
59 59
60struct device *cu3088_root_dev; 60static struct device *cu3088_root_dev;
61 61
62static ssize_t 62static ssize_t
63group_write(struct device_driver *drv, const char *buf, size_t count) 63group_write(struct device_driver *drv, const char *buf, size_t count)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index e5665b6743a1..b97dd15bdb9a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
828/** 828/**
829 * Emit buffer of a lan comand. 829 * Emit buffer of a lan comand.
830 */ 830 */
831void 831static void
832lcs_lancmd_timeout(unsigned long data) 832lcs_lancmd_timeout(unsigned long data)
833{ 833{
834 struct lcs_reply *reply, *list_reply, *r; 834 struct lcs_reply *reply, *list_reply, *r;
@@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1360 return 0; 1360 return 0;
1361} 1361}
1362 1362
1363void 1363static void
1364lcs_schedule_recovery(struct lcs_card *card) 1364lcs_schedule_recovery(struct lcs_card *card)
1365{ 1365{
1366 LCS_DBF_TEXT(2, trace, "startrec"); 1366 LCS_DBF_TEXT(2, trace, "startrec");
@@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
1990 1990
1991} 1991}
1992 1992
1993DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); 1993static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
1994 1994
1995static ssize_t 1995static ssize_t
1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index d7d1cc0a5c8e..3346088f47e0 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2053,7 +2053,7 @@ out_free_ndev:
2053 return ret; 2053 return ret;
2054} 2054}
2055 2055
2056DRIVER_ATTR(connection, 0200, NULL, conn_write); 2056static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2057 2057
2058static ssize_t 2058static ssize_t
2059remove_write (struct device_driver *drv, const char *buf, size_t count) 2059remove_write (struct device_driver *drv, const char *buf, size_t count)
@@ -2112,7 +2112,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2112 return -EINVAL; 2112 return -EINVAL;
2113} 2113}
2114 2114
2115DRIVER_ATTR(remove, 0200, NULL, remove_write); 2115static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2116 2116
2117static void 2117static void
2118netiucv_banner(void) 2118netiucv_banner(void)
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 6bb558a9a032..7c735e1fe063 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
49 return buffers_needed; 49 return buffers_needed;
50} 50}
51 51
52static inline void 52static void
53qeth_eddp_free_context(struct qeth_eddp_context *ctx) 53qeth_eddp_free_context(struct qeth_eddp_context *ctx)
54{ 54{
55 int i; 55 int i;
@@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
91 } 91 }
92} 92}
93 93
94static inline int 94static int
95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
96 struct qeth_eddp_context *ctx) 96 struct qeth_eddp_context *ctx)
97{ 97{
@@ -196,7 +196,7 @@ out:
196 return flush_cnt; 196 return flush_cnt;
197} 197}
198 198
199static inline void 199static void
200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
201 struct qeth_eddp_data *eddp, int data_len) 201 struct qeth_eddp_data *eddp, int data_len)
202{ 202{
@@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
256 ctx->offset += eddp->thl; 256 ctx->offset += eddp->thl;
257} 257}
258 258
259static inline void 259static void
260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, 260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
261 __wsum *hcsum) 261 __wsum *hcsum)
262{ 262{
@@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
302 } 302 }
303} 303}
304 304
305static inline void 305static void
306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
307 struct qeth_eddp_data *eddp, int data_len, 307 struct qeth_eddp_data *eddp, int data_len,
308 __wsum hcsum) 308 __wsum hcsum)
@@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
350} 350}
351 351
352static inline __wsum 352static __wsum
353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) 353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
354{ 354{
355 __wsum phcsum; /* pseudo header checksum */ 355 __wsum phcsum; /* pseudo header checksum */
@@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); 363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
364} 364}
365 365
366static inline __wsum 366static __wsum
367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) 367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
368{ 368{
369 __be32 proto; 369 __be32 proto;
@@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
381 return phcsum; 381 return phcsum;
382} 382}
383 383
384static inline struct qeth_eddp_data * 384static struct qeth_eddp_data *
385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) 385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
386{ 386{
387 struct qeth_eddp_data *eddp; 387 struct qeth_eddp_data *eddp;
@@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
399 return eddp; 399 return eddp;
400} 400}
401 401
402static inline void 402static void
403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
404 struct qeth_eddp_data *eddp) 404 struct qeth_eddp_data *eddp)
405{ 405{
@@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
464 } 464 }
465} 465}
466 466
467static inline int 467static int
468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
469 struct sk_buff *skb, struct qeth_hdr *qhdr) 469 struct sk_buff *skb, struct qeth_hdr *qhdr)
470{ 470{
@@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
505 return 0; 505 return 0;
506} 506}
507 507
508static inline void 508static void
509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, 509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
510 int hdr_len) 510 int hdr_len)
511{ 511{
@@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
529 (skb_shinfo(skb)->gso_segs + 1); 529 (skb_shinfo(skb)->gso_segs + 1);
530} 530}
531 531
532static inline struct qeth_eddp_context * 532static struct qeth_eddp_context *
533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, 533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
534 int hdr_len) 534 int hdr_len)
535{ 535{
@@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
581 return ctx; 581 return ctx;
582} 582}
583 583
584static inline struct qeth_eddp_context * 584static struct qeth_eddp_context *
585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, 585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
586 struct qeth_hdr *qhdr) 586 struct qeth_hdr *qhdr)
587{ 587{
@@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
625 } 625 }
626 return NULL; 626 return NULL;
627} 627}
628
629
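
The qeth_eddp_check_tcp4_hdr()/..._tcp6_hdr() context above feeds a pseudo-header sum into csum_partial() and later folds it with csum_fold() to produce the TCP checksum for each generated segment. The underlying algorithm is the ordinary one's-complement Internet checksum; a compact user-space sketch for reference (hypothetical helper, not the kernel's optimized implementation):

#include <stddef.h>
#include <stdint.h>

/* Sum 16-bit words in one's complement, fold the carries back into the low
 * 16 bits, and return the complement — the classic Internet checksum. */
static uint16_t inet_csum(const uint8_t *buf, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += (uint32_t)buf[0] << 8;
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
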
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index d2efa5ff125d..2257e45594b3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
651 return 0; 651 return 0;
652} 652}
653 653
654static inline int 654static int
655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, 655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
656 int same_type) 656 int same_type)
657{ 657{
@@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
795 return rc; 795 return rc;
796} 796}
797 797
798static inline void 798static void
799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) 799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
800{ 800{
801 struct qeth_ipaddr *addr, *tmp; 801 struct qeth_ipaddr *addr, *tmp;
@@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *);
882static void qeth_add_multicast_ipv6(struct qeth_card *); 882static void qeth_add_multicast_ipv6(struct qeth_card *);
883#endif 883#endif
884 884
885static inline int 885static int
886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) 886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
887{ 887{
888 unsigned long flags; 888 unsigned long flags;
@@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
920 wake_up(&card->wait_q); 920 wake_up(&card->wait_q);
921} 921}
922 922
923static inline int 923static int
924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
925{ 925{
926 unsigned long flags; 926 unsigned long flags;
@@ -1764,9 +1764,9 @@ out:
1764 qeth_release_buffer(channel,iob); 1764 qeth_release_buffer(channel,iob);
1765} 1765}
1766 1766
1767static inline void 1767static void
1768qeth_prepare_control_data(struct qeth_card *card, int len, 1768qeth_prepare_control_data(struct qeth_card *card, int len,
1769struct qeth_cmd_buffer *iob) 1769 struct qeth_cmd_buffer *iob)
1770{ 1770{
1771 qeth_setup_ccw(&card->write,iob->data,len); 1771 qeth_setup_ccw(&card->write,iob->data,len);
1772 iob->callback = qeth_release_buffer; 1772 iob->callback = qeth_release_buffer;
@@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2160 return 0; 2160 return 0;
2161} 2161}
2162 2162
2163static inline struct sk_buff * 2163static struct sk_buff *
2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) 2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2165{ 2165{
2166 struct sk_buff* skb; 2166 struct sk_buff* skb;
@@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2179 return skb; 2179 return skb;
2180} 2180}
2181 2181
2182static inline struct sk_buff * 2182static struct sk_buff *
2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, 2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2184 struct qdio_buffer_element **__element, int *__offset, 2184 struct qdio_buffer_element **__element, int *__offset,
2185 struct qeth_hdr **hdr) 2185 struct qeth_hdr **hdr)
@@ -2264,7 +2264,7 @@ no_mem:
2264 return NULL; 2264 return NULL;
2265} 2265}
2266 2266
2267static inline __be16 2267static __be16
2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev) 2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2269{ 2269{
2270 struct qeth_card *card; 2270 struct qeth_card *card;
@@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2297 return htons(ETH_P_802_2); 2297 return htons(ETH_P_802_2);
2298} 2298}
2299 2299
2300static inline void 2300static void
2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, 2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2302 struct qeth_hdr *hdr) 2302 struct qeth_hdr *hdr)
2303{ 2303{
@@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2351 fake_llc->ethertype = ETH_P_IP; 2351 fake_llc->ethertype = ETH_P_IP;
2352} 2352}
2353 2353
2354static inline void 2354static void
2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, 2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2356 struct qeth_hdr *hdr) 2356 struct qeth_hdr *hdr)
2357{ 2357{
@@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2421} 2421}
2422 2422
2423static inline __u16 2423static __u16
2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2425 struct qeth_hdr *hdr) 2425 struct qeth_hdr *hdr)
2426{ 2426{
@@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2476 return vlan_id; 2476 return vlan_id;
2477} 2477}
2478 2478
2479static inline void 2479static void
2480qeth_process_inbound_buffer(struct qeth_card *card, 2480qeth_process_inbound_buffer(struct qeth_card *card,
2481 struct qeth_qdio_buffer *buf, int index) 2481 struct qeth_qdio_buffer *buf, int index)
2482{ 2482{
@@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2528 } 2528 }
2529} 2529}
2530 2530
2531static inline struct qeth_buffer_pool_entry * 2531static struct qeth_buffer_pool_entry *
2532qeth_get_buffer_pool_entry(struct qeth_card *card) 2532qeth_get_buffer_pool_entry(struct qeth_card *card)
2533{ 2533{
2534 struct qeth_buffer_pool_entry *entry; 2534 struct qeth_buffer_pool_entry *entry;
@@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card)
2543 return NULL; 2543 return NULL;
2544} 2544}
2545 2545
2546static inline void 2546static void
2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) 2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2548{ 2548{
2549 struct qeth_buffer_pool_entry *pool_entry; 2549 struct qeth_buffer_pool_entry *pool_entry;
@@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2570 buf->state = QETH_QDIO_BUF_EMPTY; 2570 buf->state = QETH_QDIO_BUF_EMPTY;
2571} 2571}
2572 2572
2573static inline void 2573static void
2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2575 struct qeth_qdio_out_buffer *buf) 2575 struct qeth_qdio_out_buffer *buf)
2576{ 2576{
@@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2596} 2596}
2597 2597
2598static inline void 2598static void
2599qeth_queue_input_buffer(struct qeth_card *card, int index) 2599qeth_queue_input_buffer(struct qeth_card *card, int index)
2600{ 2600{
2601 struct qeth_qdio_q *queue = card->qdio.in_q; 2601 struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2699 card->perf_stats.inbound_start_time; 2699 card->perf_stats.inbound_start_time;
2700} 2700}
2701 2701
2702static inline int 2702static int
2703qeth_handle_send_error(struct qeth_card *card, 2703qeth_handle_send_error(struct qeth_card *card,
2704 struct qeth_qdio_out_buffer *buffer, 2704 struct qeth_qdio_out_buffer *buffer,
2705 unsigned int qdio_err, unsigned int siga_err) 2705 unsigned int qdio_err, unsigned int siga_err)
@@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2821 * Switched to packing state if the number of used buffers on a queue 2821 * Switched to packing state if the number of used buffers on a queue
2822 * reaches a certain limit. 2822 * reaches a certain limit.
2823 */ 2823 */
2824static inline void 2824static void
2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2826{ 2826{
2827 if (!queue->do_pack) { 2827 if (!queue->do_pack) {
@@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2842 * In that case 1 is returned to inform the caller. If no buffer 2842 * In that case 1 is returned to inform the caller. If no buffer
2843 * has to be flushed, zero is returned. 2843 * has to be flushed, zero is returned.
2844 */ 2844 */
2845static inline int 2845static int
2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2847{ 2847{
2848 struct qeth_qdio_out_buffer *buffer; 2848 struct qeth_qdio_out_buffer *buffer;
@@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2877 * Checks if there is a packing buffer and prepares it to be flushed. 2877 * Checks if there is a packing buffer and prepares it to be flushed.
2878 * In that case returns 1, otherwise zero. 2878 * In that case returns 1, otherwise zero.
2879 */ 2879 */
2880static inline int 2880static int
2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) 2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2882{ 2882{
2883 struct qeth_qdio_out_buffer *buffer; 2883 struct qeth_qdio_out_buffer *buffer;
@@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2894 return 0; 2894 return 0;
2895} 2895}
2896 2896
2897static inline void 2897static void
2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2899{ 2899{
2900 int index; 2900 int index;
@@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3594 } 3594 }
3595} 3595}
3596 3596
3597static inline int 3597static int
3598qeth_send_packet(struct qeth_card *, struct sk_buff *); 3598qeth_send_packet(struct qeth_card *, struct sk_buff *);
3599 3599
3600static int 3600static int
@@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev)
3759 return 0; 3759 return 0;
3760} 3760}
3761 3761
3762static inline int 3762static int
3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) 3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3764{ 3764{
3765 int cast_type = RTN_UNSPEC; 3765 int cast_type = RTN_UNSPEC;
@@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3806 return cast_type; 3806 return cast_type;
3807} 3807}
3808 3808
3809static inline int 3809static int
3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, 3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3811 int ipv, int cast_type) 3811 int ipv, int cast_type)
3812{ 3812{
@@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb)
3853 } 3853 }
3854} 3854}
3855 3855
3856static inline struct qeth_hdr * 3856static struct qeth_hdr *
3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) 3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3858{ 3858{
3859#ifdef CONFIG_QETH_VLAN 3859#ifdef CONFIG_QETH_VLAN
@@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); 3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
3883} 3883}
3884 3884
3885static inline void 3885static void
3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) 3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
3887{ 3887{
3888 if (orig_skb != new_skb) 3888 if (orig_skb != new_skb)
3889 dev_kfree_skb_any(new_skb); 3889 dev_kfree_skb_any(new_skb);
3890} 3890}
3891 3891
3892static inline struct sk_buff * 3892static struct sk_buff *
3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, 3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3894 struct qeth_hdr **hdr, int ipv) 3894 struct qeth_hdr **hdr, int ipv)
3895{ 3895{
@@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type)
3940 return ct | QETH_CAST_UNICAST; 3940 return ct | QETH_CAST_UNICAST;
3941} 3941}
3942 3942
3943static inline void 3943static void
3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, 3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3945 struct sk_buff *skb) 3945 struct sk_buff *skb)
3946{ 3946{
@@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3977 } 3977 }
3978} 3978}
3979 3979
3980static inline void 3980static void
3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3982 struct sk_buff *skb, int cast_type) 3982 struct sk_buff *skb, int cast_type)
3983{ 3983{
@@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4068 } 4068 }
4069} 4069}
4070 4070
4071static inline void 4071static void
4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4073 int is_tso, int *next_element_to_fill) 4073 int is_tso, int *next_element_to_fill)
4074{ 4074{
@@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4112 *next_element_to_fill = element; 4112 *next_element_to_fill = element;
4113} 4113}
4114 4114
4115static inline int 4115static int
4116qeth_fill_buffer(struct qeth_qdio_out_q *queue, 4116qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4117 struct qeth_qdio_out_buffer *buf, 4117 struct qeth_qdio_out_buffer *buf,
4118 struct sk_buff *skb) 4118 struct sk_buff *skb)
@@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4171 return flush_cnt; 4171 return flush_cnt;
4172} 4172}
4173 4173
4174static inline int 4174static int
4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4176 struct sk_buff *skb, struct qeth_hdr *hdr, 4176 struct sk_buff *skb, struct qeth_hdr *hdr,
4177 int elements_needed, 4177 int elements_needed,
@@ -4222,7 +4222,7 @@ out:
4222 return -EBUSY; 4222 return -EBUSY;
4223} 4223}
4224 4224
4225static inline int 4225static int
4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4227 struct sk_buff *skb, struct qeth_hdr *hdr, 4227 struct sk_buff *skb, struct qeth_hdr *hdr,
4228 int elements_needed, struct qeth_eddp_context *ctx) 4228 int elements_needed, struct qeth_eddp_context *ctx)
@@ -4328,7 +4328,7 @@ out:
4328 return rc; 4328 return rc;
4329} 4329}
4330 4330
4331static inline int 4331static int
4332qeth_get_elements_no(struct qeth_card *card, void *hdr, 4332qeth_get_elements_no(struct qeth_card *card, void *hdr,
4333 struct sk_buff *skb, int elems) 4333 struct sk_buff *skb, int elems)
4334{ 4334{
@@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
4349} 4349}
4350 4350
4351 4351
4352static inline int 4352static int
4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) 4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4354{ 4354{
4355 int ipv = 0; 4355 int ipv = 0;
@@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4536} 4536}
4537 4537
4538 4538
4539static inline const char * 4539static const char *
4540qeth_arp_get_error_cause(int *rc) 4540qeth_arp_get_error_cause(int *rc)
4541{ 4541{
4542 switch (*rc) { 4542 switch (*rc) {
@@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4597 return rc; 4597 return rc;
4598} 4598}
4599 4599
4600static inline void 4600static void
4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4602 struct qeth_arp_query_data *qdata, 4602 struct qeth_arp_query_data *qdata,
4603 int entry_size, int uentry_size) 4603 int entry_size, int uentry_size)
@@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5214 spin_unlock_irqrestore(&card->vlanlock, flags); 5214 spin_unlock_irqrestore(&card->vlanlock, flags);
5215} 5215}
5216 5216
5217static inline void 5217static void
5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, 5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5219 unsigned short vid) 5219 unsigned short vid)
5220{ 5220{
@@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card)
5625 spin_unlock_irqrestore(&card->ip_lock, flags); 5625 spin_unlock_irqrestore(&card->ip_lock, flags);
5626} 5626}
5627 5627
5628static inline void 5628static void
5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) 5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5630{ 5630{
5631 struct qeth_ipaddr *ipm; 5631 struct qeth_ipaddr *ipm;
@@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card)
5711} 5711}
5712 5712
5713#ifdef CONFIG_QETH_IPV6 5713#ifdef CONFIG_QETH_IPV6
5714static inline void 5714static void
5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) 5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5716{ 5716{
5717 struct qeth_ipaddr *ipm; 5717 struct qeth_ipaddr *ipm;
@@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6022 6022
6023 return rc; 6023 return rc;
6024} 6024}
6025static inline void 6025static void
6026qeth_fill_netmask(u8 *netmask, unsigned int len) 6026qeth_fill_netmask(u8 *netmask, unsigned int len)
6027{ 6027{
6028 int i,j; 6028 int i,j;
@@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6626 return rc; 6626 return rc;
6627} 6627}
6628 6628
6629static inline int 6629static int
6630qeth_setadapter_hstr(struct qeth_card *card) 6630qeth_setadapter_hstr(struct qeth_card *card)
6631{ 6631{
6632 int rc; 6632 int rc;
@@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card,
6889 return rc; 6889 return rc;
6890} 6890}
6891 6891
6892static inline int 6892static int
6893qeth_start_ipa_arp_processing(struct qeth_card *card) 6893qeth_start_ipa_arp_processing(struct qeth_card *card)
6894{ 6894{
6895 int rc; 6895 int rc;
@@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7529 wake_up(&card->wait_q); 7529 wake_up(&card->wait_q);
7530} 7530}
7531 7531
7532static inline int 7532static int
7533qeth_threads_running(struct qeth_card *card, unsigned long threads) 7533qeth_threads_running(struct qeth_card *card, unsigned long threads)
7534{ 7534{
7535 unsigned long flags; 7535 unsigned long flags;
@@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8118 spin_unlock_irqrestore(&card->ip_lock, flags); 8118 spin_unlock_irqrestore(&card->ip_lock, flags);
8119} 8119}
8120 8120
8121static inline void 8121static void
8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8123{ 8123{
8124 int i, j; 8124 int i, j;
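
The qeth_main.c hunks above drop the inline hints but leave the documented output-queue packing heuristic untouched: the queue enters packing mode once the number of used buffers reaches a limit, and leaves it again when the queue drains, flushing any half-filled packing buffer on the way out. A minimal sketch of that state machine, with invented names and thresholds (the real state lives in struct qeth_qdio_out_q, not shown here):

/*
 * Illustrative only: pack several skbs per buffer while the queue is
 * busy, fall back to one skb per buffer when it drains. Thresholds
 * and field names are made up for this sketch.
 */
struct toy_out_queue {
    int used_buffers;   /* buffers currently handed to the hardware */
    int do_pack;        /* 1 while in packing mode */
};

#define TOY_PACK_HIGH_WATER 32
#define TOY_PACK_LOW_WATER   2

static void toy_switch_to_packing_if_needed(struct toy_out_queue *q)
{
    if (!q->do_pack && q->used_buffers >= TOY_PACK_HIGH_WATER)
        q->do_pack = 1;
}

/* Returns 1 if the caller must flush a partially packed buffer. */
static int toy_switch_to_nonpacking_if_needed(struct toy_out_queue *q)
{
    if (q->do_pack && q->used_buffers <= TOY_PACK_LOW_WATER) {
        q->do_pack = 0;
        return 1;   /* half-filled packing buffer needs flushing */
    }
    return 0;
}
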
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 5836737ac58f..d518419cd0c6 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c
328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, 328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
329 qeth_dev_bufcnt_store); 329 qeth_dev_bufcnt_store);
330 330
331static inline ssize_t 331static ssize_t
332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, 332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
333 char *buf) 333 char *buf)
334{ 334{
@@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu
368 return qeth_dev_route_show(card, &card->options.route4, buf); 368 return qeth_dev_route_show(card, &card->options.route4, buf);
369} 369}
370 370
371static inline ssize_t 371static ssize_t
372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, 372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
373 enum qeth_prot_versions prot, const char *buf, size_t count) 373 enum qeth_prot_versions prot, const char *buf, size_t count)
374{ 374{
@@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \
998 .store = _store, \ 998 .store = _store, \
999}; 999};
1000 1000
1001int 1001static int
1002qeth_check_layer2(struct qeth_card *card) 1002qeth_check_layer2(struct qeth_card *card)
1003{ 1003{
1004 if (card->options.layer2) 1004 if (card->options.layer2)
@@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1100 qeth_dev_ipato_invert4_show, 1100 qeth_dev_ipato_invert4_show,
1101 qeth_dev_ipato_invert4_store); 1101 qeth_dev_ipato_invert4_store);
1102 1102
1103static inline ssize_t 1103static ssize_t
1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, 1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1105 enum qeth_prot_versions proto) 1105 enum qeth_prot_versions proto)
1106{ 1106{
@@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char
1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); 1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1147} 1147}
1148 1148
1149static inline int 1149static int
1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, 1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1151 u8 *addr, int *mask_bits) 1151 u8 *addr, int *mask_bits)
1152{ 1152{
@@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1178 return 0; 1178 return 0;
1179} 1179}
1180 1180
1181static inline ssize_t 1181static ssize_t
1182qeth_dev_ipato_add_store(const char *buf, size_t count, 1182qeth_dev_ipato_add_store(const char *buf, size_t count,
1183 struct qeth_card *card, enum qeth_prot_versions proto) 1183 struct qeth_card *card, enum qeth_prot_versions proto)
1184{ 1184{
@@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1223 qeth_dev_ipato_add4_show, 1223 qeth_dev_ipato_add4_show,
1224 qeth_dev_ipato_add4_store); 1224 qeth_dev_ipato_add4_store);
1225 1225
1226static inline ssize_t 1226static ssize_t
1227qeth_dev_ipato_del_store(const char *buf, size_t count, 1227qeth_dev_ipato_del_store(const char *buf, size_t count,
1228 struct qeth_card *card, enum qeth_prot_versions proto) 1228 struct qeth_card *card, enum qeth_prot_versions proto)
1229{ 1229{
@@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = {
1361 .attrs = (struct attribute **)qeth_ipato_device_attrs, 1361 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1362}; 1362};
1363 1363
1364static inline ssize_t 1364static ssize_t
1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, 1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1366 enum qeth_prot_versions proto) 1366 enum qeth_prot_versions proto)
1367{ 1367{
@@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char
1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); 1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1408} 1408}
1409 1409
1410static inline int 1410static int
1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, 1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1412 u8 *addr) 1412 u8 *addr)
1413{ 1413{
@@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1418 return 0; 1418 return 0;
1419} 1419}
1420 1420
1421static inline ssize_t 1421static ssize_t
1422qeth_dev_vipa_add_store(const char *buf, size_t count, 1422qeth_dev_vipa_add_store(const char *buf, size_t count,
1423 struct qeth_card *card, enum qeth_prot_versions proto) 1423 struct qeth_card *card, enum qeth_prot_versions proto)
1424{ 1424{
@@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1451 qeth_dev_vipa_add4_show, 1451 qeth_dev_vipa_add4_show,
1452 qeth_dev_vipa_add4_store); 1452 qeth_dev_vipa_add4_store);
1453 1453
1454static inline ssize_t 1454static ssize_t
1455qeth_dev_vipa_del_store(const char *buf, size_t count, 1455qeth_dev_vipa_del_store(const char *buf, size_t count,
1456 struct qeth_card *card, enum qeth_prot_versions proto) 1456 struct qeth_card *card, enum qeth_prot_versions proto)
1457{ 1457{
@@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = {
1542 .attrs = (struct attribute **)qeth_vipa_device_attrs, 1542 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1543}; 1543};
1544 1544
1545static inline ssize_t 1545static ssize_t
1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, 1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1547 enum qeth_prot_versions proto) 1547 enum qeth_prot_versions proto)
1548{ 1548{
@@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char
1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); 1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1589} 1589}
1590 1590
1591static inline int 1591static int
1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, 1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1593 u8 *addr) 1593 u8 *addr)
1594{ 1594{
@@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1599 return 0; 1599 return 0;
1600} 1600}
1601 1601
1602static inline ssize_t 1602static ssize_t
1603qeth_dev_rxip_add_store(const char *buf, size_t count, 1603qeth_dev_rxip_add_store(const char *buf, size_t count,
1604 struct qeth_card *card, enum qeth_prot_versions proto) 1604 struct qeth_card *card, enum qeth_prot_versions proto)
1605{ 1605{
@@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1632 qeth_dev_rxip_add4_show, 1632 qeth_dev_rxip_add4_show,
1633 qeth_dev_rxip_add4_store); 1633 qeth_dev_rxip_add4_store);
1634 1634
1635static inline ssize_t 1635static ssize_t
1636qeth_dev_rxip_del_store(const char *buf, size_t count, 1636qeth_dev_rxip_del_store(const char *buf, size_t count,
1637 struct qeth_card *card, enum qeth_prot_versions proto) 1637 struct qeth_card *card, enum qeth_prot_versions proto)
1638{ 1638{
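
All of the qeth_sys.c helpers touched above feed sysfs attributes declared with DEVICE_ATTR(); only the per-card worker functions lose their inline hint, while the show/store prototypes keep the shape sysfs expects. A generic sketch of that pattern — attribute and field names here are invented, not qeth's:

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical driver state hanging off the struct device. */
struct toy_card {
    int buffer_count;
};

static ssize_t toy_bufcnt_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
    struct toy_card *card = dev_get_drvdata(dev);

    return sprintf(buf, "%d\n", card->buffer_count);
}

static ssize_t toy_bufcnt_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
    struct toy_card *card = dev_get_drvdata(dev);
    char *end;

    card->buffer_count = simple_strtoul(buf, &end, 10);
    return count;
}

/* Creates a read/write "buffer_count" attribute with mode 0644. */
static DEVICE_ATTR(buffer_count, 0644, toy_bufcnt_show, toy_bufcnt_store);
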
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index e088b5e28711..806bb1a921eb 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -13,22 +13,18 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/workqueue.h> 14#include <linux/workqueue.h>
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/device.h>
16#include <linux/kthread.h> 17#include <linux/kthread.h>
17 18#include <asm/etr.h>
18#include <asm/lowcore.h> 19#include <asm/lowcore.h>
19 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
20#include "s390mach.h" 24#include "s390mach.h"
21 25
22static struct semaphore m_sem; 26static struct semaphore m_sem;
23 27
24extern int css_process_crw(int, int);
25extern int chsc_process_crw(void);
26extern int chp_process_crw(int, int);
27extern void css_reiterate_subchannels(void);
28
29extern struct workqueue_struct *slow_path_wq;
30extern struct work_struct slow_path_work;
31
32static NORET_TYPE void 28static NORET_TYPE void
33s390_handle_damage(char *msg) 29s390_handle_damage(char *msg)
34{ 30{
@@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs)
470 s390_handle_damage("unable to revalidate registers."); 466 s390_handle_damage("unable to revalidate registers.");
471 } 467 }
472 468
469 if (mci->cd) {
470 /* Timing facility damage */
471 s390_handle_damage("TOD clock damaged");
472 }
473
474 if (mci->ed && mci->ec) {
475 /* External damage */
476 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
477 etr_sync_check();
478 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
479 etr_switch_to_local();
480 }
481
473 if (mci->se) 482 if (mci->se)
474 /* Storage error uncorrected */ 483 /* Storage error uncorrected */
475 s390_handle_damage("received storage error uncorrected " 484 s390_handle_damage("received storage error uncorrected "
@@ -508,7 +517,7 @@ static int
508machine_check_init(void) 517machine_check_init(void)
509{ 518{
510 init_MUTEX_LOCKED(&m_sem); 519 init_MUTEX_LOCKED(&m_sem);
511 ctl_clear_bit(14, 25); /* disable external damage MCH */ 520 ctl_set_bit(14, 25); /* enable external damage MCH */
512 ctl_set_bit(14, 27); /* enable system recovery MCH */ 521 ctl_set_bit(14, 27); /* enable system recovery MCH */
513#ifdef CONFIG_MACHCHK_WARNING 522#ifdef CONFIG_MACHCHK_WARNING
514 ctl_set_bit(14, 24); /* enable warning MCH */ 523 ctl_set_bit(14, 24); /* enable warning MCH */
@@ -529,7 +538,11 @@ arch_initcall(machine_check_init);
529static int __init 538static int __init
530machine_check_crw_init (void) 539machine_check_crw_init (void)
531{ 540{
532 kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); 541 struct task_struct *task;
542
543 task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
544 if (IS_ERR(task))
545 return PTR_ERR(task);
533 ctl_set_bit(14, 28); /* enable channel report MCH */ 546 ctl_set_bit(14, 28); /* enable channel report MCH */
534 return 0; 547 return 0;
535} 548}
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index 7abb42a09ae2..d3ca4281a494 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw )
102 return ccode; 102 return ccode;
103} 103}
104 104
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107
105#endif /* __s390mach */ 108#endif /* __s390mach */
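
The s390mach.c hunk stops ignoring the return value of kthread_run(): on failure that helper returns an ERR_PTR()-encoded errno rather than NULL, so the caller has to test it with IS_ERR() before relying on the thread. The same idiom in isolation — thread function and name are placeholders:

#include <linux/err.h>
#include <linux/kthread.h>

static int collector(void *data)
{
    /* ... consume channel report words until kthread_should_stop() ... */
    return 0;
}

static int __init start_collector(void)
{
    struct task_struct *task;

    task = kthread_run(collector, NULL, "kmcheck");
    if (IS_ERR(task))
        return PTR_ERR(task);   /* e.g. -ENOMEM; never NULL */
    return 0;
}
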
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 85093b71f9fa..39a885266790 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -47,13 +47,12 @@ static int __init zfcp_module_init(void);
47static void zfcp_ns_gid_pn_handler(unsigned long); 47static void zfcp_ns_gid_pn_handler(unsigned long);
48 48
49/* miscellaneous */ 49/* miscellaneous */
50static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); 50static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
51static inline void zfcp_sg_list_free(struct zfcp_sg_list *); 51static void zfcp_sg_list_free(struct zfcp_sg_list *);
52static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, 52static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
53 void __user *, size_t); 53 void __user *, size_t);
54static inline int zfcp_sg_list_copy_to_user(void __user *, 54static int zfcp_sg_list_copy_to_user(void __user *,
55 struct zfcp_sg_list *, size_t); 55 struct zfcp_sg_list *, size_t);
56
57static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); 56static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
58 57
59#define ZFCP_CFDC_IOC_MAGIC 0xDD 58#define ZFCP_CFDC_IOC_MAGIC 0xDD
@@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
605 * elements of the scatter-gather list. The maximum size of a single element 604 * elements of the scatter-gather list. The maximum size of a single element
606 * in the scatter-gather list is PAGE_SIZE. 605 * in the scatter-gather list is PAGE_SIZE.
607 */ 606 */
608static inline int 607static int
609zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) 608zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
610{ 609{
611 struct scatterlist *sg; 610 struct scatterlist *sg;
@@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
652 * Memory for each element in the scatter-gather list is freed. 651 * Memory for each element in the scatter-gather list is freed.
653 * Finally sg_list->sg is freed itself and sg_list->count is reset. 652 * Finally sg_list->sg is freed itself and sg_list->count is reset.
654 */ 653 */
655static inline void 654static void
656zfcp_sg_list_free(struct zfcp_sg_list *sg_list) 655zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
657{ 656{
658 struct scatterlist *sg; 657 struct scatterlist *sg;
@@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
697 * @size: number of bytes to be copied 696 * @size: number of bytes to be copied
698 * Return: 0 on success, -EFAULT if copy_from_user fails. 697 * Return: 0 on success, -EFAULT if copy_from_user fails.
699 */ 698 */
700static inline int 699static int
701zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, 700zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
702 void __user *user_buffer, 701 void __user *user_buffer,
703 size_t size) 702 size_t size)
@@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
735 * @size: number of bytes to be copied 734 * @size: number of bytes to be copied
736 * Return: 0 on success, -EFAULT if copy_to_user fails 735 * Return: 0 on success, -EFAULT if copy_to_user fails
737 */ 736 */
738static inline int 737static int
739zfcp_sg_list_copy_to_user(void __user *user_buffer, 738zfcp_sg_list_copy_to_user(void __user *user_buffer,
740 struct zfcp_sg_list *sg_list, 739 struct zfcp_sg_list *sg_list,
741 size_t size) 740 size_t size)
@@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1799 * @code: reason code 1798 * @code: reason code
1800 * @rc_table: table of reason codes and descriptions 1799 * @rc_table: table of reason codes and descriptions
1801 */ 1800 */
1802static inline const char * 1801static const char *
1803zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) 1802zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
1804{ 1803{
1805 const char *descr = "unknown reason code"; 1804 const char *descr = "unknown reason code";
@@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt)
1847 * @rjt_par: reject parameter acc. to FC-PH/FC-FS 1846 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1848 * @rc_table: table of reason codes and descriptions 1847 * @rc_table: table of reason codes and descriptions
1849 */ 1848 */
1850static inline void 1849static void
1851zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, 1850zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1852 const struct zfcp_rc_entry *rc_table) 1851 const struct zfcp_rc_entry *rc_table)
1853{ 1852{
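
zfcp_aux.c only changes linkage here; the documented contract of the scatter-gather helpers is unchanged: allocation hands out a list whose elements are at most PAGE_SIZE each, and the free routine releases every element before resetting the count. A simplified stand-in that captures that contract without zfcp's struct zfcp_sg_list (all names invented):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct toy_sg_list {
    struct page **pages;
    unsigned int count;
};

static int toy_sg_list_alloc(struct toy_sg_list *list, size_t size)
{
    unsigned int i, count = DIV_ROUND_UP(size, PAGE_SIZE);

    list->pages = kcalloc(count, sizeof(*list->pages), GFP_KERNEL);
    if (!list->pages)
        return -ENOMEM;
    for (i = 0; i < count; i++) {
        list->pages[i] = alloc_page(GFP_KERNEL);
        if (!list->pages[i])
            goto undo;
    }
    list->count = count;
    return 0;
undo:
    while (i--)
        __free_page(list->pages[i]);
    kfree(list->pages);
    list->pages = NULL;
    return -ENOMEM;
}

static void toy_sg_list_free(struct toy_sg_list *list)
{
    unsigned int i;

    for (i = 0; i < list->count; i++)
        __free_page(list->pages[i]);
    kfree(list->pages);
    list->pages = NULL;
    list->count = 0;
}
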
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0aa3b1ac76af..d8191d115c14 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize,
31 31
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER 32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
33 33
34static inline int 34static int
35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) 35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
36{ 36{
37 unsigned long long sec; 37 unsigned long long sec;
@@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label,
106 return len; 106 return len;
107} 107}
108 108
109static inline int 109static int
110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, 110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
111 debug_entry_t * entry, char *out_buf) 111 debug_entry_t * entry, char *out_buf)
112{ 112{
@@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
130 return len; 130 return len;
131} 131}
132 132
133inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) 133void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
134{ 134{
135 struct zfcp_adapter *adapter = fsf_req->adapter; 135 struct zfcp_adapter *adapter = fsf_req->adapter;
136 struct fsf_qtcb *qtcb = fsf_req->qtcb; 136 struct fsf_qtcb *qtcb = fsf_req->qtcb;
@@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
242} 242}
243 243
244inline void 244void
245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, 245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
246 struct fsf_status_read_buffer *status_buffer) 246 struct fsf_status_read_buffer *status_buffer)
247{ 247{
@@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
296} 296}
297 297
298inline void 298void
299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, 299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
300 unsigned int qdio_error, unsigned int siga_error, 300 unsigned int qdio_error, unsigned int siga_error,
301 int sbal_index, int sbal_count) 301 int sbal_index, int sbal_count)
@@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
317} 317}
318 318
319static inline int 319static int
320zfcp_hba_dbf_view_response(char *out_buf, 320zfcp_hba_dbf_view_response(char *out_buf,
321 struct zfcp_hba_dbf_record_response *rec) 321 struct zfcp_hba_dbf_record_response *rec)
322{ 322{
@@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf,
403 return len; 403 return len;
404} 404}
405 405
406static inline int 406static int
407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) 407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
408{ 408{
409 int len = 0; 409 int len = 0;
@@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
424 return len; 424 return len;
425} 425}
426 426
427static inline int 427static int
428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) 428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
429{ 429{
430 int len = 0; 430 int len = 0;
@@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
469 return len; 469 return len;
470} 470}
471 471
472struct debug_view zfcp_hba_dbf_view = { 472static struct debug_view zfcp_hba_dbf_view = {
473 "structured", 473 "structured",
474 NULL, 474 NULL,
475 &zfcp_dbf_view_header, 475 &zfcp_dbf_view_header,
@@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = {
478 NULL 478 NULL
479}; 479};
480 480
481inline void 481void
482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, 482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
483 u32 s_id, u32 d_id, void *buffer, int buflen) 483 u32 s_id, u32 d_id, void *buffer, int buflen)
484{ 484{
@@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
520} 520}
521 521
522inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) 522void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
523{ 523{
524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
525 struct zfcp_port *port = ct->port; 525 struct zfcp_port *port = ct->port;
@@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
531 ct->req->length); 531 ct->req->length);
532} 532}
533 533
534inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) 534void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
535{ 535{
536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
537 struct zfcp_port *port = ct->port; 537 struct zfcp_port *port = ct->port;
@@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
543 ct->resp->length); 543 ct->resp->length);
544} 544}
545 545
546static inline void 546static void
547_zfcp_san_dbf_event_common_els(const char *tag, int level, 547_zfcp_san_dbf_event_common_els(const char *tag, int level,
548 struct zfcp_fsf_req *fsf_req, u32 s_id, 548 struct zfcp_fsf_req *fsf_req, u32 s_id,
549 u32 d_id, u8 ls_code, void *buffer, int buflen) 549 u32 d_id, u8 ls_code, void *buffer, int buflen)
@@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level,
585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
586} 586}
587 587
588inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) 588void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
589{ 589{
590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
591 591
@@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
597 els->req->length); 597 els->req->length);
598} 598}
599 599
600inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) 600void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
601{ 601{
602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
603 603
@@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
608 els->resp->length); 608 els->resp->length);
609} 609}
610 610
611inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) 611void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
612{ 612{
613 struct zfcp_adapter *adapter = fsf_req->adapter; 613 struct zfcp_adapter *adapter = fsf_req->adapter;
614 struct fsf_status_read_buffer *status_buffer = 614 struct fsf_status_read_buffer *status_buffer =
@@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
693 return len; 693 return len;
694} 694}
695 695
696struct debug_view zfcp_san_dbf_view = { 696static struct debug_view zfcp_san_dbf_view = {
697 "structured", 697 "structured",
698 NULL, 698 NULL,
699 &zfcp_dbf_view_header, 699 &zfcp_dbf_view_header,
@@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = {
702 NULL 702 NULL
703}; 703};
704 704
705static inline void 705static void
706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
@@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); 786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
787} 787}
788 788
789inline void 789void
790zfcp_scsi_dbf_event_result(const char *tag, int level, 790zfcp_scsi_dbf_event_result(const char *tag, int level,
791 struct zfcp_adapter *adapter, 791 struct zfcp_adapter *adapter,
792 struct scsi_cmnd *scsi_cmnd, 792 struct scsi_cmnd *scsi_cmnd,
@@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
796 adapter, scsi_cmnd, fsf_req, 0); 796 adapter, scsi_cmnd, fsf_req, 0);
797} 797}
798 798
799inline void 799void
800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
801 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
802 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
@@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
806 adapter, scsi_cmnd, new_fsf_req, old_req_id); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
807} 807}
808 808
809inline void 809void
810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
811 struct scsi_cmnd *scsi_cmnd) 811 struct scsi_cmnd *scsi_cmnd)
812{ 812{
@@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
884 return len; 884 return len;
885} 885}
886 886
887struct debug_view zfcp_scsi_dbf_view = { 887static struct debug_view zfcp_scsi_dbf_view = {
888 "structured", 888 "structured",
889 NULL, 889 NULL,
890 &zfcp_dbf_view_header, 890 &zfcp_dbf_view_header,
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c88babce9bca..88642dec080c 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
200 * returns: 0 - initiated action successfully 200 * returns: 0 - initiated action successfully
201 * <0 - failed to initiate action 201 * <0 - failed to initiate action
202 */ 202 */
203int 203static int
204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) 204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
205{ 205{
206 int retval; 206 int retval;
@@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
295 * zfcp_erp_adisc - send ADISC ELS command 295 * zfcp_erp_adisc - send ADISC ELS command
296 * @port: port structure 296 * @port: port structure
297 */ 297 */
298int 298static int
299zfcp_erp_adisc(struct zfcp_port *port) 299zfcp_erp_adisc(struct zfcp_port *port)
300{ 300{
301 struct zfcp_adapter *adapter = port->adapter; 301 struct zfcp_adapter *adapter = port->adapter;
@@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
380 * 380 *
381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. 381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered.
382 */ 382 */
383void 383static void
384zfcp_erp_adisc_handler(unsigned long data) 384zfcp_erp_adisc_handler(unsigned long data)
385{ 385{
386 struct zfcp_send_els *send_els; 386 struct zfcp_send_els *send_els;
@@ -3141,7 +3141,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3141 break; 3141 break;
3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3143 if (result != ZFCP_ERP_SUCCEEDED) { 3143 if (result != ZFCP_ERP_SUCCEEDED) {
3144 struct zfcp_port *port;
3145 list_for_each_entry(port, &adapter->port_list_head, list) 3144 list_for_each_entry(port, &adapter->port_list_head, list)
3146 if (port->rport && 3145 if (port->rport &&
3147 !atomic_test_mask(ZFCP_STATUS_PORT_WKA, 3146 !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b8794d77285d..cda0cc095ad1 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); 120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); 121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
122extern void set_host_byte(u32 *, char); 122extern void set_host_byte(int *, char);
123extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(int *, char);
124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
126 126
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 067f1519eb04..4b3ae3f22e78 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4563,7 +4563,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4563/* 4563/*
4564 * set qtcb pointer in fsf_req and initialize QTCB 4564 * set qtcb pointer in fsf_req and initialize QTCB
4565 */ 4565 */
4566static inline void 4566static void
4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4568{ 4568{
4569 if (likely(fsf_req->qtcb != NULL)) { 4569 if (likely(fsf_req->qtcb != NULL)) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbd9f48e863e..1e12a78e8edd 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -21,22 +21,22 @@
21 21
22#include "zfcp_ext.h" 22#include "zfcp_ext.h"
23 23
24static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); 24static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get 25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
26 (struct zfcp_qdio_queue *, int, int); 26 (struct zfcp_qdio_queue *, int, int);
27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp 27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
28 (struct zfcp_fsf_req *, int, int); 28 (struct zfcp_fsf_req *, int, int);
29static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain 29static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
30 (struct zfcp_fsf_req *, unsigned long); 30 (struct zfcp_fsf_req *, unsigned long);
31static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next 31static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
32 (struct zfcp_fsf_req *, unsigned long); 32 (struct zfcp_fsf_req *, unsigned long);
33static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); 33static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); 34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
35static inline void zfcp_qdio_sbale_fill 35static void zfcp_qdio_sbale_fill
36 (struct zfcp_fsf_req *, unsigned long, void *, int); 36 (struct zfcp_fsf_req *, unsigned long, void *, int);
37static inline int zfcp_qdio_sbals_from_segment 37static int zfcp_qdio_sbals_from_segment
38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); 38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
39static inline int zfcp_qdio_sbals_from_buffer 39static int zfcp_qdio_sbals_from_buffer
40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); 40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
41 41
42static qdio_handler_t zfcp_qdio_request_handler; 42static qdio_handler_t zfcp_qdio_request_handler;
@@ -201,7 +201,7 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
201 * returns: error flag 201 * returns: error flag
202 * 202 *
203 */ 203 */
204static inline int 204static int
205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, 205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
206 unsigned int qdio_error, unsigned int siga_error, 206 unsigned int qdio_error, unsigned int siga_error,
207 int first_element, int elements_processed) 207 int first_element, int elements_processed)
@@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for 462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
463 * a struct zfcp_fsf_req 463 * a struct zfcp_fsf_req
464 */ 464 */
465inline volatile struct qdio_buffer_element * 465volatile struct qdio_buffer_element *
466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) 466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
467{ 467{
468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, 468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
@@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for 484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
485 * a struct zfcp_fsf_req 485 * a struct zfcp_fsf_req
486 */ 486 */
487inline volatile struct qdio_buffer_element * 487volatile struct qdio_buffer_element *
488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) 488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
489{ 489{
490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
@@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
499 * 499 *
500 * Note: We can assume at least one free SBAL in the request_queue when called. 500 * Note: We can assume at least one free SBAL in the request_queue when called.
501 */ 501 */
502static inline void 502static void
503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) 503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
504{ 504{
505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count); 505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
@@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
517 * 517 *
518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. 518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
519 */ 519 */
520static inline volatile struct qdio_buffer_element * 520static volatile struct qdio_buffer_element *
521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
522{ 522{
523 volatile struct qdio_buffer_element *sbale; 523 volatile struct qdio_buffer_element *sbale;
@@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
554/** 554/**
555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed 555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
556 */ 556 */
557static inline volatile struct qdio_buffer_element * 557static volatile struct qdio_buffer_element *
558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
559{ 559{
560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue 569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
570 * with zero from 570 * with zero from
571 */ 571 */
572static inline int 572static int
573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) 573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
574{ 574{
575 struct qdio_buffer **buf = queue->buffer; 575 struct qdio_buffer **buf = queue->buffer;
@@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
603 * zfcp_qdio_sbale_fill - set address and lenght in current SBALE 603 * zfcp_qdio_sbale_fill - set address and lenght in current SBALE
604 * on request_queue 604 * on request_queue
605 */ 605 */
606static inline void 606static void
607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
608 void *addr, int length) 608 void *addr, int length)
609{ 609{
@@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
624 * Alignment and length of the segment determine how many SBALEs are needed 624 * Alignment and length of the segment determine how many SBALEs are needed
625 * for the memory segment. 625 * for the memory segment.
626 */ 626 */
627static inline int 627static int
628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
629 void *start_addr, unsigned long total_length) 629 void *start_addr, unsigned long total_length)
630{ 630{
@@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
659 * @sg_count: number of elements in scatter-gather list 659 * @sg_count: number of elements in scatter-gather list
660 * @max_sbals: upper bound for number of SBALs to be used 660 * @max_sbals: upper bound for number of SBALs to be used
661 */ 661 */
662inline int 662int
663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
664 struct scatterlist *sg, int sg_count, int max_sbals) 664 struct scatterlist *sg, int sg_count, int max_sbals)
665{ 665{
@@ -707,7 +707,7 @@ out:
707 * @length: length of buffer 707 * @length: length of buffer
708 * @max_sbals: upper bound for number of SBALs to be used 708 * @max_sbals: upper bound for number of SBALs to be used
709 */ 709 */
710static inline int 710static int
711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
712 void *buffer, unsigned long length, int max_sbals) 712 void *buffer, unsigned long length, int max_sbals)
713{ 713{
@@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used 728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
729 * to fill SBALs 729 * to fill SBALs
730 */ 730 */
731inline int 731int
732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, 732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) 733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
734{ 734{
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 452d96f92a14..99db02062c3b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
90 return fcp_sns_info_ptr; 90 return fcp_sns_info_ptr;
91} 91}
92 92
93fcp_dl_t * 93static fcp_dl_t *
94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) 94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
95{ 95{
96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2; 96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
@@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
124 * regarding the specified byte 124 * regarding the specified byte
125 */ 125 */
126static inline void 126static inline void
127set_byte(u32 * result, char status, char pos) 127set_byte(int *result, char status, char pos)
128{ 128{
129 *result |= status << (pos * 8); 129 *result |= status << (pos * 8);
130} 130}
131 131
132void 132void
133set_host_byte(u32 * result, char status) 133set_host_byte(int *result, char status)
134{ 134{
135 set_byte(result, status, 2); 135 set_byte(result, status, 2);
136} 136}
137 137
138void 138void
139set_driver_byte(u32 * result, char status) 139set_driver_byte(int *result, char status)
140{ 140{
141 set_byte(result, status, 3); 141 set_byte(result, status, 3);
142} 142}
@@ -280,7 +280,7 @@ out:
280 return retval; 280 return retval;
281} 281}
282 282
283void 283static void
284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) 284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
285{ 285{
286 struct completion *wait = (struct completion *) scpnt->SCp.ptr; 286 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
@@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
324 * returns: 0 - success, SCSI command enqueued 324 * returns: 0 - success, SCSI command enqueued
325 * !0 - failure 325 * !0 - failure
326 */ 326 */
327int 327static int
328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
329 void (*done) (struct scsi_cmnd *)) 329 void (*done) (struct scsi_cmnd *))
330{ 330{
@@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
380 * will handle late commands. (Usually, the normal completion of late 380 * will handle late commands. (Usually, the normal completion of late
381 * commands is ignored with respect to the running abort operation.) 381 * commands is ignored with respect to the running abort operation.)
382 */ 382 */
383int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 383static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 384{
385 struct Scsi_Host *scsi_host; 385 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 386 struct zfcp_adapter *adapter;
@@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
445 return retval; 445 return retval;
446} 446}
447 447
448int 448static int
449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
450{ 450{
451 int retval; 451 int retval;
@@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
541/** 541/**
542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
543 */ 543 */
544int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 544static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
545{ 545{
546 struct zfcp_unit *unit; 546 struct zfcp_unit *unit;
547 struct zfcp_adapter *adapter; 547 struct zfcp_adapter *adapter;
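A note on the set_byte() helpers touched above: each one ORs a status value
into one byte lane of the 32-bit SCSI result word (host byte in lane 2,
driver byte in lane 3), which is why the pointer type moves from u32 * to
int *, matching the midlayer's int result field. A standalone sketch of the
packing, using made-up status values:

#include <stdio.h>

/* OR "status" into byte lane "pos" of the result word, mirroring the
 * set_byte() helper in zfcp_scsi.c. */
static void set_byte(int *result, unsigned char status, int pos)
{
	*result |= status << (pos * 8);
}

int main(void)
{
	int result = 0;

	set_byte(&result, 0x01, 2);	/* hypothetical host byte   */
	set_byte(&result, 0x08, 3);	/* hypothetical driver byte */
	printf("result = 0x%08x\n", result);	/* prints 0x08010000 */
	return 0;
}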
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 1e788e815ce7..090743d2f914 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -9,8 +9,14 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/proc_fs.h> 10#include <linux/proc_fs.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h>
12#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
13 14
15/* Sigh, math-emu. Don't ask. */
16#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h>
18#include <math-emu/single.h>
19
14struct sysinfo_1_1_1 { 20struct sysinfo_1_1_1 {
15 char reserved_0[32]; 21 char reserved_0[32];
16 char manufacturer[16]; 22 char manufacturer[16];
@@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
198 * if the higher order 8 bits are not zero. Printing 204 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no, 205 * a floating point number in the kernel is a no-no,
200 * always print the number as 32 bit unsigned integer. 206 * always print the number as 32 bit unsigned integer.
201 * The user-space needs to know about the stange 207 * The user-space needs to know about the strange
202 * encoding of the alternate cpu capability. 208 * encoding of the alternate cpu capability.
203 */ 209 */
204 len += sprintf(page + len, "Capability: %u %u\n", 210 len += sprintf(page + len, "Capability: %u %u\n",
@@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void)
351 357
352__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
353 359
360/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */
363void s390_adjust_jiffies(void)
364{
365 struct sysinfo_1_2_2 *info;
366 const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
367 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
368 FP_DECL_EX;
369 unsigned int capability;
370
371 info = (void *) get_zeroed_page(GFP_KERNEL);
372 if (!info)
373 return;
374
375 if (stsi(info, 1, 2, 2) != -ENOSYS) {
376 /*
377 * Major sigh. The cpu capability encoding is "special".
378 * If the first 9 bits of info->capability are 0 then it
379 * is a 32 bit unsigned integer in the range 0 .. 2^23.
380 * If the first 9 bits are != 0 then it is a 32 bit float.
381 * In addition a lower value indicates a proportionally
382 * higher cpu capacity. Bogomips are the other way round.
383 * To get to a halfway suitable number we divide 1e7
384 * by the cpu capability number. Yes, that means a floating
385 * point division .. math-emu here we come :-)
386 */
387 FP_UNPACK_SP(SA, &fmil);
388 if ((info->capability >> 23) == 0)
389 FP_FROM_INT_S(SB, info->capability, 32, int);
390 else
391 FP_UNPACK_SP(SB, &info->capability);
392 FP_DIV_S(SR, SA, SB);
393 FP_TO_INT_S(capability, SR, 32, 0);
394 } else
395 /*
396 * Really old machine without stsi block for basic
397 * cpu information. Report 42.0 bogomips.
398 */
399 capability = 42;
400 loops_per_jiffy = capability * (500000/HZ);
401 free_page((unsigned long) info);
402}
403
404/*
405 * calibrate the delay loop
406 */
407void __init calibrate_delay(void)
408{
409 s390_adjust_jiffies();
410 /* Print the good old Bogomips line .. */
411 printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
412 "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
413 (loops_per_jiffy/(5000/HZ)) % 100);
414}
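The comment block in s390_adjust_jiffies() above describes a dual encoding of
the STSI 1.2.2 capability word: if the top nine bits are zero it is a plain
unsigned integer, otherwise it is a 32-bit IEEE float, and in both cases a
smaller value means a faster CPU, so 1e7 is divided by it to get a
bogomips-style figure. A userspace sketch of the same decode using native
floating point (the kernel goes through math-emu because it must not touch
the FPU); the sample capability values are invented:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static unsigned int capability_to_bogo(uint32_t raw)
{
	float cap;

	if ((raw >> 23) == 0)
		cap = (float)raw;		 /* top 9 bits clear: integer */
	else
		memcpy(&cap, &raw, sizeof(cap)); /* otherwise: IEEE single    */

	return (unsigned int)(1e7f / cap);
}

int main(void)
{
	printf("%u\n", capability_to_bogo(5000));	/* integer form -> 2000 */
	printf("%u\n", capability_to_bogo(0x453b8000));	/* float 3000.0 -> 3333 */
	return 0;
}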
diff --git a/drivers/usb/input/Kconfig b/drivers/usb/input/Kconfig
index c7d887540d8d..aa6a620c162f 100644
--- a/drivers/usb/input/Kconfig
+++ b/drivers/usb/input/Kconfig
@@ -69,6 +69,14 @@ config LOGITECH_FF
69 Note: if you say N here, this device will still be supported, but without 69 Note: if you say N here, this device will still be supported, but without
70 force feedback. 70 force feedback.
71 71
72config PANTHERLORD_FF
73 bool "PantherLord USB/PS2 2in1 Adapter support"
74 depends on HID_FF
75 select INPUT_FF_MEMLESS if USB_HID
76 help
77 Say Y here if you have a PantherLord USB/PS2 2in1 Adapter and want
78 to enable force feedback support for it.
79
72config THRUSTMASTER_FF 80config THRUSTMASTER_FF
73 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)" 81 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)"
74 depends on HID_FF && EXPERIMENTAL 82 depends on HID_FF && EXPERIMENTAL
diff --git a/drivers/usb/input/Makefile b/drivers/usb/input/Makefile
index 1a24b5bfa05f..a06024e5cd56 100644
--- a/drivers/usb/input/Makefile
+++ b/drivers/usb/input/Makefile
@@ -17,6 +17,9 @@ endif
17ifeq ($(CONFIG_LOGITECH_FF),y) 17ifeq ($(CONFIG_LOGITECH_FF),y)
18 usbhid-objs += hid-lgff.o 18 usbhid-objs += hid-lgff.o
19endif 19endif
20ifeq ($(CONFIG_PANTHERLORD_FF),y)
21 usbhid-objs += hid-plff.o
22endif
20ifeq ($(CONFIG_THRUSTMASTER_FF),y) 23ifeq ($(CONFIG_THRUSTMASTER_FF),y)
21 usbhid-objs += hid-tmff.o 24 usbhid-objs += hid-tmff.o
22endif 25endif
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index c6c9e72e5fd9..e07a30490726 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/hid.h> 36#include <linux/hid.h>
37#include <linux/hiddev.h> 37#include <linux/hiddev.h>
38#include <linux/hid-debug.h>
38#include "usbhid.h" 39#include "usbhid.h"
39 40
40/* 41/*
@@ -220,23 +221,6 @@ static void hid_irq_in(struct urb *urb)
220 } 221 }
221} 222}
222 223
223/*
224 * Find a report field with a specified HID usage.
225 */
226#if 0
227struct hid_field *hid_find_field_by_usage(struct hid_device *hid, __u32 wanted_usage, int type)
228{
229 struct hid_report *report;
230 int i;
231
232 list_for_each_entry(report, &hid->report_enum[type].report_list, list)
233 for (i = 0; i < report->maxfield; i++)
234 if (report->field[i]->logical == wanted_usage)
235 return report->field[i];
236 return NULL;
237}
238#endif /* 0 */
239
240static int hid_submit_out(struct hid_device *hid) 224static int hid_submit_out(struct hid_device *hid)
241{ 225{
242 struct hid_report *report; 226 struct hid_report *report;
@@ -501,7 +485,7 @@ static int hid_get_class_descriptor(struct usb_device *dev, int ifnum,
501{ 485{
502 int result, retries = 4; 486 int result, retries = 4;
503 487
504 memset(buf,0,size); // Make sure we parse really received data 488 memset(buf, 0, size);
505 489
506 do { 490 do {
507 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 491 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
@@ -528,18 +512,6 @@ void usbhid_close(struct hid_device *hid)
528 usb_kill_urb(usbhid->urbin); 512 usb_kill_urb(usbhid->urbin);
529} 513}
530 514
531static int hidinput_open(struct input_dev *dev)
532{
533 struct hid_device *hid = dev->private;
534 return usbhid_open(hid);
535}
536
537static void hidinput_close(struct input_dev *dev)
538{
539 struct hid_device *hid = dev->private;
540 usbhid_close(hid);
541}
542
543#define USB_VENDOR_ID_PANJIT 0x134c 515#define USB_VENDOR_ID_PANJIT 0x134c
544 516
545#define USB_VENDOR_ID_TURBOX 0x062a 517#define USB_VENDOR_ID_TURBOX 0x062a
@@ -770,6 +742,7 @@ void usbhid_init_reports(struct hid_device *hid)
770#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c 742#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
771#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 743#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
772#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 744#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
745#define USB_DEVICE_ID_APPLE_IR 0x8240
773 746
774#define USB_VENDOR_ID_CHERRY 0x046a 747#define USB_VENDOR_ID_CHERRY 0x046a
775#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 748#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
@@ -792,6 +765,9 @@ void usbhid_init_reports(struct hid_device *hid)
792#define USB_VENDOR_ID_IMATION 0x0718 765#define USB_VENDOR_ID_IMATION 0x0718
793#define USB_DEVICE_ID_DISC_STAKKA 0xd000 766#define USB_DEVICE_ID_DISC_STAKKA 0xd000
794 767
768#define USB_VENDOR_ID_PANTHERLORD 0x0810
769#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001
770
795/* 771/*
796 * Alphabetically sorted blacklist by quirk type. 772 * Alphabetically sorted blacklist by quirk type.
797 */ 773 */
@@ -946,19 +922,21 @@ static const struct hid_blacklist {
946 922
947 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION }, 923 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
948 924
949 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 925 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
950 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN }, 926 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
951 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 927 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
952 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 928 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
953 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 929 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
954 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 930 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
955 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 931 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
956 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 932 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
957 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 933 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
958 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 934 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
959 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 935 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
960 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 936 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
961 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 937 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
938
939 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IR, HID_QUIRK_IGNORE },
962 940
963 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE }, 941 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE },
964 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE }, 942 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE },
@@ -969,6 +947,8 @@ static const struct hid_blacklist {
969 947
970 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS }, 948 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
971 949
950 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
951
972 { 0, 0 } 952 { 0, 0 }
973}; 953};
974 954
@@ -1064,6 +1044,11 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1064 if (quirks & HID_QUIRK_IGNORE) 1044 if (quirks & HID_QUIRK_IGNORE)
1065 return NULL; 1045 return NULL;
1066 1046
1047 if ((quirks & HID_QUIRK_IGNORE_MOUSE) &&
1048 (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE))
1049 return NULL;
1050
1051
1067 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) && 1052 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
1068 (!interface->desc.bNumEndpoints || 1053 (!interface->desc.bNumEndpoints ||
1069 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) { 1054 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
@@ -1235,8 +1220,8 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1235 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma; 1220 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
1236 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); 1221 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
1237 hid->hidinput_input_event = usb_hidinput_input_event; 1222 hid->hidinput_input_event = usb_hidinput_input_event;
1238 hid->hidinput_open = hidinput_open; 1223 hid->hid_open = usbhid_open;
1239 hid->hidinput_close = hidinput_close; 1224 hid->hid_close = usbhid_close;
1240#ifdef CONFIG_USB_HIDDEV 1225#ifdef CONFIG_USB_HIDDEV
1241 hid->hiddev_hid_event = hiddev_hid_event; 1226 hid->hiddev_hid_event = hiddev_hid_event;
1242 hid->hiddev_report_event = hiddev_report_event; 1227 hid->hiddev_report_event = hiddev_report_event;
@@ -1315,11 +1300,7 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
1315 return -ENODEV; 1300 return -ENODEV;
1316 } 1301 }
1317 1302
1318 /* This only gets called when we are a single-input (most of the 1303 if ((hid->claimed & HID_CLAIMED_INPUT))
1319 * time). IOW, not a HID_QUIRK_MULTI_INPUT. The hid_ff_init() is
1320 * only useful in this case, and not for multi-input quirks. */
1321 if ((hid->claimed & HID_CLAIMED_INPUT) &&
1322 !(hid->quirks & HID_QUIRK_MULTI_INPUT))
1323 hid_ff_init(hid); 1304 hid_ff_init(hid);
1324 1305
1325 printk(KERN_INFO); 1306 printk(KERN_INFO);
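For context on the blacklist edits above: the table is a zero-terminated list
of (vendor, product) pairs mapped to quirk bits, and usb_hid_configure() acts
on individual flags such as HID_QUIRK_IGNORE (skip the device entirely) or
the new HID_QUIRK_IGNORE_MOUSE (skip only the boot-protocol mouse interface).
A minimal standalone sketch of that lookup, with invented flag values and
invented table entries:

#include <stdio.h>

#define QUIRK_IGNORE		0x01	/* invented values, for illustration */
#define QUIRK_IGNORE_MOUSE	0x02

struct blacklist_entry {
	unsigned short vendor, product;
	unsigned int quirks;
};

static const struct blacklist_entry blacklist[] = {
	{ 0x1234, 0x0001, QUIRK_IGNORE },	/* drop the whole device   */
	{ 0x1234, 0x0002, QUIRK_IGNORE_MOUSE },	/* drop only the mouse i/f */
	{ 0, 0, 0 }				/* terminator              */
};

static unsigned int lookup_quirks(unsigned short vendor, unsigned short product)
{
	const struct blacklist_entry *e;

	for (e = blacklist; e->vendor; e++)
		if (e->vendor == vendor && e->product == product)
			return e->quirks;
	return 0;
}

int main(void)
{
	printf("0x%x\n", lookup_quirks(0x1234, 0x0002));	/* 0x2 */
	printf("0x%x\n", lookup_quirks(0xdead, 0xbeef));	/* 0x0 */
	return 0;
}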
diff --git a/drivers/usb/input/hid-ff.c b/drivers/usb/input/hid-ff.c
index 59ed65e7a621..5d145058a5cb 100644
--- a/drivers/usb/input/hid-ff.c
+++ b/drivers/usb/input/hid-ff.c
@@ -58,6 +58,9 @@ static struct hid_ff_initializer inits[] = {
58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */ 58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */
59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */ 59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */
60#endif 60#endif
61#ifdef CONFIG_PANTHERLORD_FF
62 { 0x810, 0x0001, hid_plff_init },
63#endif
61#ifdef CONFIG_THRUSTMASTER_FF 64#ifdef CONFIG_THRUSTMASTER_FF
62 { 0x44f, 0xb304, hid_tmff_init }, 65 { 0x44f, 0xb304, hid_tmff_init },
63#endif 66#endif
diff --git a/drivers/usb/input/hid-plff.c b/drivers/usb/input/hid-plff.c
new file mode 100644
index 000000000000..76d2e6e14db4
--- /dev/null
+++ b/drivers/usb/input/hid-plff.c
@@ -0,0 +1,129 @@
1/*
2 * Force feedback support for PantherLord USB/PS2 2in1 Adapter devices
3 *
4 * Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23
24/* #define DEBUG */
25
26#define debug(format, arg...) pr_debug("hid-plff: " format "\n" , ## arg)
27
28#include <linux/input.h>
29#include <linux/usb.h>
30#include <linux/hid.h>
31#include "usbhid.h"
32
33struct plff_device {
34 struct hid_report *report;
35};
36
37static int hid_plff_play(struct input_dev *dev, void *data,
38 struct ff_effect *effect)
39{
40 struct hid_device *hid = dev->private;
41 struct plff_device *plff = data;
42 int left, right;
43
44 left = effect->u.rumble.strong_magnitude;
45 right = effect->u.rumble.weak_magnitude;
46 debug("called with 0x%04x 0x%04x", left, right);
47
48 left = left * 0x7f / 0xffff;
49 right = right * 0x7f / 0xffff;
50
51 plff->report->field[0]->value[2] = left;
52 plff->report->field[0]->value[3] = right;
53 debug("running with 0x%02x 0x%02x", left, right);
54 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
55
56 return 0;
57}
58
59int hid_plff_init(struct hid_device *hid)
60{
61 struct plff_device *plff;
62 struct hid_report *report;
63 struct hid_input *hidinput;
64 struct list_head *report_list =
65 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
66 struct list_head *report_ptr = report_list;
67 struct input_dev *dev;
68 int error;
69
70 /* The device contains 2 output reports (one for each
71 HID_QUIRK_MULTI_INPUT device), both containing 1 field, which
72 contains 4 ff00.0002 usages and 4 16bit absolute values.
73
74 The 2 input reports also contain a field which contains
75 8 ff00.0001 usages and 8 boolean values. Their meaning is
76 currently unknown. */
77
78 if (list_empty(report_list)) {
79 printk(KERN_ERR "hid-plff: no output reports found\n");
80 return -ENODEV;
81 }
82
83 list_for_each_entry(hidinput, &hid->inputs, list) {
84
85 report_ptr = report_ptr->next;
86
87 if (report_ptr == report_list) {
88 printk(KERN_ERR "hid-plff: required output report is missing\n");
89 return -ENODEV;
90 }
91
92 report = list_entry(report_ptr, struct hid_report, list);
93 if (report->maxfield < 1) {
94 printk(KERN_ERR "hid-plff: no fields in the report\n");
95 return -ENODEV;
96 }
97
98 if (report->field[0]->report_count < 4) {
99 printk(KERN_ERR "hid-plff: not enough values in the field\n");
100 return -ENODEV;
101 }
102
103 plff = kzalloc(sizeof(struct plff_device), GFP_KERNEL);
104 if (!plff)
105 return -ENOMEM;
106
107 dev = hidinput->input;
108
109 set_bit(FF_RUMBLE, dev->ffbit);
110
111 error = input_ff_create_memless(dev, plff, hid_plff_play);
112 if (error) {
113 kfree(plff);
114 return error;
115 }
116
117 plff->report = report;
118 plff->report->field[0]->value[0] = 0x00;
119 plff->report->field[0]->value[1] = 0x00;
120 plff->report->field[0]->value[2] = 0x00;
121 plff->report->field[0]->value[3] = 0x00;
122 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
123 }
124
125 printk(KERN_INFO "hid-plff: Force feedback for PantherLord USB/PS2 "
126 "2in1 Adapters by Anssi Hannula <anssi.hannula@gmail.com>\n");
127
128 return 0;
129}
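hid_plff_play() above scales the 16-bit rumble magnitudes handed in by the
force-feedback core (0..0xffff) down to the 7-bit range the adapter's output
report carries (0..0x7f). The scaling in isolation, as a runnable sketch:

#include <stdio.h>

/* Scale a 16-bit FF magnitude to the 7-bit report value. */
static int scale_magnitude(int magnitude)
{
	return magnitude * 0x7f / 0xffff;
}

int main(void)
{
	printf("%d\n", scale_magnitude(0x0000));	/* 0   */
	printf("%d\n", scale_magnitude(0x8000));	/* 63  */
	printf("%d\n", scale_magnitude(0xffff));	/* 127 */
	return 0;
}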
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index d04d2f7448d9..85e3850bf2c9 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,6 +1,8 @@
1Version 1.47 1Version 1.47
2------------ 2------------
3Fix oops in list_del during mount caused by unaligned string. 3Fix oops in list_del during mount caused by unaligned string.
4Seek to SEEK_END forces check for update of file size for non-cached
5files.
4 6
5Version 1.46 7Version 1.46
6------------ 8------------
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 10c90294cd18..93ef09971d2f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -511,7 +511,15 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
511{ 511{
512 /* origin == SEEK_END => we must revalidate the cached file length */ 512 /* origin == SEEK_END => we must revalidate the cached file length */
513 if (origin == SEEK_END) { 513 if (origin == SEEK_END) {
514 int retval = cifs_revalidate(file->f_path.dentry); 514 int retval;
515
516 /* some applications poll for the file length in this strange
517 way so we must seek to end on non-oplocked files by
518 setting the revalidate time to zero */
519 if(file->f_path.dentry->d_inode)
520 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
521
522 retval = cifs_revalidate(file->f_path.dentry);
515 if (retval < 0) 523 if (retval < 0)
516 return (loff_t)retval; 524 return (loff_t)retval;
517 } 525 }
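The cifs_llseek() comment above refers to applications that poll a growing
file's length by repeatedly seeking to its end; zeroing the inode's
revalidation timestamp makes cifs_revalidate() fetch a fresh size from the
server for non-oplocked files. The userspace pattern being accommodated looks
roughly like this (the path is hypothetical):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	int fd = open("/mnt/cifs/logfile", O_RDONLY);	/* some CIFS mount */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (int i = 0; i < 5; i++) {
		/* Each SEEK_END now triggers a size revalidation. */
		off_t size = lseek(fd, 0, SEEK_END);
		printf("current size: %lld bytes\n", (long long)size);
		sleep(1);
	}
	close(fd);
	return 0;
}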
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8a49b2e77d37..e9dcf5ee29a2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1146,7 +1146,7 @@ static int cifs_writepages(struct address_space *mapping,
1146 pgoff_t end; 1146 pgoff_t end;
1147 pgoff_t index; 1147 pgoff_t index;
1148 int range_whole = 0; 1148 int range_whole = 0;
1149 struct kvec iov[32]; 1149 struct kvec * iov;
1150 int len; 1150 int len;
1151 int n_iov = 0; 1151 int n_iov = 0;
1152 pgoff_t next; 1152 pgoff_t next;
@@ -1171,15 +1171,21 @@ static int cifs_writepages(struct address_space *mapping,
1171 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server)) 1171 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1172 if(cifs_sb->tcon->ses->server->secMode & 1172 if(cifs_sb->tcon->ses->server->secMode &
1173 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 1173 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1174 if(!experimEnabled) 1174 if(!experimEnabled)
1175 return generic_writepages(mapping, wbc); 1175 return generic_writepages(mapping, wbc);
1176 1176
1177 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1178 if(iov == NULL)
1179 return generic_writepages(mapping, wbc);
1180
1181
1177 /* 1182 /*
1178 * BB: Is this meaningful for a non-block-device file system? 1183 * BB: Is this meaningful for a non-block-device file system?
1179 * If it is, we should test it again after we do I/O 1184 * If it is, we should test it again after we do I/O
1180 */ 1185 */
1181 if (wbc->nonblocking && bdi_write_congested(bdi)) { 1186 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1182 wbc->encountered_congestion = 1; 1187 wbc->encountered_congestion = 1;
1188 kfree(iov);
1183 return 0; 1189 return 0;
1184 } 1190 }
1185 1191
@@ -1345,7 +1351,7 @@ retry:
1345 mapping->writeback_index = index; 1351 mapping->writeback_index = index;
1346 1352
1347 FreeXid(xid); 1353 FreeXid(xid);
1348 1354 kfree(iov);
1349 return rc; 1355 return rc;
1350} 1356}
1351 1357
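The cifs_writepages() hunks above move a 32-element struct kvec array (about
half a kilobyte on 64-bit) off the kernel stack onto the heap, fall back to
generic_writepages() when the allocation fails, and free the buffer on every
return path. A minimal sketch of the same allocate-with-fallback shape, using
userspace malloc in place of kmalloc:

#include <stdio.h>
#include <stdlib.h>

struct kvec { void *iov_base; size_t iov_len; };

static int fallback_writepages(void) { return 0; }	/* stand-in */

static int writepages_sketch(void)
{
	struct kvec *iov;
	int rc = 0;

	iov = malloc(32 * sizeof(struct kvec));	/* keep it off the stack */
	if (iov == NULL)
		return fallback_writepages();	/* degrade, don't fail   */

	/* ... fill iov[] and issue the writes here ... */

	free(iov);				/* on every exit path    */
	return rc;
}

int main(void)
{
	return writepages_sketch();
}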
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 99dfb5337e31..782940be550f 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -156,9 +156,9 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
156 tmp_inode->i_atime = cnvrtDosUnixTm( 156 tmp_inode->i_atime = cnvrtDosUnixTm(
157 le16_to_cpu(pfindData->LastAccessDate), 157 le16_to_cpu(pfindData->LastAccessDate),
158 le16_to_cpu(pfindData->LastAccessTime)); 158 le16_to_cpu(pfindData->LastAccessTime));
159 tmp_inode->i_ctime = cnvrtDosUnixTm( 159 tmp_inode->i_ctime = cnvrtDosUnixTm(
160 le16_to_cpu(pfindData->LastWriteDate), 160 le16_to_cpu(pfindData->LastWriteDate),
161 le16_to_cpu(pfindData->LastWriteTime)); 161 le16_to_cpu(pfindData->LastWriteTime));
162 AdjustForTZ(cifs_sb->tcon, tmp_inode); 162 AdjustForTZ(cifs_sb->tcon, tmp_inode);
163 attr = le16_to_cpu(pfindData->Attributes); 163 attr = le16_to_cpu(pfindData->Attributes);
164 allocation_size = le32_to_cpu(pfindData->AllocationSize); 164 allocation_size = le32_to_cpu(pfindData->AllocationSize);
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index 7a1b2b961ec8..1b1daf63f062 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -196,7 +196,7 @@ dohash(char *out, char *in, char *key, int forw)
196 char c[28]; 196 char c[28];
197 char d[28]; 197 char d[28];
198 char *cd; 198 char *cd;
199 char ki[16][48]; 199 char (*ki)[48];
200 char *pd1; 200 char *pd1;
201 char l[32], r[32]; 201 char l[32], r[32];
202 char *rl; 202 char *rl;
@@ -206,6 +206,12 @@ dohash(char *out, char *in, char *key, int forw)
206 if(pk1 == NULL) 206 if(pk1 == NULL)
207 return; 207 return;
208 208
209 ki = kmalloc(16*48, GFP_KERNEL);
210 if(ki == NULL) {
211 kfree(pk1);
212 return;
213 }
214
209 cd = pk1 + 56; 215 cd = pk1 + 56;
210 pd1= cd + 56; 216 pd1= cd + 56;
211 rl = pd1 + 64; 217 rl = pd1 + 64;
@@ -243,6 +249,7 @@ dohash(char *out, char *in, char *key, int forw)
243 er = kmalloc(48+48+32+32+32, GFP_KERNEL); 249 er = kmalloc(48+48+32+32+32, GFP_KERNEL);
244 if(er == NULL) { 250 if(er == NULL) {
245 kfree(pk1); 251 kfree(pk1);
252 kfree(ki);
246 return; 253 return;
247 } 254 }
248 erk = er+48; 255 erk = er+48;
@@ -290,6 +297,7 @@ dohash(char *out, char *in, char *key, int forw)
290 297
291 permute(out, rl, perm6, 64); 298 permute(out, rl, perm6, 64);
292 kfree(pk1); 299 kfree(pk1);
300 kfree(ki);
293} 301}
294 302
295static void 303static void
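The dohash() change above is the same stack-pressure fix in another shape:
char ki[16][48] becomes char (*ki)[48], a pointer to 48-byte rows, so a single
kmalloc'd block keeps the ki[i][j] indexing intact while 768 bytes leave the
stack. A standalone illustration of that idiom:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Pointer to arrays of 48 chars: one heap block, 2-D indexing. */
	char (*ki)[48] = malloc(16 * 48);
	if (ki == NULL)
		return 1;

	for (int i = 0; i < 16; i++)
		for (int j = 0; j < 48; j++)
			ki[i][j] = (char)((i + j) & 1);

	printf("ki[3][5] = %d\n", ki[3][5]);	/* rows sit 48 bytes apart */
	free(ki);
	return 0;
}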
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index e1216364d191..d026b4f27757 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -306,8 +306,8 @@ int ocfs2_journal_dirty_data(handle_t *handle,
306 * for the dinode, one for the new block. */ 306 * for the dinode, one for the new block. */
307#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2) 307#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
308 308
309/* file update (nlink, etc) + dir entry block */ 309/* file update (nlink, etc) + directory mtime/ctime + dir entry block */
310#define OCFS2_LINK_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) 310#define OCFS2_LINK_CREDITS (2*OCFS2_INODE_UPDATE_CREDITS + 1)
311 311
312/* inode + dir inode (if we unlink a dir), + dir entry block + orphan 312/* inode + dir inode (if we unlink a dir), + dir entry block + orphan
313 * dir inode link */ 313 * dir inode link */
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
index dad3a735df8b..4d97f60f1ef5 100644
--- a/include/asm-ia64/dma.h
+++ b/include/asm-ia64/dma.h
@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
19 19
20#define free_dma(x) 20#define free_dma(x)
21 21
22void dma_mark_clean(void *addr, size_t size);
23
22#endif /* _ASM_IA64_DMA_H */ 24#endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-ia64/esi.h b/include/asm-ia64/esi.h
index 84aac0e0b583..40991c6ba647 100644
--- a/include/asm-ia64/esi.h
+++ b/include/asm-ia64/esi.h
@@ -19,7 +19,6 @@ enum esi_proc_type {
19 ESI_PROC_REENTRANT /* MP-safe and reentrant */ 19 ESI_PROC_REENTRANT /* MP-safe and reentrant */
20}; 20};
21 21
22extern int ia64_esi_init (void);
23extern struct ia64_sal_retval esi_call_phys (void *, u64 *); 22extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
24extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *, 23extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
25 enum esi_proc_type, 24 enum esi_proc_type,
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index c8df75901083..6dd476b652c6 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -51,12 +51,13 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
51 51
52#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ 52#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
53 53
54extern int register_active_ranges(u64 start, u64 end, void *arg);
55
54#ifdef CONFIG_VIRTUAL_MEM_MAP 56#ifdef CONFIG_VIRTUAL_MEM_MAP
55# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ 57# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
56 extern unsigned long vmalloc_end; 58 extern unsigned long vmalloc_end;
57 extern struct page *vmem_map; 59 extern struct page *vmem_map;
58 extern int find_largest_hole (u64 start, u64 end, void *arg); 60 extern int find_largest_hole (u64 start, u64 end, void *arg);
59 extern int register_active_ranges (u64 start, u64 end, void *arg);
60 extern int create_mem_map_page_table (u64 start, u64 end, void *arg); 61 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
61 extern int vmemmap_find_next_valid_pfn(int, int); 62 extern int vmemmap_find_next_valid_pfn(int, int);
62#else 63#else
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 393e04c42a2c..560c287b1233 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -137,7 +137,8 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
137static inline struct page *pte_alloc_one(struct mm_struct *mm, 137static inline struct page *pte_alloc_one(struct mm_struct *mm,
138 unsigned long addr) 138 unsigned long addr)
139{ 139{
140 return virt_to_page(pgtable_quicklist_alloc()); 140 void *pg = pgtable_quicklist_alloc();
141 return pg ? virt_to_page(pg) : NULL;
141} 142}
142 143
143static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 144static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
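The pte_alloc_one() fix above guards against the quicklist allocator returning
NULL: feeding NULL to virt_to_page() would hand back a bogus struct page
pointer instead of reporting the failure. The check-before-convert shape as a
standalone sketch, with toy stand-ins for the allocator and for virt_to_page():

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct page { unsigned long pfn; };
static struct page mem_map[16];			/* toy page array */

/* Stand-in for virt_to_page(): no notion of failure, garbage in, garbage out. */
static struct page *virt_to_page_stub(void *addr)
{
	return &mem_map[((uintptr_t)addr >> 12) % 16];
}

static struct page *pte_alloc_one_sketch(void)
{
	void *pg = calloc(1, 4096);		/* stand-in allocator */

	/* Report the failure instead of translating a NULL pointer. */
	return pg ? virt_to_page_stub(pg) : NULL;
}

int main(void)
{
	printf("%s\n", pte_alloc_one_sketch() ? "got a pte page" : "ENOMEM");
	return 0;
}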
diff --git a/include/asm-ia64/swiotlb.h b/include/asm-ia64/swiotlb.h
new file mode 100644
index 000000000000..452c162dee4e
--- /dev/null
+++ b/include/asm-ia64/swiotlb.h
@@ -0,0 +1,9 @@
1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWIOTLB_H 1
3
4#include <asm/machvec.h>
5
6#define SWIOTLB_ARCH_NEED_LATE_INIT
7#define SWIOTLB_ARCH_NEED_ALLOC
8
9#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 9b505b25544f..91698599f918 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -84,6 +84,7 @@ struct thread_info {
84#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 84#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */ 85#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 86#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
87#define TIF_SINGLESTEP 5 /* restore singlestep on return to user mode */
87#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 88#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
88#define TIF_MEMDIE 17 89#define TIF_MEMDIE 17
89#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 90#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
@@ -92,7 +93,8 @@ struct thread_info {
92 93
93#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 94#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
94#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 95#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
95#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 96#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
97#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP)
96#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 98#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
97#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 99#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
98#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 100#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 53c5c0ee122c..a9e1fa4cac4d 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -291,11 +291,13 @@
291#define __NR_sync_file_range 1300 291#define __NR_sync_file_range 1300
292#define __NR_tee 1301 292#define __NR_tee 1301
293#define __NR_vmsplice 1302 293#define __NR_vmsplice 1302
294/* 1303 reserved for move_pages */
295#define __NR_getcpu 1304
294 296
295#ifdef __KERNEL__ 297#ifdef __KERNEL__
296 298
297 299
298#define NR_syscalls 279 /* length of syscall table */ 300#define NR_syscalls 281 /* length of syscall table */
299 301
300#define __ARCH_WANT_SYS_RT_SIGACTION 302#define __ARCH_WANT_SYS_RT_SIGACTION
301 303
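On the NR_syscalls bump above: ia64 system call numbers are offset by 1024, so
with slot 1303 reserved for move_pages and __NR_getcpu at 1304 the table now
needs 1304 - 1024 + 1 = 281 entries, up from 279. A trivial check of the
arithmetic (the 1024 base is stated here from general ia64 convention, not
from this hunk):

#include <stdio.h>

#define IA64_SYSCALL_BASE	1024	/* first ia64 __NR_ value */
#define NR_GETCPU		1304

int main(void)
{
	printf("NR_syscalls = %d\n", NR_GETCPU - IA64_SYSCALL_BASE + 1); /* 281 */
	return 0;
}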
diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h
index 8e321f53a382..c7c945baf1ee 100644
--- a/include/asm-mips/bootinfo.h
+++ b/include/asm-mips/bootinfo.h
@@ -243,6 +243,10 @@ extern struct boot_mem_map boot_mem_map;
243extern void add_memory_region(phys_t start, phys_t size, long type); 243extern void add_memory_region(phys_t start, phys_t size, long type);
244 244
245extern void prom_init(void); 245extern void prom_init(void);
246extern void prom_free_prom_memory(void);
247
248extern void free_init_pages(const char *what,
249 unsigned long begin, unsigned long end);
246 250
247/* 251/*
248 * Initial kernel command line, usually setup by prom_init() 252 * Initial kernel command line, usually setup by prom_init()
diff --git a/include/asm-mips/ddb5xxx/ddb5477.h b/include/asm-mips/ddb5xxx/ddb5477.h
index c5af4b73fdd7..6cf177caf6d5 100644
--- a/include/asm-mips/ddb5xxx/ddb5477.h
+++ b/include/asm-mips/ddb5xxx/ddb5477.h
@@ -17,6 +17,7 @@
17#ifndef __ASM_DDB5XXX_DDB5477_H 17#ifndef __ASM_DDB5XXX_DDB5477_H
18#define __ASM_DDB5XXX_DDB5477_H 18#define __ASM_DDB5XXX_DDB5477_H
19 19
20#include <irq.h>
20 21
21/* 22/*
22 * This contains macros that are specific to DDB5477 or renamed from 23 * This contains macros that are specific to DDB5477 or renamed from
@@ -251,14 +252,10 @@ extern void ll_vrc5477_irq_disable(int vrc5477_irq);
251 */ 252 */
252 253
253#define NUM_CPU_IRQ 8 254#define NUM_CPU_IRQ 8
254#define NUM_I8259_IRQ 16
255#define NUM_VRC5477_IRQ 32 255#define NUM_VRC5477_IRQ 32
256 256
257#define DDB_IRQ_BASE 0 257#define CPU_IRQ_BASE MIPS_CPU_IRQ_BASE
258 258#define VRC5477_IRQ_BASE (CPU_IRQ_BASE + NUM_CPU_IRQ)
259#define I8259_IRQ_BASE DDB_IRQ_BASE
260#define VRC5477_IRQ_BASE (I8259_IRQ_BASE + NUM_I8259_IRQ)
261#define CPU_IRQ_BASE (VRC5477_IRQ_BASE + NUM_VRC5477_IRQ)
262 259
263/* 260/*
264 * vrc5477 irq defs 261 * vrc5477 irq defs
@@ -300,22 +297,22 @@ extern void ll_vrc5477_irq_disable(int vrc5477_irq);
300/* 297/*
301 * i2859 irq assignment 298 * i2859 irq assignment
302 */ 299 */
303#define I8259_IRQ_RESERVED_0 (0 + I8259_IRQ_BASE) 300#define I8259_IRQ_RESERVED_0 (0 + I8259A_IRQ_BASE)
304#define I8259_IRQ_KEYBOARD (1 + I8259_IRQ_BASE) /* M1543 default */ 301#define I8259_IRQ_KEYBOARD (1 + I8259A_IRQ_BASE) /* M1543 default */
305#define I8259_IRQ_CASCADE (2 + I8259_IRQ_BASE) 302#define I8259_IRQ_CASCADE (2 + I8259A_IRQ_BASE)
306#define I8259_IRQ_UART_B (3 + I8259_IRQ_BASE) /* M1543 default, may conflict with RTC according to schematic diagram */ 303#define I8259_IRQ_UART_B (3 + I8259A_IRQ_BASE) /* M1543 default, may conflict with RTC according to schematic diagram */
307#define I8259_IRQ_UART_A (4 + I8259_IRQ_BASE) /* M1543 default */ 304#define I8259_IRQ_UART_A (4 + I8259A_IRQ_BASE) /* M1543 default */
308#define I8259_IRQ_PARALLEL (5 + I8259_IRQ_BASE) /* M1543 default */ 305#define I8259_IRQ_PARALLEL (5 + I8259A_IRQ_BASE) /* M1543 default */
309#define I8259_IRQ_RESERVED_6 (6 + I8259_IRQ_BASE) 306#define I8259_IRQ_RESERVED_6 (6 + I8259A_IRQ_BASE)
310#define I8259_IRQ_RESERVED_7 (7 + I8259_IRQ_BASE) 307#define I8259_IRQ_RESERVED_7 (7 + I8259A_IRQ_BASE)
311#define I8259_IRQ_RTC (8 + I8259_IRQ_BASE) /* who set this? */ 308#define I8259_IRQ_RTC (8 + I8259A_IRQ_BASE) /* who set this? */
312#define I8259_IRQ_USB (9 + I8259_IRQ_BASE) /* ddb_setup */ 309#define I8259_IRQ_USB (9 + I8259A_IRQ_BASE) /* ddb_setup */
313#define I8259_IRQ_PMU (10 + I8259_IRQ_BASE) /* ddb_setup */ 310#define I8259_IRQ_PMU (10 + I8259A_IRQ_BASE) /* ddb_setup */
314#define I8259_IRQ_RESERVED_11 (11 + I8259_IRQ_BASE) 311#define I8259_IRQ_RESERVED_11 (11 + I8259A_IRQ_BASE)
315#define I8259_IRQ_RESERVED_12 (12 + I8259_IRQ_BASE) /* m1543_irq_setup */ 312#define I8259_IRQ_RESERVED_12 (12 + I8259A_IRQ_BASE) /* m1543_irq_setup */
316#define I8259_IRQ_RESERVED_13 (13 + I8259_IRQ_BASE) 313#define I8259_IRQ_RESERVED_13 (13 + I8259A_IRQ_BASE)
317#define I8259_IRQ_HDC1 (14 + I8259_IRQ_BASE) /* default and ddb_setup */ 314#define I8259_IRQ_HDC1 (14 + I8259A_IRQ_BASE) /* default and ddb_setup */
318#define I8259_IRQ_HDC2 (15 + I8259_IRQ_BASE) /* default */ 315#define I8259_IRQ_HDC2 (15 + I8259A_IRQ_BASE) /* default */
319 316
320 317
321/* 318/*
diff --git a/include/asm-mips/dec/interrupts.h b/include/asm-mips/dec/interrupts.h
index 273e4d65bfe6..e10d341067c8 100644
--- a/include/asm-mips/dec/interrupts.h
+++ b/include/asm-mips/dec/interrupts.h
@@ -14,6 +14,7 @@
14#ifndef __ASM_DEC_INTERRUPTS_H 14#ifndef __ASM_DEC_INTERRUPTS_H
15#define __ASM_DEC_INTERRUPTS_H 15#define __ASM_DEC_INTERRUPTS_H
16 16
17#include <irq.h>
17#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
18 19
19 20
@@ -87,7 +88,7 @@
87#define DEC_CPU_INR_SW1 1 /* software #1 */ 88#define DEC_CPU_INR_SW1 1 /* software #1 */
88#define DEC_CPU_INR_SW0 0 /* software #0 */ 89#define DEC_CPU_INR_SW0 0 /* software #0 */
89 90
90#define DEC_CPU_IRQ_BASE 0 /* first IRQ assigned to CPU */ 91#define DEC_CPU_IRQ_BASE MIPS_CPU_IRQ_BASE /* first IRQ assigned to CPU */
91 92
92#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE) 93#define DEC_CPU_IRQ_NR(n) ((n) + DEC_CPU_IRQ_BASE)
93#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP)) 94#define DEC_CPU_IRQ_MASK(n) (1 << ((n) + CAUSEB_IP))
diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h
index 23f789c80845..e06ef0776d48 100644
--- a/include/asm-mips/dma.h
+++ b/include/asm-mips/dma.h
@@ -91,6 +91,7 @@
91#else 91#else
92#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000) 92#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x01000000)
93#endif 93#endif
94#define MAX_DMA_PFN PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
94 95
95/* 8237 DMA controllers */ 96/* 8237 DMA controllers */
96#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ 97#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
diff --git a/include/asm-mips/emma2rh/emma2rh.h b/include/asm-mips/emma2rh/emma2rh.h
index 4fb8df71caa9..6a1af0af51e3 100644
--- a/include/asm-mips/emma2rh/emma2rh.h
+++ b/include/asm-mips/emma2rh/emma2rh.h
@@ -24,6 +24,8 @@
24#ifndef __ASM_EMMA2RH_EMMA2RH_H 24#ifndef __ASM_EMMA2RH_EMMA2RH_H
25#define __ASM_EMMA2RH_EMMA2RH_H 25#define __ASM_EMMA2RH_EMMA2RH_H
26 26
27#include <irq.h>
28
27/* 29/*
28 * EMMA2RH registers 30 * EMMA2RH registers
29 */ 31 */
@@ -104,7 +106,8 @@
104#define NUM_EMMA2RH_IRQ 96 106#define NUM_EMMA2RH_IRQ 96
105 107
106#define CPU_EMMA2RH_CASCADE 2 108#define CPU_EMMA2RH_CASCADE 2
107#define EMMA2RH_IRQ_BASE 0 109#define CPU_IRQ_BASE MIPS_CPU_IRQ_BASE
110#define EMMA2RH_IRQ_BASE (CPU_IRQ_BASE + NUM_CPU_IRQ)
108 111
109/* 112/*
110 * emma2rh irq defs 113 * emma2rh irq defs
diff --git a/include/asm-mips/emma2rh/markeins.h b/include/asm-mips/emma2rh/markeins.h
index 8fa766795078..973b0628490d 100644
--- a/include/asm-mips/emma2rh/markeins.h
+++ b/include/asm-mips/emma2rh/markeins.h
@@ -33,7 +33,6 @@
33 33
34#define EMMA2RH_SW_IRQ_BASE (EMMA2RH_IRQ_BASE + NUM_EMMA2RH_IRQ) 34#define EMMA2RH_SW_IRQ_BASE (EMMA2RH_IRQ_BASE + NUM_EMMA2RH_IRQ)
35#define EMMA2RH_GPIO_IRQ_BASE (EMMA2RH_SW_IRQ_BASE + NUM_EMMA2RH_IRQ_SW) 35#define EMMA2RH_GPIO_IRQ_BASE (EMMA2RH_SW_IRQ_BASE + NUM_EMMA2RH_IRQ_SW)
36#define CPU_IRQ_BASE (EMMA2RH_GPIO_IRQ_BASE + NUM_EMMA2RH_IRQ_GPIO)
37 36
38#define EMMA2RH_SW_IRQ_INT0 (0+EMMA2RH_SW_IRQ_BASE) 37#define EMMA2RH_SW_IRQ_INT0 (0+EMMA2RH_SW_IRQ_BASE)
39#define EMMA2RH_SW_IRQ_INT1 (1+EMMA2RH_SW_IRQ_BASE) 38#define EMMA2RH_SW_IRQ_INT1 (1+EMMA2RH_SW_IRQ_BASE)
diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h
index 4df8d8b118c0..e88a01607fea 100644
--- a/include/asm-mips/i8259.h
+++ b/include/asm-mips/i8259.h
@@ -18,6 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21#include <irq.h>
21 22
22/* i8259A PIC registers */ 23/* i8259A PIC registers */
23#define PIC_MASTER_CMD 0x20 24#define PIC_MASTER_CMD 0x20
@@ -42,8 +43,6 @@ extern void disable_8259A_irq(unsigned int irq);
42 43
43extern void init_i8259_irqs(void); 44extern void init_i8259_irqs(void);
44 45
45#define I8259A_IRQ_BASE 0
46
47/* 46/*
48 * Do the traditional i8259 interrupt polling thing. This is for the few 47 * Do the traditional i8259 interrupt polling thing. This is for the few
49 * cases where no better interrupt acknowledge method is available and we 48 * cases where no better interrupt acknowledge method is available and we
diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index d77b657c09c7..67f081078904 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -115,7 +115,7 @@ static inline void set_io_port_base(unsigned long base)
115 */ 115 */
116static inline unsigned long virt_to_phys(volatile const void *address) 116static inline unsigned long virt_to_phys(volatile const void *address)
117{ 117{
118 return (unsigned long)address - PAGE_OFFSET; 118 return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
119} 119}
120 120
121/* 121/*
@@ -132,7 +132,7 @@ static inline unsigned long virt_to_phys(volatile const void *address)
132 */ 132 */
133static inline void * phys_to_virt(unsigned long address) 133static inline void * phys_to_virt(unsigned long address)
134{ 134{
135 return (void *)(address + PAGE_OFFSET); 135 return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
136} 136}
137 137
138/* 138/*
diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h
index 386da82e5774..91803ba30ff2 100644
--- a/include/asm-mips/irq.h
+++ b/include/asm-mips/irq.h
@@ -18,7 +18,7 @@
18#ifdef CONFIG_I8259 18#ifdef CONFIG_I8259
19static inline int irq_canonicalize(int irq) 19static inline int irq_canonicalize(int irq)
20{ 20{
21 return ((irq == 2) ? 9 : irq); 21 return ((irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq);
22} 22}
23#else 23#else
24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */ 24#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
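irq_canonicalize() keeps the classic i8259 convention that the cascade input
(IRQ 2) is reported as IRQ 9, but now applies it relative to I8259A_IRQ_BASE
instead of assuming the PIC starts at 0. A runnable sketch with an arbitrary
non-zero base:

#include <stdio.h>

#define I8259A_IRQ_BASE 72	/* arbitrary base, for illustration */

static int irq_canonicalize(int irq)
{
	/* The slave PIC cascades into master input 2; report it as 9. */
	return (irq == I8259A_IRQ_BASE + 2) ? I8259A_IRQ_BASE + 9 : irq;
}

int main(void)
{
	printf("%d\n", irq_canonicalize(I8259A_IRQ_BASE + 2));	/* 81 */
	printf("%d\n", irq_canonicalize(I8259A_IRQ_BASE + 5));	/* 77 */
	return 0;
}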
diff --git a/include/asm-mips/irq_cpu.h b/include/asm-mips/irq_cpu.h
index ed3d1e3d09ec..ef6a07cddb23 100644
--- a/include/asm-mips/irq_cpu.h
+++ b/include/asm-mips/irq_cpu.h
@@ -13,8 +13,8 @@
13#ifndef _ASM_IRQ_CPU_H 13#ifndef _ASM_IRQ_CPU_H
14#define _ASM_IRQ_CPU_H 14#define _ASM_IRQ_CPU_H
15 15
16extern void mips_cpu_irq_init(int irq_base); 16extern void mips_cpu_irq_init(void);
17extern void rm7k_cpu_irq_init(int irq_base); 17extern void rm7k_cpu_irq_init(void);
18extern void rm9k_cpu_irq_init(int irq_base); 18extern void rm9k_cpu_irq_init(void);
19 19
20#endif /* _ASM_IRQ_CPU_H */ 20#endif /* _ASM_IRQ_CPU_H */
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 582acd8adb81..58fca8a5a9a6 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -39,6 +39,7 @@
39#ifndef _LANGUAGE_ASSEMBLY 39#ifndef _LANGUAGE_ASSEMBLY
40 40
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/types.h>
42#include <asm/io.h> 43#include <asm/io.h>
43 44
44/* cpu pipeline flush */ 45/* cpu pipeline flush */
diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h
index 00b0fc68d5cb..24a8d51a55a3 100644
--- a/include/asm-mips/mach-cobalt/cobalt.h
+++ b/include/asm-mips/mach-cobalt/cobalt.h
@@ -12,6 +12,8 @@
12#ifndef __ASM_COBALT_H 12#ifndef __ASM_COBALT_H
13#define __ASM_COBALT_H 13#define __ASM_COBALT_H
14 14
15#include <irq.h>
16
15/* 17/*
16 * i8259 legacy interrupts used on Cobalt: 18 * i8259 legacy interrupts used on Cobalt:
17 * 19 *
@@ -25,7 +27,7 @@
25/* 27/*
26 * CPU IRQs are 16 ... 23 28 * CPU IRQs are 16 ... 23
27 */ 29 */
28#define COBALT_CPU_IRQ 16 30#define COBALT_CPU_IRQ MIPS_CPU_IRQ_BASE
29 31
30#define COBALT_GALILEO_IRQ (COBALT_CPU_IRQ + 2) 32#define COBALT_GALILEO_IRQ (COBALT_CPU_IRQ + 2)
31#define COBALT_SCC_IRQ (COBALT_CPU_IRQ + 3) /* pre-production has 85C30 */ 33#define COBALT_SCC_IRQ (COBALT_CPU_IRQ + 3) /* pre-production has 85C30 */
diff --git a/include/asm-mips/mach-emma2rh/irq.h b/include/asm-mips/mach-emma2rh/irq.h
index bce64244b800..5439eb856461 100644
--- a/include/asm-mips/mach-emma2rh/irq.h
+++ b/include/asm-mips/mach-emma2rh/irq.h
@@ -10,4 +10,6 @@
10 10
11#define NR_IRQS 256 11#define NR_IRQS 256
12 12
13#include_next <irq.h>
14
13#endif /* __ASM_MACH_EMMA2RH_IRQ_H */ 15#endif /* __ASM_MACH_EMMA2RH_IRQ_H */
diff --git a/include/asm-mips/mach-generic/irq.h b/include/asm-mips/mach-generic/irq.h
index 500e10ff24de..70d9a25132c5 100644
--- a/include/asm-mips/mach-generic/irq.h
+++ b/include/asm-mips/mach-generic/irq.h
@@ -8,6 +8,38 @@
8#ifndef __ASM_MACH_GENERIC_IRQ_H 8#ifndef __ASM_MACH_GENERIC_IRQ_H
9#define __ASM_MACH_GENERIC_IRQ_H 9#define __ASM_MACH_GENERIC_IRQ_H
10 10
11#ifndef NR_IRQS
11#define NR_IRQS 128 12#define NR_IRQS 128
13#endif
14
15#ifdef CONFIG_I8259
16#ifndef I8259A_IRQ_BASE
17#define I8259A_IRQ_BASE 0
18#endif
19#endif
20
21#ifdef CONFIG_IRQ_CPU
22
23#ifndef MIPS_CPU_IRQ_BASE
24#ifdef CONFIG_I8259
25#define MIPS_CPU_IRQ_BASE 16
26#else
27#define MIPS_CPU_IRQ_BASE 0
28#endif /* CONFIG_I8259 */
29#endif
30
31#ifdef CONFIG_IRQ_CPU_RM7K
32#ifndef RM7K_CPU_IRQ_BASE
33#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8)
34#endif
35#endif
36
37#ifdef CONFIG_IRQ_CPU_RM9K
38#ifndef RM9K_CPU_IRQ_BASE
39#define RM9K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+12)
40#endif
41#endif
42
43#endif /* CONFIG_IRQ_CPU */
12 44
13#endif /* __ASM_MACH_GENERIC_IRQ_H */ 45#endif /* __ASM_MACH_GENERIC_IRQ_H */
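The reworked mach-generic/irq.h above supplies overridable defaults: with an
i8259 configured, the legacy PIC occupies IRQs 0..15 and the eight MIPS CPU
interrupts start at 16, while a board header may predefine any of these
before the #include_next (as the new mach-vr41xx/irq.h below does). The
default layout spelled out, assuming nothing is overridden:

#include <stdio.h>

#define I8259A_IRQ_BASE		0
#define MIPS_CPU_IRQ_BASE	16
#define RM7K_CPU_IRQ_BASE	(MIPS_CPU_IRQ_BASE + 8)

int main(void)
{
	printf("i8259 IRQs:  %d..%d\n", I8259A_IRQ_BASE, I8259A_IRQ_BASE + 15);
	printf("CPU IRQs:    %d..%d\n", MIPS_CPU_IRQ_BASE, MIPS_CPU_IRQ_BASE + 7);
	printf("RM7K extras: from %d\n", RM7K_CPU_IRQ_BASE);
	return 0;
}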
diff --git a/include/asm-mips/mach-mips/irq.h b/include/asm-mips/mach-mips/irq.h
index e994b0c01227..9b9da26683c2 100644
--- a/include/asm-mips/mach-mips/irq.h
+++ b/include/asm-mips/mach-mips/irq.h
@@ -4,4 +4,6 @@
4 4
5#define NR_IRQS 256 5#define NR_IRQS 256
6 6
7#include_next <irq.h>
8
7#endif /* __ASM_MACH_MIPS_IRQ_H */ 9#endif /* __ASM_MACH_MIPS_IRQ_H */
diff --git a/include/asm-mips/mach-vr41xx/irq.h b/include/asm-mips/mach-vr41xx/irq.h
new file mode 100644
index 000000000000..848812296052
--- /dev/null
+++ b/include/asm-mips/mach-vr41xx/irq.h
@@ -0,0 +1,11 @@
1#ifndef __ASM_MACH_VR41XX_IRQ_H
2#define __ASM_MACH_VR41XX_IRQ_H
3
4#include <asm/vr41xx/irq.h> /* for MIPS_CPU_IRQ_BASE */
5#ifdef CONFIG_NEC_CMBVR4133
6#include <asm/vr41xx/cmbvr4133.h> /* for I8259A_IRQ_BASE */
7#endif
8
9#include_next <irq.h>
10
11#endif /* __ASM_MACH_VR41XX_IRQ_H */
diff --git a/include/asm-mips/mips-boards/atlasint.h b/include/asm-mips/mips-boards/atlasint.h
index b15e4ea0b091..76add42e486e 100644
--- a/include/asm-mips/mips-boards/atlasint.h
+++ b/include/asm-mips/mips-boards/atlasint.h
@@ -26,10 +26,12 @@
26#ifndef _MIPS_ATLASINT_H 26#ifndef _MIPS_ATLASINT_H
27#define _MIPS_ATLASINT_H 27#define _MIPS_ATLASINT_H
28 28
29#include <irq.h>
30
29/* 31/*
30 * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode) 32 * Interrupts 0..7 are used for Atlas CPU interrupts (nonEIC mode)
31 */ 33 */
32#define MIPSCPU_INT_BASE 0 34#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
33 35
34/* CPU interrupt offsets */ 36/* CPU interrupt offsets */
35#define MIPSCPU_INT_SW0 0 37#define MIPSCPU_INT_SW0 0
diff --git a/include/asm-mips/mips-boards/maltaint.h b/include/asm-mips/mips-boards/maltaint.h
index da6cc2fbbc78..9180d6466113 100644
--- a/include/asm-mips/mips-boards/maltaint.h
+++ b/include/asm-mips/mips-boards/maltaint.h
@@ -25,6 +25,8 @@
25#ifndef _MIPS_MALTAINT_H 25#ifndef _MIPS_MALTAINT_H
26#define _MIPS_MALTAINT_H 26#define _MIPS_MALTAINT_H
27 27
28#include <irq.h>
29
28/* 30/*
29 * Interrupts 0..15 are used for Malta ISA compatible interrupts 31 * Interrupts 0..15 are used for Malta ISA compatible interrupts
30 */ 32 */
@@ -33,7 +35,7 @@
33/* 35/*
34 * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode) 36 * Interrupts 16..23 are used for Malta CPU interrupts (nonEIC mode)
35 */ 37 */
36#define MIPSCPU_INT_BASE 16 38#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
37 39
38/* CPU interrupt offsets */ 40/* CPU interrupt offsets */
39#define MIPSCPU_INT_SW0 0 41#define MIPSCPU_INT_SW0 0
diff --git a/include/asm-mips/mips-boards/prom.h b/include/asm-mips/mips-boards/prom.h
index 4168c7fcd43e..7bf6f5f6ab9c 100644
--- a/include/asm-mips/mips-boards/prom.h
+++ b/include/asm-mips/mips-boards/prom.h
@@ -33,7 +33,6 @@ extern void prom_printf(char *fmt, ...);
33extern void prom_init_cmdline(void); 33extern void prom_init_cmdline(void);
34extern void prom_meminit(void); 34extern void prom_meminit(void);
35extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem); 35extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
36extern unsigned long prom_free_prom_memory (void);
37extern void mips_display_message(const char *str); 36extern void mips_display_message(const char *str);
38extern void mips_display_word(unsigned int num); 37extern void mips_display_word(unsigned int num);
39extern int get_ethernet_addr(char *ethernet_addr); 38extern int get_ethernet_addr(char *ethernet_addr);
diff --git a/include/asm-mips/mips-boards/seadint.h b/include/asm-mips/mips-boards/seadint.h
index 365c2a3c64f5..4f6a3933699d 100644
--- a/include/asm-mips/mips-boards/seadint.h
+++ b/include/asm-mips/mips-boards/seadint.h
@@ -20,10 +20,12 @@
20#ifndef _MIPS_SEADINT_H 20#ifndef _MIPS_SEADINT_H
21#define _MIPS_SEADINT_H 21#define _MIPS_SEADINT_H
22 22
23#include <irq.h>
24
23/* 25/*
24 * Interrupts 0..7 are used for SEAD CPU interrupts 26 * Interrupts 0..7 are used for SEAD CPU interrupts
25 */ 27 */
26#define MIPSCPU_INT_BASE 0 28#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
27 29
28#define MIPSCPU_INT_UART0 2 30#define MIPSCPU_INT_UART0 2
29#define MIPSCPU_INT_UART1 3 31#define MIPSCPU_INT_UART1 3
diff --git a/include/asm-mips/mips-boards/simint.h b/include/asm-mips/mips-boards/simint.h
index 4952e0b3bf11..54f2fe621d69 100644
--- a/include/asm-mips/mips-boards/simint.h
+++ b/include/asm-mips/mips-boards/simint.h
@@ -17,10 +17,11 @@
17#ifndef _MIPS_SIMINT_H 17#ifndef _MIPS_SIMINT_H
18#define _MIPS_SIMINT_H 18#define _MIPS_SIMINT_H
19 19
20#include <irq.h>
20 21
21#define SIM_INT_BASE 0 22#define SIM_INT_BASE 0
22#define MIPSCPU_INT_MB0 2 23#define MIPSCPU_INT_MB0 2
23#define MIPSCPU_INT_BASE 16 24#define MIPSCPU_INT_BASE MIPS_CPU_IRQ_BASE
24#define MIPS_CPU_TIMER_IRQ 7 25#define MIPS_CPU_TIMER_IRQ 7
25 26
26 27
diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h
index 3e9468f424f4..294bca12cd3f 100644
--- a/include/asm-mips/mipsmtregs.h
+++ b/include/asm-mips/mipsmtregs.h
@@ -165,8 +165,6 @@
165 165
166#ifndef __ASSEMBLY__ 166#ifndef __ASSEMBLY__
167 167
168extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
169
170static inline unsigned int dvpe(void) 168static inline unsigned int dvpe(void)
171{ 169{
172 int res = 0; 170 int res = 0;
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index 2f9e1a9ec51f..d3fbd83ff545 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -34,6 +34,20 @@
34 34
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36 36
37/*
38 * This gives the physical RAM offset.
39 */
40#ifndef PHYS_OFFSET
41#define PHYS_OFFSET 0UL
42#endif
43
44/*
45 * It's normally defined only for FLATMEM config but it's
46 * used in our early mem init code for all memory models.
47 * So always define it.
48 */
49#define ARCH_PFN_OFFSET PFN_UP(PHYS_OFFSET)
50
37#include <linux/pfn.h> 51#include <linux/pfn.h>
38#include <asm/io.h> 52#include <asm/io.h>
39 53
@@ -132,20 +146,23 @@ typedef struct { unsigned long pgprot; } pgprot_t;
132/* to align the pointer to the (next) page boundary */ 146/* to align the pointer to the (next) page boundary */
133#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) 147#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
134 148
149/*
150 * __pa()/__va() should be used only during mem init.
151 */
135#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) 152#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
136#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0) 153#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0)
137#else 154#else
138#define __pa_page_offset(x) PAGE_OFFSET 155#define __pa_page_offset(x) PAGE_OFFSET
139#endif 156#endif
140#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x)) 157#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x) + PHYS_OFFSET)
141#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) 158#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
142#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) 159#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
143 160
144#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 161#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
145 162
146#ifdef CONFIG_FLATMEM 163#ifdef CONFIG_FLATMEM
147 164
148#define pfn_valid(pfn) ((pfn) < max_mapnr) 165#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
149 166
150#elif defined(CONFIG_SPARSEMEM) 167#elif defined(CONFIG_SPARSEMEM)
151 168
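With a non-zero PHYS_OFFSET, the reworked __pa()/__va() above translate
between the kernel's unmapped virtual window and RAM that does not start at
physical address 0, and pfn_valid() has to reject PFNs below ARCH_PFN_OFFSET.
A userspace sketch of the arithmetic; the PAGE_OFFSET and PHYS_OFFSET values
are made up, not any real board's:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x80000000UL	/* illustrative KSEG0-style window  */
#define PHYS_OFFSET	0x10000000UL	/* RAM starting at 256 MB (made up) */
#define PFN_UP(x)	(((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)

static unsigned long pa(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET + PHYS_OFFSET;
}

static unsigned long va(unsigned long paddr)
{
	return paddr + PAGE_OFFSET - PHYS_OFFSET;
}

int main(void)
{
	unsigned long v = 0x80200000UL;

	printf("pa(0x%lx) = 0x%lx\n", v, pa(v));		/* 0x10200000 */
	printf("va(0x%lx) = 0x%lx\n", pa(v), va(pa(v)));	/* back to v  */
	printf("first valid pfn = %lu\n", (unsigned long)ARCH_PFN_OFFSET);
	return 0;
}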
diff --git a/include/asm-mips/rtlx.h b/include/asm-mips/rtlx.h
index 76cd51c6be39..59162f74a798 100644
--- a/include/asm-mips/rtlx.h
+++ b/include/asm-mips/rtlx.h
@@ -6,9 +6,10 @@
6#ifndef __ASM_RTLX_H 6#ifndef __ASM_RTLX_H
7#define __ASM_RTLX_H_ 7#define __ASM_RTLX_H_
8 8
9#include <irq.h>
10
9#define LX_NODE_BASE 10 11#define LX_NODE_BASE 10
10 12
11#define MIPSCPU_INT_BASE 16
12#define MIPS_CPU_RTLX_IRQ 0 13#define MIPS_CPU_RTLX_IRQ 0
13 14
14#define RTLX_VERSION 2 15#define RTLX_VERSION 2
diff --git a/include/asm-mips/sections.h b/include/asm-mips/sections.h
index f7016278b266..b7e37262c246 100644
--- a/include/asm-mips/sections.h
+++ b/include/asm-mips/sections.h
@@ -3,6 +3,4 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _fdata;
7
8#endif /* _ASM_SECTIONS_H */ 6#endif /* _ASM_SECTIONS_H */
diff --git a/include/asm-mips/sgi/ip22.h b/include/asm-mips/sgi/ip22.h
index bbfc05c3cab9..6592f3bd1999 100644
--- a/include/asm-mips/sgi/ip22.h
+++ b/include/asm-mips/sgi/ip22.h
@@ -21,15 +21,16 @@
21 * HAL2 driver). This will prevent many complications, trust me ;-) 21 * HAL2 driver). This will prevent many complications, trust me ;-)
22 */ 22 */
23 23
24#include <irq.h>
24#include <asm/sgi/ioc.h> 25#include <asm/sgi/ioc.h>
25 26
26#define SGINT_EISA 0 /* 16 EISA irq levels (Indigo2) */ 27#define SGINT_EISA 0 /* 16 EISA irq levels (Indigo2) */
27#define SGINT_CPU 16 /* MIPS CPU define 8 interrupt sources */ 28#define SGINT_CPU MIPS_CPU_IRQ_BASE /* MIPS CPU define 8 interrupt sources */
28#define SGINT_LOCAL0 24 /* 8 local0 irq levels */ 29#define SGINT_LOCAL0 (SGINT_CPU+8) /* 8 local0 irq levels */
29#define SGINT_LOCAL1 32 /* 8 local1 irq levels */ 30#define SGINT_LOCAL1 (SGINT_CPU+16) /* 8 local1 irq levels */
30#define SGINT_LOCAL2 40 /* 8 local2 vectored irq levels */ 31#define SGINT_LOCAL2 (SGINT_CPU+24) /* 8 local2 vectored irq levels */
31#define SGINT_LOCAL3 48 /* 8 local3 vectored irq levels */ 32#define SGINT_LOCAL3 (SGINT_CPU+32) /* 8 local3 vectored irq levels */
32#define SGINT_END 56 /* End of 'spaces' */ 33#define SGINT_END (SGINT_CPU+40) /* End of 'spaces' */
33 34
34/* 35/*
35 * Individual interrupt definitions for the Indy and Indigo2 36 * Individual interrupt definitions for the Indy and Indigo2
diff --git a/include/asm-mips/smtc_ipi.h b/include/asm-mips/smtc_ipi.h
index f22c3e2f993a..55f3419f6546 100644
--- a/include/asm-mips/smtc_ipi.h
+++ b/include/asm-mips/smtc_ipi.h
@@ -44,9 +44,6 @@ struct smtc_ipi_q {
44 int depth; 44 int depth;
45}; 45};
46 46
47extern struct smtc_ipi_q IPIQ[NR_CPUS];
48extern struct smtc_ipi_q freeIPIq;
49
50static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p) 47static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
51{ 48{
52 long flags; 49 long flags;
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 1cdd4eeb2f73..c12ebc53ef31 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -488,7 +488,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
488}) 488})
489 489
490/* 490/*
491 * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. 491 * __copy_from_user: - Copy a block of data from user space, with less checking.
492 * @to: Destination address, in kernel space.
492 * @from: Source address, in user space. 493 * @from: Source address, in user space.
493 * @n: Number of bytes to copy. 494 * @n: Number of bytes to copy.
494 * 495 *
diff --git a/include/asm-mips/vr41xx/cmbvr4133.h b/include/asm-mips/vr41xx/cmbvr4133.h
index 9490ade58b46..42300037d593 100644
--- a/include/asm-mips/vr41xx/cmbvr4133.h
+++ b/include/asm-mips/vr41xx/cmbvr4133.h
@@ -35,8 +35,8 @@
35#define CMBVR41XX_INTD_IRQ GIU_IRQ(CMBVR41XX_INTD_PIN) 35#define CMBVR41XX_INTD_IRQ GIU_IRQ(CMBVR41XX_INTD_PIN)
36#define CMBVR41XX_INTE_IRQ GIU_IRQ(CMBVR41XX_INTE_PIN) 36#define CMBVR41XX_INTE_IRQ GIU_IRQ(CMBVR41XX_INTE_PIN)
37 37
38#define I8259_IRQ_BASE 72 38#define I8259A_IRQ_BASE 72
39#define I8259_IRQ(x) (I8259_IRQ_BASE + (x)) 39#define I8259_IRQ(x) (I8259A_IRQ_BASE + (x))
40#define TIMER_IRQ I8259_IRQ(0) 40#define TIMER_IRQ I8259_IRQ(0)
41#define KEYBOARD_IRQ I8259_IRQ(1) 41#define KEYBOARD_IRQ I8259_IRQ(1)
42#define I8259_SLAVE_IRQ I8259_IRQ(2) 42#define I8259_SLAVE_IRQ I8259_IRQ(2)
@@ -52,6 +52,5 @@
52#define AUX_IRQ I8259_IRQ(12) 52#define AUX_IRQ I8259_IRQ(12)
53#define IDE_PRIMARY_IRQ I8259_IRQ(14) 53#define IDE_PRIMARY_IRQ I8259_IRQ(14)
54#define IDE_SECONDARY_IRQ I8259_IRQ(15) 54#define IDE_SECONDARY_IRQ I8259_IRQ(15)
55#define I8259_IRQ_LAST IDE_SECONDARY_IRQ
56 55
57#endif /* __NEC_CMBVR4133_H */ 56#endif /* __NEC_CMBVR4133_H */
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h
index 356a0b183539..296f4f1a20e1 100644
--- a/include/asm-s390/compat.h
+++ b/include/asm-s390/compat.h
@@ -6,6 +6,34 @@
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8 8
9#define PSW32_MASK_PER 0x40000000UL
10#define PSW32_MASK_DAT 0x04000000UL
11#define PSW32_MASK_IO 0x02000000UL
12#define PSW32_MASK_EXT 0x01000000UL
13#define PSW32_MASK_KEY 0x00F00000UL
14#define PSW32_MASK_MCHECK 0x00040000UL
15#define PSW32_MASK_WAIT 0x00020000UL
16#define PSW32_MASK_PSTATE 0x00010000UL
17#define PSW32_MASK_ASC 0x0000C000UL
18#define PSW32_MASK_CC 0x00003000UL
19#define PSW32_MASK_PM 0x00000f00UL
20
21#define PSW32_ADDR_AMODE31 0x80000000UL
22#define PSW32_ADDR_INSN 0x7FFFFFFFUL
23
24#define PSW32_BASE_BITS 0x00080000UL
25
26#define PSW32_ASC_PRIMARY 0x00000000UL
27#define PSW32_ASC_ACCREG 0x00004000UL
28#define PSW32_ASC_SECONDARY 0x00008000UL
29#define PSW32_ASC_HOME 0x0000C000UL
30
31#define PSW32_MASK_MERGE(CURRENT,NEW) \
32 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
33 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
34
35extern long psw32_user_bits;
36
9#define COMPAT_USER_HZ 100 37#define COMPAT_USER_HZ 100
10 38
11typedef u32 compat_size_t; 39typedef u32 compat_size_t;
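
Editor's note on the asm-s390/compat.h hunk above: PSW32_MASK_MERGE() keeps everything from the current 31-bit PSW mask except the condition code and program mask, which are taken from the new value. A small standalone demonstration using the macro verbatim, with arbitrary example masks:

#include <stdio.h>

#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL

#define PSW32_MASK_MERGE(CURRENT, NEW) \
	(((CURRENT) & ~(PSW32_MASK_CC | PSW32_MASK_PM)) | \
	 ((NEW) & (PSW32_MASK_CC | PSW32_MASK_PM)))

int main(void)
{
	unsigned long cur = 0x070d0000UL;	/* arbitrary current mask */
	unsigned long new = 0x00003f00UL;	/* arbitrary user-supplied mask */

	/* Only the CC/PM bits of 'new' survive into the result. */
	printf("merged: 0x%08lx\n", PSW32_MASK_MERGE(cur, new));
	return 0;
}
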
diff --git a/include/asm-s390/etr.h b/include/asm-s390/etr.h
new file mode 100644
index 000000000000..b498f19bb9a7
--- /dev/null
+++ b/include/asm-s390/etr.h
@@ -0,0 +1,219 @@
1/*
2 * include/asm-s390/etr.h
3 *
4 * Copyright IBM Corp. 2006
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 */
7#ifndef __S390_ETR_H
8#define __S390_ETR_H
9
10/* ETR attachment control register */
11struct etr_eacr {
12 unsigned int e0 : 1; /* port 0 stepping control */
13 unsigned int e1 : 1; /* port 1 stepping control */
14 unsigned int _pad0 : 5; /* must be 00100 */
15 unsigned int dp : 1; /* data port control */
16 unsigned int p0 : 1; /* port 0 change recognition control */
17 unsigned int p1 : 1; /* port 1 change recognition control */
18 unsigned int _pad1 : 3; /* must be 000 */
19 unsigned int ea : 1; /* ETR alert control */
20 unsigned int es : 1; /* ETR sync check control */
21 unsigned int sl : 1; /* switch to local control */
22} __attribute__ ((packed));
23
24/* Port state returned by steai */
25enum etr_psc {
26 etr_psc_operational = 0,
27 etr_psc_semi_operational = 1,
28 etr_psc_protocol_error = 4,
29 etr_psc_no_symbols = 8,
30 etr_psc_no_signal = 12,
31 etr_psc_pps_mode = 13
32};
33
34/* Logical port state returned by stetr */
35enum etr_lpsc {
36 etr_lpsc_operational_step = 0,
37 etr_lpsc_operational_alt = 1,
38 etr_lpsc_semi_operational = 2,
39 etr_lpsc_protocol_error = 4,
40 etr_lpsc_no_symbol_sync = 8,
41 etr_lpsc_no_signal = 12,
42 etr_lpsc_pps_mode = 13
43};
44
45/* ETR status words */
46struct etr_esw {
47 struct etr_eacr eacr; /* attachment control register */
48 unsigned int y : 1; /* stepping mode */
49 unsigned int _pad0 : 5; /* must be 00000 */
50 unsigned int p : 1; /* stepping port number */
51 unsigned int q : 1; /* data port number */
52 unsigned int psc0 : 4; /* port 0 state code */
53 unsigned int psc1 : 4; /* port 1 state code */
54} __attribute__ ((packed));
55
56/* Second level data register status word */
57struct etr_slsw {
58 unsigned int vv1 : 1; /* copy of validity bit data frame 1 */
59 unsigned int vv2 : 1; /* copy of validity bit data frame 2 */
60 unsigned int vv3 : 1; /* copy of validity bit data frame 3 */
61 unsigned int vv4 : 1; /* copy of validity bit data frame 4 */
62 unsigned int _pad0 : 19; /* must by all zeroes */
63 unsigned int n : 1; /* EAF port number */
64 unsigned int v1 : 1; /* validity bit ETR data frame 1 */
65 unsigned int v2 : 1; /* validity bit ETR data frame 2 */
66 unsigned int v3 : 1; /* validity bit ETR data frame 3 */
67 unsigned int v4 : 1; /* validity bit ETR data frame 4 */
68 unsigned int _pad1 : 4; /* must be 0000 */
69} __attribute__ ((packed));
70
71/* ETR data frames */
72struct etr_edf1 {
73 unsigned int u : 1; /* untuned bit */
74 unsigned int _pad0 : 1; /* must be 0 */
75 unsigned int r : 1; /* service request bit */
76 unsigned int _pad1 : 4; /* must be 0000 */
77 unsigned int a : 1; /* time adjustment bit */
78 unsigned int net_id : 8; /* ETR network id */
79 unsigned int etr_id : 8; /* id of ETR which sends data frames */
80 unsigned int etr_pn : 8; /* port number of ETR output port */
81} __attribute__ ((packed));
82
83struct etr_edf2 {
84 unsigned int etv : 32; /* Upper 32 bits of TOD. */
85} __attribute__ ((packed));
86
87struct etr_edf3 {
88 unsigned int rc : 8; /* failure reason code */
89 unsigned int _pad0 : 3; /* must be 000 */
90 unsigned int c : 1; /* ETR coupled bit */
91 unsigned int tc : 4; /* ETR type code */
92 unsigned int blto : 8; /* biased local time offset */
93 /* (blto - 128) * 15 = minutes */
94 unsigned int buo : 8; /* biased utc offset */
95 /* (buo - 128) = leap seconds */
96} __attribute__ ((packed));
97
98struct etr_edf4 {
99 unsigned int ed : 8; /* ETS device dependent data */
100 unsigned int _pad0 : 1; /* must be 0 */
101 unsigned int buc : 5; /* biased ut1 correction */
102 /* (buc - 16) * 0.1 seconds */
103 unsigned int em : 6; /* ETS error magnitude */
104 unsigned int dc : 6; /* ETS drift code */
105 unsigned int sc : 6; /* ETS steering code */
106} __attribute__ ((packed));
107
108/*
109 * ETR attachment information block, two formats
110 * format 1 has 4 reserved words with a size of 64 bytes
111 * format 2 has 16 reserved words with a size of 96 bytes
112 */
113struct etr_aib {
114 struct etr_esw esw;
115 struct etr_slsw slsw;
116 unsigned long long tsp;
117 struct etr_edf1 edf1;
118 struct etr_edf2 edf2;
119 struct etr_edf3 edf3;
120 struct etr_edf4 edf4;
121 unsigned int reserved[16];
122} __attribute__ ((packed,aligned(8)));
123
124/* ETR interruption parameter */
125struct etr_interruption_parameter {
126 unsigned int _pad0 : 8;
127 unsigned int pc0 : 1; /* port 0 state change */
128 unsigned int pc1 : 1; /* port 1 state change */
129 unsigned int _pad1 : 3;
130 unsigned int eai : 1; /* ETR alert indication */
131 unsigned int _pad2 : 18;
132} __attribute__ ((packed));
133
134/* Query TOD offset result */
135struct etr_ptff_qto {
136 unsigned long long physical_clock;
137 unsigned long long tod_offset;
138 unsigned long long logical_tod_offset;
139 unsigned long long tod_epoch_difference;
140} __attribute__ ((packed));
141
142/* Inline assembly helper functions */
143static inline int etr_setr(struct etr_eacr *ctrl)
144{
145 int rc = -ENOSYS;
146
147 asm volatile(
148 " .insn s,0xb2160000,0(%2)\n"
149 "0: la %0,0\n"
150 "1:\n"
151 EX_TABLE(0b,1b)
152 : "+d" (rc) : "m" (*ctrl), "a" (ctrl));
153 return rc;
154}
155
156/* Stores a format 1 aib with 64 bytes */
157static inline int etr_stetr(struct etr_aib *aib)
158{
159 int rc = -ENOSYS;
160
161 asm volatile(
162 " .insn s,0xb2170000,0(%2)\n"
163 "0: la %0,0\n"
164 "1:\n"
165 EX_TABLE(0b,1b)
166 : "+d" (rc) : "m" (*aib), "a" (aib));
167 return rc;
168}
169
170/* Stores a format 2 aib with 96 bytes for specified port */
171static inline int etr_steai(struct etr_aib *aib, unsigned int func)
172{
173 register unsigned int reg0 asm("0") = func;
174 int rc = -ENOSYS;
175
176 asm volatile(
177 " .insn s,0xb2b30000,0(%2)\n"
178 "0: la %0,0\n"
179 "1:\n"
180 EX_TABLE(0b,1b)
181 : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0));
182 return rc;
183}
184
185/* Function codes for the steai instruction. */
186#define ETR_STEAI_STEPPING_PORT 0x10
187#define ETR_STEAI_ALTERNATE_PORT 0x11
188#define ETR_STEAI_PORT_0 0x12
189#define ETR_STEAI_PORT_1 0x13
190
191static inline int etr_ptff(void *ptff_block, unsigned int func)
192{
193 register unsigned int reg0 asm("0") = func;
194 register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
195 int rc = -ENOSYS;
196
197 asm volatile(
198 " .word 0x0104\n"
199 " ipm %0\n"
200 " srl %0,28\n"
201 : "=d" (rc), "=m" (ptff_block)
202 : "d" (reg0), "d" (reg1), "m" (ptff_block) : "cc");
203 return rc;
204}
205
206/* Function codes for the ptff instruction. */
207#define ETR_PTFF_QAF 0x00 /* query available functions */
208#define ETR_PTFF_QTO 0x01 /* query tod offset */
209#define ETR_PTFF_QSI 0x02 /* query steering information */
210#define ETR_PTFF_ATO 0x40 /* adjust tod offset */
211#define ETR_PTFF_STO 0x41 /* set tod offset */
212#define ETR_PTFF_SFS 0x42 /* set fine steering rate */
213#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
214
215/* Functions needed by the machine check handler */
216extern void etr_switch_to_local(void);
217extern void etr_sync_check(void);
218
219#endif /* __S390_ETR_H */
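
Editor's note on the new asm-s390/etr.h above: the comments in struct etr_edf3 give the decoding rules for the two biased fields (blto and buo). The short program below just works those formulas through with arbitrary sample values; it is not part of the ETR code itself:

#include <stdio.h>

/* (blto - 128) * 15 = minutes, per the struct etr_edf3 comment */
static int blto_to_minutes(unsigned int blto)
{
	return ((int)blto - 128) * 15;
}

/* (buo - 128) = leap seconds, per the struct etr_edf3 comment */
static int buo_to_leap_seconds(unsigned int buo)
{
	return (int)buo - 128;
}

int main(void)
{
	printf("blto=136 -> %d minutes\n", blto_to_minutes(136));
	printf("buo=161  -> %d leap seconds\n", buo_to_leap_seconds(161));
	return 0;
}
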
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h
index c2f6a8782d31..31beb18cb3d1 100644
--- a/include/asm-s390/hardirq.h
+++ b/include/asm-s390/hardirq.h
@@ -32,6 +32,6 @@ typedef struct {
32 32
33#define HARDIRQ_BITS 8 33#define HARDIRQ_BITS 8
34 34
35extern void account_ticks(void); 35extern void account_ticks(u64 time);
36 36
37#endif /* __ASM_HARDIRQ_H */ 37#endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index efb7de9c1c6b..a4c2d550dad4 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -28,11 +28,7 @@ static inline unsigned long virt_to_phys(volatile void * address)
28{ 28{
29 unsigned long real_address; 29 unsigned long real_address;
30 asm volatile( 30 asm volatile(
31#ifndef __s390x__
32 " lra %0,0(%1)\n" 31 " lra %0,0(%1)\n"
33#else /* __s390x__ */
34 " lrag %0,0(%1)\n"
35#endif /* __s390x__ */
36 " jz 0f\n" 32 " jz 0f\n"
37 " la %0,0\n" 33 " la %0,0\n"
38 "0:" 34 "0:"
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h
index 40cc68025e01..1b50f89819a4 100644
--- a/include/asm-s390/kdebug.h
+++ b/include/asm-s390/kdebug.h
@@ -26,7 +26,6 @@ extern int register_page_fault_notifier(struct notifier_block *);
26extern int unregister_page_fault_notifier(struct notifier_block *); 26extern int unregister_page_fault_notifier(struct notifier_block *);
27extern struct atomic_notifier_head s390die_chain; 27extern struct atomic_notifier_head s390die_chain;
28 28
29
30enum die_val { 29enum die_val {
31 DIE_OOPS = 1, 30 DIE_OOPS = 1,
32 DIE_BPT, 31 DIE_BPT,
@@ -56,4 +55,6 @@ static inline int notify_die(enum die_val val, const char *str,
56 return atomic_notifier_call_chain(&s390die_chain, val, &args); 55 return atomic_notifier_call_chain(&s390die_chain, val, &args);
57} 56}
58 57
58extern void die(const char *, struct pt_regs *, long);
59
59#endif 60#endif
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 74f7389bd3ee..4a31d0a7ee83 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -220,7 +220,8 @@ struct _lowcore
220 __u32 kernel_asce; /* 0xc4c */ 220 __u32 kernel_asce; /* 0xc4c */
221 __u32 user_asce; /* 0xc50 */ 221 __u32 user_asce; /* 0xc50 */
222 __u32 panic_stack; /* 0xc54 */ 222 __u32 panic_stack; /* 0xc54 */
223 __u8 pad10[0xc60-0xc58]; /* 0xc58 */ 223 __u32 user_exec_asce; /* 0xc58 */
224 __u8 pad10[0xc60-0xc5c]; /* 0xc5c */
224 /* entry.S sensitive area start */ 225 /* entry.S sensitive area start */
225 struct cpuinfo_S390 cpu_data; /* 0xc60 */ 226 struct cpuinfo_S390 cpu_data; /* 0xc60 */
226 __u32 ipl_device; /* 0xc7c */ 227 __u32 ipl_device; /* 0xc7c */
@@ -310,7 +311,8 @@ struct _lowcore
310 __u64 kernel_asce; /* 0xd58 */ 311 __u64 kernel_asce; /* 0xd58 */
311 __u64 user_asce; /* 0xd60 */ 312 __u64 user_asce; /* 0xd60 */
312 __u64 panic_stack; /* 0xd68 */ 313 __u64 panic_stack; /* 0xd68 */
313 __u8 pad10[0xd80-0xd70]; /* 0xd70 */ 314 __u64 user_exec_asce; /* 0xd70 */
315 __u8 pad10[0xd80-0xd78]; /* 0xd78 */
314 /* entry.S sensitive area start */ 316 /* entry.S sensitive area start */
315 struct cpuinfo_S390 cpu_data; /* 0xd80 */ 317 struct cpuinfo_S390 cpu_data; /* 0xd80 */
316 __u32 ipl_device; /* 0xdb8 */ 318 __u32 ipl_device; /* 0xdb8 */
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index bcf24a873874..1d21da220d49 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -9,6 +9,7 @@
9#ifndef __S390_MMU_CONTEXT_H 9#ifndef __S390_MMU_CONTEXT_H
10#define __S390_MMU_CONTEXT_H 10#define __S390_MMU_CONTEXT_H
11 11
12#include <asm/pgalloc.h>
12/* 13/*
13 * get a new mmu context.. S390 don't know about contexts. 14 * get a new mmu context.. S390 don't know about contexts.
14 */ 15 */
@@ -16,29 +17,44 @@
16 17
17#define destroy_context(mm) do { } while (0) 18#define destroy_context(mm) do { } while (0)
18 19
20#ifndef __s390x__
21#define LCTL_OPCODE "lctl"
22#define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK)
23#else
24#define LCTL_OPCODE "lctlg"
25#define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK)
26#endif
27
19static inline void enter_lazy_tlb(struct mm_struct *mm, 28static inline void enter_lazy_tlb(struct mm_struct *mm,
20 struct task_struct *tsk) 29 struct task_struct *tsk)
21{ 30{
22} 31}
23 32
24static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 33static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
25 struct task_struct *tsk) 34 struct task_struct *tsk)
26{ 35{
27 if (prev != next) { 36 pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
28#ifndef __s390x__ 37
29 S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) | 38 if (prev != next) {
30 (_SEGMENT_TABLE|USER_STD_MASK); 39 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
31 /* Load home space page table origin. */ 40 PGTABLE_BITS;
32 asm volatile("lctl 13,13,%0" 41 if (shadow_pgd) {
33 : : "m" (S390_lowcore.user_asce) ); 42 /* Load primary/secondary space page table origin. */
34#else /* __s390x__ */ 43 S390_lowcore.user_exec_asce =
35 S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) | 44 (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
36 (_REGION_TABLE|USER_STD_MASK); 45 asm volatile(LCTL_OPCODE" 1,1,%0\n"
37 /* Load home space page table origin. */ 46 LCTL_OPCODE" 7,7,%1"
38 asm volatile("lctlg 13,13,%0" 47 : : "m" (S390_lowcore.user_exec_asce),
39 : : "m" (S390_lowcore.user_asce) ); 48 "m" (S390_lowcore.user_asce) );
40#endif /* __s390x__ */ 49 } else if (switch_amode) {
41 } 50 /* Load primary space page table origin. */
51 asm volatile(LCTL_OPCODE" 1,1,%0"
52 : : "m" (S390_lowcore.user_asce) );
53 } else
54 /* Load home space page table origin. */
55 asm volatile(LCTL_OPCODE" 13,13,%0"
56 : : "m" (S390_lowcore.user_asce) );
57 }
42 cpu_set(smp_processor_id(), next->cpu_vm_mask); 58 cpu_set(smp_processor_id(), next->cpu_vm_mask);
43} 59}
44 60
@@ -51,4 +67,4 @@ static inline void activate_mm(struct mm_struct *prev,
51 set_fs(current->thread.mm_segment); 67 set_fs(current->thread.mm_segment);
52} 68}
53 69
54#endif 70#endif /* __S390_MMU_CONTEXT_H */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 0707a7e2fc16..56c8a6c80e2e 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -47,6 +47,17 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
47 47
48 if (!pgd) 48 if (!pgd)
49 return NULL; 49 return NULL;
50 if (s390_noexec) {
51 pgd_t *shadow_pgd = (pgd_t *)
52 __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
53 struct page *page = virt_to_page(pgd);
54
55 if (!shadow_pgd) {
56 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
57 return NULL;
58 }
59 page->lru.next = (void *) shadow_pgd;
60 }
50 for (i = 0; i < PTRS_PER_PGD; i++) 61 for (i = 0; i < PTRS_PER_PGD; i++)
51#ifndef __s390x__ 62#ifndef __s390x__
52 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); 63 pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
@@ -58,6 +69,10 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
58 69
59static inline void pgd_free(pgd_t *pgd) 70static inline void pgd_free(pgd_t *pgd)
60{ 71{
72 pgd_t *shadow_pgd = get_shadow_pgd(pgd);
73
74 if (shadow_pgd)
75 free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
61 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER); 76 free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
62} 77}
63 78
@@ -71,6 +86,7 @@ static inline void pgd_free(pgd_t *pgd)
71#define pmd_free(x) do { } while (0) 86#define pmd_free(x) do { } while (0)
72#define __pmd_free_tlb(tlb,x) do { } while (0) 87#define __pmd_free_tlb(tlb,x) do { } while (0)
73#define pgd_populate(mm, pmd, pte) BUG() 88#define pgd_populate(mm, pmd, pte) BUG()
89#define pgd_populate_kernel(mm, pmd, pte) BUG()
74#else /* __s390x__ */ 90#else /* __s390x__ */
75static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 91static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
76{ 92{
@@ -79,6 +95,17 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
79 95
80 if (!pmd) 96 if (!pmd)
81 return NULL; 97 return NULL;
98 if (s390_noexec) {
99 pmd_t *shadow_pmd = (pmd_t *)
100 __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
101 struct page *page = virt_to_page(pmd);
102
103 if (!shadow_pmd) {
104 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
105 return NULL;
106 }
107 page->lru.next = (void *) shadow_pmd;
108 }
82 for (i=0; i < PTRS_PER_PMD; i++) 109 for (i=0; i < PTRS_PER_PMD; i++)
83 pmd_clear(pmd + i); 110 pmd_clear(pmd + i);
84 return pmd; 111 return pmd;
@@ -86,6 +113,10 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
86 113
87static inline void pmd_free (pmd_t *pmd) 114static inline void pmd_free (pmd_t *pmd)
88{ 115{
116 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
117
118 if (shadow_pmd)
119 free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
89 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); 120 free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
90} 121}
91 122
@@ -95,11 +126,22 @@ static inline void pmd_free (pmd_t *pmd)
95 pmd_free(pmd); \ 126 pmd_free(pmd); \
96 } while (0) 127 } while (0)
97 128
98static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) 129static inline void
130pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
99{ 131{
100 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd); 132 pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
101} 133}
102 134
135static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
136{
137 pgd_t *shadow_pgd = get_shadow_pgd(pgd);
138 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
139
140 if (shadow_pgd && shadow_pmd)
141 pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
142 pgd_populate_kernel(mm, pgd, pmd);
143}
144
103#endif /* __s390x__ */ 145#endif /* __s390x__ */
104 146
105static inline void 147static inline void
@@ -119,7 +161,13 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
119static inline void 161static inline void
120pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) 162pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
121{ 163{
122 pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page)); 164 pte_t *pte = (pte_t *)page_to_phys(page);
165 pmd_t *shadow_pmd = get_shadow_pmd(pmd);
166 pte_t *shadow_pte = get_shadow_pte(pte);
167
168 pmd_populate_kernel(mm, pmd, pte);
169 if (shadow_pmd && shadow_pte)
170 pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
123} 171}
124 172
125/* 173/*
@@ -133,6 +181,17 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
133 181
134 if (!pte) 182 if (!pte)
135 return NULL; 183 return NULL;
184 if (s390_noexec) {
185 pte_t *shadow_pte = (pte_t *)
186 __get_free_page(GFP_KERNEL|__GFP_REPEAT);
187 struct page *page = virt_to_page(pte);
188
189 if (!shadow_pte) {
190 free_page((unsigned long) pte);
191 return NULL;
192 }
193 page->lru.next = (void *) shadow_pte;
194 }
136 for (i=0; i < PTRS_PER_PTE; i++) { 195 for (i=0; i < PTRS_PER_PTE; i++) {
137 pte_clear(mm, vmaddr, pte + i); 196 pte_clear(mm, vmaddr, pte + i);
138 vmaddr += PAGE_SIZE; 197 vmaddr += PAGE_SIZE;
@@ -151,14 +210,30 @@ pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
151 210
152static inline void pte_free_kernel(pte_t *pte) 211static inline void pte_free_kernel(pte_t *pte)
153{ 212{
154 free_page((unsigned long) pte); 213 pte_t *shadow_pte = get_shadow_pte(pte);
214
215 if (shadow_pte)
216 free_page((unsigned long) shadow_pte);
217 free_page((unsigned long) pte);
155} 218}
156 219
157static inline void pte_free(struct page *pte) 220static inline void pte_free(struct page *pte)
158{ 221{
159 __free_page(pte); 222 struct page *shadow_page = get_shadow_page(pte);
223
224 if (shadow_page)
225 __free_page(shadow_page);
226 __free_page(pte);
160} 227}
161 228
162#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte) 229#define __pte_free_tlb(tlb, pte) \
230({ \
231 struct mmu_gather *__tlb = (tlb); \
232 struct page *__pte = (pte); \
233 struct page *shadow_page = get_shadow_page(__pte); \
234 if (shadow_page) \
235 tlb_remove_page(__tlb, shadow_page); \
236 tlb_remove_page(__tlb, __pte); \
237})
163 238
164#endif /* _S390_PGALLOC_H */ 239#endif /* _S390_PGALLOC_H */
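
Editor's note on the asm-s390/pgalloc.h hunk above: the pgd/pmd/pte allocators all follow the same pattern when s390_noexec is set, i.e. allocate a second "shadow" table and stash its address in the primary table's struct page (page->lru.next) so it can be found again at free time. A toy userspace model of that bookkeeping, with a plain field standing in for page->lru.next:

#include <stdio.h>
#include <stdlib.h>

struct toy_page {		/* stands in for struct page */
	void *shadow;		/* stands in for page->lru.next */
};

struct table {
	struct toy_page meta;
	unsigned long entries[4];
};

static struct table *table_alloc(int noexec)
{
	struct table *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	if (noexec) {
		struct table *shadow = calloc(1, sizeof(*shadow));

		if (!shadow) {		/* mirror the error path above */
			free(t);
			return NULL;
		}
		t->meta.shadow = shadow;
	}
	return t;
}

static void table_free(struct table *t)
{
	free(t->meta.shadow);		/* free the shadow first, then the table */
	free(t);
}

int main(void)
{
	struct table *t = table_alloc(1);

	printf("table %p, shadow %p\n", (void *)t, t ? t->meta.shadow : NULL);
	if (t)
		table_free(t);
	return 0;
}
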
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index ae61aca5d483..13c16546eff5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -40,6 +40,7 @@ struct mm_struct;
40 40
41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 41extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
42extern void paging_init(void); 42extern void paging_init(void);
43extern void vmem_map_init(void);
43 44
44/* 45/*
45 * The S390 doesn't have any external MMU info: the kernel page 46 * The S390 doesn't have any external MMU info: the kernel page
@@ -223,6 +224,8 @@ extern unsigned long vmalloc_end;
223#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ 224#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
224#define _PAGE_TYPE_RO 0x200 225#define _PAGE_TYPE_RO 0x200
225#define _PAGE_TYPE_RW 0x000 226#define _PAGE_TYPE_RW 0x000
227#define _PAGE_TYPE_EX_RO 0x202
228#define _PAGE_TYPE_EX_RW 0x002
226 229
227/* 230/*
228 * PTE type bits are rather complicated. handle_pte_fault uses pte_present, 231 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
@@ -243,11 +246,13 @@ extern unsigned long vmalloc_end;
243 * _PAGE_TYPE_FILE 11?1 -> 11?1 246 * _PAGE_TYPE_FILE 11?1 -> 11?1
244 * _PAGE_TYPE_RO 0100 -> 1100 247 * _PAGE_TYPE_RO 0100 -> 1100
245 * _PAGE_TYPE_RW 0000 -> 1000 248 * _PAGE_TYPE_RW 0000 -> 1000
249 * _PAGE_TYPE_EX_RO 0110 -> 1110
250 * _PAGE_TYPE_EX_RW 0010 -> 1010
246 * 251 *
247 * pte_none is true for bits combinations 1000, 1100 252 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
248 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 253 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
249 * pte_file is true for bits combinations 1101, 1111 254 * pte_file is true for bits combinations 1101, 1111
250 * swap pte is 1011 and 0001, 0011, 0101, 0111, 1010 and 1110 are invalid. 255 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
251 */ 256 */
252 257
253#ifndef __s390x__ 258#ifndef __s390x__
@@ -312,33 +317,100 @@ extern unsigned long vmalloc_end;
312#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) 317#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
313#define PAGE_RO __pgprot(_PAGE_TYPE_RO) 318#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
314#define PAGE_RW __pgprot(_PAGE_TYPE_RW) 319#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
320#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO)
321#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW)
315 322
316#define PAGE_KERNEL PAGE_RW 323#define PAGE_KERNEL PAGE_RW
317#define PAGE_COPY PAGE_RO 324#define PAGE_COPY PAGE_RO
318 325
319/* 326/*
320 * The S390 can't do page protection for execute, and considers that the 327 * Dependent on the EXEC_PROTECT option s390 can do execute protection.
321 * same are read. Also, write permissions imply read permissions. This is 328 * Write permission always implies read permission. In theory with a
322 * the closest we can get.. 329 * primary/secondary page table execute only can be implemented but
330 * it would cost an additional bit in the pte to distinguish all the
331 * different pte types. To avoid that execute permission currently
332 * implies read permission as well.
323 */ 333 */
324 /*xwr*/ 334 /*xwr*/
325#define __P000 PAGE_NONE 335#define __P000 PAGE_NONE
326#define __P001 PAGE_RO 336#define __P001 PAGE_RO
327#define __P010 PAGE_RO 337#define __P010 PAGE_RO
328#define __P011 PAGE_RO 338#define __P011 PAGE_RO
329#define __P100 PAGE_RO 339#define __P100 PAGE_EX_RO
330#define __P101 PAGE_RO 340#define __P101 PAGE_EX_RO
331#define __P110 PAGE_RO 341#define __P110 PAGE_EX_RO
332#define __P111 PAGE_RO 342#define __P111 PAGE_EX_RO
333 343
334#define __S000 PAGE_NONE 344#define __S000 PAGE_NONE
335#define __S001 PAGE_RO 345#define __S001 PAGE_RO
336#define __S010 PAGE_RW 346#define __S010 PAGE_RW
337#define __S011 PAGE_RW 347#define __S011 PAGE_RW
338#define __S100 PAGE_RO 348#define __S100 PAGE_EX_RO
339#define __S101 PAGE_RO 349#define __S101 PAGE_EX_RO
340#define __S110 PAGE_RW 350#define __S110 PAGE_EX_RW
341#define __S111 PAGE_RW 351#define __S111 PAGE_EX_RW
352
353#ifndef __s390x__
354# define PMD_SHADOW_SHIFT 1
355# define PGD_SHADOW_SHIFT 1
356#else /* __s390x__ */
357# define PMD_SHADOW_SHIFT 2
358# define PGD_SHADOW_SHIFT 2
359#endif /* __s390x__ */
360
361static inline struct page *get_shadow_page(struct page *page)
362{
363 if (s390_noexec && !list_empty(&page->lru))
364 return virt_to_page(page->lru.next);
365 return NULL;
366}
367
368static inline pte_t *get_shadow_pte(pte_t *ptep)
369{
370 unsigned long pteptr = (unsigned long) (ptep);
371
372 if (s390_noexec) {
373 unsigned long offset = pteptr & (PAGE_SIZE - 1);
374 void *addr = (void *) (pteptr ^ offset);
375 struct page *page = virt_to_page(addr);
376 if (!list_empty(&page->lru))
377 return (pte_t *) ((unsigned long) page->lru.next |
378 offset);
379 }
380 return NULL;
381}
382
383static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
384{
385 unsigned long pmdptr = (unsigned long) (pmdp);
386
387 if (s390_noexec) {
388 unsigned long offset = pmdptr &
389 ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
390 void *addr = (void *) (pmdptr ^ offset);
391 struct page *page = virt_to_page(addr);
392 if (!list_empty(&page->lru))
393 return (pmd_t *) ((unsigned long) page->lru.next |
394 offset);
395 }
396 return NULL;
397}
398
399static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
400{
401 unsigned long pgdptr = (unsigned long) (pgdp);
402
403 if (s390_noexec) {
404 unsigned long offset = pgdptr &
405 ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
406 void *addr = (void *) (pgdptr ^ offset);
407 struct page *page = virt_to_page(addr);
408 if (!list_empty(&page->lru))
409 return (pgd_t *) ((unsigned long) page->lru.next |
410 offset);
411 }
412 return NULL;
413}
342 414
343/* 415/*
344 * Certain architectures need to do special things when PTEs 416 * Certain architectures need to do special things when PTEs
@@ -347,7 +419,16 @@ extern unsigned long vmalloc_end;
347 */ 419 */
348static inline void set_pte(pte_t *pteptr, pte_t pteval) 420static inline void set_pte(pte_t *pteptr, pte_t pteval)
349{ 421{
422 pte_t *shadow_pte = get_shadow_pte(pteptr);
423
350 *pteptr = pteval; 424 *pteptr = pteval;
425 if (shadow_pte) {
426 if (!(pte_val(pteval) & _PAGE_INVALID) &&
427 (pte_val(pteval) & _PAGE_SWX))
428 pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
429 else
430 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
431 }
351} 432}
352#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) 433#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
353 434
@@ -465,7 +546,7 @@ static inline int pte_read(pte_t pte)
465 546
466static inline void pgd_clear(pgd_t * pgdp) { } 547static inline void pgd_clear(pgd_t * pgdp) { }
467 548
468static inline void pmd_clear(pmd_t * pmdp) 549static inline void pmd_clear_kernel(pmd_t * pmdp)
469{ 550{
470 pmd_val(pmdp[0]) = _PAGE_TABLE_INV; 551 pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
471 pmd_val(pmdp[1]) = _PAGE_TABLE_INV; 552 pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
@@ -473,24 +554,55 @@ static inline void pmd_clear(pmd_t * pmdp)
473 pmd_val(pmdp[3]) = _PAGE_TABLE_INV; 554 pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
474} 555}
475 556
557static inline void pmd_clear(pmd_t * pmdp)
558{
559 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
560
561 pmd_clear_kernel(pmdp);
562 if (shadow_pmd)
563 pmd_clear_kernel(shadow_pmd);
564}
565
476#else /* __s390x__ */ 566#else /* __s390x__ */
477 567
478static inline void pgd_clear(pgd_t * pgdp) 568static inline void pgd_clear_kernel(pgd_t * pgdp)
479{ 569{
480 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; 570 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
481} 571}
482 572
483static inline void pmd_clear(pmd_t * pmdp) 573static inline void pgd_clear(pgd_t * pgdp)
574{
575 pgd_t *shadow_pgd = get_shadow_pgd(pgdp);
576
577 pgd_clear_kernel(pgdp);
578 if (shadow_pgd)
579 pgd_clear_kernel(shadow_pgd);
580}
581
582static inline void pmd_clear_kernel(pmd_t * pmdp)
484{ 583{
485 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 584 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
486 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; 585 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
487} 586}
488 587
588static inline void pmd_clear(pmd_t * pmdp)
589{
590 pmd_t *shadow_pmd = get_shadow_pmd(pmdp);
591
592 pmd_clear_kernel(pmdp);
593 if (shadow_pmd)
594 pmd_clear_kernel(shadow_pmd);
595}
596
489#endif /* __s390x__ */ 597#endif /* __s390x__ */
490 598
491static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 599static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
492{ 600{
601 pte_t *shadow_pte = get_shadow_pte(ptep);
602
493 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 603 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
604 if (shadow_pte)
605 pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
494} 606}
495 607
496/* 608/*
@@ -608,8 +720,11 @@ ptep_clear_flush(struct vm_area_struct *vma,
608 unsigned long address, pte_t *ptep) 720 unsigned long address, pte_t *ptep)
609{ 721{
610 pte_t pte = *ptep; 722 pte_t pte = *ptep;
723 pte_t *shadow_pte = get_shadow_pte(ptep);
611 724
612 __ptep_ipte(address, ptep); 725 __ptep_ipte(address, ptep);
726 if (shadow_pte)
727 __ptep_ipte(address, shadow_pte);
613 return pte; 728 return pte;
614} 729}
615 730
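
Editor's note on the asm-s390/pgtable.h hunk above: get_shadow_pte() recovers the shadow slot by splitting the pte pointer into a page base and an in-page offset and re-applying the offset to the shadow page recorded in page->lru.next. The pointer arithmetic in isolation, with made-up addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pte    = 0x10003a8UL;	/* hypothetical pte slot address */
	unsigned long shadow = 0x2000000UL;	/* hypothetical shadow page base */
	unsigned long offset = pte & (PAGE_SIZE - 1);
	unsigned long base   = pte ^ offset;	/* same as pte & ~(PAGE_SIZE - 1) */

	printf("base 0x%lx offset 0x%lx -> shadow pte 0x%lx\n",
	       base, offset, shadow | offset);
	return 0;
}
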
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index cbbedc63ba25..4c1b73940351 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -50,6 +50,7 @@ struct cpuinfo_S390
50 unsigned long pgtable_cache_sz; 50 unsigned long pgtable_cache_sz;
51}; 51};
52 52
53extern void s390_adjust_jiffies(void);
53extern void print_cpu_info(struct cpuinfo_S390 *); 54extern void print_cpu_info(struct cpuinfo_S390 *);
54 55
55/* Lazy FPU handling on uni-processor */ 56/* Lazy FPU handling on uni-processor */
@@ -144,7 +145,8 @@ struct stack_frame {
144#ifndef __s390x__ 145#ifndef __s390x__
145 146
146#define start_thread(regs, new_psw, new_stackp) do { \ 147#define start_thread(regs, new_psw, new_stackp) do { \
147 regs->psw.mask = PSW_USER_BITS; \ 148 set_fs(USER_DS); \
149 regs->psw.mask = psw_user_bits; \
148 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 150 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
149 regs->gprs[15] = new_stackp ; \ 151 regs->gprs[15] = new_stackp ; \
150} while (0) 152} while (0)
@@ -152,13 +154,15 @@ struct stack_frame {
152#else /* __s390x__ */ 154#else /* __s390x__ */
153 155
154#define start_thread(regs, new_psw, new_stackp) do { \ 156#define start_thread(regs, new_psw, new_stackp) do { \
155 regs->psw.mask = PSW_USER_BITS; \ 157 set_fs(USER_DS); \
158 regs->psw.mask = psw_user_bits; \
156 regs->psw.addr = new_psw; \ 159 regs->psw.addr = new_psw; \
157 regs->gprs[15] = new_stackp; \ 160 regs->gprs[15] = new_stackp; \
158} while (0) 161} while (0)
159 162
160#define start_thread31(regs, new_psw, new_stackp) do { \ 163#define start_thread31(regs, new_psw, new_stackp) do { \
161 regs->psw.mask = PSW_USER32_BITS; \ 164 set_fs(USER_DS); \
165 regs->psw.mask = psw_user32_bits; \
162 regs->psw.addr = new_psw; \ 166 regs->psw.addr = new_psw; \
163 regs->gprs[15] = new_stackp; \ 167 regs->gprs[15] = new_stackp; \
164} while (0) 168} while (0)
@@ -201,9 +205,8 @@ unsigned long get_wchan(struct task_struct *p);
201static inline void cpu_relax(void) 205static inline void cpu_relax(void)
202{ 206{
203 if (MACHINE_HAS_DIAG44) 207 if (MACHINE_HAS_DIAG44)
204 asm volatile("diag 0,0,68" : : : "memory"); 208 asm volatile("diag 0,0,68");
205 else 209 barrier();
206 barrier();
207} 210}
208 211
209/* 212/*
@@ -328,6 +331,18 @@ static inline void disabled_wait(unsigned long code)
328} 331}
329 332
330/* 333/*
334 * Basic Machine Check/Program Check Handler.
335 */
336
337extern void s390_base_mcck_handler(void);
338extern void s390_base_pgm_handler(void);
339extern void s390_base_ext_handler(void);
340
341extern void (*s390_base_mcck_handler_fn)(void);
342extern void (*s390_base_pgm_handler_fn)(void);
343extern void (*s390_base_ext_handler_fn)(void);
344
345/*
331 * CPU idle notifier chain. 346 * CPU idle notifier chain.
332 */ 347 */
333#define CPU_IDLE 0 348#define CPU_IDLE 0
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 7b768c5c68a8..fa6ca87080e8 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -266,17 +266,12 @@ typedef struct
266#define PSW_ASC_SECONDARY 0x0000800000000000UL 266#define PSW_ASC_SECONDARY 0x0000800000000000UL
267#define PSW_ASC_HOME 0x0000C00000000000UL 267#define PSW_ASC_HOME 0x0000C00000000000UL
268 268
269#define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ 269extern long psw_user32_bits;
270 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
271 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
272 270
273#endif /* __s390x__ */ 271#endif /* __s390x__ */
274 272
275#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ 273extern long psw_kernel_bits;
276 PSW_MASK_MCHECK | PSW_DEFAULT_KEY) 274extern long psw_user_bits;
277#define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \
278 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \
279 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
280 275
281/* This macro merges a NEW PSW mask specified by the user into 276/* This macro merges a NEW PSW mask specified by the user into
282 the currently active PSW mask CURRENT, modifying only those 277 the currently active PSW mask CURRENT, modifying only those
diff --git a/include/asm-s390/reset.h b/include/asm-s390/reset.h
index 532e65a2aafc..f584f4a52581 100644
--- a/include/asm-s390/reset.h
+++ b/include/asm-s390/reset.h
@@ -18,7 +18,4 @@ struct reset_call {
18extern void register_reset_call(struct reset_call *reset); 18extern void register_reset_call(struct reset_call *reset);
19extern void unregister_reset_call(struct reset_call *reset); 19extern void unregister_reset_call(struct reset_call *reset);
20extern void s390_reset_system(void); 20extern void s390_reset_system(void);
21extern void (*s390_reset_mcck_handler)(void);
22extern void (*s390_reset_pgm_handler)(void);
23
24#endif /* _ASM_S390_RESET_H */ 21#endif /* _ASM_S390_RESET_H */
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
new file mode 100644
index 000000000000..468b97018405
--- /dev/null
+++ b/include/asm-s390/sclp.h
@@ -0,0 +1,39 @@
1/*
2 * include/asm-s390/sclp.h
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_SCLP_H
9#define _ASM_S390_SCLP_H
10
11#include <linux/types.h>
12
13struct sccb_header {
14 u16 length;
15 u8 function_code;
16 u8 control_mask[3];
17 u16 response_code;
18} __attribute__((packed));
19
20#define LOADPARM_LEN 8
21
22struct sclp_readinfo_sccb {
23 struct sccb_header header; /* 0-7 */
24 u16 rnmax; /* 8-9 */
25 u8 rnsize; /* 10 */
26 u8 _reserved0[24 - 11]; /* 11-23 */
27 u8 loadparm[LOADPARM_LEN]; /* 24-31 */
28 u8 _reserved1[91 - 32]; /* 32-90 */
29 u8 flags; /* 91 */
30 u8 _reserved2[100 - 92]; /* 92-99 */
31 u32 rnsize2; /* 100-103 */
32 u64 rnmax2; /* 104-111 */
33 u8 _reserved3[4096 - 112]; /* 112-4095 */
34} __attribute__((packed, aligned(4096)));
35
36extern struct sclp_readinfo_sccb s390_readinfo_sccb;
37extern void sclp_readinfo_early(void);
38
39#endif /* _ASM_S390_SCLP_H */
diff --git a/include/asm-s390/sections.h b/include/asm-s390/sections.h
index 3a0b8ffeab7a..1c5a2c4ccdad 100644
--- a/include/asm-s390/sections.h
+++ b/include/asm-s390/sections.h
@@ -3,4 +3,6 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern char _eshared[];
7
6#endif 8#endif
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 9574fe80a046..3388bb52597c 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -42,6 +42,18 @@ struct mem_chunk {
42 42
43extern struct mem_chunk memory_chunk[]; 43extern struct mem_chunk memory_chunk[];
44 44
45#ifdef CONFIG_S390_SWITCH_AMODE
46extern unsigned int switch_amode;
47#else
48#define switch_amode (0)
49#endif
50
51#ifdef CONFIG_S390_EXEC_PROTECT
52extern unsigned int s390_noexec;
53#else
54#define s390_noexec (0)
55#endif
56
45/* 57/*
46 * Machine features detected in head.S 58 * Machine features detected in head.S
47 */ 59 */
@@ -74,6 +86,9 @@ extern unsigned int console_mode;
74extern unsigned int console_devno; 86extern unsigned int console_devno;
75extern unsigned int console_irq; 87extern unsigned int console_irq;
76 88
89extern char vmhalt_cmd[];
90extern char vmpoff_cmd[];
91
77#define CONSOLE_IS_UNDEFINED (console_mode == 0) 92#define CONSOLE_IS_UNDEFINED (console_mode == 0)
78#define CONSOLE_IS_SCLP (console_mode == 1) 93#define CONSOLE_IS_SCLP (console_mode == 1)
79#define CONSOLE_IS_3215 (console_mode == 2) 94#define CONSOLE_IS_3215 (console_mode == 2)
@@ -141,13 +156,19 @@ struct ipl_parameter_block {
141extern u32 ipl_flags; 156extern u32 ipl_flags;
142extern u16 ipl_devno; 157extern u16 ipl_devno;
143 158
144void do_reipl(void); 159extern void do_reipl(void);
160extern void ipl_save_parameters(void);
145 161
146enum { 162enum {
147 IPL_DEVNO_VALID = 1, 163 IPL_DEVNO_VALID = 1,
148 IPL_PARMBLOCK_VALID = 2, 164 IPL_PARMBLOCK_VALID = 2,
165 IPL_NSS_VALID = 4,
149}; 166};
150 167
168#define NSS_NAME_SIZE 8
169
170extern char kernel_nss_name[];
171
151#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ 172#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \
152 IPL_PARMBLOCK_ORIGIN) 173 IPL_PARMBLOCK_ORIGIN)
153#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len) 174#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len)
diff --git a/arch/s390/math-emu/sfp-util.h b/include/asm-s390/sfp-util.h
index 5b6ca4570ea4..8cabcd23d976 100644
--- a/arch/s390/math-emu/sfp-util.h
+++ b/include/asm-s390/sfp-util.h
@@ -52,12 +52,12 @@
52}) 52})
53 53
54#define udiv_qrnnd(q, r, n1, n0, d) \ 54#define udiv_qrnnd(q, r, n1, n0, d) \
55 do { unsigned long __r; \ 55 do { unsigned int __r; \
56 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ 56 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
57 (r) = __r; \ 57 (r) = __r; \
58 } while (0) 58 } while (0)
59extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long, 59extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
60 unsigned long , unsigned long); 60 unsigned int , unsigned int);
61 61
62#define UDIV_NEEDS_NORMALIZATION 0 62#define UDIV_NEEDS_NORMALIZATION 0
63 63
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 7097c96ed026..b957e4cda464 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -31,6 +31,10 @@ typedef struct
31 __u16 cpu; 31 __u16 cpu;
32} sigp_info; 32} sigp_info;
33 33
34extern void machine_restart_smp(char *);
35extern void machine_halt_smp(void);
36extern void machine_power_off_smp(void);
37
34extern void smp_setup_cpu_possible_map(void); 38extern void smp_setup_cpu_possible_map(void);
35extern int smp_call_function_on(void (*func) (void *info), void *info, 39extern int smp_call_function_on(void (*func) (void *info), void *info,
36 int nonatomic, int wait, int cpu); 40 int nonatomic, int wait, int cpu);
@@ -106,7 +110,7 @@ smp_call_function_on(void (*func) (void *info), void *info,
106static inline void smp_send_stop(void) 110static inline void smp_send_stop(void)
107{ 111{
108 /* Disable all interrupts/machine checks */ 112 /* Disable all interrupts/machine checks */
109 __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); 113 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
110} 114}
111 115
112#define smp_cpu_not_running(cpu) 1 116#define smp_cpu_not_running(cpu) 1
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bd0b05ae87d2..bbe137c3ed69 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -373,8 +373,8 @@ __set_psw_mask(unsigned long mask)
373 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 373 __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
374} 374}
375 375
376#define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) 376#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
377#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) 377#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
378 378
379#ifdef CONFIG_SMP 379#ifdef CONFIG_SMP
380 380
diff --git a/include/asm-s390/tape390.h b/include/asm-s390/tape390.h
index f1d66ba0deef..884fba48f1ff 100644
--- a/include/asm-s390/tape390.h
+++ b/include/asm-s390/tape390.h
@@ -1,11 +1,11 @@
1/************************************************************************* 1/*************************************************************************
2 * 2 *
3 * tape390.h 3 * tape390.h
4 * enables user programs to display messages on the tape device 4 * enables user programs to display messages and control encryption
5 * on s390 tape devices
5 * 6 *
6 * S390 and zSeries version 7 * Copyright IBM Corp. 2001,2006
7 * Copyright (C) 2001 IBM Corporation 8 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
8 * Author(s): Despina Papadopoulou <despina_p@de.ibm.com>
9 * 9 *
10 *************************************************************************/ 10 *************************************************************************/
11 11
@@ -36,4 +36,68 @@ typedef struct display_struct {
36 char message2[8]; 36 char message2[8];
37} display_struct; 37} display_struct;
38 38
39/*
40 * Tape encryption support
41 */
42
43struct tape390_crypt_info {
44 char capability;
45 char status;
46 char medium_status;
47} __attribute__ ((packed));
48
49
50/* Macros for "capable" field */
51#define TAPE390_CRYPT_SUPPORTED_MASK 0x01
52#define TAPE390_CRYPT_SUPPORTED(x) \
53 ((x.capability & TAPE390_CRYPT_SUPPORTED_MASK))
54
55/* Macros for "status" field */
56#define TAPE390_CRYPT_ON_MASK 0x01
57#define TAPE390_CRYPT_ON(x) (((x.status) & TAPE390_CRYPT_ON_MASK))
58
59/* Macros for "medium status" field */
60#define TAPE390_MEDIUM_LOADED_MASK 0x01
61#define TAPE390_MEDIUM_ENCRYPTED_MASK 0x02
62#define TAPE390_MEDIUM_ENCRYPTED(x) \
63 (((x.medium_status) & TAPE390_MEDIUM_ENCRYPTED_MASK))
64#define TAPE390_MEDIUM_LOADED(x) \
65 (((x.medium_status) & TAPE390_MEDIUM_LOADED_MASK))
66
67/*
68 * The TAPE390_CRYPT_SET ioctl is used to switch on/off encryption.
69 * The "encryption_capable" and "tape_status" fields are ignored for this ioctl!
70 */
71#define TAPE390_CRYPT_SET _IOW('d', 2, struct tape390_crypt_info)
72
73/*
74 * The TAPE390_CRYPT_QUERY ioctl is used to query the encryption state.
75 */
76#define TAPE390_CRYPT_QUERY _IOR('d', 3, struct tape390_crypt_info)
77
78/* Values for "kekl1/2_type" and "kekl1/2_type_on_tape" fields */
79#define TAPE390_KEKL_TYPE_NONE 0
80#define TAPE390_KEKL_TYPE_LABEL 1
81#define TAPE390_KEKL_TYPE_HASH 2
82
83struct tape390_kekl {
84 unsigned char type;
85 unsigned char type_on_tape;
86 char label[65];
87} __attribute__ ((packed));
88
89struct tape390_kekl_pair {
90 struct tape390_kekl kekl[2];
91} __attribute__ ((packed));
92
93/*
94 * The TAPE390_KEKL_SET ioctl is used to set Key Encrypting Key labels.
95 */
96#define TAPE390_KEKL_SET _IOW('d', 4, struct tape390_kekl_pair)
97
98/*
99 * The TAPE390_KEKL_QUERY ioctl is used to query Key Encrypting Key labels.
100 */
101#define TAPE390_KEKL_QUERY _IOR('d', 5, struct tape390_kekl_pair)
102
39#endif 103#endif
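
Editor's note on the asm-s390/tape390.h hunk above: a hedged userspace sketch of how the new encryption ioctls might be used, opening a tape device node and querying its crypt state with TAPE390_CRYPT_QUERY. The device path and the <asm/tape390.h> include location are assumptions, not taken from this diff:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/tape390.h>	/* assumed install path of the header above */

int main(void)
{
	struct tape390_crypt_info info;
	int fd = open("/dev/ntibm0", O_RDONLY);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TAPE390_CRYPT_QUERY, &info) < 0) {
		perror("TAPE390_CRYPT_QUERY");
		close(fd);
		return 1;
	}
	/* The macros take the struct itself, not a pointer. */
	printf("crypt supported: %d, on: %d\n",
	       TAPE390_CRYPT_SUPPORTED(info), TAPE390_CRYPT_ON(info));
	close(fd);
	return 0;
}
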
diff --git a/include/asm-s390/timer.h b/include/asm-s390/timer.h
index 30e5cbe570f2..adb34860a543 100644
--- a/include/asm-s390/timer.h
+++ b/include/asm-s390/timer.h
@@ -45,6 +45,9 @@ extern void add_virt_timer_periodic(void *new);
45extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); 45extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
46extern int del_virt_timer(struct vtimer_list *timer); 46extern int del_virt_timer(struct vtimer_list *timer);
47 47
48extern void init_cpu_vtimer(void);
49extern void vtime_init(void);
50
48#endif /* __KERNEL__ */ 51#endif /* __KERNEL__ */
49 52
50#endif /* _ASM_S390_TIMER_H */ 53#endif /* _ASM_S390_TIMER_H */
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h
index 4df4a41029a3..98229db24314 100644
--- a/include/asm-s390/timex.h
+++ b/include/asm-s390/timex.h
@@ -11,6 +11,41 @@
11#ifndef _ASM_S390_TIMEX_H 11#ifndef _ASM_S390_TIMEX_H
12#define _ASM_S390_TIMEX_H 12#define _ASM_S390_TIMEX_H
13 13
14/* Inline functions for clock register access. */
15static inline int set_clock(__u64 time)
16{
17 int cc;
18
19 asm volatile(
20 " sck 0(%2)\n"
21 " ipm %0\n"
22 " srl %0,28\n"
23 : "=d" (cc) : "m" (time), "a" (&time) : "cc");
24 return cc;
25}
26
27static inline int store_clock(__u64 *time)
28{
29 int cc;
30
31 asm volatile(
32 " stck 0(%2)\n"
33 " ipm %0\n"
34 " srl %0,28\n"
35 : "=d" (cc), "=m" (*time) : "a" (time) : "cc");
36 return cc;
37}
38
39static inline void set_clock_comparator(__u64 time)
40{
41 asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time));
42}
43
44static inline void store_clock_comparator(__u64 *time)
45{
46 asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time));
47}
48
14#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ 49#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
15 50
16typedef unsigned long long cycles_t; 51typedef unsigned long long cycles_t;
@@ -27,9 +62,24 @@ static inline unsigned long long get_clock (void)
27 return clk; 62 return clk;
28} 63}
29 64
65static inline void get_clock_extended(void *dest)
66{
67 typedef struct { unsigned long long clk[2]; } __clock_t;
68
69#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
70 asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
71#else /* __GNUC__ */
72 asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
73 : "a" ((__clock_t *)dest) : "cc");
74#endif /* __GNUC__ */
75}
76
30static inline cycles_t get_cycles(void) 77static inline cycles_t get_cycles(void)
31{ 78{
32 return (cycles_t) get_clock() >> 2; 79 return (cycles_t) get_clock() >> 2;
33} 80}
34 81
82int get_sync_clock(unsigned long long *clock);
83void init_cpu_timer(void);
84
35#endif 85#endif
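
Editor's note on the asm-s390/timex.h hunk above, for readers using get_clock()/get_sync_clock(): on z/Architecture, bit 51 of the TOD value corresponds to one microsecond, so a clock delta converts to microseconds with a right shift by 12. That convention is background knowledge rather than something stated in this diff; a tiny check with arbitrary values:

#include <stdio.h>

int main(void)
{
	unsigned long long t0 = 0x0004000000000000ULL;
	unsigned long long t1 = 0x0004000000001000ULL;	/* 0x1000 TOD units later */

	/* 0x1000 >> 12 == 1, i.e. one microsecond elapsed. */
	printf("%llu us elapsed\n", (t1 - t0) >> 12);
	return 0;
}
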
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index fa4dc916a9bf..66793f55c8b2 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <asm/processor.h> 5#include <asm/processor.h>
6#include <asm/pgalloc.h>
6 7
7/* 8/*
8 * TLB flushing: 9 * TLB flushing:
@@ -102,6 +103,14 @@ static inline void __flush_tlb_mm(struct mm_struct * mm)
102 if (unlikely(cpus_empty(mm->cpu_vm_mask))) 103 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
103 return; 104 return;
104 if (MACHINE_HAS_IDTE) { 105 if (MACHINE_HAS_IDTE) {
106 pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
107
108 if (shadow_pgd) {
109 asm volatile(
110 " .insn rrf,0xb98e0000,0,%0,%1,0"
111 : : "a" (2048),
112 "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
113 }
105 asm volatile( 114 asm volatile(
106 " .insn rrf,0xb98e0000,0,%0,%1,0" 115 " .insn rrf,0xb98e0000,0,%0,%1,0"
107 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); 116 : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 73ac4e82217b..0235970278f0 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -90,6 +90,8 @@ struct uaccess_ops {
90extern struct uaccess_ops uaccess; 90extern struct uaccess_ops uaccess;
91extern struct uaccess_ops uaccess_std; 91extern struct uaccess_ops uaccess_std;
92extern struct uaccess_ops uaccess_mvcos; 92extern struct uaccess_ops uaccess_mvcos;
93extern struct uaccess_ops uaccess_mvcos_switch;
94extern struct uaccess_ops uaccess_pt;
93 95
94static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 96static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
95{ 97{
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index ba94ab3d2673..ab913ffcad56 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -1,6 +1,5 @@
1#ifndef _ASM_SWIOTLB_H 1#ifndef _ASM_SWIOTLB_H
2#define _ASM_SWTIOLB_H 1 2#define _ASM_SWIOTLB_H 1
3
4 3
5#include <asm/dma-mapping.h> 4#include <asm/dma-mapping.h>
6 5
@@ -45,6 +44,7 @@ extern void swiotlb_init(void);
45extern int swiotlb_force; 44extern int swiotlb_force;
46 45
47#ifdef CONFIG_SWIOTLB 46#ifdef CONFIG_SWIOTLB
47#define SWIOTLB_ARCH_NEED_ALLOC
48extern int swiotlb; 48extern int swiotlb;
49#else 49#else
50#define swiotlb 0 50#define swiotlb 0
@@ -52,4 +52,6 @@ extern int swiotlb;
52 52
53extern void pci_swiotlb_init(void); 53extern void pci_swiotlb_init(void);
54 54
55#endif /* _ASM_SWTIOLB_H */ 55static inline void dma_mark_clean(void *addr, size_t size) {}
56
57#endif /* _ASM_SWIOTLB_H */
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8e4dbb51fc70..50d568ec178a 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -1,10 +1,8 @@
1#ifndef __HID_DEBUG_H
2#define __HID_DEBUG_H
3
1/* 4/*
2 * $Id: hid-debug.h,v 1.8 2001/09/25 09:37:57 vojtech Exp $ 5 * Copyright (c) 2007 Jiri Kosina
3 *
4 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
5 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
6 *
7 * Some debug stuff for the HID parser.
8 */ 6 */
9 7
10/* 8/*
@@ -22,737 +20,26 @@
22 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 * 22 *
25 * Should you need to contact me, the author, you can do so either by
26 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
27 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
28 */ 23 */
29 24
30#include <linux/input.h> 25#ifdef CONFIG_HID_DEBUG
31
32struct hid_usage_entry {
33 unsigned page;
34 unsigned usage;
35 char *description;
36};
37
38static const struct hid_usage_entry hid_usage_table[] = {
39 { 0, 0, "Undefined" },
40 { 1, 0, "GenericDesktop" },
41 {0, 0x01, "Pointer"},
42 {0, 0x02, "Mouse"},
43 {0, 0x04, "Joystick"},
44 {0, 0x05, "GamePad"},
45 {0, 0x06, "Keyboard"},
46 {0, 0x07, "Keypad"},
47 {0, 0x08, "MultiAxis"},
48 {0, 0x30, "X"},
49 {0, 0x31, "Y"},
50 {0, 0x32, "Z"},
51 {0, 0x33, "Rx"},
52 {0, 0x34, "Ry"},
53 {0, 0x35, "Rz"},
54 {0, 0x36, "Slider"},
55 {0, 0x37, "Dial"},
56 {0, 0x38, "Wheel"},
57 {0, 0x39, "HatSwitch"},
58 {0, 0x3a, "CountedBuffer"},
59 {0, 0x3b, "ByteCount"},
60 {0, 0x3c, "MotionWakeup"},
61 {0, 0x3d, "Start"},
62 {0, 0x3e, "Select"},
63 {0, 0x40, "Vx"},
64 {0, 0x41, "Vy"},
65 {0, 0x42, "Vz"},
66 {0, 0x43, "Vbrx"},
67 {0, 0x44, "Vbry"},
68 {0, 0x45, "Vbrz"},
69 {0, 0x46, "Vno"},
70 {0, 0x80, "SystemControl"},
71 {0, 0x81, "SystemPowerDown"},
72 {0, 0x82, "SystemSleep"},
73 {0, 0x83, "SystemWakeUp"},
74 {0, 0x84, "SystemContextMenu"},
75 {0, 0x85, "SystemMainMenu"},
76 {0, 0x86, "SystemAppMenu"},
77 {0, 0x87, "SystemMenuHelp"},
78 {0, 0x88, "SystemMenuExit"},
79 {0, 0x89, "SystemMenuSelect"},
80 {0, 0x8a, "SystemMenuRight"},
81 {0, 0x8b, "SystemMenuLeft"},
82 {0, 0x8c, "SystemMenuUp"},
83 {0, 0x8d, "SystemMenuDown"},
84 {0, 0x90, "D-PadUp"},
85 {0, 0x91, "D-PadDown"},
86 {0, 0x92, "D-PadRight"},
87 {0, 0x93, "D-PadLeft"},
88 { 2, 0, "Simulation" },
89 {0, 0xb0, "Aileron"},
90 {0, 0xb1, "AileronTrim"},
91 {0, 0xb2, "Anti-Torque"},
92 {0, 0xb3, "Autopilot"},
93 {0, 0xb4, "Chaff"},
94 {0, 0xb5, "Collective"},
95 {0, 0xb6, "DiveBrake"},
96 {0, 0xb7, "ElectronicCountermeasures"},
97 {0, 0xb8, "Elevator"},
98 {0, 0xb9, "ElevatorTrim"},
99 {0, 0xba, "Rudder"},
100 {0, 0xbb, "Throttle"},
101 {0, 0xbc, "FlightCommunications"},
102 {0, 0xbd, "FlareRelease"},
103 {0, 0xbe, "LandingGear"},
104 {0, 0xbf, "ToeBrake"},
105 { 7, 0, "Keyboard" },
106 { 8, 0, "LED" },
107 {0, 0x01, "NumLock"},
108 {0, 0x02, "CapsLock"},
109 {0, 0x03, "ScrollLock"},
110 {0, 0x04, "Compose"},
111 {0, 0x05, "Kana"},
112 {0, 0x4b, "GenericIndicator"},
113 { 9, 0, "Button" },
114 { 10, 0, "Ordinal" },
115 { 12, 0, "Consumer" },
116 {0, 0x238, "HorizontalWheel"},
117 { 13, 0, "Digitizers" },
118 {0, 0x01, "Digitizer"},
119 {0, 0x02, "Pen"},
120 {0, 0x03, "LightPen"},
121 {0, 0x04, "TouchScreen"},
122 {0, 0x05, "TouchPad"},
123 {0, 0x20, "Stylus"},
124 {0, 0x21, "Puck"},
125 {0, 0x22, "Finger"},
126 {0, 0x30, "TipPressure"},
127 {0, 0x31, "BarrelPressure"},
128 {0, 0x32, "InRange"},
129 {0, 0x33, "Touch"},
130 {0, 0x34, "UnTouch"},
131 {0, 0x35, "Tap"},
132 {0, 0x39, "TabletFunctionKey"},
133 {0, 0x3a, "ProgramChangeKey"},
134 {0, 0x3c, "Invert"},
135 {0, 0x42, "TipSwitch"},
136 {0, 0x43, "SecondaryTipSwitch"},
137 {0, 0x44, "BarrelSwitch"},
138 {0, 0x45, "Eraser"},
139 {0, 0x46, "TabletPick"},
140 { 15, 0, "PhysicalInterfaceDevice" },
141 {0, 0x00, "Undefined"},
142 {0, 0x01, "Physical_Interface_Device"},
143 {0, 0x20, "Normal"},
144 {0, 0x21, "Set_Effect_Report"},
145 {0, 0x22, "Effect_Block_Index"},
146 {0, 0x23, "Parameter_Block_Offset"},
147 {0, 0x24, "ROM_Flag"},
148 {0, 0x25, "Effect_Type"},
149 {0, 0x26, "ET_Constant_Force"},
150 {0, 0x27, "ET_Ramp"},
151 {0, 0x28, "ET_Custom_Force_Data"},
152 {0, 0x30, "ET_Square"},
153 {0, 0x31, "ET_Sine"},
154 {0, 0x32, "ET_Triangle"},
155 {0, 0x33, "ET_Sawtooth_Up"},
156 {0, 0x34, "ET_Sawtooth_Down"},
157 {0, 0x40, "ET_Spring"},
158 {0, 0x41, "ET_Damper"},
159 {0, 0x42, "ET_Inertia"},
160 {0, 0x43, "ET_Friction"},
161 {0, 0x50, "Duration"},
162 {0, 0x51, "Sample_Period"},
163 {0, 0x52, "Gain"},
164 {0, 0x53, "Trigger_Button"},
165 {0, 0x54, "Trigger_Repeat_Interval"},
166 {0, 0x55, "Axes_Enable"},
167 {0, 0x56, "Direction_Enable"},
168 {0, 0x57, "Direction"},
169 {0, 0x58, "Type_Specific_Block_Offset"},
170 {0, 0x59, "Block_Type"},
171 {0, 0x5A, "Set_Envelope_Report"},
172 {0, 0x5B, "Attack_Level"},
173 {0, 0x5C, "Attack_Time"},
174 {0, 0x5D, "Fade_Level"},
175 {0, 0x5E, "Fade_Time"},
176 {0, 0x5F, "Set_Condition_Report"},
177 {0, 0x60, "CP_Offset"},
178 {0, 0x61, "Positive_Coefficient"},
179 {0, 0x62, "Negative_Coefficient"},
180 {0, 0x63, "Positive_Saturation"},
181 {0, 0x64, "Negative_Saturation"},
182 {0, 0x65, "Dead_Band"},
183 {0, 0x66, "Download_Force_Sample"},
184 {0, 0x67, "Isoch_Custom_Force_Enable"},
185 {0, 0x68, "Custom_Force_Data_Report"},
186 {0, 0x69, "Custom_Force_Data"},
187 {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
188 {0, 0x6B, "Set_Custom_Force_Report"},
189 {0, 0x6C, "Custom_Force_Data_Offset"},
190 {0, 0x6D, "Sample_Count"},
191 {0, 0x6E, "Set_Periodic_Report"},
192 {0, 0x6F, "Offset"},
193 {0, 0x70, "Magnitude"},
194 {0, 0x71, "Phase"},
195 {0, 0x72, "Period"},
196 {0, 0x73, "Set_Constant_Force_Report"},
197 {0, 0x74, "Set_Ramp_Force_Report"},
198 {0, 0x75, "Ramp_Start"},
199 {0, 0x76, "Ramp_End"},
200 {0, 0x77, "Effect_Operation_Report"},
201 {0, 0x78, "Effect_Operation"},
202 {0, 0x79, "Op_Effect_Start"},
203 {0, 0x7A, "Op_Effect_Start_Solo"},
204 {0, 0x7B, "Op_Effect_Stop"},
205 {0, 0x7C, "Loop_Count"},
206 {0, 0x7D, "Device_Gain_Report"},
207 {0, 0x7E, "Device_Gain"},
208 {0, 0x7F, "PID_Pool_Report"},
209 {0, 0x80, "RAM_Pool_Size"},
210 {0, 0x81, "ROM_Pool_Size"},
211 {0, 0x82, "ROM_Effect_Block_Count"},
212 {0, 0x83, "Simultaneous_Effects_Max"},
213 {0, 0x84, "Pool_Alignment"},
214 {0, 0x85, "PID_Pool_Move_Report"},
215 {0, 0x86, "Move_Source"},
216 {0, 0x87, "Move_Destination"},
217 {0, 0x88, "Move_Length"},
218 {0, 0x89, "PID_Block_Load_Report"},
219 {0, 0x8B, "Block_Load_Status"},
220 {0, 0x8C, "Block_Load_Success"},
221 {0, 0x8D, "Block_Load_Full"},
222 {0, 0x8E, "Block_Load_Error"},
223 {0, 0x8F, "Block_Handle"},
224 {0, 0x90, "PID_Block_Free_Report"},
225 {0, 0x91, "Type_Specific_Block_Handle"},
226 {0, 0x92, "PID_State_Report"},
227 {0, 0x94, "Effect_Playing"},
228 {0, 0x95, "PID_Device_Control_Report"},
229 {0, 0x96, "PID_Device_Control"},
230 {0, 0x97, "DC_Enable_Actuators"},
231 {0, 0x98, "DC_Disable_Actuators"},
232 {0, 0x99, "DC_Stop_All_Effects"},
233 {0, 0x9A, "DC_Device_Reset"},
234 {0, 0x9B, "DC_Device_Pause"},
235 {0, 0x9C, "DC_Device_Continue"},
236 {0, 0x9F, "Device_Paused"},
237 {0, 0xA0, "Actuators_Enabled"},
238 {0, 0xA4, "Safety_Switch"},
239 {0, 0xA5, "Actuator_Override_Switch"},
240 {0, 0xA6, "Actuator_Power"},
241 {0, 0xA7, "Start_Delay"},
242 {0, 0xA8, "Parameter_Block_Size"},
243 {0, 0xA9, "Device_Managed_Pool"},
244 {0, 0xAA, "Shared_Parameter_Blocks"},
245 {0, 0xAB, "Create_New_Effect_Report"},
246 {0, 0xAC, "RAM_Pool_Available"},
247 { 0x84, 0, "Power Device" },
248 { 0x84, 0x02, "PresentStatus" },
249 { 0x84, 0x03, "ChangeStatus" },
250 { 0x84, 0x04, "UPS" },
251 { 0x84, 0x05, "PowerSupply" },
252 { 0x84, 0x10, "BatterySystem" },
253 { 0x84, 0x11, "BatterySystemID" },
254 { 0x84, 0x12, "Battery" },
255 { 0x84, 0x13, "BatteryID" },
256 { 0x84, 0x14, "Charger" },
257 { 0x84, 0x15, "ChargerID" },
258 { 0x84, 0x16, "PowerConverter" },
259 { 0x84, 0x17, "PowerConverterID" },
260 { 0x84, 0x18, "OutletSystem" },
261 { 0x84, 0x19, "OutletSystemID" },
262 { 0x84, 0x1a, "Input" },
263 { 0x84, 0x1b, "InputID" },
264 { 0x84, 0x1c, "Output" },
265 { 0x84, 0x1d, "OutputID" },
266 { 0x84, 0x1e, "Flow" },
267 { 0x84, 0x1f, "FlowID" },
268 { 0x84, 0x20, "Outlet" },
269 { 0x84, 0x21, "OutletID" },
270 { 0x84, 0x22, "Gang" },
271 { 0x84, 0x24, "PowerSummary" },
272 { 0x84, 0x25, "PowerSummaryID" },
273 { 0x84, 0x30, "Voltage" },
274 { 0x84, 0x31, "Current" },
275 { 0x84, 0x32, "Frequency" },
276 { 0x84, 0x33, "ApparentPower" },
277 { 0x84, 0x35, "PercentLoad" },
278 { 0x84, 0x40, "ConfigVoltage" },
279 { 0x84, 0x41, "ConfigCurrent" },
280 { 0x84, 0x43, "ConfigApparentPower" },
281 { 0x84, 0x53, "LowVoltageTransfer" },
282 { 0x84, 0x54, "HighVoltageTransfer" },
283 { 0x84, 0x56, "DelayBeforeStartup" },
284 { 0x84, 0x57, "DelayBeforeShutdown" },
285 { 0x84, 0x58, "Test" },
286 { 0x84, 0x5a, "AudibleAlarmControl" },
287 { 0x84, 0x60, "Present" },
288 { 0x84, 0x61, "Good" },
289 { 0x84, 0x62, "InternalFailure" },
290 { 0x84, 0x65, "Overload" },
291 { 0x84, 0x66, "OverCharged" },
292 { 0x84, 0x67, "OverTemperature" },
293 { 0x84, 0x68, "ShutdownRequested" },
294 { 0x84, 0x69, "ShutdownImminent" },
295 { 0x84, 0x6b, "SwitchOn/Off" },
296 { 0x84, 0x6c, "Switchable" },
297 { 0x84, 0x6d, "Used" },
298 { 0x84, 0x6e, "Boost" },
299 { 0x84, 0x73, "CommunicationLost" },
300 { 0x84, 0xfd, "iManufacturer" },
301 { 0x84, 0xfe, "iProduct" },
302 { 0x84, 0xff, "iSerialNumber" },
303 { 0x85, 0, "Battery System" },
304 { 0x85, 0x01, "SMBBatteryMode" },
305 { 0x85, 0x02, "SMBBatteryStatus" },
306 { 0x85, 0x03, "SMBAlarmWarning" },
307 { 0x85, 0x04, "SMBChargerMode" },
308 { 0x85, 0x05, "SMBChargerStatus" },
309 { 0x85, 0x06, "SMBChargerSpecInfo" },
310 { 0x85, 0x07, "SMBSelectorState" },
311 { 0x85, 0x08, "SMBSelectorPresets" },
312 { 0x85, 0x09, "SMBSelectorInfo" },
313 { 0x85, 0x29, "RemainingCapacityLimit" },
314 { 0x85, 0x2c, "CapacityMode" },
315 { 0x85, 0x42, "BelowRemainingCapacityLimit" },
316 { 0x85, 0x44, "Charging" },
317 { 0x85, 0x45, "Discharging" },
318 { 0x85, 0x4b, "NeedReplacement" },
319 { 0x85, 0x66, "RemainingCapacity" },
320 { 0x85, 0x68, "RunTimeToEmpty" },
321 { 0x85, 0x6a, "AverageTimeToFull" },
322 { 0x85, 0x83, "DesignCapacity" },
323 { 0x85, 0x85, "ManufacturerDate" },
324 { 0x85, 0x89, "iDeviceChemistry" },
325 { 0x85, 0x8b, "Rechargable" },
326 { 0x85, 0x8f, "iOEMInformation" },
327 { 0x85, 0x8d, "CapacityGranularity1" },
328 { 0x85, 0xd0, "ACPresent" },
329 /* pages 0xff00 to 0xffff are vendor-specific */
330 { 0xffff, 0, "Vendor-specific-FF" },
331 { 0, 0, NULL }
332};
333
334static void resolv_usage_page(unsigned page) {
335 const struct hid_usage_entry *p;
336
337 for (p = hid_usage_table; p->description; p++)
338 if (p->page == page) {
339 printk("%s", p->description);
340 return;
341 }
342 printk("%04x", page);
343}
344
345static void resolv_usage(unsigned usage) {
346 const struct hid_usage_entry *p;
347
348 resolv_usage_page(usage >> 16);
349 printk(".");
350 for (p = hid_usage_table; p->description; p++)
351 if (p->page == (usage >> 16)) {
352 for(++p; p->description && p->usage != 0; p++)
353 if (p->usage == (usage & 0xffff)) {
354 printk("%s", p->description);
355 return;
356 }
357 break;
358 }
359 printk("%04x", usage & 0xffff);
360}
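The lookup above relies on the caller packing the usage page into the upper 16 bits of the 32-bit usage value; a minimal sketch of that convention (the helper name is made up for illustration):

static unsigned hid_pack_usage(unsigned page, unsigned id)
{
	/* page in bits 31..16, usage ID in bits 15..0 */
	return (page << 16) | (id & 0xffff);
}

/* e.g. hid_pack_usage(0x0001, 0x30) == 0x00010030, which resolv_usage()
 * above would print as "GenericDesktop.X". */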
361
362__inline__ static void tab(int n) {
363 while (n--) printk(" ");
364}
365
366static void hid_dump_field(struct hid_field *field, int n) {
367 int j;
368
369 if (field->physical) {
370 tab(n);
371 printk("Physical(");
372 resolv_usage(field->physical); printk(")\n");
373 }
374 if (field->logical) {
375 tab(n);
376 printk("Logical(");
377 resolv_usage(field->logical); printk(")\n");
378 }
379 tab(n); printk("Usage(%d)\n", field->maxusage);
380 for (j = 0; j < field->maxusage; j++) {
381 tab(n+2);resolv_usage(field->usage[j].hid); printk("\n");
382 }
383 if (field->logical_minimum != field->logical_maximum) {
384 tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
385 tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
386 }
387 if (field->physical_minimum != field->physical_maximum) {
388 tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
389 tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
390 }
391 if (field->unit_exponent) {
392 tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
393 }
394 if (field->unit) {
395 char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
396 char *units[5][8] = {
397 { "None", "None", "None", "None", "None", "None", "None", "None" },
398 { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
399 { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
400 { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
401 { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
402 };
403
404 int i;
405 int sys;
406 __u32 data = field->unit;
407
408 /* First nibble tells us which system we're in. */
409 sys = data & 0xf;
410 data >>= 4;
411
412 if(sys > 4) {
413 tab(n); printk("Unit(Invalid)\n");
414 }
415 else {
416 int earlier_unit = 0;
417
418 tab(n); printk("Unit(%s : ", systems[sys]);
419
420 for (i=1 ; i<sizeof(__u32)*2 ; i++) {
421 char nibble = data & 0xf;
422 data >>= 4;
423 if (nibble != 0) {
424 if(earlier_unit++ > 0)
425 printk("*");
426 printk("%s", units[sys][i]);
427 if(nibble != 1) {
428 /* This is a _signed_ nibble(!) */
429
430 int val = nibble & 0x7;
431 if(nibble & 0x08)
432 val = -((0x7 & ~val) +1);
433 printk("^%d", val);
434 }
435 }
436 }
437 printk(")\n");
438 }
439 }
440 tab(n); printk("Report Size(%u)\n", field->report_size);
441 tab(n); printk("Report Count(%u)\n", field->report_count);
442 tab(n); printk("Report Offset(%u)\n", field->report_offset);
443
444 tab(n); printk("Flags( ");
445 j = field->flags;
446 printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
447 printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
448 printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
449 printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
450 printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
451 printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPrefferedState " : "");
452 printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
453 printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
454 printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
455 printk(")\n");
456}
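The unit-exponent nibbles decoded in the loop above are 4-bit two's-complement values; a hypothetical helper performing the same conversion more directly (equivalent to the open-coded arithmetic above, not part of this file):

static int hid_unit_nibble_exponent(unsigned nibble)
{
	nibble &= 0xf;
	/* 0x0..0x7 map to 0..7, 0x8..0xf map to -8..-1 */
	return (nibble & 0x8) ? (int)nibble - 16 : (int)nibble;
}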
457
458static void __attribute__((unused)) hid_dump_device(struct hid_device *device) {
459 struct hid_report_enum *report_enum;
460 struct hid_report *report;
461 struct list_head *list;
462 unsigned i,k;
463 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
464
465 for (i = 0; i < HID_REPORT_TYPES; i++) {
466 report_enum = device->report_enum + i;
467 list = report_enum->report_list.next;
468 while (list != &report_enum->report_list) {
469 report = (struct hid_report *) list;
470 tab(2);
471 printk("%s", table[i]);
472 if (report->id)
473 printk("(%d)", report->id);
474 printk("[%s]", table[report->type]);
475 printk("\n");
476 for (k = 0; k < report->maxfield; k++) {
477 tab(4);
478 printk("Field(%d)\n", k);
479 hid_dump_field(report->field[k], 6);
480 }
481 list = list->next;
482 }
483 }
484}
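hid_dump_device() above walks the report list with raw struct list_head pointers, apparently relying on struct hid_report embedding its list head as its first member. The same walk written with the list_for_each_entry() helper would look roughly like this (sketch only, assuming the member is named 'list'):

struct hid_report *r;

list_for_each_entry(r, &report_enum->report_list, list) {
	tab(2);
	printk("%s", table[i]);
	if (r->id)
		printk("(%d)", r->id);
	printk("[%s]\n", table[r->type]);
}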
485
486static void __attribute__((unused)) hid_dump_input(struct hid_usage *usage, __s32 value) {
487 printk("hid-debug: input ");
488 resolv_usage(usage->hid);
489 printk(" = %d\n", value);
490}
491
492
493static char *events[EV_MAX + 1] = {
494 [EV_SYN] = "Sync", [EV_KEY] = "Key",
495 [EV_REL] = "Relative", [EV_ABS] = "Absolute",
496 [EV_MSC] = "Misc", [EV_LED] = "LED",
497 [EV_SND] = "Sound", [EV_REP] = "Repeat",
498 [EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
499 [EV_FF_STATUS] = "ForceFeedbackStatus",
500};
501
502static char *syncs[2] = {
503 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
504};
505static char *keys[KEY_MAX + 1] = {
506 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
507 [KEY_1] = "1", [KEY_2] = "2",
508 [KEY_3] = "3", [KEY_4] = "4",
509 [KEY_5] = "5", [KEY_6] = "6",
510 [KEY_7] = "7", [KEY_8] = "8",
511 [KEY_9] = "9", [KEY_0] = "0",
512 [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal",
513 [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab",
514 [KEY_Q] = "Q", [KEY_W] = "W",
515 [KEY_E] = "E", [KEY_R] = "R",
516 [KEY_T] = "T", [KEY_Y] = "Y",
517 [KEY_U] = "U", [KEY_I] = "I",
518 [KEY_O] = "O", [KEY_P] = "P",
519 [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace",
520 [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl",
521 [KEY_A] = "A", [KEY_S] = "S",
522 [KEY_D] = "D", [KEY_F] = "F",
523 [KEY_G] = "G", [KEY_H] = "H",
524 [KEY_J] = "J", [KEY_K] = "K",
525 [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon",
526 [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave",
527 [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash",
528 [KEY_Z] = "Z", [KEY_X] = "X",
529 [KEY_C] = "C", [KEY_V] = "V",
530 [KEY_B] = "B", [KEY_N] = "N",
531 [KEY_M] = "M", [KEY_COMMA] = "Comma",
532 [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash",
533 [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk",
534 [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space",
535 [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1",
536 [KEY_F2] = "F2", [KEY_F3] = "F3",
537 [KEY_F4] = "F4", [KEY_F5] = "F5",
538 [KEY_F6] = "F6", [KEY_F7] = "F7",
539 [KEY_F8] = "F8", [KEY_F9] = "F9",
540 [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock",
541 [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7",
542 [KEY_KP8] = "KP8", [KEY_KP9] = "KP9",
543 [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4",
544 [KEY_KP5] = "KP5", [KEY_KP6] = "KP6",
545 [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1",
546 [KEY_KP2] = "KP2", [KEY_KP3] = "KP3",
547 [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot",
548 [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd",
549 [KEY_F11] = "F11", [KEY_F12] = "F12",
550 [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana",
551 [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan",
552 [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan",
553 [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter",
554 [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash",
555 [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt",
556 [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home",
557 [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp",
558 [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right",
559 [KEY_END] = "End", [KEY_DOWN] = "Down",
560 [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert",
561 [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro",
562 [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown",
563 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
564 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
565 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
566 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
567 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
568 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
569 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
570 [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo",
571 [KEY_FRONT] = "Front", [KEY_COPY] = "Copy",
572 [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste",
573 [KEY_FIND] = "Find", [KEY_CUT] = "Cut",
574 [KEY_HELP] = "Help", [KEY_MENU] = "Menu",
575 [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup",
576 [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp",
577 [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile",
578 [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
579 [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
580 [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
581 [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
582 [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
583 [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
584 [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
585 [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD",
586 [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong",
587 [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong",
588 [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record",
589 [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone",
590 [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config",
591 [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh",
592 [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move",
593 [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp",
594 [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
595 [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
596 [KEY_REDO] = "Redo", [KEY_F13] = "F13",
597 [KEY_F14] = "F14", [KEY_F15] = "F15",
598 [KEY_F16] = "F16", [KEY_F17] = "F17",
599 [KEY_F18] = "F18", [KEY_F19] = "F19",
600 [KEY_F20] = "F20", [KEY_F21] = "F21",
601 [KEY_F22] = "F22", [KEY_F23] = "F23",
602 [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
603 [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
604 [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
605 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
606 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
607 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
608 [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound",
609 [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email",
610 [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search",
611 [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance",
612 [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop",
613 [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
614 [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
615 [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
616 [BTN_0] = "Btn0", [BTN_1] = "Btn1",
617 [BTN_2] = "Btn2", [BTN_3] = "Btn3",
618 [BTN_4] = "Btn4", [BTN_5] = "Btn5",
619 [BTN_6] = "Btn6", [BTN_7] = "Btn7",
620 [BTN_8] = "Btn8", [BTN_9] = "Btn9",
621 [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn",
622 [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn",
623 [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn",
624 [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn",
625 [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn",
626 [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn",
627 [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn",
628 [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2",
629 [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4",
630 [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6",
631 [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA",
632 [BTN_B] = "BtnB", [BTN_C] = "BtnC",
633 [BTN_X] = "BtnX", [BTN_Y] = "BtnY",
634 [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL",
635 [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2",
636 [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
637 [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
638 [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
639 [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
640 [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
641 [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
642 [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
643 [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
644 [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
645 [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
646 [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
647 [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
648 [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
649 [KEY_OPTION] = "Option", [KEY_INFO] = "Info",
650 [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor",
651 [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program",
652 [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites",
653 [KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
654 [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
655 [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
656 [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
657 [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
658 [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
659 [KEY_TV] = "TV", [KEY_TV2] = "TV2",
660 [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
661 [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
662 [KEY_CD] = "CD", [KEY_TAPE] = "Tape",
663 [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner",
664 [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text",
665 [KEY_DVD] = "DVD", [KEY_AUX] = "Aux",
666 [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio",
667 [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory",
668 [KEY_LIST] = "List", [KEY_MEMO] = "Memo",
669 [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red",
670 [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow",
671 [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp",
672 [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First",
673 [KEY_LAST] = "Last", [KEY_AB] = "AB",
674 [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart",
675 [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle",
676 [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous",
677 [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN",
678 [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL",
679 [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine",
680 [KEY_DEL_LINE] = "DeleteLine",
681 [KEY_SEND] = "Send", [KEY_REPLY] = "Reply",
682 [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save",
683 [KEY_DOCUMENTS] = "Documents",
684 [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC",
685 [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2",
686 [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D",
687 [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F",
688 [KEY_FN_S] = "Fn+S",
689 [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2",
690 [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4",
691 [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6",
692 [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8",
693 [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10",
694 [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12",
695 [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle",
696 [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
697 [KEY_KBDILLUMUP] = "KbdIlluminationUp",
698 [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
699};
700
701static char *relatives[REL_MAX + 1] = {
702 [REL_X] = "X", [REL_Y] = "Y",
703 [REL_Z] = "Z", [REL_RX] = "Rx",
704 [REL_RY] = "Ry", [REL_RZ] = "Rz",
705 [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
706 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
707};
708
709static char *absolutes[ABS_MAX + 1] = {
710 [ABS_X] = "X", [ABS_Y] = "Y",
711 [ABS_Z] = "Z", [ABS_RX] = "Rx",
712 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
713 [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder",
714 [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas",
715 [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X",
716 [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X",
717 [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X",
718 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
719 [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure",
720 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
721 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
722 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
723};
 724
 725static char *misc[MSC_MAX + 1] = {
 726	[MSC_SERIAL] = "Serial",		[MSC_PULSELED] = "Pulseled",
 727	[MSC_GESTURE] = "Gesture",		[MSC_RAW] = "RawData"
 728};
 729
 730static char *leds[LED_MAX + 1] = {
 731	[LED_NUML] = "NumLock",		[LED_CAPSL] = "CapsLock",
 732	[LED_SCROLLL] = "ScrollLock",	[LED_COMPOSE] = "Compose",
 733	[LED_KANA] = "Kana",		[LED_SLEEP] = "Sleep",
 734	[LED_SUSPEND] = "Suspend",	[LED_MUTE] = "Mute",
 735	[LED_MISC] = "Misc",
 736};
 737
 738static char *repeats[REP_MAX + 1] = {
 739	[REP_DELAY] = "Delay",		[REP_PERIOD] = "Period"
 740};
 741
 742static char *sounds[SND_MAX + 1] = {
 743	[SND_CLICK] = "Click",		[SND_BELL] = "Bell",
 744	[SND_TONE] = "Tone"
 745};
 746
 747static char **names[EV_MAX + 1] = {
 748	[EV_SYN] = syncs,			[EV_KEY] = keys,
 749	[EV_REL] = relatives,		[EV_ABS] = absolutes,
 750	[EV_MSC] = misc,			[EV_LED] = leds,
 751	[EV_SND] = sounds,			[EV_REP] = repeats,
 752};
 753
 754static void __attribute__((unused)) resolv_event(__u8 type, __u16 code) {
 755
 756	printk("%s.%s", events[type] ? events[type] : "?",
 757		names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
 758}

  26
  27void hid_dump_input(struct hid_usage *, __s32);
  28void hid_dump_device(struct hid_device *);
  29void hid_dump_field(struct hid_field *, int);
  30void hid_resolv_usage(unsigned);
  31void hid_resolv_event(__u8, __u16);
  32
  33#else
  34
  35#define hid_dump_input(a,b)	do { } while (0)
  36#define hid_dump_device(c)	do { } while (0)
  37#define hid_dump_field(a,b)	do { } while (0)
  38#define hid_resolv_usage(a)	do { } while (0)
  39#define hid_resolv_event(a,b)	do { } while (0)
  40
  41#endif /* CONFIG_HID_DEBUG */
  42
  43
  44#endif
  45
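For reference, a couple of hypothetical call sites showing what the two-level lookup in resolv_event() above produces (first indexed by event type, then by event code):

resolv_event(EV_KEY, KEY_A);       /* prints "Key.A" */
resolv_event(EV_REL, REL_WHEEL);   /* prints "Relative.Wheel" */
resolv_event(EV_LED, LED_CAPSL);   /* prints "LED.CapsLock" */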
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 342b4e639acb..93173fe45634 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -264,6 +264,8 @@ struct hid_item {
264#define HID_QUIRK_INVERT_HWHEEL 0x00004000 264#define HID_QUIRK_INVERT_HWHEEL 0x00004000
265#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00008000 265#define HID_QUIRK_POWERBOOK_ISO_KEYBOARD 0x00008000
266#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00010000 266#define HID_QUIRK_BAD_RELATIVE_KEYS 0x00010000
267#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00020000
268#define HID_QUIRK_IGNORE_MOUSE 0x00040000
267 269
268/* 270/*
269 * This is the global environment of the parser. This information is 271 * This is the global environment of the parser. This information is
@@ -430,8 +432,8 @@ struct hid_device { /* device report descriptor */
430 432
431 /* device-specific function pointers */ 433 /* device-specific function pointers */
432 int (*hidinput_input_event) (struct input_dev *, unsigned int, unsigned int, int); 434 int (*hidinput_input_event) (struct input_dev *, unsigned int, unsigned int, int);
433 int (*hidinput_open) (struct input_dev *); 435 int (*hid_open) (struct hid_device *);
434 void (*hidinput_close) (struct input_dev *); 436 void (*hid_close) (struct hid_device *);
435 437
436 /* hiddev event handler */ 438 /* hiddev event handler */
437 void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field, 439 void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field,
@@ -471,16 +473,6 @@ struct hid_descriptor {
471 struct hid_class_descriptor desc[1]; 473 struct hid_class_descriptor desc[1];
472} __attribute__ ((packed)); 474} __attribute__ ((packed));
473 475
474#ifdef DEBUG
475#include "hid-debug.h"
476#else
477#define hid_dump_input(a,b) do { } while (0)
478#define hid_dump_device(c) do { } while (0)
479#define hid_dump_field(a,b) do { } while (0)
480#define resolv_usage(a) do { } while (0)
481#define resolv_event(a,b) do { } while (0)
482#endif
483
484/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 476/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
485/* We ignore a few input applications that are not widely used */ 477/* We ignore a few input applications that are not widely used */
486#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) 478#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001))
@@ -503,6 +495,7 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size);
503int hid_ff_init(struct hid_device *hid); 495int hid_ff_init(struct hid_device *hid);
504 496
505int hid_lgff_init(struct hid_device *hid); 497int hid_lgff_init(struct hid_device *hid);
498int hid_plff_init(struct hid_device *hid);
506int hid_tmff_init(struct hid_device *hid); 499int hid_tmff_init(struct hid_device *hid);
507int hid_zpff_init(struct hid_device *hid); 500int hid_zpff_init(struct hid_device *hid);
508#ifdef CONFIG_HID_PID 501#ifdef CONFIG_HID_PID
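The IS_INPUT_APPLICATION() test retained in the hunk above matches application collections by their packed (page << 16 | usage) value; written out as a function it reads roughly as follows (the comments follow the usage table earlier in this patch):

static int is_input_application(unsigned a)
{
	return (a >= 0x00010000 && a <= 0x00010008) ||	/* GenericDesktop, usages 0x00-0x08 (Pointer..MultiAxis) */
	       a == 0x00010080 ||			/* GenericDesktop SystemControl */
	       a == 0x000c0001;				/* Consumer page, usage 0x01 */
}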
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d0e6a5497614..e45712acfac5 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -71,6 +71,7 @@ struct mmc_card {
71#define MMC_STATE_SDCARD (1<<3) /* is an SD card */ 71#define MMC_STATE_SDCARD (1<<3) /* is an SD card */
72#define MMC_STATE_READONLY (1<<4) /* card is read-only */ 72#define MMC_STATE_READONLY (1<<4) /* card is read-only */
73#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */ 73#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */
74#define MMC_STATE_BLOCKADDR (1<<6) /* card uses block-addressing */
74 u32 raw_cid[4]; /* raw card CID */ 75 u32 raw_cid[4]; /* raw card CID */
75 u32 raw_csd[4]; /* raw card CSD */ 76 u32 raw_csd[4]; /* raw card CSD */
76 u32 raw_scr[2]; /* raw card SCR */ 77 u32 raw_scr[2]; /* raw card SCR */
@@ -87,6 +88,7 @@ struct mmc_card {
87#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD) 88#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD)
88#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY) 89#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
89#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED) 90#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
91#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
90 92
91#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 93#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
92#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD) 94#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD)
@@ -94,6 +96,7 @@ struct mmc_card {
94#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD) 96#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD)
95#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) 97#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
96#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) 98#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
99#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
97 100
98#define mmc_card_name(c) ((c)->cid.prod_name) 101#define mmc_card_name(c) ((c)->cid.prod_name)
99#define mmc_card_id(c) ((c)->dev.bus_id) 102#define mmc_card_id(c) ((c)->dev.bus_id)
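A sketch of why the new MMC_STATE_BLOCKADDR flag is useful (an assumption based on the field comment, not code from this patch): block-addressed cards take a 512-byte block number as the data command argument, while byte-addressed cards take a byte offset.

static u32 mmc_data_arg(struct mmc_card *card, unsigned long block)
{
	return mmc_card_blockaddr(card) ? block : block * 512;
}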
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c15ae1986b98..913e5752569f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -92,8 +92,10 @@ struct mmc_host {
92 unsigned int max_seg_size; /* see blk_queue_max_segment_size */ 92 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
93 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */ 93 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */
94 unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */ 94 unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */
95 unsigned short max_sectors; /* see blk_queue_max_sectors */
96 unsigned short unused; 95 unsigned short unused;
96 unsigned int max_req_size; /* maximum number of bytes in one req */
97 unsigned int max_blk_size; /* maximum size of one mmc block */
98 unsigned int max_blk_count; /* maximum number of blocks in one req */
97 99
98 /* private data */ 100 /* private data */
99 struct mmc_ios ios; /* current io bus settings */ 101 struct mmc_ios ios; /* current io bus settings */
@@ -106,8 +108,9 @@ struct mmc_host {
106 struct list_head cards; /* devices attached to this host */ 108 struct list_head cards; /* devices attached to this host */
107 109
108 wait_queue_head_t wq; 110 wait_queue_head_t wq;
109 spinlock_t lock; /* card_busy lock */ 111 spinlock_t lock; /* claimed lock */
110 struct mmc_card *card_busy; /* the MMC card claiming host */ 112 unsigned int claimed:1; /* host exclusively claimed */
113
111 struct mmc_card *card_selected; /* the selected MMC card */ 114 struct mmc_card *card_selected; /* the selected MMC card */
112 115
113 struct delayed_work detect; 116 struct delayed_work detect;
@@ -126,6 +129,7 @@ static inline void *mmc_priv(struct mmc_host *host)
126} 129}
127 130
128#define mmc_dev(x) ((x)->parent) 131#define mmc_dev(x) ((x)->parent)
132#define mmc_classdev(x) (&(x)->class_dev)
129#define mmc_hostname(x) ((x)->class_dev.bus_id) 133#define mmc_hostname(x) ((x)->class_dev.bus_id)
130 134
131extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 135extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
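The three new request limits are presumably meant to be combined as follows (a sketch of the usual interpretation, not taken from this patch): one request carries at most max_blk_count blocks of at most max_blk_size bytes, and never more than max_req_size bytes overall.

static unsigned int mmc_max_transfer_bytes(struct mmc_host *host)
{
	unsigned int bytes = host->max_blk_size * host->max_blk_count;

	return min(bytes, host->max_req_size);
}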
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index bcf24909d677..cdc54be804f1 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -43,6 +43,7 @@ struct mmc_command {
43#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC) 43#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
44#define MMC_RSP_R3 (MMC_RSP_PRESENT) 44#define MMC_RSP_R3 (MMC_RSP_PRESENT)
45#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE) 45#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
46#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
46 47
47#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE)) 48#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
48 49
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index 2dce60c43f4b..c90b6768329d 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -79,9 +79,12 @@
79#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */ 79#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
80 80
81/* SD commands type argument response */ 81/* SD commands type argument response */
82 /* class 8 */ 82 /* class 0 */
83/* This is basically the same command as for MMC with some quirks. */ 83/* This is basically the same command as for MMC with some quirks. */
84#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */ 84#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */
85#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
86
87 /* class 10 */
85#define SD_SWITCH 6 /* adtc [31:0] See below R1 */ 88#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
86 89
87 /* Application commands */ 90 /* Application commands */
@@ -115,6 +118,14 @@
115 */ 118 */
116 119
117/* 120/*
121 * SD_SEND_IF_COND argument format:
122 *
123 * [31:12] Reserved (0)
124 * [11:8] Host Voltage Supply Flags
125 * [7:0] Check Pattern (0xAA)
126 */
127
128/*
118 MMC status in R1 129 MMC status in R1
119 Type 130 Type
120 e : error bit 131 e : error bit
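A sketch of issuing CMD8 following the SD_SEND_IF_COND argument layout documented above: bits [11:8] carry the host voltage window (0x1 conventionally meaning 2.7-3.6V) and bits [7:0] the 0xAA check pattern. The command-structure and MMC_CMD_BCR flag usage here is an assumption, not code from this patch.

struct mmc_command cmd = {
	.opcode	= SD_SEND_IF_COND,
	.arg	= (0x1 << 8) | 0xAA,		/* voltage window | check pattern */
	.flags	= MMC_RSP_R7 | MMC_CMD_BCR,	/* broadcast command with R7 response */
};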
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3d1d21035dec..ccd706f876ec 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -735,9 +735,11 @@
735#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 735#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
736#define PCI_DEVICE_ID_TI_4450 0x8011 736#define PCI_DEVICE_ID_TI_4450 0x8011
737#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 737#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
738#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
738#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 739#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
739#define PCI_DEVICE_ID_TI_X515 0x8036 740#define PCI_DEVICE_ID_TI_X515 0x8036
740#define PCI_DEVICE_ID_TI_XX12 0x8039 741#define PCI_DEVICE_ID_TI_XX12 0x8039
742#define PCI_DEVICE_ID_TI_XX12_FM 0x803b
741#define PCI_DEVICE_ID_TI_1130 0xac12 743#define PCI_DEVICE_ID_TI_1130 0xac12
742#define PCI_DEVICE_ID_TI_1031 0xac13 744#define PCI_DEVICE_ID_TI_1031 0xac13
743#define PCI_DEVICE_ID_TI_1131 0xac15 745#define PCI_DEVICE_ID_TI_1131 0xac15
@@ -765,6 +767,7 @@
765#define PCI_DEVICE_ID_TI_1510 0xac56 767#define PCI_DEVICE_ID_TI_1510 0xac56
766#define PCI_DEVICE_ID_TI_X620 0xac8d 768#define PCI_DEVICE_ID_TI_X620 0xac8d
767#define PCI_DEVICE_ID_TI_X420 0xac8e 769#define PCI_DEVICE_ID_TI_X420 0xac8e
770#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
768 771
769#define PCI_VENDOR_ID_SONY 0x104d 772#define PCI_VENDOR_ID_SONY 0x104d
770 773
@@ -1971,6 +1974,7 @@
1971#define PCI_DEVICE_ID_TOPIC_TP560 0x0000 1974#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
1972 1975
1973#define PCI_VENDOR_ID_ENE 0x1524 1976#define PCI_VENDOR_ID_ENE 0x1524
1977#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
1974#define PCI_DEVICE_ID_ENE_1211 0x1211 1978#define PCI_DEVICE_ID_ENE_1211 0x1211
1975#define PCI_DEVICE_ID_ENE_1225 0x1225 1979#define PCI_DEVICE_ID_ENE_1225 0x1225
1976#define PCI_DEVICE_ID_ENE_1410 0x1410 1980#define PCI_DEVICE_ID_ENE_1410 0x1410
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
index dfb8052eee5e..3deb0a6c1370 100644
--- a/include/linux/tifm.h
+++ b/include/linux/tifm.h
@@ -17,7 +17,7 @@
17#include <linux/wait.h> 17#include <linux/wait.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/scatterlist.h> 20#include <linux/kthread.h>
21 21
22/* Host registers (relative to pci base address): */ 22/* Host registers (relative to pci base address): */
23enum { 23enum {
@@ -62,11 +62,10 @@ enum {
62 62
63 63
64#define TIFM_IRQ_ENABLE 0x80000000 64#define TIFM_IRQ_ENABLE 0x80000000
65#define TIFM_IRQ_SOCKMASK 0x00000001 65#define TIFM_IRQ_SOCKMASK(x) (x)
66#define TIFM_IRQ_CARDMASK 0x00000100 66#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
67#define TIFM_IRQ_FIFOMASK 0x00010000 67#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
68#define TIFM_IRQ_SETALL 0xffffffff 68#define TIFM_IRQ_SETALL 0xffffffff
69#define TIFM_IRQ_SETALLSOCK 0x0000000f
70 69
71#define TIFM_CTRL_LED 0x00000040 70#define TIFM_CTRL_LED 0x00000040
72#define TIFM_CTRL_FAST_CLK 0x00000100 71#define TIFM_CTRL_FAST_CLK 0x00000100
@@ -89,10 +88,9 @@ struct tifm_dev {
89 char __iomem *addr; 88 char __iomem *addr;
90 spinlock_t lock; 89 spinlock_t lock;
91 tifm_media_id media_id; 90 tifm_media_id media_id;
92 char wq_name[KOBJ_NAME_LEN]; 91 unsigned int socket_id;
93 struct workqueue_struct *wq;
94 92
95 unsigned int (*signal_irq)(struct tifm_dev *sock, 93 void (*signal_irq)(struct tifm_dev *sock,
96 unsigned int sock_irq_status); 94 unsigned int sock_irq_status);
97 95
98 struct tifm_driver *drv; 96 struct tifm_driver *drv;
@@ -103,24 +101,23 @@ struct tifm_driver {
103 tifm_media_id *id_table; 101 tifm_media_id *id_table;
104 int (*probe)(struct tifm_dev *dev); 102 int (*probe)(struct tifm_dev *dev);
105 void (*remove)(struct tifm_dev *dev); 103 void (*remove)(struct tifm_dev *dev);
104 int (*suspend)(struct tifm_dev *dev,
105 pm_message_t state);
106 int (*resume)(struct tifm_dev *dev);
106 107
107 struct device_driver driver; 108 struct device_driver driver;
108}; 109};
109 110
110struct tifm_adapter { 111struct tifm_adapter {
111 char __iomem *addr; 112 char __iomem *addr;
112 unsigned int irq_status;
113 unsigned int insert_mask;
114 unsigned int remove_mask;
115 spinlock_t lock; 113 spinlock_t lock;
114 unsigned int irq_status;
115 unsigned int socket_change_set;
116 wait_queue_head_t change_set_notify;
116 unsigned int id; 117 unsigned int id;
117 unsigned int max_sockets; 118 unsigned int num_sockets;
118 char wq_name[KOBJ_NAME_LEN];
119 unsigned int inhibit_new_cards;
120 struct workqueue_struct *wq;
121 struct work_struct media_inserter;
122 struct work_struct media_remover;
123 struct tifm_dev **sockets; 119 struct tifm_dev **sockets;
120 struct task_struct *media_switcher;
124 struct class_device cdev; 121 struct class_device cdev;
125 struct device *dev; 122 struct device *dev;
126 123
@@ -130,9 +127,9 @@ struct tifm_adapter {
130struct tifm_adapter *tifm_alloc_adapter(void); 127struct tifm_adapter *tifm_alloc_adapter(void);
131void tifm_free_device(struct device *dev); 128void tifm_free_device(struct device *dev);
132void tifm_free_adapter(struct tifm_adapter *fm); 129void tifm_free_adapter(struct tifm_adapter *fm);
133int tifm_add_adapter(struct tifm_adapter *fm); 130int tifm_add_adapter(struct tifm_adapter *fm, int (*mediathreadfn)(void *data));
134void tifm_remove_adapter(struct tifm_adapter *fm); 131void tifm_remove_adapter(struct tifm_adapter *fm);
135struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id); 132struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm);
136int tifm_register_driver(struct tifm_driver *drv); 133int tifm_register_driver(struct tifm_driver *drv);
137void tifm_unregister_driver(struct tifm_driver *drv); 134void tifm_unregister_driver(struct tifm_driver *drv);
138void tifm_eject(struct tifm_dev *sock); 135void tifm_eject(struct tifm_dev *sock);
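With the interrupt mask macros now parameterized, a caller presumably builds a per-socket bitmap before shifting it into the card or FIFO field; a sketch (the register name and usage pattern are assumptions, not part of this header):

/* enable card-change interrupts for one socket */
writel(TIFM_IRQ_ENABLE | TIFM_IRQ_CARDMASK(1 << sock->socket_id),
       fm->addr + FM_SET_INTERRUPT_ENABLE);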
diff --git a/include/rdma/ib_user_mad.h b/include/rdma/ib_user_mad.h
index 44537aa32e62..d66b15ea82c4 100644
--- a/include/rdma/ib_user_mad.h
+++ b/include/rdma/ib_user_mad.h
@@ -98,7 +98,7 @@ struct ib_user_mad_hdr {
98 */ 98 */
99struct ib_user_mad { 99struct ib_user_mad {
100 struct ib_user_mad_hdr hdr; 100 struct ib_user_mad_hdr hdr;
101 __u8 data[0]; 101 __u64 data[0];
102}; 102};
103 103
104/** 104/**
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0bfa3328d686..765589f4d166 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -45,6 +45,7 @@
45#include <linux/device.h> 45#include <linux/device.h>
46#include <linux/mm.h> 46#include <linux/mm.h>
47#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
48#include <linux/kref.h>
48 49
49#include <asm/atomic.h> 50#include <asm/atomic.h>
50#include <asm/scatterlist.h> 51#include <asm/scatterlist.h>
@@ -419,8 +420,8 @@ struct ib_wc {
419 enum ib_wc_opcode opcode; 420 enum ib_wc_opcode opcode;
420 u32 vendor_err; 421 u32 vendor_err;
421 u32 byte_len; 422 u32 byte_len;
423 struct ib_qp *qp;
422 __be32 imm_data; 424 __be32 imm_data;
423 u32 qp_num;
424 u32 src_qp; 425 u32 src_qp;
425 int wc_flags; 426 int wc_flags;
426 u16 pkey_index; 427 u16 pkey_index;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eefd..50a438010182 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Dynamic DMA mapping support. 2 * Dynamic DMA mapping support.
3 * 3 *
4 * This implementation is for IA-64 and EM64T platforms that do not support 4 * This implementation is a fallback for platforms that do not support
5 * I/O TLBs (aka DMA address translation hardware). 5 * I/O TLBs (aka DMA address translation hardware).
6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> 6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> 7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -28,6 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/dma.h> 29#include <asm/dma.h>
30#include <asm/scatterlist.h> 30#include <asm/scatterlist.h>
31#include <asm/swiotlb.h>
31 32
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/bootmem.h> 34#include <linux/bootmem.h>
@@ -35,8 +36,10 @@
35#define OFFSET(val,align) ((unsigned long) \ 36#define OFFSET(val,align) ((unsigned long) \
36 ( (val) & ( (align) - 1))) 37 ( (val) & ( (align) - 1)))
37 38
39#ifndef SG_ENT_VIRT_ADDRESS
38#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 40#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
39#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) 41#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
42#endif
40 43
41/* 44/*
42 * Maximum allowable number of contiguous slabs to map, 45 * Maximum allowable number of contiguous slabs to map,
@@ -101,13 +104,25 @@ static unsigned int io_tlb_index;
101 * We need to save away the original address corresponding to a mapped entry 104 * We need to save away the original address corresponding to a mapped entry
102 * for the sync operations. 105 * for the sync operations.
103 */ 106 */
104static unsigned char **io_tlb_orig_addr; 107#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
108typedef char *io_tlb_addr_t;
109#define swiotlb_orig_addr_null(buffer) (!(buffer))
110#define ptr_to_io_tlb_addr(ptr) (ptr)
111#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
112#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
113#endif
114static io_tlb_addr_t *io_tlb_orig_addr;
105 115
106/* 116/*
107 * Protect the above data structures in the map and unmap calls 117 * Protect the above data structures in the map and unmap calls
108 */ 118 */
109static DEFINE_SPINLOCK(io_tlb_lock); 119static DEFINE_SPINLOCK(io_tlb_lock);
110 120
121#ifdef SWIOTLB_EXTRA_VARIABLES
122SWIOTLB_EXTRA_VARIABLES;
123#endif
124
125#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
111static int __init 126static int __init
112setup_io_tlb_npages(char *str) 127setup_io_tlb_npages(char *str)
113{ 128{
@@ -122,30 +137,50 @@ setup_io_tlb_npages(char *str)
122 swiotlb_force = 1; 137 swiotlb_force = 1;
123 return 1; 138 return 1;
124} 139}
140#endif
125__setup("swiotlb=", setup_io_tlb_npages); 141__setup("swiotlb=", setup_io_tlb_npages);
126/* make io_tlb_overflow tunable too? */ 142/* make io_tlb_overflow tunable too? */
127 143
144#ifndef swiotlb_adjust_size
145#define swiotlb_adjust_size(size) ((void)0)
146#endif
147
148#ifndef swiotlb_adjust_seg
149#define swiotlb_adjust_seg(start, size) ((void)0)
150#endif
151
152#ifndef swiotlb_print_info
153#define swiotlb_print_info(bytes) \
154 printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
155 "0x%lx\n", bytes >> 20, \
156 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
157#endif
158
128/* 159/*
129 * Statically reserve bounce buffer space and initialize bounce buffer data 160 * Statically reserve bounce buffer space and initialize bounce buffer data
130 * structures for the software IO TLB used to implement the DMA API. 161 * structures for the software IO TLB used to implement the DMA API.
131 */ 162 */
132void 163void __init
133swiotlb_init_with_default_size (size_t default_size) 164swiotlb_init_with_default_size(size_t default_size)
134{ 165{
135 unsigned long i; 166 unsigned long i, bytes;
136 167
137 if (!io_tlb_nslabs) { 168 if (!io_tlb_nslabs) {
138 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); 169 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
139 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); 170 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
140 } 171 }
172 swiotlb_adjust_size(io_tlb_nslabs);
173 swiotlb_adjust_size(io_tlb_overflow);
174
175 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
141 176
142 /* 177 /*
143 * Get IO TLB memory from the low pages 178 * Get IO TLB memory from the low pages
144 */ 179 */
145 io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 180 io_tlb_start = alloc_bootmem_low_pages(bytes);
146 if (!io_tlb_start) 181 if (!io_tlb_start)
147 panic("Cannot allocate SWIOTLB buffer"); 182 panic("Cannot allocate SWIOTLB buffer");
148 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 183 io_tlb_end = io_tlb_start + bytes;
149 184
150 /* 185 /*
151 * Allocate and initialize the free list array. This array is used 186 * Allocate and initialize the free list array. This array is used
@@ -153,34 +188,45 @@ swiotlb_init_with_default_size (size_t default_size)
153 * between io_tlb_start and io_tlb_end. 188 * between io_tlb_start and io_tlb_end.
154 */ 189 */
155 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); 190 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
156 for (i = 0; i < io_tlb_nslabs; i++) 191 for (i = 0; i < io_tlb_nslabs; i++) {
192 if ( !(i % IO_TLB_SEGSIZE) )
193 swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
194 IO_TLB_SEGSIZE << IO_TLB_SHIFT);
157 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 195 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
196 }
158 io_tlb_index = 0; 197 io_tlb_index = 0;
159 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); 198 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
160 199
161 /* 200 /*
162 * Get the overflow emergency buffer 201 * Get the overflow emergency buffer
163 */ 202 */
164 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 203 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
165 printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", 204 if (!io_tlb_overflow_buffer)
166 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); 205 panic("Cannot allocate SWIOTLB overflow buffer!\n");
206 swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
207
208 swiotlb_print_info(bytes);
167} 209}
210#ifndef __swiotlb_init_with_default_size
211#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
212#endif
168 213
169void 214void __init
170swiotlb_init (void) 215swiotlb_init(void)
171{ 216{
172 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 217 __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
173} 218}
174 219
220#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
175/* 221/*
176 * Systems with larger DMA zones (those that don't support ISA) can 222 * Systems with larger DMA zones (those that don't support ISA) can
177 * initialize the swiotlb later using the slab allocator if needed. 223 * initialize the swiotlb later using the slab allocator if needed.
178 * This should be just like above, but with some error catching. 224 * This should be just like above, but with some error catching.
179 */ 225 */
180int 226int
181swiotlb_late_init_with_default_size (size_t default_size) 227swiotlb_late_init_with_default_size(size_t default_size)
182{ 228{
183 unsigned long i, req_nslabs = io_tlb_nslabs; 229 unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
184 unsigned int order; 230 unsigned int order;
185 231
186 if (!io_tlb_nslabs) { 232 if (!io_tlb_nslabs) {
@@ -191,8 +237,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
191 /* 237 /*
192 * Get IO TLB memory from the low pages 238 * Get IO TLB memory from the low pages
193 */ 239 */
194 order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 240 order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
195 io_tlb_nslabs = SLABS_PER_PAGE << order; 241 io_tlb_nslabs = SLABS_PER_PAGE << order;
242 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
196 243
197 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 244 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
198 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, 245 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +252,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
205 if (!io_tlb_start) 252 if (!io_tlb_start)
206 goto cleanup1; 253 goto cleanup1;
207 254
208 if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { 255 if (order != get_order(bytes)) {
209 printk(KERN_WARNING "Warning: only able to allocate %ld MB " 256 printk(KERN_WARNING "Warning: only able to allocate %ld MB "
210 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 257 "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
211 io_tlb_nslabs = SLABS_PER_PAGE << order; 258 io_tlb_nslabs = SLABS_PER_PAGE << order;
259 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
212 } 260 }
213 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 261 io_tlb_end = io_tlb_start + bytes;
214 memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 262 memset(io_tlb_start, 0, bytes);
215 263
216 /* 264 /*
217 * Allocate and initialize the free list array. This array is used 265 * Allocate and initialize the free list array. This array is used
@@ -227,12 +275,12 @@ swiotlb_late_init_with_default_size (size_t default_size)
227 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 275 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
228 io_tlb_index = 0; 276 io_tlb_index = 0;
229 277
230 io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, 278 io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
231 get_order(io_tlb_nslabs * sizeof(char *))); 279 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
232 if (!io_tlb_orig_addr) 280 if (!io_tlb_orig_addr)
233 goto cleanup3; 281 goto cleanup3;
234 282
235 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); 283 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
236 284
237 /* 285 /*
238 * Get the overflow emergency buffer 286 * Get the overflow emergency buffer
@@ -242,29 +290,29 @@ swiotlb_late_init_with_default_size (size_t default_size)
242 if (!io_tlb_overflow_buffer) 290 if (!io_tlb_overflow_buffer)
243 goto cleanup4; 291 goto cleanup4;
244 292
245 printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " 293 swiotlb_print_info(bytes);
246 "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
247 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
248 294
249 return 0; 295 return 0;
250 296
251cleanup4: 297cleanup4:
252 free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * 298 free_pages((unsigned long)io_tlb_orig_addr,
253 sizeof(char *))); 299 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
254 io_tlb_orig_addr = NULL; 300 io_tlb_orig_addr = NULL;
255cleanup3: 301cleanup3:
256 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * 302 free_pages((unsigned long)io_tlb_list,
257 sizeof(int))); 303 get_order(io_tlb_nslabs * sizeof(int)));
258 io_tlb_list = NULL; 304 io_tlb_list = NULL;
259 io_tlb_end = NULL;
260cleanup2: 305cleanup2:
306 io_tlb_end = NULL;
261 free_pages((unsigned long)io_tlb_start, order); 307 free_pages((unsigned long)io_tlb_start, order);
262 io_tlb_start = NULL; 308 io_tlb_start = NULL;
263cleanup1: 309cleanup1:
264 io_tlb_nslabs = req_nslabs; 310 io_tlb_nslabs = req_nslabs;
265 return -ENOMEM; 311 return -ENOMEM;
266} 312}
313#endif
267 314
315#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
268static inline int 316static inline int
269address_needs_mapping(struct device *hwdev, dma_addr_t addr) 317address_needs_mapping(struct device *hwdev, dma_addr_t addr)
270{ 318{
@@ -275,11 +323,35 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
275 return (addr & ~mask) != 0; 323 return (addr & ~mask) != 0;
276} 324}
277 325
326static inline int range_needs_mapping(const void *ptr, size_t size)
327{
328 return swiotlb_force;
329}
330
331static inline int order_needs_mapping(unsigned int order)
332{
333 return 0;
334}
335#endif
336
337static void
338__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
339{
340#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
341 if (dir == DMA_TO_DEVICE)
342 memcpy(dma_addr, buffer, size);
343 else
344 memcpy(buffer, dma_addr, size);
345#else
346 __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
347#endif
348}
349
278/* 350/*
279 * Allocates bounce buffer and returns its kernel virtual address. 351 * Allocates bounce buffer and returns its kernel virtual address.
280 */ 352 */
281static void * 353static void *
282map_single(struct device *hwdev, char *buffer, size_t size, int dir) 354map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
283{ 355{
284 unsigned long flags; 356 unsigned long flags;
285 char *dma_addr; 357 char *dma_addr;
@@ -352,7 +424,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
352 */ 424 */
353 io_tlb_orig_addr[index] = buffer; 425 io_tlb_orig_addr[index] = buffer;
354 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 426 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
355 memcpy(dma_addr, buffer, size); 427 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
356 428
357 return dma_addr; 429 return dma_addr;
358} 430}
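The copy-direction handling split across map_single(), unmap_single() and sync_single() can be summarized by a hypothetical helper (a sketch only; the real code calls __sync_single() directly at each site):

static void bounce_copy(io_tlb_addr_t buffer, char *dma_addr,
			size_t size, int dir)
{
	/* CPU buffer -> bounce buffer before the device reads */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
	/* bounce buffer -> CPU buffer after the device has written */
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
}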
@@ -366,17 +438,18 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
366 unsigned long flags; 438 unsigned long flags;
367 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 439 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
368 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 440 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
369 char *buffer = io_tlb_orig_addr[index]; 441 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
370 442
371 /* 443 /*
372 * First, sync the memory before unmapping the entry 444 * First, sync the memory before unmapping the entry
373 */ 445 */
374 if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 446 if (!swiotlb_orig_addr_null(buffer)
447 && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
375 /* 448 /*
376 * bounce... copy the data back into the original buffer * and 449 * bounce... copy the data back into the original buffer * and
377 * delete the bounce buffer. 450 * delete the bounce buffer.
378 */ 451 */
379 memcpy(buffer, dma_addr, size); 452 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
380 453
381 /* 454 /*
382 * Return the buffer to the free list by setting the corresponding 455 * Return the buffer to the free list by setting the corresponding
@@ -409,18 +482,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
409 int dir, int target) 482 int dir, int target)
410{ 483{
411 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 484 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
412 char *buffer = io_tlb_orig_addr[index]; 485 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
413 486
414 switch (target) { 487 switch (target) {
415 case SYNC_FOR_CPU: 488 case SYNC_FOR_CPU:
416 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) 489 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
417 memcpy(buffer, dma_addr, size); 490 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
418 else 491 else
419 BUG_ON(dir != DMA_TO_DEVICE); 492 BUG_ON(dir != DMA_TO_DEVICE);
420 break; 493 break;
421 case SYNC_FOR_DEVICE: 494 case SYNC_FOR_DEVICE:
422 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) 495 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
423 memcpy(dma_addr, buffer, size); 496 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
424 else 497 else
425 BUG_ON(dir != DMA_FROM_DEVICE); 498 BUG_ON(dir != DMA_FROM_DEVICE);
426 break; 499 break;
@@ -429,11 +502,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
429 } 502 }
430} 503}
431 504
505#ifdef SWIOTLB_ARCH_NEED_ALLOC
506
432void * 507void *
433swiotlb_alloc_coherent(struct device *hwdev, size_t size, 508swiotlb_alloc_coherent(struct device *hwdev, size_t size,
434 dma_addr_t *dma_handle, gfp_t flags) 509 dma_addr_t *dma_handle, gfp_t flags)
435{ 510{
436 unsigned long dev_addr; 511 dma_addr_t dev_addr;
437 void *ret; 512 void *ret;
438 int order = get_order(size); 513 int order = get_order(size);
439 514
@@ -444,8 +519,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
444 */ 519 */
445 flags |= GFP_DMA; 520 flags |= GFP_DMA;
446 521
447 ret = (void *)__get_free_pages(flags, order); 522 if (!order_needs_mapping(order))
448 if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) { 523 ret = (void *)__get_free_pages(flags, order);
524 else
525 ret = NULL;
526 if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
449 /* 527 /*
450 * The allocated memory isn't reachable by the device. 528 * The allocated memory isn't reachable by the device.
451 * Fall back on swiotlb_map_single(). 529 * Fall back on swiotlb_map_single().
@@ -465,22 +543,24 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
465 if (swiotlb_dma_mapping_error(handle)) 543 if (swiotlb_dma_mapping_error(handle))
466 return NULL; 544 return NULL;
467 545
468 ret = phys_to_virt(handle); 546 ret = bus_to_virt(handle);
469 } 547 }
470 548
471 memset(ret, 0, size); 549 memset(ret, 0, size);
472 dev_addr = virt_to_phys(ret); 550 dev_addr = virt_to_bus(ret);
473 551
474 /* Confirm address can be DMA'd by device */ 552 /* Confirm address can be DMA'd by device */
475 if (address_needs_mapping(hwdev, dev_addr)) { 553 if (address_needs_mapping(hwdev, dev_addr)) {
476 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n", 554 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
477 (unsigned long long)*hwdev->dma_mask, dev_addr); 555 (unsigned long long)*hwdev->dma_mask,
556 (unsigned long long)dev_addr);
478 panic("swiotlb_alloc_coherent: allocated memory is out of " 557 panic("swiotlb_alloc_coherent: allocated memory is out of "
479 "range for device"); 558 "range for device");
480 } 559 }
481 *dma_handle = dev_addr; 560 *dma_handle = dev_addr;
482 return ret; 561 return ret;
483} 562}
563EXPORT_SYMBOL(swiotlb_alloc_coherent);
484 564
485void 565void
486swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 566swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -493,6 +573,9 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
493 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 573 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
494 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); 574 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
495} 575}
576EXPORT_SYMBOL(swiotlb_free_coherent);
577
578#endif
496 579
497static void 580static void
498swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) 581swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -504,7 +587,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
504 * When the mapping is small enough return a static buffer to limit 587 * When the mapping is small enough return a static buffer to limit
505 * the damage, or panic when the transfer is too big. 588 * the damage, or panic when the transfer is too big.
506 */ 589 */
507 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at " 590 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
508 "device %s\n", size, dev ? dev->bus_id : "?"); 591 "device %s\n", size, dev ? dev->bus_id : "?");
509 592
510 if (size > io_tlb_overflow && do_panic) { 593 if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +608,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
525dma_addr_t 608dma_addr_t
526swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) 609swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
527{ 610{
528 unsigned long dev_addr = virt_to_phys(ptr); 611 dma_addr_t dev_addr = virt_to_bus(ptr);
529 void *map; 612 void *map;
530 613
531 BUG_ON(dir == DMA_NONE); 614 BUG_ON(dir == DMA_NONE);
@@ -534,19 +617,20 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
534 * we can safely return the device addr and not worry about bounce 617 * we can safely return the device addr and not worry about bounce
535 * buffering it. 618 * buffering it.
536 */ 619 */
537 if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) 620 if (!range_needs_mapping(ptr, size)
621 && !address_needs_mapping(hwdev, dev_addr))
538 return dev_addr; 622 return dev_addr;
539 623
540 /* 624 /*
541 * Oh well, have to allocate and map a bounce buffer. 625 * Oh well, have to allocate and map a bounce buffer.
542 */ 626 */
543 map = map_single(hwdev, ptr, size, dir); 627 map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
544 if (!map) { 628 if (!map) {
545 swiotlb_full(hwdev, size, dir, 1); 629 swiotlb_full(hwdev, size, dir, 1);
546 map = io_tlb_overflow_buffer; 630 map = io_tlb_overflow_buffer;
547 } 631 }
548 632
549 dev_addr = virt_to_phys(map); 633 dev_addr = virt_to_bus(map);
550 634
551 /* 635 /*
552 * Ensure that the address returned is DMA'ble 636 * Ensure that the address returned is DMA'ble
@@ -558,25 +642,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
558} 642}
559 643
560/* 644/*
561 * Since DMA is i-cache coherent, any (complete) pages that were written via
562 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
563 * flush them when they get mapped into an executable vm-area.
564 */
565static void
566mark_clean(void *addr, size_t size)
567{
568 unsigned long pg_addr, end;
569
570 pg_addr = PAGE_ALIGN((unsigned long) addr);
571 end = (unsigned long) addr + size;
572 while (pg_addr + PAGE_SIZE <= end) {
573 struct page *page = virt_to_page(pg_addr);
574 set_bit(PG_arch_1, &page->flags);
575 pg_addr += PAGE_SIZE;
576 }
577}
578
579/*
580 * Unmap a single streaming mode DMA translation. The dma_addr and size must 645 * Unmap a single streaming mode DMA translation. The dma_addr and size must
581 * match what was provided for in a previous swiotlb_map_single call. All 646 * match what was provided for in a previous swiotlb_map_single call. All
582 * other usages are undefined. 647 * other usages are undefined.
@@ -588,13 +653,13 @@ void
588swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, 653swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
589 int dir) 654 int dir)
590{ 655{
591 char *dma_addr = phys_to_virt(dev_addr); 656 char *dma_addr = bus_to_virt(dev_addr);
592 657
593 BUG_ON(dir == DMA_NONE); 658 BUG_ON(dir == DMA_NONE);
594 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 659 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
595 unmap_single(hwdev, dma_addr, size, dir); 660 unmap_single(hwdev, dma_addr, size, dir);
596 else if (dir == DMA_FROM_DEVICE) 661 else if (dir == DMA_FROM_DEVICE)
597 mark_clean(dma_addr, size); 662 dma_mark_clean(dma_addr, size);
598} 663}
599 664
600/* 665/*
@@ -611,13 +676,13 @@ static inline void
611swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 676swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
612 size_t size, int dir, int target) 677 size_t size, int dir, int target)
613{ 678{
614 char *dma_addr = phys_to_virt(dev_addr); 679 char *dma_addr = bus_to_virt(dev_addr);
615 680
616 BUG_ON(dir == DMA_NONE); 681 BUG_ON(dir == DMA_NONE);
617 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 682 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
618 sync_single(hwdev, dma_addr, size, dir, target); 683 sync_single(hwdev, dma_addr, size, dir, target);
619 else if (dir == DMA_FROM_DEVICE) 684 else if (dir == DMA_FROM_DEVICE)
620 mark_clean(dma_addr, size); 685 dma_mark_clean(dma_addr, size);
621} 686}
622 687
623void 688void
@@ -642,13 +707,13 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
642 unsigned long offset, size_t size, 707 unsigned long offset, size_t size,
643 int dir, int target) 708 int dir, int target)
644{ 709{
645 char *dma_addr = phys_to_virt(dev_addr) + offset; 710 char *dma_addr = bus_to_virt(dev_addr) + offset;
646 711
647 BUG_ON(dir == DMA_NONE); 712 BUG_ON(dir == DMA_NONE);
648 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 713 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
649 sync_single(hwdev, dma_addr, size, dir, target); 714 sync_single(hwdev, dma_addr, size, dir, target);
650 else if (dir == DMA_FROM_DEVICE) 715 else if (dir == DMA_FROM_DEVICE)
651 mark_clean(dma_addr, size); 716 dma_mark_clean(dma_addr, size);
652} 717}
653 718
654void 719void
@@ -687,18 +752,16 @@ int
687swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, 752swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
688 int dir) 753 int dir)
689{ 754{
690 void *addr; 755 dma_addr_t dev_addr;
691 unsigned long dev_addr;
692 int i; 756 int i;
693 757
694 BUG_ON(dir == DMA_NONE); 758 BUG_ON(dir == DMA_NONE);
695 759
696 for (i = 0; i < nelems; i++, sg++) { 760 for (i = 0; i < nelems; i++, sg++) {
697 addr = SG_ENT_VIRT_ADDRESS(sg); 761 dev_addr = SG_ENT_PHYS_ADDRESS(sg);
698 dev_addr = virt_to_phys(addr); 762 if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
699 if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { 763 || address_needs_mapping(hwdev, dev_addr)) {
700 void *map = map_single(hwdev, addr, sg->length, dir); 764 void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
701 sg->dma_address = virt_to_bus(map);
702 if (!map) { 765 if (!map) {
703 /* Don't panic here, we expect map_sg users 766 /* Don't panic here, we expect map_sg users
704 to do proper error handling. */ 767 to do proper error handling. */
@@ -707,6 +770,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
707 sg[0].dma_length = 0; 770 sg[0].dma_length = 0;
708 return 0; 771 return 0;
709 } 772 }
773 sg->dma_address = virt_to_bus(map);
710 } else 774 } else
711 sg->dma_address = dev_addr; 775 sg->dma_address = dev_addr;
712 sg->dma_length = sg->length; 776 sg->dma_length = sg->length;
@@ -728,9 +792,10 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
728 792
729 for (i = 0; i < nelems; i++, sg++) 793 for (i = 0; i < nelems; i++, sg++)
730 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 794 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
731 unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir); 795 unmap_single(hwdev, bus_to_virt(sg->dma_address),
796 sg->dma_length, dir);
732 else if (dir == DMA_FROM_DEVICE) 797 else if (dir == DMA_FROM_DEVICE)
733 mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 798 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
734} 799}
735 800
736/* 801/*
@@ -750,8 +815,10 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
750 815
751 for (i = 0; i < nelems; i++, sg++) 816 for (i = 0; i < nelems; i++, sg++)
752 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 817 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
753 sync_single(hwdev, (void *) sg->dma_address, 818 sync_single(hwdev, bus_to_virt(sg->dma_address),
754 sg->dma_length, dir, target); 819 sg->dma_length, dir, target);
820 else if (dir == DMA_FROM_DEVICE)
821 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
755} 822}
756 823
757void 824void
@@ -768,10 +835,48 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
768 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 835 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
769} 836}
770 837
838#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
839
840dma_addr_t
841swiotlb_map_page(struct device *hwdev, struct page *page,
842 unsigned long offset, size_t size,
843 enum dma_data_direction direction)
844{
845 dma_addr_t dev_addr;
846 char *map;
847
848 dev_addr = page_to_bus(page) + offset;
849 if (address_needs_mapping(hwdev, dev_addr)) {
850 map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
851 if (!map) {
852 swiotlb_full(hwdev, size, direction, 1);
853 map = io_tlb_overflow_buffer;
854 }
855 dev_addr = virt_to_bus(map);
856 }
857
858 return dev_addr;
859}
860
861void
862swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
863 size_t size, enum dma_data_direction direction)
864{
865 char *dma_addr = bus_to_virt(dev_addr);
866
867 BUG_ON(direction == DMA_NONE);
868 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
869 unmap_single(hwdev, dma_addr, size, direction);
870 else if (direction == DMA_FROM_DEVICE)
871 dma_mark_clean(dma_addr, size);
872}
873
874#endif
875
771int 876int
772swiotlb_dma_mapping_error(dma_addr_t dma_addr) 877swiotlb_dma_mapping_error(dma_addr_t dma_addr)
773{ 878{
774 return (dma_addr == virt_to_phys(io_tlb_overflow_buffer)); 879 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
775} 880}
776 881
777/* 882/*
@@ -780,10 +885,13 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
780 * during bus mastering, then you would pass 0x00ffffff as the mask to 885 * during bus mastering, then you would pass 0x00ffffff as the mask to
781 * this function. 886 * this function.
782 */ 887 */
888#ifndef __swiotlb_dma_supported
889#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
890#endif
783int 891int
784swiotlb_dma_supported (struct device *hwdev, u64 mask) 892swiotlb_dma_supported(struct device *hwdev, u64 mask)
785{ 893{
786 return (virt_to_phys (io_tlb_end) - 1) <= mask; 894 return __swiotlb_dma_supported(hwdev, mask);
787} 895}
788 896
789EXPORT_SYMBOL(swiotlb_init); 897EXPORT_SYMBOL(swiotlb_init);
@@ -798,6 +906,4 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
798EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); 906EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
799EXPORT_SYMBOL(swiotlb_sync_sg_for_device); 907EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
800EXPORT_SYMBOL(swiotlb_dma_mapping_error); 908EXPORT_SYMBOL(swiotlb_dma_mapping_error);
801EXPORT_SYMBOL(swiotlb_alloc_coherent);
802EXPORT_SYMBOL(swiotlb_free_coherent);
803EXPORT_SYMBOL(swiotlb_dma_supported); 909EXPORT_SYMBOL(swiotlb_dma_supported);
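
The swiotlb hunks above replace the direct memcpy() bounce copies with a __sync_single() hook, switch virt_to_phys/phys_to_virt to virt_to_bus/bus_to_virt, and wrap allocation/map-page paths in SWIOTLB_ARCH_NEED_* guards so other architectures can override them. For readers new to the mechanism, here is a minimal userspace sketch of the bounce-buffer idea; bounce_map()/bounce_unmap() are hypothetical names, not the kernel API, and the real code manages many slots, alignment and locking.

/*
 * Minimal sketch of software-IOTLB "bounce buffering", assuming a single
 * statically sized bounce area. The device only ever sees the bounce pool;
 * data is copied in before the transfer and copied back afterwards.
 */
#include <stdio.h>
#include <string.h>

enum dir { TO_DEVICE, FROM_DEVICE, BIDIRECTIONAL };

static char bounce_pool[4096];   /* stands in for the io_tlb area        */
static char *orig_addr;          /* stands in for io_tlb_orig_addr[slot] */

/* map: remember the original buffer, copy it in when data flows to the device */
static char *bounce_map(char *buf, size_t size, enum dir d)
{
	orig_addr = buf;
	if (d == TO_DEVICE || d == BIDIRECTIONAL)
		memcpy(bounce_pool, buf, size);
	return bounce_pool;      /* the address the "device" would DMA to/from */
}

/* unmap: copy device-written data back into the original buffer */
static void bounce_unmap(char *dma_addr, size_t size, enum dir d)
{
	if (d == FROM_DEVICE || d == BIDIRECTIONAL)
		memcpy(orig_addr, dma_addr, size);
	orig_addr = NULL;
}

int main(void)
{
	char req[32] = "request";
	char *dma = bounce_map(req, sizeof(req), BIDIRECTIONAL);

	/* a device would now read "request" from the bounce buffer ... */
	strcpy(dma, "reply");    /* ... and overwrite it with its reply   */

	bounce_unmap(dma, sizeof(req), BIDIRECTIONAL);
	printf("%s\n", req);     /* prints "reply"                        */
	return 0;
}
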
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index f01f8c072852..96926eb13b0a 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -1,7 +1,7 @@
1#### 1####
2# kbuild: Generic definitions 2# kbuild: Generic definitions
3 3
4# Convinient variables 4# Convenient constants
5comma := , 5comma := ,
6squote := ' 6squote := '
7empty := 7empty :=
@@ -56,40 +56,46 @@ endef
56# gcc support functions 56# gcc support functions
57# See documentation in Documentation/kbuild/makefiles.txt 57# See documentation in Documentation/kbuild/makefiles.txt
58 58
59# output directory for tests below 59# checker-shell
60TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) 60# Usage: option = $(call checker-shell, $(CC)...-o $$OUT, option-ok, otherwise)
61# Exit code chooses option. $$OUT is safe location for needless output.
62define checker-shell
63 $(shell set -e; \
64 DIR=$(KBUILD_EXTMOD); \
65 cd $${DIR:-$(objtree)}; \
66 OUT=$$PWD/.$$$$.null; \
67 \
68 ln -s /dev/null $$OUT; \
69 if $(1) >/dev/null 2>&1; \
70 then echo "$(2)"; \
71 else echo "$(3)"; \
72 fi; \
73 rm -f $$OUT)
74endef
61 75
62# as-option 76# as-option
63# Usage: cflags-y += $(call as-option, -Wa$(comma)-isa=foo,) 77# Usage: cflags-y += $(call as-option, -Wa$(comma)-isa=foo,)
64 78as-option = $(call checker-shell, \
65as-option = $(shell if $(CC) $(CFLAGS) $(1) -Wa,-Z -c -o /dev/null \ 79 $(CC) $(CFLAGS) $(1) -c -xassembler /dev/null -o $$OUT, $(1), $(2))
66 -xassembler /dev/null > /dev/null 2>&1; then echo "$(1)"; \
67 else echo "$(2)"; fi ;)
68 80
69# as-instr 81# as-instr
70# Usage: cflags-y += $(call as-instr, instr, option1, option2) 82# Usage: cflags-y += $(call as-instr, instr, option1, option2)
71 83as-instr = $(call checker-shell, \
72as-instr = $(shell if echo -e "$(1)" | \ 84 printf "$(1)" | $(CC) $(AFLAGS) -c -xassembler -o $$OUT -, $(2), $(3))
73 $(CC) $(AFLAGS) -c -xassembler - \
74 -o $(TMPOUT)astest$$$$.out > /dev/null 2>&1; \
75 then rm $(TMPOUT)astest$$$$.out; echo "$(2)"; \
76 else echo "$(3)"; fi)
77 85
78# cc-option 86# cc-option
79# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586) 87# Usage: cflags-y += $(call cc-option, -march=winchip-c6, -march=i586)
80 88cc-option = $(call checker-shell, \
81cc-option = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \ 89 $(CC) $(CFLAGS) $(if $(3),$(3),$(1)) -S -xc /dev/null -o $$OUT, $(1), $(2))
82 > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi ;)
83 90
84# cc-option-yn 91# cc-option-yn
85# Usage: flag := $(call cc-option-yn, -march=winchip-c6) 92# Usage: flag := $(call cc-option-yn, -march=winchip-c6)
86cc-option-yn = $(shell if $(CC) $(CFLAGS) $(1) -S -o /dev/null -xc /dev/null \ 93cc-option-yn = $(call cc-option, "y", "n", $(1))
87 > /dev/null 2>&1; then echo "y"; else echo "n"; fi;)
88 94
89# cc-option-align 95# cc-option-align
90# Prefix align with either -falign or -malign 96# Prefix align with either -falign or -malign
91cc-option-align = $(subst -functions=0,,\ 97cc-option-align = $(subst -functions=0,,\
92 $(call cc-option,-falign-functions=0,-malign-functions=0)) 98 $(call cc-option,-falign-functions=0,-malign-functions=0))
93 99
94# cc-version 100# cc-version
95# Usage gcc-ver := $(call cc-version, $(CC)) 101# Usage gcc-ver := $(call cc-version, $(CC))
@@ -97,35 +103,42 @@ cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
97 103
98# cc-ifversion 104# cc-ifversion
99# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 105# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
100cc-ifversion = $(shell if [ $(call cc-version, $(CC)) $(1) $(2) ]; then \ 106cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
101 echo $(3); fi;)
102 107
103# ld-option 108# ld-option
104# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both) 109# Usage: ldflags += $(call ld-option, -Wl$(comma)--hash-style=both)
105ld-option = $(shell if $(CC) $(1) -nostdlib -xc /dev/null \ 110ld-option = $(call checker-shell, \
106 -o $(TMPOUT)ldtest$$$$.out > /dev/null 2>&1; \ 111 $(CC) $(1) -nostdlib -xc /dev/null -o $$OUT, $(1), $(2))
107 then rm $(TMPOUT)ldtest$$$$.out; echo "$(1)"; \ 112
108 else echo "$(2)"; fi) 113######
109 114
110###
111# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj= 115# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.build obj=
112# Usage: 116# Usage:
113# $(Q)$(MAKE) $(build)=dir 117# $(Q)$(MAKE) $(build)=dir
114build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj 118build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
115 119
116# Prefix -I with $(srctree) if it is not an absolute path 120# Prefix -I with $(srctree) if it is not an absolute path,
117addtree = $(if $(filter-out -I/%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1))) $(1) 121# add original to the end
122addtree = $(if \
123 $(filter-out -I/%, $(1)), $(patsubst -I%,-I$(srctree)/%,$(1))) $(1)
124
118# Find all -I options and call addtree 125# Find all -I options and call addtree
119flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o))) 126flags = $(foreach o,$($(1)), \
127 $(if $(filter -I%,$(o)), $(call addtree, $(o)), $(o)))
120 128
121# If quiet is set, only print short version of command 129# echo command.
130# Short version is used, if $(quiet) equals `quiet_', otherwise full one.
131echo-cmd = $(if $($(quiet)cmd_$(1)), \
132 echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
133
134# printing commands
122cmd = @$(echo-cmd) $(cmd_$(1)) 135cmd = @$(echo-cmd) $(cmd_$(1))
123 136
124# Add $(obj)/ for paths that is not absolute 137# Add $(obj)/ for paths that are not absolute
125objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o))) 138objectify = $(foreach o,$(1), $(if $(filter /%,$(o)), $(o), $(obj)/$(o)))
126 139
127### 140###
128# if_changed - execute command if any prerequisite is newer than 141# if_changed - execute command if any prerequisite is newer than
129# target, or command line has changed 142# target, or command line has changed
130# if_changed_dep - as if_changed, but uses fixdep to reveal dependencies 143# if_changed_dep - as if_changed, but uses fixdep to reveal dependencies
131# including used config symbols 144# including used config symbols
@@ -133,16 +146,12 @@ objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
133# See Documentation/kbuild/makefiles.txt for more info 146# See Documentation/kbuild/makefiles.txt for more info
134 147
135ifneq ($(KBUILD_NOCMDDEP),1) 148ifneq ($(KBUILD_NOCMDDEP),1)
136# Check if both arguments has same arguments. Result in empty string if equal 149# Check if both arguments has same arguments. Result is empty string, if equal.
137# User may override this check using make KBUILD_NOCMDDEP=1 150# User may override this check using make KBUILD_NOCMDDEP=1
138arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \ 151arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
139 $(filter-out $(cmd_$@), $(cmd_$(1))) ) 152 $(filter-out $(cmd_$@), $(cmd_$(1))) )
140endif 153endif
141 154
142# echo command. Short version is $(quiet) equals quiet, otherwise full command
143echo-cmd = $(if $($(quiet)cmd_$(1)), \
144 echo ' $(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
145
146# >'< substitution is for echo to work, 155# >'< substitution is for echo to work,
147# >$< substitution to preserve $ when reloading .cmd file 156# >$< substitution to preserve $ when reloading .cmd file
148# note: when using inline perl scripts [perl -e '...$$t=1;...'] 157# note: when using inline perl scripts [perl -e '...$$t=1;...']
@@ -153,15 +162,15 @@ make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
153# PHONY targets skipped in both cases. 162# PHONY targets skipped in both cases.
154any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^) 163any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
155 164
156# Execute command if command has changed or prerequisitei(s) are updated 165# Execute command if command has changed or prerequisite(s) are updated.
157# 166#
158if_changed = $(if $(strip $(any-prereq) $(arg-check)), \ 167if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
159 @set -e; \ 168 @set -e; \
160 $(echo-cmd) $(cmd_$(1)); \ 169 $(echo-cmd) $(cmd_$(1)); \
161 echo 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd) 170 echo 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd)
162 171
163# execute the command and also postprocess generated .d dependencies 172# Execute the command and also postprocess generated .d dependencies file.
164# file 173#
165if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \ 174if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \
166 @set -e; \ 175 @set -e; \
167 $(echo-cmd) $(cmd_$(1)); \ 176 $(echo-cmd) $(cmd_$(1)); \
@@ -169,9 +178,10 @@ if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \
169 rm -f $(depfile); \ 178 rm -f $(depfile); \
170 mv -f $(dot-target).tmp $(dot-target).cmd) 179 mv -f $(dot-target).tmp $(dot-target).cmd)
171 180
181# Will check if $(cmd_foo) changed, or any of the prerequisites changed,
182# and if so will execute $(rule_foo).
172# Usage: $(call if_changed_rule,foo) 183# Usage: $(call if_changed_rule,foo)
173# will check if $(cmd_foo) changed, or any of the prequisites changed, 184#
174# and if so will execute $(rule_foo)
175if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \ 185if_changed_rule = $(if $(strip $(any-prereq) $(arg-check) ), \
176 @set -e; \ 186 @set -e; \
177 $(rule_$(1))) 187 $(rule_$(1)))
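
The checker-shell change above folds the as-option/cc-option/ld-option probes into one "run the tool, let the exit status pick the result" helper, with $$OUT as a throw-away output location. For illustration only, the same probe pattern in C (cc_option() is a hypothetical helper, not part of kbuild):

/*
 * Sketch of the cc-option idea: try to compile an empty translation unit
 * with a candidate flag, discard all output, and let the exit status decide
 * which option string to return.
 */
#include <stdio.h>
#include <stdlib.h>

static const char *cc_option(const char *cc, const char *flag,
			     const char *fallback)
{
	char cmd[512];

	/* mirror "cc $flag -S -xc /dev/null -o <out>" with output suppressed */
	snprintf(cmd, sizeof(cmd),
		 "%s %s -S -xc /dev/null -o /dev/null >/dev/null 2>&1",
		 cc, flag);
	return system(cmd) == 0 ? flag : fallback;
}

int main(void)
{
	/* the probe cc-option-align performs */
	puts(cc_option("cc", "-falign-functions=0", "-malign-functions=0"));
	return 0;
}
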
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 4c723fd18648..43f75d6e4d96 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -1,6 +1,6 @@
1#!/bin/bash 1#!/bin/bash
2# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org> 2# Copyright (C) Martin Schlemmer <azarah@nosferatu.za.org>
3# Copyright (c) 2006 Sam Ravnborg <sam@ravnborg.org> 3# Copyright (C) 2006 Sam Ravnborg <sam@ravnborg.org>
4# 4#
5# Released under the terms of the GNU GPL 5# Released under the terms of the GNU GPL
6# 6#
@@ -17,15 +17,15 @@ cat << EOF
17Usage: 17Usage:
18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ... 18$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
19 -o <file> Create gzipped initramfs file named <file> using 19 -o <file> Create gzipped initramfs file named <file> using
20 gen_init_cpio and gzip 20 gen_init_cpio and gzip
21 -u <uid> User ID to map to user ID 0 (root). 21 -u <uid> User ID to map to user ID 0 (root).
22 <uid> is only meaningful if <cpio_source> 22 <uid> is only meaningful if <cpio_source>
23 is a directory. 23 is a directory.
24 -g <gid> Group ID to map to group ID 0 (root). 24 -g <gid> Group ID to map to group ID 0 (root).
25 <gid> is only meaningful if <cpio_source> 25 <gid> is only meaningful if <cpio_source>
26 is a directory. 26 is a directory.
27 <cpio_source> File list or directory for cpio archive. 27 <cpio_source> File list or directory for cpio archive.
28 If <cpio_source> is a .cpio file it will be used 28 If <cpio_source> is a .cpio file it will be used
29 as direct input to initramfs. 29 as direct input to initramfs.
30 -d Output the default cpio list. 30 -d Output the default cpio list.
31 31
@@ -36,6 +36,12 @@ to reset the root/group mapping.
36EOF 36EOF
37} 37}
38 38
39# awk style field access
40# $1 - field number; rest is argument string
41field() {
42 shift $1 ; echo $1
43}
44
39list_default_initramfs() { 45list_default_initramfs() {
40 # echo usr/kinit/kinit 46 # echo usr/kinit/kinit
41 : 47 :
@@ -119,22 +125,17 @@ parse() {
119 str="${ftype} ${name} ${location} ${str}" 125 str="${ftype} ${name} ${location} ${str}"
120 ;; 126 ;;
121 "nod") 127 "nod")
122 local dev_type= 128 local dev=`LC_ALL=C ls -l "${location}"`
123 local maj=$(LC_ALL=C ls -l "${location}" | \ 129 local maj=`field 5 ${dev}`
124 gawk '{sub(/,/, "", $5); print $5}') 130 local min=`field 6 ${dev}`
125 local min=$(LC_ALL=C ls -l "${location}" | \ 131 maj=${maj%,}
126 gawk '{print $6}') 132
127 133 [ -b "${location}" ] && dev="b" || dev="c"
128 if [ -b "${location}" ]; then 134
129 dev_type="b" 135 str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
130 else
131 dev_type="c"
132 fi
133 str="${ftype} ${name} ${str} ${dev_type} ${maj} ${min}"
134 ;; 136 ;;
135 "slink") 137 "slink")
136 local target=$(LC_ALL=C ls -l "${location}" | \ 138 local target=`field 11 $(LC_ALL=C ls -l "${location}")`
137 gawk '{print $11}')
138 str="${ftype} ${name} ${target} ${str}" 139 str="${ftype} ${name} ${target} ${str}"
139 ;; 140 ;;
140 *) 141 *)
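
The "nod" branch above recovers a device node's type and major/minor numbers by parsing `ls -l` output with the new field() helper. For reference only (the script deliberately sticks to shell), a small C sketch obtains the same values directly from stat(2):

/*
 * Illustration of what the "nod" branch extracts: device type plus the
 * major/minor pair of a device node, here via stat(2) instead of ls -l.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(int argc, char **argv)
{
	struct stat st;
	const char *path = argc > 1 ? argv[1] : "/dev/null";

	if (stat(path, &st) != 0) {
		perror(path);
		return 1;
	}

	/* block vs. character device, then the major/minor numbers */
	printf("%s %c %u %u\n", path, S_ISBLK(st.st_mode) ? 'b' : 'c',
	       major(st.st_rdev), minor(st.st_rdev));
	return 0;
}
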
diff --git a/scripts/makelst b/scripts/makelst
index 34bd72391238..4fc80f2b7e19 100755
--- a/scripts/makelst
+++ b/scripts/makelst
@@ -1,31 +1,31 @@
1#!/bin/bash 1#!/bin/sh
2# A script to dump mixed source code & assembly 2# A script to dump mixed source code & assembly
3# with correct relocations from System.map 3# with correct relocations from System.map
4# Requires the following lines in Rules.make. 4# Requires the following lines in makefile:
5# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
6# William Stearns <wstearns@pobox.com>
7#%.lst: %.c 5#%.lst: %.c
8# $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$@) -g -c -o $*.o $< 6# $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$@) -g -c -o $*.o $<
9# $(TOPDIR)/scripts/makelst $*.o $(TOPDIR)/System.map $(OBJDUMP) 7# $(srctree)/scripts/makelst $*.o $(objtree)/System.map $(OBJDUMP)
10# 8#
11# Copyright (C) 2000 IBM Corporation 9# Copyright (C) 2000 IBM Corporation
12# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 10# Author(s): DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
11# William Stearns <wstearns@pobox.com>
13# 12#
14 13
15t1=`$3 --syms $1 | grep .text | grep " F " | head -n 1` 14# awk style field access
15field() {
16 shift $1 ; echo $1
17}
18
19t1=`$3 --syms $1 | grep .text | grep -m1 " F "`
16if [ -n "$t1" ]; then 20if [ -n "$t1" ]; then
17 t2=`echo $t1 | gawk '{ print $6 }'` 21 t2=`field 6 $t1`
18 if [ ! -r $2 ]; then 22 if [ ! -r $2 ]; then
19 echo "No System.map" >&2 23 echo "No System.map" >&2
20 t7=0
21 else 24 else
22 t3=`grep $t2 $2` 25 t3=`grep $t2 $2`
23 t4=`echo $t3 | gawk '{ print $1 }'` 26 t4=`field 1 $t3`
24 t5=`echo $t1 | gawk '{ print $1 }'` 27 t5=`field 1 $t1`
25 t6=`echo $t4 - $t5 | tr a-f A-F` 28 t6=`printf "%lu" $((0x$t4 - 0x$t5))`
26 t7=`( echo ibase=16 ; echo $t6 ) | bc`
27 fi 29 fi
28else
29 t7=0
30fi 30fi
31$3 -r --source --adjust-vma=$t7 $1 31$3 -r --source --adjust-vma=${t6:-0} $1
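
The rewritten makelst computes the --adjust-vma offset as the System.map address of the first .text function minus its address inside the object file, now using shell arithmetic instead of bc. A tiny C sketch of that arithmetic, with example addresses standing in for the values pulled from System.map and objdump:

/* Sketch of the makelst relocation offset calculation. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* example values; makelst reads these from System.map and objdump */
	const char *map_addr = "a0000001000f2340";
	const char *obj_addr = "0000000000000040";

	unsigned long long adjust = strtoull(map_addr, NULL, 16)
				  - strtoull(obj_addr, NULL, 16);

	/* same value the script now produces with $((0x$t4 - 0x$t5)) */
	printf("--adjust-vma=%llu\n", adjust);
	return 0;
}
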
diff --git a/security/keys/key.c b/security/keys/key.c
index ac9326c5f1da..700400d801dc 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -188,6 +188,7 @@ static inline void key_alloc_serial(struct key *key)
188 188
189 spin_lock(&key_serial_lock); 189 spin_lock(&key_serial_lock);
190 190
191attempt_insertion:
191 parent = NULL; 192 parent = NULL;
192 p = &key_serial_tree.rb_node; 193 p = &key_serial_tree.rb_node;
193 194
@@ -202,39 +203,33 @@ static inline void key_alloc_serial(struct key *key)
202 else 203 else
203 goto serial_exists; 204 goto serial_exists;
204 } 205 }
205 goto insert_here; 206
207 /* we've found a suitable hole - arrange for this key to occupy it */
208 rb_link_node(&key->serial_node, parent, p);
209 rb_insert_color(&key->serial_node, &key_serial_tree);
210
211 spin_unlock(&key_serial_lock);
212 return;
206 213
207 /* we found a key with the proposed serial number - walk the tree from 214 /* we found a key with the proposed serial number - walk the tree from
208 * that point looking for the next unused serial number */ 215 * that point looking for the next unused serial number */
209serial_exists: 216serial_exists:
210 for (;;) { 217 for (;;) {
211 key->serial++; 218 key->serial++;
212 if (key->serial < 2) 219 if (key->serial < 3) {
213 key->serial = 2; 220 key->serial = 3;
214 221 goto attempt_insertion;
215 if (!rb_parent(parent)) 222 }
216 p = &key_serial_tree.rb_node;
217 else if (rb_parent(parent)->rb_left == parent)
218 p = &(rb_parent(parent)->rb_left);
219 else
220 p = &(rb_parent(parent)->rb_right);
221 223
222 parent = rb_next(parent); 224 parent = rb_next(parent);
223 if (!parent) 225 if (!parent)
224 break; 226 goto attempt_insertion;
225 227
226 xkey = rb_entry(parent, struct key, serial_node); 228 xkey = rb_entry(parent, struct key, serial_node);
227 if (key->serial < xkey->serial) 229 if (key->serial < xkey->serial)
228 goto insert_here; 230 goto attempt_insertion;
229 } 231 }
230 232
231 /* we've found a suitable hole - arrange for this key to occupy it */
232insert_here:
233 rb_link_node(&key->serial_node, parent, p);
234 rb_insert_color(&key->serial_node, &key_serial_tree);
235
236 spin_unlock(&key_serial_lock);
237
238} /* end key_alloc_serial() */ 233} /* end key_alloc_serial() */
239 234
240/*****************************************************************************/ 235/*****************************************************************************/
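
The key.c rework above retries the whole rbtree walk from the top (attempt_insertion) instead of reconstructing a link pointer after stepping forward, and starts the serial search at 3 rather than 2, keeping one more low serial reserved. A self-contained sketch of the allocation idea, with a sorted array standing in for the kernel's red-black tree (names here are illustrative, not the keys API):

/*
 * Sketch of key serial allocation: given a candidate serial, skip reserved
 * values (< 3) and already-used serials, returning the first free one.
 */
#include <stdio.h>

static const unsigned int used[] = { 3, 4, 5, 7, 100 };   /* example "tree" */
static const int nr_used = sizeof(used) / sizeof(used[0]);

static int serial_in_use(unsigned int serial)
{
	for (int i = 0; i < nr_used; i++)
		if (used[i] == serial)
			return 1;
	return 0;
}

static unsigned int alloc_serial(unsigned int candidate)
{
	for (;;) {
		if (candidate < 3)
			candidate = 3;   /* low serials stay reserved       */
		if (!serial_in_use(candidate))
			return candidate;/* found a hole - "insert" here    */
		candidate++;
	}
}

int main(void)
{
	printf("%u\n", alloc_serial(3)); /* prints 6: first free value >= 3 */
	return 0;
}
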