-rw-r--r--  Documentation/keys.txt | 18
-rw-r--r--  Documentation/md.txt | 120
-rw-r--r--  Documentation/power/interface.txt | 11
-rw-r--r--  Documentation/power/swsusp.txt | 5
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/alpha/Kconfig | 13
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c | 1
-rw-r--r--  arch/alpha/kernel/irq.c | 630
-rw-r--r--  arch/cris/arch-v10/kernel/kgdb.c | 6
-rw-r--r--  arch/frv/kernel/Makefile | 1
-rw-r--r--  arch/frv/kernel/entry.S | 2
-rw-r--r--  arch/frv/kernel/futex.c | 242
-rw-r--r--  arch/frv/kernel/signal.c | 155
-rw-r--r--  arch/i386/Kconfig | 8
-rw-r--r--  arch/i386/Kconfig.cpu | 14
-rw-r--r--  arch/i386/Kconfig.debug | 10
-rw-r--r--  arch/i386/kernel/apic.c | 2
-rw-r--r--  arch/i386/kernel/apm.c | 97
-rw-r--r--  arch/i386/kernel/cpu/amd.c | 7
-rw-r--r--  arch/i386/kernel/cpu/common.c | 8
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c | 27
-rw-r--r--  arch/i386/kernel/cpuid.c | 3
-rw-r--r--  arch/i386/kernel/entry.S | 1
-rw-r--r--  arch/i386/kernel/head.S | 27
-rw-r--r--  arch/i386/kernel/i386_ksyms.c | 3
-rw-r--r--  arch/i386/kernel/io_apic.c | 4
-rw-r--r--  arch/i386/kernel/mpparse.c | 26
-rw-r--r--  arch/i386/kernel/msr.c | 3
-rw-r--r--  arch/i386/kernel/process.c | 16
-rw-r--r--  arch/i386/kernel/reboot.c | 6
-rw-r--r--  arch/i386/kernel/setup.c | 8
-rw-r--r--  arch/i386/kernel/smpboot.c | 6
-rw-r--r--  arch/i386/kernel/syscall_table.S | 1
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c | 2
-rw-r--r--  arch/i386/kernel/traps.c | 33
-rw-r--r--  arch/i386/mm/init.c | 24
-rw-r--r--  arch/i386/mm/pageattr.c | 27
-rw-r--r--  arch/i386/pci/irq.c | 2
-rw-r--r--  arch/m32r/Kconfig | 26
-rw-r--r--  arch/m32r/boot/compressed/head.S | 5
-rw-r--r--  arch/m32r/boot/setup.S | 24
-rw-r--r--  arch/m32r/kernel/Makefile | 1
-rw-r--r--  arch/m32r/kernel/entry.S | 19
-rw-r--r--  arch/m32r/kernel/io_m32104ut.c | 298
-rw-r--r--  arch/m32r/kernel/io_m32700ut.c | 24
-rw-r--r--  arch/m32r/kernel/io_mappi.c | 2
-rw-r--r--  arch/m32r/kernel/io_mappi2.c | 24
-rw-r--r--  arch/m32r/kernel/io_mappi3.c | 51
-rw-r--r--  arch/m32r/kernel/io_oaks32r.c | 2
-rw-r--r--  arch/m32r/kernel/io_opsput.c | 6
-rw-r--r--  arch/m32r/kernel/setup.c | 7
-rw-r--r--  arch/m32r/kernel/setup_m32104ut.c | 156
-rw-r--r--  arch/m32r/kernel/setup_m32700ut.c | 8
-rw-r--r--  arch/m32r/kernel/setup_mappi.c | 6
-rw-r--r--  arch/m32r/kernel/setup_mappi2.c | 6
-rw-r--r--  arch/m32r/kernel/setup_mappi3.c | 6
-rw-r--r--  arch/m32r/kernel/setup_oaks32r.c | 6
-rw-r--r--  arch/m32r/kernel/setup_opsput.c | 8
-rw-r--r--  arch/m32r/kernel/setup_usrv.c | 6
-rw-r--r--  arch/m32r/kernel/time.c | 4
-rw-r--r--  arch/m32r/m32104ut/defconfig.m32104ut | 657
-rw-r--r--  arch/m32r/mm/cache.c | 36
-rw-r--r--  arch/m68knommu/kernel/m68k_ksyms.c | 2
-rw-r--r--  arch/m68knommu/kernel/setup.c | 2
-rw-r--r--  arch/ppc/boot/simple/Makefile | 2
-rw-r--r--  arch/ppc/kernel/idle.c | 4
-rw-r--r--  arch/ppc/platforms/4xx/ibm440gx.c | 2
-rw-r--r--  arch/ppc/platforms/4xx/ibm440sp.c | 1
-rw-r--r--  arch/ppc/platforms/lite5200.c | 2
-rw-r--r--  arch/ppc/platforms/mpc5200.c | 53
-rw-r--r--  arch/ppc/syslib/mpc52xx_pci.c | 95
-rw-r--r--  arch/ppc/syslib/mpc52xx_setup.c | 6
-rw-r--r--  arch/s390/Kconfig | 34
-rw-r--r--  arch/s390/Makefile | 6
-rw-r--r--  arch/s390/appldata/appldata_base.c | 8
-rw-r--r--  arch/s390/appldata/appldata_os.c | 14
-rw-r--r--  arch/s390/crypto/Makefile | 8
-rw-r--r--  arch/s390/crypto/aes_s390.c | 248
-rw-r--r--  arch/s390/crypto/crypt_s390.h (renamed from arch/s390/crypto/crypt_z990.h) | 267
-rw-r--r--  arch/s390/crypto/crypt_s390_query.c | 129
-rw-r--r--  arch/s390/crypto/crypt_z990_query.c | 111
-rw-r--r--  arch/s390/crypto/des_s390.c (renamed from arch/s390/crypto/des_z990.c) | 54
-rw-r--r--  arch/s390/crypto/sha1_s390.c (renamed from arch/s390/crypto/sha1_z990.c) | 32
-rw-r--r--  arch/s390/crypto/sha256_s390.c | 151
-rw-r--r--  arch/s390/defconfig | 65
-rw-r--r--  arch/s390/kernel/Makefile | 15
-rw-r--r--  arch/s390/kernel/compat_linux.c | 2
-rw-r--r--  arch/s390/kernel/compat_signal.c | 2
-rw-r--r--  arch/s390/kernel/cpcmd.c | 16
-rw-r--r--  arch/s390/kernel/entry64.S | 18
-rw-r--r--  arch/s390/kernel/head.S | 4
-rw-r--r--  arch/s390/kernel/machine_kexec.c | 2
-rw-r--r--  arch/s390/kernel/module.c | 12
-rw-r--r--  arch/s390/kernel/process.c | 12
-rw-r--r--  arch/s390/kernel/ptrace.c | 24
-rw-r--r--  arch/s390/kernel/reipl_diag.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 14
-rw-r--r--  arch/s390/kernel/signal.c | 8
-rw-r--r--  arch/s390/kernel/smp.c | 14
-rw-r--r--  arch/s390/kernel/sys_s390.c | 12
-rw-r--r--  arch/s390/kernel/traps.c | 10
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/s390/lib/Makefile | 5
-rw-r--r--  arch/s390/lib/spinlock.c | 2
-rw-r--r--  arch/s390/mm/extmem.c | 2
-rw-r--r--  arch/s390/mm/fault.c | 18
-rw-r--r--  arch/s390/mm/init.c | 8
-rw-r--r--  arch/s390/mm/mmap.c | 2
-rw-r--r--  arch/s390/oprofile/Makefile | 2
-rw-r--r--  arch/s390/oprofile/backtrace.c | 79
-rw-r--r--  arch/s390/oprofile/init.c | 4
-rw-r--r--  arch/um/drivers/chan_kern.c | 273
-rw-r--r--  arch/um/drivers/line.c | 298
-rw-r--r--  arch/um/drivers/mconsole_kern.c | 232
-rw-r--r--  arch/um/drivers/mconsole_user.c | 12
-rw-r--r--  arch/um/drivers/net_kern.c | 8
-rw-r--r--  arch/um/drivers/ssl.c | 47
-rw-r--r--  arch/um/drivers/stdio_console.c | 33
-rw-r--r--  arch/um/drivers/ubd_kern.c | 15
-rw-r--r--  arch/um/include/chan_kern.h | 25
-rw-r--r--  arch/um/include/choose-mode.h | 3
-rw-r--r--  arch/um/include/irq_user.h | 13
-rw-r--r--  arch/um/include/kern.h | 13
-rw-r--r--  arch/um/include/line.h | 37
-rw-r--r--  arch/um/include/mconsole.h | 8
-rw-r--r--  arch/um/include/os.h | 17
-rw-r--r--  arch/um/include/user_util.h | 1
-rw-r--r--  arch/um/kernel/Makefile | 6
-rw-r--r--  arch/um/kernel/irq_user.c | 48
-rw-r--r--  arch/um/kernel/process_kern.c | 4
-rw-r--r--  arch/um/kernel/sigio_user.c | 2
-rw-r--r--  arch/um/kernel/um_arch.c | 4
-rw-r--r--  arch/um/kernel/umid.c | 323
-rw-r--r--  arch/um/os-Linux/Makefile | 4
-rw-r--r--  arch/um/os-Linux/aio.c | 467
-rw-r--r--  arch/um/os-Linux/umid.c | 335
-rw-r--r--  arch/x86_64/Kconfig.debug | 10
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86_64/kernel/process.c | 5
-rw-r--r--  arch/x86_64/kernel/syscall.c | 2
-rw-r--r--  arch/x86_64/mm/init.c | 23
-rw-r--r--  arch/x86_64/mm/pageattr.c | 9
-rw-r--r--  block/Kconfig | 2
-rw-r--r--  crypto/Kconfig | 43
-rw-r--r--  crypto/tcrypt.c | 4
-rw-r--r--  crypto/tcrypt.h | 64
-rw-r--r--  drivers/base/memory.c | 7
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/nbd.c | 122
-rw-r--r--  drivers/block/paride/Kconfig | 5
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/char/hangcheck-timer.c | 2
-rw-r--r--  drivers/char/hw_random.c | 70
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 4
-rw-r--r--  drivers/char/watchdog/Kconfig | 2
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 4
-rw-r--r--  drivers/input/evdev.c | 2
-rw-r--r--  drivers/macintosh/therm_adt746x.c | 39
-rw-r--r--  drivers/macintosh/therm_pm72.c | 7
-rw-r--r--  drivers/macintosh/windfarm_lm75_sensor.c | 7
-rw-r--r--  drivers/md/bitmap.c | 114
-rw-r--r--  drivers/md/dm-crypt.c | 5
-rw-r--r--  drivers/md/dm-io.h | 3
-rw-r--r--  drivers/md/dm-ioctl.c | 21
-rw-r--r--  drivers/md/dm-log.c | 2
-rw-r--r--  drivers/md/dm-raid1.c | 13
-rw-r--r--  drivers/md/dm-snap.c | 25
-rw-r--r--  drivers/md/dm.c | 95
-rw-r--r--  drivers/md/dm.h | 5
-rw-r--r--  drivers/md/faulty.c | 9
-rw-r--r--  drivers/md/kcopyd.c | 3
-rw-r--r--  drivers/md/linear.c | 14
-rw-r--r--  drivers/md/md.c | 893
-rw-r--r--  drivers/md/multipath.c | 22
-rw-r--r--  drivers/md/raid0.c | 26
-rw-r--r--  drivers/md/raid1.c | 726
-rw-r--r--  drivers/md/raid10.c | 544
-rw-r--r--  drivers/md/raid5.c | 174
-rw-r--r--  drivers/md/raid6main.c | 348
-rw-r--r--  drivers/media/video/cpia_pp.c | 30
-rw-r--r--  drivers/message/i2o/Kconfig | 12
-rw-r--r--  drivers/message/i2o/bus-osm.c | 23
-rw-r--r--  drivers/message/i2o/config-osm.c | 2
-rw-r--r--  drivers/message/i2o/core.h | 20
-rw-r--r--  drivers/message/i2o/device.c | 339
-rw-r--r--  drivers/message/i2o/driver.c | 12
-rw-r--r--  drivers/message/i2o/exec-osm.c | 114
-rw-r--r--  drivers/message/i2o/i2o_block.c | 188
-rw-r--r--  drivers/message/i2o/i2o_config.c | 196
-rw-r--r--  drivers/message/i2o/i2o_lan.h | 38
-rw-r--r--  drivers/message/i2o/i2o_proc.c | 2
-rw-r--r--  drivers/message/i2o/i2o_scsi.c | 89
-rw-r--r--  drivers/message/i2o/iop.c | 356
-rw-r--r--  drivers/message/i2o/pci.c | 7
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/plip.c | 2
-rw-r--r--  drivers/parport/Kconfig | 2
-rw-r--r--  drivers/parport/daisy.c | 51
-rw-r--r--  drivers/parport/ieee1284_ops.c | 62
-rw-r--r--  drivers/parport/parport_pc.c | 30
-rw-r--r--  drivers/parport/probe.c | 199
-rw-r--r--  drivers/parport/share.c | 1
-rw-r--r--  drivers/pnp/pnpbios/bioscalls.c | 45
-rw-r--r--  drivers/s390/Makefile | 2
-rw-r--r--  drivers/s390/block/Kconfig | 8
-rw-r--r--  drivers/s390/block/dasd.c | 32
-rw-r--r--  drivers/s390/block/dasd_diag.c | 11
-rw-r--r--  drivers/s390/block/dasd_diag.h | 31
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 9
-rw-r--r--  drivers/s390/block/dasd_fba.c | 6
-rw-r--r--  drivers/s390/block/dasd_int.h | 3
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 5
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 4
-rw-r--r--  drivers/s390/char/sclp_cpi.c | 2
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 2
-rw-r--r--  drivers/s390/char/tape_block.c | 2
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 2
-rw-r--r--  drivers/s390/cio/blacklist.c | 234
-rw-r--r--  drivers/s390/cio/blacklist.h | 2
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 6
-rw-r--r--  drivers/s390/cio/chsc.c | 473
-rw-r--r--  drivers/s390/cio/chsc.h | 13
-rw-r--r--  drivers/s390/cio/cio.c | 168
-rw-r--r--  drivers/s390/cio/cio.h | 11
-rw-r--r--  drivers/s390/cio/cmf.c | 8
-rw-r--r--  drivers/s390/cio/css.c | 297
-rw-r--r--  drivers/s390/cio/css.h | 43
-rw-r--r--  drivers/s390/cio/device.c | 47
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/device_fsm.c | 29
-rw-r--r--  drivers/s390/cio/device_id.c | 26
-rw-r--r--  drivers/s390/cio/device_ops.c | 4
-rw-r--r--  drivers/s390/cio/device_pgid.c | 56
-rw-r--r--  drivers/s390/cio/device_status.c | 14
-rw-r--r--  drivers/s390/cio/ioasm.h | 86
-rw-r--r--  drivers/s390/cio/qdio.c | 713
-rw-r--r--  drivers/s390/cio/qdio.h | 144
-rw-r--r--  drivers/s390/cio/schid.h | 26
-rw-r--r--  drivers/s390/crypto/z90common.h | 9
-rw-r--r--  drivers/s390/crypto/z90crypt.h | 13
-rw-r--r--  drivers/s390/crypto/z90hardware.c | 309
-rw-r--r--  drivers/s390/crypto/z90main.c | 111
-rw-r--r--  drivers/s390/net/Kconfig | 2
-rw-r--r--  drivers/s390/net/claw.c | 6
-rw-r--r--  drivers/s390/net/cu3088.c | 3
-rw-r--r--  drivers/s390/net/iucv.c | 10
-rw-r--r--  drivers/s390/net/qeth_main.c | 21
-rw-r--r--  drivers/s390/s390_rdev.c | 53
-rw-r--r--  drivers/s390/s390mach.c | 66
-rw-r--r--  drivers/s390/sysinfo.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 10
-rw-r--r--  drivers/scsi/ata_piix.c | 4
-rw-r--r--  drivers/scsi/libata-core.c | 114
-rw-r--r--  drivers/scsi/libata-scsi.c | 16
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 31
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 28
-rw-r--r--  fs/exec.c | 2
-rw-r--r--  fs/fuse/dev.c | 72
-rw-r--r--  fs/fuse/dir.c | 278
-rw-r--r--  fs/fuse/file.c | 49
-rw-r--r--  fs/fuse/fuse_i.h | 12
-rw-r--r--  fs/fuse/inode.c | 14
-rw-r--r--  fs/hfsplus/super.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 3
-rw-r--r--  fs/jbd/checkpoint.c | 418
-rw-r--r--  fs/nfsd/nfs3proc.c | 11
-rw-r--r--  fs/nfsd/nfs3xdr.c | 47
-rw-r--r--  fs/nfsd/nfsxdr.c | 48
-rw-r--r--  fs/nfsd/vfs.c | 40
-rw-r--r--  fs/partitions/Kconfig | 2
-rw-r--r--  fs/partitions/ibm.c | 30
-rw-r--r--  fs/proc/array.c | 2
-rw-r--r--  fs/ramfs/Makefile | 4
-rw-r--r--  fs/ramfs/file-mmu.c | 57
-rw-r--r--  fs/ramfs/file-nommu.c | 292
-rw-r--r--  fs/ramfs/inode.c | 22
-rw-r--r--  fs/ramfs/internal.h | 15
-rw-r--r--  include/asm-alpha/atomic.h | 1
-rw-r--r--  include/asm-alpha/dma-mapping.h | 2
-rw-r--r--  include/asm-alpha/hardirq.h | 2
-rw-r--r--  include/asm-alpha/mman.h | 1
-rw-r--r--  include/asm-arm/atomic.h | 1
-rw-r--r--  include/asm-arm/mman.h | 1
-rw-r--r--  include/asm-arm26/atomic.h | 1
-rw-r--r--  include/asm-arm26/mman.h | 1
-rw-r--r--  include/asm-cris/atomic.h | 1
-rw-r--r--  include/asm-cris/mman.h | 1
-rw-r--r--  include/asm-frv/atomic.h | 1
-rw-r--r--  include/asm-frv/futex.h | 42
-rw-r--r--  include/asm-frv/mman.h | 1
-rw-r--r--  include/asm-frv/signal.h | 1
-rw-r--r--  include/asm-generic/atomic.h | 116
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 4
-rw-r--r--  include/asm-h8300/atomic.h | 1
-rw-r--r--  include/asm-h8300/irq.h | 5
-rw-r--r--  include/asm-h8300/mman.h | 1
-rw-r--r--  include/asm-i386/atomic.h | 1
-rw-r--r--  include/asm-i386/bitops.h | 26
-rw-r--r--  include/asm-i386/bugs.h | 23
-rw-r--r--  include/asm-i386/cacheflush.h | 4
-rw-r--r--  include/asm-i386/desc.h | 8
-rw-r--r--  include/asm-i386/mach-bigsmp/mach_apic.h | 79
-rw-r--r--  include/asm-i386/mach-bigsmp/mach_apicdef.h | 4
-rw-r--r--  include/asm-i386/mman.h | 1
-rw-r--r--  include/asm-i386/mmzone.h | 5
-rw-r--r--  include/asm-i386/module.h | 4
-rw-r--r--  include/asm-i386/mpspec_def.h | 2
-rw-r--r--  include/asm-i386/segment.h | 14
-rw-r--r--  include/asm-i386/system.h | 31
-rw-r--r--  include/asm-i386/unistd.h | 2
-rw-r--r--  include/asm-ia64/atomic.h | 1
-rw-r--r--  include/asm-ia64/mman.h | 1
-rw-r--r--  include/asm-ia64/unistd.h | 2
-rw-r--r--  include/asm-m32r/assembler.h | 10
-rw-r--r--  include/asm-m32r/atomic.h | 1
-rw-r--r--  include/asm-m32r/cacheflush.h | 2
-rw-r--r--  include/asm-m32r/irq.h | 16
-rw-r--r--  include/asm-m32r/m32102.h | 38
-rw-r--r--  include/asm-m32r/m32104ut/m32104ut_pld.h | 163
-rw-r--r--  include/asm-m32r/m32r.h | 8
-rw-r--r--  include/asm-m32r/mman.h | 1
-rw-r--r--  include/asm-m32r/system.h | 12
-rw-r--r--  include/asm-m32r/unistd.h | 12
-rw-r--r--  include/asm-m68k/atomic.h | 1
-rw-r--r--  include/asm-m68k/irq.h | 2
-rw-r--r--  include/asm-m68k/mman.h | 1
-rw-r--r--  include/asm-m68knommu/atomic.h | 1
-rw-r--r--  include/asm-m68knommu/irq.h | 6
-rw-r--r--  include/asm-mips/atomic.h | 1
-rw-r--r--  include/asm-mips/mman.h | 1
-rw-r--r--  include/asm-mips/riscos-syscall.h | 979
-rw-r--r--  include/asm-parisc/atomic.h | 1
-rw-r--r--  include/asm-parisc/mman.h | 1
-rw-r--r--  include/asm-powerpc/atomic.h | 1
-rw-r--r--  include/asm-powerpc/mman.h | 1
-rw-r--r--  include/asm-ppc/ibm_ocp.h | 1
-rw-r--r--  include/asm-ppc/io.h | 2
-rw-r--r--  include/asm-ppc/mpc52xx.h | 13
-rw-r--r--  include/asm-s390/atomic.h | 174
-rw-r--r--  include/asm-s390/ccwdev.h | 3
-rw-r--r--  include/asm-s390/mman.h | 1
-rw-r--r--  include/asm-s390/qdio.h | 8
-rw-r--r--  include/asm-s390/s390_rdev.h | 15
-rw-r--r--  include/asm-s390/uaccess.h | 14
-rw-r--r--  include/asm-s390/unistd.h | 2
-rw-r--r--  include/asm-s390/vtoc.h | 24
-rw-r--r--  include/asm-sh/atomic.h | 1
-rw-r--r--  include/asm-sh/mman.h | 1
-rw-r--r--  include/asm-sh64/atomic.h | 1
-rw-r--r--  include/asm-sparc/atomic.h | 1
-rw-r--r--  include/asm-sparc/mman.h | 1
-rw-r--r--  include/asm-sparc64/atomic.h | 1
-rw-r--r--  include/asm-sparc64/mman.h | 1
-rw-r--r--  include/asm-v850/atomic.h | 1
-rw-r--r--  include/asm-v850/mman.h | 1
-rw-r--r--  include/asm-x86_64/atomic.h | 1
-rw-r--r--  include/asm-x86_64/cacheflush.h | 4
-rw-r--r--  include/asm-x86_64/mman.h | 1
-rw-r--r--  include/asm-x86_64/mpspec.h | 2
-rw-r--r--  include/asm-x86_64/pgtable.h | 2
-rw-r--r--  include/asm-x86_64/system.h | 2
-rw-r--r--  include/asm-xtensa/atomic.h | 1
-rw-r--r--  include/asm-xtensa/mman.h | 1
-rw-r--r--  include/keys/user-type.h | 1
-rw-r--r--  include/linux/ata.h | 2
-rw-r--r--  include/linux/bootmem.h | 46
-rw-r--r--  include/linux/dm-ioctl.h | 11
-rw-r--r--  include/linux/fs.h | 1
-rw-r--r--  include/linux/fuse.h | 24
-rw-r--r--  include/linux/hugetlb.h | 4
-rw-r--r--  include/linux/i2o.h | 976
-rw-r--r--  include/linux/irq.h | 13
-rw-r--r--  include/linux/jbd.h | 8
-rw-r--r--  include/linux/key.h | 8
-rw-r--r--  include/linux/libata.h | 8
-rw-r--r--  include/linux/mempolicy.h | 38
-rw-r--r--  include/linux/mm.h | 55
-rw-r--r--  include/linux/mmzone.h | 44
-rw-r--r--  include/linux/nbd.h | 8
-rw-r--r--  include/linux/nfsd/xdr.h | 3
-rw-r--r--  include/linux/nfsd/xdr3.h | 1
-rw-r--r--  include/linux/page-flags.h | 91
-rw-r--r--  include/linux/parport.h | 1
-rw-r--r--  include/linux/parport_pc.h | 4
-rw-r--r--  include/linux/pci_ids.h | 10
-rw-r--r--  include/linux/raid/md.h | 4
-rw-r--r--  include/linux/raid/md_k.h | 80
-rw-r--r--  include/linux/raid/raid1.h | 14
-rw-r--r--  include/linux/raid/raid10.h | 22
-rw-r--r--  include/linux/raid/raid5.h | 7
-rw-r--r--  include/linux/ramfs.h | 10
-rw-r--r--  include/linux/rmap.h | 1
-rw-r--r--  include/linux/sched.h | 25
-rw-r--r--  include/linux/suspend.h | 8
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 6
-rw-r--r--  init/Kconfig | 3
-rw-r--r--  init/do_mounts_md.c | 22
-rw-r--r--  init/do_mounts_rd.c | 4
-rw-r--r--  init/main.c | 5
-rw-r--r--  ipc/shm.c | 18
-rw-r--r--  kernel/acct.c | 16
-rw-r--r--  kernel/futex.c | 7
-rw-r--r--  kernel/irq/manage.c | 2
-rw-r--r--  kernel/irq/proc.c | 4
-rw-r--r--  kernel/module.c | 3
-rw-r--r--  kernel/panic.c | 4
-rw-r--r--  kernel/power/disk.c | 92
-rw-r--r--  kernel/power/power.h | 24
-rw-r--r--  kernel/power/snapshot.c | 89
-rw-r--r--  kernel/power/swsusp.c | 1020
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  lib/Kconfig.debug | 5
-rw-r--r--  lib/swiotlb.c | 3
-rw-r--r--  mm/Kconfig | 2
-rw-r--r--  mm/bootmem.c | 58
-rw-r--r--  mm/filemap.c | 5
-rw-r--r--  mm/hugetlb.c | 192
-rw-r--r--  mm/internal.h | 21
-rw-r--r--  mm/madvise.c | 35
-rw-r--r--  mm/memory.c | 32
-rw-r--r--  mm/memory_hotplug.c | 1
-rw-r--r--  mm/mempolicy.c | 102
-rw-r--r--  mm/nommu.c | 7
-rw-r--r--  mm/page_alloc.c | 343
-rw-r--r--  mm/rmap.c | 57
-rw-r--r--  mm/shmem.c | 34
-rw-r--r--  mm/swap.c | 27
-rw-r--r--  mm/swap_state.c | 4
-rw-r--r--  mm/swapfile.c | 20
-rw-r--r--  mm/tiny-shmem.c | 29
-rw-r--r--  mm/truncate.c | 44
-rw-r--r--  mm/vmscan.c | 123
-rw-r--r--  net/sunrpc/svcauth_unix.c | 14
-rw-r--r--  net/sunrpc/svcsock.c | 2
-rw-r--r--  security/keys/internal.h | 1
-rw-r--r--  security/keys/key.c | 58
-rw-r--r--  security/keys/keyring.c | 66
-rw-r--r--  security/keys/user_defined.c | 33
-rw-r--r--  security/selinux/selinuxfs.c | 4
-rw-r--r--  security/selinux/ss/avtab.c | 2
-rw-r--r--  security/selinux/ss/policydb.c | 2
-rw-r--r--  sound/oss/ad1848.c | 92
-rw-r--r--  sound/oss/cs4281/cs4281m.c | 21
-rw-r--r--  sound/oss/cs4281/cs4281pm-24.c | 39
-rw-r--r--  sound/oss/cs46xx.c | 60
-rw-r--r--  sound/oss/cs46xxpm-24.h | 4
-rw-r--r--  sound/oss/maestro.c | 149
-rw-r--r--  sound/oss/nm256_audio.c | 47
-rw-r--r--  sound/oss/opl3sa2.c | 110
451 files changed, 13873 insertions, 9593 deletions
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 31154882000a..6304db59bfe4 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -860,24 +860,6 @@ The structure has a number of fields, some of which are mandatory:
     It is safe to sleep in this method.
 
 
-(*) int (*duplicate)(struct key *key, const struct key *source);
-
-    If this type of key can be duplicated, then this method should be
-    provided. It is called to copy the payload attached to the source into the
-    new key. The data length on the new key will have been updated and the
-    quota adjusted already.
-
-    This method will be called with the source key's semaphore read-locked to
-    prevent its payload from being changed, thus RCU constraints need not be
-    applied to the source key.
-
-    This method does not have to lock the destination key in order to attach a
-    payload. The fact that KEY_FLAG_INSTANTIATED is not set in key->flags
-    prevents anything else from gaining access to the key.
-
-    It is safe to sleep in this method.
-
-
 (*) int (*update)(struct key *key, const void *data, size_t datalen);
 
     If this type of key can be updated, then this method should be provided.
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 23e6cce40f9c..03a13c462cf2 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -51,6 +51,30 @@ superblock can be autodetected and run at boot time.
 The kernel parameter "raid=partitionable" (or "raid=part") means
 that all auto-detected arrays are assembled as partitionable.
 
+Boot time assembly of degraded/dirty arrays
+-------------------------------------------
+
+If a raid5 or raid6 array is both dirty and degraded, it could have
+undetectable data corruption.  This is because the fact that it is
+'dirty' means that the parity cannot be trusted, and the fact that it
+is degraded means that some datablocks are missing and cannot reliably
+be reconstructed (due to no parity).
+
+For this reason, md will normally refuse to start such an array.  This
+requires the sysadmin to take action to explicitly start the array
+despite possible corruption.  This is normally done with
+   mdadm --assemble --force ....
+
+This option is not really available if the array has the root
+filesystem on it.  In order to support booting from such an
+array, md supports a module parameter "start_dirty_degraded" which,
+when set to 1, bypasses the checks and allows dirty degraded
+arrays to be started.
+
+So, to boot with a root filesystem of a dirty degraded raid[56], use
+
+   md-mod.start_dirty_degraded=1
+
 
 Superblock formats
 ------------------
@@ -141,6 +165,70 @@ All md devices contain:
     in a fully functional array.  If this is not yet known, the file
     will be empty.  If an array is being resized (not currently
     possible) this will contain the larger of the old and new sizes.
+    Some raid levels (RAID1) allow this value to be set while the
+    array is active.  This will reconfigure the array.  Otherwise
+    it can only be set while assembling an array.
+
+  chunk_size
+    This is the size in bytes for 'chunks' and is only relevant to
+    raid levels that involve striping (1,4,5,6,10).  The address space
+    of the array is conceptually divided into chunks and consecutive
+    chunks are striped onto neighbouring devices.
+    The size should be at least PAGE_SIZE (4k) and should be a power
+    of 2.  This can only be set while assembling an array.
+
+  component_size
+    For arrays with data redundancy (i.e. not raid0, linear, faulty,
+    multipath), all components must be the same size - or at least
+    there must be a size that they all provide space for.  This is a key
+    part of the geometry of the array.  It is measured in sectors
+    and can be read from here.  Writing to this value may resize
+    the array if the personality supports it (raid1, raid5, raid6),
+    and if the component drives are large enough.
+
+  metadata_version
+    This indicates the format that is being used to record metadata
+    about the array.  It can be 0.90 (traditional format), 1.0, 1.1,
+    1.2 (newer format in varying locations) or "none" indicating that
+    the kernel isn't managing metadata at all.
+
+  level
+    The raid 'level' for this array.  The name will often (but not
+    always) be the same as the name of the module that implements the
+    level.  To be auto-loaded the module must have an alias
+       md-$LEVEL  e.g. md-raid5
+    This can be written only while the array is being assembled, not
+    after it is started.
+
+  new_dev
+    This file can be written but not read.  The value written should
+    be a block device number as major:minor.  e.g. 8:0
+    This will cause that device to be attached to the array, if it is
+    available.  It will then appear at md/dev-XXX (depending on the
+    name of the device) and further configuration is then possible.
+
+  sync_speed_min
+  sync_speed_max
+    These are similar to /proc/sys/dev/raid/speed_limit_{min,max},
+    however they only apply to the particular array.
+    If no value has been written to these, or if the word 'system'
+    is written, then the system-wide value is used.  If a value,
+    in kibibytes-per-second, is written, then it is used.
+    When the files are read, they show the currently active value
+    followed by "(local)" or "(system)" depending on whether it is
+    a locally set or system-wide value.
+
+  sync_completed
+    This shows the number of sectors that have been completed of
+    whatever the current sync_action is, followed by the number of
+    sectors in total that could need to be processed.  The two
+    numbers are separated by a '/', thus effectively showing one
+    value, a fraction of the process that is complete.
+
+  sync_speed
+    This shows the current actual speed, in K/sec, of the current
+    sync_action.  It is averaged over the last 30 seconds.
+
 
 As component devices are added to an md array, they appear in the 'md'
 directory as new directories named
@@ -167,6 +255,38 @@ Each directory contains:
     of being recoverred to
    This list make grow in future.
 
+  errors
+    An approximate count of read errors that have been detected on
+    this device but have not caused the device to be evicted from
+    the array (either because they were corrected or because they
+    happened while the array was read-only).  When using version-1
+    metadata, this value persists across restarts of the array.
+
+    This value can be written while assembling an array thus
+    providing an ongoing count for arrays with metadata managed by
+    userspace.
+
+  slot
+    This gives the role that the device has in the array.  It will
+    either be 'none' if the device is not active in the array
+    (i.e. is a spare or has failed) or an integer less than the
+    'raid_disks' number for the array indicating which position
+    it currently fills.  This can only be set while assembling an
+    array.  A device for which this is set is assumed to be working.
+
+  offset
+    This gives the location in the device (in sectors from the
+    start) where data from the array will be stored.  Any part of
+    the device before this offset is not touched, unless it is
+    used for storing metadata (Formats 1.1 and 1.2).
+
+  size
+    The amount of the device, after the offset, that can be used
+    for storage of data.  This will normally be the same as the
+    component_size.  This can be written while assembling an
+    array.  If a value less than the current component_size is
+    written, component_size will be reduced to this value.
+
 
 An active md device will also contain and entry for each active device
 in the array.  These are named
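
The sysfs attributes documented above can be driven from ordinary user
space.  The following minimal C sketch reads and then overrides the
per-array minimum resync speed; the array name (md0) and the attribute
path under /sys/block are assumptions for illustration, not part of the
patch:

/*
 * Minimal sketch: read and set the per-array sync_speed_min attribute
 * described above.  The md0 name and sysfs mount point are assumptions.
 */
#include <stdio.h>

#define ATTR "/sys/block/md0/md/sync_speed_min"

int main(void)
{
	char buf[64];
	FILE *f;

	/* Read: shows e.g. "1000 (system)" until a local value is set. */
	f = fopen(ATTR, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("before: %s", buf);
		fclose(f);
	}

	/* Write a number in KiB/s to set a local, per-array limit;
	 * writing the word "system" reverts to the global default. */
	f = fopen(ATTR, "w");
	if (f) {
		fputs("5000\n", f);
		fclose(f);
	}
	return 0;
}

After the write, re-reading the file would show the value followed by
"(local)", per the semantics described above.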
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index f5ebda5f4276..bd4ffb5bd49a 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -41,3 +41,14 @@ to. Writing to this file will accept one of
 It will only change to 'firmware' or 'platform' if the system supports
 it.
 
+/sys/power/image_size controls the size of the image created by
+the suspend-to-disk mechanism.  It can be written a string
+representing a non-negative integer that will be used as an upper
+limit of the image size, in megabytes.  The suspend-to-disk mechanism will
+do its best to ensure the image size will not exceed that number.  However,
+if this turns out to be impossible, it will try to suspend anyway using the
+smallest image possible.  In particular, if "0" is written to this file, the
+suspend image will be as small as possible.
+
+Reading from this file will display the current image size limit, which
+is set to 500 MB by default.
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index b0d50840788e..cd0fcd89a6f0 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -27,6 +27,11 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state
 
 echo platform > /sys/power/disk; echo disk > /sys/power/state
 
+If you want to limit the suspend image size to N megabytes, do
+
+echo N > /sys/power/image_size
+
+before suspend (it is limited to 500 MB by default).
 
 Encrypted suspend image:
 ------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index e9db0d6b928a..cb536bbed9ff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -258,6 +258,13 @@ P: Ivan Kokshaysky
 M:	ink@jurassic.park.msu.ru
 S:	Maintained for 2.4; PCI support for 2.6.
 
+AMD GEODE PROCESSOR/CHIPSET SUPPORT
+P:	Jordan Crouse
+M:	info-linux@geode.amd.com
+L:	info-linux@geode.amd.com
+W:	http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
+S:	Supported
+
 APM DRIVER
 P:	Stephen Rothwell
 M:	sfr@canb.auug.org.au
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 786491f9ceb2..153337ff1d7b 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -40,6 +40,19 @@ config GENERIC_IOMAP
 	bool
 	default n
 
+config GENERIC_HARDIRQS
+	bool
+	default y
+
+config GENERIC_IRQ_PROBE
+	bool
+	default y
+
+config AUTO_IRQ_AFFINITY
+	bool
+	depends on SMP
+	default y
+
 source "init/Kconfig"
 
 
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 24ae9a366073..f3e98f837784 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -175,7 +175,6 @@ EXPORT_SYMBOL(up);
  */
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(synchronize_irq);
 EXPORT_SYMBOL(flush_tlb_mm);
 EXPORT_SYMBOL(flush_tlb_range);
 EXPORT_SYMBOL(flush_tlb_page);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index b6114f5c0d2b..76be5cf0de13 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -32,214 +32,25 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
-/*
- * Controller mappings for all interrupt sources:
- */
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
-	[0 ... NR_IRQS-1] = {
-		.handler = &no_irq_type,
-		.lock = SPIN_LOCK_UNLOCKED
-	}
-};
-
-static void register_irq_proc(unsigned int irq);
-
 volatile unsigned long irq_err_count;
 
-/*
- * Special irq handlers.
- */
-
-irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
-{
-	return IRQ_NONE;
-}
-
-/*
- * Generic no controller code
- */
-
-static void no_irq_enable_disable(unsigned int irq) { }
-static unsigned int no_irq_startup(unsigned int irq) { return 0; }
-
-static void
-no_irq_ack(unsigned int irq)
+void ack_bad_irq(unsigned int irq)
 {
 	irq_err_count++;
 	printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
 }
 
-struct hw_interrupt_type no_irq_type = {
-	.typename	= "none",
-	.startup	= no_irq_startup,
-	.shutdown	= no_irq_enable_disable,
-	.enable		= no_irq_enable_disable,
-	.disable	= no_irq_enable_disable,
-	.ack		= no_irq_ack,
-	.end		= no_irq_enable_disable,
-};
-
-int
-handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
-		 struct irqaction *action)
-{
-	int status = 1;	/* Force the "do bottom halves" bit */
-	int ret;
-
-	do {
-		if (!(action->flags & SA_INTERRUPT))
-			local_irq_enable();
-		else
-			local_irq_disable();
-
-		ret = action->handler(irq, action->dev_id, regs);
-		if (ret == IRQ_HANDLED)
-			status |= action->flags;
-		action = action->next;
-	} while (action);
-	if (status & SA_SAMPLE_RANDOM)
-		add_interrupt_randomness(irq);
-	local_irq_disable();
-
-	return status;
-}
-
-/*
- * Generic enable/disable code: this just calls
- * down into the PIC-specific version for the actual
- * hardware disable after having gotten the irq
- * controller lock.
- */
-void inline
-disable_irq_nosync(unsigned int irq)
-{
-	irq_desc_t *desc = irq_desc + irq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	if (!desc->depth++) {
-		desc->status |= IRQ_DISABLED;
-		desc->handler->disable(irq);
-	}
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/*
- * Synchronous version of the above, making sure the IRQ is
- * no longer running on any other IRQ..
- */
-void
-disable_irq(unsigned int irq)
-{
-	disable_irq_nosync(irq);
-	synchronize_irq(irq);
-}
-
-void
-enable_irq(unsigned int irq)
-{
-	irq_desc_t *desc = irq_desc + irq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	switch (desc->depth) {
-	case 1: {
-		unsigned int status = desc->status & ~IRQ_DISABLED;
-		desc->status = status;
-		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
-			desc->status = status | IRQ_REPLAY;
-			hw_resend_irq(desc->handler,irq);
-		}
-		desc->handler->enable(irq);
-		/* fall-through */
-	}
-	default:
-		desc->depth--;
-		break;
-	case 0:
-		printk(KERN_ERR "enable_irq() unbalanced from %p\n",
-		       __builtin_return_address(0));
-	}
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-int
-setup_irq(unsigned int irq, struct irqaction * new)
-{
-	int shared = 0;
-	struct irqaction *old, **p;
-	unsigned long flags;
-	irq_desc_t *desc = irq_desc + irq;
-
-	if (desc->handler == &no_irq_type)
-		return -ENOSYS;
-
-	/*
-	 * Some drivers like serial.c use request_irq() heavily,
-	 * so we have to be careful not to interfere with a
-	 * running system.
-	 */
-	if (new->flags & SA_SAMPLE_RANDOM) {
-		/*
-		 * This function might sleep, we want to call it first,
-		 * outside of the atomic block.
-		 * Yes, this might clear the entropy pool if the wrong
-		 * driver is attempted to be loaded, without actually
-		 * installing a new handler, but is this really a problem,
-		 * only the sysadmin is able to do this.
-		 */
-		rand_initialize_irq(irq);
-	}
-
-	/*
-	 * The following block of code has to be executed atomically
-	 */
-	spin_lock_irqsave(&desc->lock,flags);
-	p = &desc->action;
-	if ((old = *p) != NULL) {
-		/* Can't share interrupts unless both agree to */
-		if (!(old->flags & new->flags & SA_SHIRQ)) {
-			spin_unlock_irqrestore(&desc->lock,flags);
-			return -EBUSY;
-		}
-
-		/* add new interrupt at end of irq queue */
-		do {
-			p = &old->next;
-			old = *p;
-		} while (old);
-		shared = 1;
-	}
-
-	*p = new;
-
-	if (!shared) {
-		desc->depth = 0;
-		desc->status &=
-			~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
-		desc->handler->startup(irq);
-	}
-	spin_unlock_irqrestore(&desc->lock,flags);
-
-	return 0;
-}
-
-static struct proc_dir_entry * root_irq_dir;
-static struct proc_dir_entry * irq_dir[NR_IRQS];
-
 #ifdef CONFIG_SMP
-static struct proc_dir_entry * smp_affinity_entry[NR_IRQS];
 static char irq_user_affinity[NR_IRQS];
-static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
 
-static void
-select_smp_affinity(int irq)
+int
+select_smp_affinity(unsigned int irq)
 {
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
-		return;
+	if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
+		return 1;
 
 	while (!cpu_possible(cpu))
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
@@ -247,208 +58,10 @@ select_smp_affinity(int irq)
 
 	irq_affinity[irq] = cpumask_of_cpu(cpu);
 	irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
+	return 0;
 }
-
-static int
-irq_affinity_read_proc (char *page, char **start, off_t off,
-			int count, int *eof, void *data)
-{
-	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
-}
-
-static int
-irq_affinity_write_proc(struct file *file, const char __user *buffer,
-			unsigned long count, void *data)
-{
-	int irq = (long) data, full_count = count, err;
-	cpumask_t new_value;
-
-	if (!irq_desc[irq].handler->set_affinity)
-		return -EIO;
-
-	err = cpumask_parse(buffer, count, new_value);
-
-	/* The special value 0 means release control of the
-	   affinity to kernel. */
-	cpus_and(new_value, new_value, cpu_online_map);
-	if (cpus_empty(new_value)) {
-		irq_user_affinity[irq] = 0;
-		select_smp_affinity(irq);
-	}
-	/* Do not allow disabling IRQs completely - it's a too easy
-	   way to make the system unusable accidentally :-) At least
-	   one online CPU still has to be targeted. */
-	else {
-		irq_affinity[irq] = new_value;
-		irq_user_affinity[irq] = 1;
-		irq_desc[irq].handler->set_affinity(irq, new_value);
-	}
-
-	return full_count;
-}
-
 #endif /* CONFIG_SMP */
 
-#define MAX_NAMELEN 10
-
-static void
-register_irq_proc (unsigned int irq)
-{
-	char name [MAX_NAMELEN];
-
-	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
-	    irq_dir[irq])
-		return;
-
-	memset(name, 0, MAX_NAMELEN);
-	sprintf(name, "%d", irq);
-
-	/* create /proc/irq/1234 */
-	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
-
-#ifdef CONFIG_SMP
-	if (irq_desc[irq].handler->set_affinity) {
-		struct proc_dir_entry *entry;
-		/* create /proc/irq/1234/smp_affinity */
-		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
-
-		if (entry) {
-			entry->nlink = 1;
-			entry->data = (void *)(long)irq;
-			entry->read_proc = irq_affinity_read_proc;
-			entry->write_proc = irq_affinity_write_proc;
-		}
-
-		smp_affinity_entry[irq] = entry;
-	}
-#endif
-}
-
-void
-init_irq_proc (void)
-{
-	int i;
-
-	/* create /proc/irq */
-	root_irq_dir = proc_mkdir("irq", NULL);
-
-#ifdef CONFIG_SMP
-	/* create /proc/irq/prof_cpu_mask */
-	create_prof_cpu_mask(root_irq_dir);
-#endif
-
-	/*
-	 * Create entries for all existing IRQs.
-	 */
-	for (i = 0; i < ACTUAL_NR_IRQS; i++) {
-		if (irq_desc[i].handler == &no_irq_type)
-			continue;
-		register_irq_proc(i);
-	}
-}
-
-int
-request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
-	    unsigned long irqflags, const char * devname, void *dev_id)
-{
-	int retval;
-	struct irqaction * action;
-
-	if (irq >= ACTUAL_NR_IRQS)
-		return -EINVAL;
-	if (!handler)
-		return -EINVAL;
-
-#if 1
-	/*
-	 * Sanity-check: shared interrupts should REALLY pass in
-	 * a real dev-ID, otherwise we'll have trouble later trying
-	 * to figure out which interrupt is which (messes up the
-	 * interrupt freeing logic etc).
-	 */
-	if ((irqflags & SA_SHIRQ) && !dev_id) {
-		printk(KERN_ERR
-		       "Bad boy: %s (at %p) called us without a dev_id!\n",
-		       devname, __builtin_return_address(0));
-	}
-#endif
-
-	action = (struct irqaction *)
-			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
-	if (!action)
-		return -ENOMEM;
-
-	action->handler = handler;
-	action->flags = irqflags;
-	cpus_clear(action->mask);
-	action->name = devname;
-	action->next = NULL;
-	action->dev_id = dev_id;
-
-#ifdef CONFIG_SMP
-	select_smp_affinity(irq);
-#endif
-
-	retval = setup_irq(irq, action);
-	if (retval)
-		kfree(action);
-	return retval;
-}
-
-EXPORT_SYMBOL(request_irq);
-
-void
-free_irq(unsigned int irq, void *dev_id)
-{
-	irq_desc_t *desc;
-	struct irqaction **p;
-	unsigned long flags;
-
-	if (irq >= ACTUAL_NR_IRQS) {
-		printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
-		return;
-	}
-
-	desc = irq_desc + irq;
-	spin_lock_irqsave(&desc->lock,flags);
-	p = &desc->action;
-	for (;;) {
-		struct irqaction * action = *p;
-		if (action) {
-			struct irqaction **pp = p;
-			p = &action->next;
-			if (action->dev_id != dev_id)
-				continue;
-
-			/* Found - now remove it from the list of entries. */
-			*pp = action->next;
-			if (!desc->action) {
-				desc->status |= IRQ_DISABLED;
-				desc->handler->shutdown(irq);
-			}
-			spin_unlock_irqrestore(&desc->lock,flags);
-
-#ifdef CONFIG_SMP
-			/* Wait to make sure it's not being used on
-			   another CPU. */
-			while (desc->status & IRQ_INPROGRESS)
-				barrier();
-#endif
-			kfree(action);
-			return;
-		}
-		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-		spin_unlock_irqrestore(&desc->lock,flags);
-		return;
-	}
-}
-
-EXPORT_SYMBOL(free_irq);
-
 int
 show_interrupts(struct seq_file *p, void *v)
 {
@@ -531,10 +144,6 @@ handle_irq(int irq, struct pt_regs * regs)
 	 * 0 return value means that this irq is already being
 	 * handled by some other CPU. (or is disabled)
 	 */
-	int cpu = smp_processor_id();
-	irq_desc_t *desc = irq_desc + irq;
-	struct irqaction * action;
-	unsigned int status;
 	static unsigned int illegal_count=0;
 
 	if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) {
@@ -546,229 +155,8 @@ handle_irq(int irq, struct pt_regs * regs)
 	}
 
 	irq_enter();
-	kstat_cpu(cpu).irqs[irq]++;
-	spin_lock_irq(&desc->lock); /* mask also the higher prio events */
-	desc->handler->ack(irq);
-	/*
-	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
-	 * WAITING is used by probe to mark irqs that are being tested.
-	 */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-		action = desc->action;
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	 * Since we set PENDING, if another processor is handling
-	 * a different instance of this same irq, the other processor
-	 * will take care of it.
-	 */
-	if (!action)
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in handle_irq
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		spin_unlock(&desc->lock);
-		handle_IRQ_event(irq, regs, action);
-		spin_lock(&desc->lock);
-
-		if (!(desc->status & IRQ_PENDING)
-		    || (desc->status & IRQ_LEVEL))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-	desc->status &= ~IRQ_INPROGRESS;
-out:
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	desc->handler->end(irq);
-	spin_unlock(&desc->lock);
-
+	local_irq_disable();
+	__do_IRQ(irq, regs);
+	local_irq_enable();
 	irq_exit();
 }
-
-/*
- * IRQ autodetection code..
- *
- * This depends on the fact that any interrupt that
- * comes in on to an unassigned handler will get stuck
- * with "IRQ_WAITING" cleared and the interrupt
- * disabled.
- */
-unsigned long
-probe_irq_on(void)
-{
-	int i;
-	irq_desc_t *desc;
-	unsigned long delay;
-	unsigned long val;
-
-	/* Something may have generated an irq long ago and we want to
-	   flush such a longstanding irq before considering it as spurious. */
-	for (i = NR_IRQS-1; i >= 0; i--) {
-		desc = irq_desc + i;
-
-		spin_lock_irq(&desc->lock);
-		if (!irq_desc[i].action)
-			irq_desc[i].handler->startup(i);
-		spin_unlock_irq(&desc->lock);
-	}
-
-	/* Wait for longstanding interrupts to trigger. */
-	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
-		/* about 20ms delay */ barrier();
-
-	/* enable any unassigned irqs (we must startup again here because
-	   if a longstanding irq happened in the previous stage, it may have
-	   masked itself) first, enable any unassigned irqs. */
-	for (i = NR_IRQS-1; i >= 0; i--) {
-		desc = irq_desc + i;
-
-		spin_lock_irq(&desc->lock);
-		if (!desc->action) {
-			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
-			if (desc->handler->startup(i))
-				desc->status |= IRQ_PENDING;
-		}
-		spin_unlock_irq(&desc->lock);
-	}
-
-	/*
-	 * Wait for spurious interrupts to trigger
-	 */
-	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
-		/* about 100ms delay */ barrier();
-
-	/*
-	 * Now filter out any obviously spurious interrupts
-	 */
-	val = 0;
-	for (i=0; i<NR_IRQS; i++) {
-		irq_desc_t *desc = irq_desc + i;
-		unsigned int status;
-
-		spin_lock_irq(&desc->lock);
-		status = desc->status;
-
-		if (status & IRQ_AUTODETECT) {
-			/* It triggered already - consider it spurious. */
-			if (!(status & IRQ_WAITING)) {
-				desc->status = status & ~IRQ_AUTODETECT;
-				desc->handler->shutdown(i);
-			} else
-				if (i < 32)
-					val |= 1 << i;
-		}
-		spin_unlock_irq(&desc->lock);
-	}
-
-	return val;
-}
-
-EXPORT_SYMBOL(probe_irq_on);
-
-/*
- * Return a mask of triggered interrupts (this
- * can handle only legacy ISA interrupts).
- */
-unsigned int
-probe_irq_mask(unsigned long val)
-{
-	int i;
-	unsigned int mask;
-
-	mask = 0;
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc_t *desc = irq_desc + i;
-		unsigned int status;
-
-		spin_lock_irq(&desc->lock);
-		status = desc->status;
-
-		if (status & IRQ_AUTODETECT) {
-			/* We only react to ISA interrupts */
-			if (!(status & IRQ_WAITING)) {
-				if (i < 16)
-					mask |= 1 << i;
-			}
-
-			desc->status = status & ~IRQ_AUTODETECT;
-			desc->handler->shutdown(i);
-		}
-		spin_unlock_irq(&desc->lock);
-	}
-
-	return mask & val;
-}
-
-/*
- * Get the result of the IRQ probe.. A negative result means that
- * we have several candidates (but we return the lowest-numbered
- * one).
- */
-
-int
-probe_irq_off(unsigned long val)
-{
-	int i, irq_found, nr_irqs;
-
-	nr_irqs = 0;
-	irq_found = 0;
-	for (i=0; i<NR_IRQS; i++) {
-		irq_desc_t *desc = irq_desc + i;
-		unsigned int status;
-
-		spin_lock_irq(&desc->lock);
-		status = desc->status;
-
-		if (status & IRQ_AUTODETECT) {
-			if (!(status & IRQ_WAITING)) {
-				if (!nr_irqs)
-					irq_found = i;
-				nr_irqs++;
-			}
-			desc->status = status & ~IRQ_AUTODETECT;
-			desc->handler->shutdown(i);
-		}
-		spin_unlock_irq(&desc->lock);
-	}
-
-	if (nr_irqs > 1)
-		irq_found = -irq_found;
-	return irq_found;
-}
-
-EXPORT_SYMBOL(probe_irq_off);
-
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
-	/* is there anything to synchronize with? */
-	if (!irq_desc[irq].action)
-		return;
-
-	while (irq_desc[irq].status & IRQ_INPROGRESS)
-		barrier();
-}
-#endif
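
The round-robin policy that the rewritten select_smp_affinity() keeps
(spreading successive IRQs over the possible CPUs) can be modelled in
isolation.  This standalone sketch is only an illustration: possible_mask
is a hypothetical stand-in for cpu_possible(), and NR_CPUS is shrunk for
the demo.

/*
 * Illustrative model of the round-robin CPU selection used by
 * select_smp_affinity() above; possible_mask stands in for
 * cpu_possible() and NR_CPUS is reduced for the example.
 */
#include <stdio.h>

#define NR_CPUS 8

static unsigned possible_mask = 0x0b;	/* CPUs 0, 1 and 3 exist */

static int next_cpu(void)
{
	static int last_cpu;
	int cpu = last_cpu + 1;

	while (!(possible_mask & (1u << cpu)))
		cpu = (cpu < (NR_CPUS - 1) ? cpu + 1 : 0);

	last_cpu = cpu;
	return cpu;
}

int main(void)
{
	int irq;

	/* Successive IRQs are spread over CPUs 1, 3, 0, 1, 3, 0, ... */
	for (irq = 0; irq < 6; irq++)
		printf("irq %d -> cpu %d\n", irq, next_cpu());
	return 0;
}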
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index b72e6a91a639..34528da98817 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -569,12 +569,6 @@ gdb_cris_strtol (const char *s, char **endptr, int base)
 	return x;
 }
 
-int
-double_this(int x)
-{
-	return 2 * x;
-}
-
 /********************************* Register image ****************************/
 /* Copy the content of a register image into another. The size n is
    the size of the register image. Due to struct assignment generation of
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
index 981c2c7dec0d..422f30ede575 100644
--- a/arch/frv/kernel/Makefile
+++ b/arch/frv/kernel/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_FUJITSU_MB93493) += irq-mb93493.o
 obj-$(CONFIG_PM)		+= pm.o cmode.o
 obj-$(CONFIG_MB93093_PDK)	+= pm-mb93093.o
 obj-$(CONFIG_SYSCTL)		+= sysctl.o
+obj-$(CONFIG_FUTEX)		+= futex.o
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index ad10ea595459..5f6548388b74 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1076,7 +1076,7 @@ __entry_work_notifysig:
 	LEDS		0x6410
 	ori.p		gr4,#0,gr8
 	call		do_notify_resume
-	bra		__entry_return_direct
+	bra		__entry_resume_userspace
 
 	# perform syscall entry tracing
 __syscall_trace_entry:
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
new file mode 100644
index 000000000000..eae874a970c6
--- /dev/null
+++ b/arch/frv/kernel/futex.c
@@ -0,0 +1,242 @@
+/* futex.c: futex operations
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/futex.h>
+#include <asm/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+/*
+ * the various futex operations; MMU fault checking is ignored under no-MMU
+ * conditions
+ */
+static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval)
+{
+	int oldval, ret;
+
+	asm("0:					\n"
+	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
+	    "	ckeq	icc3,cc7		\n"
+	    "1:	ld.p	%M0,%1			\n"	/* LD.P/ORCR must be atomic */
+	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
+	    "2:	cst.p	%3,%M0		,cc3,#1	\n"
+	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
+	    "	beq	icc3,#0,0b		\n"
+	    "	setlos	0,%2			\n"
+	    "3:					\n"
+	    ".subsection 2			\n"
+	    "4:	setlos	%5,%2			\n"
+	    "	bra	3b			\n"
+	    ".previous				\n"
+	    ".section __ex_table,\"a\"		\n"
+	    "	.balign	8			\n"
+	    "	.long	1b,4b			\n"
+	    "	.long	2b,4b			\n"
+	    ".previous"
+	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+	    : "3"(oparg), "i"(-EFAULT)
+	    : "memory", "cc7", "cc3", "icc3"
+	    );
+
+	*_oldval = oldval;
+	return ret;
+}
+
+static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval)
+{
+	int oldval, ret;
+
+	asm("0:					\n"
+	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
+	    "	ckeq	icc3,cc7		\n"
+	    "1:	ld.p	%M0,%1			\n"	/* LD.P/ORCR must be atomic */
+	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
+	    "	add	%1,%3,%3		\n"
+	    "2:	cst.p	%3,%M0		,cc3,#1	\n"
+	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
+	    "	beq	icc3,#0,0b		\n"
+	    "	setlos	0,%2			\n"
+	    "3:					\n"
+	    ".subsection 2			\n"
+	    "4:	setlos	%5,%2			\n"
+	    "	bra	3b			\n"
+	    ".previous				\n"
+	    ".section __ex_table,\"a\"		\n"
+	    "	.balign	8			\n"
+	    "	.long	1b,4b			\n"
+	    "	.long	2b,4b			\n"
+	    ".previous"
+	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+	    : "3"(oparg), "i"(-EFAULT)
+	    : "memory", "cc7", "cc3", "icc3"
+	    );
+
+	*_oldval = oldval;
+	return ret;
+}
+
+static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval)
+{
+	int oldval, ret;
+
+	asm("0:					\n"
+	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
+	    "	ckeq	icc3,cc7		\n"
+	    "1:	ld.p	%M0,%1			\n"	/* LD.P/ORCR must be atomic */
+	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
+	    "	or	%1,%3,%3		\n"
+	    "2:	cst.p	%3,%M0		,cc3,#1	\n"
+	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
+	    "	beq	icc3,#0,0b		\n"
+	    "	setlos	0,%2			\n"
+	    "3:					\n"
+	    ".subsection 2			\n"
+	    "4:	setlos	%5,%2			\n"
+	    "	bra	3b			\n"
+	    ".previous				\n"
+	    ".section __ex_table,\"a\"		\n"
+	    "	.balign	8			\n"
+	    "	.long	1b,4b			\n"
+	    "	.long	2b,4b			\n"
+	    ".previous"
+	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+	    : "3"(oparg), "i"(-EFAULT)
+	    : "memory", "cc7", "cc3", "icc3"
+	    );
+
+	*_oldval = oldval;
+	return ret;
+}
+
+static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval)
+{
+	int oldval, ret;
+
+	asm("0:					\n"
+	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
+	    "	ckeq	icc3,cc7		\n"
+	    "1:	ld.p	%M0,%1			\n"	/* LD.P/ORCR must be atomic */
+	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
+	    "	and	%1,%3,%3		\n"
+	    "2:	cst.p	%3,%M0		,cc3,#1	\n"
+	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
+	    "	beq	icc3,#0,0b		\n"
+	    "	setlos	0,%2			\n"
+	    "3:					\n"
+	    ".subsection 2			\n"
+	    "4:	setlos	%5,%2			\n"
+	    "	bra	3b			\n"
+	    ".previous				\n"
+	    ".section __ex_table,\"a\"		\n"
+	    "	.balign	8			\n"
+	    "	.long	1b,4b			\n"
+	    "	.long	2b,4b			\n"
+	    ".previous"
+	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+	    : "3"(oparg), "i"(-EFAULT)
+	    : "memory", "cc7", "cc3", "icc3"
+	    );
+
+	*_oldval = oldval;
+	return ret;
+}
+
+static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval)
+{
+	int oldval, ret;
+
+	asm("0:					\n"
+	    "	orcc	gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
+	    "	ckeq	icc3,cc7		\n"
+	    "1:	ld.p	%M0,%1			\n"	/* LD.P/ORCR must be atomic */
+	    "	orcr	cc7,cc7,cc3		\n"	/* set CC3 to true */
+	    "	xor	%1,%3,%3		\n"
+	    "2:	cst.p	%3,%M0		,cc3,#1	\n"
+	    "	corcc	gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
+	    "	beq	icc3,#0,0b		\n"
+	    "	setlos	0,%2			\n"
+	    "3:					\n"
+	    ".subsection 2			\n"
+	    "4:	setlos	%5,%2			\n"
+	    "	bra	3b			\n"
+	    ".previous				\n"
+	    ".section __ex_table,\"a\"		\n"
+	    "	.balign	8			\n"
+	    "	.long	1b,4b			\n"
+	    "	.long	2b,4b			\n"
+	    ".previous"
+	    : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+	    : "3"(oparg), "i"(-EFAULT)
+	    : "memory", "cc7", "cc3", "icc3"
+	    );
+
+	*_oldval = oldval;
+	return ret;
+}
+
+/*****************************************************************************/
+/*
+ * do the futex operations
+ */
+int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
+		break;
+	case FUTEX_OP_ADD:
+		ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
+		break;
+	case FUTEX_OP_OR:
+		ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
+		break;
+	case FUTEX_OP_ANDN:
+		ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
+		break;
+	case FUTEX_OP_XOR:
+		ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
+		break;
+	default:
+		ret = -ENOSYS;
+		break;
+	}
+
+	dec_preempt_count();
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS; break;
+		}
+	}
+
+	return ret;
+
+} /* end futex_atomic_op_inuser() */
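
For context, the encoded_op word unpacked by futex_atomic_op_inuser()
above is built by user space (via the FUTEX_WAKE_OP operation of
sys_futex) from four packed fields.  This user-space sketch mirrors the
same shifts to make the layout visible; the constants are taken to match
<linux/futex.h>:

/*
 * Sketch of the encoded_op layout decoded by futex_atomic_op_inuser()
 * above: op in bits 28-31, cmp in bits 24-27, then two signed 12-bit
 * arguments.  Constants mirror those in <linux/futex.h>.
 */
#include <stdio.h>

#define FUTEX_OP_ADD		1
#define FUTEX_OP_CMP_GT		4

#define FUTEX_OP(op, oparg, cmp, cmparg)			\
	((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24) |		\
	 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

int main(void)
{
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	/* The same extraction the kernel performs; the left/right
	 * shifts sign-extend the two 12-bit argument fields. */
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	/* prints: op=1 cmp=4 oparg=1 cmparg=0 */
	return 0;
}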
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index d4ccc0728dfe..5b7146f54fd5 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -35,7 +35,7 @@ struct fdpic_func_descriptor {
 	unsigned long	GOT;
 };
 
-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+static int do_signal(sigset_t *oldset);
 
 /*
  * Atomically swap in the new signal mask, and wait for a signal.
@@ -55,7 +55,7 @@ asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask)
 	while (1) {
 		current->state = TASK_INTERRUPTIBLE;
 		schedule();
-		if (do_signal(__frame, &saveset))
+		if (do_signal(&saveset))
 			/* return the signal number as the return value of this function
 			 * - this is an utterly evil hack. syscalls should not invoke do_signal()
 			 *   as entry.S sets regs->gr8 to the return value of the system call
@@ -91,7 +91,7 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
 	while (1) {
 		current->state = TASK_INTERRUPTIBLE;
 		schedule();
-		if (do_signal(__frame, &saveset))
+		if (do_signal(&saveset))
 			/* return the signal number as the return value of this function
 			 * - this is an utterly evil hack. syscalls should not invoke do_signal()
 			 *   as entry.S sets regs->gr8 to the return value of the system call
@@ -276,13 +276,12 @@ static int setup_sigcontext(struct sigcontext __user *sc, unsigned long mask)
276 * Determine which stack to use.. 276 * Determine which stack to use..
277 */ 277 */
278static inline void __user *get_sigframe(struct k_sigaction *ka, 278static inline void __user *get_sigframe(struct k_sigaction *ka,
279 struct pt_regs *regs,
280 size_t frame_size) 279 size_t frame_size)
281{ 280{
282 unsigned long sp; 281 unsigned long sp;
283 282
284 /* Default to using normal stack */ 283 /* Default to using normal stack */
285 sp = regs->sp; 284 sp = __frame->sp;
286 285
287 /* This is the X/Open sanctioned signal stack switching. */ 286 /* This is the X/Open sanctioned signal stack switching. */
288 if (ka->sa.sa_flags & SA_ONSTACK) { 287 if (ka->sa.sa_flags & SA_ONSTACK) {
@@ -291,18 +290,19 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
291 } 290 }
292 291
293 return (void __user *) ((sp - frame_size) & ~7UL); 292 return (void __user *) ((sp - frame_size) & ~7UL);
293
294} /* end get_sigframe() */ 294} /* end get_sigframe() */
295 295
296/*****************************************************************************/ 296/*****************************************************************************/
297/* 297/*
298 * 298 *
299 */ 299 */
300static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) 300static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
301{ 301{
302 struct sigframe __user *frame; 302 struct sigframe __user *frame;
303 int rsig; 303 int rsig;
304 304
305 frame = get_sigframe(ka, regs, sizeof(*frame)); 305 frame = get_sigframe(ka, sizeof(*frame));
306 306
307 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 307 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
308 goto give_sigsegv; 308 goto give_sigsegv;
@@ -346,47 +346,51 @@ static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct p
346 } 346 }
347 347
348 /* set up registers for signal handler */ 348 /* set up registers for signal handler */
349 regs->sp = (unsigned long) frame; 349 __frame->sp = (unsigned long) frame;
350 regs->lr = (unsigned long) &frame->retcode; 350 __frame->lr = (unsigned long) &frame->retcode;
351 regs->gr8 = sig; 351 __frame->gr8 = sig;
352 352
353 if (get_personality & FDPIC_FUNCPTRS) { 353 if (get_personality & FDPIC_FUNCPTRS) {
354 struct fdpic_func_descriptor __user *funcptr = 354 struct fdpic_func_descriptor __user *funcptr =
355 (struct fdpic_func_descriptor *) ka->sa.sa_handler; 355 (struct fdpic_func_descriptor *) ka->sa.sa_handler;
356 __get_user(regs->pc, &funcptr->text); 356 __get_user(__frame->pc, &funcptr->text);
357 __get_user(regs->gr15, &funcptr->GOT); 357 __get_user(__frame->gr15, &funcptr->GOT);
358 } else { 358 } else {
359 regs->pc = (unsigned long) ka->sa.sa_handler; 359 __frame->pc = (unsigned long) ka->sa.sa_handler;
360 regs->gr15 = 0; 360 __frame->gr15 = 0;
361 } 361 }
362 362
363 set_fs(USER_DS); 363 set_fs(USER_DS);
364 364
365 /* the tracer may want to single-step inside the handler */
366 if (test_thread_flag(TIF_SINGLESTEP))
367 ptrace_notify(SIGTRAP);
368
365#if DEBUG_SIG 369#if DEBUG_SIG
366 printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", 370 printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
367 sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); 371 sig, current->comm, current->pid, frame, __frame->pc,
372 frame->pretcode);
368#endif 373#endif
369 374
370 return; 375 return 1;
371 376
372give_sigsegv: 377give_sigsegv:
373 if (sig == SIGSEGV)
374 ka->sa.sa_handler = SIG_DFL;
375
376 force_sig(SIGSEGV, current); 378 force_sig(SIGSEGV, current);
379 return 0;
380
377} /* end setup_frame() */ 381} /* end setup_frame() */
378 382
379/*****************************************************************************/ 383/*****************************************************************************/
380/* 384/*
381 * 385 *
382 */ 386 */
383static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 387static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
384 sigset_t *set, struct pt_regs * regs) 388 sigset_t *set)
385{ 389{
386 struct rt_sigframe __user *frame; 390 struct rt_sigframe __user *frame;
387 int rsig; 391 int rsig;
388 392
389 frame = get_sigframe(ka, regs, sizeof(*frame)); 393 frame = get_sigframe(ka, sizeof(*frame));
390 394
391 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 395 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
392 goto give_sigsegv; 396 goto give_sigsegv;
@@ -409,7 +413,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
409 if (__put_user(0, &frame->uc.uc_flags) || 413 if (__put_user(0, &frame->uc.uc_flags) ||
410 __put_user(0, &frame->uc.uc_link) || 414 __put_user(0, &frame->uc.uc_link) ||
411 __put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) || 415 __put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) ||
412 __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags) || 416 __put_user(sas_ss_flags(__frame->sp), &frame->uc.uc_stack.ss_flags) ||
413 __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size)) 417 __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size))
414 goto give_sigsegv; 418 goto give_sigsegv;
415 419
@@ -440,34 +444,38 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
440 } 444 }
441 445
442 /* Set up registers for signal handler */ 446 /* Set up registers for signal handler */
443 regs->sp = (unsigned long) frame; 447 __frame->sp = (unsigned long) frame;
444 regs->lr = (unsigned long) &frame->retcode; 448 __frame->lr = (unsigned long) &frame->retcode;
445 regs->gr8 = sig; 449 __frame->gr8 = sig;
446 regs->gr9 = (unsigned long) &frame->info; 450 __frame->gr9 = (unsigned long) &frame->info;
447 451
448 if (get_personality & FDPIC_FUNCPTRS) { 452 if (get_personality & FDPIC_FUNCPTRS) {
449 struct fdpic_func_descriptor *funcptr = 453 struct fdpic_func_descriptor *funcptr =
450 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; 454 (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
451 __get_user(regs->pc, &funcptr->text); 455 __get_user(__frame->pc, &funcptr->text);
452 __get_user(regs->gr15, &funcptr->GOT); 456 __get_user(__frame->gr15, &funcptr->GOT);
453 } else { 457 } else {
454 regs->pc = (unsigned long) ka->sa.sa_handler; 458 __frame->pc = (unsigned long) ka->sa.sa_handler;
455 regs->gr15 = 0; 459 __frame->gr15 = 0;
456 } 460 }
457 461
458 set_fs(USER_DS); 462 set_fs(USER_DS);
459 463
464 /* the tracer may want to single-step inside the handler */
465 if (test_thread_flag(TIF_SINGLESTEP))
466 ptrace_notify(SIGTRAP);
467
460#if DEBUG_SIG 468#if DEBUG_SIG
461 printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", 469 printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
462 sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); 470 sig, current->comm, current->pid, frame, __frame->pc,
471 frame->pretcode);
463#endif 472#endif
464 473
465 return; 474 return 1;
466 475
467give_sigsegv: 476give_sigsegv:
468 if (sig == SIGSEGV)
469 ka->sa.sa_handler = SIG_DFL;
470 force_sig(SIGSEGV, current); 477 force_sig(SIGSEGV, current);
478 return 0;
471 479
472} /* end setup_rt_frame() */ 480} /* end setup_rt_frame() */
473 481
@@ -475,43 +483,51 @@ give_sigsegv:
475/* 483/*
476 * OK, we're invoking a handler 484 * OK, we're invoking a handler
477 */ 485 */
478static void handle_signal(unsigned long sig, siginfo_t *info, 486static int handle_signal(unsigned long sig, siginfo_t *info,
479 struct k_sigaction *ka, sigset_t *oldset, 487 struct k_sigaction *ka, sigset_t *oldset)
480 struct pt_regs *regs)
481{ 488{
489 int ret;
490
482 /* Are we from a system call? */ 491 /* Are we from a system call? */
483 if (in_syscall(regs)) { 492 if (in_syscall(__frame)) {
484 /* If so, check system call restarting.. */ 493 /* If so, check system call restarting.. */
485 switch (regs->gr8) { 494 switch (__frame->gr8) {
486 case -ERESTART_RESTARTBLOCK: 495 case -ERESTART_RESTARTBLOCK:
487 case -ERESTARTNOHAND: 496 case -ERESTARTNOHAND:
488 regs->gr8 = -EINTR; 497 __frame->gr8 = -EINTR;
489 break; 498 break;
490 499
491 case -ERESTARTSYS: 500 case -ERESTARTSYS:
492 if (!(ka->sa.sa_flags & SA_RESTART)) { 501 if (!(ka->sa.sa_flags & SA_RESTART)) {
493 regs->gr8 = -EINTR; 502 __frame->gr8 = -EINTR;
494 break; 503 break;
495 } 504 }
505
496 /* fallthrough */ 506 /* fallthrough */
497 case -ERESTARTNOINTR: 507 case -ERESTARTNOINTR:
498 regs->gr8 = regs->orig_gr8; 508 __frame->gr8 = __frame->orig_gr8;
499 regs->pc -= 4; 509 __frame->pc -= 4;
500 } 510 }
501 } 511 }
502 512
503 /* Set up the stack frame */ 513 /* Set up the stack frame */
504 if (ka->sa.sa_flags & SA_SIGINFO) 514 if (ka->sa.sa_flags & SA_SIGINFO)
505 setup_rt_frame(sig, ka, info, oldset, regs); 515 ret = setup_rt_frame(sig, ka, info, oldset);
506 else 516 else
507 setup_frame(sig, ka, oldset, regs); 517 ret = setup_frame(sig, ka, oldset);
518
519 if (ret) {
520 spin_lock_irq(&current->sighand->siglock);
521 sigorsets(&current->blocked, &current->blocked,
522 &ka->sa.sa_mask);
523 if (!(ka->sa.sa_flags & SA_NODEFER))
524 sigaddset(&current->blocked, sig);
525 recalc_sigpending();
526 spin_unlock_irq(&current->sighand->siglock);
527 }
528
529 return ret;
508 530
509 spin_lock_irq(&current->sighand->siglock);
510 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
511 if (!(ka->sa.sa_flags & SA_NODEFER))
512 sigaddset(&current->blocked, sig);
513 recalc_sigpending();
514 spin_unlock_irq(&current->sighand->siglock);
515} /* end handle_signal() */ 531} /* end handle_signal() */
516 532
517/*****************************************************************************/ 533/*****************************************************************************/
@@ -520,7 +536,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
520 * want to handle. Thus you cannot kill init even with a SIGKILL even by 536 * want to handle. Thus you cannot kill init even with a SIGKILL even by
521 * mistake. 537 * mistake.
522 */ 538 */
523int do_signal(struct pt_regs *regs, sigset_t *oldset) 539static int do_signal(sigset_t *oldset)
524{ 540{
525 struct k_sigaction ka; 541 struct k_sigaction ka;
526 siginfo_t info; 542 siginfo_t info;
@@ -532,7 +548,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
532 * kernel mode. Just return without doing anything 548 * kernel mode. Just return without doing anything
533 * if so. 549 * if so.
534 */ 550 */
535 if (!user_mode(regs)) 551 if (!user_mode(__frame))
536 return 1; 552 return 1;
537 553
538 if (try_to_freeze()) 554 if (try_to_freeze())
@@ -541,30 +557,29 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
541 if (!oldset) 557 if (!oldset)
542 oldset = &current->blocked; 558 oldset = &current->blocked;
543 559
544 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 560 signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
545 if (signr > 0) { 561 if (signr > 0)
546 handle_signal(signr, &info, &ka, oldset, regs); 562 return handle_signal(signr, &info, &ka, oldset);
547 return 1;
548 }
549 563
550 no_signal: 564no_signal:
551 /* Did we come from a system call? */ 565 /* Did we come from a system call? */
552 if (regs->syscallno >= 0) { 566 if (__frame->syscallno >= 0) {
553 /* Restart the system call - no handlers present */ 567 /* Restart the system call - no handlers present */
554 if (regs->gr8 == -ERESTARTNOHAND || 568 if (__frame->gr8 == -ERESTARTNOHAND ||
555 regs->gr8 == -ERESTARTSYS || 569 __frame->gr8 == -ERESTARTSYS ||
556 regs->gr8 == -ERESTARTNOINTR) { 570 __frame->gr8 == -ERESTARTNOINTR) {
557 regs->gr8 = regs->orig_gr8; 571 __frame->gr8 = __frame->orig_gr8;
558 regs->pc -= 4; 572 __frame->pc -= 4;
559 } 573 }
560 574
561 if (regs->gr8 == -ERESTART_RESTARTBLOCK){ 575 if (__frame->gr8 == -ERESTART_RESTARTBLOCK){
562 regs->gr8 = __NR_restart_syscall; 576 __frame->gr8 = __NR_restart_syscall;
563 regs->pc -= 4; 577 __frame->pc -= 4;
564 } 578 }
565 } 579 }
566 580
567 return 0; 581 return 0;
582
568} /* end do_signal() */ 583} /* end do_signal() */
569 584
570/*****************************************************************************/ 585/*****************************************************************************/
@@ -580,6 +595,6 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
580 595
581 /* deal with pending signal delivery */ 596 /* deal with pending signal delivery */
582 if (thread_info_flags & _TIF_SIGPENDING) 597 if (thread_info_flags & _TIF_SIGPENDING)
583 do_signal(__frame, NULL); 598 do_signal(NULL);
584 599
585} /* end do_notify_resume() */ 600} /* end do_notify_resume() */
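
The blocking step that moves into handle_signal() above - sigorsets() with sa_mask plus the conditional sigaddset() - is what implements SA_NODEFER as seen from userspace: without the flag, the delivered signal stays masked while its own handler runs. A hypothetical userspace demo of that observable behaviour, not kernel code:

	#include <signal.h>
	#include <stdio.h>

	static void handler(int sig)
	{
		sigset_t cur;

		sigprocmask(SIG_BLOCK, NULL, &cur); /* query the current mask */
		/* printf() is not async-signal-safe; fine for a demo */
		printf("in handler: SIGUSR1 is %sblocked\n",
		       sigismember(&cur, SIGUSR1) ? "" : "not ");
	}

	int main(void)
	{
		struct sigaction sa = { .sa_handler = handler };

		sigemptyset(&sa.sa_mask);
		sa.sa_flags = 0;		/* kernel adds sig to ->blocked */
		sigaction(SIGUSR1, &sa, NULL);
		raise(SIGUSR1);			/* prints "is blocked" */

		sa.sa_flags = SA_NODEFER;	/* kernel skips the sigaddset() */
		sigaction(SIGUSR1, &sa, NULL);
		raise(SIGUSR1);			/* prints "is not blocked" */
		return 0;
	}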
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 6004bb0795e0..968fabd8723f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -464,7 +464,6 @@ config NUMA
464 depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI)) 464 depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
465 default n if X86_PC 465 default n if X86_PC
466 default y if (X86_NUMAQ || X86_SUMMIT) 466 default y if (X86_NUMAQ || X86_SUMMIT)
467 select SPARSEMEM_STATIC
468 467
469# Need comments to help the hapless user trying to turn on NUMA support 468# Need comments to help the hapless user trying to turn on NUMA support
470comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support" 469comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
@@ -493,6 +492,10 @@ config HAVE_ARCH_ALLOC_REMAP
493 depends on NUMA 492 depends on NUMA
494 default y 493 default y
495 494
495config ARCH_FLATMEM_ENABLE
496 def_bool y
497 depends on (ARCH_SELECT_MEMORY_MODEL && X86_PC)
498
496config ARCH_DISCONTIGMEM_ENABLE 499config ARCH_DISCONTIGMEM_ENABLE
497 def_bool y 500 def_bool y
498 depends on NUMA 501 depends on NUMA
@@ -503,7 +506,8 @@ config ARCH_DISCONTIGMEM_DEFAULT
503 506
504config ARCH_SPARSEMEM_ENABLE 507config ARCH_SPARSEMEM_ENABLE
505 def_bool y 508 def_bool y
506 depends on NUMA 509 depends on (NUMA || (X86_PC && EXPERIMENTAL))
510 select SPARSEMEM_STATIC
507 511
508config ARCH_SELECT_MEMORY_MODEL 512config ARCH_SELECT_MEMORY_MODEL
509 def_bool y 513 def_bool y
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index 53bbb3c008ee..79603b3471f9 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -39,6 +39,7 @@ config M386
39 - "Winchip-2" for IDT Winchip 2. 39 - "Winchip-2" for IDT Winchip 2.
40 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities. 40 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX). 41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
42 - "Geode GX/LX" For AMD Geode GX and LX processors.
42 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3. 43 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
43 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above). 44 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
44 45
@@ -171,6 +172,11 @@ config MGEODEGX1
171 help 172 help
172 Select this for a Geode GX1 (Cyrix MediaGX) chip. 173 Select this for a Geode GX1 (Cyrix MediaGX) chip.
173 174
175config MGEODE_LX
176 bool "Geode GX/LX"
177 help
178 Select this for AMD Geode GX and LX processors.
179
174config MCYRIXIII 180config MCYRIXIII
175 bool "CyrixIII/VIA-C3" 181 bool "CyrixIII/VIA-C3"
176 help 182 help
@@ -220,8 +226,8 @@ config X86_XADD
220config X86_L1_CACHE_SHIFT 226config X86_L1_CACHE_SHIFT
221 int 227 int
222 default "7" if MPENTIUM4 || X86_GENERIC 228 default "7" if MPENTIUM4 || X86_GENERIC
223 default "4" if X86_ELAN || M486 || M386 229 default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
224 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1 230 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
225 default "6" if MK7 || MK8 || MPENTIUMM 231 default "6" if MK7 || MK8 || MPENTIUMM
226 232
227config RWSEM_GENERIC_SPINLOCK 233config RWSEM_GENERIC_SPINLOCK
@@ -290,12 +296,12 @@ config X86_INTEL_USERCOPY
290 296
291config X86_USE_PPRO_CHECKSUM 297config X86_USE_PPRO_CHECKSUM
292 bool 298 bool
293 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON 299 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX
294 default y 300 default y
295 301
296config X86_USE_3DNOW 302config X86_USE_3DNOW
297 bool 303 bool
298 depends on MCYRIXIII || MK7 304 depends on MCYRIXIII || MK7 || MGEODE_LX
299 default y 305 default y
300 306
301config X86_OOSTORE 307config X86_OOSTORE
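
For context, X86_L1_CACHE_SHIFT is consumed as a power of two: the "7" for MPENTIUM4 means 128-byte cache-line alignment, while the new "5" for MGEODE_LX means 32 bytes. A standalone sketch of the derivation, with macro names assumed from include/asm-i386/cache.h:

	#include <stdio.h>

	#define CONFIG_X86_L1_CACHE_SHIFT 7	/* MPENTIUM4 default above */
	#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
	#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

	int main(void)
	{
		printf("L1 cache line: %d bytes\n", L1_CACHE_BYTES); /* 128 */
		return 0;
	}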
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index c48b424dd640..bf32ecc9ad04 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -42,6 +42,16 @@ config DEBUG_PAGEALLOC
42 This results in a large slowdown, but helps to find certain types 42 This results in a large slowdown, but helps to find certain types
43 of memory corruptions. 43 of memory corruptions.
44 44
45config DEBUG_RODATA
46 bool "Write protect kernel read-only data structures"
47 depends on DEBUG_KERNEL
48 help
49 Mark the kernel read-only data as write-protected in the pagetables,
50 in order to catch accidental (and incorrect) writes to such const
51 data. This option may have a slight performance impact because a
52 portion of the kernel code won't be covered by a 2MB TLB anymore.
53 If in doubt, say "N".
54
45config 4KSTACKS 55config 4KSTACKS
46 bool "Use 4Kb for kernel stacks instead of 8Kb" 56 bool "Use 4Kb for kernel stacks instead of 8Kb"
47 depends on DEBUG_KERNEL 57 depends on DEBUG_KERNEL
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 496a2c9909fe..d8f94e78de8a 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -721,7 +721,7 @@ static int __init apic_set_verbosity(char *str)
721 apic_verbosity = APIC_VERBOSE; 721 apic_verbosity = APIC_VERBOSE;
722 else 722 else
723 printk(KERN_WARNING "APIC Verbosity level %s not recognised" 723 printk(KERN_WARNING "APIC Verbosity level %s not recognised"
724 " use apic=verbose or apic=debug", str); 724 " use apic=verbose or apic=debug\n", str);
725 725
726 return 0; 726 return 0;
727} 727}
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 1e60acbed3c1..2d793d4aef1a 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -303,17 +303,6 @@ extern int (*console_blank_hook)(int);
303#include "apm.h" 303#include "apm.h"
304 304
305/* 305/*
306 * Define to make all _set_limit calls use 64k limits. The APM 1.1 BIOS is
307 * supposed to provide limit information that it recognizes. Many machines
308 * do this correctly, but many others do not restrict themselves to their
309 * claimed limit. When this happens, they will cause a segmentation
310 * violation in the kernel at boot time. Most BIOS's, however, will
311 * respect a 64k limit, so we use that. If you want to be pedantic and
312 * hold your BIOS to its claims, then undefine this.
313 */
314#define APM_RELAX_SEGMENTS
315
316/*
317 * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend. 306 * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
 318 * This was patched by Chad Miller <cmiller@surfsouth.com>, original code by 307 * This was patched by Chad Miller <cmiller@surfsouth.com>, original code by
319 * David Chen <chen@ctpa04.mit.edu> 308 * David Chen <chen@ctpa04.mit.edu>
@@ -1075,22 +1064,23 @@ static int apm_engage_power_management(u_short device, int enable)
1075 1064
1076static int apm_console_blank(int blank) 1065static int apm_console_blank(int blank)
1077{ 1066{
1078 int error; 1067 int error, i;
1079 u_short state; 1068 u_short state;
1069 static const u_short dev[3] = { 0x100, 0x1FF, 0x101 };
1080 1070
1081 state = blank ? APM_STATE_STANDBY : APM_STATE_READY; 1071 state = blank ? APM_STATE_STANDBY : APM_STATE_READY;
1082 /* Blank the first display device */ 1072
1083 error = set_power_state(0x100, state); 1073 for (i = 0; i < ARRAY_SIZE(dev); i++) {
1084 if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) { 1074 error = set_power_state(dev[i], state);
1085 /* try to blank them all instead */ 1075
1086 error = set_power_state(0x1ff, state); 1076 if ((error == APM_SUCCESS) || (error == APM_NO_ERROR))
1087 if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) 1077 return 1;
1088 /* try to blank device one instead */ 1078
1089 error = set_power_state(0x101, state); 1079 if (error == APM_NOT_ENGAGED)
1080 break;
1090 } 1081 }
1091 if ((error == APM_SUCCESS) || (error == APM_NO_ERROR)) 1082
1092 return 1; 1083 if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) {
1093 if (error == APM_NOT_ENGAGED) {
1094 static int tried; 1084 static int tried;
1095 int eng_error; 1085 int eng_error;
1096 if (tried++ == 0) { 1086 if (tried++ == 0) {
@@ -2233,8 +2223,8 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
2233static int __init apm_init(void) 2223static int __init apm_init(void)
2234{ 2224{
2235 struct proc_dir_entry *apm_proc; 2225 struct proc_dir_entry *apm_proc;
2226 struct desc_struct *gdt;
2236 int ret; 2227 int ret;
2237 int i;
2238 2228
2239 dmi_check_system(apm_dmi_table); 2229 dmi_check_system(apm_dmi_table);
2240 2230
@@ -2312,45 +2302,30 @@ static int __init apm_init(void)
2312 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); 2302 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
2313 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); 2303 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
2314 2304
2305 /*
2306 * Set up the long jump entry point to the APM BIOS, which is called
2307 * from inline assembly.
2308 */
2315 apm_bios_entry.offset = apm_info.bios.offset; 2309 apm_bios_entry.offset = apm_info.bios.offset;
2316 apm_bios_entry.segment = APM_CS; 2310 apm_bios_entry.segment = APM_CS;
2317 2311
2318 for (i = 0; i < NR_CPUS; i++) { 2312 /*
2319 struct desc_struct *gdt = get_cpu_gdt_table(i); 2313 * The APM 1.1 BIOS is supposed to provide limit information that it
2320 set_base(gdt[APM_CS >> 3], 2314 * recognizes. Many machines do this correctly, but many others do
2321 __va((unsigned long)apm_info.bios.cseg << 4)); 2315 * not restrict themselves to their claimed limit. When this happens,
2322 set_base(gdt[APM_CS_16 >> 3], 2316 * they will cause a segmentation violation in the kernel at boot time.
2323 __va((unsigned long)apm_info.bios.cseg_16 << 4)); 2317 * Most BIOS's, however, will respect a 64k limit, so we use that.
2324 set_base(gdt[APM_DS >> 3], 2318 *
2325 __va((unsigned long)apm_info.bios.dseg << 4)); 2319 * Note we only set APM segments on CPU zero, since we pin the APM
2326#ifndef APM_RELAX_SEGMENTS 2320 * code to that CPU.
2327 if (apm_info.bios.version == 0x100) { 2321 */
2328#endif 2322 gdt = get_cpu_gdt_table(0);
2329 /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ 2323 set_base(gdt[APM_CS >> 3],
2330 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1); 2324 __va((unsigned long)apm_info.bios.cseg << 4));
2331 /* For some unknown machine. */ 2325 set_base(gdt[APM_CS_16 >> 3],
2332 _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1); 2326 __va((unsigned long)apm_info.bios.cseg_16 << 4));
2333 /* For the DEC Hinote Ultra CT475 (and others?) */ 2327 set_base(gdt[APM_DS >> 3],
2334 _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1); 2328 __va((unsigned long)apm_info.bios.dseg << 4));
2335#ifndef APM_RELAX_SEGMENTS
2336 } else {
2337 _set_limit((char *)&gdt[APM_CS >> 3],
2338 (apm_info.bios.cseg_len - 1) & 0xffff);
2339 _set_limit((char *)&gdt[APM_CS_16 >> 3],
2340 (apm_info.bios.cseg_16_len - 1) & 0xffff);
2341 _set_limit((char *)&gdt[APM_DS >> 3],
2342 (apm_info.bios.dseg_len - 1) & 0xffff);
2343 /* workaround for broken BIOSes */
2344 if (apm_info.bios.cseg_len <= apm_info.bios.offset)
2345 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
2346 if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
2347 /* for the BIOS that assumes granularity = 1 */
2348 gdt[APM_DS >> 3].b |= 0x800000;
2349 printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
2350 }
2351 }
2352#endif
2353 }
2354 2329
2355 apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info); 2330 apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info);
2356 if (apm_proc) 2331 if (apm_proc)
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index e344ef88cfcd..e7697e077f6b 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -161,8 +161,13 @@ static void __init init_amd(struct cpuinfo_x86 *c)
161 set_bit(X86_FEATURE_K6_MTRR, c->x86_capability); 161 set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
162 break; 162 break;
163 } 163 }
164 break;
165 164
165 if (c->x86_model == 10) {
166 /* AMD Geode LX is model 10 */
167 /* placeholder for any needed mods */
168 break;
169 }
170 break;
166 case 6: /* An Athlon/Duron */ 171 case 6: /* An Athlon/Duron */
167 172
168 /* Bit 15 of Athlon specific MSR 15, needs to be 0 173 /* Bit 15 of Athlon specific MSR 15, needs to be 0
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 31e344b26bae..cca655688ffc 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,9 +18,6 @@
18 18
19#include "cpu.h" 19#include "cpu.h"
20 20
21DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
22EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
23
24DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); 21DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
25EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); 22EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
26 23
@@ -599,11 +596,6 @@ void __devinit cpu_init(void)
599 load_idt(&idt_descr); 596 load_idt(&idt_descr);
600 597
601 /* 598 /*
602 * Delete NT
603 */
604 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
605
606 /*
607 * Set up and load the per-CPU TSS and LDT 599 * Set up and load the per-CPU TSS and LDT
608 */ 600 */
609 atomic_inc(&init_mm.mm_count); 601 atomic_inc(&init_mm.mm_count);
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index ff87cc22b323..75015975d038 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -343,6 +343,31 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
343} 343}
344 344
345/* 345/*
346 * Handle National Semiconductor branded processors
347 */
348static void __devinit init_nsc(struct cpuinfo_x86 *c)
349{
350 /* There may be GX1 processors in the wild that are branded
351 * NSC and not Cyrix.
352 *
 353 * This function only handles the GX processor, and kicks everything
 354 * else to the Cyrix init function above - that should cover any
 355 * processors that might have been branded differently after NSC
 356 * acquired Cyrix.
357 *
358 * If this breaks your GX1 horribly, please e-mail
359 * info-linux@ldcmail.amd.com to tell us.
360 */
361
 362 /* Handle the GX (Formerly known as the GX2) */
363
364 if (c->x86 == 5 && c->x86_model == 5)
365 display_cacheinfo(c);
366 else
367 init_cyrix(c);
368}
369
370/*
346 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected 371 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
347 * by the fact that they preserve the flags across the division of 5/2. 372 * by the fact that they preserve the flags across the division of 5/2.
348 * PII and PPro exhibit this behavior too, but they have cpuid available. 373 * PII and PPro exhibit this behavior too, but they have cpuid available.
@@ -422,7 +447,7 @@ int __init cyrix_init_cpu(void)
422static struct cpu_dev nsc_cpu_dev __initdata = { 447static struct cpu_dev nsc_cpu_dev __initdata = {
423 .c_vendor = "NSC", 448 .c_vendor = "NSC",
424 .c_ident = { "Geode by NSC" }, 449 .c_ident = { "Geode by NSC" },
425 .c_init = init_cyrix, 450 .c_init = init_nsc,
426 .c_identify = generic_identify, 451 .c_identify = generic_identify,
427}; 452};
428 453
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 13bae799e626..006141d1c12a 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -117,14 +117,13 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
117{ 117{
118 char __user *tmp = buf; 118 char __user *tmp = buf;
119 u32 data[4]; 119 u32 data[4];
120 size_t rv;
121 u32 reg = *ppos; 120 u32 reg = *ppos;
122 int cpu = iminor(file->f_dentry->d_inode); 121 int cpu = iminor(file->f_dentry->d_inode);
123 122
124 if (count % 16) 123 if (count % 16)
125 return -EINVAL; /* Invalid chunk size */ 124 return -EINVAL; /* Invalid chunk size */
126 125
127 for (rv = 0; count; count -= 16) { 126 for (; count; count -= 16) {
128 do_cpuid(cpu, reg, data); 127 do_cpuid(cpu, reg, data);
129 if (copy_to_user(tmp, &data, 16)) 128 if (copy_to_user(tmp, &data, 16))
130 return -EFAULT; 129 return -EFAULT;
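
The loop above defines the userspace protocol for /dev/cpu/N/cpuid: the file offset selects the CPUID leaf, each 16-byte chunk returns EAX, EBX, ECX and EDX for that leaf, and reads must be multiples of 16 (the -EINVAL check). A minimal sketch, assuming the usual device node path and that the cpuid driver is available:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint32_t regs[4]; /* EAX, EBX, ECX, EDX */
		int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* offset 0 = leaf 0: max leaf in EAX, vendor in EBX/EDX/ECX */
		if (pread(fd, regs, sizeof(regs), 0) != sizeof(regs)) {
			perror("pread");
			return 1;
		}
		printf("max leaf %u, vendor %.4s%.4s%.4s\n", regs[0],
		       (char *)&regs[1], (char *)&regs[3], (char *)&regs[2]);
		close(fd);
		return 0;
	}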
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index e50b93155249..607c06007508 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -657,6 +657,7 @@ ENTRY(spurious_interrupt_bug)
657 pushl $do_spurious_interrupt_bug 657 pushl $do_spurious_interrupt_bug
658 jmp error_code 658 jmp error_code
659 659
660.section .rodata,"a"
660#include "syscall_table.S" 661#include "syscall_table.S"
661 662
662syscall_table_size=(.-sys_call_table) 663syscall_table_size=(.-sys_call_table)
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index e437fb367498..5884469f6bfe 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -504,19 +504,24 @@ ENTRY(cpu_gdt_table)
504 .quad 0x0000000000000000 /* 0x80 TSS descriptor */ 504 .quad 0x0000000000000000 /* 0x80 TSS descriptor */
505 .quad 0x0000000000000000 /* 0x88 LDT descriptor */ 505 .quad 0x0000000000000000 /* 0x88 LDT descriptor */
506 506
507 /* Segments used for calling PnP BIOS */ 507 /*
508 .quad 0x00c09a0000000000 /* 0x90 32-bit code */ 508 * Segments used for calling PnP BIOS have byte granularity.
 509 .quad 0x00809a0000000000 /* 0x98 16-bit code */ 509 * The code and data segments have fixed 64k limits,
510 .quad 0x0080920000000000 /* 0xa0 16-bit data */ 510 * the transfer segment sizes are set at run time.
511 .quad 0x0080920000000000 /* 0xa8 16-bit data */ 511 */
512 .quad 0x0080920000000000 /* 0xb0 16-bit data */ 512 .quad 0x00409a000000ffff /* 0x90 32-bit code */
513 .quad 0x00009a000000ffff /* 0x98 16-bit code */
514 .quad 0x000092000000ffff /* 0xa0 16-bit data */
515 .quad 0x0000920000000000 /* 0xa8 16-bit data */
516 .quad 0x0000920000000000 /* 0xb0 16-bit data */
517
513 /* 518 /*
514 * The APM segments have byte granularity and their bases 519 * The APM segments have byte granularity and their bases
515 * and limits are set at run time. 520 * are set at run time. All have 64k limits.
516 */ 521 */
517 .quad 0x00409a0000000000 /* 0xb8 APM CS code */ 522 .quad 0x00409a000000ffff /* 0xb8 APM CS code */
518 .quad 0x00009a0000000000 /* 0xc0 APM CS 16 code (16 bit) */ 523 .quad 0x00009a000000ffff /* 0xc0 APM CS 16 code (16 bit) */
519 .quad 0x0040920000000000 /* 0xc8 APM DS data */ 524 .quad 0x004092000000ffff /* 0xc8 APM DS data */
520 525
521 .quad 0x0000920000000000 /* 0xd0 - ESPFIX 16-bit SS */ 526 .quad 0x0000920000000000 /* 0xd0 - ESPFIX 16-bit SS */
522 .quad 0x0000000000000000 /* 0xd8 - unused */ 527 .quad 0x0000000000000000 /* 0xd8 - unused */
@@ -525,3 +530,5 @@ ENTRY(cpu_gdt_table)
525 .quad 0x0000000000000000 /* 0xf0 - unused */ 530 .quad 0x0000000000000000 /* 0xf0 - unused */
526 .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ 531 .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
527 532
533 /* Be sure this is zeroed to avoid false validations in Xen */
534 .fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0
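
To see why the corrected quads above now carry 64k limits, it helps to unpack one. A standalone decoder for the 8-byte IA-32 segment descriptor layout (nothing here is patch-specific; field positions per the architecture manuals):

	#include <stdint.h>
	#include <stdio.h>

	static void decode_desc(uint64_t d)
	{
		uint32_t base = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
		uint32_t limit = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
		int g = (d >> 55) & 1;	/* granularity: 0 = bytes, 1 = 4k pages */
		int db = (d >> 54) & 1;	/* default size: 0 = 16-bit, 1 = 32-bit */
		int access = (d >> 40) & 0xff;

		printf("base=%#010x limit=%#07x %s-granular %s-bit access=%#04x\n",
		       base, limit, g ? "page" : "byte", db ? "32" : "16", access);
	}

	int main(void)
	{
		decode_desc(0x00409a000000ffffULL); /* 0xb8 APM CS: 32-bit code */
		decode_desc(0x00009a000000ffffULL); /* 0xc0 APM CS 16: 16-bit code */
		decode_desc(0x004092000000ffffULL); /* 0xc8 APM DS: 32-bit data */
		return 0;
	}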
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 180f070d03cb..3999bec50c33 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -3,8 +3,7 @@
3#include <asm/checksum.h> 3#include <asm/checksum.h>
4#include <asm/desc.h> 4#include <asm/desc.h>
5 5
6/* This is definitely a GPL-only symbol */ 6EXPORT_SYMBOL_GPL(cpu_gdt_descr);
7EXPORT_SYMBOL_GPL(cpu_gdt_table);
8 7
9EXPORT_SYMBOL(__down_failed); 8EXPORT_SYMBOL(__down_failed);
10EXPORT_SYMBOL(__down_failed_interruptible); 9EXPORT_SYMBOL(__down_failed_interruptible);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 22c8675c79f4..7554f8fd874a 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1722,8 +1722,8 @@ void disable_IO_APIC(void)
1722 entry.dest_mode = 0; /* Physical */ 1722 entry.dest_mode = 0; /* Physical */
1723 entry.delivery_mode = dest_ExtINT; /* ExtInt */ 1723 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1724 entry.vector = 0; 1724 entry.vector = 0;
1725 entry.dest.physical.physical_dest = 0; 1725 entry.dest.physical.physical_dest =
1726 1726 GET_APIC_ID(apic_read(APIC_ID));
1727 1727
1728 /* 1728 /*
1729 * Add it to the IO-APIC irq-routing table: 1729 * Add it to the IO-APIC irq-routing table:
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 1ca5269b1e86..91a64016956e 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -38,6 +38,12 @@
38int smp_found_config; 38int smp_found_config;
39unsigned int __initdata maxcpus = NR_CPUS; 39unsigned int __initdata maxcpus = NR_CPUS;
40 40
41#ifdef CONFIG_HOTPLUG_CPU
42#define CPU_HOTPLUG_ENABLED (1)
43#else
44#define CPU_HOTPLUG_ENABLED (0)
45#endif
46
41/* 47/*
42 * Various Linux-internal data structures created from the 48 * Various Linux-internal data structures created from the
43 * MP-table. 49 * MP-table.
@@ -219,14 +225,18 @@ static void __devinit MP_processor_info (struct mpc_config_processor *m)
219 cpu_set(num_processors, cpu_possible_map); 225 cpu_set(num_processors, cpu_possible_map);
220 num_processors++; 226 num_processors++;
221 227
222 if ((num_processors > 8) && 228 if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) {
223 ((APIC_XAPIC(ver) && 229 switch (boot_cpu_data.x86_vendor) {
224 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) || 230 case X86_VENDOR_INTEL:
225 (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))) 231 if (!APIC_XAPIC(ver)) {
226 def_to_bigsmp = 1; 232 def_to_bigsmp = 0;
227 else 233 break;
228 def_to_bigsmp = 0; 234 }
229 235 /* If P4 and above fall through */
236 case X86_VENDOR_AMD:
237 def_to_bigsmp = 1;
238 }
239 }
230 bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; 240 bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
231} 241}
232 242
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 44470fea4309..1d0a55e68760 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -172,7 +172,6 @@ static ssize_t msr_read(struct file *file, char __user * buf,
172{ 172{
173 u32 __user *tmp = (u32 __user *) buf; 173 u32 __user *tmp = (u32 __user *) buf;
174 u32 data[2]; 174 u32 data[2];
175 size_t rv;
176 u32 reg = *ppos; 175 u32 reg = *ppos;
177 int cpu = iminor(file->f_dentry->d_inode); 176 int cpu = iminor(file->f_dentry->d_inode);
178 int err; 177 int err;
@@ -180,7 +179,7 @@ static ssize_t msr_read(struct file *file, char __user * buf,
180 if (count % 8) 179 if (count % 8)
181 return -EINVAL; /* Invalid chunk size */ 180 return -EINVAL; /* Invalid chunk size */
182 181
183 for (rv = 0; count; count -= 8) { 182 for (; count; count -= 8) {
184 err = do_rdmsr(cpu, reg, &data[0], &data[1]); 183 err = do_rdmsr(cpu, reg, &data[0], &data[1]);
185 if (err) 184 if (err)
186 return err; 185 return err;
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2333aead0563..45e7f0ac4b04 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -308,9 +308,7 @@ void show_regs(struct pt_regs * regs)
308 cr0 = read_cr0(); 308 cr0 = read_cr0();
309 cr2 = read_cr2(); 309 cr2 = read_cr2();
310 cr3 = read_cr3(); 310 cr3 = read_cr3();
311 if (current_cpu_data.x86 > 4) { 311 cr4 = read_cr4_safe();
312 cr4 = read_cr4();
313 }
314 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); 312 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
315 show_trace(NULL, &regs->esp); 313 show_trace(NULL, &regs->esp);
316} 314}
@@ -404,17 +402,7 @@ void flush_thread(void)
404 402
405void release_thread(struct task_struct *dead_task) 403void release_thread(struct task_struct *dead_task)
406{ 404{
407 if (dead_task->mm) { 405 BUG_ON(dead_task->mm);
408 // temporary debugging check
409 if (dead_task->mm->context.size) {
410 printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
411 dead_task->comm,
412 dead_task->mm->context.ldt,
413 dead_task->mm->context.size);
414 BUG();
415 }
416 }
417
418 release_vm86_irqs(dead_task); 406 release_vm86_irqs(dead_task);
419} 407}
420 408
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 2afe0f8d555a..2fa5803a759d 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -111,12 +111,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
111 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), 111 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
112 }, 112 },
113 }, 113 },
114 { /* Handle problems with rebooting on HP nc6120 */ 114 { /* Handle problems with rebooting on HP laptops */
115 .callback = set_bios_reboot, 115 .callback = set_bios_reboot,
116 .ident = "HP Compaq nc6120", 116 .ident = "HP Compaq Laptop",
117 .matches = { 117 .matches = {
118 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 118 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
119 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6120"), 119 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
120 }, 120 },
121 }, 121 },
122 { } 122 { }
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index fdfcb0cba9b4..27c956db0461 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -954,6 +954,12 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
954 return 0; 954 return 0;
955} 955}
956 956
957static int __init
958efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
959{
960 memory_present(0, start, end);
961 return 0;
962}
957 963
958/* 964/*
959 * Find the highest page frame number we have available 965 * Find the highest page frame number we have available
@@ -965,6 +971,7 @@ void __init find_max_pfn(void)
965 max_pfn = 0; 971 max_pfn = 0;
966 if (efi_enabled) { 972 if (efi_enabled) {
967 efi_memmap_walk(efi_find_max_pfn, &max_pfn); 973 efi_memmap_walk(efi_find_max_pfn, &max_pfn);
974 efi_memmap_walk(efi_memory_present_wrapper, NULL);
968 return; 975 return;
969 } 976 }
970 977
@@ -979,6 +986,7 @@ void __init find_max_pfn(void)
979 continue; 986 continue;
980 if (end > max_pfn) 987 if (end > max_pfn)
981 max_pfn = end; 988 max_pfn = end;
989 memory_present(0, start, end);
982 } 990 }
983} 991}
984 992
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 9ed449af8e9f..b3c2e2c26743 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -903,6 +903,12 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
903 unsigned long start_eip; 903 unsigned long start_eip;
904 unsigned short nmi_high = 0, nmi_low = 0; 904 unsigned short nmi_high = 0, nmi_low = 0;
905 905
906 if (!cpu_gdt_descr[cpu].address &&
907 !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
908 printk("Failed to allocate GDT for CPU %d\n", cpu);
909 return 1;
910 }
911
906 ++cpucount; 912 ++cpucount;
907 913
908 /* 914 /*
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 9b21a31d4f4e..f7ba4acc20ec 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -1,4 +1,3 @@
1.data
2ENTRY(sys_call_table) 1ENTRY(sys_call_table)
3 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ 2 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
4 .long sys_exit 3 .long sys_exit
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index d395e3b42485..47675bbbb316 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -330,7 +330,9 @@ int recalibrate_cpu_khz(void)
330 unsigned int cpu_khz_old = cpu_khz; 330 unsigned int cpu_khz_old = cpu_khz;
331 331
332 if (cpu_has_tsc) { 332 if (cpu_has_tsc) {
333 local_irq_disable();
333 init_cpu_khz(); 334 init_cpu_khz();
335 local_irq_enable();
334 cpu_data[0].loops_per_jiffy = 336 cpu_data[0].loops_per_jiffy =
335 cpufreq_scale(cpu_data[0].loops_per_jiffy, 337 cpufreq_scale(cpu_data[0].loops_per_jiffy,
336 cpu_khz_old, 338 cpu_khz_old,
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index ab0e9430f775..53ad954e3ba4 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -306,14 +306,17 @@ void die(const char * str, struct pt_regs * regs, long err)
306 .lock_owner_depth = 0 306 .lock_owner_depth = 0
307 }; 307 };
308 static int die_counter; 308 static int die_counter;
309 unsigned long flags;
309 310
310 if (die.lock_owner != raw_smp_processor_id()) { 311 if (die.lock_owner != raw_smp_processor_id()) {
311 console_verbose(); 312 console_verbose();
312 spin_lock_irq(&die.lock); 313 spin_lock_irqsave(&die.lock, flags);
313 die.lock_owner = smp_processor_id(); 314 die.lock_owner = smp_processor_id();
314 die.lock_owner_depth = 0; 315 die.lock_owner_depth = 0;
315 bust_spinlocks(1); 316 bust_spinlocks(1);
316 } 317 }
318 else
319 local_save_flags(flags);
317 320
318 if (++die.lock_owner_depth < 3) { 321 if (++die.lock_owner_depth < 3) {
319 int nl = 0; 322 int nl = 0;
@@ -340,7 +343,7 @@ void die(const char * str, struct pt_regs * regs, long err)
340 343
341 bust_spinlocks(0); 344 bust_spinlocks(0);
342 die.lock_owner = -1; 345 die.lock_owner = -1;
343 spin_unlock_irq(&die.lock); 346 spin_unlock_irqrestore(&die.lock, flags);
344 347
345 if (kexec_should_crash(current)) 348 if (kexec_should_crash(current))
346 crash_kexec(regs); 349 crash_kexec(regs);
@@ -1075,9 +1078,9 @@ void __init trap_init(void)
1075 set_trap_gate(0,&divide_error); 1078 set_trap_gate(0,&divide_error);
1076 set_intr_gate(1,&debug); 1079 set_intr_gate(1,&debug);
1077 set_intr_gate(2,&nmi); 1080 set_intr_gate(2,&nmi);
1078 set_system_intr_gate(3, &int3); /* int3-5 can be called from all */ 1081 set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
1079 set_system_gate(4,&overflow); 1082 set_system_gate(4,&overflow);
1080 set_system_gate(5,&bounds); 1083 set_trap_gate(5,&bounds);
1081 set_trap_gate(6,&invalid_op); 1084 set_trap_gate(6,&invalid_op);
1082 set_trap_gate(7,&device_not_available); 1085 set_trap_gate(7,&device_not_available);
1083 set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS); 1086 set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
@@ -1095,6 +1098,28 @@ void __init trap_init(void)
1095#endif 1098#endif
1096 set_trap_gate(19,&simd_coprocessor_error); 1099 set_trap_gate(19,&simd_coprocessor_error);
1097 1100
1101 if (cpu_has_fxsr) {
1102 /*
1103 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
1104 * Generates a compile-time "error: zero width for bit-field" if
1105 * the alignment is wrong.
1106 */
1107 struct fxsrAlignAssert {
1108 int _:!(offsetof(struct task_struct,
1109 thread.i387.fxsave) & 15);
1110 };
1111
1112 printk(KERN_INFO "Enabling fast FPU save and restore... ");
1113 set_in_cr4(X86_CR4_OSFXSR);
1114 printk("done.\n");
1115 }
1116 if (cpu_has_xmm) {
1117 printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
1118 "support... ");
1119 set_in_cr4(X86_CR4_OSXMMEXCPT);
1120 printk("done.\n");
1121 }
1122
1098 set_system_gate(SYSCALL_VECTOR,&system_call); 1123 set_system_gate(SYSCALL_VECTOR,&system_call);
1099 1124
1100 /* 1125 /*
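
The fxsrAlignAssert struct above is a compile-time assertion: a named bit-field must have non-zero width, so the build fails with "zero width for bit-field" exactly when the fxsave area is misaligned. The same idiom standalone, using a hypothetical struct:

	#include <stddef.h>

	struct example {
		char pad[3];	/* compiler pads so 'value' lands at offset 4 */
		int value;
	};

	/* Compiles only if 'value' sits at a 4-byte-aligned offset;
	 * otherwise the bit-field width is 0 and compilation fails. */
	struct align_assert {
		int _:!(offsetof(struct example, value) & 3);
	};

	int main(void)
	{
		return 0;
	}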
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 06e26f006238..7df494b51a5b 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -735,6 +735,30 @@ void free_initmem(void)
735 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10); 735 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
736} 736}
737 737
738#ifdef CONFIG_DEBUG_RODATA
739
740extern char __start_rodata, __end_rodata;
741void mark_rodata_ro(void)
742{
743 unsigned long addr = (unsigned long)&__start_rodata;
744
745 for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
746 change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
747
748 printk ("Write protecting the kernel read-only data: %luk\n",
749 (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
750
751 /*
752 * change_page_attr() requires a global_flush_tlb() call after it.
753 * We do this after the printk so that if something went wrong in the
754 * change, the printk gets out at least to give a better debug hint
755 * of who is the culprit.
756 */
757 global_flush_tlb();
758}
759#endif
760
761
738#ifdef CONFIG_BLK_DEV_INITRD 762#ifdef CONFIG_BLK_DEV_INITRD
739void free_initrd_mem(unsigned long start, unsigned long end) 763void free_initrd_mem(unsigned long start, unsigned long end)
740{ 764{
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index f600fc244f02..c30a16df6440 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -13,6 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/tlbflush.h> 14#include <asm/tlbflush.h>
15#include <asm/pgalloc.h> 15#include <asm/pgalloc.h>
16#include <asm/sections.h>
16 17
17static DEFINE_SPINLOCK(cpa_lock); 18static DEFINE_SPINLOCK(cpa_lock);
18static struct list_head df_list = LIST_HEAD_INIT(df_list); 19static struct list_head df_list = LIST_HEAD_INIT(df_list);
@@ -36,7 +37,8 @@ pte_t *lookup_address(unsigned long address)
36 return pte_offset_kernel(pmd, address); 37 return pte_offset_kernel(pmd, address);
37} 38}
38 39
39static struct page *split_large_page(unsigned long address, pgprot_t prot) 40static struct page *split_large_page(unsigned long address, pgprot_t prot,
41 pgprot_t ref_prot)
40{ 42{
41 int i; 43 int i;
42 unsigned long addr; 44 unsigned long addr;
@@ -54,7 +56,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot)
54 pbase = (pte_t *)page_address(base); 56 pbase = (pte_t *)page_address(base);
55 for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { 57 for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
56 set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, 58 set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
57 addr == address ? prot : PAGE_KERNEL)); 59 addr == address ? prot : ref_prot));
58 } 60 }
59 return base; 61 return base;
60} 62}
@@ -98,11 +100,18 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
98 */ 100 */
99static inline void revert_page(struct page *kpte_page, unsigned long address) 101static inline void revert_page(struct page *kpte_page, unsigned long address)
100{ 102{
101 pte_t *linear = (pte_t *) 103 pgprot_t ref_prot;
104 pte_t *linear;
105
106 ref_prot =
107 ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
108 ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
109
110 linear = (pte_t *)
102 pmd_offset(pud_offset(pgd_offset_k(address), address), address); 111 pmd_offset(pud_offset(pgd_offset_k(address), address), address);
103 set_pmd_pte(linear, address, 112 set_pmd_pte(linear, address,
104 pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT, 113 pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
105 PAGE_KERNEL_LARGE)); 114 ref_prot));
106} 115}
107 116
108static int 117static int
@@ -123,10 +132,16 @@ __change_page_attr(struct page *page, pgprot_t prot)
123 if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 132 if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
124 set_pte_atomic(kpte, mk_pte(page, prot)); 133 set_pte_atomic(kpte, mk_pte(page, prot));
125 } else { 134 } else {
126 struct page *split = split_large_page(address, prot); 135 pgprot_t ref_prot;
136 struct page *split;
137
138 ref_prot =
139 ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
140 ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
141 split = split_large_page(address, prot, ref_prot);
127 if (!split) 142 if (!split)
128 return -ENOMEM; 143 return -ENOMEM;
129 set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL)); 144 set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
130 kpte_page = split; 145 kpte_page = split;
131 } 146 }
132 get_page(kpte_page); 147 get_page(kpte_page);
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 19e6f4871d1e..ee8e01697d96 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -846,7 +846,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
846 * reported by the device if possible. 846 * reported by the device if possible.
847 */ 847 */
848 newirq = dev->irq; 848 newirq = dev->irq;
849 if (!((1 << newirq) & mask)) { 849 if (newirq && !((1 << newirq) & mask)) {
850 if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0; 850 if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
851 else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev)); 851 else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev));
852 } 852 }
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 4d100f3886e1..fae67bbb52f6 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -81,6 +81,12 @@ config PLAT_MAPPI2
81config PLAT_MAPPI3 81config PLAT_MAPPI3
82 bool "Mappi-III(M3A-2170)" 82 bool "Mappi-III(M3A-2170)"
83 83
84config PLAT_M32104UT
85 bool "M32104UT"
86 help
 87 The M3T-M32104UT is a reference board based on the uT-Engine
 88 specification. This board has an M32104 chip.
89
84endchoice 90endchoice
85 91
86choice 92choice
@@ -93,6 +99,10 @@ config CHIP_M32700
93config CHIP_M32102 99config CHIP_M32102
94 bool "M32102" 100 bool "M32102"
95 101
102config CHIP_M32104
103 bool "M32104"
104 depends on PLAT_M32104UT
105
96config CHIP_VDEC2 106config CHIP_VDEC2
97 bool "VDEC2" 107 bool "VDEC2"
98 108
@@ -115,7 +125,7 @@ config TLB_ENTRIES
115 125
116config ISA_M32R 126config ISA_M32R
117 bool 127 bool
118 depends on CHIP_M32102 128 depends on CHIP_M32102 || CHIP_M32104
119 default y 129 default y
120 130
121config ISA_M32R2 131config ISA_M32R2
@@ -140,6 +150,7 @@ config BUS_CLOCK
140 default "50000000" if PLAT_MAPPI3 150 default "50000000" if PLAT_MAPPI3
141 default "50000000" if PLAT_M32700UT 151 default "50000000" if PLAT_M32700UT
142 default "50000000" if PLAT_OPSPUT 152 default "50000000" if PLAT_OPSPUT
153 default "54000000" if PLAT_M32104UT
143 default "33333333" if PLAT_OAKS32R 154 default "33333333" if PLAT_OAKS32R
144 default "20000000" if PLAT_MAPPI2 155 default "20000000" if PLAT_MAPPI2
145 156
@@ -157,6 +168,7 @@ config MEMORY_START
157 default "08000000" if PLAT_USRV 168 default "08000000" if PLAT_USRV
158 default "08000000" if PLAT_M32700UT 169 default "08000000" if PLAT_M32700UT
159 default "08000000" if PLAT_OPSPUT 170 default "08000000" if PLAT_OPSPUT
171 default "04000000" if PLAT_M32104UT
160 default "01000000" if PLAT_OAKS32R 172 default "01000000" if PLAT_OAKS32R
161 173
162config MEMORY_SIZE 174config MEMORY_SIZE
@@ -166,6 +178,7 @@ config MEMORY_SIZE
166 default "02000000" if PLAT_USRV 178 default "02000000" if PLAT_USRV
167 default "01000000" if PLAT_M32700UT 179 default "01000000" if PLAT_M32700UT
168 default "01000000" if PLAT_OPSPUT 180 default "01000000" if PLAT_OPSPUT
181 default "01000000" if PLAT_M32104UT
169 default "00800000" if PLAT_OAKS32R 182 default "00800000" if PLAT_OAKS32R
170 183
171config NOHIGHMEM 184config NOHIGHMEM
@@ -174,21 +187,22 @@ config NOHIGHMEM
174 187
175config ARCH_DISCONTIGMEM_ENABLE 188config ARCH_DISCONTIGMEM_ENABLE
176 bool "Internal RAM Support" 189 bool "Internal RAM Support"
177 depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP 190 depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104
178 default y 191 default y
179 192
180source "mm/Kconfig" 193source "mm/Kconfig"
181 194
182config IRAM_START 195config IRAM_START
183 hex "Internal memory start address (hex)" 196 hex "Internal memory start address (hex)"
184 default "00f00000" 197 default "00f00000" if !CHIP_M32104
185 depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP) && DISCONTIGMEM 198 default "00700000" if CHIP_M32104
199 depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104) && DISCONTIGMEM
186 200
187config IRAM_SIZE 201config IRAM_SIZE
188 hex "Internal memory size (hex)" 202 hex "Internal memory size (hex)"
189 depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP) && DISCONTIGMEM 203 depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104) && DISCONTIGMEM
190 default "00080000" if CHIP_M32700 204 default "00080000" if CHIP_M32700
191 default "00010000" if CHIP_M32102 || CHIP_OPSP 205 default "00010000" if CHIP_M32102 || CHIP_OPSP || CHIP_M32104
192 default "00008000" if CHIP_VDEC2 206 default "00008000" if CHIP_VDEC2
193 207
194# 208#
diff --git a/arch/m32r/boot/compressed/head.S b/arch/m32r/boot/compressed/head.S
index 07cfd6ad1ae4..234d8b1e0ac1 100644
--- a/arch/m32r/boot/compressed/head.S
+++ b/arch/m32r/boot/compressed/head.S
@@ -143,6 +143,11 @@ startup:
143 ldi r0, -2 143 ldi r0, -2
144 ldi r1, 0x0100 ; invalidate 144 ldi r1, 0x0100 ; invalidate
145 stb r1, @r0 145 stb r1, @r0
146#elif defined(CONFIG_CHIP_M32104)
147 /* Cache flush */
148 ldi r0, -2
149 ldi r1, 0x0700 ; invalidate i-cache, copy back d-cache
150 sth r1, @r0
146#else 151#else
147#error "put your cache flush function, please" 152#error "put your cache flush function, please"
148#endif 153#endif
diff --git a/arch/m32r/boot/setup.S b/arch/m32r/boot/setup.S
index 5d256434b4ad..398542507d84 100644
--- a/arch/m32r/boot/setup.S
+++ b/arch/m32r/boot/setup.S
@@ -1,11 +1,10 @@
1/* 1/*
2 * linux/arch/m32r/boot/setup.S -- A setup code. 2 * linux/arch/m32r/boot/setup.S -- A setup code.
3 * 3 *
4 * Copyright (C) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata, 4 * Copyright (C) 2001-2005 Hiroyuki Kondo, Hirokazu Takata,
5 * and Hitoshi Yamamoto 5 * Hitoshi Yamamoto, Hayato Fujiwara
6 * 6 *
7 */ 7 */
8/* $Id$ */
9 8
10#include <linux/linkage.h> 9#include <linux/linkage.h>
11#include <asm/segment.h> 10#include <asm/segment.h>
@@ -80,6 +79,20 @@ ENTRY(boot)
80 ldi r1, #0x101 ; cache on (with invalidation) 79 ldi r1, #0x101 ; cache on (with invalidation)
81; ldi r1, #0x00 ; cache off 80; ldi r1, #0x00 ; cache off
82 st r1, @r0 81 st r1, @r0
82#elif defined(CONFIG_CHIP_M32104)
83 ldi r0, #-96 ; DNCR0
84 seth r1, #0x0060 ; from 0x00600000
85 or3 r1, r1, #0x0005 ; size 2MB
86 st r1, @r0
87 seth r1, #0x0100 ; from 0x01000000
88 or3 r1, r1, #0x0003 ; size 16MB
89 st r1, @+r0
90 seth r1, #0x0200 ; from 0x02000000
91 or3 r1, r1, #0x0002 ; size 32MB
92 st r1, @+r0
93 ldi r0, #-4 ;LDIMM (r0, M32R_MCCR)
94 ldi r1, #0x703 ; cache on (with invalidation)
95 st r1, @r0
83#else 96#else
84#error unknown chip configuration 97#error unknown chip configuration
85#endif 98#endif
@@ -115,10 +128,15 @@ mmu_on:
115 st r1, @(MATM_offset,r0) ; Set MATM (T bit ON) 128 st r1, @(MATM_offset,r0) ; Set MATM (T bit ON)
116 ld r0, @(MATM_offset,r0) ; Check 129 ld r0, @(MATM_offset,r0) ; Check
117#else 130#else
131#if defined(CONFIG_CHIP_M32700)
118 seth r0,#high(M32R_MCDCAR) 132 seth r0,#high(M32R_MCDCAR)
119 or3 r0,r0,#low(M32R_MCDCAR) 133 or3 r0,r0,#low(M32R_MCDCAR)
120 ld24 r1,#0x8080 134 ld24 r1,#0x8080
121 st r1,@r0 135 st r1,@r0
136#elif defined(CONFIG_CHIP_M32104)
137 LDIMM (r2, eit_vector) ; set EVB(cr5)
138 mvtc r2, cr5
139#endif
122#endif /* CONFIG_MMU */ 140#endif /* CONFIG_MMU */
123 jmp r13 141 jmp r13
124 nop 142 nop
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index 6c6b6c376638..5a2fa886906f 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -16,5 +16,6 @@ obj-$(CONFIG_PLAT_M32700UT) += setup_m32700ut.o io_m32700ut.o
16obj-$(CONFIG_PLAT_OPSPUT) += setup_opsput.o io_opsput.o 16obj-$(CONFIG_PLAT_OPSPUT) += setup_opsput.o io_opsput.o
17obj-$(CONFIG_MODULES) += module.o 17obj-$(CONFIG_MODULES) += module.o
18obj-$(CONFIG_PLAT_OAKS32R) += setup_oaks32r.o io_oaks32r.o 18obj-$(CONFIG_PLAT_OAKS32R) += setup_oaks32r.o io_oaks32r.o
19obj-$(CONFIG_PLAT_M32104UT) += setup_m32104ut.o io_m32104ut.o
19 20
20EXTRA_AFLAGS := -traditional 21EXTRA_AFLAGS := -traditional
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 396c94218cc2..3871b65f0c82 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -315,7 +315,7 @@ ENTRY(ei_handler)
315 mv r1, sp ; arg1(regs) 315 mv r1, sp ; arg1(regs)
316#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \ 316#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
317 || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \ 317 || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
318 || defined(CONFIG_CHIP_OPSP) 318 || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
319 319
320; GET_ICU_STATUS; 320; GET_ICU_STATUS;
321 seth r0, #shigh(M32R_ICU_ISTS_ADDR) 321 seth r0, #shigh(M32R_ICU_ISTS_ADDR)
@@ -541,7 +541,20 @@ check_int2:
541 bra check_end 541 bra check_end
542 .fillinsn 542 .fillinsn
543check_end: 543check_end:
544#endif /* CONFIG_PLAT_OPSPUT */ 544#elif defined(CONFIG_PLAT_M32104UT)
545 add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
546 bnez r2, check_end
547 ; read ICU status register of PLD
548 seth r0, #high(PLD_ICUISTS)
549 or3 r0, r0, #low(PLD_ICUISTS)
550 lduh r0, @r0
551 slli r0, #21
552 srli r0, #27 ; ISN
553 addi r0, #(M32104UT_PLD_IRQ_BASE)
554 bra check_end
555 .fillinsn
556check_end:
557#endif /* CONFIG_PLAT_M32104UT */
545 bl do_IRQ 558 bl do_IRQ
546#endif /* CONFIG_SMP */ 559#endif /* CONFIG_SMP */
547 ld r14, @sp+ 560 ld r14, @sp+
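The M32104UT leg added above demultiplexes the INT1# interrupt by reading the PLD's ICU status register and isolating a bit-field with a shift pair: slli #21 followed by srli #27 on a 32-bit register leaves bits [10:6], the interrupt source number (ISN), which is then rebased onto M32104UT_PLD_IRQ_BASE. A stand-alone, runnable C equivalent; the base value here is a placeholder, not the board's real constant:

    #include <stdio.h>

    #define M32104UT_PLD_IRQ_BASE 32U  /* placeholder, not the real value */

    /* (x << 21) >> 27 on a 32-bit unsigned value == (x >> 6) & 0x1f */
    static unsigned int pld_status_to_irq(unsigned int icuists)
    {
            unsigned int isn = (icuists << 21) >> 27;  /* bits [10:6] */
            return M32104UT_PLD_IRQ_BASE + isn;
    }

    int main(void)
    {
            printf("irq = %u\n", pld_status_to_irq(3u << 6));  /* -> 35 */
            return 0;
    }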
@@ -651,8 +664,6 @@ ENTRY(rie_handler)
651/* void rie_handler(int error_code) */ 664/* void rie_handler(int error_code) */
652 SWITCH_TO_KERNEL_STACK 665 SWITCH_TO_KERNEL_STACK
653 SAVE_ALL 666 SAVE_ALL
654 mvfc r0, bpc
655 ld r1, @r0
656 ldi r1, #0x20 ; error_code 667 ldi r1, #0x20 ; error_code
657 mv r0, sp ; pt_regs 668 mv r0, sp ; pt_regs
658 bl do_rie_handler 669 bl do_rie_handler
diff --git a/arch/m32r/kernel/io_m32104ut.c b/arch/m32r/kernel/io_m32104ut.c
new file mode 100644
index 000000000000..d26adab9586c
--- /dev/null
+++ b/arch/m32r/kernel/io_m32104ut.c
@@ -0,0 +1,298 @@
1/*
2 * linux/arch/m32r/kernel/io_m32104ut.c
3 *
4 * Typical I/O routines for M32104UT board.
5 *
6 * Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata,
7 * Hitoshi Yamamoto, Mamoru Sakugawa,
8 * Naoto Sugai, Hayato Fujiwara
9 */
10
11#include <linux/config.h>
12#include <asm/m32r.h>
13#include <asm/page.h>
14#include <asm/io.h>
15#include <asm/byteorder.h>
16
17#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
18#include <linux/types.h>
19
20#define M32R_PCC_IOMAP_SIZE 0x1000
21
22#define M32R_PCC_IOSTART0 0x1000
23#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)
24
25extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
26extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
27extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
28extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
29#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */
30
31#define PORT2ADDR(port) _port2addr(port)
32
33static inline void *_port2addr(unsigned long port)
34{
35 return (void *)(port | NONCACHE_OFFSET);
36}
37
38#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
39static inline void *__port2addr_ata(unsigned long port)
40{
41 static int dummy_reg;
42
43 switch (port) {
44 case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET);
45 case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET);
46 case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET);
47 case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET);
48 case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET);
49 case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET);
50 case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET);
51 case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET);
52 case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET);
53 default: return (void *)&dummy_reg;
54 }
55}
56#endif
57
58/*
59 * M32104UT-LAN is located in the extended bus space
60 * from 0x01000000 to 0x01ffffff on physical address.
61 * The base address of the LAN controller (LAN91C111) is 0x300.
62 */
63#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
64#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
65static inline void *_port2addr_ne(unsigned long port)
66{
67 return (void *)(port + NONCACHE_OFFSET + 0x01000000);
68}
69
70static inline void delay(void)
71{
72 __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
73}
74
75/*
76 * NIC I/O function
77 */
78
79#define PORT2ADDR_NE(port) _port2addr_ne(port)
80
81static inline unsigned char _ne_inb(void *portp)
82{
83 return *(volatile unsigned char *)portp;
84}
85
86static inline unsigned short _ne_inw(void *portp)
87{
88 return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp);
89}
90
91static inline void _ne_insb(void *portp, void *addr, unsigned long count)
92{
93 unsigned char *buf = (unsigned char *)addr;
94
95 while (count--)
96 *buf++ = _ne_inb(portp);
97}
98
99static inline void _ne_outb(unsigned char b, void *portp)
100{
101 *(volatile unsigned char *)portp = b;
102}
103
104static inline void _ne_outw(unsigned short w, void *portp)
105{
106 *(volatile unsigned short *)portp = cpu_to_le16(w);
107}
108
109unsigned char _inb(unsigned long port)
110{
111 if (port >= LAN_IOSTART && port < LAN_IOEND)
112 return _ne_inb(PORT2ADDR_NE(port));
113
114 return *(volatile unsigned char *)PORT2ADDR(port);
115}
116
117unsigned short _inw(unsigned long port)
118{
119 if (port >= LAN_IOSTART && port < LAN_IOEND)
120 return _ne_inw(PORT2ADDR_NE(port));
121
122 return *(volatile unsigned short *)PORT2ADDR(port);
123}
124
125unsigned long _inl(unsigned long port)
126{
127 return *(volatile unsigned long *)PORT2ADDR(port);
128}
129
130unsigned char _inb_p(unsigned long port)
131{
132 unsigned char v = _inb(port);
133 delay();
134 return (v);
135}
136
137unsigned short _inw_p(unsigned long port)
138{
139 unsigned short v = _inw(port);
140 delay();
141 return (v);
142}
143
144unsigned long _inl_p(unsigned long port)
145{
146 unsigned long v = _inl(port);
147 delay();
148 return (v);
149}
150
151void _outb(unsigned char b, unsigned long port)
152{
153 if (port >= LAN_IOSTART && port < LAN_IOEND)
154 _ne_outb(b, PORT2ADDR_NE(port));
155 else
156 *(volatile unsigned char *)PORT2ADDR(port) = b;
157}
158
159void _outw(unsigned short w, unsigned long port)
160{
161 if (port >= LAN_IOSTART && port < LAN_IOEND)
162 _ne_outw(w, PORT2ADDR_NE(port));
163 else
164 *(volatile unsigned short *)PORT2ADDR(port) = w;
165}
166
167void _outl(unsigned long l, unsigned long port)
168{
169 *(volatile unsigned long *)PORT2ADDR(port) = l;
170}
171
172void _outb_p(unsigned char b, unsigned long port)
173{
174 _outb(b, port);
175 delay();
176}
177
178void _outw_p(unsigned short w, unsigned long port)
179{
180 _outw(w, port);
181 delay();
182}
183
184void _outl_p(unsigned long l, unsigned long port)
185{
186 _outl(l, port);
187 delay();
188}
189
190void _insb(unsigned int port, void *addr, unsigned long count)
191{
192 if (port >= LAN_IOSTART && port < LAN_IOEND)
193 _ne_insb(PORT2ADDR_NE(port), addr, count);
194 else {
195 unsigned char *buf = addr;
196 unsigned char *portp = PORT2ADDR(port);
197 while (count--)
198 *buf++ = *(volatile unsigned char *)portp;
199 }
200}
201
202void _insw(unsigned int port, void *addr, unsigned long count)
203{
204 unsigned short *buf = addr;
205 unsigned short *portp;
206
207 if (port >= LAN_IOSTART && port < LAN_IOEND) {
208 /*
209 * This portion is only used by smc91111.c to read data
210 * from the DATA_REG. Do not swap the data.
211 */
212 portp = PORT2ADDR_NE(port);
213 while (count--)
214 *buf++ = *(volatile unsigned short *)portp;
215#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
216 } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
217 pcc_ioread_word(9, port, (void *)addr, sizeof(unsigned short),
218 count, 1);
219#endif
220#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
221	} else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
222 portp = __port2addr_ata(port);
223 while (count--)
224 *buf++ = *(volatile unsigned short *)portp;
225#endif
226 } else {
227 portp = PORT2ADDR(port);
228 while (count--)
229 *buf++ = *(volatile unsigned short *)portp;
230 }
231}
232
233void _insl(unsigned int port, void *addr, unsigned long count)
234{
235 unsigned long *buf = addr;
236 unsigned long *portp;
237
238 portp = PORT2ADDR(port);
239 while (count--)
240 *buf++ = *(volatile unsigned long *)portp;
241}
242
243void _outsb(unsigned int port, const void *addr, unsigned long count)
244{
245 const unsigned char *buf = addr;
246 unsigned char *portp;
247
248 if (port >= LAN_IOSTART && port < LAN_IOEND) {
249 portp = PORT2ADDR_NE(port);
250 while (count--)
251 _ne_outb(*buf++, portp);
252 } else {
253 portp = PORT2ADDR(port);
254 while (count--)
255 *(volatile unsigned char *)portp = *buf++;
256 }
257}
258
259void _outsw(unsigned int port, const void *addr, unsigned long count)
260{
261 const unsigned short *buf = addr;
262 unsigned short *portp;
263
264 if (port >= LAN_IOSTART && port < LAN_IOEND) {
265 /*
266 * This portion is only used by smc91111.c to write data
267 * into the DATA_REG. Do not swap the data.
268 */
269 portp = PORT2ADDR_NE(port);
270 while (count--)
271 *(volatile unsigned short *)portp = *buf++;
272#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
273	} else if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6) {
274 portp = __port2addr_ata(port);
275 while (count--)
276 *(volatile unsigned short *)portp = *buf++;
277#endif
278#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
279 } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
280 pcc_iowrite_word(9, port, (void *)addr, sizeof(unsigned short),
281 count, 1);
282#endif
283 } else {
284 portp = PORT2ADDR(port);
285 while (count--)
286 *(volatile unsigned short *)portp = *buf++;
287 }
288}
289
290void _outsl(unsigned int port, const void *addr, unsigned long count)
291{
292 const unsigned long *buf = addr;
293 unsigned char *portp;
294
295 portp = PORT2ADDR(port);
296 while (count--)
297 *(volatile unsigned long *)portp = *buf++;
298}
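io_m32104ut.c follows the pattern of the other m32r board files: every port accessor first tests for the LAN window (0x300-0x31f) and rebases it into the extended bus space at 0x01000000, and the single-register NIC accessors byte-swap (le16) while the string forms deliberately move raw bytes for the smc91x DATA_REG. A minimal, compilable sketch of the dispatch idea, with illustrative constants and the noncache offset omitted:

    #include <stdint.h>

    #define LAN_IOSTART  0x300UL
    #define LAN_IOEND    0x320UL
    #define LAN_BUS_BASE 0x01000000UL  /* extended bus window, per the comment above */

    /* Sketch: LAN ports get rebased into the extended bus window,
     * everything else is a flat (noncached, in the real code) address. */
    static volatile uint8_t *port2addr(unsigned long port)
    {
            if (port >= LAN_IOSTART && port < LAN_IOEND)
                    return (volatile uint8_t *)(port + LAN_BUS_BASE);
            return (volatile uint8_t *)port;
    }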
diff --git a/arch/m32r/kernel/io_m32700ut.c b/arch/m32r/kernel/io_m32700ut.c
index eda9f963c1eb..939932d6cc00 100644
--- a/arch/m32r/kernel/io_m32700ut.c
+++ b/arch/m32r/kernel/io_m32700ut.c
@@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
36 36
37static inline void *_port2addr(unsigned long port) 37static inline void *_port2addr(unsigned long port)
38{ 38{
39 return (void *)(port + NONCACHE_OFFSET); 39 return (void *)(port | NONCACHE_OFFSET);
40} 40}
41 41
42#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) 42#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
@@ -45,15 +45,15 @@ static inline void *__port2addr_ata(unsigned long port)
45 static int dummy_reg; 45 static int dummy_reg;
46 46
47 switch (port) { 47 switch (port) {
48 case 0x1f0: return (void *)0xac002000; 48 case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET);
49 case 0x1f1: return (void *)0xac012800; 49 case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET);
50 case 0x1f2: return (void *)0xac012002; 50 case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET);
51 case 0x1f3: return (void *)0xac012802; 51 case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET);
52 case 0x1f4: return (void *)0xac012004; 52 case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET);
53 case 0x1f5: return (void *)0xac012804; 53 case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET);
54 case 0x1f6: return (void *)0xac012006; 54 case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET);
55 case 0x1f7: return (void *)0xac012806; 55 case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET);
56 case 0x3f6: return (void *)0xac01200e; 56 case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET);
57 default: return (void *)&dummy_reg; 57 default: return (void *)&dummy_reg;
58 } 58 }
59} 59}
@@ -64,8 +64,8 @@ static inline void *__port2addr_ata(unsigned long port)
64 * from 0x10000000 to 0x13ffffff on physical address. 64 * from 0x10000000 to 0x13ffffff on physical address.
65 * The base address of LAN controller(LAN91C111) is 0x300. 65 * The base address of LAN controller(LAN91C111) is 0x300.
66 */ 66 */
67#define LAN_IOSTART 0xa0000300 67#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
68#define LAN_IOEND 0xa0000320 68#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
69static inline void *_port2addr_ne(unsigned long port) 69static inline void *_port2addr_ne(unsigned long port)
70{ 70{
71 return (void *)(port + 0x10000000); 71 return (void *)(port + 0x10000000);
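The change repeated across these io_*.c files replaces hard-coded virtual addresses (0xac002000 and friends) with physical-address | NONCACHE_OFFSET, and switches _port2addr from + to |. The old literals imply a 0xa0000000 noncache offset (an inference from this hunk, not something it states); under that assumption OR and ADD yield the same address because the operands share no set bits, but OR states the intent and is idempotent. A runnable check:

    #include <assert.h>
    #include <stdio.h>

    #define NONCACHE_OFFSET 0xa0000000UL  /* implied by the old 0xacXXXXXX literals */

    int main(void)
    {
            unsigned long phys = 0x0c002000UL;  /* old literal was 0xac002000 */

            /* OR and ADD agree because phys shares no bits with the offset... */
            assert((phys | NONCACHE_OFFSET) == (phys + NONCACHE_OFFSET));

            /* ...but OR is idempotent, ADD is not. */
            unsigned long a = phys | NONCACHE_OFFSET;
            assert((a | NONCACHE_OFFSET) == a);

            printf("0x%lx\n", phys | NONCACHE_OFFSET);
            return 0;
    }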
diff --git a/arch/m32r/kernel/io_mappi.c b/arch/m32r/kernel/io_mappi.c
index 3c3da042fbd1..a662b537c5ba 100644
--- a/arch/m32r/kernel/io_mappi.c
+++ b/arch/m32r/kernel/io_mappi.c
@@ -31,7 +31,7 @@ extern void pcc_iowrite(int, unsigned long, void *, size_t, size_t, int);
31 31
32static inline void *_port2addr(unsigned long port) 32static inline void *_port2addr(unsigned long port)
33{ 33{
34 return (void *)(port | (NONCACHE_OFFSET)); 34 return (void *)(port | NONCACHE_OFFSET);
35} 35}
36 36
37static inline void *_port2addr_ne(unsigned long port) 37static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_mappi2.c b/arch/m32r/kernel/io_mappi2.c
index df3c729cb3e0..e72d725606af 100644
--- a/arch/m32r/kernel/io_mappi2.c
+++ b/arch/m32r/kernel/io_mappi2.c
@@ -33,7 +33,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
33 33
34static inline void *_port2addr(unsigned long port) 34static inline void *_port2addr(unsigned long port)
35{ 35{
36 return (void *)(port | (NONCACHE_OFFSET)); 36 return (void *)(port | NONCACHE_OFFSET);
37} 37}
38 38
39#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) 39#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
@@ -42,22 +42,22 @@ static inline void *__port2addr_ata(unsigned long port)
42 static int dummy_reg; 42 static int dummy_reg;
43 43
44 switch (port) { 44 switch (port) {
45 case 0x1f0: return (void *)0xac002000; 45 case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET);
46 case 0x1f1: return (void *)0xac012800; 46 case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET);
47 case 0x1f2: return (void *)0xac012002; 47 case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET);
48 case 0x1f3: return (void *)0xac012802; 48 case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET);
49 case 0x1f4: return (void *)0xac012004; 49 case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET);
50 case 0x1f5: return (void *)0xac012804; 50 case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET);
51 case 0x1f6: return (void *)0xac012006; 51 case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET);
52 case 0x1f7: return (void *)0xac012806; 52 case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET);
53 case 0x3f6: return (void *)0xac01200e; 53 case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET);
54 default: return (void *)&dummy_reg; 54 default: return (void *)&dummy_reg;
55 } 55 }
56} 56}
57#endif 57#endif
58 58
59#define LAN_IOSTART 0xa0000300 59#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
60#define LAN_IOEND 0xa0000320 60#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
61#ifdef CONFIG_CHIP_OPSP 61#ifdef CONFIG_CHIP_OPSP
62static inline void *_port2addr_ne(unsigned long port) 62static inline void *_port2addr_ne(unsigned long port)
63{ 63{
diff --git a/arch/m32r/kernel/io_mappi3.c b/arch/m32r/kernel/io_mappi3.c
index f80321a58764..ed6da930bc64 100644
--- a/arch/m32r/kernel/io_mappi3.c
+++ b/arch/m32r/kernel/io_mappi3.c
@@ -33,7 +33,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
33 33
34static inline void *_port2addr(unsigned long port) 34static inline void *_port2addr(unsigned long port)
35{ 35{
36 return (void *)(port + NONCACHE_OFFSET); 36 return (void *)(port | NONCACHE_OFFSET);
37} 37}
38 38
39#if defined(CONFIG_IDE) 39#if defined(CONFIG_IDE)
@@ -43,33 +43,42 @@ static inline void *__port2addr_ata(unsigned long port)
43 43
44 switch (port) { 44 switch (port) {
45 /* IDE0 CF */ 45 /* IDE0 CF */
46 case 0x1f0: return (void *)0xb4002000; 46 case 0x1f0: return (void *)(0x14002000 | NONCACHE_OFFSET);
47 case 0x1f1: return (void *)0xb4012800; 47 case 0x1f1: return (void *)(0x14012800 | NONCACHE_OFFSET);
48 case 0x1f2: return (void *)0xb4012002; 48 case 0x1f2: return (void *)(0x14012002 | NONCACHE_OFFSET);
49 case 0x1f3: return (void *)0xb4012802; 49 case 0x1f3: return (void *)(0x14012802 | NONCACHE_OFFSET);
50 case 0x1f4: return (void *)0xb4012004; 50 case 0x1f4: return (void *)(0x14012004 | NONCACHE_OFFSET);
51 case 0x1f5: return (void *)0xb4012804; 51 case 0x1f5: return (void *)(0x14012804 | NONCACHE_OFFSET);
52 case 0x1f6: return (void *)0xb4012006; 52 case 0x1f6: return (void *)(0x14012006 | NONCACHE_OFFSET);
53 case 0x1f7: return (void *)0xb4012806; 53 case 0x1f7: return (void *)(0x14012806 | NONCACHE_OFFSET);
54 case 0x3f6: return (void *)0xb401200e; 54 case 0x3f6: return (void *)(0x1401200e | NONCACHE_OFFSET);
55 /* IDE1 IDE */ 55 /* IDE1 IDE */
56 case 0x170: return (void *)0xb4810000; /* Data 16bit */ 56 case 0x170: /* Data 16bit */
57 case 0x171: return (void *)0xb4810002; /* Features / Error */ 57 return (void *)(0x14810000 | NONCACHE_OFFSET);
58 case 0x172: return (void *)0xb4810004; /* Sector count */ 58 case 0x171: /* Features / Error */
59 case 0x173: return (void *)0xb4810006; /* Sector number */ 59 return (void *)(0x14810002 | NONCACHE_OFFSET);
60 case 0x174: return (void *)0xb4810008; /* Cylinder low */ 60 case 0x172: /* Sector count */
61 case 0x175: return (void *)0xb481000a; /* Cylinder high */ 61 return (void *)(0x14810004 | NONCACHE_OFFSET);
62 case 0x176: return (void *)0xb481000c; /* Device head */ 62 case 0x173: /* Sector number */
63 case 0x177: return (void *)0xb481000e; /* Command */ 63 return (void *)(0x14810006 | NONCACHE_OFFSET);
64 case 0x376: return (void *)0xb480800c; /* Device control / Alt status */ 64 case 0x174: /* Cylinder low */
65 return (void *)(0x14810008 | NONCACHE_OFFSET);
66 case 0x175: /* Cylinder high */
67 return (void *)(0x1481000a | NONCACHE_OFFSET);
68 case 0x176: /* Device head */
69 return (void *)(0x1481000c | NONCACHE_OFFSET);
70 case 0x177: /* Command */
71 return (void *)(0x1481000e | NONCACHE_OFFSET);
72 case 0x376: /* Device control / Alt status */
73 return (void *)(0x1480800c | NONCACHE_OFFSET);
65 74
66 default: return (void *)&dummy_reg; 75 default: return (void *)&dummy_reg;
67 } 76 }
68} 77}
69#endif 78#endif
70 79
71#define LAN_IOSTART 0xa0000300 80#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
72#define LAN_IOEND 0xa0000320 81#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
73static inline void *_port2addr_ne(unsigned long port) 82static inline void *_port2addr_ne(unsigned long port)
74{ 83{
75 return (void *)(port + 0x10000000); 84 return (void *)(port + 0x10000000);
diff --git a/arch/m32r/kernel/io_oaks32r.c b/arch/m32r/kernel/io_oaks32r.c
index 8be323931e4a..910dd131c227 100644
--- a/arch/m32r/kernel/io_oaks32r.c
+++ b/arch/m32r/kernel/io_oaks32r.c
@@ -16,7 +16,7 @@
16 16
17static inline void *_port2addr(unsigned long port) 17static inline void *_port2addr(unsigned long port)
18{ 18{
19 return (void *)(port | (NONCACHE_OFFSET)); 19 return (void *)(port | NONCACHE_OFFSET);
20} 20}
21 21
22static inline void *_port2addr_ne(unsigned long port) 22static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_opsput.c b/arch/m32r/kernel/io_opsput.c
index 4793bd18e115..bec69297db3c 100644
--- a/arch/m32r/kernel/io_opsput.c
+++ b/arch/m32r/kernel/io_opsput.c
@@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
36 36
37static inline void *_port2addr(unsigned long port) 37static inline void *_port2addr(unsigned long port)
38{ 38{
39 return (void *)(port | (NONCACHE_OFFSET)); 39 return (void *)(port | NONCACHE_OFFSET);
40} 40}
41 41
42/* 42/*
@@ -44,8 +44,8 @@ static inline void *_port2addr(unsigned long port)
44 * from 0x10000000 to 0x13ffffff on physical address. 44 * from 0x10000000 to 0x13ffffff on physical address.
45 * The base address of LAN controller(LAN91C111) is 0x300. 45 * The base address of LAN controller(LAN91C111) is 0x300.
46 */ 46 */
47#define LAN_IOSTART 0xa0000300 47#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
48#define LAN_IOEND 0xa0000320 48#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
49static inline void *_port2addr_ne(unsigned long port) 49static inline void *_port2addr_ne(unsigned long port)
50{ 50{
51 return (void *)(port + 0x10000000); 51 return (void *)(port + 0x10000000);
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index f722ec8eb021..c2e4dccf0112 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -320,6 +320,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
320#elif defined(CONFIG_CHIP_MP) 320#elif defined(CONFIG_CHIP_MP)
321 seq_printf(m, "cpu family\t: M32R-MP\n" 321 seq_printf(m, "cpu family\t: M32R-MP\n"
322 "cache size\t: I-xxKB/D-xxKB\n"); 322 "cache size\t: I-xxKB/D-xxKB\n");
323#elif defined(CONFIG_CHIP_M32104)
324 seq_printf(m,"cpu family\t: M32104\n"
325 "cache size\t: I-8KB/D-8KB\n");
323#else 326#else
324 seq_printf(m, "cpu family\t: Unknown\n"); 327 seq_printf(m, "cpu family\t: Unknown\n");
325#endif 328#endif
@@ -340,6 +343,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
340 seq_printf(m, "Machine\t\t: uServer\n"); 343 seq_printf(m, "Machine\t\t: uServer\n");
341#elif defined(CONFIG_PLAT_OAKS32R) 344#elif defined(CONFIG_PLAT_OAKS32R)
342 seq_printf(m, "Machine\t\t: OAKS32R\n"); 345 seq_printf(m, "Machine\t\t: OAKS32R\n");
346#elif defined(CONFIG_PLAT_M32104UT)
347 seq_printf(m, "Machine\t\t: M3T-M32104UT uT Engine board\n");
343#else 348#else
344 seq_printf(m, "Machine\t\t: Unknown\n"); 349 seq_printf(m, "Machine\t\t: Unknown\n");
345#endif 350#endif
@@ -389,7 +394,7 @@ unsigned long cpu_initialized __initdata = 0;
389 */ 394 */
390#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \ 395#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
391 || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \ 396 || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
392 || defined(CONFIG_CHIP_OPSP) 397 || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
393void __init cpu_init (void) 398void __init cpu_init (void)
394{ 399{
395 int cpu_id = smp_processor_id(); 400 int cpu_id = smp_processor_id();
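Taken together, the two show_cpuinfo hunks make /proc/cpuinfo on the new board read roughly as follows (illustrative output pieced together from the seq_printf strings above):

    cpu family      : M32104
    cache size      : I-8KB/D-8KB
    Machine         : M3T-M32104UT uT Engine board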
diff --git a/arch/m32r/kernel/setup_m32104ut.c b/arch/m32r/kernel/setup_m32104ut.c
new file mode 100644
index 000000000000..6328e1357a80
--- /dev/null
+++ b/arch/m32r/kernel/setup_m32104ut.c
@@ -0,0 +1,156 @@
1/*
2 * linux/arch/m32r/kernel/setup_m32104ut.c
3 *
4 * Setup routines for M32104UT Board
5 *
6 * Copyright (c) 2002-2005 Hiroyuki Kondo, Hirokazu Takata,
7 * Hitoshi Yamamoto, Mamoru Sakugawa,
8 * Naoto Sugai, Hayato Fujiwara
9 */
10
11#include <linux/config.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/device.h>
16
17#include <asm/system.h>
18#include <asm/m32r.h>
19#include <asm/io.h>
20
21#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
22
23icu_data_t icu_data[NR_IRQS];
24
25static void disable_m32104ut_irq(unsigned int irq)
26{
27 unsigned long port, data;
28
29 port = irq2port(irq);
30 data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
31 outl(data, port);
32}
33
34static void enable_m32104ut_irq(unsigned int irq)
35{
36 unsigned long port, data;
37
38 port = irq2port(irq);
39 data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
40 outl(data, port);
41}
42
43static void mask_and_ack_m32104ut(unsigned int irq)
44{
45 disable_m32104ut_irq(irq);
46}
47
48static void end_m32104ut_irq(unsigned int irq)
49{
50 enable_m32104ut_irq(irq);
51}
52
53static unsigned int startup_m32104ut_irq(unsigned int irq)
54{
55 enable_m32104ut_irq(irq);
56 return (0);
57}
58
59static void shutdown_m32104ut_irq(unsigned int irq)
60{
61 unsigned long port;
62
63 port = irq2port(irq);
64 outl(M32R_ICUCR_ILEVEL7, port);
65}
66
67static struct hw_interrupt_type m32104ut_irq_type =
68{
69 .typename = "M32104UT-IRQ",
70 .startup = startup_m32104ut_irq,
71 .shutdown = shutdown_m32104ut_irq,
72 .enable = enable_m32104ut_irq,
73 .disable = disable_m32104ut_irq,
74 .ack = mask_and_ack_m32104ut,
75 .end = end_m32104ut_irq
76};
77
78void __init init_IRQ(void)
79{
80 static int once = 0;
81
82 if (once)
83 return;
84 else
85 once++;
86
87#if defined(CONFIG_SMC91X)
88	/* INT#0: LAN controller on M32104UT-LAN (SMC91C111) */
89 irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
90 irq_desc[M32R_IRQ_INT0].handler = &m32104ut_irq_type;
91 irq_desc[M32R_IRQ_INT0].action = 0;
92 irq_desc[M32R_IRQ_INT0].depth = 1;
93 icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11; /* "H" level sense */
94 disable_m32104ut_irq(M32R_IRQ_INT0);
95#endif /* CONFIG_SMC91X */
96
97 /* MFT2 : system timer */
98 irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
99 irq_desc[M32R_IRQ_MFT2].handler = &m32104ut_irq_type;
100 irq_desc[M32R_IRQ_MFT2].action = 0;
101 irq_desc[M32R_IRQ_MFT2].depth = 1;
102 icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
103 disable_m32104ut_irq(M32R_IRQ_MFT2);
104
105#ifdef CONFIG_SERIAL_M32R_SIO
106 /* SIO0_R : uart receive data */
107 irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
108 irq_desc[M32R_IRQ_SIO0_R].handler = &m32104ut_irq_type;
109 irq_desc[M32R_IRQ_SIO0_R].action = 0;
110 irq_desc[M32R_IRQ_SIO0_R].depth = 1;
111 icu_data[M32R_IRQ_SIO0_R].icucr = M32R_ICUCR_IEN;
112 disable_m32104ut_irq(M32R_IRQ_SIO0_R);
113
114 /* SIO0_S : uart send data */
115 irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
116 irq_desc[M32R_IRQ_SIO0_S].handler = &m32104ut_irq_type;
117 irq_desc[M32R_IRQ_SIO0_S].action = 0;
118 irq_desc[M32R_IRQ_SIO0_S].depth = 1;
119 icu_data[M32R_IRQ_SIO0_S].icucr = M32R_ICUCR_IEN;
120 disable_m32104ut_irq(M32R_IRQ_SIO0_S);
121#endif /* CONFIG_SERIAL_M32R_SIO */
122}
123
124#if defined(CONFIG_SMC91X)
125
126#define LAN_IOSTART 0x300
127#define LAN_IOEND 0x320
128static struct resource smc91x_resources[] = {
129 [0] = {
130 .start = (LAN_IOSTART),
131 .end = (LAN_IOEND),
132 .flags = IORESOURCE_MEM,
133 },
134 [1] = {
135 .start = M32R_IRQ_INT0,
136 .end = M32R_IRQ_INT0,
137 .flags = IORESOURCE_IRQ,
138 }
139};
140
141static struct platform_device smc91x_device = {
142 .name = "smc91x",
143 .id = 0,
144 .num_resources = ARRAY_SIZE(smc91x_resources),
145 .resource = smc91x_resources,
146};
147#endif
148
149static int __init platform_init(void)
150{
151#if defined(CONFIG_SMC91X)
152 platform_device_register(&smc91x_device);
153#endif
154 return 0;
155}
156arch_initcall(platform_init);
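Two things worth noting in this new board file: interrupts are masked by parking them at priority level 7 (M32R_ICUCR_ILEVEL7) and enabled at level 6 with IEN set, and the SMC91C111 is published to the driver core as a plain platform device keyed on the name "smc91x". A sketch of the driver half such a registration pairs with; this uses the later platform_driver API for brevity and is not the actual smc91x driver code:

    static int smc91x_probe(struct platform_device *pdev)
    {
            struct resource *io  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

            if (!io || !irq)
                    return -ENODEV;
            /* ioremap io->start..io->end, request_irq(irq->start, ...) */
            return 0;
    }

    static struct platform_driver smc91x_driver = {
            .probe  = smc91x_probe,
            .driver = { .name = "smc91x" },  /* matched against the device name */
    };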
diff --git a/arch/m32r/kernel/setup_m32700ut.c b/arch/m32r/kernel/setup_m32700ut.c
index cb76916b014d..fad1fc99bb27 100644
--- a/arch/m32r/kernel/setup_m32700ut.c
+++ b/arch/m32r/kernel/setup_m32700ut.c
@@ -26,15 +26,7 @@
26 */ 26 */
27#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) 27#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
28 28
29#ifndef CONFIG_SMP
30typedef struct {
31 unsigned long icucr; /* ICU Control Register */
32} icu_data_t;
33static icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
34#else
35icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; 29icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
36#endif /* CONFIG_SMP */
37
38 30
39static void disable_m32700ut_irq(unsigned int irq) 31static void disable_m32700ut_irq(unsigned int irq)
40{ 32{
diff --git a/arch/m32r/kernel/setup_mappi.c b/arch/m32r/kernel/setup_mappi.c
index 501d798cf050..00f253209cb3 100644
--- a/arch/m32r/kernel/setup_mappi.c
+++ b/arch/m32r/kernel/setup_mappi.c
@@ -19,12 +19,6 @@
19 19
20#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) 20#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
21 21
22#ifndef CONFIG_SMP
23typedef struct {
24 unsigned long icucr; /* ICU Control Register */
25} icu_data_t;
26#endif /* CONFIG_SMP */
27
28icu_data_t icu_data[NR_IRQS]; 22icu_data_t icu_data[NR_IRQS];
29 23
30static void disable_mappi_irq(unsigned int irq) 24static void disable_mappi_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_mappi2.c b/arch/m32r/kernel/setup_mappi2.c
index 7f2db5bfd626..eebc9d8b4e72 100644
--- a/arch/m32r/kernel/setup_mappi2.c
+++ b/arch/m32r/kernel/setup_mappi2.c
@@ -19,12 +19,6 @@
19 19
20#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) 20#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
21 21
22#ifndef CONFIG_SMP
23typedef struct {
24 unsigned long icucr; /* ICU Control Register */
25} icu_data_t;
26#endif /* CONFIG_SMP */
27
28icu_data_t icu_data[NR_IRQS]; 22icu_data_t icu_data[NR_IRQS];
29 23
30static void disable_mappi2_irq(unsigned int irq) 24static void disable_mappi2_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_mappi3.c b/arch/m32r/kernel/setup_mappi3.c
index f6ecdf7f555c..d2ff021e2d3d 100644
--- a/arch/m32r/kernel/setup_mappi3.c
+++ b/arch/m32r/kernel/setup_mappi3.c
@@ -19,12 +19,6 @@
19 19
20#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) 20#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
21 21
22#ifndef CONFIG_SMP
23typedef struct {
24 unsigned long icucr; /* ICU Control Register */
25} icu_data_t;
26#endif /* CONFIG_SMP */
27
28icu_data_t icu_data[NR_IRQS]; 22icu_data_t icu_data[NR_IRQS];
29 23
30static void disable_mappi3_irq(unsigned int irq) 24static void disable_mappi3_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_oaks32r.c b/arch/m32r/kernel/setup_oaks32r.c
index 45add5b76f19..0e9e63538c0f 100644
--- a/arch/m32r/kernel/setup_oaks32r.c
+++ b/arch/m32r/kernel/setup_oaks32r.c
@@ -18,12 +18,6 @@
18 18
19#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) 19#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
20 20
21#ifndef CONFIG_SMP
22typedef struct {
23 unsigned long icucr; /* ICU Control Register */
24} icu_data_t;
25#endif /* CONFIG_SMP */
26
27icu_data_t icu_data[NR_IRQS]; 21icu_data_t icu_data[NR_IRQS];
28 22
29static void disable_oaks32r_irq(unsigned int irq) 23static void disable_oaks32r_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_opsput.c b/arch/m32r/kernel/setup_opsput.c
index 1fbb140854e7..548e8fc7949b 100644
--- a/arch/m32r/kernel/setup_opsput.c
+++ b/arch/m32r/kernel/setup_opsput.c
@@ -27,15 +27,7 @@
27 */ 27 */
28#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) 28#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
29 29
30#ifndef CONFIG_SMP
31typedef struct {
32 unsigned long icucr; /* ICU Control Register */
33} icu_data_t;
34static icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
35#else
36icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ]; 30icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
37#endif /* CONFIG_SMP */
38
39 31
40static void disable_opsput_irq(unsigned int irq) 32static void disable_opsput_irq(unsigned int irq)
41{ 33{
diff --git a/arch/m32r/kernel/setup_usrv.c b/arch/m32r/kernel/setup_usrv.c
index 634741bf9d35..64be659a23e7 100644
--- a/arch/m32r/kernel/setup_usrv.c
+++ b/arch/m32r/kernel/setup_usrv.c
@@ -18,12 +18,6 @@
18 18
19#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long))) 19#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
20 20
21#if !defined(CONFIG_SMP)
22typedef struct {
23 unsigned long icucr; /* ICU Control Register */
24} icu_data_t;
25#endif /* CONFIG_SMP */
26
27icu_data_t icu_data[M32700UT_NUM_CPU_IRQ]; 21icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
28 22
29static void disable_mappi_irq(unsigned int irq) 23static void disable_mappi_irq(unsigned int irq)
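The same cleanup lands in every board file above: the per-file icu_data_t typedef, previously duplicated under #ifndef CONFIG_SMP, goes away in favour of one shared definition, presumably in a common m32r header outside this patch. All of the deleted copies declared exactly this:

    typedef struct {
            unsigned long icucr;  /* ICU Control Register image */
    } icu_data_t;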
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 2ebce2063fea..b8e68b542302 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -57,7 +57,7 @@ static unsigned long do_gettimeoffset(void)
57 57
58#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \ 58#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
59 || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \ 59 || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
60 || defined(CONFIG_CHIP_OPSP) 60 || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
61#ifndef CONFIG_SMP 61#ifndef CONFIG_SMP
62 62
63 unsigned long count; 63 unsigned long count;
@@ -268,7 +268,7 @@ void __init time_init(void)
268 268
269#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \ 269#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
270 || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \ 270 || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
271 || defined(CONFIG_CHIP_OPSP) 271 || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
272 272
273 /* M32102 MFT setup */ 273 /* M32102 MFT setup */
274 setup_irq(M32R_IRQ_MFT2, &irq0); 274 setup_irq(M32R_IRQ_MFT2, &irq0);
diff --git a/arch/m32r/m32104ut/defconfig.m32104ut b/arch/m32r/m32104ut/defconfig.m32104ut
new file mode 100644
index 000000000000..454de336803a
--- /dev/null
+++ b/arch/m32r/m32104ut/defconfig.m32104ut
@@ -0,0 +1,657 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.14
4# Wed Nov 9 16:04:51 2005
5#
6CONFIG_M32R=y
7# CONFIG_UID16 is not set
8CONFIG_GENERIC_ISA_DMA=y
9CONFIG_GENERIC_HARDIRQS=y
10CONFIG_GENERIC_IRQ_PROBE=y
11
12#
13# Code maturity level options
14#
15CONFIG_EXPERIMENTAL=y
16CONFIG_CLEAN_COMPILE=y
17CONFIG_BROKEN_ON_SMP=y
18CONFIG_INIT_ENV_ARG_LIMIT=32
19
20#
21# General setup
22#
23CONFIG_LOCALVERSION=""
24CONFIG_LOCALVERSION_AUTO=y
25# CONFIG_POSIX_MQUEUE is not set
26# CONFIG_BSD_PROCESS_ACCT is not set
27CONFIG_SYSCTL=y
28# CONFIG_AUDIT is not set
29CONFIG_HOTPLUG=y
30# CONFIG_KOBJECT_UEVENT is not set
31# CONFIG_IKCONFIG is not set
32CONFIG_INITRAMFS_SOURCE=""
33CONFIG_EMBEDDED=y
34# CONFIG_KALLSYMS is not set
35CONFIG_PRINTK=y
36CONFIG_BUG=y
37CONFIG_BASE_FULL=y
38# CONFIG_FUTEX is not set
39# CONFIG_EPOLL is not set
40# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
41CONFIG_CC_ALIGN_FUNCTIONS=0
42CONFIG_CC_ALIGN_LABELS=0
43CONFIG_CC_ALIGN_LOOPS=0
44CONFIG_CC_ALIGN_JUMPS=0
45CONFIG_TINY_SHMEM=y
46CONFIG_BASE_SMALL=0
47
48#
49# Loadable module support
50#
51# CONFIG_MODULES is not set
52
53#
54# Processor type and features
55#
56# CONFIG_PLAT_MAPPI is not set
57# CONFIG_PLAT_USRV is not set
58# CONFIG_PLAT_M32700UT is not set
59# CONFIG_PLAT_OPSPUT is not set
60# CONFIG_PLAT_OAKS32R is not set
61# CONFIG_PLAT_MAPPI2 is not set
62# CONFIG_PLAT_MAPPI3 is not set
63CONFIG_PLAT_M32104UT=y
64# CONFIG_CHIP_M32700 is not set
65# CONFIG_CHIP_M32102 is not set
66CONFIG_CHIP_M32104=y
67# CONFIG_CHIP_VDEC2 is not set
68# CONFIG_CHIP_OPSP is not set
69CONFIG_ISA_M32R=y
70CONFIG_BUS_CLOCK=54000000
71CONFIG_TIMER_DIVIDE=128
72# CONFIG_CPU_LITTLE_ENDIAN is not set
73CONFIG_MEMORY_START=04000000
74CONFIG_MEMORY_SIZE=01000000
75CONFIG_NOHIGHMEM=y
76# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
77CONFIG_SELECT_MEMORY_MODEL=y
78CONFIG_FLATMEM_MANUAL=y
79# CONFIG_DISCONTIGMEM_MANUAL is not set
80# CONFIG_SPARSEMEM_MANUAL is not set
81CONFIG_FLATMEM=y
82CONFIG_FLAT_NODE_MEM_MAP=y
83# CONFIG_SPARSEMEM_STATIC is not set
84CONFIG_RWSEM_GENERIC_SPINLOCK=y
85# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
86CONFIG_GENERIC_CALIBRATE_DELAY=y
87# CONFIG_PREEMPT is not set
88# CONFIG_SMP is not set
89
90#
91# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
92#
93# CONFIG_ISA is not set
94
95#
96# PCCARD (PCMCIA/CardBus) support
97#
98CONFIG_PCCARD=y
99# CONFIG_PCMCIA_DEBUG is not set
100CONFIG_PCMCIA=y
101CONFIG_PCMCIA_LOAD_CIS=y
102CONFIG_PCMCIA_IOCTL=y
103
104#
105# PC-card bridges
106#
107
108#
109# PCI Hotplug Support
110#
111
112#
113# Executable file formats
114#
115CONFIG_BINFMT_FLAT=y
116# CONFIG_BINFMT_ZFLAT is not set
117# CONFIG_BINFMT_SHARED_FLAT is not set
118# CONFIG_BINFMT_MISC is not set
119
120#
121# Networking
122#
123CONFIG_NET=y
124
125#
126# Networking options
127#
128# CONFIG_PACKET is not set
129CONFIG_UNIX=y
130# CONFIG_NET_KEY is not set
131CONFIG_INET=y
132# CONFIG_IP_MULTICAST is not set
133# CONFIG_IP_ADVANCED_ROUTER is not set
134CONFIG_IP_FIB_HASH=y
135CONFIG_IP_PNP=y
136CONFIG_IP_PNP_DHCP=y
137# CONFIG_IP_PNP_BOOTP is not set
138# CONFIG_IP_PNP_RARP is not set
139# CONFIG_NET_IPIP is not set
140# CONFIG_NET_IPGRE is not set
141# CONFIG_ARPD is not set
142# CONFIG_SYN_COOKIES is not set
143# CONFIG_INET_AH is not set
144# CONFIG_INET_ESP is not set
145# CONFIG_INET_IPCOMP is not set
146# CONFIG_INET_TUNNEL is not set
147CONFIG_INET_DIAG=y
148CONFIG_INET_TCP_DIAG=y
149# CONFIG_TCP_CONG_ADVANCED is not set
150CONFIG_TCP_CONG_BIC=y
151# CONFIG_IPV6 is not set
152# CONFIG_NETFILTER is not set
153
154#
155# DCCP Configuration (EXPERIMENTAL)
156#
157# CONFIG_IP_DCCP is not set
158
159#
160# SCTP Configuration (EXPERIMENTAL)
161#
162# CONFIG_IP_SCTP is not set
163# CONFIG_ATM is not set
164# CONFIG_BRIDGE is not set
165# CONFIG_VLAN_8021Q is not set
166# CONFIG_DECNET is not set
167# CONFIG_LLC2 is not set
168# CONFIG_IPX is not set
169# CONFIG_ATALK is not set
170# CONFIG_X25 is not set
171# CONFIG_LAPB is not set
172# CONFIG_NET_DIVERT is not set
173# CONFIG_ECONET is not set
174# CONFIG_WAN_ROUTER is not set
175# CONFIG_NET_SCHED is not set
176# CONFIG_NET_CLS_ROUTE is not set
177
178#
179# Network testing
180#
181# CONFIG_NET_PKTGEN is not set
182# CONFIG_HAMRADIO is not set
183# CONFIG_IRDA is not set
184# CONFIG_BT is not set
185# CONFIG_IEEE80211 is not set
186
187#
188# Device Drivers
189#
190
191#
192# Generic Driver Options
193#
194CONFIG_STANDALONE=y
195CONFIG_PREVENT_FIRMWARE_BUILD=y
196CONFIG_FW_LOADER=y
197# CONFIG_DEBUG_DRIVER is not set
198
199#
200# Connector - unified userspace <-> kernelspace linker
201#
202# CONFIG_CONNECTOR is not set
203
204#
205# Memory Technology Devices (MTD)
206#
207# CONFIG_MTD is not set
208
209#
210# Parallel port support
211#
212# CONFIG_PARPORT is not set
213
214#
215# Plug and Play support
216#
217
218#
219# Block devices
220#
221# CONFIG_BLK_DEV_COW_COMMON is not set
222CONFIG_BLK_DEV_LOOP=y
223# CONFIG_BLK_DEV_CRYPTOLOOP is not set
224CONFIG_BLK_DEV_NBD=y
225CONFIG_BLK_DEV_RAM=y
226CONFIG_BLK_DEV_RAM_COUNT=16
227CONFIG_BLK_DEV_RAM_SIZE=4096
228CONFIG_BLK_DEV_INITRD=y
229# CONFIG_CDROM_PKTCDVD is not set
230
231#
232# IO Schedulers
233#
234CONFIG_IOSCHED_NOOP=y
235# CONFIG_IOSCHED_AS is not set
236# CONFIG_IOSCHED_DEADLINE is not set
237# CONFIG_IOSCHED_CFQ is not set
238# CONFIG_ATA_OVER_ETH is not set
239
240#
241# ATA/ATAPI/MFM/RLL support
242#
243# CONFIG_IDE is not set
244
245#
246# SCSI device support
247#
248# CONFIG_RAID_ATTRS is not set
249# CONFIG_SCSI is not set
250
251#
252# Multi-device support (RAID and LVM)
253#
254# CONFIG_MD is not set
255
256#
257# Fusion MPT device support
258#
259# CONFIG_FUSION is not set
260
261#
262# IEEE 1394 (FireWire) support
263#
264
265#
266# I2O device support
267#
268
269#
270# Network device support
271#
272CONFIG_NETDEVICES=y
273CONFIG_DUMMY=y
274# CONFIG_BONDING is not set
275# CONFIG_EQUALIZER is not set
276# CONFIG_TUN is not set
277
278#
279# PHY device support
280#
281# CONFIG_PHYLIB is not set
282
283#
284# Ethernet (10 or 100Mbit)
285#
286CONFIG_NET_ETHERNET=y
287CONFIG_MII=y
288CONFIG_SMC91X=y
289# CONFIG_NE2000 is not set
290
291#
292# Ethernet (1000 Mbit)
293#
294
295#
296# Ethernet (10000 Mbit)
297#
298
299#
300# Token Ring devices
301#
302
303#
304# Wireless LAN (non-hamradio)
305#
306# CONFIG_NET_RADIO is not set
307
308#
309# PCMCIA network device support
310#
311# CONFIG_NET_PCMCIA is not set
312
313#
314# Wan interfaces
315#
316# CONFIG_WAN is not set
317# CONFIG_PPP is not set
318# CONFIG_SLIP is not set
319# CONFIG_SHAPER is not set
320# CONFIG_NETCONSOLE is not set
321# CONFIG_NETPOLL is not set
322# CONFIG_NET_POLL_CONTROLLER is not set
323
324#
325# ISDN subsystem
326#
327# CONFIG_ISDN is not set
328
329#
330# Telephony Support
331#
332# CONFIG_PHONE is not set
333
334#
335# Input device support
336#
337# CONFIG_INPUT is not set
338
339#
340# Hardware I/O ports
341#
342# CONFIG_SERIO is not set
343# CONFIG_GAMEPORT is not set
344
345#
346# Character devices
347#
348# CONFIG_VT is not set
349# CONFIG_SERIAL_NONSTANDARD is not set
350
351#
352# Serial drivers
353#
354# CONFIG_SERIAL_8250 is not set
355
356#
357# Non-8250 serial port support
358#
359CONFIG_SERIAL_CORE=y
360CONFIG_SERIAL_CORE_CONSOLE=y
361CONFIG_SERIAL_M32R_SIO=y
362CONFIG_SERIAL_M32R_SIO_CONSOLE=y
363CONFIG_UNIX98_PTYS=y
364CONFIG_LEGACY_PTYS=y
365CONFIG_LEGACY_PTY_COUNT=256
366
367#
368# IPMI
369#
370# CONFIG_IPMI_HANDLER is not set
371
372#
373# Watchdog Cards
374#
375CONFIG_WATCHDOG=y
376# CONFIG_WATCHDOG_NOWAYOUT is not set
377
378#
379# Watchdog Device Drivers
380#
381CONFIG_SOFT_WATCHDOG=y
382# CONFIG_RTC is not set
383# CONFIG_DTLK is not set
384# CONFIG_R3964 is not set
385
386#
387# Ftape, the floppy tape device driver
388#
389
390#
391# PCMCIA character devices
392#
393# CONFIG_SYNCLINK_CS is not set
394# CONFIG_RAW_DRIVER is not set
395
396#
397# TPM devices
398#
399
400#
401# I2C support
402#
403# CONFIG_I2C is not set
404
405#
406# Dallas's 1-wire bus
407#
408# CONFIG_W1 is not set
409
410#
411# Hardware Monitoring support
412#
413# CONFIG_HWMON is not set
414# CONFIG_HWMON_VID is not set
415
416#
417# Misc devices
418#
419
420#
421# Multimedia Capabilities Port drivers
422#
423
424#
425# Multimedia devices
426#
427# CONFIG_VIDEO_DEV is not set
428
429#
430# Digital Video Broadcasting Devices
431#
432# CONFIG_DVB is not set
433
434#
435# Graphics support
436#
437# CONFIG_FB is not set
438
439#
440# Sound
441#
442# CONFIG_SOUND is not set
443
444#
445# USB support
446#
447# CONFIG_USB_ARCH_HAS_HCD is not set
448# CONFIG_USB_ARCH_HAS_OHCI is not set
449
450#
451# USB Gadget Support
452#
453# CONFIG_USB_GADGET is not set
454
455#
456# MMC/SD Card support
457#
458# CONFIG_MMC is not set
459
460#
461# InfiniBand support
462#
463
464#
465# SN Devices
466#
467
468#
469# File systems
470#
471CONFIG_EXT2_FS=y
472# CONFIG_EXT2_FS_XATTR is not set
473# CONFIG_EXT2_FS_XIP is not set
474CONFIG_EXT3_FS=y
475CONFIG_EXT3_FS_XATTR=y
476CONFIG_EXT3_FS_POSIX_ACL=y
477# CONFIG_EXT3_FS_SECURITY is not set
478CONFIG_JBD=y
479# CONFIG_JBD_DEBUG is not set
480CONFIG_FS_MBCACHE=y
481# CONFIG_REISERFS_FS is not set
482# CONFIG_JFS_FS is not set
483CONFIG_FS_POSIX_ACL=y
484# CONFIG_XFS_FS is not set
485# CONFIG_MINIX_FS is not set
486# CONFIG_ROMFS_FS is not set
487# CONFIG_INOTIFY is not set
488# CONFIG_QUOTA is not set
489CONFIG_DNOTIFY=y
490# CONFIG_AUTOFS_FS is not set
491# CONFIG_AUTOFS4_FS is not set
492# CONFIG_FUSE_FS is not set
493
494#
495# CD-ROM/DVD Filesystems
496#
497# CONFIG_ISO9660_FS is not set
498# CONFIG_UDF_FS is not set
499
500#
501# DOS/FAT/NT Filesystems
502#
503CONFIG_FAT_FS=y
504CONFIG_MSDOS_FS=y
505CONFIG_VFAT_FS=y
506CONFIG_FAT_DEFAULT_CODEPAGE=932
507CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
508# CONFIG_NTFS_FS is not set
509
510#
511# Pseudo filesystems
512#
513CONFIG_PROC_FS=y
514CONFIG_SYSFS=y
515CONFIG_TMPFS=y
516# CONFIG_HUGETLB_PAGE is not set
517CONFIG_RAMFS=y
518# CONFIG_RELAYFS_FS is not set
519
520#
521# Miscellaneous filesystems
522#
523# CONFIG_ADFS_FS is not set
524# CONFIG_AFFS_FS is not set
525# CONFIG_HFS_FS is not set
526# CONFIG_HFSPLUS_FS is not set
527# CONFIG_BEFS_FS is not set
528# CONFIG_BFS_FS is not set
529# CONFIG_EFS_FS is not set
530CONFIG_CRAMFS=y
531# CONFIG_VXFS_FS is not set
532# CONFIG_HPFS_FS is not set
533# CONFIG_QNX4FS_FS is not set
534# CONFIG_SYSV_FS is not set
535# CONFIG_UFS_FS is not set
536
537#
538# Network File Systems
539#
540CONFIG_NFS_FS=y
541CONFIG_NFS_V3=y
542# CONFIG_NFS_V3_ACL is not set
543# CONFIG_NFS_V4 is not set
544# CONFIG_NFS_DIRECTIO is not set
545# CONFIG_NFSD is not set
546CONFIG_ROOT_NFS=y
547CONFIG_LOCKD=y
548CONFIG_LOCKD_V4=y
549CONFIG_NFS_COMMON=y
550CONFIG_SUNRPC=y
551# CONFIG_RPCSEC_GSS_KRB5 is not set
552# CONFIG_RPCSEC_GSS_SPKM3 is not set
553# CONFIG_SMB_FS is not set
554# CONFIG_CIFS is not set
555# CONFIG_NCP_FS is not set
556# CONFIG_CODA_FS is not set
557# CONFIG_AFS_FS is not set
558# CONFIG_9P_FS is not set
559
560#
561# Partition Types
562#
563# CONFIG_PARTITION_ADVANCED is not set
564CONFIG_MSDOS_PARTITION=y
565
566#
567# Native Language Support
568#
569CONFIG_NLS=y
570CONFIG_NLS_DEFAULT="iso8859-1"
571CONFIG_NLS_CODEPAGE_437=y
572# CONFIG_NLS_CODEPAGE_737 is not set
573# CONFIG_NLS_CODEPAGE_775 is not set
574# CONFIG_NLS_CODEPAGE_850 is not set
575# CONFIG_NLS_CODEPAGE_852 is not set
576# CONFIG_NLS_CODEPAGE_855 is not set
577# CONFIG_NLS_CODEPAGE_857 is not set
578# CONFIG_NLS_CODEPAGE_860 is not set
579# CONFIG_NLS_CODEPAGE_861 is not set
580# CONFIG_NLS_CODEPAGE_862 is not set
581# CONFIG_NLS_CODEPAGE_863 is not set
582# CONFIG_NLS_CODEPAGE_864 is not set
583# CONFIG_NLS_CODEPAGE_865 is not set
584# CONFIG_NLS_CODEPAGE_866 is not set
585# CONFIG_NLS_CODEPAGE_869 is not set
586# CONFIG_NLS_CODEPAGE_936 is not set
587# CONFIG_NLS_CODEPAGE_950 is not set
588CONFIG_NLS_CODEPAGE_932=y
589# CONFIG_NLS_CODEPAGE_949 is not set
590# CONFIG_NLS_CODEPAGE_874 is not set
591# CONFIG_NLS_ISO8859_8 is not set
592# CONFIG_NLS_CODEPAGE_1250 is not set
593# CONFIG_NLS_CODEPAGE_1251 is not set
594# CONFIG_NLS_ASCII is not set
595# CONFIG_NLS_ISO8859_1 is not set
596# CONFIG_NLS_ISO8859_2 is not set
597# CONFIG_NLS_ISO8859_3 is not set
598# CONFIG_NLS_ISO8859_4 is not set
599# CONFIG_NLS_ISO8859_5 is not set
600# CONFIG_NLS_ISO8859_6 is not set
601# CONFIG_NLS_ISO8859_7 is not set
602# CONFIG_NLS_ISO8859_9 is not set
603# CONFIG_NLS_ISO8859_13 is not set
604# CONFIG_NLS_ISO8859_14 is not set
605# CONFIG_NLS_ISO8859_15 is not set
606# CONFIG_NLS_KOI8_R is not set
607# CONFIG_NLS_KOI8_U is not set
608CONFIG_NLS_UTF8=y
609
610#
611# Profiling support
612#
613# CONFIG_PROFILING is not set
614
615#
616# Kernel hacking
617#
618# CONFIG_PRINTK_TIME is not set
619CONFIG_DEBUG_KERNEL=y
620CONFIG_MAGIC_SYSRQ=y
621CONFIG_LOG_BUF_SHIFT=14
622CONFIG_DETECT_SOFTLOCKUP=y
623# CONFIG_SCHEDSTATS is not set
624# CONFIG_DEBUG_SLAB is not set
625# CONFIG_DEBUG_SPINLOCK is not set
626# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
627# CONFIG_DEBUG_KOBJECT is not set
628# CONFIG_DEBUG_BUGVERBOSE is not set
629CONFIG_DEBUG_INFO=y
630# CONFIG_DEBUG_FS is not set
631# CONFIG_FRAME_POINTER is not set
632# CONFIG_DEBUG_STACKOVERFLOW is not set
633# CONFIG_DEBUG_STACK_USAGE is not set
634
635#
636# Security options
637#
638# CONFIG_KEYS is not set
639# CONFIG_SECURITY is not set
640
641#
642# Cryptographic options
643#
644# CONFIG_CRYPTO is not set
645
646#
647# Hardware crypto devices
648#
649
650#
651# Library routines
652#
653# CONFIG_CRC_CCITT is not set
654# CONFIG_CRC16 is not set
655CONFIG_CRC32=y
656CONFIG_LIBCRC32C=y
657CONFIG_ZLIB_INFLATE=y
diff --git a/arch/m32r/mm/cache.c b/arch/m32r/mm/cache.c
index 31b0789c1992..9f54dd937013 100644
--- a/arch/m32r/mm/cache.c
+++ b/arch/m32r/mm/cache.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/arch/m32r/mm/cache.c 2 * linux/arch/m32r/mm/cache.c
3 * 3 *
4 * Copyright (C) 2002 Hirokazu Takata 4 * Copyright (C) 2002-2005 Hirokazu Takata, Hayato Fujiwara
5 */ 5 */
6 6
7#include <linux/config.h> 7#include <linux/config.h>
@@ -9,7 +9,8 @@
9 9
10#undef MCCR 10#undef MCCR
11 11
12#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP) 12#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) \
13 || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP)
13/* Cache Control Register */ 14/* Cache Control Register */
14#define MCCR ((volatile unsigned long*)0xfffffffc) 15#define MCCR ((volatile unsigned long*)0xfffffffc)
15#define MCCR_CC (1UL << 7) /* Cache mode modify bit */ 16#define MCCR_CC (1UL << 7) /* Cache mode modify bit */
@@ -26,7 +27,17 @@
26#define MCCR ((volatile unsigned char*)0xfffffffe) 27#define MCCR ((volatile unsigned char*)0xfffffffe)
27#define MCCR_IIV (1UL << 0) /* I-cache invalidate */ 28#define MCCR_IIV (1UL << 0) /* I-cache invalidate */
28#define MCCR_ICACHE_INV MCCR_IIV 29#define MCCR_ICACHE_INV MCCR_IIV
29#endif /* CONFIG_CHIP_XNUX2 || CONFIG_CHIP_M32700 */ 30#elif defined(CONFIG_CHIP_M32104)
31#define MCCR ((volatile unsigned short*)0xfffffffe)
32#define MCCR_IIV (1UL << 8) /* I-cache invalidate */
33#define MCCR_DIV (1UL << 9) /* D-cache invalidate */
34#define MCCR_DCB (1UL << 10) /* D-cache copy back */
35#define MCCR_ICM (1UL << 0) /* I-cache mode [0:off,1:on] */
36#define MCCR_DCM (1UL << 1) /* D-cache mode [0:off,1:on] */
37#define MCCR_ICACHE_INV MCCR_IIV
38#define MCCR_DCACHE_CB MCCR_DCB
39#define MCCR_DCACHE_CBINV (MCCR_DIV|MCCR_DCB)
40#endif
30 41
31#ifndef MCCR 42#ifndef MCCR
32#error Unknown cache type. 43#error Unknown cache type.
@@ -37,29 +48,42 @@
37void _flush_cache_all(void) 48void _flush_cache_all(void)
38{ 49{
39#if defined(CONFIG_CHIP_M32102) 50#if defined(CONFIG_CHIP_M32102)
51 unsigned char mccr;
40 *MCCR = MCCR_ICACHE_INV; 52 *MCCR = MCCR_ICACHE_INV;
53#elif defined(CONFIG_CHIP_M32104)
54 unsigned short mccr;
55
56 /* Copyback and invalidate D-cache */
57 /* Invalidate I-cache */
58 *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CBINV);
41#else 59#else
42 unsigned long mccr; 60 unsigned long mccr;
43 61
44 /* Copyback and invalidate D-cache */ 62 /* Copyback and invalidate D-cache */
45 /* Invalidate I-cache */ 63 /* Invalidate I-cache */
46 *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV; 64 *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV;
47 while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
48#endif 65#endif
66 while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
49} 67}
50 68
51/* Copy back D-cache and invalidate I-cache all */ 69/* Copy back D-cache and invalidate I-cache all */
52void _flush_cache_copyback_all(void) 70void _flush_cache_copyback_all(void)
53{ 71{
54#if defined(CONFIG_CHIP_M32102) 72#if defined(CONFIG_CHIP_M32102)
73 unsigned char mccr;
55 *MCCR = MCCR_ICACHE_INV; 74 *MCCR = MCCR_ICACHE_INV;
75#elif defined(CONFIG_CHIP_M32104)
76 unsigned short mccr;
77
78 /* Copyback and invalidate D-cache */
79 /* Invalidate I-cache */
80 *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CB);
56#else 81#else
57 unsigned long mccr; 82 unsigned long mccr;
58 83
59 /* Copyback D-cache */ 84 /* Copyback D-cache */
60 /* Invalidate I-cache */ 85 /* Invalidate I-cache */
61 *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB; 86 *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB;
62 while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
63
64#endif 87#endif
88 while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
65} 89}
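Two changes ride together in this cache.c hunk: the M32104 leg uses a read-modify-write (*MCCR |= ...) so the ICM/DCM mode bits that keep the caches enabled survive the flush, and the busy-wait on the self-clearing invalidate bit moves outside the #ifdef so every variant now waits for completion. The net M32104 path, as a sketch (register and bit definitions repeated from the hunk above):

    #define MCCR              ((volatile unsigned short *)0xfffffffe)
    #define MCCR_IIV          (1UL << 8)            /* I-cache invalidate */
    #define MCCR_DIV          (1UL << 9)            /* D-cache invalidate */
    #define MCCR_DCB          (1UL << 10)           /* D-cache copy back */
    #define MCCR_ICACHE_INV   MCCR_IIV
    #define MCCR_DCACHE_CBINV (MCCR_DIV | MCCR_DCB)

    static void m32104_flush_cache_all(void)
    {
            unsigned short mccr;

            *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CBINV);  /* keep ICM/DCM */
            while ((mccr = *MCCR) & MCCR_IIV)
                    ;  /* spin while invalidating */
    }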
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index e93a5ad56496..b2c62eeb3bab 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -38,8 +38,6 @@ EXPORT_SYMBOL(strncmp);
38 38
39EXPORT_SYMBOL(ip_fast_csum); 39EXPORT_SYMBOL(ip_fast_csum);
40 40
41EXPORT_SYMBOL(mach_enable_irq);
42EXPORT_SYMBOL(mach_disable_irq);
43EXPORT_SYMBOL(kernel_thread); 41EXPORT_SYMBOL(kernel_thread);
44 42
45/* Networking helper routines. */ 43/* Networking helper routines. */
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index abb80fa2b940..93120b9bfff1 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -65,8 +65,6 @@ void (*mach_kbd_leds) (unsigned int) = NULL;
65/* machine dependent irq functions */ 65/* machine dependent irq functions */
66void (*mach_init_IRQ) (void) = NULL; 66void (*mach_init_IRQ) (void) = NULL;
67irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *) = NULL; 67irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *) = NULL;
68void (*mach_enable_irq) (unsigned int) = NULL;
69void (*mach_disable_irq) (unsigned int) = NULL;
70int (*mach_get_irq_list) (struct seq_file *, void *) = NULL; 68int (*mach_get_irq_list) (struct seq_file *, void *) = NULL;
71void (*mach_process_int) (int irq, struct pt_regs *fp) = NULL; 69void (*mach_process_int) (int irq, struct pt_regs *fp) = NULL;
72void (*mach_trap_init) (void); 70void (*mach_trap_init) (void);
diff --git a/arch/ppc/boot/simple/Makefile b/arch/ppc/boot/simple/Makefile
index f3e9c534aa82..9533f8de238f 100644
--- a/arch/ppc/boot/simple/Makefile
+++ b/arch/ppc/boot/simple/Makefile
@@ -190,6 +190,8 @@ boot-$(CONFIG_REDWOOD_5) += embed_config.o
190boot-$(CONFIG_REDWOOD_6) += embed_config.o 190boot-$(CONFIG_REDWOOD_6) += embed_config.o
191boot-$(CONFIG_8xx) += embed_config.o 191boot-$(CONFIG_8xx) += embed_config.o
192boot-$(CONFIG_8260) += embed_config.o 192boot-$(CONFIG_8260) += embed_config.o
193boot-$(CONFIG_EP405) += embed_config.o
194boot-$(CONFIG_XILINX_ML300) += embed_config.o
193boot-$(CONFIG_BSEIP) += iic.o 195boot-$(CONFIG_BSEIP) += iic.o
194boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o 196boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o
195boot-$(CONFIG_MV64X60) += misc-mv64x60.o 197boot-$(CONFIG_MV64X60) += misc-mv64x60.o
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index 821a75e45602..1be3ca5bae40 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -37,7 +37,6 @@
37void default_idle(void) 37void default_idle(void)
38{ 38{
39 void (*powersave)(void); 39 void (*powersave)(void);
40 int cpu = smp_processor_id();
41 40
42 powersave = ppc_md.power_save; 41 powersave = ppc_md.power_save;
43 42
@@ -47,7 +46,8 @@ void default_idle(void)
47#ifdef CONFIG_SMP 46#ifdef CONFIG_SMP
48 else { 47 else {
49 set_thread_flag(TIF_POLLING_NRFLAG); 48 set_thread_flag(TIF_POLLING_NRFLAG);
50 while (!need_resched() && !cpu_is_offline(cpu)) 49 while (!need_resched() &&
50 !cpu_is_offline(smp_processor_id()))
51 barrier(); 51 barrier();
52 clear_thread_flag(TIF_POLLING_NRFLAG); 52 clear_thread_flag(TIF_POLLING_NRFLAG);
53 } 53 }
diff --git a/arch/ppc/platforms/4xx/ibm440gx.c b/arch/ppc/platforms/4xx/ibm440gx.c
index 956f45e4ef97..d24c09ee7b18 100644
--- a/arch/ppc/platforms/4xx/ibm440gx.c
+++ b/arch/ppc/platforms/4xx/ibm440gx.c
@@ -58,7 +58,6 @@ static struct ocp_func_emac_data ibm440gx_emac2_def = {
58 .wol_irq = 65, /* WOL interrupt number */ 58 .wol_irq = 65, /* WOL interrupt number */
59 .mdio_idx = -1, /* No shared MDIO */ 59 .mdio_idx = -1, /* No shared MDIO */
60 .tah_idx = 0, /* TAH device index */ 60 .tah_idx = 0, /* TAH device index */
61 .jumbo = 1, /* Jumbo frames supported */
62}; 61};
63 62
64static struct ocp_func_emac_data ibm440gx_emac3_def = { 63static struct ocp_func_emac_data ibm440gx_emac3_def = {
@@ -72,7 +71,6 @@ static struct ocp_func_emac_data ibm440gx_emac3_def = {
72 .wol_irq = 67, /* WOL interrupt number */ 71 .wol_irq = 67, /* WOL interrupt number */
73 .mdio_idx = -1, /* No shared MDIO */ 72 .mdio_idx = -1, /* No shared MDIO */
74 .tah_idx = 1, /* TAH device index */ 73 .tah_idx = 1, /* TAH device index */
75 .jumbo = 1, /* Jumbo frames supported */
76}; 74};
77OCP_SYSFS_EMAC_DATA() 75OCP_SYSFS_EMAC_DATA()
78 76
diff --git a/arch/ppc/platforms/4xx/ibm440sp.c b/arch/ppc/platforms/4xx/ibm440sp.c
index feb17e41ef69..71a0117d3597 100644
--- a/arch/ppc/platforms/4xx/ibm440sp.c
+++ b/arch/ppc/platforms/4xx/ibm440sp.c
@@ -31,7 +31,6 @@ static struct ocp_func_emac_data ibm440sp_emac0_def = {
31 .wol_irq = 61, /* WOL interrupt number */ 31 .wol_irq = 61, /* WOL interrupt number */
32 .mdio_idx = -1, /* No shared MDIO */ 32 .mdio_idx = -1, /* No shared MDIO */
33 .tah_idx = -1, /* No TAH */ 33 .tah_idx = -1, /* No TAH */
34 .jumbo = 1, /* Jumbo frames supported */
35}; 34};
36OCP_SYSFS_EMAC_DATA() 35OCP_SYSFS_EMAC_DATA()
37 36
diff --git a/arch/ppc/platforms/lite5200.c b/arch/ppc/platforms/lite5200.c
index d44cc991179f..7ed52dc340c9 100644
--- a/arch/ppc/platforms/lite5200.c
+++ b/arch/ppc/platforms/lite5200.c
@@ -196,8 +196,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
196 mpc52xx_set_bat(); 196 mpc52xx_set_bat();
197 197
198 /* No ISA bus by default */ 198 /* No ISA bus by default */
199#ifdef CONFIG_PCI
199 isa_io_base = 0; 200 isa_io_base = 0;
200 isa_mem_base = 0; 201 isa_mem_base = 0;
202#endif
201 203
202 /* Powersave */ 204 /* Powersave */
203 /* This is provided as an example on how to do it. But you 205 /* This is provided as an example on how to do it. But you
diff --git a/arch/ppc/platforms/mpc5200.c b/arch/ppc/platforms/mpc5200.c
deleted file mode 100644
index a58db438c162..000000000000
--- a/arch/ppc/platforms/mpc5200.c
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * arch/ppc/platforms/mpc5200.c
3 *
4 * OCP Definitions for the boards based on MPC5200 processor. Contains
5 * definitions for every common peripherals. (Mostly all but PSCs)
6 *
7 * Maintainer : Sylvain Munaut <tnt@246tNt.com>
8 *
9 * Copyright 2004 Sylvain Munaut <tnt@246tNt.com>
10 *
11 * This file is licensed under the terms of the GNU General Public License
12 * version 2. This program is licensed "as is" without any warranty of any
13 * kind, whether express or implied.
14 */
15
16#include <asm/ocp.h>
17#include <asm/mpc52xx.h>
18
19
20static struct ocp_fs_i2c_data mpc5200_i2c_def = {
21 .flags = FS_I2C_CLOCK_5200,
22};
23
24
25/* Here is the core_ocp struct.
26 * With all the devices common to all board. Even if port multiplexing is
27 * not setup for them (if the user don't want them, just don't select the
28 * config option). The potentially conflicting devices (like PSCs) goes in
29 * board specific file.
30 */
31struct ocp_def core_ocp[] = {
32 {
33 .vendor = OCP_VENDOR_FREESCALE,
34 .function = OCP_FUNC_IIC,
35 .index = 0,
36 .paddr = MPC52xx_I2C1,
37 .irq = OCP_IRQ_NA, /* MPC52xx_IRQ_I2C1 - Buggy */
38 .pm = OCP_CPM_NA,
39 .additions = &mpc5200_i2c_def,
40 },
41 {
42 .vendor = OCP_VENDOR_FREESCALE,
43 .function = OCP_FUNC_IIC,
44 .index = 1,
45 .paddr = MPC52xx_I2C2,
46 .irq = OCP_IRQ_NA, /* MPC52xx_IRQ_I2C2 - Buggy */
47 .pm = OCP_CPM_NA,
48 .additions = &mpc5200_i2c_def,
49 },
50 { /* Terminating entry */
51 .vendor = OCP_VENDOR_INVALID
52 }
53};
diff --git a/arch/ppc/syslib/mpc52xx_pci.c b/arch/ppc/syslib/mpc52xx_pci.c
index 4ac19080eb85..313c96ec7eb1 100644
--- a/arch/ppc/syslib/mpc52xx_pci.c
+++ b/arch/ppc/syslib/mpc52xx_pci.c
@@ -24,6 +24,12 @@
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25 25
26 26
 27/* This macro is defined to activate the workaround for bug 435
 28 of the MPC5200 (L25R). With it activated, we don't do any 32-bit
 29 configuration accesses during type-1 cycles */
30#define MPC5200_BUG_435_WORKAROUND
31
32
27static int 33static int
28mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, 34mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
29 int offset, int len, u32 *val) 35 int offset, int len, u32 *val)
@@ -40,17 +46,39 @@ mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
40 ((bus->number - hose->bus_offset) << 16) | 46 ((bus->number - hose->bus_offset) << 16) |
41 (devfn << 8) | 47 (devfn << 8) |
42 (offset & 0xfc)); 48 (offset & 0xfc));
49 mb();
50
51#ifdef MPC5200_BUG_435_WORKAROUND
52 if (bus->number != hose->bus_offset) {
53 switch (len) {
54 case 1:
55 value = in_8(((u8 __iomem *)hose->cfg_data) + (offset & 3));
56 break;
57 case 2:
58 value = in_le16(((u16 __iomem *)hose->cfg_data) + ((offset>>1) & 1));
59 break;
60
61 default:
62 value = in_le16((u16 __iomem *)hose->cfg_data) |
63 (in_le16(((u16 __iomem *)hose->cfg_data) + 1) << 16);
64 break;
65 }
66 }
67 else
68#endif
69 {
70 value = in_le32(hose->cfg_data);
43 71
44 value = in_le32(hose->cfg_data); 72 if (len != 4) {
45 73 value >>= ((offset & 0x3) << 3);
46 if (len != 4) { 74 value &= 0xffffffff >> (32 - (len << 3));
47 value >>= ((offset & 0x3) << 3); 75 }
48 value &= 0xffffffff >> (32 - (len << 3));
49 } 76 }
50 77
51 *val = value; 78 *val = value;
52 79
53 out_be32(hose->cfg_addr, 0); 80 out_be32(hose->cfg_addr, 0);
81 mb();
54 82
55 return PCIBIOS_SUCCESSFUL; 83 return PCIBIOS_SUCCESSFUL;
56} 84}
@@ -71,21 +99,48 @@ mpc52xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
71 ((bus->number - hose->bus_offset) << 16) | 99 ((bus->number - hose->bus_offset) << 16) |
72 (devfn << 8) | 100 (devfn << 8) |
73 (offset & 0xfc)); 101 (offset & 0xfc));
102 mb();
103
104#ifdef MPC5200_BUG_435_WORKAROUND
105 if (bus->number != hose->bus_offset) {
106 switch (len) {
107 case 1:
108 out_8(((u8 __iomem *)hose->cfg_data) +
109 (offset & 3), val);
110 break;
111 case 2:
112 out_le16(((u16 __iomem *)hose->cfg_data) +
113 ((offset>>1) & 1), val);
114 break;
115
116 default:
117 out_le16((u16 __iomem *)hose->cfg_data,
118 (u16)val);
119 out_le16(((u16 __iomem *)hose->cfg_data) + 1,
120 (u16)(val>>16));
121 break;
122 }
123 }
124 else
125#endif
126 {
127 if (len != 4) {
128 value = in_le32(hose->cfg_data);
74 129
75 if (len != 4) { 130 offset = (offset & 0x3) << 3;
76 value = in_le32(hose->cfg_data); 131 mask = (0xffffffff >> (32 - (len << 3)));
132 mask <<= offset;
77 133
78 offset = (offset & 0x3) << 3; 134 value &= ~mask;
79 mask = (0xffffffff >> (32 - (len << 3))); 135 val = value | ((val << offset) & mask);
80 mask <<= offset; 136 }
81 137
82 value &= ~mask; 138 out_le32(hose->cfg_data, val);
83 val = value | ((val << offset) & mask);
84 } 139 }
85 140 mb();
86 out_le32(hose->cfg_data, val);
87 141
88 out_be32(hose->cfg_addr, 0); 142 out_be32(hose->cfg_addr, 0);
143 mb();
89 144
90 return PCIBIOS_SUCCESSFUL; 145 return PCIBIOS_SUCCESSFUL;
91} 146}
@@ -99,9 +154,12 @@ static struct pci_ops mpc52xx_pci_ops = {
99static void __init 154static void __init
100mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs) 155mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs)
101{ 156{
157 u32 tmp;
102 158
103 /* Setup control regs */ 159 /* Setup control regs */
104 /* Nothing to do afaik */ 160 tmp = in_be32(&pci_regs->scr);
161 tmp |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
162 out_be32(&pci_regs->scr, tmp);
105 163
106 /* Setup windows */ 164 /* Setup windows */
107 out_be32(&pci_regs->iw0btar, MPC52xx_PCI_IWBTAR_TRANSLATION( 165 out_be32(&pci_regs->iw0btar, MPC52xx_PCI_IWBTAR_TRANSLATION(
@@ -142,16 +200,15 @@ mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs)
142 /* Not necessary and can be a bad thing if for example the bootloader 200 /* Not necessary and can be a bad thing if for example the bootloader
143 is displaying a splash screen or ... Just left here for 201 is displaying a splash screen or ... Just left here for
 144 documentation purposes if anyone needs it */ 202 documentation purposes if anyone needs it */
145#if 0
146 u32 tmp;
147 tmp = in_be32(&pci_regs->gscr); 203 tmp = in_be32(&pci_regs->gscr);
204#if 0
148 out_be32(&pci_regs->gscr, tmp | MPC52xx_PCI_GSCR_PR); 205 out_be32(&pci_regs->gscr, tmp | MPC52xx_PCI_GSCR_PR);
149 udelay(50); 206 udelay(50);
150 out_be32(&pci_regs->gscr, tmp);
151#endif 207#endif
208 out_be32(&pci_regs->gscr, tmp & ~MPC52xx_PCI_GSCR_PR);
152} 209}
153 210
154static void __init 211static void
155mpc52xx_pci_fixup_resources(struct pci_dev *dev) 212mpc52xx_pci_fixup_resources(struct pci_dev *dev)
156{ 213{
157 int i; 214 int i;
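
The two hunks above implement the same split: behind the bridge (type-1 cycles) a 32-bit access is decomposed into 8/16-bit pieces to sidestep erratum 435, while on the root bus a sub-word value is carved out of a single 32-bit access. A worked sketch of the two helpers the read path effectively contains (hypothetical names; logic lifted from the hunk):

    /* bug-435 path: build 32 bits from two little-endian 16-bit reads */
    static u32 cfg_read32_split(u16 __iomem *cfg_data)
    {
            return in_le16(cfg_data) | (in_le16(cfg_data + 1) << 16);
    }

    /* root-bus path: extract len bytes at a byte offset from one
       32-bit read; e.g. offset 1, len 2 of 0xddccbbaa yields 0xccbb */
    static u32 cfg_extract(u32 value, int offset, int len)
    {
            value >>= (offset & 0x3) << 3;
            return value & (0xffffffff >> (32 - (len << 3)));
    }
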
diff --git a/arch/ppc/syslib/mpc52xx_setup.c b/arch/ppc/syslib/mpc52xx_setup.c
index bb2374585a7b..a4a4b02227df 100644
--- a/arch/ppc/syslib/mpc52xx_setup.c
+++ b/arch/ppc/syslib/mpc52xx_setup.c
@@ -84,9 +84,11 @@ mpc52xx_set_bat(void)
84void __init 84void __init
85mpc52xx_map_io(void) 85mpc52xx_map_io(void)
86{ 86{
 87 /* Here we only map the MBAR */ 87 /* Here we map the MBAR and the whole upper zone. MBAR is only
 88 64k but we can't map only 64k with BATs. Mapping the whole
 89 0xf0000000 range is OK and helps eventual LPB devices placed there */
88 io_block_mapping( 90 io_block_mapping(
89 MPC52xx_MBAR_VIRT, MPC52xx_MBAR, MPC52xx_MBAR_SIZE, _PAGE_IO); 91 MPC52xx_MBAR_VIRT, MPC52xx_MBAR, 0x10000000, _PAGE_IO);
90} 92}
91 93
92 94
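
The comment change is backed by BAT granularity: 60x-style BATs map power-of-two blocks from 128 KiB up to 256 MiB (an assumption about this core; the MPC5200 carries a 603e-class CPU), so the 64 KiB MBAR can never get a block of its own and the full window is mapped instead:

    /* one 256 MiB block covering MBAR plus the LocalPlus-bus window */
    io_block_mapping(MPC52xx_MBAR_VIRT, MPC52xx_MBAR,
                     0x10000000, _PAGE_IO);
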
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 477ac2758bd5..6fe532d82417 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -23,14 +23,14 @@ config GENERIC_BUST_SPINLOCK
23 23
24mainmenu "Linux Kernel Configuration" 24mainmenu "Linux Kernel Configuration"
25 25
26config ARCH_S390 26config S390
27 bool 27 bool
28 default y 28 default y
29 29
30config UID16 30config UID16
31 bool 31 bool
32 default y 32 default y
33 depends on ARCH_S390X = 'n' 33 depends on !64BIT
34 34
35source "init/Kconfig" 35source "init/Kconfig"
36 36
@@ -38,20 +38,12 @@ menu "Base setup"
38 38
39comment "Processor type and features" 39comment "Processor type and features"
40 40
41config ARCH_S390X 41config 64BIT
42 bool "64 bit kernel" 42 bool "64 bit kernel"
43 help 43 help
44 Select this option if you have a 64 bit IBM zSeries machine 44 Select this option if you have a 64 bit IBM zSeries machine
45 and want to use the 64 bit addressing mode. 45 and want to use the 64 bit addressing mode.
46 46
47config 64BIT
48 def_bool ARCH_S390X
49
50config ARCH_S390_31
51 bool
52 depends on ARCH_S390X = 'n'
53 default y
54
55config SMP 47config SMP
56 bool "Symmetric multi-processing support" 48 bool "Symmetric multi-processing support"
57 ---help--- 49 ---help---
@@ -101,20 +93,15 @@ config MATHEMU
101 on older S/390 machines. Say Y unless you know your machine doesn't 93 on older S/390 machines. Say Y unless you know your machine doesn't
102 need this. 94 need this.
103 95
104config S390_SUPPORT 96config COMPAT
105 bool "Kernel support for 31 bit emulation" 97 bool "Kernel support for 31 bit emulation"
106 depends on ARCH_S390X 98 depends on 64BIT
107 help 99 help
108 Select this option if you want to enable your system kernel to 100 Select this option if you want to enable your system kernel to
109 handle system-calls from ELF binaries for 31 bit ESA. This option 101 handle system-calls from ELF binaries for 31 bit ESA. This option
110 (and some other stuff like libraries and such) is needed for 102 (and some other stuff like libraries and such) is needed for
111 executing 31 bit applications. It is safe to say "Y". 103 executing 31 bit applications. It is safe to say "Y".
112 104
113config COMPAT
114 bool
115 depends on S390_SUPPORT
116 default y
117
118config SYSVIPC_COMPAT 105config SYSVIPC_COMPAT
119 bool 106 bool
120 depends on COMPAT && SYSVIPC 107 depends on COMPAT && SYSVIPC
@@ -122,7 +109,7 @@ config SYSVIPC_COMPAT
122 109
123config BINFMT_ELF32 110config BINFMT_ELF32
124 tristate "Kernel support for 31 bit ELF binaries" 111 tristate "Kernel support for 31 bit ELF binaries"
125 depends on S390_SUPPORT 112 depends on COMPAT
126 help 113 help
127 This allows you to run 32-bit Linux/ELF binaries on your zSeries 114 This allows you to run 32-bit Linux/ELF binaries on your zSeries
128 in 64 bit mode. Everybody wants this; say Y. 115 in 64 bit mode. Everybody wants this; say Y.
@@ -135,7 +122,7 @@ choice
135 122
136config MARCH_G5 123config MARCH_G5
137 bool "S/390 model G5 and G6" 124 bool "S/390 model G5 and G6"
138 depends on ARCH_S390_31 125 depends on !64BIT
139 help 126 help
140 Select this to build a 31 bit kernel that works 127 Select this to build a 31 bit kernel that works
141 on all S/390 and zSeries machines. 128 on all S/390 and zSeries machines.
@@ -240,8 +227,8 @@ config MACHCHK_WARNING
240config QDIO 227config QDIO
241 tristate "QDIO support" 228 tristate "QDIO support"
242 ---help--- 229 ---help---
243 This driver provides the Queued Direct I/O base support for the 230 This driver provides the Queued Direct I/O base support for
244 IBM S/390 (G5 and G6) and eServer zSeries (z800, z890, z900 and z990). 231 IBM mainframes.
245 232
246 For details please refer to the documentation provided by IBM at 233 For details please refer to the documentation provided by IBM at
247 <http://www10.software.ibm.com/developerworks/opensource/linux390> 234 <http://www10.software.ibm.com/developerworks/opensource/linux390>
@@ -263,7 +250,8 @@ config QDIO_DEBUG
263 bool "Extended debugging information" 250 bool "Extended debugging information"
264 depends on QDIO 251 depends on QDIO
265 help 252 help
266 Say Y here to get extended debugging output in /proc/s390dbf/qdio... 253 Say Y here to get extended debugging output in
254 /sys/kernel/debug/s390dbf/qdio...
267 Warning: this option reduces the performance of the QDIO module. 255 Warning: this option reduces the performance of the QDIO module.
268 256
269 If unsure, say N. 257 If unsure, say N.
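
At the C level the Kconfig rename is mechanical, as the appldata and crypto changes below show; a sketch of the symbol mapping for any out-of-tree code tracking it:

    #ifdef CONFIG_64BIT             /* formerly CONFIG_ARCH_S390X   */
            /* 64-bit only code */
    #endif
    #ifndef CONFIG_64BIT            /* formerly CONFIG_ARCH_S390_31 */
            /* 31-bit only code */
    #endif
    #ifdef CONFIG_COMPAT            /* formerly CONFIG_S390_SUPPORT */
            /* 31-bit emulation support */
    #endif
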
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 73a09a6ee6c8..6c6b197898d0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -13,16 +13,14 @@
13# Copyright (C) 1994 by Linus Torvalds 13# Copyright (C) 1994 by Linus Torvalds
14# 14#
15 15
16ifdef CONFIG_ARCH_S390_31 16ifndef CONFIG_64BIT
17LDFLAGS := -m elf_s390 17LDFLAGS := -m elf_s390
18CFLAGS += -m31 18CFLAGS += -m31
19AFLAGS += -m31 19AFLAGS += -m31
20UTS_MACHINE := s390 20UTS_MACHINE := s390
21STACK_SIZE := 8192 21STACK_SIZE := 8192
22CHECKFLAGS += -D__s390__ 22CHECKFLAGS += -D__s390__
23endif 23else
24
25ifdef CONFIG_ARCH_S390X
26LDFLAGS := -m elf64_s390 24LDFLAGS := -m elf64_s390
27MODFLAGS += -fpic -D__PIC__ 25MODFLAGS += -fpic -D__PIC__
28CFLAGS += -m64 26CFLAGS += -m64
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index dee6ab54984d..d06a8d71c71d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -40,7 +40,7 @@
40 40
41#define TOD_MICRO 0x01000 /* nr. of TOD clock units 41#define TOD_MICRO 0x01000 /* nr. of TOD clock units
42 for 1 microsecond */ 42 for 1 microsecond */
43#ifndef CONFIG_ARCH_S390X 43#ifndef CONFIG_64BIT
44 44
45#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ 45#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
46#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ 46#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
@@ -54,13 +54,13 @@
54#define APPLDATA_GEN_EVENT_RECORD 0x82 54#define APPLDATA_GEN_EVENT_RECORD 0x82
55#define APPLDATA_START_CONFIG_REC 0x83 55#define APPLDATA_START_CONFIG_REC 0x83
56 56
57#endif /* CONFIG_ARCH_S390X */ 57#endif /* CONFIG_64BIT */
58 58
59 59
60/* 60/*
61 * Parameter list for DIAGNOSE X'DC' 61 * Parameter list for DIAGNOSE X'DC'
62 */ 62 */
63#ifndef CONFIG_ARCH_S390X 63#ifndef CONFIG_64BIT
64struct appldata_parameter_list { 64struct appldata_parameter_list {
65 u16 diag; /* The DIAGNOSE code X'00DC' */ 65 u16 diag; /* The DIAGNOSE code X'00DC' */
66 u8 function; /* The function code for the DIAGNOSE */ 66 u8 function; /* The function code for the DIAGNOSE */
@@ -82,7 +82,7 @@ struct appldata_parameter_list {
82 u64 product_id_addr; 82 u64 product_id_addr;
83 u64 buffer_addr; 83 u64 buffer_addr;
84}; 84};
85#endif /* CONFIG_ARCH_S390X */ 85#endif /* CONFIG_64BIT */
86 86
87/* 87/*
88 * /proc entries (sysctl) 88 * /proc entries (sysctl)
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index e0a476bf4fd6..99ddd3bf2fba 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -141,19 +141,19 @@ static void appldata_get_os_data(void *data)
141 j = 0; 141 j = 0;
142 for_each_online_cpu(i) { 142 for_each_online_cpu(i) {
143 os_data->os_cpu[j].per_cpu_user = 143 os_data->os_cpu[j].per_cpu_user =
144 kstat_cpu(i).cpustat.user; 144 cputime_to_jiffies(kstat_cpu(i).cpustat.user);
145 os_data->os_cpu[j].per_cpu_nice = 145 os_data->os_cpu[j].per_cpu_nice =
146 kstat_cpu(i).cpustat.nice; 146 cputime_to_jiffies(kstat_cpu(i).cpustat.nice);
147 os_data->os_cpu[j].per_cpu_system = 147 os_data->os_cpu[j].per_cpu_system =
148 kstat_cpu(i).cpustat.system; 148 cputime_to_jiffies(kstat_cpu(i).cpustat.system);
149 os_data->os_cpu[j].per_cpu_idle = 149 os_data->os_cpu[j].per_cpu_idle =
150 kstat_cpu(i).cpustat.idle; 150 cputime_to_jiffies(kstat_cpu(i).cpustat.idle);
151 os_data->os_cpu[j].per_cpu_irq = 151 os_data->os_cpu[j].per_cpu_irq =
152 kstat_cpu(i).cpustat.irq; 152 cputime_to_jiffies(kstat_cpu(i).cpustat.irq);
153 os_data->os_cpu[j].per_cpu_softirq = 153 os_data->os_cpu[j].per_cpu_softirq =
154 kstat_cpu(i).cpustat.softirq; 154 cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
155 os_data->os_cpu[j].per_cpu_iowait = 155 os_data->os_cpu[j].per_cpu_iowait =
156 kstat_cpu(i).cpustat.iowait; 156 cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
157 j++; 157 j++;
158 } 158 }
159 159
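
The conversion matters because the cpustat fields are kept in architecture-defined cputime units, which on s390 are not jiffies, while the exported appldata record is specified in jiffies. A sketch of the normalization, assuming the 2.6-era cputime API used above:

    u64 raw;
    u32 jifs;

    raw  = kstat_cpu(i).cpustat.user;   /* cputime units        */
    jifs = cputime_to_jiffies(raw);     /* record wants jiffies */
    os_data->os_cpu[j].per_cpu_user = jifs;
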
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 96a05e6b51e0..bfe2541dc5cf 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -2,7 +2,9 @@
2# Cryptographic API 2# Cryptographic API
3# 3#
4 4
5obj-$(CONFIG_CRYPTO_SHA1_Z990) += sha1_z990.o 5obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
6obj-$(CONFIG_CRYPTO_DES_Z990) += des_z990.o des_check_key.o 6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
7 9
8obj-$(CONFIG_CRYPTO_TEST) += crypt_z990_query.o 10obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
new file mode 100644
index 000000000000..7a1033d8e00f
--- /dev/null
+++ b/arch/s390/crypto/aes_s390.c
@@ -0,0 +1,248 @@
1/*
2 * Cryptographic API.
3 *
4 * s390 implementation of the AES Cipher Algorithm.
5 *
6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation
8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 *
10 * Derived from "crypto/aes.c"
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 2 of the License, or (at your option)
15 * any later version.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/crypto.h>
22#include "crypt_s390.h"
23
24#define AES_MIN_KEY_SIZE 16
25#define AES_MAX_KEY_SIZE 32
26
27/* data block size for all key lengths */
28#define AES_BLOCK_SIZE 16
29
 30static int has_aes_128 = 0;
 31static int has_aes_192 = 0;
 32static int has_aes_256 = 0;
33
34struct s390_aes_ctx {
35 u8 iv[AES_BLOCK_SIZE];
36 u8 key[AES_MAX_KEY_SIZE];
37 int key_len;
38};
39
40static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len,
41 u32 *flags)
42{
43 struct s390_aes_ctx *sctx = ctx;
44
45 switch (key_len) {
46 case 16:
47 if (!has_aes_128)
48 goto fail;
49 break;
50 case 24:
51 if (!has_aes_192)
52 goto fail;
53
54 break;
55 case 32:
56 if (!has_aes_256)
57 goto fail;
58 break;
59 default:
60 /* invalid key length */
61 goto fail;
62 break;
63 }
64
65 sctx->key_len = key_len;
66 memcpy(sctx->key, in_key, key_len);
67 return 0;
68fail:
69 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
70 return -EINVAL;
71}
72
73static void aes_encrypt(void *ctx, u8 *out, const u8 *in)
74{
75 const struct s390_aes_ctx *sctx = ctx;
76
77 switch (sctx->key_len) {
78 case 16:
79 crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
80 AES_BLOCK_SIZE);
81 break;
82 case 24:
83 crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
84 AES_BLOCK_SIZE);
85 break;
86 case 32:
87 crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
88 AES_BLOCK_SIZE);
89 break;
90 }
91}
92
93static void aes_decrypt(void *ctx, u8 *out, const u8 *in)
94{
95 const struct s390_aes_ctx *sctx = ctx;
96
97 switch (sctx->key_len) {
98 case 16:
99 crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
100 AES_BLOCK_SIZE);
101 break;
102 case 24:
103 crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
104 AES_BLOCK_SIZE);
105 break;
106 case 32:
107 crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
108 AES_BLOCK_SIZE);
109 break;
110 }
111}
112
113static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
114 const u8 *in, unsigned int nbytes)
115{
116 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
117
118 switch (sctx->key_len) {
119 case 16:
120 crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes);
121 break;
122 case 24:
123 crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes);
124 break;
125 case 32:
126 crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes);
127 break;
128 }
129 return nbytes & ~(AES_BLOCK_SIZE - 1);
130}
131
132static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
133 const u8 *in, unsigned int nbytes)
134{
135 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
136
137 switch (sctx->key_len) {
138 case 16:
139 crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes);
140 break;
141 case 24:
142 crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes);
143 break;
144 case 32:
145 crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes);
146 break;
147 }
148 return nbytes & ~(AES_BLOCK_SIZE - 1);
149}
150
151static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
152 const u8 *in, unsigned int nbytes)
153{
154 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
155
156 memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
157 switch (sctx->key_len) {
158 case 16:
159 crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes);
160 break;
161 case 24:
162 crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes);
163 break;
164 case 32:
165 crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes);
166 break;
167 }
168 memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE);
169
170 return nbytes & ~(AES_BLOCK_SIZE - 1);
171}
172
173static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
174 const u8 *in, unsigned int nbytes)
175{
176 struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
177
178 memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
179 switch (sctx->key_len) {
180 case 16:
181 crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes);
182 break;
183 case 24:
184 crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes);
185 break;
186 case 32:
187 crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes);
188 break;
189 }
190 return nbytes & ~(AES_BLOCK_SIZE - 1);
191}
192
193
194static struct crypto_alg aes_alg = {
195 .cra_name = "aes",
196 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
197 .cra_blocksize = AES_BLOCK_SIZE,
198 .cra_ctxsize = sizeof(struct s390_aes_ctx),
199 .cra_module = THIS_MODULE,
200 .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
201 .cra_u = {
202 .cipher = {
203 .cia_min_keysize = AES_MIN_KEY_SIZE,
204 .cia_max_keysize = AES_MAX_KEY_SIZE,
205 .cia_setkey = aes_set_key,
206 .cia_encrypt = aes_encrypt,
207 .cia_decrypt = aes_decrypt,
208 .cia_encrypt_ecb = aes_encrypt_ecb,
209 .cia_decrypt_ecb = aes_decrypt_ecb,
210 .cia_encrypt_cbc = aes_encrypt_cbc,
211 .cia_decrypt_cbc = aes_decrypt_cbc,
212 }
213 }
214};
215
216static int __init aes_init(void)
217{
218 int ret;
219
220 if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
221 has_aes_128 = 1;
222 if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
223 has_aes_192 = 1;
224 if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
225 has_aes_256 = 1;
226
227 if (!has_aes_128 && !has_aes_192 && !has_aes_256)
228 return -ENOSYS;
229
230 ret = crypto_register_alg(&aes_alg);
231 if (ret != 0)
232 printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n");
233 return ret;
234}
235
236static void __exit aes_fini(void)
237{
238 crypto_unregister_alg(&aes_alg);
239}
240
241module_init(aes_init);
242module_exit(aes_fini);
243
244MODULE_ALIAS("aes");
245
246MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
247MODULE_LICENSE("GPL");
248
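
Because the algorithm registers under the generic name "aes", existing users of the crypto API pick up the hardware cipher transparently. A hypothetical caller using the crypto interface of this kernel generation (key, iv, sg_src, sg_dst and nbytes are placeholders prepared elsewhere):

    struct crypto_tfm *tfm;

    tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
    if (tfm != NULL) {
            crypto_cipher_setkey(tfm, key, 16);     /* AES-128 */
            crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_ivsize(tfm));
            crypto_cipher_encrypt(tfm, &sg_dst, &sg_src, nbytes);
            crypto_free_tfm(tfm);
    }
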
diff --git a/arch/s390/crypto/crypt_z990.h b/arch/s390/crypto/crypt_s390.h
index 4df660b99e5a..d1c259a7fe33 100644
--- a/arch/s390/crypto/crypt_z990.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cryptographic API. 2 * Cryptographic API.
3 * 3 *
4 * Support for z990 cryptographic instructions. 4 * Support for s390 cryptographic instructions.
5 * 5 *
6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
@@ -12,84 +12,108 @@
12 * any later version. 12 * any later version.
13 * 13 *
14 */ 14 */
15#ifndef _CRYPTO_ARCH_S390_CRYPT_Z990_H 15#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
16#define _CRYPTO_ARCH_S390_CRYPT_Z990_H 16#define _CRYPTO_ARCH_S390_CRYPT_S390_H
17 17
18#include <asm/errno.h> 18#include <asm/errno.h>
19 19
20#define CRYPT_Z990_OP_MASK 0xFF00 20#define CRYPT_S390_OP_MASK 0xFF00
21#define CRYPT_Z990_FUNC_MASK 0x00FF 21#define CRYPT_S390_FUNC_MASK 0x00FF
22 22
 23 23/* s390 cryptographic operations */
24/*z990 cryptographic operations*/ 24enum crypt_s390_operations {
25enum crypt_z990_operations { 25 CRYPT_S390_KM = 0x0100,
26 CRYPT_Z990_KM = 0x0100, 26 CRYPT_S390_KMC = 0x0200,
27 CRYPT_Z990_KMC = 0x0200, 27 CRYPT_S390_KIMD = 0x0300,
28 CRYPT_Z990_KIMD = 0x0300, 28 CRYPT_S390_KLMD = 0x0400,
29 CRYPT_Z990_KLMD = 0x0400, 29 CRYPT_S390_KMAC = 0x0500
30 CRYPT_Z990_KMAC = 0x0500
31}; 30};
32 31
33/*function codes for KM (CIPHER MESSAGE) instruction*/ 32/* function codes for KM (CIPHER MESSAGE) instruction
34enum crypt_z990_km_func { 33 * 0x80 is the decipher modifier bit
35 KM_QUERY = CRYPT_Z990_KM | 0, 34 */
36 KM_DEA_ENCRYPT = CRYPT_Z990_KM | 1, 35enum crypt_s390_km_func {
37 KM_DEA_DECRYPT = CRYPT_Z990_KM | 1 | 0x80, //modifier bit->decipher 36 KM_QUERY = CRYPT_S390_KM | 0x0,
38 KM_TDEA_128_ENCRYPT = CRYPT_Z990_KM | 2, 37 KM_DEA_ENCRYPT = CRYPT_S390_KM | 0x1,
39 KM_TDEA_128_DECRYPT = CRYPT_Z990_KM | 2 | 0x80, 38 KM_DEA_DECRYPT = CRYPT_S390_KM | 0x1 | 0x80,
40 KM_TDEA_192_ENCRYPT = CRYPT_Z990_KM | 3, 39 KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
41 KM_TDEA_192_DECRYPT = CRYPT_Z990_KM | 3 | 0x80, 40 KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
41 KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
42 KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
43 KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12,
44 KM_AES_128_DECRYPT = CRYPT_S390_KM | 0x12 | 0x80,
45 KM_AES_192_ENCRYPT = CRYPT_S390_KM | 0x13,
46 KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
47 KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
48 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
42}; 49};
43 50
44/*function codes for KMC (CIPHER MESSAGE WITH CHAINING) instruction*/ 51/* function codes for KMC (CIPHER MESSAGE WITH CHAINING)
45enum crypt_z990_kmc_func { 52 * instruction
46 KMC_QUERY = CRYPT_Z990_KMC | 0, 53 */
47 KMC_DEA_ENCRYPT = CRYPT_Z990_KMC | 1, 54enum crypt_s390_kmc_func {
48 KMC_DEA_DECRYPT = CRYPT_Z990_KMC | 1 | 0x80, //modifier bit->decipher 55 KMC_QUERY = CRYPT_S390_KMC | 0x0,
49 KMC_TDEA_128_ENCRYPT = CRYPT_Z990_KMC | 2, 56 KMC_DEA_ENCRYPT = CRYPT_S390_KMC | 0x1,
50 KMC_TDEA_128_DECRYPT = CRYPT_Z990_KMC | 2 | 0x80, 57 KMC_DEA_DECRYPT = CRYPT_S390_KMC | 0x1 | 0x80,
51 KMC_TDEA_192_ENCRYPT = CRYPT_Z990_KMC | 3, 58 KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
52 KMC_TDEA_192_DECRYPT = CRYPT_Z990_KMC | 3 | 0x80, 59 KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
60 KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
61 KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
62 KMC_AES_128_ENCRYPT = CRYPT_S390_KMC | 0x12,
63 KMC_AES_128_DECRYPT = CRYPT_S390_KMC | 0x12 | 0x80,
64 KMC_AES_192_ENCRYPT = CRYPT_S390_KMC | 0x13,
65 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
66 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
67 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
53}; 68};
54 69
55/*function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) instruction*/ 70/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
56enum crypt_z990_kimd_func { 71 * instruction
57 KIMD_QUERY = CRYPT_Z990_KIMD | 0, 72 */
58 KIMD_SHA_1 = CRYPT_Z990_KIMD | 1, 73enum crypt_s390_kimd_func {
74 KIMD_QUERY = CRYPT_S390_KIMD | 0,
75 KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
76 KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
59}; 77};
60 78
61/*function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) instruction*/ 79/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
62enum crypt_z990_klmd_func { 80 * instruction
63 KLMD_QUERY = CRYPT_Z990_KLMD | 0, 81 */
64 KLMD_SHA_1 = CRYPT_Z990_KLMD | 1, 82enum crypt_s390_klmd_func {
83 KLMD_QUERY = CRYPT_S390_KLMD | 0,
84 KLMD_SHA_1 = CRYPT_S390_KLMD | 1,
85 KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
65}; 86};
66 87
67/*function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) instruction*/ 88/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
68enum crypt_z990_kmac_func { 89 * instruction
69 KMAC_QUERY = CRYPT_Z990_KMAC | 0, 90 */
70 KMAC_DEA = CRYPT_Z990_KMAC | 1, 91enum crypt_s390_kmac_func {
71 KMAC_TDEA_128 = CRYPT_Z990_KMAC | 2, 92 KMAC_QUERY = CRYPT_S390_KMAC | 0,
72 KMAC_TDEA_192 = CRYPT_Z990_KMAC | 3 93 KMAC_DEA = CRYPT_S390_KMAC | 1,
94 KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
95 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
73}; 96};
74 97
75/*status word for z990 crypto instructions' QUERY functions*/ 98/* status word for s390 crypto instructions' QUERY functions */
76struct crypt_z990_query_status { 99struct crypt_s390_query_status {
77 u64 high; 100 u64 high;
78 u64 low; 101 u64 low;
79}; 102};
80 103
81/* 104/*
82 * Standard fixup and ex_table sections for crypt_z990 inline functions. 105 * Standard fixup and ex_table sections for crypt_s390 inline functions.
83 * label 0: the z990 crypto operation 106 * label 0: the s390 crypto operation
84 * label 1: just after 1 to catch illegal operation exception on non-z990 107 * label 1: just after 1 to catch illegal operation exception
108 * (unsupported model)
85 * label 6: the return point after fixup 109 * label 6: the return point after fixup
86 * label 7: set error value if exception _in_ crypto operation 110 * label 7: set error value if exception _in_ crypto operation
87 * label 8: set error value if illegal operation exception 111 * label 8: set error value if illegal operation exception
88 * [ret] is the variable to receive the error code 112 * [ret] is the variable to receive the error code
89 * [ERR] is the error code value 113 * [ERR] is the error code value
90 */ 114 */
91#ifndef __s390x__ 115#ifndef CONFIG_64BIT
92#define __crypt_z990_fixup \ 116#define __crypt_s390_fixup \
93 ".section .fixup,\"ax\" \n" \ 117 ".section .fixup,\"ax\" \n" \
94 "7: lhi %0,%h[e1] \n" \ 118 "7: lhi %0,%h[e1] \n" \
95 " bras 1,9f \n" \ 119 " bras 1,9f \n" \
@@ -105,8 +129,8 @@ struct crypt_z990_query_status {
105 " .long 0b,7b \n" \ 129 " .long 0b,7b \n" \
106 " .long 1b,8b \n" \ 130 " .long 1b,8b \n" \
107 ".previous" 131 ".previous"
108#else /* __s390x__ */ 132#else /* CONFIG_64BIT */
109#define __crypt_z990_fixup \ 133#define __crypt_s390_fixup \
110 ".section .fixup,\"ax\" \n" \ 134 ".section .fixup,\"ax\" \n" \
111 "7: lhi %0,%h[e1] \n" \ 135 "7: lhi %0,%h[e1] \n" \
112 " jg 6b \n" \ 136 " jg 6b \n" \
@@ -118,25 +142,25 @@ struct crypt_z990_query_status {
118 " .quad 0b,7b \n" \ 142 " .quad 0b,7b \n" \
119 " .quad 1b,8b \n" \ 143 " .quad 1b,8b \n" \
120 ".previous" 144 ".previous"
121#endif /* __s390x__ */ 145#endif /* CONFIG_64BIT */
122 146
123/* 147/*
124 * Standard code for setting the result of z990 crypto instructions. 148 * Standard code for setting the result of s390 crypto instructions.
125 * %0: the register which will receive the result 149 * %0: the register which will receive the result
126 * [result]: the register containing the result (e.g. second operand length 150 * [result]: the register containing the result (e.g. second operand length
127 * to compute number of processed bytes]. 151 * to compute number of processed bytes].
128 */ 152 */
129#ifndef __s390x__ 153#ifndef CONFIG_64BIT
130#define __crypt_z990_set_result \ 154#define __crypt_s390_set_result \
131 " lr %0,%[result] \n" 155 " lr %0,%[result] \n"
132#else /* __s390x__ */ 156#else /* CONFIG_64BIT */
133#define __crypt_z990_set_result \ 157#define __crypt_s390_set_result \
134 " lgr %0,%[result] \n" 158 " lgr %0,%[result] \n"
135#endif 159#endif
136 160
137/* 161/*
138 * Executes the KM (CIPHER MESSAGE) operation of the z990 CPU. 162 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
139 * @param func: the function code passed to KM; see crypt_z990_km_func 163 * @param func: the function code passed to KM; see crypt_s390_km_func
140 * @param param: address of parameter block; see POP for details on each func 164 * @param param: address of parameter block; see POP for details on each func
141 * @param dest: address of destination memory area 165 * @param dest: address of destination memory area
142 * @param src: address of source memory area 166 * @param src: address of source memory area
@@ -145,9 +169,9 @@ struct crypt_z990_query_status {
145 * for encryption/decryption funcs 169 * for encryption/decryption funcs
146 */ 170 */
147static inline int 171static inline int
148crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len) 172crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
149{ 173{
150 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 174 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
151 register void* __param asm("1") = param; 175 register void* __param asm("1") = param;
152 register u8* __dest asm("4") = dest; 176 register u8* __dest asm("4") = dest;
153 register const u8* __src asm("2") = src; 177 register const u8* __src asm("2") = src;
@@ -156,26 +180,26 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
156 180
157 ret = 0; 181 ret = 0;
158 __asm__ __volatile__ ( 182 __asm__ __volatile__ (
159 "0: .insn rre,0xB92E0000,%1,%2 \n" //KM opcode 183 "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
160 "1: brc 1,0b \n" //handle partial completion 184 "1: brc 1,0b \n" /* handle partial completion */
161 __crypt_z990_set_result 185 __crypt_s390_set_result
162 "6: \n" 186 "6: \n"
163 __crypt_z990_fixup 187 __crypt_s390_fixup
164 : "+d" (ret), "+a" (__dest), "+a" (__src), 188 : "+d" (ret), "+a" (__dest), "+a" (__src),
165 [result] "+d" (__src_len) 189 [result] "+d" (__src_len)
166 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 190 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
167 "a" (__param) 191 "a" (__param)
168 : "cc", "memory" 192 : "cc", "memory"
169 ); 193 );
170 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 194 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
171 ret = src_len - ret; 195 ret = src_len - ret;
172 } 196 }
173 return ret; 197 return ret;
174} 198}
175 199
176/* 200/*
177 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the z990 CPU. 201 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
178 * @param func: the function code passed to KM; see crypt_z990_kmc_func 202 * @param func: the function code passed to KM; see crypt_s390_kmc_func
179 * @param param: address of parameter block; see POP for details on each func 203 * @param param: address of parameter block; see POP for details on each func
180 * @param dest: address of destination memory area 204 * @param dest: address of destination memory area
181 * @param src: address of source memory area 205 * @param src: address of source memory area
@@ -184,9 +208,9 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
184 * for encryption/decryption funcs 208 * for encryption/decryption funcs
185 */ 209 */
186static inline int 210static inline int
187crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len) 211crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
188{ 212{
189 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 213 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
190 register void* __param asm("1") = param; 214 register void* __param asm("1") = param;
191 register u8* __dest asm("4") = dest; 215 register u8* __dest asm("4") = dest;
192 register const u8* __src asm("2") = src; 216 register const u8* __src asm("2") = src;
@@ -195,18 +219,18 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
195 219
196 ret = 0; 220 ret = 0;
197 __asm__ __volatile__ ( 221 __asm__ __volatile__ (
198 "0: .insn rre,0xB92F0000,%1,%2 \n" //KMC opcode 222 "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
199 "1: brc 1,0b \n" //handle partial completion 223 "1: brc 1,0b \n" /* handle partial completion */
200 __crypt_z990_set_result 224 __crypt_s390_set_result
201 "6: \n" 225 "6: \n"
202 __crypt_z990_fixup 226 __crypt_s390_fixup
203 : "+d" (ret), "+a" (__dest), "+a" (__src), 227 : "+d" (ret), "+a" (__dest), "+a" (__src),
204 [result] "+d" (__src_len) 228 [result] "+d" (__src_len)
205 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 229 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
206 "a" (__param) 230 "a" (__param)
207 : "cc", "memory" 231 : "cc", "memory"
208 ); 232 );
209 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 233 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
210 ret = src_len - ret; 234 ret = src_len - ret;
211 } 235 }
212 return ret; 236 return ret;
@@ -214,8 +238,8 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
214 238
215/* 239/*
216 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation 240 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
217 * of the z990 CPU. 241 * of the CPU.
218 * @param func: the function code passed to KM; see crypt_z990_kimd_func 242 * @param func: the function code passed to KM; see crypt_s390_kimd_func
219 * @param param: address of parameter block; see POP for details on each func 243 * @param param: address of parameter block; see POP for details on each func
220 * @param src: address of source memory area 244 * @param src: address of source memory area
221 * @param src_len: length of src operand in bytes 245 * @param src_len: length of src operand in bytes
@@ -223,9 +247,9 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
223 * for digest funcs 247 * for digest funcs
224 */ 248 */
225static inline int 249static inline int
226crypt_z990_kimd(long func, void* param, const u8* src, long src_len) 250crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
227{ 251{
228 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 252 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
229 register void* __param asm("1") = param; 253 register void* __param asm("1") = param;
230 register const u8* __src asm("2") = src; 254 register const u8* __src asm("2") = src;
231 register long __src_len asm("3") = src_len; 255 register long __src_len asm("3") = src_len;
@@ -233,25 +257,25 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
233 257
234 ret = 0; 258 ret = 0;
235 __asm__ __volatile__ ( 259 __asm__ __volatile__ (
236 "0: .insn rre,0xB93E0000,%1,%1 \n" //KIMD opcode 260 "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */
237 "1: brc 1,0b \n" /*handle partical completion of kimd*/ 261 "1: brc 1,0b \n" /* handle partical completion */
238 __crypt_z990_set_result 262 __crypt_s390_set_result
239 "6: \n" 263 "6: \n"
240 __crypt_z990_fixup 264 __crypt_s390_fixup
241 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 265 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
242 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 266 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
243 "a" (__param) 267 "a" (__param)
244 : "cc", "memory" 268 : "cc", "memory"
245 ); 269 );
246 if (ret >= 0 && (func & CRYPT_Z990_FUNC_MASK)){ 270 if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){
247 ret = src_len - ret; 271 ret = src_len - ret;
248 } 272 }
249 return ret; 273 return ret;
250} 274}
251 275
252/* 276/*
253 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the z990 CPU. 277 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
254 * @param func: the function code passed to KM; see crypt_z990_klmd_func 278 * @param func: the function code passed to KM; see crypt_s390_klmd_func
255 * @param param: address of parameter block; see POP for details on each func 279 * @param param: address of parameter block; see POP for details on each func
256 * @param src: address of source memory area 280 * @param src: address of source memory area
257 * @param src_len: length of src operand in bytes 281 * @param src_len: length of src operand in bytes
@@ -259,9 +283,9 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
259 * for digest funcs 283 * for digest funcs
260 */ 284 */
261static inline int 285static inline int
262crypt_z990_klmd(long func, void* param, const u8* src, long src_len) 286crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
263{ 287{
264 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 288 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
265 register void* __param asm("1") = param; 289 register void* __param asm("1") = param;
266 register const u8* __src asm("2") = src; 290 register const u8* __src asm("2") = src;
267 register long __src_len asm("3") = src_len; 291 register long __src_len asm("3") = src_len;
@@ -269,17 +293,17 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
269 293
270 ret = 0; 294 ret = 0;
271 __asm__ __volatile__ ( 295 __asm__ __volatile__ (
272 "0: .insn rre,0xB93F0000,%1,%1 \n" //KLMD opcode 296 "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */
273 "1: brc 1,0b \n" /*handle partical completion of klmd*/ 297 "1: brc 1,0b \n" /* handle partical completion */
274 __crypt_z990_set_result 298 __crypt_s390_set_result
275 "6: \n" 299 "6: \n"
276 __crypt_z990_fixup 300 __crypt_s390_fixup
277 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 301 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
278 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 302 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
279 "a" (__param) 303 "a" (__param)
280 : "cc", "memory" 304 : "cc", "memory"
281 ); 305 );
282 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 306 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
283 ret = src_len - ret; 307 ret = src_len - ret;
284 } 308 }
285 return ret; 309 return ret;
@@ -287,8 +311,8 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
287 311
288/* 312/*
289 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation 313 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
290 * of the z990 CPU. 314 * of the CPU.
291 * @param func: the function code passed to KM; see crypt_z990_klmd_func 315 * @param func: the function code passed to KM; see crypt_s390_klmd_func
292 * @param param: address of parameter block; see POP for details on each func 316 * @param param: address of parameter block; see POP for details on each func
293 * @param src: address of source memory area 317 * @param src: address of source memory area
294 * @param src_len: length of src operand in bytes 318 * @param src_len: length of src operand in bytes
@@ -296,9 +320,9 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
296 * for digest funcs 320 * for digest funcs
297 */ 321 */
298static inline int 322static inline int
299crypt_z990_kmac(long func, void* param, const u8* src, long src_len) 323crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
300{ 324{
301 register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK; 325 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
302 register void* __param asm("1") = param; 326 register void* __param asm("1") = param;
303 register const u8* __src asm("2") = src; 327 register const u8* __src asm("2") = src;
304 register long __src_len asm("3") = src_len; 328 register long __src_len asm("3") = src_len;
@@ -306,58 +330,58 @@ crypt_z990_kmac(long func, void* param, const u8* src, long src_len)
306 330
307 ret = 0; 331 ret = 0;
308 __asm__ __volatile__ ( 332 __asm__ __volatile__ (
309 "0: .insn rre,0xB91E0000,%5,%5 \n" //KMAC opcode 333 "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */
310 "1: brc 1,0b \n" /*handle partical completion of klmd*/ 334 "1: brc 1,0b \n" /* handle partical completion */
311 __crypt_z990_set_result 335 __crypt_s390_set_result
312 "6: \n" 336 "6: \n"
313 __crypt_z990_fixup 337 __crypt_s390_fixup
314 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len) 338 : "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
315 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func), 339 : [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
316 "a" (__param) 340 "a" (__param)
317 : "cc", "memory" 341 : "cc", "memory"
318 ); 342 );
319 if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){ 343 if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
320 ret = src_len - ret; 344 ret = src_len - ret;
321 } 345 }
322 return ret; 346 return ret;
323} 347}
324 348
325/** 349/**
326 * Tests if a specific z990 crypto function is implemented on the machine. 350 * Tests if a specific crypto function is implemented on the machine.
327 * @param func: the function code of the specific function; 0 if op in general 351 * @param func: the function code of the specific function; 0 if op in general
328 * @return 1 if func available; 0 if func or op in general not available 352 * @return 1 if func available; 0 if func or op in general not available
329 */ 353 */
330static inline int 354static inline int
331crypt_z990_func_available(int func) 355crypt_s390_func_available(int func)
332{ 356{
333 int ret; 357 int ret;
334 358
335 struct crypt_z990_query_status status = { 359 struct crypt_s390_query_status status = {
336 .high = 0, 360 .high = 0,
337 .low = 0 361 .low = 0
338 }; 362 };
339 switch (func & CRYPT_Z990_OP_MASK){ 363 switch (func & CRYPT_S390_OP_MASK){
340 case CRYPT_Z990_KM: 364 case CRYPT_S390_KM:
341 ret = crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0); 365 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
342 break; 366 break;
343 case CRYPT_Z990_KMC: 367 case CRYPT_S390_KMC:
344 ret = crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0); 368 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
345 break; 369 break;
346 case CRYPT_Z990_KIMD: 370 case CRYPT_S390_KIMD:
347 ret = crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0); 371 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
348 break; 372 break;
349 case CRYPT_Z990_KLMD: 373 case CRYPT_S390_KLMD:
350 ret = crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0); 374 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
351 break; 375 break;
352 case CRYPT_Z990_KMAC: 376 case CRYPT_S390_KMAC:
353 ret = crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0); 377 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
354 break; 378 break;
355 default: 379 default:
356 ret = 0; 380 ret = 0;
357 return ret; 381 return ret;
358 } 382 }
359 if (ret >= 0){ 383 if (ret >= 0){
360 func &= CRYPT_Z990_FUNC_MASK; 384 func &= CRYPT_S390_FUNC_MASK;
 361 func &= 0x7f; //mask modifier bit 385 func &= 0x7f; /* mask modifier bit */
362 if (func < 64){ 386 if (func < 64){
363 ret = (status.high >> (64 - func - 1)) & 0x1; 387 ret = (status.high >> (64 - func - 1)) & 0x1;
@@ -370,5 +394,4 @@ crypt_z990_func_available(int func)
370 return ret; 394 return ret;
371} 395}
372 396
373 397#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H
374#endif // _CRYPTO_ARCH_S390_CRYPT_Z990_H
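
The renamed constants keep the same two-byte encoding: the high byte selects the instruction (KM, KMC, KIMD, KLMD, KMAC), the low byte is the function code, and 0x80 is the decipher modifier. A worked example of the availability check in crypt_s390_func_available(), with status filled in by a prior QUERY call:

    struct crypt_s390_query_status status;  /* from crypt_s390_km(KM_QUERY, ...) */
    int func, avail;

    func = KM_AES_128_DECRYPT;      /* 0x0100 | 0x12 | 0x80 = 0x0192  */
    func &= CRYPT_S390_FUNC_MASK;   /* 0x92: strip the operation byte */
    func &= 0x7f;                   /* 0x12: strip the decipher bit   */
    /* function bits count from the MSB of the 128-bit status block;
       0x12 = 18 < 64, so the bit lives in status.high */
    avail = (status.high >> (64 - func - 1)) & 0x1;
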
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
new file mode 100644
index 000000000000..def02bdc44a4
--- /dev/null
+++ b/arch/s390/crypto/crypt_s390_query.c
@@ -0,0 +1,129 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for s390 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_s390.h"
21
22static void query_available_functions(void)
23{
24 printk(KERN_INFO "#####################\n");
25
26 /* query available KM functions */
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_s390_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_s390_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_s390_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_s390_func_available(KM_TDEA_192_ENCRYPT));
35 printk(KERN_INFO "KM_AES_128: %d\n",
36 crypt_s390_func_available(KM_AES_128_ENCRYPT));
37 printk(KERN_INFO "KM_AES_192: %d\n",
38 crypt_s390_func_available(KM_AES_192_ENCRYPT));
39 printk(KERN_INFO "KM_AES_256: %d\n",
40 crypt_s390_func_available(KM_AES_256_ENCRYPT));
41
42 /* query available KMC functions */
43 printk(KERN_INFO "KMC_QUERY: %d\n",
44 crypt_s390_func_available(KMC_QUERY));
45 printk(KERN_INFO "KMC_DEA: %d\n",
46 crypt_s390_func_available(KMC_DEA_ENCRYPT));
47 printk(KERN_INFO "KMC_TDEA_128: %d\n",
48 crypt_s390_func_available(KMC_TDEA_128_ENCRYPT));
49 printk(KERN_INFO "KMC_TDEA_192: %d\n",
50 crypt_s390_func_available(KMC_TDEA_192_ENCRYPT));
51 printk(KERN_INFO "KMC_AES_128: %d\n",
52 crypt_s390_func_available(KMC_AES_128_ENCRYPT));
53 printk(KERN_INFO "KMC_AES_192: %d\n",
54 crypt_s390_func_available(KMC_AES_192_ENCRYPT));
55 printk(KERN_INFO "KMC_AES_256: %d\n",
56 crypt_s390_func_available(KMC_AES_256_ENCRYPT));
57
 58 /* query available KIMD functions */
59 printk(KERN_INFO "KIMD_QUERY: %d\n",
60 crypt_s390_func_available(KIMD_QUERY));
61 printk(KERN_INFO "KIMD_SHA_1: %d\n",
62 crypt_s390_func_available(KIMD_SHA_1));
63 printk(KERN_INFO "KIMD_SHA_256: %d\n",
64 crypt_s390_func_available(KIMD_SHA_256));
65
66 /* query available KLMD functions */
67 printk(KERN_INFO "KLMD_QUERY: %d\n",
68 crypt_s390_func_available(KLMD_QUERY));
69 printk(KERN_INFO "KLMD_SHA_1: %d\n",
70 crypt_s390_func_available(KLMD_SHA_1));
71 printk(KERN_INFO "KLMD_SHA_256: %d\n",
72 crypt_s390_func_available(KLMD_SHA_256));
73
74 /* query available KMAC functions */
75 printk(KERN_INFO "KMAC_QUERY: %d\n",
76 crypt_s390_func_available(KMAC_QUERY));
77 printk(KERN_INFO "KMAC_DEA: %d\n",
78 crypt_s390_func_available(KMAC_DEA));
79 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
80 crypt_s390_func_available(KMAC_TDEA_128));
81 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
82 crypt_s390_func_available(KMAC_TDEA_192));
83}
84
 85static int __init init(void)
86{
87 struct crypt_s390_query_status status = {
88 .high = 0,
89 .low = 0
90 };
91
92 printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
93 crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
94 printk(KERN_INFO "KM:\t%016llx %016llx\n",
95 (unsigned long long) status.high,
96 (unsigned long long) status.low);
97 status.high = status.low = 0;
98 crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
99 printk(KERN_INFO "KMC:\t%016llx %016llx\n",
100 (unsigned long long) status.high,
101 (unsigned long long) status.low);
102 status.high = status.low = 0;
103 crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
104 printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
105 (unsigned long long) status.high,
106 (unsigned long long) status.low);
107 status.high = status.low = 0;
108 crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
109 printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
110 (unsigned long long) status.high,
111 (unsigned long long) status.low);
112 status.high = status.low = 0;
113 crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
114 printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
115 (unsigned long long) status.high,
116 (unsigned long long) status.low);
117
118 query_available_functions();
119 return -ECANCELED;
120}
121
122static void __exit cleanup(void)
123{
124}
125
126module_init(init);
127module_exit(cleanup);
128
129MODULE_LICENSE("GPL");
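
Returning -ECANCELED from init is deliberate: the module prints its report once and the failed initialization keeps it from staying resident. The pattern in isolation (a sketch of the same idea, not a new interface):

    static int __init one_shot_probe(void)
    {
            query_available_functions();    /* print the report */
            return -ECANCELED;              /* intentional: unload after probing */
    }
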
diff --git a/arch/s390/crypto/crypt_z990_query.c b/arch/s390/crypto/crypt_z990_query.c
deleted file mode 100644
index 7133983d1384..000000000000
--- a/arch/s390/crypto/crypt_z990_query.c
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for z990 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_z990.h"
21
22static void
23query_available_functions(void)
24{
25 printk(KERN_INFO "#####################\n");
26 //query available KM functions
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_z990_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_z990_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_z990_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_z990_func_available(KM_TDEA_192_ENCRYPT));
35 //query available KMC functions
36 printk(KERN_INFO "KMC_QUERY: %d\n",
37 crypt_z990_func_available(KMC_QUERY));
38 printk(KERN_INFO "KMC_DEA: %d\n",
39 crypt_z990_func_available(KMC_DEA_ENCRYPT));
40 printk(KERN_INFO "KMC_TDEA_128: %d\n",
41 crypt_z990_func_available(KMC_TDEA_128_ENCRYPT));
42 printk(KERN_INFO "KMC_TDEA_192: %d\n",
43 crypt_z990_func_available(KMC_TDEA_192_ENCRYPT));
44 //query available KIMD fucntions
45 printk(KERN_INFO "KIMD_QUERY: %d\n",
46 crypt_z990_func_available(KIMD_QUERY));
47 printk(KERN_INFO "KIMD_SHA_1: %d\n",
48 crypt_z990_func_available(KIMD_SHA_1));
49 //query available KLMD functions
50 printk(KERN_INFO "KLMD_QUERY: %d\n",
51 crypt_z990_func_available(KLMD_QUERY));
52 printk(KERN_INFO "KLMD_SHA_1: %d\n",
53 crypt_z990_func_available(KLMD_SHA_1));
54 //query available KMAC functions
55 printk(KERN_INFO "KMAC_QUERY: %d\n",
56 crypt_z990_func_available(KMAC_QUERY));
57 printk(KERN_INFO "KMAC_DEA: %d\n",
58 crypt_z990_func_available(KMAC_DEA));
59 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
60 crypt_z990_func_available(KMAC_TDEA_128));
61 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
62 crypt_z990_func_available(KMAC_TDEA_192));
63}
64
65static int
66init(void)
67{
68 struct crypt_z990_query_status status = {
69 .high = 0,
70 .low = 0
71 };
72
73 printk(KERN_INFO "crypt_z990: querying available crypto functions\n");
74 crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0);
75 printk(KERN_INFO "KM: %016llx %016llx\n",
76 (unsigned long long) status.high,
77 (unsigned long long) status.low);
78 status.high = status.low = 0;
79 crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0);
80 printk(KERN_INFO "KMC: %016llx %016llx\n",
81 (unsigned long long) status.high,
82 (unsigned long long) status.low);
83 status.high = status.low = 0;
84 crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0);
85 printk(KERN_INFO "KIMD: %016llx %016llx\n",
86 (unsigned long long) status.high,
87 (unsigned long long) status.low);
88 status.high = status.low = 0;
89 crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0);
90 printk(KERN_INFO "KLMD: %016llx %016llx\n",
91 (unsigned long long) status.high,
92 (unsigned long long) status.low);
93 status.high = status.low = 0;
94 crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0);
95 printk(KERN_INFO "KMAC: %016llx %016llx\n",
96 (unsigned long long) status.high,
97 (unsigned long long) status.low);
98
99 query_available_functions();
100 return -1;
101}
102
103static void __exit
104cleanup(void)
105{
106}
107
108module_init(init);
109module_exit(cleanup);
110
111MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_z990.c b/arch/s390/crypto/des_s390.c
index 813cf37b1177..a38bb2a3eef6 100644
--- a/arch/s390/crypto/des_z990.c
+++ b/arch/s390/crypto/des_s390.c
@@ -1,7 +1,7 @@
 /*
  * Cryptographic API.
  *
- * z990 implementation of the DES Cipher Algorithm.
+ * s390 implementation of the DES Cipher Algorithm.
  *
  * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Thomas Spatzier (tspat@de.ibm.com)
@@ -19,7 +19,7 @@
 #include <linux/errno.h>
 #include <asm/scatterlist.h>
 #include <linux/crypto.h>
-#include "crypt_z990.h"
+#include "crypt_s390.h"
 #include "crypto_des.h"
 
 #define DES_BLOCK_SIZE 8
@@ -31,17 +31,17 @@
 #define DES3_192_KEY_SIZE	(3 * DES_KEY_SIZE)
 #define DES3_192_BLOCK_SIZE	DES_BLOCK_SIZE
 
-struct crypt_z990_des_ctx {
+struct crypt_s390_des_ctx {
 	u8 iv[DES_BLOCK_SIZE];
 	u8 key[DES_KEY_SIZE];
 };
 
-struct crypt_z990_des3_128_ctx {
+struct crypt_s390_des3_128_ctx {
 	u8 iv[DES_BLOCK_SIZE];
 	u8 key[DES3_128_KEY_SIZE];
 };
 
-struct crypt_z990_des3_192_ctx {
+struct crypt_s390_des3_192_ctx {
 	u8 iv[DES_BLOCK_SIZE];
 	u8 key[DES3_192_KEY_SIZE];
 };
@@ -49,7 +49,7 @@ struct crypt_z990_des3_192_ctx {
 static int
 des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
 {
-	struct crypt_z990_des_ctx *dctx;
+	struct crypt_s390_des_ctx *dctx;
 	int ret;
 
 	dctx = ctx;
@@ -65,26 +65,26 @@ des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
 static void
 des_encrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	struct crypt_z990_des_ctx *dctx;
+	struct crypt_s390_des_ctx *dctx;
 
 	dctx = ctx;
-	crypt_z990_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
+	crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
 static void
 des_decrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	struct crypt_z990_des_ctx *dctx;
+	struct crypt_s390_des_ctx *dctx;
 
 	dctx = ctx;
-	crypt_z990_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
+	crypt_s390_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
 }
 
 static struct crypto_alg des_alg = {
 	.cra_name	=	"des",
 	.cra_flags	=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize	=	DES_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct crypt_z990_des_ctx),
+	.cra_ctxsize	=	sizeof(struct crypt_s390_des_ctx),
 	.cra_module	=	THIS_MODULE,
 	.cra_list	=	LIST_HEAD_INIT(des_alg.cra_list),
 	.cra_u		=	{ .cipher = {
@@ -111,7 +111,7 @@ static int
 des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
 {
 	int i, ret;
-	struct crypt_z990_des3_128_ctx *dctx;
+	struct crypt_s390_des3_128_ctx *dctx;
 	const u8* temp_key = key;
 
 	dctx = ctx;
@@ -132,20 +132,20 @@ des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
 static void
 des3_128_encrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	struct crypt_z990_des3_128_ctx *dctx;
+	struct crypt_s390_des3_128_ctx *dctx;
 
 	dctx = ctx;
-	crypt_z990_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
+	crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
 			DES3_128_BLOCK_SIZE);
 }
 
 static void
 des3_128_decrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	struct crypt_z990_des3_128_ctx *dctx;
+	struct crypt_s390_des3_128_ctx *dctx;
 
 	dctx = ctx;
-	crypt_z990_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
+	crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
 			DES3_128_BLOCK_SIZE);
 }
 
@@ -153,7 +153,7 @@ static struct crypto_alg des3_128_alg = {
 	.cra_name	=	"des3_ede128",
 	.cra_flags	=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize	=	DES3_128_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct crypt_z990_des3_128_ctx),
+	.cra_ctxsize	=	sizeof(struct crypt_s390_des3_128_ctx),
 	.cra_module	=	THIS_MODULE,
 	.cra_list	=	LIST_HEAD_INIT(des3_128_alg.cra_list),
 	.cra_u		=	{ .cipher = {
@@ -181,7 +181,7 @@ static int
 des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
 {
 	int i, ret;
-	struct crypt_z990_des3_192_ctx *dctx;
+	struct crypt_s390_des3_192_ctx *dctx;
 	const u8* temp_key;
 
 	dctx = ctx;
@@ -206,20 +206,20 @@ des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
 static void
 des3_192_encrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	struct crypt_z990_des3_192_ctx *dctx;
+	struct crypt_s390_des3_192_ctx *dctx;
 
 	dctx = ctx;
-	crypt_z990_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
+	crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
 			DES3_192_BLOCK_SIZE);
 }
 
 static void
 des3_192_decrypt(void *ctx, u8 *dst, const u8 *src)
 {
-	struct crypt_z990_des3_192_ctx *dctx;
+	struct crypt_s390_des3_192_ctx *dctx;
 
 	dctx = ctx;
-	crypt_z990_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
+	crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
 			DES3_192_BLOCK_SIZE);
 }
 
@@ -227,7 +227,7 @@ static struct crypto_alg des3_192_alg = {
 	.cra_name	=	"des3_ede",
 	.cra_flags	=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize	=	DES3_192_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct crypt_z990_des3_192_ctx),
+	.cra_ctxsize	=	sizeof(struct crypt_s390_des3_192_ctx),
 	.cra_module	=	THIS_MODULE,
 	.cra_list	=	LIST_HEAD_INIT(des3_192_alg.cra_list),
 	.cra_u		=	{ .cipher = {
@@ -245,9 +245,9 @@ init(void)
 {
 	int ret;
 
-	if (!crypt_z990_func_available(KM_DEA_ENCRYPT) ||
-	    !crypt_z990_func_available(KM_TDEA_128_ENCRYPT) ||
-	    !crypt_z990_func_available(KM_TDEA_192_ENCRYPT)){
+	if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
+	    !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
+	    !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)){
 		return -ENOSYS;
 	}
 
@@ -262,7 +262,7 @@ init(void)
 		return -EEXIST;
 	}
 
-	printk(KERN_INFO "crypt_z990: des_z990 loaded.\n");
+	printk(KERN_INFO "crypt_s390: des_s390 loaded.\n");
 	return 0;
 }
 
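
The rename above is purely mechanical -- every crypt_z990_* identifier becomes crypt_s390_* -- while the registration pattern stays the same: a named struct crypto_alg with a per-algorithm context size and cipher callbacks is handed to crypto_register_alg(), and init() bails out with -ENOSYS when the machine lacks the needed CPACF function codes. The same ops-table idea, reduced to a self-contained sketch with illustrative names (not the kernel crypto API):

    #include <string.h>

    /* Minimal registry of named cipher descriptors, mirroring how
     * des_alg/des3_128_alg/des3_192_alg are registered above.
     * All names here are illustrative. */
    struct cipher_ops {
            const char *name;
            unsigned int block_size;
            void (*encrypt)(void *ctx, unsigned char *dst, const unsigned char *src);
            void (*decrypt)(void *ctx, unsigned char *dst, const unsigned char *src);
    };

    #define MAX_ALGS 8
    static const struct cipher_ops *registry[MAX_ALGS];
    static unsigned int nr_algs;

    static int register_cipher(const struct cipher_ops *ops)
    {
            unsigned int i;

            for (i = 0; i < nr_algs; i++)
                    if (strcmp(registry[i]->name, ops->name) == 0)
                            return -1;      /* duplicate, like -EEXIST above */
            if (nr_algs == MAX_ALGS)
                    return -1;
            registry[nr_algs++] = ops;
            return 0;
    }
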
diff --git a/arch/s390/crypto/sha1_z990.c b/arch/s390/crypto/sha1_s390.c
index 298174ddf5b1..98c896b86dcd 100644
--- a/arch/s390/crypto/sha1_z990.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -1,7 +1,7 @@
 /*
  * Cryptographic API.
  *
- * z990 implementation of the SHA1 Secure Hash Algorithm.
+ * s390 implementation of the SHA1 Secure Hash Algorithm.
  *
  * Derived from cryptoapi implementation, adapted for in-place
  * scatterlist interface.  Originally based on the public domain
@@ -28,22 +28,22 @@
 #include <linux/crypto.h>
 #include <asm/scatterlist.h>
 #include <asm/byteorder.h>
-#include "crypt_z990.h"
+#include "crypt_s390.h"
 
 #define SHA1_DIGEST_SIZE	20
 #define SHA1_BLOCK_SIZE		64
 
-struct crypt_z990_sha1_ctx {
+struct crypt_s390_sha1_ctx {
 	u64 count;
 	u32 state[5];
 	u32 buf_len;
 	u8 buffer[2 * SHA1_BLOCK_SIZE];
 };
 
 static void
 sha1_init(void *ctx)
 {
-	static const struct crypt_z990_sha1_ctx initstate = {
+	static const struct crypt_s390_sha1_ctx initstate = {
 		.state = {
 			0x67452301,
 			0xEFCDAB89,
@@ -58,7 +58,7 @@ sha1_init(void *ctx)
 static void
 sha1_update(void *ctx, const u8 *data, unsigned int len)
 {
-	struct crypt_z990_sha1_ctx *sctx;
+	struct crypt_s390_sha1_ctx *sctx;
 	long imd_len;
 
 	sctx = ctx;
@@ -69,7 +69,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
 		//complete full block and hash
 		memcpy(sctx->buffer + sctx->buf_len, data,
 				SHA1_BLOCK_SIZE - sctx->buf_len);
-		crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
+		crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
 				SHA1_BLOCK_SIZE);
 		data += SHA1_BLOCK_SIZE - sctx->buf_len;
 		len -= SHA1_BLOCK_SIZE - sctx->buf_len;
@@ -79,7 +79,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
 	//rest of data contains full blocks?
 	imd_len = len & ~0x3ful;
 	if (imd_len){
-		crypt_z990_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
+		crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
 		data += imd_len;
 		len -= imd_len;
 	}
@@ -92,7 +92,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
 
 
 static void
-pad_message(struct crypt_z990_sha1_ctx* sctx)
+pad_message(struct crypt_s390_sha1_ctx* sctx)
 {
 	int index;
 
@@ -113,11 +113,11 @@ pad_message(struct crypt_z990_sha1_ctx* sctx)
 static void
 sha1_final(void* ctx, u8 *out)
 {
-	struct crypt_z990_sha1_ctx *sctx = ctx;
+	struct crypt_s390_sha1_ctx *sctx = ctx;
 
 	//must perform manual padding
 	pad_message(sctx);
-	crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
+	crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
 	//copy digest to out
 	memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
 	/* Wipe context */
@@ -128,7 +128,7 @@ static struct crypto_alg alg = {
 	.cra_name	=	"sha1",
 	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
 	.cra_blocksize	=	SHA1_BLOCK_SIZE,
-	.cra_ctxsize	=	sizeof(struct crypt_z990_sha1_ctx),
+	.cra_ctxsize	=	sizeof(struct crypt_s390_sha1_ctx),
 	.cra_module	=	THIS_MODULE,
 	.cra_list	=	LIST_HEAD_INIT(alg.cra_list),
 	.cra_u		=	{ .digest = {
@@ -143,10 +143,10 @@ init(void)
 {
 	int ret = -ENOSYS;
 
-	if (crypt_z990_func_available(KIMD_SHA_1)){
+	if (crypt_s390_func_available(KIMD_SHA_1)){
 		ret = crypto_register_alg(&alg);
 		if (ret == 0){
-			printk(KERN_INFO "crypt_z990: sha1_z990 loaded.\n");
+			printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n");
 		}
 	}
 	return ret;
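
Worth noting in sha1_update(): `len & ~0x3ful` rounds the remaining length down to a whole number of 64-byte blocks, since KIMD digests only complete blocks and the tail must be buffered for the next update or for final padding. The arithmetic in isolation:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
            size_t len = 200;                /* arbitrary input length */
            size_t imd_len = len & ~0x3ful;  /* 192 = 3 full 64-byte blocks */

            assert(imd_len == 192);
            assert(len - imd_len == 8);      /* 8 bytes stay in the buffer */
            return 0;
    }
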
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
new file mode 100644
index 000000000000..b75bdbd476c7
--- /dev/null
+++ b/arch/s390/crypto/sha256_s390.c
@@ -0,0 +1,151 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the SHA256 Secure Hash Algorithm.
+ *
+ * s390 Version:
+ *   Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation
+ *   Author(s): Jan Glauber (jang@de.ibm.com)
+ *
+ * Derived from "crypto/sha256.c"
+ * and "arch/s390/crypto/sha1_s390.c"
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+
+#include "crypt_s390.h"
+
+#define SHA256_DIGEST_SIZE	32
+#define SHA256_BLOCK_SIZE	64
+
+struct s390_sha256_ctx {
+	u64 count;
+	u32 state[8];
+	u8 buf[2 * SHA256_BLOCK_SIZE];
+};
+
+static void sha256_init(void *ctx)
+{
+	struct s390_sha256_ctx *sctx = ctx;
+
+	sctx->state[0] = 0x6a09e667;
+	sctx->state[1] = 0xbb67ae85;
+	sctx->state[2] = 0x3c6ef372;
+	sctx->state[3] = 0xa54ff53a;
+	sctx->state[4] = 0x510e527f;
+	sctx->state[5] = 0x9b05688c;
+	sctx->state[6] = 0x1f83d9ab;
+	sctx->state[7] = 0x5be0cd19;
+	sctx->count = 0;
+	memset(sctx->buf, 0, sizeof(sctx->buf));
+}
+
+static void sha256_update(void *ctx, const u8 *data, unsigned int len)
+{
+	struct s390_sha256_ctx *sctx = ctx;
+	unsigned int index;
+
+	/* how much is already in the buffer? */
+	index = sctx->count / 8 & 0x3f;
+
+	/* update message bit length */
+	sctx->count += len * 8;
+
+	/* process one block */
+	if ((index + len) >= SHA256_BLOCK_SIZE) {
+		memcpy(sctx->buf + index, data, SHA256_BLOCK_SIZE - index);
+		crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
+				SHA256_BLOCK_SIZE);
+		data += SHA256_BLOCK_SIZE - index;
+		len -= SHA256_BLOCK_SIZE - index;
+	}
+
+	/* anything left? */
+	if (len)
+		memcpy(sctx->buf + index , data, len);
+}
+
+static void pad_message(struct s390_sha256_ctx* sctx)
+{
+	int index, end;
+
+	index = sctx->count / 8 & 0x3f;
+	end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE;
+
+	/* start pad with 1 */
+	sctx->buf[index] = 0x80;
+
+	/* pad with zeros */
+	index++;
+	memset(sctx->buf + index, 0x00, end - index - 8);
+
+	/* append message length */
+	memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count);
+
+	sctx->count = end * 8;
+}
+
+/* Add padding and return the message digest */
+static void sha256_final(void* ctx, u8 *out)
+{
+	struct s390_sha256_ctx *sctx = ctx;
+
+	/* must perform manual padding */
+	pad_message(sctx);
+
+	crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
+			sctx->count / 8);
+
+	/* copy digest to out */
+	memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
+
+	/* wipe context */
+	memset(sctx, 0, sizeof *sctx);
+}
+
+static struct crypto_alg alg = {
+	.cra_name	=	"sha256",
+	.cra_flags	=	CRYPTO_ALG_TYPE_DIGEST,
+	.cra_blocksize	=	SHA256_BLOCK_SIZE,
+	.cra_ctxsize	=	sizeof(struct s390_sha256_ctx),
+	.cra_module	=	THIS_MODULE,
+	.cra_list	=	LIST_HEAD_INIT(alg.cra_list),
+	.cra_u		=	{ .digest = {
+	.dia_digestsize	=	SHA256_DIGEST_SIZE,
+	.dia_init	=	sha256_init,
+	.dia_update	=	sha256_update,
+	.dia_final	=	sha256_final } }
+};
+
+static int init(void)
+{
+	int ret;
+
+	if (!crypt_s390_func_available(KIMD_SHA_256))
+		return -ENOSYS;
+
+	ret = crypto_register_alg(&alg);
+	if (ret != 0)
+		printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded.");
+	return ret;
+}
+
+static void __exit fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_ALIAS("sha256");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
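
pad_message() follows the standard SHA-256 scheme: a 0x80 byte, zeros, then the 64-bit message bit count in the last 8 bytes of the padded data. Whether one or two 64-byte blocks are needed depends on how many bytes are already buffered -- 56 is the cutoff, since the length field needs the final 8 bytes. (Copying sctx->count directly with memcpy works here because s390 is big-endian, which is the byte order SHA-256 requires.) A worked check of that cutoff:

    #include <assert.h>

    int main(void)
    {
            unsigned long long count;
            int index, end;

            count = 55ULL * 8;              /* 55 bytes buffered */
            index = count / 8 & 0x3f;       /* 55 */
            end = index < 56 ? 64 : 128;
            assert(end == 64);              /* padding fits one block */

            count = 56ULL * 8;              /* one byte more */
            index = count / 8 & 0x3f;       /* 56 */
            end = index < 56 ? 64 : 128;
            assert(end == 128);             /* spills into a second block */
            return 0;
    }
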
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 45d44c6bb39d..7d23edc6facb 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,12 +1,12 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc1
-# Wed Sep 14 16:46:19 2005
+# Linux kernel version: 2.6.15-rc2
+# Mon Nov 21 13:51:30 2005
 #
 CONFIG_MMU=y
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ARCH_S390=y
+CONFIG_S390=y
 CONFIG_UID16=y
 
 #
@@ -65,15 +65,31 @@ CONFIG_KMOD=y
 CONFIG_STOP_MACHINE=y
 
 #
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
 # Base setup
 #
 
 #
 # Processor type and features
 #
-# CONFIG_ARCH_S390X is not set
 # CONFIG_64BIT is not set
-CONFIG_ARCH_S390_31=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=32
 CONFIG_HOTPLUG_CPU=y
@@ -97,6 +113,7 @@ CONFIG_FLATMEM_MANUAL=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
 
 #
 # I/O subsystem configuration
@@ -188,10 +205,18 @@ CONFIG_IPV6=y
 # CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CLK_JIFFIES=y
 # CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
 # CONFIG_NET_SCH_CLK_CPU is not set
+
+#
+# Queueing/Scheduling
+#
 CONFIG_NET_SCH_CBQ=m
 # CONFIG_NET_SCH_HTB is not set
 # CONFIG_NET_SCH_HFSC is not set
@@ -204,8 +229,10 @@ CONFIG_NET_SCH_GRED=m
 CONFIG_NET_SCH_DSMARK=m
 # CONFIG_NET_SCH_NETEM is not set
 # CONFIG_NET_SCH_INGRESS is not set
-CONFIG_NET_QOS=y
-CONFIG_NET_ESTIMATOR=y
+
+#
+# Classification
+#
 CONFIG_NET_CLS=y
 # CONFIG_NET_CLS_BASIC is not set
 CONFIG_NET_CLS_TCINDEX=m
@@ -214,18 +241,18 @@ CONFIG_NET_CLS_ROUTE=y
 CONFIG_NET_CLS_FW=m
 CONFIG_NET_CLS_U32=m
 # CONFIG_CLS_U32_PERF is not set
-# CONFIG_NET_CLS_IND is not set
 CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 # CONFIG_NET_EMATCH is not set
 # CONFIG_NET_CLS_ACT is not set
 CONFIG_NET_CLS_POLICE=y
+# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_ESTIMATOR=y
 
 #
 # Network testing
 #
 # CONFIG_NET_PKTGEN is not set
-# CONFIG_NETFILTER_NETLINK is not set
 # CONFIG_HAMRADIO is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
@@ -276,6 +303,7 @@ CONFIG_SCSI_FC_ATTRS=y
 #
 # SCSI low-level drivers
 #
+# CONFIG_ISCSI_TCP is not set
 # CONFIG_SCSI_SATA is not set
 # CONFIG_SCSI_DEBUG is not set
 CONFIG_ZFCP=y
@@ -292,7 +320,6 @@ CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
 CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
 # CONFIG_CDROM_PKTCDVD is not set
 
 #
@@ -305,15 +332,8 @@ CONFIG_DASD_PROFILE=y
 CONFIG_DASD_ECKD=y
 CONFIG_DASD_FBA=y
 CONFIG_DASD_DIAG=y
+CONFIG_DASD_EER=m
 # CONFIG_DASD_CMB is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
 # CONFIG_ATA_OVER_ETH is not set
 
 #
@@ -378,7 +398,6 @@ CONFIG_S390_TAPE_34XX=m
 # CONFIG_VMLOGRDR is not set
 # CONFIG_VMCP is not set
 # CONFIG_MONREADER is not set
-# CONFIG_DCSS_SHM is not set
 
 #
 # Cryptographic devices
@@ -593,6 +612,8 @@ CONFIG_DEBUG_PREEMPT=y
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
 CONFIG_DEBUG_FS=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_RCU_TORTURE_TEST is not set
 
 #
 # Security options
@@ -609,17 +630,19 @@ CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_MD4 is not set
 # CONFIG_CRYPTO_MD5 is not set
 # CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA1_Z990 is not set
+# CONFIG_CRYPTO_SHA1_S390 is not set
 # CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA256_S390 is not set
 # CONFIG_CRYPTO_SHA512 is not set
 # CONFIG_CRYPTO_WP512 is not set
 # CONFIG_CRYPTO_TGR192 is not set
 # CONFIG_CRYPTO_DES is not set
-# CONFIG_CRYPTO_DES_Z990 is not set
+# CONFIG_CRYPTO_DES_S390 is not set
 # CONFIG_CRYPTO_BLOWFISH is not set
 # CONFIG_CRYPTO_TWOFISH is not set
 # CONFIG_CRYPTO_SERPENT is not set
 # CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_AES_S390 is not set
 # CONFIG_CRYPTO_CAST5 is not set
 # CONFIG_CRYPTO_CAST6 is not set
 # CONFIG_CRYPTO_TEA is not set
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7434c32bc631..4865e4b49464 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -8,31 +8,26 @@ obj-y := bitmap.o traps.o time.o process.o \
 		setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
 		semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o
 
+obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
+obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
+
 extra-y				+= head.o init_task.o vmlinux.lds
 
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
 obj-$(CONFIG_SMP)		+= smp.o
 
-obj-$(CONFIG_S390_SUPPORT)	+= compat_linux.o compat_signal.o \
+obj-$(CONFIG_COMPAT)		+= compat_linux.o compat_signal.o \
 					compat_ioctl.o compat_wrapper.o \
 					compat_exec_domain.o
 obj-$(CONFIG_BINFMT_ELF32)	+= binfmt_elf32.o
 
-obj-$(CONFIG_ARCH_S390_31)	+= entry.o reipl.o
-obj-$(CONFIG_ARCH_S390X)	+= entry64.o reipl64.o
-
 obj-$(CONFIG_VIRT_TIMER)	+= vtime.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
-ifeq ($(CONFIG_ARCH_S390X),y)
-S390_KEXEC_OBJS += relocate_kernel64.o
-else
-S390_KEXEC_OBJS += relocate_kernel.o
-endif
+S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
 obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
 
-
 #
 # This is just to get the dependencies...
 #
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index ed877d0f27e6..41b197a3f3a3 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -279,7 +279,7 @@ asmlinkage long sys32_getegid16(void)
 
 static inline long get_tv32(struct timeval *o, struct compat_timeval *i)
 {
-	return (!access_ok(VERIFY_READ, tv32, sizeof(*tv32)) ||
+	return (!access_ok(VERIFY_READ, o, sizeof(*o)) ||
 		(__get_user(o->tv_sec, &i->tv_sec) ||
 		__get_user(o->tv_usec, &i->tv_usec)));
 }
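
The fix above replaces a reference to a nonexistent `tv32` variable with the actual parameter, keeping the shape of the helper: an access_ok() check followed by field-by-field __get_user() copies that widen the 32-bit compat fields into the native timeval. The widening step in plain C, without the user-access machinery:

    #include <stdint.h>

    /* Illustrative layouts only: a 31-bit process uses 32-bit time
     * fields, the 64-bit kernel timeval uses 64-bit ones. */
    struct compat_timeval32 { int32_t tv_sec; int32_t tv_usec; };
    struct native_timeval   { int64_t tv_sec; int64_t tv_usec; };

    static void widen_timeval(struct native_timeval *o,
                              const struct compat_timeval32 *i)
    {
            o->tv_sec = i->tv_sec;          /* sign-extended to 64 bits */
            o->tv_usec = i->tv_usec;
    }
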
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4ff6808456ea..fa2b3bc22f20 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -467,8 +467,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
 	if (err)
 		goto badframe;
 
-	/* It is more difficult to avoid calling this function than to
-	   call it and ignore errors. */
 	set_fs (KERNEL_DS);
 	do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]);
 	set_fs (old_fs);
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d47fecb42cc5..4ef44e536b2c 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -39,7 +39,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 
 	if (response != NULL && rlen > 0) {
 		memset(response, 0, rlen);
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 		asm volatile (	"lra	2,0(%2)\n"
 				"lr	4,%3\n"
 				"o	4,%6\n"
@@ -55,7 +55,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 			: "a" (cpcmd_buf), "d" (cmdlen),
 			  "a" (response), "d" (rlen), "m" (mask)
 			: "cc", "2", "3", "4", "5" );
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 		asm volatile (	"lrag	2,0(%2)\n"
 				"lgr	4,%3\n"
 				"o	4,%6\n"
@@ -73,11 +73,11 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 			: "a" (cpcmd_buf), "d" (cmdlen),
 			  "a" (response), "d" (rlen), "m" (mask)
 			: "cc", "2", "3", "4", "5" );
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 		EBCASC(response, rlen);
 	} else {
 		return_len = 0;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 		asm volatile (	"lra	2,0(%1)\n"
 				"lr	3,%2\n"
 				"diag	2,3,0x8\n"
@@ -85,7 +85,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 			: "=d" (return_code)
 			: "a" (cpcmd_buf), "d" (cmdlen)
 			: "2", "3" );
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 		asm volatile (	"lrag	2,0(%1)\n"
 				"lgr	3,%2\n"
 				"sam31\n"
@@ -95,7 +95,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 			: "=d" (return_code)
 			: "a" (cpcmd_buf), "d" (cmdlen)
 			: "2", "3" );
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 	}
 	spin_unlock_irqrestore(&cpcmd_lock, flags);
 	if (response_code != NULL)
@@ -105,7 +105,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 
 EXPORT_SYMBOL(__cpcmd);
 
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 {
 	char *lowbuf;
@@ -129,4 +129,4 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
 }
 
 EXPORT_SYMBOL(cpcmd);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4eb71ffcf484..369ab4413ec7 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -213,7 +213,7 @@ sysc_nr_ok:
 	mvc	SP_ARGS(8,%r15),SP_R7(%r15)
 sysc_do_restart:
 	larl	%r10,sys_call_table
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 	tm	__TI_flags+5(%r9),(_TIF_31BIT>>16)  # running in 31 bit mode ?
 	jno	sysc_noemu
 	larl	%r10,sys_call_table_emu  # use 31 bit emulation system calls
@@ -361,7 +361,7 @@ sys_clone_glue:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	jg	sys_clone		# branch to sys_clone
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 sys32_clone_glue:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	jg	sys32_clone		# branch to sys32_clone
@@ -383,7 +383,7 @@ sys_execve_glue:
 	bnz	0(%r12)			# it did fail -> store result in gpr2
 	b	6(%r12)			# SKIP STG 2,SP_R2(15) in
 					# system_call/sysc_tracesys
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 sys32_execve_glue:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	lgr	%r12,%r14		# save return address
@@ -398,7 +398,7 @@ sys_sigreturn_glue:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
 	jg	sys_sigreturn		# branch to sys_sigreturn
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 sys32_sigreturn_glue:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
 	jg	sys32_sigreturn		# branch to sys32_sigreturn
@@ -408,7 +408,7 @@ sys_rt_sigreturn_glue:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
 	jg	sys_rt_sigreturn	# branch to sys_sigreturn
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 sys32_rt_sigreturn_glue:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs as parameter
 	jg	sys32_rt_sigreturn	# branch to sys32_sigreturn
@@ -429,7 +429,7 @@ sys_sigsuspend_glue:
 	la	%r14,6(%r14)		# skip store of return value
 	jg	sys_sigsuspend		# branch to sys_sigsuspend
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 sys32_sigsuspend_glue:
 	llgfr	%r4,%r4			# unsigned long
 	lgr	%r5,%r4			# move mask back
@@ -449,7 +449,7 @@ sys_rt_sigsuspend_glue:
 	la	%r14,6(%r14)		# skip store of return value
 	jg	sys_rt_sigsuspend	# branch to sys_rt_sigsuspend
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 sys32_rt_sigsuspend_glue:
 	llgfr	%r3,%r3			# size_t
 	lgr	%r4,%r3			# move sigsetsize parameter
@@ -464,7 +464,7 @@ sys_sigaltstack_glue:
 	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
 	jg	sys_sigaltstack		# branch to sys_sigreturn
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 sys32_sigaltstack_glue:
 	la	%r4,SP_PTREGS(%r15)	# load pt_regs as parameter
 	jg	sys32_sigaltstack_wrapper # branch to sys_sigreturn
@@ -1009,7 +1009,7 @@ sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 
 #define SYSCALL(esa,esame,emu)	.long emu
 	.globl	sys_call_table_emu
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d31a97c89f68..ea88d066bf04 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -30,7 +30,7 @@
 #include <asm/thread_info.h>
 #include <asm/page.h>
 
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 #define ARCH_OFFSET	4
 #else
 #define ARCH_OFFSET	0
@@ -539,7 +539,7 @@ ipl_devno:
 	.word 0
 .endm
 
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 #include "head64.S"
 #else
 #include "head31.S"
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 5aa71b05b8ae..f0ed5c642c74 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
 	pfault_fini();
 #endif
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);
 
 	/* Wait for all other cpus to enter stopped state */
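
The old s390-private atomic_compare_and_swap(-1, id, &cpuid) returned nonzero when the swap was not performed; the generic atomic_cmpxchg() instead returns the value previously held, so "somebody else already claimed it" becomes `!= -1`. The pattern itself is a first-caller-wins latch, sketched here with C11 atomics rather than the kernel primitives:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cpuid = ATOMIC_VAR_INIT(-1);

    /* Returns 1 only for the single caller that swaps the initial -1
     * for its own id; every later caller loses the race. */
    static int claim(int my_id)
    {
            int expected = -1;
            return atomic_compare_exchange_strong(&cpuid, &expected, my_id);
    }

    int main(void)
    {
            printf("cpu 0: %s\n", claim(0) ? "wins" : "stops");
            printf("cpu 1: %s\n", claim(1) ? "wins" : "stops");
            return 0;
    }
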
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 607d506689c8..c271cdab58e2 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -37,11 +37,11 @@
 #define DEBUGP(fmt , ...)
 #endif
 
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 #define PLT_ENTRY_SIZE 12
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 #define PLT_ENTRY_SIZE 20
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 
 void *module_alloc(unsigned long size)
 {
@@ -294,17 +294,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
 			unsigned int *ip;
 			ip = me->module_core + me->arch.plt_offset +
 				info->plt_offset;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
 			ip[1] = 0x100607f1;
 			ip[2] = val;
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
 			ip[1] = 0x100a0004;
 			ip[2] = 0x07f10000;
 			ip[3] = (unsigned int) (val >> 32);
 			ip[4] = (unsigned int) val;
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 			info->plt_initialized = 1;
 		}
 		if (r_type == R_390_PLTOFF16 ||
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 78b64fe5e7c2..a942bf2d58e9 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -235,7 +235,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
 	/* Save access registers to new thread structure. */
 	save_access_regs(&p->thread.acrs[0]);
 
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	/*
 	 * save fprs to current->thread.fp_regs to merge them with
 	 * the emulated registers and then copy the result to the child.
@@ -247,7 +247,7 @@
 	/* Set a new TLS ?  */
 	if (clone_flags & CLONE_SETTLS)
 		p->thread.acrs[0] = regs->gprs[6];
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 	/* Save the fpu registers to new thread structure. */
 	save_fp_regs(&p->thread.fp_regs);
 	p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
@@ -260,7 +260,7 @@
 			p->thread.acrs[1] = (unsigned int) regs->gprs[6];
 		}
 	}
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 	/* start new process with ar4 pointing to the correct address space */
 	p->thread.mm_segment = get_fs();
 	/* Don't copy debug registers */
@@ -339,16 +339,16 @@ out:
 	 */
 int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
 {
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	/*
 	 * save fprs to current->thread.fp_regs to merge them with
 	 * the emulated registers and then copy the result to the dump.
 	 */
 	save_fp_regs(&current->thread.fp_regs);
 	memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 	save_fp_regs(fpregs);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 	return 1;
 }
 
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 06afa3103ace..8ecda6d66de4 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -42,7 +42,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 #include "compat_ptrace.h"
 #endif
 
@@ -59,7 +59,7 @@ FixPerRegisters(struct task_struct *task)
 
 	if (per_info->single_step) {
 		per_info->control_regs.bits.starting_addr = 0;
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 		if (test_thread_flag(TIF_31BIT))
 			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
 		else
@@ -112,7 +112,7 @@ ptrace_disable(struct task_struct *child)
 	clear_single_step(child);
 }
 
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 # define __ADDR_MASK 3
 #else
 # define __ADDR_MASK 7
@@ -138,7 +138,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
 	 * an alignment of 4. Programmers from hell...
 	 */
 	mask = __ADDR_MASK;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 	if (addr >= (addr_t) &dummy->regs.acrs &&
 	    addr < (addr_t) &dummy->regs.orig_gpr2)
 		mask = 3;
@@ -160,7 +160,7 @@
 		 * access registers are stored in the thread structure
 		 */
 		offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 		/*
 		 * Very special case: old & broken 64 bit gdb reading
 		 * from acrs[15]. Result is a 64 bit value. Read the
@@ -218,7 +218,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
 	 * an alignment of 4. Programmers from hell indeed...
 	 */
 	mask = __ADDR_MASK;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 	if (addr >= (addr_t) &dummy->regs.acrs &&
 	    addr < (addr_t) &dummy->regs.orig_gpr2)
 		mask = 3;
@@ -231,13 +231,13 @@
 	 * psw and gprs are stored on the stack
 	 */
 	if (addr == (addr_t) &dummy->regs.psw.mask &&
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 	    data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
 #endif
 	    data != PSW_MASK_MERGE(PSW_USER_BITS, data))
 		/* Invalid psw mask. */
 		return -EINVAL;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	if (addr == (addr_t) &dummy->regs.psw.addr)
 		/* I'd like to reject addresses without the
 		   high order bit but older gdb's rely on it */
@@ -250,7 +250,7 @@
 		 * access registers are stored in the thread structure
 		 */
 		offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 		/*
 		 * Very special case: old & broken 64 bit gdb writing
 		 * to acrs[15] with a 64 bit value. Ignore the lower
@@ -357,7 +357,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
 	return ptrace_request(child, request, addr, data);
 }
 
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 /*
  * Now the fun part starts... a 31 bit program running in the
  * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
@@ -629,7 +629,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
 		return peek_user(child, addr, data);
 	if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
 		return poke_user(child, addr, data);
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 	if (request == PTRACE_PEEKUSR &&
 	    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
 		return peek_user_emu31(child, addr, data);
@@ -695,7 +695,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
 
 	/* Do requests that differ for 31/64 bit */
 	default:
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 		if (test_thread_flag(TIF_31BIT))
 			return do_ptrace_emu31(child, request, addr, data);
 #endif
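
Both peek_user() and poke_user() reject offsets into the user area that are not aligned to the word size (__ADDR_MASK is the word size minus one), relaxing the mask to 3 inside the access-register block, whose members stay 32-bit even on 64-bit kernels. The mask test in isolation:

    #include <stdio.h>

    #define ADDR_MASK 7     /* 64-bit word size - 1; 3 on 31-bit */

    /* Nonzero low bits mean the offset is not word aligned. */
    static int offset_ok(unsigned long addr, unsigned long mask)
    {
            return (addr & mask) == 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   offset_ok(16, ADDR_MASK),    /* 1: aligned */
                   offset_ok(20, ADDR_MASK),    /* 0: rejected with -EIO */
                   offset_ok(20, 3));           /* 1: fine in the acrs block */
            return 0;
    }
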
diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c
index 83cb42bc0b76..1f33951ba439 100644
--- a/arch/s390/kernel/reipl_diag.c
+++ b/arch/s390/kernel/reipl_diag.c
@@ -26,7 +26,7 @@ void reipl_diag(void)
26 " st %%r4,%0\n" 26 " st %%r4,%0\n"
27 " st %%r5,%1\n" 27 " st %%r5,%1\n"
28 ".section __ex_table,\"a\"\n" 28 ".section __ex_table,\"a\"\n"
29#ifdef __s390x__ 29#ifdef CONFIG_64BIT
30 " .align 8\n" 30 " .align 8\n"
31 " .quad 0b, 0b\n" 31 " .quad 0b, 0b\n"
32#else 32#else
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 31e7b19348b7..b03847d100d9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -427,7 +427,7 @@ setup_lowcore(void)
 		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
 	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
 	lc->thread_info = (unsigned long) &init_thread_union;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE) {
 		lc->extended_save_area_addr = (__u32)
 			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
@@ -562,21 +562,21 @@ setup_arch(char **cmdline_p)
 	/*
 	 * print what head.S has found out about the machine
 	 */
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	printk((MACHINE_IS_VM) ?
 	       "We are running under VM (31 bit mode)\n" :
 	       "We are running native (31 bit mode)\n");
 	printk((MACHINE_HAS_IEEE) ?
 	       "This machine has an IEEE fpu\n" :
 	       "This machine has no IEEE fpu\n");
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 	printk((MACHINE_IS_VM) ?
 	       "We are running under VM (64 bit mode)\n" :
 	       "We are running native (64 bit mode)\n");
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 
 	ROOT_DEV = Root_RAM0;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	memory_end = memory_size & ~0x400000UL;  /* align memory end to 4MB */
 	/*
 	 * We need some free virtual space to be able to do vmalloc.
@@ -585,9 +585,9 @@ setup_arch(char **cmdline_p)
 	 */
 	if (memory_end > 1920*1024*1024)
 		memory_end = 1920*1024*1024;
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 	memory_end = memory_size & ~0x200000UL;  /* detected in head.s */
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
 
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6e0110d71191..6ae4a77270b5 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -254,9 +254,9 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 
-	/* It is more difficult to avoid calling this function than to
-	   call it and ignore errors. */
-	do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]);
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL,
+			   regs->gprs[15]) == -EFAULT)
+		goto badframe;
 	return regs->gprs[2];
 
 badframe:
@@ -501,7 +501,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 
 	if (signr > 0) {
 		/* Whee!  Actually deliver the signal.  */
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
 		if (test_thread_flag(TIF_31BIT)) {
 			extern void handle_signal32(unsigned long sig,
 						    struct k_sigaction *ka,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5856b3fda6bf..e10f4ca00499 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -263,7 +263,7 @@ static void do_machine_restart(void * __unused)
 	int cpu;
 	static atomic_t cpuid = ATOMIC_INIT(-1);
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);
 
 	/* Wait for all other cpus to enter stopped state */
@@ -313,7 +313,7 @@ static void do_machine_halt(void * __unused)
 {
 	static atomic_t cpuid = ATOMIC_INIT(-1);
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
 		smp_send_stop();
 		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
 			cpcmd(vmhalt_cmd, NULL, 0, NULL);
@@ -332,7 +332,7 @@ static void do_machine_power_off(void * __unused)
 {
 	static atomic_t cpuid = ATOMIC_INIT(-1);
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
 		smp_send_stop();
 		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
 			cpcmd(vmpoff_cmd, NULL, 0, NULL);
@@ -402,7 +402,7 @@ static void smp_ext_bitcall_others(ec_bit_sig sig)
 	}
 }
 
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 /*
  * this function sends a 'purge tlb' signal to another CPU.
  */
@@ -416,7 +416,7 @@ void smp_ptlb_all(void)
 	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
 }
 EXPORT_SYMBOL(smp_ptlb_all);
-#endif /* ! CONFIG_ARCH_S390X */
+#endif /* ! CONFIG_64BIT */
 
 /*
  * this function sends a 'reschedule' IPI to another CPU.
@@ -783,7 +783,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (stack == 0ULL)
 			panic("smp_boot_cpus failed to allocate memory\n");
 		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 		if (MACHINE_HAS_IEEE) {
 			lowcore_ptr[i]->extended_save_area_addr =
 				(__u32) __get_free_pages(GFP_KERNEL,0);
@@ -793,7 +793,7 @@
 		}
 #endif
 	}
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
 		ctl_set_bit(14, 29); /* enable extended save area */
 #endif
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index efe6b83b53f7..6a63553493c5 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -26,9 +26,7 @@
26#include <linux/mman.h> 26#include <linux/mman.h>
27#include <linux/file.h> 27#include <linux/file.h>
28#include <linux/utsname.h> 28#include <linux/utsname.h>
29#ifdef CONFIG_ARCH_S390X
30#include <linux/personality.h> 29#include <linux/personality.h>
31#endif /* CONFIG_ARCH_S390X */
32 30
33#include <asm/uaccess.h> 31#include <asm/uaccess.h>
34#include <asm/ipc.h> 32#include <asm/ipc.h>
@@ -121,7 +119,7 @@ out:
121 return error; 119 return error;
122} 120}
123 121
124#ifndef CONFIG_ARCH_S390X 122#ifndef CONFIG_64BIT
125struct sel_arg_struct { 123struct sel_arg_struct {
126 unsigned long n; 124 unsigned long n;
127 fd_set *inp, *outp, *exp; 125 fd_set *inp, *outp, *exp;
@@ -138,7 +136,7 @@ asmlinkage long old_select(struct sel_arg_struct __user *arg)
138 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); 136 return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
139 137
140} 138}
141#endif /* CONFIG_ARCH_S390X */ 139#endif /* CONFIG_64BIT */
142 140
143/* 141/*
144 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 142 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
@@ -211,7 +209,7 @@ asmlinkage long sys_ipc(uint call, int first, unsigned long second,
211 return -EINVAL; 209 return -EINVAL;
212} 210}
213 211
214#ifdef CONFIG_ARCH_S390X 212#ifdef CONFIG_64BIT
215asmlinkage long s390x_newuname(struct new_utsname __user *name) 213asmlinkage long s390x_newuname(struct new_utsname __user *name)
216{ 214{
217 int ret = sys_newuname(name); 215 int ret = sys_newuname(name);
@@ -235,12 +233,12 @@ asmlinkage long s390x_personality(unsigned long personality)
235 233
236 return ret; 234 return ret;
237} 235}
238#endif /* CONFIG_ARCH_S390X */ 236#endif /* CONFIG_64BIT */
239 237
240/* 238/*
241 * Wrapper function for sys_fadvise64/fadvise64_64 239 * Wrapper function for sys_fadvise64/fadvise64_64
242 */ 240 */
243#ifndef CONFIG_ARCH_S390X 241#ifndef CONFIG_64BIT
244 242
245asmlinkage long 243asmlinkage long
246s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice) 244s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
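The hunk is cut off at the wrapper's declaration. On 31-bit s390 a 64-bit file offset does not fit in one register, so the syscall ABI delivers it as two u32 halves; the wrapper presumably reassembles them before handing off to the generic syscall, roughly (sketch, body inferred from the declaration above):

asmlinkage long
s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
	       size_t len, int advice)
{
	/* Recombine the two register halves into one 64-bit offset. */
	return sys_fadvise64(fd, ((u64) offset_high << 32) | offset_low,
			     len, advice);
}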
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c5bd36fae56b..95d109968619 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -67,13 +67,13 @@ extern pgm_check_handler_t do_monitor_call;
67 67
68#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 68#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
69 69
70#ifndef CONFIG_ARCH_S390X 70#ifndef CONFIG_64BIT
71#define FOURLONG "%08lx %08lx %08lx %08lx\n" 71#define FOURLONG "%08lx %08lx %08lx %08lx\n"
72static int kstack_depth_to_print = 12; 72static int kstack_depth_to_print = 12;
73#else /* CONFIG_ARCH_S390X */ 73#else /* CONFIG_64BIT */
74#define FOURLONG "%016lx %016lx %016lx %016lx\n" 74#define FOURLONG "%016lx %016lx %016lx %016lx\n"
75static int kstack_depth_to_print = 20; 75static int kstack_depth_to_print = 20;
76#endif /* CONFIG_ARCH_S390X */ 76#endif /* CONFIG_64BIT */
77 77
78/* 78/*
79 * For show_trace we have three different stacks to consider: 79 * For show_trace we have three different stacks to consider:
@@ -702,12 +702,12 @@ void __init trap_init(void)
702 pgm_check_table[0x11] = &do_dat_exception; 702 pgm_check_table[0x11] = &do_dat_exception;
703 pgm_check_table[0x12] = &translation_exception; 703 pgm_check_table[0x12] = &translation_exception;
704 pgm_check_table[0x13] = &special_op_exception; 704 pgm_check_table[0x13] = &special_op_exception;
705#ifdef CONFIG_ARCH_S390X 705#ifdef CONFIG_64BIT
706 pgm_check_table[0x38] = &do_dat_exception; 706 pgm_check_table[0x38] = &do_dat_exception;
707 pgm_check_table[0x39] = &do_dat_exception; 707 pgm_check_table[0x39] = &do_dat_exception;
708 pgm_check_table[0x3A] = &do_dat_exception; 708 pgm_check_table[0x3A] = &do_dat_exception;
709 pgm_check_table[0x3B] = &do_dat_exception; 709 pgm_check_table[0x3B] = &do_dat_exception;
710#endif /* CONFIG_ARCH_S390X */ 710#endif /* CONFIG_64BIT */
711 pgm_check_table[0x15] = &operand_exception; 711 pgm_check_table[0x15] = &operand_exception;
712 pgm_check_table[0x1C] = &space_switch_exception; 712 pgm_check_table[0x1C] = &space_switch_exception;
713 pgm_check_table[0x1D] = &hfp_sqrt_exception; 713 pgm_check_table[0x1D] = &hfp_sqrt_exception;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 89fdb3808bc0..9289face3027 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -5,7 +5,7 @@
5#include <asm-generic/vmlinux.lds.h> 5#include <asm-generic/vmlinux.lds.h>
6#include <linux/config.h> 6#include <linux/config.h>
7 7
8#ifndef CONFIG_ARCH_S390X 8#ifndef CONFIG_64BIT
9OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 9OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
10OUTPUT_ARCH(s390) 10OUTPUT_ARCH(s390)
11ENTRY(_start) 11ENTRY(_start)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b701efa1f00e..d9b97b3c597f 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,5 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o 7lib-y += delay.o string.o spinlock.o
8lib-$(CONFIG_ARCH_S390_31) += uaccess.o spinlock.o 8lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o)
9lib-$(CONFIG_ARCH_S390X) += uaccess64.o spinlock.o
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 2dc14e9c8327..68d79c502081 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -29,7 +29,7 @@ __setup("spin_retry=", spin_retry_setup);
29static inline void 29static inline void
30_diag44(void) 30_diag44(void)
31{ 31{
32#ifdef __s390x__ 32#ifdef CONFIG_64BIT
33 if (MACHINE_HAS_DIAG44) 33 if (MACHINE_HAS_DIAG44)
34#endif 34#endif
35 asm volatile("diag 0,0,0x44"); 35 asm volatile("diag 0,0,0x44");
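diag 0x44 asks the hypervisor to give away the rest of this virtual CPU's timeslice, which is the polite thing to do while spinning on a lock held by another virtual CPU. On 64-bit machines its availability is a run-time property (MACHINE_HAS_DIAG44); 31-bit machines are assumed to have it, which is exactly what the #ifdef above encodes. A sketch of the intended caller; lock_sketch and try_lock() are hypothetical stand-ins for the arch spinlock (the real code uses the cs instruction):

struct lock_sketch { volatile unsigned int owner; };

static int try_lock(struct lock_sketch *lp)
{
	return __sync_bool_compare_and_swap(&lp->owner, 0, 1);
}

static void spin_sketch(struct lock_sketch *lp)
{
	int count = spin_retry;		/* tunable set up above */

	while (!try_lock(lp)) {
		if (--count <= 0) {
			_diag44();	/* yield to the hypervisor */
			count = spin_retry;
		}
	}
}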
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 506a33b51e4f..a9566bcab682 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -143,7 +143,7 @@ dcss_diag (__u8 func, void *parameter,
143 rx = (unsigned long) parameter; 143 rx = (unsigned long) parameter;
144 ry = (unsigned long) func; 144 ry = (unsigned long) func;
145 __asm__ __volatile__( 145 __asm__ __volatile__(
146#ifdef CONFIG_ARCH_S390X 146#ifdef CONFIG_64BIT
147 " sam31\n" // switch to 31 bit 147 " sam31\n" // switch to 31 bit
148 " diag %0,%1,0x64\n" 148 " diag %0,%1,0x64\n"
149 " sam64\n" // switch back to 64 bit 149 " sam64\n" // switch back to 64 bit
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fb2607c369ed..81ade401b073 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,17 +31,17 @@
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33 33
34#ifndef CONFIG_ARCH_S390X 34#ifndef CONFIG_64BIT
35#define __FAIL_ADDR_MASK 0x7ffff000 35#define __FAIL_ADDR_MASK 0x7ffff000
36#define __FIXUP_MASK 0x7fffffff 36#define __FIXUP_MASK 0x7fffffff
37#define __SUBCODE_MASK 0x0200 37#define __SUBCODE_MASK 0x0200
38#define __PF_RES_FIELD 0ULL 38#define __PF_RES_FIELD 0ULL
39#else /* CONFIG_ARCH_S390X */ 39#else /* CONFIG_64BIT */
40#define __FAIL_ADDR_MASK -4096L 40#define __FAIL_ADDR_MASK -4096L
41#define __FIXUP_MASK ~0L 41#define __FIXUP_MASK ~0L
42#define __SUBCODE_MASK 0x0600 42#define __SUBCODE_MASK 0x0600
43#define __PF_RES_FIELD 0x8000000000000000ULL 43#define __PF_RES_FIELD 0x8000000000000000ULL
44#endif /* CONFIG_ARCH_S390X */ 44#endif /* CONFIG_64BIT */
45 45
46#ifdef CONFIG_SYSCTL 46#ifdef CONFIG_SYSCTL
47extern int sysctl_userprocess_debug; 47extern int sysctl_userprocess_debug;
@@ -393,11 +393,11 @@ int pfault_init(void)
393 "2:\n" 393 "2:\n"
394 ".section __ex_table,\"a\"\n" 394 ".section __ex_table,\"a\"\n"
395 " .align 4\n" 395 " .align 4\n"
396#ifndef CONFIG_ARCH_S390X 396#ifndef CONFIG_64BIT
397 " .long 0b,1b\n" 397 " .long 0b,1b\n"
398#else /* CONFIG_ARCH_S390X */ 398#else /* CONFIG_64BIT */
399 " .quad 0b,1b\n" 399 " .quad 0b,1b\n"
400#endif /* CONFIG_ARCH_S390X */ 400#endif /* CONFIG_64BIT */
401 ".previous" 401 ".previous"
402 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" ); 402 : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
403 __ctl_set_bit(0, 9); 403 __ctl_set_bit(0, 9);
@@ -417,11 +417,11 @@ void pfault_fini(void)
417 "0:\n" 417 "0:\n"
418 ".section __ex_table,\"a\"\n" 418 ".section __ex_table,\"a\"\n"
419 " .align 4\n" 419 " .align 4\n"
420#ifndef CONFIG_ARCH_S390X 420#ifndef CONFIG_64BIT
421 " .long 0b,0b\n" 421 " .long 0b,0b\n"
422#else /* CONFIG_ARCH_S390X */ 422#else /* CONFIG_64BIT */
423 " .quad 0b,0b\n" 423 " .quad 0b,0b\n"
424#endif /* CONFIG_ARCH_S390X */ 424#endif /* CONFIG_64BIT */
425 ".previous" 425 ".previous"
426 : : "a" (&refbk), "m" (refbk) : "cc" ); 426 : : "a" (&refbk), "m" (refbk) : "cc" );
427} 427}
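The same two-line #ifdef shows up in both pfault_init() and pfault_fini() because an __ex_table entry is a pair of addresses (faulting instruction, fixup), and its width has to match the kernel's pointer size: .long on 31-bit, .quad on 64-bit. One way the repetition could be factored, shown purely as a sketch (EX_TABLE is hypothetical; the patch keeps the inline #ifdef):

#ifndef CONFIG_64BIT
#define EX_TABLE(fault, target)			\
	".section __ex_table,\"a\"\n"		\
	"	.align 4\n"			\
	"	.long " fault "," target "\n"	\
	".previous\n"
#else
#define EX_TABLE(fault, target)			\
	".section __ex_table,\"a\"\n"		\
	"	.align 4\n"			\
	"	.quad " fault "," target "\n"	\
	".previous\n"
#endif

A caller would then drop EX_TABLE("0b", "1b") into the asm string in place of the five explicit lines above.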
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6ec5cd981e74..df953383724d 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,7 +44,7 @@ void diag10(unsigned long addr)
44{ 44{
45 if (addr >= 0x7ff00000) 45 if (addr >= 0x7ff00000)
46 return; 46 return;
47#ifdef __s390x__ 47#ifdef CONFIG_64BIT
48 asm volatile ( 48 asm volatile (
49 " sam31\n" 49 " sam31\n"
50 " diag %0,%0,0x10\n" 50 " diag %0,%0,0x10\n"
@@ -106,7 +106,7 @@ extern unsigned long __initdata zholes_size[];
106 * paging_init() sets up the page tables 106 * paging_init() sets up the page tables
107 */ 107 */
108 108
109#ifndef CONFIG_ARCH_S390X 109#ifndef CONFIG_64BIT
110void __init paging_init(void) 110void __init paging_init(void)
111{ 111{
112 pgd_t * pg_dir; 112 pgd_t * pg_dir;
@@ -175,7 +175,7 @@ void __init paging_init(void)
175 return; 175 return;
176} 176}
177 177
178#else /* CONFIG_ARCH_S390X */ 178#else /* CONFIG_64BIT */
179void __init paging_init(void) 179void __init paging_init(void)
180{ 180{
181 pgd_t * pg_dir; 181 pgd_t * pg_dir;
@@ -256,7 +256,7 @@ void __init paging_init(void)
256 256
257 return; 257 return;
258} 258}
259#endif /* CONFIG_ARCH_S390X */ 259#endif /* CONFIG_64BIT */
260 260
261void __init mem_init(void) 261void __init mem_init(void)
262{ 262{
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index fb187e5a54b4..356257c171de 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -50,7 +50,7 @@ static inline unsigned long mmap_base(void)
50 50
51static inline int mmap_is_legacy(void) 51static inline int mmap_is_legacy(void)
52{ 52{
53#ifdef CONFIG_ARCH_S390X 53#ifdef CONFIG_64BIT
54 /* 54 /*
55 * Force standard allocation for 64 bit programs. 55 * Force standard allocation for 64 bit programs.
56 */ 56 */
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index ec349276258a..537b2d840e69 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,4 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
6 oprofilefs.o oprofile_stats.o \ 6 oprofilefs.o oprofile_stats.o \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) init.o 9oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
new file mode 100644
index 000000000000..bc4b84a35cad
--- /dev/null
+++ b/arch/s390/oprofile/backtrace.c
@@ -0,0 +1,79 @@
1/**
2 * arch/s390/oprofile/backtrace.c
3 *
4 * S390 Version
5 * Copyright (C) 2005 IBM Corporation, IBM Deutschland Entwicklung GmbH.
6 * Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
7 */
8
9#include <linux/oprofile.h>
10
11#include <asm/processor.h> /* for struct stack_frame */
12
13static unsigned long
14__show_trace(unsigned int *depth, unsigned long sp,
15 unsigned long low, unsigned long high)
16{
17 struct stack_frame *sf;
18 struct pt_regs *regs;
19
20 while (*depth) {
21 sp = sp & PSW_ADDR_INSN;
22 if (sp < low || sp > high - sizeof(*sf))
23 return sp;
24 sf = (struct stack_frame *) sp;
25 (*depth)--;
26 oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
27
28 /* Follow the backchain. */
29 while (*depth) {
30 low = sp;
31 sp = sf->back_chain & PSW_ADDR_INSN;
32 if (!sp)
33 break;
34 if (sp <= low || sp > high - sizeof(*sf))
35 return sp;
36 sf = (struct stack_frame *) sp;
37 (*depth)--;
38 oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
39
40 }
41
42 if (*depth == 0)
43 break;
44
45 /* Zero backchain detected, check for interrupt frame. */
46 sp = (unsigned long) (sf + 1);
47 if (sp <= low || sp > high - sizeof(*regs))
48 return sp;
49 regs = (struct pt_regs *) sp;
50 (*depth)--;
51 oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
52 low = sp;
53 sp = regs->gprs[15];
54 }
55 return sp;
56}
57
58void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
59{
60 unsigned long head;
61 struct stack_frame* head_sf;
62
63 if (user_mode (regs))
64 return;
65
66 head = regs->gprs[15];
67 head_sf = (struct stack_frame*)head;
68
69 if (!head_sf->back_chain)
70 return;
71
72 head = head_sf->back_chain;
73
74 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE,
75 S390_lowcore.async_stack);
76
77 __show_trace(&depth, head, S390_lowcore.thread_info,
78 S390_lowcore.thread_info + THREAD_SIZE);
79}
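__show_trace() leans entirely on the s390 calling convention: each function stores the caller's frame address at offset 0 of its own frame (the back chain, 0 in the outermost frame) and saves the caller's r6-r15 in a fixed slot, so gprs[8] is r14, the return address; PSW_ADDR_INSN strips the addressing-mode bit before a saved value is used as a pointer. The layout the walker assumes is roughly (sketch; the real struct stack_frame comes from <asm/processor.h>):

struct stack_frame {			/* sketch of the assumed layout */
	unsigned long back_chain;	/* caller's frame; 0 terminates */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved r6..r15; gprs[8] == r14,
					 * i.e. the return address */
	unsigned int empty2[8];
};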
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a65ead0e200a..7a995113b918 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -12,8 +12,12 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14 14
15
16extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
17
15int __init oprofile_arch_init(struct oprofile_operations* ops) 18int __init oprofile_arch_init(struct oprofile_operations* ops)
16{ 19{
20 ops->backtrace = s390_backtrace;
17 return -ENODEV; 21 return -ENODEV;
18} 22}
19 23
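oprofile_arch_init() still returns -ENODEV, so the oprofile core keeps using its timer interrupt as the sample source; the difference is that ops->backtrace is now filled in first, so every timer sample can be annotated with a kernel call chain. The assumed consumption on the core's side is roughly (sketch):

static void sample_sketch(struct oprofile_operations *ops,
			  struct pt_regs *regs, unsigned int depth)
{
	oprofile_add_sample(regs, 0);		/* the sample itself */
	if (depth && ops->backtrace)
		ops->backtrace(regs, depth);	/* s390_backtrace() here */
}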
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 5b58fad45290..cd13b91b9ff6 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -58,7 +58,7 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts)
58{ 58{
59 my_puts("Using a channel type which is configured out of " 59 my_puts("Using a channel type which is configured out of "
60 "UML\n"); 60 "UML\n");
61 return(NULL); 61 return NULL;
62} 62}
63 63
64static int not_configged_open(int input, int output, int primary, void *data, 64static int not_configged_open(int input, int output, int primary, void *data,
@@ -66,7 +66,7 @@ static int not_configged_open(int input, int output, int primary, void *data,
66{ 66{
67 my_puts("Using a channel type which is configured out of " 67 my_puts("Using a channel type which is configured out of "
68 "UML\n"); 68 "UML\n");
69 return(-ENODEV); 69 return -ENODEV;
70} 70}
71 71
72static void not_configged_close(int fd, void *data) 72static void not_configged_close(int fd, void *data)
@@ -79,21 +79,21 @@ static int not_configged_read(int fd, char *c_out, void *data)
79{ 79{
80 my_puts("Using a channel type which is configured out of " 80 my_puts("Using a channel type which is configured out of "
81 "UML\n"); 81 "UML\n");
82 return(-EIO); 82 return -EIO;
83} 83}
84 84
85static int not_configged_write(int fd, const char *buf, int len, void *data) 85static int not_configged_write(int fd, const char *buf, int len, void *data)
86{ 86{
87 my_puts("Using a channel type which is configured out of " 87 my_puts("Using a channel type which is configured out of "
88 "UML\n"); 88 "UML\n");
89 return(-EIO); 89 return -EIO;
90} 90}
91 91
92static int not_configged_console_write(int fd, const char *buf, int len) 92static int not_configged_console_write(int fd, const char *buf, int len)
93{ 93{
94 my_puts("Using a channel type which is configured out of " 94 my_puts("Using a channel type which is configured out of "
95 "UML\n"); 95 "UML\n");
96 return(-EIO); 96 return -EIO;
97} 97}
98 98
99static int not_configged_window_size(int fd, void *data, unsigned short *rows, 99static int not_configged_window_size(int fd, void *data, unsigned short *rows,
@@ -101,7 +101,7 @@ static int not_configged_window_size(int fd, void *data, unsigned short *rows,
101{ 101{
102 my_puts("Using a channel type which is configured out of " 102 my_puts("Using a channel type which is configured out of "
103 "UML\n"); 103 "UML\n");
104 return(-ENODEV); 104 return -ENODEV;
105} 105}
106 106
107static void not_configged_free(void *data) 107static void not_configged_free(void *data)
@@ -135,17 +135,17 @@ int generic_read(int fd, char *c_out, void *unused)
135 n = os_read_file(fd, c_out, sizeof(*c_out)); 135 n = os_read_file(fd, c_out, sizeof(*c_out));
136 136
137 if(n == -EAGAIN) 137 if(n == -EAGAIN)
138 return(0); 138 return 0;
139 else if(n == 0) 139 else if(n == 0)
140 return(-EIO); 140 return -EIO;
141 return(n); 141 return n;
142} 142}
143 143
144/* XXX Trivial wrapper around os_write_file */ 144/* XXX Trivial wrapper around os_write_file */
145 145
146int generic_write(int fd, const char *buf, int n, void *unused) 146int generic_write(int fd, const char *buf, int n, void *unused)
147{ 147{
148 return(os_write_file(fd, buf, n)); 148 return os_write_file(fd, buf, n);
149} 149}
150 150
151int generic_window_size(int fd, void *unused, unsigned short *rows_out, 151int generic_window_size(int fd, void *unused, unsigned short *rows_out,
@@ -156,14 +156,14 @@ int generic_window_size(int fd, void *unused, unsigned short *rows_out,
156 156
157 ret = os_window_size(fd, &rows, &cols); 157 ret = os_window_size(fd, &rows, &cols);
158 if(ret < 0) 158 if(ret < 0)
159 return(ret); 159 return ret;
160 160
161 ret = ((*rows_out != rows) || (*cols_out != cols)); 161 ret = ((*rows_out != rows) || (*cols_out != cols));
162 162
163 *rows_out = rows; 163 *rows_out = rows;
164 *cols_out = cols; 164 *cols_out = cols;
165 165
166 return(ret); 166 return ret;
167} 167}
168 168
169void generic_free(void *data) 169void generic_free(void *data)
@@ -186,25 +186,29 @@ static void tty_receive_char(struct tty_struct *tty, char ch)
186 } 186 }
187 } 187 }
188 188
189 if((tty->flip.flag_buf_ptr == NULL) || 189 if((tty->flip.flag_buf_ptr == NULL) ||
190 (tty->flip.char_buf_ptr == NULL)) 190 (tty->flip.char_buf_ptr == NULL))
191 return; 191 return;
192 tty_insert_flip_char(tty, ch, TTY_NORMAL); 192 tty_insert_flip_char(tty, ch, TTY_NORMAL);
193} 193}
194 194
195static int open_one_chan(struct chan *chan, int input, int output, int primary) 195static int open_one_chan(struct chan *chan)
196{ 196{
197 int fd; 197 int fd;
198 198
199 if(chan->opened) return(0); 199 if(chan->opened)
200 if(chan->ops->open == NULL) fd = 0; 200 return 0;
201 else fd = (*chan->ops->open)(input, output, primary, chan->data, 201
202 &chan->dev); 202 if(chan->ops->open == NULL)
203 if(fd < 0) return(fd); 203 fd = 0;
204 else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
205 chan->data, &chan->dev);
206 if(fd < 0)
207 return fd;
204 chan->fd = fd; 208 chan->fd = fd;
205 209
206 chan->opened = 1; 210 chan->opened = 1;
207 return(0); 211 return 0;
208} 212}
209 213
210int open_chan(struct list_head *chans) 214int open_chan(struct list_head *chans)
@@ -215,11 +219,11 @@ int open_chan(struct list_head *chans)
215 219
216 list_for_each(ele, chans){ 220 list_for_each(ele, chans){
217 chan = list_entry(ele, struct chan, list); 221 chan = list_entry(ele, struct chan, list);
218 ret = open_one_chan(chan, chan->input, chan->output, 222 ret = open_one_chan(chan);
219 chan->primary); 223 if(chan->primary)
220 if(chan->primary) err = ret; 224 err = ret;
221 } 225 }
222 return(err); 226 return err;
223} 227}
224 228
225void chan_enable_winch(struct list_head *chans, struct tty_struct *tty) 229void chan_enable_winch(struct list_head *chans, struct tty_struct *tty)
@@ -236,20 +240,65 @@ void chan_enable_winch(struct list_head *chans, struct tty_struct *tty)
236 } 240 }
237} 241}
238 242
239void enable_chan(struct list_head *chans, struct tty_struct *tty) 243void enable_chan(struct line *line)
240{ 244{
241 struct list_head *ele; 245 struct list_head *ele;
242 struct chan *chan; 246 struct chan *chan;
243 247
244 list_for_each(ele, chans){ 248 list_for_each(ele, &line->chan_list){
245 chan = list_entry(ele, struct chan, list); 249 chan = list_entry(ele, struct chan, list);
246 if(!chan->opened) continue; 250 if(open_one_chan(chan))
251 continue;
252
253 if(chan->enabled)
254 continue;
255 line_setup_irq(chan->fd, chan->input, chan->output, line,
256 chan);
257 chan->enabled = 1;
258 }
259}
260
261static LIST_HEAD(irqs_to_free);
262
263void free_irqs(void)
264{
265 struct chan *chan;
266
267 while(!list_empty(&irqs_to_free)){
268 chan = list_entry(irqs_to_free.next, struct chan, free_list);
269 list_del(&chan->free_list);
247 270
248 line_setup_irq(chan->fd, chan->input, chan->output, tty); 271 if(chan->input)
272 free_irq(chan->line->driver->read_irq, chan);
273 if(chan->output)
274 free_irq(chan->line->driver->write_irq, chan);
275 chan->enabled = 0;
276 }
277}
278
279static void close_one_chan(struct chan *chan, int delay_free_irq)
280{
281 if(!chan->opened)
282 return;
283
284 if(delay_free_irq){
285 list_add(&chan->free_list, &irqs_to_free);
249 } 286 }
287 else {
288 if(chan->input)
289 free_irq(chan->line->driver->read_irq, chan);
290 if(chan->output)
291 free_irq(chan->line->driver->write_irq, chan);
292 chan->enabled = 0;
293 }
294 if(chan->ops->close != NULL)
295 (*chan->ops->close)(chan->fd, chan->data);
296
297 chan->opened = 0;
298 chan->fd = -1;
250} 299}
251 300
252void close_chan(struct list_head *chans) 301void close_chan(struct list_head *chans, int delay_free_irq)
253{ 302{
254 struct chan *chan; 303 struct chan *chan;
255 304
@@ -259,15 +308,37 @@ void close_chan(struct list_head *chans)
259 * so it must be the last closed. 308 * so it must be the last closed.
260 */ 309 */
261 list_for_each_entry_reverse(chan, chans, list) { 310 list_for_each_entry_reverse(chan, chans, list) {
262 if(!chan->opened) continue; 311 close_one_chan(chan, delay_free_irq);
263 if(chan->ops->close != NULL) 312 }
264 (*chan->ops->close)(chan->fd, chan->data); 313}
265 chan->opened = 0; 314
266 chan->fd = -1; 315void deactivate_chan(struct list_head *chans, int irq)
316{
317 struct list_head *ele;
318
319 struct chan *chan;
320 list_for_each(ele, chans) {
321 chan = list_entry(ele, struct chan, list);
322
323 if(chan->enabled && chan->input)
324 deactivate_fd(chan->fd, irq);
325 }
326}
327
328void reactivate_chan(struct list_head *chans, int irq)
329{
330 struct list_head *ele;
331 struct chan *chan;
332
333 list_for_each(ele, chans) {
334 chan = list_entry(ele, struct chan, list);
335
336 if(chan->enabled && chan->input)
337 reactivate_fd(chan->fd, irq);
267 } 338 }
268} 339}
269 340
270int write_chan(struct list_head *chans, const char *buf, int len, 341int write_chan(struct list_head *chans, const char *buf, int len,
271 int write_irq) 342 int write_irq)
272{ 343{
273 struct list_head *ele; 344 struct list_head *ele;
@@ -285,7 +356,7 @@ int write_chan(struct list_head *chans, const char *buf, int len,
285 reactivate_fd(chan->fd, write_irq); 356 reactivate_fd(chan->fd, write_irq);
286 } 357 }
287 } 358 }
288 return(ret); 359 return ret;
289} 360}
290 361
291int console_write_chan(struct list_head *chans, const char *buf, int len) 362int console_write_chan(struct list_head *chans, const char *buf, int len)
@@ -301,19 +372,18 @@ int console_write_chan(struct list_head *chans, const char *buf, int len)
301 n = chan->ops->console_write(chan->fd, buf, len); 372 n = chan->ops->console_write(chan->fd, buf, len);
302 if(chan->primary) ret = n; 373 if(chan->primary) ret = n;
303 } 374 }
304 return(ret); 375 return ret;
305} 376}
306 377
307int console_open_chan(struct line *line, struct console *co, struct chan_opts *opts) 378int console_open_chan(struct line *line, struct console *co,
379 struct chan_opts *opts)
308{ 380{
309 if (!list_empty(&line->chan_list)) 381 int err;
310 return 0; 382
383 err = open_chan(&line->chan_list);
384 if(err)
385 return err;
311 386
312 if (0 != parse_chan_pair(line->init_str, &line->chan_list,
313 line->init_pri, co->index, opts))
314 return -1;
315 if (0 != open_chan(&line->chan_list))
316 return -1;
317 printk("Console initialized on /dev/%s%d\n",co->name,co->index); 387 printk("Console initialized on /dev/%s%d\n",co->name,co->index);
318 return 0; 388 return 0;
319} 389}
@@ -327,32 +397,36 @@ int chan_window_size(struct list_head *chans, unsigned short *rows_out,
327 list_for_each(ele, chans){ 397 list_for_each(ele, chans){
328 chan = list_entry(ele, struct chan, list); 398 chan = list_entry(ele, struct chan, list);
329 if(chan->primary){ 399 if(chan->primary){
330 if(chan->ops->window_size == NULL) return(0); 400 if(chan->ops->window_size == NULL)
331 return(chan->ops->window_size(chan->fd, chan->data, 401 return 0;
332 rows_out, cols_out)); 402 return chan->ops->window_size(chan->fd, chan->data,
403 rows_out, cols_out);
333 } 404 }
334 } 405 }
335 return(0); 406 return 0;
336} 407}
337 408
338void free_one_chan(struct chan *chan) 409void free_one_chan(struct chan *chan, int delay_free_irq)
339{ 410{
340 list_del(&chan->list); 411 list_del(&chan->list);
412
413 close_one_chan(chan, delay_free_irq);
414
341 if(chan->ops->free != NULL) 415 if(chan->ops->free != NULL)
342 (*chan->ops->free)(chan->data); 416 (*chan->ops->free)(chan->data);
343 free_irq_by_fd(chan->fd); 417
344 if(chan->primary && chan->output) ignore_sigio_fd(chan->fd); 418 if(chan->primary && chan->output) ignore_sigio_fd(chan->fd);
345 kfree(chan); 419 kfree(chan);
346} 420}
347 421
348void free_chan(struct list_head *chans) 422void free_chan(struct list_head *chans, int delay_free_irq)
349{ 423{
350 struct list_head *ele, *next; 424 struct list_head *ele, *next;
351 struct chan *chan; 425 struct chan *chan;
352 426
353 list_for_each_safe(ele, next, chans){ 427 list_for_each_safe(ele, next, chans){
354 chan = list_entry(ele, struct chan, list); 428 chan = list_entry(ele, struct chan, list);
355 free_one_chan(chan); 429 free_one_chan(chan, delay_free_irq);
356 } 430 }
357} 431}
358 432
@@ -363,23 +437,23 @@ static int one_chan_config_string(struct chan *chan, char *str, int size,
363 437
364 if(chan == NULL){ 438 if(chan == NULL){
365 CONFIG_CHUNK(str, size, n, "none", 1); 439 CONFIG_CHUNK(str, size, n, "none", 1);
366 return(n); 440 return n;
367 } 441 }
368 442
369 CONFIG_CHUNK(str, size, n, chan->ops->type, 0); 443 CONFIG_CHUNK(str, size, n, chan->ops->type, 0);
370 444
371 if(chan->dev == NULL){ 445 if(chan->dev == NULL){
372 CONFIG_CHUNK(str, size, n, "", 1); 446 CONFIG_CHUNK(str, size, n, "", 1);
373 return(n); 447 return n;
374 } 448 }
375 449
376 CONFIG_CHUNK(str, size, n, ":", 0); 450 CONFIG_CHUNK(str, size, n, ":", 0);
377 CONFIG_CHUNK(str, size, n, chan->dev, 0); 451 CONFIG_CHUNK(str, size, n, chan->dev, 0);
378 452
379 return(n); 453 return n;
380} 454}
381 455
382static int chan_pair_config_string(struct chan *in, struct chan *out, 456static int chan_pair_config_string(struct chan *in, struct chan *out,
383 char *str, int size, char **error_out) 457 char *str, int size, char **error_out)
384{ 458{
385 int n; 459 int n;
@@ -390,7 +464,7 @@ static int chan_pair_config_string(struct chan *in, struct chan *out,
390 464
391 if(in == out){ 465 if(in == out){
392 CONFIG_CHUNK(str, size, n, "", 1); 466 CONFIG_CHUNK(str, size, n, "", 1);
393 return(n); 467 return n;
394 } 468 }
395 469
396 CONFIG_CHUNK(str, size, n, ",", 1); 470 CONFIG_CHUNK(str, size, n, ",", 1);
@@ -399,10 +473,10 @@ static int chan_pair_config_string(struct chan *in, struct chan *out,
399 size -= n; 473 size -= n;
400 CONFIG_CHUNK(str, size, n, "", 1); 474 CONFIG_CHUNK(str, size, n, "", 1);
401 475
402 return(n); 476 return n;
403} 477}
404 478
405int chan_config_string(struct list_head *chans, char *str, int size, 479int chan_config_string(struct list_head *chans, char *str, int size,
406 char **error_out) 480 char **error_out)
407{ 481{
408 struct list_head *ele; 482 struct list_head *ele;
@@ -418,7 +492,7 @@ int chan_config_string(struct list_head *chans, char *str, int size,
418 out = chan; 492 out = chan;
419 } 493 }
420 494
421 return(chan_pair_config_string(in, out, str, size, error_out)); 495 return chan_pair_config_string(in, out, str, size, error_out);
422} 496}
423 497
424struct chan_type { 498struct chan_type {
@@ -462,7 +536,7 @@ struct chan_type chan_table[] = {
462#endif 536#endif
463}; 537};
464 538
465static struct chan *parse_chan(char *str, int pri, int device, 539static struct chan *parse_chan(struct line *line, char *str, int device,
466 struct chan_opts *opts) 540 struct chan_opts *opts)
467{ 541{
468 struct chan_type *entry; 542 struct chan_type *entry;
@@ -484,36 +558,42 @@ static struct chan *parse_chan(char *str, int pri, int device,
484 if(ops == NULL){ 558 if(ops == NULL){
485 my_printf("parse_chan couldn't parse \"%s\"\n", 559 my_printf("parse_chan couldn't parse \"%s\"\n",
486 str); 560 str);
487 return(NULL); 561 return NULL;
488 } 562 }
489 if(ops->init == NULL) return(NULL); 563 if(ops->init == NULL)
564 return NULL;
490 data = (*ops->init)(str, device, opts); 565 data = (*ops->init)(str, device, opts);
491 if(data == NULL) return(NULL); 566 if(data == NULL)
567 return NULL;
492 568
493 chan = kmalloc(sizeof(*chan), GFP_ATOMIC); 569 chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
494 if(chan == NULL) return(NULL); 570 if(chan == NULL)
571 return NULL;
495 *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list), 572 *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
573 .free_list =
574 LIST_HEAD_INIT(chan->free_list),
575 .line = line,
496 .primary = 1, 576 .primary = 1,
497 .input = 0, 577 .input = 0,
498 .output = 0, 578 .output = 0,
499 .opened = 0, 579 .opened = 0,
580 .enabled = 0,
500 .fd = -1, 581 .fd = -1,
501 .pri = pri,
502 .ops = ops, 582 .ops = ops,
503 .data = data }); 583 .data = data });
504 return(chan); 584 return chan;
505} 585}
506 586
507int parse_chan_pair(char *str, struct list_head *chans, int pri, int device, 587int parse_chan_pair(char *str, struct line *line, int device,
508 struct chan_opts *opts) 588 struct chan_opts *opts)
509{ 589{
590 struct list_head *chans = &line->chan_list;
510 struct chan *new, *chan; 591 struct chan *new, *chan;
511 char *in, *out; 592 char *in, *out;
512 593
513 if(!list_empty(chans)){ 594 if(!list_empty(chans)){
514 chan = list_entry(chans->next, struct chan, list); 595 chan = list_entry(chans->next, struct chan, list);
515 if(chan->pri >= pri) return(0); 596 free_chan(chans, 0);
516 free_chan(chans);
517 INIT_LIST_HEAD(chans); 597 INIT_LIST_HEAD(chans);
518 } 598 }
519 599
@@ -522,24 +602,30 @@ int parse_chan_pair(char *str, struct list_head *chans, int pri, int device,
522 in = str; 602 in = str;
523 *out = '\0'; 603 *out = '\0';
524 out++; 604 out++;
525 new = parse_chan(in, pri, device, opts); 605 new = parse_chan(line, in, device, opts);
526 if(new == NULL) return(-1); 606 if(new == NULL)
607 return -1;
608
527 new->input = 1; 609 new->input = 1;
528 list_add(&new->list, chans); 610 list_add(&new->list, chans);
529 611
530 new = parse_chan(out, pri, device, opts); 612 new = parse_chan(line, out, device, opts);
531 if(new == NULL) return(-1); 613 if(new == NULL)
614 return -1;
615
532 list_add(&new->list, chans); 616 list_add(&new->list, chans);
533 new->output = 1; 617 new->output = 1;
534 } 618 }
535 else { 619 else {
536 new = parse_chan(str, pri, device, opts); 620 new = parse_chan(line, str, device, opts);
537 if(new == NULL) return(-1); 621 if(new == NULL)
622 return -1;
623
538 list_add(&new->list, chans); 624 list_add(&new->list, chans);
539 new->input = 1; 625 new->input = 1;
540 new->output = 1; 626 new->output = 1;
541 } 627 }
542 return(0); 628 return 0;
543} 629}
544 630
545int chan_out_fd(struct list_head *chans) 631int chan_out_fd(struct list_head *chans)
@@ -550,9 +636,9 @@ int chan_out_fd(struct list_head *chans)
550 list_for_each(ele, chans){ 636 list_for_each(ele, chans){
551 chan = list_entry(ele, struct chan, list); 637 chan = list_entry(ele, struct chan, list);
552 if(chan->primary && chan->output) 638 if(chan->primary && chan->output)
553 return(chan->fd); 639 return chan->fd;
554 } 640 }
555 return(-1); 641 return -1;
556} 642}
557 643
558void chan_interrupt(struct list_head *chans, struct work_struct *task, 644void chan_interrupt(struct list_head *chans, struct work_struct *task,
@@ -567,9 +653,9 @@ void chan_interrupt(struct list_head *chans, struct work_struct *task,
567 chan = list_entry(ele, struct chan, list); 653 chan = list_entry(ele, struct chan, list);
568 if(!chan->input || (chan->ops->read == NULL)) continue; 654 if(!chan->input || (chan->ops->read == NULL)) continue;
569 do { 655 do {
570 if((tty != NULL) && 656 if((tty != NULL) &&
571 (tty->flip.count >= TTY_FLIPBUF_SIZE)){ 657 (tty->flip.count >= TTY_FLIPBUF_SIZE)){
572 schedule_work(task); 658 schedule_delayed_work(task, 1);
573 goto out; 659 goto out;
574 } 660 }
575 err = chan->ops->read(chan->fd, &c, chan->data); 661 err = chan->ops->read(chan->fd, &c, chan->data);
@@ -582,29 +668,12 @@ void chan_interrupt(struct list_head *chans, struct work_struct *task,
582 if(chan->primary){ 668 if(chan->primary){
583 if(tty != NULL) 669 if(tty != NULL)
584 tty_hangup(tty); 670 tty_hangup(tty);
585 line_disable(tty, irq); 671 close_chan(chans, 1);
586 close_chan(chans);
587 free_chan(chans);
588 return; 672 return;
589 } 673 }
590 else { 674 else close_one_chan(chan, 1);
591 if(chan->ops->close != NULL)
592 chan->ops->close(chan->fd, chan->data);
593 free_one_chan(chan);
594 }
595 } 675 }
596 } 676 }
597 out: 677 out:
598 if(tty) tty_flip_buffer_push(tty); 678 if(tty) tty_flip_buffer_push(tty);
599} 679}
600
601/*
602 * Overrides for Emacs so that we follow Linus's tabbing style.
603 * Emacs will notice this stuff at the end of the file and automatically
604 * adjust the settings for this buffer only. This must remain at the end
605 * of the file.
606 * ---------------------------------------------------------------------------
607 * Local variables:
608 * c-file-style: "linux"
609 * End:
610 */
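The delay_free_irq flag threaded through close_one_chan(), close_chan() and chan_interrupt() above exists because a channel can die inside its own interrupt handler (read error or EOF), and free_irq() must not be called for the IRQ that is currently being handled. The fix is classic deferral: park the victim on irqs_to_free and let free_irqs() drain the list later from a safe context. The pattern in miniature (resource_sketch and do_free() are hypothetical):

struct resource_sketch {
	struct list_head free_list;
};

static LIST_HEAD(to_free);

static void do_free(struct resource_sketch *r)
{
	/* free_irq(), close the fd, ... (assumed) */
}

static void release(struct resource_sketch *r, int in_irq_context)
{
	if (in_irq_context)
		list_add(&r->free_list, &to_free);	/* defer */
	else
		do_free(r);				/* safe right away */
}

static void drain(void)		/* runs in process context */
{
	struct resource_sketch *r;

	while (!list_empty(&to_free)) {
		r = list_entry(to_free.next, struct resource_sketch,
			       free_list);
		list_del(&r->free_list);
		do_free(r);
	}
}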
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index e0fdffa2d542..46ceb25a9959 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
@@ -23,8 +23,9 @@
23 23
24static irqreturn_t line_interrupt(int irq, void *data, struct pt_regs *unused) 24static irqreturn_t line_interrupt(int irq, void *data, struct pt_regs *unused)
25{ 25{
26 struct tty_struct *tty = data; 26 struct chan *chan = data;
27 struct line *line = tty->driver_data; 27 struct line *line = chan->line;
28 struct tty_struct *tty = line->tty;
28 29
29 if (line) 30 if (line)
30 chan_interrupt(&line->chan_list, &line->task, tty, irq); 31 chan_interrupt(&line->chan_list, &line->task, tty, irq);
@@ -33,10 +34,11 @@ static irqreturn_t line_interrupt(int irq, void *data, struct pt_regs *unused)
33 34
34static void line_timer_cb(void *arg) 35static void line_timer_cb(void *arg)
35{ 36{
36 struct tty_struct *tty = arg; 37 struct line *line = arg;
37 struct line *line = tty->driver_data;
38 38
39 line_interrupt(line->driver->read_irq, arg, NULL); 39 if(!line->throttled)
40 chan_interrupt(&line->chan_list, &line->task, line->tty,
41 line->driver->read_irq);
40} 42}
41 43
42/* Returns the free space inside the ring buffer of this line. 44/* Returns the free space inside the ring buffer of this line.
@@ -124,7 +126,8 @@ static int buffer_data(struct line *line, const char *buf, int len)
124 if (len < end){ 126 if (len < end){
125 memcpy(line->tail, buf, len); 127 memcpy(line->tail, buf, len);
126 line->tail += len; 128 line->tail += len;
127 } else { 129 }
130 else {
128 /* The circular buffer is wrapping */ 131 /* The circular buffer is wrapping */
129 memcpy(line->tail, buf, end); 132 memcpy(line->tail, buf, end);
130 buf += end; 133 buf += end;
@@ -170,7 +173,7 @@ static int flush_buffer(struct line *line)
170 } 173 }
171 174
172 count = line->tail - line->head; 175 count = line->tail - line->head;
173 n = write_chan(&line->chan_list, line->head, count, 176 n = write_chan(&line->chan_list, line->head, count,
174 line->driver->write_irq); 177 line->driver->write_irq);
175 178
176 if(n < 0) 179 if(n < 0)
@@ -227,7 +230,7 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
227 if (err <= 0 && (err != -EAGAIN || !ret)) 230 if (err <= 0 && (err != -EAGAIN || !ret))
228 ret = err; 231 ret = err;
229 } else { 232 } else {
230 n = write_chan(&line->chan_list, buf, len, 233 n = write_chan(&line->chan_list, buf, len,
231 line->driver->write_irq); 234 line->driver->write_irq);
232 if (n < 0) { 235 if (n < 0) {
233 ret = n; 236 ret = n;
@@ -338,11 +341,36 @@ int line_ioctl(struct tty_struct *tty, struct file * file,
338 return ret; 341 return ret;
339} 342}
340 343
344void line_throttle(struct tty_struct *tty)
345{
346 struct line *line = tty->driver_data;
347
348 deactivate_chan(&line->chan_list, line->driver->read_irq);
349 line->throttled = 1;
350}
351
352void line_unthrottle(struct tty_struct *tty)
353{
354 struct line *line = tty->driver_data;
355
356 line->throttled = 0;
357 chan_interrupt(&line->chan_list, &line->task, tty,
358 line->driver->read_irq);
359
360 /* Maybe there is enough stuff pending that calling the interrupt
361 * throttles us again. In this case, line->throttled will be 1
362 * again and we shouldn't turn the interrupt back on.
363 */
364 if(!line->throttled)
365 reactivate_chan(&line->chan_list, line->driver->read_irq);
366}
367
341static irqreturn_t line_write_interrupt(int irq, void *data, 368static irqreturn_t line_write_interrupt(int irq, void *data,
342 struct pt_regs *unused) 369 struct pt_regs *unused)
343{ 370{
344 struct tty_struct *tty = data; 371 struct chan *chan = data;
345 struct line *line = tty->driver_data; 372 struct line *line = chan->line;
373 struct tty_struct *tty = line->tty;
346 int err; 374 int err;
347 375
348 /* Interrupts are enabled here because we registered the interrupt with 376 /* Interrupts are enabled here because we registered the interrupt with
@@ -364,7 +392,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data,
364 if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && 392 if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
365 (tty->ldisc.write_wakeup != NULL)) 393 (tty->ldisc.write_wakeup != NULL))
366 (tty->ldisc.write_wakeup)(tty); 394 (tty->ldisc.write_wakeup)(tty);
367 395
368 /* BLOCKING mode 396 /* BLOCKING mode
369 * In blocking mode, everything sleeps on tty->write_wait. 397 * In blocking mode, everything sleeps on tty->write_wait.
370 * Sleeping in the console driver would break non-blocking 398 * Sleeping in the console driver would break non-blocking
@@ -376,53 +404,29 @@ static irqreturn_t line_write_interrupt(int irq, void *data,
376 return IRQ_HANDLED; 404 return IRQ_HANDLED;
377} 405}
378 406
379int line_setup_irq(int fd, int input, int output, struct tty_struct *tty) 407int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
380{ 408{
381 struct line *line = tty->driver_data;
382 struct line_driver *driver = line->driver; 409 struct line_driver *driver = line->driver;
383 int err = 0, flags = SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM; 410 int err = 0, flags = SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM;
384 411
385 if (input) 412 if (input)
386 err = um_request_irq(driver->read_irq, fd, IRQ_READ, 413 err = um_request_irq(driver->read_irq, fd, IRQ_READ,
387 line_interrupt, flags, 414 line_interrupt, flags,
388 driver->read_irq_name, tty); 415 driver->read_irq_name, data);
389 if (err) 416 if (err)
390 return err; 417 return err;
391 if (output) 418 if (output)
392 err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, 419 err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
393 line_write_interrupt, flags, 420 line_write_interrupt, flags,
394 driver->write_irq_name, tty); 421 driver->write_irq_name, data);
395 line->have_irq = 1; 422 line->have_irq = 1;
396 return err; 423 return err;
397} 424}
398 425
399void line_disable(struct tty_struct *tty, int current_irq) 426int line_open(struct line *lines, struct tty_struct *tty)
400{
401 struct line *line = tty->driver_data;
402
403 if(!line->have_irq)
404 return;
405
406 if(line->driver->read_irq == current_irq)
407 free_irq_later(line->driver->read_irq, tty);
408 else {
409 free_irq(line->driver->read_irq, tty);
410 }
411
412 if(line->driver->write_irq == current_irq)
413 free_irq_later(line->driver->write_irq, tty);
414 else {
415 free_irq(line->driver->write_irq, tty);
416 }
417
418 line->have_irq = 0;
419}
420
421int line_open(struct line *lines, struct tty_struct *tty,
422 struct chan_opts *opts)
423{ 427{
424 struct line *line; 428 struct line *line;
425 int err = 0; 429 int err = -ENODEV;
426 430
427 line = &lines[tty->index]; 431 line = &lines[tty->index];
428 tty->driver_data = line; 432 tty->driver_data = line;
@@ -430,31 +434,29 @@ int line_open(struct line *lines, struct tty_struct *tty,
430 /* The IRQ which takes this lock is not yet enabled and won't be run 434 /* The IRQ which takes this lock is not yet enabled and won't be run
431 * before the end, so we don't need to use spin_lock_irq.*/ 435 * before the end, so we don't need to use spin_lock_irq.*/
432 spin_lock(&line->lock); 436 spin_lock(&line->lock);
433 if (tty->count == 1) { 437
434 if (!line->valid) { 438 tty->driver_data = line;
435 err = -ENODEV; 439 line->tty = tty;
436 goto out; 440 if(!line->valid)
437 } 441 goto out;
438 if (list_empty(&line->chan_list)) { 442
439 err = parse_chan_pair(line->init_str, &line->chan_list, 443 if(tty->count == 1){
440 line->init_pri, tty->index, opts); 444 /* Here the device is opened, if necessary, and interrupt
441 if(err) goto out; 445 * is registered.
442 err = open_chan(&line->chan_list); 446 */
443 if(err) goto out; 447 enable_chan(line);
448 INIT_WORK(&line->task, line_timer_cb, line);
449
450 if(!line->sigio){
451 chan_enable_winch(&line->chan_list, tty);
452 line->sigio = 1;
444 } 453 }
445 /* Here the interrupt is registered.*/
446 enable_chan(&line->chan_list, tty);
447 INIT_WORK(&line->task, line_timer_cb, tty);
448 }
449 454
450 if(!line->sigio){ 455 chan_window_size(&line->chan_list, &tty->winsize.ws_row,
451 chan_enable_winch(&line->chan_list, tty); 456 &tty->winsize.ws_col);
452 line->sigio = 1;
453 } 457 }
454 chan_window_size(&line->chan_list, &tty->winsize.ws_row,
455 &tty->winsize.ws_col);
456 line->count++;
457 458
459 err = 0;
458out: 460out:
459 spin_unlock(&line->lock); 461 spin_unlock(&line->lock);
460 return err; 462 return err;
@@ -474,15 +476,14 @@ void line_close(struct tty_struct *tty, struct file * filp)
474 /* We ignore the error anyway! */ 476 /* We ignore the error anyway! */
475 flush_buffer(line); 477 flush_buffer(line);
476 478
477 line->count--; 479 if(tty->count == 1){
478 if (tty->count == 1) { 480 line->tty = NULL;
479 line_disable(tty, -1);
480 tty->driver_data = NULL; 481 tty->driver_data = NULL;
481 }
482 482
483 if((line->count == 0) && line->sigio){ 483 if(line->sigio){
484 unregister_winch(tty); 484 unregister_winch(tty);
485 line->sigio = 0; 485 line->sigio = 0;
486 }
486 } 487 }
487 488
488 spin_unlock_irq(&line->lock); 489 spin_unlock_irq(&line->lock);
@@ -493,17 +494,15 @@ void close_lines(struct line *lines, int nlines)
493 int i; 494 int i;
494 495
495 for(i = 0; i < nlines; i++) 496 for(i = 0; i < nlines; i++)
496 close_chan(&lines[i].chan_list); 497 close_chan(&lines[i].chan_list, 0);
497} 498}
498 499
499/* Common setup code for both startup command line and mconsole initialization. 500/* Common setup code for both startup command line and mconsole initialization.
500 * @lines contains the array (of size @num) to modify; 501 * @lines contains the array (of size @num) to modify;
501 * @init is the setup string; 502 * @init is the setup string;
502 * @all_allowed is a boolean saying if we can setup the whole @lines 503 */
503 * at once. For instance, it will be usually true for startup init. (where we
504 * can use con=xterm) and false for mconsole.*/
505 504
506int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed) 505int line_setup(struct line *lines, unsigned int num, char *init)
507{ 506{
508 int i, n; 507 int i, n;
509 char *end; 508 char *end;
@@ -512,10 +511,11 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
512 /* We said con=/ssl= instead of con#=, so we are configuring all 511 /* We said con=/ssl= instead of con#=, so we are configuring all
513 * consoles at once.*/ 512 * consoles at once.*/
514 n = -1; 513 n = -1;
515 } else { 514 }
515 else {
516 n = simple_strtoul(init, &end, 0); 516 n = simple_strtoul(init, &end, 0);
517 if(*end != '='){ 517 if(*end != '='){
518 printk(KERN_ERR "line_setup failed to parse \"%s\"\n", 518 printk(KERN_ERR "line_setup failed to parse \"%s\"\n",
519 init); 519 init);
520 return 0; 520 return 0;
521 } 521 }
@@ -527,8 +527,9 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
527 printk("line_setup - %d out of range ((0 ... %d) allowed)\n", 527 printk("line_setup - %d out of range ((0 ... %d) allowed)\n",
528 n, num - 1); 528 n, num - 1);
529 return 0; 529 return 0;
530 } else if (n >= 0){ 530 }
531 if (lines[n].count > 0) { 531 else if (n >= 0){
532 if (lines[n].tty != NULL) {
532 printk("line_setup - device %d is open\n", n); 533 printk("line_setup - device %d is open\n", n);
533 return 0; 534 return 0;
534 } 535 }
@@ -539,13 +540,10 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
539 else { 540 else {
540 lines[n].init_str = init; 541 lines[n].init_str = init;
541 lines[n].valid = 1; 542 lines[n].valid = 1;
542 } 543 }
543 } 544 }
544 } else if(!all_allowed){ 545 }
545 printk("line_setup - can't configure all devices from " 546 else {
546 "mconsole\n");
547 return 0;
548 } else {
549 for(i = 0; i < num; i++){ 547 for(i = 0; i < num; i++){
550 if(lines[i].init_pri <= INIT_ALL){ 548 if(lines[i].init_pri <= INIT_ALL){
551 lines[i].init_pri = INIT_ALL; 549 lines[i].init_pri = INIT_ALL;
@@ -557,18 +555,33 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
557 } 555 }
558 } 556 }
559 } 557 }
560 return 1; 558 return n == -1 ? num : n;
561} 559}
562 560
563int line_config(struct line *lines, unsigned int num, char *str) 561int line_config(struct line *lines, unsigned int num, char *str,
562 struct chan_opts *opts)
564{ 563{
565 char *new = uml_strdup(str); 564 struct line *line;
565 char *new;
566 int n;
566 567
568 if(*str == '='){
569 printk("line_config - can't configure all devices from "
570 "mconsole\n");
571 return 1;
572 }
573
574 new = kstrdup(str, GFP_KERNEL);
567 if(new == NULL){ 575 if(new == NULL){
568 printk("line_config - uml_strdup failed\n"); 576 printk("line_config - kstrdup failed\n");
569 return -ENOMEM; 577 return 1;
570 } 578 }
571 return !line_setup(lines, num, new, 0); 579 n = line_setup(lines, num, new);
580 if(n < 0)
581 return 1;
582
583 line = &lines[n];
584 return parse_chan_pair(line->init_str, line, n, opts);
572} 585}
573 586
574int line_get_config(char *name, struct line *lines, unsigned int num, char *str, 587int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
@@ -594,7 +607,7 @@ int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
594 spin_lock(&line->lock); 607 spin_lock(&line->lock);
595 if(!line->valid) 608 if(!line->valid)
596 CONFIG_CHUNK(str, size, n, "none", 1); 609 CONFIG_CHUNK(str, size, n, "none", 1);
597 else if(line->count == 0) 610 else if(line->tty == NULL)
598 CONFIG_CHUNK(str, size, n, line->init_str, 1); 611 CONFIG_CHUNK(str, size, n, line->init_str, 1);
599 else n = chan_config_string(&line->chan_list, str, size, error_out); 612 else n = chan_config_string(&line->chan_list, str, size, error_out);
600 spin_unlock(&line->lock); 613 spin_unlock(&line->lock);
@@ -619,14 +632,18 @@ int line_id(char **str, int *start_out, int *end_out)
619 632
620int line_remove(struct line *lines, unsigned int num, int n) 633int line_remove(struct line *lines, unsigned int num, int n)
621{ 634{
635 int err;
622 char config[sizeof("conxxxx=none\0")]; 636 char config[sizeof("conxxxx=none\0")];
623 637
624 sprintf(config, "%d=none", n); 638 sprintf(config, "%d=none", n);
625 return !line_setup(lines, num, config, 0); 639 err = line_setup(lines, num, config);
640 if(err >= 0)
641 err = 0;
642 return err;
626} 643}
627 644
628struct tty_driver *line_register_devfs(struct lines *set, 645struct tty_driver *line_register_devfs(struct lines *set,
629 struct line_driver *line_driver, 646 struct line_driver *line_driver,
630 struct tty_operations *ops, struct line *lines, 647 struct tty_operations *ops, struct line *lines,
631 int nlines) 648 int nlines)
632{ 649{
@@ -655,7 +672,7 @@ struct tty_driver *line_register_devfs(struct lines *set,
655 } 672 }
656 673
657 for(i = 0; i < nlines; i++){ 674 for(i = 0; i < nlines; i++){
658 if(!lines[i].valid) 675 if(!lines[i].valid)
659 tty_unregister_device(driver, i); 676 tty_unregister_device(driver, i);
660 } 677 }
661 678
@@ -663,24 +680,28 @@ struct tty_driver *line_register_devfs(struct lines *set,
663 return driver; 680 return driver;
664} 681}
665 682
666static spinlock_t winch_handler_lock; 683static DEFINE_SPINLOCK(winch_handler_lock);
667LIST_HEAD(winch_handlers); 684static LIST_HEAD(winch_handlers);
668 685
669void lines_init(struct line *lines, int nlines) 686void lines_init(struct line *lines, int nlines, struct chan_opts *opts)
670{ 687{
671 struct line *line; 688 struct line *line;
672 int i; 689 int i;
673 690
674 spin_lock_init(&winch_handler_lock);
675 for(i = 0; i < nlines; i++){ 691 for(i = 0; i < nlines; i++){
676 line = &lines[i]; 692 line = &lines[i];
677 INIT_LIST_HEAD(&line->chan_list); 693 INIT_LIST_HEAD(&line->chan_list);
678 spin_lock_init(&line->lock); 694
679 if(line->init_str != NULL){ 695 if(line->init_str == NULL)
680 line->init_str = uml_strdup(line->init_str); 696 continue;
681 if(line->init_str == NULL) 697
682 printk("lines_init - uml_strdup returned " 698 line->init_str = kstrdup(line->init_str, GFP_KERNEL);
683 "NULL\n"); 699 if(line->init_str == NULL)
700 printk("lines_init - kstrdup returned NULL\n");
701
702 if(parse_chan_pair(line->init_str, line, i, opts)){
703 printk("parse_chan_pair failed for device %d\n", i);
704 line->valid = 0;
684 } 705 }
685 } 706 }
686} 707}
@@ -717,8 +738,7 @@ irqreturn_t winch_interrupt(int irq, void *data, struct pt_regs *unused)
717 tty = winch->tty; 738 tty = winch->tty;
718 if (tty != NULL) { 739 if (tty != NULL) {
719 line = tty->driver_data; 740 line = tty->driver_data;
720 chan_window_size(&line->chan_list, 741 chan_window_size(&line->chan_list, &tty->winsize.ws_row,
721 &tty->winsize.ws_row,
722 &tty->winsize.ws_col); 742 &tty->winsize.ws_col);
723 kill_pg(tty->pgrp, SIGWINCH, 1); 743 kill_pg(tty->pgrp, SIGWINCH, 1);
724 } 744 }
@@ -749,60 +769,54 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty)
749 spin_unlock(&winch_handler_lock); 769 spin_unlock(&winch_handler_lock);
750 770
751 if(um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, 771 if(um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
752 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, 772 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
753 "winch", winch) < 0) 773 "winch", winch) < 0)
754 printk("register_winch_irq - failed to register IRQ\n"); 774 printk("register_winch_irq - failed to register IRQ\n");
755} 775}
756 776
777static void free_winch(struct winch *winch)
778{
779 list_del(&winch->list);
780
781 if(winch->pid != -1)
782 os_kill_process(winch->pid, 1);
783 if(winch->fd != -1)
784 os_close_file(winch->fd);
785
786 free_irq(WINCH_IRQ, winch);
787 kfree(winch);
788}
789
757static void unregister_winch(struct tty_struct *tty) 790static void unregister_winch(struct tty_struct *tty)
758{ 791{
759 struct list_head *ele; 792 struct list_head *ele;
760 struct winch *winch, *found = NULL; 793 struct winch *winch;
761 794
762 spin_lock(&winch_handler_lock); 795 spin_lock(&winch_handler_lock);
796
763 list_for_each(ele, &winch_handlers){ 797 list_for_each(ele, &winch_handlers){
764 winch = list_entry(ele, struct winch, list); 798 winch = list_entry(ele, struct winch, list);
765 if(winch->tty == tty){ 799 if(winch->tty == tty){
766 found = winch; 800 free_winch(winch);
767 break; 801 break;
768 } 802 }
769 } 803 }
770 if(found == NULL)
771 goto err;
772
773 list_del(&winch->list);
774 spin_unlock(&winch_handler_lock);
775
776 if(winch->pid != -1)
777 os_kill_process(winch->pid, 1);
778
779 free_irq(WINCH_IRQ, winch);
780 kfree(winch);
781
782 return;
783err:
784 spin_unlock(&winch_handler_lock); 804 spin_unlock(&winch_handler_lock);
785} 805}
786 806
787/* XXX: No lock as it's an exitcall... is this valid? Depending on cleanup
788 * order... are we sure that nothing else is done on the list? */
789static void winch_cleanup(void) 807static void winch_cleanup(void)
790{ 808{
791 struct list_head *ele; 809 struct list_head *ele, *next;
792 struct winch *winch; 810 struct winch *winch;
793 811
794 list_for_each(ele, &winch_handlers){ 812 spin_lock(&winch_handler_lock);
813
814 list_for_each_safe(ele, next, &winch_handlers){
795 winch = list_entry(ele, struct winch, list); 815 winch = list_entry(ele, struct winch, list);
796 if(winch->fd != -1){ 816 free_winch(winch);
797 /* Why is this different from the above free_irq(),
798 * which deactivates SIGIO? This searches the FD
799 * somewhere else and removes it from the list... */
800 deactivate_fd(winch->fd, WINCH_IRQ);
801 os_close_file(winch->fd);
802 }
803 if(winch->pid != -1)
804 os_kill_process(winch->pid, 1);
805 } 817 }
818
819 spin_unlock(&winch_handler_lock);
806} 820}
807__uml_exitcall(winch_cleanup); 821__uml_exitcall(winch_cleanup);
808 822
@@ -811,10 +825,10 @@ char *add_xterm_umid(char *base)
811 char *umid, *title; 825 char *umid, *title;
812 int len; 826 int len;
813 827
814 umid = get_umid(1); 828 umid = get_umid();
815 if(umid == NULL) 829 if(*umid == '\0')
816 return base; 830 return base;
817 831
818 len = strlen(base) + strlen(" ()") + strlen(umid) + 1; 832 len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
819 title = kmalloc(len, GFP_KERNEL); 833 title = kmalloc(len, GFP_KERNEL);
820 if(title == NULL){ 834 if(title == NULL){
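Two cleanups close out line.c: the duplicated winch teardown is consolidated into free_winch(), and winch_cleanup() now takes winch_handler_lock and iterates with list_for_each_safe(), which becomes mandatory once the loop body deletes entries: plain list_for_each() would dereference ele->next after the entry had already been freed. The idiom in isolation (sketch; destroy() stands in for free_winch()):

static void destroy(struct winch *w)
{
	/* list_del() + kfree(), as in free_winch() above (assumed) */
}

static void cleanup_sketch(struct list_head *head, spinlock_t *lock)
{
	struct list_head *ele, *next;

	spin_lock(lock);
	list_for_each_safe(ele, next, head) {
		/* 'next' was sampled before destroy() frees 'ele' */
		destroy(list_entry(ele, struct winch, list));
	}
	spin_unlock(lock);
}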
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 12c95368124a..be610125429f 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -20,6 +20,7 @@
20#include "linux/namei.h" 20#include "linux/namei.h"
21#include "linux/proc_fs.h" 21#include "linux/proc_fs.h"
22#include "linux/syscalls.h" 22#include "linux/syscalls.h"
23#include "linux/console.h"
23#include "asm/irq.h" 24#include "asm/irq.h"
24#include "asm/uaccess.h" 25#include "asm/uaccess.h"
25#include "user_util.h" 26#include "user_util.h"
@@ -34,7 +35,7 @@
34#include "irq_kern.h" 35#include "irq_kern.h"
35#include "choose-mode.h" 36#include "choose-mode.h"
36 37
37static int do_unlink_socket(struct notifier_block *notifier, 38static int do_unlink_socket(struct notifier_block *notifier,
38 unsigned long what, void *data) 39 unsigned long what, void *data)
39{ 40{
40 return(mconsole_unlink_socket()); 41 return(mconsole_unlink_socket());
@@ -46,12 +47,12 @@ static struct notifier_block reboot_notifier = {
46 .priority = 0, 47 .priority = 0,
47}; 48};
48 49
49/* Safe without explicit locking for now. Tasklets provide their own 50/* Safe without explicit locking for now. Tasklets provide their own
50 * locking, and the interrupt handler is safe because it can't interrupt 51 * locking, and the interrupt handler is safe because it can't interrupt
51 * itself and it can only happen on CPU 0. 52 * itself and it can only happen on CPU 0.
52 */ 53 */
53 54
54LIST_HEAD(mc_requests); 55static LIST_HEAD(mc_requests);
55 56
56static void mc_work_proc(void *unused) 57static void mc_work_proc(void *unused)
57{ 58{
@@ -60,7 +61,7 @@ static void mc_work_proc(void *unused)
60 61
61 while(!list_empty(&mc_requests)){ 62 while(!list_empty(&mc_requests)){
62 local_save_flags(flags); 63 local_save_flags(flags);
63 req = list_entry(mc_requests.next, struct mconsole_entry, 64 req = list_entry(mc_requests.next, struct mconsole_entry,
64 list); 65 list);
65 list_del(&req->list); 66 list_del(&req->list);
66 local_irq_restore(flags); 67 local_irq_restore(flags);
@@ -69,7 +70,7 @@ static void mc_work_proc(void *unused)
69 } 70 }
70} 71}
71 72
72DECLARE_WORK(mconsole_work, mc_work_proc, NULL); 73static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
73 74
74static irqreturn_t mconsole_interrupt(int irq, void *dev_id, 75static irqreturn_t mconsole_interrupt(int irq, void *dev_id,
75 struct pt_regs *regs) 76 struct pt_regs *regs)
@@ -103,8 +104,8 @@ void mconsole_version(struct mc_request *req)
103{ 104{
104 char version[256]; 105 char version[256];
105 106
106 sprintf(version, "%s %s %s %s %s", system_utsname.sysname, 107 sprintf(version, "%s %s %s %s %s", system_utsname.sysname,
107 system_utsname.nodename, system_utsname.release, 108 system_utsname.nodename, system_utsname.release,
108 system_utsname.version, system_utsname.machine); 109 system_utsname.version, system_utsname.machine);
109 mconsole_reply(req, version, 0, 0); 110 mconsole_reply(req, version, 0, 0);
110} 111}
@@ -348,7 +349,7 @@ static struct mc_device *mconsole_find_dev(char *name)
348 349
349#define CONFIG_BUF_SIZE 64 350#define CONFIG_BUF_SIZE 64
350 351
351static void mconsole_get_config(int (*get_config)(char *, char *, int, 352static void mconsole_get_config(int (*get_config)(char *, char *, int,
352 char **), 353 char **),
353 struct mc_request *req, char *name) 354 struct mc_request *req, char *name)
354{ 355{
@@ -389,7 +390,6 @@ static void mconsole_get_config(int (*get_config)(char *, char *, int,
389 out: 390 out:
390 if(buf != default_buf) 391 if(buf != default_buf)
391 kfree(buf); 392 kfree(buf);
392
393} 393}
394 394
395void mconsole_config(struct mc_request *req) 395void mconsole_config(struct mc_request *req)
@@ -420,9 +420,9 @@ void mconsole_config(struct mc_request *req)
420 420
421void mconsole_remove(struct mc_request *req) 421void mconsole_remove(struct mc_request *req)
422{ 422{
423 struct mc_device *dev; 423 struct mc_device *dev;
424 char *ptr = req->request.data, *err_msg = ""; 424 char *ptr = req->request.data, *err_msg = "";
425 char error[256]; 425 char error[256];
426 int err, start, end, n; 426 int err, start, end, n;
427 427
428 ptr += strlen("remove"); 428 ptr += strlen("remove");
@@ -433,37 +433,112 @@ void mconsole_remove(struct mc_request *req)
433 return; 433 return;
434 } 434 }
435 435
436 ptr = &ptr[strlen(dev->name)]; 436 ptr = &ptr[strlen(dev->name)];
437 437
438 err = 1; 438 err = 1;
439 n = (*dev->id)(&ptr, &start, &end); 439 n = (*dev->id)(&ptr, &start, &end);
440 if(n < 0){ 440 if(n < 0){
441 err_msg = "Couldn't parse device number"; 441 err_msg = "Couldn't parse device number";
442 goto out; 442 goto out;
443 } 443 }
444 else if((n < start) || (n > end)){ 444 else if((n < start) || (n > end)){
445 sprintf(error, "Invalid device number - must be between " 445 sprintf(error, "Invalid device number - must be between "
446 "%d and %d", start, end); 446 "%d and %d", start, end);
447 err_msg = error; 447 err_msg = error;
448 goto out; 448 goto out;
449 } 449 }
450 450
451 err = (*dev->remove)(n); 451 err = (*dev->remove)(n);
452 switch(err){ 452 switch(err){
453 case -ENODEV: 453 case -ENODEV:
454 err_msg = "Device doesn't exist"; 454 err_msg = "Device doesn't exist";
455 break; 455 break;
456 case -EBUSY: 456 case -EBUSY:
457 err_msg = "Device is currently open"; 457 err_msg = "Device is currently open";
458 break; 458 break;
459 default: 459 default:
460 break; 460 break;
461 } 461 }
462 out: 462out:
463 mconsole_reply(req, err_msg, err, 0); 463 mconsole_reply(req, err_msg, err, 0);
464} 464}
465 465
466static DEFINE_SPINLOCK(console_lock);
467static LIST_HEAD(clients);
468static char console_buf[MCONSOLE_MAX_DATA];
469static int console_index = 0;
470
471static void console_write(struct console *console, const char *string,
472 unsigned len)
473{
474 struct list_head *ele;
475 int n;
476
477 if(list_empty(&clients))
478 return;
479
480 while(1){
481 n = min(len, ARRAY_SIZE(console_buf) - console_index);
482 strncpy(&console_buf[console_index], string, n);
483 console_index += n;
484 string += n;
485 len -= n;
486 if(len == 0)
487 return;
488
489 list_for_each(ele, &clients){
490 struct mconsole_entry *entry;
491
492 entry = list_entry(ele, struct mconsole_entry, list);
493 mconsole_reply_len(&entry->request, console_buf,
494 console_index, 0, 1);
495 }
496
497 console_index = 0;
498 }
499}
500
501static struct console mc_console = { .name = "mc",
502 .write = console_write,
503 .flags = CON_PRINTBUFFER | CON_ENABLED,
504 .index = -1 };
505
506static int mc_add_console(void)
507{
508 register_console(&mc_console);
509 return 0;
510}
511
512late_initcall(mc_add_console);
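The console registered above funnels kernel output through console_write(), which accumulates bytes in console_buf and pushes a packet to every attached mconsole client each time the buffer fills; a trailing partial chunk stays buffered until the next call (or until with_console() below drains it). A userspace sketch of that chunking logic, with a deliberately small buffer so the flushes are visible; send_to_client() is a hypothetical stand-in for mconsole_reply_len():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 16                     /* assumed small for the demo */

static char buf[BUF_SIZE];
static size_t buf_index;

static void send_to_client(const char *data, size_t len)
{
	printf("flush %zu bytes: %.*s\n", len, (int) len, data);
}

static void buffered_write(const char *string, size_t len)
{
	while (1) {
		size_t n = len < BUF_SIZE - buf_index ?
			len : BUF_SIZE - buf_index;

		memcpy(&buf[buf_index], string, n);
		buf_index += n;
		string += n;
		len -= n;
		if (len == 0)
			return;		/* partial chunk stays buffered */

		send_to_client(buf, buf_index);	/* buffer full - flush */
		buf_index = 0;
	}
}

int main(void)
{
	const char *msg = "hello mconsole world, this overflows the buffer";

	buffered_write(msg, strlen(msg));
	return 0;
}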
513
514static void with_console(struct mc_request *req, void (*proc)(void *),
515 void *arg)
516{
517 struct mconsole_entry entry;
518 unsigned long flags;
519
520 INIT_LIST_HEAD(&entry.list);
521 entry.request = *req;
522 list_add(&entry.list, &clients);
523 spin_lock_irqsave(&console_lock, flags);
524
525 (*proc)(arg);
526
527 mconsole_reply_len(req, console_buf, console_index, 0, 0);
528 console_index = 0;
529
530 spin_unlock_irqrestore(&console_lock, flags);
531 list_del(&entry.list);
532}
533
466#ifdef CONFIG_MAGIC_SYSRQ 534#ifdef CONFIG_MAGIC_SYSRQ
535static void sysrq_proc(void *arg)
536{
537 char *op = arg;
538
539 handle_sysrq(*op, &current->thread.regs, NULL);
540}
541
467void mconsole_sysrq(struct mc_request *req) 542void mconsole_sysrq(struct mc_request *req)
468{ 543{
469 char *ptr = req->request.data; 544 char *ptr = req->request.data;
@@ -471,8 +546,13 @@ void mconsole_sysrq(struct mc_request *req)
471 ptr += strlen("sysrq"); 546 ptr += strlen("sysrq");
472 while(isspace(*ptr)) ptr++; 547 while(isspace(*ptr)) ptr++;
473 548
474 mconsole_reply(req, "", 0, 0); 549 /* With 'b', the system will shut down without a chance to reply,
475 handle_sysrq(*ptr, &current->thread.regs, NULL); 550 * so in this case, we reply first.
551 */
552 if(*ptr == 'b')
553 mconsole_reply(req, "", 0, 0);
554
555 with_console(req, sysrq_proc, ptr);
476} 556}
477#else 557#else
478void mconsole_sysrq(struct mc_request *req) 558void mconsole_sysrq(struct mc_request *req)
@@ -481,6 +561,14 @@ void mconsole_sysrq(struct mc_request *req)
481} 561}
482#endif 562#endif
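The reordering in the hunk above matters because sysrq 'b' reboots the UML instance on the spot: if the acknowledgement waited until with_console() finished, the client would never receive it. A minimal sketch of the same "acknowledge destructive requests up front" pattern; ack() and the handler body are hypothetical stand-ins:

#include <stdio.h>

static void ack(const char *msg)
{
	printf("reply: %s\n", msg);
}

static void run_sysrq(char op)
{
	if (op == 'b') {
		ack("");	/* 'b' reboots: reply now or never */
		/* the real handler would never return past this point */
		return;
	}
	printf("handle_sysrq('%c')\n", op);	/* run, then reply */
	ack("done");
}

int main(void)
{
	run_sysrq('t');
	run_sysrq('b');
	return 0;
}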
483 563
564static void stack_proc(void *arg)
565{
566 struct task_struct *from = current, *to = arg;
567
568 to->thread.saved_task = from;
569 switch_to(from, to, from);
570}
571
484/* Mconsole stack trace 572/* Mconsole stack trace
485 * Added by Allan Graves, Jeff Dike 573 * Added by Allan Graves, Jeff Dike
486 * Dumps a stacks registers to the linux console. 574 * Dumps a stacks registers to the linux console.
@@ -488,37 +576,34 @@ void mconsole_sysrq(struct mc_request *req)
488 */ 576 */
489void do_stack(struct mc_request *req) 577void do_stack(struct mc_request *req)
490{ 578{
491 char *ptr = req->request.data; 579 char *ptr = req->request.data;
492 int pid_requested= -1; 580 int pid_requested= -1;
493 struct task_struct *from = NULL; 581 struct task_struct *from = NULL;
494 struct task_struct *to = NULL; 582 struct task_struct *to = NULL;
495 583
496 /* Would be nice: 584 /* Would be nice:
497 * 1) Send showregs output to mconsole. 585 * 1) Send showregs output to mconsole.
498 * 2) Add a way to stack dump all pids. 586 * 2) Add a way to stack dump all pids.
499 */ 587 */
500 588
501 ptr += strlen("stack"); 589 ptr += strlen("stack");
502 while(isspace(*ptr)) ptr++; 590 while(isspace(*ptr)) ptr++;
503
504 /* Should really check for multiple pids or reject bad args here */
505 /* What do the arguments in mconsole_reply mean? */
506 if(sscanf(ptr, "%d", &pid_requested) == 0){
507 mconsole_reply(req, "Please specify a pid", 1, 0);
508 return;
509 }
510 591
511 from = current; 592 /* Should really check for multiple pids or reject bad args here */
512 to = find_task_by_pid(pid_requested); 593 /* What do the arguments in mconsole_reply mean? */
594 if(sscanf(ptr, "%d", &pid_requested) == 0){
595 mconsole_reply(req, "Please specify a pid", 1, 0);
596 return;
597 }
513 598
514 if((to == NULL) || (pid_requested == 0)) { 599 from = current;
515 mconsole_reply(req, "Couldn't find that pid", 1, 0);
516 return;
517 }
518 to->thread.saved_task = current;
519 600
520 switch_to(from, to, from); 601 to = find_task_by_pid(pid_requested);
521 mconsole_reply(req, "Stack Dumped to console and message log", 0, 0); 602 if((to == NULL) || (pid_requested == 0)) {
603 mconsole_reply(req, "Couldn't find that pid", 1, 0);
604 return;
605 }
606 with_console(req, stack_proc, to);
522} 607}
523 608
524void mconsole_stack(struct mc_request *req) 609void mconsole_stack(struct mc_request *req)
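stack_proc() above leans on the scheduler: it records the caller in ->saved_task and switches straight into the target task, so the dump runs on that task's own stack before control comes back. A loose userspace analogue of "run this on another stack, then return" using swapcontext(); the names are illustrative, and ucontext is only an approximation of a kernel context switch:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, target_ctx;
static char target_stack[64 * 1024];

static void dump_on_target_stack(void)
{
	int marker;

	printf("running on the target stack, local at %p\n",
	       (void *) &marker);
	/* returning resumes main_ctx via uc_link, like ->saved_task */
}

int main(void)
{
	getcontext(&target_ctx);
	target_ctx.uc_stack.ss_sp = target_stack;
	target_ctx.uc_stack.ss_size = sizeof(target_stack);
	target_ctx.uc_link = &main_ctx;	/* where to resume afterwards */
	makecontext(&target_ctx, dump_on_target_stack, 0);

	swapcontext(&main_ctx, &target_ctx);	/* the switch_to moment */
	printf("back on the original stack\n");
	return 0;
}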
@@ -534,9 +619,9 @@ void mconsole_stack(struct mc_request *req)
534/* Changed by mconsole_setup, which is __setup, and called before SMP is 619/* Changed by mconsole_setup, which is __setup, and called before SMP is
535 * active. 620 * active.
536 */ 621 */
537static char *notify_socket = NULL; 622static char *notify_socket = NULL;
538 623
539int mconsole_init(void) 624static int mconsole_init(void)
540{ 625{
541 /* long to avoid size mismatch warnings from gcc */ 626 /* long to avoid size mismatch warnings from gcc */
542 long sock; 627 long sock;
@@ -563,16 +648,16 @@ int mconsole_init(void)
563 } 648 }
564 649
565 if(notify_socket != NULL){ 650 if(notify_socket != NULL){
566 notify_socket = uml_strdup(notify_socket); 651 notify_socket = kstrdup(notify_socket, GFP_KERNEL);
567 if(notify_socket != NULL) 652 if(notify_socket != NULL)
568 mconsole_notify(notify_socket, MCONSOLE_SOCKET, 653 mconsole_notify(notify_socket, MCONSOLE_SOCKET,
569 mconsole_socket_name, 654 mconsole_socket_name,
570 strlen(mconsole_socket_name) + 1); 655 strlen(mconsole_socket_name) + 1);
571 else printk(KERN_ERR "mconsole_setup failed to strdup " 656 else printk(KERN_ERR "mconsole_setup failed to strdup "
572 "string\n"); 657 "string\n");
573 } 658 }
574 659
575 printk("mconsole (version %d) initialized on %s\n", 660 printk("mconsole (version %d) initialized on %s\n",
576 MCONSOLE_VERSION, mconsole_socket_name); 661 MCONSOLE_VERSION, mconsole_socket_name);
577 return(0); 662 return(0);
578} 663}
@@ -585,7 +670,7 @@ static int write_proc_mconsole(struct file *file, const char __user *buffer,
585 char *buf; 670 char *buf;
586 671
587 buf = kmalloc(count + 1, GFP_KERNEL); 672 buf = kmalloc(count + 1, GFP_KERNEL);
588 if(buf == NULL) 673 if(buf == NULL)
589 return(-ENOMEM); 674 return(-ENOMEM);
590 675
591 if(copy_from_user(buf, buffer, count)){ 676 if(copy_from_user(buf, buffer, count)){
@@ -661,7 +746,7 @@ static int notify_panic(struct notifier_block *self, unsigned long unused1,
661 746
662 if(notify_socket == NULL) return(0); 747 if(notify_socket == NULL) return(0);
663 748
664 mconsole_notify(notify_socket, MCONSOLE_PANIC, message, 749 mconsole_notify(notify_socket, MCONSOLE_PANIC, message,
665 strlen(message) + 1); 750 strlen(message) + 1);
666 return(0); 751 return(0);
667} 752}
@@ -686,14 +771,3 @@ char *mconsole_notify_socket(void)
686} 771}
687 772
688EXPORT_SYMBOL(mconsole_notify_socket); 773EXPORT_SYMBOL(mconsole_notify_socket);
689
690/*
691 * Overrides for Emacs so that we follow Linus's tabbing style.
692 * Emacs will notice this stuff at the end of the file and automatically
693 * adjust the settings for this buffer only. This must remain at the end
694 * of the file.
695 * ---------------------------------------------------------------------------
696 * Local variables:
697 * c-file-style: "linux"
698 * End:
699 */
diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c
index 310c1f823f26..4b109fe7fff8 100644
--- a/arch/um/drivers/mconsole_user.c
+++ b/arch/um/drivers/mconsole_user.c
@@ -122,12 +122,12 @@ int mconsole_get_request(int fd, struct mc_request *req)
122 return(1); 122 return(1);
123} 123}
124 124
125int mconsole_reply(struct mc_request *req, char *str, int err, int more) 125int mconsole_reply_len(struct mc_request *req, const char *str, int total,
126 int err, int more)
126{ 127{
127 struct mconsole_reply reply; 128 struct mconsole_reply reply;
128 int total, len, n; 129 int len, n;
129 130
130 total = strlen(str);
131 do { 131 do {
132 reply.err = err; 132 reply.err = err;
133 133
@@ -155,6 +155,12 @@ int mconsole_reply(struct mc_request *req, char *str, int err, int more)
155 return(0); 155 return(0);
156} 156}
157 157
158int mconsole_reply(struct mc_request *req, const char *str, int err, int more)
159{
160 return mconsole_reply_len(req, str, strlen(str), err, more);
161}
162
163
158int mconsole_unlink_socket(void) 164int mconsole_unlink_socket(void)
159{ 165{
160 unlink(mconsole_socket_name); 166 unlink(mconsole_socket_name);
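mconsole_reply_len() is the generalized sender: it takes an explicit length instead of assuming a NUL-terminated string, and splits the payload into MCONSOLE_MAX_DATA-sized packets, flagging every packet except the last with "more". A sketch of that splitting loop, with a small assumed packet size and emit_packet() standing in for the datagram send:

#include <stdio.h>
#include <string.h>

#define MAX_DATA 8			/* assumed small for the demo */

static void emit_packet(const char *data, int len, int more)
{
	printf("packet len=%d more=%d: %.*s\n", len, more, len, data);
}

static void reply_len(const char *str, int total, int more)
{
	do {
		int len = total < MAX_DATA ? total : MAX_DATA;

		/* every chunk except the final one is flagged "more" */
		emit_packet(str, len, (len < total) || more);
		str += len;
		total -= len;
	} while (total > 0);
}

int main(void)
{
	const char *s = "Stack dumped to console and message log";

	reply_len(s, (int) strlen(s), 0);
	return 0;
}

The do/while mirrors the driver: a zero-length reply still produces one empty packet, which is how plain acknowledgements travel.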
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 84c73a300acb..fb1f9fb9b871 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -34,7 +34,7 @@
34#define DRIVER_NAME "uml-netdev" 34#define DRIVER_NAME "uml-netdev"
35 35
36static DEFINE_SPINLOCK(opened_lock); 36static DEFINE_SPINLOCK(opened_lock);
37LIST_HEAD(opened); 37static LIST_HEAD(opened);
38 38
39static int uml_net_rx(struct net_device *dev) 39static int uml_net_rx(struct net_device *dev)
40{ 40{
@@ -150,6 +150,7 @@ static int uml_net_close(struct net_device *dev)
150 if(lp->close != NULL) 150 if(lp->close != NULL)
151 (*lp->close)(lp->fd, &lp->user); 151 (*lp->close)(lp->fd, &lp->user);
152 lp->fd = -1; 152 lp->fd = -1;
153 list_del(&lp->list);
153 154
154 spin_unlock(&lp->lock); 155 spin_unlock(&lp->lock);
155 return 0; 156 return 0;
@@ -266,7 +267,7 @@ void uml_net_user_timer_expire(unsigned long _conn)
266} 267}
267 268
268static DEFINE_SPINLOCK(devices_lock); 269static DEFINE_SPINLOCK(devices_lock);
269static struct list_head devices = LIST_HEAD_INIT(devices); 270static LIST_HEAD(devices);
270 271
271static struct platform_driver uml_net_driver = { 272static struct platform_driver uml_net_driver = {
272 .driver = { 273 .driver = {
@@ -586,7 +587,7 @@ static int net_config(char *str)
586 err = eth_parse(str, &n, &str); 587 err = eth_parse(str, &n, &str);
587 if(err) return(err); 588 if(err) return(err);
588 589
589 str = uml_strdup(str); 590 str = kstrdup(str, GFP_KERNEL);
590 if(str == NULL){ 591 if(str == NULL){
591 printk(KERN_ERR "net_config failed to strdup string\n"); 592 printk(KERN_ERR "net_config failed to strdup string\n");
592 return(-1); 593 return(-1);
@@ -715,6 +716,7 @@ static void close_devices(void)
715 716
716 list_for_each(ele, &opened){ 717 list_for_each(ele, &opened){
717 lp = list_entry(ele, struct uml_net_private, list); 718 lp = list_entry(ele, struct uml_net_private, list);
719 free_irq(lp->dev->irq, lp->dev);
718 if((lp->close != NULL) && (lp->fd >= 0)) 720 if((lp->close != NULL) && (lp->fd >= 0))
719 (*lp->close)(lp->fd, &lp->user); 721 (*lp->close)(lp->fd, &lp->user);
720 if(lp->remove != NULL) (*lp->remove)(&lp->user); 722 if(lp->remove != NULL) (*lp->remove)(&lp->user);
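Two lifetime fixes sit in the net_kern.c hunks above: uml_net_close() now unlinks the device from the "opened" list so close_devices() cannot walk onto a stale entry, and close_devices() frees the IRQ before closing the descriptor it watches. The uml_strdup() -> kstrdup() conversion swaps a private helper for the generic kernel one; a minimal userspace model of kstrdup(), with the GFP flag dropped since malloc() has no allocation context:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *kstrdup_model(const char *s)
{
	size_t len;
	char *buf;

	if (s == NULL)		/* kstrdup also tolerates NULL input */
		return NULL;

	len = strlen(s) + 1;
	buf = malloc(len);
	if (buf != NULL)
		memcpy(buf, s, len);
	return buf;
}

int main(void)
{
	char *copy = kstrdup_model("eth0=tuntap");

	printf("%s\n", copy);
	free(copy);
	return 0;
}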
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 62e04ecfada8..a32ef55cb244 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -69,7 +69,7 @@ static struct line_driver driver = {
69 .name = "ssl", 69 .name = "ssl",
70 .config = ssl_config, 70 .config = ssl_config,
71 .get_config = ssl_get_config, 71 .get_config = ssl_get_config,
72 .id = line_id, 72 .id = line_id,
73 .remove = ssl_remove, 73 .remove = ssl_remove,
74 }, 74 },
75}; 75};
@@ -84,26 +84,23 @@ static struct lines lines = LINES_INIT(NR_PORTS);
84 84
85static int ssl_config(char *str) 85static int ssl_config(char *str)
86{ 86{
87 return(line_config(serial_lines, 87 return line_config(serial_lines, ARRAY_SIZE(serial_lines), str, &opts);
88 sizeof(serial_lines)/sizeof(serial_lines[0]), str));
89} 88}
90 89
91static int ssl_get_config(char *dev, char *str, int size, char **error_out) 90static int ssl_get_config(char *dev, char *str, int size, char **error_out)
92{ 91{
93 return(line_get_config(dev, serial_lines, 92 return line_get_config(dev, serial_lines, ARRAY_SIZE(serial_lines), str,
94 sizeof(serial_lines)/sizeof(serial_lines[0]), 93 size, error_out);
95 str, size, error_out));
96} 94}
97 95
98static int ssl_remove(int n) 96static int ssl_remove(int n)
99{ 97{
100 return line_remove(serial_lines, 98 return line_remove(serial_lines, ARRAY_SIZE(serial_lines), n);
101 sizeof(serial_lines)/sizeof(serial_lines[0]), n);
102} 99}
103 100
104int ssl_open(struct tty_struct *tty, struct file *filp) 101int ssl_open(struct tty_struct *tty, struct file *filp)
105{ 102{
106 return line_open(serial_lines, tty, &opts); 103 return line_open(serial_lines, tty);
107} 104}
108 105
109#if 0 106#if 0
@@ -112,16 +109,6 @@ static void ssl_flush_buffer(struct tty_struct *tty)
112 return; 109 return;
113} 110}
114 111
115static void ssl_throttle(struct tty_struct * tty)
116{
117 printk(KERN_ERR "Someone should implement ssl_throttle\n");
118}
119
120static void ssl_unthrottle(struct tty_struct * tty)
121{
122 printk(KERN_ERR "Someone should implement ssl_unthrottle\n");
123}
124
125static void ssl_stop(struct tty_struct *tty) 112static void ssl_stop(struct tty_struct *tty)
126{ 113{
127 printk(KERN_ERR "Someone should implement ssl_stop\n"); 114 printk(KERN_ERR "Someone should implement ssl_stop\n");
@@ -148,9 +135,9 @@ static struct tty_operations ssl_ops = {
148 .flush_chars = line_flush_chars, 135 .flush_chars = line_flush_chars,
149 .set_termios = line_set_termios, 136 .set_termios = line_set_termios,
150 .ioctl = line_ioctl, 137 .ioctl = line_ioctl,
138 .throttle = line_throttle,
139 .unthrottle = line_unthrottle,
151#if 0 140#if 0
152 .throttle = ssl_throttle,
153 .unthrottle = ssl_unthrottle,
154 .stop = ssl_stop, 141 .stop = ssl_stop,
155 .start = ssl_start, 142 .start = ssl_start,
156 .hangup = ssl_hangup, 143 .hangup = ssl_hangup,
@@ -183,7 +170,7 @@ static int ssl_console_setup(struct console *co, char *options)
183{ 170{
184 struct line *line = &serial_lines[co->index]; 171 struct line *line = &serial_lines[co->index];
185 172
186 return console_open_chan(line,co,&opts); 173 return console_open_chan(line, co, &opts);
187} 174}
188 175
189static struct console ssl_cons = { 176static struct console ssl_cons = {
@@ -199,12 +186,13 @@ int ssl_init(void)
199{ 186{
200 char *new_title; 187 char *new_title;
201 188
202 printk(KERN_INFO "Initializing software serial port version %d\n", 189 printk(KERN_INFO "Initializing software serial port version %d\n",
203 ssl_version); 190 ssl_version);
204 ssl_driver = line_register_devfs(&lines, &driver, &ssl_ops, 191 ssl_driver = line_register_devfs(&lines, &driver, &ssl_ops,
205 serial_lines, ARRAY_SIZE(serial_lines)); 192 serial_lines,
193 ARRAY_SIZE(serial_lines));
206 194
207 lines_init(serial_lines, sizeof(serial_lines)/sizeof(serial_lines[0])); 195 lines_init(serial_lines, ARRAY_SIZE(serial_lines), &opts);
208 196
209 new_title = add_xterm_umid(opts.xterm_title); 197 new_title = add_xterm_umid(opts.xterm_title);
210 if (new_title != NULL) 198 if (new_title != NULL)
@@ -212,7 +200,7 @@ int ssl_init(void)
212 200
213 ssl_init_done = 1; 201 ssl_init_done = 1;
214 register_console(&ssl_cons); 202 register_console(&ssl_cons);
215 return(0); 203 return 0;
216} 204}
217late_initcall(ssl_init); 205late_initcall(ssl_init);
218 206
@@ -220,16 +208,13 @@ static void ssl_exit(void)
220{ 208{
221 if (!ssl_init_done) 209 if (!ssl_init_done)
222 return; 210 return;
223 close_lines(serial_lines, 211 close_lines(serial_lines, ARRAY_SIZE(serial_lines));
224 sizeof(serial_lines)/sizeof(serial_lines[0]));
225} 212}
226__uml_exitcall(ssl_exit); 213__uml_exitcall(ssl_exit);
227 214
228static int ssl_chan_setup(char *str) 215static int ssl_chan_setup(char *str)
229{ 216{
230 return(line_setup(serial_lines, 217 return line_setup(serial_lines, ARRAY_SIZE(serial_lines), str);
231 sizeof(serial_lines)/sizeof(serial_lines[0]),
232 str, 1));
233} 218}
234 219
235__setup("ssl", ssl_chan_setup); 220__setup("ssl", ssl_chan_setup);
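Every sizeof(serial_lines)/sizeof(serial_lines[0]) expression in this file collapses into ARRAY_SIZE(), which states the intent once and cannot drift out of sync with the array name. A standalone version for reference; the kernel's macro additionally adds a compile-time check that its argument really is an array rather than a pointer:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
	int serial_lines[16];

	printf("%zu lines\n", ARRAY_SIZE(serial_lines));	/* prints 16 */
	return 0;
}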
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 005aa6333b6e..61db8b2fc83f 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -75,7 +75,7 @@ static struct line_driver driver = {
75 .name = "con", 75 .name = "con",
76 .config = con_config, 76 .config = con_config,
77 .get_config = con_get_config, 77 .get_config = con_get_config,
78 .id = line_id, 78 .id = line_id,
79 .remove = con_remove, 79 .remove = con_remove,
80 }, 80 },
81}; 81};
@@ -86,28 +86,27 @@ static struct lines console_lines = LINES_INIT(MAX_TTYS);
86 * individual elements are protected by individual semaphores. 86 * individual elements are protected by individual semaphores.
87 */ 87 */
88struct line vts[MAX_TTYS] = { LINE_INIT(CONFIG_CON_ZERO_CHAN, &driver), 88struct line vts[MAX_TTYS] = { LINE_INIT(CONFIG_CON_ZERO_CHAN, &driver),
89 [ 1 ... MAX_TTYS - 1 ] = 89 [ 1 ... MAX_TTYS - 1 ] =
90 LINE_INIT(CONFIG_CON_CHAN, &driver) }; 90 LINE_INIT(CONFIG_CON_CHAN, &driver) };
91 91
92static int con_config(char *str) 92static int con_config(char *str)
93{ 93{
94 return(line_config(vts, sizeof(vts)/sizeof(vts[0]), str)); 94 return line_config(vts, ARRAY_SIZE(vts), str, &opts);
95} 95}
96 96
97static int con_get_config(char *dev, char *str, int size, char **error_out) 97static int con_get_config(char *dev, char *str, int size, char **error_out)
98{ 98{
99 return(line_get_config(dev, vts, sizeof(vts)/sizeof(vts[0]), str, 99 return line_get_config(dev, vts, ARRAY_SIZE(vts), str, size, error_out);
100 size, error_out));
101} 100}
102 101
103static int con_remove(int n) 102static int con_remove(int n)
104{ 103{
105 return line_remove(vts, sizeof(vts)/sizeof(vts[0]), n); 104 return line_remove(vts, ARRAY_SIZE(vts), n);
106} 105}
107 106
108static int con_open(struct tty_struct *tty, struct file *filp) 107static int con_open(struct tty_struct *tty, struct file *filp)
109{ 108{
110 return line_open(vts, tty, &opts); 109 return line_open(vts, tty);
111} 110}
112 111
113static int con_init_done = 0; 112static int con_init_done = 0;
@@ -117,16 +116,18 @@ static struct tty_operations console_ops = {
117 .close = line_close, 116 .close = line_close,
118 .write = line_write, 117 .write = line_write,
119 .put_char = line_put_char, 118 .put_char = line_put_char,
120 .write_room = line_write_room, 119 .write_room = line_write_room,
121 .chars_in_buffer = line_chars_in_buffer, 120 .chars_in_buffer = line_chars_in_buffer,
122 .flush_buffer = line_flush_buffer, 121 .flush_buffer = line_flush_buffer,
123 .flush_chars = line_flush_chars, 122 .flush_chars = line_flush_chars,
124 .set_termios = line_set_termios, 123 .set_termios = line_set_termios,
125 .ioctl = line_ioctl, 124 .ioctl = line_ioctl,
125 .throttle = line_throttle,
126 .unthrottle = line_unthrottle,
126}; 127};
127 128
128static void uml_console_write(struct console *console, const char *string, 129static void uml_console_write(struct console *console, const char *string,
129 unsigned len) 130 unsigned len)
130{ 131{
131 struct line *line = &vts[console->index]; 132 struct line *line = &vts[console->index];
132 unsigned long flags; 133 unsigned long flags;
@@ -146,7 +147,7 @@ static int uml_console_setup(struct console *co, char *options)
146{ 147{
147 struct line *line = &vts[co->index]; 148 struct line *line = &vts[co->index];
148 149
149 return console_open_chan(line,co,&opts); 150 return console_open_chan(line, co, &opts);
150} 151}
151 152
152static struct console stdiocons = { 153static struct console stdiocons = {
@@ -156,7 +157,7 @@ static struct console stdiocons = {
156 .setup = uml_console_setup, 157 .setup = uml_console_setup,
157 .flags = CON_PRINTBUFFER, 158 .flags = CON_PRINTBUFFER,
158 .index = -1, 159 .index = -1,
159 .data = &vts, 160 .data = &vts,
160}; 161};
161 162
162int stdio_init(void) 163int stdio_init(void)
@@ -166,11 +167,11 @@ int stdio_init(void)
166 console_driver = line_register_devfs(&console_lines, &driver, 167 console_driver = line_register_devfs(&console_lines, &driver,
167 &console_ops, vts, 168 &console_ops, vts,
168 ARRAY_SIZE(vts)); 169 ARRAY_SIZE(vts));
169 if (NULL == console_driver) 170 if (console_driver == NULL)
170 return -1; 171 return -1;
171 printk(KERN_INFO "Initialized stdio console driver\n"); 172 printk(KERN_INFO "Initialized stdio console driver\n");
172 173
173 lines_init(vts, sizeof(vts)/sizeof(vts[0])); 174 lines_init(vts, ARRAY_SIZE(vts), &opts);
174 175
175 new_title = add_xterm_umid(opts.xterm_title); 176 new_title = add_xterm_umid(opts.xterm_title);
176 if(new_title != NULL) 177 if(new_title != NULL)
@@ -178,7 +179,7 @@ int stdio_init(void)
178 179
179 con_init_done = 1; 180 con_init_done = 1;
180 register_console(&stdiocons); 181 register_console(&stdiocons);
181 return(0); 182 return 0;
182} 183}
183late_initcall(stdio_init); 184late_initcall(stdio_init);
184 185
@@ -186,13 +187,13 @@ static void console_exit(void)
186{ 187{
187 if (!con_init_done) 188 if (!con_init_done)
188 return; 189 return;
189 close_lines(vts, sizeof(vts)/sizeof(vts[0])); 190 close_lines(vts, ARRAY_SIZE(vts));
190} 191}
191__uml_exitcall(console_exit); 192__uml_exitcall(console_exit);
192 193
193static int console_chan_setup(char *str) 194static int console_chan_setup(char *str)
194{ 195{
195 return(line_setup(vts, sizeof(vts)/sizeof(vts[0]), str, 1)); 196 return line_setup(vts, ARRAY_SIZE(vts), str);
196} 197}
197__setup("con", console_chan_setup); 198__setup("con", console_chan_setup);
198__channel_help(console_chan_setup, "con"); 199__channel_help(console_chan_setup, "con");
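Both console drivers now route .throttle/.unthrottle to the shared line_throttle()/line_unthrottle() instead of per-driver stubs. The underlying idea: when the tty layer can no longer absorb input, stop watching the input descriptor, and watch it again once the reader catches up. A sketch with epoll standing in for UML's IRQ deactivation and reactivation; the function name is illustrative:

#include <stdio.h>
#include <sys/epoll.h>

static void set_input_enabled(int epfd, int fd, int enabled)
{
	struct epoll_event ev = { .events = enabled ? EPOLLIN : 0,
				  .data.fd = fd };

	/* deactivate_chan()/reactivate_chan() play this role in UML */
	if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev) < 0)
		perror("epoll_ctl");
}

int main(void)
{
	int epfd = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = 0 };

	epoll_ctl(epfd, EPOLL_CTL_ADD, 0, &ev);	/* watch stdin */
	set_input_enabled(epfd, 0, 0);		/* throttle */
	set_input_enabled(epfd, 0, 1);		/* unthrottle */
	return 0;
}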
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 93898917cbe5..73f9652b2ee9 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -706,7 +706,7 @@ static int ubd_config(char *str)
706{ 706{
707 int n, err; 707 int n, err;
708 708
709 str = uml_strdup(str); 709 str = kstrdup(str, GFP_KERNEL);
710 if(str == NULL){ 710 if(str == NULL){
711 printk(KERN_ERR "ubd_config failed to strdup string\n"); 711 printk(KERN_ERR "ubd_config failed to strdup string\n");
712 return(1); 712 return(1);
@@ -1387,15 +1387,6 @@ int io_thread(void *arg)
1387 printk("io_thread - write failed, fd = %d, err = %d\n", 1387 printk("io_thread - write failed, fd = %d, err = %d\n",
1388 kernel_fd, -n); 1388 kernel_fd, -n);
1389 } 1389 }
1390}
1391 1390
1392/* 1391 return 0;
1393 * Overrides for Emacs so that we follow Linus's tabbing style. 1392}
1394 * Emacs will notice this stuff at the end of the file and automatically
1395 * adjust the settings for this buffer only. This must remain at the end
1396 * of the file.
1397 * ---------------------------------------------------------------------------
1398 * Local variables:
1399 * c-file-style: "linux"
1400 * End:
1401 */
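The io_thread() change above is the subtle one: the function is spawned with an int (*)(void *) signature, and falling off the end of a non-void function is undefined behaviour, so the nominally unreachable return 0 is required (write_sigio_thread gets the same fix below). The same shape in userspace, with pthreads standing in for UML's clone()-based helper threads:

#include <pthread.h>
#include <stdio.h>

static void *io_thread_model(void *arg)
{
	/* ...the request-servicing loop would live here... */
	printf("io thread servicing %s\n", (const char *) arg);
	return NULL;	/* return something even from an "endless" loop */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, io_thread_model, "ubd");
	pthread_join(t, NULL);
	return 0;
}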
diff --git a/arch/um/include/chan_kern.h b/arch/um/include/chan_kern.h
index da9a6717e7a4..1bb5e9d94270 100644
--- a/arch/um/include/chan_kern.h
+++ b/arch/um/include/chan_kern.h
@@ -14,21 +14,23 @@
14 14
15struct chan { 15struct chan {
16 struct list_head list; 16 struct list_head list;
17 struct list_head free_list;
18 struct line *line;
17 char *dev; 19 char *dev;
18 unsigned int primary:1; 20 unsigned int primary:1;
19 unsigned int input:1; 21 unsigned int input:1;
20 unsigned int output:1; 22 unsigned int output:1;
21 unsigned int opened:1; 23 unsigned int opened:1;
24 unsigned int enabled:1;
22 int fd; 25 int fd;
23 enum chan_init_pri pri;
24 struct chan_ops *ops; 26 struct chan_ops *ops;
25 void *data; 27 void *data;
26}; 28};
27 29
28extern void chan_interrupt(struct list_head *chans, struct work_struct *task, 30extern void chan_interrupt(struct list_head *chans, struct work_struct *task,
29 struct tty_struct *tty, int irq); 31 struct tty_struct *tty, int irq);
30extern int parse_chan_pair(char *str, struct list_head *chans, int pri, 32extern int parse_chan_pair(char *str, struct line *line, int device,
31 int device, struct chan_opts *opts); 33 struct chan_opts *opts);
32extern int open_chan(struct list_head *chans); 34extern int open_chan(struct list_head *chans);
33extern int write_chan(struct list_head *chans, const char *buf, int len, 35extern int write_chan(struct list_head *chans, const char *buf, int len,
34 int write_irq); 36 int write_irq);
@@ -36,9 +38,11 @@ extern int console_write_chan(struct list_head *chans, const char *buf,
36 int len); 38 int len);
37extern int console_open_chan(struct line *line, struct console *co, 39extern int console_open_chan(struct line *line, struct console *co,
38 struct chan_opts *opts); 40 struct chan_opts *opts);
39extern void close_chan(struct list_head *chans); 41extern void deactivate_chan(struct list_head *chans, int irq);
42extern void reactivate_chan(struct list_head *chans, int irq);
40extern void chan_enable_winch(struct list_head *chans, struct tty_struct *tty); 43extern void chan_enable_winch(struct list_head *chans, struct tty_struct *tty);
41extern void enable_chan(struct list_head *chans, struct tty_struct *tty); 44extern void enable_chan(struct line *line);
45extern void close_chan(struct list_head *chans, int delay_free_irq);
42extern int chan_window_size(struct list_head *chans, 46extern int chan_window_size(struct list_head *chans,
43 unsigned short *rows_out, 47 unsigned short *rows_out,
44 unsigned short *cols_out); 48 unsigned short *cols_out);
@@ -47,14 +51,3 @@ extern int chan_config_string(struct list_head *chans, char *str, int size,
47 char **error_out); 51 char **error_out);
48 52
49#endif 53#endif
50
51/*
52 * Overrides for Emacs so that we follow Linus's tabbing style.
53 * Emacs will notice this stuff at the end of the file and automatically
54 * adjust the settings for this buffer only. This must remain at the end
55 * of the file.
56 * ---------------------------------------------------------------------------
57 * Local variables:
58 * c-file-style: "linux"
59 * End:
60 */
diff --git a/arch/um/include/choose-mode.h b/arch/um/include/choose-mode.h
index f25fa83a5da6..b87b36a87d91 100644
--- a/arch/um/include/choose-mode.h
+++ b/arch/um/include/choose-mode.h
@@ -23,6 +23,9 @@ static inline void *__choose_mode(void *tt, void *skas) {
23 23
24#elif defined(UML_CONFIG_MODE_TT) 24#elif defined(UML_CONFIG_MODE_TT)
25#define CHOOSE_MODE(tt, skas) (tt) 25#define CHOOSE_MODE(tt, skas) (tt)
26
27#else
28#error CONFIG_MODE_SKAS and CONFIG_MODE_TT are both disabled
26#endif 29#endif
27 30
28#define CHOOSE_MODE_PROC(tt, skas, args...) \ 31#define CHOOSE_MODE_PROC(tt, skas, args...) \
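The new #else/#error arm turns a nonsensical configuration (both modes disabled) into a single clear compile-time failure instead of leaving CHOOSE_MODE undefined and erroring at every use site. The pattern in isolation; MODE_SKAS and MODE_TT are illustrative macros, so build with -DMODE_SKAS or -DMODE_TT, and omitting both reproduces the intended failure:

#include <stdio.h>

#if defined(MODE_SKAS)
#define CHOOSE_MODE(tt, skas) (skas)
#elif defined(MODE_TT)
#define CHOOSE_MODE(tt, skas) (tt)
#else
#error MODE_SKAS and MODE_TT are both disabled
#endif

int main(void)
{
	printf("%s\n", CHOOSE_MODE("tt mode", "skas mode"));
	return 0;
}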
diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
index f724b717213f..b61deb8b362a 100644
--- a/arch/um/include/irq_user.h
+++ b/arch/um/include/irq_user.h
@@ -18,19 +18,8 @@ extern int deactivate_all_fds(void);
18extern void forward_interrupts(int pid); 18extern void forward_interrupts(int pid);
19extern void init_irq_signals(int on_sigstack); 19extern void init_irq_signals(int on_sigstack);
20extern void forward_ipi(int fd, int pid); 20extern void forward_ipi(int fd, int pid);
21extern void free_irq_later(int irq, void *dev_id);
22extern int activate_ipi(int fd, int pid); 21extern int activate_ipi(int fd, int pid);
23extern unsigned long irq_lock(void); 22extern unsigned long irq_lock(void);
24extern void irq_unlock(unsigned long flags); 23extern void irq_unlock(unsigned long flags);
25#endif
26 24
27/* 25#endif
28 * Overrides for Emacs so that we follow Linus's tabbing style.
29 * Emacs will notice this stuff at the end of the file and automatically
30 * adjust the settings for this buffer only. This must remain at the end
31 * of the file.
32 * ---------------------------------------------------------------------------
33 * Local variables:
34 * c-file-style: "linux"
35 * End:
36 */
diff --git a/arch/um/include/kern.h b/arch/um/include/kern.h
index 1e3170768b5c..7d223beccbc0 100644
--- a/arch/um/include/kern.h
+++ b/arch/um/include/kern.h
@@ -17,7 +17,7 @@ extern int errno;
17 17
18extern int clone(int (*proc)(void *), void *sp, int flags, void *data); 18extern int clone(int (*proc)(void *), void *sp, int flags, void *data);
19extern int sleep(int); 19extern int sleep(int);
20extern int printf(char *fmt, ...); 20extern int printf(const char *fmt, ...);
21extern char *strerror(int errnum); 21extern char *strerror(int errnum);
22extern char *ptsname(int __fd); 22extern char *ptsname(int __fd);
23extern int munmap(void *, int); 23extern int munmap(void *, int);
@@ -35,15 +35,6 @@ extern int read(unsigned int, char *, int);
35extern int pipe(int *); 35extern int pipe(int *);
36extern int sched_yield(void); 36extern int sched_yield(void);
37extern int ptrace(int op, int pid, long addr, long data); 37extern int ptrace(int op, int pid, long addr, long data);
38
38#endif 39#endif
39 40
40/*
41 * Overrides for Emacs so that we follow Linus's tabbing style.
42 * Emacs will notice this stuff at the end of the file and automatically
43 * adjust the settings for this buffer only. This must remain at the end
44 * of the file.
45 * ---------------------------------------------------------------------------
46 * Local variables:
47 * c-file-style: "linux"
48 * End:
49 */
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 5323d22a6ca7..6f4d680dc1d4 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -32,11 +32,13 @@ struct line_driver {
32}; 32};
33 33
34struct line { 34struct line {
35 struct tty_struct *tty;
35 char *init_str; 36 char *init_str;
36 int init_pri; 37 int init_pri;
37 struct list_head chan_list; 38 struct list_head chan_list;
38 int valid; 39 int valid;
39 int count; 40 int count;
41 int throttled;
40 /*This lock is actually, mostly, local to*/ 42 /*This lock is actually, mostly, local to*/
41 spinlock_t lock; 43 spinlock_t lock;
42 44
@@ -58,14 +60,15 @@ struct line {
58#define LINE_INIT(str, d) \ 60#define LINE_INIT(str, d) \
59 { init_str : str, \ 61 { init_str : str, \
60 init_pri : INIT_STATIC, \ 62 init_pri : INIT_STATIC, \
61 chan_list : { }, \
62 valid : 1, \ 63 valid : 1, \
64 throttled : 0, \
65 lock : SPIN_LOCK_UNLOCKED, \
63 buffer : NULL, \ 66 buffer : NULL, \
64 head : NULL, \ 67 head : NULL, \
65 tail : NULL, \ 68 tail : NULL, \
66 sigio : 0, \ 69 sigio : 0, \
67 driver : d, \ 70 driver : d, \
68 have_irq : 0 } 71 have_irq : 0 }
69 72
70struct lines { 73struct lines {
71 int num; 74 int num;
@@ -74,11 +77,11 @@ struct lines {
74#define LINES_INIT(n) { num : n } 77#define LINES_INIT(n) { num : n }
75 78
76extern void line_close(struct tty_struct *tty, struct file * filp); 79extern void line_close(struct tty_struct *tty, struct file * filp);
77extern int line_open(struct line *lines, struct tty_struct *tty, 80extern int line_open(struct line *lines, struct tty_struct *tty);
78 struct chan_opts *opts); 81extern int line_setup(struct line *lines, unsigned int sizeof_lines,
79extern int line_setup(struct line *lines, unsigned int sizeof_lines, char *init, 82 char *init);
80 int all_allowed); 83extern int line_write(struct tty_struct *tty, const unsigned char *buf,
81extern int line_write(struct tty_struct *tty, const unsigned char *buf, int len); 84 int len);
82extern void line_put_char(struct tty_struct *tty, unsigned char ch); 85extern void line_put_char(struct tty_struct *tty, unsigned char ch);
83extern void line_set_termios(struct tty_struct *tty, struct termios * old); 86extern void line_set_termios(struct tty_struct *tty, struct termios * old);
84extern int line_chars_in_buffer(struct tty_struct *tty); 87extern int line_chars_in_buffer(struct tty_struct *tty);
@@ -87,23 +90,27 @@ extern void line_flush_chars(struct tty_struct *tty);
87extern int line_write_room(struct tty_struct *tty); 90extern int line_write_room(struct tty_struct *tty);
88extern int line_ioctl(struct tty_struct *tty, struct file * file, 91extern int line_ioctl(struct tty_struct *tty, struct file * file,
89 unsigned int cmd, unsigned long arg); 92 unsigned int cmd, unsigned long arg);
93extern void line_throttle(struct tty_struct *tty);
94extern void line_unthrottle(struct tty_struct *tty);
90 95
91extern char *add_xterm_umid(char *base); 96extern char *add_xterm_umid(char *base);
92extern int line_setup_irq(int fd, int input, int output, struct tty_struct *tty); 97extern int line_setup_irq(int fd, int input, int output, struct line *line,
98 void *data);
93extern void line_close_chan(struct line *line); 99extern void line_close_chan(struct line *line);
94extern void line_disable(struct tty_struct *tty, int current_irq); 100extern struct tty_driver * line_register_devfs(struct lines *set,
95extern struct tty_driver * line_register_devfs(struct lines *set, 101 struct line_driver *line_driver,
96 struct line_driver *line_driver,
97 struct tty_operations *driver, 102 struct tty_operations *driver,
98 struct line *lines, 103 struct line *lines,
99 int nlines); 104 int nlines);
100extern void lines_init(struct line *lines, int nlines); 105extern void lines_init(struct line *lines, int nlines, struct chan_opts *opts);
101extern void close_lines(struct line *lines, int nlines); 106extern void close_lines(struct line *lines, int nlines);
102 107
103extern int line_config(struct line *lines, unsigned int sizeof_lines, char *str); 108extern int line_config(struct line *lines, unsigned int sizeof_lines,
109 char *str, struct chan_opts *opts);
104extern int line_id(char **str, int *start_out, int *end_out); 110extern int line_id(char **str, int *start_out, int *end_out);
105extern int line_remove(struct line *lines, unsigned int sizeof_lines, int n); 111extern int line_remove(struct line *lines, unsigned int sizeof_lines, int n);
106extern int line_get_config(char *dev, struct line *lines, unsigned int sizeof_lines, char *str, 112extern int line_get_config(char *dev, struct line *lines,
113 unsigned int sizeof_lines, char *str,
107 int size, char **error_out); 114 int size, char **error_out);
108 115
109#endif 116#endif
diff --git a/arch/um/include/mconsole.h b/arch/um/include/mconsole.h
index b1b512f47035..58f67d391105 100644
--- a/arch/um/include/mconsole.h
+++ b/arch/um/include/mconsole.h
@@ -32,7 +32,7 @@ struct mconsole_reply {
32 32
33struct mconsole_notify { 33struct mconsole_notify {
34 u32 magic; 34 u32 magic;
35 u32 version; 35 u32 version;
36 enum { MCONSOLE_SOCKET, MCONSOLE_PANIC, MCONSOLE_HANG, 36 enum { MCONSOLE_SOCKET, MCONSOLE_PANIC, MCONSOLE_HANG,
37 MCONSOLE_USER_NOTIFY } type; 37 MCONSOLE_USER_NOTIFY } type;
38 u32 len; 38 u32 len;
@@ -66,7 +66,9 @@ struct mc_request
66extern char mconsole_socket_name[]; 66extern char mconsole_socket_name[];
67 67
68extern int mconsole_unlink_socket(void); 68extern int mconsole_unlink_socket(void);
69extern int mconsole_reply(struct mc_request *req, char *reply, int err, 69extern int mconsole_reply_len(struct mc_request *req, const char *reply,
70 int len, int err, int more);
71extern int mconsole_reply(struct mc_request *req, const char *str, int err,
70 int more); 72 int more);
71 73
72extern void mconsole_version(struct mc_request *req); 74extern void mconsole_version(struct mc_request *req);
@@ -84,7 +86,7 @@ extern void mconsole_proc(struct mc_request *req);
84extern void mconsole_stack(struct mc_request *req); 86extern void mconsole_stack(struct mc_request *req);
85 87
86extern int mconsole_get_request(int fd, struct mc_request *req); 88extern int mconsole_get_request(int fd, struct mc_request *req);
87extern int mconsole_notify(char *sock_name, int type, const void *data, 89extern int mconsole_notify(char *sock_name, int type, const void *data,
88 int len); 90 int len);
89extern char *mconsole_notify_socket(void); 91extern char *mconsole_notify_socket(void);
90extern void lock_notify(void); 92extern void lock_notify(void);
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index 2cccfa5b8ab5..c279ee6d89e4 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -213,15 +213,10 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
213 int stack_order); 213 int stack_order);
214extern int helper_wait(int pid); 214extern int helper_wait(int pid);
215 215
216#endif 216/* umid.c */
217 217
218/* 218extern int umid_file_name(char *name, char *buf, int len);
219 * Overrides for Emacs so that we follow Linus's tabbing style. 219extern int set_umid(char *name);
220 * Emacs will notice this stuff at the end of the file and automatically 220extern char *get_umid(void);
221 * adjust the settings for this buffer only. This must remain at the end 221
222 * of the file. 222#endif
223 * ---------------------------------------------------------------------------
224 * Local variables:
225 * c-file-style: "linux"
226 * End:
227 */
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h
index bb505e01d994..b9984003e603 100644
--- a/arch/um/include/user_util.h
+++ b/arch/um/include/user_util.h
@@ -64,7 +64,6 @@ extern void setup_machinename(char *machine_out);
64extern void setup_hostinfo(void); 64extern void setup_hostinfo(void);
65extern void do_exec(int old_pid, int new_pid); 65extern void do_exec(int old_pid, int new_pid);
66extern void tracer_panic(char *msg, ...); 66extern void tracer_panic(char *msg, ...);
67extern char *get_umid(int only_if_set);
68extern void do_longjmp(void *p, int val); 67extern void do_longjmp(void *p, int val);
69extern int detach(int pid, int sig); 68extern int detach(int pid, int sig);
70extern int attach(int pid); 69extern int attach(int pid);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 3de9d21e36bf..6f7700593a6f 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -10,8 +10,8 @@ obj-y = config.o exec_kern.o exitcode.o \
10 init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \ 10 init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \
11 process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \ 11 process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \
12 signal_kern.o signal_user.o smp.o syscall_kern.o sysrq.o time.o \ 12 signal_kern.o signal_user.o smp.o syscall_kern.o sysrq.o time.o \
13 time_kern.o tlb.o trap_kern.o trap_user.o uaccess.o um_arch.o \ 13 time_kern.o tlb.o trap_kern.o trap_user.o uaccess.o um_arch.o umid.o \
14 umid.o user_util.o 14 user_util.o
15 15
16obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o 16obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
17obj-$(CONFIG_GPROF) += gprof_syms.o 17obj-$(CONFIG_GPROF) += gprof_syms.o
@@ -24,7 +24,7 @@ obj-$(CONFIG_MODE_SKAS) += skas/
24 24
25user-objs-$(CONFIG_TTY_LOG) += tty_log.o 25user-objs-$(CONFIG_TTY_LOG) += tty_log.o
26 26
27USER_OBJS := $(user-objs-y) config.o time.o tty_log.o umid.o user_util.o 27USER_OBJS := $(user-objs-y) config.o time.o tty_log.o user_util.o
28 28
29include arch/um/scripts/Makefile.rules 29include arch/um/scripts/Makefile.rules
30 30
diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c
index c3ccaf24f3e0..50a2aa35cda9 100644
--- a/arch/um/kernel/irq_user.c
+++ b/arch/um/kernel/irq_user.c
@@ -29,7 +29,6 @@ struct irq_fd {
29 int pid; 29 int pid;
30 int events; 30 int events;
31 int current_events; 31 int current_events;
32 int freed;
33}; 32};
34 33
35static struct irq_fd *active_fds = NULL; 34static struct irq_fd *active_fds = NULL;
@@ -41,9 +40,11 @@ static int pollfds_size = 0;
41 40
42extern int io_count, intr_count; 41extern int io_count, intr_count;
43 42
43extern void free_irqs(void);
44
44void sigio_handler(int sig, union uml_pt_regs *regs) 45void sigio_handler(int sig, union uml_pt_regs *regs)
45{ 46{
46 struct irq_fd *irq_fd, *next; 47 struct irq_fd *irq_fd;
47 int i, n; 48 int i, n;
48 49
49 if(smp_sigio_handler()) return; 50 if(smp_sigio_handler()) return;
@@ -66,29 +67,15 @@ void sigio_handler(int sig, union uml_pt_regs *regs)
66 irq_fd = irq_fd->next; 67 irq_fd = irq_fd->next;
67 } 68 }
68 69
69 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = next){ 70 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
70 next = irq_fd->next;
71 if(irq_fd->current_events != 0){ 71 if(irq_fd->current_events != 0){
72 irq_fd->current_events = 0; 72 irq_fd->current_events = 0;
73 do_IRQ(irq_fd->irq, regs); 73 do_IRQ(irq_fd->irq, regs);
74
75 /* This is here because the next irq may be
76 * freed in the handler. If a console goes
77 * away, both the read and write irqs will be
78 * freed. After do_IRQ, ->next will point to
79 * a good IRQ.
80 * Irqs can't be freed inside their handlers,
81 * so the next best thing is to have them
82 * marked as needing freeing, so that they
83 * can be freed here.
84 */
85 next = irq_fd->next;
86 if(irq_fd->freed){
87 free_irq(irq_fd->irq, irq_fd->id);
88 }
89 } 74 }
90 } 75 }
91 } 76 }
77
78 free_irqs();
92} 79}
93 80
94int activate_ipi(int fd, int pid) 81int activate_ipi(int fd, int pid)
@@ -136,8 +123,7 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
136 .irq = irq, 123 .irq = irq,
137 .pid = pid, 124 .pid = pid,
138 .events = events, 125 .events = events,
139 .current_events = 0, 126 .current_events = 0 } );
140 .freed = 0 } );
141 127
142 /* Critical section - locked by a spinlock because this stuff can 128 /* Critical section - locked by a spinlock because this stuff can
143 * be changed from interrupt handlers. The stuff above is done 129 * be changed from interrupt handlers. The stuff above is done
@@ -313,26 +299,6 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
313 return(irq); 299 return(irq);
314} 300}
315 301
316void free_irq_later(int irq, void *dev_id)
317{
318 struct irq_fd *irq_fd;
319 unsigned long flags;
320
321 flags = irq_lock();
322 for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
323 if((irq_fd->irq == irq) && (irq_fd->id == dev_id))
324 break;
325 }
326 if(irq_fd == NULL){
327 printk("free_irq_later found no irq, irq = %d, "
328 "dev_id = 0x%p\n", irq, dev_id);
329 goto out;
330 }
331 irq_fd->freed = 1;
332 out:
333 irq_unlock(flags);
334}
335
336void reactivate_fd(int fd, int irqnum) 302void reactivate_fd(int fd, int irqnum)
337{ 303{
338 struct irq_fd *irq; 304 struct irq_fd *irq;
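This rewrite retires the per-entry "freed" flag: since an IRQ cannot be freed from inside its own handler, handlers now queue the release, and a single free_irqs() pass (declared extern above) reaps everything after the walk over active_fds, so the list is never mutated mid-traversal. A sketch of that shape - mark during the walk, unlink only afterwards - with a flag standing in for the separate to-free list the real code keeps:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	int doomed;		/* set by the "handler", acted on later */
	struct node *next;
};

static void reap(struct node **list)
{
	struct node **p = list;

	while (*p != NULL) {
		struct node *n = *p;

		if (n->doomed) {
			*p = n->next;	/* unlink after the walk is done */
			printf("freed irq %d\n", n->id);
			free(n);
		} else {
			p = &n->next;
		}
	}
}

int main(void)
{
	struct node *list = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (n == NULL)
			return 1;
		n->id = i;
		n->doomed = (i == 1);	/* handler 1 asked to be freed */
		n->next = list;
		list = n;
	}
	reap(&list);
	return 0;
}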
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 34b54a3e2132..651abf255bc5 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -324,10 +324,6 @@ int user_context(unsigned long sp)
324 return(stack != (unsigned long) current_thread); 324 return(stack != (unsigned long) current_thread);
325} 325}
326 326
327extern void remove_umid_dir(void);
328
329__uml_exitcall(remove_umid_dir);
330
331extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end; 327extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
332 328
333void do_uml_exitcalls(void) 329void do_uml_exitcalls(void)
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c
index 48b1f644b9a6..62e5cfdf2188 100644
--- a/arch/um/kernel/sigio_user.c
+++ b/arch/um/kernel/sigio_user.c
@@ -216,6 +216,8 @@ static int write_sigio_thread(void *unused)
216 "err = %d\n", -n); 216 "err = %d\n", -n);
217 } 217 }
218 } 218 }
219
220 return 0;
219} 221}
220 222
221static int need_poll(int n) 223static int need_poll(int n)
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 142a9493912b..26626b2b9172 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -146,8 +146,8 @@ void set_cmdline(char *cmd)
146 146
147 if(CHOOSE_MODE(honeypot, 0)) return; 147 if(CHOOSE_MODE(honeypot, 0)) return;
148 148
149 umid = get_umid(1); 149 umid = get_umid();
150 if(umid != NULL){ 150 if(*umid != '\0'){
151 snprintf(argv1_begin, 151 snprintf(argv1_begin,
152 (argv1_end - argv1_begin) * sizeof(*ptr), 152 (argv1_end - argv1_begin) * sizeof(*ptr),
153 "(%s) ", umid); 153 "(%s) ", umid);
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index 0b21d59ba0cd..4eaee823bfd2 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -3,61 +3,30 @@
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6#include <stdio.h> 6#include "asm/errno.h"
7#include <unistd.h>
8#include <errno.h>
9#include <string.h>
10#include <stdlib.h>
11#include <dirent.h>
12#include <signal.h>
13#include <sys/stat.h>
14#include <sys/param.h>
15#include "user.h"
16#include "umid.h"
17#include "init.h" 7#include "init.h"
18#include "os.h" 8#include "os.h"
19#include "user_util.h" 9#include "kern.h"
20#include "choose-mode.h" 10#include "linux/kernel.h"
21 11
22#define UMID_LEN 64 12/* Changed by set_umid_arg */
23#define UML_DIR "~/.uml/"
24
25/* Changed by set_umid and make_umid, which are run early in boot */
26static char umid[UMID_LEN] = { 0 };
27
28/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
29static char *uml_dir = UML_DIR;
30
31/* Changed by set_umid */
32static int umid_is_random = 1;
33static int umid_inited = 0; 13static int umid_inited = 0;
34/* Have we created the files? Should we remove them? */
35static int umid_owned = 0;
36 14
37static int make_umid(int (*printer)(const char *fmt, ...)); 15static int __init set_umid_arg(char *name, int *add)
38
39static int __init set_umid(char *name, int is_random,
40 int (*printer)(const char *fmt, ...))
41{ 16{
42 if(umid_inited){ 17 int err;
43 (*printer)("Unique machine name can't be set twice\n");
44 return(-1);
45 }
46 18
47 if(strlen(name) > UMID_LEN - 1) 19 if(umid_inited)
48 (*printer)("Unique machine name is being truncated to %d " 20 return 0;
49 "characters\n", UMID_LEN);
50 strlcpy(umid, name, sizeof(umid));
51 21
52 umid_is_random = is_random;
53 umid_inited = 1;
54 return 0;
55}
56
57static int __init set_umid_arg(char *name, int *add)
58{
59 *add = 0; 22 *add = 0;
60 return(set_umid(name, 0, printf)); 23 err = set_umid(name);
24 if(err == -EEXIST)
25 printf("umid '%s' already in use\n", name);
26 else if(!err)
27 umid_inited = 1;
28
29 return 0;
61} 30}
62 31
63__uml_setup("umid=", set_umid_arg, 32__uml_setup("umid=", set_umid_arg,
@@ -66,265 +35,3 @@ __uml_setup("umid=", set_umid_arg,
66" is used for naming the pid file and management console socket.\n\n" 35" is used for naming the pid file and management console socket.\n\n"
67); 36);
68 37
69int __init umid_file_name(char *name, char *buf, int len)
70{
71 int n;
72
73 if(!umid_inited && make_umid(printk)) return(-1);
74
75 n = strlen(uml_dir) + strlen(umid) + strlen(name) + 1;
76 if(n > len){
77 printk("umid_file_name : buffer too short\n");
78 return(-1);
79 }
80
81 sprintf(buf, "%s%s/%s", uml_dir, umid, name);
82 return(0);
83}
84
85extern int tracing_pid;
86
87static void __init create_pid_file(void)
88{
89 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
90 char pid[sizeof("nnnnn\0")];
91 int fd, n;
92
93 if(umid_file_name("pid", file, sizeof(file)))
94 return;
95
96 fd = os_open_file(file, of_create(of_excl(of_rdwr(OPENFLAGS()))),
97 0644);
98 if(fd < 0){
99 printf("Open of machine pid file \"%s\" failed: %s\n",
100 file, strerror(-fd));
101 return;
102 }
103
104 sprintf(pid, "%d\n", os_getpid());
105 n = os_write_file(fd, pid, strlen(pid));
106 if(n != strlen(pid))
107 printf("Write of pid file failed - err = %d\n", -n);
108 os_close_file(fd);
109}
110
111static int actually_do_remove(char *dir)
112{
113 DIR *directory;
114 struct dirent *ent;
115 int len;
116 char file[256];
117
118 directory = opendir(dir);
119 if(directory == NULL){
120 printk("actually_do_remove : couldn't open directory '%s', "
121 "errno = %d\n", dir, errno);
122 return(1);
123 }
124 while((ent = readdir(directory)) != NULL){
125 if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
126 continue;
127 len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
128 if(len > sizeof(file)){
129 printk("Not deleting '%s' from '%s' - name too long\n",
130 ent->d_name, dir);
131 continue;
132 }
133 sprintf(file, "%s/%s", dir, ent->d_name);
134 if(unlink(file) < 0){
135 printk("actually_do_remove : couldn't remove '%s' "
136 "from '%s', errno = %d\n", ent->d_name, dir,
137 errno);
138 return(1);
139 }
140 }
141 if(rmdir(dir) < 0){
142 printk("actually_do_remove : couldn't rmdir '%s', "
143 "errno = %d\n", dir, errno);
144 return(1);
145 }
146 return(0);
147}
148
149void remove_umid_dir(void)
150{
151 char dir[strlen(uml_dir) + UMID_LEN + 1];
152 if (!umid_owned)
153 return;
154
155 sprintf(dir, "%s%s", uml_dir, umid);
156 actually_do_remove(dir);
157}
158
159char *get_umid(int only_if_set)
160{
161 if(only_if_set && umid_is_random)
162 return NULL;
163 return umid;
164}
165
166static int not_dead_yet(char *dir)
167{
168 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
169 char pid[sizeof("nnnnn\0")], *end;
170 int dead, fd, p, n;
171
172 sprintf(file, "%s/pid", dir);
173 dead = 0;
174 fd = os_open_file(file, of_read(OPENFLAGS()), 0);
175 if(fd < 0){
176 if(fd != -ENOENT){
177 printk("not_dead_yet : couldn't open pid file '%s', "
178 "err = %d\n", file, -fd);
179 return(1);
180 }
181 dead = 1;
182 }
183 if(fd > 0){
184 n = os_read_file(fd, pid, sizeof(pid));
185 if(n < 0){
186 printk("not_dead_yet : couldn't read pid file '%s', "
187 "err = %d\n", file, -n);
188 return(1);
189 }
190 p = strtoul(pid, &end, 0);
191 if(end == pid){
192 printk("not_dead_yet : couldn't parse pid file '%s', "
193 "errno = %d\n", file, errno);
194 dead = 1;
195 }
196 if(((kill(p, 0) < 0) && (errno == ESRCH)) ||
197 (p == CHOOSE_MODE(tracing_pid, os_getpid())))
198 dead = 1;
199 }
200 if(!dead)
201 return(1);
202 return(actually_do_remove(dir));
203}
204
205static int __init set_uml_dir(char *name, int *add)
206{
207 if((strlen(name) > 0) && (name[strlen(name) - 1] != '/')){
208 uml_dir = malloc(strlen(name) + 2);
209 if(uml_dir == NULL){
210 printf("Failed to malloc uml_dir - error = %d\n",
211 errno);
212 uml_dir = name;
213 /* Return 0 here because do_initcalls doesn't look at
214 * the return value.
215 */
216 return(0);
217 }
218 sprintf(uml_dir, "%s/", name);
219 }
220 else uml_dir = name;
221 return(0);
222}
223
224static int __init make_uml_dir(void)
225{
226 char dir[MAXPATHLEN + 1] = { '\0' };
227 int len;
228
229 if(*uml_dir == '~'){
230 char *home = getenv("HOME");
231
232 if(home == NULL){
233 printf("make_uml_dir : no value in environment for "
234 "$HOME\n");
235 exit(1);
236 }
237 strlcpy(dir, home, sizeof(dir));
238 uml_dir++;
239 }
240 strlcat(dir, uml_dir, sizeof(dir));
241 len = strlen(dir);
242 if (len > 0 && dir[len - 1] != '/')
243 strlcat(dir, "/", sizeof(dir));
244
245 uml_dir = malloc(strlen(dir) + 1);
246 if (uml_dir == NULL) {
247 printf("make_uml_dir : malloc failed, errno = %d\n", errno);
248 exit(1);
249 }
250 strcpy(uml_dir, dir);
251
252 if((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)){
253 printf("Failed to mkdir %s: %s\n", uml_dir, strerror(errno));
254 return(-1);
255 }
256 return 0;
257}
258
259static int __init make_umid(int (*printer)(const char *fmt, ...))
260{
261 int fd, err;
262 char tmp[strlen(uml_dir) + UMID_LEN + 1];
263
264 strlcpy(tmp, uml_dir, sizeof(tmp));
265
266 if(!umid_inited){
267 strcat(tmp, "XXXXXX");
268 fd = mkstemp(tmp);
269 if(fd < 0){
270 (*printer)("make_umid - mkstemp(%s) failed: %s\n",
271 tmp,strerror(errno));
272 return(1);
273 }
274
275 os_close_file(fd);
276 /* There's a nice tiny little race between this unlink and
277 * the mkdir below. It'd be nice if there were a mkstemp
278 * for directories.
279 */
280 unlink(tmp);
281 set_umid(&tmp[strlen(uml_dir)], 1, printer);
282 }
283
284 sprintf(tmp, "%s%s", uml_dir, umid);
285
286 err = mkdir(tmp, 0777);
287 if(err < 0){
288 if(errno == EEXIST){
289 if(not_dead_yet(tmp)){
290 (*printer)("umid '%s' is in use\n", umid);
291 umid_owned = 0;
292 return(-1);
293 }
294 err = mkdir(tmp, 0777);
295 }
296 }
297 if(err < 0){
298 (*printer)("Failed to create %s - errno = %d\n", umid, errno);
299 return(-1);
300 }
301
302 umid_owned = 1;
303 return 0;
304}
305
306__uml_setup("uml_dir=", set_uml_dir,
307"uml_dir=<directory>\n"
308" The location to place the pid and umid files.\n\n"
309);
310
311static int __init make_umid_setup(void)
312{
313 /* one function with the ordering we need ... */
314 make_uml_dir();
315 make_umid(printf);
316 create_pid_file();
317 return 0;
318}
319__uml_postsetup(make_umid_setup);
320
321/*
322 * Overrides for Emacs so that we follow Linus's tabbing style.
323 * Emacs will notice this stuff at the end of the file and automatically
324 * adjust the settings for this buffer only. This must remain at the end
325 * of the file.
326 * ---------------------------------------------------------------------------
327 * Local variables:
328 * c-file-style: "linux"
329 * End:
330 */
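Almost all of this file - directory creation, the pid file, reclaiming a stale umid - moves to the new arch/um/os-Linux/umid.c added to the Makefiles below, leaving the kernel side with just the umid= command-line hook against the exported set_umid()/get_umid()/umid_file_name(). The liveness probe the removed not_dead_yet() relied on is the classic kill(pid, 0) trick, shown standalone; signal 0 performs the existence and permission checks without delivering anything:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

static int pid_is_dead(pid_t p)
{
	/* ESRCH: no such process. EPERM would mean the pid exists but
	 * belongs to someone else, i.e. it is alive. */
	return kill(p, 0) < 0 && errno == ESRCH;
}

int main(void)
{
	printf("pid 1 dead? %d\n", pid_is_dead(1));	/* init: 0 */
	printf("pid 999999 dead? %d\n", pid_is_dead(999999));
	return 0;
}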
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index b83ac8e21c35..11e30b13e318 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -4,11 +4,11 @@
4# 4#
5 5
6obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ 6obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
7 start_up.o time.o tt.o tty.o uaccess.o user_syms.o drivers/ \ 7 start_up.o time.o tt.o tty.o uaccess.o umid.o user_syms.o drivers/ \
8 sys-$(SUBARCH)/ 8 sys-$(SUBARCH)/
9 9
10USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \ 10USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
11 start_up.o time.o tt.o tty.o uaccess.o 11 start_up.o time.o tt.o tty.o uaccess.o umid.o
12 12
13elf_aux.o: $(ARCH_DIR)/kernel-offsets.h 13elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
14CFLAGS_elf_aux.o += -I$(objtree)/arch/um 14CFLAGS_elf_aux.o += -I$(objtree)/arch/um
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c
index ffa759addd3c..f897140cc4ae 100644
--- a/arch/um/os-Linux/aio.c
+++ b/arch/um/os-Linux/aio.c
@@ -16,12 +16,12 @@
16#include "mode.h" 16#include "mode.h"
17 17
18struct aio_thread_req { 18struct aio_thread_req {
19 enum aio_type type; 19 enum aio_type type;
20 int io_fd; 20 int io_fd;
21 unsigned long long offset; 21 unsigned long long offset;
22 char *buf; 22 char *buf;
23 int len; 23 int len;
24 struct aio_context *aio; 24 struct aio_context *aio;
25}; 25};
26 26
27static int aio_req_fd_r = -1; 27static int aio_req_fd_r = -1;
@@ -38,18 +38,18 @@ static int aio_req_fd_w = -1;
38 38
39static long io_setup(int n, aio_context_t *ctxp) 39static long io_setup(int n, aio_context_t *ctxp)
40{ 40{
41 return syscall(__NR_io_setup, n, ctxp); 41 return syscall(__NR_io_setup, n, ctxp);
42} 42}
43 43
44static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp) 44static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
45{ 45{
46 return syscall(__NR_io_submit, ctx, nr, iocbpp); 46 return syscall(__NR_io_submit, ctx, nr, iocbpp);
47} 47}
48 48
49static long io_getevents(aio_context_t ctx_id, long min_nr, long nr, 49static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
50 struct io_event *events, struct timespec *timeout) 50 struct io_event *events, struct timespec *timeout)
51{ 51{
52 return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout); 52 return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
53} 53}
54 54
55#endif 55#endif
@@ -66,243 +66,245 @@ static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
66 */ 66 */
67 67
68static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf, 68static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
69 int len, unsigned long long offset, struct aio_context *aio) 69 int len, unsigned long long offset, struct aio_context *aio)
70{ 70{
71 struct iocb iocb, *iocbp = &iocb; 71 struct iocb iocb, *iocbp = &iocb;
72 char c; 72 char c;
73 int err; 73 int err;
74 74
75 iocb = ((struct iocb) { .aio_data = (unsigned long) aio, 75 iocb = ((struct iocb) { .aio_data = (unsigned long) aio,
76 .aio_reqprio = 0, 76 .aio_reqprio = 0,
77 .aio_fildes = fd, 77 .aio_fildes = fd,
78 .aio_buf = (unsigned long) buf, 78 .aio_buf = (unsigned long) buf,
79 .aio_nbytes = len, 79 .aio_nbytes = len,
80 .aio_offset = offset, 80 .aio_offset = offset,
81 .aio_reserved1 = 0, 81 .aio_reserved1 = 0,
82 .aio_reserved2 = 0, 82 .aio_reserved2 = 0,
83 .aio_reserved3 = 0 }); 83 .aio_reserved3 = 0 });
84 84
85 switch(type){ 85 switch(type){
86 case AIO_READ: 86 case AIO_READ:
87 iocb.aio_lio_opcode = IOCB_CMD_PREAD; 87 iocb.aio_lio_opcode = IOCB_CMD_PREAD;
88 err = io_submit(ctx, 1, &iocbp); 88 err = io_submit(ctx, 1, &iocbp);
89 break; 89 break;
90 case AIO_WRITE: 90 case AIO_WRITE:
91 iocb.aio_lio_opcode = IOCB_CMD_PWRITE; 91 iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
92 err = io_submit(ctx, 1, &iocbp); 92 err = io_submit(ctx, 1, &iocbp);
93 break; 93 break;
94 case AIO_MMAP: 94 case AIO_MMAP:
95 iocb.aio_lio_opcode = IOCB_CMD_PREAD; 95 iocb.aio_lio_opcode = IOCB_CMD_PREAD;
96 iocb.aio_buf = (unsigned long) &c; 96 iocb.aio_buf = (unsigned long) &c;
97 iocb.aio_nbytes = sizeof(c); 97 iocb.aio_nbytes = sizeof(c);
98 err = io_submit(ctx, 1, &iocbp); 98 err = io_submit(ctx, 1, &iocbp);
99 break; 99 break;
100 default: 100 default:
101 printk("Bogus op in do_aio - %d\n", type); 101 printk("Bogus op in do_aio - %d\n", type);
102 err = -EINVAL; 102 err = -EINVAL;
103 break; 103 break;
104 } 104 }
105 105
106 if(err > 0) 106 if(err > 0)
107 err = 0; 107 err = 0;
108 else 108 else
109 err = -errno; 109 err = -errno;
110 110
111 return err; 111 return err;
112} 112}
113 113
114static aio_context_t ctx = 0; 114static aio_context_t ctx = 0;
115 115
116static int aio_thread(void *arg) 116static int aio_thread(void *arg)
117{ 117{
118 struct aio_thread_reply reply; 118 struct aio_thread_reply reply;
119 struct io_event event; 119 struct io_event event;
120 int err, n, reply_fd; 120 int err, n, reply_fd;
121 121
122 signal(SIGWINCH, SIG_IGN); 122 signal(SIGWINCH, SIG_IGN);
123 123
124 while(1){ 124 while(1){
125 n = io_getevents(ctx, 1, 1, &event, NULL); 125 n = io_getevents(ctx, 1, 1, &event, NULL);
126 if(n < 0){ 126 if(n < 0){
127 if(errno == EINTR) 127 if(errno == EINTR)
128 continue; 128 continue;
129 printk("aio_thread - io_getevents failed, " 129 printk("aio_thread - io_getevents failed, "
130 "errno = %d\n", errno); 130 "errno = %d\n", errno);
131 } 131 }
132 else { 132 else {
133 reply = ((struct aio_thread_reply) 133 reply = ((struct aio_thread_reply)
134 { .data = (void *) (long) event.data, 134 { .data = (void *) (long) event.data,
135 .err = event.res }); 135 .err = event.res });
136 reply_fd = ((struct aio_context *) reply.data)->reply_fd; 136 reply_fd = ((struct aio_context *) reply.data)->reply_fd;
137 err = os_write_file(reply_fd, &reply, sizeof(reply)); 137 err = os_write_file(reply_fd, &reply, sizeof(reply));
138 if(err != sizeof(reply)) 138 if(err != sizeof(reply))
139 printk("aio_thread - write failed, fd = %d, " 139 printk("aio_thread - write failed, fd = %d, "
140 "err = %d\n", aio_req_fd_r, -err); 140 "err = %d\n", aio_req_fd_r, -err);
141 } 141 }
142 } 142 }
143 return 0; 143 return 0;
144} 144}
145 145
146#endif 146#endif
147 147
148static int do_not_aio(struct aio_thread_req *req) 148static int do_not_aio(struct aio_thread_req *req)
149{ 149{
150 char c; 150 char c;
151 int err; 151 int err;
152 152
153 switch(req->type){ 153 switch(req->type){
154 case AIO_READ: 154 case AIO_READ:
155 err = os_seek_file(req->io_fd, req->offset); 155 err = os_seek_file(req->io_fd, req->offset);
156 if(err) 156 if(err)
157 goto out; 157 goto out;
158 158
159 err = os_read_file(req->io_fd, req->buf, req->len); 159 err = os_read_file(req->io_fd, req->buf, req->len);
160 break; 160 break;
161 case AIO_WRITE: 161 case AIO_WRITE:
162 err = os_seek_file(req->io_fd, req->offset); 162 err = os_seek_file(req->io_fd, req->offset);
163 if(err) 163 if(err)
164 goto out; 164 goto out;
165 165
166 err = os_write_file(req->io_fd, req->buf, req->len); 166 err = os_write_file(req->io_fd, req->buf, req->len);
167 break; 167 break;
168 case AIO_MMAP: 168 case AIO_MMAP:
169 err = os_seek_file(req->io_fd, req->offset); 169 err = os_seek_file(req->io_fd, req->offset);
170 if(err) 170 if(err)
171 goto out; 171 goto out;
172 172
173 err = os_read_file(req->io_fd, &c, sizeof(c)); 173 err = os_read_file(req->io_fd, &c, sizeof(c));
174 break; 174 break;
175 default: 175 default:
176 printk("do_not_aio - bad request type : %d\n", req->type); 176 printk("do_not_aio - bad request type : %d\n", req->type);
177 err = -EINVAL; 177 err = -EINVAL;
178 break; 178 break;
179 } 179 }
180 180
181 out: 181out:
182 return err; 182 return err;
183} 183}
184 184
185static int not_aio_thread(void *arg) 185static int not_aio_thread(void *arg)
186{ 186{
187 struct aio_thread_req req; 187 struct aio_thread_req req;
188 struct aio_thread_reply reply; 188 struct aio_thread_reply reply;
189 int err; 189 int err;
190 190
191 signal(SIGWINCH, SIG_IGN); 191 signal(SIGWINCH, SIG_IGN);
192 while(1){ 192 while(1){
193 err = os_read_file(aio_req_fd_r, &req, sizeof(req)); 193 err = os_read_file(aio_req_fd_r, &req, sizeof(req));
194 if(err != sizeof(req)){ 194 if(err != sizeof(req)){
195 if(err < 0) 195 if(err < 0)
196 printk("not_aio_thread - read failed, " 196 printk("not_aio_thread - read failed, "
197 "fd = %d, err = %d\n", aio_req_fd_r, 197 "fd = %d, err = %d\n", aio_req_fd_r,
198 -err); 198 -err);
199 else { 199 else {
200 printk("not_aio_thread - short read, fd = %d, " 200 printk("not_aio_thread - short read, fd = %d, "
201 "length = %d\n", aio_req_fd_r, err); 201 "length = %d\n", aio_req_fd_r, err);
202 } 202 }
203 continue; 203 continue;
204 } 204 }
205 err = do_not_aio(&req); 205 err = do_not_aio(&req);
206 reply = ((struct aio_thread_reply) { .data = req.aio, 206 reply = ((struct aio_thread_reply) { .data = req.aio,
207 .err = err }); 207 .err = err });
208 err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply)); 208 err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply));
209 if(err != sizeof(reply)) 209 if(err != sizeof(reply))
210 printk("not_aio_thread - write failed, fd = %d, " 210 printk("not_aio_thread - write failed, fd = %d, "
211 "err = %d\n", aio_req_fd_r, -err); 211 "err = %d\n", aio_req_fd_r, -err);
212 } 212 }
213
214 return 0;
213} 215}
214 216
215static int aio_pid = -1; 217static int aio_pid = -1;
216 218
217static int init_aio_24(void) 219static int init_aio_24(void)
218{ 220{
219 unsigned long stack; 221 unsigned long stack;
220 int fds[2], err; 222 int fds[2], err;
221 223
222 err = os_pipe(fds, 1, 1); 224 err = os_pipe(fds, 1, 1);
223 if(err) 225 if(err)
224 goto out; 226 goto out;
225 227
226 aio_req_fd_w = fds[0]; 228 aio_req_fd_w = fds[0];
227 aio_req_fd_r = fds[1]; 229 aio_req_fd_r = fds[1];
228 err = run_helper_thread(not_aio_thread, NULL, 230 err = run_helper_thread(not_aio_thread, NULL,
229 CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); 231 CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
230 if(err < 0) 232 if(err < 0)
231 goto out_close_pipe; 233 goto out_close_pipe;
232 234
233 aio_pid = err; 235 aio_pid = err;
234 goto out; 236 goto out;
235 237
236 out_close_pipe: 238out_close_pipe:
237 os_close_file(fds[0]); 239 os_close_file(fds[0]);
238 os_close_file(fds[1]); 240 os_close_file(fds[1]);
239 aio_req_fd_w = -1; 241 aio_req_fd_w = -1;
240 aio_req_fd_r = -1; 242 aio_req_fd_r = -1;
241 out: 243out:
242#ifndef HAVE_AIO_ABI 244#ifndef HAVE_AIO_ABI
243 printk("/usr/include/linux/aio_abi.h not present during build\n"); 245 printk("/usr/include/linux/aio_abi.h not present during build\n");
244#endif 246#endif
245 printk("2.6 host AIO support not used - falling back to I/O " 247 printk("2.6 host AIO support not used - falling back to I/O "
246 "thread\n"); 248 "thread\n");
247 return 0; 249 return 0;
248} 250}
249 251
250#ifdef HAVE_AIO_ABI 252#ifdef HAVE_AIO_ABI
251#define DEFAULT_24_AIO 0 253#define DEFAULT_24_AIO 0
252static int init_aio_26(void) 254static int init_aio_26(void)
253{ 255{
254 unsigned long stack; 256 unsigned long stack;
255 int err; 257 int err;
256 258
257 if(io_setup(256, &ctx)){ 259 if(io_setup(256, &ctx)){
258 err = -errno; 260 err = -errno;
259 printk("aio_thread failed to initialize context, err = %d\n", 261 printk("aio_thread failed to initialize context, err = %d\n",
260 errno); 262 errno);
261 return err; 263 return err;
262 } 264 }
263 265
264 err = run_helper_thread(aio_thread, NULL, 266 err = run_helper_thread(aio_thread, NULL,
265 CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); 267 CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
266 if(err < 0) 268 if(err < 0)
267 return err; 269 return err;
268 270
269 aio_pid = err; 271 aio_pid = err;
270 272
271 printk("Using 2.6 host AIO\n"); 273 printk("Using 2.6 host AIO\n");
272 return 0; 274 return 0;
273} 275}
274 276
275static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, 277static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
276 unsigned long long offset, struct aio_context *aio) 278 unsigned long long offset, struct aio_context *aio)
277{ 279{
278 struct aio_thread_reply reply; 280 struct aio_thread_reply reply;
279 int err; 281 int err;
280 282
281 err = do_aio(ctx, type, io_fd, buf, len, offset, aio); 283 err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
282 if(err){ 284 if(err){
283 reply = ((struct aio_thread_reply) { .data = aio, 285 reply = ((struct aio_thread_reply) { .data = aio,
284 .err = err }); 286 .err = err });
285 err = os_write_file(aio->reply_fd, &reply, sizeof(reply)); 287 err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
286 if(err != sizeof(reply)) 288 if(err != sizeof(reply))
287 printk("submit_aio_26 - write failed, " 289 printk("submit_aio_26 - write failed, "
288 "fd = %d, err = %d\n", aio->reply_fd, -err); 290 "fd = %d, err = %d\n", aio->reply_fd, -err);
289 else err = 0; 291 else err = 0;
290 } 292 }
291 293
292 return err; 294 return err;
293} 295}
294 296
295#else 297#else
296#define DEFAULT_24_AIO 1 298#define DEFAULT_24_AIO 1
297static int init_aio_26(void) 299static int init_aio_26(void)
298{ 300{
299 return -ENOSYS; 301 return -ENOSYS;
300} 302}
301 303
302static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, 304static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
303 unsigned long long offset, struct aio_context *aio) 305 unsigned long long offset, struct aio_context *aio)
304{ 306{
305 return -ENOSYS; 307 return -ENOSYS;
306} 308}
307#endif 309#endif
308 310
@@ -310,8 +312,8 @@ static int aio_24 = DEFAULT_24_AIO;
310 312
311static int __init set_aio_24(char *name, int *add) 313static int __init set_aio_24(char *name, int *add)
312{ 314{
313 aio_24 = 1; 315 aio_24 = 1;
314 return 0; 316 return 0;
315} 317}
316 318
317__uml_setup("aio=2.4", set_aio_24, 319__uml_setup("aio=2.4", set_aio_24,
@@ -328,28 +330,27 @@ __uml_setup("aio=2.4", set_aio_24,
328 330
329static int init_aio(void) 331static int init_aio(void)
330{ 332{
331 int err; 333 int err;
332 334
333 CHOOSE_MODE(({ 335 CHOOSE_MODE(({ if(!aio_24){
334 if(!aio_24){ 336 printk("Disabling 2.6 AIO in tt mode\n");
335 printk("Disabling 2.6 AIO in tt mode\n"); 337 aio_24 = 1;
336 aio_24 = 1; 338 } }), (void) 0);
337 } }), (void) 0); 339
338 340 if(!aio_24){
339 if(!aio_24){ 341 err = init_aio_26();
340 err = init_aio_26(); 342 if(err && (errno == ENOSYS)){
341 if(err && (errno == ENOSYS)){ 343 printk("2.6 AIO not supported on the host - "
342 printk("2.6 AIO not supported on the host - " 344 "reverting to 2.4 AIO\n");
343 "reverting to 2.4 AIO\n"); 345 aio_24 = 1;
344 aio_24 = 1; 346 }
345 } 347 else return err;
346 else return err; 348 }
347 } 349
348 350 if(aio_24)
349 if(aio_24) 351 return init_aio_24();
350 return init_aio_24(); 352
351 353 return 0;
352 return 0;
353} 354}
354 355
355/* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio 356/* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
@@ -362,8 +363,8 @@ __initcall(init_aio);
362 363
363static void exit_aio(void) 364static void exit_aio(void)
364{ 365{
365 if(aio_pid != -1) 366 if(aio_pid != -1)
366 os_kill_process(aio_pid, 1); 367 os_kill_process(aio_pid, 1);
367} 368}
368 369
369__uml_exitcall(exit_aio); 370__uml_exitcall(exit_aio);
@@ -371,30 +372,30 @@ __uml_exitcall(exit_aio);
371static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len, 372static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
372 unsigned long long offset, struct aio_context *aio) 373 unsigned long long offset, struct aio_context *aio)
373{ 374{
374 struct aio_thread_req req = { .type = type, 375 struct aio_thread_req req = { .type = type,
375 .io_fd = io_fd, 376 .io_fd = io_fd,
376 .offset = offset, 377 .offset = offset,
377 .buf = buf, 378 .buf = buf,
378 .len = len, 379 .len = len,
379 .aio = aio, 380 .aio = aio,
380 }; 381 };
381 int err; 382 int err;
382 383
383 err = os_write_file(aio_req_fd_w, &req, sizeof(req)); 384 err = os_write_file(aio_req_fd_w, &req, sizeof(req));
384 if(err == sizeof(req)) 385 if(err == sizeof(req))
385 err = 0; 386 err = 0;
386 387
387 return err; 388 return err;
388} 389}
389 390
390int submit_aio(enum aio_type type, int io_fd, char *buf, int len, 391int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
391 unsigned long long offset, int reply_fd, 392 unsigned long long offset, int reply_fd,
392 struct aio_context *aio) 393 struct aio_context *aio)
393{ 394{
394 aio->reply_fd = reply_fd; 395 aio->reply_fd = reply_fd;
395 if(aio_24) 396 if(aio_24)
396 return submit_aio_24(type, io_fd, buf, len, offset, aio); 397 return submit_aio_24(type, io_fd, buf, len, offset, aio);
397 else { 398 else {
398 return submit_aio_26(type, io_fd, buf, len, offset, aio); 399 return submit_aio_26(type, io_fd, buf, len, offset, aio);
399 } 400 }
400} 401}
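
The host side of the 2.6 path above is the raw Linux AIO syscall ABI, wrapped by hand because glibc exposes no bindings for it. A stand-alone sketch of the same submit/reap cycle for a single read, with error handling trimmed:

#define _GNU_SOURCE
#include <linux/aio_abi.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long my_io_setup(unsigned n, aio_context_t *ctx)
{
	return syscall(__NR_io_setup, n, ctx);
}

static long my_io_submit(aio_context_t ctx, long nr, struct iocb **iocbs)
{
	return syscall(__NR_io_submit, ctx, nr, iocbs);
}

static long my_io_getevents(aio_context_t ctx, long min_nr, long nr,
			    struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx, min_nr, nr, events, timeout);
}

/* Read 'len' bytes at 'off' from 'fd' through the kernel AIO interface. */
int aio_read_once(int fd, void *buf, size_t len, long long off)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;

	if (my_io_setup(1, &ctx) < 0)
		return -1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes = fd;
	cb.aio_buf = (unsigned long) buf;
	cb.aio_nbytes = len;
	cb.aio_offset = off;

	if (my_io_submit(ctx, 1, cbs) != 1)
		return -1;
	/* block until the single event completes */
	if (my_io_getevents(ctx, 1, 1, &ev, NULL) != 1)
		return -1;
	return (int) ev.res;	/* bytes transferred, or negative errno */
}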
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
new file mode 100644
index 000000000000..ecf107ae5ac8
--- /dev/null
+++ b/arch/um/os-Linux/umid.c
@@ -0,0 +1,335 @@
1#include <stdio.h>
2#include <unistd.h>
3#include <stdlib.h>
4#include <string.h>
5#include <errno.h>
6#include <signal.h>
7#include <dirent.h>
8#include <sys/fcntl.h>
9#include <sys/stat.h>
10#include <sys/param.h>
11#include "init.h"
12#include "os.h"
13#include "user.h"
14#include "mode.h"
15
16#define UML_DIR "~/.uml/"
17
18#define UMID_LEN 64
19
20/* Changed by set_umid, which is run early in boot */
21char umid[UMID_LEN] = { 0 };
22
23/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
24static char *uml_dir = UML_DIR;
25
26static int __init make_uml_dir(void)
27{
28 char dir[512] = { '\0' };
29 int len, err;
30
31 if(*uml_dir == '~'){
32 char *home = getenv("HOME");
33
34 err = -ENOENT;
35 if(home == NULL){
36 printk("make_uml_dir : no value in environment for "
37 "$HOME\n");
38 goto err;
39 }
40 strlcpy(dir, home, sizeof(dir));
41 uml_dir++;
42 }
43 strlcat(dir, uml_dir, sizeof(dir));
44 len = strlen(dir);
45 if (len > 0 && dir[len - 1] != '/')
46 strlcat(dir, "/", sizeof(dir));
47
48 err = -ENOMEM;
49 uml_dir = malloc(strlen(dir) + 1);
50 if (uml_dir == NULL) {
51 printf("make_uml_dir : malloc failed, errno = %d\n", errno);
52 goto err;
53 }
54 strcpy(uml_dir, dir);
55
56 if((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)){
57 printf("Failed to mkdir '%s': %s\n", uml_dir, strerror(errno));
58 err = -errno;
59 goto err_free;
60 }
61 return 0;
62
63err_free:
64 free(uml_dir);
65err:
66 uml_dir = NULL;
67 return err;
68}
69
70static int actually_do_remove(char *dir)
71{
72 DIR *directory;
73 struct dirent *ent;
74 int len;
75 char file[256];
76
77 directory = opendir(dir);
78 if(directory == NULL)
79 return -errno;
80
81 while((ent = readdir(directory)) != NULL){
82 if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
83 continue;
84 len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
85 if(len > sizeof(file))
86 return -E2BIG;
87
88 sprintf(file, "%s/%s", dir, ent->d_name);
89 if(unlink(file) < 0)
90 return -errno;
91 }
92 if(rmdir(dir) < 0)
93 return -errno;
94
95 return 0;
96}
97
 98/* This reports that the specified directory has no live user even if errors
 99 * occur while checking. If such errors happen, the directory is unusable by
 100 * the pre-existing UML anyway, so we might as well take it over. This can
 101 * happen because:
 102 * - the existing UML somehow corrupted its umid directory
 103 * - something other than UML stuck stuff in the directory
 104 * - this boot raced with a shutdown of the other UML
 105 * In any of these cases, the directory isn't useful for anything else.
 106 */
107
108static int not_dead_yet(char *dir)
109{
110 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
111 char pid[sizeof("nnnnn\0")], *end;
112 int dead, fd, p, n, err;
113
114 n = snprintf(file, sizeof(file), "%s/pid", dir);
115 if(n >= sizeof(file)){
116 printk("not_dead_yet - pid filename too long\n");
117 err = -E2BIG;
118 goto out;
119 }
120
121 dead = 0;
122 fd = open(file, O_RDONLY);
123 if(fd < 0){
 124 if(errno != ENOENT){
 125 printk("not_dead_yet : couldn't open pid file '%s', "
 126 "errno = %d\n", file, errno);
127 }
128 goto out;
129 }
130
131 err = 0;
132 n = read(fd, pid, sizeof(pid));
133 if(n <= 0){
134 printk("not_dead_yet : couldn't read pid file '%s', "
135 "err = %d\n", file, -n);
136 goto out_close;
137 }
138
139 p = strtoul(pid, &end, 0);
140 if(end == pid){
141 printk("not_dead_yet : couldn't parse pid file '%s', "
142 "errno = %d\n", file, errno);
143 goto out_close;
144 }
145
146 if((kill(p, 0) == 0) || (errno != ESRCH))
147 return 1;
148
149 err = actually_do_remove(dir);
150 if(err)
151 printk("not_dead_yet - actually_do_remove failed with "
152 "err = %d\n", err);
153
154 return err;
155
156 out_close:
157 close(fd);
158 out:
159 return 0;
160}
161
162static void __init create_pid_file(void)
163{
164 char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
165 char pid[sizeof("nnnnn\0")];
166 int fd, n;
167
168 if(umid_file_name("pid", file, sizeof(file)))
169 return;
170
171 fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
172 if(fd < 0){
173 printk("Open of machine pid file \"%s\" failed: %s\n",
 174 file, strerror(errno));
175 return;
176 }
177
178 snprintf(pid, sizeof(pid), "%d\n", getpid());
179 n = write(fd, pid, strlen(pid));
180 if(n != strlen(pid))
181 printk("Write of pid file failed - err = %d\n", -n);
182
183 close(fd);
184}
185
186int __init set_umid(char *name)
187{
188 if(strlen(name) > UMID_LEN - 1)
189 return -E2BIG;
190
191 strlcpy(umid, name, sizeof(umid));
192
193 return 0;
194}
195
196static int umid_setup = 0;
197
198int __init make_umid(void)
199{
200 int fd, err;
201 char tmp[256];
202
203 if(umid_setup)
204 return 0;
205
206 make_uml_dir();
207
208 if(*umid == '\0'){
209 strlcpy(tmp, uml_dir, sizeof(tmp));
210 strlcat(tmp, "XXXXXX", sizeof(tmp));
211 fd = mkstemp(tmp);
212 if(fd < 0){
213 printk("make_umid - mkstemp(%s) failed: %s\n",
214 tmp, strerror(errno));
215 err = -errno;
216 goto err;
217 }
218
219 close(fd);
220
221 set_umid(&tmp[strlen(uml_dir)]);
222
223 /* There's a nice tiny little race between this unlink and
224 * the mkdir below. It'd be nice if there were a mkstemp
225 * for directories.
226 */
227 if(unlink(tmp)){
228 err = -errno;
229 goto err;
230 }
231 }
232
233 snprintf(tmp, sizeof(tmp), "%s%s", uml_dir, umid);
234 err = mkdir(tmp, 0777);
235 if(err < 0){
236 err = -errno;
237 if(errno != EEXIST)
238 goto err;
239
240 if(not_dead_yet(tmp) < 0)
241 goto err;
242
243 err = mkdir(tmp, 0777);
244 }
245 if(err < 0){
246 printk("Failed to create '%s' - err = %d\n", umid, err);
247 goto err_rmdir;
248 }
249
250 umid_setup = 1;
251
252 create_pid_file();
253
254 return 0;
255
256 err_rmdir:
257 rmdir(tmp);
258 err:
259 return err;
260}
261
262static int __init make_umid_init(void)
263{
264 make_umid();
265
266 return 0;
267}
268
269__initcall(make_umid_init);
270
271int __init umid_file_name(char *name, char *buf, int len)
272{
273 int n, err;
274
275 err = make_umid();
276 if(err)
277 return err;
278
279 n = snprintf(buf, len, "%s%s/%s", uml_dir, umid, name);
280 if(n >= len){
281 printk("umid_file_name : buffer too short\n");
282 return -E2BIG;
283 }
284
285 return 0;
286}
287
288char *get_umid(void)
289{
290 return umid;
291}
292
293static int __init set_uml_dir(char *name, int *add)
294{
295 if(*name == '\0'){
296 printf("uml_dir can't be an empty string\n");
297 return 0;
298 }
299
300 if(name[strlen(name) - 1] == '/'){
301 uml_dir = name;
302 return 0;
303 }
304
305 uml_dir = malloc(strlen(name) + 2);
306 if(uml_dir == NULL){
307 printf("Failed to malloc uml_dir - error = %d\n", errno);
308
309 /* Return 0 here because do_initcalls doesn't look at
310 * the return value.
311 */
312 return 0;
313 }
314 sprintf(uml_dir, "%s/", name);
315
316 return 0;
317}
318
319__uml_setup("uml_dir=", set_uml_dir,
320"uml_dir=<directory>\n"
321" The location to place the pid and umid files.\n\n"
322);
323
324static void remove_umid_dir(void)
325{
 326 char dir[strlen(uml_dir) + UMID_LEN + 1]; int err;
327
328 sprintf(dir, "%s%s", uml_dir, umid);
329 err = actually_do_remove(dir);
330 if(err)
331 printf("remove_umid_dir - actually_do_remove failed with "
332 "err = %d\n", err);
333}
334
335__uml_exitcall(remove_umid_dir);
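
The heart of not_dead_yet() above is the kill(pid, 0) probe: signal 0 delivers nothing but still performs the existence check. A stand-alone user-space analogue (assumed buffer sizes, printf conventions instead of printk):

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Return 1 if the pid recorded in dir/pid belongs to a live process. */
int holder_alive(const char *dir)
{
	char path[512], buf[32];
	ssize_t n;
	long pid;
	int fd;

	snprintf(path, sizeof(path), "%s/pid", dir);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;			/* no pid file, treat as dead */
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return 0;
	buf[n] = '\0';
	pid = strtol(buf, NULL, 10);
	if (pid <= 0)
		return 0;
	/* kill() with signal 0 only checks that the process exists */
	return kill(pid, 0) == 0 || errno != ESRCH;
}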
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index e2c6e64a85ec..fcb06a50fdd2 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -9,6 +9,16 @@ config INIT_DEBUG
9 Fill __init and __initdata at the end of boot. This helps debugging 9 Fill __init and __initdata at the end of boot. This helps debugging
10 illegal uses of __init and __initdata after initialization. 10 illegal uses of __init and __initdata after initialization.
11 11
12config DEBUG_RODATA
13 bool "Write protect kernel read-only data structures"
14 depends on DEBUG_KERNEL
15 help
16 Mark the kernel read-only data as write-protected in the pagetables,
17 in order to catch accidental (and incorrect) writes to such const data.
18 This option may have a slight performance impact because a portion
 19 of the kernel code won't be covered by 2MB TLB entries anymore.
20 If in doubt, say "N".
21
12config IOMMU_DEBUG 22config IOMMU_DEBUG
13 depends on GART_IOMMU && DEBUG_KERNEL 23 depends on GART_IOMMU && DEBUG_KERNEL
14 bool "Enable IOMMU debugging" 24 bool "Enable IOMMU debugging"
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index e0eb0c712fe9..df0773c9bdbe 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -341,7 +341,7 @@ ENTRY(ia32_ptregs_common)
341 jmp ia32_sysret /* misbalances the return cache */ 341 jmp ia32_sysret /* misbalances the return cache */
342 CFI_ENDPROC 342 CFI_ENDPROC
343 343
344 .data 344 .section .rodata,"a"
345 .align 8 345 .align 8
346 .globl ia32_sys_call_table 346 .globl ia32_sys_call_table
347ia32_sys_call_table: 347ia32_sys_call_table:
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7519fc520eb3..3060ed97b755 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
157DECLARE_PER_CPU(int, cpu_state); 157DECLARE_PER_CPU(int, cpu_state);
158 158
159#include <asm/nmi.h> 159#include <asm/nmi.h>
 160/* We don't actually take CPU down, just spin without interrupts. */ 160/* With physical CPU hotplug we really halt the CPU */
161static inline void play_dead(void) 161static inline void play_dead(void)
162{ 162{
163 idle_task_exit(); 163 idle_task_exit();
@@ -166,8 +166,9 @@ static inline void play_dead(void)
166 /* Ack it */ 166 /* Ack it */
167 __get_cpu_var(cpu_state) = CPU_DEAD; 167 __get_cpu_var(cpu_state) = CPU_DEAD;
168 168
169 local_irq_disable();
169 while (1) 170 while (1)
170 safe_halt(); 171 halt();
171} 172}
172#else 173#else
173static inline void play_dead(void) 174static inline void play_dead(void)
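
The safe_halt() to local_irq_disable() + halt() change is easiest to see from what the primitives expand to; roughly, simplified from the kernel's x86 definitions:

/* safe_halt: enable interrupts, then halt - any later interrupt wakes
 * the CPU again, which is wrong for a CPU being taken offline */
static inline void my_safe_halt(void)   { asm volatile("sti; hlt"); }

/* cli before hlt in a loop: with interrupts off, nothing short of an
 * NMI or INIT/reset brings the CPU back, which is what physical CPU
 * hotplug wants */
static inline void my_irq_disable(void) { asm volatile("cli"); }
static inline void my_halt(void)        { asm volatile("hlt"); }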
diff --git a/arch/x86_64/kernel/syscall.c b/arch/x86_64/kernel/syscall.c
index e263685f864c..7c176b3edde0 100644
--- a/arch/x86_64/kernel/syscall.c
+++ b/arch/x86_64/kernel/syscall.c
@@ -19,7 +19,7 @@ typedef void (*sys_call_ptr_t)(void);
19 19
20extern void sys_ni_syscall(void); 20extern void sys_ni_syscall(void);
21 21
22sys_call_ptr_t sys_call_table[__NR_syscall_max+1] __cacheline_aligned = { 22const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
 23 /* Smells like a compiler bug -- it doesn't work when the & below is removed. */ 23 /* Smells like a compiler bug -- it doesn't work when the & below is removed. */
24 [0 ... __NR_syscall_max] = &sys_ni_syscall, 24 [0 ... __NR_syscall_max] = &sys_ni_syscall,
25#include <asm-x86_64/unistd.h> 25#include <asm-x86_64/unistd.h>
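
The [0 ... __NR_syscall_max] syntax is GCC's designated range initializer, and the added const is what lets the linker place the table in .rodata. A small stand-alone illustration of the idiom, with hypothetical names:

#include <stdio.h>

static void sys_ni(void)    { puts("ENOSYS"); }
static void sys_hello(void) { puts("hello"); }

#define NR_MAX 255

typedef void (*call_ptr_t)(void);

/* default every slot to sys_ni, then override individual entries;
 * a later designated initializer wins (GCC warns under -Woverride-init) */
static const call_ptr_t table[NR_MAX + 1] = {
	[0 ... NR_MAX] = &sys_ni,
	[42]           = &sys_hello,
};

int main(void)
{
	table[42]();	/* prints "hello" */
	table[7]();	/* prints "ENOSYS" */
	return 0;
}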
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index c016dfe84784..1faae5fc1c01 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -498,6 +498,29 @@ void free_initmem(void)
498 printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10); 498 printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
499} 499}
500 500
501#ifdef CONFIG_DEBUG_RODATA
502
503extern char __start_rodata, __end_rodata;
504void mark_rodata_ro(void)
505{
506 unsigned long addr = (unsigned long)&__start_rodata;
507
508 for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
509 change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
510
511 printk ("Write protecting the kernel read-only data: %luk\n",
512 (&__end_rodata - &__start_rodata) >> 10);
513
514 /*
515 * change_page_attr_addr() requires a global_flush_tlb() call after it.
516 * We do this after the printk so that if something went wrong in the
517 * change, the printk gets out at least to give a better debug hint
518 * of who is the culprit.
519 */
520 global_flush_tlb();
521}
522#endif
523
501#ifdef CONFIG_BLK_DEV_INITRD 524#ifdef CONFIG_BLK_DEV_INITRD
502void free_initrd_mem(unsigned long start, unsigned long end) 525void free_initrd_mem(unsigned long start, unsigned long end)
503{ 526{
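
The effect of mark_rodata_ro() can be mimicked in user space with mprotect(), which flips the same page-table permission bit for a mapping; a toy demonstration, with the faulting write left commented out:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	strcpy(p, "const data");
	/* analogous to change_page_attr_addr(addr, 1, PAGE_KERNEL_RO) */
	mprotect(p, page, PROT_READ);

	printf("%s\n", p);	/* reads still work */
	/* p[0] = 'X';	   would now die with SIGSEGV */
	return 0;
}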
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index b90e8fe9eeb0..35f1f1aab063 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
128 pte_t *kpte; 128 pte_t *kpte;
129 struct page *kpte_page; 129 struct page *kpte_page;
130 unsigned kpte_flags; 130 unsigned kpte_flags;
131 pgprot_t ref_prot2;
131 kpte = lookup_address(address); 132 kpte = lookup_address(address);
132 if (!kpte) return 0; 133 if (!kpte) return 0;
133 kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK); 134 kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
140 * split_large_page will take the reference for this change_page_attr 141 * split_large_page will take the reference for this change_page_attr
141 * on the split page. 142 * on the split page.
142 */ 143 */
143 struct page *split = split_large_page(address, prot, ref_prot); 144
145 struct page *split;
146 ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
147
148 split = split_large_page(address, prot, ref_prot2);
144 if (!split) 149 if (!split)
145 return -ENOMEM; 150 return -ENOMEM;
146 set_pte(kpte,mk_pte(split, ref_prot)); 151 set_pte(kpte,mk_pte(split, ref_prot2));
147 kpte_page = split; 152 kpte_page = split;
148 } 153 }
149 get_page(kpte_page); 154 get_page(kpte_page);
diff --git a/block/Kconfig b/block/Kconfig
index eb48edb80c1d..377f6dd20e17 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,7 +5,7 @@
5#for instance. 5#for instance.
6config LBD 6config LBD
7 bool "Support for Large Block Devices" 7 bool "Support for Large Block Devices"
8 depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML 8 depends on X86 || (MIPS && 32BIT) || PPC32 || (S390 && !64BIT) || SUPERH || UML
9 help 9 help
10 Say Y here if you want to attach large (bigger than 2TB) discs to 10 Say Y here if you want to attach large (bigger than 2TB) discs to
11 your machine, or if you want to have a raid or loopback device 11 your machine, or if you want to have a raid or loopback device
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 89299f4ffe12..52e1d4108a99 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -40,10 +40,11 @@ config CRYPTO_SHA1
40 help 40 help
41 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 41 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
42 42
43config CRYPTO_SHA1_Z990 43config CRYPTO_SHA1_S390
44 tristate "SHA1 digest algorithm for IBM zSeries z990" 44 tristate "SHA1 digest algorithm (s390)"
45 depends on CRYPTO && ARCH_S390 45 depends on CRYPTO && S390
46 help 46 help
47 This is the s390 hardware accelerated implementation of the
47 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). 48 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
48 49
49config CRYPTO_SHA256 50config CRYPTO_SHA256
@@ -55,6 +56,16 @@ config CRYPTO_SHA256
55 This version of SHA implements a 256 bit hash with 128 bits of 56 This version of SHA implements a 256 bit hash with 128 bits of
56 security against collision attacks. 57 security against collision attacks.
57 58
59config CRYPTO_SHA256_S390
60 tristate "SHA256 digest algorithm (s390)"
61 depends on CRYPTO && S390
62 help
63 This is the s390 hardware accelerated implementation of the
64 SHA256 secure hash standard (DFIPS 180-2).
65
66 This version of SHA implements a 256 bit hash with 128 bits of
67 security against collision attacks.
68
58config CRYPTO_SHA512 69config CRYPTO_SHA512
59 tristate "SHA384 and SHA512 digest algorithms" 70 tristate "SHA384 and SHA512 digest algorithms"
60 depends on CRYPTO 71 depends on CRYPTO
@@ -98,9 +109,9 @@ config CRYPTO_DES
98 help 109 help
99 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 110 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
100 111
101config CRYPTO_DES_Z990 112config CRYPTO_DES_S390
102 tristate "DES and Triple DES cipher algorithms for IBM zSeries z990" 113 tristate "DES and Triple DES cipher algorithms (s390)"
103 depends on CRYPTO && ARCH_S390 114 depends on CRYPTO && S390
104 help 115 help
105 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). 116 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
106 117
@@ -204,6 +215,26 @@ config CRYPTO_AES_X86_64
204 215
205 See <http://csrc.nist.gov/encryption/aes/> for more information. 216 See <http://csrc.nist.gov/encryption/aes/> for more information.
206 217
218config CRYPTO_AES_S390
219 tristate "AES cipher algorithms (s390)"
220 depends on CRYPTO && S390
221 help
222 This is the s390 hardware accelerated implementation of the
223 AES cipher algorithms (FIPS-197). AES uses the Rijndael
224 algorithm.
225
226 Rijndael appears to be consistently a very good performer in
227 both hardware and software across a wide range of computing
228 environments regardless of its use in feedback or non-feedback
229 modes. Its key setup time is excellent, and its key agility is
230 good. Rijndael's very low memory requirements make it very well
231 suited for restricted-space environments, in which it also
232 demonstrates excellent performance. Rijndael's operations are
233 among the easiest to defend against power and timing attacks.
234
 235 On s390 the System z9-109 currently supports only a key size
 236 of 128 bits.
237
207config CRYPTO_CAST5 238config CRYPTO_CAST5
208 tristate "CAST5 (CAST-128) cipher algorithm" 239 tristate "CAST5 (CAST-128) cipher algorithm"
209 depends on CRYPTO 240 depends on CRYPTO
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 53f4ee804bdb..49e344f00806 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -805,6 +805,8 @@ static void do_test(void)
805 //AES 805 //AES
806 test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); 806 test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS);
807 test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); 807 test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS);
808 test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS);
809 test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS);
808 810
809 //CAST5 811 //CAST5
810 test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); 812 test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS);
@@ -910,6 +912,8 @@ static void do_test(void)
910 case 10: 912 case 10:
911 test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); 913 test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS);
912 test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); 914 test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS);
915 test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS);
916 test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS);
913 break; 917 break;
914 918
915 case 11: 919 case 11:
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 522ffd4b6f43..733d07ed75e9 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -1836,6 +1836,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
1836 */ 1836 */
1837#define AES_ENC_TEST_VECTORS 3 1837#define AES_ENC_TEST_VECTORS 3
1838#define AES_DEC_TEST_VECTORS 3 1838#define AES_DEC_TEST_VECTORS 3
1839#define AES_CBC_ENC_TEST_VECTORS 2
1840#define AES_CBC_DEC_TEST_VECTORS 2
1839 1841
1840static struct cipher_testvec aes_enc_tv_template[] = { 1842static struct cipher_testvec aes_enc_tv_template[] = {
1841 { /* From FIPS-197 */ 1843 { /* From FIPS-197 */
@@ -1911,6 +1913,68 @@ static struct cipher_testvec aes_dec_tv_template[] = {
1911 }, 1913 },
1912}; 1914};
1913 1915
1916static struct cipher_testvec aes_cbc_enc_tv_template[] = {
1917 { /* From RFC 3602 */
1918 .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
1919 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
1920 .klen = 16,
1921 .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
1922 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
1923 .input = { "Single block msg" },
1924 .ilen = 16,
1925 .result = { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8,
1926 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a },
1927 .rlen = 16,
1928 }, {
1929 .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
1930 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
1931 .klen = 16,
1932 .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
1933 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
1934 .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
1935 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
1936 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
1937 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
1938 .ilen = 32,
1939 .result = { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a,
1940 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a,
1941 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
1942 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
1943 .rlen = 32,
1944 },
1945};
1946
1947static struct cipher_testvec aes_cbc_dec_tv_template[] = {
1948 { /* From RFC 3602 */
1949 .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
1950 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
1951 .klen = 16,
1952 .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
1953 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
1954 .input = { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8,
1955 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a },
1956 .ilen = 16,
1957 .result = { "Single block msg" },
1958 .rlen = 16,
1959 }, {
1960 .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
1961 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
1962 .klen = 16,
1963 .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
1964 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
1965 .input = { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a,
1966 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a,
1967 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
1968 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
1969 .ilen = 32,
1970 .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
1971 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
1972 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
1973 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
1974 .rlen = 32,
1975 },
1976};
1977
1914/* Cast5 test vectors from RFC 2144 */ 1978/* Cast5 test vectors from RFC 2144 */
1915#define CAST5_ENC_TEST_VECTORS 3 1979#define CAST5_ENC_TEST_VECTORS 3
1916#define CAST5_DEC_TEST_VECTORS 3 1980#define CAST5_DEC_TEST_VECTORS 3
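
What the new vectors exercise is the CBC chaining rule C[i] = E(P[i] XOR C[i-1]) with C[-1] = IV. With a toy block "cipher" standing in for AES, the mode itself is only a few lines (illustration only; the real transform comes from the crypto API):

#include <stddef.h>
#include <string.h>

#define BLK 16

/* stand-in for AES block encryption: any fixed, keyed transformation
 * works for showing the chaining structure */
static void toy_encrypt(unsigned char *blk, const unsigned char *key)
{
	size_t i;

	for (i = 0; i < BLK; i++)
		blk[i] = (unsigned char) ((blk[i] ^ key[i]) + 1);
}

/* CBC encrypt in place; len must be a multiple of BLK */
static void cbc_encrypt(unsigned char *buf, size_t len,
			const unsigned char *key, const unsigned char *iv)
{
	unsigned char prev[BLK];
	size_t off, i;

	memcpy(prev, iv, BLK);
	for (off = 0; off + BLK <= len; off += BLK) {
		for (i = 0; i < BLK; i++)
			buf[off + i] ^= prev[i];	/* P[i] ^= C[i-1] */
		toy_encrypt(buf + off, key);
		memcpy(prev, buf + off, BLK);		/* next C[i-1] */
	}
}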
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7e1d077874df..58801d718cc2 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -49,12 +49,12 @@ static struct kset_uevent_ops memory_uevent_ops = {
49 49
50static struct notifier_block *memory_chain; 50static struct notifier_block *memory_chain;
51 51
52static int register_memory_notifier(struct notifier_block *nb) 52int register_memory_notifier(struct notifier_block *nb)
53{ 53{
54 return notifier_chain_register(&memory_chain, nb); 54 return notifier_chain_register(&memory_chain, nb);
55} 55}
56 56
57static void unregister_memory_notifier(struct notifier_block *nb) 57void unregister_memory_notifier(struct notifier_block *nb)
58{ 58{
59 notifier_chain_unregister(&memory_chain, nb); 59 notifier_chain_unregister(&memory_chain, nb);
60} 60}
@@ -62,8 +62,7 @@ static void unregister_memory_notifier(struct notifier_block *nb)
62/* 62/*
63 * register_memory - Setup a sysfs device for a memory block 63 * register_memory - Setup a sysfs device for a memory block
64 */ 64 */
65static int 65int register_memory(struct memory_block *memory, struct mem_section *section,
66register_memory(struct memory_block *memory, struct mem_section *section,
67 struct node *root) 66 struct node *root)
68{ 67{
69 int error; 68 int error;
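
With the statics dropped, other kernel code can subscribe to memory hotplug events; a sketch of a caller, assuming the usual notifier_block conventions:

#include <linux/notifier.h>

static int my_memory_event(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	/* react to the memory block change here */
	return NOTIFY_OK;
}

static struct notifier_block my_memory_nb = {
	.notifier_call = my_memory_event,
};

/* in init code:     register_memory_notifier(&my_memory_nb);   */
/* in teardown code: unregister_memory_notifier(&my_memory_nb); */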
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index c4b9d2adfc08..139cbba76180 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -117,7 +117,7 @@ config BLK_DEV_XD
117 117
118config PARIDE 118config PARIDE
119 tristate "Parallel port IDE device support" 119 tristate "Parallel port IDE device support"
120 depends on PARPORT 120 depends on PARPORT_PC
121 ---help--- 121 ---help---
122 There are many external CD-ROM and disk devices that connect through 122 There are many external CD-ROM and disk devices that connect through
123 your computer's parallel port. Most of them are actually IDE devices 123 your computer's parallel port. Most of them are actually IDE devices
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 485345c8e632..33d6f237b2ed 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -54,11 +54,15 @@
54#include <linux/errno.h> 54#include <linux/errno.h>
55#include <linux/file.h> 55#include <linux/file.h>
56#include <linux/ioctl.h> 56#include <linux/ioctl.h>
57#include <linux/compiler.h>
58#include <linux/err.h>
59#include <linux/kernel.h>
57#include <net/sock.h> 60#include <net/sock.h>
58 61
59#include <linux/devfs_fs_kernel.h> 62#include <linux/devfs_fs_kernel.h>
60 63
61#include <asm/uaccess.h> 64#include <asm/uaccess.h>
65#include <asm/system.h>
62#include <asm/types.h> 66#include <asm/types.h>
63 67
64#include <linux/nbd.h> 68#include <linux/nbd.h>
@@ -230,14 +234,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
230 request.len = htonl(size); 234 request.len = htonl(size);
231 memcpy(request.handle, &req, sizeof(req)); 235 memcpy(request.handle, &req, sizeof(req));
232 236
233 down(&lo->tx_lock);
234
235 if (!sock || !lo->sock) {
236 printk(KERN_ERR "%s: Attempted send on closed socket\n",
237 lo->disk->disk_name);
238 goto error_out;
239 }
240
241 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", 237 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
242 lo->disk->disk_name, req, 238 lo->disk->disk_name, req,
243 nbdcmd_to_ascii(nbd_cmd(req)), 239 nbdcmd_to_ascii(nbd_cmd(req)),
@@ -276,11 +272,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
276 } 272 }
277 } 273 }
278 } 274 }
279 up(&lo->tx_lock);
280 return 0; 275 return 0;
281 276
282error_out: 277error_out:
283 up(&lo->tx_lock);
284 return 1; 278 return 1;
285} 279}
286 280
@@ -289,9 +283,14 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
289 struct request *req; 283 struct request *req;
290 struct list_head *tmp; 284 struct list_head *tmp;
291 struct request *xreq; 285 struct request *xreq;
286 int err;
292 287
293 memcpy(&xreq, handle, sizeof(xreq)); 288 memcpy(&xreq, handle, sizeof(xreq));
294 289
290 err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
291 if (unlikely(err))
292 goto out;
293
295 spin_lock(&lo->queue_lock); 294 spin_lock(&lo->queue_lock);
296 list_for_each(tmp, &lo->queue_head) { 295 list_for_each(tmp, &lo->queue_head) {
297 req = list_entry(tmp, struct request, queuelist); 296 req = list_entry(tmp, struct request, queuelist);
@@ -302,7 +301,11 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
302 return req; 301 return req;
303 } 302 }
304 spin_unlock(&lo->queue_lock); 303 spin_unlock(&lo->queue_lock);
305 return NULL; 304
305 err = -ENOENT;
306
307out:
308 return ERR_PTR(err);
306} 309}
307 310
308static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec) 311static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
@@ -331,7 +334,11 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
331 goto harderror; 334 goto harderror;
332 } 335 }
333 req = nbd_find_request(lo, reply.handle); 336 req = nbd_find_request(lo, reply.handle);
334 if (req == NULL) { 337 if (unlikely(IS_ERR(req))) {
338 result = PTR_ERR(req);
339 if (result != -ENOENT)
340 goto harderror;
341
335 printk(KERN_ERR "%s: Unexpected reply (%p)\n", 342 printk(KERN_ERR "%s: Unexpected reply (%p)\n",
336 lo->disk->disk_name, reply.handle); 343 lo->disk->disk_name, reply.handle);
337 result = -EBADR; 344 result = -EBADR;
@@ -395,19 +402,24 @@ static void nbd_clear_que(struct nbd_device *lo)
395 402
396 BUG_ON(lo->magic != LO_MAGIC); 403 BUG_ON(lo->magic != LO_MAGIC);
397 404
398 do { 405 /*
399 req = NULL; 406 * Because we have set lo->sock to NULL under the tx_lock, all
400 spin_lock(&lo->queue_lock); 407 * modifications to the list must have completed by now. For
401 if (!list_empty(&lo->queue_head)) { 408 * the same reason, the active_req must be NULL.
402 req = list_entry(lo->queue_head.next, struct request, queuelist); 409 *
403 list_del_init(&req->queuelist); 410 * As a consequence, we don't need to take the spin lock while
404 } 411 * purging the list here.
405 spin_unlock(&lo->queue_lock); 412 */
406 if (req) { 413 BUG_ON(lo->sock);
407 req->errors++; 414 BUG_ON(lo->active_req);
408 nbd_end_request(req); 415
409 } 416 while (!list_empty(&lo->queue_head)) {
410 } while (req); 417 req = list_entry(lo->queue_head.next, struct request,
418 queuelist);
419 list_del_init(&req->queuelist);
420 req->errors++;
421 nbd_end_request(req);
422 }
411} 423}
412 424
413/* 425/*
@@ -435,11 +447,6 @@ static void do_nbd_request(request_queue_t * q)
435 447
436 BUG_ON(lo->magic != LO_MAGIC); 448 BUG_ON(lo->magic != LO_MAGIC);
437 449
438 if (!lo->file) {
439 printk(KERN_ERR "%s: Request when not-ready\n",
440 lo->disk->disk_name);
441 goto error_out;
442 }
443 nbd_cmd(req) = NBD_CMD_READ; 450 nbd_cmd(req) = NBD_CMD_READ;
444 if (rq_data_dir(req) == WRITE) { 451 if (rq_data_dir(req) == WRITE) {
445 nbd_cmd(req) = NBD_CMD_WRITE; 452 nbd_cmd(req) = NBD_CMD_WRITE;
@@ -453,32 +460,34 @@ static void do_nbd_request(request_queue_t * q)
453 req->errors = 0; 460 req->errors = 0;
454 spin_unlock_irq(q->queue_lock); 461 spin_unlock_irq(q->queue_lock);
455 462
456 spin_lock(&lo->queue_lock); 463 down(&lo->tx_lock);
457 464 if (unlikely(!lo->sock)) {
458 if (!lo->file) { 465 up(&lo->tx_lock);
459 spin_unlock(&lo->queue_lock); 466 printk(KERN_ERR "%s: Attempted send on closed socket\n",
460 printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n", 467 lo->disk->disk_name);
461 lo->disk->disk_name);
462 req->errors++; 468 req->errors++;
463 nbd_end_request(req); 469 nbd_end_request(req);
464 spin_lock_irq(q->queue_lock); 470 spin_lock_irq(q->queue_lock);
465 continue; 471 continue;
466 } 472 }
467 473
468 list_add(&req->queuelist, &lo->queue_head); 474 lo->active_req = req;
469 spin_unlock(&lo->queue_lock);
470 475
471 if (nbd_send_req(lo, req) != 0) { 476 if (nbd_send_req(lo, req) != 0) {
472 printk(KERN_ERR "%s: Request send failed\n", 477 printk(KERN_ERR "%s: Request send failed\n",
473 lo->disk->disk_name); 478 lo->disk->disk_name);
474 if (nbd_find_request(lo, (char *)&req) != NULL) { 479 req->errors++;
475 /* we still own req */ 480 nbd_end_request(req);
476 req->errors++; 481 } else {
477 nbd_end_request(req); 482 spin_lock(&lo->queue_lock);
478 } else /* we're racing with nbd_clear_que */ 483 list_add(&req->queuelist, &lo->queue_head);
479 printk(KERN_DEBUG "nbd: can't find req\n"); 484 spin_unlock(&lo->queue_lock);
480 } 485 }
481 486
487 lo->active_req = NULL;
488 up(&lo->tx_lock);
489 wake_up_all(&lo->active_wq);
490
482 spin_lock_irq(q->queue_lock); 491 spin_lock_irq(q->queue_lock);
483 continue; 492 continue;
484 493
@@ -529,17 +538,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
529 down(&lo->tx_lock); 538 down(&lo->tx_lock);
530 lo->sock = NULL; 539 lo->sock = NULL;
531 up(&lo->tx_lock); 540 up(&lo->tx_lock);
532 spin_lock(&lo->queue_lock);
533 file = lo->file; 541 file = lo->file;
534 lo->file = NULL; 542 lo->file = NULL;
535 spin_unlock(&lo->queue_lock);
536 nbd_clear_que(lo); 543 nbd_clear_que(lo);
537 spin_lock(&lo->queue_lock); 544 BUG_ON(!list_empty(&lo->queue_head));
538 if (!list_empty(&lo->queue_head)) {
539 printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n");
540 error = -EBUSY;
541 }
542 spin_unlock(&lo->queue_lock);
543 if (file) 545 if (file)
544 fput(file); 546 fput(file);
545 return error; 547 return error;
@@ -598,24 +600,19 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
598 lo->sock = NULL; 600 lo->sock = NULL;
599 } 601 }
600 up(&lo->tx_lock); 602 up(&lo->tx_lock);
601 spin_lock(&lo->queue_lock);
602 file = lo->file; 603 file = lo->file;
603 lo->file = NULL; 604 lo->file = NULL;
604 spin_unlock(&lo->queue_lock);
605 nbd_clear_que(lo); 605 nbd_clear_que(lo);
606 printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name); 606 printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
607 if (file) 607 if (file)
608 fput(file); 608 fput(file);
609 return lo->harderror; 609 return lo->harderror;
610 case NBD_CLEAR_QUE: 610 case NBD_CLEAR_QUE:
611 down(&lo->tx_lock); 611 /*
612 if (lo->sock) { 612 * This is for compatibility only. The queue is always cleared
613 up(&lo->tx_lock); 613 * by NBD_DO_IT or NBD_CLEAR_SOCK.
614 return 0; /* probably should be error, but that would 614 */
615 * break "nbd-client -d", so just return 0 */ 615 BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
616 }
617 up(&lo->tx_lock);
618 nbd_clear_que(lo);
619 return 0; 616 return 0;
620 case NBD_PRINT_DEBUG: 617 case NBD_PRINT_DEBUG:
621 printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n", 618 printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
@@ -688,6 +685,7 @@ static int __init nbd_init(void)
688 spin_lock_init(&nbd_dev[i].queue_lock); 685 spin_lock_init(&nbd_dev[i].queue_lock);
689 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 686 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
690 init_MUTEX(&nbd_dev[i].tx_lock); 687 init_MUTEX(&nbd_dev[i].tx_lock);
688 init_waitqueue_head(&nbd_dev[i].active_wq);
691 nbd_dev[i].blksize = 1024; 689 nbd_dev[i].blksize = 1024;
692 nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */ 690 nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
693 disk->major = NBD_MAJOR; 691 disk->major = NBD_MAJOR;
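
The new active_req/active_wq pair implements a "wait until this slot no longer holds the request I'm matching" handshake between the submission path and the reply reader. A user-space analogue with pthreads, using a condition variable where the kernel uses a wait queue:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static void *active_req;	/* request currently being transmitted */

/* sender: publish the in-flight request while writing it out */
void send_request(void *req)
{
	pthread_mutex_lock(&lock);
	active_req = req;
	/* ... transmit req over the socket ... */
	active_req = NULL;
	pthread_cond_broadcast(&done);	/* wake_up_all(&lo->active_wq) */
	pthread_mutex_unlock(&lock);
}

/* receiver: never match a reply against a request still in flight */
void wait_not_active(void *req)
{
	pthread_mutex_lock(&lock);
	while (active_req == req)	/* wait_event(..., active_req != xreq) */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
}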
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 17ff40561257..c0d2854dd097 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -4,11 +4,12 @@
4# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module, 4# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
5# PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option 5# PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option
6# controls the choices given to the user ... 6# controls the choices given to the user ...
7# PARIDE only supports PC style parports. Tough for USB or other parports...
7config PARIDE_PARPORT 8config PARIDE_PARPORT
8 tristate 9 tristate
9 depends on PARIDE!=n 10 depends on PARIDE!=n
10 default m if PARPORT=m 11 default m if PARPORT_PC=m
11 default y if PARPORT!=m 12 default y if PARPORT_PC!=m
12 13
13comment "Parallel IDE high-level drivers" 14comment "Parallel IDE high-level drivers"
14 depends on PARIDE 15 depends on PARIDE
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 84e68cdd451b..5ebd06b1b4ca 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -985,7 +985,7 @@ config HPET_MMAP
985 985
986config HANGCHECK_TIMER 986config HANGCHECK_TIMER
987 tristate "Hangcheck timer" 987 tristate "Hangcheck timer"
988 depends on X86 || IA64 || PPC64 || ARCH_S390 988 depends on X86 || IA64 || PPC64 || S390
989 help 989 help
990 The hangcheck-timer module detects when the system has gone 990 The hangcheck-timer module detects when the system has gone
991 out to lunch past a certain margin. It can reboot the system 991 out to lunch past a certain margin. It can reboot the system
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 66e53dd450ff..40a67c86420c 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -120,7 +120,7 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
120#if defined(CONFIG_X86) 120#if defined(CONFIG_X86)
121# define HAVE_MONOTONIC 121# define HAVE_MONOTONIC
122# define TIMER_FREQ 1000000000ULL 122# define TIMER_FREQ 1000000000ULL
123#elif defined(CONFIG_ARCH_S390) 123#elif defined(CONFIG_S390)
 124/* FA240000 is 1 Second in the IBM time universe (Page 4-38, Principles of Op for zSeries) */ 124/* FA240000 is 1 Second in the IBM time universe (Page 4-38, Principles of Op for zSeries) */
125# define TIMER_FREQ 0xFA240000ULL 125# define TIMER_FREQ 0xFA240000ULL
126#elif defined(CONFIG_IA64) 126#elif defined(CONFIG_IA64)
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c
index 6f673d2de0b1..49769f59ea1b 100644
--- a/drivers/char/hw_random.c
+++ b/drivers/char/hw_random.c
@@ -1,4 +1,9 @@
1/* 1/*
2 Added support for the AMD Geode LX RNG
3 (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
4
5 derived from
6
2 Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) 7 Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
3 (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com> 8 (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
4 9
@@ -95,6 +100,11 @@ static unsigned int via_data_present (void);
95static u32 via_data_read (void); 100static u32 via_data_read (void);
96#endif 101#endif
97 102
103static int __init geode_init(struct pci_dev *dev);
104static void geode_cleanup(void);
105static unsigned int geode_data_present (void);
106static u32 geode_data_read (void);
107
98struct rng_operations { 108struct rng_operations {
99 int (*init) (struct pci_dev *dev); 109 int (*init) (struct pci_dev *dev);
100 void (*cleanup) (void); 110 void (*cleanup) (void);
@@ -122,6 +132,7 @@ enum {
122 rng_hw_intel, 132 rng_hw_intel,
123 rng_hw_amd, 133 rng_hw_amd,
124 rng_hw_via, 134 rng_hw_via,
135 rng_hw_geode,
125}; 136};
126 137
127static struct rng_operations rng_vendor_ops[] = { 138static struct rng_operations rng_vendor_ops[] = {
@@ -139,6 +150,9 @@ static struct rng_operations rng_vendor_ops[] = {
139 /* rng_hw_via */ 150 /* rng_hw_via */
140 { via_init, via_cleanup, via_data_present, via_data_read, 1 }, 151 { via_init, via_cleanup, via_data_present, via_data_read, 1 },
141#endif 152#endif
153
154 /* rng_hw_geode */
155 { geode_init, geode_cleanup, geode_data_present, geode_data_read, 4 }
142}; 156};
143 157
144/* 158/*
@@ -159,6 +173,9 @@ static struct pci_device_id rng_pci_tbl[] = {
159 { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel }, 173 { 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
160 { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel }, 174 { 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
161 175
176 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
177 PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_geode },
178
162 { 0, }, /* terminate list */ 179 { 0, }, /* terminate list */
163}; 180};
164MODULE_DEVICE_TABLE (pci, rng_pci_tbl); 181MODULE_DEVICE_TABLE (pci, rng_pci_tbl);
@@ -460,6 +477,57 @@ static void via_cleanup(void)
460} 477}
461#endif 478#endif
462 479
480/***********************************************************************
481 *
482 * AMD Geode RNG operations
483 *
484 */
485
486static void __iomem *geode_rng_base = NULL;
487
488#define GEODE_RNG_DATA_REG 0x50
489#define GEODE_RNG_STATUS_REG 0x54
490
491static u32 geode_data_read(void)
492{
493 u32 val;
494
495 assert(geode_rng_base != NULL);
496 val = readl(geode_rng_base + GEODE_RNG_DATA_REG);
497 return val;
498}
499
500static unsigned int geode_data_present(void)
501{
502 u32 val;
503
504 assert(geode_rng_base != NULL);
505 val = readl(geode_rng_base + GEODE_RNG_STATUS_REG);
506 return val;
507}
508
509static void geode_cleanup(void)
510{
511 iounmap(geode_rng_base);
512 geode_rng_base = NULL;
513}
514
515static int geode_init(struct pci_dev *dev)
516{
517 unsigned long rng_base = pci_resource_start(dev, 0);
518
519 if (rng_base == 0)
520 return 1;
521
522 geode_rng_base = ioremap(rng_base, 0x58);
523
524 if (geode_rng_base == NULL) {
525 printk(KERN_ERR PFX "Cannot ioremap RNG memory\n");
526 return -EBUSY;
527 }
528
529 return 0;
530}
463 531
464/*********************************************************************** 532/***********************************************************************
465 * 533 *
@@ -574,7 +642,7 @@ static int __init rng_init (void)
574 642
575 DPRINTK ("ENTER\n"); 643 DPRINTK ("ENTER\n");
576 644
577 /* Probe for Intel, AMD RNGs */ 645 /* Probe for Intel, AMD, Geode RNGs */
578 for_each_pci_dev(pdev) { 646 for_each_pci_dev(pdev) {
579 ent = pci_match_id(rng_pci_tbl, pdev); 647 ent = pci_match_id(rng_pci_tbl, pdev);
580 if (ent) { 648 if (ent) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 1f56b4cf0f58..561430ed94af 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -787,7 +787,6 @@ int ipmi_destroy_user(ipmi_user_t user)
787 int i; 787 int i;
788 unsigned long flags; 788 unsigned long flags;
789 struct cmd_rcvr *rcvr; 789 struct cmd_rcvr *rcvr;
790 struct list_head *entry1, *entry2;
791 struct cmd_rcvr *rcvrs = NULL; 790 struct cmd_rcvr *rcvrs = NULL;
792 791
793 user->valid = 1; 792 user->valid = 1;
@@ -812,8 +811,7 @@ int ipmi_destroy_user(ipmi_user_t user)
812 * synchronize_rcu()) then free everything in that list. 811 * synchronize_rcu()) then free everything in that list.
813 */ 812 */
814 down(&intf->cmd_rcvrs_lock); 813 down(&intf->cmd_rcvrs_lock);
815 list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) { 814 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
816 rcvr = list_entry(entry1, struct cmd_rcvr, link);
817 if (rcvr->user == user) { 815 if (rcvr->user == user) {
818 list_del_rcu(&rcvr->link); 816 list_del_rcu(&rcvr->link);
819 rcvr->next = rcvrs; 817 rcvr->next = rcvrs;
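
The iterator swap above follows the standard RCU list-deletion pattern: delete under the update-side lock with list_del_rcu(), then synchronize before freeing, since readers may still be traversing the removed entries. Sketched generically:

/* update side, under the list's lock (a semaphore in the code above) */
down(&intf->cmd_rcvrs_lock);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
	if (rcvr->user == user)
		list_del_rcu(&rcvr->link);	/* readers may still see it */
}
up(&intf->cmd_rcvrs_lock);

/* wait out all pre-existing RCU read-side critical sections, then free */
synchronize_rcu();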
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 344001b45af9..a6544790af60 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -438,7 +438,7 @@ config INDYDOG
438 438
439config ZVM_WATCHDOG 439config ZVM_WATCHDOG
440 tristate "z/VM Watchdog Timer" 440 tristate "z/VM Watchdog Timer"
441 depends on WATCHDOG && ARCH_S390 441 depends on WATCHDOG && S390
442 help 442 help
443 IBM s/390 and zSeries machines running under z/VM 5.1 or later 443 IBM s/390 and zSeries machines running under z/VM 5.1 or later
444 provide a virtual watchdog timer to their guest that cause a 444 provide a virtual watchdog timer to their guest that cause a
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 64fbbb01d52a..25ef5a86f5f0 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -1027,10 +1027,10 @@ static int hpsbpkt_thread(void *__hi)
1027 1027
1028 daemonize("khpsbpkt"); 1028 daemonize("khpsbpkt");
1029 1029
1030 current->flags |= PF_NOFREEZE;
1031
1030 while (1) { 1032 while (1) {
1031 if (down_interruptible(&khpsbpkt_sig)) { 1033 if (down_interruptible(&khpsbpkt_sig)) {
1032 if (try_to_freeze())
1033 continue;
1034 printk("khpsbpkt: received unexpected signal?!\n" ); 1034 printk("khpsbpkt: received unexpected signal?!\n" );
1035 break; 1035 break;
1036 } 1036 }
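
Rather than answering every suspend by catching the freezer's fake signal with try_to_freeze(), the khpsbpkt thread now opts out entirely by setting PF_NOFREEZE right after daemonize(), so an interrupted down() really does mean an unexpected signal. A sketch of the pattern; the semaphore and work function are placeholders, not names from the patch:

	static DECLARE_MUTEX_LOCKED(wake_sem);	/* signalled by the producer side */

	static int my_thread(void *data)
	{
		daemonize("mythread");
		current->flags |= PF_NOFREEZE;	/* skip the refrigerator on suspend */

		while (1) {
			if (down_interruptible(&wake_sem))
				break;		/* genuinely unexpected signal */
			do_work();		/* placeholder for the real payload */
		}
		return 0;
	}
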
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 9f2352bd8348..a1e660e3531d 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -157,7 +157,7 @@ struct input_event_compat {
157# define COMPAT_TEST test_thread_flag(TIF_IA32) 157# define COMPAT_TEST test_thread_flag(TIF_IA32)
158#elif defined(CONFIG_IA64) 158#elif defined(CONFIG_IA64)
159# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current)) 159# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
160#elif defined(CONFIG_ARCH_S390) 160#elif defined(CONFIG_S390)
161# define COMPAT_TEST test_thread_flag(TIF_31BIT) 161# define COMPAT_TEST test_thread_flag(TIF_31BIT)
162#elif defined(CONFIG_MIPS) 162#elif defined(CONFIG_MIPS)
163# define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR) 163# define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR)
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index f38696622eb4..5e1f5e9653cb 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -52,6 +52,7 @@ static char *sensor_location[3] = {NULL, NULL, NULL};
52 52
53static int limit_adjust = 0; 53static int limit_adjust = 0;
54static int fan_speed = -1; 54static int fan_speed = -1;
55static int verbose = 0;
55 56
56MODULE_AUTHOR("Colin Leroy <colin@colino.net>"); 57MODULE_AUTHOR("Colin Leroy <colin@colino.net>");
57MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and " 58MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and "
@@ -66,6 +67,10 @@ module_param(fan_speed, int, 0644);
66MODULE_PARM_DESC(fan_speed,"Specify starting fan speed (0-255) " 67MODULE_PARM_DESC(fan_speed,"Specify starting fan speed (0-255) "
67 "(default 64)"); 68 "(default 64)");
68 69
70module_param(verbose, bool, 0);
71MODULE_PARM_DESC(verbose,"Verbose log operations "
72 "(default 0)");
73
69struct thermostat { 74struct thermostat {
70 struct i2c_client clt; 75 struct i2c_client clt;
71 u8 temps[3]; 76 u8 temps[3];
@@ -149,13 +154,13 @@ detach_thermostat(struct i2c_adapter *adapter)
149 if (thread_therm != NULL) { 154 if (thread_therm != NULL) {
150 kthread_stop(thread_therm); 155 kthread_stop(thread_therm);
151 } 156 }
152 157
153 printk(KERN_INFO "adt746x: Putting max temperatures back from " 158 printk(KERN_INFO "adt746x: Putting max temperatures back from "
154 "%d, %d, %d to %d, %d, %d\n", 159 "%d, %d, %d to %d, %d, %d\n",
155 th->limits[0], th->limits[1], th->limits[2], 160 th->limits[0], th->limits[1], th->limits[2],
156 th->initial_limits[0], th->initial_limits[1], 161 th->initial_limits[0], th->initial_limits[1],
157 th->initial_limits[2]); 162 th->initial_limits[2]);
158 163
159 for (i = 0; i < 3; i++) 164 for (i = 0; i < 3; i++)
160 write_reg(th, LIMIT_REG[i], th->initial_limits[i]); 165 write_reg(th, LIMIT_REG[i], th->initial_limits[i]);
161 166
@@ -212,12 +217,14 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
212 return; 217 return;
213 218
214 if (th->last_speed[fan] != speed) { 219 if (th->last_speed[fan] != speed) {
215 if (speed == -1) 220 if (verbose) {
216 printk(KERN_DEBUG "adt746x: Setting speed to automatic " 221 if (speed == -1)
217 "for %s fan.\n", sensor_location[fan+1]); 222 printk(KERN_DEBUG "adt746x: Setting speed to automatic "
218 else 223 "for %s fan.\n", sensor_location[fan+1]);
219 printk(KERN_DEBUG "adt746x: Setting speed to %d " 224 else
220 "for %s fan.\n", speed, sensor_location[fan+1]); 225 printk(KERN_DEBUG "adt746x: Setting speed to %d "
226 "for %s fan.\n", speed, sensor_location[fan+1]);
227 }
221 } else 228 } else
222 return; 229 return;
223 230
@@ -298,10 +305,11 @@ static void update_fans_speed (struct thermostat *th)
298 if (new_speed > 255) 305 if (new_speed > 255)
299 new_speed = 255; 306 new_speed = 255;
300 307
301 printk(KERN_DEBUG "adt746x: setting fans speed to %d " 308 if (verbose)
302 "(limit exceeded by %d on %s) \n", 309 printk(KERN_DEBUG "adt746x: Setting fans speed to %d "
303 new_speed, var, 310 "(limit exceeded by %d on %s) \n",
304 sensor_location[fan_number+1]); 311 new_speed, var,
312 sensor_location[fan_number+1]);
305 write_both_fan_speed(th, new_speed); 313 write_both_fan_speed(th, new_speed);
306 th->last_var[fan_number] = var; 314 th->last_var[fan_number] = var;
307 } else if (var < -2) { 315 } else if (var < -2) {
@@ -309,8 +317,9 @@ static void update_fans_speed (struct thermostat *th)
309 * so cold (lastvar >= -1) */ 317 * so cold (lastvar >= -1) */
310 if (i == 2 && lastvar < -1) { 318 if (i == 2 && lastvar < -1) {
311 if (th->last_speed[fan_number] != 0) 319 if (th->last_speed[fan_number] != 0)
312 printk(KERN_DEBUG "adt746x: Stopping " 320 if (verbose)
313 "fans.\n"); 321 printk(KERN_DEBUG "adt746x: Stopping "
322 "fans.\n");
314 write_both_fan_speed(th, 0); 323 write_both_fan_speed(th, 0);
315 } 324 }
316 } 325 }
@@ -406,7 +415,7 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
406 th->initial_limits[i] = read_reg(th, LIMIT_REG[i]); 415 th->initial_limits[i] = read_reg(th, LIMIT_REG[i]);
407 set_limit(th, i); 416 set_limit(th, i);
408 } 417 }
409 418
410 printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d" 419 printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d"
411 " to %d, %d, %d\n", 420 " to %d, %d, %d\n",
412 th->initial_limits[0], th->initial_limits[1], 421 th->initial_limits[0], th->initial_limits[1],
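
The adt746x change is purely about log volume: a new verbose module parameter gates the KERN_DEBUG fan-speed chatter that previously fired on every adjustment. One way to keep such call sites tidy, a hypothetical vdbg() wrapper rather than anything in the patch, would be:

	static int verbose;
	module_param(verbose, bool, 0);
	MODULE_PARM_DESC(verbose, "Verbose log operations (default 0)");

	/* illustrative wrapper so each call site needs no if (verbose) block */
	#define vdbg(fmt, args...) do { \
			if (verbose) \
				printk(KERN_DEBUG "adt746x: " fmt, ##args); \
		} while (0)
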
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 190878eef990..435427daed75 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -1988,18 +1988,13 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
1988 1988
1989static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match) 1989static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
1990{ 1990{
1991 int rc;
1992
1993 state = state_detached; 1991 state = state_detached;
1994 1992
1995 /* Lookup the fans in the device tree */ 1993 /* Lookup the fans in the device tree */
1996 fcu_lookup_fans(dev->node); 1994 fcu_lookup_fans(dev->node);
1997 1995
1998 /* Add the driver */ 1996 /* Add the driver */
1999 rc = i2c_add_driver(&therm_pm72_driver); 1997 return i2c_add_driver(&therm_pm72_driver);
2000 if (rc < 0)
2001 return rc;
2002 return 0;
2003} 1998}
2004 1999
2005static int fcu_of_remove(struct of_device* dev) 2000static int fcu_of_remove(struct of_device* dev)
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index a0a41ad0f2b5..c62ed68a3138 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -240,12 +240,7 @@ static int wf_lm75_detach(struct i2c_client *client)
240 240
241static int __init wf_lm75_sensor_init(void) 241static int __init wf_lm75_sensor_init(void)
242{ 242{
243 int rc; 243 return i2c_add_driver(&wf_lm75_driver);
244
245 rc = i2c_add_driver(&wf_lm75_driver);
246 if (rc < 0)
247 return rc;
248 return 0;
249} 244}
250 245
251static void __exit wf_lm75_sensor_exit(void) 246static void __exit wf_lm75_sensor_exit(void)
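
Both macintosh hunks collapse the same boilerplate: i2c_add_driver() already returns 0 or a negative errno, so staging it through rc and re-returning 0 adds nothing. The shape being removed, with illustrative names:

	static int __init my_init(void)
	{
		/* was: rc = i2c_add_driver(&my_driver);
		 *      if (rc < 0) return rc;
		 *      return 0;                       */
		return i2c_add_driver(&my_driver);
	}
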
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 252d55df9642..76a189ceb529 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -315,6 +315,8 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
315 if (bitmap->file == NULL) 315 if (bitmap->file == NULL)
316 return write_sb_page(bitmap->mddev, bitmap->offset, page, wait); 316 return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);
317 317
318 flush_dcache_page(page); /* make sure visible to anyone reading the file */
319
318 if (wait) 320 if (wait)
319 lock_page(page); 321 lock_page(page);
320 else { 322 else {
@@ -341,7 +343,7 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
341 /* add to list to be waited for by daemon */ 343 /* add to list to be waited for by daemon */
342 struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO); 344 struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
343 item->page = page; 345 item->page = page;
344 page_cache_get(page); 346 get_page(page);
345 spin_lock(&bitmap->write_lock); 347 spin_lock(&bitmap->write_lock);
346 list_add(&item->list, &bitmap->complete_pages); 348 list_add(&item->list, &bitmap->complete_pages);
347 spin_unlock(&bitmap->write_lock); 349 spin_unlock(&bitmap->write_lock);
@@ -357,10 +359,10 @@ static struct page *read_page(struct file *file, unsigned long index,
357 struct inode *inode = file->f_mapping->host; 359 struct inode *inode = file->f_mapping->host;
358 struct page *page = NULL; 360 struct page *page = NULL;
359 loff_t isize = i_size_read(inode); 361 loff_t isize = i_size_read(inode);
360 unsigned long end_index = isize >> PAGE_CACHE_SHIFT; 362 unsigned long end_index = isize >> PAGE_SHIFT;
361 363
362 PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE, 364 PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
363 (unsigned long long)index << PAGE_CACHE_SHIFT); 365 (unsigned long long)index << PAGE_SHIFT);
364 366
365 page = read_cache_page(inode->i_mapping, index, 367 page = read_cache_page(inode->i_mapping, index,
366 (filler_t *)inode->i_mapping->a_ops->readpage, file); 368 (filler_t *)inode->i_mapping->a_ops->readpage, file);
@@ -368,7 +370,7 @@ static struct page *read_page(struct file *file, unsigned long index,
368 goto out; 370 goto out;
369 wait_on_page_locked(page); 371 wait_on_page_locked(page);
370 if (!PageUptodate(page) || PageError(page)) { 372 if (!PageUptodate(page) || PageError(page)) {
371 page_cache_release(page); 373 put_page(page);
372 page = ERR_PTR(-EIO); 374 page = ERR_PTR(-EIO);
373 goto out; 375 goto out;
374 } 376 }
@@ -376,14 +378,14 @@ static struct page *read_page(struct file *file, unsigned long index,
376 if (index > end_index) /* we have read beyond EOF */ 378 if (index > end_index) /* we have read beyond EOF */
377 *bytes_read = 0; 379 *bytes_read = 0;
378 else if (index == end_index) /* possible short read */ 380 else if (index == end_index) /* possible short read */
379 *bytes_read = isize & ~PAGE_CACHE_MASK; 381 *bytes_read = isize & ~PAGE_MASK;
380 else 382 else
381 *bytes_read = PAGE_CACHE_SIZE; /* got a full page */ 383 *bytes_read = PAGE_SIZE; /* got a full page */
382out: 384out:
383 if (IS_ERR(page)) 385 if (IS_ERR(page))
384 printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n", 386 printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
385 (int)PAGE_CACHE_SIZE, 387 (int)PAGE_SIZE,
386 (unsigned long long)index << PAGE_CACHE_SHIFT, 388 (unsigned long long)index << PAGE_SHIFT,
387 PTR_ERR(page)); 389 PTR_ERR(page));
388 return page; 390 return page;
389} 391}
@@ -406,11 +408,11 @@ int bitmap_update_sb(struct bitmap *bitmap)
406 return 0; 408 return 0;
407 } 409 }
408 spin_unlock_irqrestore(&bitmap->lock, flags); 410 spin_unlock_irqrestore(&bitmap->lock, flags);
409 sb = (bitmap_super_t *)kmap(bitmap->sb_page); 411 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
410 sb->events = cpu_to_le64(bitmap->mddev->events); 412 sb->events = cpu_to_le64(bitmap->mddev->events);
411 if (!bitmap->mddev->degraded) 413 if (!bitmap->mddev->degraded)
412 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 414 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
413 kunmap(bitmap->sb_page); 415 kunmap_atomic(sb, KM_USER0);
414 return write_page(bitmap, bitmap->sb_page, 1); 416 return write_page(bitmap, bitmap->sb_page, 1);
415} 417}
416 418
@@ -421,7 +423,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
421 423
422 if (!bitmap || !bitmap->sb_page) 424 if (!bitmap || !bitmap->sb_page)
423 return; 425 return;
424 sb = (bitmap_super_t *)kmap(bitmap->sb_page); 426 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
425 printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap)); 427 printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
426 printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic)); 428 printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
427 printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version)); 429 printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -440,7 +442,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
440 printk(KERN_DEBUG " sync size: %llu KB\n", 442 printk(KERN_DEBUG " sync size: %llu KB\n",
441 (unsigned long long)le64_to_cpu(sb->sync_size)/2); 443 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
442 printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind)); 444 printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
443 kunmap(bitmap->sb_page); 445 kunmap_atomic(sb, KM_USER0);
444} 446}
445 447
446/* read the superblock from the bitmap file and initialize some bitmap fields */ 448/* read the superblock from the bitmap file and initialize some bitmap fields */
@@ -466,7 +468,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
466 return err; 468 return err;
467 } 469 }
468 470
469 sb = (bitmap_super_t *)kmap(bitmap->sb_page); 471 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
470 472
471 if (bytes_read < sizeof(*sb)) { /* short read */ 473 if (bytes_read < sizeof(*sb)) { /* short read */
472 printk(KERN_INFO "%s: bitmap file superblock truncated\n", 474 printk(KERN_INFO "%s: bitmap file superblock truncated\n",
@@ -485,12 +487,12 @@ static int bitmap_read_sb(struct bitmap *bitmap)
485 else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || 487 else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
486 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI) 488 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
487 reason = "unrecognized superblock version"; 489 reason = "unrecognized superblock version";
488 else if (chunksize < 512 || chunksize > (1024 * 1024 * 4)) 490 else if (chunksize < PAGE_SIZE)
489 reason = "bitmap chunksize out of range (512B - 4MB)"; 491 reason = "bitmap chunksize too small";
490 else if ((1 << ffz(~chunksize)) != chunksize) 492 else if ((1 << ffz(~chunksize)) != chunksize)
491 reason = "bitmap chunksize not a power of 2"; 493 reason = "bitmap chunksize not a power of 2";
492 else if (daemon_sleep < 1 || daemon_sleep > 15) 494 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ)
493 reason = "daemon sleep period out of range (1-15s)"; 495 reason = "daemon sleep period out of range";
494 else if (write_behind > COUNTER_MAX) 496 else if (write_behind > COUNTER_MAX)
495 reason = "write-behind limit out of range (0 - 16383)"; 497 reason = "write-behind limit out of range (0 - 16383)";
496 if (reason) { 498 if (reason) {
@@ -535,7 +537,7 @@ success:
535 bitmap->events_cleared = bitmap->mddev->events; 537 bitmap->events_cleared = bitmap->mddev->events;
536 err = 0; 538 err = 0;
537out: 539out:
538 kunmap(bitmap->sb_page); 540 kunmap_atomic(sb, KM_USER0);
539 if (err) 541 if (err)
540 bitmap_print_sb(bitmap); 542 bitmap_print_sb(bitmap);
541 return err; 543 return err;
@@ -558,9 +560,9 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
558 spin_unlock_irqrestore(&bitmap->lock, flags); 560 spin_unlock_irqrestore(&bitmap->lock, flags);
559 return; 561 return;
560 } 562 }
561 page_cache_get(bitmap->sb_page); 563 get_page(bitmap->sb_page);
562 spin_unlock_irqrestore(&bitmap->lock, flags); 564 spin_unlock_irqrestore(&bitmap->lock, flags);
563 sb = (bitmap_super_t *)kmap(bitmap->sb_page); 565 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
564 switch (op) { 566 switch (op) {
565 case MASK_SET: sb->state |= bits; 567 case MASK_SET: sb->state |= bits;
566 break; 568 break;
@@ -568,8 +570,8 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
568 break; 570 break;
569 default: BUG(); 571 default: BUG();
570 } 572 }
571 kunmap(bitmap->sb_page); 573 kunmap_atomic(sb, KM_USER0);
572 page_cache_release(bitmap->sb_page); 574 put_page(bitmap->sb_page);
573} 575}
574 576
575/* 577/*
@@ -622,12 +624,11 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
622 624
623 while (pages--) 625 while (pages--)
624 if (map[pages]->index != 0) /* 0 is sb_page, release it below */ 626 if (map[pages]->index != 0) /* 0 is sb_page, release it below */
625 page_cache_release(map[pages]); 627 put_page(map[pages]);
626 kfree(map); 628 kfree(map);
627 kfree(attr); 629 kfree(attr);
628 630
629 if (sb_page) 631 safe_put_page(sb_page);
630 page_cache_release(sb_page);
631} 632}
632 633
633static void bitmap_stop_daemon(struct bitmap *bitmap); 634static void bitmap_stop_daemon(struct bitmap *bitmap);
@@ -654,7 +655,7 @@ static void drain_write_queues(struct bitmap *bitmap)
654 655
655 while ((item = dequeue_page(bitmap))) { 656 while ((item = dequeue_page(bitmap))) {
656 /* don't bother to wait */ 657 /* don't bother to wait */
657 page_cache_release(item->page); 658 put_page(item->page);
658 mempool_free(item, bitmap->write_pool); 659 mempool_free(item, bitmap->write_pool);
659 } 660 }
660 661
@@ -763,7 +764,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
763 764
764 /* make sure the page stays cached until it gets written out */ 765 /* make sure the page stays cached until it gets written out */
765 if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY)) 766 if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
766 page_cache_get(page); 767 get_page(page);
767 768
768 /* set the bit */ 769 /* set the bit */
769 kaddr = kmap_atomic(page, KM_USER0); 770 kaddr = kmap_atomic(page, KM_USER0);
@@ -854,6 +855,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
854 unsigned long bytes, offset, dummy; 855 unsigned long bytes, offset, dummy;
855 int outofdate; 856 int outofdate;
856 int ret = -ENOSPC; 857 int ret = -ENOSPC;
858 void *paddr;
857 859
858 chunks = bitmap->chunks; 860 chunks = bitmap->chunks;
859 file = bitmap->file; 861 file = bitmap->file;
@@ -887,12 +889,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
887 if (!bitmap->filemap) 889 if (!bitmap->filemap)
888 goto out; 890 goto out;
889 891
890 bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL); 892 bitmap->filemap_attr = kzalloc(sizeof(long) * num_pages, GFP_KERNEL);
891 if (!bitmap->filemap_attr) 893 if (!bitmap->filemap_attr)
892 goto out; 894 goto out;
893 895
894 memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages);
895
896 oldindex = ~0L; 896 oldindex = ~0L;
897 897
898 for (i = 0; i < chunks; i++) { 898 for (i = 0; i < chunks; i++) {
@@ -901,8 +901,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
901 bit = file_page_offset(i); 901 bit = file_page_offset(i);
902 if (index != oldindex) { /* this is a new page, read it in */ 902 if (index != oldindex) { /* this is a new page, read it in */
903 /* unmap the old page, we're done with it */ 903 /* unmap the old page, we're done with it */
904 if (oldpage != NULL)
905 kunmap(oldpage);
906 if (index == 0) { 904 if (index == 0) {
907 /* 905 /*
908 * if we're here then the superblock page 906 * if we're here then the superblock page
@@ -925,30 +923,32 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
925 923
926 oldindex = index; 924 oldindex = index;
927 oldpage = page; 925 oldpage = page;
928 kmap(page);
929 926
930 if (outofdate) { 927 if (outofdate) {
931 /* 928 /*
932 * if bitmap is out of date, dirty the 929 * if bitmap is out of date, dirty the
933 * whole page and write it out 930 * whole page and write it out
934 */ 931 */
935 memset(page_address(page) + offset, 0xff, 932 paddr = kmap_atomic(page, KM_USER0);
933 memset(paddr + offset, 0xff,
936 PAGE_SIZE - offset); 934 PAGE_SIZE - offset);
935 kunmap_atomic(paddr, KM_USER0);
937 ret = write_page(bitmap, page, 1); 936 ret = write_page(bitmap, page, 1);
938 if (ret) { 937 if (ret) {
939 kunmap(page);
940 /* release, page not in filemap yet */ 938 /* release, page not in filemap yet */
941 page_cache_release(page); 939 put_page(page);
942 goto out; 940 goto out;
943 } 941 }
944 } 942 }
945 943
946 bitmap->filemap[bitmap->file_pages++] = page; 944 bitmap->filemap[bitmap->file_pages++] = page;
947 } 945 }
946 paddr = kmap_atomic(page, KM_USER0);
948 if (bitmap->flags & BITMAP_HOSTENDIAN) 947 if (bitmap->flags & BITMAP_HOSTENDIAN)
949 b = test_bit(bit, page_address(page)); 948 b = test_bit(bit, paddr);
950 else 949 else
951 b = ext2_test_bit(bit, page_address(page)); 950 b = ext2_test_bit(bit, paddr);
951 kunmap_atomic(paddr, KM_USER0);
952 if (b) { 952 if (b) {
953 /* if the disk bit is set, set the memory bit */ 953 /* if the disk bit is set, set the memory bit */
954 bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap), 954 bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap),
@@ -963,9 +963,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
963 ret = 0; 963 ret = 0;
964 bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET); 964 bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
965 965
966 if (page) /* unmap the last page */
967 kunmap(page);
968
969 if (bit_cnt) { /* Kick recovery if any bits were set */ 966 if (bit_cnt) { /* Kick recovery if any bits were set */
970 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); 967 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
971 md_wakeup_thread(bitmap->mddev->thread); 968 md_wakeup_thread(bitmap->mddev->thread);
@@ -1021,6 +1018,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1021 int err = 0; 1018 int err = 0;
1022 int blocks; 1019 int blocks;
1023 int attr; 1020 int attr;
1021 void *paddr;
1024 1022
1025 if (bitmap == NULL) 1023 if (bitmap == NULL)
1026 return 0; 1024 return 0;
@@ -1043,7 +1041,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1043 /* skip this page unless it's marked as needing cleaning */ 1041 /* skip this page unless it's marked as needing cleaning */
1044 if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) { 1042 if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
1045 if (attr & BITMAP_PAGE_NEEDWRITE) { 1043 if (attr & BITMAP_PAGE_NEEDWRITE) {
1046 page_cache_get(page); 1044 get_page(page);
1047 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 1045 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1048 } 1046 }
1049 spin_unlock_irqrestore(&bitmap->lock, flags); 1047 spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1057,13 +1055,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1057 default: 1055 default:
1058 bitmap_file_kick(bitmap); 1056 bitmap_file_kick(bitmap);
1059 } 1057 }
1060 page_cache_release(page); 1058 put_page(page);
1061 } 1059 }
1062 continue; 1060 continue;
1063 } 1061 }
1064 1062
1065 /* grab the new page, sync and release the old */ 1063 /* grab the new page, sync and release the old */
1066 page_cache_get(page); 1064 get_page(page);
1067 if (lastpage != NULL) { 1065 if (lastpage != NULL) {
1068 if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) { 1066 if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
1069 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1067 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
@@ -1077,14 +1075,12 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1077 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1075 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1078 spin_unlock_irqrestore(&bitmap->lock, flags); 1076 spin_unlock_irqrestore(&bitmap->lock, flags);
1079 } 1077 }
1080 kunmap(lastpage); 1078 put_page(lastpage);
1081 page_cache_release(lastpage);
1082 if (err) 1079 if (err)
1083 bitmap_file_kick(bitmap); 1080 bitmap_file_kick(bitmap);
1084 } else 1081 } else
1085 spin_unlock_irqrestore(&bitmap->lock, flags); 1082 spin_unlock_irqrestore(&bitmap->lock, flags);
1086 lastpage = page; 1083 lastpage = page;
1087 kmap(page);
1088/* 1084/*
1089 printk("bitmap clean at page %lu\n", j); 1085 printk("bitmap clean at page %lu\n", j);
1090*/ 1086*/
@@ -1107,10 +1103,12 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1107 -1); 1103 -1);
1108 1104
1109 /* clear the bit */ 1105 /* clear the bit */
1106 paddr = kmap_atomic(page, KM_USER0);
1110 if (bitmap->flags & BITMAP_HOSTENDIAN) 1107 if (bitmap->flags & BITMAP_HOSTENDIAN)
1111 clear_bit(file_page_offset(j), page_address(page)); 1108 clear_bit(file_page_offset(j), paddr);
1112 else 1109 else
1113 ext2_clear_bit(file_page_offset(j), page_address(page)); 1110 ext2_clear_bit(file_page_offset(j), paddr);
1111 kunmap_atomic(paddr, KM_USER0);
1114 } 1112 }
1115 } 1113 }
1116 spin_unlock_irqrestore(&bitmap->lock, flags); 1114 spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1118,7 +1116,6 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1118 1116
1119 /* now sync the final page */ 1117 /* now sync the final page */
1120 if (lastpage != NULL) { 1118 if (lastpage != NULL) {
1121 kunmap(lastpage);
1122 spin_lock_irqsave(&bitmap->lock, flags); 1119 spin_lock_irqsave(&bitmap->lock, flags);
1123 if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) { 1120 if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) {
1124 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1121 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
@@ -1133,7 +1130,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1133 spin_unlock_irqrestore(&bitmap->lock, flags); 1130 spin_unlock_irqrestore(&bitmap->lock, flags);
1134 } 1131 }
1135 1132
1136 page_cache_release(lastpage); 1133 put_page(lastpage);
1137 } 1134 }
1138 1135
1139 return err; 1136 return err;
@@ -1184,7 +1181,7 @@ static void bitmap_writeback_daemon(mddev_t *mddev)
1184 PRINTK("finished page writeback: %p\n", page); 1181 PRINTK("finished page writeback: %p\n", page);
1185 1182
1186 err = PageError(page); 1183 err = PageError(page);
1187 page_cache_release(page); 1184 put_page(page);
1188 if (err) { 1185 if (err) {
1189 printk(KERN_WARNING "%s: bitmap file writeback " 1186 printk(KERN_WARNING "%s: bitmap file writeback "
1190 "failed (page %lu): %d\n", 1187 "failed (page %lu): %d\n",
@@ -1530,6 +1527,8 @@ void bitmap_destroy(mddev_t *mddev)
1530 return; 1527 return;
1531 1528
1532 mddev->bitmap = NULL; /* disconnect from the md device */ 1529 mddev->bitmap = NULL; /* disconnect from the md device */
1530 if (mddev->thread)
1531 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1533 1532
1534 bitmap_free(bitmap); 1533 bitmap_free(bitmap);
1535} 1534}
@@ -1555,12 +1554,10 @@ int bitmap_create(mddev_t *mddev)
1555 1554
1556 BUG_ON(file && mddev->bitmap_offset); 1555 BUG_ON(file && mddev->bitmap_offset);
1557 1556
1558 bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL); 1557 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1559 if (!bitmap) 1558 if (!bitmap)
1560 return -ENOMEM; 1559 return -ENOMEM;
1561 1560
1562 memset(bitmap, 0, sizeof(*bitmap));
1563
1564 spin_lock_init(&bitmap->lock); 1561 spin_lock_init(&bitmap->lock);
1565 bitmap->mddev = mddev; 1562 bitmap->mddev = mddev;
1566 1563
@@ -1601,12 +1598,11 @@ int bitmap_create(mddev_t *mddev)
1601#ifdef INJECT_FATAL_FAULT_1 1598#ifdef INJECT_FATAL_FAULT_1
1602 bitmap->bp = NULL; 1599 bitmap->bp = NULL;
1603#else 1600#else
1604 bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL); 1601 bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
1605#endif 1602#endif
1606 err = -ENOMEM; 1603 err = -ENOMEM;
1607 if (!bitmap->bp) 1604 if (!bitmap->bp)
1608 goto error; 1605 goto error;
1609 memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp));
1610 1606
1611 bitmap->flags |= BITMAP_ACTIVE; 1607 bitmap->flags |= BITMAP_ACTIVE;
1612 1608
@@ -1636,6 +1632,8 @@ int bitmap_create(mddev_t *mddev)
1636 1632
1637 if (IS_ERR(bitmap->writeback_daemon)) 1633 if (IS_ERR(bitmap->writeback_daemon))
1638 return PTR_ERR(bitmap->writeback_daemon); 1634 return PTR_ERR(bitmap->writeback_daemon);
1635 mddev->thread->timeout = bitmap->daemon_sleep * HZ;
1636
1639 return bitmap_update_sb(bitmap); 1637 return bitmap_update_sb(bitmap);
1640 1638
1641 error: 1639 error:
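
The recurring theme in the bitmap.c hunks is mapping discipline: every long-lived kmap()/kunmap() pair becomes a short kmap_atomic() critical section around the actual memory touch, kmalloc-plus-memset becomes kzalloc, and refcounting switches to plain get_page()/put_page() since these references are not page-cache aliases. A sketch of the atomic-mapping idiom as adopted here; the KM_USER0 slot argument is of this era, and the helper itself is mine:

	#include <linux/highmem.h>

	static int bitmap_test_one(struct page *page, unsigned long bit)
	{
		void *paddr = kmap_atomic(page, KM_USER0);
		int b = test_bit(bit, paddr);

		kunmap_atomic(paddr, KM_USER0);	/* nothing may sleep in between */
		return b;
	}
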
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index cf6631056683..a601a427885c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -690,6 +690,8 @@ bad3:
690bad2: 690bad2:
691 crypto_free_tfm(tfm); 691 crypto_free_tfm(tfm);
692bad1: 692bad1:
693 /* Must zero key material before freeing */
694 memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
693 kfree(cc); 695 kfree(cc);
694 return -EINVAL; 696 return -EINVAL;
695} 697}
@@ -706,6 +708,9 @@ static void crypt_dtr(struct dm_target *ti)
706 cc->iv_gen_ops->dtr(cc); 708 cc->iv_gen_ops->dtr(cc);
707 crypto_free_tfm(cc->tfm); 709 crypto_free_tfm(cc->tfm);
708 dm_put_device(ti, cc->dev); 710 dm_put_device(ti, cc->dev);
711
712 /* Must zero key material before freeing */
713 memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
709 kfree(cc); 714 kfree(cc);
710} 715}
711 716
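
Both dm-crypt exit paths gain the same hygiene rule: the crypt_config, which embeds the key material right after the struct, must be zeroed before kfree() so the key cannot be recovered from the freed allocation. Condensed into one hypothetical helper, with the struct layout abbreviated:

	struct crypt_config {
		unsigned int key_size;
		/* ... other fields elided ... */
		u8 key[0];		/* key material trails the struct */
	};

	static void crypt_config_free(struct crypt_config *cc)
	{
		/* wipe before handing the memory back to the allocator */
		memset(cc, 0, sizeof(*cc) + cc->key_size);
		kfree(cc);
	}
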
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index 1a77f3265706..f9035bfd1a9f 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -9,9 +9,6 @@
9 9
10#include "dm.h" 10#include "dm.h"
11 11
12/* FIXME make this configurable */
13#define DM_MAX_IO_REGIONS 8
14
15struct io_region { 12struct io_region {
16 struct block_device *bdev; 13 struct block_device *bdev;
17 sector_t sector; 14 sector_t sector;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 07d44e19536e..561bda5011e0 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -270,6 +270,7 @@ static int dm_hash_rename(const char *old, const char *new)
270{ 270{
271 char *new_name, *old_name; 271 char *new_name, *old_name;
272 struct hash_cell *hc; 272 struct hash_cell *hc;
273 struct dm_table *table;
273 274
274 /* 275 /*
275 * duplicate new. 276 * duplicate new.
@@ -317,6 +318,15 @@ static int dm_hash_rename(const char *old, const char *new)
317 /* rename the device node in devfs */ 318 /* rename the device node in devfs */
318 register_with_devfs(hc); 319 register_with_devfs(hc);
319 320
321 /*
322 * Wake up any dm event waiters.
323 */
324 table = dm_get_table(hc->md);
325 if (table) {
326 dm_table_event(table);
327 dm_table_put(table);
328 }
329
320 up_write(&_hash_lock); 330 up_write(&_hash_lock);
321 kfree(old_name); 331 kfree(old_name);
322 return 0; 332 return 0;
@@ -683,14 +693,18 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
683static int do_suspend(struct dm_ioctl *param) 693static int do_suspend(struct dm_ioctl *param)
684{ 694{
685 int r = 0; 695 int r = 0;
696 int do_lockfs = 1;
686 struct mapped_device *md; 697 struct mapped_device *md;
687 698
688 md = find_device(param); 699 md = find_device(param);
689 if (!md) 700 if (!md)
690 return -ENXIO; 701 return -ENXIO;
691 702
703 if (param->flags & DM_SKIP_LOCKFS_FLAG)
704 do_lockfs = 0;
705
692 if (!dm_suspended(md)) 706 if (!dm_suspended(md))
693 r = dm_suspend(md); 707 r = dm_suspend(md, do_lockfs);
694 708
695 if (!r) 709 if (!r)
696 r = __dev_status(md, param); 710 r = __dev_status(md, param);
@@ -702,6 +716,7 @@ static int do_suspend(struct dm_ioctl *param)
702static int do_resume(struct dm_ioctl *param) 716static int do_resume(struct dm_ioctl *param)
703{ 717{
704 int r = 0; 718 int r = 0;
719 int do_lockfs = 1;
705 struct hash_cell *hc; 720 struct hash_cell *hc;
706 struct mapped_device *md; 721 struct mapped_device *md;
707 struct dm_table *new_map; 722 struct dm_table *new_map;
@@ -727,8 +742,10 @@ static int do_resume(struct dm_ioctl *param)
727 /* Do we need to load a new map ? */ 742 /* Do we need to load a new map ? */
728 if (new_map) { 743 if (new_map) {
729 /* Suspend if it isn't already suspended */ 744 /* Suspend if it isn't already suspended */
745 if (param->flags & DM_SKIP_LOCKFS_FLAG)
746 do_lockfs = 0;
730 if (!dm_suspended(md)) 747 if (!dm_suspended(md))
731 dm_suspend(md); 748 dm_suspend(md, do_lockfs);
732 749
733 r = dm_swap_table(md, new_map); 750 r = dm_swap_table(md, new_map);
734 if (r) { 751 if (r) {
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a76349cb10a5..efe4adf78530 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -573,7 +573,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
573 lc->sync_search); 573 lc->sync_search);
574 lc->sync_search = *region + 1; 574 lc->sync_search = *region + 1;
575 575
576 if (*region == lc->region_count) 576 if (*region >= lc->region_count)
577 return 0; 577 return 0;
578 578
579 } while (log_test_bit(lc->recovering_bits, *region)); 579 } while (log_test_bit(lc->recovering_bits, *region));
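
The one-character dm-log fix is a boundary guard: find_next_zero_bit() returns its size argument when nothing is found, but sync_search can also have advanced past region_count on an earlier pass, and an exact == test would then hand out a bogus region. Annotated, assuming the surrounding core_get_resync_work() loop looks roughly like this:

	do {
		*region = find_next_zero_bit(lc->sync_bits, lc->region_count,
					     lc->sync_search);
		lc->sync_search = *region + 1;

		if (*region >= lc->region_count)	/* == would miss overshoot */
			return 0;
	} while (log_test_bit(lc->recovering_bits, *region));
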
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6b0fc1670929..6cfa8d435d55 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -562,6 +562,8 @@ struct mirror_set {
562 region_t nr_regions; 562 region_t nr_regions;
563 int in_sync; 563 int in_sync;
564 564
565 struct mirror *default_mirror; /* Default mirror */
566
565 unsigned int nr_mirrors; 567 unsigned int nr_mirrors;
566 struct mirror mirror[0]; 568 struct mirror mirror[0];
567}; 569};
@@ -611,7 +613,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
611 unsigned long flags = 0; 613 unsigned long flags = 0;
612 614
613 /* fill in the source */ 615 /* fill in the source */
614 m = ms->mirror + DEFAULT_MIRROR; 616 m = ms->default_mirror;
615 from.bdev = m->dev->bdev; 617 from.bdev = m->dev->bdev;
616 from.sector = m->offset + region_to_sector(reg->rh, reg->key); 618 from.sector = m->offset + region_to_sector(reg->rh, reg->key);
617 if (reg->key == (ms->nr_regions - 1)) { 619 if (reg->key == (ms->nr_regions - 1)) {
@@ -627,7 +629,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
627 629
628 /* fill in the destinations */ 630 /* fill in the destinations */
629 for (i = 0, dest = to; i < ms->nr_mirrors; i++) { 631 for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
630 if (i == DEFAULT_MIRROR) 632 if (&ms->mirror[i] == ms->default_mirror)
631 continue; 633 continue;
632 634
633 m = ms->mirror + i; 635 m = ms->mirror + i;
@@ -682,7 +684,7 @@ static void do_recovery(struct mirror_set *ms)
682static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) 684static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
683{ 685{
684 /* FIXME: add read balancing */ 686 /* FIXME: add read balancing */
685 return ms->mirror + DEFAULT_MIRROR; 687 return ms->default_mirror;
686} 688}
687 689
688/* 690/*
@@ -709,7 +711,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
709 if (rh_in_sync(&ms->rh, region, 0)) 711 if (rh_in_sync(&ms->rh, region, 0))
710 m = choose_mirror(ms, bio->bi_sector); 712 m = choose_mirror(ms, bio->bi_sector);
711 else 713 else
712 m = ms->mirror + DEFAULT_MIRROR; 714 m = ms->default_mirror;
713 715
714 map_bio(ms, m, bio); 716 map_bio(ms, m, bio);
715 generic_make_request(bio); 717 generic_make_request(bio);
@@ -833,7 +835,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
833 rh_delay(&ms->rh, bio); 835 rh_delay(&ms->rh, bio);
834 836
835 while ((bio = bio_list_pop(&nosync))) { 837 while ((bio = bio_list_pop(&nosync))) {
836 map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio); 838 map_bio(ms, ms->default_mirror, bio);
837 generic_make_request(bio); 839 generic_make_request(bio);
838 } 840 }
839} 841}
@@ -900,6 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
900 ms->nr_mirrors = nr_mirrors; 902 ms->nr_mirrors = nr_mirrors;
901 ms->nr_regions = dm_sector_div_up(ti->len, region_size); 903 ms->nr_regions = dm_sector_div_up(ti->len, region_size);
902 ms->in_sync = 0; 904 ms->in_sync = 0;
905 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
903 906
904 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 907 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
905 ti->error = "dm-mirror: Error creating dirty region hash"; 908 ti->error = "dm-mirror: Error creating dirty region hash";
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ab54f99b7c3b..4b9dd8fb1e5c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -371,6 +371,20 @@ static inline ulong round_up(ulong n, ulong size)
371 return (n + size) & ~size; 371 return (n + size) & ~size;
372} 372}
373 373
374static void read_snapshot_metadata(struct dm_snapshot *s)
375{
376 if (s->have_metadata)
377 return;
378
379 if (s->store.read_metadata(&s->store)) {
380 down_write(&s->lock);
381 s->valid = 0;
382 up_write(&s->lock);
383 }
384
385 s->have_metadata = 1;
386}
387
374/* 388/*
375 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> 389 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
376 */ 390 */
@@ -848,16 +862,7 @@ static void snapshot_resume(struct dm_target *ti)
848{ 862{
849 struct dm_snapshot *s = (struct dm_snapshot *) ti->private; 863 struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
850 864
851 if (s->have_metadata) 865 read_snapshot_metadata(s);
852 return;
853
854 if (s->store.read_metadata(&s->store)) {
855 down_write(&s->lock);
856 s->valid = 0;
857 up_write(&s->lock);
858 }
859
860 s->have_metadata = 1;
861} 866}
862 867
863static int snapshot_status(struct dm_target *ti, status_type_t type, 868static int snapshot_status(struct dm_target *ti, status_type_t type,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 930b9fc27953..0e481512f918 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -55,6 +55,7 @@ union map_info *dm_get_mapinfo(struct bio *bio)
55 */ 55 */
56#define DMF_BLOCK_IO 0 56#define DMF_BLOCK_IO 0
57#define DMF_SUSPENDED 1 57#define DMF_SUSPENDED 1
58#define DMF_FROZEN 2
58 59
59struct mapped_device { 60struct mapped_device {
60 struct rw_semaphore io_lock; 61 struct rw_semaphore io_lock;
@@ -97,7 +98,7 @@ struct mapped_device {
97 * freeze/thaw support require holding onto a super block 98 * freeze/thaw support require holding onto a super block
98 */ 99 */
99 struct super_block *frozen_sb; 100 struct super_block *frozen_sb;
100 struct block_device *frozen_bdev; 101 struct block_device *suspended_bdev;
101}; 102};
102 103
103#define MIN_IOS 256 104#define MIN_IOS 256
@@ -836,9 +837,9 @@ static void __set_size(struct mapped_device *md, sector_t size)
836{ 837{
837 set_capacity(md->disk, size); 838 set_capacity(md->disk, size);
838 839
839 down(&md->frozen_bdev->bd_inode->i_sem); 840 down(&md->suspended_bdev->bd_inode->i_sem);
840 i_size_write(md->frozen_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); 841 i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
841 up(&md->frozen_bdev->bd_inode->i_sem); 842 up(&md->suspended_bdev->bd_inode->i_sem);
842} 843}
843 844
844static int __bind(struct mapped_device *md, struct dm_table *t) 845static int __bind(struct mapped_device *md, struct dm_table *t)
@@ -902,10 +903,9 @@ int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
902 return create_aux(minor, 1, result); 903 return create_aux(minor, 1, result);
903} 904}
904 905
905void *dm_get_mdptr(dev_t dev) 906static struct mapped_device *dm_find_md(dev_t dev)
906{ 907{
907 struct mapped_device *md; 908 struct mapped_device *md;
908 void *mdptr = NULL;
909 unsigned minor = MINOR(dev); 909 unsigned minor = MINOR(dev);
910 910
911 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS)) 911 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
@@ -914,12 +914,32 @@ void *dm_get_mdptr(dev_t dev)
914 down(&_minor_lock); 914 down(&_minor_lock);
915 915
916 md = idr_find(&_minor_idr, minor); 916 md = idr_find(&_minor_idr, minor);
917 917 if (!md || (dm_disk(md)->first_minor != minor))
918 if (md && (dm_disk(md)->first_minor == minor)) 918 md = NULL;
919 mdptr = md->interface_ptr;
920 919
921 up(&_minor_lock); 920 up(&_minor_lock);
922 921
922 return md;
923}
924
925struct mapped_device *dm_get_md(dev_t dev)
926{
927 struct mapped_device *md = dm_find_md(dev);
928
929 if (md)
930 dm_get(md);
931
932 return md;
933}
934
935void *dm_get_mdptr(dev_t dev)
936{
937 struct mapped_device *md;
938 void *mdptr = NULL;
939
940 md = dm_find_md(dev);
941 if (md)
942 mdptr = md->interface_ptr;
923 return mdptr; 943 return mdptr;
924} 944}
925 945
@@ -991,43 +1011,33 @@ out:
991 */ 1011 */
992static int lock_fs(struct mapped_device *md) 1012static int lock_fs(struct mapped_device *md)
993{ 1013{
994 int r = -ENOMEM; 1014 int r;
995
996 md->frozen_bdev = bdget_disk(md->disk, 0);
997 if (!md->frozen_bdev) {
998 DMWARN("bdget failed in lock_fs");
999 goto out;
1000 }
1001 1015
1002 WARN_ON(md->frozen_sb); 1016 WARN_ON(md->frozen_sb);
1003 1017
1004 md->frozen_sb = freeze_bdev(md->frozen_bdev); 1018 md->frozen_sb = freeze_bdev(md->suspended_bdev);
1005 if (IS_ERR(md->frozen_sb)) { 1019 if (IS_ERR(md->frozen_sb)) {
1006 r = PTR_ERR(md->frozen_sb); 1020 r = PTR_ERR(md->frozen_sb);
1007 goto out_bdput; 1021 md->frozen_sb = NULL;
1022 return r;
1008 } 1023 }
1009 1024
1025 set_bit(DMF_FROZEN, &md->flags);
1026
1010 /* don't bdput right now, we don't want the bdev 1027 /* don't bdput right now, we don't want the bdev
1011 * to go away while it is locked. We'll bdput 1028 * to go away while it is locked.
1012 * in unlock_fs
1013 */ 1029 */
1014 return 0; 1030 return 0;
1015
1016out_bdput:
1017 bdput(md->frozen_bdev);
1018 md->frozen_sb = NULL;
1019 md->frozen_bdev = NULL;
1020out:
1021 return r;
1022} 1031}
1023 1032
1024static void unlock_fs(struct mapped_device *md) 1033static void unlock_fs(struct mapped_device *md)
1025{ 1034{
1026 thaw_bdev(md->frozen_bdev, md->frozen_sb); 1035 if (!test_bit(DMF_FROZEN, &md->flags))
1027 bdput(md->frozen_bdev); 1036 return;
1028 1037
1038 thaw_bdev(md->suspended_bdev, md->frozen_sb);
1029 md->frozen_sb = NULL; 1039 md->frozen_sb = NULL;
1030 md->frozen_bdev = NULL; 1040 clear_bit(DMF_FROZEN, &md->flags);
1031} 1041}
1032 1042
1033/* 1043/*
@@ -1037,7 +1047,7 @@ static void unlock_fs(struct mapped_device *md)
1037 * dm_bind_table, dm_suspend must be called to flush any in 1047 * dm_bind_table, dm_suspend must be called to flush any in
1038 * flight bios and ensure that any further io gets deferred. 1048 * flight bios and ensure that any further io gets deferred.
1039 */ 1049 */
1040int dm_suspend(struct mapped_device *md) 1050int dm_suspend(struct mapped_device *md, int do_lockfs)
1041{ 1051{
1042 struct dm_table *map = NULL; 1052 struct dm_table *map = NULL;
1043 DECLARE_WAITQUEUE(wait, current); 1053 DECLARE_WAITQUEUE(wait, current);
@@ -1053,10 +1063,19 @@ int dm_suspend(struct mapped_device *md)
1053 /* This does not get reverted if there's an error later. */ 1063 /* This does not get reverted if there's an error later. */
1054 dm_table_presuspend_targets(map); 1064 dm_table_presuspend_targets(map);
1055 1065
1056 /* Flush I/O to the device. */ 1066 md->suspended_bdev = bdget_disk(md->disk, 0);
1057 r = lock_fs(md); 1067 if (!md->suspended_bdev) {
1058 if (r) 1068 DMWARN("bdget failed in dm_suspend");
1069 r = -ENOMEM;
1059 goto out; 1070 goto out;
1071 }
1072
1073 /* Flush I/O to the device. */
1074 if (do_lockfs) {
1075 r = lock_fs(md);
1076 if (r)
1077 goto out;
1078 }
1060 1079
1061 /* 1080 /*
1062 * First we set the BLOCK_IO flag so no more ios will be mapped. 1081 * First we set the BLOCK_IO flag so no more ios will be mapped.
@@ -1105,6 +1124,11 @@ int dm_suspend(struct mapped_device *md)
1105 r = 0; 1124 r = 0;
1106 1125
1107out: 1126out:
1127 if (r && md->suspended_bdev) {
1128 bdput(md->suspended_bdev);
1129 md->suspended_bdev = NULL;
1130 }
1131
1108 dm_table_put(map); 1132 dm_table_put(map);
1109 up(&md->suspend_lock); 1133 up(&md->suspend_lock);
1110 return r; 1134 return r;
@@ -1135,6 +1159,9 @@ int dm_resume(struct mapped_device *md)
1135 1159
1136 unlock_fs(md); 1160 unlock_fs(md);
1137 1161
1162 bdput(md->suspended_bdev);
1163 md->suspended_bdev = NULL;
1164
1138 clear_bit(DMF_SUSPENDED, &md->flags); 1165 clear_bit(DMF_SUSPENDED, &md->flags);
1139 1166
1140 dm_table_unplug_all(map); 1167 dm_table_unplug_all(map);
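
The device-mapper changes thread one new choice through the whole suspend path: callers may now skip the filesystem freeze, with DM_SKIP_LOCKFS_FLAG from the ioctl becoming the do_lockfs argument of dm_suspend(). To support that, dm.c takes the bdev reference up front in dm_suspend() rather than inside lock_fs(), so the bdev is pinned for the whole suspend either way, and a DMF_FROZEN bit records whether unlock_fs() actually owes a thaw_bdev(). Caller-side the translation is just the following, as in do_suspend()/do_resume():

	/* sketch of an ioctl-path caller; param and md as in do_suspend() */
	int do_lockfs = !(param->flags & DM_SKIP_LOCKFS_FLAG);

	if (!dm_suspended(md))
		r = dm_suspend(md, do_lockfs);	/* second argument is new here */
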
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index e38c3fc1a1db..4eaf075da217 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -28,7 +28,7 @@
28 * in types.h. 28 * in types.h.
29 */ 29 */
30#ifdef CONFIG_LBD 30#ifdef CONFIG_LBD
31#define SECTOR_FORMAT "%Lu" 31#define SECTOR_FORMAT "%llu"
32#else 32#else
33#define SECTOR_FORMAT "%lu" 33#define SECTOR_FORMAT "%lu"
34#endif 34#endif
@@ -58,6 +58,7 @@ int dm_create(struct mapped_device **md);
58int dm_create_with_minor(unsigned int minor, struct mapped_device **md); 58int dm_create_with_minor(unsigned int minor, struct mapped_device **md);
59void dm_set_mdptr(struct mapped_device *md, void *ptr); 59void dm_set_mdptr(struct mapped_device *md, void *ptr);
60void *dm_get_mdptr(dev_t dev); 60void *dm_get_mdptr(dev_t dev);
61struct mapped_device *dm_get_md(dev_t dev);
61 62
62/* 63/*
63 * Reference counting for md. 64 * Reference counting for md.
@@ -68,7 +69,7 @@ void dm_put(struct mapped_device *md);
68/* 69/*
69 * A device can still be used while suspended, but I/O is deferred. 70 * A device can still be used while suspended, but I/O is deferred.
70 */ 71 */
71int dm_suspend(struct mapped_device *md); 72int dm_suspend(struct mapped_device *md, int with_lockfs);
72int dm_resume(struct mapped_device *md); 73int dm_resume(struct mapped_device *md);
73 74
74/* 75/*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 0248f8e7eac0..a7a5ab554338 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -316,9 +316,10 @@ static int stop(mddev_t *mddev)
316 return 0; 316 return 0;
317} 317}
318 318
319static mdk_personality_t faulty_personality = 319static struct mdk_personality faulty_personality =
320{ 320{
321 .name = "faulty", 321 .name = "faulty",
322 .level = LEVEL_FAULTY,
322 .owner = THIS_MODULE, 323 .owner = THIS_MODULE,
323 .make_request = make_request, 324 .make_request = make_request,
324 .run = run, 325 .run = run,
@@ -329,15 +330,17 @@ static mdk_personality_t faulty_personality =
329 330
330static int __init raid_init(void) 331static int __init raid_init(void)
331{ 332{
332 return register_md_personality(FAULTY, &faulty_personality); 333 return register_md_personality(&faulty_personality);
333} 334}
334 335
335static void raid_exit(void) 336static void raid_exit(void)
336{ 337{
337 unregister_md_personality(FAULTY); 338 unregister_md_personality(&faulty_personality);
338} 339}
339 340
340module_init(raid_init); 341module_init(raid_init);
341module_exit(raid_exit); 342module_exit(raid_exit);
342MODULE_LICENSE("GPL"); 343MODULE_LICENSE("GPL");
343MODULE_ALIAS("md-personality-10"); /* faulty */ 344MODULE_ALIAS("md-personality-10"); /* faulty */
345MODULE_ALIAS("md-faulty");
346MODULE_ALIAS("md-level--5");
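
faulty.c shows the new md registration model in miniature: a personality is now a struct mdk_personality that carries its own .level, registration passes the struct rather than a magic index, and the extra MODULE_ALIAS lines ("md-faulty", "md-level--5") let the module autoload by name or by level. A skeleton under the same scheme, assuming the md headers of this era and using a made-up level number:

	static struct mdk_personality demo_personality = {
		.name	= "demo",
		.level	= 99,			/* hypothetical level value */
		.owner	= THIS_MODULE,
		/* .make_request, .run, .stop as usual */
	};

	static int __init demo_init(void)
	{
		return register_md_personality(&demo_personality);
	}

	static void __exit demo_exit(void)
	{
		unregister_md_personality(&demo_personality);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_ALIAS("md-demo");
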
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index eb7036485975..ca99979c868a 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -561,11 +561,13 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
561 * Cancels a kcopyd job, eg. someone might be deactivating a 561 * Cancels a kcopyd job, eg. someone might be deactivating a
562 * mirror. 562 * mirror.
563 */ 563 */
564#if 0
564int kcopyd_cancel(struct kcopyd_job *job, int block) 565int kcopyd_cancel(struct kcopyd_job *job, int block)
565{ 566{
566 /* FIXME: finish */ 567 /* FIXME: finish */
567 return -1; 568 return -1;
568} 569}
570#endif /* 0 */
569 571
570/*----------------------------------------------------------------- 572/*-----------------------------------------------------------------
571 * Unit setup 573 * Unit setup
@@ -684,4 +686,3 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
684EXPORT_SYMBOL(kcopyd_client_create); 686EXPORT_SYMBOL(kcopyd_client_create);
685EXPORT_SYMBOL(kcopyd_client_destroy); 687EXPORT_SYMBOL(kcopyd_client_destroy);
686EXPORT_SYMBOL(kcopyd_copy); 688EXPORT_SYMBOL(kcopyd_copy);
687EXPORT_SYMBOL(kcopyd_cancel);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 946efef3a8f5..777585458c85 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -121,11 +121,10 @@ static int linear_run (mddev_t *mddev)
121 sector_t curr_offset; 121 sector_t curr_offset;
122 struct list_head *tmp; 122 struct list_head *tmp;
123 123
124 conf = kmalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t), 124 conf = kzalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t),
125 GFP_KERNEL); 125 GFP_KERNEL);
126 if (!conf) 126 if (!conf)
127 goto out; 127 goto out;
128 memset(conf, 0, sizeof(*conf) + mddev->raid_disks*sizeof(dev_info_t));
129 mddev->private = conf; 128 mddev->private = conf;
130 129
131 cnt = 0; 130 cnt = 0;
@@ -352,9 +351,10 @@ static void linear_status (struct seq_file *seq, mddev_t *mddev)
352} 351}
353 352
354 353
355static mdk_personality_t linear_personality= 354static struct mdk_personality linear_personality =
356{ 355{
357 .name = "linear", 356 .name = "linear",
357 .level = LEVEL_LINEAR,
358 .owner = THIS_MODULE, 358 .owner = THIS_MODULE,
359 .make_request = linear_make_request, 359 .make_request = linear_make_request,
360 .run = linear_run, 360 .run = linear_run,
@@ -364,16 +364,18 @@ static mdk_personality_t linear_personality=
364 364
365static int __init linear_init (void) 365static int __init linear_init (void)
366{ 366{
367 return register_md_personality (LINEAR, &linear_personality); 367 return register_md_personality (&linear_personality);
368} 368}
369 369
370static void linear_exit (void) 370static void linear_exit (void)
371{ 371{
372 unregister_md_personality (LINEAR); 372 unregister_md_personality (&linear_personality);
373} 373}
374 374
375 375
376module_init(linear_init); 376module_init(linear_init);
377module_exit(linear_exit); 377module_exit(linear_exit);
378MODULE_LICENSE("GPL"); 378MODULE_LICENSE("GPL");
379MODULE_ALIAS("md-personality-1"); /* LINEAR */ 379MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
380MODULE_ALIAS("md-linear");
381MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8175a2a222da..1b76fb29fb70 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -42,6 +42,7 @@
42#include <linux/devfs_fs_kernel.h> 42#include <linux/devfs_fs_kernel.h>
43#include <linux/buffer_head.h> /* for invalidate_bdev */ 43#include <linux/buffer_head.h> /* for invalidate_bdev */
44#include <linux/suspend.h> 44#include <linux/suspend.h>
45#include <linux/poll.h>
45 46
46#include <linux/init.h> 47#include <linux/init.h>
47 48
@@ -67,7 +68,7 @@
67static void autostart_arrays (int part); 68static void autostart_arrays (int part);
68#endif 69#endif
69 70
70static mdk_personality_t *pers[MAX_PERSONALITY]; 71static LIST_HEAD(pers_list);
71static DEFINE_SPINLOCK(pers_lock); 72static DEFINE_SPINLOCK(pers_lock);
72 73
73/* 74/*
@@ -80,10 +81,22 @@ static DEFINE_SPINLOCK(pers_lock);
80 * idle IO detection. 81 * idle IO detection.
81 * 82 *
82 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 83 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
84 * or /sys/block/mdX/md/sync_speed_{min,max}
83 */ 85 */
84 86
85static int sysctl_speed_limit_min = 1000; 87static int sysctl_speed_limit_min = 1000;
86static int sysctl_speed_limit_max = 200000; 88static int sysctl_speed_limit_max = 200000;
89static inline int speed_min(mddev_t *mddev)
90{
91 return mddev->sync_speed_min ?
92 mddev->sync_speed_min : sysctl_speed_limit_min;
93}
94
95static inline int speed_max(mddev_t *mddev)
96{
97 return mddev->sync_speed_max ?
98 mddev->sync_speed_max : sysctl_speed_limit_max;
99}
87 100
88static struct ctl_table_header *raid_table_header; 101static struct ctl_table_header *raid_table_header;
89 102
@@ -134,6 +147,24 @@ static struct block_device_operations md_fops;
134static int start_readonly; 147static int start_readonly;
135 148
136/* 149/*
150 * We have a system wide 'event count' that is incremented
151 * on any 'interesting' event, and readers of /proc/mdstat
152 * can use 'poll' or 'select' to find out when the event
153 * count increases.
154 *
155 * Events are:
156 * start array, stop array, error, add device, remove device,
157 * start build, activate spare
158 */
159static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
160static atomic_t md_event_count;
161static void md_new_event(mddev_t *mddev)
162{
163 atomic_inc(&md_event_count);
164 wake_up(&md_event_waiters);
165}
166
167/*
137 * Enables to iterate over all existing md arrays 168 * Enables to iterate over all existing md arrays
138 * all_mddevs_lock protects this list. 169 * all_mddevs_lock protects this list.
139 */ 170 */
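
The md_new_event() machinery above is what makes /proc/mdstat pollable: every interesting transition bumps md_event_count and wakes md_event_waiters, and a poll handler elsewhere in the patch reports the change. A userspace sketch of the consumption pattern follows; which poll flags the kernel raises on an event is decided by that handler, not this hunk, so the mask here is deliberately broad and the re-read-after-wakeup step is convention:

	#include <fcntl.h>
	#include <poll.h>
	#include <unistd.h>

	static void wait_for_md_event(void)
	{
		char buf[4096];
		int fd = open("/proc/mdstat", O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };

		if (fd < 0)
			return;
		read(fd, buf, sizeof(buf));	/* consume the current state first */
		poll(&pfd, 1, -1);		/* wakes when the event count bumps */
		close(fd);
	}
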
@@ -209,12 +240,10 @@ static mddev_t * mddev_find(dev_t unit)
209 } 240 }
210 spin_unlock(&all_mddevs_lock); 241 spin_unlock(&all_mddevs_lock);
211 242
212 new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL); 243 new = kzalloc(sizeof(*new), GFP_KERNEL);
213 if (!new) 244 if (!new)
214 return NULL; 245 return NULL;
215 246
216 memset(new, 0, sizeof(*new));
217
218 new->unit = unit; 247 new->unit = unit;
219 if (MAJOR(unit) == MD_MAJOR) 248 if (MAJOR(unit) == MD_MAJOR)
220 new->md_minor = MINOR(unit); 249 new->md_minor = MINOR(unit);
@@ -262,7 +291,7 @@ static inline void mddev_unlock(mddev_t * mddev)
262 md_wakeup_thread(mddev->thread); 291 md_wakeup_thread(mddev->thread);
263} 292}
264 293
265mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 294static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
266{ 295{
267 mdk_rdev_t * rdev; 296 mdk_rdev_t * rdev;
268 struct list_head *tmp; 297 struct list_head *tmp;
@@ -286,6 +315,18 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
286 return NULL; 315 return NULL;
287} 316}
288 317
318static struct mdk_personality *find_pers(int level, char *clevel)
319{
320 struct mdk_personality *pers;
321 list_for_each_entry(pers, &pers_list, list) {
322 if (level != LEVEL_NONE && pers->level == level)
323 return pers;
324 if (strcmp(pers->name, clevel)==0)
325 return pers;
326 }
327 return NULL;
328}
329
289static inline sector_t calc_dev_sboffset(struct block_device *bdev) 330static inline sector_t calc_dev_sboffset(struct block_device *bdev)
290{ 331{
291 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 332 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
@@ -320,7 +361,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
320static void free_disk_sb(mdk_rdev_t * rdev) 361static void free_disk_sb(mdk_rdev_t * rdev)
321{ 362{
322 if (rdev->sb_page) { 363 if (rdev->sb_page) {
323 page_cache_release(rdev->sb_page); 364 put_page(rdev->sb_page);
324 rdev->sb_loaded = 0; 365 rdev->sb_loaded = 0;
325 rdev->sb_page = NULL; 366 rdev->sb_page = NULL;
326 rdev->sb_offset = 0; 367 rdev->sb_offset = 0;
@@ -461,6 +502,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
461 bio_put(bio); 502 bio_put(bio);
462 return ret; 503 return ret;
463} 504}
505EXPORT_SYMBOL_GPL(sync_page_io);
464 506
465static int read_disk_sb(mdk_rdev_t * rdev, int size) 507static int read_disk_sb(mdk_rdev_t * rdev, int size)
466{ 508{
@@ -665,6 +707,10 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
665 } 707 }
666 rdev->size = calc_dev_size(rdev, sb->chunk_size); 708 rdev->size = calc_dev_size(rdev, sb->chunk_size);
667 709
710 if (rdev->size < sb->size && sb->level > 1)
711 /* "this cannot possibly happen" ... */
712 ret = -EINVAL;
713
668 abort: 714 abort:
669 return ret; 715 return ret;
670} 716}
@@ -688,6 +734,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
688 mddev->ctime = sb->ctime; 734 mddev->ctime = sb->ctime;
689 mddev->utime = sb->utime; 735 mddev->utime = sb->utime;
690 mddev->level = sb->level; 736 mddev->level = sb->level;
737 mddev->clevel[0] = 0;
691 mddev->layout = sb->layout; 738 mddev->layout = sb->layout;
692 mddev->raid_disks = sb->raid_disks; 739 mddev->raid_disks = sb->raid_disks;
693 mddev->size = sb->size; 740 mddev->size = sb->size;
@@ -714,9 +761,10 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
714 761
715 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 762 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
716 mddev->bitmap_file == NULL) { 763 mddev->bitmap_file == NULL) {
717 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) { 764 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
765 && mddev->level != 10) {
718 /* FIXME use a better test */ 766 /* FIXME use a better test */
719 printk(KERN_WARNING "md: bitmaps only support for raid1\n"); 767 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
720 return -EINVAL; 768 return -EINVAL;
721 } 769 }
722 mddev->bitmap_offset = mddev->default_bitmap_offset; 770 mddev->bitmap_offset = mddev->default_bitmap_offset;
@@ -968,6 +1016,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
968 } 1016 }
969 rdev->preferred_minor = 0xffff; 1017 rdev->preferred_minor = 0xffff;
970 rdev->data_offset = le64_to_cpu(sb->data_offset); 1018 rdev->data_offset = le64_to_cpu(sb->data_offset);
1019 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
971 1020
972 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1021 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
973 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1022 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
@@ -1006,6 +1055,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1006 rdev->size = le64_to_cpu(sb->data_size)/2; 1055 rdev->size = le64_to_cpu(sb->data_size)/2;
1007 if (le32_to_cpu(sb->chunksize)) 1056 if (le32_to_cpu(sb->chunksize))
1008 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); 1057 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1058
1059 if (le32_to_cpu(sb->size) > rdev->size*2)
1060 return -EINVAL;
1009 return 0; 1061 return 0;
1010} 1062}
1011 1063
@@ -1023,6 +1075,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1023 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1075 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1024 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1076 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1025 mddev->level = le32_to_cpu(sb->level); 1077 mddev->level = le32_to_cpu(sb->level);
1078 mddev->clevel[0] = 0;
1026 mddev->layout = le32_to_cpu(sb->layout); 1079 mddev->layout = le32_to_cpu(sb->layout);
1027 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1080 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1028 mddev->size = le64_to_cpu(sb->size)/2; 1081 mddev->size = le64_to_cpu(sb->size)/2;
@@ -1037,8 +1090,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1037 1090
1038 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1091 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1039 mddev->bitmap_file == NULL ) { 1092 mddev->bitmap_file == NULL ) {
1040 if (mddev->level != 1) { 1093 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1041 printk(KERN_WARNING "md: bitmaps only supported for raid1\n"); 1094 && mddev->level != 10) {
1095 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1042 return -EINVAL; 1096 return -EINVAL;
1043 } 1097 }
1044 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1098 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
@@ -1105,6 +1159,8 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1105 else 1159 else
1106 sb->resync_offset = cpu_to_le64(0); 1160 sb->resync_offset = cpu_to_le64(0);
1107 1161
1162 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
1163
1108 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1164 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1109 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1165 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1110 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1166 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -1187,6 +1243,14 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1187 MD_BUG(); 1243 MD_BUG();
1188 return -EINVAL; 1244 return -EINVAL;
1189 } 1245 }
1246 /* make sure rdev->size exceeds mddev->size */
1247 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1248 if (mddev->pers)
1249 /* Cannot change size, so fail */
1250 return -ENOSPC;
1251 else
1252 mddev->size = rdev->size;
1253 }
1190 same_pdev = match_dev_unit(mddev, rdev); 1254 same_pdev = match_dev_unit(mddev, rdev);
1191 if (same_pdev) 1255 if (same_pdev)
1192 printk(KERN_WARNING 1256 printk(KERN_WARNING
@@ -1496,6 +1560,26 @@ repeat:
1496 1560
1497} 1561}
1498 1562
1563/* words written to sysfs files may, or may not, be \n terminated.
1564 * We want to accept either case. For this we use cmd_match.
1565 */
1566static int cmd_match(const char *cmd, const char *str)
1567{
1568 /* See if cmd, written into a sysfs file, matches
1569 * str. They must either be the same, or cmd can
1570 * have a trailing newline
1571 */
1572 while (*cmd && *str && *cmd == *str) {
1573 cmd++;
1574 str++;
1575 }
1576 if (*cmd == '\n')
1577 cmd++;
1578 if (*str || *cmd)
1579 return 0;
1580 return 1;
1581}
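
Since the matching rules are easy to get backwards, a standalone harness (the function body copied from above) makes the accepted and rejected inputs concrete:

/* Standalone check of cmd_match(): identical strings match, a single
 * trailing '\n' on cmd is forgiven, anything else is rejected. */
#include <assert.h>

static int cmd_match(const char *cmd, const char *str)
{
    while (*cmd && *str && *cmd == *str) {
        cmd++;
        str++;
    }
    if (*cmd == '\n')
        cmd++;
    if (*str || *cmd)
        return 0;
    return 1;
}

int main(void)
{
    assert(cmd_match("idle", "idle"));      /* exact match */
    assert(cmd_match("idle\n", "idle"));    /* echo(1) adds a newline */
    assert(!cmd_match("idl", "idle"));      /* truncated: reject */
    assert(!cmd_match("idle!", "idle"));    /* trailing junk: reject */
    assert(!cmd_match("idle\n\n", "idle")); /* only one newline allowed */
    return 0;
}
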
1582
1499struct rdev_sysfs_entry { 1583struct rdev_sysfs_entry {
1500 struct attribute attr; 1584 struct attribute attr;
1501 ssize_t (*show)(mdk_rdev_t *, char *); 1585 ssize_t (*show)(mdk_rdev_t *, char *);
@@ -1538,9 +1622,113 @@ super_show(mdk_rdev_t *rdev, char *page)
1538} 1622}
1539static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); 1623static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1540 1624
1625static ssize_t
1626errors_show(mdk_rdev_t *rdev, char *page)
1627{
1628 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1629}
1630
1631static ssize_t
1632errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1633{
1634 char *e;
1635 unsigned long n = simple_strtoul(buf, &e, 10);
1636 if (*buf && (*e == 0 || *e == '\n')) {
1637 atomic_set(&rdev->corrected_errors, n);
1638 return len;
1639 }
1640 return -EINVAL;
1641}
1642static struct rdev_sysfs_entry rdev_errors =
1643__ATTR(errors, 0644, errors_show, errors_store);
1644
1645static ssize_t
1646slot_show(mdk_rdev_t *rdev, char *page)
1647{
1648 if (rdev->raid_disk < 0)
1649 return sprintf(page, "none\n");
1650 else
1651 return sprintf(page, "%d\n", rdev->raid_disk);
1652}
1653
1654static ssize_t
1655slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1656{
1657 char *e;
1658 int slot = simple_strtoul(buf, &e, 10);
1659 if (strncmp(buf, "none", 4)==0)
1660 slot = -1;
1661 else if (e==buf || (*e && *e!= '\n'))
1662 return -EINVAL;
1663 if (rdev->mddev->pers)
1664 /* Cannot set slot in active array (yet) */
1665 return -EBUSY;
1666 if (slot >= rdev->mddev->raid_disks)
1667 return -ENOSPC;
1668 rdev->raid_disk = slot;
1669 /* assume it is working */
1670 rdev->flags = 0;
1671 set_bit(In_sync, &rdev->flags);
1672 return len;
1673}
1674
1675
1676static struct rdev_sysfs_entry rdev_slot =
1677__ATTR(slot, 0644, slot_show, slot_store);
1678
1679static ssize_t
1680offset_show(mdk_rdev_t *rdev, char *page)
1681{
1682 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1683}
1684
1685static ssize_t
1686offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1687{
1688 char *e;
1689 unsigned long long offset = simple_strtoull(buf, &e, 10);
1690 if (e==buf || (*e && *e != '\n'))
1691 return -EINVAL;
1692 if (rdev->mddev->pers)
1693 return -EBUSY;
1694 rdev->data_offset = offset;
1695 return len;
1696}
1697
1698static struct rdev_sysfs_entry rdev_offset =
1699__ATTR(offset, 0644, offset_show, offset_store);
1700
1701static ssize_t
1702rdev_size_show(mdk_rdev_t *rdev, char *page)
1703{
1704 return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1705}
1706
1707static ssize_t
1708rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1709{
1710 char *e;
1711 unsigned long long size = simple_strtoull(buf, &e, 10);
1712 if (e==buf || (*e && *e != '\n'))
1713 return -EINVAL;
1714 if (rdev->mddev->pers)
1715 return -EBUSY;
1716 rdev->size = size;
1717 if (size < rdev->mddev->size || rdev->mddev->size == 0)
1718 rdev->mddev->size = size;
1719 return len;
1720}
1721
1722static struct rdev_sysfs_entry rdev_size =
1723__ATTR(size, 0644, rdev_size_show, rdev_size_store);
1724
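
Each of the four new per-device attributes follows the same sysfs idiom: plain decimal in, decimal (or a keyword such as "none") out, with stores refused once the array is active where noted. A hedged userspace sketch of reading and writing them; the md0/dev-sda1 paths are illustrative, real component directories are named dev-<bdev> under /sys/block/mdX/md/:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* One open/write/close per store, matching how echo(1) drives sysfs. */
static void sysfs_write(const char *path, const char *val)
{
    int fd = open(path, O_WRONLY);
    if (fd >= 0) {
        write(fd, val, strlen(val));
        close(fd);
    }
}

static void sysfs_show(const char *path)
{
    char buf[64];
    ssize_t n;
    int fd = open(path, O_RDONLY);

    if (fd < 0)
        return;
    n = read(fd, buf, sizeof(buf) - 1);
    if (n > 0) {
        buf[n] = '\0';
        printf("%s: %s", path, buf);    /* show handlers append \n */
    }
    close(fd);
}

int main(void)
{
    /* dev-sda1 under md0 is an illustrative component directory */
    sysfs_write("/sys/block/md0/md/dev-sda1/errors", "0\n"); /* reset count */
    sysfs_show("/sys/block/md0/md/dev-sda1/errors");
    sysfs_show("/sys/block/md0/md/dev-sda1/slot");   /* "none" or a number */
    sysfs_show("/sys/block/md0/md/dev-sda1/size");   /* in 1K blocks */
    return 0;
}
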
1541static struct attribute *rdev_default_attrs[] = { 1725static struct attribute *rdev_default_attrs[] = {
1542 &rdev_state.attr, 1726 &rdev_state.attr,
1543 &rdev_super.attr, 1727 &rdev_super.attr,
1728 &rdev_errors.attr,
1729 &rdev_slot.attr,
1730 &rdev_offset.attr,
1731 &rdev_size.attr,
1544 NULL, 1732 NULL,
1545}; 1733};
1546static ssize_t 1734static ssize_t
@@ -1598,12 +1786,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
1598 mdk_rdev_t *rdev; 1786 mdk_rdev_t *rdev;
1599 sector_t size; 1787 sector_t size;
1600 1788
1601 rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL); 1789 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1602 if (!rdev) { 1790 if (!rdev) {
1603 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 1791 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1604 return ERR_PTR(-ENOMEM); 1792 return ERR_PTR(-ENOMEM);
1605 } 1793 }
1606 memset(rdev, 0, sizeof(*rdev));
1607 1794
1608 if ((err = alloc_disk_sb(rdev))) 1795 if ((err = alloc_disk_sb(rdev)))
1609 goto abort_free; 1796 goto abort_free;
@@ -1621,6 +1808,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
1621 rdev->data_offset = 0; 1808 rdev->data_offset = 0;
1622 atomic_set(&rdev->nr_pending, 0); 1809 atomic_set(&rdev->nr_pending, 0);
1623 atomic_set(&rdev->read_errors, 0); 1810 atomic_set(&rdev->read_errors, 0);
1811 atomic_set(&rdev->corrected_errors, 0);
1624 1812
1625 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 1813 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1626 if (!size) { 1814 if (!size) {
@@ -1725,16 +1913,37 @@ static void analyze_sbs(mddev_t * mddev)
1725static ssize_t 1913static ssize_t
1726level_show(mddev_t *mddev, char *page) 1914level_show(mddev_t *mddev, char *page)
1727{ 1915{
1728 mdk_personality_t *p = mddev->pers; 1916 struct mdk_personality *p = mddev->pers;
1729 if (p == NULL && mddev->raid_disks == 0) 1917 if (p)
1730 return 0;
1731 if (mddev->level >= 0)
1732 return sprintf(page, "raid%d\n", mddev->level);
1733 else
1734 return sprintf(page, "%s\n", p->name); 1918 return sprintf(page, "%s\n", p->name);
1919 else if (mddev->clevel[0])
1920 return sprintf(page, "%s\n", mddev->clevel);
1921 else if (mddev->level != LEVEL_NONE)
1922 return sprintf(page, "%d\n", mddev->level);
1923 else
1924 return 0;
1925}
1926
1927static ssize_t
1928level_store(mddev_t *mddev, const char *buf, size_t len)
1929{
1930 int rv = len;
1931 if (mddev->pers)
1932 return -EBUSY;
1933 if (len == 0)
1934 return 0;
1935 if (len >= sizeof(mddev->clevel))
1936 return -ENOSPC;
1937 strncpy(mddev->clevel, buf, len);
1938 if (mddev->clevel[len-1] == '\n')
1939 len--;
1940 mddev->clevel[len] = 0;
1941 mddev->level = LEVEL_NONE;
1942 return rv;
1735} 1943}
1736 1944
1737static struct md_sysfs_entry md_level = __ATTR_RO(level); 1945static struct md_sysfs_entry md_level =
1946__ATTR(level, 0644, level_show, level_store);
1738 1947
1739static ssize_t 1948static ssize_t
1740raid_disks_show(mddev_t *mddev, char *page) 1949raid_disks_show(mddev_t *mddev, char *page)
@@ -1744,7 +1953,197 @@ raid_disks_show(mddev_t *mddev, char *page)
1744 return sprintf(page, "%d\n", mddev->raid_disks); 1953 return sprintf(page, "%d\n", mddev->raid_disks);
1745} 1954}
1746 1955
1747static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks); 1956static int update_raid_disks(mddev_t *mddev, int raid_disks);
1957
1958static ssize_t
1959raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
1960{
1961 /* can only set raid_disks if array is not yet active */
1962 char *e;
1963 int rv = 0;
1964 unsigned long n = simple_strtoul(buf, &e, 10);
1965
1966 if (!*buf || (*e && *e != '\n'))
1967 return -EINVAL;
1968
1969 if (mddev->pers)
1970 rv = update_raid_disks(mddev, n);
1971 else
1972 mddev->raid_disks = n;
1973 return rv ? rv : len;
1974}
1975static struct md_sysfs_entry md_raid_disks =
1976__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
1977
1978static ssize_t
1979chunk_size_show(mddev_t *mddev, char *page)
1980{
1981 return sprintf(page, "%d\n", mddev->chunk_size);
1982}
1983
1984static ssize_t
1985chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
1986{
1987 /* can only set chunk_size if array is not yet active */
1988 char *e;
1989 unsigned long n = simple_strtoul(buf, &e, 10);
1990
1991 if (mddev->pers)
1992 return -EBUSY;
1993 if (!*buf || (*e && *e != '\n'))
1994 return -EINVAL;
1995
1996 mddev->chunk_size = n;
1997 return len;
1998}
1999static struct md_sysfs_entry md_chunk_size =
2000__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
2001
2002static ssize_t
2003null_show(mddev_t *mddev, char *page)
2004{
2005 return -EINVAL;
2006}
2007
2008static ssize_t
2009new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2010{
2011 /* buf must be %d:%d\n? giving major and minor numbers */
2012 /* The new device is added to the array.
2013 * If the array has a persistent superblock, we read the
2014 * superblock to initialise info and check validity.
2015 * Otherwise, only checking done is that in bind_rdev_to_array,
2016 * which mainly checks size.
2017 */
2018 char *e;
2019 int major = simple_strtoul(buf, &e, 10);
2020 int minor;
2021 dev_t dev;
2022 mdk_rdev_t *rdev;
2023 int err;
2024
2025 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2026 return -EINVAL;
2027 minor = simple_strtoul(e+1, &e, 10);
2028 if (*e && *e != '\n')
2029 return -EINVAL;
2030 dev = MKDEV(major, minor);
2031 if (major != MAJOR(dev) ||
2032 minor != MINOR(dev))
2033 return -EOVERFLOW;
2034
2035
2036 if (mddev->persistent) {
2037 rdev = md_import_device(dev, mddev->major_version,
2038 mddev->minor_version);
2039 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2040 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2041 mdk_rdev_t, same_set);
2042 err = super_types[mddev->major_version]
2043 .load_super(rdev, rdev0, mddev->minor_version);
2044 if (err < 0)
2045 goto out;
2046 }
2047 } else
2048 rdev = md_import_device(dev, -1, -1);
2049
2050 if (IS_ERR(rdev))
2051 return PTR_ERR(rdev);
2052 err = bind_rdev_to_array(rdev, mddev);
2053 out:
2054 if (err)
2055 export_rdev(rdev);
2056 return err ? err : len;
2057}
2058
2059static struct md_sysfs_entry md_new_device =
2060__ATTR(new_dev, 0200, null_show, new_dev_store);
2061
2062static ssize_t
2063size_show(mddev_t *mddev, char *page)
2064{
2065 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2066}
2067
2068static int update_size(mddev_t *mddev, unsigned long size);
2069
2070static ssize_t
2071size_store(mddev_t *mddev, const char *buf, size_t len)
2072{
2073 /* If array is inactive, we can reduce the component size, but
2074 * not increase it (except from 0).
2075 * If array is active, we can try an on-line resize
2076 */
2077 char *e;
2078 int err = 0;
2079 unsigned long long size = simple_strtoull(buf, &e, 10);
2080 if (!*buf || *buf == '\n' ||
2081 (*e && *e != '\n'))
2082 return -EINVAL;
2083
2084 if (mddev->pers) {
2085 err = update_size(mddev, size);
2086 md_update_sb(mddev);
2087 } else {
2088 if (mddev->size == 0 ||
2089 mddev->size > size)
2090 mddev->size = size;
2091 else
2092 err = -ENOSPC;
2093 }
2094 return err ? err : len;
2095}
2096
2097static struct md_sysfs_entry md_size =
2098__ATTR(component_size, 0644, size_show, size_store);
2099
2100
2101/* Metadata version.
2102 * This is either 'none' for arrays with externally managed metadata,
2103 * or N.M for internally known formats
2104 */
2105static ssize_t
2106metadata_show(mddev_t *mddev, char *page)
2107{
2108 if (mddev->persistent)
2109 return sprintf(page, "%d.%d\n",
2110 mddev->major_version, mddev->minor_version);
2111 else
2112 return sprintf(page, "none\n");
2113}
2114
2115static ssize_t
2116metadata_store(mddev_t *mddev, const char *buf, size_t len)
2117{
2118 int major, minor;
2119 char *e;
2120 if (!list_empty(&mddev->disks))
2121 return -EBUSY;
2122
2123 if (cmd_match(buf, "none")) {
2124 mddev->persistent = 0;
2125 mddev->major_version = 0;
2126 mddev->minor_version = 90;
2127 return len;
2128 }
2129 major = simple_strtoul(buf, &e, 10);
2130 if (e==buf || *e != '.')
2131 return -EINVAL;
2132 buf = e+1;
2133 minor = simple_strtoul(buf, &e, 10);
2134 if (e==buf || *e != '\n')
2135 return -EINVAL;
2136 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2137 super_types[major].name == NULL)
2138 return -ENOENT;
2139 mddev->major_version = major;
2140 mddev->minor_version = minor;
2141 mddev->persistent = 1;
2142 return len;
2143}
2144
2145static struct md_sysfs_entry md_metadata =
2146__ATTR(metadata_version, 0644, metadata_show, metadata_store);
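
Taken together, the store handlers above let an inactive array be described entirely through sysfs before it is started. A hedged sketch of that flow; md0, the RAID level, and the 8:16 device number are all placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void put(const char *file, const char *val)
{
    char path[128];
    int fd;

    snprintf(path, sizeof(path), "/sys/block/md0/md/%s", file);
    fd = open(path, O_WRONLY);
    if (fd >= 0) {
        write(fd, val, strlen(val));
        close(fd);
    }
}

int main(void)
{
    put("level", "raid5\n");           /* stores clevel, clears level */
    put("raid_disks", "3\n");          /* array inactive: stored directly */
    put("chunk_size", "65536\n");      /* bytes; refused once active */
    put("metadata_version", "0.90\n"); /* parsed as major.minor */
    put("new_dev", "8:16\n");          /* major:minor of a component */
    return 0;
}
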
1748 2147
1749static ssize_t 2148static ssize_t
1750action_show(mddev_t *mddev, char *page) 2149action_show(mddev_t *mddev, char *page)
@@ -1771,31 +2170,27 @@ action_store(mddev_t *mddev, const char *page, size_t len)
1771 if (!mddev->pers || !mddev->pers->sync_request) 2170 if (!mddev->pers || !mddev->pers->sync_request)
1772 return -EINVAL; 2171 return -EINVAL;
1773 2172
1774 if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) { 2173 if (cmd_match(page, "idle")) {
1775 if (mddev->sync_thread) { 2174 if (mddev->sync_thread) {
1776 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2175 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1777 md_unregister_thread(mddev->sync_thread); 2176 md_unregister_thread(mddev->sync_thread);
1778 mddev->sync_thread = NULL; 2177 mddev->sync_thread = NULL;
1779 mddev->recovery = 0; 2178 mddev->recovery = 0;
1780 } 2179 }
1781 return len; 2180 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1782 } 2181 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
1783
1784 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1785 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
1786 return -EBUSY; 2182 return -EBUSY;
1787 if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 || 2183 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
1788 strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0)
1789 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2184 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1790 else { 2185 else {
1791 if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0) 2186 if (cmd_match(page, "check"))
1792 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2187 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
1793 else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0) 2188 else if (!cmd_match(page, "repair"))
1794 return -EINVAL; 2189 return -EINVAL;
1795 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 2190 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
1796 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 2191 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
1797 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1798 } 2192 }
2193 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1799 md_wakeup_thread(mddev->thread); 2194 md_wakeup_thread(mddev->thread);
1800 return len; 2195 return len;
1801} 2196}
@@ -1814,15 +2209,107 @@ md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
1814static struct md_sysfs_entry 2209static struct md_sysfs_entry
1815md_mismatches = __ATTR_RO(mismatch_cnt); 2210md_mismatches = __ATTR_RO(mismatch_cnt);
1816 2211
2212static ssize_t
2213sync_min_show(mddev_t *mddev, char *page)
2214{
2215 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2216 mddev->sync_speed_min ? "local": "system");
2217}
2218
2219static ssize_t
2220sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2221{
2222 int min;
2223 char *e;
2224 if (strncmp(buf, "system", 6)==0) {
2225 mddev->sync_speed_min = 0;
2226 return len;
2227 }
2228 min = simple_strtoul(buf, &e, 10);
2229 if (buf == e || (*e && *e != '\n') || min <= 0)
2230 return -EINVAL;
2231 mddev->sync_speed_min = min;
2232 return len;
2233}
2234
2235static struct md_sysfs_entry md_sync_min =
2236__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2237
2238static ssize_t
2239sync_max_show(mddev_t *mddev, char *page)
2240{
2241 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2242 mddev->sync_speed_max ? "local": "system");
2243}
2244
2245static ssize_t
2246sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2247{
2248 int max;
2249 char *e;
2250 if (strncmp(buf, "system", 6)==0) {
2251 mddev->sync_speed_max = 0;
2252 return len;
2253 }
2254 max = simple_strtoul(buf, &e, 10);
2255 if (buf == e || (*e && *e != '\n') || max <= 0)
2256 return -EINVAL;
2257 mddev->sync_speed_max = max;
2258 return len;
2259}
2260
2261static struct md_sysfs_entry md_sync_max =
2262__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
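
Both throttles accept a plain number (KB/sec, stored as a per-array override) or the literal string "system" to fall back to the global sysctl limits. A small sketch exercising both forms; the md0 path is illustrative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void put_speed(const char *val)
{
    int fd = open("/sys/block/md0/md/sync_speed_max", O_WRONLY);
    if (fd >= 0) {
        write(fd, val, strlen(val));
        close(fd);
    }
}

int main(void)
{
    put_speed("10000\n"); /* local cap: 10000 KB/sec for this array */
    /* ... resync proceeds under the local cap ... */
    put_speed("system");  /* revert to sysctl_speed_limit_max */
    return 0;
}
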
2263
2264
2265static ssize_t
2266sync_speed_show(mddev_t *mddev, char *page)
2267{
2268 unsigned long resync, dt, db;
2269 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2270 dt = ((jiffies - mddev->resync_mark) / HZ);
2271 if (!dt) dt++;
2272 db = resync - (mddev->resync_mark_cnt);
2273 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2274}
2275
2276static struct md_sysfs_entry
2277md_sync_speed = __ATTR_RO(sync_speed);
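
The reported rate is a simple delta since the last resync mark: progress is kept in 512-byte sectors, so halving converts to KB/sec. A worked run of the same arithmetic with made-up figures:

#include <stdio.h>

int main(void)
{
    unsigned long curr_resync = 2048000;  /* sectors synced so far */
    unsigned long recovery_active = 2048; /* sectors still in flight */
    unsigned long resync_mark_cnt = 0;    /* sectors done at the last mark */
    unsigned long dt = 10;                /* seconds since the last mark */
    unsigned long resync, db;

    resync = curr_resync - recovery_active;
    db = resync - resync_mark_cnt;
    if (!dt)
        dt = 1;                           /* same guard as the kernel code */
    printf("%lu K/sec\n", db / dt / 2);   /* 2045952/10/2 -> 102297 */
    return 0;
}
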
2278
2279static ssize_t
2280sync_completed_show(mddev_t *mddev, char *page)
2281{
2282 unsigned long max_blocks, resync;
2283
2284 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2285 max_blocks = mddev->resync_max_sectors;
2286 else
2287 max_blocks = mddev->size << 1;
2288
2289 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2290 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2291}
2292
2293static struct md_sysfs_entry
2294md_sync_completed = __ATTR_RO(sync_completed);
2295
1817static struct attribute *md_default_attrs[] = { 2296static struct attribute *md_default_attrs[] = {
1818 &md_level.attr, 2297 &md_level.attr,
1819 &md_raid_disks.attr, 2298 &md_raid_disks.attr,
2299 &md_chunk_size.attr,
2300 &md_size.attr,
2301 &md_metadata.attr,
2302 &md_new_device.attr,
1820 NULL, 2303 NULL,
1821}; 2304};
1822 2305
1823static struct attribute *md_redundancy_attrs[] = { 2306static struct attribute *md_redundancy_attrs[] = {
1824 &md_scan_mode.attr, 2307 &md_scan_mode.attr,
1825 &md_mismatches.attr, 2308 &md_mismatches.attr,
2309 &md_sync_min.attr,
2310 &md_sync_max.attr,
2311 &md_sync_speed.attr,
2312 &md_sync_completed.attr,
1826 NULL, 2313 NULL,
1827}; 2314};
1828static struct attribute_group md_redundancy_group = { 2315static struct attribute_group md_redundancy_group = {
@@ -1937,14 +2424,16 @@ static void md_safemode_timeout(unsigned long data)
1937 md_wakeup_thread(mddev->thread); 2424 md_wakeup_thread(mddev->thread);
1938} 2425}
1939 2426
2427static int start_dirty_degraded;
1940 2428
1941static int do_md_run(mddev_t * mddev) 2429static int do_md_run(mddev_t * mddev)
1942{ 2430{
1943 int pnum, err; 2431 int err;
1944 int chunk_size; 2432 int chunk_size;
1945 struct list_head *tmp; 2433 struct list_head *tmp;
1946 mdk_rdev_t *rdev; 2434 mdk_rdev_t *rdev;
1947 struct gendisk *disk; 2435 struct gendisk *disk;
2436 struct mdk_personality *pers;
1948 char b[BDEVNAME_SIZE]; 2437 char b[BDEVNAME_SIZE];
1949 2438
1950 if (list_empty(&mddev->disks)) 2439 if (list_empty(&mddev->disks))
@@ -1961,20 +2450,8 @@ static int do_md_run(mddev_t * mddev)
1961 analyze_sbs(mddev); 2450 analyze_sbs(mddev);
1962 2451
1963 chunk_size = mddev->chunk_size; 2452 chunk_size = mddev->chunk_size;
1964 pnum = level_to_pers(mddev->level);
1965 2453
1966 if ((pnum != MULTIPATH) && (pnum != RAID1)) { 2454 if (chunk_size) {
1967 if (!chunk_size) {
1968 /*
1969 * 'default chunksize' in the old md code used to
1970 * be PAGE_SIZE, baaad.
1971 * we abort here to be on the safe side. We don't
1972 * want to continue the bad practice.
1973 */
1974 printk(KERN_ERR
1975 "no chunksize specified, see 'man raidtab'\n");
1976 return -EINVAL;
1977 }
1978 if (chunk_size > MAX_CHUNK_SIZE) { 2455 if (chunk_size > MAX_CHUNK_SIZE) {
1979 printk(KERN_ERR "too big chunk_size: %d > %d\n", 2456 printk(KERN_ERR "too big chunk_size: %d > %d\n",
1980 chunk_size, MAX_CHUNK_SIZE); 2457 chunk_size, MAX_CHUNK_SIZE);
@@ -2010,10 +2487,10 @@ static int do_md_run(mddev_t * mddev)
2010 } 2487 }
2011 2488
2012#ifdef CONFIG_KMOD 2489#ifdef CONFIG_KMOD
2013 if (!pers[pnum]) 2490 if (mddev->level != LEVEL_NONE)
2014 { 2491 request_module("md-level-%d", mddev->level);
2015 request_module("md-personality-%d", pnum); 2492 else if (mddev->clevel[0])
2016 } 2493 request_module("md-%s", mddev->clevel);
2017#endif 2494#endif
2018 2495
2019 /* 2496 /*
@@ -2035,30 +2512,39 @@ static int do_md_run(mddev_t * mddev)
2035 return -ENOMEM; 2512 return -ENOMEM;
2036 2513
2037 spin_lock(&pers_lock); 2514 spin_lock(&pers_lock);
2038 if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) { 2515 pers = find_pers(mddev->level, mddev->clevel);
2516 if (!pers || !try_module_get(pers->owner)) {
2039 spin_unlock(&pers_lock); 2517 spin_unlock(&pers_lock);
2040 printk(KERN_WARNING "md: personality %d is not loaded!\n", 2518 if (mddev->level != LEVEL_NONE)
2041 pnum); 2519 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
2520 mddev->level);
2521 else
2522 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
2523 mddev->clevel);
2042 return -EINVAL; 2524 return -EINVAL;
2043 } 2525 }
2044 2526 mddev->pers = pers;
2045 mddev->pers = pers[pnum];
2046 spin_unlock(&pers_lock); 2527 spin_unlock(&pers_lock);
2528 mddev->level = pers->level;
2529 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2047 2530
2048 mddev->recovery = 0; 2531 mddev->recovery = 0;
2049 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2532 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
2050 mddev->barriers_work = 1; 2533 mddev->barriers_work = 1;
2534 mddev->ok_start_degraded = start_dirty_degraded;
2051 2535
2052 if (start_readonly) 2536 if (start_readonly)
2053 mddev->ro = 2; /* read-only, but switch on first write */ 2537 mddev->ro = 2; /* read-only, but switch on first write */
2054 2538
2055 /* before we start the array running, initialise the bitmap */ 2539 err = mddev->pers->run(mddev);
2056 err = bitmap_create(mddev); 2540 if (!err && mddev->pers->sync_request) {
2057 if (err) 2541 err = bitmap_create(mddev);
2058 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 2542 if (err) {
2059 mdname(mddev), err); 2543 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
2060 else 2544 mdname(mddev), err);
2061 err = mddev->pers->run(mddev); 2545 mddev->pers->stop(mddev);
2546 }
2547 }
2062 if (err) { 2548 if (err) {
2063 printk(KERN_ERR "md: pers->run() failed ...\n"); 2549 printk(KERN_ERR "md: pers->run() failed ...\n");
2064 module_put(mddev->pers->owner); 2550 module_put(mddev->pers->owner);
@@ -2104,6 +2590,7 @@ static int do_md_run(mddev_t * mddev)
2104 mddev->queue->make_request_fn = mddev->pers->make_request; 2590 mddev->queue->make_request_fn = mddev->pers->make_request;
2105 2591
2106 mddev->changed = 1; 2592 mddev->changed = 1;
2593 md_new_event(mddev);
2107 return 0; 2594 return 0;
2108} 2595}
2109 2596
@@ -2231,6 +2718,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
2231 printk(KERN_INFO "md: %s switched to read-only mode.\n", 2718 printk(KERN_INFO "md: %s switched to read-only mode.\n",
2232 mdname(mddev)); 2719 mdname(mddev));
2233 err = 0; 2720 err = 0;
2721 md_new_event(mddev);
2234out: 2722out:
2235 return err; 2723 return err;
2236} 2724}
@@ -2668,12 +3156,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2668 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3156 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2669 set_bit(WriteMostly, &rdev->flags); 3157 set_bit(WriteMostly, &rdev->flags);
2670 3158
2671 err = bind_rdev_to_array(rdev, mddev);
2672 if (err) {
2673 export_rdev(rdev);
2674 return err;
2675 }
2676
2677 if (!mddev->persistent) { 3159 if (!mddev->persistent) {
2678 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 3160 printk(KERN_INFO "md: nonpersistent superblock ...\n");
2679 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3161 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
@@ -2681,8 +3163,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2681 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3163 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2682 rdev->size = calc_dev_size(rdev, mddev->chunk_size); 3164 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2683 3165
2684 if (!mddev->size || (mddev->size > rdev->size)) 3166 err = bind_rdev_to_array(rdev, mddev);
2685 mddev->size = rdev->size; 3167 if (err) {
3168 export_rdev(rdev);
3169 return err;
3170 }
2686 } 3171 }
2687 3172
2688 return 0; 3173 return 0;
@@ -2705,6 +3190,7 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2705 3190
2706 kick_rdev_from_array(rdev); 3191 kick_rdev_from_array(rdev);
2707 md_update_sb(mddev); 3192 md_update_sb(mddev);
3193 md_new_event(mddev);
2708 3194
2709 return 0; 3195 return 0;
2710busy: 3196busy:
@@ -2753,15 +3239,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
2753 size = calc_dev_size(rdev, mddev->chunk_size); 3239 size = calc_dev_size(rdev, mddev->chunk_size);
2754 rdev->size = size; 3240 rdev->size = size;
2755 3241
2756 if (size < mddev->size) {
2757 printk(KERN_WARNING
2758 "%s: disk size %llu blocks < array size %llu\n",
2759 mdname(mddev), (unsigned long long)size,
2760 (unsigned long long)mddev->size);
2761 err = -ENOSPC;
2762 goto abort_export;
2763 }
2764
2765 if (test_bit(Faulty, &rdev->flags)) { 3242 if (test_bit(Faulty, &rdev->flags)) {
2766 printk(KERN_WARNING 3243 printk(KERN_WARNING
2767 "md: can not hot-add faulty %s disk to %s!\n", 3244 "md: can not hot-add faulty %s disk to %s!\n",
@@ -2771,7 +3248,9 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
2771 } 3248 }
2772 clear_bit(In_sync, &rdev->flags); 3249 clear_bit(In_sync, &rdev->flags);
2773 rdev->desc_nr = -1; 3250 rdev->desc_nr = -1;
2774 bind_rdev_to_array(rdev, mddev); 3251 err = bind_rdev_to_array(rdev, mddev);
3252 if (err)
3253 goto abort_export;
2775 3254
2776 /* 3255 /*
2777 * The rest should better be atomic, we can have disk failures 3256 * The rest should better be atomic, we can have disk failures
@@ -2795,7 +3274,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
2795 */ 3274 */
2796 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3275 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2797 md_wakeup_thread(mddev->thread); 3276 md_wakeup_thread(mddev->thread);
2798 3277 md_new_event(mddev);
2799 return 0; 3278 return 0;
2800 3279
2801abort_unbind_export: 3280abort_unbind_export:
@@ -2942,6 +3421,81 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2942 return 0; 3421 return 0;
2943} 3422}
2944 3423
3424static int update_size(mddev_t *mddev, unsigned long size)
3425{
3426 mdk_rdev_t * rdev;
3427 int rv;
3428 struct list_head *tmp;
3429
3430 if (mddev->pers->resize == NULL)
3431 return -EINVAL;
3432 /* The "size" is the amount of each device that is used.
3433 * This can only make sense for arrays with redundancy.
3434 * linear and raid0 always use whatever space is available
3435 * We can only consider changing the size if no resync
3436 * or reconstruction is happening, and if the new size
3437 * is acceptable. It must fit before the sb_offset or,
3438 * if that is <data_offset, it must fit before the
3439 * size of each device.
3440 * If size is zero, we find the largest size that fits.
3441 */
3442 if (mddev->sync_thread)
3443 return -EBUSY;
3444 ITERATE_RDEV(mddev,rdev,tmp) {
3445 sector_t avail;
3446 int fit = (size == 0);
3447 if (rdev->sb_offset > rdev->data_offset)
3448 avail = (rdev->sb_offset*2) - rdev->data_offset;
3449 else
3450 avail = get_capacity(rdev->bdev->bd_disk)
3451 - rdev->data_offset;
3452 if (fit && (size == 0 || size > avail/2))
3453 size = avail/2;
3454 if (avail < ((sector_t)size << 1))
3455 return -ENOSPC;
3456 }
3457 rv = mddev->pers->resize(mddev, (sector_t)size *2);
3458 if (!rv) {
3459 struct block_device *bdev;
3460
3461 bdev = bdget_disk(mddev->gendisk, 0);
3462 if (bdev) {
3463 down(&bdev->bd_inode->i_sem);
3464 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3465 up(&bdev->bd_inode->i_sem);
3466 bdput(bdev);
3467 }
3468 }
3469 return rv;
3470}
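
The headroom test above depends on the superblock layout: when the superblock sits past the data (the 0.90 format), usable space ends at sb_offset; when the data sits past the superblock, it ends at the device's capacity. The same calculation standalone, with made-up geometry:

#include <stdio.h>

int main(void)
{
    /* made-up geometry: sectors are 512 bytes, sb_offset is in 1K blocks */
    unsigned long long capacity = 488397168;  /* whole-device sectors */
    unsigned long long sb_offset = 244198472; /* 0.90: superblock near end */
    unsigned long long data_offset = 0;
    unsigned long long avail, size;

    if (sb_offset > data_offset)
        avail = sb_offset * 2 - data_offset;  /* data must stop at the sb */
    else
        avail = capacity - data_offset;       /* data runs to end of device */

    size = avail / 2;                         /* size==0 case: largest fit */
    printf("usable per device: %llu KB\n", size);
    return 0;
}
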
3471
3472static int update_raid_disks(mddev_t *mddev, int raid_disks)
3473{
3474 int rv;
3475 /* change the number of raid disks */
3476 if (mddev->pers->reshape == NULL)
3477 return -EINVAL;
3478 if (raid_disks <= 0 ||
3479 raid_disks >= mddev->max_disks)
3480 return -EINVAL;
3481 if (mddev->sync_thread)
3482 return -EBUSY;
3483 rv = mddev->pers->reshape(mddev, raid_disks);
3484 if (!rv) {
3485 struct block_device *bdev;
3486
3487 bdev = bdget_disk(mddev->gendisk, 0);
3488 if (bdev) {
3489 down(&bdev->bd_inode->i_sem);
3490 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3491 up(&bdev->bd_inode->i_sem);
3492 bdput(bdev);
3493 }
3494 }
3495 return rv;
3496}
3497
3498
2945/* 3499/*
2946 * update_array_info is used to change the configuration of an 3500 * update_array_info is used to change the configuration of an
2947 * on-line array. 3501 * on-line array.
@@ -2990,71 +3544,12 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2990 else 3544 else
2991 return mddev->pers->reconfig(mddev, info->layout, -1); 3545 return mddev->pers->reconfig(mddev, info->layout, -1);
2992 } 3546 }
2993 if (mddev->size != info->size) { 3547 if (mddev->size != info->size)
2994 mdk_rdev_t * rdev; 3548 rv = update_size(mddev, info->size);
2995 struct list_head *tmp; 3549
2996 if (mddev->pers->resize == NULL) 3550 if (mddev->raid_disks != info->raid_disks)
2997 return -EINVAL; 3551 rv = update_raid_disks(mddev, info->raid_disks);
2998 /* The "size" is the amount of each device that is used. 3552
2999 * This can only make sense for arrays with redundancy.
3000 * linear and raid0 always use whatever space is available
3001 * We can only consider changing the size if no resync
3002 * or reconstruction is happening, and if the new size
3003 * is acceptable. It must fit before the sb_offset or,
3004 * if that is <data_offset, it must fit before the
3005 * size of each device.
3006 * If size is zero, we find the largest size that fits.
3007 */
3008 if (mddev->sync_thread)
3009 return -EBUSY;
3010 ITERATE_RDEV(mddev,rdev,tmp) {
3011 sector_t avail;
3012 int fit = (info->size == 0);
3013 if (rdev->sb_offset > rdev->data_offset)
3014 avail = (rdev->sb_offset*2) - rdev->data_offset;
3015 else
3016 avail = get_capacity(rdev->bdev->bd_disk)
3017 - rdev->data_offset;
3018 if (fit && (info->size == 0 || info->size > avail/2))
3019 info->size = avail/2;
3020 if (avail < ((sector_t)info->size << 1))
3021 return -ENOSPC;
3022 }
3023 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
3024 if (!rv) {
3025 struct block_device *bdev;
3026
3027 bdev = bdget_disk(mddev->gendisk, 0);
3028 if (bdev) {
3029 down(&bdev->bd_inode->i_sem);
3030 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3031 up(&bdev->bd_inode->i_sem);
3032 bdput(bdev);
3033 }
3034 }
3035 }
3036 if (mddev->raid_disks != info->raid_disks) {
3037 /* change the number of raid disks */
3038 if (mddev->pers->reshape == NULL)
3039 return -EINVAL;
3040 if (info->raid_disks <= 0 ||
3041 info->raid_disks >= mddev->max_disks)
3042 return -EINVAL;
3043 if (mddev->sync_thread)
3044 return -EBUSY;
3045 rv = mddev->pers->reshape(mddev, info->raid_disks);
3046 if (!rv) {
3047 struct block_device *bdev;
3048
3049 bdev = bdget_disk(mddev->gendisk, 0);
3050 if (bdev) {
3051 down(&bdev->bd_inode->i_sem);
3052 i_size_write(bdev->bd_inode, mddev->array_size << 10);
3053 up(&bdev->bd_inode->i_sem);
3054 bdput(bdev);
3055 }
3056 }
3057 }
3058 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 3553 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
3059 if (mddev->pers->quiesce == NULL) 3554 if (mddev->pers->quiesce == NULL)
3060 return -EINVAL; 3555 return -EINVAL;
@@ -3476,11 +3971,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3476{ 3971{
3477 mdk_thread_t *thread; 3972 mdk_thread_t *thread;
3478 3973
3479 thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL); 3974 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
3480 if (!thread) 3975 if (!thread)
3481 return NULL; 3976 return NULL;
3482 3977
3483 memset(thread, 0, sizeof(mdk_thread_t));
3484 init_waitqueue_head(&thread->wqueue); 3978 init_waitqueue_head(&thread->wqueue);
3485 3979
3486 thread->run = run; 3980 thread->run = run;
@@ -3524,6 +4018,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3524 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4018 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3525 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4019 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3526 md_wakeup_thread(mddev->thread); 4020 md_wakeup_thread(mddev->thread);
4021 md_new_event(mddev);
3527} 4022}
3528 4023
3529/* seq_file implementation /proc/mdstat */ 4024/* seq_file implementation /proc/mdstat */
@@ -3664,24 +4159,29 @@ static void md_seq_stop(struct seq_file *seq, void *v)
3664 mddev_put(mddev); 4159 mddev_put(mddev);
3665} 4160}
3666 4161
4162struct mdstat_info {
4163 int event;
4164};
4165
3667static int md_seq_show(struct seq_file *seq, void *v) 4166static int md_seq_show(struct seq_file *seq, void *v)
3668{ 4167{
3669 mddev_t *mddev = v; 4168 mddev_t *mddev = v;
3670 sector_t size; 4169 sector_t size;
3671 struct list_head *tmp2; 4170 struct list_head *tmp2;
3672 mdk_rdev_t *rdev; 4171 mdk_rdev_t *rdev;
3673 int i; 4172 struct mdstat_info *mi = seq->private;
3674 struct bitmap *bitmap; 4173 struct bitmap *bitmap;
3675 4174
3676 if (v == (void*)1) { 4175 if (v == (void*)1) {
4176 struct mdk_personality *pers;
3677 seq_printf(seq, "Personalities : "); 4177 seq_printf(seq, "Personalities : ");
3678 spin_lock(&pers_lock); 4178 spin_lock(&pers_lock);
3679 for (i = 0; i < MAX_PERSONALITY; i++) 4179 list_for_each_entry(pers, &pers_list, list)
3680 if (pers[i]) 4180 seq_printf(seq, "[%s] ", pers->name);
3681 seq_printf(seq, "[%s] ", pers[i]->name);
3682 4181
3683 spin_unlock(&pers_lock); 4182 spin_unlock(&pers_lock);
3684 seq_printf(seq, "\n"); 4183 seq_printf(seq, "\n");
4184 mi->event = atomic_read(&md_event_count);
3685 return 0; 4185 return 0;
3686 } 4186 }
3687 if (v == (void*)2) { 4187 if (v == (void*)2) {
@@ -3790,47 +4290,68 @@ static struct seq_operations md_seq_ops = {
3790static int md_seq_open(struct inode *inode, struct file *file) 4290static int md_seq_open(struct inode *inode, struct file *file)
3791{ 4291{
3792 int error; 4292 int error;
4293 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4294 if (mi == NULL)
4295 return -ENOMEM;
3793 4296
3794 error = seq_open(file, &md_seq_ops); 4297 error = seq_open(file, &md_seq_ops);
4298 if (error)
4299 kfree(mi);
4300 else {
4301 struct seq_file *p = file->private_data;
4302 p->private = mi;
4303 mi->event = atomic_read(&md_event_count);
4304 }
3795 return error; 4305 return error;
3796} 4306}
3797 4307
4308static int md_seq_release(struct inode *inode, struct file *file)
4309{
4310 struct seq_file *m = file->private_data;
4311 struct mdstat_info *mi = m->private;
4312 m->private = NULL;
4313 kfree(mi);
4314 return seq_release(inode, file);
4315}
4316
4317static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4318{
4319 struct seq_file *m = filp->private_data;
4320 struct mdstat_info *mi = m->private;
4321 int mask;
4322
4323 poll_wait(filp, &md_event_waiters, wait);
4324
4325 /* always allow read */
4326 mask = POLLIN | POLLRDNORM;
4327
4328 if (mi->event != atomic_read(&md_event_count))
4329 mask |= POLLERR | POLLPRI;
4330 return mask;
4331}
4332
3798static struct file_operations md_seq_fops = { 4333static struct file_operations md_seq_fops = {
3799 .open = md_seq_open, 4334 .open = md_seq_open,
3800 .read = seq_read, 4335 .read = seq_read,
3801 .llseek = seq_lseek, 4336 .llseek = seq_lseek,
3802 .release = seq_release, 4337 .release = md_seq_release,
4338 .poll = mdstat_poll,
3803}; 4339};
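
With the release and poll hooks wired into md_seq_fops, a monitor can hold /proc/mdstat open and sleep in poll(2); POLLPRI|POLLERR fires whenever md_event_count has moved past the per-open snapshot. A minimal watcher (note the re-read after each wakeup, which lets md_seq_show record the new count; without it poll() would return immediately again):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    struct pollfd pfd;

    pfd.fd = open("/proc/mdstat", O_RDONLY);
    if (pfd.fd < 0)
        return 1;
    pfd.events = POLLPRI;

    for (;;) {
        /* re-read from the start: md_seq_show snapshots the event count */
        lseek(pfd.fd, 0, SEEK_SET);
        while (read(pfd.fd, buf, sizeof(buf)) > 0)
            ;
        if (poll(&pfd, 1, -1) < 0)
            break;
        printf("mdstat changed\n");
    }
    close(pfd.fd);
    return 0;
}
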
3804 4340
3805int register_md_personality(int pnum, mdk_personality_t *p) 4341int register_md_personality(struct mdk_personality *p)
3806{ 4342{
3807 if (pnum >= MAX_PERSONALITY) {
3808 printk(KERN_ERR
3809 "md: tried to install personality %s as nr %d, but max is %lu\n",
3810 p->name, pnum, MAX_PERSONALITY-1);
3811 return -EINVAL;
3812 }
3813
3814 spin_lock(&pers_lock); 4343 spin_lock(&pers_lock);
3815 if (pers[pnum]) { 4344 list_add_tail(&p->list, &pers_list);
3816 spin_unlock(&pers_lock); 4345 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
3817 return -EBUSY;
3818 }
3819
3820 pers[pnum] = p;
3821 printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3822 spin_unlock(&pers_lock); 4346 spin_unlock(&pers_lock);
3823 return 0; 4347 return 0;
3824} 4348}
3825 4349
3826int unregister_md_personality(int pnum) 4350int unregister_md_personality(struct mdk_personality *p)
3827{ 4351{
3828 if (pnum >= MAX_PERSONALITY) 4352 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
3829 return -EINVAL;
3830
3831 printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3832 spin_lock(&pers_lock); 4353 spin_lock(&pers_lock);
3833 pers[pnum] = NULL; 4354 list_del_init(&p->list);
3834 spin_unlock(&pers_lock); 4355 spin_unlock(&pers_lock);
3835 return 0; 4356 return 0;
3836} 4357}
@@ -4012,10 +4533,10 @@ static void md_do_sync(mddev_t *mddev)
4012 4533
4013 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); 4534 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
4014 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" 4535 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
4015 " %d KB/sec/disc.\n", sysctl_speed_limit_min); 4536 " %d KB/sec/disc.\n", speed_min(mddev));
4016 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 4537 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
4017 "(but not more than %d KB/sec) for reconstruction.\n", 4538 "(but not more than %d KB/sec) for reconstruction.\n",
4018 sysctl_speed_limit_max); 4539 speed_max(mddev));
4019 4540
4020 is_mddev_idle(mddev); /* this also initializes IO event counters */ 4541 is_mddev_idle(mddev); /* this also initializes IO event counters */
4021 /* we don't use the checkpoint if there's a bitmap */ 4542 /* we don't use the checkpoint if there's a bitmap */
@@ -4056,7 +4577,7 @@ static void md_do_sync(mddev_t *mddev)
4056 4577
4057 skipped = 0; 4578 skipped = 0;
4058 sectors = mddev->pers->sync_request(mddev, j, &skipped, 4579 sectors = mddev->pers->sync_request(mddev, j, &skipped,
4059 currspeed < sysctl_speed_limit_min); 4580 currspeed < speed_min(mddev));
4060 if (sectors == 0) { 4581 if (sectors == 0) {
4061 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 4582 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4062 goto out; 4583 goto out;
@@ -4069,7 +4590,11 @@ static void md_do_sync(mddev_t *mddev)
4069 4590
4070 j += sectors; 4591 j += sectors;
4071 if (j>1) mddev->curr_resync = j; 4592 if (j>1) mddev->curr_resync = j;
4072 4593 if (last_check == 0)
4594 /* this is the earliest that rebuild will be
4595 * visible in /proc/mdstat
4596 */
4597 md_new_event(mddev);
4073 4598
4074 if (last_check + window > io_sectors || j == max_sectors) 4599 if (last_check + window > io_sectors || j == max_sectors)
4075 continue; 4600 continue;
@@ -4117,8 +4642,8 @@ static void md_do_sync(mddev_t *mddev)
4117 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 4642 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
4118 /((jiffies-mddev->resync_mark)/HZ +1) +1; 4643 /((jiffies-mddev->resync_mark)/HZ +1) +1;
4119 4644
4120 if (currspeed > sysctl_speed_limit_min) { 4645 if (currspeed > speed_min(mddev)) {
4121 if ((currspeed > sysctl_speed_limit_max) || 4646 if ((currspeed > speed_max(mddev)) ||
4122 !is_mddev_idle(mddev)) { 4647 !is_mddev_idle(mddev)) {
4123 msleep(500); 4648 msleep(500);
4124 goto repeat; 4649 goto repeat;
@@ -4255,6 +4780,7 @@ void md_check_recovery(mddev_t *mddev)
4255 mddev->recovery = 0; 4780 mddev->recovery = 0;
4256 /* flag recovery needed just to double check */ 4781 /* flag recovery needed just to double check */
4257 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4782 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4783 md_new_event(mddev);
4258 goto unlock; 4784 goto unlock;
4259 } 4785 }
4260 /* Clear some bits that don't mean anything, but 4786 /* Clear some bits that don't mean anything, but
@@ -4292,6 +4818,7 @@ void md_check_recovery(mddev_t *mddev)
4292 sprintf(nm, "rd%d", rdev->raid_disk); 4818 sprintf(nm, "rd%d", rdev->raid_disk);
4293 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 4819 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
4294 spares++; 4820 spares++;
4821 md_new_event(mddev);
4295 } else 4822 } else
4296 break; 4823 break;
4297 } 4824 }
@@ -4324,9 +4851,9 @@ void md_check_recovery(mddev_t *mddev)
4324 mdname(mddev)); 4851 mdname(mddev));
4325 /* leave the spares where they are, it shouldn't hurt */ 4852 /* leave the spares where they are, it shouldn't hurt */
4326 mddev->recovery = 0; 4853 mddev->recovery = 0;
4327 } else { 4854 } else
4328 md_wakeup_thread(mddev->sync_thread); 4855 md_wakeup_thread(mddev->sync_thread);
4329 } 4856 md_new_event(mddev);
4330 } 4857 }
4331 unlock: 4858 unlock:
4332 mddev_unlock(mddev); 4859 mddev_unlock(mddev);
@@ -4503,12 +5030,14 @@ static int set_ro(const char *val, struct kernel_param *kp)
4503 int num = simple_strtoul(val, &e, 10); 5030 int num = simple_strtoul(val, &e, 10);
4504 if (*val && (*e == '\0' || *e == '\n')) { 5031 if (*val && (*e == '\0' || *e == '\n')) {
4505 start_readonly = num; 5032 start_readonly = num;
4506 return 0;; 5033 return 0;
4507 } 5034 }
4508 return -EINVAL; 5035 return -EINVAL;
4509} 5036}
4510 5037
4511module_param_call(start_ro, set_ro, get_ro, NULL, 0600); 5038module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
5039module_param(start_dirty_degraded, int, 0644);
5040
4512 5041
4513EXPORT_SYMBOL(register_md_personality); 5042EXPORT_SYMBOL(register_md_personality);
4514EXPORT_SYMBOL(unregister_md_personality); 5043EXPORT_SYMBOL(unregister_md_personality);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 145cdc5ad008..e6aa309a66d7 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -35,15 +35,10 @@
35#define NR_RESERVED_BUFS 32 35#define NR_RESERVED_BUFS 32
36 36
37 37
38static mdk_personality_t multipath_personality;
39
40
41static void *mp_pool_alloc(gfp_t gfp_flags, void *data) 38static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
42{ 39{
43 struct multipath_bh *mpb; 40 struct multipath_bh *mpb;
44 mpb = kmalloc(sizeof(*mpb), gfp_flags); 41 mpb = kzalloc(sizeof(*mpb), gfp_flags);
45 if (mpb)
46 memset(mpb, 0, sizeof(*mpb));
47 return mpb; 42 return mpb;
48} 43}
49 44
@@ -444,7 +439,7 @@ static int multipath_run (mddev_t *mddev)
444 * should be freed in multipath_stop()] 439 * should be freed in multipath_stop()]
445 */ 440 */
446 441
447 conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL); 442 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
448 mddev->private = conf; 443 mddev->private = conf;
449 if (!conf) { 444 if (!conf) {
450 printk(KERN_ERR 445 printk(KERN_ERR
@@ -452,9 +447,8 @@ static int multipath_run (mddev_t *mddev)
452 mdname(mddev)); 447 mdname(mddev));
453 goto out; 448 goto out;
454 } 449 }
455 memset(conf, 0, sizeof(*conf));
456 450
457 conf->multipaths = kmalloc(sizeof(struct multipath_info)*mddev->raid_disks, 451 conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
458 GFP_KERNEL); 452 GFP_KERNEL);
459 if (!conf->multipaths) { 453 if (!conf->multipaths) {
460 printk(KERN_ERR 454 printk(KERN_ERR
@@ -462,7 +456,6 @@ static int multipath_run (mddev_t *mddev)
462 mdname(mddev)); 456 mdname(mddev));
463 goto out_free_conf; 457 goto out_free_conf;
464 } 458 }
465 memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks);
466 459
467 conf->working_disks = 0; 460 conf->working_disks = 0;
468 ITERATE_RDEV(mddev,rdev,tmp) { 461 ITERATE_RDEV(mddev,rdev,tmp) {
@@ -557,9 +550,10 @@ static int multipath_stop (mddev_t *mddev)
557 return 0; 550 return 0;
558} 551}
559 552
560static mdk_personality_t multipath_personality= 553static struct mdk_personality multipath_personality =
561{ 554{
562 .name = "multipath", 555 .name = "multipath",
556 .level = LEVEL_MULTIPATH,
563 .owner = THIS_MODULE, 557 .owner = THIS_MODULE,
564 .make_request = multipath_make_request, 558 .make_request = multipath_make_request,
565 .run = multipath_run, 559 .run = multipath_run,
@@ -572,15 +566,17 @@ static mdk_personality_t multipath_personality=
572 566
573static int __init multipath_init (void) 567static int __init multipath_init (void)
574{ 568{
575 return register_md_personality (MULTIPATH, &multipath_personality); 569 return register_md_personality (&multipath_personality);
576} 570}
577 571
578static void __exit multipath_exit (void) 572static void __exit multipath_exit (void)
579{ 573{
580 unregister_md_personality (MULTIPATH); 574 unregister_md_personality (&multipath_personality);
581} 575}
582 576
583module_init(multipath_init); 577module_init(multipath_init);
584module_exit(multipath_exit); 578module_exit(multipath_exit);
585MODULE_LICENSE("GPL"); 579MODULE_LICENSE("GPL");
586MODULE_ALIAS("md-personality-7"); /* MULTIPATH */ 580MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
581MODULE_ALIAS("md-multipath");
582MODULE_ALIAS("md-level--4");
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index fece3277c2a5..abbca150202b 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -113,21 +113,16 @@ static int create_strip_zones (mddev_t *mddev)
113 } 113 }
114 printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); 114 printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
115 115
116 conf->strip_zone = kmalloc(sizeof(struct strip_zone)* 116 conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
117 conf->nr_strip_zones, GFP_KERNEL); 117 conf->nr_strip_zones, GFP_KERNEL);
118 if (!conf->strip_zone) 118 if (!conf->strip_zone)
119 return 1; 119 return 1;
120 conf->devlist = kmalloc(sizeof(mdk_rdev_t*)* 120 conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
121 conf->nr_strip_zones*mddev->raid_disks, 121 conf->nr_strip_zones*mddev->raid_disks,
122 GFP_KERNEL); 122 GFP_KERNEL);
123 if (!conf->devlist) 123 if (!conf->devlist)
124 return 1; 124 return 1;
125 125
126 memset(conf->strip_zone, 0,sizeof(struct strip_zone)*
127 conf->nr_strip_zones);
128 memset(conf->devlist, 0,
129 sizeof(mdk_rdev_t*) * conf->nr_strip_zones * mddev->raid_disks);
130
131 /* The first zone must contain all devices, so here we check that 126 /* The first zone must contain all devices, so here we check that
132 * there is a proper alignment of slots to devices and find them all 127 * there is a proper alignment of slots to devices and find them all
133 */ 128 */
@@ -280,7 +275,11 @@ static int raid0_run (mddev_t *mddev)
280 mdk_rdev_t *rdev; 275 mdk_rdev_t *rdev;
281 struct list_head *tmp; 276 struct list_head *tmp;
282 277
283 printk("%s: setting max_sectors to %d, segment boundary to %d\n", 278 if (mddev->chunk_size == 0) {
279 printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
280 return -EINVAL;
281 }
282 printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
284 mdname(mddev), 283 mdname(mddev),
285 mddev->chunk_size >> 9, 284 mddev->chunk_size >> 9,
286 (mddev->chunk_size>>1)-1); 285 (mddev->chunk_size>>1)-1);
@@ -361,7 +360,7 @@ static int raid0_run (mddev_t *mddev)
361 * chunksize should be used in that case. 360 * chunksize should be used in that case.
362 */ 361 */
363 { 362 {
364 int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; 363 int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
365 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 364 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
366 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 365 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
367 } 366 }
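
For example, with 4 member disks and a 64 KiB chunk on 4 KiB pages, stripe works out to 4 * 65536 / 4096 = 64 pages, so ra_pages is raised to at least 128 pages, i.e. 512 KiB of readahead covering two full stripes.
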
@@ -512,9 +511,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
512 return; 511 return;
513} 512}
514 513
515static mdk_personality_t raid0_personality= 514static struct mdk_personality raid0_personality=
516{ 515{
517 .name = "raid0", 516 .name = "raid0",
517 .level = 0,
518 .owner = THIS_MODULE, 518 .owner = THIS_MODULE,
519 .make_request = raid0_make_request, 519 .make_request = raid0_make_request,
520 .run = raid0_run, 520 .run = raid0_run,
@@ -524,15 +524,17 @@ static mdk_personality_t raid0_personality=
524 524
525static int __init raid0_init (void) 525static int __init raid0_init (void)
526{ 526{
527 return register_md_personality (RAID0, &raid0_personality); 527 return register_md_personality (&raid0_personality);
528} 528}
529 529
530static void raid0_exit (void) 530static void raid0_exit (void)
531{ 531{
532 unregister_md_personality (RAID0); 532 unregister_md_personality (&raid0_personality);
533} 533}
534 534
535module_init(raid0_init); 535module_init(raid0_init);
536module_exit(raid0_exit); 536module_exit(raid0_exit);
537MODULE_LICENSE("GPL"); 537MODULE_LICENSE("GPL");
538MODULE_ALIAS("md-personality-2"); /* RAID0 */ 538MODULE_ALIAS("md-personality-2"); /* RAID0 */
539MODULE_ALIAS("md-raid0");
540MODULE_ALIAS("md-level-0");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 229d7b204297..a06ff91f27e2 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -47,10 +47,11 @@
47 */ 47 */
48#define NR_RAID1_BIOS 256 48#define NR_RAID1_BIOS 256
49 49
50static mdk_personality_t raid1_personality;
51 50
52static void unplug_slaves(mddev_t *mddev); 51static void unplug_slaves(mddev_t *mddev);
53 52
53static void allow_barrier(conf_t *conf);
54static void lower_barrier(conf_t *conf);
54 55
55static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 56static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
56{ 57{
@@ -59,10 +60,8 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
59 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 60 int size = offsetof(r1bio_t, bios[pi->raid_disks]);
60 61
61 /* allocate a r1bio with room for raid_disks entries in the bios array */ 62 /* allocate a r1bio with room for raid_disks entries in the bios array */
62 r1_bio = kmalloc(size, gfp_flags); 63 r1_bio = kzalloc(size, gfp_flags);
63 if (r1_bio) 64 if (!r1_bio)
64 memset(r1_bio, 0, size);
65 else
66 unplug_slaves(pi->mddev); 65 unplug_slaves(pi->mddev);
67 66
68 return r1_bio; 67 return r1_bio;
@@ -104,15 +103,30 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
104 } 103 }
105 /* 104 /*
106 * Allocate RESYNC_PAGES data pages and attach them to 105 * Allocate RESYNC_PAGES data pages and attach them to
107 * the first bio; 106 * the first bio.
107 * If this is a user-requested check/repair, allocate
108 * RESYNC_PAGES for each bio.
108 */ 109 */
109 bio = r1_bio->bios[0]; 110 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
110 for (i = 0; i < RESYNC_PAGES; i++) { 111 j = pi->raid_disks;
111 page = alloc_page(gfp_flags); 112 else
112 if (unlikely(!page)) 113 j = 1;
113 goto out_free_pages; 114 while(j--) {
114 115 bio = r1_bio->bios[j];
115 bio->bi_io_vec[i].bv_page = page; 116 for (i = 0; i < RESYNC_PAGES; i++) {
117 page = alloc_page(gfp_flags);
118 if (unlikely(!page))
119 goto out_free_pages;
120
121 bio->bi_io_vec[i].bv_page = page;
122 }
123 }
124 /* If not user-requests, copy the page pointers to all bios */
125 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
126 for (i=0; i<RESYNC_PAGES ; i++)
127 for (j=1; j<pi->raid_disks; j++)
128 r1_bio->bios[j]->bi_io_vec[i].bv_page =
129 r1_bio->bios[0]->bi_io_vec[i].bv_page;
116 } 130 }
117 131
118 r1_bio->master_bio = NULL; 132 r1_bio->master_bio = NULL;
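
The page accounting here is worth spelling out: with 4 KiB pages, RESYNC_PAGES is 16 (one 64 KiB resync window). An ordinary resync allocates those 16 pages once and shares the pointers across every bio, since one device is read and the others are written from the same data; a user-requested check or repair allocates 16 pages per mirror, e.g. 64 pages on a 4-disk set, so each device's content can be read independently and compared.
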
@@ -120,8 +134,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
120 return r1_bio; 134 return r1_bio;
121 135
122out_free_pages: 136out_free_pages:
123 for ( ; i > 0 ; i--) 137 for (i=0; i < RESYNC_PAGES ; i++)
124 __free_page(bio->bi_io_vec[i-1].bv_page); 138 for (j=0 ; j < pi->raid_disks; j++)
139 safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
140 j = -1;
125out_free_bio: 141out_free_bio:
126 while ( ++j < pi->raid_disks ) 142 while ( ++j < pi->raid_disks )
127 bio_put(r1_bio->bios[j]); 143 bio_put(r1_bio->bios[j]);
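The reworked r1buf_pool_alloc() above allocates one set of RESYNC_PAGES shared by all bios for a plain resync, but gives every device private pages for a user-requested check/repair so the copies can later be compared. Reduced to the policy alone (a sketch; error handling and the surrounding pool plumbing are omitted):

        /* sketch of the page policy in r1buf_pool_alloc() */
        int sets = test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)
                ? pi->raid_disks        /* check/repair: pages per device */
                : 1;                    /* plain resync: one set, shared */

        for (j = 0; j < sets; j++)
                for (i = 0; i < RESYNC_PAGES; i++)
                        r1_bio->bios[j]->bi_io_vec[i].bv_page =
                                alloc_page(gfp_flags);

        if (sets == 1)  /* the other bios alias bio 0's pages */
                for (j = 1; j < pi->raid_disks; j++)
                        for (i = 0; i < RESYNC_PAGES; i++)
                                r1_bio->bios[j]->bi_io_vec[i].bv_page =
                                        r1_bio->bios[0]->bi_io_vec[i].bv_page;

This sharing is why the matching r1buf_pool_free() in the next hunk compares each page against bio 0's and frees a shared page only once.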
@@ -132,14 +148,16 @@ out_free_bio:
132static void r1buf_pool_free(void *__r1_bio, void *data) 148static void r1buf_pool_free(void *__r1_bio, void *data)
133{ 149{
134 struct pool_info *pi = data; 150 struct pool_info *pi = data;
135 int i; 151 int i,j;
136 r1bio_t *r1bio = __r1_bio; 152 r1bio_t *r1bio = __r1_bio;
137 struct bio *bio = r1bio->bios[0];
138 153
139 for (i = 0; i < RESYNC_PAGES; i++) { 154 for (i = 0; i < RESYNC_PAGES; i++)
140 __free_page(bio->bi_io_vec[i].bv_page); 155 for (j = pi->raid_disks; j-- ;) {
141 bio->bi_io_vec[i].bv_page = NULL; 156 if (j == 0 ||
142 } 157 r1bio->bios[j]->bi_io_vec[i].bv_page !=
158 r1bio->bios[0]->bi_io_vec[i].bv_page)
159 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
160 }
143 for (i=0 ; i < pi->raid_disks; i++) 161 for (i=0 ; i < pi->raid_disks; i++)
144 bio_put(r1bio->bios[i]); 162 bio_put(r1bio->bios[i]);
145 163
@@ -152,7 +170,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
152 170
153 for (i = 0; i < conf->raid_disks; i++) { 171 for (i = 0; i < conf->raid_disks; i++) {
154 struct bio **bio = r1_bio->bios + i; 172 struct bio **bio = r1_bio->bios + i;
155 if (*bio) 173 if (*bio && *bio != IO_BLOCKED)
156 bio_put(*bio); 174 bio_put(*bio);
157 *bio = NULL; 175 *bio = NULL;
158 } 176 }
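The new `*bio != IO_BLOCKED` check works because IO_BLOCKED is not a bio at all: it is a sentinel pointer stored in r1_bio->bios[] to tell read_balance() that a device must be skipped for this request (it gets set later in this patch when a read fails on a read-only array). It is presumably defined near the top of the file along these lines:

        /* sketch: a non-NULL marker that is never dereferenced; it only
         * means "do not read from this device for this r1_bio" */
        #define IO_BLOCKED ((struct bio *)1)

Being non-NULL it passes the usual `if (*bio)` liveness tests, so each consumer has to compare against it explicitly, as here, before treating the slot as a real bio.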
@@ -160,20 +178,13 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
160 178
161static inline void free_r1bio(r1bio_t *r1_bio) 179static inline void free_r1bio(r1bio_t *r1_bio)
162{ 180{
163 unsigned long flags;
164
165 conf_t *conf = mddev_to_conf(r1_bio->mddev); 181 conf_t *conf = mddev_to_conf(r1_bio->mddev);
166 182
167 /* 183 /*
168 * Wake up any possible resync thread that waits for the device 184 * Wake up any possible resync thread that waits for the device
169 * to go idle. 185 * to go idle.
170 */ 186 */
171 spin_lock_irqsave(&conf->resync_lock, flags); 187 allow_barrier(conf);
172 if (!--conf->nr_pending) {
173 wake_up(&conf->wait_idle);
174 wake_up(&conf->wait_resume);
175 }
176 spin_unlock_irqrestore(&conf->resync_lock, flags);
177 188
178 put_all_bios(conf, r1_bio); 189 put_all_bios(conf, r1_bio);
179 mempool_free(r1_bio, conf->r1bio_pool); 190 mempool_free(r1_bio, conf->r1bio_pool);
@@ -182,22 +193,17 @@ static inline void free_r1bio(r1bio_t *r1_bio)
182static inline void put_buf(r1bio_t *r1_bio) 193static inline void put_buf(r1bio_t *r1_bio)
183{ 194{
184 conf_t *conf = mddev_to_conf(r1_bio->mddev); 195 conf_t *conf = mddev_to_conf(r1_bio->mddev);
185 unsigned long flags; 196 int i;
186 197
187 mempool_free(r1_bio, conf->r1buf_pool); 198 for (i=0; i<conf->raid_disks; i++) {
199 struct bio *bio = r1_bio->bios[i];
200 if (bio->bi_end_io)
201 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
202 }
188 203
189 spin_lock_irqsave(&conf->resync_lock, flags); 204 mempool_free(r1_bio, conf->r1buf_pool);
190 if (!conf->barrier)
191 BUG();
192 --conf->barrier;
193 wake_up(&conf->wait_resume);
194 wake_up(&conf->wait_idle);
195 205
196 if (!--conf->nr_pending) { 206 lower_barrier(conf);
197 wake_up(&conf->wait_idle);
198 wake_up(&conf->wait_resume);
199 }
200 spin_unlock_irqrestore(&conf->resync_lock, flags);
201} 207}
202 208
203static void reschedule_retry(r1bio_t *r1_bio) 209static void reschedule_retry(r1bio_t *r1_bio)
@@ -208,8 +214,10 @@ static void reschedule_retry(r1bio_t *r1_bio)
208 214
209 spin_lock_irqsave(&conf->device_lock, flags); 215 spin_lock_irqsave(&conf->device_lock, flags);
210 list_add(&r1_bio->retry_list, &conf->retry_list); 216 list_add(&r1_bio->retry_list, &conf->retry_list);
217 conf->nr_queued ++;
211 spin_unlock_irqrestore(&conf->device_lock, flags); 218 spin_unlock_irqrestore(&conf->device_lock, flags);
212 219
220 wake_up(&conf->wait_barrier);
213 md_wakeup_thread(mddev->thread); 221 md_wakeup_thread(mddev->thread);
214} 222}
215 223
@@ -261,9 +269,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
261 /* 269 /*
262 * this branch is our 'one mirror IO has finished' event handler: 270 * this branch is our 'one mirror IO has finished' event handler:
263 */ 271 */
264 if (!uptodate) 272 update_head_pos(mirror, r1_bio);
265 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); 273
266 else 274 if (uptodate || conf->working_disks <= 1) {
267 /* 275 /*
268 * Set R1BIO_Uptodate in our master bio, so that 276 * Set R1BIO_Uptodate in our master bio, so that
269 * we will return a good error code to the higher 277
@@ -273,16 +281,11 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
273 * user-side. So if something waits for IO, then it will 281 * user-side. So if something waits for IO, then it will
274 * wait for the 'master' bio. 282 * wait for the 'master' bio.
275 */ 283 */
276 set_bit(R1BIO_Uptodate, &r1_bio->state); 284 if (uptodate)
277 285 set_bit(R1BIO_Uptodate, &r1_bio->state);
278 update_head_pos(mirror, r1_bio);
279 286
280 /*
281 * we have only one bio on the read side
282 */
283 if (uptodate)
284 raid_end_bio_io(r1_bio); 287 raid_end_bio_io(r1_bio);
285 else { 288 } else {
286 /* 289 /*
287 * oops, read error: 290 * oops, read error:
288 */ 291 */
@@ -378,7 +381,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
378 /* free extra copy of the data pages */ 381 /* free extra copy of the data pages */
379 int i = bio->bi_vcnt; 382 int i = bio->bi_vcnt;
380 while (i--) 383 while (i--)
381 __free_page(bio->bi_io_vec[i].bv_page); 384 safe_put_page(bio->bi_io_vec[i].bv_page);
382 } 385 }
383 /* clear the bitmap if all writes complete successfully */ 386 /* clear the bitmap if all writes complete successfully */
384 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 387 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
@@ -433,11 +436,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
433 new_disk = 0; 436 new_disk = 0;
434 437
435 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 438 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
439 r1_bio->bios[new_disk] == IO_BLOCKED ||
436 !rdev || !test_bit(In_sync, &rdev->flags) 440 !rdev || !test_bit(In_sync, &rdev->flags)
437 || test_bit(WriteMostly, &rdev->flags); 441 || test_bit(WriteMostly, &rdev->flags);
438 rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) { 442 rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
439 443
440 if (rdev && test_bit(In_sync, &rdev->flags)) 444 if (rdev && test_bit(In_sync, &rdev->flags) &&
445 r1_bio->bios[new_disk] != IO_BLOCKED)
441 wonly_disk = new_disk; 446 wonly_disk = new_disk;
442 447
443 if (new_disk == conf->raid_disks - 1) { 448 if (new_disk == conf->raid_disks - 1) {
@@ -451,11 +456,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
451 456
452 /* make sure the disk is operational */ 457 /* make sure the disk is operational */
453 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); 458 for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
459 r1_bio->bios[new_disk] == IO_BLOCKED ||
454 !rdev || !test_bit(In_sync, &rdev->flags) || 460 !rdev || !test_bit(In_sync, &rdev->flags) ||
455 test_bit(WriteMostly, &rdev->flags); 461 test_bit(WriteMostly, &rdev->flags);
456 rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) { 462 rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
457 463
458 if (rdev && test_bit(In_sync, &rdev->flags)) 464 if (rdev && test_bit(In_sync, &rdev->flags) &&
465 r1_bio->bios[new_disk] != IO_BLOCKED)
459 wonly_disk = new_disk; 466 wonly_disk = new_disk;
460 467
461 if (new_disk <= 0) 468 if (new_disk <= 0)
@@ -492,7 +499,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
492 499
493 rdev = rcu_dereference(conf->mirrors[disk].rdev); 500 rdev = rcu_dereference(conf->mirrors[disk].rdev);
494 501
495 if (!rdev || 502 if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
496 !test_bit(In_sync, &rdev->flags) || 503 !test_bit(In_sync, &rdev->flags) ||
497 test_bit(WriteMostly, &rdev->flags)) 504 test_bit(WriteMostly, &rdev->flags))
498 continue; 505 continue;
@@ -520,7 +527,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
520 /* cannot risk returning a device that failed 527 /* cannot risk returning a device that failed
521 * before we inc'ed nr_pending 528 * before we inc'ed nr_pending
522 */ 529 */
523 atomic_dec(&rdev->nr_pending); 530 rdev_dec_pending(rdev, conf->mddev);
524 goto retry; 531 goto retry;
525 } 532 }
526 conf->next_seq_sect = this_sector + sectors; 533 conf->next_seq_sect = this_sector + sectors;
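Swapping the bare atomic_dec() for rdev_dec_pending() is more than a style change: the helper also arranges for a Faulty device to be dealt with once its last outstanding IO drains. From memory of the md_k.h helper (a sketch, not part of this diff):

        /* sketch: drop one pending-IO reference on a member device */
        static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
        {
                int faulty = test_bit(Faulty, &rdev->flags);
                if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
                        /* last IO on a failed device: kick the md thread
                         * so the device can finally be detached */
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        }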
@@ -593,42 +600,119 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
593 return ret; 600 return ret;
594} 601}
595 602
596/* 603/* Barriers....
597 * Throttle resync depth, so that we can both get proper overlapping of 604 * Sometimes we need to suspend IO while we do something else,
598 * requests, but are still able to handle normal requests quickly. 605 * either some resync/recovery, or reconfigure the array.
606 * To do this we raise a 'barrier'.
607 * The 'barrier' is a counter that can be raised multiple times
608 * to count how many activities are happening which preclude
609 * normal IO.
610 * We can only raise the barrier if there is no pending IO.
611 * i.e. if nr_pending == 0.
612 * We choose only to raise the barrier if no-one is waiting for the
613 * barrier to go down. This means that as soon as an IO request
614 * is ready, no other operations which require a barrier will start
615 * until the IO request has had a chance.
616 *
617 * So: regular IO calls 'wait_barrier'. When that returns there
618 * is no backgroup IO happening, It must arrange to call
619 * allow_barrier when it has finished its IO.
620 * backgroup IO calls must call raise_barrier. Once that returns
621 * there is no normal IO happeing. It must arrange to call
622 * lower_barrier when the particular background IO completes.
599 */ 623 */
600#define RESYNC_DEPTH 32 624#define RESYNC_DEPTH 32
601 625
602static void device_barrier(conf_t *conf, sector_t sect) 626static void raise_barrier(conf_t *conf)
603{ 627{
604 spin_lock_irq(&conf->resync_lock); 628 spin_lock_irq(&conf->resync_lock);
605 wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), 629
606 conf->resync_lock, raid1_unplug(conf->mddev->queue)); 630 /* Wait until no block IO is waiting */
607 631 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
608 if (!conf->barrier++) { 632 conf->resync_lock,
609 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, 633 raid1_unplug(conf->mddev->queue));
610 conf->resync_lock, raid1_unplug(conf->mddev->queue)); 634
611 if (conf->nr_pending) 635 /* block any new IO from starting */
612 BUG(); 636 conf->barrier++;
637
 638 /* Now wait for all pending IO to complete */
639 wait_event_lock_irq(conf->wait_barrier,
640 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
641 conf->resync_lock,
642 raid1_unplug(conf->mddev->queue));
643
644 spin_unlock_irq(&conf->resync_lock);
645}
646
647static void lower_barrier(conf_t *conf)
648{
649 unsigned long flags;
650 spin_lock_irqsave(&conf->resync_lock, flags);
651 conf->barrier--;
652 spin_unlock_irqrestore(&conf->resync_lock, flags);
653 wake_up(&conf->wait_barrier);
654}
655
656static void wait_barrier(conf_t *conf)
657{
658 spin_lock_irq(&conf->resync_lock);
659 if (conf->barrier) {
660 conf->nr_waiting++;
661 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
662 conf->resync_lock,
663 raid1_unplug(conf->mddev->queue));
664 conf->nr_waiting--;
613 } 665 }
614 wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, 666 conf->nr_pending++;
615 conf->resync_lock, raid1_unplug(conf->mddev->queue)); 667 spin_unlock_irq(&conf->resync_lock);
616 conf->next_resync = sect; 668}
669
670static void allow_barrier(conf_t *conf)
671{
672 unsigned long flags;
673 spin_lock_irqsave(&conf->resync_lock, flags);
674 conf->nr_pending--;
675 spin_unlock_irqrestore(&conf->resync_lock, flags);
676 wake_up(&conf->wait_barrier);
677}
678
679static void freeze_array(conf_t *conf)
680{
681 /* stop syncio and normal IO and wait for everything to
 682 * go quiet.
683 * We increment barrier and nr_waiting, and then
 684 * wait until barrier+nr_pending matches nr_queued+2
685 */
686 spin_lock_irq(&conf->resync_lock);
687 conf->barrier++;
688 conf->nr_waiting++;
689 wait_event_lock_irq(conf->wait_barrier,
690 conf->barrier+conf->nr_pending == conf->nr_queued+2,
691 conf->resync_lock,
692 raid1_unplug(conf->mddev->queue));
693 spin_unlock_irq(&conf->resync_lock);
694}
695static void unfreeze_array(conf_t *conf)
696{
697 /* reverse the effect of the freeze */
698 spin_lock_irq(&conf->resync_lock);
699 conf->barrier--;
700 conf->nr_waiting--;
701 wake_up(&conf->wait_barrier);
617 spin_unlock_irq(&conf->resync_lock); 702 spin_unlock_irq(&conf->resync_lock);
618} 703}
619 704
705
620/* duplicate the data pages for behind I/O */ 706/* duplicate the data pages for behind I/O */
621static struct page **alloc_behind_pages(struct bio *bio) 707static struct page **alloc_behind_pages(struct bio *bio)
622{ 708{
623 int i; 709 int i;
624 struct bio_vec *bvec; 710 struct bio_vec *bvec;
625 struct page **pages = kmalloc(bio->bi_vcnt * sizeof(struct page *), 711 struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
626 GFP_NOIO); 712 GFP_NOIO);
627 if (unlikely(!pages)) 713 if (unlikely(!pages))
628 goto do_sync_io; 714 goto do_sync_io;
629 715
630 memset(pages, 0, bio->bi_vcnt * sizeof(struct page *));
631
632 bio_for_each_segment(bvec, bio, i) { 716 bio_for_each_segment(bvec, bio, i) {
633 pages[i] = alloc_page(GFP_NOIO); 717 pages[i] = alloc_page(GFP_NOIO);
634 if (unlikely(!pages[i])) 718 if (unlikely(!pages[i]))
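The raise/lower/wait/allow helpers introduced in the hunk above implement the protocol the comment block describes, juggling three counters under resync_lock: barrier (active background operations), nr_pending (normal IO in flight) and nr_waiting (normal IO queued behind a barrier). The rules are easiest to see in a user-space model where a pthread condition variable stands in for wait_event_lock_irq()/wake_up() (illustrative only; the kernel code above is authoritative):

        #include <pthread.h>

        #define RESYNC_DEPTH 32

        static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t wait_barrier_q = PTHREAD_COND_INITIALIZER;
        static int barrier, nr_pending, nr_waiting;

        void raise_barrier(void)        /* background IO: resync, reshape */
        {
                pthread_mutex_lock(&resync_lock);
                while (nr_waiting)      /* don't starve queued normal IO */
                        pthread_cond_wait(&wait_barrier_q, &resync_lock);
                barrier++;              /* gate now closed to new normal IO */
                while (nr_pending || barrier >= RESYNC_DEPTH)
                        pthread_cond_wait(&wait_barrier_q, &resync_lock);
                pthread_mutex_unlock(&resync_lock);
        }

        void lower_barrier(void)
        {
                pthread_mutex_lock(&resync_lock);
                barrier--;
                pthread_cond_broadcast(&wait_barrier_q);
                pthread_mutex_unlock(&resync_lock);
        }

        void wait_barrier(void)         /* normal IO entry */
        {
                pthread_mutex_lock(&resync_lock);
                if (barrier) {
                        nr_waiting++;
                        while (barrier)
                                pthread_cond_wait(&wait_barrier_q, &resync_lock);
                        nr_waiting--;
                        pthread_cond_broadcast(&wait_barrier_q);
                }
                nr_pending++;
                pthread_mutex_unlock(&resync_lock);
        }

        void allow_barrier(void)        /* normal IO exit */
        {
                pthread_mutex_lock(&resync_lock);
                nr_pending--;
                pthread_cond_broadcast(&wait_barrier_q);
                pthread_mutex_unlock(&resync_lock);
        }

freeze_array() then combines both roles: it bumps barrier and nr_waiting together and waits for barrier+nr_pending == nr_queued+2, i.e. until the only IO still counted as pending is what sits parked on conf->retry_list (tracked by the new nr_queued) plus the one request raid1d is currently handling.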
@@ -644,7 +728,7 @@ static struct page **alloc_behind_pages(struct bio *bio)
644do_sync_io: 728do_sync_io:
645 if (pages) 729 if (pages)
646 for (i = 0; i < bio->bi_vcnt && pages[i]; i++) 730 for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
647 __free_page(pages[i]); 731 put_page(pages[i]);
648 kfree(pages); 732 kfree(pages);
649 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 733 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
650 return NULL; 734 return NULL;
@@ -678,10 +762,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
678 */ 762 */
679 md_write_start(mddev, bio); /* wait on superblock update early */ 763 md_write_start(mddev, bio); /* wait on superblock update early */
680 764
681 spin_lock_irq(&conf->resync_lock); 765 wait_barrier(conf);
682 wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
683 conf->nr_pending++;
684 spin_unlock_irq(&conf->resync_lock);
685 766
686 disk_stat_inc(mddev->gendisk, ios[rw]); 767 disk_stat_inc(mddev->gendisk, ios[rw]);
687 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 768 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
@@ -749,7 +830,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
749 !test_bit(Faulty, &rdev->flags)) { 830 !test_bit(Faulty, &rdev->flags)) {
750 atomic_inc(&rdev->nr_pending); 831 atomic_inc(&rdev->nr_pending);
751 if (test_bit(Faulty, &rdev->flags)) { 832 if (test_bit(Faulty, &rdev->flags)) {
752 atomic_dec(&rdev->nr_pending); 833 rdev_dec_pending(rdev, mddev);
753 r1_bio->bios[i] = NULL; 834 r1_bio->bios[i] = NULL;
754 } else 835 } else
755 r1_bio->bios[i] = bio; 836 r1_bio->bios[i] = bio;
@@ -909,13 +990,8 @@ static void print_conf(conf_t *conf)
909 990
910static void close_sync(conf_t *conf) 991static void close_sync(conf_t *conf)
911{ 992{
912 spin_lock_irq(&conf->resync_lock); 993 wait_barrier(conf);
913 wait_event_lock_irq(conf->wait_resume, !conf->barrier, 994 allow_barrier(conf);
914 conf->resync_lock, raid1_unplug(conf->mddev->queue));
915 spin_unlock_irq(&conf->resync_lock);
916
917 if (conf->barrier) BUG();
918 if (waitqueue_active(&conf->wait_idle)) BUG();
919 995
920 mempool_destroy(conf->r1buf_pool); 996 mempool_destroy(conf->r1buf_pool);
921 conf->r1buf_pool = NULL; 997 conf->r1buf_pool = NULL;
@@ -1015,28 +1091,27 @@ abort:
1015 1091
1016static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1092static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1017{ 1093{
1018 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1019 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1094 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1020 conf_t *conf = mddev_to_conf(r1_bio->mddev); 1095 int i;
1021 1096
1022 if (bio->bi_size) 1097 if (bio->bi_size)
1023 return 1; 1098 return 1;
1024 1099
1025 if (r1_bio->bios[r1_bio->read_disk] != bio) 1100 for (i=r1_bio->mddev->raid_disks; i--; )
1026 BUG(); 1101 if (r1_bio->bios[i] == bio)
1027 update_head_pos(r1_bio->read_disk, r1_bio); 1102 break;
1103 BUG_ON(i < 0);
1104 update_head_pos(i, r1_bio);
1028 /* 1105 /*
1029 * we have read a block, now it needs to be re-written, 1106 * we have read a block, now it needs to be re-written,
1030 * or re-read if the read failed. 1107 * or re-read if the read failed.
1031 * We don't do much here, just schedule handling by raid1d 1108 * We don't do much here, just schedule handling by raid1d
1032 */ 1109 */
1033 if (!uptodate) { 1110 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1034 md_error(r1_bio->mddev,
1035 conf->mirrors[r1_bio->read_disk].rdev);
1036 } else
1037 set_bit(R1BIO_Uptodate, &r1_bio->state); 1111 set_bit(R1BIO_Uptodate, &r1_bio->state);
1038 rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev); 1112
1039 reschedule_retry(r1_bio); 1113 if (atomic_dec_and_test(&r1_bio->remaining))
1114 reschedule_retry(r1_bio);
1040 return 0; 1115 return 0;
1041} 1116}
1042 1117
@@ -1066,7 +1141,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1066 md_done_sync(mddev, r1_bio->sectors, uptodate); 1141 md_done_sync(mddev, r1_bio->sectors, uptodate);
1067 put_buf(r1_bio); 1142 put_buf(r1_bio);
1068 } 1143 }
1069 rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
1070 return 0; 1144 return 0;
1071} 1145}
1072 1146
@@ -1079,34 +1153,173 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
1079 1153
1080 bio = r1_bio->bios[r1_bio->read_disk]; 1154 bio = r1_bio->bios[r1_bio->read_disk];
1081 1155
1082/* 1156
1083 if (r1_bio->sector == 0) printk("First sync write startss\n"); 1157 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1084*/ 1158 /* We have read all readable devices. If we haven't
1085 /* 1159 * got the block, then there is no hope left.
1086 * schedule writes 1160 * If we have, then we want to do a comparison
1087 */ 1161 * and skip the write if everything is the same.
1162 * If any blocks failed to read, then we need to
1163 * attempt an over-write
1164 */
1165 int primary;
1166 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
1167 for (i=0; i<mddev->raid_disks; i++)
1168 if (r1_bio->bios[i]->bi_end_io == end_sync_read)
1169 md_error(mddev, conf->mirrors[i].rdev);
1170
1171 md_done_sync(mddev, r1_bio->sectors, 1);
1172 put_buf(r1_bio);
1173 return;
1174 }
1175 for (primary=0; primary<mddev->raid_disks; primary++)
1176 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
1177 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
1178 r1_bio->bios[primary]->bi_end_io = NULL;
1179 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
1180 break;
1181 }
1182 r1_bio->read_disk = primary;
1183 for (i=0; i<mddev->raid_disks; i++)
1184 if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
1185 test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
1186 int j;
1187 int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
1188 struct bio *pbio = r1_bio->bios[primary];
1189 struct bio *sbio = r1_bio->bios[i];
1190 for (j = vcnt; j-- ; )
1191 if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
1192 page_address(sbio->bi_io_vec[j].bv_page),
1193 PAGE_SIZE))
1194 break;
1195 if (j >= 0)
1196 mddev->resync_mismatches += r1_bio->sectors;
1197 if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
1198 sbio->bi_end_io = NULL;
1199 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
1200 } else {
1201 /* fixup the bio for reuse */
1202 sbio->bi_vcnt = vcnt;
1203 sbio->bi_size = r1_bio->sectors << 9;
1204 sbio->bi_idx = 0;
1205 sbio->bi_phys_segments = 0;
1206 sbio->bi_hw_segments = 0;
1207 sbio->bi_hw_front_size = 0;
1208 sbio->bi_hw_back_size = 0;
1209 sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1210 sbio->bi_flags |= 1 << BIO_UPTODATE;
1211 sbio->bi_next = NULL;
1212 sbio->bi_sector = r1_bio->sector +
1213 conf->mirrors[i].rdev->data_offset;
1214 sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1215 }
1216 }
1217 }
1088 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { 1218 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
1089 /* 1219 /* ouch - failed to read all of that.
1090 * There is no point trying a read-for-reconstruct as 1220 * Try some synchronous reads of other devices to get
1091 * reconstruct is about to be aborted 1221 * good data, much like with normal read errors. Only
1222 * read into the pages we already have so they we don't
1223 * need to re-issue the read request.
1224 * We don't need to freeze the array, because being in an
1225 * active sync request, there is no normal IO, and
1226 * no overlapping syncs.
1092 */ 1227 */
1093 char b[BDEVNAME_SIZE]; 1228 sector_t sect = r1_bio->sector;
1094 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error" 1229 int sectors = r1_bio->sectors;
1095 " for block %llu\n", 1230 int idx = 0;
1096 bdevname(bio->bi_bdev,b), 1231
1097 (unsigned long long)r1_bio->sector); 1232 while(sectors) {
1098 md_done_sync(mddev, r1_bio->sectors, 0); 1233 int s = sectors;
1099 put_buf(r1_bio); 1234 int d = r1_bio->read_disk;
1100 return; 1235 int success = 0;
1236 mdk_rdev_t *rdev;
1237
1238 if (s > (PAGE_SIZE>>9))
1239 s = PAGE_SIZE >> 9;
1240 do {
1241 if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1242 rdev = conf->mirrors[d].rdev;
1243 if (sync_page_io(rdev->bdev,
1244 sect + rdev->data_offset,
1245 s<<9,
1246 bio->bi_io_vec[idx].bv_page,
1247 READ)) {
1248 success = 1;
1249 break;
1250 }
1251 }
1252 d++;
1253 if (d == conf->raid_disks)
1254 d = 0;
1255 } while (!success && d != r1_bio->read_disk);
1256
1257 if (success) {
1258 int start = d;
1259 /* write it back and re-read */
1260 set_bit(R1BIO_Uptodate, &r1_bio->state);
1261 while (d != r1_bio->read_disk) {
1262 if (d == 0)
1263 d = conf->raid_disks;
1264 d--;
1265 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1266 continue;
1267 rdev = conf->mirrors[d].rdev;
1268 atomic_add(s, &rdev->corrected_errors);
1269 if (sync_page_io(rdev->bdev,
1270 sect + rdev->data_offset,
1271 s<<9,
1272 bio->bi_io_vec[idx].bv_page,
1273 WRITE) == 0)
1274 md_error(mddev, rdev);
1275 }
1276 d = start;
1277 while (d != r1_bio->read_disk) {
1278 if (d == 0)
1279 d = conf->raid_disks;
1280 d--;
1281 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
1282 continue;
1283 rdev = conf->mirrors[d].rdev;
1284 if (sync_page_io(rdev->bdev,
1285 sect + rdev->data_offset,
1286 s<<9,
1287 bio->bi_io_vec[idx].bv_page,
1288 READ) == 0)
1289 md_error(mddev, rdev);
1290 }
1291 } else {
1292 char b[BDEVNAME_SIZE];
1293 /* Cannot read from anywhere, array is toast */
1294 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1295 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
1296 " for block %llu\n",
1297 bdevname(bio->bi_bdev,b),
1298 (unsigned long long)r1_bio->sector);
1299 md_done_sync(mddev, r1_bio->sectors, 0);
1300 put_buf(r1_bio);
1301 return;
1302 }
1303 sectors -= s;
1304 sect += s;
1305 idx ++;
1306 }
1101 } 1307 }
1102 1308
1309 /*
1310 * schedule writes
1311 */
1103 atomic_set(&r1_bio->remaining, 1); 1312 atomic_set(&r1_bio->remaining, 1);
1104 for (i = 0; i < disks ; i++) { 1313 for (i = 0; i < disks ; i++) {
1105 wbio = r1_bio->bios[i]; 1314 wbio = r1_bio->bios[i];
1106 if (wbio->bi_end_io != end_sync_write) 1315 if (wbio->bi_end_io == NULL ||
1316 (wbio->bi_end_io == end_sync_read &&
1317 (i == r1_bio->read_disk ||
1318 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
1107 continue; 1319 continue;
1108 1320
1109 atomic_inc(&conf->mirrors[i].rdev->nr_pending); 1321 wbio->bi_rw = WRITE;
1322 wbio->bi_end_io = end_sync_write;
1110 atomic_inc(&r1_bio->remaining); 1323 atomic_inc(&r1_bio->remaining);
1111 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); 1324 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
1112 1325
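For a user-requested check/repair, the block above first picks a primary copy that read successfully, then compares every other successfully-read copy against it page by page; a mismatch bumps mddev->resync_mismatches and, unless MD_RECOVERY_CHECK restricts the pass to counting, re-arms the divergent bio so the write pass at the end of the function overwrites it from the primary. The comparison itself, pulled out for clarity (sketch; vcnt is r1_bio->sectors >> (PAGE_SHIFT-9) as in the hunk):

        /* sketch: does secondary copy 'sbio' differ from primary 'pbio'? */
        static int copies_differ(struct bio *pbio, struct bio *sbio, int vcnt)
        {
                int j;

                for (j = 0; j < vcnt; j++)
                        if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
                                   page_address(sbio->bi_io_vec[j].bv_page),
                                   PAGE_SIZE))
                                return 1;       /* mismatch found */
                return 0;
        }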
@@ -1167,6 +1380,7 @@ static void raid1d(mddev_t *mddev)
1167 break; 1380 break;
1168 r1_bio = list_entry(head->prev, r1bio_t, retry_list); 1381 r1_bio = list_entry(head->prev, r1bio_t, retry_list);
1169 list_del(head->prev); 1382 list_del(head->prev);
1383 conf->nr_queued--;
1170 spin_unlock_irqrestore(&conf->device_lock, flags); 1384 spin_unlock_irqrestore(&conf->device_lock, flags);
1171 1385
1172 mddev = r1_bio->mddev; 1386 mddev = r1_bio->mddev;
@@ -1206,6 +1420,86 @@ static void raid1d(mddev_t *mddev)
1206 } 1420 }
1207 } else { 1421 } else {
1208 int disk; 1422 int disk;
1423
1424 /* we got a read error. Maybe the drive is bad. Maybe just
 1425 * this one block is bad and we can fix it.
1426 * We freeze all other IO, and try reading the block from
1427 * other devices. When we find one, we re-write
 1428 * and re-read to check that this fixes the read error.
1429 * This is all done synchronously while the array is
1430 * frozen
1431 */
1432 sector_t sect = r1_bio->sector;
1433 int sectors = r1_bio->sectors;
1434 freeze_array(conf);
1435 if (mddev->ro == 0) while(sectors) {
1436 int s = sectors;
1437 int d = r1_bio->read_disk;
1438 int success = 0;
1439
1440 if (s > (PAGE_SIZE>>9))
1441 s = PAGE_SIZE >> 9;
1442
1443 do {
1444 rdev = conf->mirrors[d].rdev;
1445 if (rdev &&
1446 test_bit(In_sync, &rdev->flags) &&
1447 sync_page_io(rdev->bdev,
1448 sect + rdev->data_offset,
1449 s<<9,
1450 conf->tmppage, READ))
1451 success = 1;
1452 else {
1453 d++;
1454 if (d == conf->raid_disks)
1455 d = 0;
1456 }
1457 } while (!success && d != r1_bio->read_disk);
1458
1459 if (success) {
1460 /* write it back and re-read */
1461 int start = d;
1462 while (d != r1_bio->read_disk) {
1463 if (d==0)
1464 d = conf->raid_disks;
1465 d--;
1466 rdev = conf->mirrors[d].rdev;
1467 atomic_add(s, &rdev->corrected_errors);
1468 if (rdev &&
1469 test_bit(In_sync, &rdev->flags)) {
1470 if (sync_page_io(rdev->bdev,
1471 sect + rdev->data_offset,
1472 s<<9, conf->tmppage, WRITE) == 0)
1473 /* Well, this device is dead */
1474 md_error(mddev, rdev);
1475 }
1476 }
1477 d = start;
1478 while (d != r1_bio->read_disk) {
1479 if (d==0)
1480 d = conf->raid_disks;
1481 d--;
1482 rdev = conf->mirrors[d].rdev;
1483 if (rdev &&
1484 test_bit(In_sync, &rdev->flags)) {
1485 if (sync_page_io(rdev->bdev,
1486 sect + rdev->data_offset,
1487 s<<9, conf->tmppage, READ) == 0)
1488 /* Well, this device is dead */
1489 md_error(mddev, rdev);
1490 }
1491 }
1492 } else {
1493 /* Cannot read from anywhere -- bye bye array */
1494 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
1495 break;
1496 }
1497 sectors -= s;
1498 sect += s;
1499 }
1500
1501 unfreeze_array(conf);
1502
1209 bio = r1_bio->bios[r1_bio->read_disk]; 1503 bio = r1_bio->bios[r1_bio->read_disk];
1210 if ((disk=read_balance(conf, r1_bio)) == -1) { 1504 if ((disk=read_balance(conf, r1_bio)) == -1) {
1211 printk(KERN_ALERT "raid1: %s: unrecoverable I/O" 1505 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
@@ -1214,7 +1508,8 @@ static void raid1d(mddev_t *mddev)
1214 (unsigned long long)r1_bio->sector); 1508 (unsigned long long)r1_bio->sector);
1215 raid_end_bio_io(r1_bio); 1509 raid_end_bio_io(r1_bio);
1216 } else { 1510 } else {
1217 r1_bio->bios[r1_bio->read_disk] = NULL; 1511 r1_bio->bios[r1_bio->read_disk] =
1512 mddev->ro ? IO_BLOCKED : NULL;
1218 r1_bio->read_disk = disk; 1513 r1_bio->read_disk = disk;
1219 bio_put(bio); 1514 bio_put(bio);
1220 bio = bio_clone(r1_bio->master_bio, GFP_NOIO); 1515 bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
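This is the new automatic read-error correction path: with the array frozen so nothing else moves, raid1d walks the failed range one page at a time, hunts the other In_sync devices for a good copy, writes that copy back over the other mirrors and re-reads to verify, calling md_error() on any device that cannot be repaired. One iteration in outline (a sketch reusing the hunk's names, with the write-back/verify pass compressed into a comment):

        /* sketch: repair up to one page of sectors starting at 'sect' */
        int s = sectors > (PAGE_SIZE >> 9) ? (PAGE_SIZE >> 9) : sectors;
        int d = r1_bio->read_disk, success = 0;

        do {    /* hunt for a device that still holds good data */
                mdk_rdev_t *rdev = conf->mirrors[d].rdev;

                if (rdev && test_bit(In_sync, &rdev->flags) &&
                    sync_page_io(rdev->bdev, sect + rdev->data_offset,
                                 s << 9, conf->tmppage, READ)) {
                        success = 1;
                        break;
                }
                if (++d == conf->raid_disks)
                        d = 0;
        } while (d != r1_bio->read_disk);

        /* on success: WRITE conf->tmppage back to every other In_sync
         * device, then READ it back; md_error() anything that fails
         * either step.  With no readable copy at all, md_error() the
         * read device itself -- this block is lost. */

Note also the read-only twist at the end of the hunk: on an ro array the bad device cannot be rewritten, so its slot is marked IO_BLOCKED rather than NULL, keeping read_balance() away from it without failing the device.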
@@ -1269,14 +1564,13 @@ static int init_resync(conf_t *conf)
1269static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 1564static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1270{ 1565{
1271 conf_t *conf = mddev_to_conf(mddev); 1566 conf_t *conf = mddev_to_conf(mddev);
1272 mirror_info_t *mirror;
1273 r1bio_t *r1_bio; 1567 r1bio_t *r1_bio;
1274 struct bio *bio; 1568 struct bio *bio;
1275 sector_t max_sector, nr_sectors; 1569 sector_t max_sector, nr_sectors;
1276 int disk; 1570 int disk = -1;
1277 int i; 1571 int i;
1278 int wonly; 1572 int wonly = -1;
1279 int write_targets = 0; 1573 int write_targets = 0, read_targets = 0;
1280 int sync_blocks; 1574 int sync_blocks;
1281 int still_degraded = 0; 1575 int still_degraded = 0;
1282 1576
@@ -1317,55 +1611,35 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1317 return sync_blocks; 1611 return sync_blocks;
1318 } 1612 }
1319 /* 1613 /*
1320 * If there is non-resync activity waiting for us then 1614 * If there is non-resync activity waiting for a turn,
1321 * put in a delay to throttle resync. 1615 * and resync is going fast enough,
 1616 * then let it through before starting on this new sync request.
1322 */ 1617 */
1323 if (!go_faster && waitqueue_active(&conf->wait_resume)) 1618 if (!go_faster && conf->nr_waiting)
1324 msleep_interruptible(1000); 1619 msleep_interruptible(1000);
1325 device_barrier(conf, sector_nr + RESYNC_SECTORS);
1326
1327 /*
1328 * If reconstructing, and >1 working disc,
1329 * could dedicate one to rebuild and others to
1330 * service read requests ..
1331 */
1332 disk = conf->last_used;
1333 /* make sure disk is operational */
1334 wonly = disk;
1335 while (conf->mirrors[disk].rdev == NULL ||
1336 !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) ||
1337 test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags)
1338 ) {
1339 if (conf->mirrors[disk].rdev &&
1340 test_bit(In_sync, &conf->mirrors[disk].rdev->flags))
1341 wonly = disk;
1342 if (disk <= 0)
1343 disk = conf->raid_disks;
1344 disk--;
1345 if (disk == conf->last_used) {
1346 disk = wonly;
1347 break;
1348 }
1349 }
1350 conf->last_used = disk;
1351 atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
1352 1620
1621 raise_barrier(conf);
1353 1622
1354 mirror = conf->mirrors + disk; 1623 conf->next_resync = sector_nr;
1355 1624
1356 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); 1625 r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
1357 1626 rcu_read_lock();
1358 spin_lock_irq(&conf->resync_lock); 1627 /*
1359 conf->nr_pending++; 1628 * If we get a correctably read error during resync or recovery,
1360 spin_unlock_irq(&conf->resync_lock); 1629 * we might want to read from a different device. So we
1630 * flag all drives that could conceivably be read from for READ,
1631 * and any others (which will be non-In_sync devices) for WRITE.
1632 * If a read fails, we try reading from something else for which READ
1633 * is OK.
1634 */
1361 1635
1362 r1_bio->mddev = mddev; 1636 r1_bio->mddev = mddev;
1363 r1_bio->sector = sector_nr; 1637 r1_bio->sector = sector_nr;
1364 r1_bio->state = 0; 1638 r1_bio->state = 0;
1365 set_bit(R1BIO_IsSync, &r1_bio->state); 1639 set_bit(R1BIO_IsSync, &r1_bio->state);
1366 r1_bio->read_disk = disk;
1367 1640
1368 for (i=0; i < conf->raid_disks; i++) { 1641 for (i=0; i < conf->raid_disks; i++) {
1642 mdk_rdev_t *rdev;
1369 bio = r1_bio->bios[i]; 1643 bio = r1_bio->bios[i];
1370 1644
1371 /* take from bio_init */ 1645 /* take from bio_init */
@@ -1380,35 +1654,49 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1380 bio->bi_end_io = NULL; 1654 bio->bi_end_io = NULL;
1381 bio->bi_private = NULL; 1655 bio->bi_private = NULL;
1382 1656
1383 if (i == disk) { 1657 rdev = rcu_dereference(conf->mirrors[i].rdev);
1384 bio->bi_rw = READ; 1658 if (rdev == NULL ||
1385 bio->bi_end_io = end_sync_read; 1659 test_bit(Faulty, &rdev->flags)) {
1386 } else if (conf->mirrors[i].rdev == NULL ||
1387 test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
1388 still_degraded = 1; 1660 still_degraded = 1;
1389 continue; 1661 continue;
1390 } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) || 1662 } else if (!test_bit(In_sync, &rdev->flags)) {
1391 sector_nr + RESYNC_SECTORS > mddev->recovery_cp ||
1392 test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1393 bio->bi_rw = WRITE; 1663 bio->bi_rw = WRITE;
1394 bio->bi_end_io = end_sync_write; 1664 bio->bi_end_io = end_sync_write;
1395 write_targets ++; 1665 write_targets ++;
1396 } else 1666 } else {
1397 /* no need to read or write here */ 1667 /* may need to read from here */
1398 continue; 1668 bio->bi_rw = READ;
1399 bio->bi_sector = sector_nr + conf->mirrors[i].rdev->data_offset; 1669 bio->bi_end_io = end_sync_read;
1400 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1670 if (test_bit(WriteMostly, &rdev->flags)) {
1671 if (wonly < 0)
1672 wonly = i;
1673 } else {
1674 if (disk < 0)
1675 disk = i;
1676 }
1677 read_targets++;
1678 }
1679 atomic_inc(&rdev->nr_pending);
1680 bio->bi_sector = sector_nr + rdev->data_offset;
1681 bio->bi_bdev = rdev->bdev;
1401 bio->bi_private = r1_bio; 1682 bio->bi_private = r1_bio;
1402 } 1683 }
1684 rcu_read_unlock();
1685 if (disk < 0)
1686 disk = wonly;
1687 r1_bio->read_disk = disk;
1688
1689 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
1690 /* extra read targets are also write targets */
1691 write_targets += read_targets-1;
1403 1692
1404 if (write_targets == 0) { 1693 if (write_targets == 0 || read_targets == 0) {
1405 /* There is nowhere to write, so all non-sync 1694 /* There is nowhere to write, so all non-sync
1406 * drives must be failed - so we are finished 1695 * drives must be failed - so we are finished
1407 */ 1696 */
1408 sector_t rv = max_sector - sector_nr; 1697 sector_t rv = max_sector - sector_nr;
1409 *skipped = 1; 1698 *skipped = 1;
1410 put_buf(r1_bio); 1699 put_buf(r1_bio);
1411 rdev_dec_pending(conf->mirrors[disk].rdev, mddev);
1412 return rv; 1700 return rv;
1413 } 1701 }
1414 1702
@@ -1436,10 +1724,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1436 for (i=0 ; i < conf->raid_disks; i++) { 1724 for (i=0 ; i < conf->raid_disks; i++) {
1437 bio = r1_bio->bios[i]; 1725 bio = r1_bio->bios[i];
1438 if (bio->bi_end_io) { 1726 if (bio->bi_end_io) {
1439 page = r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page; 1727 page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
1440 if (bio_add_page(bio, page, len, 0) == 0) { 1728 if (bio_add_page(bio, page, len, 0) == 0) {
1441 /* stop here */ 1729 /* stop here */
1442 r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page = page; 1730 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
1443 while (i > 0) { 1731 while (i > 0) {
1444 i--; 1732 i--;
1445 bio = r1_bio->bios[i]; 1733 bio = r1_bio->bios[i];
@@ -1459,12 +1747,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1459 sync_blocks -= (len>>9); 1747 sync_blocks -= (len>>9);
1460 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES); 1748 } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
1461 bio_full: 1749 bio_full:
1462 bio = r1_bio->bios[disk];
1463 r1_bio->sectors = nr_sectors; 1750 r1_bio->sectors = nr_sectors;
1464 1751
1465 md_sync_acct(mirror->rdev->bdev, nr_sectors); 1752 /* For a user-requested sync, we read all readable devices and do a
1753 * compare
1754 */
1755 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1756 atomic_set(&r1_bio->remaining, read_targets);
1757 for (i=0; i<conf->raid_disks; i++) {
1758 bio = r1_bio->bios[i];
1759 if (bio->bi_end_io == end_sync_read) {
1760 md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
1761 generic_make_request(bio);
1762 }
1763 }
1764 } else {
1765 atomic_set(&r1_bio->remaining, 1);
1766 bio = r1_bio->bios[r1_bio->read_disk];
1767 md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
1768 nr_sectors);
1769 generic_make_request(bio);
1466 1770
1467 generic_make_request(bio); 1771 }
1468 1772
1469 return nr_sectors; 1773 return nr_sectors;
1470} 1774}
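The submit logic at the bottom matches the setup above: a user-requested check/repair reads every device flagged with end_sync_read, pre-loading r1_bio->remaining with read_targets so the last completing read (atomic_dec_and_test() in end_sync_read) is what schedules the compare, while a plain resync/recovery reads only the chosen read_disk with remaining set to 1. Setting the count before the first generic_make_request() matters, or a fast completion could hit zero early. Stripped of the md_sync_acct() bookkeeping (sketch):

        /* sketch: who gets read for this resync chunk */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                atomic_set(&r1_bio->remaining, read_targets);
                for (i = 0; i < conf->raid_disks; i++)
                        if (r1_bio->bios[i]->bi_end_io == end_sync_read)
                                generic_make_request(r1_bio->bios[i]);
        } else {
                atomic_set(&r1_bio->remaining, 1);
                generic_make_request(r1_bio->bios[r1_bio->read_disk]);
        }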
@@ -1487,18 +1791,19 @@ static int run(mddev_t *mddev)
1487 * bookkeeping area. [whatever we allocate in run(), 1791 * bookkeeping area. [whatever we allocate in run(),
1488 * should be freed in stop()] 1792 * should be freed in stop()]
1489 */ 1793 */
1490 conf = kmalloc(sizeof(conf_t), GFP_KERNEL); 1794 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1491 mddev->private = conf; 1795 mddev->private = conf;
1492 if (!conf) 1796 if (!conf)
1493 goto out_no_mem; 1797 goto out_no_mem;
1494 1798
1495 memset(conf, 0, sizeof(*conf)); 1799 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1496 conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1497 GFP_KERNEL); 1800 GFP_KERNEL);
1498 if (!conf->mirrors) 1801 if (!conf->mirrors)
1499 goto out_no_mem; 1802 goto out_no_mem;
1500 1803
1501 memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); 1804 conf->tmppage = alloc_page(GFP_KERNEL);
1805 if (!conf->tmppage)
1806 goto out_no_mem;
1502 1807
1503 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 1808 conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
1504 if (!conf->poolinfo) 1809 if (!conf->poolinfo)
@@ -1542,8 +1847,7 @@ static int run(mddev_t *mddev)
1542 mddev->recovery_cp = MaxSector; 1847 mddev->recovery_cp = MaxSector;
1543 1848
1544 spin_lock_init(&conf->resync_lock); 1849 spin_lock_init(&conf->resync_lock);
1545 init_waitqueue_head(&conf->wait_idle); 1850 init_waitqueue_head(&conf->wait_barrier);
1546 init_waitqueue_head(&conf->wait_resume);
1547 1851
1548 bio_list_init(&conf->pending_bio_list); 1852 bio_list_init(&conf->pending_bio_list);
1549 bio_list_init(&conf->flushing_bio_list); 1853 bio_list_init(&conf->flushing_bio_list);
@@ -1583,7 +1887,6 @@ static int run(mddev_t *mddev)
1583 mdname(mddev)); 1887 mdname(mddev));
1584 goto out_free_conf; 1888 goto out_free_conf;
1585 } 1889 }
1586 if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1587 1890
1588 printk(KERN_INFO 1891 printk(KERN_INFO
1589 "raid1: raid set %s active with %d out of %d mirrors\n", 1892 "raid1: raid set %s active with %d out of %d mirrors\n",
@@ -1608,6 +1911,7 @@ out_free_conf:
1608 if (conf->r1bio_pool) 1911 if (conf->r1bio_pool)
1609 mempool_destroy(conf->r1bio_pool); 1912 mempool_destroy(conf->r1bio_pool);
1610 kfree(conf->mirrors); 1913 kfree(conf->mirrors);
1914 safe_put_page(conf->tmppage);
1611 kfree(conf->poolinfo); 1915 kfree(conf->poolinfo);
1612 kfree(conf); 1916 kfree(conf);
1613 mddev->private = NULL; 1917 mddev->private = NULL;
@@ -1706,19 +2010,14 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
1706 kfree(newpoolinfo); 2010 kfree(newpoolinfo);
1707 return -ENOMEM; 2011 return -ENOMEM;
1708 } 2012 }
1709 newmirrors = kmalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL); 2013 newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
1710 if (!newmirrors) { 2014 if (!newmirrors) {
1711 kfree(newpoolinfo); 2015 kfree(newpoolinfo);
1712 mempool_destroy(newpool); 2016 mempool_destroy(newpool);
1713 return -ENOMEM; 2017 return -ENOMEM;
1714 } 2018 }
1715 memset(newmirrors, 0, sizeof(struct mirror_info)*raid_disks);
1716 2019
1717 spin_lock_irq(&conf->resync_lock); 2020 raise_barrier(conf);
1718 conf->barrier++;
1719 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
1720 conf->resync_lock, raid1_unplug(mddev->queue));
1721 spin_unlock_irq(&conf->resync_lock);
1722 2021
1723 /* ok, everything is stopped */ 2022 /* ok, everything is stopped */
1724 oldpool = conf->r1bio_pool; 2023 oldpool = conf->r1bio_pool;
@@ -1738,12 +2037,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
1738 conf->raid_disks = mddev->raid_disks = raid_disks; 2037 conf->raid_disks = mddev->raid_disks = raid_disks;
1739 2038
1740 conf->last_used = 0; /* just make sure it is in-range */ 2039 conf->last_used = 0; /* just make sure it is in-range */
1741 spin_lock_irq(&conf->resync_lock); 2040 lower_barrier(conf);
1742 conf->barrier--;
1743 spin_unlock_irq(&conf->resync_lock);
1744 wake_up(&conf->wait_resume);
1745 wake_up(&conf->wait_idle);
1746
1747 2041
1748 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2042 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1749 md_wakeup_thread(mddev->thread); 2043 md_wakeup_thread(mddev->thread);
@@ -1758,33 +2052,19 @@ static void raid1_quiesce(mddev_t *mddev, int state)
1758 2052
1759 switch(state) { 2053 switch(state) {
1760 case 1: 2054 case 1:
1761 spin_lock_irq(&conf->resync_lock); 2055 raise_barrier(conf);
1762 conf->barrier++;
1763 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
1764 conf->resync_lock, raid1_unplug(mddev->queue));
1765 spin_unlock_irq(&conf->resync_lock);
1766 break; 2056 break;
1767 case 0: 2057 case 0:
1768 spin_lock_irq(&conf->resync_lock); 2058 lower_barrier(conf);
1769 conf->barrier--;
1770 spin_unlock_irq(&conf->resync_lock);
1771 wake_up(&conf->wait_resume);
1772 wake_up(&conf->wait_idle);
1773 break; 2059 break;
1774 } 2060 }
1775 if (mddev->thread) {
1776 if (mddev->bitmap)
1777 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1778 else
1779 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1780 md_wakeup_thread(mddev->thread);
1781 }
1782} 2061}
1783 2062
1784 2063
1785static mdk_personality_t raid1_personality = 2064static struct mdk_personality raid1_personality =
1786{ 2065{
1787 .name = "raid1", 2066 .name = "raid1",
2067 .level = 1,
1788 .owner = THIS_MODULE, 2068 .owner = THIS_MODULE,
1789 .make_request = make_request, 2069 .make_request = make_request,
1790 .run = run, 2070 .run = run,
@@ -1802,15 +2082,17 @@ static mdk_personality_t raid1_personality =
1802 2082
1803static int __init raid_init(void) 2083static int __init raid_init(void)
1804{ 2084{
1805 return register_md_personality(RAID1, &raid1_personality); 2085 return register_md_personality(&raid1_personality);
1806} 2086}
1807 2087
1808static void raid_exit(void) 2088static void raid_exit(void)
1809{ 2089{
1810 unregister_md_personality(RAID1); 2090 unregister_md_personality(&raid1_personality);
1811} 2091}
1812 2092
1813module_init(raid_init); 2093module_init(raid_init);
1814module_exit(raid_exit); 2094module_exit(raid_exit);
1815MODULE_LICENSE("GPL"); 2095MODULE_LICENSE("GPL");
1816MODULE_ALIAS("md-personality-3"); /* RAID1 */ 2096MODULE_ALIAS("md-personality-3"); /* RAID1 */
2097MODULE_ALIAS("md-raid1");
2098MODULE_ALIAS("md-level-1");
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 713dc9c2c730..9e658e519a27 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -18,7 +18,9 @@
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20 20
21#include "dm-bio-list.h"
21#include <linux/raid/raid10.h> 22#include <linux/raid/raid10.h>
23#include <linux/raid/bitmap.h>
22 24
23/* 25/*
24 * RAID10 provides a combination of RAID0 and RAID1 functionality. 26 * RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -47,6 +49,9 @@
47 49
48static void unplug_slaves(mddev_t *mddev); 50static void unplug_slaves(mddev_t *mddev);
49 51
52static void allow_barrier(conf_t *conf);
53static void lower_barrier(conf_t *conf);
54
50static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 55static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
51{ 56{
52 conf_t *conf = data; 57 conf_t *conf = data;
@@ -54,10 +59,8 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
54 int size = offsetof(struct r10bio_s, devs[conf->copies]); 59 int size = offsetof(struct r10bio_s, devs[conf->copies]);
55 60
56 /* allocate a r10bio with room for raid_disks entries in the bios array */ 61 /* allocate a r10bio with room for raid_disks entries in the bios array */
57 r10_bio = kmalloc(size, gfp_flags); 62 r10_bio = kzalloc(size, gfp_flags);
58 if (r10_bio) 63 if (!r10_bio)
59 memset(r10_bio, 0, size);
60 else
61 unplug_slaves(conf->mddev); 64 unplug_slaves(conf->mddev);
62 65
63 return r10_bio; 66 return r10_bio;
@@ -129,10 +132,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
129 132
130out_free_pages: 133out_free_pages:
131 for ( ; i > 0 ; i--) 134 for ( ; i > 0 ; i--)
132 __free_page(bio->bi_io_vec[i-1].bv_page); 135 safe_put_page(bio->bi_io_vec[i-1].bv_page);
133 while (j--) 136 while (j--)
134 for (i = 0; i < RESYNC_PAGES ; i++) 137 for (i = 0; i < RESYNC_PAGES ; i++)
135 __free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page); 138 safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
136 j = -1; 139 j = -1;
137out_free_bio: 140out_free_bio:
138 while ( ++j < nalloc ) 141 while ( ++j < nalloc )
@@ -152,7 +155,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
152 struct bio *bio = r10bio->devs[j].bio; 155 struct bio *bio = r10bio->devs[j].bio;
153 if (bio) { 156 if (bio) {
154 for (i = 0; i < RESYNC_PAGES; i++) { 157 for (i = 0; i < RESYNC_PAGES; i++) {
155 __free_page(bio->bi_io_vec[i].bv_page); 158 safe_put_page(bio->bi_io_vec[i].bv_page);
156 bio->bi_io_vec[i].bv_page = NULL; 159 bio->bi_io_vec[i].bv_page = NULL;
157 } 160 }
158 bio_put(bio); 161 bio_put(bio);
@@ -167,7 +170,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
167 170
168 for (i = 0; i < conf->copies; i++) { 171 for (i = 0; i < conf->copies; i++) {
169 struct bio **bio = & r10_bio->devs[i].bio; 172 struct bio **bio = & r10_bio->devs[i].bio;
170 if (*bio) 173 if (*bio && *bio != IO_BLOCKED)
171 bio_put(*bio); 174 bio_put(*bio);
172 *bio = NULL; 175 *bio = NULL;
173 } 176 }
@@ -175,20 +178,13 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
175 178
176static inline void free_r10bio(r10bio_t *r10_bio) 179static inline void free_r10bio(r10bio_t *r10_bio)
177{ 180{
178 unsigned long flags;
179
180 conf_t *conf = mddev_to_conf(r10_bio->mddev); 181 conf_t *conf = mddev_to_conf(r10_bio->mddev);
181 182
182 /* 183 /*
183 * Wake up any possible resync thread that waits for the device 184 * Wake up any possible resync thread that waits for the device
184 * to go idle. 185 * to go idle.
185 */ 186 */
186 spin_lock_irqsave(&conf->resync_lock, flags); 187 allow_barrier(conf);
187 if (!--conf->nr_pending) {
188 wake_up(&conf->wait_idle);
189 wake_up(&conf->wait_resume);
190 }
191 spin_unlock_irqrestore(&conf->resync_lock, flags);
192 188
193 put_all_bios(conf, r10_bio); 189 put_all_bios(conf, r10_bio);
194 mempool_free(r10_bio, conf->r10bio_pool); 190 mempool_free(r10_bio, conf->r10bio_pool);
@@ -197,22 +193,10 @@ static inline void free_r10bio(r10bio_t *r10_bio)
197static inline void put_buf(r10bio_t *r10_bio) 193static inline void put_buf(r10bio_t *r10_bio)
198{ 194{
199 conf_t *conf = mddev_to_conf(r10_bio->mddev); 195 conf_t *conf = mddev_to_conf(r10_bio->mddev);
200 unsigned long flags;
201 196
202 mempool_free(r10_bio, conf->r10buf_pool); 197 mempool_free(r10_bio, conf->r10buf_pool);
203 198
204 spin_lock_irqsave(&conf->resync_lock, flags); 199 lower_barrier(conf);
205 if (!conf->barrier)
206 BUG();
207 --conf->barrier;
208 wake_up(&conf->wait_resume);
209 wake_up(&conf->wait_idle);
210
211 if (!--conf->nr_pending) {
212 wake_up(&conf->wait_idle);
213 wake_up(&conf->wait_resume);
214 }
215 spin_unlock_irqrestore(&conf->resync_lock, flags);
216} 200}
217 201
218static void reschedule_retry(r10bio_t *r10_bio) 202static void reschedule_retry(r10bio_t *r10_bio)
@@ -223,6 +207,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
223 207
224 spin_lock_irqsave(&conf->device_lock, flags); 208 spin_lock_irqsave(&conf->device_lock, flags);
225 list_add(&r10_bio->retry_list, &conf->retry_list); 209 list_add(&r10_bio->retry_list, &conf->retry_list);
210 conf->nr_queued ++;
226 spin_unlock_irqrestore(&conf->device_lock, flags); 211 spin_unlock_irqrestore(&conf->device_lock, flags);
227 212
228 md_wakeup_thread(mddev->thread); 213 md_wakeup_thread(mddev->thread);
@@ -268,9 +253,9 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
268 /* 253 /*
269 * this branch is our 'one mirror IO has finished' event handler: 254 * this branch is our 'one mirror IO has finished' event handler:
270 */ 255 */
271 if (!uptodate) 256 update_head_pos(slot, r10_bio);
272 md_error(r10_bio->mddev, conf->mirrors[dev].rdev); 257
273 else 258 if (uptodate) {
274 /* 259 /*
275 * Set R10BIO_Uptodate in our master bio, so that 260 * Set R10BIO_Uptodate in our master bio, so that
276 * we will return a good error code to the higher 261 * we will return a good error code to the higher
@@ -281,15 +266,8 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
281 * wait for the 'master' bio. 266 * wait for the 'master' bio.
282 */ 267 */
283 set_bit(R10BIO_Uptodate, &r10_bio->state); 268 set_bit(R10BIO_Uptodate, &r10_bio->state);
284
285 update_head_pos(slot, r10_bio);
286
287 /*
288 * we have only one bio on the read side
289 */
290 if (uptodate)
291 raid_end_bio_io(r10_bio); 269 raid_end_bio_io(r10_bio);
292 else { 270 } else {
293 /* 271 /*
294 * oops, read error: 272 * oops, read error:
295 */ 273 */
@@ -322,9 +300,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
322 /* 300 /*
323 * this branch is our 'one mirror IO has finished' event handler: 301 * this branch is our 'one mirror IO has finished' event handler:
324 */ 302 */
325 if (!uptodate) 303 if (!uptodate) {
326 md_error(r10_bio->mddev, conf->mirrors[dev].rdev); 304 md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
327 else 305 /* an I/O failed, we can't clear the bitmap */
306 set_bit(R10BIO_Degraded, &r10_bio->state);
307 } else
328 /* 308 /*
329 * Set R10BIO_Uptodate in our master bio, so that 309 * Set R10BIO_Uptodate in our master bio, so that
 330 * we will return a good error code to the higher 310
@@ -344,6 +324,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
344 * already. 324 * already.
345 */ 325 */
346 if (atomic_dec_and_test(&r10_bio->remaining)) { 326 if (atomic_dec_and_test(&r10_bio->remaining)) {
327 /* clear the bitmap if all writes complete successfully */
328 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
329 r10_bio->sectors,
330 !test_bit(R10BIO_Degraded, &r10_bio->state),
331 0);
347 md_write_end(r10_bio->mddev); 332 md_write_end(r10_bio->mddev);
348 raid_end_bio_io(r10_bio); 333 raid_end_bio_io(r10_bio);
349 } 334 }
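This gives raid10 the same bitmap bracketing raid1 already has: any copy that cannot be written marks the request R10BIO_Degraded, and only when the last write completes is the bitmap told whether the region may be cleaned. Schematically (a sketch; the bitmap_startwrite() half appears in the make_request() changes at the end of this diff):

        /* sketch: bitmap bracket around one mirrored write */
        bitmap_startwrite(mddev->bitmap, sector, nsectors, 0); /* dirty */
        /* ... one mbio submitted per usable copy ... */

        /* in the completion path, once remaining drops to zero: */
        bitmap_endwrite(mddev->bitmap, sector, nsectors,
                        !test_bit(R10BIO_Degraded, &r10_bio->state),
                        0 /* not a write-behind write */);

Clearing only on full success is what lets a later resync use the bitmap to find regions where some copy may still be stale.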
@@ -502,8 +487,9 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
502 rcu_read_lock(); 487 rcu_read_lock();
503 /* 488 /*
504 * Check if we can balance. We can balance on the whole 489 * Check if we can balance. We can balance on the whole
505 * device if no resync is going on, or below the resync window. 490 * device if no resync is going on (recovery is ok), or below
506 * We take the first readable disk when above the resync window. 491 * the resync window. We take the first readable disk when
492 * above the resync window.
507 */ 493 */
508 if (conf->mddev->recovery_cp < MaxSector 494 if (conf->mddev->recovery_cp < MaxSector
509 && (this_sector + sectors >= conf->next_resync)) { 495 && (this_sector + sectors >= conf->next_resync)) {
@@ -512,6 +498,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
512 disk = r10_bio->devs[slot].devnum; 498 disk = r10_bio->devs[slot].devnum;
513 499
514 while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL || 500 while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
501 r10_bio->devs[slot].bio == IO_BLOCKED ||
515 !test_bit(In_sync, &rdev->flags)) { 502 !test_bit(In_sync, &rdev->flags)) {
516 slot++; 503 slot++;
517 if (slot == conf->copies) { 504 if (slot == conf->copies) {
@@ -529,6 +516,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
529 slot = 0; 516 slot = 0;
530 disk = r10_bio->devs[slot].devnum; 517 disk = r10_bio->devs[slot].devnum;
531 while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL || 518 while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
519 r10_bio->devs[slot].bio == IO_BLOCKED ||
532 !test_bit(In_sync, &rdev->flags)) { 520 !test_bit(In_sync, &rdev->flags)) {
533 slot ++; 521 slot ++;
534 if (slot == conf->copies) { 522 if (slot == conf->copies) {
@@ -549,6 +537,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
549 537
550 538
551 if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL || 539 if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
540 r10_bio->devs[nslot].bio == IO_BLOCKED ||
552 !test_bit(In_sync, &rdev->flags)) 541 !test_bit(In_sync, &rdev->flags))
553 continue; 542 continue;
554 543
@@ -607,7 +596,10 @@ static void unplug_slaves(mddev_t *mddev)
607 596
608static void raid10_unplug(request_queue_t *q) 597static void raid10_unplug(request_queue_t *q)
609{ 598{
599 mddev_t *mddev = q->queuedata;
600
610 unplug_slaves(q->queuedata); 601 unplug_slaves(q->queuedata);
602 md_wakeup_thread(mddev->thread);
611} 603}
612 604
613static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk, 605static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -640,27 +632,107 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
640 return ret; 632 return ret;
641} 633}
642 634
643/* 635/* Barriers....
644 * Throttle resync depth, so that we can both get proper overlapping of 636 * Sometimes we need to suspend IO while we do something else,
645 * requests, but are still able to handle normal requests quickly. 637 * either some resync/recovery, or reconfigure the array.
638 * To do this we raise a 'barrier'.
639 * The 'barrier' is a counter that can be raised multiple times
640 * to count how many activities are happening which preclude
641 * normal IO.
642 * We can only raise the barrier if there is no pending IO.
643 * i.e. if nr_pending == 0.
644 * We choose only to raise the barrier if no-one is waiting for the
645 * barrier to go down. This means that as soon as an IO request
646 * is ready, no other operations which require a barrier will start
647 * until the IO request has had a chance.
648 *
649 * So: regular IO calls 'wait_barrier'. When that returns there
 650 * is no background IO happening. It must arrange to call
 651 * allow_barrier when it has finished its IO.
 652 * background IO calls must call raise_barrier. Once that returns
 653 * there is no normal IO happening. It must arrange to call
654 * lower_barrier when the particular background IO completes.
646 */ 655 */
647#define RESYNC_DEPTH 32 656#define RESYNC_DEPTH 32
648 657
649static void device_barrier(conf_t *conf, sector_t sect) 658static void raise_barrier(conf_t *conf, int force)
659{
660 BUG_ON(force && !conf->barrier);
661 spin_lock_irq(&conf->resync_lock);
662
663 /* Wait until no block IO is waiting (unless 'force') */
664 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
665 conf->resync_lock,
666 raid10_unplug(conf->mddev->queue));
667
668 /* block any new IO from starting */
669 conf->barrier++;
670
 671 /* Now wait for all pending IO to complete */
672 wait_event_lock_irq(conf->wait_barrier,
673 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
674 conf->resync_lock,
675 raid10_unplug(conf->mddev->queue));
676
677 spin_unlock_irq(&conf->resync_lock);
678}
679
680static void lower_barrier(conf_t *conf)
681{
682 unsigned long flags;
683 spin_lock_irqsave(&conf->resync_lock, flags);
684 conf->barrier--;
685 spin_unlock_irqrestore(&conf->resync_lock, flags);
686 wake_up(&conf->wait_barrier);
687}
688
689static void wait_barrier(conf_t *conf)
650{ 690{
651 spin_lock_irq(&conf->resync_lock); 691 spin_lock_irq(&conf->resync_lock);
652 wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), 692 if (conf->barrier) {
653 conf->resync_lock, unplug_slaves(conf->mddev)); 693 conf->nr_waiting++;
654 694 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
655 if (!conf->barrier++) { 695 conf->resync_lock,
656 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, 696 raid10_unplug(conf->mddev->queue));
657 conf->resync_lock, unplug_slaves(conf->mddev)); 697 conf->nr_waiting--;
658 if (conf->nr_pending)
659 BUG();
660 } 698 }
661 wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, 699 conf->nr_pending++;
662 conf->resync_lock, unplug_slaves(conf->mddev)); 700 spin_unlock_irq(&conf->resync_lock);
663 conf->next_resync = sect; 701}
702
703static void allow_barrier(conf_t *conf)
704{
705 unsigned long flags;
706 spin_lock_irqsave(&conf->resync_lock, flags);
707 conf->nr_pending--;
708 spin_unlock_irqrestore(&conf->resync_lock, flags);
709 wake_up(&conf->wait_barrier);
710}
711
712static void freeze_array(conf_t *conf)
713{
714 /* stop syncio and normal IO and wait for everything to
715 * go quiet.
716 * We increment barrier and nr_waiting, and then
 717 * wait until barrier+nr_pending matches nr_queued+2
718 */
719 spin_lock_irq(&conf->resync_lock);
720 conf->barrier++;
721 conf->nr_waiting++;
722 wait_event_lock_irq(conf->wait_barrier,
723 conf->barrier+conf->nr_pending == conf->nr_queued+2,
724 conf->resync_lock,
725 raid10_unplug(conf->mddev->queue));
726 spin_unlock_irq(&conf->resync_lock);
727}
728
729static void unfreeze_array(conf_t *conf)
730{
731 /* reverse the effect of the freeze */
732 spin_lock_irq(&conf->resync_lock);
733 conf->barrier--;
734 conf->nr_waiting--;
735 wake_up(&conf->wait_barrier);
664 spin_unlock_irq(&conf->resync_lock); 736 spin_unlock_irq(&conf->resync_lock);
665} 737}
666 738
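
The counting scheme the new helpers implement can be modeled outside the kernel. Below is a minimal user-space sketch, assuming pthread primitives in place of resync_lock/wait_barrier; the struct name and all helpers are illustrative, not the kernel API. freeze_array() is the same idea taken one step further: the freezer itself accounts for one barrier and one waiter, which is where the nr_queued+2 in its comment comes from.

    /* toy model of the raid10 barrier protocol; initialize the mutex
     * and condvar with PTHREAD_*_INITIALIZER or the *_init() calls */
    #include <pthread.h>

    #define RESYNC_DEPTH 32

    struct barrier_model {
            pthread_mutex_t lock;   /* stands in for conf->resync_lock */
            pthread_cond_t  wait;   /* stands in for conf->wait_barrier */
            int barrier;            /* background passes currently active */
            int nr_pending;         /* regular IO in flight */
            int nr_waiting;         /* regular IO blocked behind the barrier */
    };

    static void raise_barrier(struct barrier_model *c, int force)
    {
            pthread_mutex_lock(&c->lock);
            while (!force && c->nr_waiting)  /* give queued IO a chance first */
                    pthread_cond_wait(&c->wait, &c->lock);
            c->barrier++;                    /* from here, new regular IO blocks */
            while (c->nr_pending || c->barrier >= RESYNC_DEPTH)
                    pthread_cond_wait(&c->wait, &c->lock);
            pthread_mutex_unlock(&c->lock);
    }

    static void lower_barrier(struct barrier_model *c)
    {
            pthread_mutex_lock(&c->lock);
            c->barrier--;
            pthread_cond_broadcast(&c->wait);
            pthread_mutex_unlock(&c->lock);
    }

    static void wait_barrier(struct barrier_model *c)   /* regular IO entry */
    {
            pthread_mutex_lock(&c->lock);
            if (c->barrier) {
                    c->nr_waiting++;
                    while (c->barrier)
                            pthread_cond_wait(&c->wait, &c->lock);
                    c->nr_waiting--;
            }
            c->nr_pending++;
            pthread_mutex_unlock(&c->lock);
    }

    static void allow_barrier(struct barrier_model *c)  /* regular IO exit */
    {
            pthread_mutex_lock(&c->lock);
            c->nr_pending--;
            pthread_cond_broadcast(&c->wait);
            pthread_mutex_unlock(&c->lock);
    }
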
@@ -674,6 +746,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
674 int i; 746 int i;
675 int chunk_sects = conf->chunk_mask + 1; 747 int chunk_sects = conf->chunk_mask + 1;
676 const int rw = bio_data_dir(bio); 748 const int rw = bio_data_dir(bio);
749 struct bio_list bl;
750 unsigned long flags;
677 751
678 if (unlikely(bio_barrier(bio))) { 752 if (unlikely(bio_barrier(bio))) {
679 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 753 bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -719,10 +793,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
719 * thread has put up a bar for new requests. 793 * thread has put up a bar for new requests.
720 * Continue immediately if no resync is active currently. 794 * Continue immediately if no resync is active currently.
721 */ 795 */
722 spin_lock_irq(&conf->resync_lock); 796 wait_barrier(conf);
723 wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
724 conf->nr_pending++;
725 spin_unlock_irq(&conf->resync_lock);
726 797
727 disk_stat_inc(mddev->gendisk, ios[rw]); 798 disk_stat_inc(mddev->gendisk, ios[rw]);
728 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 799 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
@@ -734,6 +805,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
734 805
735 r10_bio->mddev = mddev; 806 r10_bio->mddev = mddev;
736 r10_bio->sector = bio->bi_sector; 807 r10_bio->sector = bio->bi_sector;
808 r10_bio->state = 0;
737 809
738 if (rw == READ) { 810 if (rw == READ) {
739 /* 811 /*
@@ -778,13 +850,16 @@ static int make_request(request_queue_t *q, struct bio * bio)
778 !test_bit(Faulty, &rdev->flags)) { 850 !test_bit(Faulty, &rdev->flags)) {
779 atomic_inc(&rdev->nr_pending); 851 atomic_inc(&rdev->nr_pending);
780 r10_bio->devs[i].bio = bio; 852 r10_bio->devs[i].bio = bio;
781 } else 853 } else {
782 r10_bio->devs[i].bio = NULL; 854 r10_bio->devs[i].bio = NULL;
855 set_bit(R10BIO_Degraded, &r10_bio->state);
856 }
783 } 857 }
784 rcu_read_unlock(); 858 rcu_read_unlock();
785 859
786 atomic_set(&r10_bio->remaining, 1); 860 atomic_set(&r10_bio->remaining, 0);
787 861
862 bio_list_init(&bl);
788 for (i = 0; i < conf->copies; i++) { 863 for (i = 0; i < conf->copies; i++) {
789 struct bio *mbio; 864 struct bio *mbio;
790 int d = r10_bio->devs[i].devnum; 865 int d = r10_bio->devs[i].devnum;
@@ -802,13 +877,14 @@ static int make_request(request_queue_t *q, struct bio * bio)
802 mbio->bi_private = r10_bio; 877 mbio->bi_private = r10_bio;
803 878
804 atomic_inc(&r10_bio->remaining); 879 atomic_inc(&r10_bio->remaining);
805 generic_make_request(mbio); 880 bio_list_add(&bl, mbio);
806 } 881 }
807 882
808 if (atomic_dec_and_test(&r10_bio->remaining)) { 883 bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
809 md_write_end(mddev); 884 spin_lock_irqsave(&conf->device_lock, flags);
810 raid_end_bio_io(r10_bio); 885 bio_list_merge(&conf->pending_bio_list, &bl);
811 } 886 blk_plug_device(mddev->queue);
887 spin_unlock_irqrestore(&conf->device_lock, flags);
812 888
813 return 0; 889 return 0;
814} 890}
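
The rewritten write path no longer calls generic_make_request() once per mirror; it parks the bios on a list and plugs the queue so the daemon can flush the bitmap intent bits once per batch. A hedged sketch of the producer half, with fake_bio and queue_write() as invented stand-ins for the kernel types:

    #include <pthread.h>
    #include <stddef.h>

    struct fake_bio { struct fake_bio *next; int dev; };

    static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fake_bio *pending_head;

    static void queue_write(struct fake_bio *b)
    {
            pthread_mutex_lock(&pending_lock);
            b->next = pending_head;   /* LIFO order is fine for the sketch */
            pending_head = b;
            /* the real code also plugs the queue here, so the batch is not
             * dispatched until the daemon (or an unplug timer) runs */
            pthread_mutex_unlock(&pending_lock);
    }
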
@@ -897,13 +973,8 @@ static void print_conf(conf_t *conf)
897 973
898static void close_sync(conf_t *conf) 974static void close_sync(conf_t *conf)
899{ 975{
900 spin_lock_irq(&conf->resync_lock); 976 wait_barrier(conf);
901 wait_event_lock_irq(conf->wait_resume, !conf->barrier, 977 allow_barrier(conf);
902 conf->resync_lock, unplug_slaves(conf->mddev));
903 spin_unlock_irq(&conf->resync_lock);
904
905 if (conf->barrier) BUG();
906 if (waitqueue_active(&conf->wait_idle)) BUG();
907 978
908 mempool_destroy(conf->r10buf_pool); 979 mempool_destroy(conf->r10buf_pool);
909 conf->r10buf_pool = NULL; 980 conf->r10buf_pool = NULL;
@@ -971,7 +1042,12 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
971 if (!enough(conf)) 1042 if (!enough(conf))
972 return 0; 1043 return 0;
973 1044
974 for (mirror=0; mirror < mddev->raid_disks; mirror++) 1045 if (rdev->saved_raid_disk >= 0 &&
1046 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1047 mirror = rdev->saved_raid_disk;
1048 else
1049 mirror = 0;
1050 for ( ; mirror < mddev->raid_disks; mirror++)
975 if ( !(p=conf->mirrors+mirror)->rdev) { 1051 if ( !(p=conf->mirrors+mirror)->rdev) {
976 1052
977 blk_queue_stack_limits(mddev->queue, 1053 blk_queue_stack_limits(mddev->queue,
@@ -987,6 +1063,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
987 p->head_position = 0; 1063 p->head_position = 0;
988 rdev->raid_disk = mirror; 1064 rdev->raid_disk = mirror;
989 found = 1; 1065 found = 1;
1066 if (rdev->saved_raid_disk != mirror)
1067 conf->fullsync = 1;
990 rcu_assign_pointer(p->rdev, rdev); 1068 rcu_assign_pointer(p->rdev, rdev);
991 break; 1069 break;
992 } 1070 }
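
The hot-add change prefers the slot a re-added device previously occupied, so the bitmap can drive a partial rather than a full resync. A sketch of the slot choice, with slot_free() as an invented predicate for conf->mirrors[m].rdev == NULL:

    int slot_free(int slot);   /* assumed: true when the slot has no rdev */

    static int pick_slot(int saved, int raid_disks)
    {
            int start = (saved >= 0 && slot_free(saved)) ? saved : 0;

            for (int m = start; m < raid_disks; m++)
                    if (slot_free(m))
                            return m;   /* saved slot wins when still free */
            return -1;                  /* no free slot */
    }
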
@@ -1027,7 +1105,6 @@ abort:
1027 1105
1028static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1106static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1029{ 1107{
1030 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1031 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1108 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1032 conf_t *conf = mddev_to_conf(r10_bio->mddev); 1109 conf_t *conf = mddev_to_conf(r10_bio->mddev);
1033 int i,d; 1110 int i,d;
@@ -1042,9 +1119,16 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1042 BUG(); 1119 BUG();
1043 update_head_pos(i, r10_bio); 1120 update_head_pos(i, r10_bio);
1044 d = r10_bio->devs[i].devnum; 1121 d = r10_bio->devs[i].devnum;
1045 if (!uptodate) 1122
1046 md_error(r10_bio->mddev, 1123 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1047 conf->mirrors[d].rdev); 1124 set_bit(R10BIO_Uptodate, &r10_bio->state);
1125 else {
1126 atomic_add(r10_bio->sectors,
1127 &conf->mirrors[d].rdev->corrected_errors);
1128 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1129 md_error(r10_bio->mddev,
1130 conf->mirrors[d].rdev);
1131 }
1048 1132
1049 /* for reconstruct, we always reschedule after a read. 1133 /* for reconstruct, we always reschedule after a read.
1050 * for resync, only after all reads 1134 * for resync, only after all reads
@@ -1132,23 +1216,32 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1132 fbio = r10_bio->devs[i].bio; 1216 fbio = r10_bio->devs[i].bio;
1133 1217
1134 /* now find blocks with errors */ 1218 /* now find blocks with errors */
1135 for (i=first+1 ; i < conf->copies ; i++) { 1219 for (i=0 ; i < conf->copies ; i++) {
1136 int vcnt, j, d; 1220 int j, d;
1221 int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1137 1222
1138 if (!test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1139 continue;
1140 /* We know that the bi_io_vec layout is the same for
1141 * both 'first' and 'i', so we just compare them.
1142 * All vec entries are PAGE_SIZE;
1143 */
1144 tbio = r10_bio->devs[i].bio; 1223 tbio = r10_bio->devs[i].bio;
1145 vcnt = r10_bio->sectors >> (PAGE_SHIFT-9); 1224
1146 for (j = 0; j < vcnt; j++) 1225 if (tbio->bi_end_io != end_sync_read)
1147 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page), 1226 continue;
1148 page_address(tbio->bi_io_vec[j].bv_page), 1227 if (i == first)
1149 PAGE_SIZE)) 1228 continue;
1150 break; 1229 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1151 if (j == vcnt) 1230 /* We know that the bi_io_vec layout is the same for
1231 * both 'first' and 'i', so we just compare them.
1232 * All vec entries are PAGE_SIZE;
1233 */
1234 for (j = 0; j < vcnt; j++)
1235 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1236 page_address(tbio->bi_io_vec[j].bv_page),
1237 PAGE_SIZE))
1238 break;
1239 if (j == vcnt)
1240 continue;
1241 mddev->resync_mismatches += r10_bio->sectors;
1242 }
1243 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1244 /* Don't fix anything. */
1152 continue; 1245 continue;
1153 /* Ok, we need to write this bio 1246 /* Ok, we need to write this bio
1154 * First we need to fixup bv_offset, bv_len and 1247 * First we need to fixup bv_offset, bv_len and
@@ -1227,7 +1320,10 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1227 1320
1228 atomic_inc(&conf->mirrors[d].rdev->nr_pending); 1321 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1229 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9); 1322 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1230 generic_make_request(wbio); 1323 if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1324 generic_make_request(wbio);
1325 else
1326 bio_endio(wbio, wbio->bi_size, -EIO);
1231} 1327}
1232 1328
1233 1329
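
The check/repair pass above compares each readable copy against the first good one page by page, bumps the mismatch counter once per window, and suppresses the corrective write when only a check pass (MD_RECOVERY_CHECK) was requested. A buffer-level model of that decision, with plain arrays standing in for the bio page vectors:

    #include <string.h>

    #define PAGES 4
    #define PAGE_SZ 4096

    static long mismatches;   /* stands in for mddev->resync_mismatches */

    static int needs_rewrite(char first[][PAGE_SZ], char other[][PAGE_SZ],
                             int check_only)
    {
            int j;

            for (j = 0; j < PAGES; j++)
                    if (memcmp(first[j], other[j], PAGE_SZ))
                            break;
            if (j == PAGES)
                    return 0;            /* identical: nothing to do */
            mismatches += PAGES;         /* count once per window */
            return check_only ? 0 : 1;   /* check pass: report, don't fix */
    }
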
@@ -1254,10 +1350,31 @@ static void raid10d(mddev_t *mddev)
1254 for (;;) { 1350 for (;;) {
1255 char b[BDEVNAME_SIZE]; 1351 char b[BDEVNAME_SIZE];
1256 spin_lock_irqsave(&conf->device_lock, flags); 1352 spin_lock_irqsave(&conf->device_lock, flags);
1353
1354 if (conf->pending_bio_list.head) {
1355 bio = bio_list_get(&conf->pending_bio_list);
1356 blk_remove_plug(mddev->queue);
1357 spin_unlock_irqrestore(&conf->device_lock, flags);
1358 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1359 if (bitmap_unplug(mddev->bitmap) != 0)
1360 printk("%s: bitmap file write failed!\n", mdname(mddev));
1361
1362 while (bio) { /* submit pending writes */
1363 struct bio *next = bio->bi_next;
1364 bio->bi_next = NULL;
1365 generic_make_request(bio);
1366 bio = next;
1367 }
1368 unplug = 1;
1369
1370 continue;
1371 }
1372
1257 if (list_empty(head)) 1373 if (list_empty(head))
1258 break; 1374 break;
1259 r10_bio = list_entry(head->prev, r10bio_t, retry_list); 1375 r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1260 list_del(head->prev); 1376 list_del(head->prev);
1377 conf->nr_queued--;
1261 spin_unlock_irqrestore(&conf->device_lock, flags); 1378 spin_unlock_irqrestore(&conf->device_lock, flags);
1262 1379
1263 mddev = r10_bio->mddev; 1380 mddev = r10_bio->mddev;
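
Continuing the write-path sketch: the daemon drains the parked list in one step, persists the bitmap first, then submits the whole batch. flush_bitmap() and submit() are stubs for bitmap_unplug() and generic_make_request():

    static void flush_bitmap(void) { /* persist intent bits; stub */ }
    static void submit(struct fake_bio *b) { (void)b; /* dispatch; stub */ }

    static void flush_pending_writes(void)
    {
            struct fake_bio *bio;

            pthread_mutex_lock(&pending_lock);
            bio = pending_head;       /* take the whole batch at once */
            pending_head = NULL;
            pthread_mutex_unlock(&pending_lock);

            if (bio)
                    flush_bitmap();   /* intent bits must hit disk first */
            while (bio) {
                    struct fake_bio *next = bio->next;
                    bio->next = NULL;
                    submit(bio);
                    bio = next;
            }
    }
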
@@ -1270,8 +1387,96 @@ static void raid10d(mddev_t *mddev)
1270 unplug = 1; 1387 unplug = 1;
1271 } else { 1388 } else {
1272 int mirror; 1389 int mirror;
 1390 /* We got a read error. Maybe the drive is bad, or maybe just
 1391 * this block, in which case we can fix it.
1392 * We freeze all other IO, and try reading the block from
1393 * other devices. When we find one, we re-write
 1394 * and check that this fixes the read error.
1395 * This is all done synchronously while the array is
1396 * frozen.
1397 */
1398 int sect = 0; /* Offset from r10_bio->sector */
1399 int sectors = r10_bio->sectors;
1400 freeze_array(conf);
1401 if (mddev->ro == 0) while(sectors) {
1402 int s = sectors;
1403 int sl = r10_bio->read_slot;
1404 int success = 0;
1405
1406 if (s > (PAGE_SIZE>>9))
1407 s = PAGE_SIZE >> 9;
1408
1409 do {
1410 int d = r10_bio->devs[sl].devnum;
1411 rdev = conf->mirrors[d].rdev;
1412 if (rdev &&
1413 test_bit(In_sync, &rdev->flags) &&
1414 sync_page_io(rdev->bdev,
1415 r10_bio->devs[sl].addr +
1416 sect + rdev->data_offset,
1417 s<<9,
1418 conf->tmppage, READ))
1419 success = 1;
1420 else {
1421 sl++;
1422 if (sl == conf->copies)
1423 sl = 0;
1424 }
1425 } while (!success && sl != r10_bio->read_slot);
1426
1427 if (success) {
1428 int start = sl;
1429 /* write it back and re-read */
1430 while (sl != r10_bio->read_slot) {
1431 int d;
1432 if (sl==0)
1433 sl = conf->copies;
1434 sl--;
1435 d = r10_bio->devs[sl].devnum;
1436 rdev = conf->mirrors[d].rdev;
1437 atomic_add(s, &rdev->corrected_errors);
1438 if (rdev &&
1439 test_bit(In_sync, &rdev->flags)) {
1440 if (sync_page_io(rdev->bdev,
1441 r10_bio->devs[sl].addr +
1442 sect + rdev->data_offset,
1443 s<<9, conf->tmppage, WRITE) == 0)
1444 /* Well, this device is dead */
1445 md_error(mddev, rdev);
1446 }
1447 }
1448 sl = start;
1449 while (sl != r10_bio->read_slot) {
1450 int d;
1451 if (sl==0)
1452 sl = conf->copies;
1453 sl--;
1454 d = r10_bio->devs[sl].devnum;
1455 rdev = conf->mirrors[d].rdev;
1456 if (rdev &&
1457 test_bit(In_sync, &rdev->flags)) {
1458 if (sync_page_io(rdev->bdev,
1459 r10_bio->devs[sl].addr +
1460 sect + rdev->data_offset,
1461 s<<9, conf->tmppage, READ) == 0)
1462 /* Well, this device is dead */
1463 md_error(mddev, rdev);
1464 }
1465 }
1466 } else {
1467 /* Cannot read from anywhere -- bye bye array */
1468 md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
1469 break;
1470 }
1471 sectors -= s;
1472 sect += s;
1473 }
1474
1475 unfreeze_array(conf);
1476
1273 bio = r10_bio->devs[r10_bio->read_slot].bio; 1477 bio = r10_bio->devs[r10_bio->read_slot].bio;
1274 r10_bio->devs[r10_bio->read_slot].bio = NULL; 1478 r10_bio->devs[r10_bio->read_slot].bio =
1479 mddev->ro ? IO_BLOCKED : NULL;
1275 bio_put(bio); 1480 bio_put(bio);
1276 mirror = read_balance(conf, r10_bio); 1481 mirror = read_balance(conf, r10_bio);
1277 if (mirror == -1) { 1482 if (mirror == -1) {
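
The repair loop above probes the remaining copies round-robin starting at the failed slot, then walks backwards rewriting and re-reading each stale copy. A compact model of the slot arithmetic, with read_copy()/write_copy() as invented stand-ins for sync_page_io():

    #define COPIES 2

    int read_copy(int slot, unsigned char *buf);         /* 1 on success */
    int write_copy(int slot, const unsigned char *buf);  /* 1 on success */

    static int repair_block(int read_slot, unsigned char *buf)
    {
            int sl = read_slot;
            int success = 0;

            do {          /* probe copies in order from the failed slot */
                    if (read_copy(sl, buf)) {
                            success = 1;
                            break;
                    }
                    sl = (sl + 1) % COPIES;
            } while (sl != read_slot);

            if (!success)
                    return -1;   /* nothing readable: the array is lost here */

            int start = sl;
            while (sl != read_slot) {   /* write pass, back toward the failed slot */
                    if (sl == 0)
                            sl = COPIES;
                    sl--;
                    write_copy(sl, buf);
            }
            sl = start;
            while (sl != read_slot) {   /* verify pass over the same slots */
                    if (sl == 0)
                            sl = COPIES;
                    sl--;
                    read_copy(sl, buf);
            }
            return 0;
    }
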
@@ -1360,6 +1565,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1360 sector_t max_sector, nr_sectors; 1565 sector_t max_sector, nr_sectors;
1361 int disk; 1566 int disk;
1362 int i; 1567 int i;
1568 int max_sync;
1569 int sync_blocks;
1363 1570
1364 sector_t sectors_skipped = 0; 1571 sector_t sectors_skipped = 0;
1365 int chunks_skipped = 0; 1572 int chunks_skipped = 0;
@@ -1373,6 +1580,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1373 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 1580 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1374 max_sector = mddev->resync_max_sectors; 1581 max_sector = mddev->resync_max_sectors;
1375 if (sector_nr >= max_sector) { 1582 if (sector_nr >= max_sector) {
1583 /* If we aborted, we need to abort the
 1584 * sync on the 'current' bitmap chunks (there can
 1585 * be several when recovering multiple devices),
 1586 * as we may have started syncing them but not finished.
1587 * We can find the current address in
1588 * mddev->curr_resync, but for recovery,
1589 * we need to convert that to several
1590 * virtual addresses.
1591 */
1592 if (mddev->curr_resync < max_sector) { /* aborted */
1593 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1594 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1595 &sync_blocks, 1);
1596 else for (i=0; i<conf->raid_disks; i++) {
1597 sector_t sect =
1598 raid10_find_virt(conf, mddev->curr_resync, i);
1599 bitmap_end_sync(mddev->bitmap, sect,
1600 &sync_blocks, 1);
1601 }
1602 } else /* completed sync */
1603 conf->fullsync = 0;
1604
1605 bitmap_close_sync(mddev->bitmap);
1376 close_sync(conf); 1606 close_sync(conf);
1377 *skipped = 1; 1607 *skipped = 1;
1378 return sectors_skipped; 1608 return sectors_skipped;
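
The skip decision driving this bookkeeping reduces to one predicate: resync a window only if its intent bit is set, the array is degraded, or a full sync was requested. A toy model, with a bool array standing in for the md bitmap:

    #include <stdbool.h>

    #define CHUNK_SECTORS 128   /* assumed bitmap granularity */

    static bool dirty[1024];    /* one intent bit per chunk (toy bound) */
    static bool fullsync;       /* stands in for conf->fullsync */

    static bool must_sync(unsigned long long sector, bool degraded)
    {
            if (degraded || fullsync)   /* no copy can be trusted: sync it all */
                    return true;
            return dirty[sector / CHUNK_SECTORS];
    }
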
@@ -1395,9 +1625,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1395 * If there is non-resync activity waiting for us then 1625 * If there is non-resync activity waiting for us then
1396 * put in a delay to throttle resync. 1626 * put in a delay to throttle resync.
1397 */ 1627 */
1398 if (!go_faster && waitqueue_active(&conf->wait_resume)) 1628 if (!go_faster && conf->nr_waiting)
1399 msleep_interruptible(1000); 1629 msleep_interruptible(1000);
1400 device_barrier(conf, sector_nr + RESYNC_SECTORS);
1401 1630
1402 /* Again, very different code for resync and recovery. 1631 /* Again, very different code for resync and recovery.
1403 * Both must result in an r10bio with a list of bios that 1632 * Both must result in an r10bio with a list of bios that
@@ -1414,6 +1643,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1414 * end_sync_write if we will want to write. 1643 * end_sync_write if we will want to write.
1415 */ 1644 */
1416 1645
1646 max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1417 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 1647 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1418 /* recovery... the complicated one */ 1648 /* recovery... the complicated one */
1419 int i, j, k; 1649 int i, j, k;
@@ -1422,14 +1652,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1422 for (i=0 ; i<conf->raid_disks; i++) 1652 for (i=0 ; i<conf->raid_disks; i++)
1423 if (conf->mirrors[i].rdev && 1653 if (conf->mirrors[i].rdev &&
1424 !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) { 1654 !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
1655 int still_degraded = 0;
1425 /* want to reconstruct this device */ 1656 /* want to reconstruct this device */
1426 r10bio_t *rb2 = r10_bio; 1657 r10bio_t *rb2 = r10_bio;
1658 sector_t sect = raid10_find_virt(conf, sector_nr, i);
1659 int must_sync;
1660 /* Unless we are doing a full sync, we only need
1661 * to recover the block if it is set in the bitmap
1662 */
1663 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1664 &sync_blocks, 1);
1665 if (sync_blocks < max_sync)
1666 max_sync = sync_blocks;
1667 if (!must_sync &&
1668 !conf->fullsync) {
1669 /* yep, skip the sync_blocks here, but don't assume
1670 * that there will never be anything to do here
1671 */
1672 chunks_skipped = -1;
1673 continue;
1674 }
1427 1675
1428 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 1676 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1429 spin_lock_irq(&conf->resync_lock); 1677 raise_barrier(conf, rb2 != NULL);
1430 conf->nr_pending++;
1431 if (rb2) conf->barrier++;
1432 spin_unlock_irq(&conf->resync_lock);
1433 atomic_set(&r10_bio->remaining, 0); 1678 atomic_set(&r10_bio->remaining, 0);
1434 1679
1435 r10_bio->master_bio = (struct bio*)rb2; 1680 r10_bio->master_bio = (struct bio*)rb2;
@@ -1437,8 +1682,23 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1437 atomic_inc(&rb2->remaining); 1682 atomic_inc(&rb2->remaining);
1438 r10_bio->mddev = mddev; 1683 r10_bio->mddev = mddev;
1439 set_bit(R10BIO_IsRecover, &r10_bio->state); 1684 set_bit(R10BIO_IsRecover, &r10_bio->state);
1440 r10_bio->sector = raid10_find_virt(conf, sector_nr, i); 1685 r10_bio->sector = sect;
1686
1441 raid10_find_phys(conf, r10_bio); 1687 raid10_find_phys(conf, r10_bio);
1688 /* Need to check if this section will still be
1689 * degraded
1690 */
1691 for (j=0; j<conf->copies;j++) {
1692 int d = r10_bio->devs[j].devnum;
1693 if (conf->mirrors[d].rdev == NULL ||
1694 test_bit(Faulty, &conf->mirrors[d].rdev->flags)) {
1695 still_degraded = 1;
1696 break;
1697 }
1698 }
1699 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1700 &sync_blocks, still_degraded);
1701
1442 for (j=0; j<conf->copies;j++) { 1702 for (j=0; j<conf->copies;j++) {
1443 int d = r10_bio->devs[j].devnum; 1703 int d = r10_bio->devs[j].devnum;
1444 if (conf->mirrors[d].rdev && 1704 if (conf->mirrors[d].rdev &&
@@ -1498,14 +1758,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1498 } else { 1758 } else {
1499 /* resync. Schedule a read for every block at this virt offset */ 1759 /* resync. Schedule a read for every block at this virt offset */
1500 int count = 0; 1760 int count = 0;
1501 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1502 1761
1503 spin_lock_irq(&conf->resync_lock); 1762 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1504 conf->nr_pending++; 1763 &sync_blocks, mddev->degraded) &&
1505 spin_unlock_irq(&conf->resync_lock); 1764 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1765 /* We can skip this block */
1766 *skipped = 1;
1767 return sync_blocks + sectors_skipped;
1768 }
1769 if (sync_blocks < max_sync)
1770 max_sync = sync_blocks;
1771 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1506 1772
1507 r10_bio->mddev = mddev; 1773 r10_bio->mddev = mddev;
1508 atomic_set(&r10_bio->remaining, 0); 1774 atomic_set(&r10_bio->remaining, 0);
1775 raise_barrier(conf, 0);
1776 conf->next_resync = sector_nr;
1509 1777
1510 r10_bio->master_bio = NULL; 1778 r10_bio->master_bio = NULL;
1511 r10_bio->sector = sector_nr; 1779 r10_bio->sector = sector_nr;
@@ -1558,6 +1826,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1558 } 1826 }
1559 1827
1560 nr_sectors = 0; 1828 nr_sectors = 0;
1829 if (sector_nr + max_sync < max_sector)
1830 max_sector = sector_nr + max_sync;
1561 do { 1831 do {
1562 struct page *page; 1832 struct page *page;
1563 int len = PAGE_SIZE; 1833 int len = PAGE_SIZE;
@@ -1632,11 +1902,11 @@ static int run(mddev_t *mddev)
1632 int nc, fc; 1902 int nc, fc;
1633 sector_t stride, size; 1903 sector_t stride, size;
1634 1904
1635 if (mddev->level != 10) { 1905 if (mddev->chunk_size == 0) {
1636 printk(KERN_ERR "raid10: %s: raid level not set correctly... (%d)\n", 1906 printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
1637 mdname(mddev), mddev->level); 1907 return -EINVAL;
1638 goto out;
1639 } 1908 }
1909
1640 nc = mddev->layout & 255; 1910 nc = mddev->layout & 255;
1641 fc = (mddev->layout >> 8) & 255; 1911 fc = (mddev->layout >> 8) & 255;
1642 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks || 1912 if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
@@ -1650,22 +1920,24 @@ static int run(mddev_t *mddev)
1650 * bookkeeping area. [whatever we allocate in run(), 1920 * bookkeeping area. [whatever we allocate in run(),
1651 * should be freed in stop()] 1921 * should be freed in stop()]
1652 */ 1922 */
1653 conf = kmalloc(sizeof(conf_t), GFP_KERNEL); 1923 conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
1654 mddev->private = conf; 1924 mddev->private = conf;
1655 if (!conf) { 1925 if (!conf) {
1656 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", 1926 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1657 mdname(mddev)); 1927 mdname(mddev));
1658 goto out; 1928 goto out;
1659 } 1929 }
1660 memset(conf, 0, sizeof(*conf)); 1930 conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1661 conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks,
1662 GFP_KERNEL); 1931 GFP_KERNEL);
1663 if (!conf->mirrors) { 1932 if (!conf->mirrors) {
1664 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n", 1933 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
1665 mdname(mddev)); 1934 mdname(mddev));
1666 goto out_free_conf; 1935 goto out_free_conf;
1667 } 1936 }
1668 memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks); 1937
1938 conf->tmppage = alloc_page(GFP_KERNEL);
1939 if (!conf->tmppage)
1940 goto out_free_conf;
1669 1941
1670 conf->near_copies = nc; 1942 conf->near_copies = nc;
1671 conf->far_copies = fc; 1943 conf->far_copies = fc;
@@ -1713,8 +1985,7 @@ static int run(mddev_t *mddev)
1713 INIT_LIST_HEAD(&conf->retry_list); 1985 INIT_LIST_HEAD(&conf->retry_list);
1714 1986
1715 spin_lock_init(&conf->resync_lock); 1987 spin_lock_init(&conf->resync_lock);
1716 init_waitqueue_head(&conf->wait_idle); 1988 init_waitqueue_head(&conf->wait_barrier);
1717 init_waitqueue_head(&conf->wait_resume);
1718 1989
1719 /* need to check that every block has at least one working mirror */ 1990 /* need to check that every block has at least one working mirror */
1720 if (!enough(conf)) { 1991 if (!enough(conf)) {
@@ -1763,7 +2034,7 @@ static int run(mddev_t *mddev)
1763 * maybe... 2034 * maybe...
1764 */ 2035 */
1765 { 2036 {
1766 int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE; 2037 int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
1767 stripe /= conf->near_copies; 2038 stripe /= conf->near_copies;
1768 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) 2039 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
1769 mddev->queue->backing_dev_info.ra_pages = 2* stripe; 2040 mddev->queue->backing_dev_info.ra_pages = 2* stripe;
@@ -1776,6 +2047,7 @@ static int run(mddev_t *mddev)
1776out_free_conf: 2047out_free_conf:
1777 if (conf->r10bio_pool) 2048 if (conf->r10bio_pool)
1778 mempool_destroy(conf->r10bio_pool); 2049 mempool_destroy(conf->r10bio_pool);
2050 safe_put_page(conf->tmppage);
1779 kfree(conf->mirrors); 2051 kfree(conf->mirrors);
1780 kfree(conf); 2052 kfree(conf);
1781 mddev->private = NULL; 2053 mddev->private = NULL;
@@ -1798,10 +2070,31 @@ static int stop(mddev_t *mddev)
1798 return 0; 2070 return 0;
1799} 2071}
1800 2072
2073static void raid10_quiesce(mddev_t *mddev, int state)
2074{
2075 conf_t *conf = mddev_to_conf(mddev);
2076
2077 switch(state) {
2078 case 1:
2079 raise_barrier(conf, 0);
2080 break;
2081 case 0:
2082 lower_barrier(conf);
2083 break;
2084 }
2085 if (mddev->thread) {
2086 if (mddev->bitmap)
2087 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2088 else
2089 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2090 md_wakeup_thread(mddev->thread);
2091 }
2092}
1801 2093
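
One plausible way the new hook is driven (illustrative only; the wrapper below is invented, not an md function): state 1 drains and blocks regular IO via raise_barrier(), state 0 releases it again:

    typedef struct mddev mddev;   /* opaque stand-in for mddev_t */
    void raid10_quiesce_hook(mddev *m, int state);   /* as defined above */

    static void with_array_quiesced(mddev *m, void (*work)(mddev *))
    {
            raid10_quiesce_hook(m, 1);   /* raise barrier: drain regular IO */
            work(m);                     /* e.g. attach or resize the bitmap */
            raid10_quiesce_hook(m, 0);   /* lower barrier: let IO flow again */
    }
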
1802static mdk_personality_t raid10_personality = 2094static struct mdk_personality raid10_personality =
1803{ 2095{
1804 .name = "raid10", 2096 .name = "raid10",
2097 .level = 10,
1805 .owner = THIS_MODULE, 2098 .owner = THIS_MODULE,
1806 .make_request = make_request, 2099 .make_request = make_request,
1807 .run = run, 2100 .run = run,
@@ -1812,19 +2105,22 @@ static mdk_personality_t raid10_personality =
1812 .hot_remove_disk= raid10_remove_disk, 2105 .hot_remove_disk= raid10_remove_disk,
1813 .spare_active = raid10_spare_active, 2106 .spare_active = raid10_spare_active,
1814 .sync_request = sync_request, 2107 .sync_request = sync_request,
2108 .quiesce = raid10_quiesce,
1815}; 2109};
1816 2110
1817static int __init raid_init(void) 2111static int __init raid_init(void)
1818{ 2112{
1819 return register_md_personality(RAID10, &raid10_personality); 2113 return register_md_personality(&raid10_personality);
1820} 2114}
1821 2115
1822static void raid_exit(void) 2116static void raid_exit(void)
1823{ 2117{
1824 unregister_md_personality(RAID10); 2118 unregister_md_personality(&raid10_personality);
1825} 2119}
1826 2120
1827module_init(raid_init); 2121module_init(raid_init);
1828module_exit(raid_exit); 2122module_exit(raid_exit);
1829MODULE_LICENSE("GPL"); 2123MODULE_LICENSE("GPL");
1830MODULE_ALIAS("md-personality-9"); /* RAID10 */ 2124MODULE_ALIAS("md-personality-9"); /* RAID10 */
2125MODULE_ALIAS("md-raid10");
2126MODULE_ALIAS("md-level-10");
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fafc4bc045f7..54f4a9847e38 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -35,12 +35,10 @@
35#define STRIPE_SHIFT (PAGE_SHIFT - 9) 35#define STRIPE_SHIFT (PAGE_SHIFT - 9)
36#define STRIPE_SECTORS (STRIPE_SIZE>>9) 36#define STRIPE_SECTORS (STRIPE_SIZE>>9)
37#define IO_THRESHOLD 1 37#define IO_THRESHOLD 1
38#define HASH_PAGES 1 38#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
39#define HASH_PAGES_ORDER 0
40#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
41#define HASH_MASK (NR_HASH - 1) 39#define HASH_MASK (NR_HASH - 1)
42 40
43#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) 41#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
44 42
45/* bio's attached to a stripe+device for I/O are linked together in bi_sector 43/* bio's attached to a stripe+device for I/O are linked together in bi_sector
46 * order without overlap. There may be several bio's per stripe+device, and 44 * order without overlap. There may be several bio's per stripe+device, and
@@ -113,29 +111,21 @@ static void release_stripe(struct stripe_head *sh)
113 spin_unlock_irqrestore(&conf->device_lock, flags); 111 spin_unlock_irqrestore(&conf->device_lock, flags);
114} 112}
115 113
116static void remove_hash(struct stripe_head *sh) 114static inline void remove_hash(struct stripe_head *sh)
117{ 115{
118 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); 116 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
119 117
120 if (sh->hash_pprev) { 118 hlist_del_init(&sh->hash);
121 if (sh->hash_next)
122 sh->hash_next->hash_pprev = sh->hash_pprev;
123 *sh->hash_pprev = sh->hash_next;
124 sh->hash_pprev = NULL;
125 }
126} 119}
127 120
128static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 121static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
129{ 122{
130 struct stripe_head **shp = &stripe_hash(conf, sh->sector); 123 struct hlist_head *hp = stripe_hash(conf, sh->sector);
131 124
132 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); 125 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
133 126
134 CHECK_DEVLOCK(); 127 CHECK_DEVLOCK();
135 if ((sh->hash_next = *shp) != NULL) 128 hlist_add_head(&sh->hash, hp);
136 (*shp)->hash_pprev = &sh->hash_next;
137 *shp = sh;
138 sh->hash_pprev = shp;
139} 129}
140 130
141 131
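
The hlist conversion replaces the hand-rolled hash_next/hash_pprev chaining with the kernel's generic intrusive list. A stand-alone model of what hlist_add_head()/hlist_del_init() do under the hood; the names echo the kernel's but this is portable C:

    #include <stddef.h>

    struct hnode { struct hnode *next, **pprev; };
    struct hhead { struct hnode *first; };

    static void hadd_head(struct hnode *n, struct hhead *h)
    {
            n->next = h->first;
            if (h->first)
                    h->first->pprev = &n->next;
            h->first = n;
            n->pprev = &h->first;
    }

    static void hdel_init(struct hnode *n)
    {
            if (n->pprev) {   /* unhashed nodes keep pprev == NULL */
                    *n->pprev = n->next;
                    if (n->next)
                            n->next->pprev = n->pprev;
                    n->next = NULL;
                    n->pprev = NULL;
            }
    }

    static struct hnode *hfind(struct hhead *h, int (*match)(struct hnode *))
    {
            for (struct hnode *n = h->first; n; n = n->next)
                    if (match(n))
                            return n;
            return NULL;
    }
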
@@ -167,7 +157,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
167 if (!p) 157 if (!p)
168 continue; 158 continue;
169 sh->dev[i].page = NULL; 159 sh->dev[i].page = NULL;
170 page_cache_release(p); 160 put_page(p);
171 } 161 }
172} 162}
173 163
@@ -228,10 +218,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
228static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector) 218static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
229{ 219{
230 struct stripe_head *sh; 220 struct stripe_head *sh;
221 struct hlist_node *hn;
231 222
232 CHECK_DEVLOCK(); 223 CHECK_DEVLOCK();
233 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 224 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
234 for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) 225 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
235 if (sh->sector == sector) 226 if (sh->sector == sector)
236 return sh; 227 return sh;
237 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 228 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -417,7 +408,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
417 set_bit(R5_UPTODATE, &sh->dev[i].flags); 408 set_bit(R5_UPTODATE, &sh->dev[i].flags);
418#endif 409#endif
419 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 410 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
420 printk("R5: read error corrected!!\n"); 411 printk(KERN_INFO "raid5: read error corrected!!\n");
421 clear_bit(R5_ReadError, &sh->dev[i].flags); 412 clear_bit(R5_ReadError, &sh->dev[i].flags);
422 clear_bit(R5_ReWrite, &sh->dev[i].flags); 413 clear_bit(R5_ReWrite, &sh->dev[i].flags);
423 } 414 }
@@ -428,13 +419,14 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
428 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 419 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
429 atomic_inc(&conf->disks[i].rdev->read_errors); 420 atomic_inc(&conf->disks[i].rdev->read_errors);
430 if (conf->mddev->degraded) 421 if (conf->mddev->degraded)
431 printk("R5: read error not correctable.\n"); 422 printk(KERN_WARNING "raid5: read error not correctable.\n");
432 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 423 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
433 /* Oh, no!!! */ 424 /* Oh, no!!! */
434 printk("R5: read error NOT corrected!!\n"); 425 printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
435 else if (atomic_read(&conf->disks[i].rdev->read_errors) 426 else if (atomic_read(&conf->disks[i].rdev->read_errors)
436 > conf->max_nr_stripes) 427 > conf->max_nr_stripes)
437 printk("raid5: Too many read errors, failing device.\n"); 428 printk(KERN_WARNING
429 "raid5: Too many read errors, failing device.\n");
438 else 430 else
439 retry = 1; 431 retry = 1;
440 if (retry) 432 if (retry)
@@ -604,7 +596,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
604 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 596 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
605 break; 597 break;
606 default: 598 default:
607 printk("raid5: unsupported algorithm %d\n", 599 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
608 conf->algorithm); 600 conf->algorithm);
609 } 601 }
610 602
@@ -645,7 +637,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
645 i -= (sh->pd_idx + 1); 637 i -= (sh->pd_idx + 1);
646 break; 638 break;
647 default: 639 default:
648 printk("raid5: unsupported algorithm %d\n", 640 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
649 conf->algorithm); 641 conf->algorithm);
650 } 642 }
651 643
@@ -654,7 +646,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
654 646
655 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 647 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
656 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 648 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
657 printk("compute_blocknr: map not correct\n"); 649 printk(KERN_ERR "compute_blocknr: map not correct\n");
658 return 0; 650 return 0;
659 } 651 }
660 return r_sector; 652 return r_sector;
@@ -737,7 +729,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
737 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 729 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
738 ptr[count++] = p; 730 ptr[count++] = p;
739 else 731 else
740 printk("compute_block() %d, stripe %llu, %d" 732 printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
741 " not present\n", dd_idx, 733 " not present\n", dd_idx,
742 (unsigned long long)sh->sector, i); 734 (unsigned long long)sh->sector, i);
743 735
@@ -960,11 +952,11 @@ static void handle_stripe(struct stripe_head *sh)
960 syncing = test_bit(STRIPE_SYNCING, &sh->state); 952 syncing = test_bit(STRIPE_SYNCING, &sh->state);
961 /* Now to look around and see what can be done */ 953 /* Now to look around and see what can be done */
962 954
955 rcu_read_lock();
963 for (i=disks; i--; ) { 956 for (i=disks; i--; ) {
964 mdk_rdev_t *rdev; 957 mdk_rdev_t *rdev;
965 dev = &sh->dev[i]; 958 dev = &sh->dev[i];
966 clear_bit(R5_Insync, &dev->flags); 959 clear_bit(R5_Insync, &dev->flags);
967 clear_bit(R5_Syncio, &dev->flags);
968 960
969 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 961 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
970 i, dev->flags, dev->toread, dev->towrite, dev->written); 962 i, dev->flags, dev->toread, dev->towrite, dev->written);
@@ -1003,9 +995,9 @@ static void handle_stripe(struct stripe_head *sh)
1003 non_overwrite++; 995 non_overwrite++;
1004 } 996 }
1005 if (dev->written) written++; 997 if (dev->written) written++;
1006 rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ 998 rdev = rcu_dereference(conf->disks[i].rdev);
1007 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 999 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1008 /* The ReadError flag wil just be confusing now */ 1000 /* The ReadError flag will just be confusing now */
1009 clear_bit(R5_ReadError, &dev->flags); 1001 clear_bit(R5_ReadError, &dev->flags);
1010 clear_bit(R5_ReWrite, &dev->flags); 1002 clear_bit(R5_ReWrite, &dev->flags);
1011 } 1003 }
@@ -1016,6 +1008,7 @@ static void handle_stripe(struct stripe_head *sh)
1016 } else 1008 } else
1017 set_bit(R5_Insync, &dev->flags); 1009 set_bit(R5_Insync, &dev->flags);
1018 } 1010 }
1011 rcu_read_unlock();
1019 PRINTK("locked=%d uptodate=%d to_read=%d" 1012 PRINTK("locked=%d uptodate=%d to_read=%d"
1020 " to_write=%d failed=%d failed_num=%d\n", 1013 " to_write=%d failed=%d failed_num=%d\n",
1021 locked, uptodate, to_read, to_write, failed, failed_num); 1014 locked, uptodate, to_read, to_write, failed, failed_num);
@@ -1027,10 +1020,13 @@ static void handle_stripe(struct stripe_head *sh)
1027 int bitmap_end = 0; 1020 int bitmap_end = 0;
1028 1021
1029 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1022 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1030 mdk_rdev_t *rdev = conf->disks[i].rdev; 1023 mdk_rdev_t *rdev;
1024 rcu_read_lock();
1025 rdev = rcu_dereference(conf->disks[i].rdev);
1031 if (rdev && test_bit(In_sync, &rdev->flags)) 1026 if (rdev && test_bit(In_sync, &rdev->flags))
1032 /* multiple read failures in one stripe */ 1027 /* multiple read failures in one stripe */
1033 md_error(conf->mddev, rdev); 1028 md_error(conf->mddev, rdev);
1029 rcu_read_unlock();
1034 } 1030 }
1035 1031
1036 spin_lock_irq(&conf->device_lock); 1032 spin_lock_irq(&conf->device_lock);
@@ -1179,9 +1175,6 @@ static void handle_stripe(struct stripe_head *sh)
1179 locked++; 1175 locked++;
1180 PRINTK("Reading block %d (sync=%d)\n", 1176 PRINTK("Reading block %d (sync=%d)\n",
1181 i, syncing); 1177 i, syncing);
1182 if (syncing)
1183 md_sync_acct(conf->disks[i].rdev->bdev,
1184 STRIPE_SECTORS);
1185 } 1178 }
1186 } 1179 }
1187 } 1180 }
@@ -1288,7 +1281,7 @@ static void handle_stripe(struct stripe_head *sh)
1288 * is available 1281 * is available
1289 */ 1282 */
1290 if (syncing && locked == 0 && 1283 if (syncing && locked == 0 &&
1291 !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) { 1284 !test_bit(STRIPE_INSYNC, &sh->state)) {
1292 set_bit(STRIPE_HANDLE, &sh->state); 1285 set_bit(STRIPE_HANDLE, &sh->state);
1293 if (failed == 0) { 1286 if (failed == 0) {
1294 char *pagea; 1287 char *pagea;
@@ -1306,27 +1299,25 @@ static void handle_stripe(struct stripe_head *sh)
1306 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 1299 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1307 /* don't try to repair!! */ 1300 /* don't try to repair!! */
1308 set_bit(STRIPE_INSYNC, &sh->state); 1301 set_bit(STRIPE_INSYNC, &sh->state);
1302 else {
1303 compute_block(sh, sh->pd_idx);
1304 uptodate++;
1305 }
1309 } 1306 }
1310 } 1307 }
1311 if (!test_bit(STRIPE_INSYNC, &sh->state)) { 1308 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1309 /* either failed parity check, or recovery is happening */
1312 if (failed==0) 1310 if (failed==0)
1313 failed_num = sh->pd_idx; 1311 failed_num = sh->pd_idx;
1314 /* should be able to compute the missing block and write it to spare */
1315 if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
1316 if (uptodate+1 != disks)
1317 BUG();
1318 compute_block(sh, failed_num);
1319 uptodate++;
1320 }
1321 if (uptodate != disks)
1322 BUG();
1323 dev = &sh->dev[failed_num]; 1312 dev = &sh->dev[failed_num];
1313 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1314 BUG_ON(uptodate != disks);
1315
1324 set_bit(R5_LOCKED, &dev->flags); 1316 set_bit(R5_LOCKED, &dev->flags);
1325 set_bit(R5_Wantwrite, &dev->flags); 1317 set_bit(R5_Wantwrite, &dev->flags);
1326 clear_bit(STRIPE_DEGRADED, &sh->state); 1318 clear_bit(STRIPE_DEGRADED, &sh->state);
1327 locked++; 1319 locked++;
1328 set_bit(STRIPE_INSYNC, &sh->state); 1320 set_bit(STRIPE_INSYNC, &sh->state);
1329 set_bit(R5_Syncio, &dev->flags);
1330 } 1321 }
1331 } 1322 }
1332 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 1323 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
@@ -1392,7 +1383,7 @@ static void handle_stripe(struct stripe_head *sh)
1392 rcu_read_unlock(); 1383 rcu_read_unlock();
1393 1384
1394 if (rdev) { 1385 if (rdev) {
1395 if (test_bit(R5_Syncio, &sh->dev[i].flags)) 1386 if (syncing)
1396 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1387 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1397 1388
1398 bi->bi_bdev = rdev->bdev; 1389 bi->bi_bdev = rdev->bdev;
@@ -1409,6 +1400,9 @@ static void handle_stripe(struct stripe_head *sh)
1409 bi->bi_io_vec[0].bv_offset = 0; 1400 bi->bi_io_vec[0].bv_offset = 0;
1410 bi->bi_size = STRIPE_SIZE; 1401 bi->bi_size = STRIPE_SIZE;
1411 bi->bi_next = NULL; 1402 bi->bi_next = NULL;
1403 if (rw == WRITE &&
1404 test_bit(R5_ReWrite, &sh->dev[i].flags))
1405 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1412 generic_make_request(bi); 1406 generic_make_request(bi);
1413 } else { 1407 } else {
1414 if (rw == 1) 1408 if (rw == 1)
@@ -1822,21 +1816,21 @@ static int run(mddev_t *mddev)
1822 struct list_head *tmp; 1816 struct list_head *tmp;
1823 1817
1824 if (mddev->level != 5 && mddev->level != 4) { 1818 if (mddev->level != 5 && mddev->level != 4) {
1825 printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level); 1819 printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
1820 mdname(mddev), mddev->level);
1826 return -EIO; 1821 return -EIO;
1827 } 1822 }
1828 1823
1829 mddev->private = kmalloc (sizeof (raid5_conf_t) 1824 mddev->private = kzalloc(sizeof (raid5_conf_t)
1830 + mddev->raid_disks * sizeof(struct disk_info), 1825 + mddev->raid_disks * sizeof(struct disk_info),
1831 GFP_KERNEL); 1826 GFP_KERNEL);
1832 if ((conf = mddev->private) == NULL) 1827 if ((conf = mddev->private) == NULL)
1833 goto abort; 1828 goto abort;
1834 memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) ); 1829
1835 conf->mddev = mddev; 1830 conf->mddev = mddev;
1836 1831
1837 if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) 1832 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1838 goto abort; 1833 goto abort;
1839 memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
1840 1834
1841 spin_lock_init(&conf->device_lock); 1835 spin_lock_init(&conf->device_lock);
1842 init_waitqueue_head(&conf->wait_for_stripe); 1836 init_waitqueue_head(&conf->wait_for_stripe);
@@ -1903,10 +1897,17 @@ static int run(mddev_t *mddev)
1903 1897
1904 if (mddev->degraded == 1 && 1898 if (mddev->degraded == 1 &&
1905 mddev->recovery_cp != MaxSector) { 1899 mddev->recovery_cp != MaxSector) {
1906 printk(KERN_ERR 1900 if (mddev->ok_start_degraded)
1907 "raid5: cannot start dirty degraded array for %s\n", 1901 printk(KERN_WARNING
1908 mdname(mddev)); 1902 "raid5: starting dirty degraded array: %s"
1909 goto abort; 1903 "- data corruption possible.\n",
1904 mdname(mddev));
1905 else {
1906 printk(KERN_ERR
1907 "raid5: cannot start dirty degraded array for %s\n",
1908 mdname(mddev));
1909 goto abort;
1910 }
1910 } 1911 }
1911 1912
1912 { 1913 {
@@ -1948,7 +1949,7 @@ static int run(mddev_t *mddev)
1948 */ 1949 */
1949 { 1950 {
1950 int stripe = (mddev->raid_disks-1) * mddev->chunk_size 1951 int stripe = (mddev->raid_disks-1) * mddev->chunk_size
1951 / PAGE_CACHE_SIZE; 1952 / PAGE_SIZE;
1952 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 1953 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1953 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 1954 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1954 } 1955 }
@@ -1956,9 +1957,6 @@ static int run(mddev_t *mddev)
1956 /* Ok, everything is just fine now */ 1957 /* Ok, everything is just fine now */
1957 sysfs_create_group(&mddev->kobj, &raid5_attrs_group); 1958 sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
1958 1959
1959 if (mddev->bitmap)
1960 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1961
1962 mddev->queue->unplug_fn = raid5_unplug_device; 1960 mddev->queue->unplug_fn = raid5_unplug_device;
1963 mddev->queue->issue_flush_fn = raid5_issue_flush; 1961 mddev->queue->issue_flush_fn = raid5_issue_flush;
1964 1962
@@ -1967,9 +1965,7 @@ static int run(mddev_t *mddev)
1967abort: 1965abort:
1968 if (conf) { 1966 if (conf) {
1969 print_raid5_conf(conf); 1967 print_raid5_conf(conf);
1970 if (conf->stripe_hashtbl) 1968 kfree(conf->stripe_hashtbl);
1971 free_pages((unsigned long) conf->stripe_hashtbl,
1972 HASH_PAGES_ORDER);
1973 kfree(conf); 1969 kfree(conf);
1974 } 1970 }
1975 mddev->private = NULL; 1971 mddev->private = NULL;
@@ -1986,7 +1982,7 @@ static int stop(mddev_t *mddev)
1986 md_unregister_thread(mddev->thread); 1982 md_unregister_thread(mddev->thread);
1987 mddev->thread = NULL; 1983 mddev->thread = NULL;
1988 shrink_stripes(conf); 1984 shrink_stripes(conf);
1989 free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); 1985 kfree(conf->stripe_hashtbl);
1990 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 1986 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
1991 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 1987 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
1992 kfree(conf); 1988 kfree(conf);
@@ -2014,12 +2010,12 @@ static void print_sh (struct stripe_head *sh)
2014static void printall (raid5_conf_t *conf) 2010static void printall (raid5_conf_t *conf)
2015{ 2011{
2016 struct stripe_head *sh; 2012 struct stripe_head *sh;
2013 struct hlist_node *hn;
2017 int i; 2014 int i;
2018 2015
2019 spin_lock_irq(&conf->device_lock); 2016 spin_lock_irq(&conf->device_lock);
2020 for (i = 0; i < NR_HASH; i++) { 2017 for (i = 0; i < NR_HASH; i++) {
2021 sh = conf->stripe_hashtbl[i]; 2018 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2022 for (; sh; sh = sh->hash_next) {
2023 if (sh->raid_conf != conf) 2019 if (sh->raid_conf != conf)
2024 continue; 2020 continue;
2025 print_sh(sh); 2021 print_sh(sh);
@@ -2192,17 +2188,12 @@ static void raid5_quiesce(mddev_t *mddev, int state)
2192 spin_unlock_irq(&conf->device_lock); 2188 spin_unlock_irq(&conf->device_lock);
2193 break; 2189 break;
2194 } 2190 }
2195 if (mddev->thread) {
2196 if (mddev->bitmap)
2197 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2198 else
2199 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2200 md_wakeup_thread(mddev->thread);
2201 }
2202} 2191}
2203static mdk_personality_t raid5_personality= 2192
2193static struct mdk_personality raid5_personality =
2204{ 2194{
2205 .name = "raid5", 2195 .name = "raid5",
2196 .level = 5,
2206 .owner = THIS_MODULE, 2197 .owner = THIS_MODULE,
2207 .make_request = make_request, 2198 .make_request = make_request,
2208 .run = run, 2199 .run = run,
@@ -2217,17 +2208,42 @@ static mdk_personality_t raid5_personality=
2217 .quiesce = raid5_quiesce, 2208 .quiesce = raid5_quiesce,
2218}; 2209};
2219 2210
2220static int __init raid5_init (void) 2211static struct mdk_personality raid4_personality =
2221{ 2212{
2222 return register_md_personality (RAID5, &raid5_personality); 2213 .name = "raid4",
2214 .level = 4,
2215 .owner = THIS_MODULE,
2216 .make_request = make_request,
2217 .run = run,
2218 .stop = stop,
2219 .status = status,
2220 .error_handler = error,
2221 .hot_add_disk = raid5_add_disk,
2222 .hot_remove_disk= raid5_remove_disk,
2223 .spare_active = raid5_spare_active,
2224 .sync_request = sync_request,
2225 .resize = raid5_resize,
2226 .quiesce = raid5_quiesce,
2227};
2228
2229static int __init raid5_init(void)
2230{
2231 register_md_personality(&raid5_personality);
2232 register_md_personality(&raid4_personality);
2233 return 0;
2223} 2234}
2224 2235
2225static void raid5_exit (void) 2236static void raid5_exit(void)
2226{ 2237{
2227 unregister_md_personality (RAID5); 2238 unregister_md_personality(&raid5_personality);
2239 unregister_md_personality(&raid4_personality);
2228} 2240}
2229 2241
2230module_init(raid5_init); 2242module_init(raid5_init);
2231module_exit(raid5_exit); 2243module_exit(raid5_exit);
2232MODULE_LICENSE("GPL"); 2244MODULE_LICENSE("GPL");
2233MODULE_ALIAS("md-personality-4"); /* RAID5 */ 2245MODULE_ALIAS("md-personality-4"); /* RAID5 */
2246MODULE_ALIAS("md-raid5");
2247MODULE_ALIAS("md-raid4");
2248MODULE_ALIAS("md-level-5");
2249MODULE_ALIAS("md-level-4");
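
With .level carried in the personality itself, registration no longer needs the old fixed enum-indexed table. A sketch of the level-keyed registry this enables; the types here are invented stand-ins for struct mdk_personality:

    struct personality {
            const char *name;
            int level;
            struct personality *next;
    };

    static struct personality *pers_list;

    static int register_personality(struct personality *p)
    {
            p->next = pers_list;   /* no duplicate check in this sketch */
            pers_list = p;
            return 0;
    }

    static struct personality *find_personality(int level)
    {
            for (struct personality *p = pers_list; p; p = p->next)
                    if (p->level == level)
                            return p;
            return NULL;           /* caller may then request_module() */
    }
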
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 0000d162d198..8c823d686a60 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -40,12 +40,10 @@
40#define STRIPE_SHIFT (PAGE_SHIFT - 9) 40#define STRIPE_SHIFT (PAGE_SHIFT - 9)
41#define STRIPE_SECTORS (STRIPE_SIZE>>9) 41#define STRIPE_SECTORS (STRIPE_SIZE>>9)
42#define IO_THRESHOLD 1 42#define IO_THRESHOLD 1
43#define HASH_PAGES 1 43#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
44#define HASH_PAGES_ORDER 0
45#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
46#define HASH_MASK (NR_HASH - 1) 44#define HASH_MASK (NR_HASH - 1)
47 45
48#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]) 46#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
49 47
50/* bio's attached to a stripe+device for I/O are linked together in bi_sector 48/* bio's attached to a stripe+device for I/O are linked together in bi_sector
51 * order without overlap. There may be several bio's per stripe+device, and 49 * order without overlap. There may be several bio's per stripe+device, and
@@ -132,29 +130,21 @@ static void release_stripe(struct stripe_head *sh)
132 spin_unlock_irqrestore(&conf->device_lock, flags); 130 spin_unlock_irqrestore(&conf->device_lock, flags);
133} 131}
134 132
135static void remove_hash(struct stripe_head *sh) 133static inline void remove_hash(struct stripe_head *sh)
136{ 134{
137 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector); 135 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
138 136
139 if (sh->hash_pprev) { 137 hlist_del_init(&sh->hash);
140 if (sh->hash_next)
141 sh->hash_next->hash_pprev = sh->hash_pprev;
142 *sh->hash_pprev = sh->hash_next;
143 sh->hash_pprev = NULL;
144 }
145} 138}
146 139
147static __inline__ void insert_hash(raid6_conf_t *conf, struct stripe_head *sh) 140static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh)
148{ 141{
149 struct stripe_head **shp = &stripe_hash(conf, sh->sector); 142 struct hlist_head *hp = stripe_hash(conf, sh->sector);
150 143
151 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector); 144 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
152 145
153 CHECK_DEVLOCK(); 146 CHECK_DEVLOCK();
154 if ((sh->hash_next = *shp) != NULL) 147 hlist_add_head(&sh->hash, hp);
155 (*shp)->hash_pprev = &sh->hash_next;
156 *shp = sh;
157 sh->hash_pprev = shp;
158} 148}
159 149
160 150
@@ -186,7 +176,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
186 if (!p) 176 if (!p)
187 continue; 177 continue;
188 sh->dev[i].page = NULL; 178 sh->dev[i].page = NULL;
189 page_cache_release(p); 179 put_page(p);
190 } 180 }
191} 181}
192 182
@@ -247,10 +237,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
247static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector) 237static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector)
248{ 238{
249 struct stripe_head *sh; 239 struct stripe_head *sh;
240 struct hlist_node *hn;
250 241
251 CHECK_DEVLOCK(); 242 CHECK_DEVLOCK();
252 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector); 243 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
253 for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next) 244 hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash)
254 if (sh->sector == sector) 245 if (sh->sector == sector)
255 return sh; 246 return sh;
256 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector); 247 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -367,8 +358,8 @@ static void shrink_stripes(raid6_conf_t *conf)
367 conf->slab_cache = NULL; 358 conf->slab_cache = NULL;
368} 359}
369 360
370static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done, 361static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done,
371 int error) 362 int error)
372{ 363{
373 struct stripe_head *sh = bi->bi_private; 364 struct stripe_head *sh = bi->bi_private;
374 raid6_conf_t *conf = sh->raid_conf; 365 raid6_conf_t *conf = sh->raid_conf;
@@ -420,9 +411,35 @@ static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done,
420#else 411#else
421 set_bit(R5_UPTODATE, &sh->dev[i].flags); 412 set_bit(R5_UPTODATE, &sh->dev[i].flags);
422#endif 413#endif
414 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
415 printk(KERN_INFO "raid6: read error corrected!!\n");
416 clear_bit(R5_ReadError, &sh->dev[i].flags);
417 clear_bit(R5_ReWrite, &sh->dev[i].flags);
418 }
419 if (atomic_read(&conf->disks[i].rdev->read_errors))
420 atomic_set(&conf->disks[i].rdev->read_errors, 0);
423 } else { 421 } else {
424 md_error(conf->mddev, conf->disks[i].rdev); 422 int retry = 0;
425 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 423 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
424 atomic_inc(&conf->disks[i].rdev->read_errors);
425 if (conf->mddev->degraded)
426 printk(KERN_WARNING "raid6: read error not correctable.\n");
427 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
428 /* Oh, no!!! */
429 printk(KERN_WARNING "raid6: read error NOT corrected!!\n");
430 else if (atomic_read(&conf->disks[i].rdev->read_errors)
431 > conf->max_nr_stripes)
432 printk(KERN_WARNING
433 "raid6: Too many read errors, failing device.\n");
434 else
435 retry = 1;
436 if (retry)
437 set_bit(R5_ReadError, &sh->dev[i].flags);
438 else {
439 clear_bit(R5_ReadError, &sh->dev[i].flags);
440 clear_bit(R5_ReWrite, &sh->dev[i].flags);
441 md_error(conf->mddev, conf->disks[i].rdev);
442 }
426 } 443 }
427 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 444 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
428#if 0 445#if 0
@@ -805,7 +822,7 @@ static void compute_parity(struct stripe_head *sh, int method)
805} 822}
806 823
807/* Compute one missing block */ 824/* Compute one missing block */
808static void compute_block_1(struct stripe_head *sh, int dd_idx) 825static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
809{ 826{
810 raid6_conf_t *conf = sh->raid_conf; 827 raid6_conf_t *conf = sh->raid_conf;
811 int i, count, disks = conf->raid_disks; 828 int i, count, disks = conf->raid_disks;
@@ -821,7 +838,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx)
821 compute_parity(sh, UPDATE_PARITY); 838 compute_parity(sh, UPDATE_PARITY);
822 } else { 839 } else {
823 ptr[0] = page_address(sh->dev[dd_idx].page); 840 ptr[0] = page_address(sh->dev[dd_idx].page);
824 memset(ptr[0], 0, STRIPE_SIZE); 841 if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
825 count = 1; 842 count = 1;
826 for (i = disks ; i--; ) { 843 for (i = disks ; i--; ) {
827 if (i == dd_idx || i == qd_idx) 844 if (i == dd_idx || i == qd_idx)
@@ -838,7 +855,8 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx)
838 } 855 }
839 if (count != 1) 856 if (count != 1)
840 xor_block(count, STRIPE_SIZE, ptr); 857 xor_block(count, STRIPE_SIZE, ptr);
841 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 858 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
859 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
842 } 860 }
843} 861}
844 862
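
compute_block_1() reconstructs a missing block as the XOR of all surviving data blocks. The new 'nozero' flag appears to let the caller keep what the buffer already holds, e.g. parity just read from disk, so that after XORing in the data a zero result signals consistent parity (see page_is_zero() below). Both variants on plain byte buffers:

    #include <string.h>

    #define BLK 16   /* toy block size; STRIPE_SIZE in the kernel */

    static void xor_into(unsigned char *dst, const unsigned char *src)
    {
            for (int i = 0; i < BLK; i++)
                    dst[i] ^= src[i];
    }

    /* out: buffer for the missing block; with nozero it already holds
     * one term of the XOR (e.g. on-disk parity) and is not cleared */
    static void compute_missing(unsigned char *out,
                                unsigned char blocks[][BLK], int n,
                                int missing, int nozero)
    {
            if (!nozero)
                    memset(out, 0, BLK);
            for (int i = 0; i < n; i++)
                    if (i != missing)
                            xor_into(out, blocks[i]);
    }
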
@@ -871,7 +889,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
871 return; 889 return;
872 } else { 890 } else {
873 /* We're missing D+Q; recompute D from P */ 891 /* We're missing D+Q; recompute D from P */
874 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1); 892 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
875 compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */ 893 compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */
876 return; 894 return;
877 } 895 }
@@ -982,6 +1000,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
982} 1000}
983 1001
984 1002
1003static int page_is_zero(struct page *p)
1004{
1005 char *a = page_address(p);
1006 return ((*(u32*)a) == 0 &&
1007 memcmp(a, a+4, STRIPE_SIZE-4)==0);
1008}
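
page_is_zero() avoids a scratch buffer with a shifted self-compare: if the first word is zero and every byte equals the byte four positions before it, the whole page is zero by induction. A stand-alone demonstration (the kernel relies on page alignment for the u32 load; the sketch simply assumes adequate alignment):

    #include <assert.h>
    #include <string.h>

    static int buf_is_zero(const void *p, size_t len)
    {
            const char *a = p;
            return *(const unsigned int *)a == 0 &&
                   memcmp(a, a + 4, len - 4) == 0;
    }

    int main(void)
    {
            static unsigned char page[64] = {0};   /* static: aligned enough */

            assert(buf_is_zero(page, sizeof(page)));
            page[17] = 1;
            assert(!buf_is_zero(page, sizeof(page)));
            return 0;
    }
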
985/* 1009/*
986 * handle_stripe - do things to a stripe. 1010 * handle_stripe - do things to a stripe.
987 * 1011 *
@@ -1000,7 +1024,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
1000 * 1024 *
1001 */ 1025 */
1002 1026
1003static void handle_stripe(struct stripe_head *sh) 1027static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
1004{ 1028{
1005 raid6_conf_t *conf = sh->raid_conf; 1029 raid6_conf_t *conf = sh->raid_conf;
1006 int disks = conf->raid_disks; 1030 int disks = conf->raid_disks;
@@ -1027,11 +1051,11 @@ static void handle_stripe(struct stripe_head *sh)
1027 syncing = test_bit(STRIPE_SYNCING, &sh->state); 1051 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1028 /* Now to look around and see what can be done */ 1052 /* Now to look around and see what can be done */
1029 1053
1054 rcu_read_lock();
1030 for (i=disks; i--; ) { 1055 for (i=disks; i--; ) {
1031 mdk_rdev_t *rdev; 1056 mdk_rdev_t *rdev;
1032 dev = &sh->dev[i]; 1057 dev = &sh->dev[i];
1033 clear_bit(R5_Insync, &dev->flags); 1058 clear_bit(R5_Insync, &dev->flags);
1034 clear_bit(R5_Syncio, &dev->flags);
1035 1059
1036 PRINTK("check %d: state 0x%lx read %p write %p written %p\n", 1060 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
1037 i, dev->flags, dev->toread, dev->towrite, dev->written); 1061 i, dev->flags, dev->toread, dev->towrite, dev->written);
@@ -1070,14 +1094,21 @@ static void handle_stripe(struct stripe_head *sh)
1070 non_overwrite++; 1094 non_overwrite++;
1071 } 1095 }
1072 if (dev->written) written++; 1096 if (dev->written) written++;
1073 rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */ 1097 rdev = rcu_dereference(conf->disks[i].rdev);
1074 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 1098 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1099 /* The ReadError flag will just be confusing now */
1100 clear_bit(R5_ReadError, &dev->flags);
1101 clear_bit(R5_ReWrite, &dev->flags);
1102 }
1103 if (!rdev || !test_bit(In_sync, &rdev->flags)
1104 || test_bit(R5_ReadError, &dev->flags)) {
1075 if ( failed < 2 ) 1105 if ( failed < 2 )
1076 failed_num[failed] = i; 1106 failed_num[failed] = i;
1077 failed++; 1107 failed++;
1078 } else 1108 } else
1079 set_bit(R5_Insync, &dev->flags); 1109 set_bit(R5_Insync, &dev->flags);
1080 } 1110 }
1111 rcu_read_unlock();
1081 PRINTK("locked=%d uptodate=%d to_read=%d" 1112 PRINTK("locked=%d uptodate=%d to_read=%d"
1082 " to_write=%d failed=%d failed_num=%d,%d\n", 1113 " to_write=%d failed=%d failed_num=%d,%d\n",
1083 locked, uptodate, to_read, to_write, failed, 1114 locked, uptodate, to_read, to_write, failed,
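
The hunk above replaces the bare rdev pointer read (and its FIXME) with the
standard RCU access pattern: fetch the pointer under rcu_read_lock() with
rcu_dereference(), sample only the flag bits, and drop the lock after the loop.
Condensed to its skeleton (a sketch of the pattern shown above, not additional
driver code):

    rcu_read_lock();
    for (i = disks; i--; ) {
            mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);

            /* rdev may be NULL or mid-removal; only test flags, don't keep it */
            if (!rdev || !test_bit(In_sync, &rdev->flags))
                    failed++;
            else
                    set_bit(R5_Insync, &sh->dev[i].flags);
    }
    rcu_read_unlock();
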
@@ -1088,6 +1119,17 @@ static void handle_stripe(struct stripe_head *sh)
1088 if (failed > 2 && to_read+to_write+written) { 1119 if (failed > 2 && to_read+to_write+written) {
1089 for (i=disks; i--; ) { 1120 for (i=disks; i--; ) {
1090 int bitmap_end = 0; 1121 int bitmap_end = 0;
1122
1123 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1124 mdk_rdev_t *rdev;
1125 rcu_read_lock();
1126 rdev = rcu_dereference(conf->disks[i].rdev);
1127 if (rdev && test_bit(In_sync, &rdev->flags))
1128 /* multiple read failures in one stripe */
1129 md_error(conf->mddev, rdev);
1130 rcu_read_unlock();
1131 }
1132
1091 spin_lock_irq(&conf->device_lock); 1133 spin_lock_irq(&conf->device_lock);
1092 /* fail all writes first */ 1134 /* fail all writes first */
1093 bi = sh->dev[i].towrite; 1135 bi = sh->dev[i].towrite;
@@ -1123,7 +1165,8 @@ static void handle_stripe(struct stripe_head *sh)
1123 } 1165 }
1124 1166
1125 /* fail any reads if this device is non-operational */ 1167 /* fail any reads if this device is non-operational */
1126 if (!test_bit(R5_Insync, &sh->dev[i].flags)) { 1168 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1169 test_bit(R5_ReadError, &sh->dev[i].flags)) {
1127 bi = sh->dev[i].toread; 1170 bi = sh->dev[i].toread;
1128 sh->dev[i].toread = NULL; 1171 sh->dev[i].toread = NULL;
1129 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1172 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
@@ -1228,7 +1271,7 @@ static void handle_stripe(struct stripe_head *sh)
1228 if (uptodate == disks-1) { 1271 if (uptodate == disks-1) {
1229 PRINTK("Computing stripe %llu block %d\n", 1272 PRINTK("Computing stripe %llu block %d\n",
1230 (unsigned long long)sh->sector, i); 1273 (unsigned long long)sh->sector, i);
1231 compute_block_1(sh, i); 1274 compute_block_1(sh, i, 0);
1232 uptodate++; 1275 uptodate++;
1233 } else if ( uptodate == disks-2 && failed >= 2 ) { 1276 } else if ( uptodate == disks-2 && failed >= 2 ) {
1234 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */ 1277 /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
@@ -1259,9 +1302,6 @@ static void handle_stripe(struct stripe_head *sh)
1259 locked++; 1302 locked++;
1260 PRINTK("Reading block %d (sync=%d)\n", 1303 PRINTK("Reading block %d (sync=%d)\n",
1261 i, syncing); 1304 i, syncing);
1262 if (syncing)
1263 md_sync_acct(conf->disks[i].rdev->bdev,
1264 STRIPE_SECTORS);
1265 } 1305 }
1266 } 1306 }
1267 } 1307 }
@@ -1323,7 +1363,7 @@ static void handle_stripe(struct stripe_head *sh)
1323 /* We have failed blocks and need to compute them */ 1363 /* We have failed blocks and need to compute them */
1324 switch ( failed ) { 1364 switch ( failed ) {
1325 case 0: BUG(); 1365 case 0: BUG();
1326 case 1: compute_block_1(sh, failed_num[0]); break; 1366 case 1: compute_block_1(sh, failed_num[0], 0); break;
1327 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break; 1367 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
1328 default: BUG(); /* This request should have been failed? */ 1368 default: BUG(); /* This request should have been failed? */
1329 } 1369 }
@@ -1338,12 +1378,10 @@ static void handle_stripe(struct stripe_head *sh)
1338 (unsigned long long)sh->sector, i); 1378 (unsigned long long)sh->sector, i);
1339 locked++; 1379 locked++;
1340 set_bit(R5_Wantwrite, &sh->dev[i].flags); 1380 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1341#if 0 /**** FIX: I don't understand the logic here... ****/
1342 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1343 || ((i==pd_idx || i==qd_idx) && failed == 0)) /* FIX? */
1344 set_bit(STRIPE_INSYNC, &sh->state);
1345#endif
1346 } 1381 }
1382 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
1383 set_bit(STRIPE_INSYNC, &sh->state);
1384
1347 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 1385 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1348 atomic_dec(&conf->preread_active_stripes); 1386 atomic_dec(&conf->preread_active_stripes);
1349 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 1387 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
@@ -1356,84 +1394,119 @@ static void handle_stripe(struct stripe_head *sh)
1356 * Any reads will already have been scheduled, so we just see if enough data 1394 * Any reads will already have been scheduled, so we just see if enough data
1357 * is available 1395 * is available
1358 */ 1396 */
1359 if (syncing && locked == 0 && 1397 if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
1360 !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 2) { 1398 int update_p = 0, update_q = 0;
1361 set_bit(STRIPE_HANDLE, &sh->state); 1399 struct r5dev *dev;
1362#if 0 /* RAID-6: Don't support CHECK PARITY yet */
1363 if (failed == 0) {
1364 char *pagea;
1365 if (uptodate != disks)
1366 BUG();
1367 compute_parity(sh, CHECK_PARITY);
1368 uptodate--;
1369 pagea = page_address(sh->dev[pd_idx].page);
1370 if ((*(u32*)pagea) == 0 &&
1371 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1372 /* parity is correct (on disc, not in buffer any more) */
1373 set_bit(STRIPE_INSYNC, &sh->state);
1374 }
1375 }
1376#endif
1377 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1378 int failed_needupdate[2];
1379 struct r5dev *adev, *bdev;
1380
1381 if ( failed < 1 )
1382 failed_num[0] = pd_idx;
1383 if ( failed < 2 )
1384 failed_num[1] = (failed_num[0] == qd_idx) ? pd_idx : qd_idx;
1385 1400
1386 failed_needupdate[0] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[0]].flags); 1401 set_bit(STRIPE_HANDLE, &sh->state);
1387 failed_needupdate[1] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[1]].flags);
1388 1402
1389 PRINTK("sync: failed=%d num=%d,%d fnu=%u%u\n", 1403 BUG_ON(failed>2);
1390 failed, failed_num[0], failed_num[1], failed_needupdate[0], failed_needupdate[1]); 1404 BUG_ON(uptodate < disks);
1405 /* Want to check and possibly repair P and Q.
1406 * However, there could be one 'failed' device, in which
1407 * case we can only check one of them, possibly using the
1408 * other to generate missing data
1409 */
1391 1410
1392#if 0 /* RAID-6: This code seems to require that CHECK_PARITY destroys the uptodateness of the parity */ 1411 /* If !tmp_page, we cannot do the calculations,
1393 /* should be able to compute the missing block(s) and write to spare */ 1412 * but as we have set STRIPE_HANDLE, we will soon be called
1394 if ( failed_needupdate[0] ^ failed_needupdate[1] ) { 1413 * by handle_stripe with a tmp_page - just wait until then.
1395 if (uptodate+1 != disks) 1414 */
1396 BUG(); 1415 if (tmp_page) {
1397 compute_block_1(sh, failed_needupdate[0] ? failed_num[0] : failed_num[1]); 1416 if (failed == q_failed) {
1398 uptodate++; 1417 /* The only possible failed device holds 'Q', so it makes
1399 } else if ( failed_needupdate[0] & failed_needupdate[1] ) { 1418 * sense to check P (If anything else were failed, we would
1400 if (uptodate+2 != disks) 1419 * have used P to recreate it).
1401 BUG(); 1420 */
1402 compute_block_2(sh, failed_num[0], failed_num[1]); 1421 compute_block_1(sh, pd_idx, 1);
1403 uptodate += 2; 1422 if (!page_is_zero(sh->dev[pd_idx].page)) {
1423 compute_block_1(sh,pd_idx,0);
1424 update_p = 1;
1425 }
1426 }
1427 if (!q_failed && failed < 2) {
1428 /* q is not failed, and we didn't use it to generate
1429 * anything, so it makes sense to check it
1430 */
1431 memcpy(page_address(tmp_page),
1432 page_address(sh->dev[qd_idx].page),
1433 STRIPE_SIZE);
1434 compute_parity(sh, UPDATE_PARITY);
1435 if (memcmp(page_address(tmp_page),
1436 page_address(sh->dev[qd_idx].page),
1437 STRIPE_SIZE)!= 0) {
1438 clear_bit(STRIPE_INSYNC, &sh->state);
1439 update_q = 1;
1440 }
1441 }
1442 if (update_p || update_q) {
1443 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1444 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1445 /* don't try to repair!! */
1446 update_p = update_q = 0;
1404 } 1447 }
1405#else
1406 compute_block_2(sh, failed_num[0], failed_num[1]);
1407 uptodate += failed_needupdate[0] + failed_needupdate[1];
1408#endif
1409 1448
1410 if (uptodate != disks) 1449 /* now write out any block on a failed drive,
1411 BUG(); 1450 * or P or Q if they need it
1451 */
1412 1452
1413 PRINTK("Marking for sync stripe %llu blocks %d,%d\n", 1453 if (failed == 2) {
1414 (unsigned long long)sh->sector, failed_num[0], failed_num[1]); 1454 dev = &sh->dev[failed_num[1]];
1455 locked++;
1456 set_bit(R5_LOCKED, &dev->flags);
1457 set_bit(R5_Wantwrite, &dev->flags);
1458 }
1459 if (failed >= 1) {
1460 dev = &sh->dev[failed_num[0]];
1461 locked++;
1462 set_bit(R5_LOCKED, &dev->flags);
1463 set_bit(R5_Wantwrite, &dev->flags);
1464 }
1415 1465
1416 /**** FIX: Should we really do both of these unconditionally? ****/ 1466 if (update_p) {
1417 adev = &sh->dev[failed_num[0]]; 1467 dev = &sh->dev[pd_idx];
1418 locked += !test_bit(R5_LOCKED, &adev->flags); 1468 locked ++;
1419 set_bit(R5_LOCKED, &adev->flags); 1469 set_bit(R5_LOCKED, &dev->flags);
1420 set_bit(R5_Wantwrite, &adev->flags); 1470 set_bit(R5_Wantwrite, &dev->flags);
1421 bdev = &sh->dev[failed_num[1]]; 1471 }
1422 locked += !test_bit(R5_LOCKED, &bdev->flags); 1472 if (update_q) {
1423 set_bit(R5_LOCKED, &bdev->flags); 1473 dev = &sh->dev[qd_idx];
1474 locked++;
1475 set_bit(R5_LOCKED, &dev->flags);
1476 set_bit(R5_Wantwrite, &dev->flags);
1477 }
1424 clear_bit(STRIPE_DEGRADED, &sh->state); 1478 clear_bit(STRIPE_DEGRADED, &sh->state);
1425 set_bit(R5_Wantwrite, &bdev->flags);
1426 1479
1427 set_bit(STRIPE_INSYNC, &sh->state); 1480 set_bit(STRIPE_INSYNC, &sh->state);
1428 set_bit(R5_Syncio, &adev->flags);
1429 set_bit(R5_Syncio, &bdev->flags);
1430 } 1481 }
1431 } 1482 }
1483
1432 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 1484 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1433 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 1485 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1434 clear_bit(STRIPE_SYNCING, &sh->state); 1486 clear_bit(STRIPE_SYNCING, &sh->state);
1435 } 1487 }
1436 1488
1489 /* If the failed drives are just a ReadError, then we might need
1490 * to progress the repair/check process
1491 */
1492 if (failed <= 2 && ! conf->mddev->ro)
1493 for (i=0; i<failed;i++) {
1494 dev = &sh->dev[failed_num[i]];
1495 if (test_bit(R5_ReadError, &dev->flags)
1496 && !test_bit(R5_LOCKED, &dev->flags)
1497 && test_bit(R5_UPTODATE, &dev->flags)
1498 ) {
1499 if (!test_bit(R5_ReWrite, &dev->flags)) {
1500 set_bit(R5_Wantwrite, &dev->flags);
1501 set_bit(R5_ReWrite, &dev->flags);
1502 set_bit(R5_LOCKED, &dev->flags);
1503 } else {
1504 /* let's read it back */
1505 set_bit(R5_Wantread, &dev->flags);
1506 set_bit(R5_LOCKED, &dev->flags);
1507 }
1508 }
1509 }
1437 spin_unlock(&sh->lock); 1510 spin_unlock(&sh->lock);
1438 1511
1439 while ((bi=return_bi)) { 1512 while ((bi=return_bi)) {
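
The check path above leans on the algebra of parity: P is the XOR of all data
blocks, so XOR-ing the data over a correct on-disk P (the nozero=1 call to
compute_block_1()) must leave an all-zero page, which page_is_zero() then
confirms. A one-byte userspace illustration of the cancellation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* One byte stands in for a whole stripe page. */
            uint8_t d0 = 0x5a, d1 = 0xc3, d2 = 0x0f;
            uint8_t p = d0 ^ d1 ^ d2;       /* parity as written to disk */

            /* compute_block_1(sh, pd_idx, 1) XORs the data blocks over the
             * on-disk P without zeroing first; a correct P cancels to zero. */
            assert((uint8_t)(p ^ d0 ^ d1 ^ d2) == 0);
            return 0;
    }
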
@@ -1472,7 +1545,7 @@ static void handle_stripe(struct stripe_head *sh)
1472 rcu_read_unlock(); 1545 rcu_read_unlock();
1473 1546
1474 if (rdev) { 1547 if (rdev) {
1475 if (test_bit(R5_Syncio, &sh->dev[i].flags)) 1548 if (syncing)
1476 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1549 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1477 1550
1478 bi->bi_bdev = rdev->bdev; 1551 bi->bi_bdev = rdev->bdev;
@@ -1489,6 +1562,9 @@ static void handle_stripe(struct stripe_head *sh)
1489 bi->bi_io_vec[0].bv_offset = 0; 1562 bi->bi_io_vec[0].bv_offset = 0;
1490 bi->bi_size = STRIPE_SIZE; 1563 bi->bi_size = STRIPE_SIZE;
1491 bi->bi_next = NULL; 1564 bi->bi_next = NULL;
1565 if (rw == WRITE &&
1566 test_bit(R5_ReWrite, &sh->dev[i].flags))
1567 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1492 generic_make_request(bi); 1568 generic_make_request(bi);
1493 } else { 1569 } else {
1494 if (rw == 1) 1570 if (rw == 1)
@@ -1664,7 +1740,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
1664 } 1740 }
1665 finish_wait(&conf->wait_for_overlap, &w); 1741 finish_wait(&conf->wait_for_overlap, &w);
1666 raid6_plug_device(conf); 1742 raid6_plug_device(conf);
1667 handle_stripe(sh); 1743 handle_stripe(sh, NULL);
1668 release_stripe(sh); 1744 release_stripe(sh);
1669 } else { 1745 } else {
1670 /* cannot get stripe for read-ahead, just give-up */ 1746 /* cannot get stripe for read-ahead, just give-up */
@@ -1728,6 +1804,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1728 return rv; 1804 return rv;
1729 } 1805 }
1730 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1806 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1807 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1731 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 1808 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1732 /* we can skip this block, and probably more */ 1809 /* we can skip this block, and probably more */
1733 sync_blocks /= STRIPE_SECTORS; 1810 sync_blocks /= STRIPE_SECTORS;
@@ -1765,7 +1842,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1765 clear_bit(STRIPE_INSYNC, &sh->state); 1842 clear_bit(STRIPE_INSYNC, &sh->state);
1766 spin_unlock(&sh->lock); 1843 spin_unlock(&sh->lock);
1767 1844
1768 handle_stripe(sh); 1845 handle_stripe(sh, NULL);
1769 release_stripe(sh); 1846 release_stripe(sh);
1770 1847
1771 return STRIPE_SECTORS; 1848 return STRIPE_SECTORS;
@@ -1821,7 +1898,7 @@ static void raid6d (mddev_t *mddev)
1821 spin_unlock_irq(&conf->device_lock); 1898 spin_unlock_irq(&conf->device_lock);
1822 1899
1823 handled++; 1900 handled++;
1824 handle_stripe(sh); 1901 handle_stripe(sh, conf->spare_page);
1825 release_stripe(sh); 1902 release_stripe(sh);
1826 1903
1827 spin_lock_irq(&conf->device_lock); 1904 spin_lock_irq(&conf->device_lock);
@@ -1848,17 +1925,19 @@ static int run(mddev_t *mddev)
1848 return -EIO; 1925 return -EIO;
1849 } 1926 }
1850 1927
1851 mddev->private = kmalloc (sizeof (raid6_conf_t) 1928 mddev->private = kzalloc(sizeof (raid6_conf_t)
1852 + mddev->raid_disks * sizeof(struct disk_info), 1929 + mddev->raid_disks * sizeof(struct disk_info),
1853 GFP_KERNEL); 1930 GFP_KERNEL);
1854 if ((conf = mddev->private) == NULL) 1931 if ((conf = mddev->private) == NULL)
1855 goto abort; 1932 goto abort;
1856 memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
1857 conf->mddev = mddev; 1933 conf->mddev = mddev;
1858 1934
1859 if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL) 1935 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
1936 goto abort;
1937
1938 conf->spare_page = alloc_page(GFP_KERNEL);
1939 if (!conf->spare_page)
1860 goto abort; 1940 goto abort;
1861 memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
1862 1941
1863 spin_lock_init(&conf->device_lock); 1942 spin_lock_init(&conf->device_lock);
1864 init_waitqueue_head(&conf->wait_for_stripe); 1943 init_waitqueue_head(&conf->wait_for_stripe);
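
Both allocations in run() now use kzalloc(), which folds the old
kmalloc-plus-memset pair into one call (and the multi-page HASH_PAGES hash
table shrinks to a single zeroed page). The idiom, schematically:

    /* before: two steps, and the memset size must track the allocation */
    p = kmalloc(size, GFP_KERNEL);
    if (p)
            memset(p, 0, size);

    /* after: one call, zeroed to exactly the allocated size */
    p = kzalloc(size, GFP_KERNEL);
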
@@ -1929,13 +2008,18 @@ static int run(mddev_t *mddev)
1929 goto abort; 2008 goto abort;
1930 } 2009 }
1931 2010
1932#if 0 /* FIX: For now */
1933 if (mddev->degraded > 0 && 2011 if (mddev->degraded > 0 &&
1934 mddev->recovery_cp != MaxSector) { 2012 mddev->recovery_cp != MaxSector) {
1935 printk(KERN_ERR "raid6: cannot start dirty degraded array for %s\n", mdname(mddev)); 2013 if (mddev->ok_start_degraded)
1936 goto abort; 2014 printk(KERN_WARNING "raid6: starting dirty degraded array:%s"
2015 "- data corruption possible.\n",
2016 mdname(mddev));
2017 else {
2018 printk(KERN_ERR "raid6: cannot start dirty degraded array"
2019 " for %s\n", mdname(mddev));
2020 goto abort;
2021 }
1937 } 2022 }
1938#endif
1939 2023
1940 { 2024 {
1941 mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6"); 2025 mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6");
@@ -1977,7 +2061,7 @@ static int run(mddev_t *mddev)
1977 */ 2061 */
1978 { 2062 {
1979 int stripe = (mddev->raid_disks-2) * mddev->chunk_size 2063 int stripe = (mddev->raid_disks-2) * mddev->chunk_size
1980 / PAGE_CACHE_SIZE; 2064 / PAGE_SIZE;
1981 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 2065 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
1982 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 2066 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
1983 } 2067 }
@@ -1985,18 +2069,14 @@ static int run(mddev_t *mddev)
1985 /* Ok, everything is just fine now */ 2069 /* Ok, everything is just fine now */
1986 mddev->array_size = mddev->size * (mddev->raid_disks - 2); 2070 mddev->array_size = mddev->size * (mddev->raid_disks - 2);
1987 2071
1988 if (mddev->bitmap)
1989 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
1990
1991 mddev->queue->unplug_fn = raid6_unplug_device; 2072 mddev->queue->unplug_fn = raid6_unplug_device;
1992 mddev->queue->issue_flush_fn = raid6_issue_flush; 2073 mddev->queue->issue_flush_fn = raid6_issue_flush;
1993 return 0; 2074 return 0;
1994abort: 2075abort:
1995 if (conf) { 2076 if (conf) {
1996 print_raid6_conf(conf); 2077 print_raid6_conf(conf);
1997 if (conf->stripe_hashtbl) 2078 safe_put_page(conf->spare_page);
1998 free_pages((unsigned long) conf->stripe_hashtbl, 2079 kfree(conf->stripe_hashtbl);
1999 HASH_PAGES_ORDER);
2000 kfree(conf); 2080 kfree(conf);
2001 } 2081 }
2002 mddev->private = NULL; 2082 mddev->private = NULL;
@@ -2013,7 +2093,7 @@ static int stop (mddev_t *mddev)
2013 md_unregister_thread(mddev->thread); 2093 md_unregister_thread(mddev->thread);
2014 mddev->thread = NULL; 2094 mddev->thread = NULL;
2015 shrink_stripes(conf); 2095 shrink_stripes(conf);
2016 free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER); 2096 kfree(conf->stripe_hashtbl);
2017 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 2097 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2018 kfree(conf); 2098 kfree(conf);
2019 mddev->private = NULL; 2099 mddev->private = NULL;
@@ -2040,12 +2120,13 @@ static void print_sh (struct seq_file *seq, struct stripe_head *sh)
2040static void printall (struct seq_file *seq, raid6_conf_t *conf) 2120static void printall (struct seq_file *seq, raid6_conf_t *conf)
2041{ 2121{
2042 struct stripe_head *sh; 2122 struct stripe_head *sh;
2123 struct hlist_node *hn;
2043 int i; 2124 int i;
2044 2125
2045 spin_lock_irq(&conf->device_lock); 2126 spin_lock_irq(&conf->device_lock);
2046 for (i = 0; i < NR_HASH; i++) { 2127 for (i = 0; i < NR_HASH; i++) {
2047 sh = conf->stripe_hashtbl[i]; 2128 sh = conf->stripe_hashtbl[i];
2048 for (; sh; sh = sh->hash_next) { 2129 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2049 if (sh->raid_conf != conf) 2130 if (sh->raid_conf != conf)
2050 continue; 2131 continue;
2051 print_sh(seq, sh); 2132 print_sh(seq, sh);
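
printall() now walks the stripe hash with the hlist API instead of the
hand-rolled sh->hash_next chain. In this kernel generation the iterator took an
explicit node cursor (a sketch of the call shown above; later kernels dropped
the extra argument):

    struct stripe_head *sh;
    struct hlist_node *hn;
    int i;

    for (i = 0; i < NR_HASH; i++)
            hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
                    /* 'hash' names the hlist_node member inside stripe_head */
                    print_sh(seq, sh);
            }
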
@@ -2223,17 +2304,12 @@ static void raid6_quiesce(mddev_t *mddev, int state)
2223 spin_unlock_irq(&conf->device_lock); 2304 spin_unlock_irq(&conf->device_lock);
2224 break; 2305 break;
2225 } 2306 }
2226 if (mddev->thread) {
2227 if (mddev->bitmap)
2228 mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2229 else
2230 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2231 md_wakeup_thread(mddev->thread);
2232 }
2233} 2307}
2234static mdk_personality_t raid6_personality= 2308
2309static struct mdk_personality raid6_personality =
2235{ 2310{
2236 .name = "raid6", 2311 .name = "raid6",
2312 .level = 6,
2237 .owner = THIS_MODULE, 2313 .owner = THIS_MODULE,
2238 .make_request = make_request, 2314 .make_request = make_request,
2239 .run = run, 2315 .run = run,
@@ -2248,7 +2324,7 @@ static mdk_personality_t raid6_personality=
2248 .quiesce = raid6_quiesce, 2324 .quiesce = raid6_quiesce,
2249}; 2325};
2250 2326
2251static int __init raid6_init (void) 2327static int __init raid6_init(void)
2252{ 2328{
2253 int e; 2329 int e;
2254 2330
@@ -2256,15 +2332,17 @@ static int __init raid6_init (void)
2256 if ( e ) 2332 if ( e )
2257 return e; 2333 return e;
2258 2334
2259 return register_md_personality (RAID6, &raid6_personality); 2335 return register_md_personality(&raid6_personality);
2260} 2336}
2261 2337
2262static void raid6_exit (void) 2338static void raid6_exit (void)
2263{ 2339{
2264 unregister_md_personality (RAID6); 2340 unregister_md_personality(&raid6_personality);
2265} 2341}
2266 2342
2267module_init(raid6_init); 2343module_init(raid6_init);
2268module_exit(raid6_exit); 2344module_exit(raid6_exit);
2269MODULE_LICENSE("GPL"); 2345MODULE_LICENSE("GPL");
2270MODULE_ALIAS("md-personality-8"); /* RAID6 */ 2346MODULE_ALIAS("md-personality-8"); /* RAID6 */
2347MODULE_ALIAS("md-raid6");
2348MODULE_ALIAS("md-level-6");
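
With the numeric RAID6 slot gone, the personality carries a .level field and is
registered by pointer; the new "md-level-6" alias then lets md autoload the
module from an array's level alone. Illustratively (the actual call site lives
in drivers/md/md.c, not in this file):

    /* md resolving a personality by level at run time: */
    request_module("md-level-%d", 6);  /* matched by MODULE_ALIAS("md-level-6") */
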
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index ddf184f95d80..6861d408f1b3 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -170,16 +170,9 @@ static size_t cpia_read_nibble (struct parport *port,
170 /* Does the error line indicate end of data? */ 170 /* Does the error line indicate end of data? */
171 if (((i /*& 1*/) == 0) && 171 if (((i /*& 1*/) == 0) &&
172 (parport_read_status(port) & PARPORT_STATUS_ERROR)) { 172 (parport_read_status(port) & PARPORT_STATUS_ERROR)) {
173 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 173 DBG("%s: No more nibble data (%d bytes)\n",
174 DBG("%s: No more nibble data (%d bytes)\n", 174 port->name, i/2);
175 port->name, i/2); 175 goto end_of_data;
176
177 /* Go to reverse idle phase. */
178 parport_frob_control (port,
179 PARPORT_CONTROL_AUTOFD,
180 PARPORT_CONTROL_AUTOFD);
181 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
182 break;
183 } 176 }
184 177
185 /* Event 7: Set nAutoFd low. */ 178 /* Event 7: Set nAutoFd low. */
@@ -227,18 +220,21 @@ static size_t cpia_read_nibble (struct parport *port,
227 byte = nibble; 220 byte = nibble;
228 } 221 }
229 222
230 i /= 2; /* i is now in bytes */
231
232 if (i == len) { 223 if (i == len) {
233 /* Read the last nibble without checking data avail. */ 224 /* Read the last nibble without checking data avail. */
234 port = port->physport; 225 if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
235 if (parport_read_status (port) & PARPORT_STATUS_ERROR) 226 end_of_data:
236 port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA; 227 /* Go to reverse idle phase. */
228 parport_frob_control (port,
229 PARPORT_CONTROL_AUTOFD,
230 PARPORT_CONTROL_AUTOFD);
231 port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
232 }
237 else 233 else
238 port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL; 234 port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
239 } 235 }
240 236
241 return i; 237 return i/2;
242} 238}
243 239
244/* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1) 240/* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1)
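
IEEE 1284 nibble mode moves four bits per handshake, so the loop counter i
counts nibbles and two iterations assemble one byte; the reworked exit paths
therefore report and return i/2 bytes. A userspace illustration (low nibble
first, as is conventional for nibble mode):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t lo = 0x7, hi = 0xa;              /* two handshakes */
            uint8_t byte = (uint8_t)(lo | hi << 4);  /* 2nd nibble = high half */
            int i = 2;                               /* nibbles transferred */

            assert(byte == 0xa7);
            assert(i / 2 == 1);                      /* hence 'return i/2;' */
            return 0;
    }
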
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 43a942a29c2e..fef677103880 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,6 +24,18 @@ config I2O
24 24
25 If unsure, say N. 25 If unsure, say N.
26 26
27config I2O_LCT_NOTIFY_ON_CHANGES
28 bool "Enable LCT notification"
29 depends on I2O
30 default y
31 ---help---
32 Only say N here if you have an I2O controller from SUN. The SUN
33 firmware doesn't support LCT notification on changes. If this option
34 is enabled on such a controller, the driver will hang in an endless
35 loop. On all other controllers, say Y.
36
37 If unsure, say Y.
38
27config I2O_EXT_ADAPTEC 39config I2O_EXT_ADAPTEC
28 bool "Enable Adaptec extensions" 40 bool "Enable Adaptec extensions"
29 depends on I2O 41 depends on I2O
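
The new symbol acts purely as a compile-time gate; the LCT-notify call in
exec-osm.c (later in this patch) is wrapped as follows, so builds for the SUN
controllers simply omit the notification:

    #ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
            i2o_exec_lct_notify(c, change_ind);
    #endif
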
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228e1cb3..ac06f10c54ec 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -17,7 +17,7 @@
17#include <linux/i2o.h> 17#include <linux/i2o.h>
18 18
19#define OSM_NAME "bus-osm" 19#define OSM_NAME "bus-osm"
20#define OSM_VERSION "$Rev$" 20#define OSM_VERSION "1.317"
21#define OSM_DESCRIPTION "I2O Bus Adapter OSM" 21#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
22 22
23static struct i2o_driver i2o_bus_driver; 23static struct i2o_driver i2o_bus_driver;
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
39 */ 39 */
40static int i2o_bus_scan(struct i2o_device *dev) 40static int i2o_bus_scan(struct i2o_device *dev)
41{ 41{
42 struct i2o_message __iomem *msg; 42 struct i2o_message *msg;
43 u32 m;
44 43
45 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 44 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
46 if (m == I2O_QUEUE_EMPTY) 45 if (IS_ERR(msg))
47 return -ETIMEDOUT; 46 return -ETIMEDOUT;
48 47
49 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 48 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
50 writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, 49 msg->u.head[1] =
51 &msg->u.head[1]); 50 cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
51 tid);
52 52
53 return i2o_msg_post_wait(dev->iop, m, 60); 53 return i2o_msg_post_wait(dev->iop, msg, 60);
54}; 54};
55 55
56/** 56/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
59 * 59 *
60 * Returns count. 60 * Returns count.
61 */ 61 */
62static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, 62static ssize_t i2o_bus_store_scan(struct device *d,
63 size_t count) 63 struct device_attribute *attr,
64 const char *buf, size_t count)
64{ 65{
65 struct i2o_device *i2o_dev = to_i2o_device(d); 66 struct i2o_device *i2o_dev = to_i2o_device(d);
66 int rc; 67 int rc;
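
This is the pattern repeated throughout the i2o changes in this patch:
i2o_msg_get_wait() now returns a plain struct i2o_message * (ERR_PTR-encoded on
failure) instead of a magic MFA number plus an __iomem pointer, and message
words are stored with cpu_to_le32() rather than writel(). Distilled from the
i2o_bus_scan() hunk above ('tid' stands in for the target's TID):

    struct i2o_message *msg;

    msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
    if (IS_ERR(msg))
            return PTR_ERR(msg);    /* replaces the I2O_QUEUE_EMPTY check */

    msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
    msg->u.head[1] = cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | tid);

    return i2o_msg_post_wait(c, msg, 60);   /* timeout in seconds */
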
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index 10432f665201..3bba7aa82e58 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -22,7 +22,7 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23 23
24#define OSM_NAME "config-osm" 24#define OSM_NAME "config-osm"
25#define OSM_VERSION "1.248" 25#define OSM_VERSION "1.323"
26#define OSM_DESCRIPTION "I2O Configuration OSM" 26#define OSM_DESCRIPTION "I2O Configuration OSM"
27 27
28/* access mode user rw */ 28/* access mode user rw */
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index 9eefedb16211..90628562851e 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -14,8 +14,6 @@
14 */ 14 */
15 15
16/* Exec-OSM */ 16/* Exec-OSM */
17extern struct bus_type i2o_bus_type;
18
19extern struct i2o_driver i2o_exec_driver; 17extern struct i2o_driver i2o_exec_driver;
20extern int i2o_exec_lct_get(struct i2o_controller *); 18extern int i2o_exec_lct_get(struct i2o_controller *);
21 19
@@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void);
23extern void __exit i2o_exec_exit(void); 21extern void __exit i2o_exec_exit(void);
24 22
25/* driver */ 23/* driver */
24extern struct bus_type i2o_bus_type;
25
26extern int i2o_driver_dispatch(struct i2o_controller *, u32); 26extern int i2o_driver_dispatch(struct i2o_controller *, u32);
27 27
28extern int __init i2o_driver_init(void); 28extern int __init i2o_driver_init(void);
@@ -33,19 +33,27 @@ extern int __init i2o_pci_init(void);
33extern void __exit i2o_pci_exit(void); 33extern void __exit i2o_pci_exit(void);
34 34
35/* device */ 35/* device */
36extern struct device_attribute i2o_device_attrs[];
37
36extern void i2o_device_remove(struct i2o_device *); 38extern void i2o_device_remove(struct i2o_device *);
37extern int i2o_device_parse_lct(struct i2o_controller *); 39extern int i2o_device_parse_lct(struct i2o_controller *);
38 40
39/* IOP */ 41/* IOP */
40extern struct i2o_controller *i2o_iop_alloc(void); 42extern struct i2o_controller *i2o_iop_alloc(void);
41extern void i2o_iop_free(struct i2o_controller *); 43
44/**
45 * i2o_iop_free - Free the i2o_controller struct
46 * @c: I2O controller to free
47 */
48static inline void i2o_iop_free(struct i2o_controller *c)
49{
50 i2o_pool_free(&c->in_msg);
51 kfree(c);
52}
42 53
43extern int i2o_iop_add(struct i2o_controller *); 54extern int i2o_iop_add(struct i2o_controller *);
44extern void i2o_iop_remove(struct i2o_controller *); 55extern void i2o_iop_remove(struct i2o_controller *);
45 56
46/* config */
47extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
48
49/* control registers relative to c->base */ 57/* control registers relative to c->base */
50#define I2O_IRQ_STATUS 0x30 58#define I2O_IRQ_STATUS 0x30
51#define I2O_IRQ_MASK 0x34 59#define I2O_IRQ_MASK 0x34
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cdb8ae1..ee183053fa23 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, 35static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
36 u32 type) 36 u32 type)
37{ 37{
38 struct i2o_message __iomem *msg; 38 struct i2o_message *msg;
39 u32 m;
40 39
41 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 40 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
42 if (m == I2O_QUEUE_EMPTY) 41 if (IS_ERR(msg))
43 return -ETIMEDOUT; 42 return PTR_ERR(msg);
44 43
45 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 44 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
46 writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); 45 msg->u.head[1] =
47 writel(type, &msg->body[0]); 46 cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
47 msg->body[0] = cpu_to_le32(type);
48 48
49 return i2o_msg_post_wait(dev->iop, m, 60); 49 return i2o_msg_post_wait(dev->iop, msg, 60);
50} 50}
51 51
52/** 52/**
@@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev)
123 return rc; 123 return rc;
124} 124}
125 125
126
127/** 126/**
128 * i2o_device_release - release the memory for an I2O device 127 * i2o_device_release - release the memory for an I2O device
129 * @dev: I2O device which should be released 128 * @dev: I2O device which should be released
@@ -140,10 +139,10 @@ static void i2o_device_release(struct device *dev)
140 kfree(i2o_dev); 139 kfree(i2o_dev);
141} 140}
142 141
143
144/** 142/**
145 * i2o_device_class_show_class_id - Displays class id of I2O device 143 * i2o_device_show_class_id - Displays class id of I2O device
146 * @cd: class device of which the class id should be displayed 144 * @dev: device of which the class id should be displayed
145 * @attr: pointer to device attribute
147 * @buf: buffer into which the class id should be printed 146 * @buf: buffer into which the class id should be printed
148 * 147 *
149 * Returns the number of bytes which are printed into the buffer. 148 * Returns the number of bytes which are printed into the buffer.
@@ -159,15 +158,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev,
159} 158}
160 159
161/** 160/**
162 * i2o_device_class_show_tid - Displays TID of I2O device 161 * i2o_device_show_tid - Displays TID of I2O device
163 * @cd: class device of which the TID should be displayed 162 * @dev: device of which the TID should be displayed
164 * @buf: buffer into which the class id should be printed 163 * @attr: pointer to device attribute
164 * @buf: buffer into which the TID should be printed
165 * 165 *
166 * Returns the number of bytes which are printed into the buffer. 166 * Returns the number of bytes which are printed into the buffer.
167 */ 167 */
168static ssize_t i2o_device_show_tid(struct device *dev, 168static ssize_t i2o_device_show_tid(struct device *dev,
169 struct device_attribute *attr, 169 struct device_attribute *attr, char *buf)
170 char *buf)
171{ 170{
172 struct i2o_device *i2o_dev = to_i2o_device(dev); 171 struct i2o_device *i2o_dev = to_i2o_device(dev);
173 172
@@ -175,6 +174,7 @@ static ssize_t i2o_device_show_tid(struct device *dev,
175 return strlen(buf) + 1; 174 return strlen(buf) + 1;
176} 175}
177 176
177/* I2O device attributes */
178struct device_attribute i2o_device_attrs[] = { 178struct device_attribute i2o_device_attrs[] = {
179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL), 179 __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL), 180 __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
@@ -193,12 +193,10 @@ static struct i2o_device *i2o_device_alloc(void)
193{ 193{
194 struct i2o_device *dev; 194 struct i2o_device *dev;
195 195
196 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 196 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
197 if (!dev) 197 if (!dev)
198 return ERR_PTR(-ENOMEM); 198 return ERR_PTR(-ENOMEM);
199 199
200 memset(dev, 0, sizeof(*dev));
201
202 INIT_LIST_HEAD(&dev->list); 200 INIT_LIST_HEAD(&dev->list);
203 init_MUTEX(&dev->lock); 201 init_MUTEX(&dev->lock);
204 202
@@ -209,66 +207,6 @@ static struct i2o_device *i2o_device_alloc(void)
209} 207}
210 208
211/** 209/**
212 * i2o_setup_sysfs_links - Adds attributes to the I2O device
213 * @cd: I2O class device which is added to the I2O device class
214 *
215 * This function gets called when an I2O device is added to the class. It
216 * creates the attributes for each device and creates user/parent symlink
217 * if necessary.
218 *
219 * Returns 0 on success or negative error code on failure.
220 */
221static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev)
222{
223 struct i2o_controller *c = i2o_dev->iop;
224 struct i2o_device *tmp;
225
226 /* create user entries for this device */
227 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
228 if (tmp && tmp != i2o_dev)
229 sysfs_create_link(&i2o_dev->device.kobj,
230 &tmp->device.kobj, "user");
231
232 /* create user entries referring to this device */
233 list_for_each_entry(tmp, &c->devices, list)
234 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid &&
235 tmp != i2o_dev)
236 sysfs_create_link(&tmp->device.kobj,
237 &i2o_dev->device.kobj, "user");
238
239 /* create parent entries for this device */
240 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
241 if (tmp && tmp != i2o_dev)
242 sysfs_create_link(&i2o_dev->device.kobj,
243 &tmp->device.kobj, "parent");
244
245 /* create parent entries referring to this device */
246 list_for_each_entry(tmp, &c->devices, list)
247 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid &&
248 tmp != i2o_dev)
249 sysfs_create_link(&tmp->device.kobj,
250 &i2o_dev->device.kobj, "parent");
251}
252
253static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
254{
255 struct i2o_controller *c = i2o_dev->iop;
256 struct i2o_device *tmp;
257
258 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
259 sysfs_remove_link(&i2o_dev->device.kobj, "user");
260
261 list_for_each_entry(tmp, &c->devices, list) {
262 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
263 sysfs_remove_link(&tmp->device.kobj, "parent");
264 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
265 sysfs_remove_link(&tmp->device.kobj, "user");
266 }
267}
268
269
270
271/**
272 * i2o_device_add - allocate a new I2O device and add it to the IOP 210 * i2o_device_add - allocate a new I2O device and add it to the IOP
273 * @iop: I2O controller where the device is on 211 * @iop: I2O controller where the device is on
274 * @entry: LCT entry of the I2O device 212 * @entry: LCT entry of the I2O device
@@ -282,33 +220,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
282static struct i2o_device *i2o_device_add(struct i2o_controller *c, 220static struct i2o_device *i2o_device_add(struct i2o_controller *c,
283 i2o_lct_entry * entry) 221 i2o_lct_entry * entry)
284{ 222{
285 struct i2o_device *dev; 223 struct i2o_device *i2o_dev, *tmp;
286 224
287 dev = i2o_device_alloc(); 225 i2o_dev = i2o_device_alloc();
288 if (IS_ERR(dev)) { 226 if (IS_ERR(i2o_dev)) {
289 printk(KERN_ERR "i2o: unable to allocate i2o device\n"); 227 printk(KERN_ERR "i2o: unable to allocate i2o device\n");
290 return dev; 228 return i2o_dev;
291 } 229 }
292 230
293 dev->lct_data = *entry; 231 i2o_dev->lct_data = *entry;
294 dev->iop = c;
295 232
296 snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, 233 snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
297 dev->lct_data.tid); 234 i2o_dev->lct_data.tid);
298 235
299 dev->device.parent = &c->device; 236 i2o_dev->iop = c;
237 i2o_dev->device.parent = &c->device;
300 238
301 device_register(&dev->device); 239 device_register(&i2o_dev->device);
302 240
303 list_add_tail(&dev->list, &c->devices); 241 list_add_tail(&i2o_dev->list, &c->devices);
304 242
305 i2o_setup_sysfs_links(dev); 243 /* create user entries for this device */
244 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
245 if (tmp && (tmp != i2o_dev))
246 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
247 "user");
306 248
307 i2o_driver_notify_device_add_all(dev); 249 /* create user entries referring to this device */
250 list_for_each_entry(tmp, &c->devices, list)
251 if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
252 && (tmp != i2o_dev))
253 sysfs_create_link(&tmp->device.kobj,
254 &i2o_dev->device.kobj, "user");
308 255
309 pr_debug("i2o: device %s added\n", dev->device.bus_id); 256 /* create parent entries for this device */
257 tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
258 if (tmp && (tmp != i2o_dev))
259 sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
260 "parent");
310 261
311 return dev; 262 /* create parent entries referring to this device */
263 list_for_each_entry(tmp, &c->devices, list)
264 if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
265 && (tmp != i2o_dev))
266 sysfs_create_link(&tmp->device.kobj,
267 &i2o_dev->device.kobj, "parent");
268
269 i2o_driver_notify_device_add_all(i2o_dev);
270
271 pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
272
273 return i2o_dev;
312} 274}
313 275
314/** 276/**
@@ -321,9 +283,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
321 */ 283 */
322void i2o_device_remove(struct i2o_device *i2o_dev) 284void i2o_device_remove(struct i2o_device *i2o_dev)
323{ 285{
286 struct i2o_device *tmp;
287 struct i2o_controller *c = i2o_dev->iop;
288
324 i2o_driver_notify_device_remove_all(i2o_dev); 289 i2o_driver_notify_device_remove_all(i2o_dev);
325 i2o_remove_sysfs_links(i2o_dev); 290
291 sysfs_remove_link(&i2o_dev->device.kobj, "parent");
292 sysfs_remove_link(&i2o_dev->device.kobj, "user");
293
294 list_for_each_entry(tmp, &c->devices, list) {
295 if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
296 sysfs_remove_link(&tmp->device.kobj, "parent");
297 if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
298 sysfs_remove_link(&tmp->device.kobj, "user");
299 }
326 list_del(&i2o_dev->list); 300 list_del(&i2o_dev->list);
301
327 device_unregister(&i2o_dev->device); 302 device_unregister(&i2o_dev->device);
328} 303}
329 304
@@ -341,56 +316,83 @@ int i2o_device_parse_lct(struct i2o_controller *c)
341{ 316{
342 struct i2o_device *dev, *tmp; 317 struct i2o_device *dev, *tmp;
343 i2o_lct *lct; 318 i2o_lct *lct;
344 int i; 319 u32 *dlct = c->dlct.virt;
345 int max; 320 int max = 0, i = 0;
321 u16 table_size;
322 u32 buf;
346 323
347 down(&c->lct_lock); 324 down(&c->lct_lock);
348 325
349 kfree(c->lct); 326 kfree(c->lct);
350 327
351 lct = c->dlct.virt; 328 buf = le32_to_cpu(*dlct++);
329 table_size = buf & 0xffff;
352 330
353 c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); 331 lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
354 if (!c->lct) { 332 if (!lct) {
355 up(&c->lct_lock); 333 up(&c->lct_lock);
356 return -ENOMEM; 334 return -ENOMEM;
357 } 335 }
358 336
359 if (lct->table_size * 4 > c->dlct.len) { 337 lct->lct_ver = buf >> 28;
360 memcpy(c->lct, c->dlct.virt, c->dlct.len); 338 lct->boot_tid = buf >> 16 & 0xfff;
361 up(&c->lct_lock); 339 lct->table_size = table_size;
362 return -EAGAIN; 340 lct->change_ind = le32_to_cpu(*dlct++);
363 } 341 lct->iop_flags = le32_to_cpu(*dlct++);
364 342
365 memcpy(c->lct, c->dlct.virt, lct->table_size * 4); 343 table_size -= 3;
366
367 lct = c->lct;
368
369 max = (lct->table_size - 3) / 9;
370 344
371 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, 345 pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
372 lct->table_size); 346 lct->table_size);
373 347
374 /* remove devices, which are not in the LCT anymore */ 348 while (table_size > 0) {
375 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 349 i2o_lct_entry *entry = &lct->lct_entry[max];
376 int found = 0; 350 int found = 0;
377 351
378 for (i = 0; i < max; i++) { 352 buf = le32_to_cpu(*dlct++);
379 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 353 entry->entry_size = buf & 0xffff;
354 entry->tid = buf >> 16 & 0xfff;
355
356 entry->change_ind = le32_to_cpu(*dlct++);
357 entry->device_flags = le32_to_cpu(*dlct++);
358
359 buf = le32_to_cpu(*dlct++);
360 entry->class_id = buf & 0xfff;
361 entry->version = buf >> 12 & 0xf;
362 entry->vendor_id = buf >> 16;
363
364 entry->sub_class = le32_to_cpu(*dlct++);
365
366 buf = le32_to_cpu(*dlct++);
367 entry->user_tid = buf & 0xfff;
368 entry->parent_tid = buf >> 12 & 0xfff;
369 entry->bios_info = buf >> 24;
370
371 memcpy(&entry->identity_tag, dlct, 8);
372 dlct += 2;
373
374 entry->event_capabilities = le32_to_cpu(*dlct++);
375
376 /* add devices that are new in the LCT */
377 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
378 if (entry->tid == dev->lct_data.tid) {
380 found = 1; 379 found = 1;
381 break; 380 break;
382 } 381 }
383 } 382 }
384 383
385 if (!found) 384 if (!found)
386 i2o_device_remove(dev); 385 i2o_device_add(c, entry);
386
387 table_size -= 9;
388 max++;
387 } 389 }
388 390
389 /* add new devices, which are new in the LCT */ 391 /* remove devices that are no longer in the LCT */
390 for (i = 0; i < max; i++) { 392 list_for_each_entry_safe(dev, tmp, &c->devices, list) {
391 int found = 0; 393 int found = 0;
392 394
393 list_for_each_entry_safe(dev, tmp, &c->devices, list) { 395 for (i = 0; i < max; i++) {
394 if (lct->lct_entry[i].tid == dev->lct_data.tid) { 396 if (lct->lct_entry[i].tid == dev->lct_data.tid) {
395 found = 1; 397 found = 1;
396 break; 398 break;
@@ -398,14 +400,14 @@ int i2o_device_parse_lct(struct i2o_controller *c)
398 } 400 }
399 401
400 if (!found) 402 if (!found)
401 i2o_device_add(c, &lct->lct_entry[i]); 403 i2o_device_remove(dev);
402 } 404 }
405
403 up(&c->lct_lock); 406 up(&c->lct_lock);
404 407
405 return 0; 408 return 0;
406} 409}
407 410
408
409/* 411/*
410 * Run time support routines 412 * Run time support routines
411 */ 413 */
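
i2o_device_parse_lct() now unpacks the little-endian LCT words field by field
instead of memcpy-ing the raw table, which makes the on-the-wire layout
explicit: each 32-bit word carries several packed fields. The first header
word, for example (a self-contained illustration of the shifts used above):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* lct_ver:4 | boot_tid:12 | table_size:16, decoded as above */
            uint32_t buf = 0x3u << 28 | 0x005u << 16 | 0x0009u;

            assert((buf & 0xffff) == 9);        /* table_size */
            assert((buf >> 16 & 0xfff) == 5);   /* boot_tid   */
            assert((buf >> 28) == 3);           /* lct_ver    */
            return 0;
    }
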
@@ -419,13 +421,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
419 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 421 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
420 */ 422 */
421int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, 423int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
422 int oplen, void *reslist, int reslen) 424 int oplen, void *reslist, int reslen)
423{ 425{
424 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
425 u32 m;
426 u32 *res32 = (u32 *) reslist;
427 u32 *restmp = (u32 *) reslist;
428 int len = 0;
429 int i = 0; 427 int i = 0;
430 int rc; 428 int rc;
431 struct i2o_dma res; 429 struct i2o_dma res;
@@ -437,26 +435,27 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
437 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) 435 if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
438 return -ENOMEM; 436 return -ENOMEM;
439 437
440 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 438 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
441 if (m == I2O_QUEUE_EMPTY) { 439 if (IS_ERR(msg)) {
442 i2o_dma_free(dev, &res); 440 i2o_dma_free(dev, &res);
443 return -ETIMEDOUT; 441 return PTR_ERR(msg);
444 } 442 }
445 443
446 i = 0; 444 i = 0;
447 writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, 445 msg->u.head[1] =
448 &msg->u.head[1]); 446 cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
449 writel(0, &msg->body[i++]); 447 msg->body[i++] = cpu_to_le32(0x00000000);
450 writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ 448 msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
451 memcpy_toio(&msg->body[i], oplist, oplen); 449 memcpy(&msg->body[i], oplist, oplen);
452 i += (oplen / 4 + (oplen % 4 ? 1 : 0)); 450 i += (oplen / 4 + (oplen % 4 ? 1 : 0));
453 writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ 451 msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
454 writel(res.phys, &msg->body[i++]); 452 msg->body[i++] = cpu_to_le32(res.phys);
455 453
456 writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | 454 msg->u.head[0] =
457 SGL_OFFSET_5, &msg->u.head[0]); 455 cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
456 SGL_OFFSET_5);
458 457
459 rc = i2o_msg_post_wait_mem(c, m, 10, &res); 458 rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
460 459
461 /* This only looks like a memory leak - don't "fix" it. */ 460 /* This only looks like a memory leak - don't "fix" it. */
462 if (rc == -ETIMEDOUT) 461 if (rc == -ETIMEDOUT)
@@ -465,36 +464,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
465 memcpy(reslist, res.virt, res.len); 464 memcpy(reslist, res.virt, res.len);
466 i2o_dma_free(dev, &res); 465 i2o_dma_free(dev, &res);
467 466
468 /* Query failed */ 467 return rc;
469 if (rc)
470 return rc;
471 /*
472 * Calculate number of bytes of Result LIST
473 * We need to loop through each Result BLOCK and grab the length
474 */
475 restmp = res32 + 1;
476 len = 1;
477 for (i = 0; i < (res32[0] & 0X0000FFFF); i++) {
478 if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */
479 printk(KERN_WARNING
480 "%s - Error:\n ErrorInfoSize = 0x%02x, "
481 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
482 (cmd ==
483 I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
484 "PARAMS_GET", res32[1] >> 24,
485 (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
486
487 /*
488 * If this is the only request, then we return an error
489 */
490 if ((res32[0] & 0x0000FFFF) == 1) {
491 return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
492 }
493 }
494 len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
495 restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
496 }
497 return (len << 2); /* bytes used by result list */
498} 468}
499 469
500/* 470/*
@@ -503,28 +473,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
503int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, 473int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
504 void *buf, int buflen) 474 void *buf, int buflen)
505{ 475{
506 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; 476 u32 opblk[] = { cpu_to_le32(0x00000001),
477 cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
478 cpu_to_le32((s16) field << 16 | 0x00000001)
479 };
507 u8 *resblk; /* 8 bytes for header */ 480 u8 *resblk; /* 8 bytes for header */
508 int size; 481 int rc;
509
510 if (field == -1) /* whole group */
511 opblk[4] = -1;
512 482
513 resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC); 483 resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC);
514 if (!resblk) 484 if (!resblk)
515 return -ENOMEM; 485 return -ENOMEM;
516 486
517 size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, 487 rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
518 sizeof(opblk), resblk, buflen + 8); 488 sizeof(opblk), resblk, buflen + 8);
519 489
520 memcpy(buf, resblk + 8, buflen); /* cut off header */ 490 memcpy(buf, resblk + 8, buflen); /* cut off header */
521 491
522 kfree(resblk); 492 kfree(resblk);
523 493
524 if (size > buflen) 494 return rc;
525 return buflen;
526
527 return size;
528} 495}
529 496
530/* 497/*
@@ -534,12 +501,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
534 * else return specific fields 501 * else return specific fields
535 * ibuf contains fieldindexes 502 * ibuf contains fieldindexes
536 * 503 *
537 * if oper == I2O_PARAMS_LIST_GET, get from specific rows 504 * if oper == I2O_PARAMS_LIST_GET, get from specific rows
538 * if fieldcount == -1 return all fields 505 * if fieldcount == -1 return all fields
539 * ibuf contains rowcount, keyvalues 506 * ibuf contains rowcount, keyvalues
540 * else return specific fields 507 * else return specific fields
541 * fieldcount is # of fieldindexes 508 * fieldcount is # of fieldindexes
542 * ibuf contains fieldindexes, rowcount, keyvalues 509 * ibuf contains fieldindexes, rowcount, keyvalues
543 * 510 *
544 * You could also use the function i2o_issue_params() directly. 511 * You could also use the function i2o_issue_params() directly.
545 */ 512 */
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 0fb9c4e2ad4c..64130227574f 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -61,12 +61,10 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
61}; 61};
62 62
63/* I2O bus type */ 63/* I2O bus type */
64extern struct device_attribute i2o_device_attrs[];
65
66struct bus_type i2o_bus_type = { 64struct bus_type i2o_bus_type = {
67 .name = "i2o", 65 .name = "i2o",
68 .match = i2o_bus_match, 66 .match = i2o_bus_match,
69 .dev_attrs = i2o_device_attrs, 67 .dev_attrs = i2o_device_attrs
70}; 68};
71 69
72/** 70/**
@@ -219,14 +217,14 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
219 /* cut off header from message size (in 32-bit words) */ 217 /* cut off header from message size (in 32-bit words) */
220 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; 218 size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5;
221 219
222 evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO); 220 evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
223 if (!evt) 221 if (!evt)
224 return -ENOMEM; 222 return -ENOMEM;
225 223
226 evt->size = size; 224 evt->size = size;
227 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); 225 evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt);
228 evt->event_indicator = le32_to_cpu(msg->body[0]); 226 evt->event_indicator = le32_to_cpu(msg->body[0]);
229 memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4); 227 memcpy(&evt->data, &msg->body[1], size * 4);
230 228
231 list_for_each_entry_safe(dev, tmp, &c->devices, list) 229 list_for_each_entry_safe(dev, tmp, &c->devices, list)
232 if (dev->lct_data.tid == tid) { 230 if (dev->lct_data.tid == tid) {
@@ -349,12 +347,10 @@ int __init i2o_driver_init(void)
349 osm_info("max drivers = %d\n", i2o_max_drivers); 347 osm_info("max drivers = %d\n", i2o_max_drivers);
350 348
351 i2o_drivers = 349 i2o_drivers =
352 kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); 350 kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
353 if (!i2o_drivers) 351 if (!i2o_drivers)
354 return -ENOMEM; 352 return -ENOMEM;
355 353
356 memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
357
358 rc = bus_register(&i2o_bus_type); 354 rc = bus_register(&i2o_bus_type);
359 355
360 if (rc < 0) 356 if (rc < 0)
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2505b0..9bb9859f6dfe 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -33,7 +33,7 @@
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */ 36#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
37#include <asm/param.h> /* HZ */ 37#include <asm/param.h> /* HZ */
38#include "core.h" 38#include "core.h"
39 39
@@ -75,11 +75,9 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
75{ 75{
76 struct i2o_exec_wait *wait; 76 struct i2o_exec_wait *wait;
77 77
78 wait = kmalloc(sizeof(*wait), GFP_KERNEL); 78 wait = kzalloc(sizeof(*wait), GFP_KERNEL);
79 if (!wait) 79 if (!wait)
80 return ERR_PTR(-ENOMEM); 80 return NULL;
81
82 memset(wait, 0, sizeof(*wait));
83 81
84 INIT_LIST_HEAD(&wait->list); 82 INIT_LIST_HEAD(&wait->list);
85 83
@@ -114,13 +112,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
114 * Returns 0 on success, negative error code on timeout or positive error 112 * Returns 0 on success, negative error code on timeout or positive error
115 * code from reply. 113 * code from reply.
116 */ 114 */
117int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long 115int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
118 timeout, struct i2o_dma *dma) 116 unsigned long timeout, struct i2o_dma *dma)
119{ 117{
120 DECLARE_WAIT_QUEUE_HEAD(wq); 118 DECLARE_WAIT_QUEUE_HEAD(wq);
121 struct i2o_exec_wait *wait; 119 struct i2o_exec_wait *wait;
122 static u32 tcntxt = 0x80000000; 120 static u32 tcntxt = 0x80000000;
123 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
124 int rc = 0; 121 int rc = 0;
125 122
126 wait = i2o_exec_wait_alloc(); 123 wait = i2o_exec_wait_alloc();
@@ -138,15 +135,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
138 * We will only use transaction contexts >= 0x80000000 for POST WAIT, 135 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
139 * so we could find a POST WAIT reply easier in the reply handler. 136 * so we could find a POST WAIT reply easier in the reply handler.
140 */ 137 */
141 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 138 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
142 wait->tcntxt = tcntxt++; 139 wait->tcntxt = tcntxt++;
143 writel(wait->tcntxt, &msg->u.s.tcntxt); 140 msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
144 141
145 /* 142 /*
146 * Post the message to the controller. At some point later it will 143 * Post the message to the controller. At some point later it will
147 * return. If we time out before it returns then complete will be zero. 144 * return. If we time out before it returns then complete will be zero.
148 */ 145 */
149 i2o_msg_post(c, m); 146 i2o_msg_post(c, msg);
150 147
151 if (!wait->complete) { 148 if (!wait->complete) {
152 wait->wq = &wq; 149 wait->wq = &wq;
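
Each POST-WAIT request is tagged with a transaction context drawn from a
counter that starts at 0x80000000, so the reply handler can recognize a
post-wait completion with a single range test on the context. The tagging,
condensed from the hunk above:

    static u32 tcntxt = 0x80000000; /* post-wait contexts use the top half */

    wait->tcntxt = tcntxt++;
    msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
    msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);

    i2o_msg_post(c, msg);   /* a reply carrying tcntxt >= 0x80000000
                             * routes back to this waiter */
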
@@ -266,13 +263,14 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
266 * 263 *
267 * Returns number of bytes printed into buffer. 264 * Returns number of bytes printed into buffer.
268 */ 265 */
269static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) 266static ssize_t i2o_exec_show_vendor_id(struct device *d,
267 struct device_attribute *attr, char *buf)
270{ 268{
271 struct i2o_device *dev = to_i2o_device(d); 269 struct i2o_device *dev = to_i2o_device(d);
272 u16 id; 270 u16 id;
273 271
274 if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { 272 if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
275 sprintf(buf, "0x%04x", id); 273 sprintf(buf, "0x%04x", le16_to_cpu(id));
276 return strlen(buf) + 1; 274 return strlen(buf) + 1;
277 } 275 }
278 276
@@ -286,13 +284,15 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
286 * 284 *
287 * Returns number of bytes printed into buffer. 285 * Returns number of bytes printed into buffer.
288 */ 286 */
289static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) 287static ssize_t i2o_exec_show_product_id(struct device *d,
288 struct device_attribute *attr,
289 char *buf)
290{ 290{
291 struct i2o_device *dev = to_i2o_device(d); 291 struct i2o_device *dev = to_i2o_device(d);
292 u16 id; 292 u16 id;
293 293
294 if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { 294 if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
295 sprintf(buf, "0x%04x", id); 295 sprintf(buf, "0x%04x", le16_to_cpu(id));
296 return strlen(buf) + 1; 296 return strlen(buf) + 1;
297 } 297 }
298 298
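Both attribute-show hunks invert the i2o_parm_field_get() test, which indicates the helper now follows the usual kernel convention of returning 0 on success and a negative errno on failure; the added le16_to_cpu() reflects that parameter data arrives as raw little-endian bytes. The calling convention these hunks imply, sketched:

    u16 id;

    /* 0 on success; 'id' holds raw little-endian parameter bytes */
    if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2))
            sprintf(buf, "0x%04x", le16_to_cpu(id));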
@@ -362,7 +362,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c)
362 if (i2o_device_parse_lct(c) != -EAGAIN) 362 if (i2o_device_parse_lct(c) != -EAGAIN)
363 change_ind = c->lct->change_ind + 1; 363 change_ind = c->lct->change_ind + 1;
364 364
365#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
365 i2o_exec_lct_notify(c, change_ind); 366 i2o_exec_lct_notify(c, change_ind);
367#endif
366}; 368};
367 369
368/** 370/**
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
385 u32 context; 387 u32 context;
386 388
387 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { 389 if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
390 struct i2o_message __iomem *pmsg;
391 u32 pm;
392
388 /* 393 /*
389 * If Fail bit is set we must take the transaction context of 394 * If Fail bit is set we must take the transaction context of
390 * the preserved message to find the right request again. 395 * the preserved message to find the right request again.
391 */ 396 */
392 struct i2o_message __iomem *pmsg;
393 u32 pm;
394 397
395 pm = le32_to_cpu(msg->body[3]); 398 pm = le32_to_cpu(msg->body[3]);
396
397 pmsg = i2o_msg_in_to_virt(c, pm); 399 pmsg = i2o_msg_in_to_virt(c, pm);
400 context = readl(&pmsg->u.s.tcntxt);
398 401
399 i2o_report_status(KERN_INFO, "i2o_core", msg); 402 i2o_report_status(KERN_INFO, "i2o_core", msg);
400 403
401 context = readl(&pmsg->u.s.tcntxt);
402
403 /* Release the preserved msg */ 404 /* Release the preserved msg */
404 i2o_msg_nop(c, pm); 405 i2o_msg_nop_mfa(c, pm);
405 } else 406 } else
406 context = le32_to_cpu(msg->u.s.tcntxt); 407 context = le32_to_cpu(msg->u.s.tcntxt);
407 408
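The reshuffled MSG_FAIL branch preserves a deliberate asymmetry: the reply frame (msg) has already been copied into system memory and is parsed with le32_to_cpu(), while the preserved inbound frame is looked up by its MFA and still sits behind an __iomem mapping, so its transaction context must be fetched with readl(). Condensed, using the accessors as shown in the hunk:

    if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
            u32 pm = le32_to_cpu(msg->body[3]);      /* MFA of preserved frame */
            struct i2o_message __iomem *pmsg = i2o_msg_in_to_virt(c, pm);

            context = readl(&pmsg->u.s.tcntxt);      /* MMIO read */
            i2o_msg_nop_mfa(c, pm);                  /* return the MFA to the IOP */
    } else
            context = le32_to_cpu(msg->u.s.tcntxt);  /* plain memory */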
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
462 */ 463 */
463int i2o_exec_lct_get(struct i2o_controller *c) 464int i2o_exec_lct_get(struct i2o_controller *c)
464{ 465{
465 struct i2o_message __iomem *msg; 466 struct i2o_message *msg;
466 u32 m;
467 int i = 0; 467 int i = 0;
468 int rc = -EAGAIN; 468 int rc = -EAGAIN;
469 469
470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { 470 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
471 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 471 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
472 if (m == I2O_QUEUE_EMPTY) 472 if (IS_ERR(msg))
473 return -ETIMEDOUT; 473 return PTR_ERR(msg);
474 474
475 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 475 msg->u.head[0] =
476 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 476 cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
477 &msg->u.head[1]); 477 msg->u.head[1] =
478 writel(0xffffffff, &msg->body[0]); 478 cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
479 writel(0x00000000, &msg->body[1]); 479 ADAPTER_TID);
480 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 480 msg->body[0] = cpu_to_le32(0xffffffff);
481 writel(c->dlct.phys, &msg->body[3]); 481 msg->body[1] = cpu_to_le32(0x00000000);
482 482 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
483 rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); 483 msg->body[3] = cpu_to_le32(c->dlct.phys);
484
485 rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
484 if (rc < 0) 486 if (rc < 0)
485 break; 487 break;
486 488
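i2o_msg_get_wait() no longer hands back an MFA with I2O_QUEUE_EMPTY as an in-band sentinel; failures are encoded in the pointer itself via the kernel's ERR_PTR convention from <linux/err.h>. A small sketch of that convention:

    #include <linux/err.h>

    struct i2o_message *msg = ERR_PTR(-ETIMEDOUT);  /* errno encoded as a pointer */

    if (IS_ERR(msg))                    /* true for the top errno-sized address range */
            return PTR_ERR(msg);        /* decodes back to -ETIMEDOUT */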
@@ -506,29 +508,29 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
506{ 508{
507 i2o_status_block *sb = c->status_block.virt; 509 i2o_status_block *sb = c->status_block.virt;
508 struct device *dev; 510 struct device *dev;
509 struct i2o_message __iomem *msg; 511 struct i2o_message *msg;
510 u32 m;
511 512
512 dev = &c->pdev->dev; 513 dev = &c->pdev->dev;
513 514
514 if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) 515 if (i2o_dma_realloc
516 (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
515 return -ENOMEM; 517 return -ENOMEM;
516 518
517 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 519 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
518 if (m == I2O_QUEUE_EMPTY) 520 if (IS_ERR(msg))
519 return -ETIMEDOUT; 521 return PTR_ERR(msg);
520 522
521 writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); 523 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
522 writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, 524 msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
523 &msg->u.head[1]); 525 ADAPTER_TID);
524 writel(i2o_exec_driver.context, &msg->u.s.icntxt); 526 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
525 writel(0, &msg->u.s.tcntxt); /* FIXME */ 527 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
526 writel(0xffffffff, &msg->body[0]); 528 msg->body[0] = cpu_to_le32(0xffffffff);
527 writel(change_ind, &msg->body[1]); 529 msg->body[1] = cpu_to_le32(change_ind);
528 writel(0xd0000000 | c->dlct.len, &msg->body[2]); 530 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
529 writel(c->dlct.phys, &msg->body[3]); 531 msg->body[3] = cpu_to_le32(c->dlct.phys);
530 532
531 i2o_msg_post(c, m); 533 i2o_msg_post(c, msg);
532 534
533 return 0; 535 return 0;
534}; 536};
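Every request in this patch packs its second header word the same way: command code in the top byte, then two 12-bit TID fields for initiator and target. A hypothetical helper (not part of the patch, purely illustrative) makes the layout explicit:

    /* illustrative only -- the driver open-codes this packing */
    static inline __le32 i2o_msg_head1(u8 cmd, u16 init_tid, u16 target_tid)
    {
            return cpu_to_le32((u32)cmd << 24 |
                               (init_tid & 0xfff) << 12 |
                               (target_tid & 0xfff));
    }

    /* msg->u.head[1] = i2o_msg_head1(I2O_CMD_LCT_NOTIFY, HOST_TID, ADAPTER_TID); */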
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index 4f522527b7ed..5b1febed3133 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -59,10 +59,12 @@
59#include <linux/blkdev.h> 59#include <linux/blkdev.h>
60#include <linux/hdreg.h> 60#include <linux/hdreg.h>
61 61
62#include <scsi/scsi.h>
63
62#include "i2o_block.h" 64#include "i2o_block.h"
63 65
64#define OSM_NAME "block-osm" 66#define OSM_NAME "block-osm"
65#define OSM_VERSION "1.287" 67#define OSM_VERSION "1.325"
66#define OSM_DESCRIPTION "I2O Block Device OSM" 68#define OSM_DESCRIPTION "I2O Block Device OSM"
67 69
68static struct i2o_driver i2o_block_driver; 70static struct i2o_driver i2o_block_driver;
@@ -130,20 +132,20 @@ static int i2o_block_remove(struct device *dev)
130 */ 132 */
131static int i2o_block_device_flush(struct i2o_device *dev) 133static int i2o_block_device_flush(struct i2o_device *dev)
132{ 134{
133 struct i2o_message __iomem *msg; 135 struct i2o_message *msg;
134 u32 m;
135 136
136 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 137 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
137 if (m == I2O_QUEUE_EMPTY) 138 if (IS_ERR(msg))
138 return -ETIMEDOUT; 139 return PTR_ERR(msg);
139 140
140 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 141 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
141 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, 142 msg->u.head[1] =
142 &msg->u.head[1]); 143 cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
143 writel(60 << 16, &msg->body[0]); 144 lct_data.tid);
145 msg->body[0] = cpu_to_le32(60 << 16);
144 osm_debug("Flushing...\n"); 146 osm_debug("Flushing...\n");
145 147
146 return i2o_msg_post_wait(dev->iop, m, 60); 148 return i2o_msg_post_wait(dev->iop, msg, 60);
147}; 149};
148 150
149/** 151/**
@@ -181,21 +183,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
181 */ 183 */
182static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) 184static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
183{ 185{
184 struct i2o_message __iomem *msg; 186 struct i2o_message *msg;
185 u32 m; 187
186 188 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
187 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 189 if (IS_ERR(msg))
188 if (m == I2O_QUEUE_EMPTY) 190 return PTR_ERR(msg);
189 return -ETIMEDOUT; 191
190 192 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
191 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 193 msg->u.head[1] =
192 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, 194 cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
193 &msg->u.head[1]); 195 lct_data.tid);
194 writel(-1, &msg->body[0]); 196 msg->body[0] = cpu_to_le32(-1);
195 writel(0, &msg->body[1]); 197 msg->body[1] = cpu_to_le32(0x00000000);
196 osm_debug("Mounting...\n"); 198 osm_debug("Mounting...\n");
197 199
198 return i2o_msg_post_wait(dev->iop, m, 2); 200 return i2o_msg_post_wait(dev->iop, msg, 2);
199}; 201};
200 202
201/** 203/**
@@ -210,20 +212,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
210 */ 212 */
211static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) 213static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
212{ 214{
213 struct i2o_message __iomem *msg; 215 struct i2o_message *msg;
214 u32 m;
215 216
216 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
217 if (m == I2O_QUEUE_EMPTY) 218 if (IS_ERR(msg))
218 return -ETIMEDOUT; 219 return PTR_ERR(msg);
219 220
220 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
221 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 222 msg->u.head[1] =
222 &msg->u.head[1]); 223 cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
223 writel(-1, &msg->body[0]); 224 lct_data.tid);
225 msg->body[0] = cpu_to_le32(-1);
224 osm_debug("Locking...\n"); 226 osm_debug("Locking...\n");
225 227
226 return i2o_msg_post_wait(dev->iop, m, 2); 228 return i2o_msg_post_wait(dev->iop, msg, 2);
227}; 229};
228 230
229/** 231/**
@@ -238,20 +240,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
238 */ 240 */
239static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) 241static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
240{ 242{
241 struct i2o_message __iomem *msg; 243 struct i2o_message *msg;
242 u32 m;
243 244
244 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); 245 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
245 if (m == I2O_QUEUE_EMPTY) 246 if (IS_ERR(msg))
246 return -ETIMEDOUT; 247 return PTR_ERR(msg);
247 248
248 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 249 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
249 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, 250 msg->u.head[1] =
250 &msg->u.head[1]); 251 cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
251 writel(media_id, &msg->body[0]); 252 lct_data.tid);
253 msg->body[0] = cpu_to_le32(media_id);
252 osm_debug("Unlocking...\n"); 254 osm_debug("Unlocking...\n");
253 255
254 return i2o_msg_post_wait(dev->iop, m, 2); 256 return i2o_msg_post_wait(dev->iop, msg, 2);
255}; 257};
256 258
257/** 259/**
@@ -267,21 +269,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
267{ 269{
268 struct i2o_device *i2o_dev = dev->i2o_dev; 270 struct i2o_device *i2o_dev = dev->i2o_dev;
269 struct i2o_controller *c = i2o_dev->iop; 271 struct i2o_controller *c = i2o_dev->iop;
270 struct i2o_message __iomem *msg; 272 struct i2o_message *msg;
271 u32 m;
272 int rc; 273 int rc;
273 274
274 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 275 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
275 if (m == I2O_QUEUE_EMPTY) 276 if (IS_ERR(msg))
276 return -ETIMEDOUT; 277 return PTR_ERR(msg);
277 278
278 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 279 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
279 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. 280 msg->u.head[1] =
280 tid, &msg->u.head[1]); 281 cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
281 writel(op << 24, &msg->body[0]); 282 lct_data.tid);
283 msg->body[0] = cpu_to_le32(op << 24);
282 osm_debug("Power...\n"); 284 osm_debug("Power...\n");
283 285
284 rc = i2o_msg_post_wait(c, m, 60); 286 rc = i2o_msg_post_wait(c, msg, 60);
285 if (!rc) 287 if (!rc)
286 dev->power = op; 288 dev->power = op;
287 289
@@ -331,7 +333,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
331 */ 333 */
332static inline int i2o_block_sglist_alloc(struct i2o_controller *c, 334static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
333 struct i2o_block_request *ireq, 335 struct i2o_block_request *ireq,
334 u32 __iomem ** mptr) 336 u32 ** mptr)
335{ 337{
336 int nents; 338 int nents;
337 enum dma_data_direction direction; 339 enum dma_data_direction direction;
@@ -745,10 +747,9 @@ static int i2o_block_transfer(struct request *req)
745 struct i2o_block_device *dev = req->rq_disk->private_data; 747 struct i2o_block_device *dev = req->rq_disk->private_data;
746 struct i2o_controller *c; 748 struct i2o_controller *c;
747 int tid = dev->i2o_dev->lct_data.tid; 749 int tid = dev->i2o_dev->lct_data.tid;
748 struct i2o_message __iomem *msg; 750 struct i2o_message *msg;
749 u32 __iomem *mptr; 751 u32 *mptr;
750 struct i2o_block_request *ireq = req->special; 752 struct i2o_block_request *ireq = req->special;
751 u32 m;
752 u32 tcntxt; 753 u32 tcntxt;
753 u32 sgl_offset = SGL_OFFSET_8; 754 u32 sgl_offset = SGL_OFFSET_8;
754 u32 ctl_flags = 0x00000000; 755 u32 ctl_flags = 0x00000000;
@@ -763,9 +764,9 @@ static int i2o_block_transfer(struct request *req)
763 764
764 c = dev->i2o_dev->iop; 765 c = dev->i2o_dev->iop;
765 766
766 m = i2o_msg_get(c, &msg); 767 msg = i2o_msg_get(c);
767 if (m == I2O_QUEUE_EMPTY) { 768 if (IS_ERR(msg)) {
768 rc = -EBUSY; 769 rc = PTR_ERR(msg);
769 goto exit; 770 goto exit;
770 } 771 }
771 772
@@ -775,8 +776,8 @@ static int i2o_block_transfer(struct request *req)
775 goto nop_msg; 776 goto nop_msg;
776 } 777 }
777 778
778 writel(i2o_block_driver.context, &msg->u.s.icntxt); 779 msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
779 writel(tcntxt, &msg->u.s.tcntxt); 780 msg->u.s.tcntxt = cpu_to_le32(tcntxt);
780 781
781 mptr = &msg->body[0]; 782 mptr = &msg->body[0];
782 783
@@ -834,11 +835,11 @@ static int i2o_block_transfer(struct request *req)
834 835
835 sgl_offset = SGL_OFFSET_12; 836 sgl_offset = SGL_OFFSET_12;
836 837
837 writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, 838 msg->u.head[1] =
838 &msg->u.head[1]); 839 cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
839 840
840 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 841 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
841 writel(tid, mptr++); 842 *mptr++ = cpu_to_le32(tid);
842 843
843 /* 844 /*
844 * ENABLE_DISCONNECT 845 * ENABLE_DISCONNECT
@@ -846,29 +847,31 @@ static int i2o_block_transfer(struct request *req)
846 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME 847 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
847 */ 848 */
848 if (rq_data_dir(req) == READ) { 849 if (rq_data_dir(req) == READ) {
849 cmd[0] = 0x28; 850 cmd[0] = READ_10;
850 scsi_flags = 0x60a0000a; 851 scsi_flags = 0x60a0000a;
851 } else { 852 } else {
852 cmd[0] = 0x2A; 853 cmd[0] = WRITE_10;
853 scsi_flags = 0xa0a0000a; 854 scsi_flags = 0xa0a0000a;
854 } 855 }
855 856
856 writel(scsi_flags, mptr++); 857 *mptr++ = cpu_to_le32(scsi_flags);
857 858
858 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 859 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
859 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 860 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
860 861
861 memcpy_toio(mptr, cmd, 10); 862 memcpy(mptr, cmd, 10);
862 mptr += 4; 863 mptr += 4;
863 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 864 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
864 } else 865 } else
865#endif 866#endif
866 { 867 {
867 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 868 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
868 writel(ctl_flags, mptr++); 869 *mptr++ = cpu_to_le32(ctl_flags);
869 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 870 *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
870 writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); 871 *mptr++ =
871 writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); 872 cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
873 *mptr++ =
874 cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
872 } 875 }
873 876
874 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 877 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
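In the Adaptec pass-through branch the block request is recast as a raw 10-byte SCSI CDB, and the newly included <scsi/scsi.h> supplies the READ_10/WRITE_10 opcodes in place of the bare 0x28/0x2A constants. Note the endianness split: message words are little-endian, but SCSI CDB fields are big-endian, hence cpu_to_be32/cpu_to_be16. A sketch of the packing, assuming hwsec converts kernel sectors to hardware sectors as in the hunk:

    u8 cmd[16] = { 0 };

    cmd[0] = READ_10;                                         /* 0x28 */
    *((u32 *)&cmd[2]) = cpu_to_be32(req->sector * hwsec);     /* LBA, big-endian */
    *((u16 *)&cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); /* transfer length */

    memcpy(mptr, cmd, 10);      /* plain memcpy now; the frame is system memory */
    mptr += 4;                  /* the CDB occupies four 32-bit message words */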
@@ -876,13 +879,13 @@ static int i2o_block_transfer(struct request *req)
876 goto context_remove; 879 goto context_remove;
877 } 880 }
878 881
879 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | 882 msg->u.head[0] =
880 sgl_offset, &msg->u.head[0]); 883 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
881 884
882 list_add_tail(&ireq->queue, &dev->open_queue); 885 list_add_tail(&ireq->queue, &dev->open_queue);
883 dev->open_queue_depth++; 886 dev->open_queue_depth++;
884 887
885 i2o_msg_post(c, m); 888 i2o_msg_post(c, msg);
886 889
887 return 0; 890 return 0;
888 891
@@ -890,7 +893,7 @@ static int i2o_block_transfer(struct request *req)
890 i2o_cntxt_list_remove(c, req); 893 i2o_cntxt_list_remove(c, req);
891 894
892 nop_msg: 895 nop_msg:
893 i2o_msg_nop(c, m); 896 i2o_msg_nop(c, msg);
894 897
895 exit: 898 exit:
896 return rc; 899 return rc;
@@ -978,13 +981,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
978 struct request_queue *queue; 981 struct request_queue *queue;
979 int rc; 982 int rc;
980 983
981 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 984 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
982 if (!dev) { 985 if (!dev) {
983 osm_err("Insufficient memory to allocate I2O Block disk.\n"); 986 osm_err("Insufficient memory to allocate I2O Block disk.\n");
984 rc = -ENOMEM; 987 rc = -ENOMEM;
985 goto exit; 988 goto exit;
986 } 989 }
987 memset(dev, 0, sizeof(*dev));
988 990
989 INIT_LIST_HEAD(&dev->open_queue); 991 INIT_LIST_HEAD(&dev->open_queue);
990 spin_lock_init(&dev->lock); 992 spin_lock_init(&dev->lock);
@@ -1049,8 +1051,8 @@ static int i2o_block_probe(struct device *dev)
1049 int rc; 1051 int rc;
1050 u64 size; 1052 u64 size;
1051 u32 blocksize; 1053 u32 blocksize;
1052 u32 flags, status;
1053 u16 body_size = 4; 1054 u16 body_size = 4;
1055 u16 power;
1054 unsigned short max_sectors; 1056 unsigned short max_sectors;
1055 1057
1056#ifdef CONFIG_I2O_EXT_ADAPTEC 1058#ifdef CONFIG_I2O_EXT_ADAPTEC
@@ -1108,22 +1110,20 @@ static int i2o_block_probe(struct device *dev)
1108 * Ask for the current media data. If that isn't supported 1110 * Ask for the current media data. If that isn't supported
1109 * then we ask for the device capacity data 1111 * then we ask for the device capacity data
1110 */ 1112 */
1111 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1113 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
1112 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1114 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
1113 blk_queue_hardsect_size(queue, blocksize); 1115 blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
1114 } else 1116 } else
1115 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1117 osm_warn("unable to get blocksize of %s\n", gd->disk_name);
1116 1118
1117 if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || 1119 if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
1118 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { 1120 !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
1119 set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); 1121 set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
1120 } else 1122 } else
1121 osm_warn("could not get size of %s\n", gd->disk_name); 1123 osm_warn("could not get size of %s\n", gd->disk_name);
1122 1124
1123 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) 1125 if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
1124 i2o_blk_dev->power = 0; 1126 i2o_blk_dev->power = power;
1125 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1126 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
1127 1127
1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); 1128 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
1129 1129
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1b..89daf67b764d 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -36,12 +36,12 @@
36 36
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39#include "core.h"
40
41#define SG_TABLESIZE 30 39#define SG_TABLESIZE 30
42 40
43static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, 41extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
44 unsigned long arg); 42
43static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
44 unsigned long);
45 45
46static spinlock_t i2o_config_lock; 46static spinlock_t i2o_config_lock;
47 47
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 230 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
231 unsigned char maxfrag = 0, curfrag = 1; 231 unsigned char maxfrag = 0, curfrag = 1;
232 struct i2o_dma buffer; 232 struct i2o_dma buffer;
233 struct i2o_message __iomem *msg; 233 struct i2o_message *msg;
234 u32 m;
235 unsigned int status = 0, swlen = 0, fragsize = 8192; 234 unsigned int status = 0, swlen = 0, fragsize = 8192;
236 struct i2o_controller *c; 235 struct i2o_controller *c;
237 236
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
257 if (!c) 256 if (!c)
258 return -ENXIO; 257 return -ENXIO;
259 258
260 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 259 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
261 if (m == I2O_QUEUE_EMPTY) 260 if (IS_ERR(msg))
262 return -EBUSY; 261 return PTR_ERR(msg);
263 262
264 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 263 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
265 i2o_msg_nop(c, m); 264 i2o_msg_nop(c, msg);
266 return -ENOMEM; 265 return -ENOMEM;
267 } 266 }
268 267
269 __copy_from_user(buffer.virt, kxfer.buf, fragsize); 268 __copy_from_user(buffer.virt, kxfer.buf, fragsize);
270 269
271 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 270 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
272 writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 271 msg->u.head[1] =
273 &msg->u.head[1]); 272 cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
274 writel(i2o_config_driver.context, &msg->u.head[2]); 273 ADAPTER_TID);
275 writel(0, &msg->u.head[3]); 274 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
276 writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | 275 msg->u.head[3] = cpu_to_le32(0);
277 (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); 276 msg->body[0] =
278 writel(swlen, &msg->body[1]); 277 cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
279 writel(kxfer.sw_id, &msg->body[2]); 278 sw_type) << 16) |
280 writel(0xD0000000 | fragsize, &msg->body[3]); 279 (((u32) maxfrag) << 8) | (((u32) curfrag)));
281 writel(buffer.phys, &msg->body[4]); 280 msg->body[1] = cpu_to_le32(swlen);
281 msg->body[2] = cpu_to_le32(kxfer.sw_id);
282 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
283 msg->body[4] = cpu_to_le32(buffer.phys);
282 284
283 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 285 osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
284 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 286 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
285 287
286 if (status != -ETIMEDOUT) 288 if (status != -ETIMEDOUT)
287 i2o_dma_free(&c->pdev->dev, &buffer); 289 i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
302 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 304 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
303 unsigned char maxfrag = 0, curfrag = 1; 305 unsigned char maxfrag = 0, curfrag = 1;
304 struct i2o_dma buffer; 306 struct i2o_dma buffer;
305 struct i2o_message __iomem *msg; 307 struct i2o_message *msg;
306 u32 m;
307 unsigned int status = 0, swlen = 0, fragsize = 8192; 308 unsigned int status = 0, swlen = 0, fragsize = 8192;
308 struct i2o_controller *c; 309 struct i2o_controller *c;
309 int ret = 0; 310 int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
330 if (!c) 331 if (!c)
331 return -ENXIO; 332 return -ENXIO;
332 333
333 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 334 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
334 if (m == I2O_QUEUE_EMPTY) 335 if (IS_ERR(msg))
335 return -EBUSY; 336 return PTR_ERR(msg);
336 337
337 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { 338 if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
338 i2o_msg_nop(c, m); 339 i2o_msg_nop(c, msg);
339 return -ENOMEM; 340 return -ENOMEM;
340 } 341 }
341 342
342 writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); 343 msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
343 writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, 344 msg->u.head[1] =
344 &msg->u.head[1]); 345 cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
345 writel(i2o_config_driver.context, &msg->u.head[2]); 346 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
346 writel(0, &msg->u.head[3]); 347 msg->u.head[3] = cpu_to_le32(0);
347 writel((u32) kxfer.flags << 24 | (u32) kxfer. 348 msg->body[0] =
348 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, 349 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
349 &msg->body[0]); 350 sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
350 writel(swlen, &msg->body[1]); 351 msg->body[1] = cpu_to_le32(swlen);
351 writel(kxfer.sw_id, &msg->body[2]); 352 msg->body[2] = cpu_to_le32(kxfer.sw_id);
352 writel(0xD0000000 | fragsize, &msg->body[3]); 353 msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
353 writel(buffer.phys, &msg->body[4]); 354 msg->body[4] = cpu_to_le32(buffer.phys);
354 355
355 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); 356 osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
356 status = i2o_msg_post_wait_mem(c, m, 60, &buffer); 357 status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
357 358
358 if (status != I2O_POST_WAIT_OK) { 359 if (status != I2O_POST_WAIT_OK) {
359 if (status != -ETIMEDOUT) 360 if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
380 struct i2o_controller *c; 381 struct i2o_controller *c;
381 struct i2o_sw_xfer kxfer; 382 struct i2o_sw_xfer kxfer;
382 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; 383 struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
383 struct i2o_message __iomem *msg; 384 struct i2o_message *msg;
384 u32 m;
385 unsigned int swlen; 385 unsigned int swlen;
386 int token; 386 int token;
387 387
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
395 if (!c) 395 if (!c)
396 return -ENXIO; 396 return -ENXIO;
397 397
398 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 398 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
399 if (m == I2O_QUEUE_EMPTY) 399 if (IS_ERR(msg))
400 return -EBUSY; 400 return PTR_ERR(msg);
401 401
402 writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 402 msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
403 writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, 403 msg->u.head[1] =
404 &msg->u.head[1]); 404 cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
405 writel(i2o_config_driver.context, &msg->u.head[2]); 405 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
406 writel(0, &msg->u.head[3]); 406 msg->u.head[3] = cpu_to_le32(0);
407 writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, 407 msg->body[0] =
408 &msg->body[0]); 408 cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
409 writel(swlen, &msg->body[1]); 409 msg->body[1] = cpu_to_le32(swlen);
410 writel(kxfer.sw_id, &msg->body[2]); 410 msg->body[2] = cpu_to_le32(kxfer.sw_id);
411 411
412 token = i2o_msg_post_wait(c, m, 10); 412 token = i2o_msg_post_wait(c, msg, 10);
413 413
414 if (token != I2O_POST_WAIT_OK) { 414 if (token != I2O_POST_WAIT_OK) {
415 osm_info("swdel failed, DetailedStatus = %d\n", token); 415 osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
423{ 423{
424 int token; 424 int token;
425 int iop = (int)arg; 425 int iop = (int)arg;
426 struct i2o_message __iomem *msg; 426 struct i2o_message *msg;
427 u32 m;
428 struct i2o_controller *c; 427 struct i2o_controller *c;
429 428
430 c = i2o_find_iop(iop); 429 c = i2o_find_iop(iop);
431 if (!c) 430 if (!c)
432 return -ENXIO; 431 return -ENXIO;
433 432
434 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 433 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
435 if (m == I2O_QUEUE_EMPTY) 434 if (IS_ERR(msg))
436 return -EBUSY; 435 return PTR_ERR(msg);
437 436
438 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 437 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
439 writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, 438 msg->u.head[1] =
440 &msg->u.head[1]); 439 cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
441 writel(i2o_config_driver.context, &msg->u.head[2]); 440 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
442 writel(0, &msg->u.head[3]); 441 msg->u.head[3] = cpu_to_le32(0);
443 442
444 token = i2o_msg_post_wait(c, m, 10); 443 token = i2o_msg_post_wait(c, msg, 10);
445 444
446 if (token != I2O_POST_WAIT_OK) { 445 if (token != I2O_POST_WAIT_OK) {
447 osm_info("Can't validate configuration, ErrorStatus = %d\n", 446 osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
454 453
455static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) 454static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
456{ 455{
457 struct i2o_message __iomem *msg; 456 struct i2o_message *msg;
458 u32 m;
459 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; 457 struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
460 struct i2o_evt_id kdesc; 458 struct i2o_evt_id kdesc;
461 struct i2o_controller *c; 459 struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
474 if (!d) 472 if (!d)
475 return -ENODEV; 473 return -ENODEV;
476 474
477 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 475 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
478 if (m == I2O_QUEUE_EMPTY) 476 if (IS_ERR(msg))
479 return -EBUSY; 477 return PTR_ERR(msg);
480 478
481 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 479 msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
482 writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, 480 msg->u.head[1] =
483 &msg->u.head[1]); 481 cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
484 writel(i2o_config_driver.context, &msg->u.head[2]); 482 kdesc.tid);
485 writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); 483 msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
486 writel(kdesc.evt_mask, &msg->body[0]); 484 msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
485 msg->body[0] = cpu_to_le32(kdesc.evt_mask);
487 486
488 i2o_msg_post(c, m); 487 i2o_msg_post(c, msg);
489 488
490 return 0; 489 return 0;
491} 490}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
537 u32 sg_index = 0; 536 u32 sg_index = 0;
538 i2o_status_block *sb; 537 i2o_status_block *sb;
539 struct i2o_message *msg; 538 struct i2o_message *msg;
540 u32 m;
541 unsigned int iop; 539 unsigned int iop;
542 540
543 cmd = (struct i2o_cmd_passthru32 __user *)arg; 541 cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
553 return -ENXIO; 551 return -ENXIO;
554 } 552 }
555 553
556 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 554 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
557 555
558 sb = c->status_block.virt; 556 sb = c->status_block.virt;
559 557
@@ -585,19 +583,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
585 reply_size >>= 16; 583 reply_size >>= 16;
586 reply_size <<= 2; 584 reply_size <<= 2;
587 585
588 reply = kmalloc(reply_size, GFP_KERNEL); 586 reply = kzalloc(reply_size, GFP_KERNEL);
589 if (!reply) { 587 if (!reply) {
590 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 588 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
591 c->name); 589 c->name);
592 return -ENOMEM; 590 return -ENOMEM;
593 } 591 }
594 memset(reply, 0, reply_size);
595 592
596 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 593 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
597 594
598 writel(i2o_config_driver.context, &msg->u.s.icntxt);
599 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
600
601 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 595 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
602 if (sg_offset) { 596 if (sg_offset) {
603 struct sg_simple_element *sg; 597 struct sg_simple_element *sg;
@@ -631,7 +625,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
631 goto cleanup; 625 goto cleanup;
632 } 626 }
633 sg_size = sg[i].flag_count & 0xffffff; 627 sg_size = sg[i].flag_count & 0xffffff;
634 p = &(sg_list[sg_index++]); 628 p = &(sg_list[sg_index]);
635 /* Allocate memory for the transfer */ 629 /* Allocate memory for the transfer */
636 if (i2o_dma_alloc 630 if (i2o_dma_alloc
637 (&c->pdev->dev, p, sg_size, 631 (&c->pdev->dev, p, sg_size,
@@ -642,6 +636,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
642 rcode = -ENOMEM; 636 rcode = -ENOMEM;
643 goto sg_list_cleanup; 637 goto sg_list_cleanup;
644 } 638 }
639 sg_index++;
645 /* Copy in the user's SG buffer if necessary */ 640 /* Copy in the user's SG buffer if necessary */
646 if (sg[i]. 641 if (sg[i].
647 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { 642 flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
@@ -662,9 +657,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
662 } 657 }
663 } 658 }
664 659
665 rcode = i2o_msg_post_wait(c, m, 60); 660 rcode = i2o_msg_post_wait(c, msg, 60);
666 if (rcode) 661 if (rcode) {
662 reply[4] = ((u32) rcode) << 24;
667 goto sg_list_cleanup; 663 goto sg_list_cleanup;
664 }
668 665
669 if (sg_offset) { 666 if (sg_offset) {
670 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; 667 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
@@ -714,6 +711,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
714 } 711 }
715 } 712 }
716 713
714 sg_list_cleanup:
717 /* Copy back the reply to user space */ 715 /* Copy back the reply to user space */
718 if (reply_size) { 716 if (reply_size) {
719 // we wrote our own values for context - now restore the user supplied ones 717 // we wrote our own values for context - now restore the user supplied ones
@@ -731,7 +729,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
731 } 729 }
732 } 730 }
733 731
734 sg_list_cleanup:
735 for (i = 0; i < sg_index; i++) 732 for (i = 0; i < sg_index; i++)
736 i2o_dma_free(&c->pdev->dev, &sg_list[i]); 733 i2o_dma_free(&c->pdev->dev, &sg_list[i]);
737 734
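Moving the sg_list_cleanup: label above the reply copy-back changes the error path, not just the layout: when i2o_msg_post_wait() fails, the DetailedStatus is now stored into reply[4] and the reply buffer still reaches user space before the scatter-gather allocations are released. The resulting flow, sketched with the copy-back simplified (user_reply and the exact copy logic are stand-ins for the driver's bookkeeping):

    rcode = i2o_msg_post_wait(c, msg, 60);
    if (rcode) {
            /* encode DetailedStatus so user space learns why it failed */
            reply[4] = ((u32) rcode) << 24;
            goto sg_list_cleanup;
    }
    /* ... SGL copy-back for successful transfers ... */

    sg_list_cleanup:
    /* reply copy-back now precedes the buffer teardown */
    if (reply_size && copy_to_user(user_reply, reply, reply_size))
            rcode = -EFAULT;
    for (i = 0; i < sg_index; i++)
            i2o_dma_free(&c->pdev->dev, &sg_list[i]);
    kfree(reply);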
@@ -780,8 +777,7 @@ static int i2o_cfg_passthru(unsigned long arg)
780 u32 i = 0; 777 u32 i = 0;
781 void *p = NULL; 778 void *p = NULL;
782 i2o_status_block *sb; 779 i2o_status_block *sb;
783 struct i2o_message __iomem *msg; 780 struct i2o_message *msg;
784 u32 m;
785 unsigned int iop; 781 unsigned int iop;
786 782
787 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) 783 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +789,7 @@ static int i2o_cfg_passthru(unsigned long arg)
793 return -ENXIO; 789 return -ENXIO;
794 } 790 }
795 791
796 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 792 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
797 793
798 sb = c->status_block.virt; 794 sb = c->status_block.virt;
799 795
@@ -820,19 +816,15 @@ static int i2o_cfg_passthru(unsigned long arg)
820 reply_size >>= 16; 816 reply_size >>= 16;
821 reply_size <<= 2; 817 reply_size <<= 2;
822 818
823 reply = kmalloc(reply_size, GFP_KERNEL); 819 reply = kzalloc(reply_size, GFP_KERNEL);
824 if (!reply) { 820 if (!reply) {
825 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 821 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
826 c->name); 822 c->name);
827 return -ENOMEM; 823 return -ENOMEM;
828 } 824 }
829 memset(reply, 0, reply_size);
830 825
831 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 826 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
832 827
833 writel(i2o_config_driver.context, &msg->u.s.icntxt);
834 writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
835
836 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); 828 memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
837 if (sg_offset) { 829 if (sg_offset) {
838 struct sg_simple_element *sg; 830 struct sg_simple_element *sg;
@@ -894,9 +886,11 @@ static int i2o_cfg_passthru(unsigned long arg)
894 } 886 }
895 } 887 }
896 888
897 rcode = i2o_msg_post_wait(c, m, 60); 889 rcode = i2o_msg_post_wait(c, msg, 60);
898 if (rcode) 890 if (rcode) {
891 reply[4] = ((u32) rcode) << 24;
899 goto sg_list_cleanup; 892 goto sg_list_cleanup;
893 }
900 894
901 if (sg_offset) { 895 if (sg_offset) {
902 u32 msg[128]; 896 u32 msg[128];
@@ -946,6 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg)
946 } 940 }
947 } 941 }
948 942
943 sg_list_cleanup:
949 /* Copy back the reply to user space */ 944 /* Copy back the reply to user space */
950 if (reply_size) { 945 if (reply_size) {
951 // we wrote our own values for context - now restore the user supplied ones 946 // we wrote our own values for context - now restore the user supplied ones
@@ -962,7 +957,6 @@ static int i2o_cfg_passthru(unsigned long arg)
962 } 957 }
963 } 958 }
964 959
965 sg_list_cleanup:
966 for (i = 0; i < sg_index; i++) 960 for (i = 0; i < sg_index; i++)
967 kfree(sg_list[i]); 961 kfree(sg_list[i]);
968 962
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
index 561d63304d7e..6502b817df58 100644
--- a/drivers/message/i2o/i2o_lan.h
+++ b/drivers/message/i2o/i2o_lan.h
@@ -103,14 +103,14 @@
103#define I2O_LAN_DSC_SUSPENDED 0x11 103#define I2O_LAN_DSC_SUSPENDED 0x11
104 104
105struct i2o_packet_info { 105struct i2o_packet_info {
106 u32 offset : 24; 106 u32 offset:24;
107 u32 flags : 8; 107 u32 flags:8;
108 u32 len : 24; 108 u32 len:24;
109 u32 status : 8; 109 u32 status:8;
110}; 110};
111 111
112struct i2o_bucket_descriptor { 112struct i2o_bucket_descriptor {
113 u32 context; /* FIXME: 64bit support */ 113 u32 context; /* FIXME: 64bit support */
114 struct i2o_packet_info packet_info[1]; 114 struct i2o_packet_info packet_info[1];
115}; 115};
116 116
@@ -127,14 +127,14 @@ struct i2o_lan_local {
127 u8 unit; 127 u8 unit;
128 struct i2o_device *i2o_dev; 128 struct i2o_device *i2o_dev;
129 129
130 struct fddi_statistics stats; /* see also struct net_device_stats */ 130 struct fddi_statistics stats; /* see also struct net_device_stats */
131 unsigned short (*type_trans)(struct sk_buff *, struct net_device *); 131 unsigned short (*type_trans) (struct sk_buff *, struct net_device *);
132 atomic_t buckets_out; /* nbr of unused buckets on DDM */ 132 atomic_t buckets_out; /* nbr of unused buckets on DDM */
133 atomic_t tx_out; /* outstanding TXes */ 133 atomic_t tx_out; /* outstanding TXes */
134 u8 tx_count; /* packets in one TX message frame */ 134 u8 tx_count; /* packets in one TX message frame */
135 u16 tx_max_out; /* DDM's Tx queue len */ 135 u16 tx_max_out; /* DDM's Tx queue len */
136 u8 sgl_max; /* max SGLs in one message frame */ 136 u8 sgl_max; /* max SGLs in one message frame */
137 u32 m; /* IOP address of the batch msg frame */ 137 u32 m; /* IOP address of the batch msg frame */
138 138
139 struct work_struct i2o_batch_send_task; 139 struct work_struct i2o_batch_send_task;
140 int send_active; 140 int send_active;
@@ -144,16 +144,16 @@ struct i2o_lan_local {
144 144
145 spinlock_t tx_lock; 145 spinlock_t tx_lock;
146 146
147 u32 max_size_mc_table; /* max number of multicast addresses */ 147 u32 max_size_mc_table; /* max number of multicast addresses */
148 148
149 /* LAN OSM configurable parameters are here: */ 149 /* LAN OSM configurable parameters are here: */
150 150
151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */ 151 u16 max_buckets_out; /* max nbr of buckets to send to DDM */
152 u16 bucket_thresh; /* send more when this many used */ 152 u16 bucket_thresh; /* send more when this many used */
153 u16 rx_copybreak; 153 u16 rx_copybreak;
154 154
155 u8 tx_batch_mode; /* Set when using batch mode sends */ 155 u8 tx_batch_mode; /* Set when using batch mode sends */
156 u32 i2o_event_mask; /* To turn on interesting event flags */ 156 u32 i2o_event_mask; /* To turn on interesting event flags */
157}; 157};
158 158
159#endif /* _I2O_LAN_H */ 159#endif /* _I2O_LAN_H */
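The i2o_lan.h changes are purely whitespace, but the structure they touch deserves a note: each descriptor word is split 24/8 with C bitfields. Bitfield ordering is compiler- and ABI-dependent, which is why hardware-facing code often prefers explicit shifts and masks; an equivalent formulation of the same 24/8 split, assuming LSB-first allocation as on little-endian Linux ABIs:

    u32 word = le32_to_cpu(raw);        /* 'raw' is the on-the-wire word */
    u32 offset = word & 0xffffff;       /* low 24 bits */
    u8  flags  = word >> 24;            /* high 8 bits */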
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index d559a1758363..2a0c42b8cda5 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -28,7 +28,7 @@
28 */ 28 */
29 29
30#define OSM_NAME "proc-osm" 30#define OSM_NAME "proc-osm"
31#define OSM_VERSION "1.145" 31#define OSM_VERSION "1.316"
32#define OSM_DESCRIPTION "I2O ProcFS OSM" 32#define OSM_DESCRIPTION "I2O ProcFS OSM"
33 33
34#define I2O_MAX_MODULES 4 34#define I2O_MAX_MODULES 4
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c3933b..f9e5a23697a1 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -70,7 +70,7 @@
70#include <scsi/sg_request.h> 70#include <scsi/sg_request.h>
71 71
72#define OSM_NAME "scsi-osm" 72#define OSM_NAME "scsi-osm"
73#define OSM_VERSION "1.282" 73#define OSM_VERSION "1.316"
74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" 74#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
75 75
76static struct i2o_driver i2o_scsi_driver; 76static struct i2o_driver i2o_scsi_driver;
@@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
113 113
114 list_for_each_entry(i2o_dev, &c->devices, list) 114 list_for_each_entry(i2o_dev, &c->devices, list)
115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 115 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
116 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 116 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
117 && (type == 0x01)) /* SCSI bus */ 117 && (type == 0x01)) /* SCSI bus */
118 max_channel++; 118 max_channel++;
119 } 119 }
@@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
146 i = 0; 146 i = 0;
147 list_for_each_entry(i2o_dev, &c->devices, list) 147 list_for_each_entry(i2o_dev, &c->devices, list)
148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { 148 if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
149 if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) 149 if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
150 && (type == 0x01)) /* only SCSI bus */ 150 && (type == 0x01)) /* only SCSI bus */
151 i2o_shost->channel[i++] = i2o_dev; 151 i2o_shost->channel[i++] = i2o_dev;
152 152
@@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev)
238 u8 type; 238 u8 type;
239 struct i2o_device *d = i2o_shost->channel[0]; 239 struct i2o_device *d = i2o_shost->channel[0];
240 240
241 if (i2o_parm_field_get(d, 0x0000, 0, &type, 1) 241 if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1)
242 && (type == 0x01)) /* SCSI bus */ 242 && (type == 0x01)) /* SCSI bus */
243 if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { 243 if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
244 channel = 0; 244 channel = 0;
245 if (i2o_dev->lct_data.class_id == 245 if (i2o_dev->lct_data.class_id ==
246 I2O_CLASS_RANDOM_BLOCK_STORAGE) 246 I2O_CLASS_RANDOM_BLOCK_STORAGE)
247 lun = i2o_shost->lun++; 247 lun =
248 cpu_to_le64(i2o_shost->
249 lun++);
248 else 250 else
249 lun = 0; 251 lun = 0;
250 } 252 }
@@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev)
253 break; 255 break;
254 256
255 case I2O_CLASS_SCSI_PERIPHERAL: 257 case I2O_CLASS_SCSI_PERIPHERAL:
256 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0) 258 if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4))
257 return -EFAULT; 259 return -EFAULT;
258 260
259 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0) 261 if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8))
260 return -EFAULT; 262 return -EFAULT;
261 263
262 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); 264 parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
@@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev)
281 return -EFAULT; 283 return -EFAULT;
282 } 284 }
283 285
284 if (id >= scsi_host->max_id) { 286 if (le32_to_cpu(id) >= scsi_host->max_id) {
285 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, 287 osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)",
286 scsi_host->max_id); 288 le32_to_cpu(id), scsi_host->max_id);
287 return -EFAULT; 289 return -EFAULT;
288 } 290 }
289 291
290 if (lun >= scsi_host->max_lun) { 292 if (le64_to_cpu(lun) >= scsi_host->max_lun) {
291 osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", 293 osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
292 (unsigned int)lun, scsi_host->max_lun); 294 (long unsigned int)le64_to_cpu(lun),
295 scsi_host->max_lun);
293 return -EFAULT; 296 return -EFAULT;
294 } 297 }
295 298
296 scsi_dev = 299 scsi_dev =
297 __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); 300 __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id),
301 le64_to_cpu(lun), i2o_dev);
298 302
299 if (IS_ERR(scsi_dev)) { 303 if (IS_ERR(scsi_dev)) {
300 osm_warn("can not add SCSI device %03x\n", 304 osm_warn("can not add SCSI device %03x\n",
@@ -305,8 +309,9 @@ static int i2o_scsi_probe(struct device *dev)
305 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, 309 sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
306 "scsi"); 310 "scsi");
307 311
308 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", 312 osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
309 i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); 313 i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
314 (long unsigned int)le64_to_cpu(lun));
310 315
311 return 0; 316 return 0;
312}; 317};
@@ -510,8 +515,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
510 struct i2o_controller *c; 515 struct i2o_controller *c;
511 struct i2o_device *i2o_dev; 516 struct i2o_device *i2o_dev;
512 int tid; 517 int tid;
513 struct i2o_message __iomem *msg; 518 struct i2o_message *msg;
514 u32 m;
515 /* 519 /*
516 * ENABLE_DISCONNECT 520 * ENABLE_DISCONNECT
517 * SIMPLE_TAG 521 * SIMPLE_TAG
@@ -519,7 +523,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
519 */ 523 */
520 u32 scsi_flags = 0x20a00000; 524 u32 scsi_flags = 0x20a00000;
521 u32 sgl_offset; 525 u32 sgl_offset;
522 u32 __iomem *mptr; 526 u32 *mptr;
523 u32 cmd = I2O_CMD_SCSI_EXEC << 24; 527 u32 cmd = I2O_CMD_SCSI_EXEC << 24;
524 int rc = 0; 528 int rc = 0;
525 529
@@ -576,8 +580,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
576 * throw it back to the scsi layer 580 * throw it back to the scsi layer
577 */ 581 */
578 582
579 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 583 msg = i2o_msg_get(c);
580 if (m == I2O_QUEUE_EMPTY) { 584 if (IS_ERR(msg)) {
581 rc = SCSI_MLQUEUE_HOST_BUSY; 585 rc = SCSI_MLQUEUE_HOST_BUSY;
582 goto exit; 586 goto exit;
583 } 587 }
@@ -617,16 +621,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
617 if (sgl_offset == SGL_OFFSET_10) 621 if (sgl_offset == SGL_OFFSET_10)
618 sgl_offset = SGL_OFFSET_12; 622 sgl_offset = SGL_OFFSET_12;
619 cmd = I2O_CMD_PRIVATE << 24; 623 cmd = I2O_CMD_PRIVATE << 24;
620 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 624 *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
621 writel(adpt_flags | tid, mptr++); 625 *mptr++ = cpu_to_le32(adpt_flags | tid);
622 } 626 }
623#endif 627#endif
624 628
625 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 629 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
626 writel(i2o_scsi_driver.context, &msg->u.s.icntxt); 630 msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
627 631
628 /* We want the SCSI control block back */ 632 /* We want the SCSI control block back */
629 writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); 633 msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
630 634
631 /* LSI_920_PCI_QUIRK 635 /* LSI_920_PCI_QUIRK
632 * 636 *
@@ -649,15 +653,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
649 } 653 }
650 */ 654 */
651 655
652 writel(scsi_flags | SCpnt->cmd_len, mptr++); 656 *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
653 657
654 /* Write SCSI command into the message - always 16 byte block */ 658 /* Write SCSI command into the message - always 16 byte block */
655 memcpy_toio(mptr, SCpnt->cmnd, 16); 659 memcpy(mptr, SCpnt->cmnd, 16);
656 mptr += 4; 660 mptr += 4;
657 661
658 if (sgl_offset != SGL_OFFSET_0) { 662 if (sgl_offset != SGL_OFFSET_0) {
659 /* write size of data addressed by SGL */ 663 /* write size of data addressed by SGL */
660 writel(SCpnt->request_bufflen, mptr++); 664 *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
661 665
662 /* Now fill in the SGList and command */ 666 /* Now fill in the SGList and command */
663 if (SCpnt->use_sg) { 667 if (SCpnt->use_sg) {
@@ -676,11 +680,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
676 } 680 }
677 681
678 /* Stick the headers on */ 682 /* Stick the headers on */
679 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, 683 msg->u.head[0] =
680 &msg->u.head[0]); 684 cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
681 685
682 /* Queue the message */ 686 /* Queue the message */
683 i2o_msg_post(c, m); 687 i2o_msg_post(c, msg);
684 688
685 osm_debug("Issued %ld\n", SCpnt->serial_number); 689 osm_debug("Issued %ld\n", SCpnt->serial_number);
686 690
@@ -688,7 +692,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
688 692
689 nomem: 693 nomem:
690 rc = -ENOMEM; 694 rc = -ENOMEM;
691 i2o_msg_nop(c, m); 695 i2o_msg_nop(c, msg);
692 696
693 exit: 697 exit:
694 return rc; 698 return rc;
@@ -709,8 +713,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
709{ 713{
710 struct i2o_device *i2o_dev; 714 struct i2o_device *i2o_dev;
711 struct i2o_controller *c; 715 struct i2o_controller *c;
712 struct i2o_message __iomem *msg; 716 struct i2o_message *msg;
713 u32 m;
714 int tid; 717 int tid;
715 int status = FAILED; 718 int status = FAILED;
716 719
@@ -720,16 +723,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
720 c = i2o_dev->iop; 723 c = i2o_dev->iop;
721 tid = i2o_dev->lct_data.tid; 724 tid = i2o_dev->lct_data.tid;
722 725
723 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); 726 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
724 if (m == I2O_QUEUE_EMPTY) 727 if (IS_ERR(msg))
725 return SCSI_MLQUEUE_HOST_BUSY; 728 return SCSI_MLQUEUE_HOST_BUSY;
726 729
727 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); 730 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
728 writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, 731 msg->u.head[1] =
729 &msg->u.head[1]); 732 cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
730 writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); 733 msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
731 734
732 if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) 735 if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
733 status = SUCCESS; 736 status = SUCCESS;
734 737
735 return status; 738 return status;
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb53258842e..492167446936 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -32,7 +32,7 @@
32#include "core.h" 32#include "core.h"
33 33
34#define OSM_NAME "i2o" 34#define OSM_NAME "i2o"
35#define OSM_VERSION "1.288" 35#define OSM_VERSION "1.325"
36#define OSM_DESCRIPTION "I2O subsystem" 36#define OSM_DESCRIPTION "I2O subsystem"
37 37
38/* global I2O controller list */ 38/* global I2O controller list */
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
47static int i2o_hrt_get(struct i2o_controller *c); 47static int i2o_hrt_get(struct i2o_controller *c);
48 48
49/** 49/**
50 * i2o_msg_nop - Returns a message which is not used
51 * @c: I2O controller from which the message was created
52 * @m: message which should be returned
53 *
54 * If you fetch a message via i2o_msg_get, and can't use it, you must
55 * return the message with this function. Otherwise the message frame
56 * is lost.
57 */
58void i2o_msg_nop(struct i2o_controller *c, u32 m)
59{
60 struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
61
62 writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
63 writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
64 &msg->u.head[1]);
65 writel(0, &msg->u.head[2]);
66 writel(0, &msg->u.head[3]);
67 i2o_msg_post(c, m);
68};
69
70/**
71 * i2o_msg_get_wait - obtain an I2O message from the IOP 50 * i2o_msg_get_wait - obtain an I2O message from the IOP
72 * @c: I2O controller 51 * @c: I2O controller
73 * @msg: pointer to an I2O message pointer 52 * @msg: pointer to an I2O message pointer
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
81 * address from the read port (see the i2o spec). If no message is 60 * address from the read port (see the i2o spec). If no message is
82 * available returns I2O_QUEUE_EMPTY and msg is left untouched. 61 * available returns I2O_QUEUE_EMPTY and msg is left untouched.
83 */ 62 */
84u32 i2o_msg_get_wait(struct i2o_controller *c, 63struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
85 struct i2o_message __iomem ** msg, int wait)
86{ 64{
87 unsigned long timeout = jiffies + wait * HZ; 65 unsigned long timeout = jiffies + wait * HZ;
88 u32 m; 66 struct i2o_message *msg;
89 67
90 while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { 68 while (IS_ERR(msg = i2o_msg_get(c))) {
91 if (time_after(jiffies, timeout)) { 69 if (time_after(jiffies, timeout)) {
92 osm_debug("%s: Timeout waiting for message frame.\n", 70 osm_debug("%s: Timeout waiting for message frame.\n",
93 c->name); 71 c->name);
94 return I2O_QUEUE_EMPTY; 72 return ERR_PTR(-ETIMEDOUT);
95 } 73 }
96 schedule_timeout_uninterruptible(1); 74 schedule_timeout_uninterruptible(1);
97 } 75 }
98 76
99 return m; 77 return msg;
100}; 78};
101 79
102#if BITS_PER_LONG == 64 80#if BITS_PER_LONG == 64
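With the rewritten helper, the whole request path across the OSMs follows one shape: obtain a frame (ERR_PTR on timeout), fill the header and body with cpu_to_le32() stores, then post. The pattern, assembled from the hunks above:

    struct i2o_message *msg;
    int rc;

    msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
    if (IS_ERR(msg))
            return PTR_ERR(msg);        /* typically -ETIMEDOUT */

    msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
    msg->u.head[1] = cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
                                 ADAPTER_TID);

    rc = i2o_msg_post_wait(c, msg, 240);  /* posts and waits for the reply */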
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
  */
 static int i2o_iop_quiesce(struct i2o_controller *c)
 {
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	i2o_status_block *sb = c->status_block.virt;
 	int rc;
 
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
 	    (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
 		return 0;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
+	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
 
 	/* Long timeout needed for quiesce if lots of devices */
-	if ((rc = i2o_msg_post_wait(c, m, 240)))
+	if ((rc = i2o_msg_post_wait(c, msg, 240)))
 		osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
 	else
 		osm_debug("%s: Quiesced.\n", c->name);
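Message frames now live in kernel memory drawn from a mempool instead of controller MMIO space, so the MMIO accessor writel() becomes a plain little-endian store. A sketch of the before/after, assuming the head[] words are declared __le32 in the reworked struct i2o_message:

	/* Old: the frame is in PCI MMIO space, so each word needs writel(). */
	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);

	/* New: the frame is ordinary kernel memory.  cpu_to_le32() is a
	 * no-op on little-endian hosts and a byte swap on big-endian
	 * ones, preserving the layout the IOP expects on the wire. */
	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);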
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
  */
 static int i2o_iop_enable(struct i2o_controller *c)
 {
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	i2o_status_block *sb = c->status_block.virt;
 	int rc;
 
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
 	if (sb->iop_state != ADAPTER_STATE_READY)
 		return -EINVAL;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
+	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
 
 	/* How long of a timeout do we need? */
-	if ((rc = i2o_msg_post_wait(c, m, 240)))
+	if ((rc = i2o_msg_post_wait(c, msg, 240)))
 		osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
 	else
 		osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
  */
 static int i2o_iop_clear(struct i2o_controller *c)
 {
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	int rc;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
 	/* Quiesce all IOPs first */
 	i2o_iop_quiesce_all();
 
-	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
+	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
 
-	if ((rc = i2o_msg_post_wait(c, m, 30)))
+	if ((rc = i2o_msg_post_wait(c, msg, 30)))
 		osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
 	else
 		osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
  * Clear and (re)initialize IOP's outbound queue and post the message
  * frames to the IOP.
  *
- * Returns 0 on success or a negative errno code on failure.
+ * Returns 0 on success or negative error code on failure.
  */
 static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
 {
-	volatile u8 *status = c->status.virt;
 	u32 m;
-	struct i2o_message __iomem *msg;
+	volatile u8 *status = c->status.virt;
+	struct i2o_message *msg;
 	ulong timeout;
 	int i;
 
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
 
 	memset(c->status.virt, 0, 4);
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
-	writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
-	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
-	writel(0x00000000, &msg->u.s.tcntxt);
-	writel(PAGE_SIZE, &msg->body[0]);
+	msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
+	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+	msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+	msg->body[0] = cpu_to_le32(PAGE_SIZE);
 	/* Outbound msg frame size in words and Initcode */
-	writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);
-	writel(0xd0000004, &msg->body[2]);
-	writel(i2o_dma_low(c->status.phys), &msg->body[3]);
-	writel(i2o_dma_high(c->status.phys), &msg->body[4]);
+	msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
+	msg->body[2] = cpu_to_le32(0xd0000004);
+	msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
+	msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
 
-	i2o_msg_post(c, m);
+	i2o_msg_post(c, msg);
 
 	timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
 	while (*status <= I2O_CMD_IN_PROGRESS) {
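After posting OUTBOUND_INIT the function spins on a status byte that the IOP writes back via DMA; the loop opened by the last context line above follows the usual jiffies-deadline idiom. A compressed sketch of that idiom (not the literal function tail, which lies outside this hunk):

	volatile u8 *status = c->status.virt;	/* IOP writes here via DMA */
	unsigned long timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;

	while (*status <= I2O_CMD_IN_PROGRESS) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;		/* IOP never answered */
		schedule_timeout_uninterruptible(1);	/* sleep one tick */
	}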
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
 static int i2o_iop_reset(struct i2o_controller *c)
 {
 	volatile u8 *status = c->status.virt;
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	unsigned long timeout;
 	i2o_status_block *sb = c->status_block.virt;
 	int rc = 0;
 
 	osm_debug("%s: Resetting controller\n", c->name);
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
 	memset(c->status_block.virt, 0, 8);
 
 	/* Quiesce all IOPs first */
 	i2o_iop_quiesce_all();
 
-	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
-	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
-	writel(0, &msg->u.s.tcntxt);	//FIXME: use reasonable transaction context
-	writel(0, &msg->body[0]);
-	writel(0, &msg->body[1]);
-	writel(i2o_dma_low(c->status.phys), &msg->body[2]);
-	writel(i2o_dma_high(c->status.phys), &msg->body[3]);
+	msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
+	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+	msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+	msg->body[0] = cpu_to_le32(0x00000000);
+	msg->body[1] = cpu_to_le32(0x00000000);
+	msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
+	msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
 
-	i2o_msg_post(c, m);
+	i2o_msg_post(c, msg);
 
 	/* Wait for a reply */
 	timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
 		osm_debug("%s: Reset in progress, waiting for reboot...\n",
 			  c->name);
 
-		m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
-		while (m == I2O_QUEUE_EMPTY) {
+		while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
 			if (time_after(jiffies, timeout)) {
 				osm_err("%s: IOP reset timeout.\n", c->name);
-				rc = -ETIMEDOUT;
+				rc = PTR_ERR(msg);
 				goto exit;
 			}
 			schedule_timeout_uninterruptible(1);
-
-			m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
 		}
-		i2o_msg_nop(c, m);
+		i2o_msg_nop(c, msg);
 
 		/* from here all quiesce commands are safe */
 		c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
  */
 static int i2o_iop_systab_set(struct i2o_controller *c)
 {
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	i2o_status_block *sb = c->status_block.virt;
 	struct device *dev = &c->pdev->dev;
 	struct resource *root;
@@ -735,41 +710,38 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
 		}
 	}
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
 	i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
 					 PCI_DMA_TODEVICE);
 	if (!i2o_systab.phys) {
-		i2o_msg_nop(c, m);
+		i2o_msg_nop(c, msg);
 		return -ENOMEM;
 	}
 
-	writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
-	writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
+	msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
 
 	/*
 	 * Provide three SGL-elements:
 	 * System table (SysTab), Private memory space declaration and
 	 * Private i/o space declaration
-	 *
-	 * FIXME: is this still true?
-	 * Nasty one here. We can't use dma_alloc_coherent to send the
-	 * same table to everyone. We have to go remap it for them all
 	 */
 
-	writel(c->unit + 2, &msg->body[0]);
-	writel(0, &msg->body[1]);
-	writel(0x54000000 | i2o_systab.len, &msg->body[2]);
-	writel(i2o_systab.phys, &msg->body[3]);
-	writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
-	writel(sb->current_mem_base, &msg->body[5]);
-	writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
-	writel(sb->current_io_base, &msg->body[6]);
+	msg->body[0] = cpu_to_le32(c->unit + 2);
+	msg->body[1] = cpu_to_le32(0x00000000);
+	msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
+	msg->body[3] = cpu_to_le32(i2o_systab.phys);
+	msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
+	msg->body[5] = cpu_to_le32(sb->current_mem_base);
+	msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
+	msg->body[6] = cpu_to_le32(sb->current_io_base);
 
-	rc = i2o_msg_post_wait(c, m, 120);
+	rc = i2o_msg_post_wait(c, msg, 120);
 
 	dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
 			 PCI_DMA_TODEVICE);
@@ -780,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
 	else
 		osm_debug("%s: SysTab set.\n", c->name);
 
-	i2o_status_get(c);	// Entered READY state
-
 	return rc;
 }
 
@@ -791,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
  *
  * Send the system table and enable the I2O controller.
  *
- * Returns 0 on success or negativer error code on failure.
+ * Returns 0 on success or negative error code on failure.
  */
 static int i2o_iop_online(struct i2o_controller *c)
 {
@@ -830,7 +800,6 @@ void i2o_iop_remove(struct i2o_controller *c)
 	list_for_each_entry_safe(dev, tmp, &c->devices, list)
 		i2o_device_remove(dev);
 
-	class_device_unregister(c->classdev);
 	device_del(&c->device);
 
 	/* Ask the IOP to switch to RESET state */
@@ -869,12 +838,11 @@ static int i2o_systab_build(void)
 	i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
 	    sizeof(struct i2o_sys_tbl_entry);
 
-	systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL);
+	systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL);
 	if (!systab) {
 		osm_err("unable to allocate memory for System Table\n");
 		return -ENOMEM;
 	}
-	memset(systab, 0, i2o_systab.len);
 
 	systab->version = I2OVERSION;
 	systab->change_ind = change_ind + 1;
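kzalloc() is exactly kmalloc() followed by a zeroing memset(), which is why the hunk above can drop the separate memset() line without changing behaviour:

	/* The two-step form removed above ... */
	systab = kmalloc(i2o_systab.len, GFP_KERNEL);
	if (systab)
		memset(systab, 0, i2o_systab.len);

	/* ... is equivalent to the single call added: */
	systab = kzalloc(i2o_systab.len, GFP_KERNEL);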
@@ -952,30 +920,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
  */
 int i2o_status_get(struct i2o_controller *c)
 {
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	volatile u8 *status_block;
 	unsigned long timeout;
 
 	status_block = (u8 *) c->status_block.virt;
 	memset(c->status_block.virt, 0, sizeof(i2o_status_block));
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
-	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
-	writel(0, &msg->u.s.tcntxt);	// FIXME: use resonable transaction context
-	writel(0, &msg->body[0]);
-	writel(0, &msg->body[1]);
-	writel(i2o_dma_low(c->status_block.phys), &msg->body[2]);
-	writel(i2o_dma_high(c->status_block.phys), &msg->body[3]);
-	writel(sizeof(i2o_status_block), &msg->body[4]);	/* always 88 bytes */
+	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
+	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+	msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+	msg->body[0] = cpu_to_le32(0x00000000);
+	msg->body[1] = cpu_to_le32(0x00000000);
+	msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
+	msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
+	msg->body[4] = cpu_to_le32(sizeof(i2o_status_block));	/* always 88 bytes */
 
-	i2o_msg_post(c, m);
+	i2o_msg_post(c, msg);
 
 	/* Wait for a reply */
 	timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1002,7 +970,7 @@ int i2o_status_get(struct i2o_controller *c)
  * The HRT contains information about possible hidden devices but is
 * mostly useless to us.
 *
- * Returns 0 on success or negativer error code on failure.
+ * Returns 0 on success or negative error code on failure.
 */
 static int i2o_hrt_get(struct i2o_controller *c)
 {
@@ -1013,20 +981,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
 	struct device *dev = &c->pdev->dev;
 
 	for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
-		struct i2o_message __iomem *msg;
-		u32 m;
+		struct i2o_message *msg;
 
-		m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-		if (m == I2O_QUEUE_EMPTY)
-			return -ETIMEDOUT;
+		msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+		if (IS_ERR(msg))
+			return PTR_ERR(msg);
 
-		writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
-		writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
-		       &msg->u.head[1]);
-		writel(0xd0000000 | c->hrt.len, &msg->body[0]);
-		writel(c->hrt.phys, &msg->body[1]);
+		msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
+		msg->u.head[1] =
+		    cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
+				ADAPTER_TID);
+		msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
+		msg->body[1] = cpu_to_le32(c->hrt.phys);
 
-		rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
+		rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
 
 		if (rc < 0) {
 			osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1051,15 +1019,6 @@ static int i2o_hrt_get(struct i2o_controller *c)
 }
 
 /**
- * i2o_iop_free - Free the i2o_controller struct
- * @c: I2O controller to free
- */
-void i2o_iop_free(struct i2o_controller *c)
-{
-	kfree(c);
-};
-
-/**
  * i2o_iop_release - release the memory for a I2O controller
  * @dev: I2O controller which should be released
 *
@@ -1073,14 +1032,11 @@ static void i2o_iop_release(struct device *dev)
 	i2o_iop_free(c);
 };
 
-/* I2O controller class */
-static struct class *i2o_controller_class;
-
 /**
  * i2o_iop_alloc - Allocate and initialize a i2o_controller struct
 *
 * Allocate the necessary memory for a i2o_controller struct and
- * initialize the lists.
+ * initialize the lists and message mempool.
 *
 * Returns a pointer to the I2O controller or a negative error code on
 * failure.
@@ -1089,20 +1045,29 @@ struct i2o_controller *i2o_iop_alloc(void)
 {
 	static int unit = 0;	/* 0 and 1 are NULL IOP and Local Host */
 	struct i2o_controller *c;
+	char poolname[32];
 
-	c = kmalloc(sizeof(*c), GFP_KERNEL);
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c) {
 		osm_err("i2o: Insufficient memory to allocate a I2O controller."
 			"\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	memset(c, 0, sizeof(*c));
+
+	c->unit = unit++;
+	sprintf(c->name, "iop%d", c->unit);
+
+	snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
+	if (i2o_pool_alloc
+	    (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+	     I2O_MSG_INPOOL_MIN)) {
+		kfree(c);
+		return ERR_PTR(-ENOMEM);
+	};
 
 	INIT_LIST_HEAD(&c->devices);
 	spin_lock_init(&c->lock);
 	init_MUTEX(&c->lct_lock);
-	c->unit = unit++;
-	sprintf(c->name, "iop%d", c->unit);
 
 	device_initialize(&c->device);
 
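Note the ordering the hunk establishes: the controller name is assigned before the pool is created (the pool name embeds it), and the pool is created before any list heads or locks are initialized, so the only unwind needed on failure is a bare kfree(). A sketch of that pattern, assuming the i2o_pool_alloc() helper introduced elsewhere in this patch series:

	c = kzalloc(sizeof(*c), GFP_KERNEL);	/* zeroed, as in the hunk */
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->unit = unit++;
	sprintf(c->name, "iop%d", c->unit);	/* needed by the pool name */

	snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
	if (i2o_pool_alloc(&c->in_msg, poolname,
			   I2O_INBOUND_MSG_FRAME_SIZE * 4, I2O_MSG_INPOOL_MIN)) {
		kfree(c);			/* nothing else to undo yet */
		return ERR_PTR(-ENOMEM);
	}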
@@ -1137,36 +1102,29 @@ int i2o_iop_add(struct i2o_controller *c)
 		goto iop_reset;
 	}
 
-	c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0),
-					  &c->device, "iop%d", c->unit);
-	if (IS_ERR(c->classdev)) {
-		osm_err("%s: could not add controller class\n", c->name);
-		goto device_del;
-	}
-
 	osm_info("%s: Activating I2O controller...\n", c->name);
 	osm_info("%s: This may take a few minutes if there are many devices\n",
 		 c->name);
 
 	if ((rc = i2o_iop_activate(c))) {
 		osm_err("%s: could not activate controller\n", c->name);
-		goto class_del;
+		goto device_del;
 	}
 
 	osm_debug("%s: building sys table...\n", c->name);
 
 	if ((rc = i2o_systab_build()))
-		goto class_del;
+		goto device_del;
 
 	osm_debug("%s: online controller...\n", c->name);
 
 	if ((rc = i2o_iop_online(c)))
-		goto class_del;
+		goto device_del;
 
 	osm_debug("%s: getting LCT...\n", c->name);
 
 	if ((rc = i2o_exec_lct_get(c)))
-		goto class_del;
+		goto device_del;
 
 	list_add(&c->list, &i2o_controllers);
 
@@ -1176,9 +1134,6 @@ int i2o_iop_add(struct i2o_controller *c)
 
 	return 0;
 
-      class_del:
-	class_device_unregister(c->classdev);
-
       device_del:
 	device_del(&c->device);
 
@@ -1199,28 +1154,27 @@ int i2o_iop_add(struct i2o_controller *c)
  * is waited for, or expected. If you do not want further notifications,
 * call the i2o_event_register again with a evt_mask of 0.
 *
- * Returns 0 on success or -ETIMEDOUT if no message could be fetched for
- * sending the request.
+ * Returns 0 on success or negative error code on failure.
 */
 int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
 		       int tcntxt, u32 evt_mask)
 {
 	struct i2o_controller *c = dev->iop;
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -ETIMEDOUT;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data.
-	       tid, &msg->u.head[1]);
-	writel(drv->context, &msg->u.s.icntxt);
-	writel(tcntxt, &msg->u.s.tcntxt);
-	writel(evt_mask, &msg->body[0]);
+	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
+			lct_data.tid);
+	msg->u.s.icntxt = cpu_to_le32(drv->context);
+	msg->u.s.tcntxt = cpu_to_le32(tcntxt);
+	msg->body[0] = cpu_to_le32(evt_mask);
 
-	i2o_msg_post(c, m);
+	i2o_msg_post(c, msg);
 
 	return 0;
 };
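Following the kerneldoc above, a hypothetical OSM would use the call in pairs: once to subscribe and once, with an empty mask, to stop notifications. In this sketch my_dev, my_drv and my_ctx are placeholders, not names from this patch:

	/* Subscribe to every event class the device can raise. */
	if (i2o_event_register(my_dev, &my_drv, my_ctx, 0xffffffff))
		osm_err("event registration failed\n");

	/* Later, when notifications are no longer wanted: */
	i2o_event_register(my_dev, &my_drv, my_ctx, 0);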
@@ -1239,14 +1193,8 @@ static int __init i2o_iop_init(void)
 
 	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
 
-	i2o_controller_class = class_create(THIS_MODULE, "i2o_controller");
-	if (IS_ERR(i2o_controller_class)) {
-		osm_err("can't register class i2o_controller\n");
-		goto exit;
-	}
-
 	if ((rc = i2o_driver_init()))
-		goto class_exit;
+		goto exit;
 
 	if ((rc = i2o_exec_init()))
 		goto driver_exit;
@@ -1262,9 +1210,6 @@ static int __init i2o_iop_init(void)
       driver_exit:
 	i2o_driver_exit();
 
-      class_exit:
-	class_destroy(i2o_controller_class);
-
       exit:
 	return rc;
 }
@@ -1279,7 +1224,6 @@ static void __exit i2o_iop_exit(void)
 	i2o_pci_exit();
 	i2o_exec_exit();
 	i2o_driver_exit();
-	class_destroy(i2o_controller_class);
 };
 
 module_init(i2o_iop_init);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075fa1ec3..c5b656cdea7c 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
 		       pci_name(pdev));
 
 	c->pdev = pdev;
-	c->device.parent = get_device(&pdev->dev);
+	c->device.parent = &pdev->dev;
 
 	/* Cards that fall apart if you hit them with large I/O loads... */
 	if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
@@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
 	if ((rc = i2o_iop_add(c)))
 		goto uninstall;
 
-	get_device(&c->device);
-
 	if (i960)
 		pci_write_config_word(i960, 0x42, 0x03ff);
 
@@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
 	i2o_pci_free(c);
 
       free_controller:
-	put_device(c->device.parent);
 	i2o_iop_free(c);
 
       disable:
@@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev)
 
 	printk(KERN_INFO "%s: Controller removed.\n", c->name);
 
-	put_device(c->device.parent);
 	put_device(&c->device);
 };
 
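The probe and remove hunks above drop a hand-rolled get_device()/put_device() pair on the parent. The working assumption is the stock driver-model rule that device_add() takes its own reference on dev->parent and device_del() drops it, so the extra pair was redundant. A sketch of the resulting ownership:

	c->device.parent = &pdev->dev;	/* no explicit get_device(): the
					 * core pins the parent itself when
					 * the child is added */
	/* teardown: */
	device_del(&c->device);		/* core releases its parent ref */
	put_device(&c->device);		/* drop only the ref we own */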
@@ -483,4 +479,5 @@ void __exit i2o_pci_exit(void)
 {
 	pci_unregister_driver(&i2o_pci_driver);
 };
+
 MODULE_DEVICE_TABLE(pci, i2o_pci_ids);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c782a6329805..fa39b944bc46 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -6,7 +6,7 @@ menu "PHY device support"
 
 config PHYLIB
 	tristate "PHY Device support and infrastructure"
-	depends on NET_ETHERNET && (BROKEN || !ARCH_S390)
+	depends on NET_ETHERNET && (BROKEN || !S390)
 	help
 	  Ethernet controllers are usually attached to PHY
 	  devices.  This option provides infrastructure for
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 1bd22cd40c75..87ee3271b17d 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -98,7 +98,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
 #include <linux/in.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
-#include <linux/lp.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -106,7 +105,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
 #include <linux/skbuff.h>
 #include <linux/if_plip.h>
 #include <linux/workqueue.h>
-#include <linux/ioport.h>
 #include <linux/spinlock.h>
 #include <linux/parport.h>
 #include <linux/bitops.h>
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 725a14119f2a..b8241561da45 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -77,7 +77,7 @@ config PARPORT_PC_SUPERIO
 
 config PARPORT_PC_PCMCIA
 	tristate "Support for PCMCIA management for PC-style ports"
-	depends on PARPORT!=n && (PCMCIA!=n && PARPORT_PC=m && PARPORT_PC || PARPORT_PC=y && PCMCIA)
+	depends on PCMCIA && PARPORT_PC
 	help
 	  Say Y here if you need PCMCIA support for your PC-style parallel
 	  ports.  If unsure, say N.
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 075c7eb5c85d..9ee67321b630 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -144,9 +144,9 @@ again:
 	add_dev (numdevs++, port, -1);
 
 	/* Find out the legacy device's IEEE 1284 device ID. */
-	deviceid = kmalloc (1000, GFP_KERNEL);
+	deviceid = kmalloc (1024, GFP_KERNEL);
 	if (deviceid) {
-		if (parport_device_id (numdevs - 1, deviceid, 1000) > 2)
+		if (parport_device_id (numdevs - 1, deviceid, 1024) > 2)
 			detected++;
 
 		kfree (deviceid);
@@ -252,7 +252,7 @@ struct pardevice *parport_open (int devnum, const char *name,
 	selected = port->daisy;
 	parport_release (dev);
 
-	if (selected != port->daisy) {
+	if (selected != daisy) {
 		/* No corresponding device. */
 		parport_unregister_device (dev);
 		return NULL;
@@ -344,9 +344,9 @@ static int cpp_daisy (struct parport *port, int cmd)
 			      PARPORT_CONTROL_STROBE,
 			      PARPORT_CONTROL_STROBE);
 	udelay (1);
+	s = parport_read_status (port);
 	parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
 	udelay (1);
-	s = parport_read_status (port);
 	parport_write_data (port, 0xff); udelay (2);
 
 	return s;
@@ -395,15 +395,15 @@ int parport_daisy_select (struct parport *port, int daisy, int mode)
 	case IEEE1284_MODE_EPP:
 	case IEEE1284_MODE_EPPSL:
 	case IEEE1284_MODE_EPPSWE:
-		return (cpp_daisy (port, 0x20 + daisy) &
-			PARPORT_STATUS_ERROR);
+		return !(cpp_daisy (port, 0x20 + daisy) &
+			 PARPORT_STATUS_ERROR);
 
 	// For these modes we should switch to ECP mode:
 	case IEEE1284_MODE_ECP:
 	case IEEE1284_MODE_ECPRLE:
 	case IEEE1284_MODE_ECPSWE:
-		return (cpp_daisy (port, 0xd0 + daisy) &
-			PARPORT_STATUS_ERROR);
+		return !(cpp_daisy (port, 0xd0 + daisy) &
+			 PARPORT_STATUS_ERROR);
 
 	// Nothing was told for BECP in Daisy chain specification.
 	// May be it's wise to use ECP?
@@ -413,8 +413,8 @@ int parport_daisy_select (struct parport *port, int daisy, int mode)
 	case IEEE1284_MODE_BYTE:
 	case IEEE1284_MODE_COMPAT:
 	default:
-		return (cpp_daisy (port, 0xe0 + daisy) &
-			PARPORT_STATUS_ERROR);
+		return !(cpp_daisy (port, 0xe0 + daisy) &
+			 PARPORT_STATUS_ERROR);
 	}
 }
 
@@ -436,7 +436,7 @@ static int select_port (struct parport *port)
 
 static int assign_addrs (struct parport *port)
 {
-	unsigned char s, last_dev;
+	unsigned char s;
 	unsigned char daisy;
 	int thisdev = numdevs;
 	int detected;
@@ -472,10 +472,13 @@ static int assign_addrs (struct parport *port)
 	}
 
 	parport_write_data (port, 0x78); udelay (2);
-	last_dev = 0; /* We've just been speaking to a device, so we
-			 know there must be at least _one_ out there. */
+	s = parport_read_status (port);
 
-	for (daisy = 0; daisy < 4; daisy++) {
+	for (daisy = 0;
+	     (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT))
+		     == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)
+		     && daisy < 4;
+	     ++daisy) {
 		parport_write_data (port, daisy);
 		udelay (2);
 		parport_frob_control (port,
@@ -485,14 +488,18 @@ static int assign_addrs (struct parport *port)
 		parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
 		udelay (1);
 
-		if (last_dev)
-			/* No more devices. */
-			break;
+		add_dev (numdevs++, port, daisy);
 
-		last_dev = !(parport_read_status (port)
-			     & PARPORT_STATUS_BUSY);
+		/* See if this device thought it was the last in the
+		 * chain. */
+		if (!(s & PARPORT_STATUS_BUSY))
+			break;
 
-		add_dev (numdevs++, port, daisy);
+		/* We are seeing pass through status now. We see
+		   last_dev from next device or if last_dev does not
+		   work status lines from some non-daisy chain
+		   device. */
+		s = parport_read_status (port);
 	}
 
 	parport_write_data (port, 0xff); udelay (2);
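The rewritten loop guard is dense, so here it is restated: address assignment continues only while the status lines still show the CPP pass-through pattern (both PaperOut and Select high), up to four devices. A sketch of just that condition (DAISY_ALIVE is an illustrative macro, not a name from the patch):

	#define DAISY_ALIVE (PARPORT_STATUS_PAPEROUT | PARPORT_STATUS_SELECT)
	unsigned char s, daisy;

	s = parport_read_status (port);
	for (daisy = 0; (s & DAISY_ALIVE) == DAISY_ALIVE && daisy < 4; ++daisy) {
		/* assign address `daisy`, add_dev(), check BUSY ... */
		s = parport_read_status (port);	/* re-sample for next pass */
	}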
@@ -501,11 +508,11 @@ static int assign_addrs (struct parport *port)
 		    detected);
 
 	/* Ask the new devices to introduce themselves. */
-	deviceid = kmalloc (1000, GFP_KERNEL);
+	deviceid = kmalloc (1024, GFP_KERNEL);
 	if (!deviceid) return 0;
 
 	for (daisy = 0; thisdev < numdevs; thisdev++, daisy++)
-		parport_device_id (thisdev, deviceid, 1000);
+		parport_device_id (thisdev, deviceid, 1024);
 
 	kfree (deviceid);
 	return detected;
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index ce1e2aad8b10..d6c77658231e 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -165,17 +165,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
 		/* Does the error line indicate end of data? */
 		if (((i & 1) == 0) &&
 		    (parport_read_status(port) & PARPORT_STATUS_ERROR)) {
-			port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
-			DPRINTK (KERN_DEBUG
-				 "%s: No more nibble data (%d bytes)\n",
-				 port->name, i/2);
-
-			/* Go to reverse idle phase. */
-			parport_frob_control (port,
-					      PARPORT_CONTROL_AUTOFD,
-					      PARPORT_CONTROL_AUTOFD);
-			port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
-			break;
+			goto end_of_data;
 		}
 
 		/* Event 7: Set nAutoFd low. */
@@ -225,18 +215,25 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
 			byte = nibble;
 	}
 
-	i /= 2; /* i is now in bytes */
-
 	if (i == len) {
 		/* Read the last nibble without checking data avail. */
-		port = port->physport;
-		if (parport_read_status (port) & PARPORT_STATUS_ERROR)
-			port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
+		if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+		end_of_data:
+			DPRINTK (KERN_DEBUG
+				"%s: No more nibble data (%d bytes)\n",
+				port->name, i/2);
+
+			/* Go to reverse idle phase. */
+			parport_frob_control (port,
+					      PARPORT_CONTROL_AUTOFD,
+					      PARPORT_CONTROL_AUTOFD);
+			port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+		}
 		else
-			port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+			port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
 	}
 
-	return i;
+	return i/2;
 #endif /* IEEE1284 support */
 }
 
@@ -256,17 +253,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
 
 		/* Data available? */
 		if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
-			port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
-			DPRINTK (KERN_DEBUG
-				 "%s: No more byte data (%Zd bytes)\n",
-				 port->name, count);
-
-			/* Go to reverse idle phase. */
-			parport_frob_control (port,
-					      PARPORT_CONTROL_AUTOFD,
-					      PARPORT_CONTROL_AUTOFD);
-			port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
-			break;
+			goto end_of_data;
 		}
 
 		/* Event 14: Place data bus in high impedance state. */
@@ -318,11 +305,20 @@ size_t parport_ieee1284_read_byte (struct parport *port,
 
 	if (count == len) {
 		/* Read the last byte without checking data avail. */
-		port = port->physport;
-		if (parport_read_status (port) & PARPORT_STATUS_ERROR)
-			port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
+		if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+		end_of_data:
+			DPRINTK (KERN_DEBUG
+				"%s: No more byte data (%Zd bytes)\n",
+				port->name, count);
+
+			/* Go to reverse idle phase. */
+			parport_frob_control (port,
+					      PARPORT_CONTROL_AUTOFD,
+					      PARPORT_CONTROL_AUTOFD);
+			port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+		}
 		else
-			port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+			port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
 	}
 
 	return count;
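Both read paths now keep a single copy of the end-of-data handling by jumping from inside the transfer loop to a label placed inside the final `if`. Jumping into a block is legal C as long as no variable-length-array scope is entered; a stripped-down sketch of the shape, with every helper hypothetical:

	size_t read_sketch(struct parport *port, size_t len)
	{
		size_t count = 0;

		while (count < len) {
			if (no_more_data(port))		/* hypothetical test */
				goto end_of_data;	/* jump into the if below */
			count++;			/* transfer one unit */
		}

		if (count == len) {
			if (no_more_data(port)) {
			end_of_data:
				/* the single shared reverse-idle path */
				enter_reverse_idle(port);	/* hypothetical */
			} else
				mark_data_available(port);	/* hypothetical */
		}
		return count;
	}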
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index c6493ad7c0c8..18e85ccdae67 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1169,7 +1169,7 @@ dump_parport_state ("fwd idle", port);
 
 /* GCC is not inlining extern inline function later overwriten to non-inline,
    so we use outlined_ variants here. */
-static struct parport_operations parport_pc_ops =
+static const struct parport_operations parport_pc_ops =
 {
 	.write_data	= parport_pc_write_data,
 	.read_data	= parport_pc_read_data,
@@ -1211,10 +1211,11 @@ static struct parport_operations parport_pc_ops =
 static void __devinit show_parconfig_smsc37c669(int io, int key)
 {
 	int cr1,cr4,cra,cr23,cr26,cr27,i=0;
-	static const char *modes[]={ "SPP and Bidirectional (PS/2)",
-				     "EPP and SPP",
-				     "ECP",
-				     "ECP and EPP" };
+	static const char *const modes[]={
+		"SPP and Bidirectional (PS/2)",
+		"EPP and SPP",
+		"ECP",
+		"ECP and EPP" };
 
 	outb(key,io);
 	outb(key,io);
@@ -1288,7 +1289,7 @@ static void __devinit show_parconfig_smsc37c669(int io, int key)
 static void __devinit show_parconfig_winbond(int io, int key)
 {
 	int cr30,cr60,cr61,cr70,cr74,crf0,i=0;
-	static const char *modes[] = {
+	static const char *const modes[] = {
 		"Standard (SPP) and Bidirectional(PS/2)", /* 0 */
 		"EPP-1.9 and SPP",
 		"ECP",
@@ -1297,7 +1298,9 @@ static void __devinit show_parconfig_winbond(int io, int key)
 		"EPP-1.7 and SPP",		/* 5 */
 		"undefined!",
 		"ECP and EPP-1.7" };
-	static char *irqtypes[] = { "pulsed low, high-Z", "follows nACK" };
+	static char *const irqtypes[] = {
+		"pulsed low, high-Z",
+		"follows nACK" };
 
 	/* The registers are called compatible-PnP because the
 	   register layout is modelled after ISA-PnP, the access
@@ -2396,7 +2399,8 @@ EXPORT_SYMBOL (parport_pc_unregister_port);
 
 /* ITE support maintained by Rich Liu <richliu@poorman.org> */
 static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
-					 int autodma, struct parport_pc_via_data *via)
+					 int autodma,
+					 const struct parport_pc_via_data *via)
 {
 	short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
 	struct resource *base_res;
@@ -2524,7 +2528,8 @@ static struct parport_pc_via_data via_8231_data __devinitdata = {
 };
 
 static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
-				    int autodma, struct parport_pc_via_data *via)
+				    int autodma,
+				    const struct parport_pc_via_data *via)
 {
 	u8 tmp, tmp2, siofunc;
 	u8 ppcontrol = 0;
@@ -2694,8 +2699,9 @@ enum parport_pc_sio_types {
 
 /* each element directly indexed from enum list, above */
 static struct parport_pc_superio {
-	int (*probe) (struct pci_dev *pdev, int autoirq, int autodma, struct parport_pc_via_data *via);
-	struct parport_pc_via_data *via;
+	int (*probe) (struct pci_dev *pdev, int autoirq, int autodma,
+		      const struct parport_pc_via_data *via);
+	const struct parport_pc_via_data *via;
 } parport_pc_superio_info[] __devinitdata = {
 	{ sio_via_probe, &via_686a_data, },
 	{ sio_via_probe, &via_8231_data, },
@@ -2828,7 +2834,7 @@ static struct parport_pc_pci {
 	/* netmos_9815 */		{ 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
 };
 
-static struct pci_device_id parport_pc_pci_tbl[] = {
+static const struct pci_device_id parport_pc_pci_tbl[] = {
 	/* Super-IO onboard chips */
 	{ 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a },
 	{ 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 },
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index 4b48b31ec235..b62aee8de3cb 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -11,9 +11,9 @@
 #include <linux/string.h>
 #include <asm/uaccess.h>
 
-static struct {
-	char *token;
-	char *descr;
+static const struct {
+	const char *token;
+	const char *descr;
 } classes[] = {
 	{ "",            "Legacy device" },
 	{ "PRINTER",     "Printer" },
@@ -128,8 +128,131 @@ static void parse_data(struct parport *port, int device, char *str)
 	kfree(txt);
 }
 
+/* Read up to count-1 bytes of device id. Terminate buffer with
+ * '\0'. Buffer begins with two Device ID length bytes as given by
+ * device. */
+static ssize_t parport_read_device_id (struct parport *port, char *buffer,
+				       size_t count)
+{
+	unsigned char length[2];
+	unsigned lelen, belen;
+	size_t idlens[4];
+	unsigned numidlens;
+	unsigned current_idlen;
+	ssize_t retval;
+	size_t len;
+
+	/* First two bytes are MSB,LSB of inclusive length. */
+	retval = parport_read (port, length, 2);
+
+	if (retval < 0)
+		return retval;
+	if (retval != 2)
+		return -EIO;
+
+	if (count < 2)
+		return 0;
+	memcpy(buffer, length, 2);
+	len = 2;
+
+	/* Some devices wrongly send LE length, and some send it two
+	 * bytes short. Construct a sorted array of lengths to try. */
+	belen = (length[0] << 8) + length[1];
+	lelen = (length[1] << 8) + length[0];
+	idlens[0] = min(belen, lelen);
+	idlens[1] = idlens[0]+2;
+	if (belen != lelen) {
+		int off = 2;
+		/* Don't try lenghts of 0x100 and 0x200 as 1 and 2 */
+		if (idlens[0] <= 2)
+			off = 0;
+		idlens[off] = max(belen, lelen);
+		idlens[off+1] = idlens[off]+2;
+		numidlens = off+2;
+	}
+	else {
+		/* Some devices don't truly implement Device ID, but
+		 * just return constant nibble forever. This catches
+		 * also those cases. */
+		if (idlens[0] == 0 || idlens[0] > 0xFFF) {
+			printk (KERN_DEBUG "%s: reported broken Device ID"
+				" length of %#zX bytes\n",
+				port->name, idlens[0]);
+			return -EIO;
+		}
+		numidlens = 2;
+	}
+
+	/* Try to respect the given ID length despite all the bugs in
+	 * the ID length. Read according to shortest possible ID
+	 * first. */
+	for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
+		size_t idlen = idlens[current_idlen];
+		if (idlen+1 >= count)
+			break;
+
+		retval = parport_read (port, buffer+len, idlen-len);
+
+		if (retval < 0)
+			return retval;
+		len += retval;
+
+		if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
+			if (belen != len) {
+				printk (KERN_DEBUG "%s: Device ID was %d bytes"
+					" while device told it would be %d"
+					" bytes\n",
+					port->name, len, belen);
+			}
+			goto done;
+		}
+
+		/* This might end reading the Device ID too
+		 * soon. Hopefully the needed fields were already in
+		 * the first 256 bytes or so that we must have read so
+		 * far. */
+		if (buffer[len-1] == ';') {
+			printk (KERN_DEBUG "%s: Device ID reading stopped"
+				" before device told data not available. "
+				"Current idlen %d of %d, len bytes %02X %02X\n",
+				port->name, current_idlen, numidlens,
+				length[0], length[1]);
+			goto done;
+		}
+	}
+	if (current_idlen < numidlens) {
+		/* Buffer not large enough, read to end of buffer. */
+		size_t idlen, len2;
+		if (len+1 < count) {
+			retval = parport_read (port, buffer+len, count-len-1);
+			if (retval < 0)
+				return retval;
+			len += retval;
+		}
+		/* Read the whole ID since some devices would not
+		 * otherwise give back the Device ID from beginning
+		 * next time when asked. */
+		idlen = idlens[current_idlen];
+		len2 = len;
+		while(len2 < idlen && retval > 0) {
+			char tmp[4];
+			retval = parport_read (port, tmp,
+					       min(sizeof tmp, idlen-len2));
+			if (retval < 0)
+				return retval;
+			len2 += retval;
+		}
+	}
+	/* In addition, there are broken devices out there that don't
+	   even finish off with a semi-colon. We do not need to care
+	   about those at this time. */
+ done:
+	buffer[len] = '\0';
+	return len;
+}
+
 /* Get Std 1284 Device ID. */
-ssize_t parport_device_id (int devnum, char *buffer, size_t len)
+ssize_t parport_device_id (int devnum, char *buffer, size_t count)
 {
 	ssize_t retval = -ENXIO;
 	struct pardevice *dev = parport_open (devnum, "Device ID probe",
@@ -139,76 +262,20 @@ ssize_t parport_device_id (int devnum, char *buffer, size_t len)
 
 	parport_claim_or_block (dev);
 
-	/* Negotiate to compatibility mode, and then to device ID mode.
-	 * (This is in case we are already in device ID mode.) */
+	/* Negotiate to compatibility mode, and then to device ID
+	 * mode. (This so that we start form beginning of device ID if
+	 * already in device ID mode.) */
 	parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
 	retval = parport_negotiate (dev->port,
 				    IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);
 
 	if (!retval) {
-		int idlen;
-		unsigned char length[2];
-
-		/* First two bytes are MSB,LSB of inclusive length. */
-		retval = parport_read (dev->port, length, 2);
-
-		if (retval != 2) goto end_id;
-
-		idlen = (length[0] << 8) + length[1] - 2;
-		/*
-		 * Check if the caller-allocated buffer is large enough
-		 * otherwise bail out or there will be an at least off by one.
-		 */
-		if (idlen + 1 < len)
-			len = idlen;
-		else {
-			retval = -EINVAL;
-			goto out;
-		}
-		retval = parport_read (dev->port, buffer, len);
-
-		if (retval != len)
-			printk (KERN_DEBUG "%s: only read %Zd of %Zd ID bytes\n",
-				dev->port->name, retval,
-				len);
-
-		/* Some printer manufacturers mistakenly believe that
-		   the length field is supposed to be _exclusive_.
-		   In addition, there are broken devices out there
-		   that don't even finish off with a semi-colon. */
-		if (buffer[len - 1] != ';') {
-			ssize_t diff;
-			diff = parport_read (dev->port, buffer + len, 2);
-			retval += diff;
-
-			if (diff)
-				printk (KERN_DEBUG
-					"%s: device reported incorrect "
-					"length field (%d, should be %Zd)\n",
-					dev->port->name, idlen, retval);
-			else {
-				/* One semi-colon short of a device ID. */
-				buffer[len++] = ';';
-				printk (KERN_DEBUG "%s: faking semi-colon\n",
-					dev->port->name);
-
-				/* If we get here, I don't think we
-				   need to worry about the possible
-				   standard violation of having read
-				   more than we were told to.  The
-				   device is non-compliant anyhow. */
-			}
-		}
-
-	      end_id:
-		buffer[len] = '\0';
+		retval = parport_read_device_id (dev->port, buffer, count);
 		parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
+		if (retval > 2)
+			parse_data (dev->port, dev->daisy, buffer+2);
 	}
 
-	if (retval > 2)
-		parse_data (dev->port, dev->daisy, buffer);
-
-out:
 	parport_release (dev);
 	parport_close (dev);
 	return retval;
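The candidate-length construction in the new parport_read_device_id() is worth spelling out: the two length bytes are tried as big-endian (what IEEE 1284 specifies) and as little-endian (a common firmware bug), and each value again +2 for devices that report an exclusive rather than inclusive length. A sketch of just that step, using the function's own variable names; the real code also skips the duplicate entries when belen == lelen:

	belen = (length[0] << 8) + length[1];	/* spec-compliant reading */
	lelen = (length[1] << 8) + length[0];	/* byte-swapped firmware */

	idlens[0] = min(belen, lelen);		/* shortest guess first */
	idlens[1] = idlens[0] + 2;		/* exclusive-length variant */
	idlens[2] = max(belen, lelen);
	idlens[3] = idlens[2] + 2;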
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 9cb3ab156b09..ea62bed6bc83 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -1002,6 +1002,7 @@ EXPORT_SYMBOL(parport_register_driver);
 EXPORT_SYMBOL(parport_unregister_driver);
 EXPORT_SYMBOL(parport_register_device);
 EXPORT_SYMBOL(parport_unregister_device);
+EXPORT_SYMBOL(parport_get_port);
 EXPORT_SYMBOL(parport_put_port);
 EXPORT_SYMBOL(parport_find_number);
 EXPORT_SYMBOL(parport_find_base);
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 6b7583f497d0..a1f0b0ba2bfe 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -31,15 +31,6 @@ static struct {
 } pnp_bios_callpoint;
 
 
-/* The PnP BIOS entries in the GDT */
-#define PNP_GDT    (GDT_ENTRY_PNPBIOS_BASE * 8)
-
-#define PNP_CS32   (PNP_GDT+0x00)	/* segment for calling fn */
-#define PNP_CS16   (PNP_GDT+0x08)	/* code segment for BIOS */
-#define PNP_DS     (PNP_GDT+0x10)	/* data segment for BIOS */
-#define PNP_TS1    (PNP_GDT+0x18)	/* transfer data segment */
-#define PNP_TS2    (PNP_GDT+0x20)	/* another data segment */
-
 /*
  * These are some opcodes for a "static asmlinkage"
  * As this code is *not* executed inside the linux kernel segment, but in a
@@ -67,16 +58,11 @@ __asm__(
 	".previous		\n"
 );
 
-#define Q_SET_SEL(cpu, selname, address, size) \
-do { \
-set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], __va((u32)(address))); \
-set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \
-} while(0)
-
 #define Q2_SET_SEL(cpu, selname, address, size) \
 do { \
-set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], (u32)(address)); \
-set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \
+struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
+set_base(gdt[(selname) >> 3], (u32)(address)); \
+set_limit(gdt[(selname) >> 3], size); \
 } while(0)
 
 static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
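Instead of open-coding the per-CPU GDT array lookup at every call site, the code now goes through one accessor. A before/after sketch of the access pattern (addr and selname are illustrative placeholders):

	/* Before: every user spells out the per-CPU array access. */
	set_base(per_cpu(cpu_gdt_table, cpu)[(selname) >> 3], addr);

	/* After: get_cpu_gdt_table() hides where a CPU's GDT lives, so
	 * its storage can change later without touching callers. */
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	set_base(gdt[(selname) >> 3], addr);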
@@ -115,8 +101,8 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
115 return PNP_FUNCTION_NOT_SUPPORTED; 101 return PNP_FUNCTION_NOT_SUPPORTED;
116 102
117 cpu = get_cpu(); 103 cpu = get_cpu();
118 save_desc_40 = per_cpu(cpu_gdt_table,cpu)[0x40 / 8]; 104 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
119 per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = bad_bios_desc; 105 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
120 106
121 /* On some boxes IRQ's during PnP BIOS calls are deadly. */ 107 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
122 spin_lock_irqsave(&pnp_bios_lock, flags); 108 spin_lock_irqsave(&pnp_bios_lock, flags);
@@ -158,7 +144,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
158 ); 144 );
159 spin_unlock_irqrestore(&pnp_bios_lock, flags); 145 spin_unlock_irqrestore(&pnp_bios_lock, flags);
160 146
161 per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = save_desc_40; 147 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
162 put_cpu(); 148 put_cpu();
163 149
164 /* If we get here and this is set then the PnP BIOS faulted on us. */ 150 /* If we get here and this is set then the PnP BIOS faulted on us. */
@@ -290,12 +276,15 @@ int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
 static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
 {
 	u16 status;
+	u16 tmp_nodenum;
 	if (!pnp_bios_present())
 		return PNP_FUNCTION_NOT_SUPPORTED;
 	if ( !boot && pnpbios_dont_use_current_config )
 		return PNP_FUNCTION_NOT_SUPPORTED;
+	tmp_nodenum = *nodenum;
 	status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0,
-			nodenum, sizeof(char), data, 65536);
+			&tmp_nodenum, sizeof(tmp_nodenum), data, 65536);
+	*nodenum = tmp_nodenum;
 	return status;
 }
 
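Note on the hunk above: the BIOS call is now asked to transfer sizeof(tmp_nodenum) == 2 bytes, so passing the caller's u8 buffer directly would presumably let the callee clobber the byte adjacent to *nodenum; the widening temporary confines the 16-bit store. A minimal stand-alone sketch of that pattern (plain C, hypothetical names, not the kernel's types):

	#include <stdint.h>
	#include <string.h>

	/* Stand-in for a callee that always stores a full 16-bit value,
	 * the way the PnP BIOS transfer above does. */
	static void callee_writes_u16(void *buf)
	{
		uint16_t v = 0x0042;
		memcpy(buf, &v, sizeof(v));
	}

	/* Widen through a u16 temporary so the 16-bit store cannot touch
	 * the byte next to the caller's u8. */
	static void get_node_sketch(uint8_t *nodenum)
	{
		uint16_t tmp = *nodenum;
		callee_writes_u16(&tmp);
		*nodenum = (uint8_t)tmp;
	}
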
@@ -535,10 +524,12 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
 
 	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
 	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
-	for(i=0; i < NR_CPUS; i++)
-	{
-		Q2_SET_SEL(i, PNP_CS32, &pnp_bios_callfunc, 64 * 1024);
-		Q_SET_SEL(i, PNP_CS16, header->fields.pm16cseg, 64 * 1024);
-		Q_SET_SEL(i, PNP_DS, header->fields.pm16dseg, 64 * 1024);
-	}
+	for (i = 0; i < NR_CPUS; i++) {
+		struct desc_struct *gdt = get_cpu_gdt_table(i);
+		if (!gdt)
+			continue;
+		set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
+		set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], __va(header->fields.pm16cseg));
+		set_base(gdt[GDT_ENTRY_PNPBIOS_DS], __va(header->fields.pm16dseg));
+	}
 }
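The pnpbios conversion above funnels every GDT access through get_cpu_gdt_table() instead of indexing per_cpu(cpu_gdt_table, cpu) directly. The point of such an accessor is that the storage behind it can change without touching the callers; a rough stand-alone sketch of the idea (illustrative names, not the kernel's API):

	struct desc { unsigned int a, b; };

	#define NR_CPUS_SKETCH 4
	static struct desc gdt[NR_CPUS_SKETCH][32];	/* static today... */

	/* ...but every caller goes through one indirection point, so the
	 * backing store could become dynamically allocated later. */
	static struct desc *get_gdt_sketch(int cpu)
	{
		return gdt[cpu];
	}

	static void set_entry(int cpu, int sel, struct desc d)
	{
		get_gdt_sketch(cpu)[sel >> 3] = d;	/* selector -> slot */
	}
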
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index c99a2fe92fb0..9803c9352d78 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the S/390 specific device drivers
 #
 
-obj-y += s390mach.o sysinfo.o
+obj-y += s390mach.o sysinfo.o s390_rdev.o
 obj-y += cio/ block/ char/ crypto/ net/ scsi/
 
 drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6e7d7b06421d..6f50cc9323d9 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,11 +1,11 @@
-if ARCH_S390
+if S390
 
 comment "S/390 block device drivers"
-	depends on ARCH_S390
+	depends on S390
 
 config BLK_DEV_XPRAM
 	tristate "XPRAM disk support"
-	depends on ARCH_S390
+	depends on S390
 	help
 	  Select this option if you want to use your expanded storage on S/390
 	  or zSeries as a disk. This is useful as a _fast_ swap device if you
@@ -49,7 +49,7 @@ config DASD_FBA
 
 config DASD_DIAG
 	tristate "Support for DIAG access to Disks"
-	depends on DASD && ( ARCH_S390X = 'n' || EXPERIMENTAL)
+	depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL)
 	help
 	  Select this option if you want to use Diagnose250 command to access
 	  Disks under VM. If you are not running under VM or unsure what it is,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index fdb61380c523..f779f674dfa0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -7,7 +7,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
  *
- * $Revision: 1.167 $
+ * $Revision: 1.172 $
  */
 
 #include <linux/config.h>
@@ -604,7 +604,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
 void
 dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
 {
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 	struct ccw1 *ccw;
 
 	/* Clear any idals used for the request. */
@@ -1224,6 +1224,12 @@ __dasd_start_head(struct dasd_device * device)
 	if (list_empty(&device->ccw_queue))
 		return;
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	/* check FAILFAST */
+	if (device->stopped & ~DASD_STOPPED_PENDING &&
+	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
+		cqr->status = DASD_CQR_FAILED;
+		dasd_schedule_bh(device);
+	}
 	if ((cqr->status == DASD_CQR_QUEUED) &&
 	    (!device->stopped)) {
 		/* try to start the first I/O that can be started */
@@ -1323,7 +1329,7 @@ void
 dasd_schedule_bh(struct dasd_device * device)
 {
 	/* Protect against rescheduling. */
-	if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
+	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
 		return;
 	dasd_get_device(device);
 	tasklet_hi_schedule(&device->tasklet);
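The conversion above (repeated in sclp_quiesce.c, tape_block.c and ccwgroup.c below) changes the guard's polarity: the old s390 atomic_compare_and_swap() returned nonzero when the swap did not happen, while atomic_cmpxchg() returns the previous value, so failure is detected by comparing against the expected old value. A stand-alone sketch of the same schedule-once guard with C11 atomics (illustrative, not the kernel's atomic_t):

	#include <stdatomic.h>

	static atomic_int scheduled;	/* 0 = idle, 1 = already scheduled */

	void schedule_once(void (*do_schedule)(void))
	{
		int expected = 0;
		/* Exactly one caller wins the 0 -> 1 transition. */
		if (!atomic_compare_exchange_strong(&scheduled, &expected, 1))
			return;	/* lost the race: already scheduled */
		do_schedule();
	}
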
@@ -1750,8 +1756,10 @@ dasd_exit(void)
  * SECTION: common functions for ccw_driver use
  */
 
-/* initial attempt at a probe function. this can be simplified once
- * the other detection code is gone */
+/*
+ * Initial attempt at a probe function. this can be simplified once
+ * the other detection code is gone.
+ */
 int
 dasd_generic_probe (struct ccw_device *cdev,
 		    struct dasd_discipline *discipline)
@@ -1770,8 +1778,10 @@ dasd_generic_probe (struct ccw_device *cdev,
 	return ret;
 }
 
-/* this will one day be called from a global not_oper handler.
- * It is also used by driver_unregister during module unload */
+/*
+ * This will one day be called from a global not_oper handler.
+ * It is also used by driver_unregister during module unload.
+ */
 void
 dasd_generic_remove (struct ccw_device *cdev)
 {
@@ -1798,9 +1808,11 @@ dasd_generic_remove (struct ccw_device *cdev)
 	dasd_delete_device(device);
 }
 
-/* activate a device. This is called from dasd_{eckd,fba}_probe() when either
+/*
+ * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
  * the device is detected for the first time and is supposed to be used
- * or the user has started activation through sysfs */
+ * or the user has started activation through sysfs.
+ */
 int
 dasd_generic_set_online (struct ccw_device *cdev,
 			 struct dasd_discipline *discipline)
@@ -1917,7 +1929,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
 		if (cqr->status == DASD_CQR_IN_IO)
 			cqr->status = DASD_CQR_FAILED;
 		device->stopped |= DASD_STOPPED_DC_EIO;
-		dasd_schedule_bh(device);
 	} else {
 		list_for_each_entry(cqr, &device->ccw_queue, list)
 			if (cqr->status == DASD_CQR_IN_IO) {
@@ -1927,6 +1938,7 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
 			device->stopped |= DASD_STOPPED_DC_WAIT;
 			dasd_set_timer(device, 0);
 		}
+		dasd_schedule_bh(device);
 		ret = 1;
 		break;
 	case CIO_OPER:
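Taken together, the dasd hunks route the block layer's REQ_FAILFAST hint into a per-request flag bit (DASD_CQR_FLAGS_FAILFAST, defined in dasd_int.h below) that the dispatcher checks: while the device is stopped for anything other than a pending state change, flagged requests are failed immediately instead of waiting. A simplified stand-alone sketch of that flag plumbing (types and constants illustrative):

	#include <stdbool.h>

	#define CQR_FAILFAST  (1u << 1)	/* stands in for DASD_CQR_FLAGS_FAILFAST */
	#define STOP_PENDING  (1u << 0)	/* stands in for DASD_STOPPED_PENDING */

	struct req { unsigned int flags; int failed; };

	/* Build time: copy the block layer hint into the internal request. */
	static void build(struct req *r, bool failfast_hint)
	{
		if (failfast_hint)
			r->flags |= CQR_FAILFAST;
	}

	/* Dispatch time: fail fast only for "real" stop reasons,
	 * as in __dasd_start_head() above. */
	static void start_head(struct req *r, unsigned int stopped)
	{
		if ((stopped & ~STOP_PENDING) && (r->flags & CQR_FAILFAST))
			r->failed = 1;
	}
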
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index ab8754e566bc..ba80fdea7ebf 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -6,7 +6,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
  *
- * $Revision: 1.51 $
+ * $Revision: 1.53 $
  */
 
 #include <linux/config.h>
@@ -25,6 +25,7 @@
 #include <asm/io.h>
 #include <asm/s390_ext.h>
 #include <asm/todclk.h>
+#include <asm/vtoc.h>
 
 #include "dasd_int.h"
 #include "dasd_diag.h"
@@ -74,7 +75,7 @@ dia250(void *iob, int cmd)
 	int rc;
 
 	__asm__ __volatile__(
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
 		" lghi %0,3\n"
 		" lgr 0,%3\n"
 		" diag 0,%2,0x250\n"
@@ -329,7 +330,7 @@ dasd_diag_check_device(struct dasd_device *device)
 	struct dasd_diag_private *private;
 	struct dasd_diag_characteristics *rdc_data;
 	struct dasd_diag_bio bio;
-	struct dasd_diag_cms_label *label;
+	struct vtoc_cms_label *label;
 	blocknum_t end_block;
 	unsigned int sb, bsize;
 	int rc;
@@ -380,7 +381,7 @@ dasd_diag_check_device(struct dasd_device *device)
 	mdsk_term_io(device);
 
 	/* figure out blocksize of device */
-	label = (struct dasd_diag_cms_label *) get_zeroed_page(GFP_KERNEL);
+	label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
 	if (label == NULL) {
 		DEV_MESSAGE(KERN_WARNING, device, "%s",
 			    "No memory to allocate initialization request");
@@ -548,6 +549,8 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
 	cqr->buildclk = get_clock();
+	if (req->flags & REQ_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->device = device;
 	cqr->expires = DIAG_TIMEOUT;
 	cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index df31484d73a7..a4f80bd735f1 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -6,7 +6,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
  *
- * $Revision: 1.8 $
+ * $Revision: 1.9 $
  */
 
 #define MDSK_WRITE_REQ 0x01
@@ -44,29 +44,8 @@ struct dasd_diag_characteristics {
 	u8 rdev_features;
 } __attribute__ ((packed, aligned(4)));
 
-struct dasd_diag_cms_label {
-	u8 label_id[4];
-	u8 vol_id[6];
-	u16 version_id;
-	u32 block_size;
-	u32 origin_ptr;
-	u32 usable_count;
-	u32 formatted_count;
-	u32 block_count;
-	u32 used_count;
-	u32 fst_size;
-	u32 fst_count;
-	u8 format_date[6];
-	u8 reserved1[2];
-	u32 disk_offset;
-	u32 map_block;
-	u32 hblk_disp;
-	u32 user_disp;
-	u8 reserved2[4];
-	u8 segment_name[8];
-} __attribute__ ((packed));
-
-#ifdef CONFIG_ARCH_S390X
+
+#ifdef CONFIG_64BIT
 #define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
 
 typedef u64 blocknum_t;
@@ -107,7 +86,7 @@ struct dasd_diag_rw_io {
 	struct dasd_diag_bio *bio_list;
 	u8 spare4[8];
 } __attribute__ ((packed, aligned(8)));
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
 #define DASD_DIAG_FLAGA_DEFAULT 0x0
 
 typedef u32 blocknum_t;
@@ -146,4 +125,4 @@ struct dasd_diag_rw_io {
 	u32 interrupt_params;
 	u8 spare3[20];
 } __attribute__ ((packed, aligned(8)));
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 811060e10c00..96eb48258580 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -7,7 +7,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
  *
- * $Revision: 1.71 $
+ * $Revision: 1.74 $
  */
 
 #include <linux/config.h>
@@ -1041,7 +1041,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 			/* Eckd can only do full blocks. */
 			return ERR_PTR(-EINVAL);
 		count += bv->bv_len >> (device->s2b_shift + 9);
-#if defined(CONFIG_ARCH_S390X)
+#if defined(CONFIG_64BIT)
 		if (idal_is_needed (page_address(bv->bv_page),
 				    bv->bv_len))
 			cidaw += bv->bv_len >> (device->s2b_shift + 9);
@@ -1136,6 +1136,8 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 			recid++;
 		}
 	}
+	if (req->flags & REQ_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->device = device;
 	cqr->expires = 5 * 60 * HZ; /* 5 minutes */
 	cqr->lpm = private->path_data.ppm;
@@ -1252,6 +1254,7 @@ dasd_eckd_release(struct block_device *bdev, int no, long args)
 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
 	cqr->device = device;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 0;
 	cqr->expires = 2 * HZ;
 	cqr->buildclk = get_clock();
@@ -1296,6 +1299,7 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args)
 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
 	cqr->device = device;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 0;
 	cqr->expires = 2 * HZ;
 	cqr->buildclk = get_clock();
@@ -1339,6 +1343,7 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args)
 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
 	cqr->device = device;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->retries = 0;
 	cqr->expires = 2 * HZ;
 	cqr->buildclk = get_clock();
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 28cb4613b7f5..8ec75dc08e2c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -4,7 +4,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
  *
- * $Revision: 1.40 $
+ * $Revision: 1.41 $
  */
 
 #include <linux/config.h>
@@ -271,7 +271,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 			/* Fba can only do full blocks. */
 			return ERR_PTR(-EINVAL);
 		count += bv->bv_len >> (device->s2b_shift + 9);
-#if defined(CONFIG_ARCH_S390X)
+#if defined(CONFIG_64BIT)
 		if (idal_is_needed (page_address(bv->bv_page),
 				    bv->bv_len))
 			cidaw += bv->bv_len / blksize;
@@ -352,6 +352,8 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 			recid++;
 		}
 	}
+	if (req->flags & REQ_FAILFAST)
+		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->device = device;
 	cqr->expires = 5 * 60 * HZ; /* 5 minutes */
 	cqr->retries = 32;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9fab04f3056d..2fb05c4a528c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -6,7 +6,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
  *
- * $Revision: 1.65 $
+ * $Revision: 1.68 $
  */
 
 #ifndef DASD_INT_H
@@ -208,6 +208,7 @@ struct dasd_ccw_req {
 
 /* per dasd_ccw_req flags */
 #define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
+#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
 
 /* Signature for error recovery functions. */
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 789595b3fa09..044b75371990 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -7,7 +7,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
  *
- * $Revision: 1.47 $
+ * $Revision: 1.50 $
  *
  * i/o controls for the dasd driver.
  */
@@ -352,6 +352,9 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
 	if (device == NULL)
 		return -ENODEV;
 
+	if (dasd_profile_level == DASD_PROFILE_OFF)
+		return -EIO;
+
 	if (copy_to_user((long __user *) args, (long *) &device->profile,
 			 sizeof (struct dasd_profile_info_t)))
 		return -EFAULT;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4fde41188996..2e727f49ad19 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -15,7 +15,7 @@
 #include <asm/io.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
-#include <asm/ccwdev.h> // for s390_root_dev_(un)register()
+#include <asm/s390_rdev.h>
 
 //#define DCSSBLK_DEBUG /* Debug messages on/off */
 #define DCSSBLK_NAME "dcssblk"
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index d428c909b8a0..bf3a67c3cc5e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -160,7 +160,7 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
 	"0: ipm %0\n"
 	"   srl %0,28\n"
 	"1:\n"
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	".section __ex_table,\"a\"\n"
 	"   .align 4\n"
 	"   .long 0b,1b\n"
@@ -208,7 +208,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
 	"0: ipm %0\n"
 	"   srl %0,28\n"
 	"1:\n"
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
 	".section __ex_table,\"a\"\n"
 	"   .align 4\n"
 	"   .long 0b,1b\n"
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 5a6cef2dfa13..80f7f31310e6 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -204,7 +204,7 @@ cpi_module_init(void)
 		printk(KERN_WARNING "cpi: no control program identification "
 		       "support\n");
 		sclp_unregister(&sclp_cpi_event);
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	req = cpi_prepare_req();
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 83f75774df60..56fa69168898 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused)
 	psw_t quiesce_psw;
 	int cpu;
 
-	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
 		signal_processor(smp_processor_id(), sigp_stop);
 	/* Wait for all other cpus to enter stopped state */
 	for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 559d51490e2f..5ced2725d6c7 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -65,7 +65,7 @@ static void
 tapeblock_trigger_requeue(struct tape_device *device)
 {
 	/* Protect against rescheduling. */
-	if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
+	if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
 		return;
 	schedule_work(&device->blk_data.requeue_task);
 }
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 5473c23fcb52..5acc0ace3d7d 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -66,7 +66,7 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout,
 	__cmdl = len;
 	err = 0;
 	asm volatile (
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
 		"diag %2,%4,0x288\n"
 		"1: \n"
 		".section .fixup,\"ax\"\n"
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a1c52a682191..daf21e03b21d 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/cio/blacklist.c
  *   S/390 common I/O routines -- blacklisting of specific devices
- *   $Revision: 1.35 $
+ *   $Revision: 1.39 $
  *
  *   Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
  *                           IBM Corporation
@@ -15,6 +15,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/ctype.h>
 #include <linux/device.h>
 
@@ -34,10 +35,10 @@
  * These can be single devices or ranges of devices
  */
 
-/* 65536 bits to indicate if a devno is blacklisted or not */
-#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
+/* 65536 bits for each set to indicate if a devno is blacklisted or not */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
 			 (8*sizeof(long)))
-static unsigned long bl_dev[__BL_DEV_WORDS];
+static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
 typedef enum {add, free} range_action;
 
 /*
@@ -45,21 +46,23 @@ typedef enum {add, free} range_action;
  * (Un-)blacklist the devices from-to
  */
 static inline void
-blacklist_range (range_action action, unsigned int from, unsigned int to)
+blacklist_range (range_action action, unsigned int from, unsigned int to,
+		 unsigned int ssid)
 {
 	if (!to)
 		to = from;
 
-	if (from > to || to > __MAX_SUBCHANNELS) {
+	if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
 		printk (KERN_WARNING "Invalid blacklist range "
-			"0x%04x to 0x%04x, skipping\n", from, to);
+			"0.%x.%04x to 0.%x.%04x, skipping\n",
+			ssid, from, ssid, to);
 		return;
 	}
 	for (; from <= to; from++) {
 		if (action == add)
-			set_bit (from, bl_dev);
+			set_bit (from, bl_dev[ssid]);
 		else
-			clear_bit (from, bl_dev);
+			clear_bit (from, bl_dev[ssid]);
 	}
 }
 
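The blacklist above becomes two-dimensional: one bitmap per subchannel set (ssid), each holding 65536 device-number bits. A stand-alone sketch of that lookup shape (bounds are illustrative stand-ins for __MAX_SSID and __MAX_SUBCHANNEL):

	#define MAX_SSID	3
	#define MAX_DEVNO	0xffff
	#define BPL		(8 * (int)sizeof(long))	/* bits per long */
	#define BL_WORDS	((MAX_DEVNO + BPL) / BPL)

	static unsigned long bl[MAX_SSID + 1][BL_WORDS];

	static void bl_set(int ssid, int devno)
	{
		bl[ssid][devno / BPL] |= 1UL << (devno % BPL);
	}

	static int bl_test(int ssid, int devno)
	{
		return (bl[ssid][devno / BPL] >> (devno % BPL)) & 1;
	}
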
@@ -69,7 +72,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to)
  * Shamelessly grabbed from dasd_devmap.c.
  */
 static inline int
-blacklist_busid(char **str, int *id0, int *id1, int *devno)
+blacklist_busid(char **str, int *id0, int *ssid, int *devno)
 {
 	int val, old_style;
 	char *sav;
@@ -86,7 +89,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
 		goto confused;
 	val = simple_strtoul(*str, str, 16);
 	if (old_style || (*str)[0] != '.') {
-		*id0 = *id1 = 0;
+		*id0 = *ssid = 0;
 		if (val < 0 || val > 0xffff)
 			goto confused;
 		*devno = val;
@@ -105,7 +108,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
 	val = simple_strtoul(*str, str, 16);
 	if (val < 0 || val > 0xff || (*str)++[0] != '.')
 		goto confused;
-	*id1 = val;
+	*ssid = val;
 	if (!isxdigit((*str)[0])) /* We require at least one hex digit */
 		goto confused;
 	val = simple_strtoul(*str, str, 16);
@@ -125,7 +128,7 @@ confused:
 static inline int
 blacklist_parse_parameters (char *str, range_action action)
 {
-	unsigned int from, to, from_id0, to_id0, from_id1, to_id1;
+	unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid;
 
 	while (*str != 0 && *str != '\n') {
 		range_action ra = action;
@@ -142,23 +145,25 @@ blacklist_parse_parameters (char *str, range_action action)
 		 */
 		if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
 		    strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
-			from = 0;
-			to = __MAX_SUBCHANNELS;
+			int j;
+
 			str += 3;
+			for (j=0; j <= __MAX_SSID; j++)
+				blacklist_range(ra, 0, __MAX_SUBCHANNEL, j);
 		} else {
 			int rc;
 
 			rc = blacklist_busid(&str, &from_id0,
-					     &from_id1, &from);
+					     &from_ssid, &from);
 			if (rc)
 				continue;
 			to = from;
 			to_id0 = from_id0;
-			to_id1 = from_id1;
+			to_ssid = from_ssid;
 			if (*str == '-') {
 				str++;
 				rc = blacklist_busid(&str, &to_id0,
-						     &to_id1, &to);
+						     &to_ssid, &to);
 				if (rc)
 					continue;
 			}
@@ -168,18 +173,19 @@ blacklist_parse_parameters (char *str, range_action action)
 				strsep(&str, ",\n"));
 			continue;
 		}
-		if ((from_id0 != to_id0) || (from_id1 != to_id1)) {
+		if ((from_id0 != to_id0) ||
+		    (from_ssid != to_ssid)) {
 			printk(KERN_WARNING "invalid cio_ignore range "
 			       "%x.%x.%04x-%x.%x.%04x\n",
-			       from_id0, from_id1, from,
-			       to_id0, to_id1, to);
+			       from_id0, from_ssid, from,
+			       to_id0, to_ssid, to);
 			continue;
 		}
+		pr_debug("blacklist_setup: adding range "
+			 "from %x.%x.%04x to %x.%x.%04x\n",
+			 from_id0, from_ssid, from, to_id0, to_ssid, to);
+		blacklist_range (ra, from, to, to_ssid);
 	}
-	/* FIXME: ignoring id0 and id1 here. */
-	pr_debug("blacklist_setup: adding range "
-		 "from 0.0.%04x to 0.0.%04x\n", from, to);
-	blacklist_range (ra, from, to);
 	return 1;
 }
@@ -213,12 +219,33 @@ __setup ("cio_ignore=", blacklist_setup);
  * Used by validate_subchannel()
  */
 int
-is_blacklisted (int devno)
+is_blacklisted (int ssid, int devno)
 {
-	return test_bit (devno, bl_dev);
+	return test_bit (devno, bl_dev[ssid]);
 }
 
 #ifdef CONFIG_PROC_FS
+static int
+__s390_redo_validation(struct subchannel_id schid, void *data)
+{
+	int ret;
+	struct subchannel *sch;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		/* Already known. */
+		put_device(&sch->dev);
+		return 0;
+	}
+	ret = css_probe_device(schid);
+	if (ret == -ENXIO)
+		return ret; /* We're through. */
+	if (ret == -ENOMEM)
+		/* Stop validation for now. Bad, but no need for a panic. */
+		return ret;
+	return 0;
+}
+
 /*
  * Function: s390_redo_validation
  * Look for no longer blacklisted devices
@@ -226,29 +253,9 @@ is_blacklisted (int devno)
 static inline void
 s390_redo_validation (void)
 {
-	unsigned int irq;
-
 	CIO_TRACE_EVENT (0, "redoval");
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		int ret;
-		struct subchannel *sch;
-
-		sch = get_subchannel_by_schid(irq);
-		if (sch) {
-			/* Already known. */
-			put_device(&sch->dev);
-			continue;
-		}
-		ret = css_probe_device(irq);
-		if (ret == -ENXIO)
-			break; /* We're through. */
-		if (ret == -ENOMEM)
-			/*
-			 * Stop validation for now. Bad, but no need for a
-			 * panic.
-			 */
-			break;
-	}
+
+	for_each_subchannel(__s390_redo_validation, NULL);
 }
 
 /*
@@ -278,41 +285,90 @@ blacklist_parse_proc_parameters (char *buf)
 	s390_redo_validation ();
 }
 
-/* FIXME: These should be real bus ids and not home-grown ones! */
-static int cio_ignore_read (char *page, char **start, off_t off,
-			    int count, int *eof, void *data)
+/* Iterator struct for all devices. */
+struct ccwdev_iter {
+	int devno;
+	int ssid;
+	int in_range;
+};
+
+static void *
+cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
 {
-	const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */
-	long devno;
-	int len;
-
-	len = 0;
-	for (devno = off; /* abuse the page variable
-			   * as counter, see fs/proc/generic.c */
-	     devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) {
-		if (!test_bit(devno, bl_dev))
-			continue;
-		len += sprintf(page + len, "0.0.%04lx", devno);
-		if (test_bit(devno + 1, bl_dev)) { /* print range */
-			while (++devno < __MAX_SUBCHANNELS)
-				if (!test_bit(devno, bl_dev))
-					break;
-			len += sprintf(page + len, "-0.0.%04lx", --devno);
-		}
-		len += sprintf(page + len, "\n");
-	}
+	struct ccwdev_iter *iter;
+
+	if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+		return NULL;
+	iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL);
+	if (!iter)
+		return ERR_PTR(-ENOMEM);
+	iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
+	iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
+	return iter;
+}
+
+static void
+cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
+{
+	if (!IS_ERR(it))
+		kfree(it);
+}
+
+static void *
+cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+	struct ccwdev_iter *iter;
+
+	if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+		return NULL;
+	iter = it;
+	if (iter->devno == __MAX_SUBCHANNEL) {
+		iter->devno = 0;
+		iter->ssid++;
+		if (iter->ssid > __MAX_SSID)
+			return NULL;
+	} else
+		iter->devno++;
+	(*offset)++;
+	return iter;
+}
 
-	if (devno < __MAX_SUBCHANNELS)
-		*eof = 1;
-	*start = (char *) (devno - off); /* number of checked entries */
-	return len;
+static int
+cio_ignore_proc_seq_show(struct seq_file *s, void *it)
+{
+	struct ccwdev_iter *iter;
+
+	iter = it;
+	if (!is_blacklisted(iter->ssid, iter->devno))
+		/* Not blacklisted, nothing to output. */
+		return 0;
+	if (!iter->in_range) {
+		/* First device in range. */
+		if ((iter->devno == __MAX_SUBCHANNEL) ||
+		    !is_blacklisted(iter->ssid, iter->devno + 1))
+			/* Singular device. */
+			return seq_printf(s, "0.%x.%04x\n",
+					  iter->ssid, iter->devno);
+		iter->in_range = 1;
+		return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+	}
+	if ((iter->devno == __MAX_SUBCHANNEL) ||
+	    !is_blacklisted(iter->ssid, iter->devno + 1)) {
+		/* Last device in range. */
+		iter->in_range = 0;
+		return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+	}
+	return 0;
 }
 
-static int cio_ignore_write(struct file *file, const char __user *user_buf,
-			    unsigned long user_len, void *data)
+static ssize_t
+cio_ignore_write(struct file *file, const char __user *user_buf,
+		 size_t user_len, loff_t *offset)
 {
 	char *buf;
 
+	if (*offset)
+		return -EINVAL;
 	if (user_len > 65536)
 		user_len = 65536;
 	buf = vmalloc (user_len + 1); /* maybe better use the stack? */
@@ -330,6 +386,27 @@ static int cio_ignore_write(struct file *file, const char __user *user_buf,
 	return user_len;
 }
 
+static struct seq_operations cio_ignore_proc_seq_ops = {
+	.start = cio_ignore_proc_seq_start,
+	.stop  = cio_ignore_proc_seq_stop,
+	.next  = cio_ignore_proc_seq_next,
+	.show  = cio_ignore_proc_seq_show,
+};
+
+static int
+cio_ignore_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &cio_ignore_proc_seq_ops);
+}
+
+static struct file_operations cio_ignore_proc_fops = {
+	.open    = cio_ignore_proc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+	.write   = cio_ignore_write,
+};
+
 static int
 cio_ignore_proc_init (void)
 {
@@ -340,8 +417,7 @@ cio_ignore_proc_init (void)
 	if (!entry)
 		return 0;
 
-	entry->read_proc  = cio_ignore_read;
-	entry->write_proc = cio_ignore_write;
+	entry->proc_fops = &cio_ignore_proc_fops;
 
 	return 1;
 }
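The /proc read side above moves from a hand-rolled read_proc to the seq_file iterator contract: start() positions an iterator from the file offset, next() advances it, show() prints one record, stop() releases it. A stand-alone imitation of that driver loop (plain C, hypothetical names; the real callbacks hang off struct seq_operations):

	#include <stdio.h>
	#include <stdlib.h>

	struct iter { int ssid; int devno; };

	static struct iter *it_start(long long *pos)
	{
		struct iter *it = malloc(sizeof(*it));
		if (!it)
			return NULL;
		it->ssid  = (int)(*pos / 0x10000);	/* one 64K range per set */
		it->devno = (int)(*pos % 0x10000);
		return it;
	}

	static struct iter *it_next(struct iter *it, long long *pos)
	{
		++*pos;
		if (++it->devno > 0xffff) {
			it->devno = 0;
			if (++it->ssid > 3) {	/* past the last set: end */
				free(it);
				return NULL;
			}
		}
		return it;
	}

	static void it_show(const struct iter *it)
	{
		printf("0.%x.%04x\n", it->ssid, it->devno);	/* bus-id style */
	}

	int main(void)
	{
		long long pos = 0;
		struct iter *it = it_start(&pos);

		for (int i = 0; it && i < 4; i++) {	/* seq_file-style loop */
			it_show(it);
			it = it_next(it, &pos);
		}
		free(it);	/* stop(); free(NULL) is a no-op */
		return 0;
	}
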
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
index fb42cafbe57c..95e25c1df922 100644
--- a/drivers/s390/cio/blacklist.h
+++ b/drivers/s390/cio/blacklist.h
@@ -1,6 +1,6 @@
 #ifndef S390_BLACKLIST_H
 #define S390_BLACKLIST_H
 
-extern int is_blacklisted (int devno);
+extern int is_blacklisted (int ssid, int devno);
 
 #endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index be9d2d65c22f..e849289d4f3c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/cio/ccwgroup.c
  *  bus driver for ccwgroup
- *   $Revision: 1.32 $
+ *   $Revision: 1.33 $
  *
  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *                       IBM Corporation
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
 	struct ccwgroup_driver *gdrv;
 	int ret;
 
-	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
 		return -EAGAIN;
 	if (gdev->state == CCWGROUP_ONLINE) {
 		ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
 	struct ccwgroup_driver *gdrv;
 	int ret;
 
-	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
 		return -EAGAIN;
 	if (gdev->state == CCWGROUP_OFFLINE) {
 		ret = 0;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fa3c23b80e3a..7270808c02d1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/cio/chsc.c
  *   S/390 common I/O routines -- channel subsystem call
- *   $Revision: 1.120 $
+ *   $Revision: 1.126 $
  *
  *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
  *                            IBM Corporation
@@ -24,8 +24,6 @@
 #include "ioasm.h"
 #include "chsc.h"
 
-static struct channel_path *chps[NR_CHPIDS];
-
 static void *sei_page;
 
 static int new_channel_path(int chpid);
@@ -33,13 +31,13 @@ static int new_channel_path(int chpid);
 static inline void
 set_chp_logically_online(int chp, int onoff)
 {
-	chps[chp]->state = onoff;
+	css[0]->chps[chp]->state = onoff;
 }
 
 static int
 get_chp_status(int chp)
 {
-	return (chps[chp] ? chps[chp]->state : -ENODEV);
+	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
 }
 
 void
@@ -77,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 
 	struct {
 		struct chsc_header request;
-		u16 reserved1;
+		u16 reserved1a:10;
+		u16 ssid:2;
+		u16 reserved1b:4;
 		u16 f_sch;	  /* first subchannel */
 		u16 reserved2;
 		u16 l_sch;	  /* last subchannel */
@@ -104,8 +104,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 		.code = 0x0004,
 	};
 
-	ssd_area->f_sch = sch->irq;
-	ssd_area->l_sch = sch->irq;
+	ssd_area->ssid = sch->schid.ssid;
+	ssd_area->f_sch = sch->schid.sch_no;
+	ssd_area->l_sch = sch->schid.sch_no;
 
 	ccode = chsc(ssd_area);
 	if (ccode > 0) {
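Throughout these cio hunks, a bare subchannel number (the old sch->irq) is replaced by a (ssid, subchannel number) pair, so CHSC requests and messages can name the subchannel set explicitly. A rough sketch of what such an identifier carries (illustrative only; the kernel's struct subchannel_id is defined in the cio headers):

	struct schid_sketch {
		unsigned int ssid:2;	/* subchannel set id, 0..__MAX_SSID */
		unsigned int sch_no:16;	/* subchannel number within the set */
	};

	static int schid_equal(struct schid_sketch a, struct schid_sketch b)
	{
		return a.ssid == b.ssid && a.sch_no == b.sch_no;
	}
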
@@ -147,7 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 	 */
 	if (ssd_area->st > 3) { /* uhm, that looks strange... */
 		CIO_CRW_EVENT(0, "Strange subchannel type %d"
-			      " for sch %04x\n", ssd_area->st, sch->irq);
+			      " for sch 0.%x.%04x\n", ssd_area->st,
+			      sch->schid.ssid, sch->schid.sch_no);
 		/*
 		 * There may have been a new subchannel type defined in the
 		 * time since this code was written; since we don't know which
@@ -156,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 		return 0;
 	} else {
 		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
-		CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
-			      sch->irq, type[ssd_area->st]);
+		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
+			      sch->schid.ssid, sch->schid.sch_no,
+			      type[ssd_area->st]);
 
 		sch->ssd_info.valid = 1;
 		sch->ssd_info.type = ssd_area->st;
@@ -218,13 +221,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	int j;
 	int mask;
 	struct subchannel *sch;
-	__u8 *chpid;
+	struct channel_path *chpid;
 	struct schib schib;
 
 	sch = to_subchannel(dev);
 	chpid = data;
 	for (j = 0; j < 8; j++)
-		if (sch->schib.pmcw.chpid[j] == *chpid)
+		if (sch->schib.pmcw.chpid[j] == chpid->id)
 			break;
 	if (j >= 8)
 		return 0;
@@ -232,7 +235,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	mask = 0x80 >> j;
 	spin_lock(&sch->lock);
 
-	stsch(sch->irq, &schib);
+	stsch(sch->schid, &schib);
 	if (!schib.pmcw.dnv)
 		goto out_unreg;
 	memcpy(&sch->schib, &schib, sizeof(struct schib));
@@ -284,7 +287,7 @@ out_unlock:
 out_unreg:
 	spin_unlock(&sch->lock);
 	sch->lpm = 0;
-	if (css_enqueue_subchannel_slow(sch->irq)) {
+	if (css_enqueue_subchannel_slow(sch->schid)) {
 		css_clear_subchannel_slow_list();
 		need_rescan = 1;
 	}
@@ -295,23 +298,30 @@ static inline void
 s390_set_chpid_offline( __u8 chpid)
 {
 	char dbf_txt[15];
+	struct device *dev;
 
 	sprintf(dbf_txt, "chpr%x", chpid);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (get_chp_status(chpid) <= 0)
 		return;
-
-	bus_for_each_dev(&css_bus_type, NULL, &chpid,
+	dev = get_device(&css[0]->chps[chpid]->dev);
+	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
 			 s390_subchannel_remove_chpid);
 
 	if (need_rescan || css_slow_subchannels_exist())
 		queue_work(slow_path_wq, &slow_path_work);
+	put_device(dev);
 }
 
+struct res_acc_data {
+	struct channel_path *chp;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static int
-s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
-			 struct subchannel *sch)
+s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
 {
 	int found;
 	int chp;
@@ -323,8 +333,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 		 * check if chpid is in information updated by ssd
 		 */
 		if (sch->ssd_info.valid &&
-		    sch->ssd_info.chpid[chp] == chpid &&
-		    (sch->ssd_info.fla[chp] & fla_mask) == fla) {
+		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
+		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
+		    == res_data->fla) {
 			found = 1;
 			break;
 		}
@@ -337,24 +348,87 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 	 * new path information and eventually check for logically
 	 * offline chpids.
 	 */
-	ccode = stsch(sch->irq, &sch->schib);
+	ccode = stsch(sch->schid, &sch->schib);
 	if (ccode > 0)
 		return 0;
 
 	return 0x80 >> chp;
 }
 
+static inline int
+s390_process_res_acc_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+	/*
+	 * We don't know the device yet, but since a path
+	 * may be available now to the device we'll have
+	 * to do recognition again.
+	 * Since we don't have any idea about which chpid
+	 * that beast may be on we'll have to do a stsch
+	 * on all devices, grr...
+	 */
+	if (stsch_err(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
 static int
-s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
+__s390_process_res_acc(struct subchannel_id schid, void *data)
 {
+	int chp_mask, old_lpm;
+	struct res_acc_data *res_data;
 	struct subchannel *sch;
-	int irq, rc;
+
+	res_data = (struct res_acc_data *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if a subchannel is newly available. */
+		return s390_process_res_acc_new_sch(schid);
+
+	spin_lock_irq(&sch->lock);
+
+	chp_mask = s390_process_res_acc_sch(res_data, sch);
+
+	if (chp_mask == 0) {
+		spin_unlock_irq(&sch->lock);
+		return 0;
+	}
+	old_lpm = sch->lpm;
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | chp_mask) & sch->opm;
+	if (!old_lpm && sch->lpm)
+		device_trigger_reprobe(sch);
+	else if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock_irq(&sch->lock);
+	put_device(&sch->dev);
+	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+}
+
+
+static int
+s390_process_res_acc (struct res_acc_data *res_data)
+{
+	int rc;
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x", chpid);
+	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
-	if (fla != 0) {
-		sprintf(dbf_txt, "fla%x", fla);
+	if (res_data->fla != 0) {
+		sprintf(dbf_txt, "fla%x", res_data->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
 
@@ -365,70 +439,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 	 * The more information we have (info), the less scanning
 	 * will we have to do.
 	 */
-
-	if (!get_chp_status(chpid))
-		return 0; /* no need to do the rest */
-
-	rc = 0;
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		int chp_mask, old_lpm;
-
-		sch = get_subchannel_by_schid(irq);
-		if (!sch) {
-			struct schib schib;
-			int ret;
-			/*
-			 * We don't know the device yet, but since a path
-			 * may be available now to the device we'll have
-			 * to do recognition again.
-			 * Since we don't have any idea about which chpid
-			 * that beast may be on we'll have to do a stsch
-			 * on all devices, grr...
-			 */
-			if (stsch(irq, &schib)) {
-				/* We're through */
-				if (need_rescan)
-					rc = -EAGAIN;
-				break;
-			}
-			if (need_rescan) {
-				rc = -EAGAIN;
-				continue;
-			}
-			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(irq);
-			if (ret) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-			rc = -EAGAIN;
-			continue;
-		}
-
-		spin_lock_irq(&sch->lock);
-
-		chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
-
-		if (chp_mask == 0) {
-
-			spin_unlock_irq(&sch->lock);
-			continue;
-		}
-		old_lpm = sch->lpm;
-		sch->lpm = ((sch->schib.pmcw.pim &
-			     sch->schib.pmcw.pam &
-			     sch->schib.pmcw.pom)
-			    | chp_mask) & sch->opm;
-		if (!old_lpm && sch->lpm)
-			device_trigger_reprobe(sch);
-		else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
-
-		spin_unlock_irq(&sch->lock);
-		put_device(&sch->dev);
-		if (fla_mask == 0xffff)
-			break;
-	}
+	rc = for_each_subchannel(__s390_process_res_acc, res_data);
+	if (css_slow_subchannels_exist())
+		rc = -EAGAIN;
+	else if (rc != -EAGAIN)
+		rc = 0;
 	return rc;
 }
 
@@ -466,6 +481,7 @@ int
 chsc_process_crw(void)
 {
 	int chpid, ret;
+	struct res_acc_data res_data;
 	struct {
 		struct chsc_header request;
 		u32 reserved1;
@@ -499,8 +515,9 @@ chsc_process_crw(void)
 	ret = 0;
 	do {
 		int ccode, status;
+		struct device *dev;
 		memset(sei_area, 0, sizeof(*sei_area));
-
+		memset(&res_data, 0, sizeof(struct res_acc_data));
 		sei_area->request = (struct chsc_header) {
 			.length = 0x0010,
 			.code = 0x000e,
@@ -573,26 +590,25 @@ chsc_process_crw(void)
 			if (status < 0)
 				new_channel_path(sei_area->rsid);
 			else if (!status)
-				return 0;
-			if ((sei_area->vf & 0x80) == 0) {
-				pr_debug("chpid: %x\n", sei_area->rsid);
-				ret = s390_process_res_acc(sei_area->rsid,
-							   0, 0);
-			} else if ((sei_area->vf & 0xc0) == 0x80) {
-				pr_debug("chpid: %x link addr: %x\n",
-					 sei_area->rsid, sei_area->fla);
-				ret = s390_process_res_acc(sei_area->rsid,
-							   sei_area->fla,
-							   0xff00);
-			} else if ((sei_area->vf & 0xc0) == 0xc0) {
-				pr_debug("chpid: %x full link addr: %x\n",
-					 sei_area->rsid, sei_area->fla);
-				ret = s390_process_res_acc(sei_area->rsid,
-							   sei_area->fla,
-							   0xffff);
+				break;
+			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+			res_data.chp = to_channelpath(dev);
+			pr_debug("chpid: %x", sei_area->rsid);
+			if ((sei_area->vf & 0xc0) != 0) {
+				res_data.fla = sei_area->fla;
+				if ((sei_area->vf & 0xc0) == 0xc0) {
+					pr_debug(" full link addr: %x",
+						 sei_area->fla);
+					res_data.fla_mask = 0xffff;
+				} else {
+					pr_debug(" link addr: %x",
+						 sei_area->fla);
+					res_data.fla_mask = 0xff00;
+				}
 			}
-			pr_debug("\n");
-
+			ret = s390_process_res_acc(&res_data);
+			pr_debug("\n\n");
+			put_device(dev);
 			break;
 
 		default: /* other stuff */
@@ -604,12 +620,72 @@ chsc_process_crw(void)
 	return ret;
 }
 
+static inline int
+__chp_add_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+
+	if (stsch(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+
 static int
-chp_add(int chpid)
+__chp_add(struct subchannel_id schid, void *data)
 {
+	int i;
+	struct channel_path *chp;
 	struct subchannel *sch;
-	int irq, ret, rc;
+
+	chp = (struct channel_path *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if the subchannel is now available. */
+		return __chp_add_new_sch(schid);
+	spin_lock(&sch->lock);
+	for (i=0; i<8; i++)
+		if (sch->schib.pmcw.chpid[i] == chp->id) {
+			if (stsch(sch->schid, &sch->schib) != 0) {
+				/* Endgame. */
+				spin_unlock(&sch->lock);
+				return -ENXIO;
+			}
+			break;
+		}
+	if (i==8) {
+		spin_unlock(&sch->lock);
+		return 0;
+	}
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | 0x80 >> i) & sch->opm;
+
+	if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock(&sch->lock);
+	put_device(&sch->dev);
+	return 0;
+}
+
+static int
+chp_add(int chpid)
+{
+	int rc;
 	char dbf_txt[15];
+	struct device *dev;
 
 	if (!get_chp_status(chpid))
 		return 0; /* no need to do the rest */
617 sprintf(dbf_txt, "cadd%x", chpid); 693 sprintf(dbf_txt, "cadd%x", chpid);
618 CIO_TRACE_EVENT(2, dbf_txt); 694 CIO_TRACE_EVENT(2, dbf_txt);
619 695
620 rc = 0; 696 dev = get_device(&css[0]->chps[chpid]->dev);
621 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 697 rc = for_each_subchannel(__chp_add, to_channelpath(dev));
622 int i; 698 if (css_slow_subchannels_exist())
623 699 rc = -EAGAIN;
624 sch = get_subchannel_by_schid(irq); 700 if (rc != -EAGAIN)
625 if (!sch) { 701 rc = 0;
626 struct schib schib; 702 put_device(dev);
627
628 if (stsch(irq, &schib)) {
629 /* We're through */
630 if (need_rescan)
631 rc = -EAGAIN;
632 break;
633 }
634 if (need_rescan) {
635 rc = -EAGAIN;
636 continue;
637 }
638 /* Put it on the slow path. */
639 ret = css_enqueue_subchannel_slow(irq);
640 if (ret) {
641 css_clear_subchannel_slow_list();
642 need_rescan = 1;
643 }
644 rc = -EAGAIN;
645 continue;
646 }
647
648 spin_lock(&sch->lock);
649 for (i=0; i<8; i++)
650 if (sch->schib.pmcw.chpid[i] == chpid) {
651 if (stsch(sch->irq, &sch->schib) != 0) {
652 /* Endgame. */
653 spin_unlock(&sch->lock);
654 return rc;
655 }
656 break;
657 }
658 if (i==8) {
659 spin_unlock(&sch->lock);
660 return rc;
661 }
662 sch->lpm = ((sch->schib.pmcw.pim &
663 sch->schib.pmcw.pam &
664 sch->schib.pmcw.pom)
665 | 0x80 >> i) & sch->opm;
666
667 if (sch->driver && sch->driver->verify)
668 sch->driver->verify(&sch->dev);
669
670 spin_unlock(&sch->lock);
671 put_device(&sch->dev);
672 }
673 return rc; 703 return rc;
674} 704}
675 705
@@ -702,7 +732,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
702 if (!device_is_online(sch)) 732 if (!device_is_online(sch))
703 /* cio could be doing I/O. */ 733 /* cio could be doing I/O. */
704 return 0; 734 return 0;
705 cc = stsch(sch->irq, &sch->schib); 735 cc = stsch(sch->schid, &sch->schib);
706 if (cc) 736 if (cc)
707 return 0; 737 return 0;
708 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { 738 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
@@ -743,7 +773,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
743 * just varied off path. Then kill it. 773 * just varied off path. Then kill it.
744 */ 774 */
745 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { 775 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
746 if (css_enqueue_subchannel_slow(sch->irq)) { 776 if (css_enqueue_subchannel_slow(sch->schid)) {
747 css_clear_subchannel_slow_list(); 777 css_clear_subchannel_slow_list();
748 need_rescan = 1; 778 need_rescan = 1;
749 } 779 }
@@ -781,6 +811,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
781 return 0; 811 return 0;
782} 812}
783 813
814static int
815__s390_vary_chpid_on(struct subchannel_id schid, void *data)
816{
817 struct schib schib;
818 struct subchannel *sch;
819
820 sch = get_subchannel_by_schid(schid);
821 if (sch) {
822 put_device(&sch->dev);
823 return 0;
824 }
825 if (stsch_err(schid, &schib))
826 /* We're through */
827 return -ENXIO;
828 /* Put it on the slow path. */
829 if (css_enqueue_subchannel_slow(schid)) {
830 css_clear_subchannel_slow_list();
831 need_rescan = 1;
832 return -EAGAIN;
833 }
834 return 0;
835}
836
784/* 837/*
785 * Function: s390_vary_chpid 838 * Function: s390_vary_chpid
786 * Varies the specified chpid online or offline 839 * Varies the specified chpid online or offline
@@ -789,8 +842,7 @@ static int
789s390_vary_chpid( __u8 chpid, int on) 842s390_vary_chpid( __u8 chpid, int on)
790{ 843{
791 char dbf_text[15]; 844 char dbf_text[15];
792 int status, irq, ret; 845 int status;
793 struct subchannel *sch;
794 846
795 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); 847 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
796 CIO_TRACE_EVENT( 2, dbf_text); 848 CIO_TRACE_EVENT( 2, dbf_text);
@@ -815,30 +867,9 @@ s390_vary_chpid( __u8 chpid, int on)
815 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? 867 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
816 s390_subchannel_vary_chpid_on : 868 s390_subchannel_vary_chpid_on :
817 s390_subchannel_vary_chpid_off); 869 s390_subchannel_vary_chpid_off);
818 if (!on) 870 if (on)
819 goto out; 871 /* Scan for new devices on varied on path. */
820 /* Scan for new devices on varied on path. */ 872 for_each_subchannel(__s390_vary_chpid_on, NULL);
821 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
822 struct schib schib;
823
824 if (need_rescan)
825 break;
826 sch = get_subchannel_by_schid(irq);
827 if (sch) {
828 put_device(&sch->dev);
829 continue;
830 }
831 if (stsch(irq, &schib))
832 /* We're through */
833 break;
834 /* Put it on the slow path. */
835 ret = css_enqueue_subchannel_slow(irq);
836 if (ret) {
837 css_clear_subchannel_slow_list();
838 need_rescan = 1;
839 }
840 }
841out:
842 if (need_rescan || css_slow_subchannels_exist()) 873 if (need_rescan || css_slow_subchannels_exist())
843 queue_work(slow_path_wq, &slow_path_work); 874 queue_work(slow_path_wq, &slow_path_work);
844 return 0; 875 return 0;
@@ -995,7 +1026,7 @@ new_channel_path(int chpid)
995 chp->id = chpid; 1026 chp->id = chpid;
996 chp->state = 1; 1027 chp->state = 1;
997 chp->dev = (struct device) { 1028 chp->dev = (struct device) {
998 .parent = &css_bus_device, 1029 .parent = &css[0]->device,
999 .release = chp_release, 1030 .release = chp_release,
1000 }; 1031 };
1001 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); 1032 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
@@ -1017,7 +1048,7 @@ new_channel_path(int chpid)
1017 device_unregister(&chp->dev); 1048 device_unregister(&chp->dev);
1018 goto out_free; 1049 goto out_free;
1019 } else 1050 } else
1020 chps[chpid] = chp; 1051 css[0]->chps[chpid] = chp;
1021 return ret; 1052 return ret;
1022out_free: 1053out_free:
1023 kfree(chp); 1054 kfree(chp);
@@ -1030,7 +1061,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1030 struct channel_path *chp; 1061 struct channel_path *chp;
1031 struct channel_path_desc *desc; 1062 struct channel_path_desc *desc;
1032 1063
1033 chp = chps[sch->schib.pmcw.chpid[chp_no]]; 1064 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
1034 if (!chp) 1065 if (!chp)
1035 return NULL; 1066 return NULL;
1036 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); 1067 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
@@ -1051,6 +1082,54 @@ chsc_alloc_sei_area(void)
1051 return (sei_page ? 0 : -ENOMEM); 1082 return (sei_page ? 0 : -ENOMEM);
1052} 1083}
1053 1084
1085int __init
1086chsc_enable_facility(int operation_code)
1087{
1088 int ret;
1089 struct {
1090 struct chsc_header request;
1091 u8 reserved1:4;
1092 u8 format:4;
1093 u8 reserved2;
1094 u16 operation_code;
1095 u32 reserved3;
1096 u32 reserved4;
1097 u32 operation_data_area[252];
1098 struct chsc_header response;
1099 u32 reserved5:4;
1100 u32 format2:4;
1101 u32 reserved6:24;
1102 } *sda_area;
1103
1104 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1105 if (!sda_area)
1106 return -ENOMEM;
1107 sda_area->request = (struct chsc_header) {
1108 .length = 0x0400,
1109 .code = 0x0031,
1110 };
1111 sda_area->operation_code = operation_code;
1112
1113 ret = chsc(sda_area);
1114 if (ret > 0) {
1115 ret = (ret == 3) ? -ENODEV : -EBUSY;
1116 goto out;
1117 }
1118 switch (sda_area->response.code) {
1119 case 0x0003: /* invalid request block */
1120 case 0x0007:
1121 ret = -EINVAL;
1122 break;
1123 case 0x0004: /* command not provided */
1124 case 0x0101: /* facility not provided */
1125 ret = -EOPNOTSUPP;
1126 break;
1127 }
1128 out:
1129 free_page((unsigned long)sda_area);
1130 return ret;
1131}
1132
1054subsys_initcall(chsc_alloc_sei_area); 1133subsys_initcall(chsc_alloc_sei_area);
1055 1134
1056struct css_general_char css_general_characteristics; 1135struct css_general_char css_general_characteristics;
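
chsc_enable_facility() above is the one genuinely new CHSC call in this file: it builds a 0x0400-byte request block with command code 0x0031 in a zeroed DMA page, stores the operation code, issues chsc(), and maps the condition and response codes onto errnos (responses 0x0003/0x0007 become -EINVAL, 0x0004/0x0101 become -EOPNOTSUPP, condition code 3 becomes -ENODEV). A hedged sketch of a caller, mirroring how init_channel_subsystem() in css.c uses it later in this patch (the function name here is illustrative only):

static int __init example_try_enable_mss(void)
{
	int ret;

	/* Ask the machine to enable multiple subchannel sets. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret == 0)
		return 0;	/* MSS active: sets 0..__MAX_SSID usable */
	if (ret == -EOPNOTSUPP || ret == -ENODEV)
		return 0;	/* facility absent: stay with set 0 */
	return ret;		/* -ENOMEM, -EBUSY or -EINVAL */
}
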
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index be20da49d147..44e4b4bb1c5a 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,12 +1,12 @@
1#ifndef S390_CHSC_H 1#ifndef S390_CHSC_H
2#define S390_CHSC_H 2#define S390_CHSC_H
3 3
4#define NR_CHPIDS 256
5
6#define CHSC_SEI_ACC_CHPID 1 4#define CHSC_SEI_ACC_CHPID 1
7#define CHSC_SEI_ACC_LINKADDR 2 5#define CHSC_SEI_ACC_LINKADDR 2
8#define CHSC_SEI_ACC_FULLLINKADDR 3 6#define CHSC_SEI_ACC_FULLLINKADDR 3
9 7
8#define CHSC_SDA_OC_MSS 0x2
9
10struct chsc_header { 10struct chsc_header {
11 u16 length; 11 u16 length;
12 u16 code; 12 u16 code;
@@ -43,7 +43,9 @@ struct css_general_char {
43 u32 ext_mb : 1; /* bit 48 */ 43 u32 ext_mb : 1; /* bit 48 */
44 u32 : 7; 44 u32 : 7;
45 u32 aif_tdd : 1; /* bit 56 */ 45 u32 aif_tdd : 1; /* bit 56 */
46 u32 : 10; 46 u32 : 1;
47 u32 qebsm : 1; /* bit 58 */
48 u32 : 8;
47 u32 aif_osa : 1; /* bit 67 */ 49 u32 aif_osa : 1; /* bit 67 */
48 u32 : 28; 50 u32 : 28;
49}__attribute__((packed)); 51}__attribute__((packed));
@@ -63,4 +65,9 @@ extern int chsc_determine_css_characteristics(void);
63extern int css_characteristics_avail; 65extern int css_characteristics_avail;
64 66
65extern void *chsc_get_chp_desc(struct subchannel*, int); 67extern void *chsc_get_chp_desc(struct subchannel*, int);
68
69extern int chsc_enable_facility(int);
70
71#define to_channelpath(dev) container_of(dev, struct channel_path, dev)
72
66#endif 73#endif
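
The new to_channelpath() macro is the usual container_of() pattern: chsc.c now passes struct device pointers around (see the get_device()/put_device() pairs in chp_add() and in the SEI handling above) and recovers the enclosing channel_path only where it needs the payload. A minimal illustration, assuming the caller already holds a reference on the device:

static int example_chpid_of(struct device *dev)
{
	/* dev must be the embedded &channel_path->dev. */
	struct channel_path *chp = to_channelpath(dev);

	return chp->id;	/* set in new_channel_path() */
}
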
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 185bc73c3ecd..7376bc87206d 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * $Revision: 1.135 $ 4 * $Revision: 1.138 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -135,7 +135,7 @@ cio_tpi(void)
135 return 0; 135 return 0;
136 irb = (struct irb *) __LC_IRB; 136 irb = (struct irb *) __LC_IRB;
137 /* Store interrupt response block to lowcore. */ 137 /* Store interrupt response block to lowcore. */
138 if (tsch (tpi_info->irq, irb) != 0) 138 if (tsch (tpi_info->schid, irb) != 0)
139 /* Not status pending or not operational. */ 139 /* Not status pending or not operational. */
140 return 1; 140 return 1;
141 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 141 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -163,10 +163,11 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
163 else 163 else
164 sch->lpm = 0; 164 sch->lpm = 0;
165 165
166 stsch (sch->irq, &sch->schib); 166 stsch (sch->schid, &sch->schib);
167 167
168 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " 168 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
169 "subchannel %04x!\n", sch->irq); 169 "subchannel 0.%x.%04x!\n", sch->schid.ssid,
170 sch->schid.sch_no);
170 sprintf(dbf_text, "no%s", sch->dev.bus_id); 171 sprintf(dbf_text, "no%s", sch->dev.bus_id);
171 CIO_TRACE_EVENT(0, dbf_text); 172 CIO_TRACE_EVENT(0, dbf_text);
172 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 173 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
@@ -194,7 +195,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
194 sch->orb.spnd = sch->options.suspend; 195 sch->orb.spnd = sch->options.suspend;
195 sch->orb.ssic = sch->options.suspend && sch->options.inter; 196 sch->orb.ssic = sch->options.suspend && sch->options.inter;
196 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; 197 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
197#ifdef CONFIG_ARCH_S390X 198#ifdef CONFIG_64BIT
198 /* 199 /*
199 * for 64 bit we always support 64 bit IDAWs with 4k page size only 200 * for 64 bit we always support 64 bit IDAWs with 4k page size only
200 */ 201 */
@@ -204,7 +205,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
204 sch->orb.key = key >> 4; 205 sch->orb.key = key >> 4;
205 /* issue "Start Subchannel" */ 206 /* issue "Start Subchannel" */
206 sch->orb.cpa = (__u32) __pa (cpa); 207 sch->orb.cpa = (__u32) __pa (cpa);
207 ccode = ssch (sch->irq, &sch->orb); 208 ccode = ssch (sch->schid, &sch->orb);
208 209
209 /* process condition code */ 210 /* process condition code */
210 sprintf (dbf_txt, "ccode:%d", ccode); 211 sprintf (dbf_txt, "ccode:%d", ccode);
@@ -243,7 +244,7 @@ cio_resume (struct subchannel *sch)
243 CIO_TRACE_EVENT (4, "resIO"); 244 CIO_TRACE_EVENT (4, "resIO");
244 CIO_TRACE_EVENT (4, sch->dev.bus_id); 245 CIO_TRACE_EVENT (4, sch->dev.bus_id);
245 246
246 ccode = rsch (sch->irq); 247 ccode = rsch (sch->schid);
247 248
248 sprintf (dbf_txt, "ccode:%d", ccode); 249 sprintf (dbf_txt, "ccode:%d", ccode);
249 CIO_TRACE_EVENT (4, dbf_txt); 250 CIO_TRACE_EVENT (4, dbf_txt);
@@ -283,7 +284,7 @@ cio_halt(struct subchannel *sch)
283 /* 284 /*
284 * Issue "Halt subchannel" and process condition code 285 * Issue "Halt subchannel" and process condition code
285 */ 286 */
286 ccode = hsch (sch->irq); 287 ccode = hsch (sch->schid);
287 288
288 sprintf (dbf_txt, "ccode:%d", ccode); 289 sprintf (dbf_txt, "ccode:%d", ccode);
289 CIO_TRACE_EVENT (2, dbf_txt); 290 CIO_TRACE_EVENT (2, dbf_txt);
@@ -318,7 +319,7 @@ cio_clear(struct subchannel *sch)
318 /* 319 /*
319 * Issue "Clear subchannel" and process condition code 320 * Issue "Clear subchannel" and process condition code
320 */ 321 */
321 ccode = csch (sch->irq); 322 ccode = csch (sch->schid);
322 323
323 sprintf (dbf_txt, "ccode:%d", ccode); 324 sprintf (dbf_txt, "ccode:%d", ccode);
324 CIO_TRACE_EVENT (2, dbf_txt); 325 CIO_TRACE_EVENT (2, dbf_txt);
@@ -351,7 +352,7 @@ cio_cancel (struct subchannel *sch)
351 CIO_TRACE_EVENT (2, "cancelIO"); 352 CIO_TRACE_EVENT (2, "cancelIO");
352 CIO_TRACE_EVENT (2, sch->dev.bus_id); 353 CIO_TRACE_EVENT (2, sch->dev.bus_id);
353 354
354 ccode = xsch (sch->irq); 355 ccode = xsch (sch->schid);
355 356
356 sprintf (dbf_txt, "ccode:%d", ccode); 357 sprintf (dbf_txt, "ccode:%d", ccode);
357 CIO_TRACE_EVENT (2, dbf_txt); 358 CIO_TRACE_EVENT (2, dbf_txt);
@@ -359,7 +360,7 @@ cio_cancel (struct subchannel *sch)
359 switch (ccode) { 360 switch (ccode) {
360 case 0: /* success */ 361 case 0: /* success */
361 /* Update information in scsw. */ 362 /* Update information in scsw. */
362 stsch (sch->irq, &sch->schib); 363 stsch (sch->schid, &sch->schib);
363 return 0; 364 return 0;
364 case 1: /* status pending */ 365 case 1: /* status pending */
365 return -EBUSY; 366 return -EBUSY;
@@ -381,7 +382,7 @@ cio_modify (struct subchannel *sch)
381 382
382 ret = 0; 383 ret = 0;
383 for (retry = 0; retry < 5; retry++) { 384 for (retry = 0; retry < 5; retry++) {
384 ccode = msch_err (sch->irq, &sch->schib); 385 ccode = msch_err (sch->schid, &sch->schib);
385 if (ccode < 0) /* -EIO if msch gets a program check. */ 386 if (ccode < 0) /* -EIO if msch gets a program check. */
386 return ccode; 387 return ccode;
387 switch (ccode) { 388 switch (ccode) {
@@ -414,7 +415,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
414 CIO_TRACE_EVENT (2, "ensch"); 415 CIO_TRACE_EVENT (2, "ensch");
415 CIO_TRACE_EVENT (2, sch->dev.bus_id); 416 CIO_TRACE_EVENT (2, sch->dev.bus_id);
416 417
417 ccode = stsch (sch->irq, &sch->schib); 418 ccode = stsch (sch->schid, &sch->schib);
418 if (ccode) 419 if (ccode)
419 return -ENODEV; 420 return -ENODEV;
420 421
@@ -432,13 +433,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
432 */ 433 */
433 sch->schib.pmcw.csense = 0; 434 sch->schib.pmcw.csense = 0;
434 if (ret == 0) { 435 if (ret == 0) {
435 stsch (sch->irq, &sch->schib); 436 stsch (sch->schid, &sch->schib);
436 if (sch->schib.pmcw.ena) 437 if (sch->schib.pmcw.ena)
437 break; 438 break;
438 } 439 }
439 if (ret == -EBUSY) { 440 if (ret == -EBUSY) {
440 struct irb irb; 441 struct irb irb;
441 if (tsch(sch->irq, &irb) != 0) 442 if (tsch(sch->schid, &irb) != 0)
442 break; 443 break;
443 } 444 }
444 } 445 }
@@ -461,7 +462,7 @@ cio_disable_subchannel (struct subchannel *sch)
461 CIO_TRACE_EVENT (2, "dissch"); 462 CIO_TRACE_EVENT (2, "dissch");
462 CIO_TRACE_EVENT (2, sch->dev.bus_id); 463 CIO_TRACE_EVENT (2, sch->dev.bus_id);
463 464
464 ccode = stsch (sch->irq, &sch->schib); 465 ccode = stsch (sch->schid, &sch->schib);
465 if (ccode == 3) /* Not operational. */ 466 if (ccode == 3) /* Not operational. */
466 return -ENODEV; 467 return -ENODEV;
467 468
@@ -485,7 +486,7 @@ cio_disable_subchannel (struct subchannel *sch)
485 */ 486 */
486 break; 487 break;
487 if (ret == 0) { 488 if (ret == 0) {
488 stsch (sch->irq, &sch->schib); 489 stsch (sch->schid, &sch->schib);
489 if (!sch->schib.pmcw.ena) 490 if (!sch->schib.pmcw.ena)
490 break; 491 break;
491 } 492 }
@@ -508,12 +509,12 @@ cio_disable_subchannel (struct subchannel *sch)
508 * -ENODEV for subchannels with invalid device number or blacklisted devices 509 * -ENODEV for subchannels with invalid device number or blacklisted devices
509 */ 510 */
510int 511int
511cio_validate_subchannel (struct subchannel *sch, unsigned int irq) 512cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
512{ 513{
513 char dbf_txt[15]; 514 char dbf_txt[15];
514 int ccode; 515 int ccode;
515 516
516 sprintf (dbf_txt, "valsch%x", irq); 517 sprintf (dbf_txt, "valsch%x", schid.sch_no);
517 CIO_TRACE_EVENT (4, dbf_txt); 518 CIO_TRACE_EVENT (4, dbf_txt);
518 519
519 /* Nuke all fields. */ 520 /* Nuke all fields. */
@@ -522,17 +523,20 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
522 spin_lock_init(&sch->lock); 523 spin_lock_init(&sch->lock);
523 524
524 /* Set a name for the subchannel */ 525 /* Set a name for the subchannel */
525 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq); 526 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
527 schid.sch_no);
526 528
527 /* 529 /*
528 * The first subchannel that is not-operational (ccode==3) 530 * The first subchannel that is not-operational (ccode==3)
529 * indicates that there aren't any more devices available. 531 * indicates that there aren't any more devices available.
532 * If stsch gets an exception, it means the current subchannel set
533 * is not valid.
530 */ 534 */
531 sch->irq = irq; 535 ccode = stsch_err (schid, &sch->schib);
532 ccode = stsch (irq, &sch->schib);
533 if (ccode) 536 if (ccode)
534 return -ENXIO; 537 return (ccode == 3) ? -ENXIO : ccode;
535 538
539 sch->schid = schid;
536 /* Copy subchannel type from path management control word. */ 540 /* Copy subchannel type from path management control word. */
537 sch->st = sch->schib.pmcw.st; 541 sch->st = sch->schib.pmcw.st;
538 542
@@ -541,9 +545,9 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
541 */ 545 */
542 if (sch->st != 0) { 546 if (sch->st != 0) {
543 CIO_DEBUG(KERN_INFO, 0, 547 CIO_DEBUG(KERN_INFO, 0,
544 "Subchannel %04X reports " 548 "Subchannel 0.%x.%04x reports "
545 "non-I/O subchannel type %04X\n", 549 "non-I/O subchannel type %04X\n",
546 sch->irq, sch->st); 550 sch->schid.ssid, sch->schid.sch_no, sch->st);
547 /* We stop here for non-io subchannels. */ 551 /* We stop here for non-io subchannels. */
548 return sch->st; 552 return sch->st;
549 } 553 }
@@ -554,26 +558,29 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
554 return -ENODEV; 558 return -ENODEV;
555 559
556 /* Devno is valid. */ 560 /* Devno is valid. */
557 if (is_blacklisted (sch->schib.pmcw.dev)) { 561 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
558 /* 562 /*
559 * This device must not be known to Linux. So we simply 563 * This device must not be known to Linux. So we simply
560 * say that there is no device and return ENODEV. 564 * say that there is no device and return ENODEV.
561 */ 565 */
562 CIO_MSG_EVENT(0, "Blacklisted device detected " 566 CIO_MSG_EVENT(0, "Blacklisted device detected "
563 "at devno %04X\n", sch->schib.pmcw.dev); 567 "at devno %04X, subchannel set %x\n",
568 sch->schib.pmcw.dev, sch->schid.ssid);
564 return -ENODEV; 569 return -ENODEV;
565 } 570 }
566 sch->opm = 0xff; 571 sch->opm = 0xff;
567 chsc_validate_chpids(sch); 572 if (!cio_is_console(sch->schid))
573 chsc_validate_chpids(sch);
568 sch->lpm = sch->schib.pmcw.pim & 574 sch->lpm = sch->schib.pmcw.pim &
569 sch->schib.pmcw.pam & 575 sch->schib.pmcw.pam &
570 sch->schib.pmcw.pom & 576 sch->schib.pmcw.pom &
571 sch->opm; 577 sch->opm;
572 578
573 CIO_DEBUG(KERN_INFO, 0, 579 CIO_DEBUG(KERN_INFO, 0,
574 "Detected device %04X on subchannel %04X" 580 "Detected device %04x on subchannel 0.%x.%04X"
575 " - PIM = %02X, PAM = %02X, POM = %02X\n", 581 " - PIM = %02X, PAM = %02X, POM = %02X\n",
576 sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim, 582 sch->schib.pmcw.dev, sch->schid.ssid,
583 sch->schid.sch_no, sch->schib.pmcw.pim,
577 sch->schib.pmcw.pam, sch->schib.pmcw.pom); 584 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
578 585
579 /* 586 /*
@@ -632,7 +639,7 @@ do_IRQ (struct pt_regs *regs)
632 if (sch) 639 if (sch)
633 spin_lock(&sch->lock); 640 spin_lock(&sch->lock);
634 /* Store interrupt response block to lowcore. */ 641 /* Store interrupt response block to lowcore. */
635 if (tsch (tpi_info->irq, irb) == 0 && sch) { 642 if (tsch (tpi_info->schid, irb) == 0 && sch) {
636 /* Keep subchannel information word up to date. */ 643 /* Keep subchannel information word up to date. */
637 memcpy (&sch->schib.scsw, &irb->scsw, 644 memcpy (&sch->schib.scsw, &irb->scsw,
638 sizeof (irb->scsw)); 645 sizeof (irb->scsw));
@@ -691,28 +698,36 @@ wait_cons_dev (void)
691} 698}
692 699
693static int 700static int
694cio_console_irq(void) 701cio_test_for_console(struct subchannel_id schid, void *data)
695{ 702{
696 int irq; 703 if (stsch_err(schid, &console_subchannel.schib) != 0)
704 return -ENXIO;
705 if (console_subchannel.schib.pmcw.dnv &&
706 console_subchannel.schib.pmcw.dev ==
707 console_devno) {
708 console_irq = schid.sch_no;
709 return 1; /* found */
710 }
711 return 0;
712}
713
714
715static int
716cio_get_console_sch_no(void)
717{
718 struct subchannel_id schid;
697 719
720 init_subchannel_id(&schid);
698 if (console_irq != -1) { 721 if (console_irq != -1) {
699 /* VM provided us with the irq number of the console. */ 722 /* VM provided us with the irq number of the console. */
723 schid.sch_no = console_irq;
700 if (stsch(console_irq, &console_subchannel.schib) != 0 || 724 if (stsch(schid, &console_subchannel.schib) != 0 ||
701 !console_subchannel.schib.pmcw.dnv) 725 !console_subchannel.schib.pmcw.dnv)
702 return -1; 726 return -1;
703 console_devno = console_subchannel.schib.pmcw.dev; 727 console_devno = console_subchannel.schib.pmcw.dev;
704 } else if (console_devno != -1) { 728 } else if (console_devno != -1) {
705 /* At least the console device number is known. */ 729 /* At least the console device number is known. */
706 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 730 for_each_subchannel(cio_test_for_console, NULL);
707 if (stsch(irq, &console_subchannel.schib) != 0)
708 break;
709 if (console_subchannel.schib.pmcw.dnv &&
710 console_subchannel.schib.pmcw.dev ==
711 console_devno) {
712 console_irq = irq;
713 break;
714 }
715 }
716 if (console_irq == -1) 731 if (console_irq == -1)
717 return -1; 732 return -1;
718 } else { 733 } else {
@@ -728,17 +743,20 @@ cio_console_irq(void)
728struct subchannel * 743struct subchannel *
729cio_probe_console(void) 744cio_probe_console(void)
730{ 745{
731 int irq, ret; 746 int sch_no, ret;
747 struct subchannel_id schid;
732 748
733 if (xchg(&console_subchannel_in_use, 1) != 0) 749 if (xchg(&console_subchannel_in_use, 1) != 0)
734 return ERR_PTR(-EBUSY); 750 return ERR_PTR(-EBUSY);
735 irq = cio_console_irq(); 751 sch_no = cio_get_console_sch_no();
736 if (irq == -1) { 752 if (sch_no == -1) {
737 console_subchannel_in_use = 0; 753 console_subchannel_in_use = 0;
738 return ERR_PTR(-ENODEV); 754 return ERR_PTR(-ENODEV);
739 } 755 }
740 memset(&console_subchannel, 0, sizeof(struct subchannel)); 756 memset(&console_subchannel, 0, sizeof(struct subchannel));
757 init_subchannel_id(&schid);
758 schid.sch_no = sch_no;
741 ret = cio_validate_subchannel(&console_subchannel, irq); 759 ret = cio_validate_subchannel(&console_subchannel, schid);
742 if (ret) { 760 if (ret) {
743 console_subchannel_in_use = 0; 761 console_subchannel_in_use = 0;
744 return ERR_PTR(-ENODEV); 762 return ERR_PTR(-ENODEV);
@@ -770,11 +788,11 @@ cio_release_console(void)
770 788
771/* Bah... hack to catch console special sausages. */ 789/* Bah... hack to catch console special sausages. */
772int 790int
773cio_is_console(int irq) 791cio_is_console(struct subchannel_id schid)
774{ 792{
775 if (!console_subchannel_in_use) 793 if (!console_subchannel_in_use)
776 return 0; 794 return 0;
777 return (irq == console_subchannel.irq); 795 return schid_equal(&schid, &console_subchannel.schid);
778} 796}
779 797
780struct subchannel * 798struct subchannel *
@@ -787,7 +805,7 @@ cio_get_console_subchannel(void)
787 805
788#endif 806#endif
789static inline int 807static inline int
790__disable_subchannel_easy(unsigned int schid, struct schib *schib) 808__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
791{ 809{
792 int retry, cc; 810 int retry, cc;
793 811
@@ -805,7 +823,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib)
805} 823}
806 824
807static inline int 825static inline int
808__clear_subchannel_easy(unsigned int schid) 826__clear_subchannel_easy(struct subchannel_id schid)
809{ 827{
810 int retry; 828 int retry;
811 829
@@ -815,8 +833,8 @@ __clear_subchannel_easy(unsigned int schid)
815 struct tpi_info ti; 833 struct tpi_info ti;
816 834
817 if (tpi(&ti)) { 835 if (tpi(&ti)) {
818 tsch(ti.irq, (struct irb *)__LC_IRB); 836 tsch(ti.schid, (struct irb *)__LC_IRB);
819 if (ti.irq == schid) 837 if (schid_equal(&ti.schid, &schid))
820 return 0; 838 return 0;
821 } 839 }
822 udelay(100); 840 udelay(100);
@@ -825,31 +843,33 @@ __clear_subchannel_easy(unsigned int schid)
825} 843}
826 844
827extern void do_reipl(unsigned long devno); 845extern void do_reipl(unsigned long devno);
846static int
847__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
848{
849 struct schib schib;
850
851 if (stsch_err(schid, &schib))
852 return -ENXIO;
853 if (!schib.pmcw.ena)
854 return 0;
855 switch(__disable_subchannel_easy(schid, &schib)) {
856 case 0:
857 case -ENODEV:
858 break;
859 default: /* -EBUSY */
860 if (__clear_subchannel_easy(schid))
861 break; /* give up... */
862 stsch(schid, &schib);
863 __disable_subchannel_easy(schid, &schib);
864 }
865 return 0;
866}
828 867
829/* Clear all subchannels. */
830void 868void
831clear_all_subchannels(void) 869clear_all_subchannels(void)
832{ 870{
833 unsigned int schid;
834
835 local_irq_disable(); 871 local_irq_disable();
836 for (schid=0;schid<=highest_subchannel;schid++) { 872 for_each_subchannel(__shutdown_subchannel_easy, NULL);
837 struct schib schib;
838 if (stsch(schid, &schib))
839 break; /* break out of the loop */
840 if (!schib.pmcw.ena)
841 continue;
842 switch(__disable_subchannel_easy(schid, &schib)) {
843 case 0:
844 case -ENODEV:
845 break;
846 default: /* -EBUSY */
847 if (__clear_subchannel_easy(schid))
848 break; /* give up... jump out of switch */
849 stsch(schid, &schib);
850 __disable_subchannel_easy(schid, &schib);
851 }
852 }
853} 873}
854 874
855/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 875/* Make sure all subchannels are quiet before we re-ipl an lpar. */
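
The cio.c hunks convert every low-level I/O instruction from the bare sch->irq number to the richer sch->schid, and every open-coded scan over 0..__MAX_SUBCHANNELS into a for_each_subchannel() callback, with cio_test_for_console() and __shutdown_subchannel_easy() as the models. A sketch of the same callback style for a hypothetical search by device number (the struct and helper names are invented for illustration; stsch_err() and the pmcw fields are from the patch):

struct example_devno_search {
	__u16 devno;
	struct subchannel_id schid;	/* result, valid if found != 0 */
	int found;
};

static int example_find_devno(struct subchannel_id schid, void *data)
{
	struct example_devno_search *s = data;
	struct schib schib;

	if (stsch_err(schid, &schib))
		return -ENXIO;	/* ends the scan of this subchannel set */
	if (schib.pmcw.dnv && schib.pmcw.dev == s->devno) {
		s->schid = schid;
		s->found = 1;
		return 1;	/* non-zero also stops this set's scan */
	}
	return 0;		/* keep iterating */
}

A caller fills in .devno, passes the struct as the data cookie to for_each_subchannel(), and checks .found afterwards, much as cio_get_console_sch_no() checks console_irq.
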
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index c50a9da420a9..0ca987344e07 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,6 +1,8 @@
1#ifndef S390_CIO_H 1#ifndef S390_CIO_H
2#define S390_CIO_H 2#define S390_CIO_H
3 3
4#include "schid.h"
5
4/* 6/*
5 * where we put the ssd info 7 * where we put the ssd info
6 */ 8 */
@@ -83,7 +85,7 @@ struct orb {
83 85
84/* subchannel data structure used by I/O subroutines */ 86/* subchannel data structure used by I/O subroutines */
85struct subchannel { 87struct subchannel {
86 unsigned int irq; /* aka. subchannel number */ 88 struct subchannel_id schid;
87 spinlock_t lock; /* subchannel lock */ 89 spinlock_t lock; /* subchannel lock */
88 90
89 enum { 91 enum {
@@ -114,7 +116,7 @@ struct subchannel {
114 116
115#define to_subchannel(n) container_of(n, struct subchannel, dev) 117#define to_subchannel(n) container_of(n, struct subchannel, dev)
116 118
117extern int cio_validate_subchannel (struct subchannel *, unsigned int); 119extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
118extern int cio_enable_subchannel (struct subchannel *, unsigned int); 120extern int cio_enable_subchannel (struct subchannel *, unsigned int);
119extern int cio_disable_subchannel (struct subchannel *); 121extern int cio_disable_subchannel (struct subchannel *);
120extern int cio_cancel (struct subchannel *); 122extern int cio_cancel (struct subchannel *);
@@ -127,14 +129,15 @@ extern int cio_cancel (struct subchannel *);
127extern int cio_set_options (struct subchannel *, int); 129extern int cio_set_options (struct subchannel *, int);
128extern int cio_get_options (struct subchannel *); 130extern int cio_get_options (struct subchannel *);
129extern int cio_modify (struct subchannel *); 131extern int cio_modify (struct subchannel *);
132
130/* Use with care. */ 133/* Use with care. */
131#ifdef CONFIG_CCW_CONSOLE 134#ifdef CONFIG_CCW_CONSOLE
132extern struct subchannel *cio_probe_console(void); 135extern struct subchannel *cio_probe_console(void);
133extern void cio_release_console(void); 136extern void cio_release_console(void);
134extern int cio_is_console(int irq); 137extern int cio_is_console(struct subchannel_id);
135extern struct subchannel *cio_get_console_subchannel(void); 138extern struct subchannel *cio_get_console_subchannel(void);
136#else 139#else
137#define cio_is_console(irq) 0 140#define cio_is_console(schid) 0
138#define cio_get_console_subchannel() NULL 141#define cio_get_console_subchannel() NULL
139#endif 142#endif
140 143
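
cio.h now pulls its identifier type from schid.h, which is not part of this excerpt. From the way the rest of the patch uses it (schid.ssid masked with 3, schid.sch_no up to __MAX_SUBCHANNEL, init_subchannel_id(), schid_equal()), the layout is presumably close to the sketch below; the exact field widths and the reserved/"one" bits are an assumption here:

struct subchannel_id {
	__u32 reserved : 13;
	__u32 ssid : 2;		/* subchannel set id, 0..__MAX_SSID */
	__u32 one : 1;		/* distinguishes a valid schid */
	__u32 sch_no : 16;	/* subchannel number, 0..__MAX_SUBCHANNEL */
} __attribute__ ((packed));

On that layout, schid_equal() reduces to comparing two 32-bit words, and init_subchannel_id() to zeroing the structure and setting the one bit.
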
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b978f7fe8327..0b03714e696a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $) 2 * linux/drivers/s390/cio/cmf.c ($Revision: 1.19 $)
3 * 3 *
4 * Linux on zSeries Channel Measurement Facility support 4 * Linux on zSeries Channel Measurement Facility support
5 * 5 *
@@ -178,7 +178,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
178 /* msch can silently fail, so do it again if necessary */ 178 /* msch can silently fail, so do it again if necessary */
179 for (retry = 0; retry < 3; retry++) { 179 for (retry = 0; retry < 3; retry++) {
180 /* prepare schib */ 180 /* prepare schib */
181 stsch(sch->irq, schib); 181 stsch(sch->schid, schib);
182 schib->pmcw.mme = mme; 182 schib->pmcw.mme = mme;
183 schib->pmcw.mbfc = mbfc; 183 schib->pmcw.mbfc = mbfc;
184 /* address can be either a block address or a block index */ 184 /* address can be either a block address or a block index */
@@ -188,7 +188,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
188 schib->pmcw.mbi = address; 188 schib->pmcw.mbi = address;
189 189
190 /* try to submit it */ 190 /* try to submit it */
191 switch(ret = msch_err(sch->irq, schib)) { 191 switch(ret = msch_err(sch->schid, schib)) {
192 case 0: 192 case 0:
193 break; 193 break;
194 case 1: 194 case 1:
@@ -202,7 +202,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
202 ret = -EINVAL; 202 ret = -EINVAL;
203 break; 203 break;
204 } 204 }
205 stsch(sch->irq, schib); /* restore the schib */ 205 stsch(sch->schid, schib); /* restore the schib */
206 206
207 if (ret) 207 if (ret)
208 break; 208 break;
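
The cmf.c change is purely mechanical (sch->irq becomes sch->schid), but the loop it lands in is worth spelling out, since msch can silently fail: set_schib() re-fetches the schib, patches the measurement fields in, and resubmits a bounded number of times. A stripped-down sketch of that idiom, not the driver code itself; sch, schib and mme are as in set_schib() above:

int retry, ret = -EBUSY;

for (retry = 0; retry < 3; retry++) {
	stsch(sch->schid, schib);	/* refresh our copy of the schib */
	schib->pmcw.mme = mme;		/* patch the measurement bits */
	ret = msch_err(sch->schid, schib);
	if (ret != 1)			/* cc 1: status pending, retry */
		break;
}
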
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 555119cacc27..e565193650c7 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/css.c 2 * drivers/s390/cio/css.c
3 * driver for channel subsystem 3 * driver for channel subsystem
4 * $Revision: 1.85 $ 4 * $Revision: 1.93 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -21,19 +21,35 @@
21#include "ioasm.h" 21#include "ioasm.h"
22#include "chsc.h" 22#include "chsc.h"
23 23
24unsigned int highest_subchannel;
25int need_rescan = 0; 24int need_rescan = 0;
26int css_init_done = 0; 25int css_init_done = 0;
26static int max_ssid = 0;
27
28struct channel_subsystem *css[__MAX_CSSID + 1];
27 29
28struct pgid global_pgid;
29int css_characteristics_avail = 0; 30int css_characteristics_avail = 0;
30 31
31struct device css_bus_device = { 32inline int
32 .bus_id = "css0", 33for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
33}; 34{
35 struct subchannel_id schid;
36 int ret;
37
38 init_subchannel_id(&schid);
39 ret = -ENODEV;
40 do {
41 do {
42 ret = fn(schid, data);
43 if (ret)
44 break;
45 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
46 schid.sch_no = 0;
47 } while (schid.ssid++ < max_ssid);
48 return ret;
49}
34 50
35static struct subchannel * 51static struct subchannel *
36css_alloc_subchannel(int irq) 52css_alloc_subchannel(struct subchannel_id schid)
37{ 53{
38 struct subchannel *sch; 54 struct subchannel *sch;
39 int ret; 55 int ret;
@@ -41,13 +57,11 @@ css_alloc_subchannel(int irq)
41 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); 57 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
42 if (sch == NULL) 58 if (sch == NULL)
43 return ERR_PTR(-ENOMEM); 59 return ERR_PTR(-ENOMEM);
44 ret = cio_validate_subchannel (sch, irq); 60 ret = cio_validate_subchannel (sch, schid);
45 if (ret < 0) { 61 if (ret < 0) {
46 kfree(sch); 62 kfree(sch);
47 return ERR_PTR(ret); 63 return ERR_PTR(ret);
48 } 64 }
49 if (irq > highest_subchannel)
50 highest_subchannel = irq;
51 65
52 if (sch->st != SUBCHANNEL_TYPE_IO) { 66 if (sch->st != SUBCHANNEL_TYPE_IO) {
53 /* For now we ignore all non-io subchannels. */ 67 /* For now we ignore all non-io subchannels. */
@@ -87,7 +101,7 @@ css_subchannel_release(struct device *dev)
87 struct subchannel *sch; 101 struct subchannel *sch;
88 102
89 sch = to_subchannel(dev); 103 sch = to_subchannel(dev);
90 if (!cio_is_console(sch->irq)) 104 if (!cio_is_console(sch->schid))
91 kfree(sch); 105 kfree(sch);
92} 106}
93 107
@@ -99,7 +113,7 @@ css_register_subchannel(struct subchannel *sch)
99 int ret; 113 int ret;
100 114
101 /* Initialize the subchannel structure */ 115 /* Initialize the subchannel structure */
102 sch->dev.parent = &css_bus_device; 116 sch->dev.parent = &css[0]->device;
103 sch->dev.bus = &css_bus_type; 117 sch->dev.bus = &css_bus_type;
104 sch->dev.release = &css_subchannel_release; 118 sch->dev.release = &css_subchannel_release;
105 119
@@ -114,12 +128,12 @@ css_register_subchannel(struct subchannel *sch)
114} 128}
115 129
116int 130int
117css_probe_device(int irq) 131css_probe_device(struct subchannel_id schid)
118{ 132{
119 int ret; 133 int ret;
120 struct subchannel *sch; 134 struct subchannel *sch;
121 135
122 sch = css_alloc_subchannel(irq); 136 sch = css_alloc_subchannel(schid);
123 if (IS_ERR(sch)) 137 if (IS_ERR(sch))
124 return PTR_ERR(sch); 138 return PTR_ERR(sch);
125 ret = css_register_subchannel(sch); 139 ret = css_register_subchannel(sch);
@@ -132,26 +146,26 @@ static int
132check_subchannel(struct device * dev, void * data) 146check_subchannel(struct device * dev, void * data)
133{ 147{
134 struct subchannel *sch; 148 struct subchannel *sch;
135 int irq = (unsigned long)data; 149 struct subchannel_id *schid = data;
136 150
137 sch = to_subchannel(dev); 151 sch = to_subchannel(dev);
138 return (sch->irq == irq); 152 return schid_equal(&sch->schid, schid);
139} 153}
140 154
141struct subchannel * 155struct subchannel *
142get_subchannel_by_schid(int irq) 156get_subchannel_by_schid(struct subchannel_id schid)
143{ 157{
144 struct device *dev; 158 struct device *dev;
145 159
146 dev = bus_find_device(&css_bus_type, NULL, 160 dev = bus_find_device(&css_bus_type, NULL,
147 (void *)(unsigned long)irq, check_subchannel); 161 (void *)&schid, check_subchannel);
148 162
149 return dev ? to_subchannel(dev) : NULL; 163 return dev ? to_subchannel(dev) : NULL;
150} 164}
151 165
152 166
153static inline int 167static inline int
154css_get_subchannel_status(struct subchannel *sch, int schid) 168css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
155{ 169{
156 struct schib schib; 170 struct schib schib;
157 int cc; 171 int cc;
@@ -170,13 +184,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid)
170} 184}
171 185
172static int 186static int
173css_evaluate_subchannel(int irq, int slow) 187css_evaluate_subchannel(struct subchannel_id schid, int slow)
174{ 188{
175 int event, ret, disc; 189 int event, ret, disc;
176 struct subchannel *sch; 190 struct subchannel *sch;
177 unsigned long flags; 191 unsigned long flags;
178 192
179 sch = get_subchannel_by_schid(irq); 193 sch = get_subchannel_by_schid(schid);
180 disc = sch ? device_is_disconnected(sch) : 0; 194 disc = sch ? device_is_disconnected(sch) : 0;
181 if (disc && slow) { 195 if (disc && slow) {
182 if (sch) 196 if (sch)
@@ -194,9 +208,10 @@ css_evaluate_subchannel(int irq, int slow)
194 put_device(&sch->dev); 208 put_device(&sch->dev);
195 return -EAGAIN; /* Will be done on the slow path. */ 209 return -EAGAIN; /* Will be done on the slow path. */
196 } 210 }
197 event = css_get_subchannel_status(sch, irq); 211 event = css_get_subchannel_status(sch, schid);
198 CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n", 212 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
199 irq, event, sch?(disc?"disconnected":"normal"):"unknown", 213 schid.ssid, schid.sch_no, event,
214 sch?(disc?"disconnected":"normal"):"unknown",
200 slow?"slow":"fast"); 215 slow?"slow":"fast");
201 switch (event) { 216 switch (event) {
202 case CIO_NO_PATH: 217 case CIO_NO_PATH:
@@ -253,7 +268,7 @@ css_evaluate_subchannel(int irq, int slow)
253 sch->schib.pmcw.intparm = 0; 268 sch->schib.pmcw.intparm = 0;
254 cio_modify(sch); 269 cio_modify(sch);
255 put_device(&sch->dev); 270 put_device(&sch->dev);
256 ret = css_probe_device(irq); 271 ret = css_probe_device(schid);
257 } else { 272 } else {
258 /* 273 /*
259 * We can't immediately deregister the disconnected 274 * We can't immediately deregister the disconnected
@@ -272,7 +287,7 @@ css_evaluate_subchannel(int irq, int slow)
272 device_trigger_reprobe(sch); 287 device_trigger_reprobe(sch);
273 spin_unlock_irqrestore(&sch->lock, flags); 288 spin_unlock_irqrestore(&sch->lock, flags);
274 } 289 }
275 ret = sch ? 0 : css_probe_device(irq); 290 ret = sch ? 0 : css_probe_device(schid);
276 break; 291 break;
277 default: 292 default:
278 BUG(); 293 BUG();
@@ -281,28 +296,15 @@ css_evaluate_subchannel(int irq, int slow)
281 return ret; 296 return ret;
282} 297}
283 298
284static void 299static int
285css_rescan_devices(void) 300css_rescan_devices(struct subchannel_id schid, void *data)
286{ 301{
287 int irq, ret; 302 return css_evaluate_subchannel(schid, 1);
288
289 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
290 ret = css_evaluate_subchannel(irq, 1);
291 /* No more memory. It doesn't make sense to continue. No
292 * panic because this can happen in midflight and just
293 * because we can't use a new device is no reason to crash
294 * the system. */
295 if (ret == -ENOMEM)
296 break;
297 /* -ENXIO indicates that there are no more subchannels. */
298 if (ret == -ENXIO)
299 break;
300 }
301} 303}
302 304
303struct slow_subchannel { 305struct slow_subchannel {
304 struct list_head slow_list; 306 struct list_head slow_list;
305 unsigned long schid; 307 struct subchannel_id schid;
306}; 308};
307 309
308static LIST_HEAD(slow_subchannels_head); 310static LIST_HEAD(slow_subchannels_head);
@@ -315,7 +317,7 @@ css_trigger_slow_path(void)
315 317
316 if (need_rescan) { 318 if (need_rescan) {
317 need_rescan = 0; 319 need_rescan = 0;
318 css_rescan_devices(); 320 for_each_subchannel(css_rescan_devices, NULL);
319 return; 321 return;
320 } 322 }
321 323
@@ -354,23 +356,31 @@ css_reiterate_subchannels(void)
354 * Called from the machine check handler for subchannel report words. 356 * Called from the machine check handler for subchannel report words.
355 */ 357 */
356int 358int
357css_process_crw(int irq) 359css_process_crw(int rsid1, int rsid2)
358{ 360{
359 int ret; 361 int ret;
362 struct subchannel_id mchk_schid;
360 363
361 CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); 364 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
365 rsid1, rsid2);
362 366
363 if (need_rescan) 367 if (need_rescan)
364 /* We need to iterate all subchannels anyway. */ 368 /* We need to iterate all subchannels anyway. */
365 return -EAGAIN; 369 return -EAGAIN;
370
371 init_subchannel_id(&mchk_schid);
372 mchk_schid.sch_no = rsid1;
373 if (rsid2 != 0)
374 mchk_schid.ssid = (rsid2 >> 8) & 3;
375
366 /* 376 /*
367 * Since we are always presented with IPI in the CRW, we have to 377 * Since we are always presented with IPI in the CRW, we have to
368 * use stsch() to find out if the subchannel in question has come 378 * use stsch() to find out if the subchannel in question has come
369 * or gone. 379 * or gone.
370 */ 380 */
371 ret = css_evaluate_subchannel(irq, 0); 381 ret = css_evaluate_subchannel(mchk_schid, 0);
372 if (ret == -EAGAIN) { 382 if (ret == -EAGAIN) {
373 if (css_enqueue_subchannel_slow(irq)) { 383 if (css_enqueue_subchannel_slow(mchk_schid)) {
374 css_clear_subchannel_slow_list(); 384 css_clear_subchannel_slow_list();
375 need_rescan = 1; 385 need_rescan = 1;
376 } 386 }
@@ -378,22 +388,83 @@ css_process_crw(int irq)
378 return ret; 388 return ret;
379} 389}
380 390
381static void __init 391static int __init
382css_generate_pgid(void) 392__init_channel_subsystem(struct subchannel_id schid, void *data)
383{ 393{
384 /* Let's build our path group ID here. */ 394 struct subchannel *sch;
385 if (css_characteristics_avail && css_general_characteristics.mcss) 395 int ret;
386 global_pgid.cpu_addr = 0x8000; 396
397 if (cio_is_console(schid))
398 sch = cio_get_console_subchannel();
387 else { 399 else {
400 sch = css_alloc_subchannel(schid);
401 if (IS_ERR(sch))
402 ret = PTR_ERR(sch);
403 else
404 ret = 0;
405 switch (ret) {
406 case 0:
407 break;
408 case -ENOMEM:
409 panic("Out of memory in init_channel_subsystem\n");
410 /* -ENXIO: no more subchannels. */
411 case -ENXIO:
412 return ret;
413 default:
414 return 0;
415 }
416 }
417 /*
418 * We register ALL valid subchannels in ioinfo, even those
419 * that have been present before init_channel_subsystem.
420 * These subchannels can't have been registered yet (kmalloc
421 * not working) so we do it now. This is true e.g. for the
422 * console subchannel.
423 */
424 css_register_subchannel(sch);
425 return 0;
426}
427
428static void __init
429css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
430{
431 if (css_characteristics_avail && css_general_characteristics.mcss) {
432 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
433 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
434 } else {
388#ifdef CONFIG_SMP 435#ifdef CONFIG_SMP
389 global_pgid.cpu_addr = hard_smp_processor_id(); 436 css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
390#else 437#else
391 global_pgid.cpu_addr = 0; 438 css->global_pgid.pgid_high.cpu_addr = 0;
392#endif 439#endif
393 } 440 }
394 global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; 441 css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
395 global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; 442 css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
396 global_pgid.tod_high = (__u32) (get_clock() >> 32); 443 css->global_pgid.tod_high = tod_high;
444
445}
446
447static void
448channel_subsystem_release(struct device *dev)
449{
450 struct channel_subsystem *css;
451
452 css = to_css(dev);
453 kfree(css);
454}
455
456static inline void __init
457setup_css(int nr)
458{
459 u32 tod_high;
460
461 memset(css[nr], 0, sizeof(struct channel_subsystem));
462 css[nr]->valid = 1;
463 css[nr]->cssid = nr;
464 sprintf(css[nr]->device.bus_id, "css%x", nr);
465 css[nr]->device.release = channel_subsystem_release;
466 tod_high = (u32) (get_clock() >> 32);
467 css_generate_pgid(css[nr], tod_high);
397} 468}
398 469
399/* 470/*
@@ -404,53 +475,50 @@ css_generate_pgid(void)
404static int __init 475static int __init
405init_channel_subsystem (void) 476init_channel_subsystem (void)
406{ 477{
407 int ret, irq; 478 int ret, i;
408 479
409 if (chsc_determine_css_characteristics() == 0) 480 if (chsc_determine_css_characteristics() == 0)
410 css_characteristics_avail = 1; 481 css_characteristics_avail = 1;
411 482
412 css_generate_pgid();
413
414 if ((ret = bus_register(&css_bus_type))) 483 if ((ret = bus_register(&css_bus_type)))
415 goto out; 484 goto out;
416 if ((ret = device_register (&css_bus_device)))
417 goto out_bus;
418 485
486 /* Try to enable MSS. */
487 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
488 switch (ret) {
489 case 0: /* Success. */
490 max_ssid = __MAX_SSID;
491 break;
492 case -ENOMEM:
493 goto out_bus;
494 default:
495 max_ssid = 0;
496 }
497 /* Setup css structure. */
498 for (i = 0; i <= __MAX_CSSID; i++) {
499 css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
500 if (!css[i]) {
501 ret = -ENOMEM;
502 goto out_unregister;
503 }
504 setup_css(i);
505 ret = device_register(&css[i]->device);
506 if (ret)
507 goto out_free;
508 }
419 css_init_done = 1; 509 css_init_done = 1;
420 510
421 ctl_set_bit(6, 28); 511 ctl_set_bit(6, 28);
422 512
423 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 513 for_each_subchannel(__init_channel_subsystem, NULL);
424 struct subchannel *sch;
425
426 if (cio_is_console(irq))
427 sch = cio_get_console_subchannel();
428 else {
429 sch = css_alloc_subchannel(irq);
430 if (IS_ERR(sch))
431 ret = PTR_ERR(sch);
432 else
433 ret = 0;
434 if (ret == -ENOMEM)
435 panic("Out of memory in "
436 "init_channel_subsystem\n");
437 /* -ENXIO: no more subchannels. */
438 if (ret == -ENXIO)
439 break;
440 if (ret)
441 continue;
442 }
443 /*
444 * We register ALL valid subchannels in ioinfo, even those
445 * that have been present before init_channel_subsystem.
446 * These subchannels can't have been registered yet (kmalloc
447 * not working) so we do it now. This is true e.g. for the
448 * console subchannel.
449 */
450 css_register_subchannel(sch);
451 }
452 return 0; 514 return 0;
453 515out_free:
516 kfree(css[i]);
517out_unregister:
518 while (i > 0) {
519 i--;
520 device_unregister(&css[i]->device);
521 }
454out_bus: 522out_bus:
455 bus_unregister(&css_bus_type); 523 bus_unregister(&css_bus_type);
456out: 524out:
@@ -481,47 +549,8 @@ struct bus_type css_bus_type = {
481 549
482subsys_initcall(init_channel_subsystem); 550subsys_initcall(init_channel_subsystem);
483 551
484/*
485 * Register root devices for some drivers. The release function must not be
486 * in the device drivers, so we do it here.
487 */
488static void
489s390_root_dev_release(struct device *dev)
490{
491 kfree(dev);
492}
493
494struct device *
495s390_root_dev_register(const char *name)
496{
497 struct device *dev;
498 int ret;
499
500 if (!strlen(name))
501 return ERR_PTR(-EINVAL);
502 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
503 if (!dev)
504 return ERR_PTR(-ENOMEM);
505 memset(dev, 0, sizeof(struct device));
506 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
507 dev->release = s390_root_dev_release;
508 ret = device_register(dev);
509 if (ret) {
510 kfree(dev);
511 return ERR_PTR(ret);
512 }
513 return dev;
514}
515
516void
517s390_root_dev_unregister(struct device *dev)
518{
519 if (dev)
520 device_unregister(dev);
521}
522
523int 552int
524css_enqueue_subchannel_slow(unsigned long schid) 553css_enqueue_subchannel_slow(struct subchannel_id schid)
525{ 554{
526 struct slow_subchannel *new_slow_sch; 555 struct slow_subchannel *new_slow_sch;
527 unsigned long flags; 556 unsigned long flags;
@@ -564,6 +593,4 @@ css_slow_subchannels_exist(void)
564 593
565MODULE_LICENSE("GPL"); 594MODULE_LICENSE("GPL");
566EXPORT_SYMBOL(css_bus_type); 595EXPORT_SYMBOL(css_bus_type);
567EXPORT_SYMBOL(s390_root_dev_register);
568EXPORT_SYMBOL(s390_root_dev_unregister);
569EXPORT_SYMBOL_GPL(css_characteristics_avail); 596EXPORT_SYMBOL_GPL(css_characteristics_avail);
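
for_each_subchannel(), introduced at the top of css.c above, is the workhorse of the whole patch: a nested loop over subchannel numbers (inner) and subchannel sets 0..max_ssid (outer), calling fn(schid, data) for every id. Two properties of the version shown deserve a note. With MSS disabled, max_ssid stays 0 and the walk degenerates to the old single 0..__MAX_SUBCHANNEL scan; and a non-zero return from fn ends only the scan of the current set, the outer loop still advances, with the last value returned by fn being what the caller sees. A small usage sketch under those semantics (names invented for illustration):

static int example_count_one(struct subchannel_id schid, void *data)
{
	struct schib schib;
	int *count = data;

	if (stsch_err(schid, &schib))
		return -ENXIO;	/* no more subchannels in this set */
	if (schib.pmcw.dnv)
		(*count)++;	/* device number valid: a live device */
	return 0;
}

static int example_count_devices(void)
{
	int count = 0;

	for_each_subchannel(example_count_one, &count);
	return count;
}
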
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2004a6c49388..251ebd7a7d3a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -6,6 +6,8 @@
6 6
7#include <asm/cio.h> 7#include <asm/cio.h>
8 8
9#include "schid.h"
10
9/* 11/*
10 * path grouping stuff 12 * path grouping stuff
11 */ 13 */
@@ -33,19 +35,25 @@ struct path_state {
33 __u8 resvd : 3; /* reserved */ 35 __u8 resvd : 3; /* reserved */
34} __attribute__ ((packed)); 36} __attribute__ ((packed));
35 37
38struct extended_cssid {
39 u8 version;
40 u8 cssid;
41} __attribute__ ((packed));
42
36struct pgid { 43struct pgid {
37 union { 44 union {
38 __u8 fc; /* SPID function code */ 45 __u8 fc; /* SPID function code */
39 struct path_state ps; /* SNID path state */ 46 struct path_state ps; /* SNID path state */
40 } inf; 47 } inf;
41 __u32 cpu_addr : 16; /* CPU address */ 48 union {
49 __u32 cpu_addr : 16; /* CPU address */
50 struct extended_cssid ext_cssid;
51 } pgid_high;
42 __u32 cpu_id : 24; /* CPU identification */ 52 __u32 cpu_id : 24; /* CPU identification */
43 __u32 cpu_model : 16; /* CPU model */ 53 __u32 cpu_model : 16; /* CPU model */
44 __u32 tod_high; /* high word TOD clock */ 54 __u32 tod_high; /* high word TOD clock */
45} __attribute__ ((packed)); 55} __attribute__ ((packed));
46 56
47extern struct pgid global_pgid;
48
49#define MAX_CIWS 8 57#define MAX_CIWS 8
50 58
51/* 59/*
@@ -68,7 +76,8 @@ struct ccw_device_private {
68 atomic_t onoff; 76 atomic_t onoff;
69 unsigned long registered; 77 unsigned long registered;
70 __u16 devno; /* device number */ 78 __u16 devno; /* device number */
71 __u16 irq; /* subchannel number */ 79 __u16 sch_no; /* subchannel number */
80 __u8 ssid; /* subchannel set id */
72 __u8 imask; /* lpm mask for SNID/SID/SPGID */ 81 __u8 imask; /* lpm mask for SNID/SID/SPGID */
73 int iretry; /* retry counter SNID/SID/SPGID */ 82 int iretry; /* retry counter SNID/SID/SPGID */
74 struct { 83 struct {
@@ -121,15 +130,27 @@ struct css_driver {
121extern struct bus_type css_bus_type; 130extern struct bus_type css_bus_type;
122extern struct css_driver io_subchannel_driver; 131extern struct css_driver io_subchannel_driver;
123 132
124int css_probe_device(int irq); 133extern int css_probe_device(struct subchannel_id);
125extern struct subchannel * get_subchannel_by_schid(int irq); 134extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
126extern unsigned int highest_subchannel;
127extern int css_init_done; 135extern int css_init_done;
128 136extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
129#define __MAX_SUBCHANNELS 65536 137
138#define __MAX_SUBCHANNEL 65535
139#define __MAX_SSID 3
140#define __MAX_CHPID 255
141#define __MAX_CSSID 0
142
143struct channel_subsystem {
144 u8 cssid;
145 int valid;
146 struct channel_path *chps[__MAX_CHPID];
147 struct device device;
148 struct pgid global_pgid;
149};
150#define to_css(dev) container_of(dev, struct channel_subsystem, device)
130 151
131extern struct bus_type css_bus_type; 152extern struct bus_type css_bus_type;
132extern struct device css_bus_device; 153extern struct channel_subsystem *css[];
133 154
134/* Some helper functions for disconnected state. */ 155/* Some helper functions for disconnected state. */
135int device_is_disconnected(struct subchannel *); 156int device_is_disconnected(struct subchannel *);
@@ -144,7 +165,7 @@ void device_set_waiting(struct subchannel *);
144void device_kill_pending_timer(struct subchannel *); 165void device_kill_pending_timer(struct subchannel *);
145 166
146/* Helper functions to build lists for the slow path. */ 167/* Helper functions to build lists for the slow path. */
147int css_enqueue_subchannel_slow(unsigned long schid); 168extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
148void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); 169void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
149void css_clear_subchannel_slow_list(void); 170void css_clear_subchannel_slow_list(void);
150int css_slow_subchannels_exist(void); 171int css_slow_subchannels_exist(void);
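
Two structural points in the css.h hunks are worth making concrete. First, the old global chps[] array moves into struct channel_subsystem, and with __MAX_CSSID == 0 there is exactly one live instance, css[0]; note that chps[] is declared with __MAX_CHPID entries while chpids run from 0 to __MAX_CHPID, so the declaration presumably wants __MAX_CHPID + 1 (the listing may have lost the "+ 1"). Second, the pgid's cpu_addr word becomes a union so an extended cssid can be stored when the machine supports multiple channel subsystems. A sketch of reaching a channel path through the new layout (the helper name is illustrative):

static struct channel_path *example_chp(int cssid, u8 chpid)
{
	if (cssid > __MAX_CSSID || !css[cssid] || !css[cssid]->valid)
		return NULL;
	return css[cssid]->chps[chpid];	/* NULL if never registered */
}
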
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 85908cacc3b8..fa3e4c0a2536 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/device.c 2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices 3 * bus driver for ccw devices
4 * $Revision: 1.131 $ 4 * $Revision: 1.137 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
374 int i, force, ret; 374 int i, force, ret;
375 char *tmp; 375 char *tmp;
376 376
377 if (atomic_compare_and_swap(0, 1, &cdev->private->onoff)) 377 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
378 return -EAGAIN; 378 return -EAGAIN;
379 379
380 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 380 if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -535,7 +535,8 @@ ccw_device_register(struct ccw_device *cdev)
535} 535}
536 536
537struct match_data { 537struct match_data {
538 unsigned int devno; 538 unsigned int devno;
539 unsigned int ssid;
539 struct ccw_device * sibling; 540 struct ccw_device * sibling;
540}; 541};
541 542
@@ -548,6 +549,7 @@ match_devno(struct device * dev, void * data)
548 cdev = to_ccwdev(dev); 549 cdev = to_ccwdev(dev);
549 if ((cdev->private->state == DEV_STATE_DISCONNECTED) && 550 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
550 (cdev->private->devno == d->devno) && 551 (cdev->private->devno == d->devno) &&
552 (cdev->private->ssid == d->ssid) &&
551 (cdev != d->sibling)) { 553 (cdev != d->sibling)) {
552 cdev->private->state = DEV_STATE_NOT_OPER; 554 cdev->private->state = DEV_STATE_NOT_OPER;
553 return 1; 555 return 1;
@@ -556,11 +558,13 @@ match_devno(struct device * dev, void * data)
556} 558}
557 559
558static struct ccw_device * 560static struct ccw_device *
559get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling) 561get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
562 struct ccw_device *sibling)
560{ 563{
561 struct device *dev; 564 struct device *dev;
562 struct match_data data = { 565 struct match_data data = {
563 .devno = devno, 566 .devno = devno,
567 .ssid = ssid,
564 .sibling = sibling, 568 .sibling = sibling,
565 }; 569 };
566 570
@@ -616,13 +620,13 @@ ccw_device_do_unreg_rereg(void *data)
616 620
617 need_rename = 1; 621 need_rename = 1;
618 other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev, 622 other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
619 cdev); 623 sch->schid.ssid, cdev);
620 if (other_cdev) { 624 if (other_cdev) {
621 struct subchannel *other_sch; 625 struct subchannel *other_sch;
622 626
623 other_sch = to_subchannel(other_cdev->dev.parent); 627 other_sch = to_subchannel(other_cdev->dev.parent);
624 if (get_device(&other_sch->dev)) { 628 if (get_device(&other_sch->dev)) {
625 stsch(other_sch->irq, &other_sch->schib); 629 stsch(other_sch->schid, &other_sch->schib);
626 if (other_sch->schib.pmcw.dnv) { 630 if (other_sch->schib.pmcw.dnv) {
627 other_sch->schib.pmcw.intparm = 0; 631 other_sch->schib.pmcw.intparm = 0;
628 cio_modify(other_sch); 632 cio_modify(other_sch);
@@ -639,8 +643,8 @@ ccw_device_do_unreg_rereg(void *data)
639 if (test_and_clear_bit(1, &cdev->private->registered)) 643 if (test_and_clear_bit(1, &cdev->private->registered))
640 device_del(&cdev->dev); 644 device_del(&cdev->dev);
641 if (need_rename) 645 if (need_rename)
642 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", 646 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
643 sch->schib.pmcw.dev); 647 sch->schid.ssid, sch->schib.pmcw.dev);
644 PREPARE_WORK(&cdev->private->kick_work, 648 PREPARE_WORK(&cdev->private->kick_work,
645 ccw_device_add_changed, (void *)cdev); 649 ccw_device_add_changed, (void *)cdev);
646 queue_work(ccw_device_work, &cdev->private->kick_work); 650 queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -769,18 +773,20 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
769 sch->dev.driver_data = cdev; 773 sch->dev.driver_data = cdev;
770 sch->driver = &io_subchannel_driver; 774 sch->driver = &io_subchannel_driver;
771 cdev->ccwlock = &sch->lock; 775 cdev->ccwlock = &sch->lock;
776
772 /* Init private data. */ 777 /* Init private data. */
773 priv = cdev->private; 778 priv = cdev->private;
774 priv->devno = sch->schib.pmcw.dev; 779 priv->devno = sch->schib.pmcw.dev;
775 priv->irq = sch->irq; 780 priv->ssid = sch->schid.ssid;
781 priv->sch_no = sch->schid.sch_no;
776 priv->state = DEV_STATE_NOT_OPER; 782 priv->state = DEV_STATE_NOT_OPER;
777 INIT_LIST_HEAD(&priv->cmb_list); 783 INIT_LIST_HEAD(&priv->cmb_list);
778 init_waitqueue_head(&priv->wait_q); 784 init_waitqueue_head(&priv->wait_q);
779 init_timer(&priv->timer); 785 init_timer(&priv->timer);
780 786
781 /* Set an initial name for the device. */ 787 /* Set an initial name for the device. */
782 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", 788 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
783 sch->schib.pmcw.dev); 789 sch->schid.ssid, sch->schib.pmcw.dev);
784 790
785 /* Increase counter of devices currently in recognition. */ 791 /* Increase counter of devices currently in recognition. */
786 atomic_inc(&ccw_device_init_count); 792 atomic_inc(&ccw_device_init_count);
@@ -951,7 +957,7 @@ io_subchannel_shutdown(struct device *dev)
951 sch = to_subchannel(dev); 957 sch = to_subchannel(dev);
952 cdev = dev->driver_data; 958 cdev = dev->driver_data;
953 959
954 if (cio_is_console(sch->irq)) 960 if (cio_is_console(sch->schid))
955 return; 961 return;
956 if (!sch->schib.pmcw.ena) 962 if (!sch->schib.pmcw.ena)
957 /* Nothing to do. */ 963 /* Nothing to do. */
@@ -986,10 +992,6 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
986 cdev->dev = (struct device) { 992 cdev->dev = (struct device) {
987 .parent = &sch->dev, 993 .parent = &sch->dev,
988 }; 994 };
989 /* Initialize the subchannel structure */
990 sch->dev.parent = &css_bus_device;
991 sch->dev.bus = &css_bus_type;
992
993 rc = io_subchannel_recog(cdev, sch); 995 rc = io_subchannel_recog(cdev, sch);
994 if (rc) 996 if (rc)
995 return rc; 997 return rc;
@@ -1146,6 +1148,16 @@ ccw_driver_unregister (struct ccw_driver *cdriver)
1146 driver_unregister(&cdriver->driver); 1148 driver_unregister(&cdriver->driver);
1147} 1149}
1148 1150
1151/* Helper func for qdio. */
1152struct subchannel_id
1153ccw_device_get_subchannel_id(struct ccw_device *cdev)
1154{
1155 struct subchannel *sch;
1156
1157 sch = to_subchannel(cdev->dev.parent);
1158 return sch->schid;
1159}
1160
1149MODULE_LICENSE("GPL"); 1161MODULE_LICENSE("GPL");
1150EXPORT_SYMBOL(ccw_device_set_online); 1162EXPORT_SYMBOL(ccw_device_set_online);
1151EXPORT_SYMBOL(ccw_device_set_offline); 1163EXPORT_SYMBOL(ccw_device_set_offline);
@@ -1155,3 +1167,4 @@ EXPORT_SYMBOL(get_ccwdev_by_busid);
1155EXPORT_SYMBOL(ccw_bus_type); 1167EXPORT_SYMBOL(ccw_bus_type);
1156EXPORT_SYMBOL(ccw_device_work); 1168EXPORT_SYMBOL(ccw_device_work);
1157EXPORT_SYMBOL(ccw_device_notify_work); 1169EXPORT_SYMBOL(ccw_device_notify_work);
1170EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
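
The new helper hands qdio the whole subchannel_id rather than a bare subchannel number. Since the identifier packs into 32 bits, returning the struct by value costs no more than the old int. A sketch, assuming the schid.h bitfield layout introduced elsewhere in this patch series (bitfield ordering is compiler-specific in portable C; the layout shown follows the s390 convention):

#include <stdio.h>

/* Rough shape of the new 32-bit subchannel identifier (an assumption
 * mirroring schid.h; do not rely on this bit order portably). */
struct subchannel_id {
	unsigned int cssid : 8;
	unsigned int : 4;
	unsigned int m : 1;
	unsigned int ssid : 2;
	unsigned int one : 1;
	unsigned int sch_no : 16;
};

struct subchannel {
	struct subchannel_id schid;
};

/* By-value return: the struct fits in one register, so this is as
 * cheap as the old "return sch->irq" while carrying ssid and cssid. */
static struct subchannel_id get_subchannel_id(const struct subchannel *sch)
{
	return sch->schid;
}

int main(void)
{
	struct subchannel sch = {
		.schid = { .ssid = 1, .one = 1, .sch_no = 0x0af0 }
	};
	struct subchannel_id id = get_subchannel_id(&sch);

	printf("0.%x.%04x\n", (unsigned)id.ssid, (unsigned)id.sch_no);
	return 0;
}
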
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index a3aa056d7245..11587ebb7289 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -110,6 +110,7 @@ int ccw_device_stlck(struct ccw_device *);
110 110
111/* qdio needs this. */ 111/* qdio needs this. */
112void ccw_device_set_timeout(struct ccw_device *, int); 112void ccw_device_set_timeout(struct ccw_device *, int);
113extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
113 114
114void retry_set_schib(struct ccw_device *cdev); 115void retry_set_schib(struct ccw_device *cdev);
115#endif 116#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c1c89f4fd4e3..23d12b65e5fa 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -133,7 +133,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
133 int ret; 133 int ret;
134 134
135 sch = to_subchannel(cdev->dev.parent); 135 sch = to_subchannel(cdev->dev.parent);
136 ret = stsch(sch->irq, &sch->schib); 136 ret = stsch(sch->schid, &sch->schib);
137 if (ret || !sch->schib.pmcw.dnv) 137 if (ret || !sch->schib.pmcw.dnv)
138 return -ENODEV; 138 return -ENODEV;
139 if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) 139 if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
@@ -231,7 +231,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
231 * through ssch() and the path information is up to date. 231 * through ssch() and the path information is up to date.
232 */ 232 */
233 old_lpm = sch->lpm; 233 old_lpm = sch->lpm;
234 stsch(sch->irq, &sch->schib); 234 stsch(sch->schid, &sch->schib);
235 sch->lpm = sch->schib.pmcw.pim & 235 sch->lpm = sch->schib.pmcw.pim &
236 sch->schib.pmcw.pam & 236 sch->schib.pmcw.pam &
237 sch->schib.pmcw.pom & 237 sch->schib.pmcw.pom &
@@ -257,8 +257,9 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
257 switch (state) { 257 switch (state) {
258 case DEV_STATE_NOT_OPER: 258 case DEV_STATE_NOT_OPER:
259 CIO_DEBUG(KERN_WARNING, 2, 259 CIO_DEBUG(KERN_WARNING, 2,
260 "SenseID : unknown device %04x on subchannel %04x\n", 260 "SenseID : unknown device %04x on subchannel "
261 cdev->private->devno, sch->irq); 261 "0.%x.%04x\n", cdev->private->devno,
262 sch->schid.ssid, sch->schid.sch_no);
262 break; 263 break;
263 case DEV_STATE_OFFLINE: 264 case DEV_STATE_OFFLINE:
264 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { 265 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -282,16 +283,18 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
282 return; 283 return;
283 } 284 }
284 /* Issue device info message. */ 285 /* Issue device info message. */
285 CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: " 286 CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
286 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 287 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
287 "%04X/%02X\n", cdev->private->devno, 288 "%04X/%02X\n",
289 cdev->private->ssid, cdev->private->devno,
288 cdev->id.cu_type, cdev->id.cu_model, 290 cdev->id.cu_type, cdev->id.cu_model,
289 cdev->id.dev_type, cdev->id.dev_model); 291 cdev->id.dev_type, cdev->id.dev_model);
290 break; 292 break;
291 case DEV_STATE_BOXED: 293 case DEV_STATE_BOXED:
292 CIO_DEBUG(KERN_WARNING, 2, 294 CIO_DEBUG(KERN_WARNING, 2,
293 "SenseID : boxed device %04x on subchannel %04x\n", 295 "SenseID : boxed device %04x on subchannel "
294 cdev->private->devno, sch->irq); 296 "0.%x.%04x\n", cdev->private->devno,
297 sch->schid.ssid, sch->schid.sch_no);
295 break; 298 break;
296 } 299 }
297 cdev->private->state = state; 300 cdev->private->state = state;
@@ -359,7 +362,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
359 if (state == DEV_STATE_BOXED) 362 if (state == DEV_STATE_BOXED)
360 CIO_DEBUG(KERN_WARNING, 2, 363 CIO_DEBUG(KERN_WARNING, 2,
361 "Boxed device %04x on subchannel %04x\n", 364 "Boxed device %04x on subchannel %04x\n",
362 cdev->private->devno, sch->irq); 365 cdev->private->devno, sch->schid.sch_no);
363 366
364 if (cdev->private->flags.donotify) { 367 if (cdev->private->flags.donotify) {
365 cdev->private->flags.donotify = 0; 368 cdev->private->flags.donotify = 0;
@@ -592,7 +595,7 @@ ccw_device_offline(struct ccw_device *cdev)
592 struct subchannel *sch; 595 struct subchannel *sch;
593 596
594 sch = to_subchannel(cdev->dev.parent); 597 sch = to_subchannel(cdev->dev.parent);
595 if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv) 598 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
596 return -ENODEV; 599 return -ENODEV;
597 if (cdev->private->state != DEV_STATE_ONLINE) { 600 if (cdev->private->state != DEV_STATE_ONLINE) {
598 if (sch->schib.scsw.actl != 0) 601 if (sch->schib.scsw.actl != 0)
@@ -711,7 +714,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
711 * Since we might not just be coming from an interrupt from the 714 * Since we might not just be coming from an interrupt from the
712 * subchannel we have to update the schib. 715 * subchannel we have to update the schib.
713 */ 716 */
714 stsch(sch->irq, &sch->schib); 717 stsch(sch->schid, &sch->schib);
715 718
716 if (sch->schib.scsw.actl != 0 || 719 if (sch->schib.scsw.actl != 0 ||
717 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { 720 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
@@ -923,7 +926,7 @@ ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
923 926
924 /* Iff device is idle, reset timeout. */ 927 /* Iff device is idle, reset timeout. */
925 sch = to_subchannel(cdev->dev.parent); 928 sch = to_subchannel(cdev->dev.parent);
926 if (!stsch(sch->irq, &sch->schib)) 929 if (!stsch(sch->schid, &sch->schib))
927 if (sch->schib.scsw.actl == 0) 930 if (sch->schib.scsw.actl == 0)
928 ccw_device_set_timeout(cdev, 0); 931 ccw_device_set_timeout(cdev, 0);
929 /* Call the handler. */ 932 /* Call the handler. */
@@ -1035,7 +1038,7 @@ device_trigger_reprobe(struct subchannel *sch)
1035 return; 1038 return;
1036 1039
1037 /* Update some values. */ 1040 /* Update some values. */
1038 if (stsch(sch->irq, &sch->schib)) 1041 if (stsch(sch->schid, &sch->schib))
1039 return; 1042 return;
1040 1043
1041 /* 1044 /*
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 0e68fb511dc9..04ceba343db8 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -27,7 +27,7 @@
27/* 27/*
28 * diag210 is used under VM to get information about a virtual device 28 * diag210 is used under VM to get information about a virtual device
29 */ 29 */
30#ifdef CONFIG_ARCH_S390X 30#ifdef CONFIG_64BIT
31int 31int
32diag210(struct diag210 * addr) 32diag210(struct diag210 * addr)
33{ 33{
@@ -256,16 +256,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
256 * sense id information. So, for intervention required, 256 * sense id information. So, for intervention required,
257 * we use the "whack it until it talks" strategy... 257 * we use the "whack it until it talks" strategy...
258 */ 258 */
259 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x " 259 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
260 "reports cmd reject\n", 260 "0.%x.%04x reports cmd reject\n",
261 cdev->private->devno, sch->irq); 261 cdev->private->devno, sch->schid.ssid,
262 sch->schid.sch_no);
262 return -EOPNOTSUPP; 263 return -EOPNOTSUPP;
263 } 264 }
264 if (irb->esw.esw0.erw.cons) { 265 if (irb->esw.esw0.erw.cons) {
265 CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, " 266 CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
266 "lpum %02X, cnt %02d, sns :" 267 "lpum %02X, cnt %02d, sns :"
267 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 268 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
268 cdev->private->devno, 269 cdev->private->ssid, cdev->private->devno,
269 irb->esw.esw0.sublog.lpum, 270 irb->esw.esw0.sublog.lpum,
270 irb->esw.esw0.erw.scnt, 271 irb->esw.esw0.erw.scnt,
271 irb->ecw[0], irb->ecw[1], 272 irb->ecw[0], irb->ecw[1],
@@ -277,16 +278,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
277 if (irb->scsw.cc == 3) { 278 if (irb->scsw.cc == 3) {
278 if ((sch->orb.lpm & 279 if ((sch->orb.lpm &
279 sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 280 sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
280 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on" 281 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
281 " subchannel %04x is 'not operational'\n", 282 "on subchannel 0.%x.%04x is "
282 sch->orb.lpm, cdev->private->devno, 283 "'not operational'\n", sch->orb.lpm,
283 sch->irq); 284 cdev->private->devno, sch->schid.ssid,
285 sch->schid.sch_no);
284 return -EACCES; 286 return -EACCES;
285 } 287 }
286 /* Hmm, whatever happened, try again. */ 288 /* Hmm, whatever happened, try again. */
287 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " 289 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
288 "subchannel %04x returns status %02X%02X\n", 290 "subchannel 0.%x.%04x returns status %02X%02X\n",
289 cdev->private->devno, sch->irq, 291 cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
290 irb->scsw.dstat, irb->scsw.cstat); 292 irb->scsw.dstat, irb->scsw.cstat);
291 return -EAGAIN; 293 return -EAGAIN;
292} 294}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 85a3026e6900..143b6c25a4e6 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/device_ops.c 2 * drivers/s390/cio/device_ops.c
3 * 3 *
4 * $Revision: 1.57 $ 4 * $Revision: 1.58 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -570,7 +570,7 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
570int 570int
571_ccw_device_get_subchannel_number(struct ccw_device *cdev) 571_ccw_device_get_subchannel_number(struct ccw_device *cdev)
572{ 572{
573 return cdev->private->irq; 573 return cdev->private->sch_no;
574} 574}
575 575
576int 576int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 0adac8a67331..052832d03d38 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
22#include "cio_debug.h" 22#include "cio_debug.h"
23#include "css.h" 23#include "css.h"
24#include "device.h" 24#include "device.h"
25#include "ioasm.h"
25 26
26/* 27/*
27 * Start Sense Path Group ID helper function. Used in ccw_device_recog 28 * Start Sense Path Group ID helper function. Used in ccw_device_recog
@@ -56,10 +57,10 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
56 if (ret != -EACCES) 57 if (ret != -EACCES)
57 return ret; 58 return ret;
58 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 59 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
59 "%04x, lpm %02X, became 'not " 60 "0.%x.%04x, lpm %02X, became 'not "
60 "operational'\n", 61 "operational'\n",
61 cdev->private->devno, sch->irq, 62 cdev->private->devno, sch->schid.ssid,
62 cdev->private->imask); 63 sch->schid.sch_no, cdev->private->imask);
63 64
64 } 65 }
65 cdev->private->imask >>= 1; 66 cdev->private->imask >>= 1;
@@ -105,10 +106,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
105 return -EOPNOTSUPP; 106 return -EOPNOTSUPP;
106 } 107 }
107 if (irb->esw.esw0.erw.cons) { 108 if (irb->esw.esw0.erw.cons) {
108 CIO_MSG_EVENT(2, "SNID - device %04x, unit check, " 109 CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
109 "lpum %02X, cnt %02d, sns : " 110 "lpum %02X, cnt %02d, sns : "
110 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n", 111 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
111 cdev->private->devno, 112 cdev->private->ssid, cdev->private->devno,
112 irb->esw.esw0.sublog.lpum, 113 irb->esw.esw0.sublog.lpum,
113 irb->esw.esw0.erw.scnt, 114 irb->esw.esw0.erw.scnt,
114 irb->ecw[0], irb->ecw[1], 115 irb->ecw[0], irb->ecw[1],
@@ -118,15 +119,17 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
118 return -EAGAIN; 119 return -EAGAIN;
119 } 120 }
120 if (irb->scsw.cc == 3) { 121 if (irb->scsw.cc == 3) {
121 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 122 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
122 "%04x, lpm %02X, became 'not operational'\n", 123 " lpm %02X, became 'not operational'\n",
123 cdev->private->devno, sch->irq, sch->orb.lpm); 124 cdev->private->devno, sch->schid.ssid,
125 sch->schid.sch_no, sch->orb.lpm);
124 return -EACCES; 126 return -EACCES;
125 } 127 }
126 if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { 128 if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
127 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x " 129 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
128 "is reserved by someone else\n", 130 "is reserved by someone else\n",
129 cdev->private->devno, sch->irq); 131 cdev->private->devno, sch->schid.ssid,
132 sch->schid.sch_no);
130 return -EUSERS; 133 return -EUSERS;
131 } 134 }
132 return 0; 135 return 0;
@@ -162,7 +165,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
162 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ 165 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
163 case 0: /* Sense Path Group ID successful. */ 166 case 0: /* Sense Path Group ID successful. */
164 if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET) 167 if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
165 memcpy(&cdev->private->pgid, &global_pgid, 168 memcpy(&cdev->private->pgid, &css[0]->global_pgid,
166 sizeof(struct pgid)); 169 sizeof(struct pgid));
167 ccw_device_sense_pgid_done(cdev, 0); 170 ccw_device_sense_pgid_done(cdev, 0);
168 break; 171 break;
@@ -235,8 +238,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
235 sch->lpm &= ~cdev->private->imask; 238 sch->lpm &= ~cdev->private->imask;
236 sch->vpm &= ~cdev->private->imask; 239 sch->vpm &= ~cdev->private->imask;
237 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 240 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
238 "%04x, lpm %02X, became 'not operational'\n", 241 "0.%x.%04x, lpm %02X, became 'not operational'\n",
239 cdev->private->devno, sch->irq, cdev->private->imask); 242 cdev->private->devno, sch->schid.ssid,
243 sch->schid.sch_no, cdev->private->imask);
240 return ret; 244 return ret;
241} 245}
242 246
@@ -258,8 +262,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
258 if (irb->ecw[0] & SNS0_CMD_REJECT) 262 if (irb->ecw[0] & SNS0_CMD_REJECT)
259 return -EOPNOTSUPP; 263 return -EOPNOTSUPP;
260 /* Hmm, whatever happened, try again. */ 264 /* Hmm, whatever happened, try again. */
261 CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, " 265 CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
266 "cnt %02d, "
262 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 267 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
268 cdev->private->ssid,
263 cdev->private->devno, irb->esw.esw0.erw.scnt, 269 cdev->private->devno, irb->esw.esw0.erw.scnt,
264 irb->ecw[0], irb->ecw[1], 270 irb->ecw[0], irb->ecw[1],
265 irb->ecw[2], irb->ecw[3], 271 irb->ecw[2], irb->ecw[3],
@@ -268,10 +274,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
268 return -EAGAIN; 274 return -EAGAIN;
269 } 275 }
270 if (irb->scsw.cc == 3) { 276 if (irb->scsw.cc == 3) {
271 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 277 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
272 "%04x, lpm %02X, became 'not operational'\n", 278 " lpm %02X, became 'not operational'\n",
273 cdev->private->devno, sch->irq, 279 cdev->private->devno, sch->schid.ssid,
274 cdev->private->imask); 280 sch->schid.sch_no, cdev->private->imask);
275 return -EACCES; 281 return -EACCES;
276 } 282 }
277 return 0; 283 return 0;
@@ -364,8 +370,22 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
364void 370void
365ccw_device_verify_start(struct ccw_device *cdev) 371ccw_device_verify_start(struct ccw_device *cdev)
366{ 372{
373 struct subchannel *sch = to_subchannel(cdev->dev.parent);
374
367 cdev->private->flags.pgid_single = 0; 375 cdev->private->flags.pgid_single = 0;
368 cdev->private->iretry = 5; 376 cdev->private->iretry = 5;
377 /*
378 * Update sch->lpm with current values to catch paths becoming
379 * available again.
380 */
381 if (stsch(sch->schid, &sch->schib)) {
382 ccw_device_verify_done(cdev, -ENODEV);
383 return;
384 }
385 sch->lpm = sch->schib.pmcw.pim &
386 sch->schib.pmcw.pam &
387 sch->schib.pmcw.pom &
388 sch->opm;
369 __ccw_device_verify_start(cdev); 389 __ccw_device_verify_start(cdev);
370} 390}
371 391
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 12a24d4331a2..db09c209098b 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -36,15 +36,16 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
36 36
37 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 37 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
38 "received" 38 "received"
39 " ... device %04X on subchannel %04X, dev_stat " 39 " ... device %04x on subchannel 0.%x.%04x, dev_stat "
40 ": %02X sch_stat : %02X\n", 40 ": %02X sch_stat : %02X\n",
41 cdev->private->devno, cdev->private->irq, 41 cdev->private->devno, cdev->private->ssid,
42 cdev->private->sch_no,
42 irb->scsw.dstat, irb->scsw.cstat); 43 irb->scsw.dstat, irb->scsw.cstat);
43 44
44 if (irb->scsw.cc != 3) { 45 if (irb->scsw.cc != 3) {
45 char dbf_text[15]; 46 char dbf_text[15];
46 47
47 sprintf(dbf_text, "chk%x", cdev->private->irq); 48 sprintf(dbf_text, "chk%x", cdev->private->sch_no);
48 CIO_TRACE_EVENT(0, dbf_text); 49 CIO_TRACE_EVENT(0, dbf_text);
49 CIO_HEX_EVENT(0, irb, sizeof (struct irb)); 50 CIO_HEX_EVENT(0, irb, sizeof (struct irb));
50 } 51 }
@@ -59,10 +60,11 @@ ccw_device_path_notoper(struct ccw_device *cdev)
59 struct subchannel *sch; 60 struct subchannel *sch;
60 61
61 sch = to_subchannel(cdev->dev.parent); 62 sch = to_subchannel(cdev->dev.parent);
62 stsch (sch->irq, &sch->schib); 63 stsch (sch->schid, &sch->schib);
63 64
64 CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are " 65 CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
65 "not operational \n", __FUNCTION__, sch->irq, 66 "not operational \n", __FUNCTION__,
67 sch->schid.ssid, sch->schid.sch_no,
66 sch->schib.pmcw.pnom); 68 sch->schib.pmcw.pnom);
67 69
68 sch->lpm &= ~sch->schib.pmcw.pnom; 70 sch->lpm &= ~sch->schib.pmcw.pnom;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 45480a2bc4c0..95a9462f9a91 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,12 +1,13 @@
1#ifndef S390_CIO_IOASM_H 1#ifndef S390_CIO_IOASM_H
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include "schid.h"
5
4/* 6/*
5 * TPI info structure 7 * TPI info structure
6 */ 8 */
7struct tpi_info { 9struct tpi_info {
8 __u32 reserved1 : 16; /* reserved 0x00000001 */ 10 struct subchannel_id schid;
9 __u32 irq : 16; /* aka. subchannel number */
10 __u32 intparm; /* interruption parameter */ 11 __u32 intparm; /* interruption parameter */
11 __u32 adapter_IO : 1; 12 __u32 adapter_IO : 1;
12 __u32 reserved2 : 1; 13 __u32 reserved2 : 1;
@@ -21,7 +22,8 @@ struct tpi_info {
21 * Some S390 specific IO instructions as inline 22 * Some S390 specific IO instructions as inline
22 */ 23 */
23 24
24static inline int stsch(int irq, volatile struct schib *addr) 25static inline int stsch(struct subchannel_id schid,
26 volatile struct schib *addr)
25{ 27{
26 int ccode; 28 int ccode;
27 29
@@ -31,12 +33,42 @@ static inline int stsch(int irq, volatile struct schib *addr)
31 " ipm %0\n" 33 " ipm %0\n"
32 " srl %0,28" 34 " srl %0,28"
33 : "=d" (ccode) 35 : "=d" (ccode)
34 : "d" (irq | 0x10000), "a" (addr) 36 : "d" (schid), "a" (addr), "m" (*addr)
37 : "cc", "1" );
38 return ccode;
39}
40
41static inline int stsch_err(struct subchannel_id schid,
42 volatile struct schib *addr)
43{
44 int ccode;
45
46 __asm__ __volatile__(
47 " lhi %0,%3\n"
48 " lr 1,%1\n"
49 " stsch 0(%2)\n"
50 "0: ipm %0\n"
51 " srl %0,28\n"
52 "1:\n"
53#ifdef CONFIG_64BIT
54 ".section __ex_table,\"a\"\n"
55 " .align 8\n"
56 " .quad 0b,1b\n"
57 ".previous"
58#else
59 ".section __ex_table,\"a\"\n"
60 " .align 4\n"
61 " .long 0b,1b\n"
62 ".previous"
63#endif
64 : "=&d" (ccode)
65 : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
35 : "cc", "1" ); 66 : "cc", "1" );
36 return ccode; 67 return ccode;
37} 68}
38 69
39static inline int msch(int irq, volatile struct schib *addr) 70static inline int msch(struct subchannel_id schid,
71 volatile struct schib *addr)
40{ 72{
41 int ccode; 73 int ccode;
42 74
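
The I/O instructions expect the subchannel designation in register 1 with bit 16 set, which the old code produced as irq | 0x10000; in the new scheme that bit is the named "one" field of struct subchannel_id, so the struct can be handed to the "d" (schid) operand as-is. An illustrative re-derivation of the encoding (the bit positions follow my reading of the schid.h layout, so treat them as an assumption):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Encode a subchannel_id word as the hardware expects it in r1:
 * bits 0-7 cssid, bit 12 m, bits 13-14 ssid, bit 15 "one", bits 16-31
 * sch_no, in big-endian bit numbering.  Illustrative, not kernel code. */
static uint32_t encode_schid(unsigned cssid, unsigned m, unsigned ssid,
			     unsigned sch_no)
{
	return ((uint32_t)cssid << 24) | ((uint32_t)m << 19) |
	       ((uint32_t)ssid << 17) | (1u << 16) | (sch_no & 0xffffu);
}

int main(void)
{
	unsigned sch_no = 0x0af0;

	/* for ssid 0 the new encoding equals the old "irq | 0x10000" */
	assert(encode_schid(0, 0, 0, sch_no) == (sch_no | 0x10000u));
	printf("schid word: %08x\n", (unsigned)encode_schid(0, 0, 1, sch_no));
	return 0;
}
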
@@ -46,12 +78,13 @@ static inline int msch(int irq, volatile struct schib *addr)
46 " ipm %0\n" 78 " ipm %0\n"
47 " srl %0,28" 79 " srl %0,28"
48 : "=d" (ccode) 80 : "=d" (ccode)
49 : "d" (irq | 0x10000L), "a" (addr) 81 : "d" (schid), "a" (addr), "m" (*addr)
50 : "cc", "1" ); 82 : "cc", "1" );
51 return ccode; 83 return ccode;
52} 84}
53 85
54static inline int msch_err(int irq, volatile struct schib *addr) 86static inline int msch_err(struct subchannel_id schid,
87 volatile struct schib *addr)
55{ 88{
56 int ccode; 89 int ccode;
57 90
@@ -62,7 +95,7 @@ static inline int msch_err(int irq, volatile struct schib *addr)
62 "0: ipm %0\n" 95 "0: ipm %0\n"
63 " srl %0,28\n" 96 " srl %0,28\n"
64 "1:\n" 97 "1:\n"
65#ifdef CONFIG_ARCH_S390X 98#ifdef CONFIG_64BIT
66 ".section __ex_table,\"a\"\n" 99 ".section __ex_table,\"a\"\n"
67 " .align 8\n" 100 " .align 8\n"
68 " .quad 0b,1b\n" 101 " .quad 0b,1b\n"
@@ -74,12 +107,13 @@ static inline int msch_err(int irq, volatile struct schib *addr)
74 ".previous" 107 ".previous"
75#endif 108#endif
76 : "=&d" (ccode) 109 : "=&d" (ccode)
77 : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO) 110 : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
78 : "cc", "1" ); 111 : "cc", "1" );
79 return ccode; 112 return ccode;
80} 113}
81 114
82static inline int tsch(int irq, volatile struct irb *addr) 115static inline int tsch(struct subchannel_id schid,
116 volatile struct irb *addr)
83{ 117{
84 int ccode; 118 int ccode;
85 119
@@ -89,7 +123,7 @@ static inline int tsch(int irq, volatile struct irb *addr)
89 " ipm %0\n" 123 " ipm %0\n"
90 " srl %0,28" 124 " srl %0,28"
91 : "=d" (ccode) 125 : "=d" (ccode)
92 : "d" (irq | 0x10000L), "a" (addr) 126 : "d" (schid), "a" (addr), "m" (*addr)
93 : "cc", "1" ); 127 : "cc", "1" );
94 return ccode; 128 return ccode;
95} 129}
@@ -103,12 +137,13 @@ static inline int tpi( volatile struct tpi_info *addr)
103 " ipm %0\n" 137 " ipm %0\n"
104 " srl %0,28" 138 " srl %0,28"
105 : "=d" (ccode) 139 : "=d" (ccode)
106 : "a" (addr) 140 : "a" (addr), "m" (*addr)
107 : "cc", "1" ); 141 : "cc", "1" );
108 return ccode; 142 return ccode;
109} 143}
110 144
111static inline int ssch(int irq, volatile struct orb *addr) 145static inline int ssch(struct subchannel_id schid,
146 volatile struct orb *addr)
112{ 147{
113 int ccode; 148 int ccode;
114 149
@@ -118,12 +153,12 @@ static inline int ssch(int irq, volatile struct orb *addr)
118 " ipm %0\n" 153 " ipm %0\n"
119 " srl %0,28" 154 " srl %0,28"
120 : "=d" (ccode) 155 : "=d" (ccode)
121 : "d" (irq | 0x10000L), "a" (addr) 156 : "d" (schid), "a" (addr), "m" (*addr)
122 : "cc", "1" ); 157 : "cc", "1" );
123 return ccode; 158 return ccode;
124} 159}
125 160
126static inline int rsch(int irq) 161static inline int rsch(struct subchannel_id schid)
127{ 162{
128 int ccode; 163 int ccode;
129 164
@@ -133,12 +168,12 @@ static inline int rsch(int irq)
133 " ipm %0\n" 168 " ipm %0\n"
134 " srl %0,28" 169 " srl %0,28"
135 : "=d" (ccode) 170 : "=d" (ccode)
136 : "d" (irq | 0x10000L) 171 : "d" (schid)
137 : "cc", "1" ); 172 : "cc", "1" );
138 return ccode; 173 return ccode;
139} 174}
140 175
141static inline int csch(int irq) 176static inline int csch(struct subchannel_id schid)
142{ 177{
143 int ccode; 178 int ccode;
144 179
@@ -148,12 +183,12 @@ static inline int csch(int irq)
148 " ipm %0\n" 183 " ipm %0\n"
149 " srl %0,28" 184 " srl %0,28"
150 : "=d" (ccode) 185 : "=d" (ccode)
151 : "d" (irq | 0x10000L) 186 : "d" (schid)
152 : "cc", "1" ); 187 : "cc", "1" );
153 return ccode; 188 return ccode;
154} 189}
155 190
156static inline int hsch(int irq) 191static inline int hsch(struct subchannel_id schid)
157{ 192{
158 int ccode; 193 int ccode;
159 194
@@ -163,12 +198,12 @@ static inline int hsch(int irq)
163 " ipm %0\n" 198 " ipm %0\n"
164 " srl %0,28" 199 " srl %0,28"
165 : "=d" (ccode) 200 : "=d" (ccode)
166 : "d" (irq | 0x10000L) 201 : "d" (schid)
167 : "cc", "1" ); 202 : "cc", "1" );
168 return ccode; 203 return ccode;
169} 204}
170 205
171static inline int xsch(int irq) 206static inline int xsch(struct subchannel_id schid)
172{ 207{
173 int ccode; 208 int ccode;
174 209
@@ -178,21 +213,22 @@ static inline int xsch(int irq)
178 " ipm %0\n" 213 " ipm %0\n"
179 " srl %0,28" 214 " srl %0,28"
180 : "=d" (ccode) 215 : "=d" (ccode)
181 : "d" (irq | 0x10000L) 216 : "d" (schid)
182 : "cc", "1" ); 217 : "cc", "1" );
183 return ccode; 218 return ccode;
184} 219}
185 220
186static inline int chsc(void *chsc_area) 221static inline int chsc(void *chsc_area)
187{ 222{
223 typedef struct { char _[4096]; } addr_type;
188 int cc; 224 int cc;
189 225
190 __asm__ __volatile__ ( 226 __asm__ __volatile__ (
191 ".insn rre,0xb25f0000,%1,0 \n\t" 227 ".insn rre,0xb25f0000,%2,0 \n\t"
192 "ipm %0 \n\t" 228 "ipm %0 \n\t"
193 "srl %0,28 \n\t" 229 "srl %0,28 \n\t"
194 : "=d" (cc) 230 : "=d" (cc), "=m" (*(addr_type *) chsc_area)
195 : "d" (chsc_area) 231 : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
196 : "cc" ); 232 : "cc" );
197 233
198 return cc; 234 return cc;
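
Besides taking a subchannel_id, every inline asm above gains an "m" (*addr) operand ("=m" for chsc). Naming the object as a memory operand tells GCC that the instruction reads or writes that storage, so pending stores to it must be flushed before the asm and the object reloaded afterwards, without resorting to a blanket "memory" clobber. A portable illustration of the idiom:

#include <stdio.h>

/* An empty asm that names the object as a read/write memory operand:
 * the compiler must have the current value of *obj in memory before
 * the asm and may not cache it across the asm afterwards. */
static void touch(int *obj)
{
	__asm__ __volatile__("" : "+m" (*obj));
}

int main(void)
{
	int schib_like = 41;

	schib_like++;		/* must reach memory before ...          */
	touch(&schib_like);	/* ... the asm that reads/writes it here */
	printf("%d\n", schib_like);
	return 0;
}
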
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index eb39218b925e..30a836ffc31f 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -56,7 +56,7 @@
56#include "ioasm.h" 56#include "ioasm.h"
57#include "chsc.h" 57#include "chsc.h"
58 58
59#define VERSION_QDIO_C "$Revision: 1.108 $" 59#define VERSION_QDIO_C "$Revision: 1.114 $"
60 60
61/****************** MODULE PARAMETER VARIABLES ********************/ 61/****************** MODULE PARAMETER VARIABLES ********************/
62MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); 62MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
@@ -76,6 +76,7 @@ static struct qdio_perf_stats perf_stats;
76#endif /* QDIO_PERFORMANCE_STATS */ 76#endif /* QDIO_PERFORMANCE_STATS */
77 77
78static int hydra_thinints; 78static int hydra_thinints;
79static int is_passthrough = 0;
79static int omit_svs; 80static int omit_svs;
80 81
81static int indicator_used[INDICATORS_PER_CACHELINE]; 82static int indicator_used[INDICATORS_PER_CACHELINE];
@@ -136,12 +137,126 @@ qdio_release_q(struct qdio_q *q)
136 atomic_dec(&q->use_count); 137 atomic_dec(&q->use_count);
137} 138}
138 139
139static volatile inline void 140/*check ccq */
140qdio_set_slsb(volatile char *slsb, unsigned char value) 141static inline int
142qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
143{
144 char dbf_text[15];
145
146 if (ccq == 0 || ccq == 32 || ccq == 96)
147 return 0;
148 if (ccq == 97)
149 return 1;
150 /*notify devices immediately*/
151 sprintf(dbf_text,"%d", ccq);
152 QDIO_DBF_TEXT2(1,trace,dbf_text);
153 return -EIO;
154}
155/* EQBS: extract buffer states */
156static inline int
157qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
158 unsigned int *start, unsigned int *cnt)
159{
160 struct qdio_irq *irq;
161 unsigned int tmp_cnt, q_no, ccq;
162 int rc ;
163 char dbf_text[15];
164
165 ccq = 0;
166 tmp_cnt = *cnt;
167 irq = (struct qdio_irq*)q->irq_ptr;
168 q_no = q->q_no;
169 if(!q->is_input_q)
170 q_no += irq->no_input_qs;
171 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
172 rc = qdio_check_ccq(q, ccq);
173 if (rc < 0) {
174 QDIO_DBF_TEXT2(1,trace,"eqberr");
175 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
176 QDIO_DBF_TEXT2(1,trace,dbf_text);
177 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
178 QDIO_STATUS_LOOK_FOR_ERROR,
179 0, 0, 0, -1, -1, q->int_parm);
180 return 0;
181 }
182 return (tmp_cnt - *cnt);
183}
184
185/* SQBS: set buffer states */
186static inline int
187qdio_do_sqbs(struct qdio_q *q, unsigned char state,
188 unsigned int *start, unsigned int *cnt)
141{ 189{
142 xchg((char*)slsb,value); 190 struct qdio_irq *irq;
191 unsigned int tmp_cnt, q_no, ccq;
192 int rc;
193 char dbf_text[15];
194
195 ccq = 0;
196 tmp_cnt = *cnt;
197 irq = (struct qdio_irq*)q->irq_ptr;
198 q_no = q->q_no;
199 if(!q->is_input_q)
200 q_no += irq->no_input_qs;
201 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
202 rc = qdio_check_ccq(q, ccq);
203 if (rc < 0) {
204 QDIO_DBF_TEXT3(1,trace,"sqberr");
205 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
206 QDIO_DBF_TEXT3(1,trace,dbf_text);
207 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
208 QDIO_STATUS_LOOK_FOR_ERROR,
209 0, 0, 0, -1, -1, q->int_parm);
210 return 0;
211 }
212 return (tmp_cnt - *cnt);
143} 213}
144 214
215static inline int
216qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
217 unsigned char state, unsigned int *count)
218{
219 volatile char *slsb;
220 struct qdio_irq *irq;
221
222 irq = (struct qdio_irq*)q->irq_ptr;
223 if (!irq->is_qebsm) {
224 slsb = (char *)&q->slsb.acc.val[(*bufno)];
225 xchg(slsb, state);
226 return 1;
227 }
228 return qdio_do_sqbs(q, state, bufno, count);
229}
230
231#ifdef CONFIG_QDIO_DEBUG
232static inline void
233qdio_trace_slsb(struct qdio_q *q)
234{
235 if (q->queue_type==QDIO_TRACE_QTYPE) {
236 if (q->is_input_q)
237 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
238 QDIO_MAX_BUFFERS_PER_Q);
239 else
240 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
241 QDIO_MAX_BUFFERS_PER_Q);
242 }
243}
244#endif
245
246static inline int
247set_slsb(struct qdio_q *q, unsigned int *bufno,
248 unsigned char state, unsigned int *count)
249{
250 int rc;
251#ifdef CONFIG_QDIO_DEBUG
252 qdio_trace_slsb(q);
253#endif
254 rc = qdio_set_slsb(q, bufno, state, count);
255#ifdef CONFIG_QDIO_DEBUG
256 qdio_trace_slsb(q);
257#endif
258 return rc;
259}
145static inline int 260static inline int
146qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, 261qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
147 unsigned int gpr3) 262 unsigned int gpr3)
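
qdio_check_ccq() folds the QEBSM condition-code qualifier into three outcomes: 0, 32 and 96 report completion, 97 means the instruction stopped before all buffers were processed (my reading: the caller may continue with the remainder), and anything else is surfaced to the driver as an error. A compact restatement:

#include <stdio.h>

static int check_ccq(unsigned int ccq)
{
	if (ccq == 0 || ccq == 32 || ccq == 96)
		return 0;	/* done */
	if (ccq == 97)
		return 1;	/* partial, caller may retry */
	return -5;		/* -EIO: notify the device driver */
}

int main(void)
{
	unsigned int samples[] = { 0, 32, 96, 97, 13 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("ccq %3u -> %d\n", samples[i], check_ccq(samples[i]));
	return 0;
}
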
@@ -155,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
155 perf_stats.siga_syncs++; 270 perf_stats.siga_syncs++;
156#endif /* QDIO_PERFORMANCE_STATS */ 271#endif /* QDIO_PERFORMANCE_STATS */
157 272
158 cc = do_siga_sync(q->irq, gpr2, gpr3); 273 cc = do_siga_sync(q->schid, gpr2, gpr3);
159 if (cc) 274 if (cc)
160 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); 275 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
161 276
@@ -170,6 +285,23 @@ qdio_siga_sync_q(struct qdio_q *q)
170 return qdio_siga_sync(q, q->mask, 0); 285 return qdio_siga_sync(q, q->mask, 0);
171} 286}
172 287
288static int
289__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
290{
291 struct qdio_irq *irq;
292 unsigned int fc = 0;
293 unsigned long schid;
294
295 irq = (struct qdio_irq *) q->irq_ptr;
296 if (!irq->is_qebsm)
297 schid = *((u32 *)&q->schid);
298 else {
299 schid = irq->sch_token;
300 fc |= 0x80;
301 }
302 return do_siga_output(schid, q->mask, busy_bit, fc);
303}
304
173/* 305/*
174 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns 306 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
175 * an access exception 307 * an access exception
@@ -189,7 +321,7 @@ qdio_siga_output(struct qdio_q *q)
189 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 321 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
190 322
191 for (;;) { 323 for (;;) {
192 cc = do_siga_output(q->irq, q->mask, &busy_bit); 324 cc = __do_siga_output(q, &busy_bit);
193//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); 325//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
194 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { 326 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
195 if (!start_time) 327 if (!start_time)
@@ -221,7 +353,7 @@ qdio_siga_input(struct qdio_q *q)
221 perf_stats.siga_ins++; 353 perf_stats.siga_ins++;
222#endif /* QDIO_PERFORMANCE_STATS */ 354#endif /* QDIO_PERFORMANCE_STATS */
223 355
224 cc = do_siga_input(q->irq, q->mask); 356 cc = do_siga_input(q->schid, q->mask);
225 357
226 if (cc) 358 if (cc)
227 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); 359 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
@@ -230,7 +362,7 @@ qdio_siga_input(struct qdio_q *q)
230} 362}
231 363
232/* locked by the locks in qdio_activate and qdio_cleanup */ 364/* locked by the locks in qdio_activate and qdio_cleanup */
233static __u32 volatile * 365static __u32 *
234qdio_get_indicator(void) 366qdio_get_indicator(void)
235{ 367{
236 int i; 368 int i;
@@ -258,7 +390,7 @@ qdio_put_indicator(__u32 *addr)
258 atomic_dec(&spare_indicator_usecount); 390 atomic_dec(&spare_indicator_usecount);
259} 391}
260 392
261static inline volatile void 393static inline void
262tiqdio_clear_summary_bit(__u32 *location) 394tiqdio_clear_summary_bit(__u32 *location)
263{ 395{
264 QDIO_DBF_TEXT5(0,trace,"clrsummb"); 396 QDIO_DBF_TEXT5(0,trace,"clrsummb");
@@ -267,7 +399,7 @@ tiqdio_clear_summary_bit(__u32 *location)
267 xchg(location,0); 399 xchg(location,0);
268} 400}
269 401
270static inline volatile void 402static inline void
271tiqdio_set_summary_bit(__u32 *location) 403tiqdio_set_summary_bit(__u32 *location)
272{ 404{
273 QDIO_DBF_TEXT5(0,trace,"setsummb"); 405 QDIO_DBF_TEXT5(0,trace,"setsummb");
@@ -336,7 +468,9 @@ static inline int
336qdio_stop_polling(struct qdio_q *q) 468qdio_stop_polling(struct qdio_q *q)
337{ 469{
338#ifdef QDIO_USE_PROCESSING_STATE 470#ifdef QDIO_USE_PROCESSING_STATE
339 int gsf; 471 unsigned int tmp, gsf, count = 1;
472 unsigned char state = 0;
473 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
340 474
341 if (!atomic_swap(&q->polling,0)) 475 if (!atomic_swap(&q->polling,0))
342 return 1; 476 return 1;
@@ -348,17 +482,22 @@ qdio_stop_polling(struct qdio_q *q)
348 if (!q->is_input_q) 482 if (!q->is_input_q)
349 return 1; 483 return 1;
350 484
351 gsf=GET_SAVED_FRONTIER(q); 485 tmp = gsf = GET_SAVED_FRONTIER(q);
352 set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)& 486 tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
353 (QDIO_MAX_BUFFERS_PER_Q-1)], 487 set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
354 SLSB_P_INPUT_NOT_INIT); 488
355 /* 489 /*
356 * we don't issue this SYNC_MEMORY, as we trust Rick T and 490 * we don't issue this SYNC_MEMORY, as we trust Rick T and
357 * moreover will not use the PROCESSING state under VM, so 491 * moreover will not use the PROCESSING state under VM, so
358 * q->polling was 0 anyway 492 * q->polling was 0 anyway
359 */ 493 */
360 /*SYNC_MEMORY;*/ 494 /*SYNC_MEMORY;*/
361 if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED) 495 if (irq->is_qebsm) {
496 count = 1;
497 qdio_do_eqbs(q, &state, &gsf, &count);
498 } else
499 state = q->slsb.acc.val[gsf];
500 if (state != SLSB_P_INPUT_PRIMED)
362 return 1; 501 return 1;
363 /* 502 /*
364 * set our summary bit again, as otherwise there is a 503 * set our summary bit again, as otherwise there is a
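
qdio_stop_polling() locates the buffer just before the saved frontier with an add-and-mask: because QDIO_MAX_BUFFERS_PER_Q is 128, a power of two, (gsf + 127) & 127 is a decrement modulo the ring size. For example:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128	/* power of two, so & acts as mod */

int main(void)
{
	for (unsigned gsf = 0; gsf < 3; gsf++) {
		unsigned prev = (gsf + QDIO_MAX_BUFFERS_PER_Q - 1) &
				(QDIO_MAX_BUFFERS_PER_Q - 1);
		printf("frontier %3u -> previous buffer %3u\n", gsf, prev);
	}
	/* frontier 0 wraps to 127, 1 -> 0, 2 -> 1 */
	return 0;
}
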
@@ -431,18 +570,136 @@ tiqdio_clear_global_summary(void)
431 570
432 571
433/************************* OUTBOUND ROUTINES *******************************/ 572/************************* OUTBOUND ROUTINES *******************************/
573static int
574qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
575{
576 struct qdio_irq *irq;
577 unsigned char state;
578 unsigned int cnt, count, ftc;
579
580 irq = (struct qdio_irq *) q->irq_ptr;
581 if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
582 SYNC_MEMORY;
583
584 ftc = q->first_to_check;
585 count = qdio_min(atomic_read(&q->number_of_buffers_used),
586 (QDIO_MAX_BUFFERS_PER_Q-1));
587 if (count == 0)
588 return q->first_to_check;
589 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
590 if (cnt == 0)
591 return q->first_to_check;
592 switch (state) {
593 case SLSB_P_OUTPUT_ERROR:
594 QDIO_DBF_TEXT3(0,trace,"outperr");
595 atomic_sub(cnt , &q->number_of_buffers_used);
596 if (q->qdio_error)
597 q->error_status_flags |=
598 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
599 q->qdio_error = SLSB_P_OUTPUT_ERROR;
600 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
601 q->first_to_check = ftc;
602 break;
603 case SLSB_P_OUTPUT_EMPTY:
604 QDIO_DBF_TEXT5(0,trace,"outpempt");
605 atomic_sub(cnt, &q->number_of_buffers_used);
606 q->first_to_check = ftc;
607 break;
608 case SLSB_CU_OUTPUT_PRIMED:
609 /* all buffers primed */
610 QDIO_DBF_TEXT5(0,trace,"outpprim");
611 break;
612 default:
613 break;
614 }
615 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
616 return q->first_to_check;
617}
618
619static int
620qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
621{
622 struct qdio_irq *irq;
623 unsigned char state;
624 int tmp, ftc, count, cnt;
625 char dbf_text[15];
626
627
628 irq = (struct qdio_irq *) q->irq_ptr;
629 ftc = q->first_to_check;
630 count = qdio_min(atomic_read(&q->number_of_buffers_used),
631 (QDIO_MAX_BUFFERS_PER_Q-1));
632 if (count == 0)
633 return q->first_to_check;
634 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
635 if (cnt == 0)
636 return q->first_to_check;
637 switch (state) {
638 case SLSB_P_INPUT_ERROR :
639#ifdef CONFIG_QDIO_DEBUG
640 QDIO_DBF_TEXT3(1,trace,"inperr");
641 sprintf(dbf_text,"%2x,%2x",ftc,count);
642 QDIO_DBF_TEXT3(1,trace,dbf_text);
643#endif /* CONFIG_QDIO_DEBUG */
644 if (q->qdio_error)
645 q->error_status_flags |=
646 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
647 q->qdio_error = SLSB_P_INPUT_ERROR;
648 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
649 atomic_sub(cnt, &q->number_of_buffers_used);
650 q->first_to_check = ftc;
651 break;
652 case SLSB_P_INPUT_PRIMED :
653 QDIO_DBF_TEXT3(0,trace,"inptprim");
654 sprintf(dbf_text,"%2x,%2x",ftc,count);
655 QDIO_DBF_TEXT3(1,trace,dbf_text);
656 tmp = 0;
657 ftc = q->first_to_check;
658#ifdef QDIO_USE_PROCESSING_STATE
659 if (cnt > 1) {
660 cnt -= 1;
661 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
662 if (!tmp)
663 break;
664 }
665 cnt = 1;
666 tmp += set_slsb(q, &ftc,
667 SLSB_P_INPUT_PROCESSING, &cnt);
668 atomic_set(&q->polling, 1);
669#else
670 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
671#endif
672 atomic_sub(tmp, &q->number_of_buffers_used);
673 q->first_to_check = ftc;
674 break;
675 case SLSB_CU_INPUT_EMPTY:
676 case SLSB_P_INPUT_NOT_INIT:
677 case SLSB_P_INPUT_PROCESSING:
678 QDIO_DBF_TEXT5(0,trace,"inpnipro");
679 break;
680 default:
681 break;
682 }
683 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
684 return q->first_to_check;
685}
434 686
435static inline int 687static inline int
436qdio_get_outbound_buffer_frontier(struct qdio_q *q) 688qdio_get_outbound_buffer_frontier(struct qdio_q *q)
437{ 689{
438 int f,f_mod_no; 690 struct qdio_irq *irq;
439 volatile char *slsb; 691 volatile char *slsb;
440 int first_not_to_check; 692 unsigned int count = 1;
693 int first_not_to_check, f, f_mod_no;
441 char dbf_text[15]; 694 char dbf_text[15];
442 695
443 QDIO_DBF_TEXT4(0,trace,"getobfro"); 696 QDIO_DBF_TEXT4(0,trace,"getobfro");
444 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 697 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
445 698
699 irq = (struct qdio_irq *) q->irq_ptr;
700 if (irq->is_qebsm)
701 return qdio_qebsm_get_outbound_buffer_frontier(q);
702
446 slsb=&q->slsb.acc.val[0]; 703 slsb=&q->slsb.acc.val[0];
447 f_mod_no=f=q->first_to_check; 704 f_mod_no=f=q->first_to_check;
448 /* 705 /*
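
The contract of qdio_do_eqbs() used by both QEBSM frontier scanners above: it returns how many buffers were actually extracted (tmp_cnt - *cnt) and leaves *start advanced past them, so first_to_check can move in one step instead of byte-by-byte SLSB inspection. A stub demonstrating the pointer-in/out convention (the stub fakes the instruction; only the arithmetic is the point):

#include <stdio.h>

#define RING 128

/* Stub for the EQBS operation: pretend the hardware reports that the
 * first 'avail' buffers from *start share one state.  The real
 * instruction decrements *cnt by the number of buffers examined. */
static unsigned eqbs_stub(unsigned *start, unsigned *cnt, unsigned avail)
{
	unsigned done = *cnt < avail ? *cnt : avail;

	*start = (*start + done) % RING;
	*cnt -= done;
	return done;
}

int main(void)
{
	unsigned ftc = 120, count = 16;
	unsigned moved = eqbs_stub(&ftc, &count, 10);

	/* frontier advanced by the buffers actually extracted */
	printf("moved %u, new frontier %u, left %u\n", moved, ftc, count);
	return 0;
}
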
@@ -484,7 +741,7 @@ check_next:
484 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); 741 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
485 742
486 /* kind of process the buffer */ 743 /* kind of process the buffer */
487 set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT); 744 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
488 745
489 /* 746 /*
490 * we increment the frontier, as this buffer 747 * we increment the frontier, as this buffer
@@ -597,48 +854,48 @@ qdio_kick_outbound_q(struct qdio_q *q)
597 854
598 result=qdio_siga_output(q); 855 result=qdio_siga_output(q);
599 856
600 switch (result) { 857 switch (result) {
601 case 0: 858 case 0:
602 /* went smooth this time, reset timestamp */ 859 /* went smooth this time, reset timestamp */
603#ifdef CONFIG_QDIO_DEBUG 860#ifdef CONFIG_QDIO_DEBUG
604 QDIO_DBF_TEXT3(0,trace,"cc2reslv"); 861 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
605 sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, 862 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
606 atomic_read(&q->busy_siga_counter)); 863 atomic_read(&q->busy_siga_counter));
607 QDIO_DBF_TEXT3(0,trace,dbf_text); 864 QDIO_DBF_TEXT3(0,trace,dbf_text);
608#endif /* CONFIG_QDIO_DEBUG */ 865#endif /* CONFIG_QDIO_DEBUG */
609 q->timing.busy_start=0; 866 q->timing.busy_start=0;
867 break;
868 case (2|QDIO_SIGA_ERROR_B_BIT_SET):
869 /* cc=2 and busy bit: */
870 atomic_inc(&q->busy_siga_counter);
871
872 /* if the last siga was successful, save
873 * timestamp here */
874 if (!q->timing.busy_start)
875 q->timing.busy_start=NOW;
876
877 /* if we're in time, don't touch error_status_flags
878 * and siga_error */
879 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
880 qdio_mark_q(q);
610 break; 881 break;
611 case (2|QDIO_SIGA_ERROR_B_BIT_SET): 882 }
612 /* cc=2 and busy bit: */ 883 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
613 atomic_inc(&q->busy_siga_counter);
614
615 /* if the last siga was successful, save
616 * timestamp here */
617 if (!q->timing.busy_start)
618 q->timing.busy_start=NOW;
619
620 /* if we're in time, don't touch error_status_flags
621 * and siga_error */
622 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
623 qdio_mark_q(q);
624 break;
625 }
626 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
627#ifdef CONFIG_QDIO_DEBUG 884#ifdef CONFIG_QDIO_DEBUG
628 sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, 885 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
629 atomic_read(&q->busy_siga_counter)); 886 atomic_read(&q->busy_siga_counter));
630 QDIO_DBF_TEXT3(0,trace,dbf_text); 887 QDIO_DBF_TEXT3(0,trace,dbf_text);
631#endif /* CONFIG_QDIO_DEBUG */ 888#endif /* CONFIG_QDIO_DEBUG */
632 /* else fallthrough and report error */ 889 /* else fallthrough and report error */
633 default: 890 default:
634 /* for plain cc=1, 2 or 3: */ 891 /* for plain cc=1, 2 or 3: */
635 if (q->siga_error) 892 if (q->siga_error)
636 q->error_status_flags|=
637 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
638 q->error_status_flags|= 893 q->error_status_flags|=
639 QDIO_STATUS_LOOK_FOR_ERROR; 894 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
640 q->siga_error=result; 895 q->error_status_flags|=
641 } 896 QDIO_STATUS_LOOK_FOR_ERROR;
897 q->siga_error=result;
898 }
642} 899}
643 900
644static inline void 901static inline void
@@ -743,8 +1000,10 @@ qdio_outbound_processing(struct qdio_q *q)
743static inline int 1000static inline int
744qdio_get_inbound_buffer_frontier(struct qdio_q *q) 1001qdio_get_inbound_buffer_frontier(struct qdio_q *q)
745{ 1002{
1003 struct qdio_irq *irq;
746 int f,f_mod_no; 1004 int f,f_mod_no;
747 volatile char *slsb; 1005 volatile char *slsb;
1006 unsigned int count = 1;
748 int first_not_to_check; 1007 int first_not_to_check;
749#ifdef CONFIG_QDIO_DEBUG 1008#ifdef CONFIG_QDIO_DEBUG
750 char dbf_text[15]; 1009 char dbf_text[15];
@@ -756,6 +1015,10 @@ qdio_get_inbound_buffer_frontier(struct qdio_q *q)
756 QDIO_DBF_TEXT4(0,trace,"getibfro"); 1015 QDIO_DBF_TEXT4(0,trace,"getibfro");
757 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 1016 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
758 1017
1018 irq = (struct qdio_irq *) q->irq_ptr;
1019 if (irq->is_qebsm)
1020 return qdio_qebsm_get_inbound_buffer_frontier(q);
1021
759 slsb=&q->slsb.acc.val[0]; 1022 slsb=&q->slsb.acc.val[0];
760 f_mod_no=f=q->first_to_check; 1023 f_mod_no=f=q->first_to_check;
761 /* 1024 /*
@@ -792,19 +1055,19 @@ check_next:
792 * kill VM in terms of CP overhead 1055 * kill VM in terms of CP overhead
793 */ 1056 */
794 if (q->siga_sync) { 1057 if (q->siga_sync) {
795 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); 1058 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
796 } else { 1059 } else {
797 /* set the previous buffer to NOT_INIT. The current 1060 /* set the previous buffer to NOT_INIT. The current
798 * buffer will be set to PROCESSING at the end of 1061 * buffer will be set to PROCESSING at the end of
799 * this function to avoid further interrupts. */ 1062 * this function to avoid further interrupts. */
800 if (last_position>=0) 1063 if (last_position>=0)
801 set_slsb(&slsb[last_position], 1064 set_slsb(q, &last_position,
802 SLSB_P_INPUT_NOT_INIT); 1065 SLSB_P_INPUT_NOT_INIT, &count);
803 atomic_set(&q->polling,1); 1066 atomic_set(&q->polling,1);
804 last_position=f_mod_no; 1067 last_position=f_mod_no;
805 } 1068 }
806#else /* QDIO_USE_PROCESSING_STATE */ 1069#else /* QDIO_USE_PROCESSING_STATE */
807 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); 1070 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
808#endif /* QDIO_USE_PROCESSING_STATE */ 1071#endif /* QDIO_USE_PROCESSING_STATE */
809 /* 1072 /*
810 * not needed, as the inbound queue will be synced on the next 1073 * not needed, as the inbound queue will be synced on the next
@@ -829,7 +1092,7 @@ check_next:
829 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); 1092 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
830 1093
831 /* kind of process the buffer */ 1094 /* kind of process the buffer */
832 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); 1095 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
833 1096
834 if (q->qdio_error) 1097 if (q->qdio_error)
835 q->error_status_flags|= 1098 q->error_status_flags|=
@@ -857,7 +1120,7 @@ out:
857 1120
858#ifdef QDIO_USE_PROCESSING_STATE 1121#ifdef QDIO_USE_PROCESSING_STATE
859 if (last_position>=0) 1122 if (last_position>=0)
860 set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING); 1123 set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
861#endif /* QDIO_USE_PROCESSING_STATE */ 1124#endif /* QDIO_USE_PROCESSING_STATE */
862 1125
863 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); 1126 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
@@ -902,6 +1165,10 @@ static inline int
902tiqdio_is_inbound_q_done(struct qdio_q *q) 1165tiqdio_is_inbound_q_done(struct qdio_q *q)
903{ 1166{
904 int no_used; 1167 int no_used;
1168 unsigned int start_buf, count;
1169 unsigned char state = 0;
1170 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1171
905#ifdef CONFIG_QDIO_DEBUG 1172#ifdef CONFIG_QDIO_DEBUG
906 char dbf_text[15]; 1173 char dbf_text[15];
907#endif 1174#endif
@@ -927,8 +1194,13 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
927 if (!q->siga_sync) 1194 if (!q->siga_sync)
928 /* we'll check for more primed buffers in qeth_stop_polling */ 1195 /* we'll check for more primed buffers in qeth_stop_polling */
929 return 0; 1196 return 0;
930 1197 if (irq->is_qebsm) {
931 if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED) 1198 count = 1;
1199 start_buf = q->first_to_check;
1200 qdio_do_eqbs(q, &state, &start_buf, &count);
1201 } else
1202 state = q->slsb.acc.val[q->first_to_check];
1203 if (state != SLSB_P_INPUT_PRIMED)
932 /* 1204 /*
933 * nothing more to do, if next buffer is not PRIMED. 1205 * nothing more to do, if next buffer is not PRIMED.
934 * note that we did a SYNC_MEMORY before, that there 1206 * note that we did a SYNC_MEMORY before, that there
@@ -955,6 +1227,10 @@ static inline int
955qdio_is_inbound_q_done(struct qdio_q *q) 1227qdio_is_inbound_q_done(struct qdio_q *q)
956{ 1228{
957 int no_used; 1229 int no_used;
1230 unsigned int start_buf, count;
1231 unsigned char state = 0;
1232 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1233
958#ifdef CONFIG_QDIO_DEBUG 1234#ifdef CONFIG_QDIO_DEBUG
959 char dbf_text[15]; 1235 char dbf_text[15];
960#endif 1236#endif
@@ -973,8 +1249,13 @@ qdio_is_inbound_q_done(struct qdio_q *q)
973 QDIO_DBF_TEXT4(0,trace,dbf_text); 1249 QDIO_DBF_TEXT4(0,trace,dbf_text);
974 return 1; 1250 return 1;
975 } 1251 }
976 1252 if (irq->is_qebsm) {
977 if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) { 1253 count = 1;
1254 start_buf = q->first_to_check;
1255 qdio_do_eqbs(q, &state, &start_buf, &count);
1256 } else
1257 state = q->slsb.acc.val[q->first_to_check];
1258 if (state == SLSB_P_INPUT_PRIMED) {
978 /* we got something to do */ 1259 /* we got something to do */
979 QDIO_DBF_TEXT4(0,trace,"inqisntA"); 1260 QDIO_DBF_TEXT4(0,trace,"inqisntA");
980 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 1261 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -1456,7 +1737,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1456 void *ptr; 1737 void *ptr;
1457 int available; 1738 int available;
1458 1739
1459 sprintf(dbf_text,"qfqs%4x",cdev->private->irq); 1740 sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
1460 QDIO_DBF_TEXT0(0,setup,dbf_text); 1741 QDIO_DBF_TEXT0(0,setup,dbf_text);
1461 for (i=0;i<no_input_qs;i++) { 1742 for (i=0;i<no_input_qs;i++) {
1462 q=irq_ptr->input_qs[i]; 1743 q=irq_ptr->input_qs[i];
@@ -1476,7 +1757,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1476 1757
1477 q->queue_type=q_format; 1758 q->queue_type=q_format;
1478 q->int_parm=int_parm; 1759 q->int_parm=int_parm;
1479 q->irq=irq_ptr->irq; 1760 q->schid = irq_ptr->schid;
1480 q->irq_ptr = irq_ptr; 1761 q->irq_ptr = irq_ptr;
1481 q->cdev = cdev; 1762 q->cdev = cdev;
1482 q->mask=1<<(31-i); 1763 q->mask=1<<(31-i);
@@ -1523,11 +1804,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1523 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); 1804 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1524 1805
1525 /* fill in slsb */ 1806 /* fill in slsb */
1526 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { 1807 if (!irq_ptr->is_qebsm) {
1527 set_slsb(&q->slsb.acc.val[j], 1808 unsigned int count = 1;
1528 SLSB_P_INPUT_NOT_INIT); 1809 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1529/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ 1810 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
1530 } 1811 }
1531 } 1812 }
1532 1813
1533 for (i=0;i<no_output_qs;i++) { 1814 for (i=0;i<no_output_qs;i++) {
@@ -1549,7 +1830,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1549 q->queue_type=q_format; 1830 q->queue_type=q_format;
1550 q->int_parm=int_parm; 1831 q->int_parm=int_parm;
1551 q->is_input_q=0; 1832 q->is_input_q=0;
1552 q->irq=irq_ptr->irq; 1833 q->schid = irq_ptr->schid;
1553 q->cdev = cdev; 1834 q->cdev = cdev;
1554 q->irq_ptr = irq_ptr; 1835 q->irq_ptr = irq_ptr;
1555 q->mask=1<<(31-i); 1836 q->mask=1<<(31-i);
@@ -1584,11 +1865,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1584 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); 1865 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1585 1866
1586 /* fill in slsb */ 1867 /* fill in slsb */
1587 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { 1868 if (!irq_ptr->is_qebsm) {
1588 set_slsb(&q->slsb.acc.val[j], 1869 unsigned int count = 1;
1589 SLSB_P_OUTPUT_NOT_INIT); 1870 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1590/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ 1871 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
1591 } 1872 }
1592 } 1873 }
1593} 1874}
1594 1875
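
Queue setup keeps the local SLSB initialization only for the non-QEBSM case; under QEBSM the buffer states are maintained by the machine and are set through SQBS instead. A sketch of the two paths (the state value is a stand-in, not the real SLSB_P_*_NOT_INIT encoding):

#include <stdio.h>
#include <string.h>

#define QDIO_MAX_BUFFERS_PER_Q 128

/* Illustrative only: without QEBSM each of the 128 SLSB state bytes is
 * set locally (the kernel uses xchg on the byte); with QEBSM there is
 * nothing to do here because the states are set via SQBS later. */
static void init_slsb(unsigned char *slsb, unsigned char state, int is_qebsm)
{
	if (is_qebsm)
		return;
	for (unsigned j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		slsb[j] = state;
}

int main(void)
{
	unsigned char slsb[QDIO_MAX_BUFFERS_PER_Q];

	memset(slsb, 0xff, sizeof(slsb));
	init_slsb(slsb, 0x00, 0);	/* 0x00 standing in for NOT_INIT */
	printf("slsb[0] = %02x\n", slsb[0]);
	return 0;
}
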
@@ -1656,7 +1937,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1656 char dbf_text[15]; 1937 char dbf_text[15];
1657 1938
1658 QDIO_DBF_TEXT5(0,trace,"newstate"); 1939 QDIO_DBF_TEXT5(0,trace,"newstate");
1659 sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state); 1940 sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1660 QDIO_DBF_TEXT5(0,trace,dbf_text); 1941 QDIO_DBF_TEXT5(0,trace,dbf_text);
1661#endif /* CONFIG_QDIO_DEBUG */ 1942#endif /* CONFIG_QDIO_DEBUG */
1662 1943
@@ -1669,12 +1950,12 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1669} 1950}
1670 1951
1671static inline void 1952static inline void
1672qdio_irq_check_sense(int irq, struct irb *irb) 1953qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1673{ 1954{
1674 char dbf_text[15]; 1955 char dbf_text[15];
1675 1956
1676 if (irb->esw.esw0.erw.cons) { 1957 if (irb->esw.esw0.erw.cons) {
1677 sprintf(dbf_text,"sens%4x",irq); 1958 sprintf(dbf_text,"sens%4x",schid.sch_no);
1678 QDIO_DBF_TEXT2(1,trace,dbf_text); 1959 QDIO_DBF_TEXT2(1,trace,dbf_text);
1679 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); 1960 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1680 1961
@@ -1785,21 +2066,22 @@ qdio_timeout_handler(struct ccw_device *cdev)
1785 2066
1786 switch (irq_ptr->state) { 2067 switch (irq_ptr->state) {
1787 case QDIO_IRQ_STATE_INACTIVE: 2068 case QDIO_IRQ_STATE_INACTIVE:
1788 QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n", 2069 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
1789 irq_ptr->irq); 2070 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1790 QDIO_DBF_TEXT2(1,setup,"eq:timeo"); 2071 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
1791 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2072 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1792 break; 2073 break;
1793 case QDIO_IRQ_STATE_CLEANUP: 2074 case QDIO_IRQ_STATE_CLEANUP:
1794 QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n", 2075 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
1795 irq_ptr->irq); 2076 "irq=0.%x.%x.\n",
2077 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1796 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2078 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1797 break; 2079 break;
1798 case QDIO_IRQ_STATE_ESTABLISHED: 2080 case QDIO_IRQ_STATE_ESTABLISHED:
1799 case QDIO_IRQ_STATE_ACTIVE: 2081 case QDIO_IRQ_STATE_ACTIVE:
1800 /* I/O has been terminated by common I/O layer. */ 2082 /* I/O has been terminated by common I/O layer. */
1801 QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n", 2083 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
1802 irq_ptr->irq); 2084 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1803 QDIO_DBF_TEXT2(1, trace, "cio:term"); 2085 QDIO_DBF_TEXT2(1, trace, "cio:term");
1804 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 2086 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1805 if (get_device(&cdev->dev)) { 2087 if (get_device(&cdev->dev)) {
@@ -1862,7 +2144,7 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1862 } 2144 }
1863 } 2145 }
1864 2146
1865 qdio_irq_check_sense(irq_ptr->irq, irb); 2147 qdio_irq_check_sense(irq_ptr->schid, irb);
1866 2148
1867#ifdef CONFIG_QDIO_DEBUG 2149#ifdef CONFIG_QDIO_DEBUG
1868 sprintf(dbf_text, "state:%d", irq_ptr->state); 2150 sprintf(dbf_text, "state:%d", irq_ptr->state);
@@ -1905,7 +2187,7 @@ int
1905qdio_synchronize(struct ccw_device *cdev, unsigned int flags, 2187qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1906 unsigned int queue_number) 2188 unsigned int queue_number)
1907{ 2189{
1908 int cc; 2190 int cc = 0;
1909 struct qdio_q *q; 2191 struct qdio_q *q;
1910 struct qdio_irq *irq_ptr; 2192 struct qdio_irq *irq_ptr;
1911 void *ptr; 2193 void *ptr;
@@ -1918,7 +2200,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1918 return -ENODEV; 2200 return -ENODEV;
1919 2201
1920#ifdef CONFIG_QDIO_DEBUG 2202#ifdef CONFIG_QDIO_DEBUG
1921 *((int*)(&dbf_text[4])) = irq_ptr->irq; 2203 *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
1922 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); 2204 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
1923 *((int*)(&dbf_text[0]))=flags; 2205 *((int*)(&dbf_text[0]))=flags;
1924 *((int*)(&dbf_text[4]))=queue_number; 2206 *((int*)(&dbf_text[4]))=queue_number;
@@ -1929,12 +2211,14 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1929 q=irq_ptr->input_qs[queue_number]; 2211 q=irq_ptr->input_qs[queue_number];
1930 if (!q) 2212 if (!q)
1931 return -EINVAL; 2213 return -EINVAL;
1932 cc = do_siga_sync(q->irq, 0, q->mask); 2214 if (!(irq_ptr->is_qebsm))
2215 cc = do_siga_sync(q->schid, 0, q->mask);
1933 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { 2216 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
1934 q=irq_ptr->output_qs[queue_number]; 2217 q=irq_ptr->output_qs[queue_number];
1935 if (!q) 2218 if (!q)
1936 return -EINVAL; 2219 return -EINVAL;
1937 cc = do_siga_sync(q->irq, q->mask, 0); 2220 if (!(irq_ptr->is_qebsm))
2221 cc = do_siga_sync(q->schid, q->mask, 0);
1938 } else 2222 } else
1939 return -EINVAL; 2223 return -EINVAL;
1940 2224
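
Note: the QEBSM guard added in this hunk is the behavioral change. Queues managed through QEBSM are kept coherent by the SQBS/EQBS instructions themselves (added to qdio.h later in this patch), so issuing SIGA-sync for them would be redundant; initializing cc to 0 earlier in the function makes the skipped call still report success. A minimal host-neutral sketch of the control flow, with a hypothetical stub standing in for the real inline assembly:

    #include <stdio.h>

    /* Hypothetical stub for the SIGA-sync wrapper in qdio.h. */
    static int do_siga_sync_stub(unsigned int out_mask, unsigned int in_mask)
    {
            printf("SIGA-sync issued (out=%x, in=%x)\n", out_mask, in_mask);
            return 0;                       /* condition code 0: success */
    }

    static int sync_input_queue(int is_qebsm, unsigned int mask)
    {
            int cc = 0;                     /* skipped sync still succeeds */

            if (!is_qebsm)                  /* QEBSM queues self-synchronize */
                    cc = do_siga_sync_stub(0, mask);
            return cc;
    }

    int main(void)
    {
            sync_input_queue(0, 0x80);      /* classic device: sync issued */
            sync_input_queue(1, 0x80);      /* QEBSM device: call skipped */
            return 0;
    }
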
@@ -1945,15 +2229,54 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1945 return cc; 2229 return cc;
1946} 2230}
1947 2231
1948static unsigned char 2232static inline void
1949qdio_check_siga_needs(int sch) 2233qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2234 unsigned long token)
2235{
2236 struct qdio_q *q;
2237 int i;
2238 unsigned int count, start_buf;
2239 char dbf_text[15];
2240
2241 /* check if QEBSM is disabled */
2242 if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
2243 irq_ptr->is_qebsm = 0;
2244 irq_ptr->sch_token = 0;
2245 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2246 QDIO_DBF_TEXT0(0,setup,"noV=V");
2247 return;
2248 }
2249 irq_ptr->sch_token = token;
2250 /* input queue */
2251 for (i = 0; i < irq_ptr->no_input_qs;i++) {
2252 q = irq_ptr->input_qs[i];
2253 count = QDIO_MAX_BUFFERS_PER_Q;
2254 start_buf = 0;
2255 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2256 }
2257 sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2258 QDIO_DBF_TEXT0(0,setup,dbf_text);
2259 sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2260 QDIO_DBF_TEXT0(0,setup,dbf_text);
2261 /* output queue */
2262 for (i = 0; i < irq_ptr->no_output_qs; i++) {
2263 q = irq_ptr->output_qs[i];
2264 count = QDIO_MAX_BUFFERS_PER_Q;
2265 start_buf = 0;
2266 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2267 }
2268}
2269
2270static void
2271qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
1950{ 2272{
1951 int result; 2273 int result;
1952 unsigned char qdioac; 2274 unsigned char qdioac;
1953
1954 struct { 2275 struct {
1955 struct chsc_header request; 2276 struct chsc_header request;
1956 u16 reserved1; 2277 u16 reserved1:10;
2278 u16 ssid:2;
2279 u16 fmt:4;
1957 u16 first_sch; 2280 u16 first_sch;
1958 u16 reserved2; 2281 u16 reserved2;
1959 u16 last_sch; 2282 u16 last_sch;
@@ -1964,67 +2287,83 @@ qdio_check_siga_needs(int sch)
1964 u8 reserved5; 2287 u8 reserved5;
1965 u16 sch; 2288 u16 sch;
1966 u8 qfmt; 2289 u8 qfmt;
1967 u8 reserved6; 2290 u8 parm;
1968 u8 qdioac; 2291 u8 qdioac1;
1969 u8 sch_class; 2292 u8 sch_class;
1970 u8 reserved7; 2293 u8 reserved7;
1971 u8 icnt; 2294 u8 icnt;
1972 u8 reserved8; 2295 u8 reserved8;
1973 u8 ocnt; 2296 u8 ocnt;
2297 u8 reserved9;
2298 u8 mbccnt;
2299 u16 qdioac2;
2300 u64 sch_token;
1974 } *ssqd_area; 2301 } *ssqd_area;
1975 2302
2303 QDIO_DBF_TEXT0(0,setup,"getssqd");
2304 qdioac = 0;
1976 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2305 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1977 if (!ssqd_area) { 2306 if (!ssqd_area) {
1978 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ 2307 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
1979 "SIGAs for sch x%x.\n", sch); 2308 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
1980 return CHSC_FLAG_SIGA_INPUT_NECESSARY | 2309 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
1981 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2310 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
1982 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2311 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2312 irq_ptr->is_qebsm = 0;
2313 irq_ptr->sch_token = 0;
2314 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2315 return;
1983 } 2316 }
2317
1984 ssqd_area->request = (struct chsc_header) { 2318 ssqd_area->request = (struct chsc_header) {
1985 .length = 0x0010, 2319 .length = 0x0010,
1986 .code = 0x0024, 2320 .code = 0x0024,
1987 }; 2321 };
1988 2322 ssqd_area->first_sch = irq_ptr->schid.sch_no;
1989 ssqd_area->first_sch = sch; 2323 ssqd_area->last_sch = irq_ptr->schid.sch_no;
1990 ssqd_area->last_sch = sch; 2324 ssqd_area->ssid = irq_ptr->schid.ssid;
1991 2325 result = chsc(ssqd_area);
1992 result=chsc(ssqd_area);
1993 2326
1994 if (result) { 2327 if (result) {
1995 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ 2328 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
1996 "SIGAs for sch x%x.\n", 2329 "SIGAs for sch 0.%x.%x.\n", result,
1997 result,sch); 2330 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1998 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | 2331 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
1999 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2332 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2000 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2333 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2334 irq_ptr->is_qebsm = 0;
2001 goto out; 2335 goto out;
2002 } 2336 }
2003 2337
2004 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { 2338 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2005 QDIO_PRINT_WARN("response upon checking SIGA needs " \ 2339 QDIO_PRINT_WARN("response upon checking SIGA needs " \
2006 "is 0x%x. Using all SIGAs for sch x%x.\n", 2340 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
2007 ssqd_area->response.code, sch); 2341 ssqd_area->response.code,
2342 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2008 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | 2343 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2009 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2344 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2010 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2345 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2346 irq_ptr->is_qebsm = 0;
2011 goto out; 2347 goto out;
2012 } 2348 }
2013 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || 2349 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2014 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || 2350 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2015 (ssqd_area->sch != sch)) { 2351 (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2016 QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ 2352 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2017 "using all SIGAs.\n",sch); 2353 "using all SIGAs.\n",
2354 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2018 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | 2355 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2019 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2356 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2020 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ 2357 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2358 irq_ptr->is_qebsm = 0;
2021 goto out; 2359 goto out;
2022 } 2360 }
2023 2361 qdioac = ssqd_area->qdioac1;
2024 qdioac = ssqd_area->qdioac;
2025out: 2362out:
2363 qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2364 ssqd_area->sch_token);
2026 free_page ((unsigned long) ssqd_area); 2365 free_page ((unsigned long) ssqd_area);
2027 return qdioac; 2366 irq_ptr->qdioac = qdioac;
2028} 2367}
2029 2368
2030static unsigned int 2369static unsigned int
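
Note: all of the fallback paths above deliberately report the worst case, "every SIGA variant necessary", by setting all three CHSC_FLAG_SIGA_* bits at once; qdio_establish later tests individual bits with &. Bitwise OR matters here: a logical || would collapse the combination to the value 1 and those per-bit tests would misfire. A self-contained illustration (the flag values are placeholders, not the architected ones):

    #include <stdio.h>

    #define SIGA_INPUT_NECESSARY  0x40      /* placeholder values */
    #define SIGA_OUTPUT_NECESSARY 0x20
    #define SIGA_SYNC_NECESSARY   0x10

    int main(void)
    {
            unsigned char qdioac = SIGA_INPUT_NECESSARY |
                                   SIGA_OUTPUT_NECESSARY |
                                   SIGA_SYNC_NECESSARY;  /* all flags set */

            if (qdioac & SIGA_SYNC_NECESSARY)            /* per-bit test */
                    printf("SIGA-sync required (qdioac=0x%02x)\n", qdioac);
            return 0;
    }
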
@@ -2055,6 +2394,13 @@ tiqdio_check_chsc_availability(void)
2055 sprintf(dbf_text,"hydrati%1x", hydra_thinints); 2394 sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2056 QDIO_DBF_TEXT0(0,setup,dbf_text); 2395 QDIO_DBF_TEXT0(0,setup,dbf_text);
2057 2396
2397#ifdef CONFIG_64BIT
2398 /* Check for QEBSM support in general (bit 58). */
2399 is_passthrough = css_general_characteristics.qebsm;
2400#endif
2401 sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2402 QDIO_DBF_TEXT0(0,setup,dbf_text);
2403
2058 /* Check for aif time delay disablement fac (bit 56). If installed, 2404 /* Check for aif time delay disablement fac (bit 56). If installed,
2059 * omit svs even under lpar (good point by rick again) */ 2405 * omit svs even under lpar (good point by rick again) */
2060 omit_svs = css_general_characteristics.aif_tdd; 2406 omit_svs = css_general_characteristics.aif_tdd;
@@ -2091,7 +2437,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2091 /* set to 0x10000000 to enable 2437 /* set to 0x10000000 to enable
2092 * time delay disablement facility */ 2438 * time delay disablement facility */
2093 u32 reserved5; 2439 u32 reserved5;
2094 u32 subsystem_id; 2440 struct subchannel_id schid;
2095 u32 reserved6[1004]; 2441 u32 reserved6[1004];
2096 struct chsc_header response; 2442 struct chsc_header response;
2097 u32 reserved7; 2443 u32 reserved7;
@@ -2113,7 +2459,8 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2113 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2459 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2114 if (!scssc_area) { 2460 if (!scssc_area) {
2115 QDIO_PRINT_WARN("No memory for setting indicators on " \ 2461 QDIO_PRINT_WARN("No memory for setting indicators on " \
2116 "subchannel x%x.\n", irq_ptr->irq); 2462 "subchannel 0.%x.%x.\n",
2463 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2117 return -ENOMEM; 2464 return -ENOMEM;
2118 } 2465 }
2119 scssc_area->request = (struct chsc_header) { 2466 scssc_area->request = (struct chsc_header) {
@@ -2127,7 +2474,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2127 scssc_area->ks = QDIO_STORAGE_KEY; 2474 scssc_area->ks = QDIO_STORAGE_KEY;
2128 scssc_area->kc = QDIO_STORAGE_KEY; 2475 scssc_area->kc = QDIO_STORAGE_KEY;
2129 scssc_area->isc = TIQDIO_THININT_ISC; 2476 scssc_area->isc = TIQDIO_THININT_ISC;
2130 scssc_area->subsystem_id = (1<<16) + irq_ptr->irq; 2477 scssc_area->schid = irq_ptr->schid;
2131 /* enables the time delay disablement facility. Don't care 2478 /* enables the time delay disablement facility. Don't care
2132 * whether it is really there (i.e. we haven't checked for 2479 * whether it is really there (i.e. we haven't checked for
2133 * it) */ 2480 * it) */
@@ -2137,12 +2484,11 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2137 QDIO_PRINT_WARN("Time delay disablement facility " \ 2484 QDIO_PRINT_WARN("Time delay disablement facility " \
2138 "not available\n"); 2485 "not available\n");
2139 2486
2140
2141
2142 result = chsc(scssc_area); 2487 result = chsc(scssc_area);
2143 if (result) { 2488 if (result) {
2144 QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ 2489 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2145 "cc=%i.\n",irq_ptr->irq,result); 2490 "cc=%i.\n",
2491 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2146 result = -EIO; 2492 result = -EIO;
2147 goto out; 2493 goto out;
2148 } 2494 }
@@ -2198,7 +2544,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2198 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2544 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2199 if (!scsscf_area) { 2545 if (!scsscf_area) {
2200 QDIO_PRINT_WARN("No memory for setting delay target on " \ 2546 QDIO_PRINT_WARN("No memory for setting delay target on " \
2201 "subchannel x%x.\n", irq_ptr->irq); 2547 "subchannel 0.%x.%x.\n",
2548 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2202 return -ENOMEM; 2549 return -ENOMEM;
2203 } 2550 }
2204 scsscf_area->request = (struct chsc_header) { 2551 scsscf_area->request = (struct chsc_header) {
@@ -2210,8 +2557,10 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2210 2557
2211 result=chsc(scsscf_area); 2558 result=chsc(scsscf_area);
2212 if (result) { 2559 if (result) {
2213 QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ 2560 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2214 "cc=%i. Continuing.\n",irq_ptr->irq,result); 2561 "cc=%i. Continuing.\n",
2562 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2563 result);
2215 result = -EIO; 2564 result = -EIO;
2216 goto out; 2565 goto out;
2217 } 2566 }
@@ -2245,7 +2594,7 @@ qdio_cleanup(struct ccw_device *cdev, int how)
2245 if (!irq_ptr) 2594 if (!irq_ptr)
2246 return -ENODEV; 2595 return -ENODEV;
2247 2596
2248 sprintf(dbf_text,"qcln%4x",irq_ptr->irq); 2597 sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2249 QDIO_DBF_TEXT1(0,trace,dbf_text); 2598 QDIO_DBF_TEXT1(0,trace,dbf_text);
2250 QDIO_DBF_TEXT0(0,setup,dbf_text); 2599 QDIO_DBF_TEXT0(0,setup,dbf_text);
2251 2600
@@ -2272,7 +2621,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2272 2621
2273 down(&irq_ptr->setting_up_sema); 2622 down(&irq_ptr->setting_up_sema);
2274 2623
2275 sprintf(dbf_text,"qsqs%4x",irq_ptr->irq); 2624 sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2276 QDIO_DBF_TEXT1(0,trace,dbf_text); 2625 QDIO_DBF_TEXT1(0,trace,dbf_text);
2277 QDIO_DBF_TEXT0(0,setup,dbf_text); 2626 QDIO_DBF_TEXT0(0,setup,dbf_text);
2278 2627
@@ -2378,7 +2727,7 @@ qdio_free(struct ccw_device *cdev)
2378 2727
2379 down(&irq_ptr->setting_up_sema); 2728 down(&irq_ptr->setting_up_sema);
2380 2729
2381 sprintf(dbf_text,"qfqs%4x",irq_ptr->irq); 2730 sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2382 QDIO_DBF_TEXT1(0,trace,dbf_text); 2731 QDIO_DBF_TEXT1(0,trace,dbf_text);
2383 QDIO_DBF_TEXT0(0,setup,dbf_text); 2732 QDIO_DBF_TEXT0(0,setup,dbf_text);
2384 2733
@@ -2526,13 +2875,14 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2526 irq_ptr = cdev->private->qdio_data; 2875 irq_ptr = cdev->private->qdio_data;
2527 2876
2528 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { 2877 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2529 sprintf(dbf_text,"ick1%4x",irq_ptr->irq); 2878 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2530 QDIO_DBF_TEXT2(1,trace,dbf_text); 2879 QDIO_DBF_TEXT2(1,trace,dbf_text);
2531 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); 2880 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2532 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); 2881 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2533 QDIO_PRINT_ERR("received check condition on establish " \ 2882 QDIO_PRINT_ERR("received check condition on establish " \
2534 "queues on irq 0x%x (cs=x%x, ds=x%x).\n", 2883 "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2535 irq_ptr->irq,cstat,dstat); 2884 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2885 cstat,dstat);
2536 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); 2886 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2537 } 2887 }
2538 2888
@@ -2540,9 +2890,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2540 QDIO_DBF_TEXT2(1,setup,"eq:no de"); 2890 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2541 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); 2891 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2542 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); 2892 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2543 QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " 2893 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2544 "device end: dstat=%02x, cstat=%02x\n", 2894 "device end: dstat=%02x, cstat=%02x\n",
2545 irq_ptr->irq, dstat, cstat); 2895 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2896 dstat, cstat);
2546 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2897 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2547 return 1; 2898 return 1;
2548 } 2899 }
@@ -2551,10 +2902,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2551 QDIO_DBF_TEXT2(1,setup,"eq:badio"); 2902 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2552 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); 2903 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2553 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); 2904 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2554 QDIO_PRINT_ERR("establish queues on irq %04x: got " 2905 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2555 "the following devstat: dstat=%02x, " 2906 "the following devstat: dstat=%02x, "
2556 "cstat=%02x\n", 2907 "cstat=%02x\n", irq_ptr->schid.ssid,
2557 irq_ptr->irq, dstat, cstat); 2908 irq_ptr->schid.sch_no, dstat, cstat);
2558 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2909 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2559 return 1; 2910 return 1;
2560 } 2911 }
@@ -2569,7 +2920,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2569 2920
2570 irq_ptr = cdev->private->qdio_data; 2921 irq_ptr = cdev->private->qdio_data;
2571 2922
2572 sprintf(dbf_text,"qehi%4x",cdev->private->irq); 2923 sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
2573 QDIO_DBF_TEXT0(0,setup,dbf_text); 2924 QDIO_DBF_TEXT0(0,setup,dbf_text);
2574 QDIO_DBF_TEXT0(0,trace,dbf_text); 2925 QDIO_DBF_TEXT0(0,trace,dbf_text);
2575 2926
@@ -2588,7 +2939,7 @@ qdio_initialize(struct qdio_initialize *init_data)
2588 int rc; 2939 int rc;
2589 char dbf_text[15]; 2940 char dbf_text[15];
2590 2941
2591 sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq); 2942 sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
2592 QDIO_DBF_TEXT0(0,setup,dbf_text); 2943 QDIO_DBF_TEXT0(0,setup,dbf_text);
2593 QDIO_DBF_TEXT0(0,trace,dbf_text); 2944 QDIO_DBF_TEXT0(0,trace,dbf_text);
2594 2945
@@ -2609,7 +2960,7 @@ qdio_allocate(struct qdio_initialize *init_data)
2609 struct qdio_irq *irq_ptr; 2960 struct qdio_irq *irq_ptr;
2610 char dbf_text[15]; 2961 char dbf_text[15];
2611 2962
2612 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq); 2963 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
2613 QDIO_DBF_TEXT0(0,setup,dbf_text); 2964 QDIO_DBF_TEXT0(0,setup,dbf_text);
2614 QDIO_DBF_TEXT0(0,trace,dbf_text); 2965 QDIO_DBF_TEXT0(0,trace,dbf_text);
2615 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || 2966 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
@@ -2682,7 +3033,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2682 3033
2683 irq_ptr->int_parm=init_data->int_parm; 3034 irq_ptr->int_parm=init_data->int_parm;
2684 3035
2685 irq_ptr->irq = init_data->cdev->private->irq; 3036 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
2686 irq_ptr->no_input_qs=init_data->no_input_qs; 3037 irq_ptr->no_input_qs=init_data->no_input_qs;
2687 irq_ptr->no_output_qs=init_data->no_output_qs; 3038 irq_ptr->no_output_qs=init_data->no_output_qs;
2688 3039
@@ -2698,11 +3049,12 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2698 QDIO_DBF_TEXT2(0,setup,dbf_text); 3049 QDIO_DBF_TEXT2(0,setup,dbf_text);
2699 3050
2700 if (irq_ptr->is_thinint_irq) { 3051 if (irq_ptr->is_thinint_irq) {
2701 irq_ptr->dev_st_chg_ind=qdio_get_indicator(); 3052 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
2702 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); 3053 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
2703 if (!irq_ptr->dev_st_chg_ind) { 3054 if (!irq_ptr->dev_st_chg_ind) {
2704 QDIO_PRINT_WARN("no indicator location available " \ 3055 QDIO_PRINT_WARN("no indicator location available " \
2705 "for irq 0x%x\n",irq_ptr->irq); 3056 "for irq 0.%x.%x\n",
3057 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2706 qdio_release_irq_memory(irq_ptr); 3058 qdio_release_irq_memory(irq_ptr);
2707 return -ENOBUFS; 3059 return -ENOBUFS;
2708 } 3060 }
@@ -2747,6 +3099,10 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2747 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; 3099 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
2748 3100
2749 /* fill in qib */ 3101 /* fill in qib */
3102 irq_ptr->is_qebsm = is_passthrough;
3103 if (irq_ptr->is_qebsm)
3104 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3105
2750 irq_ptr->qib.qfmt=init_data->q_format; 3106 irq_ptr->qib.qfmt=init_data->q_format;
2751 if (init_data->no_input_qs) 3107 if (init_data->no_input_qs)
2752 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); 3108 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
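
Note: this is where QEBSM is actually switched on per device: the capability probed from css_general_characteristics is latched into irq_ptr->is_qebsm, and the enable bit is set in the QIB's rflags before the establish I/O runs. A small sketch of that handshake (the bit value is a placeholder; the real one is QIB_RFLAGS_ENABLE_QEBSM):

    #include <stdio.h>

    #define RFLAGS_ENABLE_QEBSM 0x80        /* placeholder bit value */

    struct qib_stub { unsigned char rflags; };

    int main(void)
    {
            struct qib_stub qib = { 0 };
            int is_qebsm = 1;   /* would come from the css facility bits */

            if (is_qebsm)
                    qib.rflags |= RFLAGS_ENABLE_QEBSM;
            printf("qib.rflags = 0x%02x\n", qib.rflags);
            return 0;
    }
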
@@ -2829,7 +3185,7 @@ qdio_establish(struct qdio_initialize *init_data)
2829 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); 3185 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
2830 } 3186 }
2831 3187
2832 sprintf(dbf_text,"qest%4x",cdev->private->irq); 3188 sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
2833 QDIO_DBF_TEXT0(0,setup,dbf_text); 3189 QDIO_DBF_TEXT0(0,setup,dbf_text);
2834 QDIO_DBF_TEXT0(0,trace,dbf_text); 3190 QDIO_DBF_TEXT0(0,trace,dbf_text);
2835 3191
@@ -2855,9 +3211,10 @@ qdio_establish(struct qdio_initialize *init_data)
2855 sprintf(dbf_text,"eq:io%4x",result); 3211 sprintf(dbf_text,"eq:io%4x",result);
2856 QDIO_DBF_TEXT2(1,setup,dbf_text); 3212 QDIO_DBF_TEXT2(1,setup,dbf_text);
2857 } 3213 }
2858 QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ 3214 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
2859 "returned %i, next try returned %i\n", 3215 "returned %i, next try returned %i\n",
2860 irq_ptr->irq,result,result2); 3216 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3217 result, result2);
2861 result=result2; 3218 result=result2;
2862 if (result) 3219 if (result)
2863 ccw_device_set_timeout(cdev, 0); 3220 ccw_device_set_timeout(cdev, 0);
@@ -2884,7 +3241,7 @@ qdio_establish(struct qdio_initialize *init_data)
2884 return -EIO; 3241 return -EIO;
2885 } 3242 }
2886 3243
2887 irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); 3244 qdio_get_ssqd_information(irq_ptr);
2888 /* if this gets set once, we're running under VM and can omit SVSes */ 3245 /* if this gets set once, we're running under VM and can omit SVSes */
2889 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) 3246 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
2890 omit_svs=1; 3247 omit_svs=1;
@@ -2930,7 +3287,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
2930 goto out; 3287 goto out;
2931 } 3288 }
2932 3289
2933 sprintf(dbf_text,"qact%4x", irq_ptr->irq); 3290 sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
2934 QDIO_DBF_TEXT2(0,setup,dbf_text); 3291 QDIO_DBF_TEXT2(0,setup,dbf_text);
2935 QDIO_DBF_TEXT2(0,trace,dbf_text); 3292 QDIO_DBF_TEXT2(0,trace,dbf_text);
2936 3293
@@ -2955,9 +3312,10 @@ qdio_activate(struct ccw_device *cdev, int flags)
2955 sprintf(dbf_text,"aq:io%4x",result); 3312 sprintf(dbf_text,"aq:io%4x",result);
2956 QDIO_DBF_TEXT2(1,setup,dbf_text); 3313 QDIO_DBF_TEXT2(1,setup,dbf_text);
2957 } 3314 }
2958 QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ 3315 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
2959 "returned %i, next try returned %i\n", 3316 "returned %i, next try returned %i\n",
2960 irq_ptr->irq,result,result2); 3317 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3318 result, result2);
2961 result=result2; 3319 result=result2;
2962 } 3320 }
2963 3321
@@ -3015,30 +3373,40 @@ static inline void
3015qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, 3373qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3016 unsigned int count, struct qdio_buffer *buffers) 3374 unsigned int count, struct qdio_buffer *buffers)
3017{ 3375{
3376 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3377 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3378 if (irq->is_qebsm) {
3379 while (count)
3380 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3381 return;
3382 }
3018 for (;;) { 3383 for (;;) {
3019 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY); 3384 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3020 count--; 3385 count--;
3021 if (!count) break; 3386 if (!count) break;
3022 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); 3387 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3023 } 3388 }
3024
3025 /* not necessary, as the queues are synced during the SIGA read */
3026 /*SYNC_MEMORY;*/
3027} 3389}
3028 3390
3029static inline void 3391static inline void
3030qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, 3392qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3031 unsigned int count, struct qdio_buffer *buffers) 3393 unsigned int count, struct qdio_buffer *buffers)
3032{ 3394{
3395 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3396
3397 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3398 if (irq->is_qebsm) {
3399 while (count)
3400 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3401 return;
3402 }
3403
3033 for (;;) { 3404 for (;;) {
3034 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED); 3405 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3035 count--; 3406 count--;
3036 if (!count) break; 3407 if (!count) break;
3037 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); 3408 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3038 } 3409 }
3039
3040 /* SIGA write will sync the queues */
3041 /*SYNC_MEMORY;*/
3042} 3410}
3043 3411
3044static inline void 3412static inline void
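
Note: both fill routines walk the 128-entry SLSB ring, and because QDIO_MAX_BUFFERS_PER_Q is a power of two the wrap-around is a mask rather than a modulo; the new "qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1)" up front also canonicalizes a caller-supplied index. A standalone demonstration of the wrap:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128      /* power of two, so & works */

    int main(void)
    {
            unsigned int qidx = 126, count = 4;

            while (count--) {
                    printf("marking buffer %u\n", qidx);
                    qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
            }
            return 0;       /* prints 126, 127, 0, 1: index wraps to 0 */
    }
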
@@ -3083,6 +3451,9 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3083 struct qdio_buffer *buffers) 3451 struct qdio_buffer *buffers)
3084{ 3452{
3085 int used_elements; 3453 int used_elements;
3454 unsigned int cnt, start_buf;
3455 unsigned char state = 0;
3456 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3086 3457
3087 /* This is the outbound handling of queues */ 3458 /* This is the outbound handling of queues */
3088#ifdef QDIO_PERFORMANCE_STATS 3459#ifdef QDIO_PERFORMANCE_STATS
@@ -3115,9 +3486,15 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3115 * SYNC_MEMORY :-/ ), we try to 3486 * SYNC_MEMORY :-/ ), we try to
3116 * fast-requeue buffers 3487 * fast-requeue buffers
3117 */ 3488 */
3118 if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) 3489 if (irq->is_qebsm) {
3119 &(QDIO_MAX_BUFFERS_PER_Q-1)]!= 3490 cnt = 1;
3120 SLSB_CU_OUTPUT_PRIMED) { 3491 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3492 (QDIO_MAX_BUFFERS_PER_Q-1));
3493 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3494 } else
3495 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3496 &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3497 if (state != SLSB_CU_OUTPUT_PRIMED) {
3121 qdio_kick_outbound_q(q); 3498 qdio_kick_outbound_q(q);
3122 } else { 3499 } else {
3123 QDIO_DBF_TEXT3(0,trace, "fast-req"); 3500 QDIO_DBF_TEXT3(0,trace, "fast-req");
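
Note: the fast-requeue heuristic needs the state of the buffer just before qidx. Under QEBSM the program is not supposed to read the SLSB directly, so the state is fetched through qdio_do_eqbs(); classic devices still peek at slsb.acc.val[]. A host-neutral sketch of that split (the state value is a placeholder, and the stub merely models the EQBS path):

    #include <stdio.h>

    #define Q_SIZE 128
    #define CU_OUTPUT_PRIMED 0x62           /* placeholder state value */

    static unsigned char prev_state(int is_qebsm, const unsigned char *slsb,
                                    unsigned int qidx)
    {
            unsigned int prev = (qidx + Q_SIZE - 1) & (Q_SIZE - 1);

            if (is_qebsm)
                    return 0;   /* real code: qdio_do_eqbs(q, &state, ...) */
            return slsb[prev];
    }

    int main(void)
    {
            unsigned char slsb[Q_SIZE] = { 0 };

            slsb[Q_SIZE - 1] = CU_OUTPUT_PRIMED;
            printf("state before 0: 0x%02x\n", prev_state(0, slsb, 0));
            return 0;
    }
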
@@ -3150,7 +3527,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3150#ifdef CONFIG_QDIO_DEBUG 3527#ifdef CONFIG_QDIO_DEBUG
3151 char dbf_text[20]; 3528 char dbf_text[20];
3152 3529
3153 sprintf(dbf_text,"doQD%04x",cdev->private->irq); 3530 sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
3154 QDIO_DBF_TEXT3(0,trace,dbf_text); 3531 QDIO_DBF_TEXT3(0,trace,dbf_text);
3155#endif /* CONFIG_QDIO_DEBUG */ 3532#endif /* CONFIG_QDIO_DEBUG */
3156 3533
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 328e31cc6854..fa385e761fe1 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -3,14 +3,15 @@
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5
6#define VERSION_CIO_QDIO_H "$Revision: 1.33 $" 6#include "schid.h"
7
8#define VERSION_CIO_QDIO_H "$Revision: 1.40 $"
7 9
8#ifdef CONFIG_QDIO_DEBUG 10#ifdef CONFIG_QDIO_DEBUG
9#define QDIO_VERBOSE_LEVEL 9 11#define QDIO_VERBOSE_LEVEL 9
10#else /* CONFIG_QDIO_DEBUG */ 12#else /* CONFIG_QDIO_DEBUG */
11#define QDIO_VERBOSE_LEVEL 5 13#define QDIO_VERBOSE_LEVEL 5
12#endif /* CONFIG_QDIO_DEBUG */ 14#endif /* CONFIG_QDIO_DEBUG */
13
14#define QDIO_USE_PROCESSING_STATE 15#define QDIO_USE_PROCESSING_STATE
15 16
16#ifdef CONFIG_QDIO_PERF_STATS 17#ifdef CONFIG_QDIO_PERF_STATS
@@ -265,12 +266,64 @@ QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
265/* 266/*
266 * Some instructions as assembly 267 * Some instructions as assembly
267 */ 268 */
269
270static inline int
271do_sqbs(unsigned long sch, unsigned char state, int queue,
272 unsigned int *start, unsigned int *count)
273{
274#ifdef CONFIG_64BIT
275 register unsigned long _ccq asm ("0") = *count;
276 register unsigned long _sch asm ("1") = sch;
277 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
278
279 asm volatile (
280 " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t"
281 : "+d" (_ccq), "+d" (_queuestart)
282 : "d" ((unsigned long)state), "d" (_sch)
283 : "memory", "cc"
284 );
285 *count = _ccq & 0xff;
286 *start = _queuestart & 0xff;
287
288 return (_ccq >> 32) & 0xff;
289#else
290 return 0;
291#endif
292}
293
294static inline int
295do_eqbs(unsigned long sch, unsigned char *state, int queue,
296 unsigned int *start, unsigned int *count)
297{
298#ifdef CONFIG_64BIT
299 register unsigned long _ccq asm ("0") = *count;
300 register unsigned long _sch asm ("1") = sch;
301 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
302 unsigned long _state = 0;
303
304 asm volatile (
305 " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t"
306 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
307 : "d" (_sch)
308 : "memory", "cc"
309 );
310 *count = _ccq & 0xff;
311 *start = _queuestart & 0xff;
312 *state = _state & 0xff;
313
314 return (_ccq >> 32) & 0xff;
315#else
316 return 0;
317#endif
318}
319
320
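
Note: do_sqbs (set queue buffer state) and do_eqbs (extract queue buffer state) wrap the two new QEBSM instructions. Their calling convention is deliberately in-out: *start and *count are advanced past the buffers processed, so callers can loop until count reaches zero, and do_eqbs additionally returns one state byte covering the buffers it extracted. A hypothetical host-neutral model of that convention (the real work is done by the .insn lines above):

    #include <stdio.h>

    /* Model only: pretends every requested buffer shares one state. */
    static int eqbs_model(unsigned char *state, unsigned int *start,
                          unsigned int *count)
    {
            *state = 0x42;                  /* made-up "primed" value */
            *start = (*start + *count) & 127;
            *count = 0;                     /* everything was extracted */
            return 0;                       /* condition code 0 */
    }

    int main(void)
    {
            unsigned char state;
            unsigned int start = 10, count = 3;

            while (count)
                    if (eqbs_model(&state, &start, &count))
                            break;
            printf("state=0x%02x next=%u left=%u\n", state, start, count);
            return 0;
    }
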
268static inline int 321static inline int
269do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) 322do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
270{ 323{
271 int cc; 324 int cc;
272 325
273#ifndef CONFIG_ARCH_S390X 326#ifndef CONFIG_64BIT
274 asm volatile ( 327 asm volatile (
275 "lhi 0,2 \n\t" 328 "lhi 0,2 \n\t"
276 "lr 1,%1 \n\t" 329 "lr 1,%1 \n\t"
@@ -280,10 +333,10 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
280 "ipm %0 \n\t" 333 "ipm %0 \n\t"
281 "srl %0,28 \n\t" 334 "srl %0,28 \n\t"
282 : "=d" (cc) 335 : "=d" (cc)
283 : "d" (0x10000|irq), "d" (mask1), "d" (mask2) 336 : "d" (schid), "d" (mask1), "d" (mask2)
284 : "cc", "0", "1", "2", "3" 337 : "cc", "0", "1", "2", "3"
285 ); 338 );
286#else /* CONFIG_ARCH_S390X */ 339#else /* CONFIG_64BIT */
287 asm volatile ( 340 asm volatile (
288 "lghi 0,2 \n\t" 341 "lghi 0,2 \n\t"
289 "llgfr 1,%1 \n\t" 342 "llgfr 1,%1 \n\t"
@@ -293,19 +346,19 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
293 "ipm %0 \n\t" 346 "ipm %0 \n\t"
294 "srl %0,28 \n\t" 347 "srl %0,28 \n\t"
295 : "=d" (cc) 348 : "=d" (cc)
296 : "d" (0x10000|irq), "d" (mask1), "d" (mask2) 349 : "d" (schid), "d" (mask1), "d" (mask2)
297 : "cc", "0", "1", "2", "3" 350 : "cc", "0", "1", "2", "3"
298 ); 351 );
299#endif /* CONFIG_ARCH_S390X */ 352#endif /* CONFIG_64BIT */
300 return cc; 353 return cc;
301} 354}
302 355
303static inline int 356static inline int
304do_siga_input(unsigned int irq, unsigned int mask) 357do_siga_input(struct subchannel_id schid, unsigned int mask)
305{ 358{
306 int cc; 359 int cc;
307 360
308#ifndef CONFIG_ARCH_S390X 361#ifndef CONFIG_64BIT
309 asm volatile ( 362 asm volatile (
310 "lhi 0,1 \n\t" 363 "lhi 0,1 \n\t"
311 "lr 1,%1 \n\t" 364 "lr 1,%1 \n\t"
@@ -314,10 +367,10 @@ do_siga_input(unsigned int irq, unsigned int mask)
314 "ipm %0 \n\t" 367 "ipm %0 \n\t"
315 "srl %0,28 \n\t" 368 "srl %0,28 \n\t"
316 : "=d" (cc) 369 : "=d" (cc)
317 : "d" (0x10000|irq), "d" (mask) 370 : "d" (schid), "d" (mask)
318 : "cc", "0", "1", "2", "memory" 371 : "cc", "0", "1", "2", "memory"
319 ); 372 );
320#else /* CONFIG_ARCH_S390X */ 373#else /* CONFIG_64BIT */
321 asm volatile ( 374 asm volatile (
322 "lghi 0,1 \n\t" 375 "lghi 0,1 \n\t"
323 "llgfr 1,%1 \n\t" 376 "llgfr 1,%1 \n\t"
@@ -326,21 +379,22 @@ do_siga_input(unsigned int irq, unsigned int mask)
326 "ipm %0 \n\t" 379 "ipm %0 \n\t"
327 "srl %0,28 \n\t" 380 "srl %0,28 \n\t"
328 : "=d" (cc) 381 : "=d" (cc)
329 : "d" (0x10000|irq), "d" (mask) 382 : "d" (schid), "d" (mask)
330 : "cc", "0", "1", "2", "memory" 383 : "cc", "0", "1", "2", "memory"
331 ); 384 );
332#endif /* CONFIG_ARCH_S390X */ 385#endif /* CONFIG_64BIT */
333 386
334 return cc; 387 return cc;
335} 388}
336 389
337static inline int 390static inline int
338do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) 391do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
392 unsigned int fc)
339{ 393{
340 int cc; 394 int cc;
341 __u32 busy_bit; 395 __u32 busy_bit;
342 396
343#ifndef CONFIG_ARCH_S390X 397#ifndef CONFIG_64BIT
344 asm volatile ( 398 asm volatile (
345 "lhi 0,0 \n\t" 399 "lhi 0,0 \n\t"
346 "lr 1,%2 \n\t" 400 "lr 1,%2 \n\t"
@@ -366,14 +420,14 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
366 ".long 0b,2b \n\t" 420 ".long 0b,2b \n\t"
367 ".previous \n\t" 421 ".previous \n\t"
368 : "=d" (cc), "=d" (busy_bit) 422 : "=d" (cc), "=d" (busy_bit)
369 : "d" (0x10000|irq), "d" (mask), 423 : "d" (schid), "d" (mask),
370 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 424 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
371 : "cc", "0", "1", "2", "memory" 425 : "cc", "0", "1", "2", "memory"
372 ); 426 );
373#else /* CONFIG_ARCH_S390X */ 427#else /* CONFIG_64BIT */
374 asm volatile ( 428 asm volatile (
375 "lghi 0,0 \n\t" 429 "llgfr 0,%5 \n\t"
376 "llgfr 1,%2 \n\t" 430 "lgr 1,%2 \n\t"
377 "llgfr 2,%3 \n\t" 431 "llgfr 2,%3 \n\t"
378 "siga 0 \n\t" 432 "siga 0 \n\t"
379 "0:" 433 "0:"
@@ -391,11 +445,11 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
391 ".quad 0b,1b \n\t" 445 ".quad 0b,1b \n\t"
392 ".previous \n\t" 446 ".previous \n\t"
393 : "=d" (cc), "=d" (busy_bit) 447 : "=d" (cc), "=d" (busy_bit)
394 : "d" (0x10000|irq), "d" (mask), 448 : "d" (schid), "d" (mask),
395 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 449 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
396 : "cc", "0", "1", "2", "memory" 450 : "cc", "0", "1", "2", "memory"
397 ); 451 );
398#endif /* CONFIG_ARCH_S390X */ 452#endif /* CONFIG_64BIT */
399 453
400 (*bb) = busy_bit; 454 (*bb) = busy_bit;
401 return cc; 455 return cc;
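
Note: do_siga_output gains an explicit function-code argument, loaded into register 0 instead of the former hard-wired zero. Presumably this lets a QEBSM device be addressed by its subchannel token rather than the 0x10000|schid form, with the caller selecting the mode through fc; the exact encodings are architecture-defined, so the values below are placeholders. A sketch of the dispatch idea:

    #include <stdio.h>

    #define FC_SIGA_WRITE 0x00u             /* placeholder encodings */
    #define FC_USE_TOKEN  0x80u

    static void issue_siga_write(unsigned long target, unsigned int fc)
    {
            printf("SIGA-w fc=0x%02x target=0x%lx\n", fc, target);
    }

    int main(void)
    {
            unsigned long schid = 0x10234, sch_token = 0xfeedUL;
            int is_qebsm = 1;

            if (is_qebsm)                   /* address by token */
                    issue_siga_write(sch_token, FC_SIGA_WRITE | FC_USE_TOKEN);
            else                            /* classic addressing */
                    issue_siga_write(schid, FC_SIGA_WRITE);
            return 0;
    }
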
@@ -407,21 +461,21 @@ do_clear_global_summary(void)
407 461
408 unsigned long time; 462 unsigned long time;
409 463
410#ifndef CONFIG_ARCH_S390X 464#ifndef CONFIG_64BIT
411 asm volatile ( 465 asm volatile (
412 "lhi 1,3 \n\t" 466 "lhi 1,3 \n\t"
413 ".insn rre,0xb2650000,2,0 \n\t" 467 ".insn rre,0xb2650000,2,0 \n\t"
414 "lr %0,3 \n\t" 468 "lr %0,3 \n\t"
415 : "=d" (time) : : "cc", "1", "2", "3" 469 : "=d" (time) : : "cc", "1", "2", "3"
416 ); 470 );
417#else /* CONFIG_ARCH_S390X */ 471#else /* CONFIG_64BIT */
418 asm volatile ( 472 asm volatile (
419 "lghi 1,3 \n\t" 473 "lghi 1,3 \n\t"
420 ".insn rre,0xb2650000,2,0 \n\t" 474 ".insn rre,0xb2650000,2,0 \n\t"
421 "lgr %0,3 \n\t" 475 "lgr %0,3 \n\t"
422 : "=d" (time) : : "cc", "1", "2", "3" 476 : "=d" (time) : : "cc", "1", "2", "3"
423 ); 477 );
424#endif /* CONFIG_ARCH_S390X */ 478#endif /* CONFIG_64BIT */
425 479
426 return time; 480 return time;
427} 481}
@@ -488,42 +542,21 @@ struct qdio_perf_stats {
488 542
489#define MY_MODULE_STRING(x) #x 543#define MY_MODULE_STRING(x) #x
490 544
491#ifdef CONFIG_ARCH_S390X 545#ifdef CONFIG_64BIT
492#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) 546#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
493#else /* CONFIG_ARCH_S390X */ 547#else /* CONFIG_64BIT */
494#define QDIO_GET_ADDR(x) ((__u32)(long)x) 548#define QDIO_GET_ADDR(x) ((__u32)(long)x)
495#endif /* CONFIG_ARCH_S390X */ 549#endif /* CONFIG_64BIT */
496
497#ifdef CONFIG_QDIO_DEBUG
498#define set_slsb(x,y) \
499 if(q->queue_type==QDIO_TRACE_QTYPE) { \
500 if(q->is_input_q) { \
501 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
502 } else { \
503 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
504 } \
505 } \
506 qdio_set_slsb(x,y); \
507 if(q->queue_type==QDIO_TRACE_QTYPE) { \
508 if(q->is_input_q) { \
509 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
510 } else { \
511 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
512 } \
513 }
514#else /* CONFIG_QDIO_DEBUG */
515#define set_slsb(x,y) qdio_set_slsb(x,y)
516#endif /* CONFIG_QDIO_DEBUG */
517 550
518struct qdio_q { 551struct qdio_q {
519 volatile struct slsb slsb; 552 volatile struct slsb slsb;
520 553
521 char unused[QDIO_MAX_BUFFERS_PER_Q]; 554 char unused[QDIO_MAX_BUFFERS_PER_Q];
522 555
523 __u32 * volatile dev_st_chg_ind; 556 __u32 * dev_st_chg_ind;
524 557
525 int is_input_q; 558 int is_input_q;
526 int irq; 559 struct subchannel_id schid;
527 struct ccw_device *cdev; 560 struct ccw_device *cdev;
528 561
529 unsigned int is_iqdio_q; 562 unsigned int is_iqdio_q;
@@ -568,6 +601,7 @@ struct qdio_q {
568 struct tasklet_struct tasklet; 601 struct tasklet_struct tasklet;
569#endif /* QDIO_USE_TIMERS_FOR_POLLING */ 602#endif /* QDIO_USE_TIMERS_FOR_POLLING */
570 603
604
571 enum qdio_irq_states state; 605 enum qdio_irq_states state;
572 606
573 /* used to store the error condition during a data transfer */ 607 /* used to store the error condition during a data transfer */
@@ -617,13 +651,17 @@ struct qdio_irq {
617 __u32 * volatile dev_st_chg_ind; 651 __u32 * volatile dev_st_chg_ind;
618 652
619 unsigned long int_parm; 653 unsigned long int_parm;
620 int irq; 654 struct subchannel_id schid;
621 655
622 unsigned int is_iqdio_irq; 656 unsigned int is_iqdio_irq;
623 unsigned int is_thinint_irq; 657 unsigned int is_thinint_irq;
624 unsigned int hydra_gives_outbound_pcis; 658 unsigned int hydra_gives_outbound_pcis;
625 unsigned int sync_done_on_outb_pcis; 659 unsigned int sync_done_on_outb_pcis;
626 660
661 /* QEBSM facility */
662 unsigned int is_qebsm;
663 unsigned long sch_token;
664
627 enum qdio_irq_states state; 665 enum qdio_irq_states state;
628 666
629 unsigned int no_input_qs; 667 unsigned int no_input_qs;
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
new file mode 100644
index 000000000000..54328fec5ade
--- /dev/null
+++ b/drivers/s390/cio/schid.h
@@ -0,0 +1,26 @@
1#ifndef S390_SCHID_H
2#define S390_SCHID_H
3
4struct subchannel_id {
5 __u32 reserved:13;
6 __u32 ssid:2;
7 __u32 one:1;
8 __u32 sch_no:16;
9} __attribute__ ((packed,aligned(4)));
10
11
12/* Helper function for sane state of pre-allocated subchannel_id. */
13static inline void
14init_subchannel_id(struct subchannel_id *schid)
15{
16 memset(schid, 0, sizeof(struct subchannel_id));
17 schid->one = 1;
18}
19
20static inline int
21schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
22{
23 return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
24}
25
26#endif /* S390_SCHID_H */
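
Note: the new header makes the subchannel identifier a first-class type; everything that used to carry a bare int irq now carries ssid plus sch_no, which is what the "0.%x.%04x" message format used throughout this patch prints. A usage sketch that builds anywhere (struct copied from the header, __u32 replaced by a standard type):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct subchannel_id {
            uint32_t reserved:13;
            uint32_t ssid:2;
            uint32_t one:1;
            uint32_t sch_no:16;
    } __attribute__ ((packed, aligned(4)));

    int main(void)
    {
            struct subchannel_id a, b;

            memset(&a, 0, sizeof(a));
            a.one = 1;                      /* init_subchannel_id() */
            b = a;
            b.sch_no = 0x4711;
            printf("equal: %d\n", !memcmp(&a, &b, sizeof(a)));    /* 0 */
            printf("sch 0.%x.%04x\n", b.ssid, b.sch_no);  /* sch 0.0.4711 */
            return 0;
    }
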
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
index e319e78b5ea2..f87c785f2039 100644
--- a/drivers/s390/crypto/z90common.h
+++ b/drivers/s390/crypto/z90common.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90common.h 2 * linux/drivers/s390/crypto/z90common.h
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -91,12 +91,13 @@ enum hdstat {
91#define TSQ_FATAL_ERROR 34 91#define TSQ_FATAL_ERROR 34
92#define RSQ_FATAL_ERROR 35 92#define RSQ_FATAL_ERROR 35
93 93
94#define Z90CRYPT_NUM_TYPES 5 94#define Z90CRYPT_NUM_TYPES 6
95#define PCICA 0 95#define PCICA 0
96#define PCICC 1 96#define PCICC 1
97#define PCIXCC_MCL2 2 97#define PCIXCC_MCL2 2
98#define PCIXCC_MCL3 3 98#define PCIXCC_MCL3 3
99#define CEX2C 4 99#define CEX2C 4
100#define CEX2A 5
100#define NILDEV -1 101#define NILDEV -1
101#define ANYDEV -1 102#define ANYDEV -1
102#define PCIXCC_UNK -2 103#define PCIXCC_UNK -2
@@ -105,7 +106,7 @@ enum hdevice_type {
105 PCICC_HW = 3, 106 PCICC_HW = 3,
106 PCICA_HW = 4, 107 PCICA_HW = 4,
107 PCIXCC_HW = 5, 108 PCIXCC_HW = 5,
108 OTHER_HW = 6, 109 CEX2A_HW = 6,
109 CEX2C_HW = 7 110 CEX2C_HW = 7
110}; 111};
111 112
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
index 0a3bb5a10dd4..3a18443fdfa7 100644
--- a/drivers/s390/crypto/z90crypt.h
+++ b/drivers/s390/crypto/z90crypt.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90crypt.h 2 * linux/drivers/s390/crypto/z90crypt.h
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -29,11 +29,11 @@
29 29
30#include <linux/ioctl.h> 30#include <linux/ioctl.h>
31 31
32#define VERSION_Z90CRYPT_H "$Revision: 1.11 $" 32#define VERSION_Z90CRYPT_H "$Revision: 1.2.2.4 $"
33 33
34#define z90crypt_VERSION 1 34#define z90crypt_VERSION 1
35#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards 35#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
36#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support 36#define z90crypt_VARIANT 3 // 3 = CEX2A support
37 37
38/** 38/**
39 * struct ica_rsa_modexpo 39 * struct ica_rsa_modexpo
@@ -122,6 +122,9 @@ struct ica_rsa_modexpo_crt {
122 * Z90STAT_CEX2CCOUNT 122 * Z90STAT_CEX2CCOUNT
123 * Return an integer count of all CEX2Cs. 123 * Return an integer count of all CEX2Cs.
124 * 124 *
125 * Z90STAT_CEX2ACOUNT
126 * Return an integer count of all CEX2As.
127 *
125 * Z90STAT_REQUESTQ_COUNT 128 * Z90STAT_REQUESTQ_COUNT
126 * Return an integer count of the number of entries waiting to be 129 * Return an integer count of the number of entries waiting to be
127 * sent to a device. 130 * sent to a device.
@@ -144,6 +147,7 @@ struct ica_rsa_modexpo_crt {
144 * 0x03: PCIXCC_MCL2 147 * 0x03: PCIXCC_MCL2
145 * 0x04: PCIXCC_MCL3 148 * 0x04: PCIXCC_MCL3
146 * 0x05: CEX2C 149 * 0x05: CEX2C
150 * 0x06: CEX2A
147 * 0x0d: device is disabled via the proc filesystem 151 * 0x0d: device is disabled via the proc filesystem
148 * 152 *
149 * Z90STAT_QDEPTH_MASK 153 * Z90STAT_QDEPTH_MASK
@@ -199,6 +203,7 @@ struct ica_rsa_modexpo_crt {
199#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int) 203#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
200#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int) 204#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
201#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int) 205#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
206#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int)
202#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int) 207#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
203#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int) 208#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
204#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int) 209#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
index c215e0889736..d7f7494a0cbe 100644
--- a/drivers/s390/crypto/z90hardware.c
+++ b/drivers/s390/crypto/z90hardware.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90hardware.c 2 * linux/drivers/s390/crypto/z90hardware.c
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -648,6 +648,87 @@ static struct cca_public_sec static_cca_pub_sec = {
648#define RESPONSE_CPRB_SIZE 0x000006B8 648#define RESPONSE_CPRB_SIZE 0x000006B8
649#define RESPONSE_CPRBX_SIZE 0x00000724 649#define RESPONSE_CPRBX_SIZE 0x00000724
650 650
651struct type50_hdr {
652 u8 reserved1;
653 u8 msg_type_code;
654 u16 msg_len;
655 u8 reserved2;
656 u8 ignored;
657 u16 reserved3;
658};
659
660#define TYPE50_TYPE_CODE 0x50
661
662#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
663#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
664#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
665#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
666
667#define TYPE50_MEB1_FMT 0x0001
668#define TYPE50_MEB2_FMT 0x0002
669#define TYPE50_CRB1_FMT 0x0011
670#define TYPE50_CRB2_FMT 0x0012
671
672struct type50_meb1_msg {
673 struct type50_hdr header;
674 u16 keyblock_type;
675 u8 reserved[6];
676 u8 exponent[128];
677 u8 modulus[128];
678 u8 message[128];
679};
680
681struct type50_meb2_msg {
682 struct type50_hdr header;
683 u16 keyblock_type;
684 u8 reserved[6];
685 u8 exponent[256];
686 u8 modulus[256];
687 u8 message[256];
688};
689
690struct type50_crb1_msg {
691 struct type50_hdr header;
692 u16 keyblock_type;
693 u8 reserved[6];
694 u8 p[64];
695 u8 q[64];
696 u8 dp[64];
697 u8 dq[64];
698 u8 u[64];
699 u8 message[128];
700};
701
702struct type50_crb2_msg {
703 struct type50_hdr header;
704 u16 keyblock_type;
705 u8 reserved[6];
706 u8 p[128];
707 u8 q[128];
708 u8 dp[128];
709 u8 dq[128];
710 u8 u[128];
711 u8 message[256];
712};
713
714union type50_msg {
715 struct type50_meb1_msg meb1;
716 struct type50_meb2_msg meb2;
717 struct type50_crb1_msg crb1;
718 struct type50_crb2_msg crb2;
719};
720
721struct type80_hdr {
722 u8 reserved1;
723 u8 type;
724 u16 len;
725 u8 code;
726 u8 reserved2[3];
727 u8 reserved3[8];
728};
729
730#define TYPE80_RSP_CODE 0x80
731
651struct error_hdr { 732struct error_hdr {
652 unsigned char reserved1; 733 unsigned char reserved1;
653 unsigned char type; 734 unsigned char type;
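
Note: type50 is the request format the new CEX2A code speaks, and type80 is the matching reply. Modexpo requests pick the short or the long key block by modulus size: up to 128 bytes (1024-bit RSA) fits MEB1, anything larger uses MEB2, and the CRT variant makes an analogous split on its half-length operands. The selection rule, extracted from ICAMEX_msg_to_type50MEX_msg further down:

    #include <stdio.h>

    #define TYPE50_MEB1_FMT 0x0001          /* values from the hunk above */
    #define TYPE50_MEB2_FMT 0x0002

    static unsigned short meb_format(int mod_len)
    {
            return (mod_len <= 128) ? TYPE50_MEB1_FMT : TYPE50_MEB2_FMT;
    }

    int main(void)
    {
            printf("1024-bit -> 0x%04x\n", meb_format(128));   /* MEB1 */
            printf("2048-bit -> 0x%04x\n", meb_format(256));   /* MEB2 */
            return 0;
    }
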
@@ -657,6 +738,7 @@ struct error_hdr {
657}; 738};
658 739
659#define TYPE82_RSP_CODE 0x82 740#define TYPE82_RSP_CODE 0x82
741#define TYPE88_RSP_CODE 0x88
660 742
661#define REP82_ERROR_MACHINE_FAILURE 0x10 743#define REP82_ERROR_MACHINE_FAILURE 0x10
662#define REP82_ERROR_PREEMPT_FAILURE 0x12 744#define REP82_ERROR_PREEMPT_FAILURE 0x12
@@ -679,6 +761,22 @@ struct error_hdr {
679#define REP82_ERROR_PACKET_TRUNCATED 0xA0 761#define REP82_ERROR_PACKET_TRUNCATED 0xA0
680#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 762#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
681 763
764#define REP88_ERROR_MODULE_FAILURE 0x10
765#define REP88_ERROR_MODULE_TIMEOUT 0x11
766#define REP88_ERROR_MODULE_NOTINIT 0x13
767#define REP88_ERROR_MODULE_NOTAVAIL 0x14
768#define REP88_ERROR_MODULE_DISABLED 0x15
769#define REP88_ERROR_MODULE_IN_DIAGN 0x17
770#define REP88_ERROR_FASTPATH_DISABLD 0x19
771#define REP88_ERROR_MESSAGE_TYPE 0x20
772#define REP88_ERROR_MESSAGE_MALFORMD 0x22
773#define REP88_ERROR_MESSAGE_LENGTH 0x23
774#define REP88_ERROR_RESERVED_FIELD 0x24
775#define REP88_ERROR_KEY_TYPE 0x34
776#define REP88_ERROR_INVALID_KEY 0x82
777#define REP88_ERROR_OPERAND 0x84
778#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
779
682#define CALLER_HEADER 12 780#define CALLER_HEADER 12
683 781
684static inline int 782static inline int
@@ -687,7 +785,7 @@ testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
687 int ccode; 785 int ccode;
688 786
689 asm volatile 787 asm volatile
690#ifdef __s390x__ 788#ifdef CONFIG_64BIT
691 (" llgfr 0,%4 \n" 789 (" llgfr 0,%4 \n"
692 " slgr 1,1 \n" 790 " slgr 1,1 \n"
693 " lgr 2,1 \n" 791 " lgr 2,1 \n"
@@ -757,7 +855,7 @@ resetq(int q_nr, struct ap_status_word *stat_p)
757 int ccode; 855 int ccode;
758 856
759 asm volatile 857 asm volatile
760#ifdef __s390x__ 858#ifdef CONFIG_64BIT
761 (" llgfr 0,%2 \n" 859 (" llgfr 0,%2 \n"
762 " lghi 1,1 \n" 860 " lghi 1,1 \n"
763 " sll 1,24 \n" 861 " sll 1,24 \n"
@@ -823,7 +921,7 @@ sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
823 int ccode; 921 int ccode;
824 922
825 asm volatile 923 asm volatile
826#ifdef __s390x__ 924#ifdef CONFIG_64BIT
827 (" lgr 6,%3 \n" 925 (" lgr 6,%3 \n"
828 " llgfr 7,%2 \n" 926 " llgfr 7,%2 \n"
829 " llgt 0,0(6) \n" 927 " llgt 0,0(6) \n"
@@ -902,7 +1000,7 @@ rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
902 int ccode; 1000 int ccode;
903 1001
904 asm volatile 1002 asm volatile
905#ifdef __s390x__ 1003#ifdef CONFIG_64BIT
906 (" llgfr 0,%2 \n" 1004 (" llgfr 0,%2 \n"
907 " lgr 3,%4 \n" 1005 " lgr 3,%4 \n"
908 " lgr 6,%3 \n" 1006 " lgr 6,%3 \n"
@@ -1029,10 +1127,6 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1029 stat = HD_ONLINE; 1127 stat = HD_ONLINE;
1030 *q_depth = t_depth + 1; 1128 *q_depth = t_depth + 1;
1031 switch (t_dev_type) { 1129 switch (t_dev_type) {
1032 case OTHER_HW:
1033 stat = HD_NOT_THERE;
1034 *dev_type = NILDEV;
1035 break;
1036 case PCICA_HW: 1130 case PCICA_HW:
1037 *dev_type = PCICA; 1131 *dev_type = PCICA;
1038 break; 1132 break;
@@ -1045,6 +1139,9 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1045 case CEX2C_HW: 1139 case CEX2C_HW:
1046 *dev_type = CEX2C; 1140 *dev_type = CEX2C;
1047 break; 1141 break;
1142 case CEX2A_HW:
1143 *dev_type = CEX2A;
1144 break;
1048 default: 1145 default:
1049 *dev_type = NILDEV; 1146 *dev_type = NILDEV;
1050 break; 1147 break;
@@ -2029,6 +2126,177 @@ ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
2029 return 0; 2126 return 0;
2030} 2127}
2031 2128
2129static int
2130ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
2131 union type50_msg *z90cMsg_p)
2132{
2133 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
2134 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
2135 union type50_msg *tmp_type50_msg;
2136
2137 mod_len = icaMex_p->inputdatalength;
2138
2139 msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
2140 CALLER_HEADER;
2141
2142 memset(z90cMsg_p, 0, msg_size);
2143
2144 tmp_type50_msg = (union type50_msg *)
2145 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2146
2147 tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
2148
2149 if (mod_len <= 128) {
2150 tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
2151 tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
2152 mod_tgt = tmp_type50_msg->meb1.modulus;
2153 mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
2154 exp_tgt = tmp_type50_msg->meb1.exponent;
2155 exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
2156 inp_tgt = tmp_type50_msg->meb1.message;
2157 inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
2158 } else {
2159 tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
2160 tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
2161 mod_tgt = tmp_type50_msg->meb2.modulus;
2162 mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
2163 exp_tgt = tmp_type50_msg->meb2.exponent;
2164 exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
2165 inp_tgt = tmp_type50_msg->meb2.message;
2166 inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
2167 }
2168
2169 mod_tgt += (mod_tgt_len - mod_len);
2170 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
2171 return SEN_RELEASED;
2172 if (is_empty(mod_tgt, mod_len))
2173 return SEN_USER_ERROR;
2174 exp_tgt += (exp_tgt_len - mod_len);
2175 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
2176 return SEN_RELEASED;
2177 if (is_empty(exp_tgt, mod_len))
2178 return SEN_USER_ERROR;
2179 inp_tgt += (inp_tgt_len - mod_len);
2180 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
2181 return SEN_RELEASED;
2182 if (is_empty(inp_tgt, mod_len))
2183 return SEN_USER_ERROR;
2184
2185 *z90cMsg_l_p = msg_size - CALLER_HEADER;
2186
2187 return 0;
2188}
2189
2190static int
2191ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
2192 int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
2193{
2194 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
2195 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
2196 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
2197 temp[8];
2198 union type50_msg *tmp_type50_msg;
2199
2200 mod_len = icaMsg_p->inputdatalength;
2201 short_len = mod_len / 2;
2202 long_len = mod_len / 2 + 8;
2203 long_offset = 0;
2204
2205 if (long_len > 128) {
2206 memset(temp, 0x00, sizeof(temp));
2207 if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
2208 return SEN_RELEASED;
2209 if (!is_empty(temp, 8))
2210 return SEN_NOT_AVAIL;
2211 if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
2212 return SEN_RELEASED;
2213 if (!is_empty(temp, 8))
2214 return SEN_NOT_AVAIL;
2215 if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
2216 return SEN_RELEASED;
2217 if (!is_empty(temp, 8))
2218 return SEN_NOT_AVAIL;
2219 long_offset = long_len - 128;
2220 long_len = 128;
2221 }
2222
2223 tmp_size = ((mod_len <= 128) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
2224 CALLER_HEADER;
2225
2226 memset(z90cMsg_p, 0, tmp_size);
2227
2228 tmp_type50_msg = (union type50_msg *)
2229 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2230
2231 tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
2232 if (long_len <= 64) {
2233 tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
2234 tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
2235 p_tgt = tmp_type50_msg->crb1.p;
2236 p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
2237 q_tgt = tmp_type50_msg->crb1.q;
2238 q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
2239 dp_tgt = tmp_type50_msg->crb1.dp;
2240 dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
2241 dq_tgt = tmp_type50_msg->crb1.dq;
2242 dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
2243 u_tgt = tmp_type50_msg->crb1.u;
2244 u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
2245 inp_tgt = tmp_type50_msg->crb1.message;
2246 inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
2247 } else {
2248 tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
2249 tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
2250 p_tgt = tmp_type50_msg->crb2.p;
2251 p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
2252 q_tgt = tmp_type50_msg->crb2.q;
2253 q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
2254 dp_tgt = tmp_type50_msg->crb2.dp;
2255 dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
2256 dq_tgt = tmp_type50_msg->crb2.dq;
2257 dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
2258 u_tgt = tmp_type50_msg->crb2.u;
2259 u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
2260 inp_tgt = tmp_type50_msg->crb2.message;
2261 inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
2262 }
2263
2264 p_tgt += (p_tgt_len - long_len);
2265 if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
2266 return SEN_RELEASED;
2267 if (is_empty(p_tgt, long_len))
2268 return SEN_USER_ERROR;
2269 q_tgt += (q_tgt_len - short_len);
2270 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
2271 return SEN_RELEASED;
2272 if (is_empty(q_tgt, short_len))
2273 return SEN_USER_ERROR;
2274 dp_tgt += (dp_tgt_len - long_len);
2275 if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
2276 return SEN_RELEASED;
2277 if (is_empty(dp_tgt, long_len))
2278 return SEN_USER_ERROR;
2279 dq_tgt += (dq_tgt_len - short_len);
2280 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
2281 return SEN_RELEASED;
2282 if (is_empty(dq_tgt, short_len))
2283 return SEN_USER_ERROR;
2284 u_tgt += (u_tgt_len - long_len);
2285 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
2286 return SEN_RELEASED;
2287 if (is_empty(u_tgt, long_len))
2288 return SEN_USER_ERROR;
2289 inp_tgt += (inp_tgt_len - mod_len);
2290 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
2291 return SEN_RELEASED;
2292 if (is_empty(inp_tgt, mod_len))
2293 return SEN_USER_ERROR;
2294
2295 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2296
2297 return 0;
2298}
2299
2032int 2300int
2033convert_request(unsigned char *buffer, int func, unsigned short function, 2301convert_request(unsigned char *buffer, int func, unsigned short function,
2034 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p) 2302 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
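
Note: both type50 builders place every operand right-justified in its fixed-size field ("tgt += tgt_len - len" before the copy), so shorter keys arrive left-padded with zeros; the CRT variant additionally checks that the leading bytes of an over-long operand really are zero before clipping long_len to 128. The padding idiom in isolation:

    #include <stdio.h>
    #include <string.h>

    static void copy_right_justified(unsigned char *field, int field_len,
                                     const unsigned char *src, int src_len)
    {
            memset(field, 0, field_len);            /* zero left padding */
            memcpy(field + (field_len - src_len), src, src_len);
    }

    int main(void)
    {
            unsigned char modulus[8];
            const unsigned char key[4] = { 0xde, 0xad, 0xbe, 0xef };
            int i;

            copy_right_justified(modulus, sizeof(modulus), key, sizeof(key));
            for (i = 0; i < 8; i++)
                    printf("%02x", modulus[i]);     /* 00000000deadbeef */
            printf("\n");
            return 0;
    }
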
@@ -2071,6 +2339,16 @@ convert_request(unsigned char *buffer, int func, unsigned short function,
2071 cdx, msg_l_p, (struct type6_msg *) msg_p, 2339 cdx, msg_l_p, (struct type6_msg *) msg_p,
2072 dev_type); 2340 dev_type);
2073 } 2341 }
2342 if (dev_type == CEX2A) {
2343 if (func == ICARSACRT)
2344 return ICACRT_msg_to_type50CRT_msg(
2345 (struct ica_rsa_modexpo_crt *) buffer,
2346 msg_l_p, (union type50_msg *) msg_p);
2347 else
2348 return ICAMEX_msg_to_type50MEX_msg(
2349 (struct ica_rsa_modexpo *) buffer,
2350 msg_l_p, (union type50_msg *) msg_p);
2351 }
2074 2352
2075 return 0; 2353 return 0;
2076} 2354}
@@ -2081,8 +2359,8 @@ unset_ext_bitlens(void)
2081{ 2359{
2082 if (!ext_bitlens_msg_count) { 2360 if (!ext_bitlens_msg_count) {
2083 PRINTK("Unable to use coprocessors for extended bitlengths. " 2361 PRINTK("Unable to use coprocessors for extended bitlengths. "
2084 "Using PCICAs (if present) for extended bitlengths. " 2362 "Using PCICAs/CEX2As (if present) for extended "
2085 "This is not an error.\n"); 2363 "bitlengths. This is not an error.\n");
2086 ext_bitlens_msg_count++; 2364 ext_bitlens_msg_count++;
2087 } 2365 }
2088 ext_bitlens = 0; 2366 ext_bitlens = 0;
@@ -2094,6 +2372,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2094{ 2372{
2095 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer; 2373 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2096 struct error_hdr *errh_p = (struct error_hdr *) response; 2374 struct error_hdr *errh_p = (struct error_hdr *) response;
2375 struct type80_hdr *t80h_p = (struct type80_hdr *) response;
2097 struct type84_hdr *t84h_p = (struct type84_hdr *) response; 2376 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2098 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response; 2377 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2099 int reply_code, service_rc, service_rs, src_l; 2378 int reply_code, service_rc, service_rs, src_l;
@@ -2108,6 +2387,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2108 src_l = 0; 2387 src_l = 0;
2109 switch (errh_p->type) { 2388 switch (errh_p->type) {
2110 case TYPE82_RSP_CODE: 2389 case TYPE82_RSP_CODE:
2390 case TYPE88_RSP_CODE:
2111 reply_code = errh_p->reply_code; 2391 reply_code = errh_p->reply_code;
2112 src_p = (unsigned char *)errh_p; 2392 src_p = (unsigned char *)errh_p;
2113 PRINTK("Hardware error: Type %02X Message Header: " 2393 PRINTK("Hardware error: Type %02X Message Header: "
@@ -2116,6 +2396,10 @@ convert_response(unsigned char *response, unsigned char *buffer,
2116 src_p[0], src_p[1], src_p[2], src_p[3], 2396 src_p[0], src_p[1], src_p[2], src_p[3],
2117 src_p[4], src_p[5], src_p[6], src_p[7]); 2397 src_p[4], src_p[5], src_p[6], src_p[7]);
2118 break; 2398 break;
2399 case TYPE80_RSP_CODE:
2400 src_l = icaMsg_p->outputdatalength;
2401 src_p = response + (int)t80h_p->len - src_l;
2402 break;
2119 case TYPE84_RSP_CODE: 2403 case TYPE84_RSP_CODE:
2120 src_l = icaMsg_p->outputdatalength; 2404 src_l = icaMsg_p->outputdatalength;
2121 src_p = response + (int)t84h_p->len - src_l; 2405 src_p = response + (int)t84h_p->len - src_l;
@@ -2202,6 +2486,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2202 if (reply_code) 2486 if (reply_code)
2203 switch (reply_code) { 2487 switch (reply_code) {
2204 case REP82_ERROR_OPERAND_INVALID: 2488 case REP82_ERROR_OPERAND_INVALID:
2489 case REP88_ERROR_MESSAGE_MALFORMD:
2205 return REC_OPERAND_INV; 2490 return REC_OPERAND_INV;
2206 case REP82_ERROR_OPERAND_SIZE: 2491 case REP82_ERROR_OPERAND_SIZE:
2207 return REC_OPERAND_SIZE; 2492 return REC_OPERAND_SIZE;
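[Editor's note] On the response side the same right-justification applies: a type80 reply of t80h_p->len bytes carries the result at its tail, so the payload starts at len - outputdatalength. A hedged sketch with a made-up 16-byte frame:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Pretend reply frame: a few header bytes, zero padding,
	 * then the 4-byte result right-justified at the tail. */
	unsigned char response[16] = {
		0x80, 0x00, 16, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		0xCA, 0xFE, 0xBA, 0xBE
	};
	int frame_len = 16, out_len = 4, i;
	unsigned char out[4];

	memcpy(out, response + frame_len - out_len, out_len);
	for (i = 0; i < out_len; i++)
		printf("%02X", out[i]);
	printf("\n");			/* prints CAFEBABE */
	return 0;
}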
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 790fcbb74b43..135ae04e6e75 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -228,7 +228,7 @@ struct device_x {
228 */ 228 */
229struct device { 229struct device {
230 int dev_type; // PCICA, PCICC, PCIXCC_MCL2, 230 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
231 // PCIXCC_MCL3, CEX2C 231 // PCIXCC_MCL3, CEX2C, CEX2A
232 enum devstat dev_stat; // current device status 232 enum devstat dev_stat; // current device status
233 int dev_self_x; // Index in array 233 int dev_self_x; // Index in array
234 int disabled; // Set when device is in error 234 int disabled; // Set when device is in error
@@ -295,26 +295,30 @@ struct caller {
295/** 295/**
296 * Function prototypes from z90hardware.c 296 * Function prototypes from z90hardware.c
297 */ 297 */
298enum hdstat query_online(int, int, int, int *, int *); 298enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
299enum devstat reset_device(int, int, int); 299 int *dev_type);
300enum devstat send_to_AP(int, int, int, unsigned char *); 300enum devstat reset_device(int deviceNr, int cdx, int resetNr);
301enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *); 301enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
302int convert_request(unsigned char *, int, short, int, int, int *, 302enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
303 unsigned char *); 303 unsigned char *resp, unsigned char *psmid);
304int convert_response(unsigned char *, unsigned char *, int *, unsigned char *); 304int convert_request(unsigned char *buffer, int func, unsigned short function,
305 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
306int convert_response(unsigned char *response, unsigned char *buffer,
307 int *respbufflen_p, unsigned char *resp_buff);
305 308
306/** 309/**
307 * Low level function prototypes 310 * Low level function prototypes
308 */ 311 */
309static int create_z90crypt(int *); 312static int create_z90crypt(int *cdx_p);
310static int refresh_z90crypt(int *); 313static int refresh_z90crypt(int *cdx_p);
311static int find_crypto_devices(struct status *); 314static int find_crypto_devices(struct status *deviceMask);
312static int create_crypto_device(int); 315static int create_crypto_device(int index);
313static int destroy_crypto_device(int); 316static int destroy_crypto_device(int index);
314static void destroy_z90crypt(void); 317static void destroy_z90crypt(void);
315static int refresh_index_array(struct status *, struct device_x *); 318static int refresh_index_array(struct status *status_str,
316static int probe_device_type(struct device *); 319 struct device_x *index_array);
317static int probe_PCIXCC_type(struct device *); 320static int probe_device_type(struct device *devPtr);
321static int probe_PCIXCC_type(struct device *devPtr);
318 322
319/** 323/**
320 * proc fs definitions 324 * proc fs definitions
@@ -425,7 +429,7 @@ static struct miscdevice z90crypt_misc_device = {
425MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman" 429MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
426 "and Jochen Roehrig"); 430 "and Jochen Roehrig");
427MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, " 431MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
428 "Copyright 2001, 2004 IBM Corporation"); 432 "Copyright 2001, 2005 IBM Corporation");
429MODULE_LICENSE("GPL"); 433MODULE_LICENSE("GPL");
430module_param(domain, int, 0); 434module_param(domain, int, 0);
431MODULE_PARM_DESC(domain, "domain index for device"); 435MODULE_PARM_DESC(domain, "domain index for device");
@@ -860,6 +864,12 @@ get_status_CEX2Ccount(void)
860} 864}
861 865
862static inline int 866static inline int
867get_status_CEX2Acount(void)
868{
869 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
870}
871
872static inline int
863get_status_requestq_count(void) 873get_status_requestq_count(void)
864{ 874{
865 return requestq_count; 875 return requestq_count;
@@ -1008,11 +1018,13 @@ static inline int
1008select_device_type(int *dev_type_p, int bytelength) 1018select_device_type(int *dev_type_p, int bytelength)
1009{ 1019{
1010 static int count = 0; 1020 static int count = 0;
1011 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use; 1021 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
1022 index_to_use;
1012 struct status *stat; 1023 struct status *stat;
1013 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) && 1024 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1014 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) && 1025 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1015 (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV)) 1026 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
1027 (*dev_type_p != ANYDEV))
1016 return -1; 1028 return -1;
1017 if (*dev_type_p != ANYDEV) { 1029 if (*dev_type_p != ANYDEV) {
1018 stat = &z90crypt.hdware_info->type_mask[*dev_type_p]; 1030 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
@@ -1022,7 +1034,13 @@ select_device_type(int *dev_type_p, int bytelength)
1022 return -1; 1034 return -1;
1023 } 1035 }
1024 1036
1025 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */ 1037 /**
1038 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
1039 * speed.
1040 *
 1041 * PCICA and CEX2A do NOT co-exist, so at most one of the two will
 1042 * be present.
1043 */
1026 stat = &z90crypt.hdware_info->type_mask[PCICA]; 1044 stat = &z90crypt.hdware_info->type_mask[PCICA];
1027 PCICA_avail = stat->st_count - 1045 PCICA_avail = stat->st_count -
1028 (stat->disabled_count + stat->user_disabled_count); 1046 (stat->disabled_count + stat->user_disabled_count);
@@ -1032,29 +1050,38 @@ select_device_type(int *dev_type_p, int bytelength)
1032 stat = &z90crypt.hdware_info->type_mask[CEX2C]; 1050 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1033 CEX2C_avail = stat->st_count - 1051 CEX2C_avail = stat->st_count -
1034 (stat->disabled_count + stat->user_disabled_count); 1052 (stat->disabled_count + stat->user_disabled_count);
1035 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) { 1053 stat = &z90crypt.hdware_info->type_mask[CEX2A];
1054 CEX2A_avail = stat->st_count -
1055 (stat->disabled_count + stat->user_disabled_count);
1056 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
1036 /** 1057 /**
1037 * bitlength is a factor, PCICA is the most capable, even with 1058 * bitlength is a factor, PCICA or CEX2A are the most capable,
1038 * the new MCL for PCIXCC. 1059 * even with the new MCL for PCIXCC.
1039 */ 1060 */
1040 if ((bytelength < PCIXCC_MIN_MOD_SIZE) || 1061 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1041 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { 1062 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1042 if (!PCICA_avail) 1063 if (PCICA_avail) {
1043 return -1;
1044 else {
1045 *dev_type_p = PCICA; 1064 *dev_type_p = PCICA;
1046 return 0; 1065 return 0;
1047 } 1066 }
1067 if (CEX2A_avail) {
1068 *dev_type_p = CEX2A;
1069 return 0;
1070 }
1071 return -1;
1048 } 1072 }
1049 1073
1050 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + 1074 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1051 CEX2C_avail); 1075 CEX2C_avail + CEX2A_avail);
1052 if (index_to_use < PCICA_avail) 1076 if (index_to_use < PCICA_avail)
1053 *dev_type_p = PCICA; 1077 *dev_type_p = PCICA;
1054 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail)) 1078 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1055 *dev_type_p = PCIXCC_MCL3; 1079 *dev_type_p = PCIXCC_MCL3;
1056 else 1080 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
1081 CEX2C_avail))
1057 *dev_type_p = CEX2C; 1082 *dev_type_p = CEX2C;
1083 else
1084 *dev_type_p = CEX2A;
1058 count++; 1085 count++;
1059 return 0; 1086 return 0;
1060 } 1087 }
@@ -1359,7 +1386,7 @@ build_caller(struct work_element *we_p, short function)
1359 1386
1360 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) && 1387 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1361 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1388 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1362 (we_p->devtype != CEX2C)) 1389 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
1363 return SEN_NOT_AVAIL; 1390 return SEN_NOT_AVAIL;
1364 1391
1365 memcpy(caller_p->caller_id, we_p->caller_id, 1392 memcpy(caller_p->caller_id, we_p->caller_id,
@@ -1428,7 +1455,8 @@ get_crypto_request_buffer(struct work_element *we_p)
1428 1455
1429 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) && 1456 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1430 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1457 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1431 (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) { 1458 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
1459 (we_p->devtype != ANYDEV)) {
1432 PRINTK("invalid device type\n"); 1460 PRINTK("invalid device type\n");
1433 return SEN_USER_ERROR; 1461 return SEN_USER_ERROR;
1434 } 1462 }
@@ -1503,8 +1531,9 @@ get_crypto_request_buffer(struct work_element *we_p)
1503 1531
1504 function = PCI_FUNC_KEY_ENCRYPT; 1532 function = PCI_FUNC_KEY_ENCRYPT;
1505 switch (we_p->devtype) { 1533 switch (we_p->devtype) {
1506 /* PCICA does everything with a simple RSA mod-expo operation */ 1534 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
1507 case PCICA: 1535 case PCICA:
1536 case CEX2A:
1508 function = PCI_FUNC_KEY_ENCRYPT; 1537 function = PCI_FUNC_KEY_ENCRYPT;
1509 break; 1538 break;
1510 /** 1539 /**
@@ -1662,7 +1691,8 @@ z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1662 * trigger a fallback to software. 1691 * trigger a fallback to software.
1663 */ 1692 */
1664 case -EINVAL: 1693 case -EINVAL:
1665 if (we_p->devtype != PCICA) 1694 if ((we_p->devtype != PCICA) &&
1695 (we_p->devtype != CEX2A))
1666 rv = -EGETBUFF; 1696 rv = -EGETBUFF;
1667 break; 1697 break;
1668 case -ETIMEOUT: 1698 case -ETIMEOUT:
@@ -1779,6 +1809,12 @@ z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1779 ret = -EFAULT; 1809 ret = -EFAULT;
1780 break; 1810 break;
1781 1811
1812 case Z90STAT_CEX2ACOUNT:
1813 tempstat = get_status_CEX2Acount();
1814 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1815 ret = -EFAULT;
1816 break;
1817
1782 case Z90STAT_REQUESTQ_COUNT: 1818 case Z90STAT_REQUESTQ_COUNT:
1783 tempstat = get_status_requestq_count(); 1819 tempstat = get_status_requestq_count();
1784 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1820 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
@@ -2019,6 +2055,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
2019 get_status_PCIXCCMCL3count()); 2055 get_status_PCIXCCMCL3count());
2020 len += sprintf(resp_buff+len, "CEX2C count: %d\n", 2056 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2021 get_status_CEX2Ccount()); 2057 get_status_CEX2Ccount());
2058 len += sprintf(resp_buff+len, "CEX2A count: %d\n",
2059 get_status_CEX2Acount());
2022 len += sprintf(resp_buff+len, "requestq count: %d\n", 2060 len += sprintf(resp_buff+len, "requestq count: %d\n",
2023 get_status_requestq_count()); 2061 get_status_requestq_count());
2024 len += sprintf(resp_buff+len, "pendingq count: %d\n", 2062 len += sprintf(resp_buff+len, "pendingq count: %d\n",
@@ -2026,8 +2064,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
2026 len += sprintf(resp_buff+len, "Total open handles: %d\n\n", 2064 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2027 get_status_totalopen_count()); 2065 get_status_totalopen_count());
2028 len += sprinthx( 2066 len += sprinthx(
2029 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), " 2067 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
2030 "4: PCIXCC (MCL3), 5: CEX2C", 2068 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
2031 resp_buff+len, 2069 resp_buff+len,
2032 get_status_status_mask(workarea), 2070 get_status_status_mask(workarea),
2033 Z90CRYPT_NUM_APS); 2071 Z90CRYPT_NUM_APS);
@@ -2140,6 +2178,7 @@ z90crypt_status_write(struct file *file, const char __user *buffer,
2140 case '3': // PCIXCC_MCL2 2178 case '3': // PCIXCC_MCL2
2141 case '4': // PCIXCC_MCL3 2179 case '4': // PCIXCC_MCL3
2142 case '5': // CEX2C 2180 case '5': // CEX2C
2181 case '6': // CEX2A
2143 j++; 2182 j++;
2144 break; 2183 break;
2145 case 'd': 2184 case 'd':
@@ -3007,7 +3046,9 @@ create_crypto_device(int index)
3007 z90crypt.hdware_info->device_type_array[index] = 4; 3046 z90crypt.hdware_info->device_type_array[index] = 4;
3008 else if (deviceType == CEX2C) 3047 else if (deviceType == CEX2C)
3009 z90crypt.hdware_info->device_type_array[index] = 5; 3048 z90crypt.hdware_info->device_type_array[index] = 5;
3010 else 3049 else if (deviceType == CEX2A)
3050 z90crypt.hdware_info->device_type_array[index] = 6;
3051 else // No idea how this would happen.
3011 z90crypt.hdware_info->device_type_array[index] = -1; 3052 z90crypt.hdware_info->device_type_array[index] = -1;
3012 } 3053 }
3013 3054
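[Editor's note] select_device_type() now spreads work across four roughly equal device types by taking a running counter modulo the total availability. A standalone sketch of that round-robin split (the counts passed in main() are invented; the kernel additionally special-cases short moduli as shown in the hunk above):

#include <stdio.h>

enum { PCICA, PCIXCC_MCL3, CEX2C, CEX2A };

static int pick(int pcica, int mcl3, int cex2c, int cex2a,
		unsigned int *counter)
{
	int total = pcica + mcl3 + cex2c + cex2a;	/* caller ensures > 0 */
	int idx = (int)((*counter)++ % total);

	if (idx < pcica)
		return PCICA;
	if (idx < pcica + mcl3)
		return PCIXCC_MCL3;
	if (idx < pcica + mcl3 + cex2c)
		return CEX2C;
	return CEX2A;
}

int main(void)
{
	unsigned int counter = 0;
	int i;

	for (i = 0; i < 6; i++)
		printf("%d ", pick(1, 2, 1, 2, &counter));
	printf("\n");	/* prints 0 1 1 2 3 3: interleaved in proportion */
	return 0;
}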
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a7efc394515e..548854754921 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -1,5 +1,5 @@
1menu "S/390 network device drivers" 1menu "S/390 network device drivers"
2 depends on NETDEVICES && ARCH_S390 2 depends on NETDEVICES && S390
3 3
4config LCS 4config LCS
5 tristate "Lan Channel Station Interface" 5 tristate "Lan Channel Station Interface"
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 6b63d21612ec..e70af7f39946 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1603,7 +1603,7 @@ dumpit(char* buf, int len)
1603 __u32 ct, sw, rm, dup; 1603 __u32 ct, sw, rm, dup;
1604 char *ptr, *rptr; 1604 char *ptr, *rptr;
1605 char tbuf[82], tdup[82]; 1605 char tbuf[82], tdup[82];
1606#if (CONFIG_ARCH_S390X) 1606#if (CONFIG_64BIT)
1607 char addr[22]; 1607 char addr[22];
1608#else 1608#else
1609 char addr[12]; 1609 char addr[12];
@@ -1619,7 +1619,7 @@ dumpit(char* buf, int len)
1619 dup = 0; 1619 dup = 0;
1620 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) { 1620 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) {
1621 if (sw == 0) { 1621 if (sw == 0) {
1622#if (CONFIG_ARCH_S390X) 1622#if (CONFIG_64BIT)
1623 sprintf(addr, "%16.16lX",(unsigned long)rptr); 1623 sprintf(addr, "%16.16lX",(unsigned long)rptr);
1624#else 1624#else
1625 sprintf(addr, "%8.8X",(__u32)rptr); 1625 sprintf(addr, "%8.8X",(__u32)rptr);
@@ -1634,7 +1634,7 @@ dumpit(char* buf, int len)
1634 if (sw == 8) { 1634 if (sw == 8) {
1635 strcat(bhex, " "); 1635 strcat(bhex, " ");
1636 } 1636 }
1637#if (CONFIG_ARCH_S390X) 1637#if (CONFIG_64BIT)
1638 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr); 1638 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
1639#else 1639#else
1640 sprintf(tbuf,"%2.2X", (__u32)*ptr); 1640 sprintf(tbuf,"%2.2X", (__u32)*ptr);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 0075894c71db..77dacb465732 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $ 2 * $Id: cu3088.c,v 1.36 2005/10/25 14:37:17 cohuck Exp $
3 * 3 *
4 * CTC / LCS ccw_device driver 4 * CTC / LCS ccw_device driver
5 * 5 *
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/err.h> 28#include <linux/err.h>
29 29
30#include <asm/s390_rdev.h>
30#include <asm/ccwdev.h> 31#include <asm/ccwdev.h>
31#include <asm/ccwgroup.h> 32#include <asm/ccwgroup.h>
32 33
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index df7647c3c100..ea8177392564 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $ 2 * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $
3 * 3 *
4 * IUCV network driver 4 * IUCV network driver
5 * 5 *
@@ -29,7 +29,7 @@
29 * along with this program; if not, write to the Free Software 29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 * 31 *
32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $ 32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $
33 * 33 *
34 */ 34 */
35 35
@@ -54,7 +54,7 @@
54#include <asm/s390_ext.h> 54#include <asm/s390_ext.h>
55#include <asm/ebcdic.h> 55#include <asm/ebcdic.h>
56#include <asm/smp.h> 56#include <asm/smp.h>
57#include <asm/ccwdev.h> //for root device stuff 57#include <asm/s390_rdev.h>
58 58
59/* FLAGS: 59/* FLAGS:
60 * All flags are defined in the field IPFLAGS1 of each function 60 * All flags are defined in the field IPFLAGS1 of each function
@@ -355,7 +355,7 @@ do { \
355static void 355static void
356iucv_banner(void) 356iucv_banner(void)
357{ 357{
358 char vbuf[] = "$Revision: 1.45 $"; 358 char vbuf[] = "$Revision: 1.47 $";
359 char *version = vbuf; 359 char *version = vbuf;
360 360
361 if ((version = strchr(version, ':'))) { 361 if ((version = strchr(version, ':'))) {
@@ -477,7 +477,7 @@ grab_param(void)
477 ptr++; 477 ptr++;
478 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE) 478 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
479 ptr = iucv_param_pool; 479 ptr = iucv_param_pool;
480 } while (atomic_compare_and_swap(0, 1, &ptr->in_use)); 480 } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
481 hint = ptr - iucv_param_pool; 481 hint = ptr - iucv_param_pool;
482 482
483 memset(&ptr->param, 0, sizeof(ptr->param)); 483 memset(&ptr->param, 0, sizeof(ptr->param));
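[Editor's note] The atomic_compare_and_swap() calls are being converted to atomic_cmpxchg(), which takes (ptr, old, new) and returns the value previously in *ptr; success therefore means the return value equals 'old', hence the new "!= 0" tests. A userspace sketch using the GCC builtin as a stand-in for the kernel primitive:

#include <stdio.h>

int main(void)
{
	int in_use = 0;

	/* Claim the slot: succeed only if the old value was 0. */
	while (__sync_val_compare_and_swap(&in_use, 0, 1) != 0)
		;	/* held by someone else: spin or try the next slot */

	printf("slot claimed, in_use=%d\n", in_use);	/* prints 1 */
	return 0;
}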
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index f8f55cc468ba..97f927c01a82 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -65,6 +65,7 @@
65#include <asm/timex.h> 65#include <asm/timex.h>
66#include <asm/semaphore.h> 66#include <asm/semaphore.h>
67#include <asm/uaccess.h> 67#include <asm/uaccess.h>
68#include <asm/s390_rdev.h>
68 69
69#include "qeth.h" 70#include "qeth.h"
70#include "qeth_mpc.h" 71#include "qeth_mpc.h"
@@ -1396,7 +1397,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel,
1396 channel->ccw.cda = (__u32) __pa(iob->data); 1397 channel->ccw.cda = (__u32) __pa(iob->data);
1397 1398
1398 wait_event(card->wait_q, 1399 wait_event(card->wait_q,
1399 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); 1400 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1400 QETH_DBF_TEXT(setup, 6, "noirqpnd"); 1401 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1401 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1402 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1402 rc = ccw_device_start(channel->ccwdev, 1403 rc = ccw_device_start(channel->ccwdev,
@@ -1463,7 +1464,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel,
1463 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); 1464 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1464 1465
1465 wait_event(card->wait_q, 1466 wait_event(card->wait_q,
1466 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); 1467 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1467 QETH_DBF_TEXT(setup, 6, "noirqpnd"); 1468 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1468 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1469 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1469 rc = ccw_device_start(channel->ccwdev, 1470 rc = ccw_device_start(channel->ccwdev,
@@ -1616,7 +1617,7 @@ qeth_issue_next_read(struct qeth_card *card)
1616 } 1617 }
1617 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); 1618 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1618 wait_event(card->wait_q, 1619 wait_event(card->wait_q,
1619 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0); 1620 atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
1620 QETH_DBF_TEXT(trace, 6, "noirqpnd"); 1621 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1621 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, 1622 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1622 (addr_t) iob, 0, 0); 1623 (addr_t) iob, 0, 0);
@@ -1882,7 +1883,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
1882 spin_unlock_irqrestore(&card->lock, flags); 1883 spin_unlock_irqrestore(&card->lock, flags);
1883 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); 1884 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1884 wait_event(card->wait_q, 1885 wait_event(card->wait_q,
1885 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); 1886 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1886 qeth_prepare_control_data(card, len, iob); 1887 qeth_prepare_control_data(card, len, iob);
1887 if (IS_IPA(iob->data)) 1888 if (IS_IPA(iob->data))
1888 timer.expires = jiffies + QETH_IPA_TIMEOUT; 1889 timer.expires = jiffies + QETH_IPA_TIMEOUT;
@@ -1924,7 +1925,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len,
1924 QETH_DBF_TEXT(trace, 5, "osndctrd"); 1925 QETH_DBF_TEXT(trace, 5, "osndctrd");
1925 1926
1926 wait_event(card->wait_q, 1927 wait_event(card->wait_q,
1927 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); 1928 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1928 qeth_prepare_control_data(card, len, iob); 1929 qeth_prepare_control_data(card, len, iob);
1929 QETH_DBF_TEXT(trace, 6, "osnoirqp"); 1930 QETH_DBF_TEXT(trace, 6, "osnoirqp");
1930 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1931 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -4236,9 +4237,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4236 QETH_DBF_TEXT(trace, 6, "dosndpfa"); 4237 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4237 4238
4238 /* spin until we get the queue ... */ 4239 /* spin until we get the queue ... */
4239 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, 4240 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4240 QETH_OUT_Q_LOCKED, 4241 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4241 &queue->state));
4242 /* ... now we've got the queue */ 4242 /* ... now we've got the queue */
4243 index = queue->next_buf_to_fill; 4243 index = queue->next_buf_to_fill;
4244 buffer = &queue->bufs[queue->next_buf_to_fill]; 4244 buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -4292,9 +4292,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4292 QETH_DBF_TEXT(trace, 6, "dosndpkt"); 4292 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4293 4293
4294 /* spin until we get the queue ... */ 4294 /* spin until we get the queue ... */
4295 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, 4295 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4296 QETH_OUT_Q_LOCKED, 4296 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4297 &queue->state));
4298 start_index = queue->next_buf_to_fill; 4297 start_index = queue->next_buf_to_fill;
4299 buffer = &queue->bufs[queue->next_buf_to_fill]; 4298 buffer = &queue->bufs[queue->next_buf_to_fill];
4300 /* 4299 /*
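[Editor's note] The queue-lock spins in qeth follow the same conversion: loop until the returned old value is QETH_OUT_Q_UNLOCKED, i.e. until this CPU is the one that flipped the state. A minimal stand-in, again via the GCC builtin:

#include <stdio.h>

#define QETH_OUT_Q_UNLOCKED 0
#define QETH_OUT_Q_LOCKED   1

int main(void)
{
	int state = QETH_OUT_Q_UNLOCKED;

	while (__sync_val_compare_and_swap(&state, QETH_OUT_Q_UNLOCKED,
					   QETH_OUT_Q_LOCKED)
	       != QETH_OUT_Q_UNLOCKED)
		;	/* spin until we observed the unlocked value */

	printf("queue locked, state=%d\n", state);
	return 0;
}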
diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c
new file mode 100644
index 000000000000..566cc3d185b6
--- /dev/null
+++ b/drivers/s390/s390_rdev.c
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/s390_rdev.c
3 * s390 root device
4 * $Revision: 1.2 $
5 *
6 * Copyright (C) 2002, 2005 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Cornelia Huck (cohuck@de.ibm.com)
9 * Carsten Otte (cotte@de.ibm.com)
10 */
11
12#include <linux/slab.h>
13#include <linux/err.h>
14#include <linux/device.h>
15#include <asm/s390_rdev.h>
16
17static void
18s390_root_dev_release(struct device *dev)
19{
20 kfree(dev);
21}
22
23struct device *
24s390_root_dev_register(const char *name)
25{
26 struct device *dev;
27 int ret;
28
29 if (!strlen(name))
30 return ERR_PTR(-EINVAL);
31 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
32 if (!dev)
33 return ERR_PTR(-ENOMEM);
34 memset(dev, 0, sizeof(struct device));
35 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
36 dev->release = s390_root_dev_release;
37 ret = device_register(dev);
38 if (ret) {
39 kfree(dev);
40 return ERR_PTR(ret);
41 }
42 return dev;
43}
44
45void
46s390_root_dev_unregister(struct device *dev)
47{
48 if (dev)
49 device_unregister(dev);
50}
51
52EXPORT_SYMBOL(s390_root_dev_register);
53EXPORT_SYMBOL(s390_root_dev_unregister);
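[Editor's note] A hedged usage sketch for the new root-device helpers; the driver name and the mydrv_* functions are invented, and error handling is trimmed to the minimum:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/s390_rdev.h>

static struct device *my_root_dev;	/* invented */

static int __init mydrv_init(void)
{
	my_root_dev = s390_root_dev_register("mydrv");
	if (IS_ERR(my_root_dev))
		return PTR_ERR(my_root_dev);	/* -EINVAL, -ENOMEM, ... */
	return 0;
}

static void __exit mydrv_exit(void)
{
	s390_root_dev_unregister(my_root_dev);
}

module_init(mydrv_init);
module_exit(mydrv_exit);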
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 4191fd9d4d11..3bf466603512 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -23,7 +23,7 @@
23 23
24static struct semaphore m_sem; 24static struct semaphore m_sem;
25 25
26extern int css_process_crw(int); 26extern int css_process_crw(int, int);
27extern int chsc_process_crw(void); 27extern int chsc_process_crw(void);
28extern int chp_process_crw(int, int); 28extern int chp_process_crw(int, int);
29extern void css_reiterate_subchannels(void); 29extern void css_reiterate_subchannels(void);
@@ -49,9 +49,10 @@ s390_handle_damage(char *msg)
49static int 49static int
50s390_collect_crw_info(void *param) 50s390_collect_crw_info(void *param)
51{ 51{
52 struct crw crw; 52 struct crw crw[2];
53 int ccode, ret, slow; 53 int ccode, ret, slow;
54 struct semaphore *sem; 54 struct semaphore *sem;
55 unsigned int chain;
55 56
56 sem = (struct semaphore *)param; 57 sem = (struct semaphore *)param;
57 /* Set a nice name. */ 58 /* Set a nice name. */
@@ -59,25 +60,50 @@ s390_collect_crw_info(void *param)
59repeat: 60repeat:
60 down_interruptible(sem); 61 down_interruptible(sem);
61 slow = 0; 62 slow = 0;
63 chain = 0;
62 while (1) { 64 while (1) {
63 ccode = stcrw(&crw); 65 if (unlikely(chain > 1)) {
66 struct crw tmp_crw;
67
68 printk(KERN_WARNING"%s: Code does not support more "
69 "than two chained crws; please report to "
70 "linux390@de.ibm.com!\n", __FUNCTION__);
71 ccode = stcrw(&tmp_crw);
72 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
73 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
74 __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
75 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
76 tmp_crw.erc, tmp_crw.rsid);
77 printk(KERN_WARNING"%s: This was crw number %x in the "
78 "chain\n", __FUNCTION__, chain);
79 if (ccode != 0)
80 break;
81 chain = tmp_crw.chn ? chain + 1 : 0;
82 continue;
83 }
84 ccode = stcrw(&crw[chain]);
64 if (ccode != 0) 85 if (ccode != 0)
65 break; 86 break;
66 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " 87 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
67 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 88 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
68 crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc, 89 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
69 crw.erc, crw.rsid); 90 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
91 crw[chain].rsid);
70 /* Check for overflows. */ 92 /* Check for overflows. */
71 if (crw.oflw) { 93 if (crw[chain].oflw) {
72 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 94 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
73 css_reiterate_subchannels(); 95 css_reiterate_subchannels();
96 chain = 0;
74 slow = 1; 97 slow = 1;
75 continue; 98 continue;
76 } 99 }
77 switch (crw.rsc) { 100 switch (crw[chain].rsc) {
78 case CRW_RSC_SCH: 101 case CRW_RSC_SCH:
79 pr_debug("source is subchannel %04X\n", crw.rsid); 102 if (crw[0].chn && !chain)
80 ret = css_process_crw (crw.rsid); 103 break;
104 pr_debug("source is subchannel %04X\n", crw[0].rsid);
105 ret = css_process_crw (crw[0].rsid,
106 chain ? crw[1].rsid : 0);
81 if (ret == -EAGAIN) 107 if (ret == -EAGAIN)
82 slow = 1; 108 slow = 1;
83 break; 109 break;
@@ -85,18 +111,18 @@ repeat:
85 pr_debug("source is monitoring facility\n"); 111 pr_debug("source is monitoring facility\n");
86 break; 112 break;
87 case CRW_RSC_CPATH: 113 case CRW_RSC_CPATH:
88 pr_debug("source is channel path %02X\n", crw.rsid); 114 pr_debug("source is channel path %02X\n", crw[0].rsid);
89 switch (crw.erc) { 115 switch (crw[0].erc) {
90 case CRW_ERC_IPARM: /* Path has come. */ 116 case CRW_ERC_IPARM: /* Path has come. */
91 ret = chp_process_crw(crw.rsid, 1); 117 ret = chp_process_crw(crw[0].rsid, 1);
92 break; 118 break;
93 case CRW_ERC_PERRI: /* Path has gone. */ 119 case CRW_ERC_PERRI: /* Path has gone. */
94 case CRW_ERC_PERRN: 120 case CRW_ERC_PERRN:
95 ret = chp_process_crw(crw.rsid, 0); 121 ret = chp_process_crw(crw[0].rsid, 0);
96 break; 122 break;
97 default: 123 default:
98 pr_debug("Don't know how to handle erc=%x\n", 124 pr_debug("Don't know how to handle erc=%x\n",
99 crw.erc); 125 crw[0].erc);
100 ret = 0; 126 ret = 0;
101 } 127 }
102 if (ret == -EAGAIN) 128 if (ret == -EAGAIN)
@@ -115,6 +141,8 @@ repeat:
115 pr_debug("unknown source\n"); 141 pr_debug("unknown source\n");
116 break; 142 break;
117 } 143 }
144 /* chain is always 0 or 1 here. */
145 chain = crw[chain].chn ? chain + 1 : 0;
118 } 146 }
119 if (slow) 147 if (slow)
120 queue_work(slow_path_wq, &slow_path_work); 148 queue_work(slow_path_wq, &slow_path_work);
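[Editor's note] The chaining rule reduces to: after consuming a CRW, the next slot is chain + 1 while the chn bit is set, otherwise 0, and the driver only buffers two slots. A tiny model, with the CRW struct cut down to the one bit that matters here:

#include <stdio.h>

struct crw { unsigned int chn : 1; };

int main(void)
{
	struct crw stream[] = { {1}, {0}, {1}, {1}, {1}, {0} };
	unsigned int chain = 0;
	int i;

	for (i = 0; i < 6; i++) {
		if (chain > 1)	/* only crw[0] and crw[1] are kept */
			printf("crw %d: overflow slot %u, warn and drop\n",
			       i, chain);
		else
			printf("crw %d: stored in slot %u\n", i, chain);
		chain = stream[i].chn ? chain + 1 : 0;
	}
	return 0;
}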
@@ -218,7 +246,7 @@ s390_revalidate_registers(struct mci *mci)
218 */ 246 */
219 kill_task = 1; 247 kill_task = 1;
220 248
221#ifndef __s390x__ 249#ifndef CONFIG_64BIT
222 asm volatile("ld 0,0(%0)\n" 250 asm volatile("ld 0,0(%0)\n"
223 "ld 2,8(%0)\n" 251 "ld 2,8(%0)\n"
224 "ld 4,16(%0)\n" 252 "ld 4,16(%0)\n"
@@ -227,7 +255,7 @@ s390_revalidate_registers(struct mci *mci)
227#endif 255#endif
228 256
229 if (MACHINE_HAS_IEEE) { 257 if (MACHINE_HAS_IEEE) {
230#ifdef __s390x__ 258#ifdef CONFIG_64BIT
231 fpt_save_area = &S390_lowcore.floating_pt_save_area; 259 fpt_save_area = &S390_lowcore.floating_pt_save_area;
232 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 260 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
233#else 261#else
@@ -286,7 +314,7 @@ s390_revalidate_registers(struct mci *mci)
286 */ 314 */
287 s390_handle_damage("invalid control registers."); 315 s390_handle_damage("invalid control registers.");
288 else 316 else
289#ifdef __s390x__ 317#ifdef CONFIG_64BIT
290 asm volatile("lctlg 0,15,0(%0)" 318 asm volatile("lctlg 0,15,0(%0)"
291 : : "a" (&S390_lowcore.cregs_save_area)); 319 : : "a" (&S390_lowcore.cregs_save_area));
292#else 320#else
@@ -299,7 +327,7 @@ s390_revalidate_registers(struct mci *mci)
299 * can't write something sensible into that register. 327 * can't write something sensible into that register.
300 */ 328 */
301 329
302#ifdef __s390x__ 330#ifdef CONFIG_64BIT
303 /* 331 /*
304 * See if we can revalidate the TOD programmable register with its 332 * See if we can revalidate the TOD programmable register with its
305 * old contents (should be zero) otherwise set it to zero. 333 * old contents (should be zero) otherwise set it to zero.
@@ -356,7 +384,7 @@ s390_do_machine_check(struct pt_regs *regs)
356 if (mci->b) { 384 if (mci->b) {
357 /* Processing backup -> verify if we can survive this */ 385 /* Processing backup -> verify if we can survive this */
358 u64 z_mcic, o_mcic, t_mcic; 386 u64 z_mcic, o_mcic, t_mcic;
359#ifdef __s390x__ 387#ifdef CONFIG_64BIT
360 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 388 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
361 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 389 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
362 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 390 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
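[Editor's note] The z_mcic/o_mcic masks encode a validity test over the machine-check interruption code: bits in z_mcic must be zero and bits in o_mcic must be one for the check to be survivable. A hedged sketch of that test; the masks below are arbitrary, and the real comparison against t_mcic (not shown in this hunk) is assumed to have the same shape:

#include <stdio.h>
#include <stdint.h>

static int survivable(uint64_t mcic, uint64_t z_mcic, uint64_t o_mcic)
{
	if (mcic & z_mcic)		/* a must-be-zero bit is set */
		return 0;
	if ((mcic & o_mcic) != o_mcic)	/* a must-be-one bit is clear */
		return 0;
	return 1;
}

int main(void)
{
	uint64_t z = 1ULL << 63;
	uint64_t o = (1ULL << 43) | (1ULL << 42);

	printf("%d\n", survivable(o, z, o));		/* 1: ok */
	printf("%d\n", survivable(o | z, z, o));	/* 0: fatal bit set */
	return 0;
}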
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 87c2db1bd4f5..66da840c9316 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -106,7 +106,7 @@ static inline int stsi (void *sysinfo,
106{ 106{
107 int cc, retv; 107 int cc, retv;
108 108
109#ifndef CONFIG_ARCH_S390X 109#ifndef CONFIG_64BIT
110 __asm__ __volatile__ ( "lr\t0,%2\n" 110 __asm__ __volatile__ ( "lr\t0,%2\n"
111 "\tlr\t1,%3\n" 111 "\tlr\t1,%3\n"
112 "\tstsi\t0(%4)\n" 112 "\tstsi\t0(%4)\n"
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4c42065dea88..3c606cf8c8ca 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -914,7 +914,7 @@ config SCSI_INIA100
914 914
915config SCSI_PPA 915config SCSI_PPA
916 tristate "IOMEGA parallel port (ppa - older drives)" 916 tristate "IOMEGA parallel port (ppa - older drives)"
917 depends on SCSI && PARPORT 917 depends on SCSI && PARPORT_PC
918 ---help--- 918 ---help---
919 This driver supports older versions of IOMEGA's parallel port ZIP 919 This driver supports older versions of IOMEGA's parallel port ZIP
920 drive (a 100 MB removable media device). 920 drive (a 100 MB removable media device).
@@ -941,7 +941,7 @@ config SCSI_PPA
941 941
942config SCSI_IMM 942config SCSI_IMM
943 tristate "IOMEGA parallel port (imm - newer drives)" 943 tristate "IOMEGA parallel port (imm - newer drives)"
944 depends on SCSI && PARPORT 944 depends on SCSI && PARPORT_PC
945 ---help--- 945 ---help---
946 This driver supports newer versions of IOMEGA's parallel port ZIP 946 This driver supports newer versions of IOMEGA's parallel port ZIP
947 drive (a 100 MB removable media device). 947 drive (a 100 MB removable media device).
@@ -968,7 +968,7 @@ config SCSI_IMM
968 968
969config SCSI_IZIP_EPP16 969config SCSI_IZIP_EPP16
970 bool "ppa/imm option - Use slow (but safe) EPP-16" 970 bool "ppa/imm option - Use slow (but safe) EPP-16"
971 depends on PARPORT && (SCSI_PPA || SCSI_IMM) 971 depends on SCSI_PPA || SCSI_IMM
972 ---help--- 972 ---help---
973 EPP (Enhanced Parallel Port) is a standard for parallel ports which 973 EPP (Enhanced Parallel Port) is a standard for parallel ports which
974 allows them to act as expansion buses that can handle up to 64 974 allows them to act as expansion buses that can handle up to 64
@@ -983,7 +983,7 @@ config SCSI_IZIP_EPP16
983 983
984config SCSI_IZIP_SLOW_CTR 984config SCSI_IZIP_SLOW_CTR
985 bool "ppa/imm option - Assume slow parport control register" 985 bool "ppa/imm option - Assume slow parport control register"
986 depends on PARPORT && (SCSI_PPA || SCSI_IMM) 986 depends on SCSI_PPA || SCSI_IMM
987 help 987 help
988 Some parallel ports are known to have excessive delays between 988 Some parallel ports are known to have excessive delays between
989 changing the parallel port control register and good data being 989 changing the parallel port control register and good data being
@@ -1815,7 +1815,7 @@ config SCSI_SUNESP
1815 1815
1816config ZFCP 1816config ZFCP
1817 tristate "FCP host bus adapter driver for IBM eServer zSeries" 1817 tristate "FCP host bus adapter driver for IBM eServer zSeries"
1818 depends on ARCH_S390 && QDIO && SCSI 1818 depends on S390 && QDIO && SCSI
1819 select SCSI_FC_ATTRS 1819 select SCSI_FC_ATTRS
1820 help 1820 help
1821 If you want to access SCSI devices attached to your IBM eServer 1821 If you want to access SCSI devices attached to your IBM eServer
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 4b647eefc9a9..557788ec4eec 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -166,6 +166,8 @@ static struct pci_driver piix_pci_driver = {
166 .id_table = piix_pci_tbl, 166 .id_table = piix_pci_tbl,
167 .probe = piix_init_one, 167 .probe = piix_init_one,
168 .remove = ata_pci_remove_one, 168 .remove = ata_pci_remove_one,
169 .suspend = ata_pci_device_suspend,
170 .resume = ata_pci_device_resume,
169}; 171};
170 172
171static struct scsi_host_template piix_sht = { 173static struct scsi_host_template piix_sht = {
@@ -185,6 +187,8 @@ static struct scsi_host_template piix_sht = {
185 .dma_boundary = ATA_DMA_BOUNDARY, 187 .dma_boundary = ATA_DMA_BOUNDARY,
186 .slave_configure = ata_scsi_slave_config, 188 .slave_configure = ata_scsi_slave_config,
187 .bios_param = ata_std_bios_param, 189 .bios_param = ata_std_bios_param,
190 .resume = ata_scsi_device_resume,
191 .suspend = ata_scsi_device_suspend,
188}; 192};
189 193
190static const struct ata_port_operations piix_pata_ops = { 194static const struct ata_port_operations piix_pata_ops = {
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index bdfb0a88cd6f..f55b9b3f7b37 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -4173,6 +4173,96 @@ err_out:
4173 * Inherited from caller. 4173 * Inherited from caller.
4174 */ 4174 */
4175 4175
4176/*
 4177 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4178 * without filling any other registers
4179 */
4180static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4181 u8 cmd)
4182{
4183 struct ata_taskfile tf;
4184 int err;
4185
4186 ata_tf_init(ap, &tf, dev->devno);
4187
4188 tf.command = cmd;
4189 tf.flags |= ATA_TFLAG_DEVICE;
4190 tf.protocol = ATA_PROT_NODATA;
4191
4192 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4193 if (err)
4194 printk(KERN_ERR "%s: ata command failed: %d\n",
4195 __FUNCTION__, err);
4196
4197 return err;
4198}
4199
4200static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4201{
4202 u8 cmd;
4203
4204 if (!ata_try_flush_cache(dev))
4205 return 0;
4206
4207 if (ata_id_has_flush_ext(dev->id))
4208 cmd = ATA_CMD_FLUSH_EXT;
4209 else
4210 cmd = ATA_CMD_FLUSH;
4211
4212 return ata_do_simple_cmd(ap, dev, cmd);
4213}
4214
4215static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4216{
4217 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4218}
4219
4220static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4221{
4222 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4223}
4224
4225/**
 4226 * ata_device_resume - wake up a previously suspended device
4227 *
4228 * Kick the drive back into action, by sending it an idle immediate
4229 * command and making sure its transfer mode matches between drive
4230 * and host.
4231 *
4232 */
4233int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4234{
4235 if (ap->flags & ATA_FLAG_SUSPENDED) {
4236 ap->flags &= ~ATA_FLAG_SUSPENDED;
4237 ata_set_mode(ap);
4238 }
4239 if (!ata_dev_present(dev))
4240 return 0;
4241 if (dev->class == ATA_DEV_ATA)
4242 ata_start_drive(ap, dev);
4243
4244 return 0;
4245}
4246
4247/**
4248 * ata_device_suspend - prepare a device for suspend
4249 *
4250 * Flush the cache on the drive, if appropriate, then issue a
4251 * standbynow command.
4252 *
4253 */
4254int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
4255{
4256 if (!ata_dev_present(dev))
4257 return 0;
4258 if (dev->class == ATA_DEV_ATA)
4259 ata_flush_cache(ap, dev);
4260
4261 ata_standby_drive(ap, dev);
4262 ap->flags |= ATA_FLAG_SUSPENDED;
4263 return 0;
4264}
4265
4176int ata_port_start (struct ata_port *ap) 4266int ata_port_start (struct ata_port *ap)
4177{ 4267{
4178 struct device *dev = ap->host_set->dev; 4268 struct device *dev = ap->host_set->dev;
@@ -4921,6 +5011,23 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4921 5011
4922 return (tmp == bits->val) ? 1 : 0; 5012 return (tmp == bits->val) ? 1 : 0;
4923} 5013}
5014
5015int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5016{
5017 pci_save_state(pdev);
5018 pci_disable_device(pdev);
5019 pci_set_power_state(pdev, PCI_D3hot);
5020 return 0;
5021}
5022
5023int ata_pci_device_resume(struct pci_dev *pdev)
5024{
5025 pci_set_power_state(pdev, PCI_D0);
5026 pci_restore_state(pdev);
5027 pci_enable_device(pdev);
5028 pci_set_master(pdev);
5029 return 0;
5030}
4924#endif /* CONFIG_PCI */ 5031#endif /* CONFIG_PCI */
4925 5032
4926 5033
@@ -5024,4 +5131,11 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5024EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); 5131EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5025EXPORT_SYMBOL_GPL(ata_pci_init_one); 5132EXPORT_SYMBOL_GPL(ata_pci_init_one);
5026EXPORT_SYMBOL_GPL(ata_pci_remove_one); 5133EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5134EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5135EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5027#endif /* CONFIG_PCI */ 5136#endif /* CONFIG_PCI */
5137
5138EXPORT_SYMBOL_GPL(ata_device_suspend);
5139EXPORT_SYMBOL_GPL(ata_device_resume);
5140EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5141EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 2c644cbb6e9c..cfbceb504718 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -396,6 +396,22 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
396 } 396 }
397} 397}
398 398
399int ata_scsi_device_resume(struct scsi_device *sdev)
400{
401 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
402 struct ata_device *dev = &ap->device[sdev->id];
403
404 return ata_device_resume(ap, dev);
405}
406
407int ata_scsi_device_suspend(struct scsi_device *sdev)
408{
409 struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
410 struct ata_device *dev = &ap->device[sdev->id];
411
412 return ata_device_suspend(ap, dev);
413}
414
399/** 415/**
400 * ata_to_sense_error - convert ATA error to SCSI error 416 * ata_to_sense_error - convert ATA error to SCSI error
401 * @id: ATA device number 417 * @id: ATA device number
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 15842b1f0f4a..ea7f3a433572 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -263,9 +263,40 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
263 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 263 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
264} 264}
265 265
266static int scsi_bus_suspend(struct device * dev, pm_message_t state)
267{
268 struct scsi_device *sdev = to_scsi_device(dev);
269 struct scsi_host_template *sht = sdev->host->hostt;
270 int err;
271
272 err = scsi_device_quiesce(sdev);
273 if (err)
274 return err;
275
276 if (sht->suspend)
277 err = sht->suspend(sdev);
278
279 return err;
280}
281
282static int scsi_bus_resume(struct device * dev)
283{
284 struct scsi_device *sdev = to_scsi_device(dev);
285 struct scsi_host_template *sht = sdev->host->hostt;
286 int err = 0;
287
288 if (sht->resume)
289 err = sht->resume(sdev);
290
291 scsi_device_resume(sdev);
292 return err;
293}
294
266struct bus_type scsi_bus_type = { 295struct bus_type scsi_bus_type = {
267 .name = "scsi", 296 .name = "scsi",
268 .match = scsi_bus_match, 297 .match = scsi_bus_match,
298 .suspend = scsi_bus_suspend,
299 .resume = scsi_bus_resume,
269}; 300};
270 301
271int scsi_sysfs_register(void) 302int scsi_sysfs_register(void)
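[Editor's note] The new bus methods sandwich the optional host-template hooks between the SCSI-level quiesce/resume calls, which is how the libata suspend path above gets invoked for ata_piix. A userspace model of the call order; every name here is a stand-in, not the real API:

#include <stdio.h>

struct scsi_host_template {
	int (*suspend)(void);
	int (*resume)(void);
};

static int ata_suspend(void) { printf("flush cache, standby\n"); return 0; }
static int ata_resume(void)  { printf("idle immediate, set mode\n"); return 0; }

static int bus_suspend(struct scsi_host_template *sht)
{
	printf("scsi_device_quiesce()\n");	/* always runs first */
	return sht->suspend ? sht->suspend() : 0;
}

static int bus_resume(struct scsi_host_template *sht)
{
	int err = sht->resume ? sht->resume() : 0;

	printf("scsi_device_resume()\n");	/* always runs last */
	return err;
}

int main(void)
{
	struct scsi_host_template piix_like = { ata_suspend, ata_resume };

	bus_suspend(&piix_like);
	bus_resume(&piix_like);
	return 0;
}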
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index b8727d9bf690..1288d6203e94 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -37,11 +37,11 @@
37 * by the bootloader or in the platform init code. 37 * by the bootloader or in the platform init code.
38 * 38 *
39 * The idx field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2, 39 * The idx field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2,
40 * and so on). So the PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so 40 * and so on). So the PSC1 is mapped to /dev/ttyPSC0, PSC2 to /dev/ttyPSC1 and
41 * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for 41 * so on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly
 42 * the console code : without this 1:1 mapping, at early boot time, when we are 42 * for the console code : without this 1:1 mapping, at early boot time, when we
 43 * parsing the kernel args console=ttyS?, we wouldn't know which PSC it will be 43 * are parsing the kernel args console=ttyPSC?, we wouldn't know which PSC it
44 * mapped to. 44 * will be mapped to.
45 */ 45 */
46 46
47#include <linux/config.h> 47#include <linux/config.h>
@@ -65,6 +65,10 @@
65#include <linux/serial_core.h> 65#include <linux/serial_core.h>
66 66
67 67
68/* We've been assigned a range on the "Low-density serial ports" major */
69#define SERIAL_PSC_MAJOR 204
70#define SERIAL_PSC_MINOR 148
71
68 72
69#define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */ 73#define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */
70 74
@@ -668,15 +672,15 @@ mpc52xx_console_setup(struct console *co, char *options)
668} 672}
669 673
670 674
671extern struct uart_driver mpc52xx_uart_driver; 675static struct uart_driver mpc52xx_uart_driver;
672 676
673static struct console mpc52xx_console = { 677static struct console mpc52xx_console = {
674 .name = "ttyS", 678 .name = "ttyPSC",
675 .write = mpc52xx_console_write, 679 .write = mpc52xx_console_write,
676 .device = uart_console_device, 680 .device = uart_console_device,
677 .setup = mpc52xx_console_setup, 681 .setup = mpc52xx_console_setup,
678 .flags = CON_PRINTBUFFER, 682 .flags = CON_PRINTBUFFER,
679 .index = -1, /* Specified on the cmdline (e.g. console=ttyS0 ) */ 683 .index = -1, /* Specified on the cmdline (e.g. console=ttyPSC0 ) */
680 .data = &mpc52xx_uart_driver, 684 .data = &mpc52xx_uart_driver,
681}; 685};
682 686
@@ -703,10 +707,10 @@ console_initcall(mpc52xx_console_init);
703static struct uart_driver mpc52xx_uart_driver = { 707static struct uart_driver mpc52xx_uart_driver = {
704 .owner = THIS_MODULE, 708 .owner = THIS_MODULE,
705 .driver_name = "mpc52xx_psc_uart", 709 .driver_name = "mpc52xx_psc_uart",
706 .dev_name = "ttyS", 710 .dev_name = "ttyPSC",
707 .devfs_name = "ttyS", 711 .devfs_name = "ttyPSC",
708 .major = TTY_MAJOR, 712 .major = SERIAL_PSC_MAJOR,
709 .minor = 64, 713 .minor = SERIAL_PSC_MINOR,
710 .nr = MPC52xx_PSC_MAXNUM, 714 .nr = MPC52xx_PSC_MAXNUM,
711 .cons = MPC52xx_PSC_CONSOLE, 715 .cons = MPC52xx_PSC_CONSOLE,
712}; 716};
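[Editor's note] With the driver moved off TTY_MAJOR, each PSC now appears under the low-density serial major: port idx becomes ttyPSC<idx> with minor 148 + idx. The per-port minor increment is standard uart_driver behaviour and is assumed here, not shown in the hunk. A trivial illustration:

#include <stdio.h>

#define SERIAL_PSC_MAJOR 204
#define SERIAL_PSC_MINOR 148

int main(void)
{
	int idx;

	for (idx = 0; idx < 3; idx++)
		printf("ttyPSC%d -> char device (%d, %d)\n",
		       idx, SERIAL_PSC_MAJOR, SERIAL_PSC_MINOR + idx);
	return 0;
}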
diff --git a/fs/exec.c b/fs/exec.c
index 22533cce0611..e75a9548da8e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -324,7 +324,7 @@ void install_arg_page(struct vm_area_struct *vma,
324 lru_cache_add_active(page); 324 lru_cache_add_active(page);
325 set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( 325 set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
326 page, vma->vm_page_prot)))); 326 page, vma->vm_page_prot))));
327 page_add_anon_rmap(page, vma, address); 327 page_add_new_anon_rmap(page, vma, address);
328 pte_unmap_unlock(pte, ptl); 328 pte_unmap_unlock(pte, ptl);
329 329
330 /* no need for flush_tlb */ 330 /* no need for flush_tlb */
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8f873e621f41..e08ab4702d97 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -148,6 +148,26 @@ void fuse_release_background(struct fuse_req *req)
148 spin_unlock(&fuse_lock); 148 spin_unlock(&fuse_lock);
149} 149}
150 150
151static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
152{
153 int i;
154 struct fuse_init_out *arg = &req->misc.init_out;
155
156 if (arg->major != FUSE_KERNEL_VERSION)
157 fc->conn_error = 1;
158 else {
159 fc->minor = arg->minor;
160 fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
161 }
162
163 /* After INIT reply is received other requests can go
164 out. So do (FUSE_MAX_OUTSTANDING - 1) number of
165 up()s on outstanding_sem. The last up() is done in
166 fuse_putback_request() */
167 for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
168 up(&fc->outstanding_sem);
169}
170
151/* 171/*
152 * This function is called when a request is finished. Either a reply 172 * This function is called when a request is finished. Either a reply
153 * has arrived or it was interrupted (and not yet sent) or some error 173 * has arrived or it was interrupted (and not yet sent) or some error
@@ -172,19 +192,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
172 up_read(&fc->sbput_sem); 192 up_read(&fc->sbput_sem);
173 } 193 }
174 wake_up(&req->waitq); 194 wake_up(&req->waitq);
175 if (req->in.h.opcode == FUSE_INIT) { 195 if (req->in.h.opcode == FUSE_INIT)
176 int i; 196 process_init_reply(fc, req);
177 197 else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
178 if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION)
179 fc->conn_error = 1;
180
181 /* After INIT reply is received other requests can go
182 out. So do (FUSE_MAX_OUTSTANDING - 1) number of
183 up()s on outstanding_sem. The last up() is done in
184 fuse_putback_request() */
185 for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
186 up(&fc->outstanding_sem);
187 } else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
188 /* Special case for failed iget in CREATE */ 198 /* Special case for failed iget in CREATE */
189 u64 nodeid = req->in.h.nodeid; 199 u64 nodeid = req->in.h.nodeid;
190 __fuse_get_request(req); 200 __fuse_get_request(req);
@@ -357,7 +367,7 @@ void fuse_send_init(struct fuse_conn *fc)
357 /* This is called from fuse_read_super() so there's guaranteed 367 /* This is called from fuse_read_super() so there's guaranteed
358 to be a request available */ 368 to be a request available */
359 struct fuse_req *req = do_get_request(fc); 369 struct fuse_req *req = do_get_request(fc);
360 struct fuse_init_in_out *arg = &req->misc.init_in_out; 370 struct fuse_init_in *arg = &req->misc.init_in;
361 arg->major = FUSE_KERNEL_VERSION; 371 arg->major = FUSE_KERNEL_VERSION;
362 arg->minor = FUSE_KERNEL_MINOR_VERSION; 372 arg->minor = FUSE_KERNEL_MINOR_VERSION;
363 req->in.h.opcode = FUSE_INIT; 373 req->in.h.opcode = FUSE_INIT;
@@ -365,8 +375,12 @@ void fuse_send_init(struct fuse_conn *fc)
365 req->in.args[0].size = sizeof(*arg); 375 req->in.args[0].size = sizeof(*arg);
366 req->in.args[0].value = arg; 376 req->in.args[0].value = arg;
367 req->out.numargs = 1; 377 req->out.numargs = 1;
 368 req->out.args[0].size = sizeof(*arg); 378 /* Variable length argument used for backward compatibility
369 req->out.args[0].value = arg; 379 with interface version < 7.5. Rest of init_out is zeroed
380 by do_get_request(), so a short reply is not a problem */
381 req->out.argvar = 1;
382 req->out.args[0].size = sizeof(struct fuse_init_out);
383 req->out.args[0].value = &req->misc.init_out;
370 request_send_background(fc, req); 384 request_send_background(fc, req);
371} 385}
372 386
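[Editor's note] The argvar = 1 reply works because the request's misc union is pre-zeroed: a pre-7.5 userspace replies with a shorter init_out, the copy stops early, and the untouched tail reads back as zero, which the minor < 5 check then papers over. A hedged sketch, assuming a little-endian host and an illustrative (not ABI-accurate) struct layout:

#include <stdio.h>
#include <string.h>

struct init_out {	/* illustrative layout, not the FUSE ABI */
	unsigned int major, minor, max_write;
};

int main(void)
{
	struct init_out out;
	unsigned char reply[8] = { 7, 0, 0, 0, 3, 0, 0, 0 };
	unsigned int max_write;

	memset(&out, 0, sizeof(out));		/* pre-zeroed, as in the patch */
	memcpy(&out, reply, sizeof(reply));	/* short reply: 8 < 12 bytes */

	max_write = out.minor < 5 ? 4096 : out.max_write;
	printf("major=%u minor=%u max_write=%u\n",
	       out.major, out.minor, max_write);	/* 7 3 4096 */
	return 0;
}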
@@ -615,6 +629,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
615 struct fuse_copy_state cs; 629 struct fuse_copy_state cs;
616 unsigned reqsize; 630 unsigned reqsize;
617 631
632 restart:
618 spin_lock(&fuse_lock); 633 spin_lock(&fuse_lock);
619 fc = file->private_data; 634 fc = file->private_data;
620 err = -EPERM; 635 err = -EPERM;
@@ -630,20 +645,25 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
630 645
631 req = list_entry(fc->pending.next, struct fuse_req, list); 646 req = list_entry(fc->pending.next, struct fuse_req, list);
632 list_del_init(&req->list); 647 list_del_init(&req->list);
633 spin_unlock(&fuse_lock);
634 648
635 in = &req->in; 649 in = &req->in;
636 reqsize = req->in.h.len; 650 reqsize = in->h.len;
637 fuse_copy_init(&cs, 1, req, iov, nr_segs); 651 /* If request is too large, reply with an error and restart the read */
638 err = -EINVAL; 652 if (iov_length(iov, nr_segs) < reqsize) {
639 if (iov_length(iov, nr_segs) >= reqsize) { 653 req->out.h.error = -EIO;
640 err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); 654 /* SETXATTR is special, since it may contain too large data */
641 if (!err) 655 if (in->h.opcode == FUSE_SETXATTR)
642 err = fuse_copy_args(&cs, in->numargs, in->argpages, 656 req->out.h.error = -E2BIG;
643 (struct fuse_arg *) in->args, 0); 657 request_end(fc, req);
658 goto restart;
644 } 659 }
660 spin_unlock(&fuse_lock);
661 fuse_copy_init(&cs, 1, req, iov, nr_segs);
662 err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
663 if (!err)
664 err = fuse_copy_args(&cs, in->numargs, in->argpages,
665 (struct fuse_arg *) in->args, 0);
645 fuse_copy_finish(&cs); 666 fuse_copy_finish(&cs);
646
647 spin_lock(&fuse_lock); 667 spin_lock(&fuse_lock);
648 req->locked = 0; 668 req->locked = 0;
649 if (!err && req->interrupted) 669 if (!err && req->interrupted)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 51f5da652771..417bcee466f6 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -13,8 +13,16 @@
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/namei.h> 15#include <linux/namei.h>
16#include <linux/mount.h>
17 16
17/*
 18 * FUSE caches dentries and attributes with separate timeouts. The
19 * time in jiffies until the dentry/attributes are valid is stored in
20 * dentry->d_time and fuse_inode->i_time respectively.
21 */
22
23/*
24 * Calculate the time in jiffies until a dentry/attributes are valid
25 */
18static inline unsigned long time_to_jiffies(unsigned long sec, 26static inline unsigned long time_to_jiffies(unsigned long sec,
19 unsigned long nsec) 27 unsigned long nsec)
20{ 28{
@@ -22,6 +30,50 @@ static inline unsigned long time_to_jiffies(unsigned long sec,
22 return jiffies + timespec_to_jiffies(&ts); 30 return jiffies + timespec_to_jiffies(&ts);
23} 31}
24 32
33/*
34 * Set dentry and possibly attribute timeouts from the lookup/mk*
35 * replies
36 */
37static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o)
38{
39 entry->d_time = time_to_jiffies(o->entry_valid, o->entry_valid_nsec);
40 if (entry->d_inode)
41 get_fuse_inode(entry->d_inode)->i_time =
42 time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
43}
44
45/*
46 * Mark the attributes as stale, so that at the next call to
47 * ->getattr() they will be fetched from userspace
48 */
49void fuse_invalidate_attr(struct inode *inode)
50{
51 get_fuse_inode(inode)->i_time = jiffies - 1;
52}
53
54/*
55 * Just mark the entry as stale, so that a next attempt to look it up
56 * will result in a new lookup call to userspace
57 *
58 * This is called when a dentry is about to become negative and the
59 * timeout is unknown (unlink, rmdir, rename and in some cases
60 * lookup)
61 */
62static void fuse_invalidate_entry_cache(struct dentry *entry)
63{
64 entry->d_time = jiffies - 1;
65}
66
67/*
68 * Same as fuse_invalidate_entry_cache(), but also try to remove the
69 * dentry from the hash
70 */
71static void fuse_invalidate_entry(struct dentry *entry)
72{
73 d_invalidate(entry);
74 fuse_invalidate_entry_cache(entry);
75}
76
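Note why jiffies - 1 works as the invalidation value in the helpers above: it is one tick in the past, so the time_after() test fires on the very next lookup, even near a jiffies wrap, since time_after() compares via a signed difference. A one-line demonstration:

	/* Illustration only: a deadline of jiffies - 1 is always stale. */
	static int example_always_stale(void)
	{
		unsigned long deadline = jiffies - 1;

		return time_after(jiffies, deadline);	/* evaluates to 1 */
	}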
25static void fuse_lookup_init(struct fuse_req *req, struct inode *dir, 77static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
26 struct dentry *entry, 78 struct dentry *entry,
27 struct fuse_entry_out *outarg) 79 struct fuse_entry_out *outarg)
@@ -37,17 +89,34 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
37 req->out.args[0].value = outarg; 89 req->out.args[0].value = outarg;
38} 90}
39 91
92/*
93 * Check whether the dentry is still valid
94 *
95 * If the entry validity timeout has expired and the dentry is
96 * positive, try to redo the lookup. If the lookup results in a
97 * different inode, then let the VFS invalidate the dentry and redo
98 * the lookup once more. If the lookup results in the same inode,
99 * then refresh the attributes, timeouts and mark the dentry valid.
100 */
40static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) 101static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
41{ 102{
42 if (!entry->d_inode || is_bad_inode(entry->d_inode)) 103 struct inode *inode = entry->d_inode;
104
105 if (inode && is_bad_inode(inode))
43 return 0; 106 return 0;
44 else if (time_after(jiffies, entry->d_time)) { 107 else if (time_after(jiffies, entry->d_time)) {
45 int err; 108 int err;
46 struct fuse_entry_out outarg; 109 struct fuse_entry_out outarg;
47 struct inode *inode = entry->d_inode; 110 struct fuse_conn *fc;
48 struct fuse_inode *fi = get_fuse_inode(inode); 111 struct fuse_req *req;
49 struct fuse_conn *fc = get_fuse_conn(inode); 112
50 struct fuse_req *req = fuse_get_request(fc); 113 /* Doesn't hurt to "reset" the validity timeout */
114 fuse_invalidate_entry_cache(entry);
115 if (!inode)
116 return 0;
117
118 fc = get_fuse_conn(inode);
119 req = fuse_get_request(fc);
51 if (!req) 120 if (!req)
52 return 0; 121 return 0;
53 122
@@ -55,6 +124,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
55 request_send(fc, req); 124 request_send(fc, req);
56 err = req->out.h.error; 125 err = req->out.h.error;
57 if (!err) { 126 if (!err) {
127 struct fuse_inode *fi = get_fuse_inode(inode);
58 if (outarg.nodeid != get_node_id(inode)) { 128 if (outarg.nodeid != get_node_id(inode)) {
59 fuse_send_forget(fc, req, outarg.nodeid, 1); 129 fuse_send_forget(fc, req, outarg.nodeid, 1);
60 return 0; 130 return 0;
@@ -66,18 +136,18 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
66 return 0; 136 return 0;
67 137
68 fuse_change_attributes(inode, &outarg.attr); 138 fuse_change_attributes(inode, &outarg.attr);
69 entry->d_time = time_to_jiffies(outarg.entry_valid, 139 fuse_change_timeout(entry, &outarg);
70 outarg.entry_valid_nsec);
71 fi->i_time = time_to_jiffies(outarg.attr_valid,
72 outarg.attr_valid_nsec);
73 } 140 }
74 return 1; 141 return 1;
75} 142}
76 143
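For readers less familiar with the dcache side, the contract implemented above is roughly this (a sketch against the 2.6-era API): ->d_revalidate() returns 1 to keep the cached dentry and 0 to make the VFS throw it away and repeat the lookup.

	/* Sketch of the ->d_revalidate() contract (illustration only). */
	static int example_revalidate(struct dentry *entry,
				      struct nameidata *nd)
	{
		if (time_after(jiffies, entry->d_time))
			return 0;	/* expired: VFS redoes the lookup */
		return 1;		/* still valid: keep the dentry */
	}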
144/*
145 * Check if there's already a hashed alias of this directory inode.
146 * If yes, then lookup and mkdir must not create a new alias.
147 */
77static int dir_alias(struct inode *inode) 148static int dir_alias(struct inode *inode)
78{ 149{
79 if (S_ISDIR(inode->i_mode)) { 150 if (S_ISDIR(inode->i_mode)) {
80 /* Don't allow creating an alias to a directory */
81 struct dentry *alias = d_find_alias(inode); 151 struct dentry *alias = d_find_alias(inode);
82 if (alias) { 152 if (alias) {
83 dput(alias); 153 dput(alias);
@@ -96,8 +166,14 @@ static struct dentry_operations fuse_dentry_operations = {
96 .d_revalidate = fuse_dentry_revalidate, 166 .d_revalidate = fuse_dentry_revalidate,
97}; 167};
98 168
99static int fuse_lookup_iget(struct inode *dir, struct dentry *entry, 169static inline int valid_mode(int m)
100 struct inode **inodep) 170{
171 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
172 S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
173}
174
175static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
176 struct nameidata *nd)
101{ 177{
102 int err; 178 int err;
103 struct fuse_entry_out outarg; 179 struct fuse_entry_out outarg;
@@ -106,53 +182,49 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry,
106 struct fuse_req *req; 182 struct fuse_req *req;
107 183
108 if (entry->d_name.len > FUSE_NAME_MAX) 184 if (entry->d_name.len > FUSE_NAME_MAX)
109 return -ENAMETOOLONG; 185 return ERR_PTR(-ENAMETOOLONG);
110 186
111 req = fuse_get_request(fc); 187 req = fuse_get_request(fc);
112 if (!req) 188 if (!req)
113 return -EINTR; 189 return ERR_PTR(-EINTR);
114 190
115 fuse_lookup_init(req, dir, entry, &outarg); 191 fuse_lookup_init(req, dir, entry, &outarg);
116 request_send(fc, req); 192 request_send(fc, req);
117 err = req->out.h.error; 193 err = req->out.h.error;
118 if (!err && invalid_nodeid(outarg.nodeid)) 194 if (!err && ((outarg.nodeid && invalid_nodeid(outarg.nodeid)) ||
195 !valid_mode(outarg.attr.mode)))
119 err = -EIO; 196 err = -EIO;
120 if (!err) { 197 if (!err && outarg.nodeid) {
121 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, 198 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
122 &outarg.attr); 199 &outarg.attr);
123 if (!inode) { 200 if (!inode) {
124 fuse_send_forget(fc, req, outarg.nodeid, 1); 201 fuse_send_forget(fc, req, outarg.nodeid, 1);
125 return -ENOMEM; 202 return ERR_PTR(-ENOMEM);
126 } 203 }
127 } 204 }
128 fuse_put_request(fc, req); 205 fuse_put_request(fc, req);
129 if (err && err != -ENOENT) 206 if (err && err != -ENOENT)
130 return err; 207 return ERR_PTR(err);
131 208
132 if (inode) { 209 if (inode && dir_alias(inode)) {
133 struct fuse_inode *fi = get_fuse_inode(inode); 210 iput(inode);
134 entry->d_time = time_to_jiffies(outarg.entry_valid, 211 return ERR_PTR(-EIO);
135 outarg.entry_valid_nsec);
136 fi->i_time = time_to_jiffies(outarg.attr_valid,
137 outarg.attr_valid_nsec);
138 } 212 }
139 213 d_add(entry, inode);
140 entry->d_op = &fuse_dentry_operations; 214 entry->d_op = &fuse_dentry_operations;
141 *inodep = inode; 215 if (!err)
142 return 0; 216 fuse_change_timeout(entry, &outarg);
143} 217 else
144 218 fuse_invalidate_entry_cache(entry);
145void fuse_invalidate_attr(struct inode *inode) 219 return NULL;
146{
147 get_fuse_inode(inode)->i_time = jiffies - 1;
148}
149
150static void fuse_invalidate_entry(struct dentry *entry)
151{
152 d_invalidate(entry);
153 entry->d_time = jiffies - 1;
154} 220}
155 221
222/*
223 * Atomic create+open operation
224 *
225 * If the filesystem doesn't support this, then fall back to separate
226 * 'mknod' + 'open' requests.
227 */
156static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, 228static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
157 struct nameidata *nd) 229 struct nameidata *nd)
158{ 230{
@@ -163,7 +235,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
163 struct fuse_open_in inarg; 235 struct fuse_open_in inarg;
164 struct fuse_open_out outopen; 236 struct fuse_open_out outopen;
165 struct fuse_entry_out outentry; 237 struct fuse_entry_out outentry;
166 struct fuse_inode *fi;
167 struct fuse_file *ff; 238 struct fuse_file *ff;
168 struct file *file; 239 struct file *file;
169 int flags = nd->intent.open.flags - 1; 240 int flags = nd->intent.open.flags - 1;
@@ -172,10 +243,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
172 if (fc->no_create) 243 if (fc->no_create)
173 goto out; 244 goto out;
174 245
175 err = -ENAMETOOLONG;
176 if (entry->d_name.len > FUSE_NAME_MAX)
177 goto out;
178
179 err = -EINTR; 246 err = -EINTR;
180 req = fuse_get_request(fc); 247 req = fuse_get_request(fc);
181 if (!req) 248 if (!req)
@@ -220,17 +287,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
220 if (!inode) { 287 if (!inode) {
221 flags &= ~(O_CREAT | O_EXCL | O_TRUNC); 288 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
222 ff->fh = outopen.fh; 289 ff->fh = outopen.fh;
290 /* Special release with inode = NULL; this will
291 trigger a 'forget' request when the release is
292 complete */
223 fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0); 293 fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0);
224 goto out_put_request; 294 goto out_put_request;
225 } 295 }
226 fuse_put_request(fc, req); 296 fuse_put_request(fc, req);
227 entry->d_time = time_to_jiffies(outentry.entry_valid,
228 outentry.entry_valid_nsec);
229 fi = get_fuse_inode(inode);
230 fi->i_time = time_to_jiffies(outentry.attr_valid,
231 outentry.attr_valid_nsec);
232
233 d_instantiate(entry, inode); 297 d_instantiate(entry, inode);
298 fuse_change_timeout(entry, &outentry);
234 file = lookup_instantiate_filp(nd, entry, generic_file_open); 299 file = lookup_instantiate_filp(nd, entry, generic_file_open);
235 if (IS_ERR(file)) { 300 if (IS_ERR(file)) {
236 ff->fh = outopen.fh; 301 ff->fh = outopen.fh;
@@ -248,13 +313,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
248 return err; 313 return err;
249} 314}
250 315
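The fallback mentioned in the 'Atomic create+open' comment above presumably lives in the ->create() method, which is not part of this hunk; a sketch of how such a dispatch typically looks is below. The first -ENOSYS reply makes the connection remember fc->no_create, and creation then degrades to the mknod + open pair.

	/* Sketch only: the dispatch is assumed, not shown in this hunk. */
	static int example_create(struct inode *dir, struct dentry *entry,
				  int mode, struct nameidata *nd)
	{
		if (nd && (nd->flags & LOOKUP_CREATE)) {
			int err = fuse_create_open(dir, entry, mode, nd);
			if (err != -ENOSYS)
				return err;
			/* Server lacks CREATE: fall back on mknod + open. */
		}
		return fuse_mknod(dir, entry, mode, 0);
	}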
316/*
317 * Code shared between mknod, mkdir, symlink and link
318 */
251static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, 319static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
252 struct inode *dir, struct dentry *entry, 320 struct inode *dir, struct dentry *entry,
253 int mode) 321 int mode)
254{ 322{
255 struct fuse_entry_out outarg; 323 struct fuse_entry_out outarg;
256 struct inode *inode; 324 struct inode *inode;
257 struct fuse_inode *fi;
258 int err; 325 int err;
259 326
260 req->in.h.nodeid = get_node_id(dir); 327 req->in.h.nodeid = get_node_id(dir);
@@ -268,10 +335,13 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
268 fuse_put_request(fc, req); 335 fuse_put_request(fc, req);
269 return err; 336 return err;
270 } 337 }
271 if (invalid_nodeid(outarg.nodeid)) { 338 err = -EIO;
272 fuse_put_request(fc, req); 339 if (invalid_nodeid(outarg.nodeid))
273 return -EIO; 340 goto out_put_request;
274 } 341
342 if ((outarg.attr.mode ^ mode) & S_IFMT)
343 goto out_put_request;
344
275 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, 345 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
276 &outarg.attr); 346 &outarg.attr);
277 if (!inode) { 347 if (!inode) {
@@ -280,22 +350,19 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
280 } 350 }
281 fuse_put_request(fc, req); 351 fuse_put_request(fc, req);
282 352
283 /* Don't allow userspace to do really stupid things... */ 353 if (dir_alias(inode)) {
284 if (((inode->i_mode ^ mode) & S_IFMT) || dir_alias(inode)) {
285 iput(inode); 354 iput(inode);
286 return -EIO; 355 return -EIO;
287 } 356 }
288 357
289 entry->d_time = time_to_jiffies(outarg.entry_valid,
290 outarg.entry_valid_nsec);
291
292 fi = get_fuse_inode(inode);
293 fi->i_time = time_to_jiffies(outarg.attr_valid,
294 outarg.attr_valid_nsec);
295
296 d_instantiate(entry, inode); 358 d_instantiate(entry, inode);
359 fuse_change_timeout(entry, &outarg);
297 fuse_invalidate_attr(dir); 360 fuse_invalidate_attr(dir);
298 return 0; 361 return 0;
362
363 out_put_request:
364 fuse_put_request(fc, req);
365 return err;
299} 366}
300 367
301static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode, 368static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
@@ -355,12 +422,7 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
355{ 422{
356 struct fuse_conn *fc = get_fuse_conn(dir); 423 struct fuse_conn *fc = get_fuse_conn(dir);
357 unsigned len = strlen(link) + 1; 424 unsigned len = strlen(link) + 1;
358 struct fuse_req *req; 425 struct fuse_req *req = fuse_get_request(fc);
359
360 if (len > FUSE_SYMLINK_MAX)
361 return -ENAMETOOLONG;
362
363 req = fuse_get_request(fc);
364 if (!req) 426 if (!req)
365 return -EINTR; 427 return -EINTR;
366 428
@@ -399,6 +461,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
399 inode->i_nlink = 0; 461 inode->i_nlink = 0;
400 fuse_invalidate_attr(inode); 462 fuse_invalidate_attr(inode);
401 fuse_invalidate_attr(dir); 463 fuse_invalidate_attr(dir);
464 fuse_invalidate_entry_cache(entry);
402 } else if (err == -EINTR) 465 } else if (err == -EINTR)
403 fuse_invalidate_entry(entry); 466 fuse_invalidate_entry(entry);
404 return err; 467 return err;
@@ -424,6 +487,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
424 if (!err) { 487 if (!err) {
425 entry->d_inode->i_nlink = 0; 488 entry->d_inode->i_nlink = 0;
426 fuse_invalidate_attr(dir); 489 fuse_invalidate_attr(dir);
490 fuse_invalidate_entry_cache(entry);
427 } else if (err == -EINTR) 491 } else if (err == -EINTR)
428 fuse_invalidate_entry(entry); 492 fuse_invalidate_entry(entry);
429 return err; 493 return err;
@@ -459,6 +523,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
459 fuse_invalidate_attr(olddir); 523 fuse_invalidate_attr(olddir);
460 if (olddir != newdir) 524 if (olddir != newdir)
461 fuse_invalidate_attr(newdir); 525 fuse_invalidate_attr(newdir);
526
527 /* newent will end up negative */
528 if (newent->d_inode)
529 fuse_invalidate_entry_cache(newent);
462 } else if (err == -EINTR) { 530 } else if (err == -EINTR) {
463 /* If request was interrupted, DEITY only knows if the 531 /* If request was interrupted, DEITY only knows if the
464 rename actually took place. If the invalidation 532 rename actually took place. If the invalidation
@@ -566,6 +634,15 @@ static int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task)
566 return 0; 634 return 0;
567} 635}
568 636
637/*
638 * Check whether the inode attributes are still valid
639 *
640 * If the attribute validity timeout has expired, then fetch the fresh
641 * attributes with a 'getattr' request
642 *
643 * I'm not sure why cached attributes are never returned for the root
644 * inode; this is probably being too cautious.
645 */
569static int fuse_revalidate(struct dentry *entry) 646static int fuse_revalidate(struct dentry *entry)
570{ 647{
571 struct inode *inode = entry->d_inode; 648 struct inode *inode = entry->d_inode;
@@ -613,6 +690,19 @@ static int fuse_access(struct inode *inode, int mask)
613 return err; 690 return err;
614} 691}
615 692
693/*
694 * Check permission. The two basic access models of FUSE are:
695 *
696 * 1) Local access checking ('default_permissions' mount option) based
697 * on file mode. This is the plain old disk filesystem permission
698 * model.
699 *
700 * 2) "Remote" access checking, where the server is responsible for
701 * checking permission in each inode operation. An exception to this
702 * is if ->permission() was invoked from sys_access(), in which case an
703 * access request is sent. Execute permission is still checked
704 * locally based on file mode.
705 */
616static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd) 706static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
617{ 707{
618 struct fuse_conn *fc = get_fuse_conn(inode); 708 struct fuse_conn *fc = get_fuse_conn(inode);
@@ -631,14 +721,10 @@ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
631 err = generic_permission(inode, mask, NULL); 721 err = generic_permission(inode, mask, NULL);
632 } 722 }
633 723
634 /* FIXME: Need some mechanism to revoke permissions: 724 /* Note: the opposite of the above test does not
635 currently if the filesystem suddenly changes the 725 exist. So if permissions are revoked, this won't be
636 file mode, we will not be informed about it, and 726 noticed immediately, only after the attribute
637 continue to allow access to the file/directory. 727 timeout has expired */
638
639 This is actually not so grave, since the user can
640 simply keep access to the file/directory anyway by
641 keeping it open... */
642 728
643 return err; 729 return err;
644 } else { 730 } else {
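Condensed, the dispatch that the fuse_permission() comment describes looks like this (a sketch: the real function also vets the caller via fuse_allow_task(), and LOOKUP_ACCESS marking the sys_access() path is an assumption based on the contemporaneous VFS):

	/* Sketch of the two access models (illustration only). */
	static int example_permission(struct inode *inode, int mask,
				      struct nameidata *nd)
	{
		struct fuse_conn *fc = get_fuse_conn(inode);

		if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
			return generic_permission(inode, mask, NULL);
		if ((mask & MAY_EXEC) && !S_ISDIR(inode->i_mode) &&
		    !(inode->i_mode & S_IXUGO))
			return -EACCES;		/* exec still checked locally */
		if (nd && (nd->flags & LOOKUP_ACCESS))
			return fuse_access(inode, mask);  /* sys_access() path */
		return 0;	/* otherwise the server decides per operation */
	}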
@@ -691,7 +777,12 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
691 struct page *page; 777 struct page *page;
692 struct inode *inode = file->f_dentry->d_inode; 778 struct inode *inode = file->f_dentry->d_inode;
693 struct fuse_conn *fc = get_fuse_conn(inode); 779 struct fuse_conn *fc = get_fuse_conn(inode);
694 struct fuse_req *req = fuse_get_request(fc); 780 struct fuse_req *req;
781
782 if (is_bad_inode(inode))
783 return -EIO;
784
785 req = fuse_get_request(fc);
695 if (!req) 786 if (!req)
696 return -EINTR; 787 return -EINTR;
697 788
@@ -806,6 +897,15 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
806 } 897 }
807} 898}
808 899
900/*
901 * Set attributes, and at the same time refresh them.
902 *
903 * Truncation is slightly complicated, because the 'truncate' request
904 * may fail, in which case we don't want to touch the mapping.
905 * vmtruncate() doesn't allow for this case. So do the rlimit
906 * checking by hand and call vmtruncate() only after the file has
907 * actually been truncated.
908 */
809static int fuse_setattr(struct dentry *entry, struct iattr *attr) 909static int fuse_setattr(struct dentry *entry, struct iattr *attr)
810{ 910{
811 struct inode *inode = entry->d_inode; 911 struct inode *inode = entry->d_inode;
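The ordering the fuse_setattr comment above insists on, reduced to a sketch (the example_* helpers are hypothetical and the error paths are trimmed): limits are checked by hand first, the server truncates, and only a successful reply may touch the mapping.

	/* Sketch only: truncate ordering, hypothetical helpers. */
	static int example_truncate(struct inode *inode, loff_t size)
	{
		int err = example_check_rlimit(inode, size);	/* by hand */

		if (err)
			return err;
		err = example_send_setattr(inode, size);  /* server truncates */
		if (err)
			return err;	/* mapping left untouched on failure */
		return vmtruncate(inode, size);	/* file already truncated */
	}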
@@ -883,23 +983,6 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
883 return err; 983 return err;
884} 984}
885 985
886static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
887 struct nameidata *nd)
888{
889 struct inode *inode;
890 int err;
891
892 err = fuse_lookup_iget(dir, entry, &inode);
893 if (err)
894 return ERR_PTR(err);
895 if (inode && dir_alias(inode)) {
896 iput(inode);
897 return ERR_PTR(-EIO);
898 }
899 d_add(entry, inode);
900 return NULL;
901}
902
903static int fuse_setxattr(struct dentry *entry, const char *name, 986static int fuse_setxattr(struct dentry *entry, const char *name,
904 const void *value, size_t size, int flags) 987 const void *value, size_t size, int flags)
905{ 988{
@@ -909,9 +992,6 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
909 struct fuse_setxattr_in inarg; 992 struct fuse_setxattr_in inarg;
910 int err; 993 int err;
911 994
912 if (size > FUSE_XATTR_SIZE_MAX)
913 return -E2BIG;
914
915 if (fc->no_setxattr) 995 if (fc->no_setxattr)
916 return -EOPNOTSUPP; 996 return -EOPNOTSUPP;
917 997
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 2ca86141d13a..05dedddf4289 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -163,6 +163,9 @@ static int fuse_flush(struct file *file)
163 struct fuse_flush_in inarg; 163 struct fuse_flush_in inarg;
164 int err; 164 int err;
165 165
166 if (is_bad_inode(inode))
167 return -EIO;
168
166 if (fc->no_flush) 169 if (fc->no_flush)
167 return 0; 170 return 0;
168 171
@@ -199,6 +202,9 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
199 struct fuse_fsync_in inarg; 202 struct fuse_fsync_in inarg;
200 int err; 203 int err;
201 204
205 if (is_bad_inode(inode))
206 return -EIO;
207
202 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) 208 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
203 return 0; 209 return 0;
204 210
@@ -272,16 +278,22 @@ static int fuse_readpage(struct file *file, struct page *page)
272{ 278{
273 struct inode *inode = page->mapping->host; 279 struct inode *inode = page->mapping->host;
274 struct fuse_conn *fc = get_fuse_conn(inode); 280 struct fuse_conn *fc = get_fuse_conn(inode);
275 loff_t pos = (loff_t) page->index << PAGE_CACHE_SHIFT; 281 struct fuse_req *req;
276 struct fuse_req *req = fuse_get_request(fc); 282 int err;
277 int err = -EINTR; 283
284 err = -EIO;
285 if (is_bad_inode(inode))
286 goto out;
287
288 err = -EINTR;
289 req = fuse_get_request(fc);
278 if (!req) 290 if (!req)
279 goto out; 291 goto out;
280 292
281 req->out.page_zeroing = 1; 293 req->out.page_zeroing = 1;
282 req->num_pages = 1; 294 req->num_pages = 1;
283 req->pages[0] = page; 295 req->pages[0] = page;
284 fuse_send_read(req, file, inode, pos, PAGE_CACHE_SIZE); 296 fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
285 err = req->out.h.error; 297 err = req->out.h.error;
286 fuse_put_request(fc, req); 298 fuse_put_request(fc, req);
287 if (!err) 299 if (!err)
@@ -295,7 +307,7 @@ static int fuse_readpage(struct file *file, struct page *page)
295static int fuse_send_readpages(struct fuse_req *req, struct file *file, 307static int fuse_send_readpages(struct fuse_req *req, struct file *file,
296 struct inode *inode) 308 struct inode *inode)
297{ 309{
298 loff_t pos = (loff_t) req->pages[0]->index << PAGE_CACHE_SHIFT; 310 loff_t pos = page_offset(req->pages[0]);
299 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 311 size_t count = req->num_pages << PAGE_CACHE_SHIFT;
300 unsigned i; 312 unsigned i;
301 req->out.page_zeroing = 1; 313 req->out.page_zeroing = 1;
@@ -345,6 +357,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
345 struct fuse_conn *fc = get_fuse_conn(inode); 357 struct fuse_conn *fc = get_fuse_conn(inode);
346 struct fuse_readpages_data data; 358 struct fuse_readpages_data data;
347 int err; 359 int err;
360
361 if (is_bad_inode(inode))
362 return -EIO;
363
348 data.file = file; 364 data.file = file;
349 data.inode = inode; 365 data.inode = inode;
350 data.req = fuse_get_request(fc); 366 data.req = fuse_get_request(fc);
@@ -402,8 +418,13 @@ static int fuse_commit_write(struct file *file, struct page *page,
402 unsigned count = to - offset; 418 unsigned count = to - offset;
403 struct inode *inode = page->mapping->host; 419 struct inode *inode = page->mapping->host;
404 struct fuse_conn *fc = get_fuse_conn(inode); 420 struct fuse_conn *fc = get_fuse_conn(inode);
405 loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; 421 loff_t pos = page_offset(page) + offset;
406 struct fuse_req *req = fuse_get_request(fc); 422 struct fuse_req *req;
423
424 if (is_bad_inode(inode))
425 return -EIO;
426
427 req = fuse_get_request(fc);
407 if (!req) 428 if (!req)
408 return -EINTR; 429 return -EINTR;
409 430
@@ -454,7 +475,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
454 475
455 nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); 476 nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
456 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 477 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
457 npages = min(npages, FUSE_MAX_PAGES_PER_REQ); 478 npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
458 down_read(&current->mm->mmap_sem); 479 down_read(&current->mm->mmap_sem);
459 npages = get_user_pages(current, current->mm, user_addr, npages, write, 480 npages = get_user_pages(current, current->mm, user_addr, npages, write,
460 0, req->pages, NULL); 481 0, req->pages, NULL);
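A worked example of the page-count arithmetic above, assuming 4 KiB pages: with the user address 768 bytes into its page and nbytes = 8192, (8192 + 768 + 4095) >> 12 = 3 pages. The added max(npages, 1) presumably guards the degenerate zero-byte case, the only one where the formula yields 0.

	/* Worked example (assumes PAGE_SHIFT == 12, i.e. 4 KiB pages). */
	static unsigned example_npages(unsigned long user_addr, size_t nbytes)
	{
		unsigned long offset = user_addr & 4095;	/* e.g. 768 */

		/* nbytes = 8192, offset = 768: 13055 >> 12 == 3 */
		return (nbytes + offset + 4095) >> 12;
	}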
@@ -475,12 +496,16 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
475 size_t nmax = write ? fc->max_write : fc->max_read; 496 size_t nmax = write ? fc->max_write : fc->max_read;
476 loff_t pos = *ppos; 497 loff_t pos = *ppos;
477 ssize_t res = 0; 498 ssize_t res = 0;
478 struct fuse_req *req = fuse_get_request(fc); 499 struct fuse_req *req;
500
501 if (is_bad_inode(inode))
502 return -EIO;
503
504 req = fuse_get_request(fc);
479 if (!req) 505 if (!req)
480 return -EINTR; 506 return -EINTR;
481 507
482 while (count) { 508 while (count) {
483 size_t tmp;
484 size_t nres; 509 size_t nres;
485 size_t nbytes = min(count, nmax); 510 size_t nbytes = min(count, nmax);
486 int err = fuse_get_user_pages(req, buf, nbytes, !write); 511 int err = fuse_get_user_pages(req, buf, nbytes, !write);
@@ -488,8 +513,8 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
488 res = err; 513 res = err;
489 break; 514 break;
490 } 515 }
491 tmp = (req->num_pages << PAGE_SHIFT) - req->page_offset; 516 nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
492 nbytes = min(nbytes, tmp); 517 nbytes = min(count, nbytes);
493 if (write) 518 if (write)
494 nres = fuse_send_write(req, file, inode, pos, nbytes); 519 nres = fuse_send_write(req, file, inode, pos, nbytes);
495 else 520 else
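Why nbytes is recomputed after the mapping (worked example, again with 4 KiB pages): get_user_pages() may pin fewer pages than requested, and the first page is only usable from page_offset onward, so the transfer size must be derived from what was actually pinned and then clamped to the remaining count.

	/* Worked example (assumes 4 KiB pages). */
	static size_t example_usable(int num_pages, unsigned page_offset,
				     size_t count)
	{
		/* num_pages = 2, page_offset = 768: 8192 - 768 = 7424. */
		size_t nbytes = ((size_t)num_pages << 12) - page_offset;

		return nbytes < count ? nbytes : count;
	}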
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0ea5301f86be..74c8d098a14a 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,9 @@
21/** If more requests are outstanding, then the operation will block */ 21/** If more requests are outstanding, then the operation will block */
22#define FUSE_MAX_OUTSTANDING 10 22#define FUSE_MAX_OUTSTANDING 10
23 23
24/** It could be as large as PATH_MAX, but would that have any uses? */
25#define FUSE_NAME_MAX 1024
26
24/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem 27/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem
25 module will check permissions based on the file mode. Otherwise no 28 module will check permissions based on the file mode. Otherwise no
26 permission checking is done in the kernel */ 29 permission checking is done in the kernel */
@@ -108,9 +111,6 @@ struct fuse_out {
108 struct fuse_arg args[3]; 111 struct fuse_arg args[3];
109}; 112};
110 113
111struct fuse_req;
112struct fuse_conn;
113
114/** 114/**
115 * A request to the client 115 * A request to the client
116 */ 116 */
@@ -159,7 +159,8 @@ struct fuse_req {
159 union { 159 union {
160 struct fuse_forget_in forget_in; 160 struct fuse_forget_in forget_in;
161 struct fuse_release_in release_in; 161 struct fuse_release_in release_in;
162 struct fuse_init_in_out init_in_out; 162 struct fuse_init_in init_in;
163 struct fuse_init_out init_out;
163 } misc; 164 } misc;
164 165
165 /** page vector */ 166 /** page vector */
@@ -272,6 +273,9 @@ struct fuse_conn {
272 /** Is create not implemented by fs? */ 273 /** Is create not implemented by fs? */
273 unsigned no_create : 1; 274 unsigned no_create : 1;
274 275
276 /** Negotiated minor version */
277 unsigned minor;
278
275 /** Backing dev info */ 279 /** Backing dev info */
276 struct backing_dev_info bdi; 280 struct backing_dev_info bdi;
277}; 281};
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e69a546844d0..04c80cc957a3 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -135,12 +135,8 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
135 fuse_init_common(inode); 135 fuse_init_common(inode);
136 init_special_inode(inode, inode->i_mode, 136 init_special_inode(inode, inode->i_mode,
137 new_decode_dev(attr->rdev)); 137 new_decode_dev(attr->rdev));
138 } else { 138 } else
139 /* Don't let user create weird files */ 139 BUG();
140 inode->i_mode = S_IFREG;
141 fuse_init_common(inode);
142 fuse_init_file_inode(inode);
143 }
144} 140}
145 141
146static int fuse_inode_eq(struct inode *inode, void *_nodeidp) 142static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
@@ -218,6 +214,7 @@ static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr
218{ 214{
219 stbuf->f_type = FUSE_SUPER_MAGIC; 215 stbuf->f_type = FUSE_SUPER_MAGIC;
220 stbuf->f_bsize = attr->bsize; 216 stbuf->f_bsize = attr->bsize;
217 stbuf->f_frsize = attr->frsize;
221 stbuf->f_blocks = attr->blocks; 218 stbuf->f_blocks = attr->blocks;
222 stbuf->f_bfree = attr->bfree; 219 stbuf->f_bfree = attr->bfree;
223 stbuf->f_bavail = attr->bavail; 220 stbuf->f_bavail = attr->bavail;
@@ -238,10 +235,12 @@ static int fuse_statfs(struct super_block *sb, struct kstatfs *buf)
238 if (!req) 235 if (!req)
239 return -EINTR; 236 return -EINTR;
240 237
238 memset(&outarg, 0, sizeof(outarg));
241 req->in.numargs = 0; 239 req->in.numargs = 0;
242 req->in.h.opcode = FUSE_STATFS; 240 req->in.h.opcode = FUSE_STATFS;
243 req->out.numargs = 1; 241 req->out.numargs = 1;
244 req->out.args[0].size = sizeof(outarg); 242 req->out.args[0].size =
243 fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
245 req->out.args[0].value = &outarg; 244 req->out.args[0].value = &outarg;
246 request_send(fc, req); 245 request_send(fc, req);
247 err = req->out.h.error; 246 err = req->out.h.error;
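The version gate above in isolation (a sketch; FUSE_COMPAT_STATFS_SIZE is taken on trust from the protocol header this series updates, and is assumed here to cover the pre-7.4 reply layout): a server negotiated below minor version 4 replies with a shorter statfs structure, so the kernel must accept the short reply, and the new f_frsize field then stays 0 thanks to the memset above.

	/* Sketch only. */
	static size_t example_statfs_size(unsigned minor)
	{
		return minor < 4 ? FUSE_COMPAT_STATFS_SIZE
				 : sizeof(struct fuse_statfs_out);
	}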
@@ -482,7 +481,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
482 fc->max_read = d.max_read; 481 fc->max_read = d.max_read;
483 if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages) 482 if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages)
484 fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE; 483 fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE;
485 fc->max_write = FUSE_MAX_IN / 2;
486 484
487 err = -ENOMEM; 485 err = -ENOMEM;
488 root = get_root_inode(sb, d.rootmode); 486 root = get_root_inode(sb, d.rootmode);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 8093351bd7c3..6daaf7c755a6 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -320,7 +320,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
320 /* temporarily use utf8 to correctly find the hidden dir below */ 320 /* temporarily use utf8 to correctly find the hidden dir below */
321 nls = sbi->nls; 321 nls = sbi->nls;
322 sbi->nls = load_nls("utf8"); 322 sbi->nls = load_nls("utf8");
323 if (!nls) { 323 if (!sbi->nls) {
324 printk("HFS+: unable to load nls for utf8\n"); 324 printk("HFS+: unable to load nls for utf8\n");
325 err = -EINVAL; 325 err = -EINVAL;
326 goto cleanup; 326 goto cleanup;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8c1cef3bb677..8c41315a6e42 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -100,9 +100,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
100 loff_t len, vma_len; 100 loff_t len, vma_len;
101 int ret; 101 int ret;
102 102
103 if ((vma->vm_flags & (VM_MAYSHARE | VM_WRITE)) == VM_WRITE)
104 return -EINVAL;
105
106 if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1)) 103 if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
107 return -EINVAL; 104 return -EINVAL;
108 105
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 014a51fd00d7..cb3cef525c3b 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -24,29 +24,75 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26/* 26/*
27 * Unlink a buffer from a transaction. 27 * Unlink a buffer from a transaction checkpoint list.
28 * 28 *
29 * Called with j_list_lock held. 29 * Called with j_list_lock held.
30 */ 30 */
31 31
32static inline void __buffer_unlink(struct journal_head *jh) 32static void __buffer_unlink_first(struct journal_head *jh)
33{ 33{
34 transaction_t *transaction; 34 transaction_t *transaction;
35 35
36 transaction = jh->b_cp_transaction; 36 transaction = jh->b_cp_transaction;
37 jh->b_cp_transaction = NULL;
38 37
39 jh->b_cpnext->b_cpprev = jh->b_cpprev; 38 jh->b_cpnext->b_cpprev = jh->b_cpprev;
40 jh->b_cpprev->b_cpnext = jh->b_cpnext; 39 jh->b_cpprev->b_cpnext = jh->b_cpnext;
41 if (transaction->t_checkpoint_list == jh) 40 if (transaction->t_checkpoint_list == jh) {
42 transaction->t_checkpoint_list = jh->b_cpnext; 41 transaction->t_checkpoint_list = jh->b_cpnext;
43 if (transaction->t_checkpoint_list == jh) 42 if (transaction->t_checkpoint_list == jh)
44 transaction->t_checkpoint_list = NULL; 43 transaction->t_checkpoint_list = NULL;
44 }
45}
46
47/*
48 * Unlink a buffer from a transaction checkpoint(io) list.
49 *
50 * Called with j_list_lock held.
51 */
52
53static inline void __buffer_unlink(struct journal_head *jh)
54{
55 transaction_t *transaction;
56
57 transaction = jh->b_cp_transaction;
58
59 __buffer_unlink_first(jh);
60 if (transaction->t_checkpoint_io_list == jh) {
61 transaction->t_checkpoint_io_list = jh->b_cpnext;
62 if (transaction->t_checkpoint_io_list == jh)
63 transaction->t_checkpoint_io_list = NULL;
64 }
65}
66
67/*
68 * Move a buffer from the checkpoint list to the checkpoint io list
69 *
70 * Called with j_list_lock held
71 */
72
73static inline void __buffer_relink_io(struct journal_head *jh)
74{
75 transaction_t *transaction;
76
77 transaction = jh->b_cp_transaction;
78 __buffer_unlink_first(jh);
79
80 if (!transaction->t_checkpoint_io_list) {
81 jh->b_cpnext = jh->b_cpprev = jh;
82 } else {
83 jh->b_cpnext = transaction->t_checkpoint_io_list;
84 jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
85 jh->b_cpprev->b_cpnext = jh;
86 jh->b_cpnext->b_cpprev = jh;
87 }
88 transaction->t_checkpoint_io_list = jh;
45} 89}
46 90
47/* 91/*
48 * Try to release a checkpointed buffer from its transaction. 92 * Try to release a checkpointed buffer from its transaction.
49 * Returns 1 if we released it. 93 * Returns 1 if we released it and 2 if we also released the
94 * whole transaction.
95 *
50 * Requires j_list_lock 96 * Requires j_list_lock
51 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it 97 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
52 */ 98 */
@@ -57,12 +103,11 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
57 103
58 if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) { 104 if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
59 JBUFFER_TRACE(jh, "remove from checkpoint list"); 105 JBUFFER_TRACE(jh, "remove from checkpoint list");
60 __journal_remove_checkpoint(jh); 106 ret = __journal_remove_checkpoint(jh) + 1;
61 jbd_unlock_bh_state(bh); 107 jbd_unlock_bh_state(bh);
62 journal_remove_journal_head(bh); 108 journal_remove_journal_head(bh);
63 BUFFER_TRACE(bh, "release"); 109 BUFFER_TRACE(bh, "release");
64 __brelse(bh); 110 __brelse(bh);
65 ret = 1;
66 } else { 111 } else {
67 jbd_unlock_bh_state(bh); 112 jbd_unlock_bh_state(bh);
68 } 113 }
@@ -117,83 +162,53 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
117} 162}
118 163
119/* 164/*
120 * Clean up a transaction's checkpoint list. 165 * Clean up a transaction's list of buffers submitted for IO.
121 * 166 * We wait for any pending IO to complete and remove any clean
122 * We wait for any pending IO to complete and make sure any clean 167 * buffers. Note that we take the buffers in the opposite ordering
123 * buffers are removed from the transaction. 168 * from the one in which they were submitted for IO.
124 *
125 * Return 1 if we performed any actions which might have destroyed the
126 * checkpoint. (journal_remove_checkpoint() deletes the transaction when
127 * the last checkpoint buffer is cleansed)
128 * 169 *
129 * Called with j_list_lock held. 170 * Called with j_list_lock held.
130 */ 171 */
131static int __cleanup_transaction(journal_t *journal, transaction_t *transaction) 172
173static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
132{ 174{
133 struct journal_head *jh, *next_jh, *last_jh; 175 struct journal_head *jh;
134 struct buffer_head *bh; 176 struct buffer_head *bh;
135 int ret = 0; 177 tid_t this_tid;
136 178 int released = 0;
137 assert_spin_locked(&journal->j_list_lock); 179
138 jh = transaction->t_checkpoint_list; 180 this_tid = transaction->t_tid;
139 if (!jh) 181restart:
140 return 0; 182 /* Did somebody clean up the transaction in the meanwhile? */
141 183 if (journal->j_checkpoint_transactions != transaction ||
142 last_jh = jh->b_cpprev; 184 transaction->t_tid != this_tid)
143 next_jh = jh; 185 return;
144 do { 186 while (!released && transaction->t_checkpoint_io_list) {
145 jh = next_jh; 187 jh = transaction->t_checkpoint_io_list;
146 bh = jh2bh(jh); 188 bh = jh2bh(jh);
189 if (!jbd_trylock_bh_state(bh)) {
190 jbd_sync_bh(journal, bh);
191 spin_lock(&journal->j_list_lock);
192 goto restart;
193 }
147 if (buffer_locked(bh)) { 194 if (buffer_locked(bh)) {
148 atomic_inc(&bh->b_count); 195 atomic_inc(&bh->b_count);
149 spin_unlock(&journal->j_list_lock); 196 spin_unlock(&journal->j_list_lock);
197 jbd_unlock_bh_state(bh);
150 wait_on_buffer(bh); 198 wait_on_buffer(bh);
151 /* the journal_head may have gone by now */ 199 /* the journal_head may have gone by now */
152 BUFFER_TRACE(bh, "brelse"); 200 BUFFER_TRACE(bh, "brelse");
153 __brelse(bh); 201 __brelse(bh);
154 goto out_return_1; 202 spin_lock(&journal->j_list_lock);
155 } 203 goto restart;
156
157 /*
158 * This is foul
159 */
160 if (!jbd_trylock_bh_state(bh)) {
161 jbd_sync_bh(journal, bh);
162 goto out_return_1;
163 } 204 }
164
165 if (jh->b_transaction != NULL) {
166 transaction_t *t = jh->b_transaction;
167 tid_t tid = t->t_tid;
168
169 spin_unlock(&journal->j_list_lock);
170 jbd_unlock_bh_state(bh);
171 log_start_commit(journal, tid);
172 log_wait_commit(journal, tid);
173 goto out_return_1;
174 }
175
176 /* 205 /*
177 * AKPM: I think the buffer_jbddirty test is redundant - it 206 * Now in whatever state the buffer currently is, we know that
178 * shouldn't have NULL b_transaction? 207 * it has been written out and so we can drop it from the list
179 */ 208 */
180 next_jh = jh->b_cpnext; 209 released = __journal_remove_checkpoint(jh);
181 if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) { 210 jbd_unlock_bh_state(bh);
182 BUFFER_TRACE(bh, "remove from checkpoint"); 211 }
183 __journal_remove_checkpoint(jh);
184 jbd_unlock_bh_state(bh);
185 journal_remove_journal_head(bh);
186 __brelse(bh);
187 ret = 1;
188 } else {
189 jbd_unlock_bh_state(bh);
190 }
191 } while (jh != last_jh);
192
193 return ret;
194out_return_1:
195 spin_lock(&journal->j_list_lock);
196 return 1;
197} 212}
198 213
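Putting the two lists together (a summary of the lifecycle this patch introduces): a buffer waits on t_checkpoint_list, moves to t_checkpoint_io_list via __buffer_relink_io() once submitted for write-out, and __journal_remove_checkpoint() finally unlinks it after the IO completes. The circular-list insertion used for the move follows this pattern, mirrored here as a sketch:

	/* Minimal mirror of the insertion in __buffer_relink_io() above. */
	static void example_ring_add(struct journal_head **list,
				     struct journal_head *jh)
	{
		if (!*list) {
			jh->b_cpnext = jh->b_cpprev = jh;  /* singleton ring */
		} else {
			jh->b_cpnext = *list;
			jh->b_cpprev = (*list)->b_cpprev;
			jh->b_cpprev->b_cpnext = jh;
			jh->b_cpnext->b_cpprev = jh;
		}
		*list = jh;	/* new head of the ring */
	}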
199#define NR_BATCH 64 214#define NR_BATCH 64
@@ -203,9 +218,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
203{ 218{
204 int i; 219 int i;
205 220
206 spin_unlock(&journal->j_list_lock);
207 ll_rw_block(SWRITE, *batch_count, bhs); 221 ll_rw_block(SWRITE, *batch_count, bhs);
208 spin_lock(&journal->j_list_lock);
209 for (i = 0; i < *batch_count; i++) { 222 for (i = 0; i < *batch_count; i++) {
210 struct buffer_head *bh = bhs[i]; 223 struct buffer_head *bh = bhs[i];
211 clear_buffer_jwrite(bh); 224 clear_buffer_jwrite(bh);
@@ -221,19 +234,46 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
221 * Return 1 if something happened which requires us to abort the current 234 * Return 1 if something happened which requires us to abort the current
222 * scan of the checkpoint list. 235 * scan of the checkpoint list.
223 * 236 *
224 * Called with j_list_lock held. 237 * Called with j_list_lock held and drops it if 1 is returned
225 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it 238 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
226 */ 239 */
227static int __flush_buffer(journal_t *journal, struct journal_head *jh, 240static int __process_buffer(journal_t *journal, struct journal_head *jh,
228 struct buffer_head **bhs, int *batch_count, 241 struct buffer_head **bhs, int *batch_count)
229 int *drop_count)
230{ 242{
231 struct buffer_head *bh = jh2bh(jh); 243 struct buffer_head *bh = jh2bh(jh);
232 int ret = 0; 244 int ret = 0;
233 245
234 if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) { 246 if (buffer_locked(bh)) {
235 J_ASSERT_JH(jh, jh->b_transaction == NULL); 247 get_bh(bh);
248 spin_unlock(&journal->j_list_lock);
249 jbd_unlock_bh_state(bh);
250 wait_on_buffer(bh);
251 /* the journal_head may have gone by now */
252 BUFFER_TRACE(bh, "brelse");
253 put_bh(bh);
254 ret = 1;
255 }
256 else if (jh->b_transaction != NULL) {
257 transaction_t *t = jh->b_transaction;
258 tid_t tid = t->t_tid;
236 259
260 spin_unlock(&journal->j_list_lock);
261 jbd_unlock_bh_state(bh);
262 log_start_commit(journal, tid);
263 log_wait_commit(journal, tid);
264 ret = 1;
265 }
266 else if (!buffer_dirty(bh)) {
267 J_ASSERT_JH(jh, !buffer_jbddirty(bh));
268 BUFFER_TRACE(bh, "remove from checkpoint");
269 __journal_remove_checkpoint(jh);
270 spin_unlock(&journal->j_list_lock);
271 jbd_unlock_bh_state(bh);
272 journal_remove_journal_head(bh);
273 put_bh(bh);
274 ret = 1;
275 }
276 else {
237 /* 277 /*
238 * Important: we are about to write the buffer, and 278 * Important: we are about to write the buffer, and
239 * possibly block, while still holding the journal lock. 279 * possibly block, while still holding the journal lock.
@@ -246,45 +286,30 @@ static int __flush_buffer(journal_t *journal, struct journal_head *jh,
246 J_ASSERT_BH(bh, !buffer_jwrite(bh)); 286 J_ASSERT_BH(bh, !buffer_jwrite(bh));
247 set_buffer_jwrite(bh); 287 set_buffer_jwrite(bh);
248 bhs[*batch_count] = bh; 288 bhs[*batch_count] = bh;
289 __buffer_relink_io(jh);
249 jbd_unlock_bh_state(bh); 290 jbd_unlock_bh_state(bh);
250 (*batch_count)++; 291 (*batch_count)++;
251 if (*batch_count == NR_BATCH) { 292 if (*batch_count == NR_BATCH) {
293 spin_unlock(&journal->j_list_lock);
252 __flush_batch(journal, bhs, batch_count); 294 __flush_batch(journal, bhs, batch_count);
253 ret = 1; 295 ret = 1;
254 } 296 }
255 } else {
256 int last_buffer = 0;
257 if (jh->b_cpnext == jh) {
258 /* We may be about to drop the transaction. Tell the
259 * caller that the lists have changed.
260 */
261 last_buffer = 1;
262 }
263 if (__try_to_free_cp_buf(jh)) {
264 (*drop_count)++;
265 ret = last_buffer;
266 }
267 } 297 }
268 return ret; 298 return ret;
269} 299}
270 300
271/* 301/*
272 * Perform an actual checkpoint. We don't write out only enough to 302 * Perform an actual checkpoint. We take the first transaction on the
273 * satisfy the current blocked requests: rather we submit a reasonably 303 * list of transactions to be checkpointed and send all its buffers
274 * sized chunk of the outstanding data to disk at once for 304 * to disk. We submit larger chunks of data at once.
275 * efficiency. __log_wait_for_space() will retry if we didn't free enough.
276 * 305 *
277 * However, we _do_ take into account the amount requested so that once
278 * the IO has been queued, we can return as soon as enough of it has
279 * completed to disk.
280 *
281 * The journal should be locked before calling this function. 306 * The journal should be locked before calling this function.
282 */ 307 */
283int log_do_checkpoint(journal_t *journal) 308int log_do_checkpoint(journal_t *journal)
284{ 309{
310 transaction_t *transaction;
311 tid_t this_tid;
285 int result; 312 int result;
286 int batch_count = 0;
287 struct buffer_head *bhs[NR_BATCH];
288 313
289 jbd_debug(1, "Start checkpoint\n"); 314 jbd_debug(1, "Start checkpoint\n");
290 315
@@ -299,79 +324,70 @@ int log_do_checkpoint(journal_t *journal)
299 return result; 324 return result;
300 325
301 /* 326 /*
302 * OK, we need to start writing disk blocks. Try to free up a 327 * OK, we need to start writing disk blocks. Take one transaction
303 * quarter of the log in a single checkpoint if we can. 328 * and write it.
304 */ 329 */
330 spin_lock(&journal->j_list_lock);
331 if (!journal->j_checkpoint_transactions)
332 goto out;
333 transaction = journal->j_checkpoint_transactions;
334 this_tid = transaction->t_tid;
335restart:
305 /* 336 /*
306 * AKPM: check this code. I had a feeling a while back that it 337 * If someone cleaned up this transaction while we slept, we're
307 * degenerates into a busy loop at unmount time. 338 * done (maybe it's a new transaction, but it fell at the same
339 * address).
308 */ 340 */
309 spin_lock(&journal->j_list_lock); 341 if (journal->j_checkpoint_transactions == transaction &&
310 while (journal->j_checkpoint_transactions) { 342 transaction->t_tid == this_tid) {
311 transaction_t *transaction; 343 int batch_count = 0;
312 struct journal_head *jh, *last_jh, *next_jh; 344 struct buffer_head *bhs[NR_BATCH];
313 int drop_count = 0; 345 struct journal_head *jh;
314 int cleanup_ret, retry = 0; 346 int retry = 0;
315 tid_t this_tid; 347
316 348 while (!retry && transaction->t_checkpoint_list) {
317 transaction = journal->j_checkpoint_transactions;
318 this_tid = transaction->t_tid;
319 jh = transaction->t_checkpoint_list;
320 last_jh = jh->b_cpprev;
321 next_jh = jh;
322 do {
323 struct buffer_head *bh; 349 struct buffer_head *bh;
324 350
325 jh = next_jh; 351 jh = transaction->t_checkpoint_list;
326 next_jh = jh->b_cpnext;
327 bh = jh2bh(jh); 352 bh = jh2bh(jh);
328 if (!jbd_trylock_bh_state(bh)) { 353 if (!jbd_trylock_bh_state(bh)) {
329 jbd_sync_bh(journal, bh); 354 jbd_sync_bh(journal, bh);
330 spin_lock(&journal->j_list_lock);
331 retry = 1; 355 retry = 1;
332 break; 356 break;
333 } 357 }
334 retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count); 358 retry = __process_buffer(journal, jh, bhs,
335 if (cond_resched_lock(&journal->j_list_lock)) { 359 &batch_count);
360 if (!retry &&
361 lock_need_resched(&journal->j_list_lock)) {
362 spin_unlock(&journal->j_list_lock);
336 retry = 1; 363 retry = 1;
337 break; 364 break;
338 } 365 }
339 } while (jh != last_jh && !retry); 366 }
340 367
341 if (batch_count) { 368 if (batch_count) {
369 if (!retry) {
370 spin_unlock(&journal->j_list_lock);
371 retry = 1;
372 }
342 __flush_batch(journal, bhs, &batch_count); 373 __flush_batch(journal, bhs, &batch_count);
343 retry = 1;
344 } 374 }
345 375
376 if (retry) {
377 spin_lock(&journal->j_list_lock);
378 goto restart;
379 }
346 /* 380 /*
347 * If someone cleaned up this transaction while we slept, we're 381 * Now we have cleaned up the first transaction's checkpoint
348 * done 382 * list. Let's clean up the second one.
349 */
350 if (journal->j_checkpoint_transactions != transaction)
351 break;
352 if (retry)
353 continue;
354 /*
355 * Maybe it's a new transaction, but it fell at the same
356 * address
357 */
358 if (transaction->t_tid != this_tid)
359 continue;
360 /*
361 * We have walked the whole transaction list without
362 * finding anything to write to disk. We had better be
363 * able to make some progress or we are in trouble.
364 */ 383 */
365 cleanup_ret = __cleanup_transaction(journal, transaction); 384 __wait_cp_io(journal, transaction);
366 J_ASSERT(drop_count != 0 || cleanup_ret != 0);
367 if (journal->j_checkpoint_transactions != transaction)
368 break;
369 } 385 }
386out:
370 spin_unlock(&journal->j_list_lock); 387 spin_unlock(&journal->j_list_lock);
371 result = cleanup_journal_tail(journal); 388 result = cleanup_journal_tail(journal);
372 if (result < 0) 389 if (result < 0)
373 return result; 390 return result;
374
375 return 0; 391 return 0;
376} 392}
377 393
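The control flow of the rewritten log_do_checkpoint(), stripped to a skeleton (a sketch: locking, the restart label and the tid recheck are elided; the per-buffer cases live in __process_buffer() above):

	/* Skeleton only. */
	static void example_checkpoint_one(journal_t *journal,
					   transaction_t *transaction)
	{
		struct buffer_head *bhs[NR_BATCH];
		int batch_count = 0, retry = 0;

		while (!retry && transaction->t_checkpoint_list) {
			struct journal_head *jh =
				transaction->t_checkpoint_list;

			/* wait, wait-for-commit, drop, or queue for write */
			retry = __process_buffer(journal, jh, bhs,
						 &batch_count);
		}
		if (batch_count)
			__flush_batch(journal, bhs, &batch_count);
		/* Second pass: reap the buffers whose IO has completed. */
		__wait_cp_io(journal, transaction);
	}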
@@ -456,52 +472,91 @@ int cleanup_journal_tail(journal_t *journal)
456/* Checkpoint list management */ 472/* Checkpoint list management */
457 473
458/* 474/*
475 * journal_clean_one_cp_list
476 *
477 * Find all the written-back checkpoint buffers in the given list and release them.
478 *
479 * Called with the journal locked.
480 * Called with j_list_lock held.
481 * Returns number of buffers reaped (for debug)
482 */
483
484static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
485{
486 struct journal_head *last_jh;
487 struct journal_head *next_jh = jh;
488 int ret, freed = 0;
489
490 *released = 0;
491 if (!jh)
492 return 0;
493
494 last_jh = jh->b_cpprev;
495 do {
496 jh = next_jh;
497 next_jh = jh->b_cpnext;
498 /* Use trylock because of the ranking */
499 if (jbd_trylock_bh_state(jh2bh(jh))) {
500 ret = __try_to_free_cp_buf(jh);
501 if (ret) {
502 freed++;
503 if (ret == 2) {
504 *released = 1;
505 return freed;
506 }
507 }
508 }
509 /*
510 * This function only frees up some memory if possible, so we
511 * don't have an obligation to finish processing. Bail out if
512 * preemption is requested:
513 */
514 if (need_resched())
515 return freed;
516 } while (jh != last_jh);
517
518 return freed;
519}
520
521/*
459 * journal_clean_checkpoint_list 522 * journal_clean_checkpoint_list
460 * 523 *
461 * Find all the written-back checkpoint buffers in the journal and release them. 524 * Find all the written-back checkpoint buffers in the journal and release them.
462 * 525 *
463 * Called with the journal locked. 526 * Called with the journal locked.
464 * Called with j_list_lock held. 527 * Called with j_list_lock held.
465 * Returns number of bufers reaped (for debug) 528 * Returns number of buffers reaped (for debug)
466 */ 529 */
467 530
468int __journal_clean_checkpoint_list(journal_t *journal) 531int __journal_clean_checkpoint_list(journal_t *journal)
469{ 532{
470 transaction_t *transaction, *last_transaction, *next_transaction; 533 transaction_t *transaction, *last_transaction, *next_transaction;
471 int ret = 0; 534 int ret = 0, released;
472 535
473 transaction = journal->j_checkpoint_transactions; 536 transaction = journal->j_checkpoint_transactions;
474 if (transaction == 0) 537 if (!transaction)
475 goto out; 538 goto out;
476 539
477 last_transaction = transaction->t_cpprev; 540 last_transaction = transaction->t_cpprev;
478 next_transaction = transaction; 541 next_transaction = transaction;
479 do { 542 do {
480 struct journal_head *jh;
481
482 transaction = next_transaction; 543 transaction = next_transaction;
483 next_transaction = transaction->t_cpnext; 544 next_transaction = transaction->t_cpnext;
484 jh = transaction->t_checkpoint_list; 545 ret += journal_clean_one_cp_list(transaction->
485 if (jh) { 546 t_checkpoint_list, &released);
486 struct journal_head *last_jh = jh->b_cpprev; 547 if (need_resched())
487 struct journal_head *next_jh = jh; 548 goto out;
488 549 if (released)
489 do { 550 continue;
490 jh = next_jh; 551 /*
491 next_jh = jh->b_cpnext; 552 * It is essential that we are as careful as in the case of
492 /* Use trylock because of the ranknig */ 553 * t_checkpoint_list when removing the buffer from the list, as
493 if (jbd_trylock_bh_state(jh2bh(jh))) 554 * not-yet-submitted buffers may still appear on the io_list
494 ret += __try_to_free_cp_buf(jh); 555 */
495 /* 556 ret += journal_clean_one_cp_list(transaction->
496 * This function only frees up some memory 557 t_checkpoint_io_list, &released);
497 * if possible so we dont have an obligation 558 if (need_resched())
498 * to finish processing. Bail out if preemption 559 goto out;
499 * requested:
500 */
501 if (need_resched())
502 goto out;
503 } while (jh != last_jh);
504 }
505 } while (transaction != last_transaction); 560 } while (transaction != last_transaction);
506out: 561out:
507 return ret; 562 return ret;
@@ -516,18 +571,22 @@ out:
516 * buffer updates committed in that transaction have safely been stored 571 * buffer updates committed in that transaction have safely been stored
517 * elsewhere on disk. To achieve this, all of the buffers in a 572 * elsewhere on disk. To achieve this, all of the buffers in a
518 * transaction need to be maintained on the transaction's checkpoint 573 * transaction need to be maintained on the transaction's checkpoint
519 * list until they have been rewritten, at which point this function is 574 * lists until they have been rewritten, at which point this function is
520 * called to remove the buffer from the existing transaction's 575 * called to remove the buffer from the existing transaction's
521 * checkpoint list. 576 * checkpoint lists.
577 *
578 * The function returns 1 if it frees the transaction, 0 otherwise.
522 * 579 *
523 * This function is called with the journal locked. 580 * This function is called with the journal locked.
524 * This function is called with j_list_lock held. 581 * This function is called with j_list_lock held.
582 * This function is called with jbd_lock_bh_state(jh2bh(jh))
525 */ 583 */
526 584
527void __journal_remove_checkpoint(struct journal_head *jh) 585int __journal_remove_checkpoint(struct journal_head *jh)
528{ 586{
529 transaction_t *transaction; 587 transaction_t *transaction;
530 journal_t *journal; 588 journal_t *journal;
589 int ret = 0;
531 590
532 JBUFFER_TRACE(jh, "entry"); 591 JBUFFER_TRACE(jh, "entry");
533 592
@@ -538,8 +597,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
538 journal = transaction->t_journal; 597 journal = transaction->t_journal;
539 598
540 __buffer_unlink(jh); 599 __buffer_unlink(jh);
600 jh->b_cp_transaction = NULL;
541 601
542 if (transaction->t_checkpoint_list != NULL) 602 if (transaction->t_checkpoint_list != NULL ||
603 transaction->t_checkpoint_io_list != NULL)
543 goto out; 604 goto out;
544 JBUFFER_TRACE(jh, "transaction has no more buffers"); 605 JBUFFER_TRACE(jh, "transaction has no more buffers");
545 606
@@ -565,8 +626,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
565 /* Just in case anybody was waiting for more transactions to be 626 /* Just in case anybody was waiting for more transactions to be
566 checkpointed... */ 627 checkpointed... */
567 wake_up(&journal->j_wait_logspace); 628 wake_up(&journal->j_wait_logspace);
629 ret = 1;
568out: 630out:
569 JBUFFER_TRACE(jh, "exit"); 631 JBUFFER_TRACE(jh, "exit");
632 return ret;
570} 633}
571 634
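How the new return value is meant to be consumed (a sketch; compare __try_to_free_cp_buf() earlier in this patch, which returns __journal_remove_checkpoint(jh) + 1): callers can distinguish "released a buffer" from "released the buffer and thereby the whole transaction", and stop walking a list that no longer exists.

	/* Illustration only. */
	static int example_try_free(struct journal_head *jh,
				    int *transaction_gone)
	{
		int ret = __journal_remove_checkpoint(jh) + 1;	/* 1 or 2 */

		if (ret == 2)
			*transaction_gone = 1;	/* stop walking this list */
		return ret;
	}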
572/* 635/*
@@ -628,6 +691,7 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
628 J_ASSERT(transaction->t_shadow_list == NULL); 691 J_ASSERT(transaction->t_shadow_list == NULL);
629 J_ASSERT(transaction->t_log_list == NULL); 692 J_ASSERT(transaction->t_log_list == NULL);
630 J_ASSERT(transaction->t_checkpoint_list == NULL); 693 J_ASSERT(transaction->t_checkpoint_list == NULL);
694 J_ASSERT(transaction->t_checkpoint_io_list == NULL);
631 J_ASSERT(transaction->t_updates == 0); 695 J_ASSERT(transaction->t_updates == 0);
632 J_ASSERT(journal->j_committing_transaction != transaction); 696 J_ASSERT(journal->j_committing_transaction != transaction);
633 J_ASSERT(journal->j_running_transaction != transaction); 697 J_ASSERT(journal->j_running_transaction != transaction);
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 041380fe667b..6d2dfed1de08 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -56,13 +56,20 @@ static int
56nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp, 56nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
57 struct nfsd3_attrstat *resp) 57 struct nfsd3_attrstat *resp)
58{ 58{
59 int nfserr; 59 int err, nfserr;
60 60
61 dprintk("nfsd: GETATTR(3) %s\n", 61 dprintk("nfsd: GETATTR(3) %s\n",
62 SVCFH_fmt(&argp->fh)); 62 SVCFH_fmt(&argp->fh));
63 63
64 fh_copy(&resp->fh, &argp->fh); 64 fh_copy(&resp->fh, &argp->fh);
65 nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP); 65 nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
66 if (nfserr)
67 RETURN_STATUS(nfserr);
68
69 err = vfs_getattr(resp->fh.fh_export->ex_mnt,
70 resp->fh.fh_dentry, &resp->stat);
71 nfserr = nfserrno(err);
72
66 RETURN_STATUS(nfserr); 73 RETURN_STATUS(nfserr);
67} 74}
68 75
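The shape of the refactor, for reference (a sketch): the XDR encoder no longer calls vfs_getattr() itself; each caller fetches the attributes once into a struct kstat and passes it down, so a getattr failure can surface as an NFS status instead of encoding stale or uninitialized data.

	/* Sketch of the new call pattern (illustration only). */
	static int example_getattr_then_encode(struct svc_rqst *rqstp,
					       struct svc_fh *fhp, u32 *p)
	{
		struct kstat stat;
		int err = vfs_getattr(fhp->fh_export->ex_mnt,
				      fhp->fh_dentry, &stat);

		if (err)
			return nfserrno(err);	/* report, don't encode junk */
		p = encode_fattr3(rqstp, p, fhp, &stat);
		return 0;
	}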
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 9147b8524d05..243d94b9653a 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -154,37 +154,34 @@ decode_sattr3(u32 *p, struct iattr *iap)
154} 154}
155 155
156static inline u32 * 156static inline u32 *
157encode_fattr3(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) 157encode_fattr3(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp,
158 struct kstat *stat)
158{ 159{
159 struct vfsmount *mnt = fhp->fh_export->ex_mnt;
160 struct dentry *dentry = fhp->fh_dentry; 160 struct dentry *dentry = fhp->fh_dentry;
161 struct kstat stat;
162 struct timespec time; 161 struct timespec time;
163 162
164 vfs_getattr(mnt, dentry, &stat); 163 *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
165 164 *p++ = htonl((u32) stat->mode);
166 *p++ = htonl(nfs3_ftypes[(stat.mode & S_IFMT) >> 12]); 165 *p++ = htonl((u32) stat->nlink);
167 *p++ = htonl((u32) stat.mode); 166 *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
168 *p++ = htonl((u32) stat.nlink); 167 *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
169 *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid)); 168 if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
170 *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid));
171 if (S_ISLNK(stat.mode) && stat.size > NFS3_MAXPATHLEN) {
172 p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN); 169 p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
173 } else { 170 } else {
174 p = xdr_encode_hyper(p, (u64) stat.size); 171 p = xdr_encode_hyper(p, (u64) stat->size);
175 } 172 }
176 p = xdr_encode_hyper(p, ((u64)stat.blocks) << 9); 173 p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9);
177 *p++ = htonl((u32) MAJOR(stat.rdev)); 174 *p++ = htonl((u32) MAJOR(stat->rdev));
178 *p++ = htonl((u32) MINOR(stat.rdev)); 175 *p++ = htonl((u32) MINOR(stat->rdev));
179 if (is_fsid(fhp, rqstp->rq_reffh)) 176 if (is_fsid(fhp, rqstp->rq_reffh))
180 p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid); 177 p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid);
181 else 178 else
182 p = xdr_encode_hyper(p, (u64) huge_encode_dev(stat.dev)); 179 p = xdr_encode_hyper(p, (u64) huge_encode_dev(stat->dev));
183 p = xdr_encode_hyper(p, (u64) stat.ino); 180 p = xdr_encode_hyper(p, (u64) stat->ino);
184 p = encode_time3(p, &stat.atime); 181 p = encode_time3(p, &stat->atime);
185 lease_get_mtime(dentry->d_inode, &time); 182 lease_get_mtime(dentry->d_inode, &time);
186 p = encode_time3(p, &time); 183 p = encode_time3(p, &time);
187 p = encode_time3(p, &stat.ctime); 184 p = encode_time3(p, &stat->ctime);
188 185
189 return p; 186 return p;
190} 187}
@@ -232,8 +229,14 @@ encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
232{ 229{
233 struct dentry *dentry = fhp->fh_dentry; 230 struct dentry *dentry = fhp->fh_dentry;
234 if (dentry && dentry->d_inode != NULL) { 231 if (dentry && dentry->d_inode != NULL) {
235 *p++ = xdr_one; /* attributes follow */ 232 int err;
236 return encode_fattr3(rqstp, p, fhp); 233 struct kstat stat;
234
235 err = vfs_getattr(fhp->fh_export->ex_mnt, dentry, &stat);
236 if (!err) {
237 *p++ = xdr_one; /* attributes follow */
238 return encode_fattr3(rqstp, p, fhp, &stat);
239 }
237 } 240 }
238 *p++ = xdr_zero; 241 *p++ = xdr_zero;
239 return p; 242 return p;
@@ -616,7 +619,7 @@ nfs3svc_encode_attrstat(struct svc_rqst *rqstp, u32 *p,
616 struct nfsd3_attrstat *resp) 619 struct nfsd3_attrstat *resp)
617{ 620{
618 if (resp->status == 0) 621 if (resp->status == 0)
619 p = encode_fattr3(rqstp, p, &resp->fh); 622 p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat);
620 return xdr_ressize_check(rqstp, p); 623 return xdr_ressize_check(rqstp, p);
621} 624}
622 625
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index b45999ff33e6..aa7bb41b293d 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -152,46 +152,44 @@ decode_sattr(u32 *p, struct iattr *iap)
152} 152}
153 153
154static inline u32 * 154static inline u32 *
155encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) 155encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp,
156 struct kstat *stat)
156{ 157{
157 struct vfsmount *mnt = fhp->fh_export->ex_mnt;
158 struct dentry *dentry = fhp->fh_dentry; 158 struct dentry *dentry = fhp->fh_dentry;
159 struct kstat stat;
160 int type; 159 int type;
161 struct timespec time; 160 struct timespec time;
162 161
163 vfs_getattr(mnt, dentry, &stat); 162 type = (stat->mode & S_IFMT);
164 type = (stat.mode & S_IFMT);
165 163
166 *p++ = htonl(nfs_ftypes[type >> 12]); 164 *p++ = htonl(nfs_ftypes[type >> 12]);
167 *p++ = htonl((u32) stat.mode); 165 *p++ = htonl((u32) stat->mode);
168 *p++ = htonl((u32) stat.nlink); 166 *p++ = htonl((u32) stat->nlink);
169 *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid)); 167 *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
170 *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid)); 168 *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
171 169
172 if (S_ISLNK(type) && stat.size > NFS_MAXPATHLEN) { 170 if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
173 *p++ = htonl(NFS_MAXPATHLEN); 171 *p++ = htonl(NFS_MAXPATHLEN);
174 } else { 172 } else {
175 *p++ = htonl((u32) stat.size); 173 *p++ = htonl((u32) stat->size);
176 } 174 }
177 *p++ = htonl((u32) stat.blksize); 175 *p++ = htonl((u32) stat->blksize);
178 if (S_ISCHR(type) || S_ISBLK(type)) 176 if (S_ISCHR(type) || S_ISBLK(type))
179 *p++ = htonl(new_encode_dev(stat.rdev)); 177 *p++ = htonl(new_encode_dev(stat->rdev));
180 else 178 else
181 *p++ = htonl(0xffffffff); 179 *p++ = htonl(0xffffffff);
182 *p++ = htonl((u32) stat.blocks); 180 *p++ = htonl((u32) stat->blocks);
183 if (is_fsid(fhp, rqstp->rq_reffh)) 181 if (is_fsid(fhp, rqstp->rq_reffh))
184 *p++ = htonl((u32) fhp->fh_export->ex_fsid); 182 *p++ = htonl((u32) fhp->fh_export->ex_fsid);
185 else 183 else
186 *p++ = htonl(new_encode_dev(stat.dev)); 184 *p++ = htonl(new_encode_dev(stat->dev));
187 *p++ = htonl((u32) stat.ino); 185 *p++ = htonl((u32) stat->ino);
188 *p++ = htonl((u32) stat.atime.tv_sec); 186 *p++ = htonl((u32) stat->atime.tv_sec);
189 *p++ = htonl(stat.atime.tv_nsec ? stat.atime.tv_nsec / 1000 : 0); 187 *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
190 lease_get_mtime(dentry->d_inode, &time); 188 lease_get_mtime(dentry->d_inode, &time);
191 *p++ = htonl((u32) time.tv_sec); 189 *p++ = htonl((u32) time.tv_sec);
192 *p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); 190 *p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
193 *p++ = htonl((u32) stat.ctime.tv_sec); 191 *p++ = htonl((u32) stat->ctime.tv_sec);
194 *p++ = htonl(stat.ctime.tv_nsec ? stat.ctime.tv_nsec / 1000 : 0); 192 *p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0);
195 193
196 return p; 194 return p;
197} 195}
@@ -199,7 +197,9 @@ encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
199/* Helper function for NFSv2 ACL code */ 197/* Helper function for NFSv2 ACL code */
200u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp) 198u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
201{ 199{
202 return encode_fattr(rqstp, p, fhp); 200 struct kstat stat;
201 vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, &stat);
202 return encode_fattr(rqstp, p, fhp, &stat);
203} 203}
204 204
205/* 205/*
@@ -394,7 +394,7 @@ int
394nfssvc_encode_attrstat(struct svc_rqst *rqstp, u32 *p, 394nfssvc_encode_attrstat(struct svc_rqst *rqstp, u32 *p,
395 struct nfsd_attrstat *resp) 395 struct nfsd_attrstat *resp)
396{ 396{
397 p = encode_fattr(rqstp, p, &resp->fh); 397 p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
398 return xdr_ressize_check(rqstp, p); 398 return xdr_ressize_check(rqstp, p);
399} 399}
400 400
@@ -403,7 +403,7 @@ nfssvc_encode_diropres(struct svc_rqst *rqstp, u32 *p,
403 struct nfsd_diropres *resp) 403 struct nfsd_diropres *resp)
404{ 404{
405 p = encode_fh(p, &resp->fh); 405 p = encode_fh(p, &resp->fh);
406 p = encode_fattr(rqstp, p, &resp->fh); 406 p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
407 return xdr_ressize_check(rqstp, p); 407 return xdr_ressize_check(rqstp, p);
408} 408}
409 409
@@ -428,7 +428,7 @@ int
428nfssvc_encode_readres(struct svc_rqst *rqstp, u32 *p, 428nfssvc_encode_readres(struct svc_rqst *rqstp, u32 *p,
429 struct nfsd_readres *resp) 429 struct nfsd_readres *resp)
430{ 430{
431 p = encode_fattr(rqstp, p, &resp->fh); 431 p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
432 *p++ = htonl(resp->count); 432 *p++ = htonl(resp->count);
433 xdr_ressize_check(rqstp, p); 433 xdr_ressize_check(rqstp, p);
434 434
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index af7c3c3074b0..df4019f04560 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -717,27 +717,33 @@ nfsd_close(struct file *filp)
717 * As this calls fsync (not fdatasync) there is no need for a write_inode 717 * As this calls fsync (not fdatasync) there is no need for a write_inode
718 * after it. 718 * after it.
719 */ 719 */
720static inline void nfsd_dosync(struct file *filp, struct dentry *dp, 720static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
721 struct file_operations *fop) 721 struct file_operations *fop)
722{ 722{
723 struct inode *inode = dp->d_inode; 723 struct inode *inode = dp->d_inode;
724 int (*fsync) (struct file *, struct dentry *, int); 724 int (*fsync) (struct file *, struct dentry *, int);
725 int err = nfs_ok;
725 726
726 filemap_fdatawrite(inode->i_mapping); 727 filemap_fdatawrite(inode->i_mapping);
727 if (fop && (fsync = fop->fsync)) 728 if (fop && (fsync = fop->fsync))
 728 fsync(filp, dp, 0); 729 err = fsync(filp, dp, 0);
729 filemap_fdatawait(inode->i_mapping); 730 filemap_fdatawait(inode->i_mapping);
731
732 return nfserrno(err);
730} 733}
731 734
732 735
733static void 736static int
734nfsd_sync(struct file *filp) 737nfsd_sync(struct file *filp)
735{ 738{
739 int err;
736 struct inode *inode = filp->f_dentry->d_inode; 740 struct inode *inode = filp->f_dentry->d_inode;
737 dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name); 741 dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
738 down(&inode->i_sem); 742 down(&inode->i_sem);
 739 nfsd_dosync(filp, filp->f_dentry, filp->f_op); 743 err = nfsd_dosync(filp, filp->f_dentry, filp->f_op);
740 up(&inode->i_sem); 744 up(&inode->i_sem);
745
746 return err;
741} 747}
742 748
743void 749void
@@ -874,6 +880,16 @@ out:
874 return err; 880 return err;
875} 881}
876 882
883static void kill_suid(struct dentry *dentry)
884{
885 struct iattr ia;
886 ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
887
888 down(&dentry->d_inode->i_sem);
889 notify_change(dentry, &ia);
890 up(&dentry->d_inode->i_sem);
891}
892
877static inline int 893static inline int
878nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, 894nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
879 loff_t offset, struct kvec *vec, int vlen, 895 loff_t offset, struct kvec *vec, int vlen,
@@ -927,14 +943,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
927 } 943 }
928 944
929 /* clear setuid/setgid flag after write */ 945 /* clear setuid/setgid flag after write */
930 if (err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID))) { 946 if (err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
931 struct iattr ia; 947 kill_suid(dentry);
932 ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
933
934 down(&inode->i_sem);
935 notify_change(dentry, &ia);
936 up(&inode->i_sem);
937 }
938 948
939 if (err >= 0 && stable) { 949 if (err >= 0 && stable) {
940 static ino_t last_ino; 950 static ino_t last_ino;
@@ -962,7 +972,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
962 972
963 if (inode->i_state & I_DIRTY) { 973 if (inode->i_state & I_DIRTY) {
964 dprintk("nfsd: write sync %d\n", current->pid); 974 dprintk("nfsd: write sync %d\n", current->pid);
 965 nfsd_sync(file); 975 err = nfsd_sync(file);
966 } 976 }
967#if 0 977#if 0
968 wake_up(&inode->i_wait); 978 wake_up(&inode->i_wait);
@@ -1066,7 +1076,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
1066 return err; 1076 return err;
1067 if (EX_ISSYNC(fhp->fh_export)) { 1077 if (EX_ISSYNC(fhp->fh_export)) {
1068 if (file->f_op && file->f_op->fsync) { 1078 if (file->f_op && file->f_op->fsync) {
1069 nfsd_sync(file); 1079 err = nfsd_sync(file);
1070 } else { 1080 } else {
1071 err = nfserr_notsupp; 1081 err = nfserr_notsupp;
1072 } 1082 }
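The net effect of the vfs.c hunks is that sync failures stop being swallowed: nfsd_dosync() now maps the fsync() result through nfserrno(), and nfsd_sync()/nfsd_commit() hand that status back to their callers. A sketch of the resulting stable-write pattern, assuming the changed helpers above (write_stable() is an illustrative name):

static int write_stable(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	int err = 0;

	if (inode->i_state & I_DIRTY)
		err = nfsd_sync(file);	/* now an NFS status, not void */

	return err;	/* 0 (nfs_ok) on success */
}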
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
index 656bc43431b9..e227a04261ab 100644
--- a/fs/partitions/Kconfig
+++ b/fs/partitions/Kconfig
@@ -85,7 +85,7 @@ config ATARI_PARTITION
85 85
86config IBM_PARTITION 86config IBM_PARTITION
87 bool "IBM disk label and partition support" 87 bool "IBM disk label and partition support"
88 depends on PARTITION_ADVANCED && ARCH_S390 88 depends on PARTITION_ADVANCED && S390
89 help 89 help
90 Say Y here if you would like to be able to read the hard disk 90 Say Y here if you would like to be able to read the hard disk
91 partition table format used by IBM DASD disks operating under CMS. 91 partition table format used by IBM DASD disks operating under CMS.
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index 6327bcb2d73d..78010ad60e47 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -56,7 +56,10 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
56 struct hd_geometry *geo; 56 struct hd_geometry *geo;
57 char type[5] = {0,}; 57 char type[5] = {0,};
58 char name[7] = {0,}; 58 char name[7] = {0,};
59 struct vtoc_volume_label *vlabel; 59 union label_t {
60 struct vtoc_volume_label vol;
61 struct vtoc_cms_label cms;
62 } *label;
60 unsigned char *data; 63 unsigned char *data;
61 Sector sect; 64 Sector sect;
62 65
@@ -64,9 +67,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
64 goto out_noinfo; 67 goto out_noinfo;
65 if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL) 68 if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
66 goto out_nogeo; 69 goto out_nogeo;
67 if ((vlabel = kmalloc(sizeof(struct vtoc_volume_label), 70 if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL)
68 GFP_KERNEL)) == NULL) 71 goto out_nolab;
69 goto out_novlab;
70 72
71 if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 || 73 if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
72 ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0) 74 ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
@@ -87,7 +89,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
87 strncpy(name, data + 8, 6); 89 strncpy(name, data + 8, 6);
88 else 90 else
89 strncpy(name, data + 4, 6); 91 strncpy(name, data + 4, 6);
90 memcpy (vlabel, data, sizeof(struct vtoc_volume_label)); 92 memcpy(label, data, sizeof(union label_t));
91 put_dev_sector(sect); 93 put_dev_sector(sect);
92 94
93 EBCASC(type, 4); 95 EBCASC(type, 4);
@@ -100,14 +102,12 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
100 /* 102 /*
101 * VM style CMS1 labeled disk 103 * VM style CMS1 labeled disk
102 */ 104 */
103 int *label = (int *) vlabel; 105 if (label->cms.disk_offset != 0) {
104
105 if (label[13] != 0) {
106 printk("CMS1/%8s(MDSK):", name); 106 printk("CMS1/%8s(MDSK):", name);
107 /* disk is reserved minidisk */ 107 /* disk is reserved minidisk */
108 blocksize = label[3]; 108 blocksize = label->cms.block_size;
109 offset = label[13]; 109 offset = label->cms.disk_offset;
110 size = (label[7] - 1)*(blocksize >> 9); 110 size = (label->cms.block_count - 1) * (blocksize >> 9);
111 } else { 111 } else {
112 printk("CMS1/%8s:", name); 112 printk("CMS1/%8s:", name);
113 offset = (info->label_block + 1); 113 offset = (info->label_block + 1);
@@ -126,7 +126,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
126 printk("VOL1/%8s:", name); 126 printk("VOL1/%8s:", name);
127 127
128 /* get block number and read then go through format1 labels */ 128 /* get block number and read then go through format1 labels */
129 blk = cchhb2blk(&vlabel->vtoc, geo) + 1; 129 blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
130 counter = 0; 130 counter = 0;
131 while ((data = read_dev_sector(bdev, blk*(blocksize/512), 131 while ((data = read_dev_sector(bdev, blk*(blocksize/512),
132 &sect)) != NULL) { 132 &sect)) != NULL) {
@@ -174,7 +174,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
174 } 174 }
175 175
176 printk("\n"); 176 printk("\n");
177 kfree(vlabel); 177 kfree(label);
178 kfree(geo); 178 kfree(geo);
179 kfree(info); 179 kfree(info);
180 return 1; 180 return 1;
@@ -182,8 +182,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
182out_readerr: 182out_readerr:
183out_badsect: 183out_badsect:
184out_noioctl: 184out_noioctl:
185 kfree(vlabel); 185 kfree(label);
186out_novlab: 186out_nolab:
187 kfree(geo); 187 kfree(geo);
188out_nogeo: 188out_nogeo:
189 kfree(info); 189 kfree(info);
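The ibm.c rewrite replaces magic int-array indexing (label[3], label[7], label[13]) with named fields read through a union of the two possible label layouts. A self-contained sketch of the idiom; the field layout below is illustrative, not the real vtoc structures:

#include <string.h>

struct demo_cms_label {		/* stand-in for vtoc_cms_label */
	unsigned int magic;
	unsigned int block_size;
	unsigned int block_count;
	unsigned int disk_offset;
};

struct demo_vol_label {		/* stand-in for vtoc_volume_label */
	char volid[6];
	char vtoc[10];
};

union demo_label {
	struct demo_vol_label vol;
	struct demo_cms_label cms;
};

static unsigned int cms_disk_offset(const unsigned char *sector)
{
	union demo_label label;

	/* copy the raw sector once, then read the typed view */
	memcpy(&label, sector, sizeof(label));
	return label.cms.disk_offset;	/* replaces ((int *)buf)[13]-style code */
}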
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 3e1239e4b303..5e9251f65317 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -308,7 +308,7 @@ int proc_pid_status(struct task_struct *task, char * buffer)
308 buffer = task_sig(task, buffer); 308 buffer = task_sig(task, buffer);
309 buffer = task_cap(task, buffer); 309 buffer = task_cap(task, buffer);
310 buffer = cpuset_task_status_allowed(task, buffer); 310 buffer = cpuset_task_status_allowed(task, buffer);
311#if defined(CONFIG_ARCH_S390) 311#if defined(CONFIG_S390)
312 buffer = task_show_regs(task, buffer); 312 buffer = task_show_regs(task, buffer);
313#endif 313#endif
314 return buffer - orig; 314 return buffer - orig;
diff --git a/fs/ramfs/Makefile b/fs/ramfs/Makefile
index f096f3007091..5a0236e02ee1 100644
--- a/fs/ramfs/Makefile
+++ b/fs/ramfs/Makefile
@@ -4,4 +4,6 @@
4 4
5obj-$(CONFIG_RAMFS) += ramfs.o 5obj-$(CONFIG_RAMFS) += ramfs.o
6 6
7ramfs-objs := inode.o 7file-mmu-y := file-nommu.o
8file-mmu-$(CONFIG_MMU) := file-mmu.o
9ramfs-objs += inode.o $(file-mmu-y)
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
new file mode 100644
index 000000000000..2115383dcc8d
--- /dev/null
+++ b/fs/ramfs/file-mmu.c
@@ -0,0 +1,57 @@
1/* file-mmu.c: ramfs MMU-based file operations
2 *
3 * Resizable simple ram filesystem for Linux.
4 *
5 * Copyright (C) 2000 Linus Torvalds.
6 * 2000 Transmeta Corp.
7 *
8 * Usage limits added by David Gibson, Linuxcare Australia.
9 * This file is released under the GPL.
10 */
11
12/*
13 * NOTE! This filesystem is probably most useful
14 * not as a real filesystem, but as an example of
15 * how virtual filesystems can be written.
16 *
17 * It doesn't get much simpler than this. Consider
18 * that this file implements the full semantics of
19 * a POSIX-compliant read-write filesystem.
20 *
21 * Note in particular how the filesystem does not
22 * need to implement any data structures of its own
23 * to keep track of the virtual data: using the VFS
24 * caches is sufficient.
25 */
26
27#include <linux/module.h>
28#include <linux/fs.h>
29#include <linux/pagemap.h>
30#include <linux/highmem.h>
31#include <linux/init.h>
32#include <linux/string.h>
33#include <linux/smp_lock.h>
34#include <linux/backing-dev.h>
35#include <linux/ramfs.h>
36
37#include <asm/uaccess.h>
38#include "internal.h"
39
40struct address_space_operations ramfs_aops = {
41 .readpage = simple_readpage,
42 .prepare_write = simple_prepare_write,
43 .commit_write = simple_commit_write
44};
45
46struct file_operations ramfs_file_operations = {
47 .read = generic_file_read,
48 .write = generic_file_write,
49 .mmap = generic_file_mmap,
50 .fsync = simple_sync_file,
51 .sendfile = generic_file_sendfile,
52 .llseek = generic_file_llseek,
53};
54
55struct inode_operations ramfs_file_inode_operations = {
56 .getattr = simple_getattr,
57};
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
new file mode 100644
index 000000000000..3f810acd0bfa
--- /dev/null
+++ b/fs/ramfs/file-nommu.c
@@ -0,0 +1,292 @@
1/* file-nommu.c: no-MMU version of ramfs
2 *
3 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/fs.h>
14#include <linux/pagemap.h>
15#include <linux/highmem.h>
16#include <linux/init.h>
17#include <linux/string.h>
18#include <linux/smp_lock.h>
19#include <linux/backing-dev.h>
20#include <linux/ramfs.h>
21#include <linux/quotaops.h>
22#include <linux/pagevec.h>
23#include <linux/mman.h>
24
25#include <asm/uaccess.h>
26#include "internal.h"
27
28static int ramfs_nommu_setattr(struct dentry *, struct iattr *);
29
30struct address_space_operations ramfs_aops = {
31 .readpage = simple_readpage,
32 .prepare_write = simple_prepare_write,
33 .commit_write = simple_commit_write
34};
35
36struct file_operations ramfs_file_operations = {
37 .mmap = ramfs_nommu_mmap,
38 .get_unmapped_area = ramfs_nommu_get_unmapped_area,
39 .read = generic_file_read,
40 .write = generic_file_write,
41 .fsync = simple_sync_file,
42 .sendfile = generic_file_sendfile,
43 .llseek = generic_file_llseek,
44};
45
46struct inode_operations ramfs_file_inode_operations = {
47 .setattr = ramfs_nommu_setattr,
48 .getattr = simple_getattr,
49};
50
51/*****************************************************************************/
52/*
53 * add a contiguous set of pages into a ramfs inode when it's truncated from
54 * size 0 on the assumption that it's going to be used for an mmap of shared
55 * memory
56 */
57static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
58{
59 struct pagevec lru_pvec;
60 unsigned long npages, xpages, loop, limit;
61 struct page *pages;
62 unsigned order;
63 void *data;
64 int ret;
65
66 /* make various checks */
67 order = get_order(newsize);
68 if (unlikely(order >= MAX_ORDER))
69 goto too_big;
70
71 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
72 if (limit != RLIM_INFINITY && newsize > limit)
73 goto fsize_exceeded;
74
75 if (newsize > inode->i_sb->s_maxbytes)
76 goto too_big;
77
78 i_size_write(inode, newsize);
79
80 /* allocate enough contiguous pages to be able to satisfy the
81 * request */
82 pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
83 if (!pages)
84 return -ENOMEM;
85
86 /* split the high-order page into an array of single pages */
87 xpages = 1UL << order;
88 npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
89
90 for (loop = 0; loop < npages; loop++)
91 set_page_count(pages + loop, 1);
92
93 /* trim off any pages we don't actually require */
94 for (loop = npages; loop < xpages; loop++)
95 __free_page(pages + loop);
96
97 /* clear the memory we allocated */
98 newsize = PAGE_SIZE * npages;
99 data = page_address(pages);
100 memset(data, 0, newsize);
101
102 /* attach all the pages to the inode's address space */
103 pagevec_init(&lru_pvec, 0);
104 for (loop = 0; loop < npages; loop++) {
105 struct page *page = pages + loop;
106
107 ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
108 if (ret < 0)
109 goto add_error;
110
111 if (!pagevec_add(&lru_pvec, page))
112 __pagevec_lru_add(&lru_pvec);
113
114 unlock_page(page);
115 }
116
117 pagevec_lru_add(&lru_pvec);
118 return 0;
119
120 fsize_exceeded:
121 send_sig(SIGXFSZ, current, 0);
122 too_big:
123 return -EFBIG;
124
125 add_error:
126 page_cache_release(pages + loop);
127 for (loop++; loop < npages; loop++)
128 __free_page(pages + loop);
129 return ret;
130}
131
132/*****************************************************************************/
133/*
134 * check that file shrinkage doesn't leave any VMAs dangling in midair
135 */
136static int ramfs_nommu_check_mappings(struct inode *inode,
137 size_t newsize, size_t size)
138{
139 struct vm_area_struct *vma;
140 struct prio_tree_iter iter;
141
142 /* search for VMAs that fall within the dead zone */
143 vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
144 newsize >> PAGE_SHIFT,
145 (size + PAGE_SIZE - 1) >> PAGE_SHIFT
146 ) {
147 /* found one - only interested if it's shared out of the page
148 * cache */
149 if (vma->vm_flags & VM_SHARED)
150 return -ETXTBSY; /* not quite true, but near enough */
151 }
152
153 return 0;
154}
155
156/*****************************************************************************/
157/*
 158 * resize the file: expand from zero for a shared mmap, or truncate
159 */
160static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
161{
162 int ret;
163
164 /* assume a truncate from zero size is going to be for the purposes of
165 * shared mmap */
166 if (size == 0) {
167 if (unlikely(newsize >> 32))
168 return -EFBIG;
169
170 return ramfs_nommu_expand_for_mapping(inode, newsize);
171 }
172
173 /* check that a decrease in size doesn't cut off any shared mappings */
174 if (newsize < size) {
175 ret = ramfs_nommu_check_mappings(inode, newsize, size);
176 if (ret < 0)
177 return ret;
178 }
179
 180 ret = vmtruncate(inode, newsize);
181
182 return ret;
183}
184
185/*****************************************************************************/
186/*
187 * handle a change of attributes
188 * - we're specifically interested in a change of size
189 */
190static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
191{
192 struct inode *inode = dentry->d_inode;
193 unsigned int old_ia_valid = ia->ia_valid;
194 int ret = 0;
195
196 /* by providing our own setattr() method, we skip this quotaism */
197 if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
198 (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
199 ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
200
201 /* pick out size-changing events */
202 if (ia->ia_valid & ATTR_SIZE) {
203 loff_t size = i_size_read(inode);
204 if (ia->ia_size != size) {
205 ret = ramfs_nommu_resize(inode, ia->ia_size, size);
206 if (ret < 0 || ia->ia_valid == ATTR_SIZE)
207 goto out;
208 } else {
209 /* we skipped the truncate but must still update
210 * timestamps
211 */
212 ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
213 }
214 }
215
216 ret = inode_setattr(inode, ia);
217 out:
218 ia->ia_valid = old_ia_valid;
219 return ret;
220}
221
222/*****************************************************************************/
223/*
224 * try to determine where a shared mapping can be made
225 * - we require that:
226 * - the pages to be mapped must exist
227 * - the pages be physically contiguous in sequence
228 */
229unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
230 unsigned long addr, unsigned long len,
231 unsigned long pgoff, unsigned long flags)
232{
233 unsigned long maxpages, lpages, nr, loop, ret;
234 struct inode *inode = file->f_dentry->d_inode;
235 struct page **pages = NULL, **ptr, *page;
236 loff_t isize;
237
238 if (!(flags & MAP_SHARED))
239 return addr;
240
241 /* the mapping mustn't extend beyond the EOF */
242 lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
243 isize = i_size_read(inode);
244
245 ret = -EINVAL;
246 maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
247 if (pgoff >= maxpages)
248 goto out;
249
250 if (maxpages - pgoff < lpages)
251 goto out;
252
253 /* gang-find the pages */
254 ret = -ENOMEM;
255 pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
256 if (!pages)
257 goto out;
258
259 nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
260 if (nr != lpages)
261 goto out; /* leave if some pages were missing */
262
263 /* check the pages for physical adjacency */
264 ptr = pages;
265 page = *ptr++;
266 page++;
267 for (loop = lpages; loop > 1; loop--)
268 if (*ptr++ != page++)
269 goto out;
270
271 /* okay - all conditions fulfilled */
272 ret = (unsigned long) page_address(pages[0]);
273
274 out:
275 if (pages) {
276 ptr = pages;
277 for (loop = lpages; loop > 0; loop--)
278 put_page(*ptr++);
279 kfree(pages);
280 }
281
282 return ret;
283}
284
285/*****************************************************************************/
286/*
287 * set up a mapping
288 */
289int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
290{
291 return 0;
292}
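The expand-for-mapping path above is driven entirely from userspace by truncating a fresh ramfs file up from size zero and then mapping it MAP_SHARED; on no-MMU the mapping succeeds only because the pages were allocated as one physically contiguous run. A hedged userspace sketch of that sequence (the mount point is an assumption):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int map_ramfs_region(size_t len, void **out)
{
	int fd = open("/mnt/ramfs/shmbuf", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return -1;
	if (ftruncate(fd, len) < 0) {	/* hits ramfs_nommu_expand_for_mapping() */
		close(fd);
		return -1;
	}
	*out = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping keeps its own reference */
	return *out == MAP_FAILED ? -1 : 0;
}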
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 0a88917605ae..c66bd5e4c05c 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -34,13 +34,12 @@
34#include <linux/ramfs.h> 34#include <linux/ramfs.h>
35 35
36#include <asm/uaccess.h> 36#include <asm/uaccess.h>
37#include "internal.h"
37 38
38/* some random number */ 39/* some random number */
39#define RAMFS_MAGIC 0x858458f6 40#define RAMFS_MAGIC 0x858458f6
40 41
41static struct super_operations ramfs_ops; 42static struct super_operations ramfs_ops;
42static struct address_space_operations ramfs_aops;
43static struct inode_operations ramfs_file_inode_operations;
44static struct inode_operations ramfs_dir_inode_operations; 43static struct inode_operations ramfs_dir_inode_operations;
45 44
46static struct backing_dev_info ramfs_backing_dev_info = { 45static struct backing_dev_info ramfs_backing_dev_info = {
@@ -142,25 +141,6 @@ static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char *
142 return error; 141 return error;
143} 142}
144 143
145static struct address_space_operations ramfs_aops = {
146 .readpage = simple_readpage,
147 .prepare_write = simple_prepare_write,
148 .commit_write = simple_commit_write
149};
150
151struct file_operations ramfs_file_operations = {
152 .read = generic_file_read,
153 .write = generic_file_write,
154 .mmap = generic_file_mmap,
155 .fsync = simple_sync_file,
156 .sendfile = generic_file_sendfile,
157 .llseek = generic_file_llseek,
158};
159
160static struct inode_operations ramfs_file_inode_operations = {
161 .getattr = simple_getattr,
162};
163
164static struct inode_operations ramfs_dir_inode_operations = { 144static struct inode_operations ramfs_dir_inode_operations = {
165 .create = ramfs_create, 145 .create = ramfs_create,
166 .lookup = simple_lookup, 146 .lookup = simple_lookup,
diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h
new file mode 100644
index 000000000000..272c8a7120b0
--- /dev/null
+++ b/fs/ramfs/internal.h
@@ -0,0 +1,15 @@
1/* internal.h: ramfs internal definitions
2 *
3 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12
13extern struct address_space_operations ramfs_aops;
14extern struct file_operations ramfs_file_operations;
15extern struct inode_operations ramfs_file_inode_operations;
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 6183eab006d4..cb03bbe92cdf 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -216,4 +216,5 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
216#define smp_mb__before_atomic_inc() smp_mb() 216#define smp_mb__before_atomic_inc() smp_mb()
217#define smp_mb__after_atomic_inc() smp_mb() 217#define smp_mb__after_atomic_inc() smp_mb()
218 218
219#include <asm-generic/atomic.h>
219#endif /* _ALPHA_ATOMIC_H */ 220#endif /* _ALPHA_ATOMIC_H */
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index 680f7ecbb28f..9dc7256cf979 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -16,7 +16,7 @@
16#define dma_free_coherent(dev, size, va, addr) \ 16#define dma_free_coherent(dev, size, va, addr) \
17 pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr) 17 pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr)
18#define dma_map_page(dev, page, off, size, dir) \ 18#define dma_map_page(dev, page, off, size, dir) \
19 pci_map_single(alpha_gendev_to_pci(dev), page, off, size, dir) 19 pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir)
20#define dma_unmap_page(dev, addr, size, dir) \ 20#define dma_unmap_page(dev, addr, size, dir) \
21 pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir) 21 pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir)
22#define dma_map_sg(dev, sg, nents, dir) \ 22#define dma_map_sg(dev, sg, nents, dir) \
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index c0593f9b21e1..7bb6a36c96a1 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -13,6 +13,8 @@ typedef struct {
13 13
14#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ 14#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
15 15
16void ack_bad_irq(unsigned int irq);
17
16#define HARDIRQ_BITS 12 18#define HARDIRQ_BITS 12
17 19
18/* 20/*
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h
index eb9c279045ef..f6439532a262 100644
--- a/include/asm-alpha/mman.h
+++ b/include/asm-alpha/mman.h
@@ -42,6 +42,7 @@
42#define MADV_WILLNEED 3 /* will need these pages */ 42#define MADV_WILLNEED 3 /* will need these pages */
43#define MADV_SPACEAVAIL 5 /* ensure resources are available */ 43#define MADV_SPACEAVAIL 5 /* ensure resources are available */
44#define MADV_DONTNEED 6 /* don't need these pages */ 44#define MADV_DONTNEED 6 /* don't need these pages */
45#define MADV_REMOVE 7 /* remove these pages & resources */
45 46
46/* compatibility flags */ 47/* compatibility flags */
47#define MAP_ANON MAP_ANONYMOUS 48#define MAP_ANON MAP_ANONYMOUS
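Each mman.h in this series gains a MADV_REMOVE definition (7 on alpha here; most other arches below use 0x5). What the flag is for, as a userspace sketch: freeing the backing store of a range inside a mapping without unmapping it, for tmpfs/ramfs-style files. Assumes a kernel and libc that both know the constant:

#include <sys/mman.h>

#ifndef MADV_REMOVE
#define MADV_REMOVE 7	/* alpha value per the hunk above; arch-specific */
#endif

static int punch_hole(void *addr, size_t len)
{
	/* drop the pages and their backing resources for [addr, addr+len) */
	return madvise(addr, len, MADV_REMOVE);
}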
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index d586f65c8228..f72b63309bc5 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -205,5 +205,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
205#define smp_mb__before_atomic_inc() barrier() 205#define smp_mb__before_atomic_inc() barrier()
206#define smp_mb__after_atomic_inc() barrier() 206#define smp_mb__after_atomic_inc() barrier()
207 207
208#include <asm-generic/atomic.h>
208#endif 209#endif
209#endif 210#endif
diff --git a/include/asm-arm/mman.h b/include/asm-arm/mman.h
index 8e4f69c4fa5f..f0bebca2ac21 100644
--- a/include/asm-arm/mman.h
+++ b/include/asm-arm/mman.h
@@ -35,6 +35,7 @@
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */ 36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */ 37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
38 39
39/* compatibility flags */ 40/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS 41#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index a47cadc59686..3074b0e76343 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -118,5 +118,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
118#define smp_mb__before_atomic_inc() barrier() 118#define smp_mb__before_atomic_inc() barrier()
119#define smp_mb__after_atomic_inc() barrier() 119#define smp_mb__after_atomic_inc() barrier()
120 120
121#include <asm-generic/atomic.h>
121#endif 122#endif
122#endif 123#endif
diff --git a/include/asm-arm26/mman.h b/include/asm-arm26/mman.h
index cc27b8240265..0ed7780541fa 100644
--- a/include/asm-arm26/mman.h
+++ b/include/asm-arm26/mman.h
@@ -35,6 +35,7 @@
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */ 36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */ 37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
38 39
39/* compatibility flags */ 40/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS 41#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 683b05a57d88..2df2c7aa19b7 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -156,4 +156,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
156#define smp_mb__before_atomic_inc() barrier() 156#define smp_mb__before_atomic_inc() barrier()
157#define smp_mb__after_atomic_inc() barrier() 157#define smp_mb__after_atomic_inc() barrier()
158 158
159#include <asm-generic/atomic.h>
159#endif 160#endif
diff --git a/include/asm-cris/mman.h b/include/asm-cris/mman.h
index 8570e72b9502..5a382b8bf3f7 100644
--- a/include/asm-cris/mman.h
+++ b/include/asm-cris/mman.h
@@ -37,6 +37,7 @@
37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
38#define MADV_WILLNEED 0x3 /* pre-fault pages */ 38#define MADV_WILLNEED 0x3 /* pre-fault pages */
39#define MADV_DONTNEED 0x4 /* discard these pages */ 39#define MADV_DONTNEED 0x4 /* discard these pages */
40#define MADV_REMOVE 0x5 /* remove these pages & resources */
40 41
41/* compatibility flags */ 42/* compatibility flags */
42#define MAP_ANON MAP_ANONYMOUS 43#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index f6539ff569c5..3f54fea2b051 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -426,4 +426,5 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
426}) 426})
427#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 427#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
428 428
429#include <asm-generic/atomic.h>
429#endif /* _ASM_ATOMIC_H */ 430#endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h
index 9feff4ce1424..fca9d90e32c9 100644
--- a/include/asm-frv/futex.h
+++ b/include/asm-frv/futex.h
@@ -7,47 +7,7 @@
7#include <asm/errno.h> 7#include <asm/errno.h>
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9 9
10static inline int 10extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51 11
52#endif 12#endif
53#endif 13#endif
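The frv futex op moves out of line, but whatever implements it still has to decode encoded_op the same way. This sketch reproduces the field extraction from the removed inline, including the shift-based sign extension of the two 12-bit arguments (assumes the FUTEX_OP_* constants from <linux/futex.h>):

static void decode_futex_op(int encoded_op, int *op, int *cmp,
			    int *oparg, int *cmparg)
{
	*op = (encoded_op >> 28) & 7;		/* FUTEX_OP_SET, _ADD, ... */
	*cmp = (encoded_op >> 24) & 15;		/* FUTEX_OP_CMP_EQ, ... */
	*oparg = (encoded_op << 8) >> 20;	/* signed 12-bit operand */
	*cmparg = (encoded_op << 20) >> 20;	/* signed 12-bit comparand */

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		*oparg = 1 << *oparg;		/* operand is a shift count */
}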
diff --git a/include/asm-frv/mman.h b/include/asm-frv/mman.h
index c684720dfbdd..8af4a41c255e 100644
--- a/include/asm-frv/mman.h
+++ b/include/asm-frv/mman.h
@@ -35,6 +35,7 @@
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */ 36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */ 37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
38 39
39/* compatibility flags */ 40/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS 41#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h
index d407bde57eca..67366894780f 100644
--- a/include/asm-frv/signal.h
+++ b/include/asm-frv/signal.h
@@ -151,7 +151,6 @@ typedef struct sigaltstack {
151 size_t ss_size; 151 size_t ss_size;
152} stack_t; 152} stack_t;
153 153
154extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
155#define ptrace_signal_deliver(regs, cookie) do { } while (0) 154#define ptrace_signal_deliver(regs, cookie) do { } while (0)
156 155
157#ifdef __KERNEL__ 156#ifdef __KERNEL__
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
new file mode 100644
index 000000000000..e0a28b925ef0
--- /dev/null
+++ b/include/asm-generic/atomic.h
@@ -0,0 +1,116 @@
1#ifndef _ASM_GENERIC_ATOMIC_H
2#define _ASM_GENERIC_ATOMIC_H
3/*
4 * Copyright (C) 2005 Silicon Graphics, Inc.
5 * Christoph Lameter <clameter@sgi.com>
6 *
 7 * Provides arch-independent atomic definitions without the need to
 8 * edit every arch-specific atomic.h file.
9 */
10
11
12/*
 13 * Support for atomic_long_t
14 *
15 * Casts for parameters are avoided for existing atomic functions in order to
16 * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
17 * macros of a platform may have.
18 */
19
20#if BITS_PER_LONG == 64
21
22typedef atomic64_t atomic_long_t;
23
24#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
25
26static inline long atomic_long_read(atomic_long_t *l)
27{
28 atomic64_t *v = (atomic64_t *)l;
29
30 return (long)atomic64_read(v);
31}
32
33static inline void atomic_long_set(atomic_long_t *l, long i)
34{
35 atomic64_t *v = (atomic64_t *)l;
36
 37 atomic64_set(v, i);
38}
39
40static inline void atomic_long_inc(atomic_long_t *l)
41{
42 atomic64_t *v = (atomic64_t *)l;
43
44 atomic64_inc(v);
45}
46
47static inline void atomic_long_dec(atomic_long_t *l)
48{
49 atomic64_t *v = (atomic64_t *)l;
50
51 atomic64_dec(v);
52}
53
54static inline void atomic_long_add(long i, atomic_long_t *l)
55{
56 atomic64_t *v = (atomic64_t *)l;
57
58 atomic64_add(i, v);
59}
60
61static inline void atomic_long_sub(long i, atomic_long_t *l)
62{
63 atomic64_t *v = (atomic64_t *)l;
64
65 atomic64_sub(i, v);
66}
67
68#else
69
70typedef atomic_t atomic_long_t;
71
72#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
73static inline long atomic_long_read(atomic_long_t *l)
74{
75 atomic_t *v = (atomic_t *)l;
76
77 return (long)atomic_read(v);
78}
79
80static inline void atomic_long_set(atomic_long_t *l, long i)
81{
82 atomic_t *v = (atomic_t *)l;
83
84 atomic_set(v, i);
85}
86
87static inline void atomic_long_inc(atomic_long_t *l)
88{
89 atomic_t *v = (atomic_t *)l;
90
91 atomic_inc(v);
92}
93
94static inline void atomic_long_dec(atomic_long_t *l)
95{
96 atomic_t *v = (atomic_t *)l;
97
98 atomic_dec(v);
99}
100
101static inline void atomic_long_add(long i, atomic_long_t *l)
102{
103 atomic_t *v = (atomic_t *)l;
104
105 atomic_add(i, v);
106}
107
108static inline void atomic_long_sub(long i, atomic_long_t *l)
109{
110 atomic_t *v = (atomic_t *)l;
111
112 atomic_sub(i, v);
113}
114
115#endif
116#endif
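Usage sketch for the new asm-generic wrapper: a counter that is 64-bit wherever BITS_PER_LONG == 64 and 32-bit elsewhere, with no per-arch code (names are illustrative):

static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);

static void record_event(void)
{
	atomic_long_inc(&nr_events);	/* atomic64_inc or atomic_inc underneath */
}

static long events_seen(void)
{
	return atomic_long_read(&nr_events);
}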
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 094d4917c1a9..35de20cf8fac 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -10,6 +10,8 @@
10#define ALIGN_FUNCTION() . = ALIGN(8) 10#define ALIGN_FUNCTION() . = ALIGN(8)
11 11
12#define RODATA \ 12#define RODATA \
13 . = ALIGN(4096); \
14 __start_rodata = .; \
13 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ 15 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
14 *(.rodata) *(.rodata.*) \ 16 *(.rodata) *(.rodata.*) \
15 *(__vermagic) /* Kernel version magic */ \ 17 *(__vermagic) /* Kernel version magic */ \
@@ -74,6 +76,8 @@
74 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ 76 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
75 *(__ksymtab_strings) \ 77 *(__ksymtab_strings) \
76 } \ 78 } \
79 __end_rodata = .; \
80 . = ALIGN(4096); \
77 \ 81 \
78 /* Built-in module parameters. */ \ 82 /* Built-in module parameters. */ \
79 __param : AT(ADDR(__param) - LOAD_OFFSET) { \ 83 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index f23d86819ea8..d891541e89c3 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -137,4 +137,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
137#define smp_mb__before_atomic_inc() barrier() 137#define smp_mb__before_atomic_inc() barrier()
138#define smp_mb__after_atomic_inc() barrier() 138#define smp_mb__after_atomic_inc() barrier()
139 139
140#include <asm-generic/atomic.h>
140#endif /* __ARCH_H8300_ATOMIC __ */ 141#endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/include/asm-h8300/irq.h b/include/asm-h8300/irq.h
index 5027181ed067..73065f5bda0e 100644
--- a/include/asm-h8300/irq.h
+++ b/include/asm-h8300/irq.h
@@ -61,11 +61,6 @@ static __inline__ int irq_canonicalize(int irq)
61 61
62extern void enable_irq(unsigned int); 62extern void enable_irq(unsigned int);
63extern void disable_irq(unsigned int); 63extern void disable_irq(unsigned int);
64
65/*
66 * Some drivers want these entry points
67 */
68#define enable_irq_nosync(x) enable_irq(x)
69#define disable_irq_nosync(x) disable_irq(x) 64#define disable_irq_nosync(x) disable_irq(x)
70 65
71struct irqaction; 66struct irqaction;
diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h
index 63f727a59850..744a8fb485c2 100644
--- a/include/asm-h8300/mman.h
+++ b/include/asm-h8300/mman.h
@@ -35,6 +35,7 @@
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */ 36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */ 37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
38 39
39/* compatibility flags */ 40/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS 41#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index c68557aa04b2..7a5472d77091 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -254,4 +254,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
254#define smp_mb__before_atomic_inc() barrier() 254#define smp_mb__before_atomic_inc() barrier()
255#define smp_mb__after_atomic_inc() barrier() 255#define smp_mb__after_atomic_inc() barrier()
256 256
257#include <asm-generic/atomic.h>
257#endif 258#endif
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 4807aa1d2e3d..65679aca4b22 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -332,9 +332,9 @@ static inline unsigned long __ffs(unsigned long word)
332 * Returns the bit-number of the first set bit, not the number of the byte 332 * Returns the bit-number of the first set bit, not the number of the byte
333 * containing a bit. 333 * containing a bit.
334 */ 334 */
335static inline int find_first_bit(const unsigned long *addr, unsigned size) 335static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
336{ 336{
337 int x = 0; 337 unsigned x = 0;
338 338
339 while (x < size) { 339 while (x < size) {
340 unsigned long val = *addr++; 340 unsigned long val = *addr++;
@@ -367,11 +367,6 @@ static inline unsigned long ffz(unsigned long word)
367 return word; 367 return word;
368} 368}
369 369
370/*
371 * fls: find last bit set.
372 */
373
374#define fls(x) generic_fls(x)
375#define fls64(x) generic_fls64(x) 370#define fls64(x) generic_fls64(x)
376 371
377#ifdef __KERNEL__ 372#ifdef __KERNEL__
@@ -415,6 +410,23 @@ static inline int ffs(int x)
415} 410}
416 411
417/** 412/**
413 * fls - find last bit set
414 * @x: the word to search
415 *
416 * This is defined the same way as ffs.
417 */
418static inline int fls(int x)
419{
420 int r;
421
422 __asm__("bsrl %1,%0\n\t"
423 "jnz 1f\n\t"
424 "movl $-1,%0\n"
425 "1:" : "=r" (r) : "rm" (x));
426 return r+1;
427}
428
429/**
418 * hweightN - returns the hamming weight of a N-bit word 430 * hweightN - returns the hamming weight of a N-bit word
419 * @x: the word to weigh 431 * @x: the word to weigh
420 * 432 *
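The new bsrl-based fls() returns 1-based bit numbers with fls(0) == 0, mirroring ffs(). A portable loop with the same contract, for comparison:

static inline int fls_portable(int x)
{
	unsigned int v = (unsigned int)x;
	int r = 0;

	while (v) {	/* count how many shifts empty the word */
		v >>= 1;
		r++;
	}
	return r;	/* fls(1) == 1, fls(0x80000000) == 32, fls(0) == 0 */
}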
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index ea54540638d2..50233e0345fb 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -8,9 +8,6 @@
8 * <rreilova@ececs.uc.edu> 8 * <rreilova@ececs.uc.edu>
9 * - Channing Corn (tests & fixes), 9 * - Channing Corn (tests & fixes),
10 * - Andrew D. Balsa (code cleanup). 10 * - Andrew D. Balsa (code cleanup).
11 *
12 * Pentium III FXSR, SSE support
13 * Gareth Hughes <gareth@valinux.com>, May 2000
14 */ 11 */
15 12
16/* 13/*
@@ -76,25 +73,7 @@ static void __init check_fpu(void)
76 return; 73 return;
77 } 74 }
78 75
79/* Enable FXSR and company _before_ testing for FP problems. */ 76/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */
80 /*
81 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
82 */
83 if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
84 extern void __buggy_fxsr_alignment(void);
85 __buggy_fxsr_alignment();
86 }
87 if (cpu_has_fxsr) {
88 printk(KERN_INFO "Enabling fast FPU save and restore... ");
89 set_in_cr4(X86_CR4_OSFXSR);
90 printk("done.\n");
91 }
92 if (cpu_has_xmm) {
93 printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
94 set_in_cr4(X86_CR4_OSXMMEXCPT);
95 printk("done.\n");
96 }
97
98 /* Test for the divl bug.. */ 77 /* Test for the divl bug.. */
99 __asm__("fninit\n\t" 78 __asm__("fninit\n\t"
100 "fldl %1\n\t" 79 "fldl %1\n\t"
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
index 2ea36dea37d9..7199f7b326f1 100644
--- a/include/asm-i386/cacheflush.h
+++ b/include/asm-i386/cacheflush.h
@@ -31,4 +31,8 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot);
31void kernel_map_pages(struct page *page, int numpages, int enable); 31void kernel_map_pages(struct page *page, int numpages, int enable);
32#endif 32#endif
33 33
34#ifdef CONFIG_DEBUG_RODATA
35void mark_rodata_ro(void);
36#endif
37
34#endif /* _I386_CACHEFLUSH_H */ 38#endif /* _I386_CACHEFLUSH_H */
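Together with the page-aligned __start_rodata/__end_rodata markers added to the generic linker script earlier in this diff, the new mark_rodata_ro() hook can remap the kernel's read-only data via change_page_attr(). A hedged sketch of the shape of it, not the in-tree body (PAGE_KERNEL_RO and global_flush_tlb() are assumptions here):

extern char __start_rodata[], __end_rodata[];

void mark_rodata_ro(void)
{
	unsigned long start = (unsigned long) __start_rodata;
	unsigned long npages =
		((unsigned long) __end_rodata - start) >> PAGE_SHIFT;

	/* both markers are 4096-aligned by the linker script */
	change_page_attr(virt_to_page((void *) start), npages, PAGE_KERNEL_RO);
	global_flush_tlb();	/* attribute changes need a global flush */
}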
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 29b851a18c6e..494e73bca095 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -15,9 +15,6 @@
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16 16
17extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; 17extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
18DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
19
20#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
21 18
22DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); 19DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
23 20
@@ -29,6 +26,11 @@ struct Xgt_desc_struct {
29 26
30extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS]; 27extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
31 28
29static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
30{
31 return ((struct desc_struct *)cpu_gdt_descr[cpu].address);
32}
33
32#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) 34#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
33#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) 35#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
34 36
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h
index ba936d4daedb..18b19a773440 100644
--- a/include/asm-i386/mach-bigsmp/mach_apic.h
+++ b/include/asm-i386/mach-bigsmp/mach_apic.h
@@ -1,17 +1,10 @@
1#ifndef __ASM_MACH_APIC_H 1#ifndef __ASM_MACH_APIC_H
2#define __ASM_MACH_APIC_H 2#define __ASM_MACH_APIC_H
3#include <asm/smp.h> 3
4 4
5#define SEQUENTIAL_APICID 5extern u8 bios_cpu_apicid[];
6#ifdef SEQUENTIAL_APICID 6
7#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\ 7#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
8 ((phys_apic<<2) & (~0xf)) )
9#elif CLUSTERED_APICID
10#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
11 ((phys_apic) & (~0xf)) )
12#endif
13
14#define NO_BALANCE_IRQ (1)
15#define esr_disable (1) 8#define esr_disable (1)
16 9
17static inline int apic_id_registered(void) 10static inline int apic_id_registered(void)
@@ -19,7 +12,6 @@ static inline int apic_id_registered(void)
19 return (1); 12 return (1);
20} 13}
21 14
22#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 23/* Round-robin the irqs among the online cpus */ 15/* Round-robin the irqs among the online cpus */
24static inline cpumask_t target_cpus(void) 16static inline cpumask_t target_cpus(void)
25{ 17{
@@ -32,29 +24,34 @@ static inline cpumask_t target_cpus(void)
32 } while (cpu >= NR_CPUS); 24 } while (cpu >= NR_CPUS);
33 return cpumask_of_cpu(cpu); 25 return cpumask_of_cpu(cpu);
34} 26}
35#define TARGET_CPUS (target_cpus())
36 27
37#define INT_DELIVERY_MODE dest_Fixed 28#undef APIC_DEST_LOGICAL
38#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ 29#define APIC_DEST_LOGICAL 0
30#define TARGET_CPUS (target_cpus())
31#define APIC_DFR_VALUE (APIC_DFR_FLAT)
32#define INT_DELIVERY_MODE (dest_Fixed)
33#define INT_DEST_MODE (0) /* phys delivery to target proc */
34#define NO_BALANCE_IRQ (0)
35#define WAKE_SECONDARY_VIA_INIT
36
39 37
40static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) 38static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
41{ 39{
42 return 0; 40 return (0);
43} 41}
44 42
45/* we don't use the phys_cpu_present_map to indicate apicid presence */ 43static inline unsigned long check_apicid_present(int bit)
46static inline unsigned long check_apicid_present(int bit)
47{ 44{
48 return 1; 45 return (1);
49} 46}
50 47
51#define apicid_cluster(apicid) (apicid & 0xF0) 48static inline unsigned long calculate_ldr(int cpu)
52
53static inline unsigned long calculate_ldr(unsigned long old)
54{ 49{
55 unsigned long id; 50 unsigned long val, id;
56 id = xapic_phys_to_log_apicid(hard_smp_processor_id()); 51 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
57 return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id)); 52 id = xapic_phys_to_log_apicid(cpu);
53 val |= SET_APIC_LOGICAL_ID(id);
54 return val;
58} 55}
59 56
60/* 57/*
@@ -67,37 +64,35 @@ static inline unsigned long calculate_ldr(unsigned long old)
67static inline void init_apic_ldr(void) 64static inline void init_apic_ldr(void)
68{ 65{
69 unsigned long val; 66 unsigned long val;
67 int cpu = smp_processor_id();
70 68
71 apic_write_around(APIC_DFR, APIC_DFR_VALUE); 69 apic_write_around(APIC_DFR, APIC_DFR_VALUE);
72 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; 70 val = calculate_ldr(cpu);
73 val = calculate_ldr(val);
74 apic_write_around(APIC_LDR, val); 71 apic_write_around(APIC_LDR, val);
75} 72}
76 73
77static inline void clustered_apic_check(void) 74static inline void clustered_apic_check(void)
78{ 75{
79 printk("Enabling APIC mode: %s. Using %d I/O APICs\n", 76 printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
80 "Cluster", nr_ioapics); 77 "Physflat", nr_ioapics);
81} 78}
82 79
83static inline int multi_timer_check(int apic, int irq) 80static inline int multi_timer_check(int apic, int irq)
84{ 81{
85 return 0; 82 return (0);
86} 83}
87 84
88static inline int apicid_to_node(int logical_apicid) 85static inline int apicid_to_node(int logical_apicid)
89{ 86{
90 return 0; 87 return (0);
91} 88}
92 89
93extern u8 bios_cpu_apicid[];
94
95static inline int cpu_present_to_apicid(int mps_cpu) 90static inline int cpu_present_to_apicid(int mps_cpu)
96{ 91{
97 if (mps_cpu < NR_CPUS) 92 if (mps_cpu < NR_CPUS)
98 return (int)bios_cpu_apicid[mps_cpu]; 93 return (int) bios_cpu_apicid[mps_cpu];
99 else 94
100 return BAD_APICID; 95 return BAD_APICID;
101} 96}
102 97
103static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) 98static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
@@ -109,10 +104,10 @@ extern u8 cpu_2_logical_apicid[];
109/* Mapping from cpu number to logical apicid */ 104/* Mapping from cpu number to logical apicid */
110static inline int cpu_to_logical_apicid(int cpu) 105static inline int cpu_to_logical_apicid(int cpu)
111{ 106{
112 if (cpu >= NR_CPUS) 107 if (cpu >= NR_CPUS)
113 return BAD_APICID; 108 return BAD_APICID;
114 return (int)cpu_2_logical_apicid[cpu]; 109 return cpu_physical_id(cpu);
115 } 110}
116 111
117static inline int mpc_apic_id(struct mpc_config_processor *m, 112static inline int mpc_apic_id(struct mpc_config_processor *m,
118 struct mpc_config_translation *translation_record) 113 struct mpc_config_translation *translation_record)
@@ -128,11 +123,9 @@ static inline int mpc_apic_id(struct mpc_config_processor *m,
128static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) 123static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
129{ 124{
130 /* For clustered we don't have a good way to do this yet - hack */ 125 /* For clustered we don't have a good way to do this yet - hack */
131 return physids_promote(0xFUL); 126 return physids_promote(0xFFL);
132} 127}
133 128
134#define WAKE_SECONDARY_VIA_INIT
135
136static inline void setup_portio_remap(void) 129static inline void setup_portio_remap(void)
137{ 130{
138} 131}
diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h
index 23e58b317c79..a58ab5a75c8c 100644
--- a/include/asm-i386/mach-bigsmp/mach_apicdef.h
+++ b/include/asm-i386/mach-bigsmp/mach_apicdef.h
@@ -1,11 +1,11 @@
1#ifndef __ASM_MACH_APICDEF_H 1#ifndef __ASM_MACH_APICDEF_H
2#define __ASM_MACH_APICDEF_H 2#define __ASM_MACH_APICDEF_H
3 3
4#define APIC_ID_MASK (0x0F<<24) 4#define APIC_ID_MASK (0xFF<<24)
5 5
6static inline unsigned get_apic_id(unsigned long x) 6static inline unsigned get_apic_id(unsigned long x)
7{ 7{
8 return (((x)>>24)&0x0F); 8 return (((x)>>24)&0xFF);
9} 9}
10 10
11#define GET_APIC_ID(x) get_apic_id(x) 11#define GET_APIC_ID(x) get_apic_id(x)
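Widening APIC_ID_MASK from 0x0F to 0xFF is what lets bigsmp see more than 15 CPUs: the physical APIC ID lives in bits 31:24 of the ID register, and the old mask discarded the top four bits. Spelled out:

static inline unsigned bigsmp_apic_id(unsigned long id_reg)
{
	return (id_reg >> 24) & 0xFF;	/* 0..255 now, 0..15 before */
}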
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h
index 196619a83854..ba4941e6f643 100644
--- a/include/asm-i386/mman.h
+++ b/include/asm-i386/mman.h
@@ -35,6 +35,7 @@
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */ 36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */ 37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
38 39
39/* compatibility flags */ 40/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS 41#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 620a90641ea8..74f595d80579 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -76,11 +76,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 76 * Following are macros that each numa implementation must define. 76 * Following are macros that each numa implementation must define.
77 */ 77 */
78 78
79/*
80 * Given a kernel address, find the home node of the underlying memory.
81 */
82#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
83
84#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) 79#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
85#define node_end_pfn(nid) \ 80#define node_end_pfn(nid) \
86({ \ 81({ \
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index eb7f2b4234aa..424661d25bd3 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -52,8 +52,10 @@ struct mod_arch_specific
52#define MODULE_PROC_FAMILY "CYRIXIII " 52#define MODULE_PROC_FAMILY "CYRIXIII "
53#elif defined CONFIG_MVIAC3_2 53#elif defined CONFIG_MVIAC3_2
54#define MODULE_PROC_FAMILY "VIAC3-2 " 54#define MODULE_PROC_FAMILY "VIAC3-2 "
55#elif CONFIG_MGEODEGX1 55#elif defined CONFIG_MGEODEGX1
56#define MODULE_PROC_FAMILY "GEODEGX1 " 56#define MODULE_PROC_FAMILY "GEODEGX1 "
57#elif defined CONFIG_MGEODE_LX
58#define MODULE_PROC_FAMILY "GEODE "
57#else 59#else
58#error unknown processor family 60#error unknown processor family
59#endif 61#endif
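
Besides adding the GEODE entry, this hunk fixes `#elif CONFIG_MGEODEGX1` to `#elif defined CONFIG_MGEODEGX1`. Kernel CONFIG_* macros are defined as 1 when an option is selected and not defined at all otherwise, so the old form only worked because the preprocessor substitutes 0 for unknown identifiers; the new form tests definedness explicitly and keeps -Wundef quiet. A self-contained sketch of the two styles:

    #define CONFIG_A 1              /* selected option: defined as 1     */
                                    /* CONFIG_B deliberately not defined */
    #if defined CONFIG_B
    #define FAMILY "B"
    #elif CONFIG_A                  /* legal, but relies on undefined    */
    #define FAMILY "A"              /* identifiers evaluating as 0       */
    #else
    #error unknown family
    #endif
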
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index a961093dbf88..76feedf85a8a 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -75,7 +75,7 @@ struct mpc_config_bus
75{ 75{
76 unsigned char mpc_type; 76 unsigned char mpc_type;
77 unsigned char mpc_busid; 77 unsigned char mpc_busid;
78 unsigned char mpc_bustype[6] __attribute((packed)); 78 unsigned char mpc_bustype[6];
79}; 79};
80 80
81/* List of Bus Type string values, Intel MP Spec. */ 81/* List of Bus Type string values, Intel MP Spec. */
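
Dropping the attribute from mpc_bustype is safe because every member of struct mpc_config_bus is an unsigned char: a struct built only from bytes already has alignment 1 and no internal padding, so packing it changes nothing. A compile-and-run check of that claim (names hypothetical):

    #include <assert.h>

    struct bus_plain  { unsigned char t, id, type[6]; };
    struct bus_packed { unsigned char t, id, type[6]; } __attribute__((packed));

    int main(void)
    {
            /* All-char structs carry no padding for "packed" to remove. */
            assert(sizeof(struct bus_plain) == 8);
            assert(sizeof(struct bus_plain) == sizeof(struct bus_packed));
            return 0;
    }
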
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index bb5ff5b2c02e..faf995307b9e 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -91,6 +91,20 @@
91#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) 91#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
92#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) 92#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
93 93
94/* The PnP BIOS entries in the GDT */
95#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
96#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
97#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
98#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
99#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
100
101/* The PnP BIOS selectors */
102#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
103#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
104#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
105#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
106#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
107
94/* 108/*
95 * The interrupt descriptor table has room for 256 idt's, 109 * The interrupt descriptor table has room for 256 idt's,
96 * the global descriptor table is dependent on the number 110 * the global descriptor table is dependent on the number
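
The new PNP_* selectors follow the usual x86 rule: a selector is the GDT index shifted left by 3, with the low three bits (table indicator and RPL) left at zero for kernel GDT segments. The arithmetic, with a made-up index since GDT_ENTRY_PNPBIOS_BASE is defined elsewhere in this header:

    #include <stdio.h>

    #define GDT_ENTRY_PNPBIOS_BASE 18   /* hypothetical GDT index */
    #define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
    #define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8)

    int main(void)
    {
            /* index 18 -> selector 18 * 8 = 0x90 (TI = 0, RPL = 0) */
            printf("PNP_CS32 = %#x\n", PNP_CS32);
            return 0;
    }
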
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 772f85da1206..9c0593b7a94e 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -54,23 +54,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
54 ); } while(0) 54 ); } while(0)
55 55
56#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) 56#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
57#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 ) 57#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
58
59static inline unsigned long _get_base(char * addr)
60{
61 unsigned long __base;
62 __asm__("movb %3,%%dh\n\t"
63 "movb %2,%%dl\n\t"
64 "shll $16,%%edx\n\t"
65 "movw %1,%%dx"
66 :"=&d" (__base)
67 :"m" (*((addr)+2)),
68 "m" (*((addr)+4)),
69 "m" (*((addr)+7)));
70 return __base;
71}
72
73#define get_base(ldt) _get_base( ((char *)&(ldt)) )
74 58
75/* 59/*
76 * Load a segment. Fall back on loading the zero 60 * Load a segment. Fall back on loading the zero
@@ -140,6 +124,19 @@ static inline unsigned long _get_base(char * addr)
140 :"=r" (__dummy)); \ 124 :"=r" (__dummy)); \
141 __dummy; \ 125 __dummy; \
142}) 126})
127
128#define read_cr4_safe() ({ \
129 unsigned int __dummy; \
130 /* This could fault if %cr4 does not exist */ \
131 __asm__("1: movl %%cr4, %0 \n" \
132 "2: \n" \
133 ".section __ex_table,\"a\" \n" \
134 ".long 1b,2b \n" \
135 ".previous \n" \
136 : "=r" (__dummy): "0" (0)); \
137 __dummy; \
138})
139
143#define write_cr4(x) \ 140#define write_cr4(x) \
144 __asm__ __volatile__("movl %0,%%cr4": :"r" (x)); 141 __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
145#define stts() write_cr0(8 | read_cr0()) 142#define stts() write_cr0(8 | read_cr0())
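
read_cr4_safe() above leans on the exception-table machinery: if the mov from %cr4 faults (386/486-class parts have no CR4 at all), the fixup entry resumes execution at label 2, and the "0" input constraint guarantees the output register still holds 0. Callers can therefore probe CR4 bits unconditionally; a sketch, with the PSE bit value assumed here rather than taken from this patch:

    #define X86_CR4_PSE 0x0010    /* bit 4: page size extensions (assumed) */

    static int have_pse_enabled(void)
    {
            /* Reads as 0 on CPUs without CR4, so the test is safely false. */
            return (read_cr4_safe() & X86_CR4_PSE) != 0;
    }
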
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 0f92e78dfea1..fe38b9a96233 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -256,7 +256,7 @@
256#define __NR_io_submit 248 256#define __NR_io_submit 248
257#define __NR_io_cancel 249 257#define __NR_io_cancel 249
258#define __NR_fadvise64 250 258#define __NR_fadvise64 250
259#define __NR_set_zone_reclaim 251 259/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
260#define __NR_exit_group 252 260#define __NR_exit_group 252
261#define __NR_lookup_dcookie 253 261#define __NR_lookup_dcookie 253
262#define __NR_epoll_create 254 262#define __NR_epoll_create 254
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 2fbebf85c31d..15cf7984c48e 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -192,4 +192,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
192#define smp_mb__before_atomic_inc() barrier() 192#define smp_mb__before_atomic_inc() barrier()
193#define smp_mb__after_atomic_inc() barrier() 193#define smp_mb__after_atomic_inc() barrier()
194 194
195#include <asm-generic/atomic.h>
195#endif /* _ASM_IA64_ATOMIC_H */ 196#endif /* _ASM_IA64_ATOMIC_H */
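
The `#include <asm-generic/atomic.h>` being appended to every architecture's atomic.h in this series layers a word-sized atomic_long_t API over the native atomic_t/atomic64_t, so generic code can keep long-sized counters without testing BITS_PER_LONG. A sketch of the shape of that layer, 32-bit case only (the real header picks atomic64_t-based variants when BITS_PER_LONG is 64):

    /* Sketch mirroring asm-generic/atomic.h, BITS_PER_LONG == 32 case. */
    typedef atomic_t atomic_long_t;

    #define ATOMIC_LONG_INIT(i)  ATOMIC_INIT(i)

    static inline long atomic_long_read(atomic_long_t *l)
    {
            return (long)atomic_read((atomic_t *)l);
    }

    static inline void atomic_long_inc(atomic_long_t *l)
    {
            atomic_inc((atomic_t *)l);
    }
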
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
index 1c0a73af1461..828beb24a20e 100644
--- a/include/asm-ia64/mman.h
+++ b/include/asm-ia64/mman.h
@@ -43,6 +43,7 @@
43#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 43#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
44#define MADV_WILLNEED 0x3 /* pre-fault pages */ 44#define MADV_WILLNEED 0x3 /* pre-fault pages */
45#define MADV_DONTNEED 0x4 /* discard these pages */ 45#define MADV_DONTNEED 0x4 /* discard these pages */
46#define MADV_REMOVE 0x5 /* remove these pages & resources */
46 47
47/* compatibility flags */ 48/* compatibility flags */
48#define MAP_ANON MAP_ANONYMOUS 49#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 6d96a67439be..2bf543493cb8 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -265,7 +265,7 @@
265#define __NR_keyctl 1273 265#define __NR_keyctl 1273
266#define __NR_ioprio_set 1274 266#define __NR_ioprio_set 1274
267#define __NR_ioprio_get 1275 267#define __NR_ioprio_get 1275
268#define __NR_set_zone_reclaim 1276 268/* 1276 is available for reuse (was briefly sys_set_zone_reclaim) */
269#define __NR_inotify_init 1277 269#define __NR_inotify_init 1277
270#define __NR_inotify_add_watch 1278 270#define __NR_inotify_add_watch 1278
271#define __NR_inotify_rm_watch 1279 271#define __NR_inotify_rm_watch 1279
diff --git a/include/asm-m32r/assembler.h b/include/asm-m32r/assembler.h
index e1dff9d6baad..b7f4d8aaeb46 100644
--- a/include/asm-m32r/assembler.h
+++ b/include/asm-m32r/assembler.h
@@ -52,7 +52,7 @@
52 or3 \reg, \reg, #low(\x) 52 or3 \reg, \reg, #low(\x)
53 .endm 53 .endm
54 54
55#if !defined(CONFIG_CHIP_M32102) 55#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
56#define STI(reg) STI_M reg 56#define STI(reg) STI_M reg
57 .macro STI_M reg 57 .macro STI_M reg
58 setpsw #0x40 -> nop 58 setpsw #0x40 -> nop
@@ -64,7 +64,7 @@
64 clrpsw #0x40 -> nop 64 clrpsw #0x40 -> nop
65 ; WORKAROUND: "-> nop" is a workaround for the M32700(TS1). 65 ; WORKAROUND: "-> nop" is a workaround for the M32700(TS1).
66 .endm 66 .endm
67#else /* CONFIG_CHIP_M32102 */ 67#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
68#define STI(reg) STI_M reg 68#define STI(reg) STI_M reg
69 .macro STI_M reg 69 .macro STI_M reg
70 mvfc \reg, psw 70 mvfc \reg, psw
@@ -191,12 +191,12 @@
191 and \reg, sp 191 and \reg, sp
192 .endm 192 .endm
193 193
194#if !defined(CONFIG_CHIP_M32102) 194#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
195 .macro SWITCH_TO_KERNEL_STACK 195 .macro SWITCH_TO_KERNEL_STACK
196 ; switch to kernel stack (spi) 196 ; switch to kernel stack (spi)
197 clrpsw #0x80 -> nop 197 clrpsw #0x80 -> nop
198 .endm 198 .endm
199#else /* CONFIG_CHIP_M32102 */ 199#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
200 .macro SWITCH_TO_KERNEL_STACK 200 .macro SWITCH_TO_KERNEL_STACK
201 push r0 ; save r0 for working 201 push r0 ; save r0 for working
202 mvfc r0, psw 202 mvfc r0, psw
@@ -218,7 +218,7 @@
218 .fillinsn 218 .fillinsn
2192: 2192:
220 .endm 220 .endm
221#endif /* CONFIG_CHIP_M32102 */ 221#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
222 222
223#endif /* __ASSEMBLY__ */ 223#endif /* __ASSEMBLY__ */
224 224
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index ef1fb8ea4726..70761278b6cb 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -313,4 +313,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
313#define smp_mb__before_atomic_inc() barrier() 313#define smp_mb__before_atomic_inc() barrier()
314#define smp_mb__after_atomic_inc() barrier() 314#define smp_mb__after_atomic_inc() barrier()
315 315
316#include <asm-generic/atomic.h>
316#endif /* _ASM_M32R_ATOMIC_H */ 317#endif /* _ASM_M32R_ATOMIC_H */
diff --git a/include/asm-m32r/cacheflush.h b/include/asm-m32r/cacheflush.h
index 46fc4c325108..e57427b6e249 100644
--- a/include/asm-m32r/cacheflush.h
+++ b/include/asm-m32r/cacheflush.h
@@ -7,7 +7,7 @@
7extern void _flush_cache_all(void); 7extern void _flush_cache_all(void);
8extern void _flush_cache_copyback_all(void); 8extern void _flush_cache_copyback_all(void);
9 9
10#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) 10#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
11#define flush_cache_all() do { } while (0) 11#define flush_cache_all() do { } while (0)
12#define flush_cache_mm(mm) do { } while (0) 12#define flush_cache_mm(mm) do { } while (0)
13#define flush_cache_range(vma, start, end) do { } while (0) 13#define flush_cache_range(vma, start, end) do { } while (0)
diff --git a/include/asm-m32r/irq.h b/include/asm-m32r/irq.h
index 8ed77968ecb4..ca943954572a 100644
--- a/include/asm-m32r/irq.h
+++ b/include/asm-m32r/irq.h
@@ -65,6 +65,22 @@
65#define NR_IRQS \ 65#define NR_IRQS \
66 (OPSPUT_NUM_CPU_IRQ + OPSPUT_NUM_PLD_IRQ \ 66 (OPSPUT_NUM_CPU_IRQ + OPSPUT_NUM_PLD_IRQ \
67 + OPSPUT_NUM_LCD_PLD_IRQ + OPSPUT_NUM_LAN_PLD_IRQ) 67 + OPSPUT_NUM_LCD_PLD_IRQ + OPSPUT_NUM_LAN_PLD_IRQ)
68
69#elif defined(CONFIG_PLAT_M32104UT)
70/*
71 * IRQ definitions for M32104UT
72 * M32104 Chip: 64 interrupts
73 * ICU of M32104UT-on-board PLD: 32 interrupts cascaded to INT1# chip pin
74 */
75#define M32104UT_NUM_CPU_IRQ (64)
76#define M32104UT_NUM_PLD_IRQ (32)
77#define M32104UT_IRQ_BASE 0
78#define M32104UT_CPU_IRQ_BASE M32104UT_IRQ_BASE
79#define M32104UT_PLD_IRQ_BASE (M32104UT_CPU_IRQ_BASE + M32104UT_NUM_CPU_IRQ)
80
81#define NR_IRQS \
82 (M32104UT_NUM_CPU_IRQ + M32104UT_NUM_PLD_IRQ)
83
68#else 84#else
69#define NR_IRQS 64 85#define NR_IRQS 64
70#endif 86#endif
diff --git a/include/asm-m32r/m32102.h b/include/asm-m32r/m32102.h
index cb98101f4f6e..a1f0d1fe9eb8 100644
--- a/include/asm-m32r/m32102.h
+++ b/include/asm-m32r/m32102.h
@@ -11,7 +11,11 @@
11/*======================================================================* 11/*======================================================================*
12 * Special Function Register 12 * Special Function Register
13 *======================================================================*/ 13 *======================================================================*/
14#if !defined(CONFIG_CHIP_M32104)
14#define M32R_SFR_OFFSET (0x00E00000) /* 0x00E00000-0x00EFFFFF 1[MB] */ 15#define M32R_SFR_OFFSET (0x00E00000) /* 0x00E00000-0x00EFFFFF 1[MB] */
16#else
17#define M32R_SFR_OFFSET (0x00700000) /* 0x00700000-0x007FFFFF 1[MB] */
18#endif
15 19
16/* 20/*
17 * Clock and Power Management registers. 21 * Clock and Power Management registers.
@@ -100,7 +104,7 @@
100#define M32R_MFT5RLD_PORTL (0x0C+M32R_MFT5_OFFSET) /* MFT5 reload */ 104#define M32R_MFT5RLD_PORTL (0x0C+M32R_MFT5_OFFSET) /* MFT5 reload */
101#define M32R_MFT5CMPRLD_PORTL (0x10+M32R_MFT5_OFFSET) /* MFT5 compare reload */ 105#define M32R_MFT5CMPRLD_PORTL (0x10+M32R_MFT5_OFFSET) /* MFT5 compare reload */
102 106
103#ifdef CONFIG_CHIP_M32700 107#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32104)
104#define M32R_MFTCR_MFT0MSK (1UL<<31) /* b0 */ 108#define M32R_MFTCR_MFT0MSK (1UL<<31) /* b0 */
105#define M32R_MFTCR_MFT1MSK (1UL<<30) /* b1 */ 109#define M32R_MFTCR_MFT1MSK (1UL<<30) /* b1 */
106#define M32R_MFTCR_MFT2MSK (1UL<<29) /* b2 */ 110#define M32R_MFTCR_MFT2MSK (1UL<<29) /* b2 */
@@ -113,7 +117,7 @@
113#define M32R_MFTCR_MFT3EN (1UL<<20) /* b11 */ 117#define M32R_MFTCR_MFT3EN (1UL<<20) /* b11 */
114#define M32R_MFTCR_MFT4EN (1UL<<19) /* b12 */ 118#define M32R_MFTCR_MFT4EN (1UL<<19) /* b12 */
115#define M32R_MFTCR_MFT5EN (1UL<<18) /* b13 */ 119#define M32R_MFTCR_MFT5EN (1UL<<18) /* b13 */
116#else /* not CONFIG_CHIP_M32700 */ 120#else /* not CONFIG_CHIP_M32700 && not CONFIG_CHIP_M32104 */
117#define M32R_MFTCR_MFT0MSK (1UL<<15) /* b16 */ 121#define M32R_MFTCR_MFT0MSK (1UL<<15) /* b16 */
118#define M32R_MFTCR_MFT1MSK (1UL<<14) /* b17 */ 122#define M32R_MFTCR_MFT1MSK (1UL<<14) /* b17 */
119#define M32R_MFTCR_MFT2MSK (1UL<<13) /* b18 */ 123#define M32R_MFTCR_MFT2MSK (1UL<<13) /* b18 */
@@ -126,7 +130,7 @@
126#define M32R_MFTCR_MFT3EN (1UL<<4) /* b27 */ 130#define M32R_MFTCR_MFT3EN (1UL<<4) /* b27 */
127#define M32R_MFTCR_MFT4EN (1UL<<3) /* b28 */ 131#define M32R_MFTCR_MFT4EN (1UL<<3) /* b28 */
128#define M32R_MFTCR_MFT5EN (1UL<<2) /* b29 */ 132#define M32R_MFTCR_MFT5EN (1UL<<2) /* b29 */
129#endif /* not CONFIG_CHIP_M32700 */ 133#endif /* not CONFIG_CHIP_M32700 && not CONFIG_CHIP_M32104 */
130 134
131#define M32R_MFTMOD_CC_MASK (1UL<<15) /* b16 */ 135#define M32R_MFTMOD_CC_MASK (1UL<<15) /* b16 */
132#define M32R_MFTMOD_TCCR (1UL<<13) /* b18 */ 136#define M32R_MFTMOD_TCCR (1UL<<13) /* b18 */
@@ -241,8 +245,24 @@
241#define M32R_IRQ_MFT1 (17) /* MFT1 */ 245#define M32R_IRQ_MFT1 (17) /* MFT1 */
242#define M32R_IRQ_MFT2 (18) /* MFT2 */ 246#define M32R_IRQ_MFT2 (18) /* MFT2 */
243#define M32R_IRQ_MFT3 (19) /* MFT3 */ 247#define M32R_IRQ_MFT3 (19) /* MFT3 */
244#define M32R_IRQ_MFT4 (20) /* MFT4 */ 248#ifdef CONFIG_CHIP_M32104
245#define M32R_IRQ_MFT5 (21) /* MFT5 */ 249#define M32R_IRQ_MFTX0 (24) /* MFTX0 */
250#define M32R_IRQ_MFTX1 (25) /* MFTX1 */
251#define M32R_IRQ_DMA0 (32) /* DMA0 */
252#define M32R_IRQ_DMA1 (33) /* DMA1 */
253#define M32R_IRQ_DMA2 (34) /* DMA2 */
254#define M32R_IRQ_DMA3 (35) /* DMA3 */
255#define M32R_IRQ_SIO0_R (40) /* SIO0 send */
256#define M32R_IRQ_SIO0_S (41) /* SIO0 receive */
257#define M32R_IRQ_SIO1_R (42) /* SIO1 send */
258#define M32R_IRQ_SIO1_S (43) /* SIO1 receive */
259#define M32R_IRQ_SIO2_R (44) /* SIO2 send */
260#define M32R_IRQ_SIO2_S (45) /* SIO2 receive */
261#define M32R_IRQ_SIO3_R (46) /* SIO3 send */
262#define M32R_IRQ_SIO3_S (47) /* SIO3 receive */
263#define M32R_IRQ_ADC (56) /* ADC */
264#define M32R_IRQ_PC (57) /* PC */
265#else /* ! M32104 */
246#define M32R_IRQ_DMA0 (32) /* DMA0 */ 266#define M32R_IRQ_DMA0 (32) /* DMA0 */
247#define M32R_IRQ_DMA1 (33) /* DMA1 */ 267#define M32R_IRQ_DMA1 (33) /* DMA1 */
248#define M32R_IRQ_SIO0_R (48) /* SIO0 send */ 268#define M32R_IRQ_SIO0_R (48) /* SIO0 send */
@@ -255,6 +275,7 @@
255#define M32R_IRQ_SIO3_S (55) /* SIO3 receive */ 275#define M32R_IRQ_SIO3_S (55) /* SIO3 receive */
256#define M32R_IRQ_SIO4_R (56) /* SIO4 send */ 276#define M32R_IRQ_SIO4_R (56) /* SIO4 send */
257#define M32R_IRQ_SIO4_S (57) /* SIO4 receive */ 277#define M32R_IRQ_SIO4_S (57) /* SIO4 receive */
278#endif /* ! M32104 */
258 279
259#ifdef CONFIG_SMP 280#ifdef CONFIG_SMP
260#define M32R_IRQ_IPI0 (56) 281#define M32R_IRQ_IPI0 (56)
@@ -281,15 +302,12 @@
281#define M32R_FPGA_VERSION0_PORTL (0x30+M32R_FPGA_TOP) 302#define M32R_FPGA_VERSION0_PORTL (0x30+M32R_FPGA_TOP)
282#define M32R_FPGA_VERSION1_PORTL (0x34+M32R_FPGA_TOP) 303#define M32R_FPGA_VERSION1_PORTL (0x34+M32R_FPGA_TOP)
283 304
305#endif /* CONFIG_SMP */
306
284#ifndef __ASSEMBLY__ 307#ifndef __ASSEMBLY__
285/* For NETDEV WATCHDOG */
286typedef struct { 308typedef struct {
287 unsigned long icucr; /* ICU Control Register */ 309 unsigned long icucr; /* ICU Control Register */
288} icu_data_t; 310} icu_data_t;
289
290extern icu_data_t icu_data[];
291#endif 311#endif
292 312
293#endif /* CONFIG_SMP */
294
295#endif /* _M32102_H_ */ 313#endif /* _M32102_H_ */
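
The reshuffle above is a scoping fix: the `#endif /* CONFIG_SMP */` used to sit below the icu_data_t typedef, so uniprocessor builds never saw the type; now the SMP block closes before it, and the unused `extern icu_data_t icu_data[]` declaration (and its stale NETDEV WATCHDOG comment) goes away. The resulting nesting, schematically:

    #ifdef CONFIG_SMP
    /* ... IPI vectors, MAP FPGA register definitions ... */
    #endif /* CONFIG_SMP */

    #ifndef __ASSEMBLY__
    typedef struct {
            unsigned long icucr;    /* ICU Control Register */
    } icu_data_t;                   /* now visible to UP builds too */
    #endif
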
diff --git a/include/asm-m32r/m32104ut/m32104ut_pld.h b/include/asm-m32r/m32104ut/m32104ut_pld.h
new file mode 100644
index 000000000000..a4eac20553df
--- /dev/null
+++ b/include/asm-m32r/m32104ut/m32104ut_pld.h
@@ -0,0 +1,163 @@
1/*
2 * include/asm/m32104ut/m32104ut_pld.h
3 *
4 * Definitions for Programmable Logic Device (PLD) on M32104UT board.
5 * Based on m32700ut_pld.h
6 *
7 * Copyright (c) 2002 Takeo Takahashi
8 * Copyright (c) 2005 Naoto Sugai
9 *
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file "COPYING" in the main directory of
12 * this archive for more details.
13 */
14
15#ifndef _M32104UT_M32104UT_PLD_H
16#define _M32104UT_M32104UT_PLD_H
17
18#include <linux/config.h>
19
20#if defined(CONFIG_PLAT_M32104UT)
21#define PLD_PLAT_BASE 0x02c00000
22#else
23#error "no platform configuration"
24#endif
25
26#ifndef __ASSEMBLY__
27/*
28 * C functions use non-cache address.
29 */
30#define PLD_BASE (PLD_PLAT_BASE /* + NONCACHE_OFFSET */)
31#define __reg8 (volatile unsigned char *)
32#define __reg16 (volatile unsigned short *)
33#define __reg32 (volatile unsigned int *)
34#else
35#define PLD_BASE (PLD_PLAT_BASE + NONCACHE_OFFSET)
36#define __reg8
37#define __reg16
38#define __reg32
39#endif /* __ASSEMBLY__ */
40
41/* CFC */
42#define PLD_CFRSTCR __reg16(PLD_BASE + 0x0000)
43#define PLD_CFSTS __reg16(PLD_BASE + 0x0002)
44#define PLD_CFIMASK __reg16(PLD_BASE + 0x0004)
45#define PLD_CFBUFCR __reg16(PLD_BASE + 0x0006)
46
47/* MMC */
48#define PLD_MMCCR __reg16(PLD_BASE + 0x4000)
49#define PLD_MMCMOD __reg16(PLD_BASE + 0x4002)
50#define PLD_MMCSTS __reg16(PLD_BASE + 0x4006)
51#define PLD_MMCBAUR __reg16(PLD_BASE + 0x400a)
52#define PLD_MMCCMDBCUT __reg16(PLD_BASE + 0x400c)
53#define PLD_MMCCDTBCUT __reg16(PLD_BASE + 0x400e)
54#define PLD_MMCDET __reg16(PLD_BASE + 0x4010)
55#define PLD_MMCWP __reg16(PLD_BASE + 0x4012)
56#define PLD_MMCWDATA __reg16(PLD_BASE + 0x5000)
57#define PLD_MMCRDATA __reg16(PLD_BASE + 0x6000)
58#define PLD_MMCCMDDATA __reg16(PLD_BASE + 0x7000)
59#define PLD_MMCRSPDATA __reg16(PLD_BASE + 0x7006)
60
61/* ICU
62 * ICUISTS: status register
63 * ICUIREQ0: request register
64 * ICUIREQ1: request register
65 * ICUCR3: control register for CFIREQ# interrupt
66 * ICUCR4: control register for CFC Card insert interrupt
67 * ICUCR5: control register for CFC Card eject interrupt
68 * ICUCR6: control register for external interrupt
69 * ICUCR11: control register for MMC Card insert/eject interrupt
70 * ICUCR13: control register for SC error interrupt
71 * ICUCR14: control register for SC receive interrupt
72 * ICUCR15: control register for SC send interrupt
73 */
74
75#define PLD_IRQ_INT0 (M32104UT_PLD_IRQ_BASE + 0) /* None */
76#define PLD_IRQ_CFIREQ (M32104UT_PLD_IRQ_BASE + 3) /* CF IREQ */
77#define PLD_IRQ_CFC_INSERT (M32104UT_PLD_IRQ_BASE + 4) /* CF Insert */
78#define PLD_IRQ_CFC_EJECT (M32104UT_PLD_IRQ_BASE + 5) /* CF Eject */
79#define PLD_IRQ_EXINT (M32104UT_PLD_IRQ_BASE + 6) /* EXINT */
80#define PLD_IRQ_MMCCARD (M32104UT_PLD_IRQ_BASE + 11) /* MMC Insert/Eject */
81#define PLD_IRQ_SC_ERROR (M32104UT_PLD_IRQ_BASE + 13) /* SC error */
82#define PLD_IRQ_SC_RCV (M32104UT_PLD_IRQ_BASE + 14) /* SC receive */
83#define PLD_IRQ_SC_SND (M32104UT_PLD_IRQ_BASE + 15) /* SC send */
84
85#define PLD_ICUISTS __reg16(PLD_BASE + 0x8002)
86#define PLD_ICUISTS_VECB_MASK (0xf000)
87#define PLD_ICUISTS_VECB(x) ((x) & PLD_ICUISTS_VECB_MASK)
88#define PLD_ICUISTS_ISN_MASK (0x07c0)
89#define PLD_ICUISTS_ISN(x) ((x) & PLD_ICUISTS_ISN_MASK)
90#define PLD_ICUCR3 __reg16(PLD_BASE + 0x8104)
91#define PLD_ICUCR4 __reg16(PLD_BASE + 0x8106)
92#define PLD_ICUCR5 __reg16(PLD_BASE + 0x8108)
93#define PLD_ICUCR6 __reg16(PLD_BASE + 0x810a)
94#define PLD_ICUCR11 __reg16(PLD_BASE + 0x8114)
95#define PLD_ICUCR13 __reg16(PLD_BASE + 0x8118)
96#define PLD_ICUCR14 __reg16(PLD_BASE + 0x811a)
97#define PLD_ICUCR15 __reg16(PLD_BASE + 0x811c)
98#define PLD_ICUCR_IEN (0x1000)
99#define PLD_ICUCR_IREQ (0x0100)
100#define PLD_ICUCR_ISMOD00 (0x0000) /* Low edge */
101#define PLD_ICUCR_ISMOD01 (0x0010) /* Low level */
102#define PLD_ICUCR_ISMOD02 (0x0020) /* High edge */
103#define PLD_ICUCR_ISMOD03 (0x0030) /* High level */
104#define PLD_ICUCR_ILEVEL0 (0x0000)
105#define PLD_ICUCR_ILEVEL1 (0x0001)
106#define PLD_ICUCR_ILEVEL2 (0x0002)
107#define PLD_ICUCR_ILEVEL3 (0x0003)
108#define PLD_ICUCR_ILEVEL4 (0x0004)
109#define PLD_ICUCR_ILEVEL5 (0x0005)
110#define PLD_ICUCR_ILEVEL6 (0x0006)
111#define PLD_ICUCR_ILEVEL7 (0x0007)
112
113/* Power Control of MMC and CF */
114#define PLD_CPCR __reg16(PLD_BASE + 0x14000)
115#define PLD_CPCR_CDP 0x0001
116
117/* LED Control
118 *
119 * 1: DIP switch side
120 * 2: Reset switch side
121 */
122#define PLD_IOLEDCR __reg16(PLD_BASE + 0x14002)
123#define PLD_IOLED_1_ON 0x001
124#define PLD_IOLED_1_OFF 0x000
125#define PLD_IOLED_2_ON 0x002
126#define PLD_IOLED_2_OFF 0x000
127
128/* DIP Switch
129 * 0: Write-protect of Flash Memory (0:protected, 1:non-protected)
130 * 1: -
131 * 2: -
132 * 3: -
133 */
134#define PLD_IOSWSTS __reg16(PLD_BASE + 0x14004)
135#define PLD_IOSWSTS_IOSW2 0x0200
136#define PLD_IOSWSTS_IOSW1 0x0100
137#define PLD_IOSWSTS_IOWP0 0x0001
138
139/* CRC */
140#define PLD_CRC7DATA __reg16(PLD_BASE + 0x18000)
141#define PLD_CRC7INDATA __reg16(PLD_BASE + 0x18002)
142#define PLD_CRC16DATA __reg16(PLD_BASE + 0x18004)
143#define PLD_CRC16INDATA __reg16(PLD_BASE + 0x18006)
144#define PLD_CRC16ADATA __reg16(PLD_BASE + 0x18008)
145#define PLD_CRC16AINDATA __reg16(PLD_BASE + 0x1800a)
146
147/* RTC */
148#define PLD_RTCCR __reg16(PLD_BASE + 0x1c000)
149#define PLD_RTCBAUR __reg16(PLD_BASE + 0x1c002)
150#define PLD_RTCWRDATA __reg16(PLD_BASE + 0x1c004)
151#define PLD_RTCRDDATA __reg16(PLD_BASE + 0x1c006)
152#define PLD_RTCRSTODT __reg16(PLD_BASE + 0x1c008)
153
154/* SIM Card */
155#define PLD_SCCR __reg16(PLD_BASE + 0x38000)
156#define PLD_SCMOD __reg16(PLD_BASE + 0x38004)
157#define PLD_SCSTS __reg16(PLD_BASE + 0x38006)
158#define PLD_SCINTCR __reg16(PLD_BASE + 0x38008)
159#define PLD_SCBAUR __reg16(PLD_BASE + 0x3800a)
160#define PLD_SCTXB __reg16(PLD_BASE + 0x3800c)
161#define PLD_SCRXB __reg16(PLD_BASE + 0x3800e)
162
163#endif /* _M32104UT_M32104UT_PLD_H */
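
Since __reg8/__reg16/__reg32 expand to pointer casts on the C side, every PLD_* symbol in this header is directly dereferenceable; programming an ICU channel is a single 16-bit store assembled from the PLD_ICUCR_* fields. A sketch (the sense/level choice is purely illustrative):

    #include <asm/m32r.h>   /* pulls in m32104ut_pld.h on this platform */

    static void m32104ut_enable_cf_irq(void)
    {
            /* CFIREQ#: enable, sense low level, priority level 4. */
            *PLD_ICUCR3 = PLD_ICUCR_IEN | PLD_ICUCR_ISMOD01 | PLD_ICUCR_ILEVEL4;
    }
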
diff --git a/include/asm-m32r/m32r.h b/include/asm-m32r/m32r.h
index ec142be00862..b133ca61acf1 100644
--- a/include/asm-m32r/m32r.h
+++ b/include/asm-m32r/m32r.h
@@ -14,7 +14,7 @@
14#include <asm/m32r_mp_fpga.h> 14#include <asm/m32r_mp_fpga.h>
15#elif defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \ 15#elif defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
16 || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \ 16 || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
17 || defined(CONFIG_CHIP_OPSP) 17 || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
18#include <asm/m32102.h> 18#include <asm/m32102.h>
19#endif 19#endif
20 20
@@ -43,6 +43,10 @@
43#include <asm/m32700ut/m32700ut_pld.h> 43#include <asm/m32700ut/m32700ut_pld.h>
44#endif 44#endif
45 45
46#if defined(CONFIG_PLAT_M32104UT)
47#include <asm/m32104ut/m32104ut_pld.h>
48#endif /* CONFIG_PLAT_M32104UT */
49
46/* 50/*
47 * M32R Register 51 * M32R Register
48 */ 52 */
@@ -122,7 +126,7 @@
122 126
123#include <asm/page.h> 127#include <asm/page.h>
124#ifdef CONFIG_MMU 128#ifdef CONFIG_MMU
125#define NONCACHE_OFFSET __PAGE_OFFSET+0x20000000 129#define NONCACHE_OFFSET (__PAGE_OFFSET + 0x20000000)
126#else 130#else
127#define NONCACHE_OFFSET __PAGE_OFFSET 131#define NONCACHE_OFFSET __PAGE_OFFSET
128#endif /* CONFIG_MMU */ 132#endif /* CONFIG_MMU */
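
The NONCACHE_OFFSET hunk is a precedence fix: unparenthesised, the macro body `__PAGE_OFFSET+0x20000000` splices into surrounding expressions term by term. A stand-alone demonstration with illustrative values:

    #include <stdio.h>

    #define PAGE_OFFSET 0x80000000ULL                       /* illustrative */
    #define NONCACHE_OFFSET_OLD PAGE_OFFSET + 0x20000000    /* no parens    */
    #define NONCACHE_OFFSET_NEW (PAGE_OFFSET + 0x20000000)

    int main(void)
    {
            /* Doubling "the offset": * binds tighter than +, so the old
             * form doubles only the 0x20000000 term. */
            printf("old: %#llx\n", NONCACHE_OFFSET_OLD * 2);  /* 0xc0000000  */
            printf("new: %#llx\n", NONCACHE_OFFSET_NEW * 2);  /* 0x140000000 */
            return 0;
    }
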
diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h
index 011f6d9ec5cc..12e29747bc84 100644
--- a/include/asm-m32r/mman.h
+++ b/include/asm-m32r/mman.h
@@ -37,6 +37,7 @@
37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 37#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
38#define MADV_WILLNEED 0x3 /* pre-fault pages */ 38#define MADV_WILLNEED 0x3 /* pre-fault pages */
39#define MADV_DONTNEED 0x4 /* discard these pages */ 39#define MADV_DONTNEED 0x4 /* discard these pages */
40#define MADV_REMOVE 0x5 /* remove these pages & resources */
40 41
41/* compatibility flags */ 42/* compatibility flags */
42#define MAP_ANON MAP_ANONYMOUS 43#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 5eee832b73a0..dcf619a0a0b0 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -69,12 +69,12 @@
69} while(0) 69} while(0)
70 70
71/* Interrupt Control */ 71/* Interrupt Control */
72#if !defined(CONFIG_CHIP_M32102) 72#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
73#define local_irq_enable() \ 73#define local_irq_enable() \
74 __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory") 74 __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
75#define local_irq_disable() \ 75#define local_irq_disable() \
76 __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory") 76 __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
77#else /* CONFIG_CHIP_M32102 */ 77#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
78static inline void local_irq_enable(void) 78static inline void local_irq_enable(void)
79{ 79{
80 unsigned long tmpreg; 80 unsigned long tmpreg;
@@ -96,7 +96,7 @@ static inline void local_irq_disable(void)
96 "mvtc %0, psw \n\t" 96 "mvtc %0, psw \n\t"
97 : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory"); 97 : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
98} 98}
99#endif /* CONFIG_CHIP_M32102 */ 99#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
100 100
101#define local_save_flags(x) \ 101#define local_save_flags(x) \
102 __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */) 102 __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)
@@ -105,13 +105,13 @@ static inline void local_irq_disable(void)
105 __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \ 105 __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
106 : "r" (x) : "cbit", "memory") 106 : "r" (x) : "cbit", "memory")
107 107
108#if !defined(CONFIG_CHIP_M32102) 108#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
109#define local_irq_save(x) \ 109#define local_irq_save(x) \
110 __asm__ __volatile__( \ 110 __asm__ __volatile__( \
111 "mvfc %0, psw; \n\t" \ 111 "mvfc %0, psw; \n\t" \
112 "clrpsw #0x40 -> nop; \n\t" \ 112 "clrpsw #0x40 -> nop; \n\t" \
113 : "=r" (x) : /* no input */ : "memory") 113 : "=r" (x) : /* no input */ : "memory")
114#else /* CONFIG_CHIP_M32102 */ 114#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
115#define local_irq_save(x) \ 115#define local_irq_save(x) \
116 ({ \ 116 ({ \
117 unsigned long tmpreg; \ 117 unsigned long tmpreg; \
@@ -124,7 +124,7 @@ static inline void local_irq_disable(void)
124 : "=r" (x), "=&r" (tmpreg) \ 124 : "=r" (x), "=&r" (tmpreg) \
125 : : "cbit", "memory"); \ 125 : : "cbit", "memory"); \
126 }) 126 })
127#endif /* CONFIG_CHIP_M32102 */ 127#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
128 128
129#define irqs_disabled() \ 129#define irqs_disabled() \
130 ({ \ 130 ({ \
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index ac399e1f7bc0..39be87ca2a5a 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -319,7 +319,7 @@ type name(void) \
319register long __scno __asm__ ("r7") = __NR_##name; \ 319register long __scno __asm__ ("r7") = __NR_##name; \
320register long __res __asm__("r0"); \ 320register long __res __asm__("r0"); \
321__asm__ __volatile__ (\ 321__asm__ __volatile__ (\
322 "trap #" SYSCALL_VECTOR \ 322 "trap #" SYSCALL_VECTOR "|| nop"\
323 : "=r" (__res) \ 323 : "=r" (__res) \
324 : "r" (__scno) \ 324 : "r" (__scno) \
325 : "memory"); \ 325 : "memory"); \
@@ -332,7 +332,7 @@ type name(type1 arg1) \
332register long __scno __asm__ ("r7") = __NR_##name; \ 332register long __scno __asm__ ("r7") = __NR_##name; \
333register long __res __asm__ ("r0") = (long)(arg1); \ 333register long __res __asm__ ("r0") = (long)(arg1); \
334__asm__ __volatile__ (\ 334__asm__ __volatile__ (\
335 "trap #" SYSCALL_VECTOR \ 335 "trap #" SYSCALL_VECTOR "|| nop"\
336 : "=r" (__res) \ 336 : "=r" (__res) \
337 : "r" (__scno), "0" (__res) \ 337 : "r" (__scno), "0" (__res) \
338 : "memory"); \ 338 : "memory"); \
@@ -346,7 +346,7 @@ register long __scno __asm__ ("r7") = __NR_##name; \
346register long __arg2 __asm__ ("r1") = (long)(arg2); \ 346register long __arg2 __asm__ ("r1") = (long)(arg2); \
347register long __res __asm__ ("r0") = (long)(arg1); \ 347register long __res __asm__ ("r0") = (long)(arg1); \
348__asm__ __volatile__ (\ 348__asm__ __volatile__ (\
349 "trap #" SYSCALL_VECTOR \ 349 "trap #" SYSCALL_VECTOR "|| nop"\
350 : "=r" (__res) \ 350 : "=r" (__res) \
351 : "r" (__scno), "0" (__res), "r" (__arg2) \ 351 : "r" (__scno), "0" (__res), "r" (__arg2) \
352 : "memory"); \ 352 : "memory"); \
@@ -361,7 +361,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \
361register long __arg2 __asm__ ("r1") = (long)(arg2); \ 361register long __arg2 __asm__ ("r1") = (long)(arg2); \
362register long __res __asm__ ("r0") = (long)(arg1); \ 362register long __res __asm__ ("r0") = (long)(arg1); \
363__asm__ __volatile__ (\ 363__asm__ __volatile__ (\
364 "trap #" SYSCALL_VECTOR \ 364 "trap #" SYSCALL_VECTOR "|| nop"\
365 : "=r" (__res) \ 365 : "=r" (__res) \
366 : "r" (__scno), "0" (__res), "r" (__arg2), \ 366 : "r" (__scno), "0" (__res), "r" (__arg2), \
367 "r" (__arg3) \ 367 "r" (__arg3) \
@@ -378,7 +378,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \
378register long __arg2 __asm__ ("r1") = (long)(arg2); \ 378register long __arg2 __asm__ ("r1") = (long)(arg2); \
379register long __res __asm__ ("r0") = (long)(arg1); \ 379register long __res __asm__ ("r0") = (long)(arg1); \
380__asm__ __volatile__ (\ 380__asm__ __volatile__ (\
381 "trap #" SYSCALL_VECTOR \ 381 "trap #" SYSCALL_VECTOR "|| nop"\
382 : "=r" (__res) \ 382 : "=r" (__res) \
383 : "r" (__scno), "0" (__res), "r" (__arg2), \ 383 : "r" (__scno), "0" (__res), "r" (__arg2), \
384 "r" (__arg3), "r" (__arg4) \ 384 "r" (__arg3), "r" (__arg4) \
@@ -397,7 +397,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \
397register long __arg2 __asm__ ("r1") = (long)(arg2); \ 397register long __arg2 __asm__ ("r1") = (long)(arg2); \
398register long __res __asm__ ("r0") = (long)(arg1); \ 398register long __res __asm__ ("r0") = (long)(arg1); \
399__asm__ __volatile__ (\ 399__asm__ __volatile__ (\
400 "trap #" SYSCALL_VECTOR \ 400 "trap #" SYSCALL_VECTOR "|| nop"\
401 : "=r" (__res) \ 401 : "=r" (__res) \
402 : "r" (__scno), "0" (__res), "r" (__arg2), \ 402 : "r" (__scno), "0" (__res), "r" (__arg2), \
403 "r" (__arg3), "r" (__arg4), "r" (__arg5) \ 403 "r" (__arg3), "r" (__arg4), "r" (__arg5) \
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index e3c962eeabf3..b8a4e75d679d 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -157,4 +157,5 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
157#define smp_mb__before_atomic_inc() barrier() 157#define smp_mb__before_atomic_inc() barrier()
158#define smp_mb__after_atomic_inc() barrier() 158#define smp_mb__after_atomic_inc() barrier()
159 159
160#include <asm-generic/atomic.h>
160#endif /* __ARCH_M68K_ATOMIC __ */ 161#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index 1f569905cb74..127ad190cf2d 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -70,8 +70,6 @@ static __inline__ int irq_canonicalize(int irq)
70 70
71extern void (*enable_irq)(unsigned int); 71extern void (*enable_irq)(unsigned int);
72extern void (*disable_irq)(unsigned int); 72extern void (*disable_irq)(unsigned int);
73
74#define disable_irq_nosync disable_irq
75#define enable_irq_nosync enable_irq 73#define enable_irq_nosync enable_irq
76 74
77struct pt_regs; 75struct pt_regs;
diff --git a/include/asm-m68k/mman.h b/include/asm-m68k/mman.h
index f831c4eeae6e..ea262ab88b3b 100644
--- a/include/asm-m68k/mman.h
+++ b/include/asm-m68k/mman.h
@@ -35,6 +35,7 @@
35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 35#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
36#define MADV_WILLNEED 0x3 /* pre-fault pages */ 36#define MADV_WILLNEED 0x3 /* pre-fault pages */
37#define MADV_DONTNEED 0x4 /* discard these pages */ 37#define MADV_DONTNEED 0x4 /* discard these pages */
38#define MADV_REMOVE 0x5 /* remove these pages & resources */
38 39
39/* compatibility flags */ 40/* compatibility flags */
40#define MAP_ANON MAP_ANONYMOUS 41#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 3c1cc153c415..1702dbe9318c 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -143,4 +143,5 @@ static inline int atomic_sub_return(int i, atomic_t * v)
143#define atomic_dec_return(v) atomic_sub_return(1,(v)) 143#define atomic_dec_return(v) atomic_sub_return(1,(v))
144#define atomic_inc_return(v) atomic_add_return(1,(v)) 144#define atomic_inc_return(v) atomic_add_return(1,(v))
145 145
146#include <asm-generic/atomic.h>
146#endif /* __ARCH_M68KNOMMU_ATOMIC __ */ 147#endif /* __ARCH_M68KNOMMU_ATOMIC __ */
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index a08fa9b958da..20c48ec858a4 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -84,10 +84,8 @@ extern void (*mach_disable_irq)(unsigned int);
84/* 84/*
85 * Some drivers want these entry points 85 * Some drivers want these entry points
86 */ 86 */
87#define enable_irq(x) (mach_enable_irq ? (*mach_enable_irq)(x) : 0) 87#define enable_irq(x) 0
88#define disable_irq(x) (mach_disable_irq ? (*mach_disable_irq)(x) : 0) 88#define disable_irq(x) do { } while (0)
89
90#define enable_irq_nosync(x) enable_irq(x)
91#define disable_irq_nosync(x) disable_irq(x) 89#define disable_irq_nosync(x) disable_irq(x)
92 90
93struct irqaction; 91struct irqaction;
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 55c37c106ef0..92256e43a938 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -713,4 +713,5 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
713#define smp_mb__before_atomic_inc() smp_mb() 713#define smp_mb__before_atomic_inc() smp_mb()
714#define smp_mb__after_atomic_inc() smp_mb() 714#define smp_mb__after_atomic_inc() smp_mb()
715 715
716#include <asm-generic/atomic.h>
716#endif /* _ASM_ATOMIC_H */ 717#endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-mips/mman.h b/include/asm-mips/mman.h
index 62060957ba93..dd17c8bd62a1 100644
--- a/include/asm-mips/mman.h
+++ b/include/asm-mips/mman.h
@@ -65,6 +65,7 @@
65#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 65#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
66#define MADV_WILLNEED 0x3 /* pre-fault pages */ 66#define MADV_WILLNEED 0x3 /* pre-fault pages */
67#define MADV_DONTNEED 0x4 /* discard these pages */ 67#define MADV_DONTNEED 0x4 /* discard these pages */
68#define MADV_REMOVE 0x5 /* remove these pages & resources */
68 69
69/* compatibility flags */ 70/* compatibility flags */
70#define MAP_ANON MAP_ANONYMOUS 71#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-mips/riscos-syscall.h b/include/asm-mips/riscos-syscall.h
deleted file mode 100644
index 4d8eb15461eb..000000000000
--- a/include/asm-mips/riscos-syscall.h
+++ /dev/null
@@ -1,979 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle
7 */
8#ifndef _ASM_RISCOS_SYSCALL_H
9#define _ASM_RISCOS_SYSCALL_H
10
11/*
12 * The syscalls 0 - 3999 are reserved for a down to the root syscall
13 * compatibility with RISC/os and IRIX. We'll see how to deal with the
14 * various "real" BSD variants like Ultrix, NetBSD ...
15 */
16
17/*
18 * SVR4 syscalls are in the range from 1 to 999
19 */
20#define __NR_SVR4 0
21#define __NR_SVR4_syscall (__NR_SVR4 + 0)
22#define __NR_SVR4_exit (__NR_SVR4 + 1)
23#define __NR_SVR4_fork (__NR_SVR4 + 2)
24#define __NR_SVR4_read (__NR_SVR4 + 3)
25#define __NR_SVR4_write (__NR_SVR4 + 4)
26#define __NR_SVR4_open (__NR_SVR4 + 5)
27#define __NR_SVR4_close (__NR_SVR4 + 6)
28#define __NR_SVR4_wait (__NR_SVR4 + 7)
29#define __NR_SVR4_creat (__NR_SVR4 + 8)
30#define __NR_SVR4_link (__NR_SVR4 + 9)
31#define __NR_SVR4_unlink (__NR_SVR4 + 10)
32#define __NR_SVR4_exec (__NR_SVR4 + 11)
33#define __NR_SVR4_chdir (__NR_SVR4 + 12)
34#define __NR_SVR4_gtime (__NR_SVR4 + 13)
35#define __NR_SVR4_mknod (__NR_SVR4 + 14)
36#define __NR_SVR4_chmod (__NR_SVR4 + 15)
37#define __NR_SVR4_chown (__NR_SVR4 + 16)
38#define __NR_SVR4_sbreak (__NR_SVR4 + 17)
39#define __NR_SVR4_stat (__NR_SVR4 + 18)
40#define __NR_SVR4_lseek (__NR_SVR4 + 19)
41#define __NR_SVR4_getpid (__NR_SVR4 + 20)
42#define __NR_SVR4_mount (__NR_SVR4 + 21)
43#define __NR_SVR4_umount (__NR_SVR4 + 22)
44#define __NR_SVR4_setuid (__NR_SVR4 + 23)
45#define __NR_SVR4_getuid (__NR_SVR4 + 24)
46#define __NR_SVR4_stime (__NR_SVR4 + 25)
47#define __NR_SVR4_ptrace (__NR_SVR4 + 26)
48#define __NR_SVR4_alarm (__NR_SVR4 + 27)
49#define __NR_SVR4_fstat (__NR_SVR4 + 28)
50#define __NR_SVR4_pause (__NR_SVR4 + 29)
51#define __NR_SVR4_utime (__NR_SVR4 + 30)
52#define __NR_SVR4_stty (__NR_SVR4 + 31)
53#define __NR_SVR4_gtty (__NR_SVR4 + 32)
54#define __NR_SVR4_access (__NR_SVR4 + 33)
55#define __NR_SVR4_nice (__NR_SVR4 + 34)
56#define __NR_SVR4_statfs (__NR_SVR4 + 35)
57#define __NR_SVR4_sync (__NR_SVR4 + 36)
58#define __NR_SVR4_kill (__NR_SVR4 + 37)
59#define __NR_SVR4_fstatfs (__NR_SVR4 + 38)
60#define __NR_SVR4_setpgrp (__NR_SVR4 + 39)
61#define __NR_SVR4_cxenix (__NR_SVR4 + 40)
62#define __NR_SVR4_dup (__NR_SVR4 + 41)
63#define __NR_SVR4_pipe (__NR_SVR4 + 42)
64#define __NR_SVR4_times (__NR_SVR4 + 43)
65#define __NR_SVR4_profil (__NR_SVR4 + 44)
66#define __NR_SVR4_plock (__NR_SVR4 + 45)
67#define __NR_SVR4_setgid (__NR_SVR4 + 46)
68#define __NR_SVR4_getgid (__NR_SVR4 + 47)
69#define __NR_SVR4_sig (__NR_SVR4 + 48)
70#define __NR_SVR4_msgsys (__NR_SVR4 + 49)
71#define __NR_SVR4_sysmips (__NR_SVR4 + 50)
72#define __NR_SVR4_sysacct (__NR_SVR4 + 51)
73#define __NR_SVR4_shmsys (__NR_SVR4 + 52)
74#define __NR_SVR4_semsys (__NR_SVR4 + 53)
75#define __NR_SVR4_ioctl (__NR_SVR4 + 54)
76#define __NR_SVR4_uadmin (__NR_SVR4 + 55)
77#define __NR_SVR4_exch (__NR_SVR4 + 56)
78#define __NR_SVR4_utssys (__NR_SVR4 + 57)
79#define __NR_SVR4_fsync (__NR_SVR4 + 58)
80#define __NR_SVR4_exece (__NR_SVR4 + 59)
81#define __NR_SVR4_umask (__NR_SVR4 + 60)
82#define __NR_SVR4_chroot (__NR_SVR4 + 61)
83#define __NR_SVR4_fcntl (__NR_SVR4 + 62)
84#define __NR_SVR4_ulimit (__NR_SVR4 + 63)
85#define __NR_SVR4_reserved1 (__NR_SVR4 + 64)
86#define __NR_SVR4_reserved2 (__NR_SVR4 + 65)
87#define __NR_SVR4_reserved3 (__NR_SVR4 + 66)
88#define __NR_SVR4_reserved4 (__NR_SVR4 + 67)
89#define __NR_SVR4_reserved5 (__NR_SVR4 + 68)
90#define __NR_SVR4_reserved6 (__NR_SVR4 + 69)
91#define __NR_SVR4_advfs (__NR_SVR4 + 70)
92#define __NR_SVR4_unadvfs (__NR_SVR4 + 71)
93#define __NR_SVR4_unused1 (__NR_SVR4 + 72)
94#define __NR_SVR4_unused2 (__NR_SVR4 + 73)
95#define __NR_SVR4_rfstart (__NR_SVR4 + 74)
96#define __NR_SVR4_unused3 (__NR_SVR4 + 75)
97#define __NR_SVR4_rdebug (__NR_SVR4 + 76)
98#define __NR_SVR4_rfstop (__NR_SVR4 + 77)
99#define __NR_SVR4_rfsys (__NR_SVR4 + 78)
100#define __NR_SVR4_rmdir (__NR_SVR4 + 79)
101#define __NR_SVR4_mkdir (__NR_SVR4 + 80)
102#define __NR_SVR4_getdents (__NR_SVR4 + 81)
103#define __NR_SVR4_libattach (__NR_SVR4 + 82)
104#define __NR_SVR4_libdetach (__NR_SVR4 + 83)
105#define __NR_SVR4_sysfs (__NR_SVR4 + 84)
106#define __NR_SVR4_getmsg (__NR_SVR4 + 85)
107#define __NR_SVR4_putmsg (__NR_SVR4 + 86)
108#define __NR_SVR4_poll (__NR_SVR4 + 87)
109#define __NR_SVR4_lstat (__NR_SVR4 + 88)
110#define __NR_SVR4_symlink (__NR_SVR4 + 89)
111#define __NR_SVR4_readlink (__NR_SVR4 + 90)
112#define __NR_SVR4_setgroups (__NR_SVR4 + 91)
113#define __NR_SVR4_getgroups (__NR_SVR4 + 92)
114#define __NR_SVR4_fchmod (__NR_SVR4 + 93)
115#define __NR_SVR4_fchown (__NR_SVR4 + 94)
116#define __NR_SVR4_sigprocmask (__NR_SVR4 + 95)
117#define __NR_SVR4_sigsuspend (__NR_SVR4 + 96)
118#define __NR_SVR4_sigaltstack (__NR_SVR4 + 97)
119#define __NR_SVR4_sigaction (__NR_SVR4 + 98)
120#define __NR_SVR4_sigpending (__NR_SVR4 + 99)
121#define __NR_SVR4_setcontext (__NR_SVR4 + 100)
122#define __NR_SVR4_evsys (__NR_SVR4 + 101)
123#define __NR_SVR4_evtrapret (__NR_SVR4 + 102)
124#define __NR_SVR4_statvfs (__NR_SVR4 + 103)
125#define __NR_SVR4_fstatvfs (__NR_SVR4 + 104)
126#define __NR_SVR4_reserved7 (__NR_SVR4 + 105)
127#define __NR_SVR4_nfssys (__NR_SVR4 + 106)
128#define __NR_SVR4_waitid (__NR_SVR4 + 107)
129#define __NR_SVR4_sigsendset (__NR_SVR4 + 108)
130#define __NR_SVR4_hrtsys (__NR_SVR4 + 109)
131#define __NR_SVR4_acancel (__NR_SVR4 + 110)
132#define __NR_SVR4_async (__NR_SVR4 + 111)
133#define __NR_SVR4_priocntlset (__NR_SVR4 + 112)
134#define __NR_SVR4_pathconf (__NR_SVR4 + 113)
135#define __NR_SVR4_mincore (__NR_SVR4 + 114)
136#define __NR_SVR4_mmap (__NR_SVR4 + 115)
137#define __NR_SVR4_mprotect (__NR_SVR4 + 116)
138#define __NR_SVR4_munmap (__NR_SVR4 + 117)
139#define __NR_SVR4_fpathconf (__NR_SVR4 + 118)
140#define __NR_SVR4_vfork (__NR_SVR4 + 119)
141#define __NR_SVR4_fchdir (__NR_SVR4 + 120)
142#define __NR_SVR4_readv (__NR_SVR4 + 121)
143#define __NR_SVR4_writev (__NR_SVR4 + 122)
144#define __NR_SVR4_xstat (__NR_SVR4 + 123)
145#define __NR_SVR4_lxstat (__NR_SVR4 + 124)
146#define __NR_SVR4_fxstat (__NR_SVR4 + 125)
147#define __NR_SVR4_xmknod (__NR_SVR4 + 126)
148#define __NR_SVR4_clocal (__NR_SVR4 + 127)
149#define __NR_SVR4_setrlimit (__NR_SVR4 + 128)
150#define __NR_SVR4_getrlimit (__NR_SVR4 + 129)
151#define __NR_SVR4_lchown (__NR_SVR4 + 130)
152#define __NR_SVR4_memcntl (__NR_SVR4 + 131)
153#define __NR_SVR4_getpmsg (__NR_SVR4 + 132)
154#define __NR_SVR4_putpmsg (__NR_SVR4 + 133)
155#define __NR_SVR4_rename (__NR_SVR4 + 134)
156#define __NR_SVR4_nuname (__NR_SVR4 + 135)
157#define __NR_SVR4_setegid (__NR_SVR4 + 136)
158#define __NR_SVR4_sysconf (__NR_SVR4 + 137)
159#define __NR_SVR4_adjtime (__NR_SVR4 + 138)
160#define __NR_SVR4_sysinfo (__NR_SVR4 + 139)
161#define __NR_SVR4_reserved8 (__NR_SVR4 + 140)
162#define __NR_SVR4_seteuid (__NR_SVR4 + 141)
163#define __NR_SVR4_PYRAMID_statis (__NR_SVR4 + 142)
164#define __NR_SVR4_PYRAMID_tuning (__NR_SVR4 + 143)
165#define __NR_SVR4_PYRAMID_forcerr (__NR_SVR4 + 144)
166#define __NR_SVR4_PYRAMID_mpcntl (__NR_SVR4 + 145)
167#define __NR_SVR4_reserved9 (__NR_SVR4 + 146)
168#define __NR_SVR4_reserved10 (__NR_SVR4 + 147)
169#define __NR_SVR4_reserved11 (__NR_SVR4 + 148)
170#define __NR_SVR4_reserved12 (__NR_SVR4 + 149)
171#define __NR_SVR4_reserved13 (__NR_SVR4 + 150)
172#define __NR_SVR4_reserved14 (__NR_SVR4 + 151)
173#define __NR_SVR4_reserved15 (__NR_SVR4 + 152)
174#define __NR_SVR4_reserved16 (__NR_SVR4 + 153)
175#define __NR_SVR4_reserved17 (__NR_SVR4 + 154)
176#define __NR_SVR4_reserved18 (__NR_SVR4 + 155)
177#define __NR_SVR4_reserved19 (__NR_SVR4 + 156)
178#define __NR_SVR4_reserved20 (__NR_SVR4 + 157)
179#define __NR_SVR4_reserved21 (__NR_SVR4 + 158)
180#define __NR_SVR4_reserved22 (__NR_SVR4 + 159)
181#define __NR_SVR4_reserved23 (__NR_SVR4 + 160)
182#define __NR_SVR4_reserved24 (__NR_SVR4 + 161)
183#define __NR_SVR4_reserved25 (__NR_SVR4 + 162)
184#define __NR_SVR4_reserved26 (__NR_SVR4 + 163)
185#define __NR_SVR4_reserved27 (__NR_SVR4 + 164)
186#define __NR_SVR4_reserved28 (__NR_SVR4 + 165)
187#define __NR_SVR4_reserved29 (__NR_SVR4 + 166)
188#define __NR_SVR4_reserved30 (__NR_SVR4 + 167)
189#define __NR_SVR4_reserved31 (__NR_SVR4 + 168)
190#define __NR_SVR4_reserved32 (__NR_SVR4 + 169)
191#define __NR_SVR4_reserved33 (__NR_SVR4 + 170)
192#define __NR_SVR4_reserved34 (__NR_SVR4 + 171)
193#define __NR_SVR4_reserved35 (__NR_SVR4 + 172)
194#define __NR_SVR4_reserved36 (__NR_SVR4 + 173)
195#define __NR_SVR4_reserved37 (__NR_SVR4 + 174)
196#define __NR_SVR4_reserved38 (__NR_SVR4 + 175)
197#define __NR_SVR4_reserved39 (__NR_SVR4 + 176)
198#define __NR_SVR4_reserved40 (__NR_SVR4 + 177)
199#define __NR_SVR4_reserved41 (__NR_SVR4 + 178)
200#define __NR_SVR4_reserved42 (__NR_SVR4 + 179)
201#define __NR_SVR4_reserved43 (__NR_SVR4 + 180)
202#define __NR_SVR4_reserved44 (__NR_SVR4 + 181)
203#define __NR_SVR4_reserved45 (__NR_SVR4 + 182)
204#define __NR_SVR4_reserved46 (__NR_SVR4 + 183)
205#define __NR_SVR4_reserved47 (__NR_SVR4 + 184)
206#define __NR_SVR4_reserved48 (__NR_SVR4 + 185)
207#define __NR_SVR4_reserved49 (__NR_SVR4 + 186)
208#define __NR_SVR4_reserved50 (__NR_SVR4 + 187)
209#define __NR_SVR4_reserved51 (__NR_SVR4 + 188)
210#define __NR_SVR4_reserved52 (__NR_SVR4 + 189)
211#define __NR_SVR4_reserved53 (__NR_SVR4 + 190)
212#define __NR_SVR4_reserved54 (__NR_SVR4 + 191)
213#define __NR_SVR4_reserved55 (__NR_SVR4 + 192)
214#define __NR_SVR4_reserved56 (__NR_SVR4 + 193)
215#define __NR_SVR4_reserved57 (__NR_SVR4 + 194)
216#define __NR_SVR4_reserved58 (__NR_SVR4 + 195)
217#define __NR_SVR4_reserved59 (__NR_SVR4 + 196)
218#define __NR_SVR4_reserved60 (__NR_SVR4 + 197)
219#define __NR_SVR4_reserved61 (__NR_SVR4 + 198)
220#define __NR_SVR4_reserved62 (__NR_SVR4 + 199)
221#define __NR_SVR4_reserved63 (__NR_SVR4 + 200)
222#define __NR_SVR4_aread (__NR_SVR4 + 201)
223#define __NR_SVR4_awrite (__NR_SVR4 + 202)
224#define __NR_SVR4_listio (__NR_SVR4 + 203)
225#define __NR_SVR4_mips_acancel (__NR_SVR4 + 204)
226#define __NR_SVR4_astatus (__NR_SVR4 + 205)
227#define __NR_SVR4_await (__NR_SVR4 + 206)
228#define __NR_SVR4_areadv (__NR_SVR4 + 207)
229#define __NR_SVR4_awritev (__NR_SVR4 + 208)
230#define __NR_SVR4_MIPS_reserved1 (__NR_SVR4 + 209)
231#define __NR_SVR4_MIPS_reserved2 (__NR_SVR4 + 210)
232#define __NR_SVR4_MIPS_reserved3 (__NR_SVR4 + 211)
233#define __NR_SVR4_MIPS_reserved4 (__NR_SVR4 + 212)
234#define __NR_SVR4_MIPS_reserved5 (__NR_SVR4 + 213)
235#define __NR_SVR4_MIPS_reserved6 (__NR_SVR4 + 214)
236#define __NR_SVR4_MIPS_reserved7 (__NR_SVR4 + 215)
237#define __NR_SVR4_MIPS_reserved8 (__NR_SVR4 + 216)
238#define __NR_SVR4_MIPS_reserved9 (__NR_SVR4 + 217)
239#define __NR_SVR4_MIPS_reserved10 (__NR_SVR4 + 218)
240#define __NR_SVR4_MIPS_reserved11 (__NR_SVR4 + 219)
241#define __NR_SVR4_MIPS_reserved12 (__NR_SVR4 + 220)
242#define __NR_SVR4_CDC_reserved1 (__NR_SVR4 + 221)
243#define __NR_SVR4_CDC_reserved2 (__NR_SVR4 + 222)
244#define __NR_SVR4_CDC_reserved3 (__NR_SVR4 + 223)
245#define __NR_SVR4_CDC_reserved4 (__NR_SVR4 + 224)
246#define __NR_SVR4_CDC_reserved5 (__NR_SVR4 + 225)
247#define __NR_SVR4_CDC_reserved6 (__NR_SVR4 + 226)
248#define __NR_SVR4_CDC_reserved7 (__NR_SVR4 + 227)
249#define __NR_SVR4_CDC_reserved8 (__NR_SVR4 + 228)
250#define __NR_SVR4_CDC_reserved9 (__NR_SVR4 + 229)
251#define __NR_SVR4_CDC_reserved10 (__NR_SVR4 + 230)
252#define __NR_SVR4_CDC_reserved11 (__NR_SVR4 + 231)
253#define __NR_SVR4_CDC_reserved12 (__NR_SVR4 + 232)
254#define __NR_SVR4_CDC_reserved13 (__NR_SVR4 + 233)
255#define __NR_SVR4_CDC_reserved14 (__NR_SVR4 + 234)
256#define __NR_SVR4_CDC_reserved15 (__NR_SVR4 + 235)
257#define __NR_SVR4_CDC_reserved16 (__NR_SVR4 + 236)
258#define __NR_SVR4_CDC_reserved17 (__NR_SVR4 + 237)
259#define __NR_SVR4_CDC_reserved18 (__NR_SVR4 + 238)
260#define __NR_SVR4_CDC_reserved19 (__NR_SVR4 + 239)
261#define __NR_SVR4_CDC_reserved20 (__NR_SVR4 + 240)
262
263/*
264 * SYS V syscalls are in the range from 1000 to 1999
265 */
266#define __NR_SYSV 1000
267#define __NR_SYSV_syscall (__NR_SYSV + 0)
268#define __NR_SYSV_exit (__NR_SYSV + 1)
269#define __NR_SYSV_fork (__NR_SYSV + 2)
270#define __NR_SYSV_read (__NR_SYSV + 3)
271#define __NR_SYSV_write (__NR_SYSV + 4)
272#define __NR_SYSV_open (__NR_SYSV + 5)
273#define __NR_SYSV_close (__NR_SYSV + 6)
274#define __NR_SYSV_wait (__NR_SYSV + 7)
275#define __NR_SYSV_creat (__NR_SYSV + 8)
276#define __NR_SYSV_link (__NR_SYSV + 9)
277#define __NR_SYSV_unlink (__NR_SYSV + 10)
278#define __NR_SYSV_execv (__NR_SYSV + 11)
279#define __NR_SYSV_chdir (__NR_SYSV + 12)
280#define __NR_SYSV_time (__NR_SYSV + 13)
281#define __NR_SYSV_mknod (__NR_SYSV + 14)
282#define __NR_SYSV_chmod (__NR_SYSV + 15)
283#define __NR_SYSV_chown (__NR_SYSV + 16)
284#define __NR_SYSV_brk (__NR_SYSV + 17)
285#define __NR_SYSV_stat (__NR_SYSV + 18)
286#define __NR_SYSV_lseek (__NR_SYSV + 19)
287#define __NR_SYSV_getpid (__NR_SYSV + 20)
288#define __NR_SYSV_mount (__NR_SYSV + 21)
289#define __NR_SYSV_umount (__NR_SYSV + 22)
290#define __NR_SYSV_setuid (__NR_SYSV + 23)
291#define __NR_SYSV_getuid (__NR_SYSV + 24)
292#define __NR_SYSV_stime (__NR_SYSV + 25)
293#define __NR_SYSV_ptrace (__NR_SYSV + 26)
294#define __NR_SYSV_alarm (__NR_SYSV + 27)
295#define __NR_SYSV_fstat (__NR_SYSV + 28)
296#define __NR_SYSV_pause (__NR_SYSV + 29)
297#define __NR_SYSV_utime (__NR_SYSV + 30)
298#define __NR_SYSV_stty (__NR_SYSV + 31)
299#define __NR_SYSV_gtty (__NR_SYSV + 32)
300#define __NR_SYSV_access (__NR_SYSV + 33)
301#define __NR_SYSV_nice (__NR_SYSV + 34)
302#define __NR_SYSV_statfs (__NR_SYSV + 35)
303#define __NR_SYSV_sync (__NR_SYSV + 36)
304#define __NR_SYSV_kill (__NR_SYSV + 37)
305#define __NR_SYSV_fstatfs (__NR_SYSV + 38)
306#define __NR_SYSV_setpgrp (__NR_SYSV + 39)
307#define __NR_SYSV_syssgi (__NR_SYSV + 40)
308#define __NR_SYSV_dup (__NR_SYSV + 41)
309#define __NR_SYSV_pipe (__NR_SYSV + 42)
310#define __NR_SYSV_times (__NR_SYSV + 43)
311#define __NR_SYSV_profil (__NR_SYSV + 44)
312#define __NR_SYSV_plock (__NR_SYSV + 45)
313#define __NR_SYSV_setgid (__NR_SYSV + 46)
314#define __NR_SYSV_getgid (__NR_SYSV + 47)
315#define __NR_SYSV_sig (__NR_SYSV + 48)
316#define __NR_SYSV_msgsys (__NR_SYSV + 49)
317#define __NR_SYSV_sysmips (__NR_SYSV + 50)
318#define __NR_SYSV_acct (__NR_SYSV + 51)
319#define __NR_SYSV_shmsys (__NR_SYSV + 52)
320#define __NR_SYSV_semsys (__NR_SYSV + 53)
321#define __NR_SYSV_ioctl (__NR_SYSV + 54)
322#define __NR_SYSV_uadmin (__NR_SYSV + 55)
323#define __NR_SYSV_sysmp (__NR_SYSV + 56)
324#define __NR_SYSV_utssys (__NR_SYSV + 57)
325#define __NR_SYSV_USG_reserved1 (__NR_SYSV + 58)
326#define __NR_SYSV_execve (__NR_SYSV + 59)
327#define __NR_SYSV_umask (__NR_SYSV + 60)
328#define __NR_SYSV_chroot (__NR_SYSV + 61)
329#define __NR_SYSV_fcntl (__NR_SYSV + 62)
330#define __NR_SYSV_ulimit (__NR_SYSV + 63)
331#define __NR_SYSV_SAFARI4_reserved1 (__NR_SYSV + 64)
332#define __NR_SYSV_SAFARI4_reserved2 (__NR_SYSV + 65)
333#define __NR_SYSV_SAFARI4_reserved3 (__NR_SYSV + 66)
334#define __NR_SYSV_SAFARI4_reserved4 (__NR_SYSV + 67)
335#define __NR_SYSV_SAFARI4_reserved5 (__NR_SYSV + 68)
336#define __NR_SYSV_SAFARI4_reserved6 (__NR_SYSV + 69)
337#define __NR_SYSV_advfs (__NR_SYSV + 70)
338#define __NR_SYSV_unadvfs (__NR_SYSV + 71)
339#define __NR_SYSV_rmount (__NR_SYSV + 72)
340#define __NR_SYSV_rumount (__NR_SYSV + 73)
341#define __NR_SYSV_rfstart (__NR_SYSV + 74)
342#define __NR_SYSV_getrlimit64 (__NR_SYSV + 75)
343#define __NR_SYSV_setrlimit64 (__NR_SYSV + 76)
344#define __NR_SYSV_nanosleep (__NR_SYSV + 77)
345#define __NR_SYSV_lseek64 (__NR_SYSV + 78)
346#define __NR_SYSV_rmdir (__NR_SYSV + 79)
347#define __NR_SYSV_mkdir (__NR_SYSV + 80)
348#define __NR_SYSV_getdents (__NR_SYSV + 81)
349#define __NR_SYSV_sginap (__NR_SYSV + 82)
350#define __NR_SYSV_sgikopt (__NR_SYSV + 83)
351#define __NR_SYSV_sysfs (__NR_SYSV + 84)
352#define __NR_SYSV_getmsg (__NR_SYSV + 85)
353#define __NR_SYSV_putmsg (__NR_SYSV + 86)
354#define __NR_SYSV_poll (__NR_SYSV + 87)
355#define __NR_SYSV_sigreturn (__NR_SYSV + 88)
356#define __NR_SYSV_accept (__NR_SYSV + 89)
357#define __NR_SYSV_bind (__NR_SYSV + 90)
358#define __NR_SYSV_connect (__NR_SYSV + 91)
359#define __NR_SYSV_gethostid (__NR_SYSV + 92)
360#define __NR_SYSV_getpeername (__NR_SYSV + 93)
361#define __NR_SYSV_getsockname (__NR_SYSV + 94)
362#define __NR_SYSV_getsockopt (__NR_SYSV + 95)
363#define __NR_SYSV_listen (__NR_SYSV + 96)
364#define __NR_SYSV_recv (__NR_SYSV + 97)
365#define __NR_SYSV_recvfrom (__NR_SYSV + 98)
366#define __NR_SYSV_recvmsg (__NR_SYSV + 99)
367#define __NR_SYSV_select (__NR_SYSV + 100)
368#define __NR_SYSV_send (__NR_SYSV + 101)
369#define __NR_SYSV_sendmsg (__NR_SYSV + 102)
370#define __NR_SYSV_sendto (__NR_SYSV + 103)
371#define __NR_SYSV_sethostid (__NR_SYSV + 104)
372#define __NR_SYSV_setsockopt (__NR_SYSV + 105)
373#define __NR_SYSV_shutdown (__NR_SYSV + 106)
374#define __NR_SYSV_socket (__NR_SYSV + 107)
375#define __NR_SYSV_gethostname (__NR_SYSV + 108)
376#define __NR_SYSV_sethostname (__NR_SYSV + 109)
377#define __NR_SYSV_getdomainname (__NR_SYSV + 110)
378#define __NR_SYSV_setdomainname (__NR_SYSV + 111)
379#define __NR_SYSV_truncate (__NR_SYSV + 112)
380#define __NR_SYSV_ftruncate (__NR_SYSV + 113)
381#define __NR_SYSV_rename (__NR_SYSV + 114)
382#define __NR_SYSV_symlink (__NR_SYSV + 115)
383#define __NR_SYSV_readlink (__NR_SYSV + 116)
384#define __NR_SYSV_lstat (__NR_SYSV + 117)
385#define __NR_SYSV_nfsmount (__NR_SYSV + 118)
386#define __NR_SYSV_nfssvc (__NR_SYSV + 119)
387#define __NR_SYSV_getfh (__NR_SYSV + 120)
388#define __NR_SYSV_async_daemon (__NR_SYSV + 121)
389#define __NR_SYSV_exportfs (__NR_SYSV + 122)
390#define __NR_SYSV_setregid (__NR_SYSV + 123)
391#define __NR_SYSV_setreuid (__NR_SYSV + 124)
392#define __NR_SYSV_getitimer (__NR_SYSV + 125)
393#define __NR_SYSV_setitimer (__NR_SYSV + 126)
394#define __NR_SYSV_adjtime (__NR_SYSV + 127)
395#define __NR_SYSV_BSD_getime (__NR_SYSV + 128)
396#define __NR_SYSV_sproc (__NR_SYSV + 129)
397#define __NR_SYSV_prctl (__NR_SYSV + 130)
398#define __NR_SYSV_procblk (__NR_SYSV + 131)
399#define __NR_SYSV_sprocsp (__NR_SYSV + 132)
400#define __NR_SYSV_sgigsc (__NR_SYSV + 133)
401#define __NR_SYSV_mmap (__NR_SYSV + 134)
402#define __NR_SYSV_munmap (__NR_SYSV + 135)
403#define __NR_SYSV_mprotect (__NR_SYSV + 136)
404#define __NR_SYSV_msync (__NR_SYSV + 137)
405#define __NR_SYSV_madvise (__NR_SYSV + 138)
406#define __NR_SYSV_pagelock (__NR_SYSV + 139)
407#define __NR_SYSV_getpagesize (__NR_SYSV + 140)
408#define __NR_SYSV_quotactl (__NR_SYSV + 141)
409#define __NR_SYSV_libdetach (__NR_SYSV + 142)
410#define __NR_SYSV_BSDgetpgrp (__NR_SYSV + 143)
411#define __NR_SYSV_BSDsetpgrp (__NR_SYSV + 144)
412#define __NR_SYSV_vhangup (__NR_SYSV + 145)
413#define __NR_SYSV_fsync (__NR_SYSV + 146)
414#define __NR_SYSV_fchdir (__NR_SYSV + 147)
415#define __NR_SYSV_getrlimit (__NR_SYSV + 148)
416#define __NR_SYSV_setrlimit (__NR_SYSV + 149)
417#define __NR_SYSV_cacheflush (__NR_SYSV + 150)
418#define __NR_SYSV_cachectl (__NR_SYSV + 151)
419#define __NR_SYSV_fchown (__NR_SYSV + 152)
420#define __NR_SYSV_fchmod (__NR_SYSV + 153)
421#define __NR_SYSV_wait3 (__NR_SYSV + 154)
422#define __NR_SYSV_socketpair (__NR_SYSV + 155)
423#define __NR_SYSV_sysinfo (__NR_SYSV + 156)
424#define __NR_SYSV_nuname (__NR_SYSV + 157)
425#define __NR_SYSV_xstat (__NR_SYSV + 158)
426#define __NR_SYSV_lxstat (__NR_SYSV + 159)
427#define __NR_SYSV_fxstat (__NR_SYSV + 160)
428#define __NR_SYSV_xmknod (__NR_SYSV + 161)
429#define __NR_SYSV_ksigaction (__NR_SYSV + 162)
430#define __NR_SYSV_sigpending (__NR_SYSV + 163)
431#define __NR_SYSV_sigprocmask (__NR_SYSV + 164)
432#define __NR_SYSV_sigsuspend (__NR_SYSV + 165)
433#define __NR_SYSV_sigpoll (__NR_SYSV + 166)
434#define __NR_SYSV_swapctl (__NR_SYSV + 167)
435#define __NR_SYSV_getcontext (__NR_SYSV + 168)
436#define __NR_SYSV_setcontext (__NR_SYSV + 169)
437#define __NR_SYSV_waitsys (__NR_SYSV + 170)
438#define __NR_SYSV_sigstack (__NR_SYSV + 171)
439#define __NR_SYSV_sigaltstack (__NR_SYSV + 172)
440#define __NR_SYSV_sigsendset (__NR_SYSV + 173)
441#define __NR_SYSV_statvfs (__NR_SYSV + 174)
442#define __NR_SYSV_fstatvfs (__NR_SYSV + 175)
443#define __NR_SYSV_getpmsg (__NR_SYSV + 176)
444#define __NR_SYSV_putpmsg (__NR_SYSV + 177)
445#define __NR_SYSV_lchown (__NR_SYSV + 178)
446#define __NR_SYSV_priocntl (__NR_SYSV + 179)
447#define __NR_SYSV_ksigqueue (__NR_SYSV + 180)
448#define __NR_SYSV_readv (__NR_SYSV + 181)
449#define __NR_SYSV_writev (__NR_SYSV + 182)
450#define __NR_SYSV_truncate64 (__NR_SYSV + 183)
451#define __NR_SYSV_ftruncate64 (__NR_SYSV + 184)
452#define __NR_SYSV_mmap64 (__NR_SYSV + 185)
453#define __NR_SYSV_dmi (__NR_SYSV + 186)
454#define __NR_SYSV_pread (__NR_SYSV + 187)
455#define __NR_SYSV_pwrite (__NR_SYSV + 188)
456
457/*
458 * BSD 4.3 syscalls are in the range from 2000 to 2999
459 */
460#define __NR_BSD43 2000
461#define __NR_BSD43_syscall (__NR_BSD43 + 0)
462#define __NR_BSD43_exit (__NR_BSD43 + 1)
463#define __NR_BSD43_fork (__NR_BSD43 + 2)
464#define __NR_BSD43_read (__NR_BSD43 + 3)
465#define __NR_BSD43_write (__NR_BSD43 + 4)
466#define __NR_BSD43_open (__NR_BSD43 + 5)
467#define __NR_BSD43_close (__NR_BSD43 + 6)
468#define __NR_BSD43_wait (__NR_BSD43 + 7)
469#define __NR_BSD43_creat (__NR_BSD43 + 8)
470#define __NR_BSD43_link (__NR_BSD43 + 9)
471#define __NR_BSD43_unlink (__NR_BSD43 + 10)
472#define __NR_BSD43_exec (__NR_BSD43 + 11)
473#define __NR_BSD43_chdir (__NR_BSD43 + 12)
474#define __NR_BSD43_time (__NR_BSD43 + 13)
475#define __NR_BSD43_mknod (__NR_BSD43 + 14)
476#define __NR_BSD43_chmod (__NR_BSD43 + 15)
477#define __NR_BSD43_chown (__NR_BSD43 + 16)
478#define __NR_BSD43_sbreak (__NR_BSD43 + 17)
479#define __NR_BSD43_oldstat (__NR_BSD43 + 18)
480#define __NR_BSD43_lseek (__NR_BSD43 + 19)
481#define __NR_BSD43_getpid (__NR_BSD43 + 20)
482#define __NR_BSD43_oldmount (__NR_BSD43 + 21)
483#define __NR_BSD43_umount (__NR_BSD43 + 22)
484#define __NR_BSD43_setuid (__NR_BSD43 + 23)
485#define __NR_BSD43_getuid (__NR_BSD43 + 24)
486#define __NR_BSD43_stime (__NR_BSD43 + 25)
487#define __NR_BSD43_ptrace (__NR_BSD43 + 26)
488#define __NR_BSD43_alarm (__NR_BSD43 + 27)
489#define __NR_BSD43_oldfstat (__NR_BSD43 + 28)
490#define __NR_BSD43_pause (__NR_BSD43 + 29)
491#define __NR_BSD43_utime (__NR_BSD43 + 30)
492#define __NR_BSD43_stty (__NR_BSD43 + 31)
493#define __NR_BSD43_gtty (__NR_BSD43 + 32)
494#define __NR_BSD43_access (__NR_BSD43 + 33)
495#define __NR_BSD43_nice (__NR_BSD43 + 34)
496#define __NR_BSD43_ftime (__NR_BSD43 + 35)
497#define __NR_BSD43_sync (__NR_BSD43 + 36)
498#define __NR_BSD43_kill (__NR_BSD43 + 37)
499#define __NR_BSD43_stat (__NR_BSD43 + 38)
500#define __NR_BSD43_oldsetpgrp (__NR_BSD43 + 39)
501#define __NR_BSD43_lstat (__NR_BSD43 + 40)
502#define __NR_BSD43_dup (__NR_BSD43 + 41)
503#define __NR_BSD43_pipe (__NR_BSD43 + 42)
504#define __NR_BSD43_times (__NR_BSD43 + 43)
505#define __NR_BSD43_profil (__NR_BSD43 + 44)
506#define __NR_BSD43_msgsys (__NR_BSD43 + 45)
507#define __NR_BSD43_setgid (__NR_BSD43 + 46)
508#define __NR_BSD43_getgid (__NR_BSD43 + 47)
509#define __NR_BSD43_ssig (__NR_BSD43 + 48)
510#define __NR_BSD43_reserved1 (__NR_BSD43 + 49)
511#define __NR_BSD43_reserved2 (__NR_BSD43 + 50)
512#define __NR_BSD43_sysacct (__NR_BSD43 + 51)
513#define __NR_BSD43_phys (__NR_BSD43 + 52)
514#define __NR_BSD43_lock (__NR_BSD43 + 53)
515#define __NR_BSD43_ioctl (__NR_BSD43 + 54)
516#define __NR_BSD43_reboot (__NR_BSD43 + 55)
517#define __NR_BSD43_mpxchan (__NR_BSD43 + 56)
518#define __NR_BSD43_symlink (__NR_BSD43 + 57)
519#define __NR_BSD43_readlink (__NR_BSD43 + 58)
520#define __NR_BSD43_execve (__NR_BSD43 + 59)
521#define __NR_BSD43_umask (__NR_BSD43 + 60)
522#define __NR_BSD43_chroot (__NR_BSD43 + 61)
523#define __NR_BSD43_fstat (__NR_BSD43 + 62)
524#define __NR_BSD43_reserved3 (__NR_BSD43 + 63)
525#define __NR_BSD43_getpagesize (__NR_BSD43 + 64)
526#define __NR_BSD43_mremap (__NR_BSD43 + 65)
527#define __NR_BSD43_vfork (__NR_BSD43 + 66)
528#define __NR_BSD43_vread (__NR_BSD43 + 67)
529#define __NR_BSD43_vwrite (__NR_BSD43 + 68)
530#define __NR_BSD43_sbrk (__NR_BSD43 + 69)
531#define __NR_BSD43_sstk (__NR_BSD43 + 70)
532#define __NR_BSD43_mmap (__NR_BSD43 + 71)
533#define __NR_BSD43_vadvise (__NR_BSD43 + 72)
534#define __NR_BSD43_munmap (__NR_BSD43 + 73)
535#define __NR_BSD43_mprotect (__NR_BSD43 + 74)
536#define __NR_BSD43_madvise (__NR_BSD43 + 75)
537#define __NR_BSD43_vhangup (__NR_BSD43 + 76)
538#define __NR_BSD43_vlimit (__NR_BSD43 + 77)
539#define __NR_BSD43_mincore (__NR_BSD43 + 78)
540#define __NR_BSD43_getgroups (__NR_BSD43 + 79)
541#define __NR_BSD43_setgroups (__NR_BSD43 + 80)
542#define __NR_BSD43_getpgrp (__NR_BSD43 + 81)
543#define __NR_BSD43_setpgrp (__NR_BSD43 + 82)
544#define __NR_BSD43_setitimer (__NR_BSD43 + 83)
545#define __NR_BSD43_wait3 (__NR_BSD43 + 84)
546#define __NR_BSD43_swapon (__NR_BSD43 + 85)
547#define __NR_BSD43_getitimer (__NR_BSD43 + 86)
548#define __NR_BSD43_gethostname (__NR_BSD43 + 87)
549#define __NR_BSD43_sethostname (__NR_BSD43 + 88)
550#define __NR_BSD43_getdtablesize (__NR_BSD43 + 89)
551#define __NR_BSD43_dup2 (__NR_BSD43 + 90)
552#define __NR_BSD43_getdopt (__NR_BSD43 + 91)
553#define __NR_BSD43_fcntl (__NR_BSD43 + 92)
554#define __NR_BSD43_select (__NR_BSD43 + 93)
555#define __NR_BSD43_setdopt (__NR_BSD43 + 94)
556#define __NR_BSD43_fsync (__NR_BSD43 + 95)
557#define __NR_BSD43_setpriority (__NR_BSD43 + 96)
558#define __NR_BSD43_socket (__NR_BSD43 + 97)
559#define __NR_BSD43_connect (__NR_BSD43 + 98)
560#define __NR_BSD43_oldaccept (__NR_BSD43 + 99)
561#define __NR_BSD43_getpriority (__NR_BSD43 + 100)
562#define __NR_BSD43_send (__NR_BSD43 + 101)
563#define __NR_BSD43_recv (__NR_BSD43 + 102)
564#define __NR_BSD43_sigreturn (__NR_BSD43 + 103)
565#define __NR_BSD43_bind (__NR_BSD43 + 104)
566#define __NR_BSD43_setsockopt (__NR_BSD43 + 105)
567#define __NR_BSD43_listen (__NR_BSD43 + 106)
568#define __NR_BSD43_vtimes (__NR_BSD43 + 107)
569#define __NR_BSD43_sigvec (__NR_BSD43 + 108)
570#define __NR_BSD43_sigblock (__NR_BSD43 + 109)
571#define __NR_BSD43_sigsetmask (__NR_BSD43 + 110)
572#define __NR_BSD43_sigpause (__NR_BSD43 + 111)
573#define __NR_BSD43_sigstack (__NR_BSD43 + 112)
574#define __NR_BSD43_oldrecvmsg (__NR_BSD43 + 113)
575#define __NR_BSD43_oldsendmsg (__NR_BSD43 + 114)
576#define __NR_BSD43_vtrace (__NR_BSD43 + 115)
577#define __NR_BSD43_gettimeofday (__NR_BSD43 + 116)
578#define __NR_BSD43_getrusage (__NR_BSD43 + 117)
579#define __NR_BSD43_getsockopt (__NR_BSD43 + 118)
580#define __NR_BSD43_reserved4 (__NR_BSD43 + 119)
581#define __NR_BSD43_readv (__NR_BSD43 + 120)
582#define __NR_BSD43_writev (__NR_BSD43 + 121)
583#define __NR_BSD43_settimeofday (__NR_BSD43 + 122)
584#define __NR_BSD43_fchown (__NR_BSD43 + 123)
585#define __NR_BSD43_fchmod (__NR_BSD43 + 124)
586#define __NR_BSD43_oldrecvfrom (__NR_BSD43 + 125)
587#define __NR_BSD43_setreuid (__NR_BSD43 + 126)
588#define __NR_BSD43_setregid (__NR_BSD43 + 127)
589#define __NR_BSD43_rename (__NR_BSD43 + 128)
590#define __NR_BSD43_truncate (__NR_BSD43 + 129)
591#define __NR_BSD43_ftruncate (__NR_BSD43 + 130)
592#define __NR_BSD43_flock (__NR_BSD43 + 131)
593#define __NR_BSD43_semsys (__NR_BSD43 + 132)
594#define __NR_BSD43_sendto (__NR_BSD43 + 133)
595#define __NR_BSD43_shutdown (__NR_BSD43 + 134)
596#define __NR_BSD43_socketpair (__NR_BSD43 + 135)
597#define __NR_BSD43_mkdir (__NR_BSD43 + 136)
598#define __NR_BSD43_rmdir (__NR_BSD43 + 137)
599#define __NR_BSD43_utimes (__NR_BSD43 + 138)
600#define __NR_BSD43_sigcleanup (__NR_BSD43 + 139)
601#define __NR_BSD43_adjtime (__NR_BSD43 + 140)
602#define __NR_BSD43_oldgetpeername (__NR_BSD43 + 141)
603#define __NR_BSD43_gethostid (__NR_BSD43 + 142)
604#define __NR_BSD43_sethostid (__NR_BSD43 + 143)
605#define __NR_BSD43_getrlimit (__NR_BSD43 + 144)
606#define __NR_BSD43_setrlimit (__NR_BSD43 + 145)
607#define __NR_BSD43_killpg (__NR_BSD43 + 146)
608#define __NR_BSD43_shmsys (__NR_BSD43 + 147)
609#define __NR_BSD43_quota (__NR_BSD43 + 148)
610#define __NR_BSD43_qquota (__NR_BSD43 + 149)
611#define __NR_BSD43_oldgetsockname (__NR_BSD43 + 150)
612#define __NR_BSD43_sysmips (__NR_BSD43 + 151)
613#define __NR_BSD43_cacheflush (__NR_BSD43 + 152)
614#define __NR_BSD43_cachectl (__NR_BSD43 + 153)
615#define __NR_BSD43_debug (__NR_BSD43 + 154)
616#define __NR_BSD43_reserved5 (__NR_BSD43 + 155)
617#define __NR_BSD43_reserved6 (__NR_BSD43 + 156)
618#define __NR_BSD43_nfs_mount (__NR_BSD43 + 157)
619#define __NR_BSD43_nfs_svc (__NR_BSD43 + 158)
620#define __NR_BSD43_getdirentries (__NR_BSD43 + 159)
621#define __NR_BSD43_statfs (__NR_BSD43 + 160)
622#define __NR_BSD43_fstatfs (__NR_BSD43 + 161)
623#define __NR_BSD43_unmount (__NR_BSD43 + 162)
624#define __NR_BSD43_async_daemon (__NR_BSD43 + 163)
625#define __NR_BSD43_nfs_getfh (__NR_BSD43 + 164)
626#define __NR_BSD43_getdomainname (__NR_BSD43 + 165)
627#define __NR_BSD43_setdomainname (__NR_BSD43 + 166)
628#define __NR_BSD43_pcfs_mount (__NR_BSD43 + 167)
629#define __NR_BSD43_quotactl (__NR_BSD43 + 168)
630#define __NR_BSD43_oldexportfs (__NR_BSD43 + 169)
631#define __NR_BSD43_smount (__NR_BSD43 + 170)
632#define __NR_BSD43_mipshwconf (__NR_BSD43 + 171)
633#define __NR_BSD43_exportfs (__NR_BSD43 + 172)
634#define __NR_BSD43_nfsfh_open (__NR_BSD43 + 173)
635#define __NR_BSD43_libattach (__NR_BSD43 + 174)
636#define __NR_BSD43_libdetach (__NR_BSD43 + 175)
637#define __NR_BSD43_accept (__NR_BSD43 + 176)
638#define __NR_BSD43_reserved7 (__NR_BSD43 + 177)
639#define __NR_BSD43_reserved8 (__NR_BSD43 + 178)
640#define __NR_BSD43_recvmsg (__NR_BSD43 + 179)
641#define __NR_BSD43_recvfrom (__NR_BSD43 + 180)
642#define __NR_BSD43_sendmsg (__NR_BSD43 + 181)
643#define __NR_BSD43_getpeername (__NR_BSD43 + 182)
644#define __NR_BSD43_getsockname (__NR_BSD43 + 183)
645#define __NR_BSD43_aread (__NR_BSD43 + 184)
646#define __NR_BSD43_awrite (__NR_BSD43 + 185)
647#define __NR_BSD43_listio (__NR_BSD43 + 186)
648#define __NR_BSD43_acancel (__NR_BSD43 + 187)
649#define __NR_BSD43_astatus (__NR_BSD43 + 188)
650#define __NR_BSD43_await (__NR_BSD43 + 189)
651#define __NR_BSD43_areadv (__NR_BSD43 + 190)
652#define __NR_BSD43_awritev (__NR_BSD43 + 191)
653
654/*
655 * POSIX syscalls are in the range from 3000 to 3999
656 */
657#define __NR_POSIX 3000
658#define __NR_POSIX_syscall (__NR_POSIX + 0)
659#define __NR_POSIX_exit (__NR_POSIX + 1)
660#define __NR_POSIX_fork (__NR_POSIX + 2)
661#define __NR_POSIX_read (__NR_POSIX + 3)
662#define __NR_POSIX_write (__NR_POSIX + 4)
663#define __NR_POSIX_open (__NR_POSIX + 5)
664#define __NR_POSIX_close (__NR_POSIX + 6)
665#define __NR_POSIX_wait (__NR_POSIX + 7)
666#define __NR_POSIX_creat (__NR_POSIX + 8)
667#define __NR_POSIX_link (__NR_POSIX + 9)
668#define __NR_POSIX_unlink (__NR_POSIX + 10)
669#define __NR_POSIX_exec (__NR_POSIX + 11)
670#define __NR_POSIX_chdir (__NR_POSIX + 12)
671#define __NR_POSIX_gtime (__NR_POSIX + 13)
672#define __NR_POSIX_mknod (__NR_POSIX + 14)
673#define __NR_POSIX_chmod (__NR_POSIX + 15)
674#define __NR_POSIX_chown (__NR_POSIX + 16)
675#define __NR_POSIX_sbreak (__NR_POSIX + 17)
676#define __NR_POSIX_stat (__NR_POSIX + 18)
677#define __NR_POSIX_lseek (__NR_POSIX + 19)
678#define __NR_POSIX_getpid (__NR_POSIX + 20)
679#define __NR_POSIX_mount (__NR_POSIX + 21)
680#define __NR_POSIX_umount (__NR_POSIX + 22)
681#define __NR_POSIX_setuid (__NR_POSIX + 23)
682#define __NR_POSIX_getuid (__NR_POSIX + 24)
683#define __NR_POSIX_stime (__NR_POSIX + 25)
684#define __NR_POSIX_ptrace (__NR_POSIX + 26)
685#define __NR_POSIX_alarm (__NR_POSIX + 27)
686#define __NR_POSIX_fstat (__NR_POSIX + 28)
687#define __NR_POSIX_pause (__NR_POSIX + 29)
688#define __NR_POSIX_utime (__NR_POSIX + 30)
689#define __NR_POSIX_stty (__NR_POSIX + 31)
690#define __NR_POSIX_gtty (__NR_POSIX + 32)
691#define __NR_POSIX_access (__NR_POSIX + 33)
692#define __NR_POSIX_nice (__NR_POSIX + 34)
693#define __NR_POSIX_statfs (__NR_POSIX + 35)
694#define __NR_POSIX_sync (__NR_POSIX + 36)
695#define __NR_POSIX_kill (__NR_POSIX + 37)
696#define __NR_POSIX_fstatfs (__NR_POSIX + 38)
697#define __NR_POSIX_getpgrp (__NR_POSIX + 39)
698#define __NR_POSIX_syssgi (__NR_POSIX + 40)
699#define __NR_POSIX_dup (__NR_POSIX + 41)
700#define __NR_POSIX_pipe (__NR_POSIX + 42)
701#define __NR_POSIX_times (__NR_POSIX + 43)
702#define __NR_POSIX_profil (__NR_POSIX + 44)
703#define __NR_POSIX_lock (__NR_POSIX + 45)
704#define __NR_POSIX_setgid (__NR_POSIX + 46)
705#define __NR_POSIX_getgid (__NR_POSIX + 47)
706#define __NR_POSIX_sig (__NR_POSIX + 48)
707#define __NR_POSIX_msgsys (__NR_POSIX + 49)
708#define __NR_POSIX_sysmips (__NR_POSIX + 50)
709#define __NR_POSIX_sysacct (__NR_POSIX + 51)
710#define __NR_POSIX_shmsys (__NR_POSIX + 52)
711#define __NR_POSIX_semsys (__NR_POSIX + 53)
712#define __NR_POSIX_ioctl (__NR_POSIX + 54)
713#define __NR_POSIX_uadmin (__NR_POSIX + 55)
714#define __NR_POSIX_exch (__NR_POSIX + 56)
715#define __NR_POSIX_utssys (__NR_POSIX + 57)
716#define __NR_POSIX_USG_reserved1 (__NR_POSIX + 58)
717#define __NR_POSIX_exece (__NR_POSIX + 59)
718#define __NR_POSIX_umask (__NR_POSIX + 60)
719#define __NR_POSIX_chroot (__NR_POSIX + 61)
720#define __NR_POSIX_fcntl (__NR_POSIX + 62)
721#define __NR_POSIX_ulimit (__NR_POSIX + 63)
722#define __NR_POSIX_SAFARI4_reserved1 (__NR_POSIX + 64)
723#define __NR_POSIX_SAFARI4_reserved2 (__NR_POSIX + 65)
724#define __NR_POSIX_SAFARI4_reserved3 (__NR_POSIX + 66)
725#define __NR_POSIX_SAFARI4_reserved4 (__NR_POSIX + 67)
726#define __NR_POSIX_SAFARI4_reserved5 (__NR_POSIX + 68)
727#define __NR_POSIX_SAFARI4_reserved6 (__NR_POSIX + 69)
728#define __NR_POSIX_advfs (__NR_POSIX + 70)
729#define __NR_POSIX_unadvfs (__NR_POSIX + 71)
730#define __NR_POSIX_rmount (__NR_POSIX + 72)
731#define __NR_POSIX_rumount (__NR_POSIX + 73)
732#define __NR_POSIX_rfstart (__NR_POSIX + 74)
733#define __NR_POSIX_reserved1 (__NR_POSIX + 75)
734#define __NR_POSIX_rdebug (__NR_POSIX + 76)
735#define __NR_POSIX_rfstop (__NR_POSIX + 77)
736#define __NR_POSIX_rfsys (__NR_POSIX + 78)
737#define __NR_POSIX_rmdir (__NR_POSIX + 79)
738#define __NR_POSIX_mkdir (__NR_POSIX + 80)
739#define __NR_POSIX_getdents (__NR_POSIX + 81)
740#define __NR_POSIX_sginap (__NR_POSIX + 82)
741#define __NR_POSIX_sgikopt (__NR_POSIX + 83)
742#define __NR_POSIX_sysfs (__NR_POSIX + 84)
743#define __NR_POSIX_getmsg (__NR_POSIX + 85)
744#define __NR_POSIX_putmsg (__NR_POSIX + 86)
745#define __NR_POSIX_poll (__NR_POSIX + 87)
746#define __NR_POSIX_sigreturn (__NR_POSIX + 88)
747#define __NR_POSIX_accept (__NR_POSIX + 89)
748#define __NR_POSIX_bind (__NR_POSIX + 90)
749#define __NR_POSIX_connect (__NR_POSIX + 91)
750#define __NR_POSIX_gethostid (__NR_POSIX + 92)
751#define __NR_POSIX_getpeername (__NR_POSIX + 93)
752#define __NR_POSIX_getsockname (__NR_POSIX + 94)
753#define __NR_POSIX_getsockopt (__NR_POSIX + 95)
754#define __NR_POSIX_listen (__NR_POSIX + 96)
755#define __NR_POSIX_recv (__NR_POSIX + 97)
756#define __NR_POSIX_recvfrom (__NR_POSIX + 98)
757#define __NR_POSIX_recvmsg (__NR_POSIX + 99)
758#define __NR_POSIX_select (__NR_POSIX + 100)
759#define __NR_POSIX_send (__NR_POSIX + 101)
760#define __NR_POSIX_sendmsg (__NR_POSIX + 102)
761#define __NR_POSIX_sendto (__NR_POSIX + 103)
762#define __NR_POSIX_sethostid (__NR_POSIX + 104)
763#define __NR_POSIX_setsockopt (__NR_POSIX + 105)
764#define __NR_POSIX_shutdown (__NR_POSIX + 106)
765#define __NR_POSIX_socket (__NR_POSIX + 107)
766#define __NR_POSIX_gethostname (__NR_POSIX + 108)
767#define __NR_POSIX_sethostname (__NR_POSIX + 109)
768#define __NR_POSIX_getdomainname (__NR_POSIX + 110)
769#define __NR_POSIX_setdomainname (__NR_POSIX + 111)
770#define __NR_POSIX_truncate (__NR_POSIX + 112)
771#define __NR_POSIX_ftruncate (__NR_POSIX + 113)
772#define __NR_POSIX_rename (__NR_POSIX + 114)
773#define __NR_POSIX_symlink (__NR_POSIX + 115)
774#define __NR_POSIX_readlink (__NR_POSIX + 116)
775#define __NR_POSIX_lstat (__NR_POSIX + 117)
776#define __NR_POSIX_nfs_mount (__NR_POSIX + 118)
777#define __NR_POSIX_nfs_svc (__NR_POSIX + 119)
778#define __NR_POSIX_nfs_getfh (__NR_POSIX + 120)
779#define __NR_POSIX_async_daemon (__NR_POSIX + 121)
780#define __NR_POSIX_exportfs (__NR_POSIX + 122)
781#define __NR_POSIX_SGI_setregid (__NR_POSIX + 123)
782#define __NR_POSIX_SGI_setreuid (__NR_POSIX + 124)
783#define __NR_POSIX_getitimer (__NR_POSIX + 125)
784#define __NR_POSIX_setitimer (__NR_POSIX + 126)
785#define __NR_POSIX_adjtime (__NR_POSIX + 127)
786#define __NR_POSIX_SGI_bsdgettime (__NR_POSIX + 128)
787#define __NR_POSIX_SGI_sproc (__NR_POSIX + 129)
788#define __NR_POSIX_SGI_prctl (__NR_POSIX + 130)
789#define __NR_POSIX_SGI_blkproc (__NR_POSIX + 131)
790#define __NR_POSIX_SGI_reserved1 (__NR_POSIX + 132)
791#define __NR_POSIX_SGI_sgigsc (__NR_POSIX + 133)
792#define __NR_POSIX_SGI_mmap (__NR_POSIX + 134)
793#define __NR_POSIX_SGI_munmap (__NR_POSIX + 135)
794#define __NR_POSIX_SGI_mprotect (__NR_POSIX + 136)
795#define __NR_POSIX_SGI_msync (__NR_POSIX + 137)
796#define __NR_POSIX_SGI_madvise (__NR_POSIX + 138)
797#define __NR_POSIX_SGI_mpin (__NR_POSIX + 139)
798#define __NR_POSIX_SGI_getpagesize (__NR_POSIX + 140)
799#define __NR_POSIX_SGI_libattach (__NR_POSIX + 141)
800#define __NR_POSIX_SGI_libdetach (__NR_POSIX + 142)
801#define __NR_POSIX_SGI_getpgrp (__NR_POSIX + 143)
802#define __NR_POSIX_SGI_setpgrp (__NR_POSIX + 144)
803#define __NR_POSIX_SGI_reserved2 (__NR_POSIX + 145)
804#define __NR_POSIX_SGI_reserved3 (__NR_POSIX + 146)
805#define __NR_POSIX_SGI_reserved4 (__NR_POSIX + 147)
806#define __NR_POSIX_SGI_reserved5 (__NR_POSIX + 148)
807#define __NR_POSIX_SGI_reserved6 (__NR_POSIX + 149)
808#define __NR_POSIX_cacheflush (__NR_POSIX + 150)
809#define __NR_POSIX_cachectl (__NR_POSIX + 151)
810#define __NR_POSIX_fchown (__NR_POSIX + 152)
811#define __NR_POSIX_fchmod (__NR_POSIX + 153)
812#define __NR_POSIX_wait3 (__NR_POSIX + 154)
813#define __NR_POSIX_mmap (__NR_POSIX + 155)
814#define __NR_POSIX_munmap (__NR_POSIX + 156)
815#define __NR_POSIX_madvise (__NR_POSIX + 157)
816#define __NR_POSIX_BSD_getpagesize (__NR_POSIX + 158)
817#define __NR_POSIX_setreuid (__NR_POSIX + 159)
818#define __NR_POSIX_setregid (__NR_POSIX + 160)
819#define __NR_POSIX_setpgid (__NR_POSIX + 161)
820#define __NR_POSIX_getgroups (__NR_POSIX + 162)
821#define __NR_POSIX_setgroups (__NR_POSIX + 163)
822#define __NR_POSIX_gettimeofday (__NR_POSIX + 164)
823#define __NR_POSIX_getrusage (__NR_POSIX + 165)
824#define __NR_POSIX_getrlimit (__NR_POSIX + 166)
825#define __NR_POSIX_setrlimit (__NR_POSIX + 167)
826#define __NR_POSIX_waitpid (__NR_POSIX + 168)
827#define __NR_POSIX_dup2 (__NR_POSIX + 169)
828#define __NR_POSIX_reserved2 (__NR_POSIX + 170)
829#define __NR_POSIX_reserved3 (__NR_POSIX + 171)
830#define __NR_POSIX_reserved4 (__NR_POSIX + 172)
831#define __NR_POSIX_reserved5 (__NR_POSIX + 173)
832#define __NR_POSIX_reserved6 (__NR_POSIX + 174)
833#define __NR_POSIX_reserved7 (__NR_POSIX + 175)
834#define __NR_POSIX_reserved8 (__NR_POSIX + 176)
835#define __NR_POSIX_reserved9 (__NR_POSIX + 177)
836#define __NR_POSIX_reserved10 (__NR_POSIX + 178)
837#define __NR_POSIX_reserved11 (__NR_POSIX + 179)
838#define __NR_POSIX_reserved12 (__NR_POSIX + 180)
839#define __NR_POSIX_reserved13 (__NR_POSIX + 181)
840#define __NR_POSIX_reserved14 (__NR_POSIX + 182)
841#define __NR_POSIX_reserved15 (__NR_POSIX + 183)
842#define __NR_POSIX_reserved16 (__NR_POSIX + 184)
843#define __NR_POSIX_reserved17 (__NR_POSIX + 185)
844#define __NR_POSIX_reserved18 (__NR_POSIX + 186)
845#define __NR_POSIX_reserved19 (__NR_POSIX + 187)
846#define __NR_POSIX_reserved20 (__NR_POSIX + 188)
847#define __NR_POSIX_reserved21 (__NR_POSIX + 189)
848#define __NR_POSIX_reserved22 (__NR_POSIX + 190)
849#define __NR_POSIX_reserved23 (__NR_POSIX + 191)
850#define __NR_POSIX_reserved24 (__NR_POSIX + 192)
851#define __NR_POSIX_reserved25 (__NR_POSIX + 193)
852#define __NR_POSIX_reserved26 (__NR_POSIX + 194)
853#define __NR_POSIX_reserved27 (__NR_POSIX + 195)
854#define __NR_POSIX_reserved28 (__NR_POSIX + 196)
855#define __NR_POSIX_reserved29 (__NR_POSIX + 197)
856#define __NR_POSIX_reserved30 (__NR_POSIX + 198)
857#define __NR_POSIX_reserved31 (__NR_POSIX + 199)
858#define __NR_POSIX_reserved32 (__NR_POSIX + 200)
859#define __NR_POSIX_reserved33 (__NR_POSIX + 201)
860#define __NR_POSIX_reserved34 (__NR_POSIX + 202)
861#define __NR_POSIX_reserved35 (__NR_POSIX + 203)
862#define __NR_POSIX_reserved36 (__NR_POSIX + 204)
863#define __NR_POSIX_reserved37 (__NR_POSIX + 205)
864#define __NR_POSIX_reserved38 (__NR_POSIX + 206)
865#define __NR_POSIX_reserved39 (__NR_POSIX + 207)
866#define __NR_POSIX_reserved40 (__NR_POSIX + 208)
867#define __NR_POSIX_reserved41 (__NR_POSIX + 209)
868#define __NR_POSIX_reserved42 (__NR_POSIX + 210)
869#define __NR_POSIX_reserved43 (__NR_POSIX + 211)
870#define __NR_POSIX_reserved44 (__NR_POSIX + 212)
871#define __NR_POSIX_reserved45 (__NR_POSIX + 213)
872#define __NR_POSIX_reserved46 (__NR_POSIX + 214)
873#define __NR_POSIX_reserved47 (__NR_POSIX + 215)
874#define __NR_POSIX_reserved48 (__NR_POSIX + 216)
875#define __NR_POSIX_reserved49 (__NR_POSIX + 217)
876#define __NR_POSIX_reserved50 (__NR_POSIX + 218)
877#define __NR_POSIX_reserved51 (__NR_POSIX + 219)
878#define __NR_POSIX_reserved52 (__NR_POSIX + 220)
879#define __NR_POSIX_reserved53 (__NR_POSIX + 221)
880#define __NR_POSIX_reserved54 (__NR_POSIX + 222)
881#define __NR_POSIX_reserved55 (__NR_POSIX + 223)
882#define __NR_POSIX_reserved56 (__NR_POSIX + 224)
883#define __NR_POSIX_reserved57 (__NR_POSIX + 225)
884#define __NR_POSIX_reserved58 (__NR_POSIX + 226)
885#define __NR_POSIX_reserved59 (__NR_POSIX + 227)
886#define __NR_POSIX_reserved60 (__NR_POSIX + 228)
887#define __NR_POSIX_reserved61 (__NR_POSIX + 229)
888#define __NR_POSIX_reserved62 (__NR_POSIX + 230)
889#define __NR_POSIX_reserved63 (__NR_POSIX + 231)
890#define __NR_POSIX_reserved64 (__NR_POSIX + 232)
891#define __NR_POSIX_reserved65 (__NR_POSIX + 233)
892#define __NR_POSIX_reserved66 (__NR_POSIX + 234)
893#define __NR_POSIX_reserved67 (__NR_POSIX + 235)
894#define __NR_POSIX_reserved68 (__NR_POSIX + 236)
895#define __NR_POSIX_reserved69 (__NR_POSIX + 237)
896#define __NR_POSIX_reserved70 (__NR_POSIX + 238)
897#define __NR_POSIX_reserved71 (__NR_POSIX + 239)
898#define __NR_POSIX_reserved72 (__NR_POSIX + 240)
899#define __NR_POSIX_reserved73 (__NR_POSIX + 241)
900#define __NR_POSIX_reserved74 (__NR_POSIX + 242)
901#define __NR_POSIX_reserved75 (__NR_POSIX + 243)
902#define __NR_POSIX_reserved76 (__NR_POSIX + 244)
903#define __NR_POSIX_reserved77 (__NR_POSIX + 245)
904#define __NR_POSIX_reserved78 (__NR_POSIX + 246)
905#define __NR_POSIX_reserved79 (__NR_POSIX + 247)
906#define __NR_POSIX_reserved80 (__NR_POSIX + 248)
907#define __NR_POSIX_reserved81 (__NR_POSIX + 249)
908#define __NR_POSIX_reserved82 (__NR_POSIX + 250)
909#define __NR_POSIX_reserved83 (__NR_POSIX + 251)
910#define __NR_POSIX_reserved84 (__NR_POSIX + 252)
911#define __NR_POSIX_reserved85 (__NR_POSIX + 253)
912#define __NR_POSIX_reserved86 (__NR_POSIX + 254)
913#define __NR_POSIX_reserved87 (__NR_POSIX + 255)
914#define __NR_POSIX_reserved88 (__NR_POSIX + 256)
915#define __NR_POSIX_reserved89 (__NR_POSIX + 257)
916#define __NR_POSIX_reserved90 (__NR_POSIX + 258)
917#define __NR_POSIX_reserved91 (__NR_POSIX + 259)
918#define __NR_POSIX_netboot (__NR_POSIX + 260)
919#define __NR_POSIX_netunboot (__NR_POSIX + 261)
920#define __NR_POSIX_rdump (__NR_POSIX + 262)
921#define __NR_POSIX_setsid (__NR_POSIX + 263)
922#define __NR_POSIX_getmaxsig (__NR_POSIX + 264)
923#define __NR_POSIX_sigpending (__NR_POSIX + 265)
924#define __NR_POSIX_sigprocmask (__NR_POSIX + 266)
925#define __NR_POSIX_sigsuspend (__NR_POSIX + 267)
926#define __NR_POSIX_sigaction (__NR_POSIX + 268)
927#define __NR_POSIX_MIPS_reserved1 (__NR_POSIX + 269)
928#define __NR_POSIX_MIPS_reserved2 (__NR_POSIX + 270)
929#define __NR_POSIX_MIPS_reserved3 (__NR_POSIX + 271)
930#define __NR_POSIX_MIPS_reserved4 (__NR_POSIX + 272)
931#define __NR_POSIX_MIPS_reserved5 (__NR_POSIX + 273)
932#define __NR_POSIX_MIPS_reserved6 (__NR_POSIX + 274)
933#define __NR_POSIX_MIPS_reserved7 (__NR_POSIX + 275)
934#define __NR_POSIX_MIPS_reserved8 (__NR_POSIX + 276)
935#define __NR_POSIX_MIPS_reserved9 (__NR_POSIX + 277)
936#define __NR_POSIX_MIPS_reserved10 (__NR_POSIX + 278)
937#define __NR_POSIX_MIPS_reserved11 (__NR_POSIX + 279)
938#define __NR_POSIX_TANDEM_reserved1 (__NR_POSIX + 280)
939#define __NR_POSIX_TANDEM_reserved2 (__NR_POSIX + 281)
940#define __NR_POSIX_TANDEM_reserved3 (__NR_POSIX + 282)
941#define __NR_POSIX_TANDEM_reserved4 (__NR_POSIX + 283)
942#define __NR_POSIX_TANDEM_reserved5 (__NR_POSIX + 284)
943#define __NR_POSIX_TANDEM_reserved6 (__NR_POSIX + 285)
944#define __NR_POSIX_TANDEM_reserved7 (__NR_POSIX + 286)
945#define __NR_POSIX_TANDEM_reserved8 (__NR_POSIX + 287)
946#define __NR_POSIX_TANDEM_reserved9 (__NR_POSIX + 288)
947#define __NR_POSIX_TANDEM_reserved10 (__NR_POSIX + 289)
948#define __NR_POSIX_TANDEM_reserved11 (__NR_POSIX + 290)
949#define __NR_POSIX_TANDEM_reserved12 (__NR_POSIX + 291)
950#define __NR_POSIX_TANDEM_reserved13 (__NR_POSIX + 292)
951#define __NR_POSIX_TANDEM_reserved14 (__NR_POSIX + 293)
952#define __NR_POSIX_TANDEM_reserved15 (__NR_POSIX + 294)
953#define __NR_POSIX_TANDEM_reserved16 (__NR_POSIX + 295)
954#define __NR_POSIX_TANDEM_reserved17 (__NR_POSIX + 296)
955#define __NR_POSIX_TANDEM_reserved18 (__NR_POSIX + 297)
956#define __NR_POSIX_TANDEM_reserved19 (__NR_POSIX + 298)
957#define __NR_POSIX_TANDEM_reserved20 (__NR_POSIX + 299)
958#define __NR_POSIX_SGI_reserved7 (__NR_POSIX + 300)
959#define __NR_POSIX_SGI_reserved8 (__NR_POSIX + 301)
960#define __NR_POSIX_SGI_reserved9 (__NR_POSIX + 302)
961#define __NR_POSIX_SGI_reserved10 (__NR_POSIX + 303)
962#define __NR_POSIX_SGI_reserved11 (__NR_POSIX + 304)
963#define __NR_POSIX_SGI_reserved12 (__NR_POSIX + 305)
964#define __NR_POSIX_SGI_reserved13 (__NR_POSIX + 306)
965#define __NR_POSIX_SGI_reserved14 (__NR_POSIX + 307)
966#define __NR_POSIX_SGI_reserved15 (__NR_POSIX + 308)
967#define __NR_POSIX_SGI_reserved16 (__NR_POSIX + 309)
968#define __NR_POSIX_SGI_reserved17 (__NR_POSIX + 310)
969#define __NR_POSIX_SGI_reserved18 (__NR_POSIX + 311)
970#define __NR_POSIX_SGI_reserved19 (__NR_POSIX + 312)
971#define __NR_POSIX_SGI_reserved20 (__NR_POSIX + 313)
972#define __NR_POSIX_SGI_reserved21 (__NR_POSIX + 314)
973#define __NR_POSIX_SGI_reserved22 (__NR_POSIX + 315)
974#define __NR_POSIX_SGI_reserved23 (__NR_POSIX + 316)
975#define __NR_POSIX_SGI_reserved24 (__NR_POSIX + 317)
976#define __NR_POSIX_SGI_reserved25 (__NR_POSIX + 318)
977#define __NR_POSIX_SGI_reserved26 (__NR_POSIX + 319)
978
979#endif /* _ASM_RISCOS_SYSCALL_H */
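
The ABI blocks above are keyed by thousand-sized bases (BSD 4.3 at 2000, POSIX at 3000, and SysV presumably at 1000 by the same scheme), so the owning ABI of any of these numbers can be recovered with an integer division. A small illustrative helper, assuming the historical MIPS layout where native Linux o32 calls start at 4000:

    /* Classify a RISC/os-era MIPS syscall number by its ABI range.
     * The 1000 base for SysV and the 4000 base for Linux o32 are
     * assumptions; the 2000/3000 bases come from the comments above. */
    static const char *syscall_abi(int nr)
    {
            switch (nr / 1000) {
            case 1:  return "SysV";
            case 2:  return "BSD 4.3";
            case 3:  return "POSIX";
            case 4:  return "Linux o32";
            default: return "unknown";
            }
    }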
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 983e9a2b6042..64ebd086c40d 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -216,4 +216,5 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif
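
This `#include <asm-generic/atomic.h>`, added here and to the other architectures' atomic.h headers later in this patch, pulls in the new atomic_long_t layer: a word-sized atomic type that maps onto atomic64_t on 64-bit builds and atomic_t on 32-bit ones. Roughly, the generic header reduces to something like this (a simplified sketch, not the full file):

    /* Simplified sketch of what <asm-generic/atomic.h> provides. */
    #ifdef CONFIG_64BIT
    typedef atomic64_t atomic_long_t;
    #define atomic_long_read(l)   atomic64_read((atomic64_t *)(l))
    #define atomic_long_inc(l)    atomic64_inc((atomic64_t *)(l))
    #define atomic_long_dec(l)    atomic64_dec((atomic64_t *)(l))
    #else
    typedef atomic_t atomic_long_t;
    #define atomic_long_read(l)   atomic_read((atomic_t *)(l))
    #define atomic_long_inc(l)    atomic_inc((atomic_t *)(l))
    #define atomic_long_dec(l)    atomic_dec((atomic_t *)(l))
    #endif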
diff --git a/include/asm-parisc/mman.h b/include/asm-parisc/mman.h
index e829607eb8bc..736b0abcac05 100644
--- a/include/asm-parisc/mman.h
+++ b/include/asm-parisc/mman.h
@@ -38,6 +38,7 @@
 #define MADV_SPACEAVAIL 5               /* insure that resources are reserved */
 #define MADV_VPS_PURGE  6               /* Purge pages from VM page cache */
 #define MADV_VPS_INHERIT 7              /* Inherit parents page size */
+#define MADV_REMOVE     8               /* remove these pages & resources */
 
 /* The range 12-64 is reserved for page size specification. */
 #define MADV_4K_PAGES   12              /* Use 4K pages  */
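
MADV_REMOVE, added across all of the mman.h headers in this patch, tells madvise() to free the given range together with its backing store (initially supported for tmpfs), effectively punching a hole without unmapping. A userspace sketch of the intended use:

    /* Sketch: punch a hole in a tmpfs-backed shared mapping.
     * Assumes the mapping is backed by a filesystem that supports
     * MADV_REMOVE; other filesystems are expected to return an error. */
    #include <sys/mman.h>
    #include <stdio.h>

    static int punch_hole(void *addr, size_t len)
    {
            if (madvise(addr, len, MADV_REMOVE) != 0) {
                    perror("madvise(MADV_REMOVE)");
                    return -1;
            }
            return 0;   /* pages and backing blocks freed; reads see zeroes */
    }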
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index ec4b14468959..ae395a0632a6 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -402,5 +402,6 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 
 #endif /* __powerpc64__ */
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h
index f5e5342fcac5..a2e34c21b44f 100644
--- a/include/asm-powerpc/mman.h
+++ b/include/asm-powerpc/mman.h
@@ -44,6 +44,7 @@
 #define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
+#define MADV_REMOVE	0x5		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-ppc/ibm_ocp.h b/include/asm-ppc/ibm_ocp.h
index 9c21de1ff4ed..ddce616f765a 100644
--- a/include/asm-ppc/ibm_ocp.h
+++ b/include/asm-ppc/ibm_ocp.h
@@ -63,7 +63,6 @@ struct ocp_func_emac_data {
 	int	wol_irq;	/* WOL interrupt */
 	int	mdio_idx;	/* EMAC idx of MDIO master or -1 */
 	int	tah_idx;	/* TAH device index or -1 */
-	int	jumbo;		/* Jumbo frames capable flag */
 	int	phy_mode;	/* PHY type or configurable mode */
 	u8	mac_addr[6];	/* EMAC mac address */
 	u32	phy_map;	/* EMAC phy map */
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 84ac6e258eef..df9cf6ed189d 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -27,6 +27,8 @@
 
 #if defined(CONFIG_4xx)
 #include <asm/ibm4xx.h>
+#elif defined(CONFIG_PPC_MPC52xx)
+#include <asm/mpc52xx.h>
 #elif defined(CONFIG_8xx)
 #include <asm/mpc8xx.h>
 #elif defined(CONFIG_8260)
diff --git a/include/asm-ppc/mpc52xx.h b/include/asm-ppc/mpc52xx.h
index e5f80c22fbfc..a055e0756b9d 100644
--- a/include/asm-ppc/mpc52xx.h
+++ b/include/asm-ppc/mpc52xx.h
@@ -29,6 +29,17 @@ struct pt_regs;
 #endif /* __ASSEMBLY__ */
 
 
+#ifdef CONFIG_PCI
+#define _IO_BASE	isa_io_base
+#define _ISA_MEM_BASE	isa_mem_base
+#define PCI_DRAM_OFFSET	pci_dram_offset
+#else
+#define _IO_BASE	0
+#define _ISA_MEM_BASE	0
+#define PCI_DRAM_OFFSET	0
+#endif
+
+
 /* ======================================================================== */
 /* PPC Sys devices definition                                               */
 /* ======================================================================== */
@@ -107,7 +118,7 @@ enum ppc_sys_devices {
 #define MPC52xx_SDMA_IRQ_NUM	17
 #define MPC52xx_PERP_IRQ_NUM	23
 
-#define MPC52xx_CRIT_IRQ_BASE	0
+#define MPC52xx_CRIT_IRQ_BASE	1
 #define MPC52xx_MAIN_IRQ_BASE	(MPC52xx_CRIT_IRQ_BASE + MPC52xx_CRIT_IRQ_NUM)
 #define MPC52xx_SDMA_IRQ_BASE	(MPC52xx_MAIN_IRQ_BASE + MPC52xx_MAIN_IRQ_NUM)
 #define MPC52xx_PERP_IRQ_BASE	(MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM)
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index b3bd4f679f72..d82aedf616fe 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -5,7 +5,7 @@
  *  include/asm-s390/atomic.h
  *
  *  S390 version
- *    Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *               Denis Joseph Barrow,
  *               Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,57 @@ typedef struct {
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-	return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "sr");
-}
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar");
-}
-
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr") == 0;
-}
+#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)			atomic_add_return(1, _v)
+#define atomic_inc_return(_v)		atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
+
+static __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+	return __CS_LOOP(v, i, "sr");
+}
+#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)			atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
+
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, mask, "or");
 }
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP
 
 #ifdef __s390x__
@@ -123,97 +121,67 @@ typedef struct {
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "agr");
-}
 static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
-	return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-	return __CSG_LOOP(v, 1, "sgr") == 0;
+	return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }
 
-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
-
-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+					     long long old, long long new)
+{
+	__asm__ __volatile__("  csg  %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
+
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+					  long long a, long long u)
 {
-	int retval;
+	long long c, old;
 
-	__asm__ __volatile__(
-		"  lr   %0,%3\n"
-		"  cs   %0,%4,0(%2)\n"
-		"  ipm  %0\n"
-		"  srl  %0,28\n"
-		"0:"
-		: "=&d" (retval), "=m" (v->counter)
-		: "a" (v), "d" (expected_oldval) , "d" (new_val),
-		  "m" (v->counter) : "cc", "memory" );
-	return retval;
+	c = atomic64_read(v);
+	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
 }
 
-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c != (u);						\
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#undef __CSG_LOOP
+#endif
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__ */
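
The new atomic_cmpxchg()/atomic_add_unless() pair replaces the s390-private atomic_compare_and_swap() with the interface generic kernel code expects, and atomic_add_unless() is the classic compare-and-swap retry loop. A small usage sketch of the resulting primitives (generic C, not s390-specific):

    /* Sketch: take a reference only if the object is still live.
     * atomic_inc_not_zero() fails once the count has dropped to zero,
     * the standard guard against the lookup-vs-teardown race. */
    struct obj {
            atomic_t refcount;
    };

    static int obj_tryget(struct obj *o)
    {
            return atomic_inc_not_zero(&o->refcount);   /* 1 on success */
    }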
diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h
index 3eb231af5d51..12456cb2f882 100644
--- a/include/asm-s390/ccwdev.h
+++ b/include/asm-s390/ccwdev.h
@@ -185,8 +185,5 @@ extern struct ccw_device *ccw_device_probe_console(void);
 extern int _ccw_device_get_device_number(struct ccw_device *);
 extern int _ccw_device_get_subchannel_number(struct ccw_device *);
 
-extern struct device *s390_root_dev_register(const char *);
-extern void s390_root_dev_unregister(struct device *);
-
 extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
 #endif /* _S390_CCWDEV_H_ */
diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h
index ea86bd12204f..c8d5409b5d56 100644
--- a/include/asm-s390/mman.h
+++ b/include/asm-s390/mman.h
@@ -43,6 +43,7 @@
 #define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
+#define MADV_REMOVE	0x5		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
index 0ddf0a8ef8de..7bc15f0231db 100644
--- a/include/asm-s390/qdio.h
+++ b/include/asm-s390/qdio.h
@@ -195,12 +195,14 @@ struct qdr {
 /*
  * queue information block (QIB)
  */
 #define QIB_AC_INBOUND_PCI_SUPPORTED	0x80
 #define QIB_AC_OUTBOUND_PCI_SUPPORTED	0x40
+#define QIB_RFLAGS_ENABLE_QEBSM		0x80
+
 struct qib {
 	unsigned int  qfmt    :  8;     /* queue format */
 	unsigned int  pfmt    :  8;     /* impl. dep. parameter format */
-	unsigned int  res1    :  8;     /* reserved */
+	unsigned int  rflags  :  8;     /* QEBSM */
 	unsigned int  ac      :  8;     /* adapter characteristics */
 	unsigned int  res2;             /* reserved */
 #ifdef QDIO_32_BIT
diff --git a/include/asm-s390/s390_rdev.h b/include/asm-s390/s390_rdev.h
new file mode 100644
index 000000000000..3ad78f2b9c48
--- /dev/null
+++ b/include/asm-s390/s390_rdev.h
@@ -0,0 +1,15 @@
+/*
+ *  include/asm-s390/ccwdev.h
+ *
+ *    Copyright (C) 2002,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Cornelia Huck <cohuck@de.ibm.com>
+ *               Carsten Otte <cotte@de.ibm.com>
+ *
+ *  Interface for s390 root device
+ */
+
+#ifndef _S390_RDEV_H_
+#define _S390_RDEV_H_
+extern struct device *s390_root_dev_register(const char *);
+extern void s390_root_dev_unregister(struct device *);
+#endif /* _S390_RDEV_H_ */
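
The register/unregister pair moved here out of ccwdev.h gives s390 drivers a parentless "root" device to hang sysfs entries from. A sketch of the calling convention (the "foo" driver name is hypothetical, and ERR_PTR-style error reporting is an assumption based on how such register functions usually behave):

    /* Sketch: create and tear down a root device for a driver. */
    #include <linux/err.h>
    #include <asm/s390_rdev.h>

    static struct device *foo_root_dev;

    static int foo_init(void)
    {
            foo_root_dev = s390_root_dev_register("foo");
            if (IS_ERR(foo_root_dev))
                    return PTR_ERR(foo_root_dev);
            return 0;
    }

    static void foo_exit(void)
    {
            s390_root_dev_unregister(foo_root_dev);
    }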
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 10a619da4761..be104f21c70a 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -61,8 +61,10 @@
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
 
-#define __access_ok(addr,size) (1)
-
+static inline int __access_ok(const void *addr, unsigned long size)
+{
+	return 1;
+}
 #define access_ok(type,addr,size) __access_ok(addr,size)
 
 /*
@@ -206,25 +208,25 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	case 1: {						\
 		unsigned char __x;				\
 		__get_user_asm(__x, ptr, __gu_err);		\
-		(x) = (__typeof__(*(ptr))) __x;			\
+		(x) = *(__typeof__(*(ptr)) *) &__x;		\
 		break;						\
 	};							\
 	case 2: {						\
 		unsigned short __x;				\
 		__get_user_asm(__x, ptr, __gu_err);		\
-		(x) = (__typeof__(*(ptr))) __x;			\
+		(x) = *(__typeof__(*(ptr)) *) &__x;		\
 		break;						\
 	};							\
 	case 4: {						\
 		unsigned int __x;				\
 		__get_user_asm(__x, ptr, __gu_err);		\
-		(x) = (__typeof__(*(ptr))) __x;			\
+		(x) = *(__typeof__(*(ptr)) *) &__x;		\
 		break;						\
 	};							\
 	case 8: {						\
 		unsigned long long __x;				\
 		__get_user_asm(__x, ptr, __gu_err);		\
-		(x) = (__typeof__(*(ptr))) __x;			\
+		(x) = *(__typeof__(*(ptr)) *) &__x;		\
 		break;						\
 	};							\
 	default:						\
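
The switch in __get_user() from a value cast to a dereference through a pointer cast matters whenever *ptr is not an arithmetic type: `(T)__x` is only legal for scalar T, while `*(T *)&__x` reinterprets the fetched bytes and therefore also works for pointer or small aggregate targets. A standalone illustration (the struct is hypothetical, not kernel code):

    /* Why the type-punned form is more general than a value cast. */
    struct pair { unsigned short a, b; };       /* 4 bytes, like an int */

    void demo(void)
    {
            unsigned int __x = 0x00010002;
            struct pair p;

            /* p = (struct pair) __x;  would not compile: there is no
             * conversion from an integer to a struct. */
            p = *(struct pair *) &__x;  /* reinterpret the fetched bytes */
    }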
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index f97d92691f17..2861cdc243ad 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -539,7 +539,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
-# ifdef CONFIG_ARCH_S390_31
+# ifndef CONFIG_64BIT
 #   define __ARCH_WANT_STAT64
 #   define __ARCH_WANT_SYS_TIME
 # endif
diff --git a/include/asm-s390/vtoc.h b/include/asm-s390/vtoc.h
index 41d369f38b0e..d1de5b7ebb0b 100644
--- a/include/asm-s390/vtoc.h
+++ b/include/asm-s390/vtoc.h
@@ -176,4 +176,28 @@ struct vtoc_format7_label
 	struct vtoc_cchhb DS7PTRDS; /* pointer to next FMT7 DSCB */
 } __attribute__ ((packed));
 
+struct vtoc_cms_label {
+	u8 label_id[4];		/* Label identifier */
+	u8 vol_id[6];		/* Volid */
+	u16 version_id;		/* Version identifier */
+	u32 block_size;		/* Disk block size */
+	u32 origin_ptr;		/* Disk origin pointer */
+	u32 usable_count;	/* Number of usable cylinders/blocks */
+	u32 formatted_count;	/* Maximum number of formatted cylinders/
+				 * blocks */
+	u32 block_count;	/* Disk size in CMS blocks */
+	u32 used_count;		/* Number of CMS blocks in use */
+	u32 fst_size;		/* File Status Table (FST) size */
+	u32 fst_count;		/* Number of FSTs per CMS block */
+	u8 format_date[6];	/* Disk FORMAT date */
+	u8 reserved1[2];
+	u32 disk_offset;	/* Disk offset when reserved */
+	u32 map_block;		/* Allocation Map Block with next hole */
+	u32 hblk_disp;		/* Displacement into HBLK data of next hole */
+	u32 user_disp;		/* Displacement into user part of Allocation
+				 * map */
+	u8 reserved2[4];
+	u8 segment_name[8];	/* Name of shared segment */
+} __attribute__ ((packed));
+
 #endif /* _ASM_S390_VTOC_H */
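
The CMS label described by vtoc_cms_label occupies one disk block, with the first four bytes carrying the label magic. A sketch of validating a label read from a DASD (the EBCDIC encoding of the "CMS1" magic is an assumption, s390 disk labels conventionally being EBCDIC):

    /* Sketch: does this block look like a CMS1 disk label?
     * 0xC3 0xD4 0xE2 0xF1 is "CMS1" in EBCDIC (assumed encoding). */
    #include <string.h>

    static int is_cms_label(const struct vtoc_cms_label *l)
    {
            static const unsigned char cms1[4] = { 0xC3, 0xD4, 0xE2, 0xF1 };

            return memcmp(l->label_id, cms1, sizeof(cms1)) == 0;
    }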
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index aabfd334462c..618d8e0de348 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -140,4 +140,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __ASM_SH_ATOMIC_H */
diff --git a/include/asm-sh/mman.h b/include/asm-sh/mman.h
index 3ebab5f79db7..693bd55a3710 100644
--- a/include/asm-sh/mman.h
+++ b/include/asm-sh/mman.h
@@ -35,6 +35,7 @@
 #define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
+#define MADV_REMOVE	0x5		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index 927a2bc27b30..f3ce5c0df13a 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -152,4 +152,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __ASM_SH64_ATOMIC_H */
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 62bec7ad271c..accb4967e9d2 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -159,4 +159,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
 
 #endif /* !(__KERNEL__) */
 
+#include <asm-generic/atomic.h>
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/include/asm-sparc/mman.h b/include/asm-sparc/mman.h
index 138eb81dd70d..98435ad8619e 100644
--- a/include/asm-sparc/mman.h
+++ b/include/asm-sparc/mman.h
@@ -54,6 +54,7 @@
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
 #define MADV_FREE	0x5		/* (Solaris) contents can be freed */
+#define MADV_REMOVE	0x6		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 3789fe315992..11f5aa5d108c 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -96,4 +96,5 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define smp_mb__after_atomic_inc()	barrier()
 #endif
 
+#include <asm-generic/atomic.h>
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h
index 01cecf54357b..cb4b6156194d 100644
--- a/include/asm-sparc64/mman.h
+++ b/include/asm-sparc64/mman.h
@@ -54,6 +54,7 @@
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
 #define MADV_FREE	0x5		/* (Solaris) contents can be freed */
+#define MADV_REMOVE	0x6		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index bede3172ce7f..f5b9ab6f4e70 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -126,4 +126,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __V850_ATOMIC_H__ */
diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h
index e2b90081b56f..edc79965193a 100644
--- a/include/asm-v850/mman.h
+++ b/include/asm-v850/mman.h
@@ -32,6 +32,7 @@
 #define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
+#define MADV_REMOVE	0x5		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 50db9f39274f..72eb071488c7 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -424,4 +424,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
index b3189fb229d1..d32f7f58752a 100644
--- a/include/asm-x86_64/cacheflush.h
+++ b/include/asm-x86_64/cacheflush.h
@@ -27,4 +27,8 @@ void global_flush_tlb(void);
 int change_page_attr(struct page *page, int numpages, pgprot_t prot);
 int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
 #endif /* _X8664_CACHEFLUSH_H */
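
mark_rodata_ro() is only declared here; a plausible implementation (a sketch under the assumption that the kernel's read-only data is bounded by the __start_rodata/__end_rodata linker symbols, as on x86-64 of this era) would walk that range with the change_page_attr_addr() interface declared just above:

    /* Sketch of a mark_rodata_ro() built on change_page_attr_addr(). */
    extern char __start_rodata[], __end_rodata[];

    void mark_rodata_ro(void)
    {
            unsigned long start = (unsigned long) __start_rodata;
            unsigned long end   = (unsigned long) __end_rodata;

            /* PAGE_KERNEL_RO as the target protection is an assumption. */
            change_page_attr_addr(start, (end - start) >> PAGE_SHIFT,
                                  PAGE_KERNEL_RO);
            global_flush_tlb();     /* drop stale writable TLB entries */
    }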
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index 78e60a4fd4ee..d0e97b74f735 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -36,6 +36,7 @@
 #define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
+#define MADV_REMOVE	0x5		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 6f8a17d105ab..10248a9a0582 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -76,7 +76,7 @@ struct mpc_config_bus
 {
 	unsigned char mpc_type;
 	unsigned char mpc_busid;
-	unsigned char mpc_bustype[6] __attribute((packed));
+	unsigned char mpc_bustype[6];
 };
 
 /* List of Bus Type string values, Intel MP Spec. */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index ecf58c7c1650..02888d7a496f 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 
 #define pte_same(a, b)		((a).pte == (b).pte)
 
+#define pte_pgprot(a)	(__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
+
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PUD_SIZE	(1UL << PUD_SHIFT)
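
pte_pgprot() masks off the pfn portion of a pte and keeps only the protection bits, which is useful when rebuilding a pte that should point at a different page. A brief sketch of the typical pattern (the caller context is assumed):

    /* Sketch: retarget a pte at a new pfn, keeping its protections. */
    static pte_t retarget_pte(pte_t old, unsigned long new_pfn)
    {
            return pfn_pte(new_pfn, pte_pgprot(old));
    }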
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 85348e02ad2e..b34cc2ee222b 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -315,6 +315,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
315#define local_irq_enable() __asm__ __volatile__("sti": : :"memory") 315#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
316/* used in the idle loop; sti takes one instruction cycle to complete */ 316/* used in the idle loop; sti takes one instruction cycle to complete */
317#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") 317#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
318/* used when interrupts are already enabled or to shutdown the processor */
319#define halt() __asm__ __volatile__("hlt": : :"memory")
318 320
319#define irqs_disabled() \ 321#define irqs_disabled() \
320({ \ 322({ \
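
safe_halt() is for sleeping with interrupts currently disabled: sti only takes effect after the following instruction, so the sti;hlt pair cannot lose a wakeup. The new halt() covers the other cases, interrupts already enabled or a CPU being deliberately parked at shutdown. A sketch of the latter:

/* Sketch: spin a CPU down at shutdown.  Interrupts stay disabled, so
 * plain halt() is correct; the loop guards against NMI wakeups. */
static void park_cpu(void)
{
	local_irq_disable();
	for (;;)
		halt();
}
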
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 3670cc7695da..e2ce06b101ad 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -286,6 +286,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
286#define smp_mb__before_atomic_inc() barrier() 286#define smp_mb__before_atomic_inc() barrier()
287#define smp_mb__after_atomic_inc() barrier() 287#define smp_mb__after_atomic_inc() barrier()
288 288
289#include <asm-generic/atomic.h>
289#endif /* __KERNEL__ */ 290#endif /* __KERNEL__ */
290 291
291#endif /* _XTENSA_ATOMIC_H */ 292#endif /* _XTENSA_ATOMIC_H */
diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h
index 9a95a45df996..082a7504925e 100644
--- a/include/asm-xtensa/mman.h
+++ b/include/asm-xtensa/mman.h
@@ -72,6 +72,7 @@
72#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ 72#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
73#define MADV_WILLNEED 0x3 /* pre-fault pages */ 73#define MADV_WILLNEED 0x3 /* pre-fault pages */
74#define MADV_DONTNEED 0x4 /* discard these pages */ 74#define MADV_DONTNEED 0x4 /* discard these pages */
75#define MADV_REMOVE 0x5 /* remove these pages & resources */
75 76
76/* compatibility flags */ 77/* compatibility flags */
77#define MAP_ANON MAP_ANONYMOUS 78#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index 26f6ec38577a..a3dae1803f45 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -35,7 +35,6 @@ struct user_key_payload {
35extern struct key_type key_type_user; 35extern struct key_type key_type_user;
36 36
37extern int user_instantiate(struct key *key, const void *data, size_t datalen); 37extern int user_instantiate(struct key *key, const void *data, size_t datalen);
38extern int user_duplicate(struct key *key, const struct key *source);
39extern int user_update(struct key *key, const void *data, size_t datalen); 38extern int user_update(struct key *key, const void *data, size_t datalen);
40extern int user_match(const struct key *key, const void *criterion); 39extern int user_match(const struct key *key, const void *criterion);
41extern void user_destroy(struct key *key); 40extern void user_destroy(struct key *key);
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f63dad4165b1..94f77cce27fa 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -143,6 +143,8 @@ enum {
143 ATA_CMD_PACKET = 0xA0, 143 ATA_CMD_PACKET = 0xA0,
144 ATA_CMD_VERIFY = 0x40, 144 ATA_CMD_VERIFY = 0x40,
145 ATA_CMD_VERIFY_EXT = 0x42, 145 ATA_CMD_VERIFY_EXT = 0x42,
146 ATA_CMD_STANDBYNOW1 = 0xE0,
147 ATA_CMD_IDLEIMMEDIATE = 0xE1,
146 ATA_CMD_INIT_DEV_PARAMS = 0x91, 148 ATA_CMD_INIT_DEV_PARAMS = 0x91,
147 149
148 /* SETFEATURES stuff */ 150 /* SETFEATURES stuff */
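
ATA_CMD_STANDBYNOW1 and ATA_CMD_IDLEIMMEDIATE give libata the opcodes it needs to spin drives down, or idle them, around suspend. A hedged sketch of preparing such a non-data command, assuming libata's struct ata_taskfile and an already-initialised taskfile:

/* Sketch: fill a prepared taskfile with STANDBY IMMEDIATE before
 * issuing it through the usual libata submission path. */
static void tf_standby_immediate(struct ata_taskfile *tf)
{
	tf->command = ATA_CMD_STANDBYNOW1;
	tf->protocol = ATA_PROT_NODATA;	/* no data transfer */
	tf->flags |= ATA_TFLAG_DEVICE;
}
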
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 3b03b0b868dd..993da8cc9706 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -43,50 +43,38 @@ typedef struct bootmem_data {
43extern unsigned long __init bootmem_bootmap_pages (unsigned long); 43extern unsigned long __init bootmem_bootmap_pages (unsigned long);
44extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend); 44extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
45extern void __init free_bootmem (unsigned long addr, unsigned long size); 45extern void __init free_bootmem (unsigned long addr, unsigned long size);
46extern void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, unsigned long limit); 46extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
47extern void * __init __alloc_bootmem_low(unsigned long size,
48 unsigned long align,
49 unsigned long goal);
50extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat,
51 unsigned long size,
52 unsigned long align,
53 unsigned long goal);
47#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE 54#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
48extern void __init reserve_bootmem (unsigned long addr, unsigned long size); 55extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
49#define alloc_bootmem(x) \ 56#define alloc_bootmem(x) \
50 __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 57 __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
51#define alloc_bootmem_low(x) \ 58#define alloc_bootmem_low(x) \
52 __alloc_bootmem((x), SMP_CACHE_BYTES, 0) 59 __alloc_bootmem_low((x), SMP_CACHE_BYTES, 0)
53#define alloc_bootmem_pages(x) \ 60#define alloc_bootmem_pages(x) \
54 __alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 61 __alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
55#define alloc_bootmem_low_pages(x) \ 62#define alloc_bootmem_low_pages(x) \
56 __alloc_bootmem((x), PAGE_SIZE, 0) 63 __alloc_bootmem_low((x), PAGE_SIZE, 0)
57
58#define alloc_bootmem_limit(x, limit) \
59 __alloc_bootmem_limit((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
60#define alloc_bootmem_low_limit(x, limit) \
61 __alloc_bootmem_limit((x), SMP_CACHE_BYTES, 0, (limit))
62#define alloc_bootmem_pages_limit(x, limit) \
63 __alloc_bootmem_limit((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
64#define alloc_bootmem_low_pages_limit(x, limit) \
65 __alloc_bootmem_limit((x), PAGE_SIZE, 0, (limit))
66
67#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ 64#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
68extern unsigned long __init free_all_bootmem (void); 65extern unsigned long __init free_all_bootmem (void);
69 66extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
70extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn); 67extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn);
71extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size); 68extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
72extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size); 69extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
73extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat); 70extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
74extern void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
75#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE 71#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
76#define alloc_bootmem_node(pgdat, x) \ 72#define alloc_bootmem_node(pgdat, x) \
77 __alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 73 __alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
78#define alloc_bootmem_pages_node(pgdat, x) \ 74#define alloc_bootmem_pages_node(pgdat, x) \
79 __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) 75 __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
80#define alloc_bootmem_low_pages_node(pgdat, x) \ 76#define alloc_bootmem_low_pages_node(pgdat, x) \
81 __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0) 77 __alloc_bootmem_low_node((pgdat), (x), PAGE_SIZE, 0)
82
83#define alloc_bootmem_node_limit(pgdat, x, limit) \
84 __alloc_bootmem_node_limit((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
85#define alloc_bootmem_pages_node_limit(pgdat, x, limit) \
86 __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
87#define alloc_bootmem_low_pages_node_limit(pgdat, x, limit) \
88 __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, 0, (limit))
89
90#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ 78#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
91 79
92#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP 80#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
@@ -123,15 +111,5 @@ extern void *__init alloc_large_system_hash(const char *tablename,
123#endif 111#endif
124extern int __initdata hashdist; /* Distribute hashes across NUMA nodes? */ 112extern int __initdata hashdist; /* Distribute hashes across NUMA nodes? */
125 113
126static inline void *__alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
127{
128 return __alloc_bootmem_limit(size, align, goal, 0);
129}
130
131static inline void *__alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align,
132 unsigned long goal)
133{
134 return __alloc_bootmem_node_limit(pgdat, size, align, goal, 0);
135}
136 114
137#endif /* _LINUX_BOOTMEM_H */ 115#endif /* _LINUX_BOOTMEM_H */
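
The *_limit variants are gone; callers that need low memory now go through the dedicated __alloc_bootmem_low*() entry points, which lets the allocator enforce the low-memory limit internally rather than threading a limit argument through every caller. Typical early-boot usage keeps its shape, sketched here:

/* Sketch: boot-time allocation of one DMA-reachable page with the
 * reworked interface; alloc_bootmem_low_pages() now ends up in
 * __alloc_bootmem_low() instead of the removed _limit variants. */
void __init early_dma_buffer_init(void)
{
	void *buf = alloc_bootmem_low_pages(PAGE_SIZE);

	if (!buf)
		panic("early DMA buffer allocation failed");
	/* ... hand buf to the boot-time user ... */
}
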
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index f5eb6b6cd109..fa75ba0d635e 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ typedef char ioctl_struct[308];
272#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) 272#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
273 273
274#define DM_VERSION_MAJOR 4 274#define DM_VERSION_MAJOR 4
275#define DM_VERSION_MINOR 4 275#define DM_VERSION_MINOR 5
276#define DM_VERSION_PATCHLEVEL 0 276#define DM_VERSION_PATCHLEVEL 0
277#define DM_VERSION_EXTRA "-ioctl (2005-01-12)" 277#define DM_VERSION_EXTRA "-ioctl (2005-10-04)"
278 278
279/* Status bits */ 279/* Status bits */
280#define DM_READONLY_FLAG (1 << 0) /* In/Out */ 280#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -301,8 +301,13 @@ typedef char ioctl_struct[308];
301#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */ 301#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */
302 302
303/* 303/*
304 * Set this to improve performance when you aren't going to use open_count 304 * Set this to improve performance when you aren't going to use open_count.
305 */ 305 */
306#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */ 306#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */
307 307
308/*
309 * Set this to avoid attempting to freeze any filesystem when suspending.
310 */
311#define DM_SKIP_LOCKFS_FLAG (1 << 10) /* In */
312
308#endif /* _LINUX_DM_IOCTL_H */ 313#endif /* _LINUX_DM_IOCTL_H */
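
DM_SKIP_LOCKFS_FLAG lets userspace request a suspend without first freezing the filesystem on top of the device, useful when the caller knows no consistent snapshot is needed. A hedged userspace sketch against the bumped 4.5.0 interface (error handling and uuid setup elided):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

/* Sketch: suspend a mapped device while skipping the lockfs step. */
static int dm_suspend_nolockfs(int ctl_fd, const char *name)
{
	struct dm_ioctl io;

	memset(&io, 0, sizeof(io));
	io.version[0] = DM_VERSION_MAJOR;
	io.version[1] = DM_VERSION_MINOR;
	io.version[2] = DM_VERSION_PATCHLEVEL;
	io.data_size = sizeof(io);
	strncpy(io.name, name, sizeof(io.name) - 1);
	io.flags = DM_SUSPEND_FLAG | DM_SKIP_LOCKFS_FLAG;

	return ioctl(ctl_fd, DM_DEV_SUSPEND, &io);
}
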
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ed9a41a71e8b..115e72be25d0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1050,6 +1050,7 @@ struct inode_operations {
1050 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); 1050 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
1051 ssize_t (*listxattr) (struct dentry *, char *, size_t); 1051 ssize_t (*listxattr) (struct dentry *, char *, size_t);
1052 int (*removexattr) (struct dentry *, const char *); 1052 int (*removexattr) (struct dentry *, const char *);
1053 void (*truncate_range)(struct inode *, loff_t, loff_t);
1053}; 1054};
1054 1055
1055struct seq_file; 1056struct seq_file;
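
The new truncate_range() inode operation is the filesystem-side hook behind MADV_REMOVE above: the VM asks the filesystem to drop both pages and backing store for a byte range. A sketch of the dispatching caller the VM side is expected to provide (names hypothetical):

/* Sketch of the call site madvise(MADV_REMOVE) depends on: the hole
 * is punched only if the backing filesystem supplies the new hook. */
static int vmtruncate_range_sketch(struct inode *inode,
				   loff_t offset, loff_t end)
{
	if (!inode->i_op || !inode->i_op->truncate_range)
		return -ENOSYS;

	inode->i_op->truncate_range(inode, offset, end);
	return 0;
}
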
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index b76b558b03d4..528959c52f1b 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -14,7 +14,7 @@
14#define FUSE_KERNEL_VERSION 7 14#define FUSE_KERNEL_VERSION 7
15 15
16/** Minor version number of this interface */ 16/** Minor version number of this interface */
17#define FUSE_KERNEL_MINOR_VERSION 3 17#define FUSE_KERNEL_MINOR_VERSION 5
18 18
19/** The node ID of the root inode */ 19/** The node ID of the root inode */
20#define FUSE_ROOT_ID 1 20#define FUSE_ROOT_ID 1
@@ -53,6 +53,9 @@ struct fuse_kstatfs {
53 __u64 ffree; 53 __u64 ffree;
54 __u32 bsize; 54 __u32 bsize;
55 __u32 namelen; 55 __u32 namelen;
56 __u32 frsize;
57 __u32 padding;
58 __u32 spare[6];
56}; 59};
57 60
58#define FATTR_MODE (1 << 0) 61#define FATTR_MODE (1 << 0)
@@ -105,12 +108,8 @@ enum fuse_opcode {
105 FUSE_CREATE = 35 108 FUSE_CREATE = 35
106}; 109};
107 110
108/* Conservative buffer size for the client */ 111/* The read buffer is required to be at least 8k, but may be much larger */
109#define FUSE_MAX_IN 8192 112#define FUSE_MIN_READ_BUFFER 8192
110
111#define FUSE_NAME_MAX 1024
112#define FUSE_SYMLINK_MAX 4096
113#define FUSE_XATTR_SIZE_MAX 4096
114 113
115struct fuse_entry_out { 114struct fuse_entry_out {
116 __u64 nodeid; /* Inode ID */ 115 __u64 nodeid; /* Inode ID */
@@ -213,6 +212,8 @@ struct fuse_write_out {
213 __u32 padding; 212 __u32 padding;
214}; 213};
215 214
215#define FUSE_COMPAT_STATFS_SIZE 48
216
216struct fuse_statfs_out { 217struct fuse_statfs_out {
217 struct fuse_kstatfs st; 218 struct fuse_kstatfs st;
218}; 219};
@@ -243,9 +244,16 @@ struct fuse_access_in {
243 __u32 padding; 244 __u32 padding;
244}; 245};
245 246
246struct fuse_init_in_out { 247struct fuse_init_in {
248 __u32 major;
249 __u32 minor;
250};
251
252struct fuse_init_out {
247 __u32 major; 253 __u32 major;
248 __u32 minor; 254 __u32 minor;
255 __u32 unused[3];
256 __u32 max_write;
249}; 257};
250 258
251struct fuse_in_header { 259struct fuse_in_header {
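
Splitting fuse_init_in_out lets the two directions diverge: the kernel's INIT request carries only the version pair, while the daemon's reply now also advertises max_write, the largest write the userspace side accepts. A sketch of how a daemon might fill the reply (the 4k figure is just a conservative placeholder):

#include <string.h>

/* Sketch: answer the kernel's INIT request under the split structs;
 * negotiate down to the smaller of the two minor versions. */
static void fill_init_reply(const struct fuse_init_in *in,
			    struct fuse_init_out *out)
{
	memset(out, 0, sizeof(*out));	/* zeroes the unused[] words */
	out->major = FUSE_KERNEL_VERSION;
	out->minor = in->minor < FUSE_KERNEL_MINOR_VERSION ?
		     in->minor : FUSE_KERNEL_MINOR_VERSION;
	out->max_write = 4096;
}
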
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1056717ee501..68d82ad6b17c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -22,7 +22,7 @@ int hugetlb_report_meminfo(char *);
22int hugetlb_report_node_meminfo(int, char *); 22int hugetlb_report_node_meminfo(int, char *);
23int is_hugepage_mem_enough(size_t); 23int is_hugepage_mem_enough(size_t);
24unsigned long hugetlb_total_pages(void); 24unsigned long hugetlb_total_pages(void);
25struct page *alloc_huge_page(void); 25struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
26void free_huge_page(struct page *); 26void free_huge_page(struct page *);
27int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 27int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
28 unsigned long address, int write_access); 28 unsigned long address, int write_access);
@@ -97,7 +97,7 @@ static inline unsigned long hugetlb_total_pages(void)
97#define is_hugepage_only_range(mm, addr, len) 0 97#define is_hugepage_only_range(mm, addr, len) 0
98#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ 98#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
99 do { } while (0) 99 do { } while (0)
100#define alloc_huge_page() ({ NULL; }) 100#define alloc_huge_page(vma, addr) ({ NULL; })
101#define free_huge_page(p) ({ (void)(p); BUG(); }) 101#define free_huge_page(p) ({ (void)(p); BUG(); })
102#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) 102#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
103 103
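
alloc_huge_page() now receives the faulting vma and address, so the allocator can honour per-VMA placement policy (NUMA interleaving and the like) instead of pulling blindly from the global pool. Callers in the fault path simply forward what they already have, roughly:

/* Sketch: the hugetlb fault path passes its placement context down. */
static struct page *fault_alloc_sketch(struct vm_area_struct *vma,
				       unsigned long address)
{
	return alloc_huge_page(vma, address & HPAGE_MASK);
}
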
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index d79c8a4bc4f8..9ba806796667 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -30,6 +30,7 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/workqueue.h> /* work_struct */ 32#include <linux/workqueue.h> /* work_struct */
33#include <linux/mempool.h>
33 34
34#include <asm/io.h> 35#include <asm/io.h>
35#include <asm/semaphore.h> /* Needed for MUTEX init macros */ 36#include <asm/semaphore.h> /* Needed for MUTEX init macros */
@@ -38,6 +39,355 @@
38#define I2O_QUEUE_EMPTY 0xffffffff 39#define I2O_QUEUE_EMPTY 0xffffffff
39 40
40/* 41/*
42 * Cache strategies
43 */
44
45/* The NULL strategy leaves everything up to the controller. This tends to be a
46 * pessimal but functional choice.
47 */
48#define CACHE_NULL 0
49/* Prefetch data when reading. We continually attempt to load the next 32 sectors
50 * into the controller cache.
51 */
52#define CACHE_PREFETCH 1
53/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
 54 * into the controller cache. When an I/O is 8K or smaller we assume it is
 55 * probably not sequential and don't prefetch (default).
56 */
57#define CACHE_SMARTFETCH 2
58/* Data is written to the cache and then out on to the disk. The I/O must be
59 * physically on the medium before the write is acknowledged (default without
60 * NVRAM)
61 */
62#define CACHE_WRITETHROUGH 17
63/* Data is written to the cache and then out on to the disk. The controller
 64 * is permitted to write back the cache any way it wants (default if
 65 * battery-backed NVRAM is present). It can be useful to set this for swap
 66 * regardless of battery state.
67 */
68#define CACHE_WRITEBACK 18
 69/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We
 70 * write large I/Os directly to disk, bypassing the cache to avoid the extra
 71 * memory copy hits. Small writes are writeback cached.
72 */
73#define CACHE_SMARTBACK 19
 74/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We
 75 * write large I/Os directly to disk, bypassing the cache to avoid the extra
 76 * memory copy hits. Small writes are writethrough cached. Suitable for
 77 * devices lacking battery backup.
78 */
79#define CACHE_SMARTTHROUGH 20
80
81/*
82 * Ioctl structures
83 */
84
85#define BLKI2OGRSTRAT _IOR('2', 1, int)
86#define BLKI2OGWSTRAT _IOR('2', 2, int)
87#define BLKI2OSRSTRAT _IOW('2', 3, int)
88#define BLKI2OSWSTRAT _IOW('2', 4, int)
89
90/*
91 * I2O Function codes
92 */
93
94/*
95 * Executive Class
96 */
97#define I2O_CMD_ADAPTER_ASSIGN 0xB3
98#define I2O_CMD_ADAPTER_READ 0xB2
99#define I2O_CMD_ADAPTER_RELEASE 0xB5
100#define I2O_CMD_BIOS_INFO_SET 0xA5
101#define I2O_CMD_BOOT_DEVICE_SET 0xA7
102#define I2O_CMD_CONFIG_VALIDATE 0xBB
103#define I2O_CMD_CONN_SETUP 0xCA
104#define I2O_CMD_DDM_DESTROY 0xB1
105#define I2O_CMD_DDM_ENABLE 0xD5
106#define I2O_CMD_DDM_QUIESCE 0xC7
107#define I2O_CMD_DDM_RESET 0xD9
108#define I2O_CMD_DDM_SUSPEND 0xAF
109#define I2O_CMD_DEVICE_ASSIGN 0xB7
110#define I2O_CMD_DEVICE_RELEASE 0xB9
111#define I2O_CMD_HRT_GET 0xA8
112#define I2O_CMD_ADAPTER_CLEAR 0xBE
113#define I2O_CMD_ADAPTER_CONNECT 0xC9
114#define I2O_CMD_ADAPTER_RESET 0xBD
115#define I2O_CMD_LCT_NOTIFY 0xA2
116#define I2O_CMD_OUTBOUND_INIT 0xA1
117#define I2O_CMD_PATH_ENABLE 0xD3
118#define I2O_CMD_PATH_QUIESCE 0xC5
119#define I2O_CMD_PATH_RESET 0xD7
120#define I2O_CMD_STATIC_MF_CREATE 0xDD
121#define I2O_CMD_STATIC_MF_RELEASE 0xDF
122#define I2O_CMD_STATUS_GET 0xA0
123#define I2O_CMD_SW_DOWNLOAD 0xA9
124#define I2O_CMD_SW_UPLOAD 0xAB
125#define I2O_CMD_SW_REMOVE 0xAD
126#define I2O_CMD_SYS_ENABLE 0xD1
127#define I2O_CMD_SYS_MODIFY 0xC1
128#define I2O_CMD_SYS_QUIESCE 0xC3
129#define I2O_CMD_SYS_TAB_SET 0xA3
130
131/*
132 * Utility Class
133 */
134#define I2O_CMD_UTIL_NOP 0x00
135#define I2O_CMD_UTIL_ABORT 0x01
136#define I2O_CMD_UTIL_CLAIM 0x09
137#define I2O_CMD_UTIL_RELEASE 0x0B
138#define I2O_CMD_UTIL_PARAMS_GET 0x06
139#define I2O_CMD_UTIL_PARAMS_SET 0x05
140#define I2O_CMD_UTIL_EVT_REGISTER 0x13
141#define I2O_CMD_UTIL_EVT_ACK 0x14
142#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
143#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
144#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
145#define I2O_CMD_UTIL_LOCK 0x17
146#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
147#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
148
149/*
150 * SCSI Host Bus Adapter Class
151 */
152#define I2O_CMD_SCSI_EXEC 0x81
153#define I2O_CMD_SCSI_ABORT 0x83
154#define I2O_CMD_SCSI_BUSRESET 0x27
155
156/*
157 * Bus Adapter Class
158 */
159#define I2O_CMD_BUS_ADAPTER_RESET 0x85
160#define I2O_CMD_BUS_RESET 0x87
161#define I2O_CMD_BUS_SCAN 0x89
162#define I2O_CMD_BUS_QUIESCE 0x8b
163
164/*
165 * Random Block Storage Class
166 */
167#define I2O_CMD_BLOCK_READ 0x30
168#define I2O_CMD_BLOCK_WRITE 0x31
169#define I2O_CMD_BLOCK_CFLUSH 0x37
170#define I2O_CMD_BLOCK_MLOCK 0x49
171#define I2O_CMD_BLOCK_MUNLOCK 0x4B
172#define I2O_CMD_BLOCK_MMOUNT 0x41
173#define I2O_CMD_BLOCK_MEJECT 0x43
174#define I2O_CMD_BLOCK_POWER 0x70
175
176#define I2O_CMD_PRIVATE 0xFF
177
178/* Command status values */
179
180#define I2O_CMD_IN_PROGRESS 0x01
181#define I2O_CMD_REJECTED 0x02
182#define I2O_CMD_FAILED 0x03
183#define I2O_CMD_COMPLETED 0x04
184
185/* I2O API function return values */
186
187#define I2O_RTN_NO_ERROR 0
188#define I2O_RTN_NOT_INIT 1
189#define I2O_RTN_FREE_Q_EMPTY 2
190#define I2O_RTN_TCB_ERROR 3
191#define I2O_RTN_TRANSACTION_ERROR 4
192#define I2O_RTN_ADAPTER_ALREADY_INIT 5
193#define I2O_RTN_MALLOC_ERROR 6
194#define I2O_RTN_ADPTR_NOT_REGISTERED 7
195#define I2O_RTN_MSG_REPLY_TIMEOUT 8
196#define I2O_RTN_NO_STATUS 9
197#define I2O_RTN_NO_FIRM_VER 10
198#define I2O_RTN_NO_LINK_SPEED 11
199
200/* Reply message status defines for all messages */
201
202#define I2O_REPLY_STATUS_SUCCESS 0x00
203#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
204#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
205#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
206#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
207#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
208#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
209#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
210#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
211#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
212#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
213#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
214
215/* Status codes and Error Information for Parameter functions */
216
217#define I2O_PARAMS_STATUS_SUCCESS 0x00
218#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
219#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
220#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
221#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
222#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
223#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
224#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
225#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
226#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
227#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
228#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
229#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
230#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
231#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
232#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
233#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
234
235/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
236 * messages: Table 3-2 Detailed Status Codes.*/
237
238#define I2O_DSC_SUCCESS 0x0000
239#define I2O_DSC_BAD_KEY 0x0002
240#define I2O_DSC_TCL_ERROR 0x0003
241#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
242#define I2O_DSC_NO_SUCH_PAGE 0x0005
243#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
244#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
245#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
246#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
247#define I2O_DSC_DEVICE_LOCKED 0x000B
248#define I2O_DSC_DEVICE_RESET 0x000C
249#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
250#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
251#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
252#define I2O_DSC_INVALID_OFFSET 0x0010
253#define I2O_DSC_INVALID_PARAMETER 0x0011
254#define I2O_DSC_INVALID_REQUEST 0x0012
255#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
256#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
257#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
258#define I2O_DSC_MISSING_PARAMETER 0x0016
259#define I2O_DSC_TIMEOUT 0x0017
260#define I2O_DSC_UNKNOWN_ERROR 0x0018
261#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
262#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
263#define I2O_DSC_DEVICE_BUSY 0x001B
264#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
265
266/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
267 Status Codes.*/
268
269#define I2O_BSA_DSC_SUCCESS 0x0000
270#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
271#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
272#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
273#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
274#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
275#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
276#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
277#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
278#define I2O_BSA_DSC_BUS_FAILURE 0x0009
279#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
280#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
281#define I2O_BSA_DSC_DEVICE_RESET 0x000C
282#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
283#define I2O_BSA_DSC_TIMEOUT 0x000E
284
285/* FailureStatusCodes, Table 3-3 Message Failure Codes */
286
287#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
288#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
289#define I2O_FSC_TRANSPORT_CONGESTION 0x83
290#define I2O_FSC_TRANSPORT_FAILURE 0x84
291#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
292#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
293#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
294#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
295#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
296#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
297#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
298#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
299#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
300#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
301#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
302#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
303
304/* Device Claim Types */
305#define I2O_CLAIM_PRIMARY 0x01000000
306#define I2O_CLAIM_MANAGEMENT 0x02000000
307#define I2O_CLAIM_AUTHORIZED 0x03000000
308#define I2O_CLAIM_SECONDARY 0x04000000
309
310/* Message header defines for VersionOffset */
311#define I2OVER15 0x0001
312#define I2OVER20 0x0002
313
314/* Default is 1.5 */
315#define I2OVERSION I2OVER15
316
317#define SGL_OFFSET_0 I2OVERSION
318#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
319#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
320#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
321#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
322#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
323#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
324#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
325#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
326#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
327#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
328
329/* Transaction Reply Lists (TRL) Control Word structure */
330#define TRL_SINGLE_FIXED_LENGTH 0x00
331#define TRL_SINGLE_VARIABLE_LENGTH 0x40
332#define TRL_MULTIPLE_FIXED_LENGTH 0x80
333
334 /* msg header defines for MsgFlags */
335#define MSG_STATIC 0x0100
336#define MSG_64BIT_CNTXT 0x0200
337#define MSG_MULTI_TRANS 0x1000
338#define MSG_FAIL 0x2000
339#define MSG_FINAL 0x4000
340#define MSG_REPLY 0x8000
341
342 /* minimum size msg */
343#define THREE_WORD_MSG_SIZE 0x00030000
344#define FOUR_WORD_MSG_SIZE 0x00040000
345#define FIVE_WORD_MSG_SIZE 0x00050000
346#define SIX_WORD_MSG_SIZE 0x00060000
347#define SEVEN_WORD_MSG_SIZE 0x00070000
348#define EIGHT_WORD_MSG_SIZE 0x00080000
349#define NINE_WORD_MSG_SIZE 0x00090000
350#define TEN_WORD_MSG_SIZE 0x000A0000
351#define ELEVEN_WORD_MSG_SIZE 0x000B0000
352#define I2O_MESSAGE_SIZE(x) ((x)<<16)
353
354/* special TID assignments */
355#define ADAPTER_TID 0
356#define HOST_TID 1
357
358/* outbound queue defines */
359#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
360#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
361
362/* inbound queue definitions */
363#define I2O_MSG_INPOOL_MIN 32
364#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
365
366#define I2O_POST_WAIT_OK 0
367#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
368
369#define I2O_CONTEXT_LIST_MIN_LENGTH 15
370#define I2O_CONTEXT_LIST_USED 0x01
371#define I2O_CONTEXT_LIST_DELETED 0x02
372
373/* timeouts */
374#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
375#define I2O_TIMEOUT_MESSAGE_GET 5
376#define I2O_TIMEOUT_RESET 30
377#define I2O_TIMEOUT_STATUS_GET 5
378#define I2O_TIMEOUT_LCT_GET 360
379#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
380
381/* retries */
382#define I2O_HRT_GET_TRIES 3
383#define I2O_LCT_GET_TRIES 3
384
385/* defines for max_sectors and max_phys_segments */
386#define I2O_MAX_SECTORS 1024
387#define I2O_MAX_SECTORS_LIMITED 128
388#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
389
390/*
41 * Message structures 391 * Message structures
42 */ 392 */
43struct i2o_message { 393struct i2o_message {
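
The strategy constants and BLKI2O* ioctls above moved in from the tail of the header (their old copies are removed further down). From userspace they read and set a block device's cache strategy; a sketch, assuming the constants have been copied out of this header and fd is an open I2O block device:

#include <stdio.h>
#include <sys/ioctl.h>

/* Sketch: query the current write strategy, then switch to
 * writeback caching. */
static int set_writeback(int fd)
{
	int strategy;

	if (ioctl(fd, BLKI2OGWSTRAT, &strategy) < 0)
		return -1;
	printf("current write strategy: %d\n", strategy);

	strategy = CACHE_WRITEBACK;
	return ioctl(fd, BLKI2OSWSTRAT, &strategy);
}
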
@@ -58,6 +408,12 @@ struct i2o_message {
58 u32 body[0]; 408 u32 body[0];
59}; 409};
60 410
411/* MFA and I2O message used by mempool */
412struct i2o_msg_mfa {
413 u32 mfa; /* MFA returned by the controller */
414 struct i2o_message msg; /* I2O message */
415};
416
61/* 417/*
62 * Each I2O device entity has one of these. There is one per device. 418 * Each I2O device entity has one of these. There is one per device.
63 */ 419 */
@@ -130,6 +486,15 @@ struct i2o_dma {
130}; 486};
131 487
132/* 488/*
489 * Contains slab cache and mempool information
490 */
491struct i2o_pool {
492 char *name;
493 kmem_cache_t *slab;
494 mempool_t *mempool;
495};
496
497/*
133 * Contains IO mapped address information 498 * Contains IO mapped address information
134 */ 499 */
135struct i2o_io { 500struct i2o_io {
@@ -174,8 +539,6 @@ struct i2o_controller {
174 void __iomem *irq_status; /* Interrupt status register address */ 539 void __iomem *irq_status; /* Interrupt status register address */
175 void __iomem *irq_mask; /* Interrupt mask register address */ 540 void __iomem *irq_mask; /* Interrupt mask register address */
176 541
177 /* Dynamic LCT related data */
178
179 struct i2o_dma status; /* IOP status block */ 542 struct i2o_dma status; /* IOP status block */
180 543
181 struct i2o_dma hrt; /* HW Resource Table */ 544 struct i2o_dma hrt; /* HW Resource Table */
@@ -188,6 +551,8 @@ struct i2o_controller {
188 struct i2o_io in_queue; /* inbound message queue Host->IOP */ 551 struct i2o_io in_queue; /* inbound message queue Host->IOP */
189 struct i2o_dma out_queue; /* outbound message queue IOP->Host */ 552 struct i2o_dma out_queue; /* outbound message queue IOP->Host */
190 553
554 struct i2o_pool in_msg; /* mempool for inbound messages */
555
191 unsigned int battery:1; /* Has a battery backup */ 556 unsigned int battery:1; /* Has a battery backup */
192 unsigned int io_alloc:1; /* An I/O resource was allocated */ 557 unsigned int io_alloc:1; /* An I/O resource was allocated */
193 unsigned int mem_alloc:1; /* A memory resource was allocated */ 558 unsigned int mem_alloc:1; /* A memory resource was allocated */
@@ -196,7 +561,6 @@ struct i2o_controller {
196 struct resource mem_resource; /* Mem resource allocated to the IOP */ 561 struct resource mem_resource; /* Mem resource allocated to the IOP */
197 562
198 struct device device; 563 struct device device;
199 struct class_device *classdev; /* I2O controller class device */
200 struct i2o_device *exec; /* Executive */ 564 struct i2o_device *exec; /* Executive */
201#if BITS_PER_LONG == 64 565#if BITS_PER_LONG == 64
202 spinlock_t context_list_lock; /* lock for context_list */ 566 spinlock_t context_list_lock; /* lock for context_list */
@@ -247,16 +611,13 @@ struct i2o_sys_tbl {
247extern struct list_head i2o_controllers; 611extern struct list_head i2o_controllers;
248 612
249/* Message functions */ 613/* Message functions */
250static inline u32 i2o_msg_get(struct i2o_controller *, 614static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
251 struct i2o_message __iomem **); 615extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
252extern u32 i2o_msg_get_wait(struct i2o_controller *, 616static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
253 struct i2o_message __iomem **, int); 617static inline int i2o_msg_post_wait(struct i2o_controller *,
254static inline void i2o_msg_post(struct i2o_controller *, u32); 618 struct i2o_message *, unsigned long);
255static inline int i2o_msg_post_wait(struct i2o_controller *, u32, 619extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
256 unsigned long); 620 unsigned long, struct i2o_dma *);
257extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
258 struct i2o_dma *);
259extern void i2o_msg_nop(struct i2o_controller *, u32);
260static inline void i2o_flush_reply(struct i2o_controller *, u32); 621static inline void i2o_flush_reply(struct i2o_controller *, u32);
261 622
262/* IOP functions */ 623/* IOP functions */
@@ -384,10 +745,10 @@ static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
384static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, 745static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
385 size_t size, 746 size_t size,
386 enum dma_data_direction direction, 747 enum dma_data_direction direction,
387 u32 __iomem ** sg_ptr) 748 u32 ** sg_ptr)
388{ 749{
389 u32 sg_flags; 750 u32 sg_flags;
390 u32 __iomem *mptr = *sg_ptr; 751 u32 *mptr = *sg_ptr;
391 dma_addr_t dma_addr; 752 dma_addr_t dma_addr;
392 753
393 switch (direction) { 754 switch (direction) {
@@ -405,16 +766,16 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
405 if (!dma_mapping_error(dma_addr)) { 766 if (!dma_mapping_error(dma_addr)) {
406#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 767#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
407 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 768 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
408 writel(0x7C020002, mptr++); 769 *mptr++ = cpu_to_le32(0x7C020002);
409 writel(PAGE_SIZE, mptr++); 770 *mptr++ = cpu_to_le32(PAGE_SIZE);
410 } 771 }
411#endif 772#endif
412 773
413 writel(sg_flags | size, mptr++); 774 *mptr++ = cpu_to_le32(sg_flags | size);
414 writel(i2o_dma_low(dma_addr), mptr++); 775 *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
415#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 776#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
416 if ((sizeof(dma_addr_t) > 4) && c->pae_support) 777 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
417 writel(i2o_dma_high(dma_addr), mptr++); 778 *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
418#endif 779#endif
419 *sg_ptr = mptr; 780 *sg_ptr = mptr;
420 } 781 }
@@ -439,10 +800,10 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
439static inline int i2o_dma_map_sg(struct i2o_controller *c, 800static inline int i2o_dma_map_sg(struct i2o_controller *c,
440 struct scatterlist *sg, int sg_count, 801 struct scatterlist *sg, int sg_count,
441 enum dma_data_direction direction, 802 enum dma_data_direction direction,
442 u32 __iomem ** sg_ptr) 803 u32 ** sg_ptr)
443{ 804{
444 u32 sg_flags; 805 u32 sg_flags;
445 u32 __iomem *mptr = *sg_ptr; 806 u32 *mptr = *sg_ptr;
446 807
447 switch (direction) { 808 switch (direction) {
448 case DMA_TO_DEVICE: 809 case DMA_TO_DEVICE:
@@ -461,19 +822,19 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
461 822
462#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 823#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
463 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { 824 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
464 writel(0x7C020002, mptr++); 825 *mptr++ = cpu_to_le32(0x7C020002);
465 writel(PAGE_SIZE, mptr++); 826 *mptr++ = cpu_to_le32(PAGE_SIZE);
466 } 827 }
467#endif 828#endif
468 829
469 while (sg_count-- > 0) { 830 while (sg_count-- > 0) {
470 if (!sg_count) 831 if (!sg_count)
471 sg_flags |= 0xC0000000; 832 sg_flags |= 0xC0000000;
472 writel(sg_flags | sg_dma_len(sg), mptr++); 833 *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
473 writel(i2o_dma_low(sg_dma_address(sg)), mptr++); 834 *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
474#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 835#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
475 if ((sizeof(dma_addr_t) > 4) && c->pae_support) 836 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
476 writel(i2o_dma_high(sg_dma_address(sg)), mptr++); 837 *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
477#endif 838#endif
478 sg++; 839 sg++;
479 } 840 }
@@ -563,6 +924,64 @@ static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
563 return 0; 924 return 0;
564}; 925};
565 926
927/*
 928 * i2o_pool_alloc - Allocate a slab cache and mempool
 929 * @pool: pointer to struct i2o_pool to write data into.
930 * @name: name which is used to identify cache
931 * @size: size of each object
932 * @min_nr: minimum number of objects
933 *
934 * First allocates a slab cache with name and size. Then allocates a
935 * mempool which uses the slab cache for allocation and freeing.
936 *
937 * Returns 0 on success or negative error code on failure.
938 */
939static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
940 size_t size, int min_nr)
941{
942 pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
943 if (!pool->name)
944 goto exit;
945 strcpy(pool->name, name);
946
947 pool->slab =
948 kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
949 NULL);
950 if (!pool->slab)
951 goto free_name;
952
953 pool->mempool =
954 mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
955 pool->slab);
956 if (!pool->mempool)
957 goto free_slab;
958
959 return 0;
960
961 free_slab:
962 kmem_cache_destroy(pool->slab);
963
964 free_name:
965 kfree(pool->name);
966
967 exit:
968 return -ENOMEM;
969};
970
971/*
 972 * i2o_pool_free - Free the slab cache and mempool again
 973 * @pool: pointer to struct i2o_pool which should be freed
974 *
975 * Note that you have to return all objects to the mempool again before
976 * calling i2o_pool_free().
977 */
978static inline void i2o_pool_free(struct i2o_pool *pool)
979{
980 mempool_destroy(pool->mempool);
981 kmem_cache_destroy(pool->slab);
982 kfree(pool->name);
983};
984
566/* I2O driver (OSM) functions */ 985/* I2O driver (OSM) functions */
567extern int i2o_driver_register(struct i2o_driver *); 986extern int i2o_driver_register(struct i2o_driver *);
568extern void i2o_driver_unregister(struct i2o_driver *); 987extern void i2o_driver_unregister(struct i2o_driver *);
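
The inbound-message mempool added to struct i2o_controller is set up with exactly these helpers. A plausible init-time sketch (the pool name is illustrative), sizing each object as one inbound frame plus the MFA word to match struct i2o_msg_mfa:

/* Sketch: set up the controller's inbound message pool, mirroring
 * what the IOP init code might do with the helpers above. */
static int sketch_init_msg_pool(struct i2o_controller *c)
{
	int rc;

	rc = i2o_pool_alloc(&c->in_msg, "i2o:in_msg",
			    I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
			    I2O_MSG_INPOOL_MIN);
	if (rc)
		return rc;
	/* ... on controller teardown: i2o_pool_free(&c->in_msg); */
	return 0;
}
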
@@ -638,39 +1057,89 @@ extern int i2o_exec_lct_get(struct i2o_controller *);
638#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj)) 1057#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
639 1058
640/** 1059/**
 1060 * i2o_msg_out_to_virt - Turn an I2O message into a virtual address
1061 * @c: controller
1062 * @m: message engine value
1063 *
1064 * Turn a receive message from an I2O controller bus address into
1065 * a Linux virtual address. The shared page frame is a linear block
1066 * so we simply have to shift the offset. This function does not
1067 * work for sender side messages as they are ioremap objects
1068 * provided by the I2O controller.
1069 */
1070static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
1071 u32 m)
1072{
1073 BUG_ON(m < c->out_queue.phys
1074 || m >= c->out_queue.phys + c->out_queue.len);
1075
1076 return c->out_queue.virt + (m - c->out_queue.phys);
1077};
1078
1079/**
1080 * i2o_msg_in_to_virt - Turn an I2O message to a virtual address
1081 * @c: controller
1082 * @m: message engine value
1083 *
1084 * Turn a send message from an I2O controller bus address into
1085 * a Linux virtual address. The shared page frame is a linear block
1086 * so we simply have to shift the offset. This function does not
1087 * work for receive side messages as they are kmalloc objects
1088 * in a different pool.
1089 */
1090static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
1091 i2o_controller *c,
1092 u32 m)
1093{
1094 return c->in_queue.virt + m;
1095};
1096
1097/**
641 * i2o_msg_get - obtain an I2O message from the IOP 1098 * i2o_msg_get - obtain an I2O message from the IOP
642 * @c: I2O controller 1099 * @c: I2O controller
643 * @msg: pointer to a I2O message pointer
644 * 1100 *
645 * This function tries to get a message slot. If no message slot is 1101 * This function tries to get a message frame. If no message frame is
646 * available, it does not wait for one (see also i2o_msg_get_wait). 1102 * available, it does not wait for one (see also i2o_msg_get_wait).
 1103 * The returned pointer to the message frame is not in I/O memory; it is
 1104 * allocated from a mempool. But because an MFA is allocated from the
 1105 * controller too, it is guaranteed that i2o_msg_post() will never fail.
647 * 1106 *
648 * On a success the message is returned and the pointer to the message is 1107 * On success a pointer to the message frame is returned. If the message
649 * set in msg. The returned message is the physical page frame offset 1108 * queue is empty, -EBUSY is returned; if no memory is available, -ENOMEM
650 * address from the read port (see the i2o spec). If no message is 1109 * is returned.
651 * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
652 */ 1110 */
653static inline u32 i2o_msg_get(struct i2o_controller *c, 1111static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
654 struct i2o_message __iomem ** msg)
655{ 1112{
656 u32 m = readl(c->in_port); 1113 struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
657 1114 if (!mmsg)
658 if (m != I2O_QUEUE_EMPTY) 1115 return ERR_PTR(-ENOMEM);
659 *msg = c->in_queue.virt + m; 1116
1117 mmsg->mfa = readl(c->in_port);
1118 if (mmsg->mfa == I2O_QUEUE_EMPTY) {
1119 mempool_free(mmsg, c->in_msg.mempool);
1120 return ERR_PTR(-EBUSY);
1121 }
660 1122
661 return m; 1123 return &mmsg->msg;
662}; 1124};
663 1125
664/** 1126/**
665 * i2o_msg_post - Post I2O message to I2O controller 1127 * i2o_msg_post - Post I2O message to I2O controller
666 * @c: I2O controller to which the message should be sent 1128 * @c: I2O controller to which the message should be sent
667 * @m: the message identifier 1129 * @msg: message returned by i2o_msg_get()
668 * 1130 *
669 * Post the message to the I2O controller. 1131 * Post the message to the I2O controller and return immediately.
670 */ 1132 */
671static inline void i2o_msg_post(struct i2o_controller *c, u32 m) 1133static inline void i2o_msg_post(struct i2o_controller *c,
1134 struct i2o_message *msg)
672{ 1135{
673 writel(m, c->in_port); 1136 struct i2o_msg_mfa *mmsg;
1137
1138 mmsg = container_of(msg, struct i2o_msg_mfa, msg);
1139 memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
1140 (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
1141 writel(mmsg->mfa, c->in_port);
1142 mempool_free(mmsg, c->in_msg.mempool);
674}; 1143};
675 1144
676/** 1145/**
@@ -685,62 +1154,66 @@ static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
685 * 1154 *
686 * Returns 0 on success or negative error code on failure. 1155 * Returns 0 on success or negative error code on failure.
687 */ 1156 */
688static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m, 1157static inline int i2o_msg_post_wait(struct i2o_controller *c,
1158 struct i2o_message *msg,
689 unsigned long timeout) 1159 unsigned long timeout)
690{ 1160{
691 return i2o_msg_post_wait_mem(c, m, timeout, NULL); 1161 return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
692}; 1162};
693 1163
694/** 1164/**
695 * i2o_flush_reply - Flush reply from I2O controller 1165 * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
696 * @c: I2O controller 1166 * @c: I2O controller from which the MFA was fetched
697 * @m: the message identifier 1167 * @mfa: MFA which should be returned
698 * 1168 *
699 * The I2O controller must be informed that the reply message is not needed 1169 * This function must be used for preserved messages, because i2o_msg_nop()
700 * anymore. If you forget to flush the reply, the message frame can't be 1170 * also returns the allocated memory back to the msg_pool mempool.
701 * used by the controller anymore and is therefore lost.
702 */ 1171 */
703static inline void i2o_flush_reply(struct i2o_controller *c, u32 m) 1172static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
704{ 1173{
705 writel(m, c->out_port); 1174 struct i2o_message __iomem *msg;
1175 u32 nop[3] = {
1176 THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
1177 I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
1178 0x00000000
1179 };
1180
1181 msg = i2o_msg_in_to_virt(c, mfa);
1182 memcpy_toio(msg, nop, sizeof(nop));
1183 writel(mfa, c->in_port);
706}; 1184};
707 1185
708/** 1186/**
709 * i2o_out_to_virt - Turn an I2O message to a virtual address 1187 * i2o_msg_nop - Returns a message which is not used
710 * @c: controller 1188 * @c: I2O controller from which the message was created
711 * @m: message engine value 1189 * @msg: message which should be returned
712 * 1190 *
713 * Turn a receive message from an I2O controller bus address into 1191 * If you fetch a message via i2o_msg_get, and can't use it, you must
714 * a Linux virtual address. The shared page frame is a linear block 1192 * return the message with this function. Otherwise the MFA is lost as well
715 * so we simply have to shift the offset. This function does not 1193 * as the allocated memory from the mempool.
716 * work for sender side messages as they are ioremap objects
717 * provided by the I2O controller.
718 */ 1194 */
719static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c, 1195static inline void i2o_msg_nop(struct i2o_controller *c,
720 u32 m) 1196 struct i2o_message *msg)
721{ 1197{
722 BUG_ON(m < c->out_queue.phys 1198 struct i2o_msg_mfa *mmsg;
723 || m >= c->out_queue.phys + c->out_queue.len); 1199 mmsg = container_of(msg, struct i2o_msg_mfa, msg);
724 1200
725 return c->out_queue.virt + (m - c->out_queue.phys); 1201 i2o_msg_nop_mfa(c, mmsg->mfa);
1202 mempool_free(mmsg, c->in_msg.mempool);
726}; 1203};
727 1204
728/** 1205/**
729 * i2o_msg_in_to_virt - Turn an I2O message to a virtual address 1206 * i2o_flush_reply - Flush reply from I2O controller
730 * @c: controller 1207 * @c: I2O controller
731 * @m: message engine value 1208 * @m: the message identifier
732 * 1209 *
733 * Turn a send message from an I2O controller bus address into 1210 * The I2O controller must be informed that the reply message is not needed
734 * a Linux virtual address. The shared page frame is a linear block 1211 * anymore. If you forget to flush the reply, the message frame can't be
735 * so we simply have to shift the offset. This function does not 1212 * used by the controller anymore and is therefore lost.
736 * work for receive side messages as they are kmalloc objects
737 * in a different pool.
738 */ 1213 */
739static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct 1214static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
740 i2o_controller *c,
741 u32 m)
742{ 1215{
743 return c->in_queue.virt + m; 1216 writel(m, c->out_port);
744}; 1217};
745 1218
746/* 1219/*
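
Under the reworked API the message-frame pointer is an ERR_PTR value on failure rather than I2O_QUEUE_EMPTY, and a frame that will not be posted must be handed back with i2o_msg_nop() so neither the MFA nor the mempool object leaks. The resulting calling pattern, sketched with the four-word header union (u.head[]) this header already uses:

/* Sketch: allocate, fill, post-and-wait; an early abort would
 * return the frame with i2o_msg_nop(c, msg) instead of posting. */
static int sketch_send_nop_like(struct i2o_controller *c)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 |
				     HOST_TID << 12 | ADAPTER_TID);

	return i2o_msg_post_wait(c, msg, I2O_TIMEOUT_MESSAGE_GET);
}
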
@@ -779,350 +1252,5 @@ extern void i2o_dump_message(struct i2o_message *);
779extern void i2o_dump_hrt(struct i2o_controller *c); 1252extern void i2o_dump_hrt(struct i2o_controller *c);
780extern void i2o_debug_state(struct i2o_controller *c); 1253extern void i2o_debug_state(struct i2o_controller *c);
781 1254
782/*
783 * Cache strategies
784 */
785
786/* The NULL strategy leaves everything up to the controller. This tends to be a
787 * pessimal but functional choice.
788 */
789#define CACHE_NULL 0
790/* Prefetch data when reading. We continually attempt to load the next 32 sectors
791 * into the controller cache.
792 */
793#define CACHE_PREFETCH 1
794/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
795 * into the controller cache. When an I/O is less <= 8K we assume its probably
796 * not sequential and don't prefetch (default)
797 */
798#define CACHE_SMARTFETCH 2
799/* Data is written to the cache and then out on to the disk. The I/O must be
800 * physically on the medium before the write is acknowledged (default without
801 * NVRAM)
802 */
803#define CACHE_WRITETHROUGH 17
804/* Data is written to the cache and then out on to the disk. The controller
805 * is permitted to write back the cache any way it wants. (default if battery
806 * backed NVRAM is present). It can be useful to set this for swap regardless of
807 * battery state.
808 */
809#define CACHE_WRITEBACK 18
810/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
811 * write large I/O's directly to disk bypassing the cache to avoid the extra
812 * memory copy hits. Small writes are writeback cached
813 */
814#define CACHE_SMARTBACK 19
815/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
816 * write large I/O's directly to disk bypassing the cache to avoid the extra
817 * memory copy hits. Small writes are writethrough cached. Suitable for devices
818 * lacking battery backup
819 */
820#define CACHE_SMARTTHROUGH 20
821
822/*
823 * Ioctl structures
824 */
825
826#define BLKI2OGRSTRAT _IOR('2', 1, int)
827#define BLKI2OGWSTRAT _IOR('2', 2, int)
828#define BLKI2OSRSTRAT _IOW('2', 3, int)
829#define BLKI2OSWSTRAT _IOW('2', 4, int)
830
831/*
832 * I2O Function codes
833 */
834
835/*
836 * Executive Class
837 */
838#define I2O_CMD_ADAPTER_ASSIGN 0xB3
839#define I2O_CMD_ADAPTER_READ 0xB2
840#define I2O_CMD_ADAPTER_RELEASE 0xB5
841#define I2O_CMD_BIOS_INFO_SET 0xA5
842#define I2O_CMD_BOOT_DEVICE_SET 0xA7
843#define I2O_CMD_CONFIG_VALIDATE 0xBB
844#define I2O_CMD_CONN_SETUP 0xCA
845#define I2O_CMD_DDM_DESTROY 0xB1
846#define I2O_CMD_DDM_ENABLE 0xD5
847#define I2O_CMD_DDM_QUIESCE 0xC7
848#define I2O_CMD_DDM_RESET 0xD9
849#define I2O_CMD_DDM_SUSPEND 0xAF
850#define I2O_CMD_DEVICE_ASSIGN 0xB7
851#define I2O_CMD_DEVICE_RELEASE 0xB9
852#define I2O_CMD_HRT_GET 0xA8
853#define I2O_CMD_ADAPTER_CLEAR 0xBE
854#define I2O_CMD_ADAPTER_CONNECT 0xC9
855#define I2O_CMD_ADAPTER_RESET 0xBD
856#define I2O_CMD_LCT_NOTIFY 0xA2
857#define I2O_CMD_OUTBOUND_INIT 0xA1
858#define I2O_CMD_PATH_ENABLE 0xD3
859#define I2O_CMD_PATH_QUIESCE 0xC5
860#define I2O_CMD_PATH_RESET 0xD7
861#define I2O_CMD_STATIC_MF_CREATE 0xDD
862#define I2O_CMD_STATIC_MF_RELEASE 0xDF
863#define I2O_CMD_STATUS_GET 0xA0
864#define I2O_CMD_SW_DOWNLOAD 0xA9
865#define I2O_CMD_SW_UPLOAD 0xAB
866#define I2O_CMD_SW_REMOVE 0xAD
867#define I2O_CMD_SYS_ENABLE 0xD1
868#define I2O_CMD_SYS_MODIFY 0xC1
869#define I2O_CMD_SYS_QUIESCE 0xC3
870#define I2O_CMD_SYS_TAB_SET 0xA3
871
872/*
873 * Utility Class
874 */
875#define I2O_CMD_UTIL_NOP 0x00
876#define I2O_CMD_UTIL_ABORT 0x01
877#define I2O_CMD_UTIL_CLAIM 0x09
878#define I2O_CMD_UTIL_RELEASE 0x0B
879#define I2O_CMD_UTIL_PARAMS_GET 0x06
880#define I2O_CMD_UTIL_PARAMS_SET 0x05
881#define I2O_CMD_UTIL_EVT_REGISTER 0x13
882#define I2O_CMD_UTIL_EVT_ACK 0x14
883#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
884#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
885#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
886#define I2O_CMD_UTIL_LOCK 0x17
887#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
888#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
889
890/*
891 * SCSI Host Bus Adapter Class
892 */
893#define I2O_CMD_SCSI_EXEC 0x81
894#define I2O_CMD_SCSI_ABORT 0x83
895#define I2O_CMD_SCSI_BUSRESET 0x27
896
897/*
898 * Bus Adapter Class
899 */
900#define I2O_CMD_BUS_ADAPTER_RESET 0x85
901#define I2O_CMD_BUS_RESET 0x87
902#define I2O_CMD_BUS_SCAN 0x89
903#define I2O_CMD_BUS_QUIESCE 0x8b
904
905/*
906 * Random Block Storage Class
907 */
908#define I2O_CMD_BLOCK_READ 0x30
909#define I2O_CMD_BLOCK_WRITE 0x31
910#define I2O_CMD_BLOCK_CFLUSH 0x37
911#define I2O_CMD_BLOCK_MLOCK 0x49
912#define I2O_CMD_BLOCK_MUNLOCK 0x4B
913#define I2O_CMD_BLOCK_MMOUNT 0x41
914#define I2O_CMD_BLOCK_MEJECT 0x43
915#define I2O_CMD_BLOCK_POWER 0x70
916
917#define I2O_CMD_PRIVATE 0xFF
918
919/* Command status values */
920
921#define I2O_CMD_IN_PROGRESS 0x01
922#define I2O_CMD_REJECTED 0x02
923#define I2O_CMD_FAILED 0x03
924#define I2O_CMD_COMPLETED 0x04
925
926/* I2O API function return values */
927
928#define I2O_RTN_NO_ERROR 0
929#define I2O_RTN_NOT_INIT 1
930#define I2O_RTN_FREE_Q_EMPTY 2
931#define I2O_RTN_TCB_ERROR 3
932#define I2O_RTN_TRANSACTION_ERROR 4
933#define I2O_RTN_ADAPTER_ALREADY_INIT 5
934#define I2O_RTN_MALLOC_ERROR 6
935#define I2O_RTN_ADPTR_NOT_REGISTERED 7
936#define I2O_RTN_MSG_REPLY_TIMEOUT 8
937#define I2O_RTN_NO_STATUS 9
938#define I2O_RTN_NO_FIRM_VER 10
939#define I2O_RTN_NO_LINK_SPEED 11
940
941/* Reply message status defines for all messages */
942
943#define I2O_REPLY_STATUS_SUCCESS 0x00
944#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
945#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
946#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
947#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
948#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
949#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
950#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
951#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
952#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
953#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
954#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
955
956/* Status codes and Error Information for Parameter functions */
957
958#define I2O_PARAMS_STATUS_SUCCESS 0x00
959#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
960#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
961#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
962#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
963#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
964#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
965#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
966#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION	0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD		0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD		0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP	0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR	0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR		0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR		0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE	0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS				0x0000
-#define I2O_DSC_BAD_KEY				0x0002
-#define I2O_DSC_TCL_ERROR			0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL		0x0004
-#define I2O_DSC_NO_SUCH_PAGE			0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT	0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD	0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE		0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION		0x000A
-#define I2O_DSC_DEVICE_LOCKED			0x000B
-#define I2O_DSC_DEVICE_RESET			0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION		0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS	0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS		0x000F
-#define I2O_DSC_INVALID_OFFSET			0x0010
-#define I2O_DSC_INVALID_PARAMETER		0x0011
-#define I2O_DSC_INVALID_REQUEST			0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS		0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE		0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL		0x0015
-#define I2O_DSC_MISSING_PARAMETER		0x0016
-#define I2O_DSC_TIMEOUT				0x0017
-#define I2O_DSC_UNKNOWN_ERROR			0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION		0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION		0x001A
-#define I2O_DSC_DEVICE_BUSY			0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE		0x001C
-
-/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
-   Status Codes.*/
-
-#define I2O_BSA_DSC_SUCCESS		0x0000
-#define I2O_BSA_DSC_MEDIA_ERROR		0x0001
-#define I2O_BSA_DSC_ACCESS_ERROR	0x0002
-#define I2O_BSA_DSC_DEVICE_FAILURE	0x0003
-#define I2O_BSA_DSC_DEVICE_NOT_READY	0x0004
-#define I2O_BSA_DSC_MEDIA_NOT_PRESENT	0x0005
-#define I2O_BSA_DSC_MEDIA_LOCKED	0x0006
-#define I2O_BSA_DSC_MEDIA_FAILURE	0x0007
-#define I2O_BSA_DSC_PROTOCOL_FAILURE	0x0008
-#define I2O_BSA_DSC_BUS_FAILURE		0x0009
-#define I2O_BSA_DSC_ACCESS_VIOLATION	0x000A
-#define I2O_BSA_DSC_WRITE_PROTECTED	0x000B
-#define I2O_BSA_DSC_DEVICE_RESET	0x000C
-#define I2O_BSA_DSC_VOLUME_CHANGED	0x000D
-#define I2O_BSA_DSC_TIMEOUT		0x000E
-
-/* FailureStatusCodes, Table 3-3 Message Failure Codes */
-
-#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED		0x81
-#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED		0x82
-#define I2O_FSC_TRANSPORT_CONGESTION			0x83
-#define I2O_FSC_TRANSPORT_FAILURE			0x84
-#define I2O_FSC_TRANSPORT_STATE_ERROR			0x85
-#define I2O_FSC_TRANSPORT_TIME_OUT			0x86
-#define I2O_FSC_TRANSPORT_ROUTING_FAILURE		0x87
-#define I2O_FSC_TRANSPORT_INVALID_VERSION		0x88
-#define I2O_FSC_TRANSPORT_INVALID_OFFSET		0x89
-#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS		0x8A
-#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL		0x8B
-#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE		0x8C
-#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID		0x8D
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID		0x8E
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT	0x8F
-#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE		0xFF
-
-/* Device Claim Types */
-#define	I2O_CLAIM_PRIMARY	0x01000000
-#define	I2O_CLAIM_MANAGEMENT	0x02000000
-#define	I2O_CLAIM_AUTHORIZED	0x03000000
-#define	I2O_CLAIM_SECONDARY	0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15	0x0001
-#define I2OVER20	0x0002
-
-/* Default is 1.5 */
-#define I2OVERSION	I2OVER15
-
-#define SGL_OFFSET_0	I2OVERSION
-#define SGL_OFFSET_4	(0x0040 | I2OVERSION)
-#define SGL_OFFSET_5	(0x0050 | I2OVERSION)
-#define SGL_OFFSET_6	(0x0060 | I2OVERSION)
-#define SGL_OFFSET_7	(0x0070 | I2OVERSION)
-#define SGL_OFFSET_8	(0x0080 | I2OVERSION)
-#define SGL_OFFSET_9	(0x0090 | I2OVERSION)
-#define SGL_OFFSET_10	(0x00A0 | I2OVERSION)
-#define SGL_OFFSET_11	(0x00B0 | I2OVERSION)
-#define SGL_OFFSET_12	(0x00C0 | I2OVERSION)
-#define SGL_OFFSET(x)	(((x)<<4) | I2OVERSION)
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-#define TRL_SINGLE_FIXED_LENGTH		0x00
-#define TRL_SINGLE_VARIABLE_LENGTH	0x40
-#define TRL_MULTIPLE_FIXED_LENGTH	0x80
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC	0x0100
-#define MSG_64BIT_CNTXT	0x0200
-#define MSG_MULTI_TRANS	0x1000
-#define MSG_FAIL	0x2000
-#define MSG_FINAL	0x4000
-#define MSG_REPLY	0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE	0x00030000
-#define FOUR_WORD_MSG_SIZE	0x00040000
-#define FIVE_WORD_MSG_SIZE	0x00050000
-#define SIX_WORD_MSG_SIZE	0x00060000
-#define SEVEN_WORD_MSG_SIZE	0x00070000
-#define EIGHT_WORD_MSG_SIZE	0x00080000
-#define NINE_WORD_MSG_SIZE	0x00090000
-#define TEN_WORD_MSG_SIZE	0x000A0000
-#define ELEVEN_WORD_MSG_SIZE	0x000B0000
-#define I2O_MESSAGE_SIZE(x)	((x)<<16)
-
-/* special TID assignments */
-#define ADAPTER_TID	0
-#define HOST_TID	1
-
-/* outbound queue defines */
-#define I2O_MAX_OUTBOUND_MSG_FRAMES	128
-#define I2O_OUTBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */
-
-#define I2O_POST_WAIT_OK	0
-#define I2O_POST_WAIT_TIMEOUT	-ETIMEDOUT
-
-#define I2O_CONTEXT_LIST_MIN_LENGTH	15
-#define I2O_CONTEXT_LIST_USED		0x01
-#define I2O_CONTEXT_LIST_DELETED	0x02
-
-/* timeouts */
-#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE	15
-#define I2O_TIMEOUT_MESSAGE_GET		5
-#define I2O_TIMEOUT_RESET		30
-#define I2O_TIMEOUT_STATUS_GET		5
-#define I2O_TIMEOUT_LCT_GET		360
-#define I2O_TIMEOUT_SCSI_SCB_ABORT	240
-
-/* retries */
-#define I2O_HRT_GET_TRIES	3
-#define I2O_LCT_GET_TRIES	3
-
-/* defines for max_sectors and max_phys_segments */
-#define I2O_MAX_SECTORS		1024
-#define I2O_MAX_SECTORS_LIMITED	256
-#define I2O_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS
-
 #endif /* __KERNEL__ */
 #endif /* _I2O_H */
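
The message-header helpers in this block pack two fields of an I2O request's first word: I2O_MESSAGE_SIZE() puts the word count in the top halfword, and SGL_OFFSET() shifts the scatter-gather list offset into the nibble next to the version code. A minimal sketch of composing that word; the variable names are illustrative, not from the header:

	/* a six-word message whose SGL starts at word 4 */
	u32 head0 = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;

	/* the same value via the parameterized forms */
	u32 head0_generic = I2O_MESSAGE_SIZE(6) | SGL_OFFSET(4);
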
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f04ba20712a2..6c5d4c898ccb 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -12,7 +12,7 @@
 #include <linux/config.h>
 #include <linux/smp.h>
 
-#if !defined(CONFIG_ARCH_S390)
+#if !defined(CONFIG_S390)
 
 #include <linux/linkage.h>
 #include <linux/cache.h>
@@ -221,6 +221,17 @@ extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
 extern int can_request_irq(unsigned int irq, unsigned long irqflags);
 
 extern void init_irq_proc(void);
+
+#ifdef CONFIG_AUTO_IRQ_AFFINITY
+extern int select_smp_affinity(unsigned int irq);
+#else
+static inline int
+select_smp_affinity(unsigned int irq)
+{
+	return 1;
+}
+#endif
+
 #endif
 
 extern hw_irq_controller no_irq_type;	/* needed in every arch ? */
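
The new hook lets an architecture that defines CONFIG_AUTO_IRQ_AFFINITY (alpha, in this series) pick an initial CPU for a freshly requested IRQ, while every other architecture gets the inline stub so callers need no ifdefs. A hedged sketch of the caller side; the wrapping function is illustrative:

	/* sketch: generic setup code can invoke the hook unconditionally */
	static void example_setup_irq(unsigned int irq)
	{
		/* arch implementation under CONFIG_AUTO_IRQ_AFFINITY,
		 * inline stub returning 1 otherwise */
		select_smp_affinity(irq);
	}
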
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index dcde7adfdce5..558cb4c26ec9 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -498,6 +498,12 @@ struct transaction_s
 	struct journal_head	*t_checkpoint_list;
 
 	/*
+	 * Doubly-linked circular list of all buffers submitted for IO while
+	 * checkpointing. [j_list_lock]
+	 */
+	struct journal_head	*t_checkpoint_io_list;
+
+	/*
 	 * Doubly-linked circular list of temporary buffers currently undergoing
 	 * IO in the log [j_list_lock]
 	 */
@@ -843,7 +849,7 @@ extern void journal_commit_transaction(journal_t *);
 
 /* Checkpoint list management */
 int __journal_clean_checkpoint_list(journal_t *journal);
-void __journal_remove_checkpoint(struct journal_head *);
+int __journal_remove_checkpoint(struct journal_head *);
 void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 /* Buffer IO */
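
Returning int from __journal_remove_checkpoint() lets a caller walking t_checkpoint_list (or the new t_checkpoint_io_list) learn that it just removed the last checkpoint buffer and the transaction itself was dropped. A hedged sketch of the resulting caller shape:

	/* sketch: stop scanning once the transaction itself is gone */
	if (__journal_remove_checkpoint(jh)) {
		/* last buffer removed: the transaction and its lists
		 * were freed, so do not touch them again */
		goto out;
	}
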
diff --git a/include/linux/key.h b/include/linux/key.h
index 53513a3be53b..4d189e51bc6c 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -193,14 +193,6 @@ struct key_type {
 	 */
 	int (*instantiate)(struct key *key, const void *data, size_t datalen);
 
-	/* duplicate a key of this type (optional)
-	 * - the source key will be locked against change
-	 * - the new description will be attached
-	 * - the quota will have been adjusted automatically from
-	 *   source->quotalen
-	 */
-	int (*duplicate)(struct key *key, const struct key *source);
-
 	/* update a key of this type (optional)
 	 * - this method should call key_payload_reserve() to recalculate the
 	 *   quota consumption
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6db2c0845731..a43c95f8f968 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -124,6 +124,8 @@ enum {
 	ATA_FLAG_DEBUGMSG	= (1 << 10),
 	ATA_FLAG_NO_ATAPI	= (1 << 11), /* No ATAPI support */
 
+	ATA_FLAG_SUSPENDED	= (1 << 12), /* port is suspended */
+
 	ATA_QCFLAG_ACTIVE	= (1 << 1), /* cmd not yet ack'd to scsi lyer */
 	ATA_QCFLAG_SG		= (1 << 3), /* have s/g table? */
 	ATA_QCFLAG_SINGLE	= (1 << 4), /* no s/g, just a single buffer */
@@ -436,6 +438,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
 extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 			     unsigned int n_ports);
 extern void ata_pci_remove_one (struct pci_dev *pdev);
+extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
+extern int ata_pci_device_resume(struct pci_dev *pdev);
 #endif /* CONFIG_PCI */
 extern int ata_device_add(const struct ata_probe_ent *ent);
 extern void ata_host_set_remove(struct ata_host_set *host_set);
@@ -445,6 +449,10 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn
 extern int ata_scsi_error(struct Scsi_Host *host);
 extern int ata_scsi_release(struct Scsi_Host *host);
 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+extern int ata_scsi_device_resume(struct scsi_device *);
+extern int ata_scsi_device_suspend(struct scsi_device *);
+extern int ata_device_resume(struct ata_port *, struct ata_device *);
+extern int ata_device_suspend(struct ata_port *, struct ata_device *);
 extern int ata_ratelimit(void);
 
 /*
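
With these exports a PCI ATA low-level driver can opt into suspend/resume by pointing its scsi_host_template and pci_driver at the new helpers. A minimal sketch; the driver name and ID table are placeholders, not from this patch:

	static struct scsi_host_template example_sht = {
		/* ... usual libata fields ... */
		.resume		= ata_scsi_device_resume,
		.suspend	= ata_scsi_device_suspend,
	};

	static struct pci_driver example_pci_driver = {
		.name		= "example_ata",	/* placeholder */
		.id_table	= example_pci_tbl,	/* placeholder */
		.probe		= ata_pci_init_one,
		.remove		= ata_pci_remove_one,
		.suspend	= ata_pci_device_suspend,
		.resume		= ata_pci_device_resume,
	};
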
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8b67cf837ca9..ed00b278cb93 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -110,14 +110,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
 /*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
-		unsigned long addr);
-
-/*
  * Tree of shared policies for a shared memory region.
  * Maintain the policies in a pseudo mm that contains vmas. The vmas
  * carry the policy. As a special twist the pseudo mm is indexed in pages, not
@@ -156,6 +148,16 @@ extern void numa_default_policy(void);
 extern void numa_policy_init(void);
 extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
 extern struct mempolicy default_policy;
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr);
+
+extern int policy_zone;
+
+static inline void check_highest_zone(int k)
+{
+	if (k > policy_zone)
+		policy_zone = k;
+}
 
 #else
 
@@ -182,17 +184,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
 	return NULL;
 }
 
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
-	return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
-	return 1;
-}
-
 struct shared_policy {};
 
 static inline int mpol_set_shared_policy(struct shared_policy *info,
@@ -232,6 +223,15 @@ static inline void numa_policy_rebind(const nodemask_t *old,
 {
 }
 
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr)
+{
+	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+}
+
+static inline void check_highest_zone(int k)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif /* __KERNEL__ */
 
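
huge_zonelist() gives hugetlb a real, policy-ordered zonelist in place of the removed node-number helpers, so allocation paths can walk it like any other allocation. A hedged sketch of a caller; the function name is hypothetical:

	static struct page *example_dequeue_huge_page(struct vm_area_struct *vma,
						      unsigned long addr)
	{
		struct zonelist *zonelist = huge_zonelist(vma, addr);
		struct zone **z;

		for (z = zonelist->zones; *z; z++) {
			/* ... try to take a free huge page from *z ... */
		}
		return NULL;
	}
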
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a06a84d347fb..bc01fff3aa01 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -634,14 +634,38 @@ struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 #define shmem_nopage filemap_nopage
-#define shmem_lock(a, b, c) ({0;})	/* always in memory, no need to lock */
-#define shmem_set_policy(a, b) (0)
-#define shmem_get_policy(a, b) (NULL)
+
+static inline int shmem_lock(struct file *file, int lock,
+			     struct user_struct *user)
+{
+	return 0;
+}
+
+static inline int shmem_set_policy(struct vm_area_struct *vma,
+				   struct mempolicy *new)
+{
+	return 0;
+}
+
+static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+						 unsigned long addr)
+{
+	return NULL;
+}
 #endif
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
 
 int shmem_zero_setup(struct vm_area_struct *);
 
+#ifndef CONFIG_MMU
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+					     unsigned long addr,
+					     unsigned long len,
+					     unsigned long pgoff,
+					     unsigned long flags);
+#endif
+
 static inline int can_do_mlock(void)
 {
 	if (capable(CAP_IPC_LOCK))
@@ -690,14 +714,31 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
 
-static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+			unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			int write_access)
 {
-	return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
+	return __handle_mm_fault(mm, vma, address, write_access) &
+				(~VM_FAULT_WRITE);
 }
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			int write_access)
+{
+	/* should never happen if there's no MMU */
+	BUG();
+	return VM_FAULT_SIGBUS;
+}
+#endif
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
@@ -896,6 +937,8 @@ extern unsigned long do_brk(unsigned long, unsigned long);
 /* filemap.c */
 extern unsigned long page_unuse(struct page *);
 extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+				       loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
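
truncate_inode_pages_range() generalizes the old call to an [lstart, lend] window; the whole-file case can be expressed in terms of it. A sketch of the equivalence, assuming the unbounded end is spelled (loff_t)-1 as the range API suggests:

	/* sketch: whole-file truncation as a degenerate range */
	static void example_truncate_all(struct address_space *mapping,
					 loff_t lstart)
	{
		truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
	}
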
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f22090df7dd..c34f4a2c62f8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,7 +46,6 @@ struct zone_padding {
 
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
-	int low;		/* low watermark, refill needed */
 	int high;		/* high watermark, emptying needed */
 	int batch;		/* chunk size for buddy add/remove */
 	struct list_head list;	/* the list of pages */
@@ -389,6 +388,11 @@ static inline struct zone *next_zone(struct zone *zone)
 #define for_each_zone(zone) \
 	for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
 
+static inline int populated_zone(struct zone *zone)
+{
+	return (!!zone->present_pages);
+}
+
 static inline int is_highmem_idx(int idx)
 {
 	return (idx == ZONE_HIGHMEM);
@@ -398,6 +402,7 @@ static inline int is_normal_idx(int idx)
 {
 	return (idx == ZONE_NORMAL);
 }
+
 /**
  * is_highmem - helper function to quickly check if a struct zone is a
  *              highmem zone or not.  This is an attempt to keep references
@@ -414,6 +419,16 @@ static inline int is_normal(struct zone *zone)
 	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
 }
 
+static inline int is_dma32(struct zone *zone)
+{
+	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+}
+
+static inline int is_dma(struct zone *zone)
+{
+	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+}
+
 /* These two functions are used to setup the per zone pages min values */
 struct ctl_table;
 struct file;
@@ -435,7 +450,6 @@ extern struct pglist_data contig_page_data;
 #define NODE_DATA(nid)		(&contig_page_data)
 #define NODE_MEM_MAP(nid)	mem_map
 #define MAX_NODES_SHIFT		1
-#define pfn_to_nid(pfn)		(0)
 
 #else /* CONFIG_NEED_MULTIPLE_NODES */
 
@@ -470,6 +484,10 @@ extern struct pglist_data contig_page_data;
 #define early_pfn_to_nid(nid)	(0UL)
 #endif
 
+#ifdef CONFIG_FLATMEM
+#define pfn_to_nid(pfn)		(0)
+#endif
+
 #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
 #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
 
@@ -564,11 +582,6 @@ static inline int valid_section_nr(unsigned long nr)
 	return valid_section(__nr_to_section(nr));
 }
 
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
 static inline struct mem_section *__pfn_to_section(unsigned long pfn)
 {
 	return __nr_to_section(pfn_to_section_nr(pfn));
@@ -598,13 +611,14 @@ static inline int pfn_valid(unsigned long pfn)
  * this restriction.
  */
 #ifdef CONFIG_NUMA
-#define pfn_to_nid		early_pfn_to_nid
-#endif
-
-#define pfn_to_pgdat(pfn)			\
+#define pfn_to_nid(pfn)							\
 ({									\
-	NODE_DATA(pfn_to_nid(pfn));		\
+	unsigned long __pfn_to_nid_pfn = (pfn);				\
+	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
 })
+#else
+#define pfn_to_nid(pfn)		(0)
+#endif
 
 #define early_pfn_valid(pfn)	pfn_valid(pfn)
 void sparse_init(void);
@@ -613,12 +627,6 @@ void sparse_init(void);
 #define sparse_index_init(_sec, _nid)  do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
-#else
-#define early_pfn_in_nid(pfn, nid)	(1)
-#endif
-
 #ifndef early_pfn_valid
 #define early_pfn_valid(pfn)	(1)
 #endif
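
populated_zone() replaces open-coded zone->present_pages tests, and with ZONE_DMA32 in the tree iterators generally want to skip zones that hold no memory. A short sketch of the intended pattern; the function is illustrative:

	/* sketch: visit only zones that actually have memory */
	static unsigned long example_present_pages(void)
	{
		struct zone *zone;
		unsigned long total = 0;

		for_each_zone(zone) {
			if (!populated_zone(zone))
				continue;
			total += zone->present_pages;
		}
		return total;
	}
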
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 090e210e98f0..f95d51fae733 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -37,18 +37,26 @@ enum {
 /* userspace doesn't need the nbd_device structure */
 #ifdef __KERNEL__
 
+#include <linux/wait.h>
+
 /* values for flags field */
 #define NBD_READ_ONLY 0x0001
 #define NBD_WRITE_NOCHK 0x0002
 
+struct request;
+
 struct nbd_device {
 	int flags;
 	int harderror;		/* Code of hard error */
 	struct socket * sock;
 	struct file * file;	/* If == NULL, device is not ready, yet */
 	int magic;
+
 	spinlock_t queue_lock;
 	struct list_head queue_head;/* Requests are added here... */
+	struct request *active_req;
+	wait_queue_head_t active_wq;
+
 	struct semaphore tx_lock;
 	struct gendisk *disk;
 	int blksize;
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h
index 130d4f588a37..3f4f7142bbe3 100644
--- a/include/linux/nfsd/xdr.h
+++ b/include/linux/nfsd/xdr.h
@@ -88,10 +88,12 @@ struct nfsd_readdirargs {
 
 struct nfsd_attrstat {
 	struct svc_fh		fh;
+	struct kstat		stat;
 };
 
 struct nfsd_diropres  {
 	struct svc_fh		fh;
+	struct kstat		stat;
 };
 
 struct nfsd_readlinkres {
@@ -101,6 +103,7 @@ struct nfsd_readlinkres {
 struct nfsd_readres {
 	struct svc_fh		fh;
 	unsigned long		count;
+	struct kstat		stat;
 };
 
 struct nfsd_readdirres {
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h
index 3c2a71b43bac..a4322741f8b9 100644
--- a/include/linux/nfsd/xdr3.h
+++ b/include/linux/nfsd/xdr3.h
@@ -126,6 +126,7 @@ struct nfsd3_setaclargs {
 struct nfsd3_attrstat {
 	__u32			status;
 	struct svc_fh		fh;
+	struct kstat		stat;
 };
 
 /* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 343083fec258..d52999c43336 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -79,13 +79,23 @@
 /*
  * Global page accounting.  One instance per CPU.  Only unsigned longs are
  * allowed.
+ *
+ * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
+ *   any time safely (which protects the instance from modification by
+ *   interrupt.
+ * - The __xxx_page_state variants can be used safely when interrupts are
+ *   disabled.
+ * - The __xxx_page_state variants can be used if the field is only
+ *   modified from process context, or only modified from interrupt context.
+ *   In this case, the field should be commented here.
  */
 struct page_state {
 	unsigned long nr_dirty;		/* Dirty writeable pages */
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
 	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_mapped;	/* mapped into pagetables */
+	unsigned long nr_mapped;	/* mapped into pagetables.
+					 * only modified from process context */
 	unsigned long nr_slab;		/* In slab */
 #define GET_PAGE_STATE_LAST nr_slab
 
@@ -97,32 +107,40 @@ struct page_state {
 	unsigned long pgpgout;		/* Disk writes */
 	unsigned long pswpin;		/* swap reads */
 	unsigned long pswpout;		/* swap writes */
-	unsigned long pgalloc_high;	/* page allocations */
 
+	unsigned long pgalloc_high;	/* page allocations */
 	unsigned long pgalloc_normal;
+	unsigned long pgalloc_dma32;
 	unsigned long pgalloc_dma;
+
 	unsigned long pgfree;		/* page freeings */
 	unsigned long pgactivate;	/* pages moved inactive->active */
 	unsigned long pgdeactivate;	/* pages moved active->inactive */
 
 	unsigned long pgfault;		/* faults (major+minor) */
 	unsigned long pgmajfault;	/* faults (major only) */
+
 	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
 	unsigned long pgrefill_normal;
+	unsigned long pgrefill_dma32;
 	unsigned long pgrefill_dma;
 
 	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
 	unsigned long pgsteal_normal;
+	unsigned long pgsteal_dma32;
 	unsigned long pgsteal_dma;
+
 	unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
 	unsigned long pgscan_kswapd_normal;
-
+	unsigned long pgscan_kswapd_dma32;
 	unsigned long pgscan_kswapd_dma;
+
 	unsigned long pgscan_direct_high;/* total highmem pages scanned */
 	unsigned long pgscan_direct_normal;
+	unsigned long pgscan_direct_dma32;
 	unsigned long pgscan_direct_dma;
-	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
 
+	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
 	unsigned long slabs_scanned;	/* slab objects scanned */
 	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
 	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
@@ -136,31 +154,54 @@ struct page_state {
 extern void get_page_state(struct page_state *ret);
 extern void get_page_state_node(struct page_state *ret, int node);
 extern void get_full_page_state(struct page_state *ret);
-extern unsigned long __read_page_state(unsigned long offset);
-extern void __mod_page_state(unsigned long offset, unsigned long delta);
+extern unsigned long read_page_state_offset(unsigned long offset);
+extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
+extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 
 #define read_page_state(member) \
-	__read_page_state(offsetof(struct page_state, member))
+	read_page_state_offset(offsetof(struct page_state, member))
 
 #define mod_page_state(member, delta)	\
-	__mod_page_state(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member)	mod_page_state(member, 1UL)
-#define dec_page_state(member)	mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta) mod_page_state(member, (delta))
-#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
-
-#define mod_page_state_zone(zone, member, delta)			\
-	do {								\
-		unsigned offset;					\
-		if (is_highmem(zone))					\
-			offset = offsetof(struct page_state, member##_high); \
-		else if (is_normal(zone))				\
-			offset = offsetof(struct page_state, member##_normal); \
-		else							\
-			offset = offsetof(struct page_state, member##_dma); \
-		__mod_page_state(offset, (delta));			\
-	} while (0)
+	mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define __mod_page_state(member, delta)	\
+	__mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define inc_page_state(member)		mod_page_state(member, 1UL)
+#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta)	mod_page_state(member, (delta))
+#define sub_page_state(member,delta)	mod_page_state(member, 0UL - (delta))
+
+#define __inc_page_state(member)	__mod_page_state(member, 1UL)
+#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
+#define __add_page_state(member,delta)	__mod_page_state(member, (delta))
+#define __sub_page_state(member,delta)	__mod_page_state(member, 0UL - (delta))
+
+#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
+
+#define state_zone_offset(zone, member)					\
+({									\
+	unsigned offset;						\
+	if (is_highmem(zone))						\
+		offset = offsetof(struct page_state, member##_high);	\
+	else if (is_normal(zone))					\
+		offset = offsetof(struct page_state, member##_normal);	\
+	else if (is_dma32(zone))					\
+		offset = offsetof(struct page_state, member##_dma32);	\
+	else								\
+		offset = offsetof(struct page_state, member##_dma);	\
+	offset;								\
+})
+
+#define __mod_page_state_zone(zone, member, delta)			\
+	do {								\
+		__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+	} while (0)
+
+#define mod_page_state_zone(zone, member, delta)			\
+	do {								\
+		mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+	} while (0)
 
 /*
  * Manipulation of page state flags
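
The split yields an irq-safe family (mod_page_state and friends) and a cheaper __-prefixed family for contexts that already run with interrupts disabled, per the new comment block above. A hedged sketch of the intended usage; the function and the exact counters touched are illustrative:

	/* sketch: cheap counter updates when interrupts are known off */
	static void example_account_free(struct zone *zone, int count)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* irqs off: the __ variants are safe and avoid extra
		 * interrupt disabling */
		__inc_page_state(pgfree);
		__mod_page_state_zone(zone, pgsteal, count);
		local_irq_restore(flags);
	}
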
diff --git a/include/linux/parport.h b/include/linux/parport.h
index d2a4d9e1e6d1..f7ff0b0c4031 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -242,7 +242,6 @@ enum ieee1284_phase {
 	IEEE1284_PH_FWD_IDLE,
 	IEEE1284_PH_TERMINATE,
 	IEEE1284_PH_NEGOTIATION,
-	IEEE1284_PH_HBUSY_DNA,
 	IEEE1284_PH_REV_IDLE,
 	IEEE1284_PH_HBUSY_DAVAIL,
 	IEEE1284_PH_REV_DATA,
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index c6f762470879..1cc0f6b1a49a 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -79,13 +79,13 @@ static __inline__ unsigned char parport_pc_read_data(struct parport *p)
 }
 
 #ifdef DEBUG_PARPORT
-extern __inline__ void dump_parport_state (char *str, struct parport *p)
+static inline void dump_parport_state (char *str, struct parport *p)
 {
 	/* here's hoping that reading these ports won't side-effect anything underneath */
 	unsigned char ecr = inb (ECONTROL (p));
 	unsigned char dcr = inb (CONTROL (p));
 	unsigned char dsr = inb (STATUS (p));
-	static char *ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
+	static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
 	const struct parport_pc_private *priv = p->physport->private_data;
 	int i;
 
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4f01710485cd..24db7248301a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -394,6 +394,13 @@
 #define PCI_DEVICE_ID_NS_87410		0xd001
 #define PCI_DEVICE_ID_NS_CS5535_IDE	0x002d
 
+#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE	0x0028
+#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE	0x002b
+#define PCI_DEVICE_ID_NS_CS5535_IDE		0x002d
+#define PCI_DEVICE_ID_NS_CS5535_AUDIO		0x002e
+#define PCI_DEVICE_ID_NS_CS5535_USB		0x002f
+#define PCI_DEVICE_ID_NS_CS5535_VIDEO		0x0030
+
 #define PCI_VENDOR_ID_TSENG		0x100c
 #define PCI_DEVICE_ID_TSENG_W32P_2	0x3202
 #define PCI_DEVICE_ID_TSENG_W32P_b	0x3205
@@ -496,6 +503,9 @@
 
 #define PCI_DEVICE_ID_AMD_CS5536_IDE	0x209A
 
+#define PCI_DEVICE_ID_AMD_LX_VIDEO	0x2081
+#define PCI_DEVICE_ID_AMD_LX_AES	0x2082
+
 #define PCI_VENDOR_ID_TRIDENT		0x1023
 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX	0x2000
 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX	0x2001
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 13e7c4b62367..b6e0bcad84e1 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -71,8 +71,8 @@
  */
 #define MD_PATCHLEVEL_VERSION           3
 
-extern int register_md_personality (int p_num, mdk_personality_t *p);
-extern int unregister_md_personality (int p_num);
+extern int register_md_personality (struct mdk_personality *p);
+extern int unregister_md_personality (struct mdk_personality *p);
 extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev),
 				mddev_t *mddev, const char *name);
 extern void md_unregister_thread (mdk_thread_t *thread);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 46629a275ba9..617b9506c760 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -18,62 +18,19 @@
 /* and dm-bio-list.h is not under include/linux because.... ??? */
 #include "../../../drivers/md/dm-bio-list.h"
 
-#define MD_RESERVED       0UL
-#define LINEAR            1UL
-#define RAID0             2UL
-#define RAID1             3UL
-#define RAID5             4UL
-#define TRANSLUCENT       5UL
-#define HSM               6UL
-#define MULTIPATH         7UL
-#define RAID6		  8UL
-#define RAID10		  9UL
-#define FAULTY		  10UL
-#define MAX_PERSONALITY   11UL
-
 #define	LEVEL_MULTIPATH		(-4)
 #define	LEVEL_LINEAR		(-1)
 #define	LEVEL_FAULTY		(-5)
 
+/* we need a value for 'no level specified' and 0
+ * means 'raid0', so we need something else.  This is
+ * for internal use only
+ */
+#define	LEVEL_NONE		(-1000000)
+
 #define MaxSector (~(sector_t)0)
 #define MD_THREAD_NAME_MAX 14
 
-static inline int pers_to_level (int pers)
-{
-	switch (pers) {
-		case FAULTY:		return LEVEL_FAULTY;
-		case MULTIPATH:		return LEVEL_MULTIPATH;
-		case HSM:		return -3;
-		case TRANSLUCENT:	return -2;
-		case LINEAR:		return LEVEL_LINEAR;
-		case RAID0:		return 0;
-		case RAID1:		return 1;
-		case RAID5:		return 5;
-		case RAID6:		return 6;
-		case RAID10:		return 10;
-	}
-	BUG();
-	return MD_RESERVED;
-}
-
-static inline int level_to_pers (int level)
-{
-	switch (level) {
-		case LEVEL_FAULTY: return FAULTY;
-		case LEVEL_MULTIPATH: return MULTIPATH;
-		case -3: return HSM;
-		case -2: return TRANSLUCENT;
-		case LEVEL_LINEAR: return LINEAR;
-		case 0: return RAID0;
-		case 1: return RAID1;
-		case 4:
-		case 5: return RAID5;
-		case 6: return RAID6;
-		case 10: return RAID10;
-	}
-	return MD_RESERVED;
-}
-
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
@@ -138,14 +95,16 @@ struct mdk_rdev_s
 	atomic_t	read_errors;	/* number of consecutive read errors that
 					 * we have tried to ignore.
 					 */
+	atomic_t	corrected_errors; /* number of corrected read errors,
+					   * for reporting to userspace and storing
+					   * in superblock.
+					   */
 };
 
-typedef struct mdk_personality_s mdk_personality_t;
-
 struct mddev_s
 {
 	void				*private;
-	mdk_personality_t		*pers;
+	struct mdk_personality		*pers;
 	dev_t				unit;
 	int				md_minor;
 	struct list_head 		disks;
@@ -164,6 +123,7 @@ struct mddev_s
 	int				chunk_size;
 	time_t				ctime, utime;
 	int				level, layout;
+	char				clevel[16];
 	int				raid_disks;
 	int				max_disks;
 	sector_t			size; /* used size of component devices */
@@ -183,6 +143,11 @@ struct mddev_s
 	sector_t			resync_mismatches; /* count of sectors where
 							    * parity/replica mismatch found
 							    */
+	/* if zero, use the system-wide default */
+	int				sync_speed_min;
+	int				sync_speed_max;
+
+	int				ok_start_degraded;
 	/* recovery/resync flags
 	 * NEEDED:   we might need to start a resync/recover
 	 * RUNNING:  a thread is running, or about to be started
@@ -265,9 +230,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
 	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
 }
 
-struct mdk_personality_s
+struct mdk_personality
 {
 	char *name;
+	int level;
+	struct list_head list;
 	struct module *owner;
 	int (*make_request)(request_queue_t *q, struct bio *bio);
 	int (*run)(mddev_t *mddev);
@@ -305,8 +272,6 @@ static inline char * mdname (mddev_t * mddev)
 	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
 }
 
-extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
-
 /*
  * iterates through some rdev ringlist. It's safe to remove the
  * current 'rdev'. Dont touch 'tmp' though.
@@ -366,5 +331,10 @@ do { \
 	__wait_event_lock_irq(wq, condition, lock, cmd);	\
 } while (0)
 
+static inline void safe_put_page(struct page *p)
+{
+	if (p) put_page(p);
+}
+
 #endif
 
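
Personalities are now looked up by the level stored in the struct and kept on a list, instead of being indexed by the removed *_PERSONALITY numbers. A sketch of how a personality module might register itself under the new scheme; the field values follow raid0 for illustration and the hook names are placeholders:

	static struct mdk_personality example_personality = {
		.name		= "raid0",
		.level		= 0,			/* matched against mddev->level */
		.owner		= THIS_MODULE,
		.make_request	= example_make_request,	/* illustrative */
		.run		= example_run,		/* illustrative */
	};

	static int __init example_init(void)
	{
		return register_md_personality(&example_personality);
	}

	static void __exit example_exit(void)
	{
		unregister_md_personality(&example_personality);
	}
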
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
index 292b98f2b408..9d5494aaac0f 100644
--- a/include/linux/raid/raid1.h
+++ b/include/linux/raid/raid1.h
@@ -45,6 +45,8 @@ struct r1_private_data_s {
 
 	spinlock_t		resync_lock;
 	int			nr_pending;
+	int			nr_waiting;
+	int			nr_queued;
 	int			barrier;
 	sector_t		next_resync;
 	int			fullsync;  /* set to 1 if a full sync is needed,
@@ -52,11 +54,12 @@ struct r1_private_data_s {
 					    * Cleared when a sync completes.
 					    */
 
-	wait_queue_head_t	wait_idle;
-	wait_queue_head_t	wait_resume;
+	wait_queue_head_t	wait_barrier;
 
 	struct pool_info	*poolinfo;
 
+	struct page		*tmppage;
+
 	mempool_t *r1bio_pool;
 	mempool_t *r1buf_pool;
 };
@@ -106,6 +109,13 @@ struct r1bio_s {
 	/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
 };
 
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error.  To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
 /* bits for r1bio.state */
 #define	R1BIO_Uptodate	0
 #define	R1BIO_IsSync	1
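
IO_BLOCKED is a sentinel stored in r1_bio->bios[] so retry paths can skip a device already known bad for this particular bio without failing the rdev itself. A hedged sketch of the read-retry test; the loop body is condensed:

	/* sketch: pick a mirror, skipping devices marked bad for this r1_bio */
	for (disk = 0; disk < conf->raid_disks; disk++) {
		if (r1_bio->bios[disk] == IO_BLOCKED)
			continue;	/* failed here before; try another mirror */
		/* ... evaluate the rdev and choose it for the retry ... */
	}
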
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
index 60708789c8f9..b1103298a8c2 100644
--- a/include/linux/raid/raid10.h
+++ b/include/linux/raid/raid10.h
@@ -35,18 +35,26 @@ struct r10_private_data_s {
 	sector_t chunk_mask;
 
 	struct list_head	retry_list;
-	/* for use when syncing mirrors: */
+	/* queue pending writes and submit them on unplug */
+	struct bio_list		pending_bio_list;
+
 
 	spinlock_t		resync_lock;
 	int nr_pending;
+	int nr_waiting;
+	int nr_queued;
 	int barrier;
 	sector_t		next_resync;
+	int			fullsync;  /* set to 1 if a full sync is needed,
+					    * (fresh device added).
+					    * Cleared when a sync completes.
+					    */
 
-	wait_queue_head_t	wait_idle;
-	wait_queue_head_t	wait_resume;
+	wait_queue_head_t	wait_barrier;
 
 	mempool_t *r10bio_pool;
 	mempool_t *r10buf_pool;
+	struct page		*tmppage;
 };
 
 typedef struct r10_private_data_s conf_t;
@@ -96,8 +104,16 @@ struct r10bio_s {
 	} devs[0];
 };
 
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error.  To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
 /* bits for r10bio.state */
 #define	R10BIO_Uptodate	0
 #define	R10BIO_IsSync	1
 #define	R10BIO_IsRecover 2
+#define	R10BIO_Degraded 3
 #endif
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f025ba6fb14c..394da8207b34 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -126,7 +126,7 @@
  */
 
 struct stripe_head {
-	struct stripe_head	*hash_next, **hash_pprev; /* hash pointers */
+	struct hlist_node	hash;
 	struct list_head	lru;			/* inactive_list or handle_list */
 	struct raid5_private_data	*raid_conf;
 	sector_t		sector;			/* sector of this row */
@@ -152,7 +152,6 @@ struct stripe_head {
 #define	R5_Insync	3	/* rdev && rdev->in_sync at start */
 #define	R5_Wantread	4	/* want to schedule a read */
 #define	R5_Wantwrite	5
-#define	R5_Syncio	6	/* this io need to be accounted as resync io */
 #define	R5_Overlap	7	/* There is a pending overlapping request on this block */
 #define	R5_ReadError	8	/* seen a read error here recently */
 #define	R5_ReWrite	9	/* have tried to over-write the readerror */
@@ -205,7 +204,7 @@ struct disk_info {
 };
 
 struct raid5_private_data {
-	struct stripe_head	**stripe_hashtbl;
+	struct hlist_head	*stripe_hashtbl;
 	mddev_t			*mddev;
 	struct disk_info	*spare;
 	int			chunk_size, level, algorithm;
@@ -228,6 +227,8 @@ struct raid5_private_data {
 					    * Cleared when a sync completes.
 					    */
 
+	struct page 		*spare_page; /* Used when checking P/Q in raid6 */
+
 	/*
 	 * Free stripes pool
 	 */
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index e0a4faa9610c..953b6df5d037 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -5,6 +5,16 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev);
 struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
 	 int flags, const char *dev_name, void *data);
 
+#ifndef CONFIG_MMU
+extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
+						   unsigned long addr,
+						   unsigned long len,
+						   unsigned long pgoff,
+						   unsigned long flags);
+
+extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
+#endif
+
 extern struct file_operations ramfs_file_operations;
 extern struct vm_operations_struct generic_file_vm_ops;
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 33261f1d2239..9d6fbeef2104 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *);
  * rmap interfaces called when adding or removing pte of page
  */
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *);
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b0ad6f30679e..7da33619d5d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -254,25 +254,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
  */
-#ifdef ATOMIC64_INIT
-#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
-typedef atomic64_t mm_counter_t;
-#else  /* !ATOMIC64_INIT */
-/*
- * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
- * that is, at 16TB if using 4kB page size.
- */
-#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
-typedef atomic_t mm_counter_t;
-#endif /* !ATOMIC64_INIT */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+typedef atomic_long_t mm_counter_t;
 
 #else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
 /*
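
Switching the mm counters to atomic_long_t removes the ATOMIC64_INIT special case and the 16TB wrap caveat on 32-bit, since atomic_long_t is pointer-sized everywhere. Callers go through the macros unchanged; a hedged sketch, where the counter name anon_rss is illustrative of the _##member convention:

	/* sketch: a fault path bumping a counter through the macro layer */
	static void example_account_anon_fault(struct mm_struct *mm)
	{
		/* expands to atomic_long_inc(&mm->_anon_rss) */
		inc_mm_counter(mm, anon_rss);
	}
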
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index a61c04f804b2..5dc94e777fab 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -14,11 +14,7 @@
 typedef struct pbe {
 	unsigned long address;		/* address of the copy */
 	unsigned long orig_address;	/* original address of page */
-	swp_entry_t swap_address;
-
-	struct pbe *next;	/* also used as scratch space at
-				 * end of page (see link, diskpage)
-				 */
+	struct pbe *next;
 } suspend_pagedir_t;
 
 #define for_each_pbe(pbe, pblist) \
@@ -77,6 +73,6 @@ unsigned long get_safe_page(gfp_t gfp_mask);
  * XXX: We try to keep some more pages free so that I/O operations succeed
  * without paging. Might this be more?
  */
-#define PAGES_FOR_IO	512
+#define PAGES_FOR_IO	1024
 
 #endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 508668f840b6..556617bcf7ac 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,7 +172,6 @@ extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
 extern int try_to_free_pages(struct zone **, gfp_t);
-extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
@@ -210,6 +209,7 @@ extern unsigned int nr_swapfiles;
 extern struct swap_info_struct swap_info[];
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int type);
 extern int swap_duplicate(swp_entry_t);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 25f637bd38b9..230bc55c0bfa 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -296,6 +296,12 @@ struct scsi_host_template {
 	int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);
 
 	/*
+	 * suspend support
+	 */
+	int (*resume)(struct scsi_device *);
+	int (*suspend)(struct scsi_device *);
+
+	/*
 	 * Name of proc directory
 	 */
 	char *proc_name;
diff --git a/init/Kconfig b/init/Kconfig
index ce737e02c5a2..ba42f3793a84 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -105,7 +105,6 @@ config SWAP
 
 config SYSVIPC
 	bool "System V IPC"
-	depends on MMU
 	---help---
 	  Inter Process Communication is a suite of library functions and
 	  system calls which let processes (running programs) synchronize and
@@ -190,7 +189,7 @@ config AUDIT
 
 config AUDITSYSCALL
 	bool "Enable system-call auditing support"
-	depends on AUDIT && (X86 || PPC || PPC64 || ARCH_S390 || IA64 || UML || SPARC64)
+	depends on AUDIT && (X86 || PPC || PPC64 || S390 || IA64 || UML || SPARC64)
 	default y if SECURITY_SELINUX
 	help
 	  Enable low-overhead system-call auditing infrastructure that
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 3fbc3555ce96..f6f36806f84a 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -17,7 +17,7 @@ static int __initdata raid_noautodetect, raid_autopart;
 static struct {
 	int minor;
 	int partitioned;
-	int pers;
+	int level;
 	int chunk;
 	char *device_names;
 } md_setup_args[MAX_MD_DEVS] __initdata;
@@ -47,7 +47,7 @@ extern int mdp_major;
  */
 static int __init md_setup(char *str)
 {
-	int minor, level, factor, fault, pers, partitioned = 0;
+	int minor, level, factor, fault, partitioned = 0;
 	char *pername = "";
 	char *str1;
 	int ent;
@@ -78,7 +78,7 @@ static int __init md_setup(char *str)
 	}
 	if (ent >= md_setup_ents)
 		md_setup_ents++;
-	switch (get_option(&str, &level)) {	/* RAID Personality */
+	switch (get_option(&str, &level)) {	/* RAID level */
 	case 2: /* could be 0 or -1.. */
 		if (level == 0 || level == LEVEL_LINEAR) {
 			if (get_option(&str, &factor) != 2 ||	/* Chunk Size */
@@ -86,16 +86,12 @@ static int __init md_setup(char *str)
86 printk(KERN_WARNING "md: Too few arguments supplied to md=.\n"); 86 printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
87 return 0; 87 return 0;
88 } 88 }
89 md_setup_args[ent].pers = level; 89 md_setup_args[ent].level = level;
90 md_setup_args[ent].chunk = 1 << (factor+12); 90 md_setup_args[ent].chunk = 1 << (factor+12);
91 if (level == LEVEL_LINEAR) { 91 if (level == LEVEL_LINEAR)
92 pers = LINEAR;
93 pername = "linear"; 92 pername = "linear";
94 } else { 93 else
95 pers = RAID0;
96 pername = "raid0"; 94 pername = "raid0";
97 }
98 md_setup_args[ent].pers = pers;
99 break; 95 break;
100 } 96 }
101 /* FALL THROUGH */ 97 /* FALL THROUGH */
@@ -103,7 +99,7 @@ static int __init md_setup(char *str)
103 str = str1; 99 str = str1;
104 /* FALL THROUGH */ 100 /* FALL THROUGH */
105 case 0: 101 case 0:
106 md_setup_args[ent].pers = 0; 102 md_setup_args[ent].level = LEVEL_NONE;
107 pername="super-block"; 103 pername="super-block";
108 } 104 }
109 105
@@ -190,10 +186,10 @@ static void __init md_setup_drive(void)
190 continue; 186 continue;
191 } 187 }
192 188
193 if (md_setup_args[ent].pers) { 189 if (md_setup_args[ent].level != LEVEL_NONE) {
194 /* non-persistent */ 190 /* non-persistent */
195 mdu_array_info_t ainfo; 191 mdu_array_info_t ainfo;
196 ainfo.level = pers_to_level(md_setup_args[ent].pers); 192 ainfo.level = md_setup_args[ent].level;
197 ainfo.size = 0; 193 ainfo.size = 0;
198 ainfo.nr_disks =0; 194 ainfo.nr_disks =0;
199 ainfo.raid_disks =0; 195 ainfo.raid_disks =0;
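
The md= boot syntax is untouched by this cleanup; only the internal bookkeeping now stores the raid level directly instead of a personality number. A worked example, with placeholder device names:

    md=0,0,4,0,/dev/sda1,/dev/sdb1

assembles /dev/md0 as raid0 from the two partitions with a chunk size of 1 << (4 + 12) bytes = 64 KiB, while

    md=1,/dev/sda2,/dev/sdb2

hits the case 0 branch above: the level is recorded as LEVEL_NONE and the array is assembled later from the on-disk superblocks.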
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index c10b08a80982..c2683fcd792d 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -145,7 +145,7 @@ int __init rd_load_image(char *from)
145 int nblocks, i, disk; 145 int nblocks, i, disk;
146 char *buf = NULL; 146 char *buf = NULL;
147 unsigned short rotate = 0; 147 unsigned short rotate = 0;
148#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_PPC_ISERIES) 148#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
149 char rotator[4] = { '|' , '/' , '-' , '\\' }; 149 char rotator[4] = { '|' , '/' , '-' , '\\' };
150#endif 150#endif
151 151
@@ -237,7 +237,7 @@ int __init rd_load_image(char *from)
237 } 237 }
238 sys_read(in_fd, buf, BLOCK_SIZE); 238 sys_read(in_fd, buf, BLOCK_SIZE);
239 sys_write(out_fd, buf, BLOCK_SIZE); 239 sys_write(out_fd, buf, BLOCK_SIZE);
240#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_PPC_ISERIES) 240#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
241 if (!(i % 16)) { 241 if (!(i % 16)) {
242 printk("%c\b", rotator[rotate & 0x3]); 242 printk("%c\b", rotator[rotate & 0x3]);
243 rotate++; 243 rotate++;
diff --git a/init/main.c b/init/main.c
index 54aaf561cf66..2ed3638deec7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -52,6 +52,7 @@
52#include <asm/bugs.h> 52#include <asm/bugs.h>
53#include <asm/setup.h> 53#include <asm/setup.h>
54#include <asm/sections.h> 54#include <asm/sections.h>
55#include <asm/cacheflush.h>
55 56
56/* 57/*
57 * This is one of the first .c files built. Error out early 58 * This is one of the first .c files built. Error out early
@@ -99,6 +100,9 @@ extern void acpi_early_init(void);
99#else 100#else
100static inline void acpi_early_init(void) { } 101static inline void acpi_early_init(void) { }
101#endif 102#endif
103#ifndef CONFIG_DEBUG_RODATA
104static inline void mark_rodata_ro(void) { }
105#endif
102 106
103#ifdef CONFIG_TC 107#ifdef CONFIG_TC
104extern void tc_init(void); 108extern void tc_init(void);
@@ -708,6 +712,7 @@ static int init(void * unused)
708 */ 712 */
709 free_initmem(); 713 free_initmem();
710 unlock_kernel(); 714 unlock_kernel();
715 mark_rodata_ro();
711 system_state = SYSTEM_RUNNING; 716 system_state = SYSTEM_RUNNING;
712 numa_default_policy(); 717 numa_default_policy();
713 718
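
The inline stub keeps the new call in init() unconditional: with CONFIG_DEBUG_RODATA unset it compiles away, otherwise the architecture provides the real thing. A sketch of the pattern only, not the actual arch implementation:

    #ifdef CONFIG_DEBUG_RODATA
    void mark_rodata_ro(void)
    {
            /* arch code write-protects the kernel's .rodata pages here */
    }
    #endif

Placing the call after free_initmem() presumably ensures the protection is applied to the final, post-init memory layout.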
diff --git a/ipc/shm.c b/ipc/shm.c
index 587d836d80d9..0ef4a1cf3e27 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -157,14 +157,22 @@ static void shm_close (struct vm_area_struct *shmd)
157 157
158static int shm_mmap(struct file * file, struct vm_area_struct * vma) 158static int shm_mmap(struct file * file, struct vm_area_struct * vma)
159{ 159{
160 file_accessed(file); 160 int ret;
161 vma->vm_ops = &shm_vm_ops; 161
162 shm_inc(file->f_dentry->d_inode->i_ino); 162 ret = shmem_mmap(file, vma);
163 return 0; 163 if (ret == 0) {
164 vma->vm_ops = &shm_vm_ops;
165 shm_inc(file->f_dentry->d_inode->i_ino);
166 }
167
168 return ret;
164} 169}
165 170
166static struct file_operations shm_file_operations = { 171static struct file_operations shm_file_operations = {
167 .mmap = shm_mmap 172 .mmap = shm_mmap,
173#ifndef CONFIG_MMU
174 .get_unmapped_area = shmem_get_unmapped_area,
175#endif
168}; 176};
169 177
170static struct vm_operations_struct shm_vm_ops = { 178static struct vm_operations_struct shm_vm_ops = {
diff --git a/kernel/acct.c b/kernel/acct.c
index 6312d6bd43e3..38d57fa6b78f 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -427,6 +427,7 @@ static void do_acct_process(long exitcode, struct file *file)
427 u64 elapsed; 427 u64 elapsed;
428 u64 run_time; 428 u64 run_time;
429 struct timespec uptime; 429 struct timespec uptime;
430 unsigned long jiffies;
430 431
431 /* 432 /*
432 * First check to see if there is enough free_space to continue 433 * First check to see if there is enough free_space to continue
@@ -467,12 +468,12 @@ static void do_acct_process(long exitcode, struct file *file)
467#endif 468#endif
468 do_div(elapsed, AHZ); 469 do_div(elapsed, AHZ);
469 ac.ac_btime = xtime.tv_sec - elapsed; 470 ac.ac_btime = xtime.tv_sec - elapsed;
470 ac.ac_utime = encode_comp_t(jiffies_to_AHZ( 471 jiffies = cputime_to_jiffies(cputime_add(current->group_leader->utime,
471 current->signal->utime + 472 current->signal->utime));
472 current->group_leader->utime)); 473 ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies));
473 ac.ac_stime = encode_comp_t(jiffies_to_AHZ( 474 jiffies = cputime_to_jiffies(cputime_add(current->group_leader->stime,
474 current->signal->stime + 475 current->signal->stime));
475 current->group_leader->stime)); 476 ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies));
476 /* we really need to bite the bullet and change layout */ 477 /* we really need to bite the bullet and change layout */
477 ac.ac_uid = current->uid; 478 ac.ac_uid = current->uid;
478 ac.ac_gid = current->gid; 479 ac.ac_gid = current->gid;
@@ -580,7 +581,8 @@ void acct_process(long exitcode)
580void acct_update_integrals(struct task_struct *tsk) 581void acct_update_integrals(struct task_struct *tsk)
581{ 582{
582 if (likely(tsk->mm)) { 583 if (likely(tsk->mm)) {
583 long delta = tsk->stime - tsk->acct_stimexpd; 584 long delta =
585 cputime_to_jiffies(tsk->stime) - tsk->acct_stimexpd;
584 586
585 if (delta == 0) 587 if (delta == 0)
586 return; 588 return;
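
The rewrite matters on configurations where cputime_t is not jiffies-based (s390's virtual CPU accounting, for one); the old code summed raw cputime_t values and fed them straight to jiffies_to_AHZ(), which only happened to work where the two units coincide. The corrected pattern in isolation, using the same fields as the hunk above (the local is renamed to j here):

    unsigned long j;

    /* sum in the cputime domain ... */
    j = cputime_to_jiffies(cputime_add(current->group_leader->utime,
                                       current->signal->utime));
    /* ... and convert once before entering the jiffies/AHZ domain */
    ac.ac_utime = encode_comp_t(jiffies_to_AHZ(j));

The acct_update_integrals() hunk follows the same logic: tsk->acct_stimexpd is kept in jiffies, so tsk->stime has to be converted before the subtraction.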
diff --git a/kernel/futex.c b/kernel/futex.c
index 5e71a6bf6f6b..5efa2f978032 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -356,6 +356,13 @@ retry:
356 if (bh1 != bh2) 356 if (bh1 != bh2)
357 spin_unlock(&bh2->lock); 357 spin_unlock(&bh2->lock);
358 358
359#ifndef CONFIG_MMU
360 /* we don't get EFAULT from MMU faults if we don't have an MMU,
361 * but we might get them from range checking */
362 ret = op_ret;
363 goto out;
364#endif
365
359 if (unlikely(op_ret != -EFAULT)) { 366 if (unlikely(op_ret != -EFAULT)) {
360 ret = op_ret; 367 ret = op_ret;
361 goto out; 368 goto out;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 81c49a4d679e..97d5559997d2 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -366,6 +366,8 @@ int request_irq(unsigned int irq,
366 action->next = NULL; 366 action->next = NULL;
367 action->dev_id = dev_id; 367 action->dev_id = dev_id;
368 368
369 select_smp_affinity(irq);
370
369 retval = setup_irq(irq, action); 371 retval = setup_irq(irq, action);
370 if (retval) 372 if (retval)
371 kfree(action); 373 kfree(action);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index f26e534c6585..8a64a4844cde 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -68,7 +68,9 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
68 */ 68 */
69 cpus_and(tmp, new_value, cpu_online_map); 69 cpus_and(tmp, new_value, cpu_online_map);
70 if (cpus_empty(tmp)) 70 if (cpus_empty(tmp))
71 return -EINVAL; 71 /* Special case for empty set - allow the architecture
72 code to set default SMP affinity. */
73 return select_smp_affinity(irq) ? -EINVAL : full_count;
72 74
73 proc_set_irq_affinity(irq, new_value); 75 proc_set_irq_affinity(irq, new_value);
74 76
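
Paired with the select_smp_affinity() call added to request_irq() above, this gives the architecture two chances to pick a default IRQ distribution: once when the handler is installed, and again when userspace writes a mask that intersects no online CPU. For example,

    echo 0 > /proc/irq/14/smp_affinity

used to fail with EINVAL unconditionally; now it succeeds wherever the arch hook can supply a sensible default, and still returns EINVAL where it cannot.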
diff --git a/kernel/module.c b/kernel/module.c
index 2ea929d51ad0..4b06bbad49c2 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1854,8 +1854,7 @@ static struct module *load_module(void __user *umod,
1854 kfree(args); 1854 kfree(args);
1855 free_hdr: 1855 free_hdr:
1856 vfree(hdr); 1856 vfree(hdr);
1857 if (err < 0) return ERR_PTR(err); 1857 return ERR_PTR(err);
1858 else return ptr;
1859 1858
1860 truncated: 1859 truncated:
1861 printk(KERN_ERR "Module len %lu truncated\n", len); 1860 printk(KERN_ERR "Module len %lu truncated\n", len);
diff --git a/kernel/panic.c b/kernel/panic.c
index aabc5f86fa3f..c5c4ab255834 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -60,7 +60,7 @@ NORET_TYPE void panic(const char * fmt, ...)
60 long i; 60 long i;
61 static char buf[1024]; 61 static char buf[1024];
62 va_list args; 62 va_list args;
63#if defined(CONFIG_ARCH_S390) 63#if defined(CONFIG_S390)
64 unsigned long caller = (unsigned long) __builtin_return_address(0); 64 unsigned long caller = (unsigned long) __builtin_return_address(0);
65#endif 65#endif
66 66
@@ -125,7 +125,7 @@ NORET_TYPE void panic(const char * fmt, ...)
125 printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n"); 125 printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
126 } 126 }
127#endif 127#endif
128#if defined(CONFIG_ARCH_S390) 128#if defined(CONFIG_S390)
129 disabled_wait(caller); 129 disabled_wait(caller);
130#endif 130#endif
131 local_irq_enable(); 131 local_irq_enable();
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 027322a564f4..e24446f8d8cd 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -24,10 +24,11 @@
24 24
25extern suspend_disk_method_t pm_disk_mode; 25extern suspend_disk_method_t pm_disk_mode;
26 26
27extern int swsusp_shrink_memory(void);
27extern int swsusp_suspend(void); 28extern int swsusp_suspend(void);
28extern int swsusp_write(void); 29extern int swsusp_write(struct pbe *pblist, unsigned int nr_pages);
29extern int swsusp_check(void); 30extern int swsusp_check(void);
30extern int swsusp_read(void); 31extern int swsusp_read(struct pbe **pblist_ptr);
31extern void swsusp_close(void); 32extern void swsusp_close(void);
32extern int swsusp_resume(void); 33extern int swsusp_resume(void);
33 34
@@ -73,31 +74,6 @@ static void power_down(suspend_disk_method_t mode)
73static int in_suspend __nosavedata = 0; 74static int in_suspend __nosavedata = 0;
74 75
75 76
76/**
77 * free_some_memory - Try to free as much memory as possible
78 *
79 * ... but do not OOM-kill anyone
80 *
81 * Notice: all userland should be stopped at this point, or
82 * livelock is possible.
83 */
84
85static void free_some_memory(void)
86{
87 unsigned int i = 0;
88 unsigned int tmp;
89 unsigned long pages = 0;
90 char *p = "-\\|/";
91
92 printk("Freeing memory... ");
93 while ((tmp = shrink_all_memory(10000))) {
94 pages += tmp;
95 printk("\b%c", p[i++ % 4]);
96 }
97 printk("\bdone (%li pages freed)\n", pages);
98}
99
100
101static inline void platform_finish(void) 77static inline void platform_finish(void)
102{ 78{
103 if (pm_disk_mode == PM_DISK_PLATFORM) { 79 if (pm_disk_mode == PM_DISK_PLATFORM) {
@@ -127,8 +103,8 @@ static int prepare_processes(void)
127 } 103 }
128 104
129 /* Free memory before shutting down devices. */ 105 /* Free memory before shutting down devices. */
130 free_some_memory(); 106 if (!(error = swsusp_shrink_memory()))
131 return 0; 107 return 0;
132thaw: 108thaw:
133 thaw_processes(); 109 thaw_processes();
134 enable_nonboot_cpus(); 110 enable_nonboot_cpus();
@@ -176,7 +152,7 @@ int pm_suspend_disk(void)
176 if (in_suspend) { 152 if (in_suspend) {
177 device_resume(); 153 device_resume();
178 pr_debug("PM: writing image.\n"); 154 pr_debug("PM: writing image.\n");
179 error = swsusp_write(); 155 error = swsusp_write(pagedir_nosave, nr_copy_pages);
180 if (!error) 156 if (!error)
181 power_down(pm_disk_mode); 157 power_down(pm_disk_mode);
182 else { 158 else {
@@ -247,7 +223,7 @@ static int software_resume(void)
247 223
248 pr_debug("PM: Reading swsusp image.\n"); 224 pr_debug("PM: Reading swsusp image.\n");
249 225
250 if ((error = swsusp_read())) { 226 if ((error = swsusp_read(&pagedir_nosave))) {
251 swsusp_free(); 227 swsusp_free();
252 goto Thaw; 228 goto Thaw;
253 } 229 }
@@ -363,37 +339,55 @@ static ssize_t resume_show(struct subsystem * subsys, char *buf)
363 MINOR(swsusp_resume_device)); 339 MINOR(swsusp_resume_device));
364} 340}
365 341
366static ssize_t resume_store(struct subsystem * subsys, const char * buf, size_t n) 342static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n)
367{ 343{
368 int len;
369 char *p;
370 unsigned int maj, min; 344 unsigned int maj, min;
371 int error = -EINVAL;
372 dev_t res; 345 dev_t res;
346 int ret = -EINVAL;
373 347
374 p = memchr(buf, '\n', n); 348 if (sscanf(buf, "%u:%u", &maj, &min) != 2)
375 len = p ? p - buf : n; 349 goto out;
376 350
377 if (sscanf(buf, "%u:%u", &maj, &min) == 2) { 351 res = MKDEV(maj,min);
378 res = MKDEV(maj,min); 352 if (maj != MAJOR(res) || min != MINOR(res))
379 if (maj == MAJOR(res) && min == MINOR(res)) { 353 goto out;
380 down(&pm_sem);
381 swsusp_resume_device = res;
382 up(&pm_sem);
383 printk("Attempting manual resume\n");
384 noresume = 0;
385 software_resume();
386 }
387 }
388 354
389 return error >= 0 ? n : error; 355 down(&pm_sem);
356 swsusp_resume_device = res;
357 up(&pm_sem);
358 printk("Attempting manual resume\n");
359 noresume = 0;
360 software_resume();
361 ret = n;
362out:
363 return ret;
390} 364}
391 365
392power_attr(resume); 366power_attr(resume);
393 367
368static ssize_t image_size_show(struct subsystem * subsys, char *buf)
369{
370 return sprintf(buf, "%u\n", image_size);
371}
372
373static ssize_t image_size_store(struct subsystem * subsys, const char * buf, size_t n)
374{
375 unsigned int size;
376
377 if (sscanf(buf, "%u", &size) == 1) {
378 image_size = size;
379 return n;
380 }
381
382 return -EINVAL;
383}
384
385power_attr(image_size);
386
394static struct attribute * g[] = { 387static struct attribute * g[] = {
395 &disk_attr.attr, 388 &disk_attr.attr,
396 &resume_attr.attr, 389 &resume_attr.attr,
390 &image_size_attr.attr,
397 NULL, 391 NULL,
398}; 392};
399 393
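
image_size is the userspace knob for the swsusp rework later in this patch; the value is in megabytes (500 by default, per the definition in swsusp.c below). For example:

    echo 300 > /sys/power/image_size

asks swsusp to aim for an image no larger than 300 MB on the next suspend, and reading the file back reports the current target.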
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 6c042b5ee14b..7e8492fd1423 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -9,19 +9,13 @@
9#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) 9#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
10#endif 10#endif
11 11
12#define MAX_PBES ((PAGE_SIZE - sizeof(struct new_utsname) \
13 - 4 - 3*sizeof(unsigned long) - sizeof(int) \
14 - sizeof(void *)) / sizeof(swp_entry_t))
15
16struct swsusp_info { 12struct swsusp_info {
17 struct new_utsname uts; 13 struct new_utsname uts;
18 u32 version_code; 14 u32 version_code;
19 unsigned long num_physpages; 15 unsigned long num_physpages;
20 int cpus; 16 int cpus;
21 unsigned long image_pages; 17 unsigned long image_pages;
22 unsigned long pagedir_pages; 18 unsigned long pages;
23 suspend_pagedir_t * suspend_pagedir;
24 swp_entry_t pagedir[MAX_PBES];
25} __attribute__((aligned(PAGE_SIZE))); 19} __attribute__((aligned(PAGE_SIZE)));
26 20
27 21
@@ -48,25 +42,27 @@ static struct subsys_attribute _name##_attr = { \
48 42
49extern struct subsystem power_subsys; 43extern struct subsystem power_subsys;
50 44
51extern int freeze_processes(void);
52extern void thaw_processes(void);
53
54extern int pm_prepare_console(void); 45extern int pm_prepare_console(void);
55extern void pm_restore_console(void); 46extern void pm_restore_console(void);
56 47
57
58/* References to section boundaries */ 48/* References to section boundaries */
59extern const void __nosave_begin, __nosave_end; 49extern const void __nosave_begin, __nosave_end;
60 50
61extern unsigned int nr_copy_pages; 51extern unsigned int nr_copy_pages;
62extern suspend_pagedir_t *pagedir_nosave; 52extern struct pbe *pagedir_nosave;
63extern suspend_pagedir_t *pagedir_save; 53
54/* Preferred image size in MB (default 500) */
55extern unsigned int image_size;
64 56
65extern asmlinkage int swsusp_arch_suspend(void); 57extern asmlinkage int swsusp_arch_suspend(void);
66extern asmlinkage int swsusp_arch_resume(void); 58extern asmlinkage int swsusp_arch_resume(void);
67 59
60extern unsigned int count_data_pages(void);
68extern void free_pagedir(struct pbe *pblist); 61extern void free_pagedir(struct pbe *pblist);
62extern void release_eaten_pages(void);
69extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed); 63extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed);
70extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
71extern void swsusp_free(void); 64extern void swsusp_free(void);
72extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed); 65extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed);
66extern unsigned int snapshot_nr_pages(void);
67extern struct pbe *snapshot_pblist(void);
68extern void snapshot_pblist_set(struct pbe *pblist);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 4a6dbcefd378..41f66365f0d8 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -33,7 +33,35 @@
33 33
34#include "power.h" 34#include "power.h"
35 35
36struct pbe *pagedir_nosave;
37unsigned int nr_copy_pages;
38
36#ifdef CONFIG_HIGHMEM 39#ifdef CONFIG_HIGHMEM
40unsigned int count_highmem_pages(void)
41{
42 struct zone *zone;
43 unsigned long zone_pfn;
44 unsigned int n = 0;
45
46 for_each_zone (zone)
47 if (is_highmem(zone)) {
48 mark_free_pages(zone);
49 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
50 struct page *page;
51 unsigned long pfn = zone_pfn + zone->zone_start_pfn;
52 if (!pfn_valid(pfn))
53 continue;
54 page = pfn_to_page(pfn);
55 if (PageReserved(page))
56 continue;
57 if (PageNosaveFree(page))
58 continue;
59 n++;
60 }
61 }
62 return n;
63}
64
37struct highmem_page { 65struct highmem_page {
38 char *data; 66 char *data;
39 struct page *page; 67 struct page *page;
@@ -149,17 +177,15 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn)
149 BUG_ON(PageReserved(page) && PageNosave(page)); 177 BUG_ON(PageReserved(page) && PageNosave(page));
150 if (PageNosave(page)) 178 if (PageNosave(page))
151 return 0; 179 return 0;
152 if (PageReserved(page) && pfn_is_nosave(pfn)) { 180 if (PageReserved(page) && pfn_is_nosave(pfn))
153 pr_debug("[nosave pfn 0x%lx]", pfn);
154 return 0; 181 return 0;
155 }
156 if (PageNosaveFree(page)) 182 if (PageNosaveFree(page))
157 return 0; 183 return 0;
158 184
159 return 1; 185 return 1;
160} 186}
161 187
162static unsigned count_data_pages(void) 188unsigned int count_data_pages(void)
163{ 189{
164 struct zone *zone; 190 struct zone *zone;
165 unsigned long zone_pfn; 191 unsigned long zone_pfn;
@@ -244,7 +270,7 @@ static inline void fill_pb_page(struct pbe *pbpage)
244 * of memory pages allocated with alloc_pagedir() 270 * of memory pages allocated with alloc_pagedir()
245 */ 271 */
246 272
247void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) 273static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
248{ 274{
249 struct pbe *pbpage, *p; 275 struct pbe *pbpage, *p;
250 unsigned int num = PBES_PER_PAGE; 276 unsigned int num = PBES_PER_PAGE;
@@ -261,7 +287,35 @@ void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
261 p->next = p + 1; 287 p->next = p + 1;
262 p->next = NULL; 288 p->next = NULL;
263 } 289 }
264 pr_debug("create_pbe_list(): initialized %d PBEs\n", num); 290}
291
292/**
293 * On resume it is necessary to trace and eventually free the unsafe
294 * pages that have been allocated, because they are needed for I/O
295 * (on x86-64 we likely will "eat" these pages once again while
296 * creating the temporary page translation tables)
297 */
298
299struct eaten_page {
300 struct eaten_page *next;
301 char padding[PAGE_SIZE - sizeof(void *)];
302};
303
304static struct eaten_page *eaten_pages = NULL;
305
306void release_eaten_pages(void)
307{
308 struct eaten_page *p, *q;
309
310 p = eaten_pages;
311 while (p) {
312 q = p->next;
313 /* We don't want swsusp_free() to free this page again */
314 ClearPageNosave(virt_to_page(p));
315 free_page((unsigned long)p);
316 p = q;
317 }
318 eaten_pages = NULL;
265} 319}
266 320
267/** 321/**
@@ -282,9 +336,12 @@ static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
282 if (safe_needed) 336 if (safe_needed)
283 do { 337 do {
284 res = (void *)get_zeroed_page(gfp_mask); 338 res = (void *)get_zeroed_page(gfp_mask);
285 if (res && PageNosaveFree(virt_to_page(res))) 339 if (res && PageNosaveFree(virt_to_page(res))) {
286 /* This is for swsusp_free() */ 340 /* This is for swsusp_free() */
287 SetPageNosave(virt_to_page(res)); 341 SetPageNosave(virt_to_page(res));
342 ((struct eaten_page *)res)->next = eaten_pages;
343 eaten_pages = res;
344 }
288 } while (res && PageNosaveFree(virt_to_page(res))); 345 } while (res && PageNosaveFree(virt_to_page(res)));
289 else 346 else
290 res = (void *)get_zeroed_page(gfp_mask); 347 res = (void *)get_zeroed_page(gfp_mask);
@@ -332,7 +389,8 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
332 if (!pbe) { /* get_zeroed_page() failed */ 389 if (!pbe) { /* get_zeroed_page() failed */
333 free_pagedir(pblist); 390 free_pagedir(pblist);
334 pblist = NULL; 391 pblist = NULL;
335 } 392 } else
393 create_pbe_list(pblist, nr_pages);
336 return pblist; 394 return pblist;
337} 395}
338 396
@@ -370,8 +428,14 @@ void swsusp_free(void)
370 428
371static int enough_free_mem(unsigned int nr_pages) 429static int enough_free_mem(unsigned int nr_pages)
372{ 430{
373 pr_debug("swsusp: available memory: %u pages\n", nr_free_pages()); 431 struct zone *zone;
374 return nr_free_pages() > (nr_pages + PAGES_FOR_IO + 432 unsigned int n = 0;
433
434 for_each_zone (zone)
435 if (!is_highmem(zone))
436 n += zone->free_pages;
437 pr_debug("swsusp: available memory: %u pages\n", n);
438 return n > (nr_pages + PAGES_FOR_IO +
375 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); 439 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
376} 440}
377 441
@@ -395,7 +459,6 @@ static struct pbe *swsusp_alloc(unsigned int nr_pages)
395 printk(KERN_ERR "suspend: Allocating pagedir failed.\n"); 459 printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
396 return NULL; 460 return NULL;
397 } 461 }
398 create_pbe_list(pblist, nr_pages);
399 462
400 if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) { 463 if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
401 printk(KERN_ERR "suspend: Allocating image pages failed.\n"); 464 printk(KERN_ERR "suspend: Allocating image pages failed.\n");
@@ -421,10 +484,6 @@ asmlinkage int swsusp_save(void)
421 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE, 484 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
422 PAGES_FOR_IO, nr_free_pages()); 485 PAGES_FOR_IO, nr_free_pages());
423 486
424 /* This is needed because of the fixed size of swsusp_info */
425 if (MAX_PBES < (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE)
426 return -ENOSPC;
427
428 if (!enough_free_mem(nr_pages)) { 487 if (!enough_free_mem(nr_pages)) {
429 printk(KERN_ERR "swsusp: Not enough free memory\n"); 488 printk(KERN_ERR "swsusp: Not enough free memory\n");
430 return -ENOMEM; 489 return -ENOMEM;
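
Worked numbers for the reworked enough_free_mem(): highmem is now excluded because the image copy and the PBE lists must live in the direct mapping. Assuming 4 KiB pages and a 12-byte struct pbe (three 32-bit words, so PBES_PER_PAGE is about 341), saving nr_pages = 25000 requires more than 25000 + ceil(25000 / 341) = 25074 free lowmem pages, plus PAGES_FOR_IO.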
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index c05f46e7348f..55a18d26abed 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -30,8 +30,8 @@
30 * Alex Badea <vampire@go.ro>: 30 * Alex Badea <vampire@go.ro>:
31 * Fixed runaway init 31 * Fixed runaway init
32 * 32 *
33 * Andreas Steinmetz <ast@domdv.de>: 33 * Rafael J. Wysocki <rjw@sisk.pl>
34 * Added encrypted suspend option 34 * Added the swap map data structure and reworked the handling of swap
35 * 35 *
36 * More state savers are welcome. Especially for the scsi layer... 36 * More state savers are welcome. Especially for the scsi layer...
37 * 37 *
@@ -67,44 +67,33 @@
67#include <asm/tlbflush.h> 67#include <asm/tlbflush.h>
68#include <asm/io.h> 68#include <asm/io.h>
69 69
70#include <linux/random.h>
71#include <linux/crypto.h>
72#include <asm/scatterlist.h>
73
74#include "power.h" 70#include "power.h"
75 71
72/*
73 * Preferred image size in MB (tunable via /sys/power/image_size).
74 * When it is set to N, swsusp will do its best to ensure the image
75 * size will not exceed N MB, but if that is impossible, it will
76 * try to create the smallest image possible.
77 */
78unsigned int image_size = 500;
79
76#ifdef CONFIG_HIGHMEM 80#ifdef CONFIG_HIGHMEM
81unsigned int count_highmem_pages(void);
77int save_highmem(void); 82int save_highmem(void);
78int restore_highmem(void); 83int restore_highmem(void);
79#else 84#else
80static int save_highmem(void) { return 0; } 85static int save_highmem(void) { return 0; }
81static int restore_highmem(void) { return 0; } 86static int restore_highmem(void) { return 0; }
87static unsigned int count_highmem_pages(void) { return 0; }
82#endif 88#endif
83 89
84#define CIPHER "aes"
85#define MAXKEY 32
86#define MAXIV 32
87
88extern char resume_file[]; 90extern char resume_file[];
89 91
90/* Local variables that should not be affected by save */
91unsigned int nr_copy_pages __nosavedata = 0;
92
93/* Suspend pagedir is allocated before final copy, therefore it
94 must be freed after resume
95
96 Warning: this is even more evil than it seems. Pagedirs this file
97 talks about are completely different from page directories used by
98 MMU hardware.
99 */
100suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
101
102#define SWSUSP_SIG "S1SUSPEND" 92#define SWSUSP_SIG "S1SUSPEND"
103 93
104static struct swsusp_header { 94static struct swsusp_header {
105 char reserved[PAGE_SIZE - 20 - MAXKEY - MAXIV - sizeof(swp_entry_t)]; 95 char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
106 u8 key_iv[MAXKEY+MAXIV]; 96 swp_entry_t image;
107 swp_entry_t swsusp_info;
108 char orig_sig[10]; 97 char orig_sig[10];
109 char sig[10]; 98 char sig[10];
110} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header; 99} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
@@ -115,140 +104,9 @@ static struct swsusp_info swsusp_info;
115 * Saving part... 104 * Saving part...
116 */ 105 */
117 106
118/* We memorize in swapfile_used what swap devices are used for suspension */ 107static unsigned short root_swap = 0xffff;
119#define SWAPFILE_UNUSED 0
120#define SWAPFILE_SUSPEND 1 /* This is the suspending device */
121#define SWAPFILE_IGNORED 2 /* Those are other swap devices ignored for suspension */
122
123static unsigned short swapfile_used[MAX_SWAPFILES];
124static unsigned short root_swap;
125
126static int write_page(unsigned long addr, swp_entry_t *loc);
127static int bio_read_page(pgoff_t page_off, void *page);
128
129static u8 key_iv[MAXKEY+MAXIV];
130
131#ifdef CONFIG_SWSUSP_ENCRYPT
132
133static int crypto_init(int mode, void **mem)
134{
135 int error = 0;
136 int len;
137 char *modemsg;
138 struct crypto_tfm *tfm;
139
140 modemsg = mode ? "suspend not possible" : "resume not possible";
141
142 tfm = crypto_alloc_tfm(CIPHER, CRYPTO_TFM_MODE_CBC);
143 if(!tfm) {
144 printk(KERN_ERR "swsusp: no tfm, %s\n", modemsg);
145 error = -EINVAL;
146 goto out;
147 }
148
149 if(MAXKEY < crypto_tfm_alg_min_keysize(tfm)) {
150 printk(KERN_ERR "swsusp: key buffer too small, %s\n", modemsg);
151 error = -ENOKEY;
152 goto fail;
153 }
154
155 if (mode)
156 get_random_bytes(key_iv, MAXKEY+MAXIV);
157
158 len = crypto_tfm_alg_max_keysize(tfm);
159 if (len > MAXKEY)
160 len = MAXKEY;
161
162 if (crypto_cipher_setkey(tfm, key_iv, len)) {
163 printk(KERN_ERR "swsusp: key setup failure, %s\n", modemsg);
164 error = -EKEYREJECTED;
165 goto fail;
166 }
167
168 len = crypto_tfm_alg_ivsize(tfm);
169
170 if (MAXIV < len) {
171 printk(KERN_ERR "swsusp: iv buffer too small, %s\n", modemsg);
172 error = -EOVERFLOW;
173 goto fail;
174 }
175
176 crypto_cipher_set_iv(tfm, key_iv+MAXKEY, len);
177
178 *mem=(void *)tfm;
179
180 goto out;
181
182fail: crypto_free_tfm(tfm);
183out: return error;
184}
185
186static __inline__ void crypto_exit(void *mem)
187{
188 crypto_free_tfm((struct crypto_tfm *)mem);
189}
190
191static __inline__ int crypto_write(struct pbe *p, void *mem)
192{
193 int error = 0;
194 struct scatterlist src, dst;
195
196 src.page = virt_to_page(p->address);
197 src.offset = 0;
198 src.length = PAGE_SIZE;
199 dst.page = virt_to_page((void *)&swsusp_header);
200 dst.offset = 0;
201 dst.length = PAGE_SIZE;
202
203 error = crypto_cipher_encrypt((struct crypto_tfm *)mem, &dst, &src,
204 PAGE_SIZE);
205
206 if (!error)
207 error = write_page((unsigned long)&swsusp_header,
208 &(p->swap_address));
209 return error;
210}
211
212static __inline__ int crypto_read(struct pbe *p, void *mem)
213{
214 int error = 0;
215 struct scatterlist src, dst;
216
217 error = bio_read_page(swp_offset(p->swap_address), (void *)p->address);
218 if (!error) {
219 src.offset = 0;
220 src.length = PAGE_SIZE;
221 dst.offset = 0;
222 dst.length = PAGE_SIZE;
223 src.page = dst.page = virt_to_page((void *)p->address);
224
225 error = crypto_cipher_decrypt((struct crypto_tfm *)mem, &dst,
226 &src, PAGE_SIZE);
227 }
228 return error;
229}
230#else
231static __inline__ int crypto_init(int mode, void *mem)
232{
233 return 0;
234}
235
236static __inline__ void crypto_exit(void *mem)
237{
238}
239
240static __inline__ int crypto_write(struct pbe *p, void *mem)
241{
242 return write_page(p->address, &(p->swap_address));
243}
244 108
245static __inline__ int crypto_read(struct pbe *p, void *mem) 109static int mark_swapfiles(swp_entry_t start)
246{
247 return bio_read_page(swp_offset(p->swap_address), (void *)p->address);
248}
249#endif
250
251static int mark_swapfiles(swp_entry_t prev)
252{ 110{
253 int error; 111 int error;
254 112
@@ -259,8 +117,7 @@ static int mark_swapfiles(swp_entry_t prev)
259 !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { 117 !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
260 memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); 118 memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
261 memcpy(swsusp_header.sig,SWSUSP_SIG, 10); 119 memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
262 memcpy(swsusp_header.key_iv, key_iv, MAXKEY+MAXIV); 120 swsusp_header.image = start;
263 swsusp_header.swsusp_info = prev;
264 error = rw_swap_page_sync(WRITE, 121 error = rw_swap_page_sync(WRITE,
265 swp_entry(root_swap, 0), 122 swp_entry(root_swap, 0),
266 virt_to_page((unsigned long) 123 virt_to_page((unsigned long)
@@ -283,7 +140,7 @@ static int mark_swapfiles(swp_entry_t prev)
283 * devfs, since the resume code can only recognize the form /dev/hda4, 140 * devfs, since the resume code can only recognize the form /dev/hda4,
284 * but the suspend code would see the long name.) 141 * but the suspend code would see the long name.)
285 */ 142 */
286static int is_resume_device(const struct swap_info_struct *swap_info) 143static inline int is_resume_device(const struct swap_info_struct *swap_info)
287{ 144{
288 struct file *file = swap_info->swap_file; 145 struct file *file = swap_info->swap_file;
289 struct inode *inode = file->f_dentry->d_inode; 146 struct inode *inode = file->f_dentry->d_inode;
@@ -294,54 +151,22 @@ static int is_resume_device(const struct swap_info_struct *swap_info)
294 151
295static int swsusp_swap_check(void) /* This is called before saving image */ 152static int swsusp_swap_check(void) /* This is called before saving image */
296{ 153{
297 int i, len;
298
299 len=strlen(resume_file);
300 root_swap = 0xFFFF;
301
302 spin_lock(&swap_lock);
303 for (i=0; i<MAX_SWAPFILES; i++) {
304 if (!(swap_info[i].flags & SWP_WRITEOK)) {
305 swapfile_used[i]=SWAPFILE_UNUSED;
306 } else {
307 if (!len) {
308 printk(KERN_WARNING "resume= option should be used to set suspend device" );
309 if (root_swap == 0xFFFF) {
310 swapfile_used[i] = SWAPFILE_SUSPEND;
311 root_swap = i;
312 } else
313 swapfile_used[i] = SWAPFILE_IGNORED;
314 } else {
315 /* we ignore all swap devices that are not the resume_file */
316 if (is_resume_device(&swap_info[i])) {
317 swapfile_used[i] = SWAPFILE_SUSPEND;
318 root_swap = i;
319 } else {
320 swapfile_used[i] = SWAPFILE_IGNORED;
321 }
322 }
323 }
324 }
325 spin_unlock(&swap_lock);
326 return (root_swap != 0xffff) ? 0 : -ENODEV;
327}
328
329/**
330 * This is called after saving image so modification
331 * will be lost after resume... and that's what we want.
332 * we make the device unusable. A new call to
333 * lock_swapdevices can unlock the devices.
334 */
335static void lock_swapdevices(void)
336{
337 int i; 154 int i;
338 155
156 if (!swsusp_resume_device)
157 return -ENODEV;
339 spin_lock(&swap_lock); 158 spin_lock(&swap_lock);
340 for (i = 0; i< MAX_SWAPFILES; i++) 159 for (i = 0; i < MAX_SWAPFILES; i++) {
341 if (swapfile_used[i] == SWAPFILE_IGNORED) { 160 if (!(swap_info[i].flags & SWP_WRITEOK))
342 swap_info[i].flags ^= SWP_WRITEOK; 161 continue;
162 if (is_resume_device(swap_info + i)) {
163 spin_unlock(&swap_lock);
164 root_swap = i;
165 return 0;
343 } 166 }
167 }
344 spin_unlock(&swap_lock); 168 spin_unlock(&swap_lock);
169 return -ENODEV;
345} 170}
346 171
347/** 172/**
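
Note how this dovetails with the get_swap_page_of_type() declaration at the top of the patch: with root_swap pinned to the resume device, the write path below allocates swap entries from that device alone, which is what lets the old swapfile_used[] bookkeeping and the lock_swapdevices() lock/unlock dance disappear.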
@@ -359,72 +184,217 @@ static void lock_swapdevices(void)
359static int write_page(unsigned long addr, swp_entry_t *loc) 184static int write_page(unsigned long addr, swp_entry_t *loc)
360{ 185{
361 swp_entry_t entry; 186 swp_entry_t entry;
362 int error = 0; 187 int error = -ENOSPC;
363 188
364 entry = get_swap_page(); 189 entry = get_swap_page_of_type(root_swap);
365 if (swp_offset(entry) && 190 if (swp_offset(entry)) {
366 swapfile_used[swp_type(entry)] == SWAPFILE_SUSPEND) { 191 error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr));
367 error = rw_swap_page_sync(WRITE, entry, 192 if (!error || error == -EIO)
368 virt_to_page(addr));
369 if (error == -EIO)
370 error = 0;
371 if (!error)
372 *loc = entry; 193 *loc = entry;
373 } else 194 }
374 error = -ENOSPC;
375 return error; 195 return error;
376} 196}
377 197
378/** 198/**
379 * data_free - Free the swap entries used by the saved image. 199 * Swap map-handling functions
200 *
201 * The swap map is a data structure used for keeping track of each page
202 * written to the swap. It consists of many swap_map_page structures
203 * each of which contains an array of MAP_PAGE_SIZE swap entries.
204 * These structures are linked together with the help of either the
205 * .next (in memory) or the .next_swap (in swap) member.
380 * 206 *
381 * Walk the list of used swap entries and free each one. 207 * The swap map is created during suspend. At that time we need to keep
382 * This is only used for cleanup when suspend fails. 208 * it in memory, because we have to free all of the allocated swap
209 * entries if an error occurs. The memory needed is preallocated
210 * so that we know in advance if there's enough of it.
211 *
212 * The first swap_map_page structure is filled with the swap entries that
213 * correspond to the first MAP_PAGE_SIZE data pages written to swap and
214 * so on. After all of the data pages have been written, the order
215 * of the swap_map_page structures in the map is reversed so that they
216 * can be read from swap in the original order. This causes the data
217 * pages to be loaded in exactly the same order in which they have been
218 * saved.
219 *
220 * During resume we only need to use one swap_map_page structure
221 * at a time, which means that we only need to use two memory pages for
222 * reading the image - one for reading the swap_map_page structures
223 * and the second for reading the data pages from swap.
383 */ 224 */
384static void data_free(void) 225
226#define MAP_PAGE_SIZE ((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \
227 / sizeof(swp_entry_t))
228
229struct swap_map_page {
230 swp_entry_t entries[MAP_PAGE_SIZE];
231 swp_entry_t next_swap;
232 struct swap_map_page *next;
233};
234
235static inline void free_swap_map(struct swap_map_page *swap_map)
385{ 236{
386 swp_entry_t entry; 237 struct swap_map_page *swp;
387 struct pbe *p;
388 238
389 for_each_pbe (p, pagedir_nosave) { 239 while (swap_map) {
390 entry = p->swap_address; 240 swp = swap_map->next;
391 if (entry.val) 241 free_page((unsigned long)swap_map);
392 swap_free(entry); 242 swap_map = swp;
393 else
394 break;
395 } 243 }
396} 244}
397 245
246static struct swap_map_page *alloc_swap_map(unsigned int nr_pages)
247{
248 struct swap_map_page *swap_map, *swp;
249 unsigned n = 0;
250
251 if (!nr_pages)
252 return NULL;
253
254 pr_debug("alloc_swap_map(): nr_pages = %d\n", nr_pages);
255 swap_map = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
256 swp = swap_map;
257 for (n = MAP_PAGE_SIZE; n < nr_pages; n += MAP_PAGE_SIZE) {
258 swp->next = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
259 swp = swp->next;
260 if (!swp) {
261 free_swap_map(swap_map);
262 return NULL;
263 }
264 }
265 return swap_map;
266}
267
398/** 268/**
399 * data_write - Write saved image to swap. 269 * reverse_swap_map - reverse the order of pages in the swap map
400 * 270 * @swap_map
401 * Walk the list of pages in the image and sync each one to swap.
402 */ 271 */
403static int data_write(void) 272
273static inline struct swap_map_page *reverse_swap_map(struct swap_map_page *swap_map)
404{ 274{
405 int error = 0, i = 0; 275 struct swap_map_page *prev, *next;
406 unsigned int mod = nr_copy_pages / 100; 276
407 struct pbe *p; 277 prev = NULL;
408 void *tfm; 278 while (swap_map) {
279 next = swap_map->next;
280 swap_map->next = prev;
281 prev = swap_map;
282 swap_map = next;
283 }
284 return prev;
285}
409 286
410 if ((error = crypto_init(1, &tfm))) 287/**
411 return error; 288 * free_swap_map_entries - free the swap entries allocated to store
289 * the swap map @swap_map (this is only called in case of an error)
290 */
291static inline void free_swap_map_entries(struct swap_map_page *swap_map)
292{
293 while (swap_map) {
294 if (swap_map->next_swap.val)
295 swap_free(swap_map->next_swap);
296 swap_map = swap_map->next;
297 }
298}
412 299
413 if (!mod) 300/**
414 mod = 1; 301 * save_swap_map - save the swap map used for tracking the data pages
302 * stored in the swap
303 */
415 304
416 printk( "Writing data to swap (%d pages)... ", nr_copy_pages ); 305static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start)
417 for_each_pbe (p, pagedir_nosave) { 306{
418 if (!(i%mod)) 307 swp_entry_t entry = (swp_entry_t){0};
419 printk( "\b\b\b\b%3d%%", i / mod ); 308 int error;
420 if ((error = crypto_write(p, tfm))) { 309
421 crypto_exit(tfm); 310 while (swap_map) {
311 swap_map->next_swap = entry;
312 if ((error = write_page((unsigned long)swap_map, &entry)))
422 return error; 313 return error;
423 } 314 swap_map = swap_map->next;
424 i++;
425 } 315 }
426 printk("\b\b\b\bdone\n"); 316 *start = entry;
427 crypto_exit(tfm); 317 return 0;
318}
319
320/**
321 * free_image_entries - free the swap entries allocated to store
322 * the image data pages (this is only called in case of an error)
323 */
324
325static inline void free_image_entries(struct swap_map_page *swp)
326{
327 unsigned k;
328
329 while (swp) {
330 for (k = 0; k < MAP_PAGE_SIZE; k++)
331 if (swp->entries[k].val)
332 swap_free(swp->entries[k]);
333 swp = swp->next;
334 }
335}
336
337/**
338 * The swap_map_handle structure is used for handling the swap map in
339 * a file-like way
340 */
341
342struct swap_map_handle {
343 struct swap_map_page *cur;
344 unsigned int k;
345};
346
347static inline void init_swap_map_handle(struct swap_map_handle *handle,
348 struct swap_map_page *map)
349{
350 handle->cur = map;
351 handle->k = 0;
352}
353
354static inline int swap_map_write_page(struct swap_map_handle *handle,
355 unsigned long addr)
356{
357 int error;
358
359 error = write_page(addr, handle->cur->entries + handle->k);
360 if (error)
361 return error;
362 if (++handle->k >= MAP_PAGE_SIZE) {
363 handle->cur = handle->cur->next;
364 handle->k = 0;
365 }
366 return 0;
367}
368
369/**
370 * save_image_data - save the data pages pointed to by the PBEs
371 * from the list @pblist using the swap map handle @handle
372 * (assume there are @nr_pages data pages to save)
373 */
374
375static int save_image_data(struct pbe *pblist,
376 struct swap_map_handle *handle,
377 unsigned int nr_pages)
378{
379 unsigned int m;
380 struct pbe *p;
381 int error = 0;
382
383 printk("Saving image data pages (%u pages) ... ", nr_pages);
384 m = nr_pages / 100;
385 if (!m)
386 m = 1;
387 nr_pages = 0;
388 for_each_pbe (p, pblist) {
389 error = swap_map_write_page(handle, p->address);
390 if (error)
391 break;
392 if (!(nr_pages % m))
393 printk("\b\b\b\b%3d%%", nr_pages / m);
394 nr_pages++;
395 }
396 if (!error)
397 printk("\b\b\b\bdone\n");
428 return error; 398 return error;
429} 399}
430 400
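
The MAP_PAGE_SIZE arithmetic, worked through under an assumed configuration (4 KiB pages, 4-byte swp_entry_t and pointers, i.e. a 32-bit build): MAP_PAGE_SIZE = (4096 - 4 - 4) / 4 = 1022, so one swap_map_page tracks 1022 written pages, and a 500 MB image (128000 pages) needs ceil(128000 / 1022) = 126 map pages. The map pages themselves go out via save_swap_map() above, and free_swap_map_entries() releases their swap slots if anything fails.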
@@ -440,70 +410,70 @@ static void dump_info(void)
440 pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname); 410 pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname);
441 pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus); 411 pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus);
442 pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages); 412 pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages);
443 pr_debug(" swsusp: Pagedir: %ld Pages\n",swsusp_info.pagedir_pages); 413 pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages);
444} 414}
445 415
446static void init_header(void) 416static void init_header(unsigned int nr_pages)
447{ 417{
448 memset(&swsusp_info, 0, sizeof(swsusp_info)); 418 memset(&swsusp_info, 0, sizeof(swsusp_info));
449 swsusp_info.version_code = LINUX_VERSION_CODE; 419 swsusp_info.version_code = LINUX_VERSION_CODE;
450 swsusp_info.num_physpages = num_physpages; 420 swsusp_info.num_physpages = num_physpages;
451 memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname)); 421 memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname));
452 422
453 swsusp_info.suspend_pagedir = pagedir_nosave;
454 swsusp_info.cpus = num_online_cpus(); 423 swsusp_info.cpus = num_online_cpus();
455 swsusp_info.image_pages = nr_copy_pages; 424 swsusp_info.image_pages = nr_pages;
456} 425 swsusp_info.pages = nr_pages +
457 426 ((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
458static int close_swap(void)
459{
460 swp_entry_t entry;
461 int error;
462
463 dump_info();
464 error = write_page((unsigned long)&swsusp_info, &entry);
465 if (!error) {
466 printk( "S" );
467 error = mark_swapfiles(entry);
468 printk( "|\n" );
469 }
470 return error;
471} 427}
472 428
473/** 429/**
474 * free_pagedir_entries - Free pages used by the page directory. 430 * pack_orig_addresses - the .orig_address fields of the PBEs from the
475 * 431 * list starting at @pbe are stored in the array @buf[] (1 page)
476 * This is used during suspend for error recovery.
477 */ 432 */
478 433
479static void free_pagedir_entries(void) 434static inline struct pbe *pack_orig_addresses(unsigned long *buf,
435 struct pbe *pbe)
480{ 436{
481 int i; 437 int j;
482 438
483 for (i = 0; i < swsusp_info.pagedir_pages; i++) 439 for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
484 swap_free(swsusp_info.pagedir[i]); 440 buf[j] = pbe->orig_address;
441 pbe = pbe->next;
442 }
443 if (!pbe)
444 for (; j < PAGE_SIZE / sizeof(long); j++)
445 buf[j] = 0;
446 return pbe;
485} 447}
486 448
487
488/** 449/**
489 * write_pagedir - Write the array of pages holding the page directory. 450 * save_image_metadata - save the .orig_address fields of the PBEs
490 * @last: Last swap entry we write (needed for header). 451 * from the list @pblist using the swap map handle @handle
491 */ 452 */
492 453
493static int write_pagedir(void) 454static int save_image_metadata(struct pbe *pblist,
455 struct swap_map_handle *handle)
494{ 456{
495 int error = 0; 457 unsigned long *buf;
496 unsigned int n = 0; 458 unsigned int n = 0;
497 struct pbe *pbe; 459 struct pbe *p;
460 int error = 0;
498 461
499 printk( "Writing pagedir..."); 462 printk("Saving image metadata ... ");
500 for_each_pb_page (pbe, pagedir_nosave) { 463 buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
501 if ((error = write_page((unsigned long)pbe, &swsusp_info.pagedir[n++]))) 464 if (!buf)
502 return error; 465 return -ENOMEM;
466 p = pblist;
467 while (p) {
468 p = pack_orig_addresses(buf, p);
469 error = swap_map_write_page(handle, (unsigned long)buf);
470 if (error)
471 break;
472 n++;
503 } 473 }
504 474 free_page((unsigned long)buf);
505 swsusp_info.pagedir_pages = n; 475 if (!error)
506 printk("done (%u pages)\n", n); 476 printk("done (%u pages saved)\n", n);
507 return error; 477 return error;
508} 478}
509 479
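
The new .pages field counts everything that will travel through the swap map: the image data pages, the metadata pages carrying one orig_address per data page, and this header itself. A worked example under the assumption of a 32-bit build with 4 KiB pages (sizeof(long) = 4): for nr_pages = 25000, the metadata takes ceil(25000 * 4 / 4096) = 25 pages, so swsusp_info.pages = 25000 + 25 + 1 = 25026, which is the size swsusp_write() below hands to alloc_swap_map().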
@@ -511,75 +481,125 @@ static int write_pagedir(void)
511 * enough_swap - Make sure we have enough swap to save the image. 481 * enough_swap - Make sure we have enough swap to save the image.
512 * 482 *
513 * Returns TRUE or FALSE after checking the total amount of swap 483 * Returns TRUE or FALSE after checking the total amount of swap
514 * space available. 484 * space available from the resume partition.
515 *
516 * FIXME: si_swapinfo(&i) returns all swap devices information.
517 * We should only consider resume_device.
518 */ 485 */
519 486
520static int enough_swap(unsigned int nr_pages) 487static int enough_swap(unsigned int nr_pages)
521{ 488{
522 struct sysinfo i; 489 unsigned int free_swap = swap_info[root_swap].pages -
490 swap_info[root_swap].inuse_pages;
523 491
524 si_swapinfo(&i); 492 pr_debug("swsusp: free swap pages: %u\n", free_swap);
525 pr_debug("swsusp: available swap: %lu pages\n", i.freeswap); 493 return free_swap > (nr_pages + PAGES_FOR_IO +
526 return i.freeswap > (nr_pages + PAGES_FOR_IO +
527 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); 494 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
528} 495}
529 496
530/** 497/**
531 * write_suspend_image - Write entire image and metadata. 498 * swsusp_write - Write entire image and metadata.
532 * 499 *
500 * It is important _NOT_ to umount filesystems at this point. We want
501 * them synced (in case something goes wrong) but we DO not want to mark
502 * filesystem clean: it is not. (And it does not matter, if we resume
503 * correctly, we'll mark system clean, anyway.)
533 */ 504 */
534static int write_suspend_image(void) 505
506int swsusp_write(struct pbe *pblist, unsigned int nr_pages)
535{ 507{
508 struct swap_map_page *swap_map;
509 struct swap_map_handle handle;
510 swp_entry_t start;
536 int error; 511 int error;
537 512
538 if (!enough_swap(nr_copy_pages)) { 513 if ((error = swsusp_swap_check())) {
514 printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n");
515 return error;
516 }
517 if (!enough_swap(nr_pages)) {
539 printk(KERN_ERR "swsusp: Not enough free swap\n"); 518 printk(KERN_ERR "swsusp: Not enough free swap\n");
540 return -ENOSPC; 519 return -ENOSPC;
541 } 520 }
542 521
543 init_header(); 522 init_header(nr_pages);
544 if ((error = data_write())) 523 swap_map = alloc_swap_map(swsusp_info.pages);
545 goto FreeData; 524 if (!swap_map)
525 return -ENOMEM;
526 init_swap_map_handle(&handle, swap_map);
527
528 error = swap_map_write_page(&handle, (unsigned long)&swsusp_info);
529 if (!error)
530 error = save_image_metadata(pblist, &handle);
531 if (!error)
532 error = save_image_data(pblist, &handle, nr_pages);
533 if (error)
534 goto Free_image_entries;
546 535
547 if ((error = write_pagedir())) 536 swap_map = reverse_swap_map(swap_map);
548 goto FreePagedir; 537 error = save_swap_map(swap_map, &start);
538 if (error)
539 goto Free_map_entries;
549 540
550 if ((error = close_swap())) 541 dump_info();
551 goto FreePagedir; 542 printk( "S" );
552 Done: 543 error = mark_swapfiles(start);
553 memset(key_iv, 0, MAXKEY+MAXIV); 544 printk( "|\n" );
545 if (error)
546 goto Free_map_entries;
547
548Free_swap_map:
549 free_swap_map(swap_map);
554 return error; 550 return error;
555 FreePagedir: 551
556 free_pagedir_entries(); 552Free_map_entries:
557 FreeData: 553 free_swap_map_entries(swap_map);
558 data_free(); 554Free_image_entries:
559 goto Done; 555 free_image_entries(swap_map);
556 goto Free_swap_map;
560} 557}
561 558
562/* It is important _NOT_ to umount filesystems at this point. We want 559/**
563 * them synced (in case something goes wrong) but we DO not want to mark 560 * swsusp_shrink_memory - Try to free as much memory as needed
564 * filesystem clean: it is not. (And it does not matter, if we resume 561 *
565 * correctly, we'll mark system clean, anyway.) 562 * ... but do not OOM-kill anyone
563 *
564 * Notice: all userland should be stopped before it is called, or
565 * livelock is possible.
566 */ 566 */
567int swsusp_write(void)
568{
569 int error;
570 567
571 if ((error = swsusp_swap_check())) { 568#define SHRINK_BITE 10000
572 printk(KERN_ERR "swsusp: cannot find swap device, try swapon -a.\n");
573 return error;
574 }
575 lock_swapdevices();
576 error = write_suspend_image();
577 /* This will unlock ignored swap devices since writing is finished */
578 lock_swapdevices();
579 return error;
580}
581 569
570int swsusp_shrink_memory(void)
571{
572 long size, tmp;
573 struct zone *zone;
574 unsigned long pages = 0;
575 unsigned int i = 0;
576 char *p = "-\\|/";
577
578 printk("Shrinking memory... ");
579 do {
580 size = 2 * count_highmem_pages();
581 size += size / 50 + count_data_pages();
582 size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
583 PAGES_FOR_IO;
584 tmp = size;
585 for_each_zone (zone)
586 if (!is_highmem(zone))
587 tmp -= zone->free_pages;
588 if (tmp > 0) {
589 tmp = shrink_all_memory(SHRINK_BITE);
590 if (!tmp)
591 return -ENOMEM;
592 pages += tmp;
593 } else if (size > (image_size * 1024 * 1024) / PAGE_SIZE) {
594 tmp = shrink_all_memory(SHRINK_BITE);
595 pages += tmp;
596 }
597 printk("\b%c", p[i++%4]);
598 } while (tmp > 0);
599 printk("\bdone (%lu pages freed)\n", pages);
582 600
601 return 0;
602}
583 603
584int swsusp_suspend(void) 604int swsusp_suspend(void)
585{ 605{
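
A worked pass through the shrink loop, with assumed numbers: say count_highmem_pages() is 20000 and count_data_pages() is 100000. Then size starts at 40000, picks up 40000 / 50 = 800 pages of slack plus the 100000 data pages, and finally the PBE and PAGES_FOR_IO overhead. While free lowmem falls short of that total, each iteration bites off up to SHRINK_BITE (10000) pages and fails with -ENOMEM if a bite frees nothing; once the shortfall is gone, it keeps biting only while the projected image still exceeds image_size MB, which at the default 500 MB and 4 KiB pages means 128000 pages.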
@@ -677,7 +697,6 @@ static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
677 /* We assume both lists contain the same number of elements */ 697 /* We assume both lists contain the same number of elements */
678 while (src) { 698 while (src) {
679 dst->orig_address = src->orig_address; 699 dst->orig_address = src->orig_address;
680 dst->swap_address = src->swap_address;
681 dst = dst->next; 700 dst = dst->next;
682 src = src->next; 701 src = src->next;
683 } 702 }
@@ -757,198 +776,224 @@ static int bio_write_page(pgoff_t page_off, void *page)
757 return submit(WRITE, page_off, page); 776 return submit(WRITE, page_off, page);
758} 777}
759 778
760/* 779/**
761 * Sanity check if this image makes sense with this kernel/swap context 780 * The following functions allow us to read data using a swap map
762 * I really don't think that it's foolproof but more than nothing.. 781 * in a file-like way
763 */ 782 */
764 783
765static const char *sanity_check(void) 784static inline void release_swap_map_reader(struct swap_map_handle *handle)
766{ 785{
767 dump_info(); 786 if (handle->cur)
768 if (swsusp_info.version_code != LINUX_VERSION_CODE) 787 free_page((unsigned long)handle->cur);
769 return "kernel version"; 788 handle->cur = NULL;
770 if (swsusp_info.num_physpages != num_physpages)
771 return "memory size";
772 if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname))
773 return "system type";
774 if (strcmp(swsusp_info.uts.release,system_utsname.release))
775 return "kernel release";
776 if (strcmp(swsusp_info.uts.version,system_utsname.version))
777 return "version";
778 if (strcmp(swsusp_info.uts.machine,system_utsname.machine))
779 return "machine";
780#if 0
781 /* We can't use number of online CPUs when we use hotplug to remove them ;-))) */
782 if (swsusp_info.cpus != num_possible_cpus())
783 return "number of cpus";
784#endif
785 return NULL;
786} 789}
787 790
788 791static inline int get_swap_map_reader(struct swap_map_handle *handle,
789static int check_header(void) 792 swp_entry_t start)
790{ 793{
791 const char *reason = NULL;
792 int error; 794 int error;
793 795
794 if ((error = bio_read_page(swp_offset(swsusp_header.swsusp_info), &swsusp_info))) 796 if (!swp_offset(start))
797 return -EINVAL;
798 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
799 if (!handle->cur)
800 return -ENOMEM;
801 error = bio_read_page(swp_offset(start), handle->cur);
802 if (error) {
803 release_swap_map_reader(handle);
795 return error; 804 return error;
796
797 /* Is this same machine? */
798 if ((reason = sanity_check())) {
799 printk(KERN_ERR "swsusp: Resume mismatch: %s\n",reason);
800 return -EPERM;
801 } 805 }
802 nr_copy_pages = swsusp_info.image_pages; 806 handle->k = 0;
803 return error; 807 return 0;
 }
 
-static int check_sig(void)
+static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf)
 {
+	unsigned long offset;
 	int error;
 
-	memset(&swsusp_header, 0, sizeof(swsusp_header));
-	if ((error = bio_read_page(0, &swsusp_header)))
-		return error;
-	if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
-		memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
-		memcpy(key_iv, swsusp_header.key_iv, MAXKEY+MAXIV);
-		memset(swsusp_header.key_iv, 0, MAXKEY+MAXIV);
-
-		/*
-		 * Reset swap signature now.
-		 */
-		error = bio_write_page(0, &swsusp_header);
-	} else {
+	if (!handle->cur)
+		return -EINVAL;
+	offset = swp_offset(handle->cur->entries[handle->k]);
+	if (!offset)
 		return -EINVAL;
+	error = bio_read_page(offset, buf);
+	if (error)
+		return error;
+	if (++handle->k >= MAP_PAGE_SIZE) {
+		handle->k = 0;
+		offset = swp_offset(handle->cur->next_swap);
+		if (!offset)
+			release_swap_map_reader(handle);
+		else
+			error = bio_read_page(offset, handle->cur);
 	}
-	if (!error)
-		pr_debug("swsusp: Signature found, resuming\n");
 	return error;
 }
 
-/**
- *	data_read - Read image pages from swap.
- *
- *	You do not need to check for overlaps, check_pagedir()
- *	already did that.
- */
-
-static int data_read(struct pbe *pblist)
+static int check_header(void)
 {
-	struct pbe *p;
-	int error = 0;
-	int i = 0;
-	int mod = swsusp_info.image_pages / 100;
-	void *tfm;
-
-	if ((error = crypto_init(0, &tfm)))
-		return error;
-
-	if (!mod)
-		mod = 1;
-
-	printk("swsusp: Reading image data (%lu pages): ",
-			swsusp_info.image_pages);
-
-	for_each_pbe (p, pblist) {
-		if (!(i % mod))
-			printk("\b\b\b\b%3d%%", i / mod);
+	char *reason = NULL;
 
-		if ((error = crypto_read(p, tfm))) {
-			crypto_exit(tfm);
-			return error;
-		}
-
-		i++;
+	dump_info();
+	if (swsusp_info.version_code != LINUX_VERSION_CODE)
+		reason = "kernel version";
+	if (swsusp_info.num_physpages != num_physpages)
+		reason = "memory size";
+	if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname))
+		reason = "system type";
+	if (strcmp(swsusp_info.uts.release,system_utsname.release))
+		reason = "kernel release";
+	if (strcmp(swsusp_info.uts.version,system_utsname.version))
+		reason = "version";
+	if (strcmp(swsusp_info.uts.machine,system_utsname.machine))
+		reason = "machine";
+	if (reason) {
+		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
+		return -EPERM;
 	}
-	printk("\b\b\b\bdone\n");
-	crypto_exit(tfm);
-	return error;
+	return 0;
 }
 
 /**
- *	read_pagedir - Read page backup list pages from swap
+ *	load_image_data - load the image data using the swap map handle
+ *	@handle and store them using the page backup list @pblist
+ *	(assume there are @nr_pages pages to load)
  */
 
-static int read_pagedir(struct pbe *pblist)
+static int load_image_data(struct pbe *pblist,
+			   struct swap_map_handle *handle,
+			   unsigned int nr_pages)
 {
-	struct pbe *pbpage, *p;
-	unsigned int i = 0;
 	int error;
+	unsigned int m;
+	struct pbe *p;
 
 	if (!pblist)
-		return -EFAULT;
-
-	printk("swsusp: Reading pagedir (%lu pages)\n",
-			swsusp_info.pagedir_pages);
-
-	for_each_pb_page (pbpage, pblist) {
-		unsigned long offset = swp_offset(swsusp_info.pagedir[i++]);
-
-		error = -EFAULT;
-		if (offset) {
-			p = (pbpage + PB_PAGE_SKIP)->next;
-			error = bio_read_page(offset, (void *)pbpage);
-			(pbpage + PB_PAGE_SKIP)->next = p;
-		}
+		return -EINVAL;
+	printk("Loading image data pages (%u pages) ... ", nr_pages);
+	m = nr_pages / 100;
+	if (!m)
+		m = 1;
+	nr_pages = 0;
+	p = pblist;
+	while (p) {
+		error = swap_map_read_page(handle, (void *)p->address);
 		if (error)
 			break;
+		p = p->next;
+		if (!(nr_pages % m))
+			printk("\b\b\b\b%3d%%", nr_pages / m);
+		nr_pages++;
 	}
-
 	if (!error)
-		BUG_ON(i != swsusp_info.pagedir_pages);
-
+		printk("\b\b\b\bdone\n");
 	return error;
 }
 
+/**
+ *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
+ *	the PBEs in the list starting at @pbe
+ */
 
-static int check_suspend_image(void)
+static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
+						struct pbe *pbe)
 {
-	int error = 0;
+	int j;
 
-	if ((error = check_sig()))
-		return error;
-
-	if ((error = check_header()))
-		return error;
-
-	return 0;
+	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
+		pbe->orig_address = buf[j];
+		pbe = pbe->next;
+	}
+	return pbe;
 }
 
-static int read_suspend_image(void)
+/**
+ *	load_image_metadata - load the image metadata using the swap map
+ *	handle @handle and put them into the PBEs in the list @pblist
+ */
+
+static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle)
 {
-	int error = 0;
 	struct pbe *p;
+	unsigned long *buf;
+	unsigned int n = 0;
+	int error = 0;
 
-	if (!(p = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 0)))
+	printk("Loading image metadata ... ");
+	buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
+	if (!buf)
 		return -ENOMEM;
-
-	if ((error = read_pagedir(p)))
-		return error;
-	create_pbe_list(p, nr_copy_pages);
-	mark_unsafe_pages(p);
-	pagedir_nosave = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
-	if (pagedir_nosave) {
-		create_pbe_list(pagedir_nosave, nr_copy_pages);
-		copy_page_backup_list(pagedir_nosave, p);
+	p = pblist;
+	while (p) {
+		error = swap_map_read_page(handle, buf);
+		if (error)
+			break;
+		p = unpack_orig_addresses(buf, p);
+		n++;
 	}
-	free_pagedir(p);
-	if (!pagedir_nosave)
-		return -ENOMEM;
+	free_page((unsigned long)buf);
+	if (!error)
+		printk("done (%u pages loaded)\n", n);
+	return error;
+}
 
-	/* Allocate memory for the image and read the data from swap */
+int swsusp_read(struct pbe **pblist_ptr)
+{
+	int error;
+	struct pbe *p, *pblist;
+	struct swap_map_handle handle;
+	unsigned int nr_pages;
 
-	error = alloc_data_pages(pagedir_nosave, GFP_ATOMIC, 1);
+	if (IS_ERR(resume_bdev)) {
+		pr_debug("swsusp: block device not initialised\n");
+		return PTR_ERR(resume_bdev);
+	}
 
+	error = get_swap_map_reader(&handle, swsusp_header.image);
 	if (!error)
-		error = data_read(pagedir_nosave);
+		error = swap_map_read_page(&handle, &swsusp_info);
+	if (!error)
+		error = check_header();
+	if (error)
+		return error;
+	nr_pages = swsusp_info.image_pages;
+	p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0);
+	if (!p)
+		return -ENOMEM;
+	error = load_image_metadata(p, &handle);
+	if (!error) {
+		mark_unsafe_pages(p);
+		pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
+		if (pblist)
+			copy_page_backup_list(pblist, p);
+		free_pagedir(p);
+		if (!pblist)
+			error = -ENOMEM;
+
+		/* Allocate memory for the image and read the data from swap */
+		if (!error)
+			error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
+		if (!error) {
+			release_eaten_pages();
+			error = load_image_data(pblist, &handle, nr_pages);
+		}
+		if (!error)
+			*pblist_ptr = pblist;
+	}
+	release_swap_map_reader(&handle);
 
+	blkdev_put(resume_bdev);
+
+	if (!error)
+		pr_debug("swsusp: Reading resume file was successful\n");
+	else
+		pr_debug("swsusp: Error %d resuming\n", error);
 	return error;
 }
 
 /**
- *	swsusp_check - Check for saved image in swap
+ *	swsusp_check - Check for swsusp signature in the resume device
  */
 
 int swsusp_check(void)
@@ -958,40 +1003,27 @@ int swsusp_check(void)
 	resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
 	if (!IS_ERR(resume_bdev)) {
 		set_blocksize(resume_bdev, PAGE_SIZE);
-		error = check_suspend_image();
+		memset(&swsusp_header, 0, sizeof(swsusp_header));
+		if ((error = bio_read_page(0, &swsusp_header)))
+			return error;
+		if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
+			memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
+			/* Reset swap signature now */
+			error = bio_write_page(0, &swsusp_header);
+		} else {
+			return -EINVAL;
+		}
 		if (error)
 			blkdev_put(resume_bdev);
-	} else
+		else
+			pr_debug("swsusp: Signature found, resuming\n");
+	} else {
 		error = PTR_ERR(resume_bdev);
-
-	if (!error)
-		pr_debug("swsusp: resume file found\n");
-	else
-		pr_debug("swsusp: Error %d check for resume file\n", error);
-	return error;
-}
-
-/**
- *	swsusp_read - Read saved image from swap.
- */
-
-int swsusp_read(void)
-{
-	int error;
-
-	if (IS_ERR(resume_bdev)) {
-		pr_debug("swsusp: block device not initialised\n");
-		return PTR_ERR(resume_bdev);
 	}
 
-	error = read_suspend_image();
-	blkdev_put(resume_bdev);
-	memset(key_iv, 0, MAXKEY+MAXIV);
+	if (error)
+		pr_debug("swsusp: Error %d check for resume file\n", error);
 
-	if (!error)
-		pr_debug("swsusp: Reading resume file was successful\n");
-	else
-		pr_debug("swsusp: Error %d resuming\n", error);
 	return error;
 }
 
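The swap-map reader above replaces the old fixed pagedir: image pages are located through a chain of map pages, each holding a batch of swap offsets plus the offset of the next map page. Below is a minimal userspace model of that walk, assuming a toy page size, a toy MAP_PAGE_SIZE, and an in-memory array standing in for the swap device; bio_read_page() is reduced to a memcpy and swp_offset() disappears because the entries here are stored as plain offsets. None of these simplifications are the kernel's definitions.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	64	/* toy page size, large enough to hold a map page */
#define MAP_PAGE_SIZE	4	/* toy number of entries per map page */

struct swap_map_page {
	unsigned long entries[MAP_PAGE_SIZE];	/* swap offsets of data pages */
	unsigned long next_swap;		/* offset of next map page, 0 = end */
};

static unsigned char swap_dev[32][PAGE_SIZE];	/* simulated swap device */

static int bio_read_page(unsigned long offset, void *buf)
{
	memcpy(buf, swap_dev[offset], PAGE_SIZE);	/* stands in for a real bio */
	return 0;
}

struct swap_map_handle {
	union {
		struct swap_map_page map;
		unsigned char raw[PAGE_SIZE];
	} cur;					/* current map page */
	unsigned int k;				/* cursor within it */
};

/* Same shape as the kernel's swap_map_read_page(): read the data page the
 * cursor points at, then advance, following next_swap at a map-page edge. */
static int swap_map_read_page(struct swap_map_handle *h, void *buf)
{
	unsigned long offset = h->cur.map.entries[h->k];

	if (!offset)
		return -1;
	bio_read_page(offset, buf);
	if (++h->k >= MAP_PAGE_SIZE) {
		h->k = 0;
		if (h->cur.map.next_swap)
			bio_read_page(h->cur.map.next_swap, h->cur.raw);
		else
			memset(&h->cur, 0, sizeof(h->cur));
	}
	return 0;
}

int main(void)
{
	struct swap_map_handle h = { .k = 0 };
	struct swap_map_page m1 = { { 1, 2, 3, 4 }, 11 };
	struct swap_map_page m2 = { { 5, 6, 0, 0 }, 0 };
	unsigned char buf[PAGE_SIZE];
	int i;

	for (i = 1; i <= 6; i++)		/* six "image" pages */
		snprintf((char *)swap_dev[i], PAGE_SIZE, "image page %d", i);
	memcpy(swap_dev[10], &m1, sizeof(m1));	/* map page chain: 10 -> 11 */
	memcpy(swap_dev[11], &m2, sizeof(m2));

	bio_read_page(10, h.cur.raw);		/* get_swap_map_reader() analogue */
	while (swap_map_read_page(&h, buf) == 0)
		printf("read: %s\n", (char *)buf);
	return 0;
}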
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 345f4a1d533f..a85047bb5739 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -108,7 +108,7 @@ extern int pwrsw_enabled;
108extern int unaligned_enabled; 108extern int unaligned_enabled;
109#endif 109#endif
110 110
111#ifdef CONFIG_ARCH_S390 111#ifdef CONFIG_S390
112#ifdef CONFIG_MATHEMU 112#ifdef CONFIG_MATHEMU
113extern int sysctl_ieee_emulation_warnings; 113extern int sysctl_ieee_emulation_warnings;
114#endif 114#endif
@@ -542,7 +542,7 @@ static ctl_table kern_table[] = {
542 .extra1 = &minolduid, 542 .extra1 = &minolduid,
543 .extra2 = &maxolduid, 543 .extra2 = &maxolduid,
544 }, 544 },
545#ifdef CONFIG_ARCH_S390 545#ifdef CONFIG_S390
546#ifdef CONFIG_MATHEMU 546#ifdef CONFIG_MATHEMU
547 { 547 {
548 .ctl_name = KERN_IEEE_EMULATION_WARNINGS, 548 .ctl_name = KERN_IEEE_EMULATION_WARNINGS,
@@ -644,7 +644,7 @@ static ctl_table kern_table[] = {
644 .mode = 0644, 644 .mode = 0644,
645 .proc_handler = &proc_dointvec, 645 .proc_handler = &proc_dointvec,
646 }, 646 },
647#if defined(CONFIG_ARCH_S390) 647#if defined(CONFIG_S390)
648 { 648 {
649 .ctl_name = KERN_SPIN_RETRY, 649 .ctl_name = KERN_SPIN_RETRY,
650 .procname = "spin_retry", 650 .procname = "spin_retry",
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 156822e3cc79..80598cfd728c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -32,7 +32,7 @@ config MAGIC_SYSRQ
32config LOG_BUF_SHIFT 32config LOG_BUF_SHIFT
33 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL 33 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
34 range 12 21 34 range 12 21
35 default 17 if ARCH_S390 35 default 17 if S390
36 default 16 if X86_NUMAQ || IA64 36 default 16 if X86_NUMAQ || IA64
37 default 15 if SMP 37 default 15 if SMP
38 default 14 38 default 14
@@ -172,7 +172,8 @@ config DEBUG_VM
172 bool "Debug VM" 172 bool "Debug VM"
173 depends on DEBUG_KERNEL 173 depends on DEBUG_KERNEL
174 help 174 help
175 Enable this to debug the virtual-memory system. 175 Enable this to turn on extended checks in the virtual-memory system
176 that may impact performance.
176 177
177 If unsure, say N. 178 If unsure, say N.
178 179
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1ff8dcebf7c6..3b482052f403 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -142,8 +142,7 @@ swiotlb_init_with_default_size (size_t default_size)
142 /* 142 /*
143 * Get IO TLB memory from the low pages 143 * Get IO TLB memory from the low pages
144 */ 144 */
145 io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs * 145 io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
146 (1 << IO_TLB_SHIFT), 0x100000000);
147 if (!io_tlb_start) 146 if (!io_tlb_start)
148 panic("Cannot allocate SWIOTLB buffer"); 147 panic("Cannot allocate SWIOTLB buffer");
149 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 148 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
diff --git a/mm/Kconfig b/mm/Kconfig
index 21eb51d4da8f..b3db11f137e0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -11,7 +11,7 @@ choice
11 11
12config FLATMEM_MANUAL 12config FLATMEM_MANUAL
13 bool "Flat Memory" 13 bool "Flat Memory"
14 depends on !ARCH_DISCONTIGMEM_ENABLE || ARCH_FLATMEM_ENABLE 14 depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
15 help 15 help
16 This option allows you to change some of the ways that 16 This option allows you to change some of the ways that
17 Linux manages its memory internally. Most users will 17 Linux manages its memory internally. Most users will
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 16b9465eb4eb..35c32290f717 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -296,20 +296,12 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
296 unsigned long v = ~map[i / BITS_PER_LONG]; 296 unsigned long v = ~map[i / BITS_PER_LONG];
297 297
298 if (gofast && v == ~0UL) { 298 if (gofast && v == ~0UL) {
299 int j, order; 299 int order;
300 300
301 page = pfn_to_page(pfn); 301 page = pfn_to_page(pfn);
302 count += BITS_PER_LONG; 302 count += BITS_PER_LONG;
303 __ClearPageReserved(page);
304 order = ffs(BITS_PER_LONG) - 1; 303 order = ffs(BITS_PER_LONG) - 1;
305 set_page_refs(page, order); 304 __free_pages_bootmem(page, order);
306 for (j = 1; j < BITS_PER_LONG; j++) {
307 if (j + 16 < BITS_PER_LONG)
308 prefetchw(page + j + 16);
309 __ClearPageReserved(page + j);
310 set_page_count(page + j, 0);
311 }
312 __free_pages(page, order);
313 i += BITS_PER_LONG; 305 i += BITS_PER_LONG;
314 page += BITS_PER_LONG; 306 page += BITS_PER_LONG;
315 } else if (v) { 307 } else if (v) {
@@ -319,9 +311,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
319 for (m = 1; m && i < idx; m<<=1, page++, i++) { 311 for (m = 1; m && i < idx; m<<=1, page++, i++) {
320 if (v & m) { 312 if (v & m) {
321 count++; 313 count++;
322 __ClearPageReserved(page); 314 __free_pages_bootmem(page, 0);
323 set_page_refs(page, 0);
324 __free_page(page);
325 } 315 }
326 } 316 }
327 } else { 317 } else {
@@ -339,9 +329,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
339 count = 0; 329 count = 0;
340 for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) { 330 for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
341 count++; 331 count++;
342 __ClearPageReserved(page); 332 __free_pages_bootmem(page, 0);
343 set_page_count(page, 1);
344 __free_page(page);
345 } 333 }
346 total += count; 334 total += count;
347 bdata->node_bootmem_map = NULL; 335 bdata->node_bootmem_map = NULL;
@@ -393,15 +381,14 @@ unsigned long __init free_all_bootmem (void)
393 return(free_all_bootmem_core(NODE_DATA(0))); 381 return(free_all_bootmem_core(NODE_DATA(0)));
394} 382}
395 383
396void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, 384void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
397 unsigned long limit)
398{ 385{
399 pg_data_t *pgdat = pgdat_list; 386 pg_data_t *pgdat = pgdat_list;
400 void *ptr; 387 void *ptr;
401 388
402 for_each_pgdat(pgdat) 389 for_each_pgdat(pgdat)
403 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, 390 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
404 align, goal, limit))) 391 align, goal, 0)))
405 return(ptr); 392 return(ptr);
406 393
407 /* 394 /*
@@ -413,15 +400,40 @@ void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, un
413} 400}
414 401
415 402
416void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, 403void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align,
417 unsigned long goal, unsigned long limit) 404 unsigned long goal)
418{ 405{
419 void *ptr; 406 void *ptr;
420 407
421 ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit); 408 ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
422 if (ptr) 409 if (ptr)
423 return (ptr); 410 return (ptr);
424 411
425 return __alloc_bootmem_limit(size, align, goal, limit); 412 return __alloc_bootmem(size, align, goal);
413}
414
415#define LOW32LIMIT 0xffffffff
416
417void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
418{
419 pg_data_t *pgdat = pgdat_list;
420 void *ptr;
421
422 for_each_pgdat(pgdat)
423 if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
424 align, goal, LOW32LIMIT)))
425 return(ptr);
426
427 /*
428 * Whoops, we cannot satisfy the allocation request.
429 */
430 printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
431 panic("Out of low memory");
432 return NULL;
426} 433}
427 434
435void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
436 unsigned long align, unsigned long goal)
437{
438 return __alloc_bootmem_core(pgdat->bdata, size, align, goal, LOW32LIMIT);
439}
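The bootmem rework funnels every release through __free_pages_bootmem(), so a fully clear word in the bootmem bitmap now reaches the buddy allocator as a single high-order free instead of BITS_PER_LONG single-page frees. A minimal sketch of that scan follows, with the buddy allocator reduced to a counter; only the bitmap-walking logic mirrors free_all_bootmem_core(), everything else is an assumption for illustration.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BITS_PER_LONG	(8 * (int)sizeof(long))

static unsigned long freed;

static void __free_pages_bootmem(unsigned long pfn, unsigned int order)
{
	freed += 1UL << order;	/* stand-in for handing pages to the buddy */
}

static void free_all(const unsigned long *map, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];	/* set bit = free page */

		if (v == ~0UL) {
			/* whole word free: one order-N free, as in the fast path */
			__free_pages_bootmem(i, ffs(BITS_PER_LONG) - 1);
			i += BITS_PER_LONG;
		} else if (v) {
			/* partially free word: release page by page */
			unsigned long m, j;

			for (m = 1, j = i; m && j < npages; m <<= 1, j++)
				if (v & m)
					__free_pages_bootmem(j, 0);
			i += BITS_PER_LONG;
		} else
			i += BITS_PER_LONG;	/* fully reserved word */
	}
}

int main(void)
{
	unsigned long map[2] = { 0, 0xf0f0f0f0UL };	/* word 0 free, word 1 mixed */

	free_all(map, 2 * BITS_PER_LONG);
	printf("released %lu pages to the buddy allocator\n", freed);
	return 0;
}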
diff --git a/mm/filemap.c b/mm/filemap.c
index 6e1d08a2b8b9..4ef24a397684 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -555,11 +555,12 @@ repeat:
555 page_cache_get(page); 555 page_cache_get(page);
556 if (TestSetPageLocked(page)) { 556 if (TestSetPageLocked(page)) {
557 read_unlock_irq(&mapping->tree_lock); 557 read_unlock_irq(&mapping->tree_lock);
558 lock_page(page); 558 __lock_page(page);
559 read_lock_irq(&mapping->tree_lock); 559 read_lock_irq(&mapping->tree_lock);
560 560
561 /* Has the page been truncated while we slept? */ 561 /* Has the page been truncated while we slept? */
562 if (page->mapping != mapping || page->index != offset) { 562 if (unlikely(page->mapping != mapping ||
563 page->index != offset)) {
563 unlock_page(page); 564 unlock_page(page);
564 page_cache_release(page); 565 page_cache_release(page);
565 goto repeat; 566 goto repeat;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3e52df7c471b..f4c43d7980ba 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -11,6 +11,8 @@
11#include <linux/highmem.h> 11#include <linux/highmem.h>
12#include <linux/nodemask.h> 12#include <linux/nodemask.h>
13#include <linux/pagemap.h> 13#include <linux/pagemap.h>
14#include <linux/mempolicy.h>
15
14#include <asm/page.h> 16#include <asm/page.h>
15#include <asm/pgtable.h> 17#include <asm/pgtable.h>
16 18
@@ -36,18 +38,21 @@ static void enqueue_huge_page(struct page *page)
 	free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(void)
+static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+				unsigned long address)
 {
 	int nid = numa_node_id();
 	struct page *page = NULL;
+	struct zonelist *zonelist = huge_zonelist(vma, address);
+	struct zone **z;
 
-	if (list_empty(&hugepage_freelists[nid])) {
-		for (nid = 0; nid < MAX_NUMNODES; ++nid)
-			if (!list_empty(&hugepage_freelists[nid]))
-				break;
+	for (z = zonelist->zones; *z; z++) {
+		nid = (*z)->zone_pgdat->node_id;
+		if (!list_empty(&hugepage_freelists[nid]))
+			break;
 	}
-	if (nid >= 0 && nid < MAX_NUMNODES &&
-	    !list_empty(&hugepage_freelists[nid])) {
+
+	if (*z) {
 		page = list_entry(hugepage_freelists[nid].next,
 				  struct page, lru);
 		list_del(&page->lru);
@@ -85,13 +90,13 @@ void free_huge_page(struct page *page)
85 spin_unlock(&hugetlb_lock); 90 spin_unlock(&hugetlb_lock);
86} 91}
87 92
88struct page *alloc_huge_page(void) 93struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
89{ 94{
90 struct page *page; 95 struct page *page;
91 int i; 96 int i;
92 97
93 spin_lock(&hugetlb_lock); 98 spin_lock(&hugetlb_lock);
94 page = dequeue_huge_page(); 99 page = dequeue_huge_page(vma, addr);
95 if (!page) { 100 if (!page) {
96 spin_unlock(&hugetlb_lock); 101 spin_unlock(&hugetlb_lock);
97 return NULL; 102 return NULL;
@@ -194,7 +199,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
194 spin_lock(&hugetlb_lock); 199 spin_lock(&hugetlb_lock);
195 try_to_free_low(count); 200 try_to_free_low(count);
196 while (count < nr_huge_pages) { 201 while (count < nr_huge_pages) {
197 struct page *page = dequeue_huge_page(); 202 struct page *page = dequeue_huge_page(NULL, 0);
198 if (!page) 203 if (!page)
199 break; 204 break;
200 update_and_free_page(page); 205 update_and_free_page(page);
@@ -261,11 +266,12 @@ struct vm_operations_struct hugetlb_vm_ops = {
261 .nopage = hugetlb_nopage, 266 .nopage = hugetlb_nopage,
262}; 267};
263 268
264static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page) 269static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
270 int writable)
265{ 271{
266 pte_t entry; 272 pte_t entry;
267 273
268 if (vma->vm_flags & VM_WRITE) { 274 if (writable) {
269 entry = 275 entry =
270 pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 276 pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
271 } else { 277 } else {
@@ -277,12 +283,27 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page)
277 return entry; 283 return entry;
278} 284}
279 285
286static void set_huge_ptep_writable(struct vm_area_struct *vma,
287 unsigned long address, pte_t *ptep)
288{
289 pte_t entry;
290
291 entry = pte_mkwrite(pte_mkdirty(*ptep));
292 ptep_set_access_flags(vma, address, ptep, entry, 1);
293 update_mmu_cache(vma, address, entry);
294 lazy_mmu_prot_update(entry);
295}
296
297
280int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 298int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
281 struct vm_area_struct *vma) 299 struct vm_area_struct *vma)
282{ 300{
283 pte_t *src_pte, *dst_pte, entry; 301 pte_t *src_pte, *dst_pte, entry;
284 struct page *ptepage; 302 struct page *ptepage;
285 unsigned long addr; 303 unsigned long addr;
304 int cow;
305
306 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
286 307
287 for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { 308 for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
288 src_pte = huge_pte_offset(src, addr); 309 src_pte = huge_pte_offset(src, addr);
@@ -294,6 +315,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
294 spin_lock(&dst->page_table_lock); 315 spin_lock(&dst->page_table_lock);
295 spin_lock(&src->page_table_lock); 316 spin_lock(&src->page_table_lock);
296 if (!pte_none(*src_pte)) { 317 if (!pte_none(*src_pte)) {
318 if (cow)
319 ptep_set_wrprotect(src, addr, src_pte);
297 entry = *src_pte; 320 entry = *src_pte;
298 ptepage = pte_page(entry); 321 ptepage = pte_page(entry);
299 get_page(ptepage); 322 get_page(ptepage);
@@ -345,57 +368,63 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	flush_tlb_range(vma, start, end);
 }
 
-static struct page *find_lock_huge_page(struct address_space *mapping,
-					unsigned long idx)
+static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep, pte_t pte)
 {
-	struct page *page;
-	int err;
-	struct inode *inode = mapping->host;
-	unsigned long size;
+	struct page *old_page, *new_page;
+	int i, avoidcopy;
 
-retry:
-	page = find_lock_page(mapping, idx);
-	if (page)
-		goto out;
+	old_page = pte_page(pte);
 
-	/* Check to make sure the mapping hasn't been truncated */
-	size = i_size_read(inode) >> HPAGE_SHIFT;
-	if (idx >= size)
-		goto out;
+	/* If no-one else is actually using this page, avoid the copy
+	 * and just make the page writable */
+	avoidcopy = (page_count(old_page) == 1);
+	if (avoidcopy) {
+		set_huge_ptep_writable(vma, address, ptep);
+		return VM_FAULT_MINOR;
+	}
 
-	if (hugetlb_get_quota(mapping))
-		goto out;
-	page = alloc_huge_page();
-	if (!page) {
-		hugetlb_put_quota(mapping);
-		goto out;
+	page_cache_get(old_page);
+	new_page = alloc_huge_page(vma, address);
+
+	if (!new_page) {
+		page_cache_release(old_page);
+
+		/* Logically this is OOM, not a SIGBUS, but an OOM
+		 * could cause the kernel to go killing other
+		 * processes which won't help the hugepage situation
+		 * at all (?) */
+		return VM_FAULT_SIGBUS;
 	}
 
-	err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
-	if (err) {
-		put_page(page);
-		hugetlb_put_quota(mapping);
-		if (err == -EEXIST)
-			goto retry;
-		page = NULL;
+	spin_unlock(&mm->page_table_lock);
+	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
+		copy_user_highpage(new_page + i, old_page + i,
+				   address + i*PAGE_SIZE);
+	spin_lock(&mm->page_table_lock);
+
+	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
+	if (likely(pte_same(*ptep, pte))) {
+		/* Break COW */
+		set_huge_pte_at(mm, address, ptep,
+				make_huge_pte(vma, new_page, 1));
+		/* Make the old page be freed below */
+		new_page = old_page;
 	}
-out:
-	return page;
+	page_cache_release(new_page);
+	page_cache_release(old_page);
+	return VM_FAULT_MINOR;
 }
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep, int write_access)
 {
 	int ret = VM_FAULT_SIGBUS;
 	unsigned long idx;
 	unsigned long size;
-	pte_t *pte;
 	struct page *page;
 	struct address_space *mapping;
-
-	pte = huge_pte_alloc(mm, address);
-	if (!pte)
-		goto out;
+	pte_t new_pte;
 
 	mapping = vma->vm_file->f_mapping;
 	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
@@ -405,9 +434,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Use page lock to guard against racing truncation
 	 * before we get page_table_lock.
 	 */
-	page = find_lock_huge_page(mapping, idx);
-	if (!page)
-		goto out;
+retry:
+	page = find_lock_page(mapping, idx);
+	if (!page) {
+		if (hugetlb_get_quota(mapping))
+			goto out;
+		page = alloc_huge_page(vma, address);
+		if (!page) {
+			hugetlb_put_quota(mapping);
+			goto out;
+		}
+
+		if (vma->vm_flags & VM_SHARED) {
+			int err;
+
+			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+			if (err) {
+				put_page(page);
+				hugetlb_put_quota(mapping);
+				if (err == -EEXIST)
+					goto retry;
+				goto out;
+			}
+		} else
+			lock_page(page);
+	}
 
 	spin_lock(&mm->page_table_lock);
 	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
@@ -415,11 +466,19 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto backout;
 
 	ret = VM_FAULT_MINOR;
-	if (!pte_none(*pte))
+	if (!pte_none(*ptep))
 		goto backout;
 
 	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
-	set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
+				&& (vma->vm_flags & VM_SHARED)));
+	set_huge_pte_at(mm, address, ptep, new_pte);
+
+	if (write_access && !(vma->vm_flags & VM_SHARED)) {
+		/* Optimization, do the COW without a second fault */
+		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
+	}
+
 	spin_unlock(&mm->page_table_lock);
 	unlock_page(page);
 out:
@@ -433,6 +492,33 @@ backout:
 	goto out;
 }
 
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	pte_t *ptep;
+	pte_t entry;
+	int ret;
+
+	ptep = huge_pte_alloc(mm, address);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	entry = *ptep;
+	if (pte_none(entry))
+		return hugetlb_no_page(mm, vma, address, ptep, write_access);
+
+	ret = VM_FAULT_MINOR;
+
+	spin_lock(&mm->page_table_lock);
+	/* Check for a racing update before calling hugetlb_cow */
+	if (likely(pte_same(entry, *ptep)))
+		if (write_access && !pte_write(entry))
+			ret = hugetlb_cow(mm, vma, address, ptep, entry);
+	spin_unlock(&mm->page_table_lock);
+
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
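The hugetlb changes above make MAP_PRIVATE hugetlbfs mappings writable: fork() write-protects both sides and hugetlb_cow() breaks the sharing on the next write. A hedged userspace sketch of that behaviour follows; the /mnt/huge mount point, the availability of reserved huge pages, and the 2 MB huge page size are assumptions for illustration, not anything the patch guarantees.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* typical x86 huge page; arch-dependent */

int main(void)
{
	int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0) {
		perror("open (is hugetlbfs mounted at /mnt/huge?)");
		return 1;
	}

	p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 'A';		/* first touch: hugetlb_no_page() instantiates a page */

	if (fork() == 0) {
		p[0] = 'B';	/* write after fork: hugetlb_cow() copies the page */
		_exit(0);
	}
	wait(NULL);

	/* COW isolated the child's write, so the parent still sees 'A'. */
	printf("parent still sees '%c'\n", p[0]);
	return 0;
}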
diff --git a/mm/internal.h b/mm/internal.h
index 6bf134e8fb3d..17256bb2f4ef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -9,5 +9,22 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12/* page_alloc.c */ 12static inline void set_page_refs(struct page *page, int order)
13extern void set_page_refs(struct page *page, int order); 13{
14#ifdef CONFIG_MMU
15 set_page_count(page, 1);
16#else
17 int i;
18
19 /*
20 * We need to reference all the pages for this order, otherwise if
21 * anyone accesses one of the pages with (get/put) it will be freed.
22 * - eg: access_process_vm()
23 */
24 for (i = 0; i < (1 << order); i++)
25 set_page_count(page + i, 1);
26#endif /* CONFIG_MMU */
27}
28
29extern void fastcall __init __free_pages_bootmem(struct page *page,
30 unsigned int order);
diff --git a/mm/madvise.c b/mm/madvise.c
index 2b7cf0400a21..ae0ae3ea299a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -140,6 +140,36 @@ static long madvise_dontneed(struct vm_area_struct * vma,
140 return 0; 140 return 0;
141} 141}
142 142
143/*
144 * Application wants to free up the pages and associated backing store.
145 * This is effectively punching a hole into the middle of a file.
146 *
147 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
148 * Other filesystems return -ENOSYS.
149 */
150static long madvise_remove(struct vm_area_struct *vma,
151 unsigned long start, unsigned long end)
152{
153 struct address_space *mapping;
154 loff_t offset, endoff;
155
156 if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
157 return -EINVAL;
158
159 if (!vma->vm_file || !vma->vm_file->f_mapping
160 || !vma->vm_file->f_mapping->host) {
161 return -EINVAL;
162 }
163
164 mapping = vma->vm_file->f_mapping;
165
166 offset = (loff_t)(start - vma->vm_start)
167 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
168 endoff = (loff_t)(end - vma->vm_start - 1)
169 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
170 return vmtruncate_range(mapping->host, offset, endoff);
171}
172
143static long 173static long
144madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, 174madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
145 unsigned long start, unsigned long end, int behavior) 175 unsigned long start, unsigned long end, int behavior)
@@ -152,6 +182,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
152 case MADV_RANDOM: 182 case MADV_RANDOM:
153 error = madvise_behavior(vma, prev, start, end, behavior); 183 error = madvise_behavior(vma, prev, start, end, behavior);
154 break; 184 break;
185 case MADV_REMOVE:
186 error = madvise_remove(vma, start, end);
187 break;
155 188
156 case MADV_WILLNEED: 189 case MADV_WILLNEED:
157 error = madvise_willneed(vma, prev, start, end); 190 error = madvise_willneed(vma, prev, start, end);
@@ -190,6 +223,8 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
190 * some pages ahead. 223 * some pages ahead.
191 * MADV_DONTNEED - the application is finished with the given range, 224 * MADV_DONTNEED - the application is finished with the given range,
192 * so the kernel can free resources associated with it. 225 * so the kernel can free resources associated with it.
226 * MADV_REMOVE - the application wants to free up the given range of
227 * pages and associated backing store.
193 * 228 *
194 * return values: 229 * return values:
195 * zero - success 230 * zero - success
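A hedged sketch of the new MADV_REMOVE from userspace: punching a hole in a tmpfs-backed shared mapping, the only backing store vmtruncate_range() handles at this point. The MADV_REMOVE value and the /dev/shm path are assumptions for illustration; on filesystems without truncate_range the call should fail with ENOSYS.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#ifndef MADV_REMOVE
#define MADV_REMOVE 9	/* assumption: value not yet in installed headers */
#endif

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/shm/madv-demo", O_CREAT | O_RDWR | O_TRUNC, 0600);
	char *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 4 * psz)) {
		perror("ftruncate");
		return 1;
	}

	p = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 'x', 4 * psz);	/* allocate the tmpfs backing store */

	/* Punch a hole over pages 1-2: frees the pages and their blocks. */
	if (madvise(p + psz, 2 * psz, MADV_REMOVE))
		perror("madvise(MADV_REMOVE)");

	/* The hole reads back zero-filled, like a fresh sparse region. */
	printf("page 1 now reads back as %d\n", p[psz]);
	unlink("/dev/shm/madv-demo");
	return 0;
}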
diff --git a/mm/memory.c b/mm/memory.c
index d8dde07a3656..7197f9bcd384 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1498,7 +1498,7 @@ gotten:
1498 update_mmu_cache(vma, address, entry); 1498 update_mmu_cache(vma, address, entry);
1499 lazy_mmu_prot_update(entry); 1499 lazy_mmu_prot_update(entry);
1500 lru_cache_add_active(new_page); 1500 lru_cache_add_active(new_page);
1501 page_add_anon_rmap(new_page, vma, address); 1501 page_add_new_anon_rmap(new_page, vma, address);
1502 1502
1503 /* Free the old page.. */ 1503 /* Free the old page.. */
1504 new_page = old_page; 1504 new_page = old_page;
@@ -1770,9 +1770,32 @@ out_big:
1770out_busy: 1770out_busy:
1771 return -ETXTBSY; 1771 return -ETXTBSY;
1772} 1772}
1773
1774EXPORT_SYMBOL(vmtruncate); 1773EXPORT_SYMBOL(vmtruncate);
1775 1774
1775int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
1776{
1777 struct address_space *mapping = inode->i_mapping;
1778
1779 /*
1780 * If the underlying filesystem is not going to provide
1781 * a way to truncate a range of blocks (punch a hole) -
1782 * we should return failure right now.
1783 */
1784 if (!inode->i_op || !inode->i_op->truncate_range)
1785 return -ENOSYS;
1786
1787 down(&inode->i_sem);
1788 down_write(&inode->i_alloc_sem);
1789 unmap_mapping_range(mapping, offset, (end - offset), 1);
1790 truncate_inode_pages_range(mapping, offset, end);
1791 inode->i_op->truncate_range(inode, offset, end);
1792 up_write(&inode->i_alloc_sem);
1793 up(&inode->i_sem);
1794
1795 return 0;
1796}
1797EXPORT_SYMBOL(vmtruncate_range);
1798
1776/* 1799/*
1777 * Primitive swap readahead code. We simply read an aligned block of 1800 * Primitive swap readahead code. We simply read an aligned block of
1778 * (1 << page_cluster) entries in the swap area. This method is chosen 1801 * (1 << page_cluster) entries in the swap area. This method is chosen
@@ -1954,8 +1977,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1954 goto release; 1977 goto release;
1955 inc_mm_counter(mm, anon_rss); 1978 inc_mm_counter(mm, anon_rss);
1956 lru_cache_add_active(page); 1979 lru_cache_add_active(page);
1957 SetPageReferenced(page); 1980 page_add_new_anon_rmap(page, vma, address);
1958 page_add_anon_rmap(page, vma, address);
1959 } else { 1981 } else {
1960 /* Map the ZERO_PAGE - vm_page_prot is readonly */ 1982 /* Map the ZERO_PAGE - vm_page_prot is readonly */
1961 page = ZERO_PAGE(address); 1983 page = ZERO_PAGE(address);
@@ -2086,7 +2108,7 @@ retry:
2086 if (anon) { 2108 if (anon) {
2087 inc_mm_counter(mm, anon_rss); 2109 inc_mm_counter(mm, anon_rss);
2088 lru_cache_add_active(new_page); 2110 lru_cache_add_active(new_page);
2089 page_add_anon_rmap(new_page, vma, address); 2111 page_add_new_anon_rmap(new_page, vma, address);
2090 } else { 2112 } else {
2091 inc_mm_counter(mm, file_rss); 2113 inc_mm_counter(mm, file_rss);
2092 page_add_file_rmap(new_page); 2114 page_add_file_rmap(new_page);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f6d4af8af8a8..a918f77f02f3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -42,7 +42,6 @@ extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
42 int nr_pages); 42 int nr_pages);
43static int __add_section(struct zone *zone, unsigned long phys_start_pfn) 43static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
44{ 44{
45 struct pglist_data *pgdat = zone->zone_pgdat;
46 int nr_pages = PAGES_PER_SECTION; 45 int nr_pages = PAGES_PER_SECTION;
47 int ret; 46 int ret;
48 47
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 72f402cc9c9a..0f1d2b8a952b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -93,7 +93,7 @@ static kmem_cache_t *sn_cache;
93 93
94/* Highest zone. An specific allocation for a zone below that is not 94/* Highest zone. An specific allocation for a zone below that is not
95 policied. */ 95 policied. */
96static int policy_zone; 96int policy_zone = ZONE_DMA;
97 97
98struct mempolicy default_policy = { 98struct mempolicy default_policy = {
99 .refcnt = ATOMIC_INIT(1), /* never free it */ 99 .refcnt = ATOMIC_INIT(1), /* never free it */
@@ -131,17 +131,8 @@ static struct zonelist *bind_zonelist(nodemask_t *nodes)
131 if (!zl) 131 if (!zl)
132 return NULL; 132 return NULL;
133 num = 0; 133 num = 0;
134 for_each_node_mask(nd, *nodes) { 134 for_each_node_mask(nd, *nodes)
135 int k; 135 zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone];
136 for (k = MAX_NR_ZONES-1; k >= 0; k--) {
137 struct zone *z = &NODE_DATA(nd)->node_zones[k];
138 if (!z->present_pages)
139 continue;
140 zl->zones[num++] = z;
141 if (k > policy_zone)
142 policy_zone = k;
143 }
144 }
145 zl->zones[num] = NULL; 136 zl->zones[num] = NULL;
146 return zl; 137 return zl;
147} 138}
@@ -785,6 +776,34 @@ static unsigned offset_il_node(struct mempolicy *pol,
785 return nid; 776 return nid;
786} 777}
787 778
779/* Determine a node number for interleave */
780static inline unsigned interleave_nid(struct mempolicy *pol,
781 struct vm_area_struct *vma, unsigned long addr, int shift)
782{
783 if (vma) {
784 unsigned long off;
785
786 off = vma->vm_pgoff;
787 off += (addr - vma->vm_start) >> shift;
788 return offset_il_node(pol, vma, off);
789 } else
790 return interleave_nodes(pol);
791}
792
793/* Return a zonelist suitable for a huge page allocation. */
794struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
795{
796 struct mempolicy *pol = get_vma_policy(current, vma, addr);
797
798 if (pol->policy == MPOL_INTERLEAVE) {
799 unsigned nid;
800
801 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
802 return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
803 }
804 return zonelist_policy(GFP_HIGHUSER, pol);
805}
806
788/* Allocate a page in interleaved policy. 807/* Allocate a page in interleaved policy.
789 Own path because it needs to do special accounting. */ 808 Own path because it needs to do special accounting. */
790static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, 809static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
@@ -833,15 +852,8 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 
 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
 		unsigned nid;
-		if (vma) {
-			unsigned long off;
-			off = vma->vm_pgoff;
-			off += (addr - vma->vm_start) >> PAGE_SHIFT;
-			nid = offset_il_node(pol, vma, off);
-		} else {
-			/* fall back to process interleaving */
-			nid = interleave_nodes(pol);
-		}
+
+		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
 		return alloc_page_interleave(gfp, 0, nid);
 	}
 	return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
@@ -940,54 +952,6 @@ void __mpol_free(struct mempolicy *p)
940} 952}
941 953
942/* 954/*
943 * Hugetlb policy. Same as above, just works with node numbers instead of
944 * zonelists.
945 */
946
947/* Find first node suitable for an allocation */
948int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
949{
950 struct mempolicy *pol = get_vma_policy(current, vma, addr);
951
952 switch (pol->policy) {
953 case MPOL_DEFAULT:
954 return numa_node_id();
955 case MPOL_BIND:
956 return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
957 case MPOL_INTERLEAVE:
958 return interleave_nodes(pol);
959 case MPOL_PREFERRED:
960 return pol->v.preferred_node >= 0 ?
961 pol->v.preferred_node : numa_node_id();
962 }
963 BUG();
964 return 0;
965}
966
967/* Find secondary valid nodes for an allocation */
968int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
969{
970 struct mempolicy *pol = get_vma_policy(current, vma, addr);
971
972 switch (pol->policy) {
973 case MPOL_PREFERRED:
974 case MPOL_DEFAULT:
975 case MPOL_INTERLEAVE:
976 return 1;
977 case MPOL_BIND: {
978 struct zone **z;
979 for (z = pol->v.zonelist->zones; *z; z++)
980 if ((*z)->zone_pgdat->node_id == nid)
981 return 1;
982 return 0;
983 }
984 default:
985 BUG();
986 return 0;
987 }
988}
989
990/* 954/*
991 * Shared memory backing store policy support. 955 * Shared memory backing store policy support.
992 * 956 *
993 * Remember policies even when nobody has shared memory mapped. 957 * Remember policies even when nobody has shared memory mapped.
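The interleave_nid() helper factored out above derives the target node from the page's offset in the mapping, so a given (huge)page always interleaves onto the same node. A toy model of that arithmetic follows, with offset_il_node() reduced to a modulo over a plain node array; the real code walks the policy's nodemask, and the shift comes from the page size in use (the 2 MB value below is an assumption).

#include <stdio.h>

/* Simplified interleave_nid(): file offset of the page, modulo the nodes. */
static unsigned interleave_nid(unsigned long vm_pgoff, unsigned long vm_start,
			       unsigned long addr, int shift,
			       const int *nodes, unsigned nnodes)
{
	unsigned long off = vm_pgoff + ((addr - vm_start) >> shift);

	return nodes[off % nnodes];	/* offset_il_node(), reduced to an array */
}

int main(void)
{
	const int nodes[] = { 0, 1, 3 };	/* the policy nodemask, as an array */
	unsigned long vm_start = 0x600000000000UL;
	int hpage_shift = 21;			/* 2 MB huge pages (assumption) */
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long addr = vm_start + ((unsigned long)i << hpage_shift);

		printf("hugepage %d -> node %u\n", i,
		       interleave_nid(0, vm_start, addr, hpage_shift, nodes, 3));
	}
	return 0;
}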
diff --git a/mm/nommu.c b/mm/nommu.c
index c1196812876b..c10262d68232 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1177,3 +1177,10 @@ int in_gate_area_no_task(unsigned long addr)
1177{ 1177{
1178 return 0; 1178 return 0;
1179} 1179}
1180
1181struct page *filemap_nopage(struct vm_area_struct *area,
1182 unsigned long address, int *type)
1183{
1184 BUG();
1185 return NULL;
1186}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fe14a8c87fc2..fd47494cb989 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -36,6 +36,7 @@
36#include <linux/memory_hotplug.h> 36#include <linux/memory_hotplug.h>
37#include <linux/nodemask.h> 37#include <linux/nodemask.h>
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/mempolicy.h>
39 40
40#include <asm/tlbflush.h> 41#include <asm/tlbflush.h>
41#include "internal.h" 42#include "internal.h"
@@ -53,6 +54,8 @@ unsigned long totalram_pages __read_mostly;
53unsigned long totalhigh_pages __read_mostly; 54unsigned long totalhigh_pages __read_mostly;
54long nr_swap_pages; 55long nr_swap_pages;
55 56
57static void fastcall free_hot_cold_page(struct page *page, int cold);
58
56/* 59/*
57 * results with 256, 32 in the lowmem_reserve sysctl: 60 * results with 256, 32 in the lowmem_reserve sysctl:
58 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 61 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
@@ -81,6 +84,7 @@ int min_free_kbytes = 1024;
81unsigned long __initdata nr_kernel_pages; 84unsigned long __initdata nr_kernel_pages;
82unsigned long __initdata nr_all_pages; 85unsigned long __initdata nr_all_pages;
83 86
87#ifdef CONFIG_DEBUG_VM
84static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 88static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
85{ 89{
86 int ret = 0; 90 int ret = 0;
@@ -122,16 +126,23 @@ static int bad_range(struct zone *zone, struct page *page)
 	return 0;
 }
 
-static void bad_page(const char *function, struct page *page)
+#else
+static inline int bad_range(struct zone *zone, struct page *page)
+{
+	return 0;
+}
+#endif
+
+static void bad_page(struct page *page)
 {
-	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-		function, current->comm, page);
-	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-		(int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
-		page->mapping, page_mapcount(page), page_count(page));
-	printk(KERN_EMERG "Backtrace:\n");
+	printk(KERN_EMERG "Bad page state in process '%s'\n"
+		"page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+		"Trying to fix it up, but a reboot is needed\n"
+		"Backtrace:\n",
+		current->comm, page, (int)(2*sizeof(unsigned long)),
+		(unsigned long)page->flags, page->mapping,
+		page_mapcount(page), page_count(page));
 	dump_stack();
-	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
 	page->flags &= ~(1 << PG_lru |
 			1 << PG_private |
 			1 << PG_locked |
@@ -184,19 +195,15 @@ static void destroy_compound_page(struct page *page, unsigned long order)
184 int i; 195 int i;
185 int nr_pages = 1 << order; 196 int nr_pages = 1 << order;
186 197
187 if (!PageCompound(page)) 198 if (unlikely(page[1].index != order))
188 return; 199 bad_page(page);
189
190 if (page[1].index != order)
191 bad_page(__FUNCTION__, page);
192 200
193 for (i = 0; i < nr_pages; i++) { 201 for (i = 0; i < nr_pages; i++) {
194 struct page *p = page + i; 202 struct page *p = page + i;
195 203
196 if (!PageCompound(p)) 204 if (unlikely(!PageCompound(p) |
197 bad_page(__FUNCTION__, page); 205 (page_private(p) != (unsigned long)page)))
198 if (page_private(p) != (unsigned long)page) 206 bad_page(page);
199 bad_page(__FUNCTION__, page);
200 ClearPageCompound(p); 207 ClearPageCompound(p);
201 } 208 }
202} 209}
@@ -255,14 +262,20 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
255/* 262/*
256 * This function checks whether a page is free && is the buddy 263 * This function checks whether a page is free && is the buddy
257 * we can do coalesce a page and its buddy if 264 * we can do coalesce a page and its buddy if
258 * (a) the buddy is free && 265 * (a) the buddy is not in a hole &&
259 * (b) the buddy is on the buddy system && 266 * (b) the buddy is free &&
260 * (c) a page and its buddy have the same order. 267 * (c) the buddy is on the buddy system &&
268 * (d) a page and its buddy have the same order.
261 * for recording page's order, we use page_private(page) and PG_private. 269 * for recording page's order, we use page_private(page) and PG_private.
262 * 270 *
263 */ 271 */
264static inline int page_is_buddy(struct page *page, int order) 272static inline int page_is_buddy(struct page *page, int order)
265{ 273{
274#ifdef CONFIG_HOLES_IN_ZONE
275 if (!pfn_valid(page_to_pfn(page)))
276 return 0;
277#endif
278
266 if (PagePrivate(page) && 279 if (PagePrivate(page) &&
267 (page_order(page) == order) && 280 (page_order(page) == order) &&
268 page_count(page) == 0) 281 page_count(page) == 0)
@@ -300,7 +313,7 @@ static inline void __free_pages_bulk (struct page *page,
300 unsigned long page_idx; 313 unsigned long page_idx;
301 int order_size = 1 << order; 314 int order_size = 1 << order;
302 315
303 if (unlikely(order)) 316 if (unlikely(PageCompound(page)))
304 destroy_compound_page(page, order); 317 destroy_compound_page(page, order);
305 318
306 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 319 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
@@ -314,17 +327,15 @@ static inline void __free_pages_bulk (struct page *page,
314 struct free_area *area; 327 struct free_area *area;
315 struct page *buddy; 328 struct page *buddy;
316 329
317 combined_idx = __find_combined_index(page_idx, order);
318 buddy = __page_find_buddy(page, page_idx, order); 330 buddy = __page_find_buddy(page, page_idx, order);
319
320 if (bad_range(zone, buddy))
321 break;
322 if (!page_is_buddy(buddy, order)) 331 if (!page_is_buddy(buddy, order))
323 break; /* Move the buddy up one level. */ 332 break; /* Move the buddy up one level. */
333
324 list_del(&buddy->lru); 334 list_del(&buddy->lru);
325 area = zone->free_area + order; 335 area = zone->free_area + order;
326 area->nr_free--; 336 area->nr_free--;
327 rmv_page_order(buddy); 337 rmv_page_order(buddy);
338 combined_idx = __find_combined_index(page_idx, order);
328 page = page + (combined_idx - page_idx); 339 page = page + (combined_idx - page_idx);
329 page_idx = combined_idx; 340 page_idx = combined_idx;
330 order++; 341 order++;
@@ -334,11 +345,11 @@ static inline void __free_pages_bulk (struct page *page,
334 zone->free_area[order].nr_free++; 345 zone->free_area[order].nr_free++;
335} 346}
336 347
337static inline int free_pages_check(const char *function, struct page *page) 348static inline int free_pages_check(struct page *page)
338{ 349{
339 if ( page_mapcount(page) || 350 if (unlikely(page_mapcount(page) |
340 page->mapping != NULL || 351 (page->mapping != NULL) |
341 page_count(page) != 0 || 352 (page_count(page) != 0) |
342 (page->flags & ( 353 (page->flags & (
343 1 << PG_lru | 354 1 << PG_lru |
344 1 << PG_private | 355 1 << PG_private |
@@ -348,8 +359,8 @@ static inline int free_pages_check(const char *function, struct page *page)
348 1 << PG_slab | 359 1 << PG_slab |
349 1 << PG_swapcache | 360 1 << PG_swapcache |
350 1 << PG_writeback | 361 1 << PG_writeback |
351 1 << PG_reserved ))) 362 1 << PG_reserved ))))
352 bad_page(function, page); 363 bad_page(page);
353 if (PageDirty(page)) 364 if (PageDirty(page))
354 __ClearPageDirty(page); 365 __ClearPageDirty(page);
355 /* 366 /*
@@ -375,11 +386,10 @@ static int
375free_pages_bulk(struct zone *zone, int count, 386free_pages_bulk(struct zone *zone, int count,
376 struct list_head *list, unsigned int order) 387 struct list_head *list, unsigned int order)
377{ 388{
378 unsigned long flags;
379 struct page *page = NULL; 389 struct page *page = NULL;
380 int ret = 0; 390 int ret = 0;
381 391
382 spin_lock_irqsave(&zone->lock, flags); 392 spin_lock(&zone->lock);
383 zone->all_unreclaimable = 0; 393 zone->all_unreclaimable = 0;
384 zone->pages_scanned = 0; 394 zone->pages_scanned = 0;
385 while (!list_empty(list) && count--) { 395 while (!list_empty(list) && count--) {
@@ -389,12 +399,13 @@ free_pages_bulk(struct zone *zone, int count,
389 __free_pages_bulk(page, zone, order); 399 __free_pages_bulk(page, zone, order);
390 ret++; 400 ret++;
391 } 401 }
392 spin_unlock_irqrestore(&zone->lock, flags); 402 spin_unlock(&zone->lock);
393 return ret; 403 return ret;
394} 404}
395 405
396void __free_pages_ok(struct page *page, unsigned int order) 406void __free_pages_ok(struct page *page, unsigned int order)
397{ 407{
408 unsigned long flags;
398 LIST_HEAD(list); 409 LIST_HEAD(list);
399 int i; 410 int i;
400 int reserved = 0; 411 int reserved = 0;
@@ -408,14 +419,49 @@ void __free_pages_ok(struct page *page, unsigned int order)
408#endif 419#endif
409 420
410 for (i = 0 ; i < (1 << order) ; ++i) 421 for (i = 0 ; i < (1 << order) ; ++i)
411 reserved += free_pages_check(__FUNCTION__, page + i); 422 reserved += free_pages_check(page + i);
412 if (reserved) 423 if (reserved)
413 return; 424 return;
414 425
415 list_add(&page->lru, &list); 426 list_add(&page->lru, &list);
416 mod_page_state(pgfree, 1 << order);
417 kernel_map_pages(page, 1<<order, 0); 427 kernel_map_pages(page, 1<<order, 0);
428 local_irq_save(flags);
429 __mod_page_state(pgfree, 1 << order);
418 free_pages_bulk(page_zone(page), 1, &list, order); 430 free_pages_bulk(page_zone(page), 1, &list, order);
431 local_irq_restore(flags);
432}
433
434/*
435 * permit the bootmem allocator to evade page validation on high-order frees
436 */
437void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
438{
439 if (order == 0) {
440 __ClearPageReserved(page);
441 set_page_count(page, 0);
442
443 free_hot_cold_page(page, 0);
444 } else {
445 LIST_HEAD(list);
446 int loop;
447
448 for (loop = 0; loop < BITS_PER_LONG; loop++) {
449 struct page *p = &page[loop];
450
451 if (loop + 16 < BITS_PER_LONG)
452 prefetchw(p + 16);
453 __ClearPageReserved(p);
454 set_page_count(p, 0);
455 }
456
457 arch_free_page(page, order);
458
459 mod_page_state(pgfree, 1 << order);
460
461 list_add(&page->lru, &list);
462 kernel_map_pages(page, 1 << order, 0);
463 free_pages_bulk(page_zone(page), 1, &list, order);
464 }
419} 465}
420 466
421 467
@@ -433,8 +479,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
433 * 479 *
434 * -- wli 480 * -- wli
435 */ 481 */
436static inline struct page * 482static inline void expand(struct zone *zone, struct page *page,
437expand(struct zone *zone, struct page *page,
438 int low, int high, struct free_area *area) 483 int low, int high, struct free_area *area)
439{ 484{
440 unsigned long size = 1 << high; 485 unsigned long size = 1 << high;
@@ -448,24 +493,6 @@ expand(struct zone *zone, struct page *page,
448 area->nr_free++; 493 area->nr_free++;
449 set_page_order(&page[size], high); 494 set_page_order(&page[size], high);
450 } 495 }
451 return page;
452}
453
454void set_page_refs(struct page *page, int order)
455{
456#ifdef CONFIG_MMU
457 set_page_count(page, 1);
458#else
459 int i;
460
461 /*
462 * We need to reference all the pages for this order, otherwise if
463 * anyone accesses one of the pages with (get/put) it will be freed.
464 * - eg: access_process_vm()
465 */
466 for (i = 0; i < (1 << order); i++)
467 set_page_count(page + i, 1);
468#endif /* CONFIG_MMU */
469} 496}
470 497
471/* 498/*
@@ -473,9 +500,9 @@ void set_page_refs(struct page *page, int order)
473 */ 500 */
474static int prep_new_page(struct page *page, int order) 501static int prep_new_page(struct page *page, int order)
475{ 502{
476 if ( page_mapcount(page) || 503 if (unlikely(page_mapcount(page) |
477 page->mapping != NULL || 504 (page->mapping != NULL) |
478 page_count(page) != 0 || 505 (page_count(page) != 0) |
479 (page->flags & ( 506 (page->flags & (
480 1 << PG_lru | 507 1 << PG_lru |
481 1 << PG_private | 508 1 << PG_private |
@@ -486,8 +513,8 @@ static int prep_new_page(struct page *page, int order)
486 1 << PG_slab | 513 1 << PG_slab |
487 1 << PG_swapcache | 514 1 << PG_swapcache |
488 1 << PG_writeback | 515 1 << PG_writeback |
489 1 << PG_reserved ))) 516 1 << PG_reserved ))))
490 bad_page(__FUNCTION__, page); 517 bad_page(page);
491 518
492 /* 519 /*
493 * For now, we report if PG_reserved was found set, but do not 520 * For now, we report if PG_reserved was found set, but do not
@@ -525,7 +552,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
525 rmv_page_order(page); 552 rmv_page_order(page);
526 area->nr_free--; 553 area->nr_free--;
527 zone->free_pages -= 1UL << order; 554 zone->free_pages -= 1UL << order;
528 return expand(zone, page, order, current_order, area); 555 expand(zone, page, order, current_order, area);
556 return page;
529 } 557 }
530 558
531 return NULL; 559 return NULL;
@@ -539,21 +567,17 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
539static int rmqueue_bulk(struct zone *zone, unsigned int order, 567static int rmqueue_bulk(struct zone *zone, unsigned int order,
540 unsigned long count, struct list_head *list) 568 unsigned long count, struct list_head *list)
541{ 569{
542 unsigned long flags;
543 int i; 570 int i;
544 int allocated = 0;
545 struct page *page;
546 571
547 spin_lock_irqsave(&zone->lock, flags); 572 spin_lock(&zone->lock);
548 for (i = 0; i < count; ++i) { 573 for (i = 0; i < count; ++i) {
549 page = __rmqueue(zone, order); 574 struct page *page = __rmqueue(zone, order);
550 if (page == NULL) 575 if (unlikely(page == NULL))
551 break; 576 break;
552 allocated++;
553 list_add_tail(&page->lru, list); 577 list_add_tail(&page->lru, list);
554 } 578 }
555 spin_unlock_irqrestore(&zone->lock, flags); 579 spin_unlock(&zone->lock);
556 return allocated; 580 return i;
557} 581}
558 582
559#ifdef CONFIG_NUMA 583#ifdef CONFIG_NUMA
@@ -589,6 +613,7 @@ void drain_remote_pages(void)
589#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) 613#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
590static void __drain_pages(unsigned int cpu) 614static void __drain_pages(unsigned int cpu)
591{ 615{
616 unsigned long flags;
592 struct zone *zone; 617 struct zone *zone;
593 int i; 618 int i;
594 619
@@ -600,8 +625,10 @@ static void __drain_pages(unsigned int cpu)
600 struct per_cpu_pages *pcp; 625 struct per_cpu_pages *pcp;
601 626
602 pcp = &pset->pcp[i]; 627 pcp = &pset->pcp[i];
628 local_irq_save(flags);
603 pcp->count -= free_pages_bulk(zone, pcp->count, 629 pcp->count -= free_pages_bulk(zone, pcp->count,
604 &pcp->list, 0); 630 &pcp->list, 0);
631 local_irq_restore(flags);
605 } 632 }
606 } 633 }
607} 634}
@@ -647,18 +674,14 @@ void drain_local_pages(void)
647} 674}
648#endif /* CONFIG_PM */ 675#endif /* CONFIG_PM */
649 676
650static void zone_statistics(struct zonelist *zonelist, struct zone *z) 677static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
651{ 678{
652#ifdef CONFIG_NUMA 679#ifdef CONFIG_NUMA
653 unsigned long flags;
654 int cpu;
655 pg_data_t *pg = z->zone_pgdat; 680 pg_data_t *pg = z->zone_pgdat;
656 pg_data_t *orig = zonelist->zones[0]->zone_pgdat; 681 pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
657 struct per_cpu_pageset *p; 682 struct per_cpu_pageset *p;
658 683
659 local_irq_save(flags); 684 p = zone_pcp(z, cpu);
660 cpu = smp_processor_id();
661 p = zone_pcp(z,cpu);
662 if (pg == orig) { 685 if (pg == orig) {
663 p->numa_hit++; 686 p->numa_hit++;
664 } else { 687 } else {
@@ -669,14 +692,12 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z)
669 p->local_node++; 692 p->local_node++;
670 else 693 else
671 p->other_node++; 694 p->other_node++;
672 local_irq_restore(flags);
673#endif 695#endif
674} 696}
675 697
676/* 698/*
677 * Free a 0-order page 699 * Free a 0-order page
678 */ 700 */
679static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
680static void fastcall free_hot_cold_page(struct page *page, int cold) 701static void fastcall free_hot_cold_page(struct page *page, int cold)
681{ 702{
682 struct zone *zone = page_zone(page); 703 struct zone *zone = page_zone(page);
@@ -687,14 +708,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
687 708
688 if (PageAnon(page)) 709 if (PageAnon(page))
689 page->mapping = NULL; 710 page->mapping = NULL;
690 if (free_pages_check(__FUNCTION__, page)) 711 if (free_pages_check(page))
691 return; 712 return;
692 713
693 inc_page_state(pgfree);
694 kernel_map_pages(page, 1, 0); 714 kernel_map_pages(page, 1, 0);
695 715
696 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 716 pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
697 local_irq_save(flags); 717 local_irq_save(flags);
718 __inc_page_state(pgfree);
698 list_add(&page->lru, &pcp->list); 719 list_add(&page->lru, &pcp->list);
699 pcp->count++; 720 pcp->count++;
700 if (pcp->count >= pcp->high) 721 if (pcp->count >= pcp->high)
@@ -727,49 +748,58 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
727 * we cheat by calling it from here, in the order > 0 path. Saves a branch 748 * we cheat by calling it from here, in the order > 0 path. Saves a branch
728 * or two. 749 * or two.
729 */ 750 */
730static struct page * 751static struct page *buffered_rmqueue(struct zonelist *zonelist,
731buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) 752 struct zone *zone, int order, gfp_t gfp_flags)
732{ 753{
733 unsigned long flags; 754 unsigned long flags;
734 struct page *page; 755 struct page *page;
735 int cold = !!(gfp_flags & __GFP_COLD); 756 int cold = !!(gfp_flags & __GFP_COLD);
757 int cpu;
736 758
737again: 759again:
760 cpu = get_cpu();
738 if (order == 0) { 761 if (order == 0) {
739 struct per_cpu_pages *pcp; 762 struct per_cpu_pages *pcp;
740 763
741 page = NULL; 764 pcp = &zone_pcp(zone, cpu)->pcp[cold];
742 pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
743 local_irq_save(flags); 765 local_irq_save(flags);
744 if (pcp->count <= pcp->low) 766 if (!pcp->count) {
745 pcp->count += rmqueue_bulk(zone, 0, 767 pcp->count += rmqueue_bulk(zone, 0,
746 pcp->batch, &pcp->list); 768 pcp->batch, &pcp->list);
747 if (pcp->count) { 769 if (unlikely(!pcp->count))
748 page = list_entry(pcp->list.next, struct page, lru); 770 goto failed;
749 list_del(&page->lru);
750 pcp->count--;
751 } 771 }
752 local_irq_restore(flags); 772 page = list_entry(pcp->list.next, struct page, lru);
753 put_cpu(); 773 list_del(&page->lru);
774 pcp->count--;
754 } else { 775 } else {
755 spin_lock_irqsave(&zone->lock, flags); 776 spin_lock_irqsave(&zone->lock, flags);
756 page = __rmqueue(zone, order); 777 page = __rmqueue(zone, order);
757 spin_unlock_irqrestore(&zone->lock, flags); 778 spin_unlock(&zone->lock);
779 if (!page)
780 goto failed;
758 } 781 }
759 782
760 if (page != NULL) { 783 __mod_page_state_zone(zone, pgalloc, 1 << order);
761 BUG_ON(bad_range(zone, page)); 784 zone_statistics(zonelist, zone, cpu);
762 mod_page_state_zone(zone, pgalloc, 1 << order); 785 local_irq_restore(flags);
763 if (prep_new_page(page, order)) 786 put_cpu();
764 goto again; 787
788 BUG_ON(bad_range(zone, page));
789 if (prep_new_page(page, order))
790 goto again;
765 791
766 if (gfp_flags & __GFP_ZERO) 792 if (gfp_flags & __GFP_ZERO)
767 prep_zero_page(page, order, gfp_flags); 793 prep_zero_page(page, order, gfp_flags);
768 794
769 if (order && (gfp_flags & __GFP_COMP)) 795 if (order && (gfp_flags & __GFP_COMP))
770 prep_compound_page(page, order); 796 prep_compound_page(page, order);
771 }
772 return page; 797 return page;
798
799failed:
800 local_irq_restore(flags);
801 put_cpu();
802 return NULL;
773} 803}
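
The reworked buffered_rmqueue() above hinges on one ordering rule: pin the
CPU first, then disable interrupts, and complete every per-cpu statistics
update before either is undone. A minimal fragment restating that pattern
(names taken from the hunk above; this is an illustration, not the kernel's
exact code):

	int cpu;
	unsigned long flags;

	cpu = get_cpu();		/* pin: zone_pcp(zone, cpu) stays ours */
	local_irq_save(flags);		/* the pcp lists are not irq-safe */
	/* take or refill a page from zone_pcp(zone, cpu)->pcp[cold] */
	__mod_page_state_zone(zone, pgalloc, 1 << order); /* irqs already off */
	zone_statistics(zonelist, zone, cpu);	/* cpu passed in, not re-read */
	local_irq_restore(flags);
	put_cpu();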
774 804
775#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 805#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
@@ -845,9 +875,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
845 continue; 875 continue;
846 } 876 }
847 877
848 page = buffered_rmqueue(*z, order, gfp_mask); 878 page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
849 if (page) { 879 if (page) {
850 zone_statistics(zonelist, *z);
851 break; 880 break;
852 } 881 }
853 } while (*(++z) != NULL); 882 } while (*(++z) != NULL);
@@ -903,8 +932,7 @@ restart:
903 alloc_flags |= ALLOC_HARDER; 932 alloc_flags |= ALLOC_HARDER;
904 if (gfp_mask & __GFP_HIGH) 933 if (gfp_mask & __GFP_HIGH)
905 alloc_flags |= ALLOC_HIGH; 934 alloc_flags |= ALLOC_HIGH;
906 if (wait) 935 alloc_flags |= ALLOC_CPUSET;
907 alloc_flags |= ALLOC_CPUSET;
908 936
909 /* 937 /*
910 * Go through the zonelist again. Let __GFP_HIGH and allocations 938 * Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -926,7 +954,7 @@ restart:
926nofail_alloc: 954nofail_alloc:
927 /* go through the zonelist yet again, ignoring mins */ 955 /* go through the zonelist yet again, ignoring mins */
928 page = get_page_from_freelist(gfp_mask, order, 956 page = get_page_from_freelist(gfp_mask, order,
929 zonelist, ALLOC_NO_WATERMARKS|ALLOC_CPUSET); 957 zonelist, ALLOC_NO_WATERMARKS);
930 if (page) 958 if (page)
931 goto got_pg; 959 goto got_pg;
932 if (gfp_mask & __GFP_NOFAIL) { 960 if (gfp_mask & __GFP_NOFAIL) {
@@ -1171,12 +1199,11 @@ EXPORT_SYMBOL(nr_pagecache);
1171DEFINE_PER_CPU(long, nr_pagecache_local) = 0; 1199DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
1172#endif 1200#endif
1173 1201
1174void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) 1202static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
1175{ 1203{
1176 int cpu = 0; 1204 int cpu = 0;
1177 1205
1178 memset(ret, 0, sizeof(*ret)); 1206 memset(ret, 0, sizeof(*ret));
1179 cpus_and(*cpumask, *cpumask, cpu_online_map);
1180 1207
1181 cpu = first_cpu(*cpumask); 1208 cpu = first_cpu(*cpumask);
1182 while (cpu < NR_CPUS) { 1209 while (cpu < NR_CPUS) {
@@ -1224,12 +1251,12 @@ void get_full_page_state(struct page_state *ret)
1224 __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask); 1251 __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
1225} 1252}
1226 1253
1227unsigned long __read_page_state(unsigned long offset) 1254unsigned long read_page_state_offset(unsigned long offset)
1228{ 1255{
1229 unsigned long ret = 0; 1256 unsigned long ret = 0;
1230 int cpu; 1257 int cpu;
1231 1258
1232 for_each_online_cpu(cpu) { 1259 for_each_cpu(cpu) {
1233 unsigned long in; 1260 unsigned long in;
1234 1261
1235 in = (unsigned long)&per_cpu(page_states, cpu) + offset; 1262 in = (unsigned long)&per_cpu(page_states, cpu) + offset;
@@ -1238,18 +1265,26 @@ unsigned long __read_page_state(unsigned long offset)
1238 return ret; 1265 return ret;
1239} 1266}
1240 1267
1241void __mod_page_state(unsigned long offset, unsigned long delta) 1268void __mod_page_state_offset(unsigned long offset, unsigned long delta)
1269{
1270 void *ptr;
1271
1272 ptr = &__get_cpu_var(page_states);
1273 *(unsigned long *)(ptr + offset) += delta;
1274}
1275EXPORT_SYMBOL(__mod_page_state_offset);
1276
1277void mod_page_state_offset(unsigned long offset, unsigned long delta)
1242{ 1278{
1243 unsigned long flags; 1279 unsigned long flags;
1244 void* ptr; 1280 void *ptr;
1245 1281
1246 local_irq_save(flags); 1282 local_irq_save(flags);
1247 ptr = &__get_cpu_var(page_states); 1283 ptr = &__get_cpu_var(page_states);
1248 *(unsigned long*)(ptr + offset) += delta; 1284 *(unsigned long *)(ptr + offset) += delta;
1249 local_irq_restore(flags); 1285 local_irq_restore(flags);
1250} 1286}
1251 1287EXPORT_SYMBOL(mod_page_state_offset);
1252EXPORT_SYMBOL(__mod_page_state);
1253 1288
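
The rename above splits the interface in two: __mod_page_state_offset()
assumes the caller already has interrupts disabled, while
mod_page_state_offset() keeps the old self-contained save/restore behaviour.
A hedged sketch of the batching this enables, with several counters updated
under one irq-off window instead of toggling interrupts per counter (the
counter names are ordinary page_state fields, as used in the vmscan.c hunks
below):

	unsigned long flags;

	local_irq_save(flags);
	__mod_page_state(pgfree, 1);		/* no per-call irq fiddling */
	__mod_page_state_zone(zone, pgsteal, nr_freed);
	local_irq_restore(flags);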
1254void __get_zone_counts(unsigned long *active, unsigned long *inactive, 1289void __get_zone_counts(unsigned long *active, unsigned long *inactive,
1255 unsigned long *free, struct pglist_data *pgdat) 1290 unsigned long *free, struct pglist_data *pgdat)
@@ -1335,7 +1370,7 @@ void show_free_areas(void)
1335 show_node(zone); 1370 show_node(zone);
1336 printk("%s per-cpu:", zone->name); 1371 printk("%s per-cpu:", zone->name);
1337 1372
1338 if (!zone->present_pages) { 1373 if (!populated_zone(zone)) {
1339 printk(" empty\n"); 1374 printk(" empty\n");
1340 continue; 1375 continue;
1341 } else 1376 } else
@@ -1347,10 +1382,9 @@ void show_free_areas(void)
1347 pageset = zone_pcp(zone, cpu); 1382 pageset = zone_pcp(zone, cpu);
1348 1383
1349 for (temperature = 0; temperature < 2; temperature++) 1384 for (temperature = 0; temperature < 2; temperature++)
1350 printk("cpu %d %s: low %d, high %d, batch %d used:%d\n", 1385 printk("cpu %d %s: high %d, batch %d used:%d\n",
1351 cpu, 1386 cpu,
1352 temperature ? "cold" : "hot", 1387 temperature ? "cold" : "hot",
1353 pageset->pcp[temperature].low,
1354 pageset->pcp[temperature].high, 1388 pageset->pcp[temperature].high,
1355 pageset->pcp[temperature].batch, 1389 pageset->pcp[temperature].batch,
1356 pageset->pcp[temperature].count); 1390 pageset->pcp[temperature].count);
@@ -1413,7 +1447,7 @@ void show_free_areas(void)
1413 1447
1414 show_node(zone); 1448 show_node(zone);
1415 printk("%s: ", zone->name); 1449 printk("%s: ", zone->name);
1416 if (!zone->present_pages) { 1450 if (!populated_zone(zone)) {
1417 printk("empty\n"); 1451 printk("empty\n");
1418 continue; 1452 continue;
1419 } 1453 }
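
populated_zone() is the predicate these hunks substitute for the open-coded
zone->present_pages tests. Its definition is not part of this diff;
presumably it is the obvious one-line wrapper in include/linux/mmzone.h,
along the lines of:

	static inline int populated_zone(struct zone *zone)
	{
		return !!zone->present_pages;
	}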
@@ -1433,36 +1467,29 @@ void show_free_areas(void)
1433 1467
1434/* 1468/*
1435 * Builds allocation fallback zone lists. 1469 * Builds allocation fallback zone lists.
1470 *
1471 * Add all populated zones of a node to the zonelist.
1436 */ 1472 */
1437static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k) 1473static int __init build_zonelists_node(pg_data_t *pgdat,
1438{ 1474 struct zonelist *zonelist, int nr_zones, int zone_type)
1439 switch (k) { 1475{
1440 struct zone *zone; 1476 struct zone *zone;
1441 default: 1477
1442 BUG(); 1478 BUG_ON(zone_type > ZONE_HIGHMEM);
1443 case ZONE_HIGHMEM: 1479
1444 zone = pgdat->node_zones + ZONE_HIGHMEM; 1480 do {
1445 if (zone->present_pages) { 1481 zone = pgdat->node_zones + zone_type;
1482 if (populated_zone(zone)) {
1446#ifndef CONFIG_HIGHMEM 1483#ifndef CONFIG_HIGHMEM
1447 BUG(); 1484 BUG_ON(zone_type > ZONE_NORMAL);
1448#endif 1485#endif
1449 zonelist->zones[j++] = zone; 1486 zonelist->zones[nr_zones++] = zone;
1487 check_highest_zone(zone_type);
1450 } 1488 }
1451 case ZONE_NORMAL: 1489 zone_type--;
1452 zone = pgdat->node_zones + ZONE_NORMAL;
1453 if (zone->present_pages)
1454 zonelist->zones[j++] = zone;
1455 case ZONE_DMA32:
1456 zone = pgdat->node_zones + ZONE_DMA32;
1457 if (zone->present_pages)
1458 zonelist->zones[j++] = zone;
1459 case ZONE_DMA:
1460 zone = pgdat->node_zones + ZONE_DMA;
1461 if (zone->present_pages)
1462 zonelist->zones[j++] = zone;
1463 }
1464 1490
1465 return j; 1491 } while (zone_type >= 0);
1492 return nr_zones;
1466} 1493}
1467 1494
1468static inline int highest_zone(int zone_bits) 1495static inline int highest_zone(int zone_bits)
@@ -1709,8 +1736,6 @@ void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1709 for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) { 1736 for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
1710 if (!early_pfn_valid(pfn)) 1737 if (!early_pfn_valid(pfn))
1711 continue; 1738 continue;
1712 if (!early_pfn_in_nid(pfn, nid))
1713 continue;
1714 page = pfn_to_page(pfn); 1739 page = pfn_to_page(pfn);
1715 set_page_links(page, zone, nid, pfn); 1740 set_page_links(page, zone, nid, pfn);
1716 set_page_count(page, 1); 1741 set_page_count(page, 1);
@@ -1794,14 +1819,12 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
1794 1819
1795 pcp = &p->pcp[0]; /* hot */ 1820 pcp = &p->pcp[0]; /* hot */
1796 pcp->count = 0; 1821 pcp->count = 0;
1797 pcp->low = 0;
1798 pcp->high = 6 * batch; 1822 pcp->high = 6 * batch;
1799 pcp->batch = max(1UL, 1 * batch); 1823 pcp->batch = max(1UL, 1 * batch);
1800 INIT_LIST_HEAD(&pcp->list); 1824 INIT_LIST_HEAD(&pcp->list);
1801 1825
1802 pcp = &p->pcp[1]; /* cold */ 1826 pcp = &p->pcp[1]; /* cold */
1803 pcp->count = 0; 1827 pcp->count = 0;
1804 pcp->low = 0;
1805 pcp->high = 2 * batch; 1828 pcp->high = 2 * batch;
1806 pcp->batch = max(1UL, batch/2); 1829 pcp->batch = max(1UL, batch/2);
1807 INIT_LIST_HEAD(&pcp->list); 1830 INIT_LIST_HEAD(&pcp->list);
@@ -2116,7 +2139,7 @@ static int frag_show(struct seq_file *m, void *arg)
2116 int order; 2139 int order;
2117 2140
2118 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { 2141 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2119 if (!zone->present_pages) 2142 if (!populated_zone(zone))
2120 continue; 2143 continue;
2121 2144
2122 spin_lock_irqsave(&zone->lock, flags); 2145 spin_lock_irqsave(&zone->lock, flags);
@@ -2149,7 +2172,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
2149 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) { 2172 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
2150 int i; 2173 int i;
2151 2174
2152 if (!zone->present_pages) 2175 if (!populated_zone(zone))
2153 continue; 2176 continue;
2154 2177
2155 spin_lock_irqsave(&zone->lock, flags); 2178 spin_lock_irqsave(&zone->lock, flags);
@@ -2197,12 +2220,10 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
2197 seq_printf(m, 2220 seq_printf(m,
2198 "\n cpu: %i pcp: %i" 2221 "\n cpu: %i pcp: %i"
2199 "\n count: %i" 2222 "\n count: %i"
2200 "\n low: %i"
2201 "\n high: %i" 2223 "\n high: %i"
2202 "\n batch: %i", 2224 "\n batch: %i",
2203 i, j, 2225 i, j,
2204 pageset->pcp[j].count, 2226 pageset->pcp[j].count,
2205 pageset->pcp[j].low,
2206 pageset->pcp[j].high, 2227 pageset->pcp[j].high,
2207 pageset->pcp[j].batch); 2228 pageset->pcp[j].batch);
2208 } 2229 }
@@ -2257,32 +2278,40 @@ static char *vmstat_text[] = {
2257 "pgpgout", 2278 "pgpgout",
2258 "pswpin", 2279 "pswpin",
2259 "pswpout", 2280 "pswpout",
2260 "pgalloc_high",
2261 2281
2282 "pgalloc_high",
2262 "pgalloc_normal", 2283 "pgalloc_normal",
2284 "pgalloc_dma32",
2263 "pgalloc_dma", 2285 "pgalloc_dma",
2286
2264 "pgfree", 2287 "pgfree",
2265 "pgactivate", 2288 "pgactivate",
2266 "pgdeactivate", 2289 "pgdeactivate",
2267 2290
2268 "pgfault", 2291 "pgfault",
2269 "pgmajfault", 2292 "pgmajfault",
2293
2270 "pgrefill_high", 2294 "pgrefill_high",
2271 "pgrefill_normal", 2295 "pgrefill_normal",
2296 "pgrefill_dma32",
2272 "pgrefill_dma", 2297 "pgrefill_dma",
2273 2298
2274 "pgsteal_high", 2299 "pgsteal_high",
2275 "pgsteal_normal", 2300 "pgsteal_normal",
2301 "pgsteal_dma32",
2276 "pgsteal_dma", 2302 "pgsteal_dma",
2303
2277 "pgscan_kswapd_high", 2304 "pgscan_kswapd_high",
2278 "pgscan_kswapd_normal", 2305 "pgscan_kswapd_normal",
2279 2306 "pgscan_kswapd_dma32",
2280 "pgscan_kswapd_dma", 2307 "pgscan_kswapd_dma",
2308
2281 "pgscan_direct_high", 2309 "pgscan_direct_high",
2282 "pgscan_direct_normal", 2310 "pgscan_direct_normal",
2311 "pgscan_direct_dma32",
2283 "pgscan_direct_dma", 2312 "pgscan_direct_dma",
2284 "pginodesteal",
2285 2313
2314 "pginodesteal",
2286 "slabs_scanned", 2315 "slabs_scanned",
2287 "kswapd_steal", 2316 "kswapd_steal",
2288 "kswapd_inodesteal", 2317 "kswapd_inodesteal",
diff --git a/mm/rmap.c b/mm/rmap.c
index f853c6def159..6f3f7db27128 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -435,6 +435,30 @@ int page_referenced(struct page *page, int is_locked)
435} 435}
436 436
437/** 437/**
438 * __page_set_anon_rmap - set up new anonymous rmap
439 * @page: the page to add the mapping to
440 * @vma: the vm area in which the mapping is added
441 * @address: the user virtual address mapped
442 */
443static void __page_set_anon_rmap(struct page *page,
444 struct vm_area_struct *vma, unsigned long address)
445{
446 struct anon_vma *anon_vma = vma->anon_vma;
447
448 BUG_ON(!anon_vma);
449 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
450 page->mapping = (struct address_space *) anon_vma;
451
452 page->index = linear_page_index(vma, address);
453
454 /*
455 * nr_mapped state can be updated without turning off
456 * interrupts because it is not modified via interrupt.
457 */
458 __inc_page_state(nr_mapped);
459}
460
461/**
438 * page_add_anon_rmap - add pte mapping to an anonymous page 462 * page_add_anon_rmap - add pte mapping to an anonymous page
439 * @page: the page to add the mapping to 463 * @page: the page to add the mapping to
440 * @vma: the vm area in which the mapping is added 464 * @vma: the vm area in which the mapping is added
@@ -445,20 +469,27 @@ int page_referenced(struct page *page, int is_locked)
445void page_add_anon_rmap(struct page *page, 469void page_add_anon_rmap(struct page *page,
446 struct vm_area_struct *vma, unsigned long address) 470 struct vm_area_struct *vma, unsigned long address)
447{ 471{
448 if (atomic_inc_and_test(&page->_mapcount)) { 472 if (atomic_inc_and_test(&page->_mapcount))
449 struct anon_vma *anon_vma = vma->anon_vma; 473 __page_set_anon_rmap(page, vma, address);
450
451 BUG_ON(!anon_vma);
452 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
453 page->mapping = (struct address_space *) anon_vma;
454
455 page->index = linear_page_index(vma, address);
456
457 inc_page_state(nr_mapped);
458 }
459 /* else checking page index and mapping is racy */ 474 /* else checking page index and mapping is racy */
460} 475}
461 476
477/**
478 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
479 * @page: the page to add the mapping to
480 * @vma: the vm area in which the mapping is added
481 * @address: the user virtual address mapped
482 *
483 * Same as page_add_anon_rmap but must only be called on *new* pages.
484 * This means the inc-and-test can be bypassed.
485 */
486void page_add_new_anon_rmap(struct page *page,
487 struct vm_area_struct *vma, unsigned long address)
488{
489 atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
490 __page_set_anon_rmap(page, vma, address);
491}
492
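
With __page_set_anon_rmap() factored out, anonymous pages now have two entry
points. A hedged fragment of the intended split (the condition is purely
illustrative; real callers know statically which case applies):

	if (page_is_brand_new)		/* e.g. fault on a freshly allocated page */
		page_add_new_anon_rmap(page, vma, address);	/* no inc-and-test */
	else				/* e.g. swap-in of an existing page */
		page_add_anon_rmap(page, vma, address);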
462/** 493/**
463 * page_add_file_rmap - add pte mapping to a file page 494 * page_add_file_rmap - add pte mapping to a file page
464 * @page: the page to add the mapping to 495 * @page: the page to add the mapping to
@@ -471,7 +502,7 @@ void page_add_file_rmap(struct page *page)
471 BUG_ON(!pfn_valid(page_to_pfn(page))); 502 BUG_ON(!pfn_valid(page_to_pfn(page)));
472 503
473 if (atomic_inc_and_test(&page->_mapcount)) 504 if (atomic_inc_and_test(&page->_mapcount))
474 inc_page_state(nr_mapped); 505 __inc_page_state(nr_mapped);
475} 506}
476 507
477/** 508/**
@@ -495,7 +526,7 @@ void page_remove_rmap(struct page *page)
495 */ 526 */
496 if (page_test_and_clear_dirty(page)) 527 if (page_test_and_clear_dirty(page))
497 set_page_dirty(page); 528 set_page_dirty(page);
498 dec_page_state(nr_mapped); 529 __dec_page_state(nr_mapped);
499 } 530 }
500} 531}
501 532
diff --git a/mm/shmem.c b/mm/shmem.c
index d9fc277940da..a1f2f02af724 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -457,7 +457,7 @@ static void shmem_free_pages(struct list_head *next)
457 } while (next); 457 } while (next);
458} 458}
459 459
460static void shmem_truncate(struct inode *inode) 460static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
461{ 461{
462 struct shmem_inode_info *info = SHMEM_I(inode); 462 struct shmem_inode_info *info = SHMEM_I(inode);
463 unsigned long idx; 463 unsigned long idx;
@@ -475,18 +475,27 @@ static void shmem_truncate(struct inode *inode)
475 long nr_swaps_freed = 0; 475 long nr_swaps_freed = 0;
476 int offset; 476 int offset;
477 int freed; 477 int freed;
478 int punch_hole = 0;
478 479
479 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 480 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
480 idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 481 idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
481 if (idx >= info->next_index) 482 if (idx >= info->next_index)
482 return; 483 return;
483 484
484 spin_lock(&info->lock); 485 spin_lock(&info->lock);
485 info->flags |= SHMEM_TRUNCATE; 486 info->flags |= SHMEM_TRUNCATE;
486 limit = info->next_index; 487 if (likely(end == (loff_t) -1)) {
487 info->next_index = idx; 488 limit = info->next_index;
489 info->next_index = idx;
490 } else {
491 limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
492 if (limit > info->next_index)
493 limit = info->next_index;
494 punch_hole = 1;
495 }
496
488 topdir = info->i_indirect; 497 topdir = info->i_indirect;
489 if (topdir && idx <= SHMEM_NR_DIRECT) { 498 if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
490 info->i_indirect = NULL; 499 info->i_indirect = NULL;
491 nr_pages_to_free++; 500 nr_pages_to_free++;
492 list_add(&topdir->lru, &pages_to_free); 501 list_add(&topdir->lru, &pages_to_free);
@@ -573,11 +582,12 @@ static void shmem_truncate(struct inode *inode)
573 set_page_private(subdir, page_private(subdir) - freed); 582 set_page_private(subdir, page_private(subdir) - freed);
574 if (offset) 583 if (offset)
575 spin_unlock(&info->lock); 584 spin_unlock(&info->lock);
576 BUG_ON(page_private(subdir) > offset); 585 if (!punch_hole)
586 BUG_ON(page_private(subdir) > offset);
577 } 587 }
578 if (offset) 588 if (offset)
579 offset = 0; 589 offset = 0;
580 else if (subdir) { 590 else if (subdir && !page_private(subdir)) {
581 dir[diroff] = NULL; 591 dir[diroff] = NULL;
582 nr_pages_to_free++; 592 nr_pages_to_free++;
583 list_add(&subdir->lru, &pages_to_free); 593 list_add(&subdir->lru, &pages_to_free);
@@ -594,7 +604,7 @@ done2:
594 * Also, though shmem_getpage checks i_size before adding to 604 * Also, though shmem_getpage checks i_size before adding to
595 * cache, no recheck after: so fix the narrow window there too. 605 * cache, no recheck after: so fix the narrow window there too.
596 */ 606 */
597 truncate_inode_pages(inode->i_mapping, inode->i_size); 607 truncate_inode_pages_range(inode->i_mapping, start, end);
598 } 608 }
599 609
600 spin_lock(&info->lock); 610 spin_lock(&info->lock);
@@ -614,6 +624,11 @@ done2:
614 } 624 }
615} 625}
616 626
627static void shmem_truncate(struct inode *inode)
628{
629 shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
630}
631
617static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) 632static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
618{ 633{
619 struct inode *inode = dentry->d_inode; 634 struct inode *inode = dentry->d_inode;
@@ -1255,7 +1270,7 @@ out_nomem:
1255 return retval; 1270 return retval;
1256} 1271}
1257 1272
1258static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 1273int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1259{ 1274{
1260 file_accessed(file); 1275 file_accessed(file);
1261 vma->vm_ops = &shmem_vm_ops; 1276 vma->vm_ops = &shmem_vm_ops;
@@ -2083,6 +2098,7 @@ static struct file_operations shmem_file_operations = {
2083static struct inode_operations shmem_inode_operations = { 2098static struct inode_operations shmem_inode_operations = {
2084 .truncate = shmem_truncate, 2099 .truncate = shmem_truncate,
2085 .setattr = shmem_notify_change, 2100 .setattr = shmem_notify_change,
2101 .truncate_range = shmem_truncate_range,
2086}; 2102};
2087 2103
2088static struct inode_operations shmem_dir_inode_operations = { 2104static struct inode_operations shmem_dir_inode_operations = {
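
Wiring shmem_truncate_range() into i_op->truncate_range gives shmem a
hole-punching hook. A hedged sketch of how a caller would reach it (the
punch_hole() wrapper is hypothetical, not part of this diff):

	static void punch_hole(struct inode *inode, loff_t start, loff_t end)
	{
		if (inode->i_op && inode->i_op->truncate_range)
			inode->i_op->truncate_range(inode, start, end);
	}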
diff --git a/mm/swap.c b/mm/swap.c
index 73d351439ef6..ee6d71ccfa56 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -156,16 +156,22 @@ void fastcall lru_cache_add_active(struct page *page)
156 put_cpu_var(lru_add_active_pvecs); 156 put_cpu_var(lru_add_active_pvecs);
157} 157}
158 158
159void lru_add_drain(void) 159static void __lru_add_drain(int cpu)
160{ 160{
161 struct pagevec *pvec = &get_cpu_var(lru_add_pvecs); 161 struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
162 162
163 /* Caller has pinned this CPU or it is already dead, so no locking needed. */
163 if (pagevec_count(pvec)) 164 if (pagevec_count(pvec))
164 __pagevec_lru_add(pvec); 165 __pagevec_lru_add(pvec);
165 pvec = &__get_cpu_var(lru_add_active_pvecs); 166 pvec = &per_cpu(lru_add_active_pvecs, cpu);
166 if (pagevec_count(pvec)) 167 if (pagevec_count(pvec))
167 __pagevec_lru_add_active(pvec); 168 __pagevec_lru_add_active(pvec);
168 put_cpu_var(lru_add_pvecs); 169}
170
171void lru_add_drain(void)
172{
173 __lru_add_drain(get_cpu());
174 put_cpu();
169} 175}
170 176
171/* 177/*
@@ -412,17 +418,6 @@ void vm_acct_memory(long pages)
412} 418}
413 419
414#ifdef CONFIG_HOTPLUG_CPU 420#ifdef CONFIG_HOTPLUG_CPU
415static void lru_drain_cache(unsigned int cpu)
416{
417 struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
418
419 /* CPU is dead, so no locking needed. */
420 if (pagevec_count(pvec))
421 __pagevec_lru_add(pvec);
422 pvec = &per_cpu(lru_add_active_pvecs, cpu);
423 if (pagevec_count(pvec))
424 __pagevec_lru_add_active(pvec);
425}
426 421
427/* Drop the CPU's cached committed space back into the central pool. */ 422/* Drop the CPU's cached committed space back into the central pool. */
428static int cpu_swap_callback(struct notifier_block *nfb, 423static int cpu_swap_callback(struct notifier_block *nfb,
@@ -435,7 +430,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
435 if (action == CPU_DEAD) { 430 if (action == CPU_DEAD) {
436 atomic_add(*committed, &vm_committed_space); 431 atomic_add(*committed, &vm_committed_space);
437 *committed = 0; 432 *committed = 0;
438 lru_drain_cache((long)hcpu); 433 __lru_add_drain((long)hcpu);
439 } 434 }
440 return NOTIFY_OK; 435 return NOTIFY_OK;
441} 436}
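
After this consolidation there is one drain worker with two callers. A short
fragment of the resulting call pattern (dead_cpu stands in for the hcpu
argument of the notifier):

	lru_add_drain();		/* hot path: pins the current CPU itself */
	__lru_add_drain(dead_cpu);	/* CPU_DEAD path: cpu offline, no pinning */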
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0df9a57b1de8..fc2aecb70a95 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -14,6 +14,7 @@
14#include <linux/pagemap.h> 14#include <linux/pagemap.h>
15#include <linux/buffer_head.h> 15#include <linux/buffer_head.h>
16#include <linux/backing-dev.h> 16#include <linux/backing-dev.h>
17#include <linux/pagevec.h>
17 18
18#include <asm/pgtable.h> 19#include <asm/pgtable.h>
19 20
@@ -272,12 +273,11 @@ void free_page_and_swap_cache(struct page *page)
272 */ 273 */
273void free_pages_and_swap_cache(struct page **pages, int nr) 274void free_pages_and_swap_cache(struct page **pages, int nr)
274{ 275{
275 int chunk = 16;
276 struct page **pagep = pages; 276 struct page **pagep = pages;
277 277
278 lru_add_drain(); 278 lru_add_drain();
279 while (nr) { 279 while (nr) {
280 int todo = min(chunk, nr); 280 int todo = min(nr, PAGEVEC_SIZE);
281 int i; 281 int i;
282 282
283 for (i = 0; i < todo; i++) 283 for (i = 0; i < todo; i++)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index edafeace301f..6da4b28b896b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -211,6 +211,26 @@ noswap:
211 return (swp_entry_t) {0}; 211 return (swp_entry_t) {0};
212} 212}
213 213
214swp_entry_t get_swap_page_of_type(int type)
215{
216 struct swap_info_struct *si;
217 pgoff_t offset;
218
219 spin_lock(&swap_lock);
220 si = swap_info + type;
221 if (si->flags & SWP_WRITEOK) {
222 nr_swap_pages--;
223 offset = scan_swap_map(si);
224 if (offset) {
225 spin_unlock(&swap_lock);
226 return swp_entry(type, offset);
227 }
228 nr_swap_pages++;
229 }
230 spin_unlock(&swap_lock);
231 return (swp_entry_t) {0};
232}
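
get_swap_page_of_type() allocates a slot from one specific swap device,
which suggests a suspend-to-disk style consumer. A hedged sketch of such a
caller (the wrapper function and its error convention are assumptions; only
the API above is from the patch):

	static int alloc_suspend_slot(int swap_type, swp_entry_t *slot)
	{
		*slot = get_swap_page_of_type(swap_type);
		if (!slot->val)		/* (swp_entry_t){0}: full or !SWP_WRITEOK */
			return -ENOSPC;
		return 0;
	}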
233
214static struct swap_info_struct * swap_info_get(swp_entry_t entry) 234static struct swap_info_struct * swap_info_get(swp_entry_t entry)
215{ 235{
216 struct swap_info_struct * p; 236 struct swap_info_struct * p;
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index b58abcf44ed6..cdc6d431972b 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -81,13 +81,19 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
81 goto close_file; 81 goto close_file;
82 82
83 d_instantiate(dentry, inode); 83 d_instantiate(dentry, inode);
84 inode->i_size = size;
85 inode->i_nlink = 0; /* It is unlinked */ 84 inode->i_nlink = 0; /* It is unlinked */
85
86 file->f_vfsmnt = mntget(shm_mnt); 86 file->f_vfsmnt = mntget(shm_mnt);
87 file->f_dentry = dentry; 87 file->f_dentry = dentry;
88 file->f_mapping = inode->i_mapping; 88 file->f_mapping = inode->i_mapping;
89 file->f_op = &ramfs_file_operations; 89 file->f_op = &ramfs_file_operations;
90 file->f_mode = FMODE_WRITE | FMODE_READ; 90 file->f_mode = FMODE_WRITE | FMODE_READ;
91
92 /* notify everyone as to the change of file size */
93 error = do_truncate(dentry, size, file);
94 if (error < 0)
95 goto close_file;
96
91 return file; 97 return file;
92 98
93close_file: 99close_file:
@@ -123,3 +129,24 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
123{ 129{
124 return 0; 130 return 0;
125} 131}
132
133int shmem_mmap(struct file *file, struct vm_area_struct *vma)
134{
135 file_accessed(file);
136#ifndef CONFIG_MMU
137 return ramfs_nommu_mmap(file, vma);
138#else
139 return 0;
140#endif
141}
142
143#ifndef CONFIG_MMU
144unsigned long shmem_get_unmapped_area(struct file *file,
145 unsigned long addr,
146 unsigned long len,
147 unsigned long pgoff,
148 unsigned long flags)
149{
150 return ramfs_nommu_get_unmapped_area(file, addr, len, pgoff, flags);
151}
152#endif
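
For context, a hedged usage sketch of the API this file backs:
shmem_file_setup() returns an unlinked, ready-to-use file, and after this
change its size is set through do_truncate() so the i_size update is
properly propagated. The name string below is arbitrary:

	struct file *filp;

	filp = shmem_file_setup("example-segment", PAGE_SIZE, 0);
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	/* filp->f_mapping now provides one page of shared memory */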
diff --git a/mm/truncate.c b/mm/truncate.c
index 9173ab500604..7dee32745901 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -82,12 +82,15 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
82} 82}
83 83
84/** 84/**
85 * truncate_inode_pages - truncate *all* the pages from an offset 85 * truncate_inode_pages_range - truncate range of pages specified by
86 * start and end byte offsets
86 * @mapping: mapping to truncate 87 * @mapping: mapping to truncate
87 * @lstart: offset from which to truncate 88 * @lstart: offset from which to truncate
89 * @lend: offset to which to truncate
88 * 90 *
89 * Truncate the page cache at a set offset, removing the pages that are beyond 91 * Truncate the page cache, removing the pages that are between the
90 * that offset (and zeroing out partial pages). 92 * specified offsets (and zeroing out the partial page at lstart,
93 * if lstart is not page aligned).
91 * 94 *
92 * Truncate takes two passes - the first pass is nonblocking. It will not 95 * Truncate takes two passes - the first pass is nonblocking. It will not
93 * block on page locks and it will not block on writeback. The second pass 96 * block on page locks and it will not block on writeback. The second pass
@@ -101,12 +104,12 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
101 * We pass down the cache-hot hint to the page freeing code. Even if the 104 * We pass down the cache-hot hint to the page freeing code. Even if the
102 * mapping is large, it is probably the case that the final pages are the most 105 * mapping is large, it is probably the case that the final pages are the most
103 * recently touched, and freeing happens in ascending file offset order. 106 * recently touched, and freeing happens in ascending file offset order.
104 *
105 * Called under (and serialised by) inode->i_sem.
106 */ 107 */
107void truncate_inode_pages(struct address_space *mapping, loff_t lstart) 108void truncate_inode_pages_range(struct address_space *mapping,
109 loff_t lstart, loff_t lend)
108{ 110{
109 const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; 111 const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
112 pgoff_t end;
110 const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); 113 const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
111 struct pagevec pvec; 114 struct pagevec pvec;
112 pgoff_t next; 115 pgoff_t next;
@@ -115,13 +118,22 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
115 if (mapping->nrpages == 0) 118 if (mapping->nrpages == 0)
116 return; 119 return;
117 120
121 BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
122 end = (lend >> PAGE_CACHE_SHIFT);
123
118 pagevec_init(&pvec, 0); 124 pagevec_init(&pvec, 0);
119 next = start; 125 next = start;
120 while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 126 while (next <= end &&
127 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
121 for (i = 0; i < pagevec_count(&pvec); i++) { 128 for (i = 0; i < pagevec_count(&pvec); i++) {
122 struct page *page = pvec.pages[i]; 129 struct page *page = pvec.pages[i];
123 pgoff_t page_index = page->index; 130 pgoff_t page_index = page->index;
124 131
132 if (page_index > end) {
133 next = page_index;
134 break;
135 }
136
125 if (page_index > next) 137 if (page_index > next)
126 next = page_index; 138 next = page_index;
127 next++; 139 next++;
@@ -157,9 +169,15 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
157 next = start; 169 next = start;
158 continue; 170 continue;
159 } 171 }
172 if (pvec.pages[0]->index > end) {
173 pagevec_release(&pvec);
174 break;
175 }
160 for (i = 0; i < pagevec_count(&pvec); i++) { 176 for (i = 0; i < pagevec_count(&pvec); i++) {
161 struct page *page = pvec.pages[i]; 177 struct page *page = pvec.pages[i];
162 178
179 if (page->index > end)
180 break;
163 lock_page(page); 181 lock_page(page);
164 wait_on_page_writeback(page); 182 wait_on_page_writeback(page);
165 if (page->index > next) 183 if (page->index > next)
@@ -171,7 +189,19 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
171 pagevec_release(&pvec); 189 pagevec_release(&pvec);
172 } 190 }
173} 191}
192EXPORT_SYMBOL(truncate_inode_pages_range);
174 193
194/**
195 * truncate_inode_pages - truncate *all* the pages from an offset
196 * @mapping: mapping to truncate
197 * @lstart: offset from which to truncate
198 *
199 * Called under (and serialised by) inode->i_sem.
200 */
201void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
202{
203 truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
204}
175EXPORT_SYMBOL(truncate_inode_pages); 205EXPORT_SYMBOL(truncate_inode_pages);
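
A hedged usage sketch of the new range call: note that lend names the last
byte of the range, not one past it, which is why whole-page callers pass a
page-aligned end minus one (the BUG_ON above enforces exactly this):

	loff_t start = 4 * PAGE_CACHE_SIZE;
	loff_t end   = 8 * PAGE_CACHE_SIZE - 1;	/* last byte of page 7 */

	truncate_inode_pages_range(inode->i_mapping, start, end);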
176 206
177/** 207/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 795a050fe471..be8235fb1939 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,9 +63,6 @@ struct scan_control {
63 63
64 unsigned long nr_mapped; /* From page_state */ 64 unsigned long nr_mapped; /* From page_state */
65 65
66 /* How many pages shrink_cache() should reclaim */
67 int nr_to_reclaim;
68
69 /* Ask shrink_caches, or shrink_zone to scan at this priority */ 66 /* Ask shrink_caches, or shrink_zone to scan at this priority */
70 unsigned int priority; 67 unsigned int priority;
71 68
@@ -74,9 +71,6 @@ struct scan_control {
74 71
75 int may_writepage; 72 int may_writepage;
76 73
77 /* Can pages be swapped as part of reclaim? */
78 int may_swap;
79
80 /* This context's SWAP_CLUSTER_MAX. If freeing memory for 74 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
81 * suspend, we effectively ignore SWAP_CLUSTER_MAX. 75 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
82 * In this context, it doesn't matter that we scan the 76 * In this context, it doesn't matter that we scan the
@@ -430,8 +424,6 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
430 * Try to allocate it some swap space here. 424 * Try to allocate it some swap space here.
431 */ 425 */
432 if (PageAnon(page) && !PageSwapCache(page)) { 426 if (PageAnon(page) && !PageSwapCache(page)) {
433 if (!sc->may_swap)
434 goto keep_locked;
435 if (!add_to_swap(page)) 427 if (!add_to_swap(page))
436 goto activate_locked; 428 goto activate_locked;
437 } 429 }
@@ -653,17 +645,17 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
653 goto done; 645 goto done;
654 646
655 max_scan -= nr_scan; 647 max_scan -= nr_scan;
656 if (current_is_kswapd())
657 mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
658 else
659 mod_page_state_zone(zone, pgscan_direct, nr_scan);
660 nr_freed = shrink_list(&page_list, sc); 648 nr_freed = shrink_list(&page_list, sc);
661 if (current_is_kswapd())
662 mod_page_state(kswapd_steal, nr_freed);
663 mod_page_state_zone(zone, pgsteal, nr_freed);
664 sc->nr_to_reclaim -= nr_freed;
665 649
666 spin_lock_irq(&zone->lru_lock); 650 local_irq_disable();
651 if (current_is_kswapd()) {
652 __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
653 __mod_page_state(kswapd_steal, nr_freed);
654 } else
655 __mod_page_state_zone(zone, pgscan_direct, nr_scan);
656 __mod_page_state_zone(zone, pgsteal, nr_freed);
657
658 spin_lock(&zone->lru_lock);
667 /* 659 /*
668 * Put back any unfreeable pages. 660 * Put back any unfreeable pages.
669 */ 661 */
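
The lock conversion in this hunk is safe because the call it replaces
decomposes exactly into the two operations now written out, letting the
__mod_page_state*() updates share the same irq-off window:

	spin_lock_irq(&zone->lru_lock);
	/* is equivalent to */
	local_irq_disable();
	spin_lock(&zone->lru_lock);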
@@ -825,11 +817,13 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
825 } 817 }
826 } 818 }
827 zone->nr_active += pgmoved; 819 zone->nr_active += pgmoved;
828 spin_unlock_irq(&zone->lru_lock); 820 spin_unlock(&zone->lru_lock);
829 pagevec_release(&pvec); 821
822 __mod_page_state_zone(zone, pgrefill, pgscanned);
823 __mod_page_state(pgdeactivate, pgdeactivate);
824 local_irq_enable();
830 825
831 mod_page_state_zone(zone, pgrefill, pgscanned); 826 pagevec_release(&pvec);
832 mod_page_state(pgdeactivate, pgdeactivate);
833} 827}
834 828
835/* 829/*
@@ -861,8 +855,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
861 else 855 else
862 nr_inactive = 0; 856 nr_inactive = 0;
863 857
864 sc->nr_to_reclaim = sc->swap_cluster_max;
865
866 while (nr_active || nr_inactive) { 858 while (nr_active || nr_inactive) {
867 if (nr_active) { 859 if (nr_active) {
868 sc->nr_to_scan = min(nr_active, 860 sc->nr_to_scan = min(nr_active,
@@ -876,8 +868,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
876 (unsigned long)sc->swap_cluster_max); 868 (unsigned long)sc->swap_cluster_max);
877 nr_inactive -= sc->nr_to_scan; 869 nr_inactive -= sc->nr_to_scan;
878 shrink_cache(zone, sc); 870 shrink_cache(zone, sc);
879 if (sc->nr_to_reclaim <= 0)
880 break;
881 } 871 }
882 } 872 }
883 873
@@ -910,7 +900,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
910 for (i = 0; zones[i] != NULL; i++) { 900 for (i = 0; zones[i] != NULL; i++) {
911 struct zone *zone = zones[i]; 901 struct zone *zone = zones[i];
912 902
913 if (zone->present_pages == 0) 903 if (!populated_zone(zone))
914 continue; 904 continue;
915 905
916 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) 906 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
@@ -952,7 +942,6 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
952 942
953 sc.gfp_mask = gfp_mask; 943 sc.gfp_mask = gfp_mask;
954 sc.may_writepage = 0; 944 sc.may_writepage = 0;
955 sc.may_swap = 1;
956 945
957 inc_page_state(allocstall); 946 inc_page_state(allocstall);
958 947
@@ -1055,7 +1044,6 @@ loop_again:
1055 total_reclaimed = 0; 1044 total_reclaimed = 0;
1056 sc.gfp_mask = GFP_KERNEL; 1045 sc.gfp_mask = GFP_KERNEL;
1057 sc.may_writepage = 0; 1046 sc.may_writepage = 0;
1058 sc.may_swap = 1;
1059 sc.nr_mapped = read_page_state(nr_mapped); 1047 sc.nr_mapped = read_page_state(nr_mapped);
1060 1048
1061 inc_page_state(pageoutrun); 1049 inc_page_state(pageoutrun);
@@ -1084,7 +1072,7 @@ loop_again:
1084 for (i = pgdat->nr_zones - 1; i >= 0; i--) { 1072 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1085 struct zone *zone = pgdat->node_zones + i; 1073 struct zone *zone = pgdat->node_zones + i;
1086 1074
1087 if (zone->present_pages == 0) 1075 if (!populated_zone(zone))
1088 continue; 1076 continue;
1089 1077
1090 if (zone->all_unreclaimable && 1078 if (zone->all_unreclaimable &&
@@ -1121,7 +1109,7 @@ scan:
1121 struct zone *zone = pgdat->node_zones + i; 1109 struct zone *zone = pgdat->node_zones + i;
1122 int nr_slab; 1110 int nr_slab;
1123 1111
1124 if (zone->present_pages == 0) 1112 if (!populated_zone(zone))
1125 continue; 1113 continue;
1126 1114
1127 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 1115 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
@@ -1273,7 +1261,7 @@ void wakeup_kswapd(struct zone *zone, int order)
1273{ 1261{
1274 pg_data_t *pgdat; 1262 pg_data_t *pgdat;
1275 1263
1276 if (zone->present_pages == 0) 1264 if (!populated_zone(zone))
1277 return; 1265 return;
1278 1266
1279 pgdat = zone->zone_pgdat; 1267 pgdat = zone->zone_pgdat;
@@ -1353,76 +1341,3 @@ static int __init kswapd_init(void)
1353} 1341}
1354 1342
1355module_init(kswapd_init) 1343module_init(kswapd_init)
1356
1357
1358/*
1359 * Try to free up some pages from this zone through reclaim.
1360 */
1361int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1362{
1363 struct scan_control sc;
1364 int nr_pages = 1 << order;
1365 int total_reclaimed = 0;
1366
1367 /* The reclaim may sleep, so don't do it if sleep isn't allowed */
1368 if (!(gfp_mask & __GFP_WAIT))
1369 return 0;
1370 if (zone->all_unreclaimable)
1371 return 0;
1372
1373 sc.gfp_mask = gfp_mask;
1374 sc.may_writepage = 0;
1375 sc.may_swap = 0;
1376 sc.nr_mapped = read_page_state(nr_mapped);
1377 sc.nr_scanned = 0;
1378 sc.nr_reclaimed = 0;
1379 /* scan at the highest priority */
1380 sc.priority = 0;
1381 disable_swap_token();
1382
1383 if (nr_pages > SWAP_CLUSTER_MAX)
1384 sc.swap_cluster_max = nr_pages;
1385 else
1386 sc.swap_cluster_max = SWAP_CLUSTER_MAX;
1387
1388 /* Don't reclaim the zone if there are other reclaimers active */
1389 if (atomic_read(&zone->reclaim_in_progress) > 0)
1390 goto out;
1391
1392 shrink_zone(zone, &sc);
1393 total_reclaimed = sc.nr_reclaimed;
1394
1395 out:
1396 return total_reclaimed;
1397}
1398
1399asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
1400 unsigned int state)
1401{
1402 struct zone *z;
1403 int i;
1404
1405 if (!capable(CAP_SYS_ADMIN))
1406 return -EACCES;
1407
1408 if (node >= MAX_NUMNODES || !node_online(node))
1409 return -EINVAL;
1410
1411 /* This will break if we ever add more zones */
1412 if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
1413 return -EINVAL;
1414
1415 for (i = 0; i < MAX_NR_ZONES; i++) {
1416 if (!(zone & 1<<i))
1417 continue;
1418
1419 z = &NODE_DATA(node)->node_zones[i];
1420
1421 if (state)
1422 z->reclaim_pages = 1;
1423 else
1424 z->reclaim_pages = 0;
1425 }
1426
1427 return 0;
1428}
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index cac2e774dd81..3e6c694bbad1 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -101,10 +101,22 @@ static void ip_map_put(struct cache_head *item, struct cache_detail *cd)
101 } 101 }
102} 102}
103 103
104#if IP_HASHBITS == 8
105/* hash_long on a 64 bit machine is currently REALLY BAD for
106 * IP addresses in network byte order, which is reverse-endian on a
107 * little-endian machine. So use a trivial but reliable hash instead.
108 */
109static inline int hash_ip(unsigned long ip)
110{
111 int hash = ip ^ (ip>>16);
112 return (hash ^ (hash>>8)) & 0xff;
113}
114#endif
115
104static inline int ip_map_hash(struct ip_map *item) 116static inline int ip_map_hash(struct ip_map *item)
105{ 117{
106 return hash_str(item->m_class, IP_HASHBITS) ^ 118 return hash_str(item->m_class, IP_HASHBITS) ^
107 hash_long((unsigned long)item->m_addr.s_addr, IP_HASHBITS); 119 hash_ip((unsigned long)item->m_addr.s_addr);
108} 120}
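
A standalone illustration (userspace C) of the xor-fold above: both halves
of the address influence the final 8-bit bucket, avoiding the weakness the
comment attributes to hash_long() on little-endian addresses:

	#include <stdio.h>

	static int hash_ip(unsigned long ip)
	{
		int hash = ip ^ (ip >> 16);
		return (hash ^ (hash >> 8)) & 0xff;
	}

	int main(void)
	{
		/* 127.0.0.1 as a little-endian s_addr */
		printf("%#x\n", hash_ip(0x0100007fUL));	/* prints 0x7e */
		return 0;
	}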
109static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp) 121static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp)
110{ 122{
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d68eba481291..e67613e4eb18 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1026,7 +1026,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
1026 } else { 1026 } else {
1027 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", 1027 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
1028 svsk->sk_server->sv_name, -len); 1028 svsk->sk_server->sv_name, -len);
1029 svc_sock_received(svsk); 1029 goto err_delete;
1030 } 1030 }
1031 1031
1032 return len; 1032 return len;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index db99ed434f3a..39cba97c5eb9 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -25,7 +25,6 @@
25#define kdebug(FMT, a...) do {} while(0) 25#define kdebug(FMT, a...) do {} while(0)
26#endif 26#endif
27 27
28extern struct key_type key_type_dead;
29extern struct key_type key_type_user; 28extern struct key_type key_type_user;
30 29
31/*****************************************************************************/ 30/*****************************************************************************/
diff --git a/security/keys/key.c b/security/keys/key.c
index 01bcfecb7eae..99781b798312 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -36,7 +36,7 @@ static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
36DECLARE_RWSEM(key_construction_sem); 36DECLARE_RWSEM(key_construction_sem);
37 37
38/* any key whose type gets unregistered will be re-typed to this */ 38
39struct key_type key_type_dead = { 39static struct key_type key_type_dead = {
40 .name = "dead", 40 .name = "dead",
41}; 41};
42 42
@@ -240,9 +240,9 @@ static inline void key_alloc_serial(struct key *key)
240/* 240/*
241 * allocate a key of the specified type 241 * allocate a key of the specified type
242 * - update the user's quota to reflect the existence of the key 242 * - update the user's quota to reflect the existence of the key
243 * - called from a key-type operation with key_types_sem read-locked by either 243 * - called from a key-type operation with key_types_sem read-locked by
244 * key_create_or_update() or by key_duplicate(); this prevents unregistration 244 * key_create_or_update()
245 * of the key type 245 * - this prevents unregistration of the key type
246 * - upon return the key is as yet uninstantiated; the caller needs to either 246 * - upon return the key is as yet uninstantiated; the caller needs to either
247 * instantiate the key or discard it before returning 247 * instantiate the key or discard it before returning
248 */ 248 */
@@ -889,56 +889,6 @@ EXPORT_SYMBOL(key_update);
889 889
890/*****************************************************************************/ 890/*****************************************************************************/
891/* 891/*
892 * duplicate a key, potentially with a revised description
893 * - must be supported by the keytype (keyrings for instance can be duplicated)
894 */
895struct key *key_duplicate(struct key *source, const char *desc)
896{
897 struct key *key;
898 int ret;
899
900 key_check(source);
901
902 if (!desc)
903 desc = source->description;
904
905 down_read(&key_types_sem);
906
907 ret = -EINVAL;
908 if (!source->type->duplicate)
909 goto error;
910
911 /* allocate and instantiate a key */
912 key = key_alloc(source->type, desc, current->fsuid, current->fsgid,
913 source->perm, 0);
914 if (IS_ERR(key))
915 goto error_k;
916
917 down_read(&source->sem);
918 ret = key->type->duplicate(key, source);
919 up_read(&source->sem);
920 if (ret < 0)
921 goto error2;
922
923 atomic_inc(&key->user->nikeys);
924 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
925
926 error_k:
927 up_read(&key_types_sem);
928 out:
929 return key;
930
931 error2:
932 key_put(key);
933 error:
934 up_read(&key_types_sem);
935 key = ERR_PTR(ret);
936 goto out;
937
938} /* end key_duplicate() */
939
940/*****************************************************************************/
941/*
942 * revoke a key 892 * revoke a key
943 */ 893 */
944void key_revoke(struct key *key) 894void key_revoke(struct key *key)
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4e9fa8be44b8..5d22c0388b32 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -48,7 +48,6 @@ static inline unsigned keyring_hash(const char *desc)
48 */ 48 */
49static int keyring_instantiate(struct key *keyring, 49static int keyring_instantiate(struct key *keyring,
50 const void *data, size_t datalen); 50 const void *data, size_t datalen);
51static int keyring_duplicate(struct key *keyring, const struct key *source);
52static int keyring_match(const struct key *keyring, const void *criterion); 51static int keyring_match(const struct key *keyring, const void *criterion);
53static void keyring_destroy(struct key *keyring); 52static void keyring_destroy(struct key *keyring);
54static void keyring_describe(const struct key *keyring, struct seq_file *m); 53static void keyring_describe(const struct key *keyring, struct seq_file *m);
@@ -59,7 +58,6 @@ struct key_type key_type_keyring = {
59 .name = "keyring", 58 .name = "keyring",
60 .def_datalen = sizeof(struct keyring_list), 59 .def_datalen = sizeof(struct keyring_list),
61 .instantiate = keyring_instantiate, 60 .instantiate = keyring_instantiate,
62 .duplicate = keyring_duplicate,
63 .match = keyring_match, 61 .match = keyring_match,
64 .destroy = keyring_destroy, 62 .destroy = keyring_destroy,
65 .describe = keyring_describe, 63 .describe = keyring_describe,
@@ -70,7 +68,7 @@ struct key_type key_type_keyring = {
70 * semaphore to serialise link/link calls to prevent two link calls in parallel 68 * semaphore to serialise link/link calls to prevent two link calls in parallel
71 * introducing a cycle 69 * introducing a cycle
72 */ 70 */
73DECLARE_RWSEM(keyring_serialise_link_sem); 71static DECLARE_RWSEM(keyring_serialise_link_sem);
74 72
75/*****************************************************************************/ 73/*****************************************************************************/
76/* 74/*
@@ -120,68 +118,6 @@ static int keyring_instantiate(struct key *keyring,
120 118
121/*****************************************************************************/ 119/*****************************************************************************/
122/* 120/*
123 * duplicate the list of subscribed keys from a source keyring into this one
124 */
125static int keyring_duplicate(struct key *keyring, const struct key *source)
126{
127 struct keyring_list *sklist, *klist;
128 unsigned max;
129 size_t size;
130 int loop, ret;
131
132 const unsigned limit =
133 (PAGE_SIZE - sizeof(*klist)) / sizeof(struct key *);
134
135 ret = 0;
136
137 /* find out how many keys are currently linked */
138 rcu_read_lock();
139 sklist = rcu_dereference(source->payload.subscriptions);
140 max = 0;
141 if (sklist)
142 max = sklist->nkeys;
143 rcu_read_unlock();
144
145 /* allocate a new payload and stuff load with key links */
146 if (max > 0) {
147 BUG_ON(max > limit);
148
149 max = (max + 3) & ~3;
150 if (max > limit)
151 max = limit;
152
153 ret = -ENOMEM;
154 size = sizeof(*klist) + sizeof(struct key *) * max;
155 klist = kmalloc(size, GFP_KERNEL);
156 if (!klist)
157 goto error;
158
159 /* set links */
160 rcu_read_lock();
161 sklist = rcu_dereference(source->payload.subscriptions);
162
163 klist->maxkeys = max;
164 klist->nkeys = sklist->nkeys;
165 memcpy(klist->keys,
166 sklist->keys,
167 sklist->nkeys * sizeof(struct key *));
168
169 for (loop = klist->nkeys - 1; loop >= 0; loop--)
170 atomic_inc(&klist->keys[loop]->usage);
171
172 rcu_read_unlock();
173
174 rcu_assign_pointer(keyring->payload.subscriptions, klist);
175 ret = 0;
176 }
177
178 error:
179 return ret;
180
181} /* end keyring_duplicate() */
182
183/*****************************************************************************/
184/*
185 * match keyrings on their name 121 * match keyrings on their name
186 */ 122 */
187static int keyring_match(const struct key *keyring, const void *description) 123static int keyring_match(const struct key *keyring, const void *description)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index cbda3b2780a1..8e71895b97a7 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -26,7 +26,6 @@
26struct key_type key_type_user = { 26struct key_type key_type_user = {
27 .name = "user", 27 .name = "user",
28 .instantiate = user_instantiate, 28 .instantiate = user_instantiate,
29 .duplicate = user_duplicate,
30 .update = user_update, 29 .update = user_update,
31 .match = user_match, 30 .match = user_match,
32 .destroy = user_destroy, 31 .destroy = user_destroy,
@@ -68,42 +67,10 @@ error:
68 return ret; 67 return ret;
69 68
70} /* end user_instantiate() */ 69} /* end user_instantiate() */
71
72EXPORT_SYMBOL_GPL(user_instantiate); 70EXPORT_SYMBOL_GPL(user_instantiate);
73 71
74/*****************************************************************************/ 72/*****************************************************************************/
75/* 73/*
76 * duplicate a user defined key
77 * - both keys' semaphores are locked against further modification
78 * - the new key cannot yet be accessed
79 */
80int user_duplicate(struct key *key, const struct key *source)
81{
82 struct user_key_payload *upayload, *spayload;
83 int ret;
84
85 /* just copy the payload */
86 ret = -ENOMEM;
87 upayload = kmalloc(sizeof(*upayload) + source->datalen, GFP_KERNEL);
88 if (upayload) {
89 spayload = rcu_dereference(source->payload.data);
90 BUG_ON(source->datalen != spayload->datalen);
91
92 upayload->datalen = key->datalen = spayload->datalen;
93 memcpy(upayload->data, spayload->data, key->datalen);
94
95 key->payload.data = upayload;
96 ret = 0;
97 }
98
99 return ret;
100
101} /* end user_duplicate() */
102
103EXPORT_SYMBOL_GPL(user_duplicate);
104
105/*****************************************************************************/
106/*
107 * dispose of the old data from an updated user defined key 74 * dispose of the old data from an updated user defined key
108 */ 75 */
109static void user_update_rcu_disposal(struct rcu_head *rcu) 76static void user_update_rcu_disposal(struct rcu_head *rcu)
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 0e1352a555c8..e59da6398d44 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -376,7 +376,7 @@ static ssize_t selinux_transaction_write(struct file *file, const char __user *b
376 char *data; 376 char *data;
377 ssize_t rv; 377 ssize_t rv;
378 378
379 if (ino >= sizeof(write_op)/sizeof(write_op[0]) || !write_op[ino]) 379 if (ino >= ARRAY_SIZE(write_op) || !write_op[ino])
380 return -EINVAL; 380 return -EINVAL;
381 381
382 data = simple_transaction_get(file, buf, size); 382 data = simple_transaction_get(file, buf, size);
@@ -1161,7 +1161,7 @@ static int sel_make_avc_files(struct dentry *dir)
1161#endif 1161#endif
1162 }; 1162 };
1163 1163
1164 for (i = 0; i < sizeof (files) / sizeof (files[0]); i++) { 1164 for (i = 0; i < ARRAY_SIZE(files); i++) {
1165 struct inode *inode; 1165 struct inode *inode;
1166 struct dentry *dentry; 1166 struct dentry *dentry;
1167 1167
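
These sizeof/sizeof conversions (here and in the avtab and policydb hunks
below) lean on the standard helper from include/linux/kernel.h, which in
this era is simply:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

Same arithmetic as the open-coded forms, but it keeps the element type tied
to the array and reads at a glance.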
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index dde094feb20d..d049c7acbc8b 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -359,7 +359,7 @@ int avtab_read_item(void *fp, u32 vers, struct avtab *a,
359 return -1; 359 return -1;
360 } 360 }
361 361
362 for (i = 0; i < sizeof(spec_order)/sizeof(u16); i++) { 362 for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
363 if (val & spec_order[i]) { 363 if (val & spec_order[i]) {
364 key.specified = spec_order[i] | enabled; 364 key.specified = spec_order[i] | enabled;
365 datum.data = le32_to_cpu(buf32[items++]); 365 datum.data = le32_to_cpu(buf32[items++]);
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 0ac311dc8371..0111990ba837 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -103,7 +103,7 @@ static struct policydb_compat_info *policydb_lookup_compat(int version)
103 int i; 103 int i;
104 struct policydb_compat_info *info = NULL; 104 struct policydb_compat_info *info = NULL;
105 105
106 for (i = 0; i < sizeof(policydb_compat)/sizeof(*info); i++) { 106 for (i = 0; i < ARRAY_SIZE(policydb_compat); i++) {
107 if (policydb_compat[i].version == version) { 107 if (policydb_compat[i].version == version) {
108 info = &policydb_compat[i]; 108 info = &policydb_compat[i];
109 break; 109 break;
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 3f30c57676c1..49796be955f3 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -46,8 +46,6 @@
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/module.h> 47#include <linux/module.h>
48#include <linux/stddef.h> 48#include <linux/stddef.h>
49#include <linux/pm.h>
50#include <linux/pm_legacy.h>
51#include <linux/isapnp.h> 49#include <linux/isapnp.h>
52#include <linux/pnp.h> 50#include <linux/pnp.h>
53#include <linux/spinlock.h> 51#include <linux/spinlock.h>
@@ -105,9 +103,6 @@ typedef struct
105 int irq_ok; 103 int irq_ok;
106 mixer_ents *mix_devices; 104 mixer_ents *mix_devices;
107 int mixer_output_port; 105 int mixer_output_port;
108
109 /* Power management */
110 struct pm_dev *pmdev;
111} ad1848_info; 106} ad1848_info;
112 107
113typedef struct ad1848_port_info 108typedef struct ad1848_port_info
@@ -201,7 +196,6 @@ static void ad1848_halt(int dev);
201static void ad1848_halt_input(int dev); 196static void ad1848_halt_input(int dev);
202static void ad1848_halt_output(int dev); 197static void ad1848_halt_output(int dev);
203static void ad1848_trigger(int dev, int bits); 198static void ad1848_trigger(int dev, int bits);
204static int ad1848_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data);
205 199
206#ifndef EXCLUDE_TIMERS 200#ifndef EXCLUDE_TIMERS
207static int ad1848_tmr_install(int dev); 201static int ad1848_tmr_install(int dev);
@@ -2027,10 +2021,6 @@ int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback,
2027 2021
2028 nr_ad1848_devs++; 2022 nr_ad1848_devs++;
2029 2023
2030 devc->pmdev = pm_register(PM_ISA_DEV, my_dev, ad1848_pm_callback);
2031 if (devc->pmdev)
2032 devc->pmdev->data = devc;
2033
2034 ad1848_init_hw(devc); 2024 ad1848_init_hw(devc);
2035 2025
2036 if (irq > 0) 2026 if (irq > 0)
@@ -2197,9 +2187,6 @@ void ad1848_unload(int io_base, int irq, int dma_playback, int dma_capture, int
2197 if(mixer>=0) 2187 if(mixer>=0)
2198 sound_unload_mixerdev(mixer); 2188 sound_unload_mixerdev(mixer);
2199 2189
2200 if (devc->pmdev)
2201 pm_unregister(devc->pmdev);
2202
2203 nr_ad1848_devs--; 2190 nr_ad1848_devs--;
2204 for ( ; i < nr_ad1848_devs ; i++) 2191 for ( ; i < nr_ad1848_devs ; i++)
2205 adev_info[i] = adev_info[i+1]; 2192 adev_info[i] = adev_info[i+1];
@@ -2811,85 +2798,6 @@ static int ad1848_tmr_install(int dev)
 }
 #endif /* EXCLUDE_TIMERS */
 
-static int ad1848_suspend(ad1848_info *devc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&devc->lock,flags);
-
-	ad_mute(devc);
-
-	spin_unlock_irqrestore(&devc->lock,flags);
-	return 0;
-}
-
-static int ad1848_resume(ad1848_info *devc)
-{
-	int mixer_levels[32], i;
-
-	/* Thinkpad is a bit more of PITA than normal. The BIOS tends to
-	   restore it in a different config to the one we use.  Need to
-	   fix this somehow */
-
-	/* store old mixer levels */
-	memcpy(mixer_levels, devc->levels, sizeof (mixer_levels));
-	ad1848_init_hw(devc);
-
-	/* restore mixer levels */
-	for (i = 0; i < 32; i++)
-		ad1848_mixer_set(devc, devc->dev_no, mixer_levels[i]);
-
-	if (!devc->subtype) {
-		static signed char interrupt_bits[12] = { -1, -1, -1, -1, -1, 0x00, -1, 0x08, -1, 0x10, 0x18, 0x20 };
-		static char dma_bits[4] = { 1, 2, 0, 3 };
-		unsigned long flags;
-		signed char bits;
-		char dma2_bit = 0;
-
-		int config_port = devc->base + 0;
-
-		bits = interrupt_bits[devc->irq];
-		if (bits == -1) {
-			printk(KERN_ERR "MSS: Bad IRQ %d\n", devc->irq);
-			return -1;
-		}
-
-		spin_lock_irqsave(&devc->lock,flags);
-
-		outb((bits | 0x40), config_port);
-
-		if (devc->dma2 != -1 && devc->dma2 != devc->dma1)
-			if ( (devc->dma1 == 0 && devc->dma2 == 1) ||
-			     (devc->dma1 == 1 && devc->dma2 == 0) ||
-			     (devc->dma1 == 3 && devc->dma2 == 0))
-				dma2_bit = 0x04;
-
-		outb((bits | dma_bits[devc->dma1] | dma2_bit), config_port);
-		spin_unlock_irqrestore(&devc->lock,flags);
-	}
-
-	return 0;
-}
-
-static int ad1848_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
-	ad1848_info *devc = dev->data;
-	if (devc) {
-		DEB(printk("ad1848: pm event received: 0x%x\n", rqst));
-
-		switch (rqst) {
-		case PM_SUSPEND:
-			ad1848_suspend(devc);
-			break;
-		case PM_RESUME:
-			ad1848_resume(devc);
-			break;
-		}
-	}
-	return 0;
-}
-
-
 EXPORT_SYMBOL(ad1848_detect);
 EXPORT_SYMBOL(ad1848_init);
 EXPORT_SYMBOL(ad1848_unload);
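[Note: ad1848 is an ISA codec driver, so this patch drops its suspend/resume support outright; there is no pci_driver hook to migrate to. If the behaviour were wanted again, the deleted ad1848_suspend()/ad1848_resume() helpers would need a new home, for instance on the platform bus. A minimal sketch follows; the _plat names and the drvdata wiring are hypothetical, while the two helpers are the ones deleted above.]

	/* Hypothetical rehoming of the deleted helpers on the platform bus;
	 * needs <linux/platform_device.h> and assumes platform_set_drvdata()
	 * was called with the ad1848_info pointer at setup time. */
	static int ad1848_plat_suspend(struct platform_device *pdev, pm_message_t state)
	{
		ad1848_info *devc = platform_get_drvdata(pdev);

		return ad1848_suspend(devc);	/* mutes the codec under devc->lock */
	}

	static int ad1848_plat_resume(struct platform_device *pdev)
	{
		ad1848_info *devc = platform_get_drvdata(pdev);

		return ad1848_resume(devc);	/* re-runs init_hw, replays mixer levels */
	}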
diff --git a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c
index adc689649fe1..46dd41dc2a34 100644
--- a/sound/oss/cs4281/cs4281m.c
+++ b/sound/oss/cs4281/cs4281m.c
@@ -298,7 +298,6 @@ struct cs4281_state {
 	struct cs4281_pipeline pl[CS4281_NUMBER_OF_PIPELINES];
 };
 
-#include <linux/pm_legacy.h>
 #include "cs4281pm-24.c"
 
 #if CSDEBUG
@@ -4256,9 +4255,6 @@ static void __devinit cs4281_InitPM(struct cs4281_state *s)
 static int __devinit cs4281_probe(struct pci_dev *pcidev,
 				  const struct pci_device_id *pciid)
 {
-#ifndef NOT_CS4281_PM
-	struct pm_dev *pmdev;
-#endif
 	struct cs4281_state *s;
 	dma_addr_t dma_mask;
 	mm_segment_t fs;
@@ -4374,19 +4370,7 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev,
 	}
 #ifndef NOT_CS4281_PM
 	cs4281_InitPM(s);
-	pmdev = cs_pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), cs4281_pm_callback);
-	if (pmdev)
-	{
-		CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
-			"cs4281: probe() pm_register() succeeded (%p).\n", pmdev));
-		pmdev->data = s;
-	}
-	else
-	{
-		CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO
-			"cs4281: probe() pm_register() failed (%p).\n", pmdev));
-		s->pm.flags |= CS4281_PM_NOT_REGISTERED;
-	}
+	s->pm.flags |= CS4281_PM_NOT_REGISTERED;
 #endif
 
 	pci_set_master(pcidev);	// enable bus mastering
@@ -4487,9 +4471,6 @@ static int __init cs4281_init_module(void)
 static void __exit cs4281_cleanup_module(void)
 {
 	pci_unregister_driver(&cs4281_pci_driver);
-#ifndef NOT_CS4281_PM
-	cs_pm_unregister_all(cs4281_pm_callback);
-#endif
 	CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
 		printk(KERN_INFO "cs4281: cleanup_cs4281() finished\n"));
 }
diff --git a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c
index d2a453aff0aa..90cbd7679534 100644
--- a/sound/oss/cs4281/cs4281pm-24.c
+++ b/sound/oss/cs4281/cs4281pm-24.c
@@ -27,9 +27,6 @@
 #ifndef NOT_CS4281_PM
 #include <linux/pm.h>
 
-#define cs_pm_register(a, b, c) pm_register((a), (b), (c));
-#define cs_pm_unregister_all(a) pm_unregister_all((a));
-
 static int cs4281_suspend(struct cs4281_state *s);
 static int cs4281_resume(struct cs4281_state *s);
 /*
@@ -41,42 +38,6 @@ static int cs4281_resume(struct cs4281_state *s);
 #define CS4281_SUSPEND_TBL cs4281_suspend_null
 #define CS4281_RESUME_TBL cs4281_resume_null
 
-static int cs4281_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
-	struct cs4281_state *state;
-
-	CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
-		"cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n",
-			dev,(unsigned)rqst,data));
-	state = (struct cs4281_state *) dev->data;
-	if (state) {
-		switch(rqst) {
-			case PM_SUSPEND:
-				CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
-					"cs4281: PM suspend request\n"));
-				if(cs4281_suspend(state))
-				{
-					CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
-						"cs4281: PM suspend request refused\n"));
-					return 1;
-				}
-				break;
-			case PM_RESUME:
-				CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
-					"cs4281: PM resume request\n"));
-				if(cs4281_resume(state))
-				{
-					CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
-						"cs4281: PM resume request refused\n"));
-					return 1;
-				}
-				break;
-		}
-	}
-
-	return 0;
-}
-
 #else /* CS4281_PM */
 #define CS4281_SUSPEND_TBL cs4281_suspend_null
 #define CS4281_RESUME_TBL cs4281_resume_null
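[Note: the removed cs_pm_register()/cs_pm_unregister_all() wrappers expanded with a trailing semicolon, so every call site ended in a stray empty statement; deleting them removes that wart along with the callback. What survives in this branch is only the suspend/resume table pair, and both names resolve to the null stubs. Their bodies are not visible in this diff; the shape below is an assumption, with signatures matching the pci_driver .suspend/.resume hooks of this kernel generation.]

	/* Assumed shape of the no-op entries behind CS4281_SUSPEND_TBL /
	 * CS4281_RESUME_TBL: nothing to save or restore on this path. */
	static int cs4281_suspend_null(struct pci_dev *pcidev, pm_message_t state)
	{
		return 0;
	}

	static int cs4281_resume_null(struct pci_dev *pcidev)
	{
		return 0;
	}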
diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c
index cb998e8c0fdd..0da4d93f04a6 100644
--- a/sound/oss/cs46xx.c
+++ b/sound/oss/cs46xx.c
@@ -391,10 +391,6 @@ static void cs461x_clear_serial_FIFOs(struct cs_card *card, int type);
 static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state);
 static int cs46xx_resume_tbl(struct pci_dev *pcidev);
 
-#ifndef CS46XX_ACPI_SUPPORT
-static int cs46xx_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data);
-#endif
-
 #if CSDEBUG
 
 /* DEBUG ROUTINES */
@@ -5320,7 +5316,6 @@ static const char fndmsg[] = KERN_INFO "cs46xx: Found %d audio device(s).\n";
 static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
 				  const struct pci_device_id *pciid)
 {
-	struct pm_dev *pmdev;
 	int i,j;
 	u16 ss_card, ss_vendor;
 	struct cs_card *card;
@@ -5530,22 +5525,6 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
 	PCI_SET_DMA_MASK(pci_dev, dma_mask);
 	list_add(&card->list, &cs46xx_devs);
 
-	pmdev = cs_pm_register(PM_PCI_DEV, PM_PCI_ID(pci_dev), cs46xx_pm_callback);
-	if (pmdev)
-	{
-		CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
-			"cs46xx: probe() pm_register() succeeded (%p).\n",
-				pmdev));
-		pmdev->data = card;
-	}
-	else
-	{
-		CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 2, printk(KERN_INFO
-			"cs46xx: probe() pm_register() failed (%p).\n",
-				pmdev));
-		card->pm.flags |= CS46XX_PM_NOT_REGISTERED;
-	}
-
 	CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=%p\n",
 		(unsigned)card->pm.flags,card));
 
@@ -5727,7 +5706,6 @@ static int __init cs46xx_init_module(void)
 static void __exit cs46xx_cleanup_module(void)
 {
 	pci_unregister_driver(&cs46xx_pci_driver);
-	cs_pm_unregister_all(cs46xx_pm_callback);
 	CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
 		printk(KERN_INFO "cs46xx: cleanup_cs46xx() finished\n"));
 }
@@ -5735,44 +5713,6 @@ static void __exit cs46xx_cleanup_module(void)
 module_init(cs46xx_init_module);
 module_exit(cs46xx_cleanup_module);
 
-#ifndef CS46XX_ACPI_SUPPORT
-static int cs46xx_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
-	struct cs_card *card;
-
-	CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
-		"cs46xx: cs46xx_pm_callback dev=%p rqst=0x%x card=%p\n",
-			dev,(unsigned)rqst,data));
-	card = (struct cs_card *) dev->data;
-	if (card) {
-		switch(rqst) {
-			case PM_SUSPEND:
-				CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
-					"cs46xx: PM suspend request\n"));
-				if(cs46xx_suspend(card, PMSG_SUSPEND))
-				{
-					CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
-						"cs46xx: PM suspend request refused\n"));
-					return 1;
-				}
-				break;
-			case PM_RESUME:
-				CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
-					"cs46xx: PM resume request\n"));
-				if(cs46xx_resume(card))
-				{
-					CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
-						"cs46xx: PM resume request refused\n"));
-					return 1;
-				}
-				break;
-		}
-	}
-
-	return 0;
-}
-#endif
-
 #if CS46XX_ACPI_SUPPORT
 static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state)
 {
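[Note: cs46xx already carries the replacement path: cs46xx_suspend_tbl(struct pci_dev *, pm_message_t) and cs46xx_resume_tbl() exist for the CS46XX_ACPI_SUPPORT case, and the cs46xx_suspend(card, PMSG_SUSPEND)/cs46xx_resume(card) helpers survive this patch. A plausible body for the table entries, assuming the card pointer is stashed with pci_set_drvdata() during probe; that wiring is an assumption, it is not visible in this diff.]

	/* Sketch of the pci_driver-level entry points; the deleted legacy
	 * callback demultiplexed the same two driver helpers by hand. */
	static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state)
	{
		struct cs_card *card = pci_get_drvdata(pcidev);	/* assumed */

		cs46xx_suspend(card, state);
		return 0;
	}

	static int cs46xx_resume_tbl(struct pci_dev *pcidev)
	{
		struct cs_card *card = pci_get_drvdata(pcidev);	/* assumed */

		cs46xx_resume(card);
		return 0;
	}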
diff --git a/sound/oss/cs46xxpm-24.h b/sound/oss/cs46xxpm-24.h
index e220bd7240f1..ad82db84d013 100644
--- a/sound/oss/cs46xxpm-24.h
+++ b/sound/oss/cs46xxpm-24.h
@@ -38,13 +38,9 @@
 */
 static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state);
 static int cs46xx_resume_tbl(struct pci_dev *pcidev);
-#define cs_pm_register(a, b, c) NULL
-#define cs_pm_unregister_all(a)
 #define CS46XX_SUSPEND_TBL cs46xx_suspend_tbl
 #define CS46XX_RESUME_TBL cs46xx_resume_tbl
 #else
-#define cs_pm_register(a, b, c) pm_register((a), (b), (c));
-#define cs_pm_unregister_all(a) pm_unregister_all((a));
 #define CS46XX_SUSPEND_TBL cs46xx_null
 #define CS46XX_RESUME_TBL cs46xx_null
 #endif
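[Note: with the legacy wrapper macros gone, this header's only remaining job is to pick the PM pair at compile time: the real table functions when CS46XX_ACPI_SUPPORT is set, the no-op cs46xx_null otherwise. A sketch of how the macros are plausibly consumed by the cs46xx_pci_driver referenced in cs46xx.c; only .suspend/.resume are implied by this diff, the other fields are illustrative.]

	/* Assumed wiring: the compile-time-selected table entries become
	 * the PCI core's suspend/resume hooks for this device. */
	static struct pci_driver cs46xx_pci_driver = {
		.name     = "cs46xx",
		.id_table = cs46xx_pci_tbl,	/* hypothetical name */
		.probe    = cs46xx_probe,
		.remove   = cs46xx_remove,	/* hypothetical name */
		.suspend  = CS46XX_SUSPEND_TBL,
		.resume   = CS46XX_RESUME_TBL,
	};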
diff --git a/sound/oss/maestro.c b/sound/oss/maestro.c
index 3abd3541cbc7..f9ac5b16f61a 100644
--- a/sound/oss/maestro.c
+++ b/sound/oss/maestro.c
@@ -230,10 +230,6 @@
 #include <asm/page.h>
 #include <asm/uaccess.h>
 
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
-static int maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *d);
-
 #include "maestro.h"
 
 static struct pci_driver maestro_pci_driver;
@@ -3404,7 +3400,6 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid)
 	int i, ret;
 	struct ess_card *card;
 	struct ess_state *ess;
-	struct pm_dev *pmdev;
 	int num = 0;
 
 /* when built into the kernel, we only print version if device is found */
@@ -3450,11 +3445,6 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid)
 	memset(card, 0, sizeof(*card));
 	card->pcidev = pcidev;
 
-	pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev),
-			maestro_pm_callback);
-	if (pmdev)
-		pmdev->data = card;
-
 	card->iobase = iobase;
 	card->card_type = card_type;
 	card->irq = pcidev->irq;
@@ -3670,7 +3660,6 @@ static int maestro_notifier(struct notifier_block *nb, unsigned long event, void
 static void cleanup_maestro(void) {
 	M_printk("maestro: unloading\n");
 	pci_unregister_driver(&maestro_pci_driver);
-	pm_unregister_all(maestro_pm_callback);
 	unregister_reboot_notifier(&maestro_nb);
 }
 
3676 3665
@@ -3691,143 +3680,5 @@ check_suspend(struct ess_card *card)
3691 current->state = TASK_RUNNING; 3680 current->state = TASK_RUNNING;
3692} 3681}
3693 3682
3694static int
3695maestro_suspend(struct ess_card *card)
3696{
3697 unsigned long flags;
3698 int i,j;
3699
3700 spin_lock_irqsave(&card->lock,flags); /* over-kill */
3701
3702 M_printk("maestro: apm in dev %p\n",card);
3703
3704 /* we have to read from the apu regs, need
3705 to power it up */
3706 maestro_power(card,ACPI_D0);
3707
3708 for(i=0;i<NR_DSPS;i++) {
3709 struct ess_state *s = &card->channels[i];
3710
3711 if(s->dev_audio == -1)
3712 continue;
3713
3714 M_printk("maestro: stopping apus for device %d\n",i);
3715 stop_dac(s);
3716 stop_adc(s);
3717 for(j=0;j<6;j++)
3718 card->apu_map[s->apu[j]][5]=apu_get_register(s,j,5);
3719
3720 }
3721
3722 /* get rid of interrupts? */
3723 if( card->dsps_open > 0)
3724 stop_bob(&card->channels[0]);
3725
3726 card->in_suspend++;
3727
3728 spin_unlock_irqrestore(&card->lock,flags);
3729
3730 /* we trust in the bios to power down the chip on suspend.
3731 * XXX I'm also not sure that in_suspend will protect
3732 * against all reg accesses from here on out.
3733 */
3734 return 0;
3735}
3736static int
3737maestro_resume(struct ess_card *card)
3738{
3739 unsigned long flags;
3740 int i;
3741
3742 spin_lock_irqsave(&card->lock,flags); /* over-kill */
3743
3744 card->in_suspend = 0;
3745
3746 M_printk("maestro: resuming card at %p\n",card);
3747
3748 /* restore all our config */
3749 maestro_config(card);
3750 /* need to restore the base pointers.. */
3751 if(card->dmapages)
3752 set_base_registers(&card->channels[0],card->dmapages);
3753
3754 mixer_push_state(card);
3755
3756 /* set each channels' apu control registers before
3757 * restoring audio
3758 */
3759 for(i=0;i<NR_DSPS;i++) {
3760 struct ess_state *s = &card->channels[i];
3761 int chan,reg;
3762
3763 if(s->dev_audio == -1)
3764 continue;
3765
3766 for(chan = 0 ; chan < 6 ; chan++) {
3767 wave_set_register(s,s->apu[chan]<<3,s->apu_base[chan]);
3768 for(reg = 1 ; reg < NR_APU_REGS ; reg++)
3769 apu_set_register(s,chan,reg,s->card->apu_map[s->apu[chan]][reg]);
3770 }
3771 for(chan = 0 ; chan < 6 ; chan++)
3772 apu_set_register(s,chan,0,s->card->apu_map[s->apu[chan]][0] & 0xFF0F);
3773 }
3774
3775 /* now we flip on the music */
3776
3777 if( card->dsps_open <= 0) {
3778 /* this card's idle */
3779 maestro_power(card,ACPI_D2);
3780 } else {
3781 /* ok, we're actually playing things on
3782 this card */
3783 maestro_power(card,ACPI_D0);
3784 start_bob(&card->channels[0]);
3785 for(i=0;i<NR_DSPS;i++) {
3786 struct ess_state *s = &card->channels[i];
3787
3788 /* these use the apu_mode, and can handle
3789 spurious calls */
3790 start_dac(s);
3791 start_adc(s);
3792 }
3793 }
3794
3795 spin_unlock_irqrestore(&card->lock,flags);
3796
3797 /* all right, we think things are ready,
3798 wake up people who were using the device
3799 when we suspended */
3800 wake_up(&(card->suspend_queue));
3801
3802 return 0;
3803}
3804
3805int
3806maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
3807{
3808 struct ess_card *card = (struct ess_card*) dev->data;
3809
3810 if ( ! card ) goto out;
3811
3812 M_printk("maestro: pm event 0x%x received for card %p\n", rqst, card);
3813
3814 switch (rqst) {
3815 case PM_SUSPEND:
3816 maestro_suspend(card);
3817 break;
3818 case PM_RESUME:
3819 maestro_resume(card);
3820 break;
3821 /*
3822 * we'd also like to find out about
3823 * power level changes because some biosen
3824 * do mean things to the maestro when they
3825 * change their power state.
3826 */
3827 }
3828out:
3829 return 0;
3830}
3831
3832module_init(init_maestro); 3683module_init(init_maestro);
3833module_exit(cleanup_maestro); 3684module_exit(cleanup_maestro);
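[Note: unlike cs46xx, maestro gains no pci_driver PM hooks in exchange, so everything above (the APU register save/restore, the bob timer handling, the suspend_queue wakeup) goes away and the driver simply loses suspend/resume support. Were the behaviour to be preserved, the deleted helpers could be driven from PCI hooks instead. A hypothetical sketch; maestro_suspend()/maestro_resume() are the functions deleted above, and the drvdata wiring is assumed.]

	static int maestro_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
	{
		struct ess_card *card = pci_get_drvdata(pcidev);	/* assumed */

		maestro_suspend(card);	/* deleted above: stop APUs, save regs */
		pci_save_state(pcidev);
		pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
		return 0;
	}

	static int maestro_pci_resume(struct pci_dev *pcidev)
	{
		struct ess_card *card = pci_get_drvdata(pcidev);	/* assumed */

		pci_set_power_state(pcidev, PCI_D0);
		pci_restore_state(pcidev);
		maestro_resume(card);	/* deleted above: reload regs, wake waiters */
		return 0;
	}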
diff --git a/sound/oss/nm256_audio.c b/sound/oss/nm256_audio.c
index 0ce2c404a730..42d8f05689c2 100644
--- a/sound/oss/nm256_audio.c
+++ b/sound/oss/nm256_audio.c
@@ -24,8 +24,6 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
 #include "sound_config.h"
@@ -49,7 +47,6 @@ static int nm256_grabInterrupt (struct nm256_info *card);
 static int nm256_releaseInterrupt (struct nm256_info *card);
 static irqreturn_t nm256_interrupt (int irq, void *dev_id, struct pt_regs *dummy);
 static irqreturn_t nm256_interrupt_zx (int irq, void *dev_id, struct pt_regs *dummy);
-static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data);
 
 /* These belong in linux/pci.h. */
 #define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005
@@ -992,15 +989,6 @@ nm256_install_mixer (struct nm256_info *card)
 	return 0;
 }
 
-/* Perform a full reset on the hardware; this is invoked when an APM
-   resume event occurs. */
-static void
-nm256_full_reset (struct nm256_info *card)
-{
-	nm256_initHw (card);
-	ac97_reset (&(card->mdev));
-}
-
 /*
  * See if the signature left by the NM256 BIOS is intact; if so, we use
  * the associated address as the end of our audio buffer in the video
@@ -1053,7 +1041,6 @@ static int __devinit
 nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr)
 {
 	struct nm256_info *card;
-	struct pm_dev *pmdev;
 	int x;
 
 	if (pci_enable_device(pcidev))
@@ -1234,43 +1221,10 @@ nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr)
 
 	nm256_install_mixer (card);
 
-	pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), handle_pm_event);
-	if (pmdev)
-		pmdev->data = card;
-
 	return 1;
 }
 
 
-/*
- * PM event handler, so the card is properly reinitialized after a power
- * event.
- */
-static int
-handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data)
-{
-	struct nm256_info *crd = (struct nm256_info*) dev->data;
-	if (crd) {
-		switch (rqst) {
-		case PM_SUSPEND:
-			break;
-		case PM_RESUME:
-			{
-				int playing = crd->playing;
-				nm256_full_reset (crd);
-				/*
-				 * A little ugly, but that's ok; pretend the
-				 * block we were playing is done.
-				 */
-				if (playing)
-					DMAbuf_outputintr (crd->dev_for_play, 1);
-			}
-			break;
-		}
-	}
-	return 0;
-}
-
 static int __devinit
 nm256_probe(struct pci_dev *pcidev,const struct pci_device_id *pciid)
 {
@@ -1696,7 +1650,6 @@ static int __init do_init_nm256(void)
 static void __exit cleanup_nm256 (void)
 {
 	pci_unregister_driver(&nm256_pci_driver);
-	pm_unregister_all (&handle_pm_event);
 }
 
 module_init(do_init_nm256);
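[Note: the deleted nm256 handler did nothing on suspend and performed a full reset on resume; nm256_full_reset() was only nm256_initHw() plus ac97_reset(), both of which remain in the driver. The same logic as a resume-only PCI hook, assuming pci_set_drvdata(pcidev, card) in nm256_install(); the wiring is hypothetical.]

	static int nm256_pci_resume(struct pci_dev *pcidev)
	{
		struct nm256_info *card = pci_get_drvdata(pcidev);	/* assumed */
		int playing = card->playing;

		/* full reset, as the deleted nm256_full_reset() did */
		nm256_initHw(card);
		ac97_reset(&(card->mdev));

		/* pretend the block we were playing is done, as the
		 * deleted handler did, so playback can restart */
		if (playing)
			DMAbuf_outputintr(card->dev_for_play, 1);
		return 0;
	}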
diff --git a/sound/oss/opl3sa2.c b/sound/oss/opl3sa2.c
index cd41d0e4706a..5cecdbcbea9d 100644
--- a/sound/oss/opl3sa2.c
+++ b/sound/oss/opl3sa2.c
@@ -69,8 +69,6 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
 #include "sound_config.h"
 
 #include "ad1848.h"
@@ -139,10 +137,6 @@ typedef struct {
 	struct pnp_dev* pdev;
 	int activated;			/* Whether said devices have been activated */
 #endif
-#ifdef CONFIG_PM_LEGACY
-	unsigned int in_suspend;
-	struct pm_dev *pmdev;
-#endif
 	unsigned int card;
 	int chipset;			/* What's my version(s)? */
 	char *chipset_name;
@@ -341,22 +335,6 @@ static void opl3sa2_mixer_reset(opl3sa2_state_t* devc)
 	}
 }
 
-/* Currently only used for power management */
-#ifdef CONFIG_PM_LEGACY
-static void opl3sa2_mixer_restore(opl3sa2_state_t* devc)
-{
-	if (devc) {
-		opl3sa2_set_volume(devc, devc->volume_l, devc->volume_r);
-		opl3sa2_set_mic(devc, devc->mic);
-
-		if (devc->chipset == CHIPSET_OPL3SA3) {
-			opl3sa3_set_bass(devc, devc->bass_l, devc->bass_r);
-			opl3sa3_set_treble(devc, devc->treble_l, devc->treble_r);
-		}
-	}
-}
-#endif /* CONFIG_PM_LEGACY */
-
 static inline void arg_to_vol_mono(unsigned int vol, int* value)
 {
 	int left;
@@ -832,84 +810,6 @@ static struct pnp_driver opl3sa2_driver = {
 
 /* End of component functions */
 
-#ifdef CONFIG_PM_LEGACY
-
-static DEFINE_SPINLOCK(opl3sa2_lock);
-
-/* Power Management support functions */
-static int opl3sa2_suspend(struct pm_dev *pdev, unsigned int pm_mode)
-{
-	unsigned long flags;
-	opl3sa2_state_t *p;
-
-	if (!pdev)
-		return -EINVAL;
-
-	spin_lock_irqsave(&opl3sa2_lock,flags);
-
-	p = (opl3sa2_state_t *) pdev->data;
-	switch (pm_mode) {
-	case 1:
-		pm_mode = OPL3SA2_PM_MODE1;
-		break;
-	case 2:
-		pm_mode = OPL3SA2_PM_MODE2;
-		break;
-	case 3:
-		pm_mode = OPL3SA2_PM_MODE3;
-		break;
-	default:
-		/* we don't know howto handle this... */
-		spin_unlock_irqrestore(&opl3sa2_lock, flags);
-		return -EBUSY;
-	}
-
-	p->in_suspend = 1;
-
-	/* its supposed to automute before suspending, so we won't bother */
-	opl3sa2_write(p->cfg_port, OPL3SA2_PM, pm_mode);
-	/* wait a while for the clock oscillator to stabilise */
-	mdelay(10);
-
-	spin_unlock_irqrestore(&opl3sa2_lock,flags);
-	return 0;
-}
-
-static int opl3sa2_resume(struct pm_dev *pdev)
-{
-	unsigned long flags;
-	opl3sa2_state_t *p;
-
-	if (!pdev)
-		return -EINVAL;
-
-	p = (opl3sa2_state_t *) pdev->data;
-	spin_lock_irqsave(&opl3sa2_lock,flags);
-
-	/* I don't think this is necessary */
-	opl3sa2_write(p->cfg_port, OPL3SA2_PM, OPL3SA2_PM_MODE0);
-	opl3sa2_mixer_restore(p);
-	p->in_suspend = 0;
-
-	spin_unlock_irqrestore(&opl3sa2_lock,flags);
-	return 0;
-}
-
-static int opl3sa2_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
-{
-	unsigned long mode = (unsigned long)data;
-
-	switch (rqst) {
-	case PM_SUSPEND:
-		return opl3sa2_suspend(pdev, mode);
-
-	case PM_RESUME:
-		return opl3sa2_resume(pdev);
-	}
-	return 0;
-}
-#endif /* CONFIG_PM_LEGACY */
-
 /*
  * Install OPL3-SA2 based card(s).
  *
@@ -1021,12 +921,6 @@ static int __init init_opl3sa2(void)
 
 	/* ewww =) */
 	opl3sa2_state[card].card = card;
-#ifdef CONFIG_PM_LEGACY
-	/* register our power management capabilities */
-	opl3sa2_state[card].pmdev = pm_register(PM_ISA_DEV, card, opl3sa2_pm_callback);
-	if (opl3sa2_state[card].pmdev)
-		opl3sa2_state[card].pmdev->data = &opl3sa2_state[card];
-#endif /* CONFIG_PM_LEGACY */
 
 	/*
 	 * Set the Yamaha 3D enhancement mode (aka Ymersion) if asked to and
@@ -1083,10 +977,6 @@ static void __exit cleanup_opl3sa2(void)
 	int card;
 
 	for(card = 0; card < opl3sa2_cards_num; card++) {
-#ifdef CONFIG_PM_LEGACY
-		if (opl3sa2_state[card].pmdev)
-			pm_unregister(opl3sa2_state[card].pmdev);
-#endif
 		if (opl3sa2_state[card].cfg_mpu.slots[1] != -1) {
 			unload_opl3sa2_mpu(&opl3sa2_state[card].cfg_mpu);
 		}
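[Note: opl3sa2, like ad1848, is ISA-only, so its CONFIG_PM_LEGACY blocks are removed without replacement: the PM-mode register writes, the 10 ms oscillator settle, and the mixer restore all go. For reference, the chip-level essence of the deleted suspend path as a free-standing helper; the helper name is hypothetical, while opl3sa2_write(), OPL3SA2_PM and the mode constants are the driver's own. The deleted code wrote MODE1..MODE3 to enter suspend and MODE0 on resume.]

	static void opl3sa2_set_pm_mode(opl3sa2_state_t *p, unsigned char mode)
	{
		/* the chip automutes before suspending, so no explicit mute;
		 * the deleted suspend path waited 10 ms after the write for
		 * the clock oscillator to stabilise */
		opl3sa2_write(p->cfg_port, OPL3SA2_PM, mode);
		mdelay(10);
	}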