aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAnton Altaparmakov <aia21@cantab.net>2006-03-23 12:06:08 -0500
committerAnton Altaparmakov <aia21@cantab.net>2006-03-23 12:06:08 -0500
commit92fe7b9ea8ef101bff3c75ade89b93b5f62a7955 (patch)
tree3dba4faa78f1bbe4be503275173e3a63b5d60f22
parente750d1c7cc314b9ba1934b0b474b7d39f906f865 (diff)
parentb0e6e962992b76580f4900b166a337bad7c1e81b (diff)
Merge branch 'master' of /usr/src/ntfs-2.6/
-rw-r--r--Documentation/kernel-parameters.txt11
-rw-r--r--Documentation/power/swsusp.txt51
-rw-r--r--Documentation/power/userland-swsusp.txt149
-rw-r--r--Documentation/power/video.txt74
-rw-r--r--arch/cris/kernel/irq.c10
-rw-r--r--arch/frv/kernel/irq.c10
-rw-r--r--arch/i386/Kconfig24
-rw-r--r--arch/i386/Kconfig.debug9
-rw-r--r--arch/i386/kernel/Makefile2
-rw-r--r--arch/i386/kernel/alternative.c321
-rw-r--r--arch/i386/kernel/apic.c1
-rw-r--r--arch/i386/kernel/cpu/centaur.c1
-rw-r--r--arch/i386/kernel/cpu/common.c47
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c4
-rw-r--r--arch/i386/kernel/cpu/intel.c12
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c4
-rw-r--r--arch/i386/kernel/cpu/proc.c2
-rw-r--r--arch/i386/kernel/crash.c2
-rw-r--r--arch/i386/kernel/entry.S4
-rw-r--r--arch/i386/kernel/head.S5
-rw-r--r--arch/i386/kernel/io_apic.c25
-rw-r--r--arch/i386/kernel/kprobes.c4
-rw-r--r--arch/i386/kernel/module.c32
-rw-r--r--arch/i386/kernel/mpparse.c7
-rw-r--r--arch/i386/kernel/nmi.c6
-rw-r--r--arch/i386/kernel/process.c2
-rw-r--r--arch/i386/kernel/ptrace.c4
-rw-r--r--arch/i386/kernel/semaphore.c8
-rw-r--r--arch/i386/kernel/setup.c118
-rw-r--r--arch/i386/kernel/signal.c7
-rw-r--r--arch/i386/kernel/smpboot.c3
-rw-r--r--arch/i386/kernel/topology.c9
-rw-r--r--arch/i386/kernel/traps.c57
-rw-r--r--arch/i386/kernel/vmlinux.lds.S20
-rw-r--r--arch/i386/kernel/vsyscall-sysenter.S3
-rw-r--r--arch/i386/mach-es7000/es7000.h5
-rw-r--r--arch/i386/mach-es7000/es7000plat.c6
-rw-r--r--arch/i386/mm/fault.c210
-rw-r--r--arch/i386/mm/init.c45
-rw-r--r--arch/i386/oprofile/nmi_int.c7
-rw-r--r--arch/ia64/hp/sim/simserial.c7
-rw-r--r--arch/m32r/kernel/irq.c10
-rw-r--r--arch/m68k/bvme6000/rtc.c4
-rw-r--r--arch/mips/kernel/irq.c10
-rw-r--r--arch/mips/kernel/smp.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c5
-rw-r--r--arch/parisc/kernel/smp.c25
-rw-r--r--arch/powerpc/kernel/irq.c5
-rw-r--r--arch/powerpc/kernel/kprobes.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c5
-rw-r--r--arch/powerpc/kernel/setup_32.c5
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/ppc/kernel/setup.c10
-rw-r--r--arch/s390/kernel/smp.c4
-rw-r--r--arch/sh/kernel/irq.c5
-rw-r--r--arch/sh/kernel/setup.c5
-rw-r--r--arch/sh64/kernel/irq.c5
-rw-r--r--arch/sparc/kernel/irq.c5
-rw-r--r--arch/sparc/kernel/smp.c24
-rw-r--r--arch/sparc/kernel/sun4d_irq.c8
-rw-r--r--arch/sparc/kernel/sun4d_smp.c8
-rw-r--r--arch/sparc/kernel/sun4m_smp.c6
-rw-r--r--arch/sparc64/kernel/irq.c4
-rw-r--r--arch/sparc64/kernel/smp.c30
-rw-r--r--arch/sparc64/mm/init.c4
-rw-r--r--arch/um/kernel/um_arch.c12
-rw-r--r--arch/x86_64/kernel/early_printk.c26
-rw-r--r--arch/x86_64/kernel/irq.c21
-rw-r--r--arch/x86_64/kernel/kprobes.c4
-rw-r--r--arch/x86_64/kernel/nmi.c4
-rw-r--r--arch/x86_64/kernel/signal.c4
-rw-r--r--arch/xtensa/kernel/irq.c15
-rw-r--r--arch/xtensa/platform-iss/console.c4
-rw-r--r--block/ioctl.c22
-rw-r--r--drivers/base/power/suspend.c5
-rw-r--r--drivers/block/cciss.c2
-rw-r--r--drivers/block/floppy.c17
-rw-r--r--drivers/block/loop.c18
-rw-r--r--drivers/block/nbd.c16
-rw-r--r--drivers/block/pktcdvd.c27
-rw-r--r--drivers/block/rd.c4
-rw-r--r--drivers/cdrom/cdrom.c874
-rw-r--r--drivers/cdrom/cdu31a.c8
-rw-r--r--drivers/cdrom/cm206.c44
-rw-r--r--drivers/cdrom/sbpcd.c710
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/amiserial.c18
-rw-r--r--drivers/char/generic_serial.c14
-rw-r--r--drivers/char/istallion.c1
-rw-r--r--drivers/char/n_tty.c10
-rw-r--r--drivers/char/nwflash.c11
-rw-r--r--drivers/char/raw.c23
-rw-r--r--drivers/char/ser_a2232.c4
-rw-r--r--drivers/char/snsc.c8
-rw-r--r--drivers/char/snsc_event.c5
-rw-r--r--drivers/char/stallion.c1
-rw-r--r--drivers/char/sx.c2
-rw-r--r--drivers/char/tty_io.c50
-rw-r--r--drivers/char/vme_scc.c2
-rw-r--r--drivers/char/vt.c22
-rw-r--r--drivers/char/watchdog/pcwd_usb.c7
-rw-r--r--drivers/connector/connector.c15
-rw-r--r--drivers/firmware/dcdbas.c23
-rw-r--r--drivers/ide/ide-cd.c110
-rw-r--r--drivers/ide/ide-disk.c11
-rw-r--r--drivers/ide/ide-floppy.c11
-rw-r--r--drivers/ide/ide-tape.c19
-rw-r--r--drivers/isdn/capi/kcapi.c17
-rw-r--r--drivers/isdn/hisax/config.c1
-rw-r--r--drivers/isdn/hisax/elsa.c1
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/ppp_generic.c25
-rw-r--r--drivers/oprofile/cpu_buffer.c3
-rw-r--r--drivers/pnp/pnpbios/rsparser.c6
-rw-r--r--drivers/s390/block/dasd_ioctl.c8
-rw-r--r--drivers/scsi/ide-scsi.c11
-rw-r--r--drivers/scsi/sr.c37
-rw-r--r--drivers/scsi/sr.h1
-rw-r--r--drivers/scsi/sr_ioctl.c19
-rw-r--r--drivers/serial/68328serial.c9
-rw-r--r--drivers/serial/au1x00_uart.c11
-rw-r--r--drivers/serial/crisv10.c68
-rw-r--r--drivers/serial/m32r_sio.c15
-rw-r--r--drivers/serial/sunsu.c13
-rw-r--r--drivers/tc/zs.c9
-rw-r--r--fs/9p/mux.c11
-rw-r--r--fs/adfs/file.c4
-rw-r--r--fs/autofs4/autofs_i.h3
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/autofs4/waitq.c16
-rw-r--r--fs/bio.c8
-rw-r--r--fs/block_dev.c28
-rw-r--r--fs/buffer.c6
-rw-r--r--fs/cifs/dir.c8
-rw-r--r--fs/cifs/fcntl.c4
-rw-r--r--fs/cifs/file.c4
-rw-r--r--fs/cifs/inode.c16
-rw-r--r--fs/cifs/link.c16
-rw-r--r--fs/cifs/readdir.c4
-rw-r--r--fs/cifs/xattr.c16
-rw-r--r--fs/devpts/inode.c76
-rw-r--r--fs/dquot.c167
-rw-r--r--fs/eventpoll.c32
-rw-r--r--fs/ext2/namei.c54
-rw-r--r--fs/ext3/dir.c52
-rw-r--r--fs/ext3/file.c4
-rw-r--r--fs/ext3/inode.c16
-rw-r--r--fs/ext3/ioctl.c4
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/fat/fatent.c6
-rw-r--r--fs/fcntl.c9
-rw-r--r--fs/file.c34
-rw-r--r--fs/file_table.c10
-rw-r--r--fs/hpfs/hpfs_fn.h5
-rw-r--r--fs/hpfs/inode.c10
-rw-r--r--fs/hpfs/namei.c60
-rw-r--r--fs/hpfs/super.c4
-rw-r--r--fs/inode.c18
-rw-r--r--fs/inotify.c116
-rw-r--r--fs/jbd/checkpoint.c4
-rw-r--r--fs/jbd/journal.c4
-rw-r--r--fs/jbd/transaction.c4
-rw-r--r--fs/jffs/inode-v23.c86
-rw-r--r--fs/jffs/intrep.c6
-rw-r--r--fs/jffs/jffs_fm.c2
-rw-r--r--fs/jffs/jffs_fm.h5
-rw-r--r--fs/libfs.c14
-rw-r--r--fs/minix/namei.c48
-rw-r--r--fs/namei.c12
-rw-r--r--fs/ncpfs/file.c4
-rw-r--r--fs/ncpfs/inode.c6
-rw-r--r--fs/ncpfs/ncplib_kernel.c4
-rw-r--r--fs/ncpfs/sock.c34
-rw-r--r--fs/open.c8
-rw-r--r--fs/proc/proc_misc.c2
-rw-r--r--fs/qnx4/file.c3
-rw-r--r--fs/quota.c6
-rw-r--r--fs/quota_v2.c2
-rw-r--r--fs/ramfs/file-mmu.c11
-rw-r--r--fs/seq_file.c10
-rw-r--r--fs/super.c10
-rw-r--r--fs/sysv/namei.c48
-rw-r--r--fs/udf/balloc.c36
-rw-r--r--fs/udf/ialloc.c8
-rw-r--r--fs/udf/super.c2
-rw-r--r--fs/ufs/file.c10
-rw-r--r--fs/ufs/namei.c48
-rw-r--r--fs/xfs/linux-2.6/xfs_stats.c7
-rw-r--r--fs/xfs/linux-2.6/xfs_sysctl.c3
-rw-r--r--include/asm-alpha/mmu_context.h5
-rw-r--r--include/asm-alpha/topology.h4
-rw-r--r--include/asm-generic/bug.h4
-rw-r--r--include/asm-generic/percpu.h7
-rw-r--r--include/asm-i386/alternative.h129
-rw-r--r--include/asm-i386/arch_hooks.h3
-rw-r--r--include/asm-i386/atomic.h36
-rw-r--r--include/asm-i386/bitops.h7
-rw-r--r--include/asm-i386/cache.h2
-rw-r--r--include/asm-i386/cpufeature.h1
-rw-r--r--include/asm-i386/mach-default/do_timer.h2
-rw-r--r--include/asm-i386/mach-es7000/mach_mpparse.h10
-rw-r--r--include/asm-i386/mach-visws/do_timer.h2
-rw-r--r--include/asm-i386/mach-voyager/do_timer.h2
-rw-r--r--include/asm-i386/mpspec.h1
-rw-r--r--include/asm-i386/mtrr.h1
-rw-r--r--include/asm-i386/mutex.h6
-rw-r--r--include/asm-i386/pgtable-2level.h2
-rw-r--r--include/asm-i386/pgtable-3level.h2
-rw-r--r--include/asm-i386/rwlock.h56
-rw-r--r--include/asm-i386/semaphore.h8
-rw-r--r--include/asm-i386/spinlock.h34
-rw-r--r--include/asm-i386/system.h62
-rw-r--r--include/asm-i386/uaccess.h12
-rw-r--r--include/asm-i386/unistd.h36
-rw-r--r--include/asm-ia64/atomic.h8
-rw-r--r--include/asm-ia64/cache.h2
-rw-r--r--include/asm-m68k/atomic.h8
-rw-r--r--include/asm-parisc/cache.h2
-rw-r--r--include/asm-powerpc/percpu.h7
-rw-r--r--include/asm-s390/atomic.h18
-rw-r--r--include/asm-s390/percpu.h7
-rw-r--r--include/asm-sparc64/atomic.h10
-rw-r--r--include/asm-sparc64/cache.h2
-rw-r--r--include/asm-sparc64/percpu.h7
-rw-r--r--include/asm-um/alternative.h6
-rw-r--r--include/asm-x86_64/atomic.h8
-rw-r--r--include/asm-x86_64/cache.h2
-rw-r--r--include/asm-x86_64/percpu.h7
-rw-r--r--include/linux/cache.h4
-rw-r--r--include/linux/cdrom.h5
-rw-r--r--include/linux/eventpoll.h8
-rw-r--r--include/linux/ext3_fs.h9
-rw-r--r--include/linux/ext3_fs_i.h7
-rw-r--r--include/linux/file.h28
-rw-r--r--include/linux/fs.h22
-rw-r--r--include/linux/generic_serial.h4
-rw-r--r--include/linux/genhd.h14
-rw-r--r--include/linux/init_task.h10
-rw-r--r--include/linux/jbd.h7
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kprobes.h3
-rw-r--r--include/linux/loop.h3
-rw-r--r--include/linux/msdos_fs.h3
-rw-r--r--include/linux/nbd.h3
-rw-r--r--include/linux/ncp_fs_i.h2
-rw-r--r--include/linux/ncp_fs_sb.h5
-rw-r--r--include/linux/pm.h3
-rw-r--r--include/linux/profile.h2
-rw-r--r--include/linux/quota.h7
-rw-r--r--include/linux/raid/raid1.h2
-rw-r--r--include/linux/rcupdate.h2
-rw-r--r--include/linux/seq_file.h4
-rw-r--r--include/linux/swap.h5
-rw-r--r--include/linux/tty.h8
-rw-r--r--include/linux/tty_flip.h12
-rw-r--r--include/linux/udf_fs_sb.h4
-rw-r--r--include/linux/vt_kern.h5
-rw-r--r--init/do_mounts_initrd.c1
-rw-r--r--init/main.c26
-rw-r--r--kernel/cpuset.c212
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/fork.c8
-rw-r--r--kernel/kprobes.c14
-rw-r--r--kernel/kthread.c7
-rw-r--r--kernel/module.c53
-rw-r--r--kernel/panic.c97
-rw-r--r--kernel/posix-timers.c1
-rw-r--r--kernel/power/Makefile2
-rw-r--r--kernel/power/disk.c20
-rw-r--r--kernel/power/main.c2
-rw-r--r--kernel/power/pm.c21
-rw-r--r--kernel/power/power.h75
-rw-r--r--kernel/power/process.c61
-rw-r--r--kernel/power/snapshot.c335
-rw-r--r--kernel/power/swap.c544
-rw-r--r--kernel/power/swsusp.c887
-rw-r--r--kernel/power/user.c333
-rw-r--r--kernel/profile.c11
-rw-r--r--kernel/rcupdate.c14
-rw-r--r--kernel/sched.c13
-rw-r--r--kernel/signal.c11
-rw-r--r--kernel/spinlock.c9
-rw-r--r--kernel/sys.c46
-rw-r--r--lib/reed_solomon/reed_solomon.c11
-rw-r--r--mm/readahead.c1
-rw-r--r--mm/swapfile.c57
-rw-r--r--security/seclvl.c210
-rw-r--r--sound/oss/ac97_codec.c24
-rw-r--r--sound/oss/aci.c11
-rw-r--r--sound/oss/ad1889.c7
-rw-r--r--sound/oss/ad1889.h2
-rw-r--r--sound/oss/ali5455.c8
-rw-r--r--sound/oss/au1000.c44
-rw-r--r--sound/oss/au1550_ac97.c44
-rw-r--r--sound/oss/btaudio.c36
-rw-r--r--sound/oss/cmpci.c20
-rw-r--r--sound/oss/cs4281/cs4281m.c54
-rw-r--r--sound/oss/cs46xx.c75
-rw-r--r--sound/oss/dmasound/dmasound_awacs.c10
-rw-r--r--sound/oss/emu10k1/hwaccess.h2
-rw-r--r--sound/oss/emu10k1/main.c2
-rw-r--r--sound/oss/emu10k1/midi.c14
-rw-r--r--sound/oss/es1370.c71
-rw-r--r--sound/oss/es1371.c71
-rw-r--r--sound/oss/esssolo1.c50
-rw-r--r--sound/oss/forte.c11
-rw-r--r--sound/oss/hal2.c22
-rw-r--r--sound/oss/i810_audio.c8
-rw-r--r--sound/oss/ite8172.c20
-rw-r--r--sound/oss/maestro.c26
-rw-r--r--sound/oss/maestro3.c20
-rw-r--r--sound/oss/nec_vrc5477.c20
-rw-r--r--sound/oss/rme96xx.c17
-rw-r--r--sound/oss/sonicvibes.c48
-rw-r--r--sound/oss/swarm_cs4297a.c39
-rw-r--r--sound/oss/trident.c62
-rw-r--r--sound/oss/via82cxxx_audio.c49
-rw-r--r--sound/oss/vwsnd.c61
-rw-r--r--sound/oss/ymfpci.c14
-rw-r--r--sound/oss/ymfpci.h3
320 files changed, 5784 insertions, 4256 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fc99075e0af4..7b7382d0f758 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1008,7 +1008,9 @@ running once the system is up.
1008 noexec=on: enable non-executable mappings (default) 1008 noexec=on: enable non-executable mappings (default)
1009 noexec=off: disable nn-executable mappings 1009 noexec=off: disable nn-executable mappings
1010 1010
1011 nofxsr [BUGS=IA-32] 1011 nofxsr [BUGS=IA-32] Disables x86 floating point extended
1012 register save and restore. The kernel will only save
1013 legacy floating-point registers on task switch.
1012 1014
1013 nohlt [BUGS=ARM] 1015 nohlt [BUGS=ARM]
1014 1016
@@ -1053,6 +1055,8 @@ running once the system is up.
1053 1055
1054 nosbagart [IA-64] 1056 nosbagart [IA-64]
1055 1057
1058 nosep [BUGS=IA-32] Disables x86 SYSENTER/SYSEXIT support.
1059
1056 nosmp [SMP] Tells an SMP kernel to act as a UP kernel. 1060 nosmp [SMP] Tells an SMP kernel to act as a UP kernel.
1057 1061
1058 nosync [HW,M68K] Disables sync negotiation for all devices. 1062 nosync [HW,M68K] Disables sync negotiation for all devices.
@@ -1122,6 +1126,11 @@ running once the system is up.
1122 pas16= [HW,SCSI] 1126 pas16= [HW,SCSI]
1123 See header of drivers/scsi/pas16.c. 1127 See header of drivers/scsi/pas16.c.
1124 1128
1129 pause_on_oops=
1130 Halt all CPUs after the first oops has been printed for
1131 the specified number of seconds. This is to be used if
1132 your oopses keep scrolling off the screen.
1133
1125 pcbit= [HW,ISDN] 1134 pcbit= [HW,ISDN]
1126 1135
1127 pcd. [PARIDE] 1136 pcd. [PARIDE]
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index b28b7f04abb8..d7814a113ee1 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -17,6 +17,11 @@ Some warnings, first.
17 * but it will probably only crash. 17 * but it will probably only crash.
18 * 18 *
19 * (*) suspend/resume support is needed to make it safe. 19 * (*) suspend/resume support is needed to make it safe.
20 *
21 * If you have any filesystems on USB devices mounted before suspend,
22 * they won't be accessible after resume and you may lose data, as though
23 * you have unplugged the USB devices with mounted filesystems on them
24 * (see the FAQ below for details).
20 25
21You need to append resume=/dev/your_swap_partition to kernel command 26You need to append resume=/dev/your_swap_partition to kernel command
22line. Then you suspend by 27line. Then you suspend by
@@ -27,19 +32,18 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state
27 32
28echo platform > /sys/power/disk; echo disk > /sys/power/state 33echo platform > /sys/power/disk; echo disk > /sys/power/state
29 34
35. If you have SATA disks, you'll need recent kernels with SATA suspend
36support. For suspend and resume to work, make sure your disk drivers
37are built into kernel -- not modules. [There's way to make
38suspend/resume with modular disk drivers, see FAQ, but you probably
39should not do that.]
40
30If you want to limit the suspend image size to N bytes, do 41If you want to limit the suspend image size to N bytes, do
31 42
32echo N > /sys/power/image_size 43echo N > /sys/power/image_size
33 44
34before suspend (it is limited to 500 MB by default). 45before suspend (it is limited to 500 MB by default).
35 46
36Encrypted suspend image:
37------------------------
38If you want to store your suspend image encrypted with a temporary
39key to prevent data gathering after resume you must compile
40crypto and the aes algorithm into the kernel - modules won't work
41as they cannot be loaded at resume time.
42
43 47
44Article about goals and implementation of Software Suspend for Linux 48Article about goals and implementation of Software Suspend for Linux
45~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 49~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -333,4 +337,37 @@ init=/bin/bash, then swapon and starting suspend sequence manually
333usually does the trick. Then it is good idea to try with latest 337usually does the trick. Then it is good idea to try with latest
334vanilla kernel. 338vanilla kernel.
335 339
340Q: How can distributions ship a swsusp-supporting kernel with modular
341disk drivers (especially SATA)?
342
343A: Well, it can be done, load the drivers, then do echo into
344/sys/power/disk/resume file from initrd. Be sure not to mount
345anything, not even read-only mount, or you are going to lose your
346data.
347
348Q: How do I make suspend more verbose?
349
350A: If you want to see any non-error kernel messages on the virtual
351terminal the kernel switches to during suspend, you have to set the
352kernel console loglevel to at least 5, for example by doing
353
354 echo 5 > /proc/sys/kernel/printk
355
356Q: Is this true that if I have a mounted filesystem on a USB device and
357I suspend to disk, I can lose data unless the filesystem has been mounted
358with "sync"?
359
360A: That's right. It depends on your hardware, and it could be true even for
361suspend-to-RAM. In fact, even with "-o sync" you can lose data if your
362programs have information in buffers they haven't written out to disk.
363
364If you're lucky, your hardware will support low-power modes for USB
365controllers while the system is asleep. Lots of hardware doesn't,
366however. Shutting off the power to a USB controller is equivalent to
367unplugging all the attached devices.
368
369Remember that it's always a bad idea to unplug a disk drive containing a
370mounted filesystem. With USB that's true even when your system is asleep!
371The safest thing is to unmount all USB-based filesystems before suspending
372and remount them after resuming.
336 373
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
new file mode 100644
index 000000000000..94058220aaf0
--- /dev/null
+++ b/Documentation/power/userland-swsusp.txt
@@ -0,0 +1,149 @@
1Documentation for userland software suspend interface
2 (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
3
4First, the warnings at the beginning of swsusp.txt still apply.
5
6Second, you should read the FAQ in swsusp.txt _now_ if you have not
7done it already.
8
9Now, to use the userland interface for software suspend you need special
10utilities that will read/write the system memory snapshot from/to the
11kernel. Such utilities are available, for example, from
12<http://www.sisk.pl/kernel/utilities/suspend>. You may want to have
13a look at them if you are going to develop your own suspend/resume
14utilities.
15
16The interface consists of a character device providing the open(),
17release(), read(), and write() operations as well as several ioctl()
18commands defined in kernel/power/power.h. The major and minor
19numbers of the device are, respectively, 10 and 231, and they can
20be read from /sys/class/misc/snapshot/dev.
21
22The device can be open either for reading or for writing. If open for
23reading, it is considered to be in the suspend mode. Otherwise it is
24assumed to be in the resume mode. The device cannot be open for reading
25and writing. It is also impossible to have the device open more than once
26at a time.
27
28The ioctl() commands recognized by the device are:
29
30SNAPSHOT_FREEZE - freeze user space processes (the current process is
31 not frozen); this is required for SNAPSHOT_ATOMIC_SNAPSHOT
32 and SNAPSHOT_ATOMIC_RESTORE to succeed
33
34SNAPSHOT_UNFREEZE - thaw user space processes frozen by SNAPSHOT_FREEZE
35
36SNAPSHOT_ATOMIC_SNAPSHOT - create a snapshot of the system memory; the
37 last argument of ioctl() should be a pointer to an int variable,
38 the value of which will indicate whether the call returned after
39 creating the snapshot (1) or after restoring the system memory state
40 from it (0) (after resume the system finds itself finishing the
41 SNAPSHOT_ATOMIC_SNAPSHOT ioctl() again); after the snapshot
42 has been created the read() operation can be used to transfer
43 it out of the kernel
44
45SNAPSHOT_ATOMIC_RESTORE - restore the system memory state from the
46 uploaded snapshot image; before calling it you should transfer
47 the system memory snapshot back to the kernel using the write()
48 operation; this call will not succeed if the snapshot
49 image is not available to the kernel
50
51SNAPSHOT_FREE - free memory allocated for the snapshot image
52
53SNAPSHOT_SET_IMAGE_SIZE - set the preferred maximum size of the image
54 (the kernel will do its best to ensure the image size will not exceed
55 this number, but if it turns out to be impossible, the kernel will
56 create the smallest image possible)
57
58SNAPSHOT_AVAIL_SWAP - return the amount of available swap in bytes (the last
59 argument should be a pointer to an unsigned int variable that will
60 contain the result if the call is successful).
61
62SNAPSHOT_GET_SWAP_PAGE - allocate a swap page from the resume partition
63 (the last argument should be a pointer to a loff_t variable that
64 will contain the swap page offset if the call is successful)
65
66SNAPSHOT_FREE_SWAP_PAGES - free all swap pages allocated with
67 SNAPSHOT_GET_SWAP_PAGE
68
69SNAPSHOT_SET_SWAP_FILE - set the resume partition (the last ioctl() argument
70 should specify the device's major and minor numbers in the old
71 two-byte format, as returned by the stat() function in the .st_rdev
72 member of the stat structure); it is recommended to always use this
73 call, because the code to set the resume partition could be removed from
74 future kernels
75
76The device's read() operation can be used to transfer the snapshot image from
77the kernel. It has the following limitations:
78- you cannot read() more than one virtual memory page at a time
79- read()s accross page boundaries are impossible (ie. if ypu read() 1/2 of
80 a page in the previous call, you will only be able to read()
81 _at_ _most_ 1/2 of the page in the next call)
82
83The device's write() operation is used for uploading the system memory snapshot
84into the kernel. It has the same limitations as the read() operation.
85
86The release() operation frees all memory allocated for the snapshot image
87and all swap pages allocated with SNAPSHOT_GET_SWAP_PAGE (if any).
88Thus it is not necessary to use either SNAPSHOT_FREE or
89SNAPSHOT_FREE_SWAP_PAGES before closing the device (in fact it will also
90unfreeze user space processes frozen by SNAPSHOT_UNFREEZE if they are
91still frozen when the device is being closed).
92
93Currently it is assumed that the userland utilities reading/writing the
94snapshot image from/to the kernel will use a swap parition, called the resume
95partition, as storage space. However, this is not really required, as they
96can use, for example, a special (blank) suspend partition or a file on a partition
97that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and mounted afterwards.
98
99These utilities SHOULD NOT make any assumptions regarding the ordering of
100data within the snapshot image, except for the image header that MAY be
101assumed to start with an swsusp_info structure, as specified in
102kernel/power/power.h. This structure MAY be used by the userland utilities
103to obtain some information about the snapshot image, such as the size
104of the snapshot image, including the metadata and the header itself,
105contained in the .size member of swsusp_info.
106
107The snapshot image MUST be written to the kernel unaltered (ie. all of the image
108data, metadata and header MUST be written in _exactly_ the same amount, form
109and order in which they have been read). Otherwise, the behavior of the
110resumed system may be totally unpredictable.
111
112While executing SNAPSHOT_ATOMIC_RESTORE the kernel checks if the
113structure of the snapshot image is consistent with the information stored
114in the image header. If any inconsistencies are detected,
115SNAPSHOT_ATOMIC_RESTORE will not succeed. Still, this is not a fool-proof
116mechanism and the userland utilities using the interface SHOULD use additional
117means, such as checksums, to ensure the integrity of the snapshot image.
118
119The suspending and resuming utilities MUST lock themselves in memory,
120preferrably using mlockall(), before calling SNAPSHOT_FREEZE.
121
122The suspending utility MUST check the value stored by SNAPSHOT_ATOMIC_SNAPSHOT
123in the memory location pointed to by the last argument of ioctl() and proceed
124in accordance with it:
1251. If the value is 1 (ie. the system memory snapshot has just been
126 created and the system is ready for saving it):
127 (a) The suspending utility MUST NOT close the snapshot device
128 _unless_ the whole suspend procedure is to be cancelled, in
129 which case, if the snapshot image has already been saved, the
130 suspending utility SHOULD destroy it, preferrably by zapping
131 its header. If the suspend is not to be cancelled, the
132 system MUST be powered off or rebooted after the snapshot
133 image has been saved.
134 (b) The suspending utility SHOULD NOT attempt to perform any
135 file system operations (including reads) on the file systems
136 that were mounted before SNAPSHOT_ATOMIC_SNAPSHOT has been
137 called. However, it MAY mount a file system that was not
138 mounted at that time and perform some operations on it (eg.
139 use it for saving the image).
1402. If the value is 0 (ie. the system state has just been restored from
141 the snapshot image), the suspending utility MUST close the snapshot
142 device. Afterwards it will be treated as a regular userland process,
143 so it need not exit.
144
145The resuming utility SHOULD NOT attempt to mount any file systems that could
146be mounted before suspend and SHOULD NOT attempt to perform any operations
147involving such file systems.
148
149For details, please refer to the source code.
diff --git a/Documentation/power/video.txt b/Documentation/power/video.txt
index 912bed87c758..d18a57d1a531 100644
--- a/Documentation/power/video.txt
+++ b/Documentation/power/video.txt
@@ -1,7 +1,7 @@
1 1
2 Video issues with S3 resume 2 Video issues with S3 resume
3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
4 2003-2005, Pavel Machek 4 2003-2006, Pavel Machek
5 5
6During S3 resume, hardware needs to be reinitialized. For most 6During S3 resume, hardware needs to be reinitialized. For most
7devices, this is easy, and kernel driver knows how to do 7devices, this is easy, and kernel driver knows how to do
@@ -15,6 +15,27 @@ run normally so video card is normally initialized. It should not be
15problem for S1 standby, because hardware should retain its state over 15problem for S1 standby, because hardware should retain its state over
16that. 16that.
17 17
18We either have to run video BIOS during early resume, or interpret it
19using vbetool later, or maybe nothing is neccessary on particular
20system because video state is preserved. Unfortunately different
21methods work on different systems, and no known method suits all of
22them.
23
24Userland application called s2ram has been developed; it contains long
25whitelist of systems, and automatically selects working method for a
26given system. It can be downloaded from CVS at
27www.sf.net/projects/suspend . If you get a system that is not in the
28whitelist, please try to find a working solution, and submit whitelist
29entry so that work does not need to be repeated.
30
31Currently, VBE_SAVE method (6 below) works on most
32systems. Unfortunately, vbetool only runs after userland is resumed,
33so it makes debugging of early resume problems
34hard/impossible. Methods that do not rely on userland are preferable.
35
36Details
37~~~~~~~
38
18There are a few types of systems where video works after S3 resume: 39There are a few types of systems where video works after S3 resume:
19 40
20(1) systems where video state is preserved over S3. 41(1) systems where video state is preserved over S3.
@@ -104,6 +125,7 @@ HP NX7000 ??? (*)
104HP Pavilion ZD7000 vbetool post needed, need open-source nv driver for X 125HP Pavilion ZD7000 vbetool post needed, need open-source nv driver for X
105HP Omnibook XE3 athlon version none (1) 126HP Omnibook XE3 athlon version none (1)
106HP Omnibook XE3GC none (1), video is S3 Savage/IX-MV 127HP Omnibook XE3GC none (1), video is S3 Savage/IX-MV
128HP Omnibook 5150 none (1), (S1 also works OK)
107IBM TP T20, model 2647-44G none (1), video is S3 Inc. 86C270-294 Savage/IX-MV, vesafb gets "interesting" but X work. 129IBM TP T20, model 2647-44G none (1), video is S3 Inc. 86C270-294 Savage/IX-MV, vesafb gets "interesting" but X work.
108IBM TP A31 / Type 2652-M5G s3_mode (3) [works ok with BIOS 1.04 2002-08-23, but not at all with BIOS 1.11 2004-11-05 :-(] 130IBM TP A31 / Type 2652-M5G s3_mode (3) [works ok with BIOS 1.04 2002-08-23, but not at all with BIOS 1.11 2004-11-05 :-(]
109IBM TP R32 / Type 2658-MMG none (1) 131IBM TP R32 / Type 2658-MMG none (1)
@@ -120,18 +142,24 @@ IBM ThinkPad T42p (2373-GTG) s3_bios (2)
120IBM TP X20 ??? (*) 142IBM TP X20 ??? (*)
121IBM TP X30 s3_bios (2) 143IBM TP X30 s3_bios (2)
122IBM TP X31 / Type 2672-XXH none (1), use radeontool (http://fdd.com/software/radeon/) to turn off backlight. 144IBM TP X31 / Type 2672-XXH none (1), use radeontool (http://fdd.com/software/radeon/) to turn off backlight.
123IBM TP X32 none (1), but backlight is on and video is trashed after long suspend 145IBM TP X32 none (1), but backlight is on and video is trashed after long suspend. s3_bios,s3_mode (4) works too. Perhaps that gets better results?
124IBM Thinkpad X40 Type 2371-7JG s3_bios,s3_mode (4) 146IBM Thinkpad X40 Type 2371-7JG s3_bios,s3_mode (4)
147IBM TP 600e none(1), but a switch to console and back to X is needed
125Medion MD4220 ??? (*) 148Medion MD4220 ??? (*)
126Samsung P35 vbetool needed (6) 149Samsung P35 vbetool needed (6)
127Sharp PC-AR10 (ATI rage) none (1) 150Sharp PC-AR10 (ATI rage) none (1), backlight does not switch off
128Sony Vaio PCG-C1VRX/K s3_bios (2) 151Sony Vaio PCG-C1VRX/K s3_bios (2)
129Sony Vaio PCG-F403 ??? (*) 152Sony Vaio PCG-F403 ??? (*)
153Sony Vaio PCG-GRT995MP none (1), works with 'nv' X driver
154Sony Vaio PCG-GR7/K none (1), but needs radeonfb, use radeontool (http://fdd.com/software/radeon/) to turn off backlight.
130Sony Vaio PCG-N505SN ??? (*) 155Sony Vaio PCG-N505SN ??? (*)
131Sony Vaio vgn-s260 X or boot-radeon can init it (5) 156Sony Vaio vgn-s260 X or boot-radeon can init it (5)
157Sony Vaio vgn-S580BH vga=normal, but suspend from X. Console will be blank unless you return to X.
158Sony Vaio vgn-FS115B s3_bios (2),s3_mode (4)
132Toshiba Libretto L5 none (1) 159Toshiba Libretto L5 none (1)
133Toshiba Satellite 4030CDT s3_mode (3) 160Toshiba Portege 3020CT s3_mode (3)
134Toshiba Satellite 4080XCDT s3_mode (3) 161Toshiba Satellite 4030CDT s3_mode (3) (S1 also works OK)
162Toshiba Satellite 4080XCDT s3_mode (3) (S1 also works OK)
135Toshiba Satellite 4090XCDT ??? (*) 163Toshiba Satellite 4090XCDT ??? (*)
136Toshiba Satellite P10-554 s3_bios,s3_mode (4)(****) 164Toshiba Satellite P10-554 s3_bios,s3_mode (4)(****)
137Toshiba M30 (2) xor X with nvidia driver using internal AGP 165Toshiba M30 (2) xor X with nvidia driver using internal AGP
@@ -151,39 +179,3 @@ Asus A7V8X nVidia RIVA TNT2 model 64 s3_bios,s3_mode (4)
151(***) To be tested with a newer kernel. 179(***) To be tested with a newer kernel.
152 180
153(****) Not with SMP kernel, UP only. 181(****) Not with SMP kernel, UP only.
154
155VBEtool details
156~~~~~~~~~~~~~~~
157(with thanks to Carl-Daniel Hailfinger)
158
159First, boot into X and run the following script ONCE:
160#!/bin/bash
161statedir=/root/s3/state
162mkdir -p $statedir
163chvt 2
164sleep 1
165vbetool vbestate save >$statedir/vbe
166
167
168To suspend and resume properly, call the following script as root:
169#!/bin/bash
170statedir=/root/s3/state
171curcons=`fgconsole`
172fuser /dev/tty$curcons 2>/dev/null|xargs ps -o comm= -p|grep -q X && chvt 2
173cat /dev/vcsa >$statedir/vcsa
174sync
175echo 3 >/proc/acpi/sleep
176sync
177vbetool post
178vbetool vbestate restore <$statedir/vbe
179cat $statedir/vcsa >/dev/vcsa
180rckbd restart
181chvt $[curcons%6+1]
182chvt $curcons
183
184
185Unless you change your graphics card or other hardware configuration,
186the state once saved will be OK for every resume afterwards.
187NOTE: The "rckbd restart" command may be different for your
188distribution. Simply replace it with the command you would use to
189set the fonts on screen.
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 30deaf1b728a..b504def3e346 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,9 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
52 52
53 if (i == 0) { 53 if (i == 0) {
54 seq_printf(p, " "); 54 seq_printf(p, " ");
55 for (j=0; j<NR_CPUS; j++) 55 for_each_online_cpu(j)
56 if (cpu_online(j)) 56 seq_printf(p, "CPU%d ",j);
57 seq_printf(p, "CPU%d ",j);
58 seq_putc(p, '\n'); 57 seq_putc(p, '\n');
59 } 58 }
60 59
@@ -67,9 +66,8 @@ int show_interrupts(struct seq_file *p, void *v)
67#ifndef CONFIG_SMP 66#ifndef CONFIG_SMP
68 seq_printf(p, "%10u ", kstat_irqs(i)); 67 seq_printf(p, "%10u ", kstat_irqs(i));
69#else 68#else
70 for (j = 0; j < NR_CPUS; j++) 69 for_each_online_cpu(j)
71 if (cpu_online(j)) 70 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
72 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
73#endif 71#endif
74 seq_printf(p, " %14s", irq_desc[i].handler->typename); 72 seq_printf(p, " %14s", irq_desc[i].handler->typename);
75 seq_printf(p, " %s", action->name); 73 seq_printf(p, " %s", action->name);
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 27ab4c30aac6..11fa326a8f62 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -75,9 +75,8 @@ int show_interrupts(struct seq_file *p, void *v)
75 switch (i) { 75 switch (i) {
76 case 0: 76 case 0:
77 seq_printf(p, " "); 77 seq_printf(p, " ");
78 for (j = 0; j < NR_CPUS; j++) 78 for_each_online_cpu(j)
79 if (cpu_online(j)) 79 seq_printf(p, "CPU%d ",j);
80 seq_printf(p, "CPU%d ",j);
81 80
82 seq_putc(p, '\n'); 81 seq_putc(p, '\n');
83 break; 82 break;
@@ -100,9 +99,8 @@ int show_interrupts(struct seq_file *p, void *v)
100#ifndef CONFIG_SMP 99#ifndef CONFIG_SMP
101 seq_printf(p, "%10u ", kstat_irqs(i)); 100 seq_printf(p, "%10u ", kstat_irqs(i));
102#else 101#else
103 for (j = 0; j < NR_CPUS; j++) 102 for_each_online_cpu(j)
104 if (cpu_online(j)) 103 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
105 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
106#endif 104#endif
107 105
108 level = group->sources[ix]->level - frv_irq_levels; 106 level = group->sources[ix]->level - frv_irq_levels;
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 5b1a7d46d1d9..bfea1bedcbf2 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -80,6 +80,7 @@ config X86_VOYAGER
80 80
81config X86_NUMAQ 81config X86_NUMAQ
82 bool "NUMAQ (IBM/Sequent)" 82 bool "NUMAQ (IBM/Sequent)"
83 select SMP
83 select NUMA 84 select NUMA
84 help 85 help
85 This option is used for getting Linux to run on a (IBM/Sequent) NUMA 86 This option is used for getting Linux to run on a (IBM/Sequent) NUMA
@@ -400,6 +401,7 @@ choice
400 401
401config NOHIGHMEM 402config NOHIGHMEM
402 bool "off" 403 bool "off"
404 depends on !X86_NUMAQ
403 ---help--- 405 ---help---
404 Linux can use up to 64 Gigabytes of physical memory on x86 systems. 406 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
405 However, the address space of 32-bit x86 processors is only 4 407 However, the address space of 32-bit x86 processors is only 4
@@ -436,6 +438,7 @@ config NOHIGHMEM
436 438
437config HIGHMEM4G 439config HIGHMEM4G
438 bool "4GB" 440 bool "4GB"
441 depends on !X86_NUMAQ
439 help 442 help
440 Select this if you have a 32-bit processor and between 1 and 4 443 Select this if you have a 32-bit processor and between 1 and 4
441 gigabytes of physical RAM. 444 gigabytes of physical RAM.
@@ -503,10 +506,6 @@ config NUMA
503 default n if X86_PC 506 default n if X86_PC
504 default y if (X86_NUMAQ || X86_SUMMIT) 507 default y if (X86_NUMAQ || X86_SUMMIT)
505 508
506# Need comments to help the hapless user trying to turn on NUMA support
507comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
508 depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
509
510comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" 509comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
511 depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI) 510 depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
512 511
@@ -660,13 +659,18 @@ config BOOT_IOREMAP
660 default y 659 default y
661 660
662config REGPARM 661config REGPARM
663 bool "Use register arguments (EXPERIMENTAL)" 662 bool "Use register arguments"
664 depends on EXPERIMENTAL 663 default y
665 default n
666 help 664 help
667 Compile the kernel with -mregparm=3. This uses a different ABI 665 Compile the kernel with -mregparm=3. This instructs gcc to use
668 and passes the first three arguments of a function call in registers. 666 a more efficient function call ABI which passes the first three
669 This will probably break binary only modules. 667 arguments of a function call via registers, which results in denser
668 and faster code.
669
670 If this option is disabled, then the default ABI of passing
671 arguments via the stack is used.
672
673 If unsure, say Y.
670 674
671config SECCOMP 675config SECCOMP
672 bool "Enable seccomp to safely compute untrusted bytecode" 676 bool "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index bf32ecc9ad04..00108ba9a78d 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -31,6 +31,15 @@ config DEBUG_STACK_USAGE
31 31
32 This option will slow down process creation somewhat. 32 This option will slow down process creation somewhat.
33 33
34config STACK_BACKTRACE_COLS
35 int "Stack backtraces per line" if DEBUG_KERNEL
36 range 1 3
37 default 2
38 help
39 Selects how many stack backtrace entries per line to display.
40
41 This can save screen space when displaying traces.
42
34comment "Page alloc debug is incompatible with Software Suspend on i386" 43comment "Page alloc debug is incompatible with Software Suspend on i386"
35 depends on DEBUG_KERNEL && SOFTWARE_SUSPEND 44 depends on DEBUG_KERNEL && SOFTWARE_SUSPEND
36 45
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 65656c033d70..5b9ed21216cf 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ 7obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ 8 ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
9 pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ 9 pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
10 quirks.o i8237.o topology.o 10 quirks.o i8237.o topology.o alternative.o
11 11
12obj-y += cpu/ 12obj-y += cpu/
13obj-y += timers/ 13obj-y += timers/
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
new file mode 100644
index 000000000000..5cbd6f99fb2a
--- /dev/null
+++ b/arch/i386/kernel/alternative.c
@@ -0,0 +1,321 @@
1#include <linux/module.h>
2#include <linux/spinlock.h>
3#include <linux/list.h>
4#include <asm/alternative.h>
5#include <asm/sections.h>
6
7#define DEBUG 0
8#if DEBUG
9# define DPRINTK(fmt, args...) printk(fmt, args)
10#else
11# define DPRINTK(fmt, args...)
12#endif
13
14/* Use inline assembly to define this because the nops are defined
15 as inline assembly strings in the include files and we cannot
16 get them easily into strings. */
17asm("\t.data\nintelnops: "
18 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
19 GENERIC_NOP7 GENERIC_NOP8);
20asm("\t.data\nk8nops: "
21 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
22 K8_NOP7 K8_NOP8);
23asm("\t.data\nk7nops: "
24 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
25 K7_NOP7 K7_NOP8);
26
27extern unsigned char intelnops[], k8nops[], k7nops[];
28static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
29 NULL,
30 intelnops,
31 intelnops + 1,
32 intelnops + 1 + 2,
33 intelnops + 1 + 2 + 3,
34 intelnops + 1 + 2 + 3 + 4,
35 intelnops + 1 + 2 + 3 + 4 + 5,
36 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
37 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
38};
39static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
40 NULL,
41 k8nops,
42 k8nops + 1,
43 k8nops + 1 + 2,
44 k8nops + 1 + 2 + 3,
45 k8nops + 1 + 2 + 3 + 4,
46 k8nops + 1 + 2 + 3 + 4 + 5,
47 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
48 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
49};
50static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
51 NULL,
52 k7nops,
53 k7nops + 1,
54 k7nops + 1 + 2,
55 k7nops + 1 + 2 + 3,
56 k7nops + 1 + 2 + 3 + 4,
57 k7nops + 1 + 2 + 3 + 4 + 5,
58 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
59 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
60};
61static struct nop {
62 int cpuid;
63 unsigned char **noptable;
64} noptypes[] = {
65 { X86_FEATURE_K8, k8_nops },
66 { X86_FEATURE_K7, k7_nops },
67 { -1, NULL }
68};
69
70
71extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
72extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
73extern u8 *__smp_locks[], *__smp_locks_end[];
74
75extern u8 __smp_alt_begin[], __smp_alt_end[];
76
77
78static unsigned char** find_nop_table(void)
79{
80 unsigned char **noptable = intel_nops;
81 int i;
82
83 for (i = 0; noptypes[i].cpuid >= 0; i++) {
84 if (boot_cpu_has(noptypes[i].cpuid)) {
85 noptable = noptypes[i].noptable;
86 break;
87 }
88 }
89 return noptable;
90}
91
92/* Replace instructions with better alternatives for this CPU type.
93 This runs before SMP is initialized to avoid SMP problems with
 95 self modifying code. This implies that asymmetric systems where
 96 APs have fewer capabilities than the boot processor are not handled.
96 Tough. Make sure you disable such features by hand. */
97
98void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
99{
100 unsigned char **noptable = find_nop_table();
101 struct alt_instr *a;
102 int diff, i, k;
103
104 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
105 for (a = start; a < end; a++) {
106 BUG_ON(a->replacementlen > a->instrlen);
107 if (!boot_cpu_has(a->cpuid))
108 continue;
109 memcpy(a->instr, a->replacement, a->replacementlen);
110 diff = a->instrlen - a->replacementlen;
111 /* Pad the rest with nops */
112 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
113 k = diff;
114 if (k > ASM_NOP_MAX)
115 k = ASM_NOP_MAX;
116 memcpy(a->instr + i, noptable[k], k);
117 }
118 }
119}
120
121static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
122{
123 struct alt_instr *a;
124
125 DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
126 for (a = start; a < end; a++) {
127 memcpy(a->replacement + a->replacementlen,
128 a->instr,
129 a->instrlen);
130 }
131}
132
133static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
134{
135 struct alt_instr *a;
136
137 for (a = start; a < end; a++) {
138 memcpy(a->instr,
139 a->replacement + a->replacementlen,
140 a->instrlen);
141 }
142}
143
144static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
145{
146 u8 **ptr;
147
148 for (ptr = start; ptr < end; ptr++) {
149 if (*ptr < text)
150 continue;
151 if (*ptr > text_end)
152 continue;
153 **ptr = 0xf0; /* lock prefix */
154 };
155}
156
157static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
158{
159 unsigned char **noptable = find_nop_table();
160 u8 **ptr;
161
162 for (ptr = start; ptr < end; ptr++) {
163 if (*ptr < text)
164 continue;
165 if (*ptr > text_end)
166 continue;
167 **ptr = noptable[1][0];
168 };
169}
170
171struct smp_alt_module {
172 /* what is this ??? */
173 struct module *mod;
174 char *name;
175
176 /* ptrs to lock prefixes */
177 u8 **locks;
178 u8 **locks_end;
179
180 /* .text segment, needed to avoid patching init code ;) */
181 u8 *text;
182 u8 *text_end;
183
184 struct list_head next;
185};
186static LIST_HEAD(smp_alt_modules);
187static DEFINE_SPINLOCK(smp_alt);
188
189static int smp_alt_once = 0;
190static int __init bootonly(char *str)
191{
192 smp_alt_once = 1;
193 return 1;
194}
195__setup("smp-alt-boot", bootonly);
196
197void alternatives_smp_module_add(struct module *mod, char *name,
198 void *locks, void *locks_end,
199 void *text, void *text_end)
200{
201 struct smp_alt_module *smp;
202 unsigned long flags;
203
204 if (smp_alt_once) {
205 if (boot_cpu_has(X86_FEATURE_UP))
206 alternatives_smp_unlock(locks, locks_end,
207 text, text_end);
208 return;
209 }
210
211 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
212 if (NULL == smp)
213 return; /* we'll run the (safe but slow) SMP code then ... */
214
215 smp->mod = mod;
216 smp->name = name;
217 smp->locks = locks;
218 smp->locks_end = locks_end;
219 smp->text = text;
220 smp->text_end = text_end;
221 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
222 __FUNCTION__, smp->locks, smp->locks_end,
223 smp->text, smp->text_end, smp->name);
224
225 spin_lock_irqsave(&smp_alt, flags);
226 list_add_tail(&smp->next, &smp_alt_modules);
227 if (boot_cpu_has(X86_FEATURE_UP))
228 alternatives_smp_unlock(smp->locks, smp->locks_end,
229 smp->text, smp->text_end);
230 spin_unlock_irqrestore(&smp_alt, flags);
231}
232
233void alternatives_smp_module_del(struct module *mod)
234{
235 struct smp_alt_module *item;
236 unsigned long flags;
237
238 if (smp_alt_once)
239 return;
240
241 spin_lock_irqsave(&smp_alt, flags);
242 list_for_each_entry(item, &smp_alt_modules, next) {
243 if (mod != item->mod)
244 continue;
245 list_del(&item->next);
246 spin_unlock_irqrestore(&smp_alt, flags);
247 DPRINTK("%s: %s\n", __FUNCTION__, item->name);
248 kfree(item);
249 return;
250 }
251 spin_unlock_irqrestore(&smp_alt, flags);
252}
253
254void alternatives_smp_switch(int smp)
255{
256 struct smp_alt_module *mod;
257 unsigned long flags;
258
259 if (smp_alt_once)
260 return;
261 BUG_ON(!smp && (num_online_cpus() > 1));
262
263 spin_lock_irqsave(&smp_alt, flags);
264 if (smp) {
265 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
266 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
267 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
268 alternatives_smp_apply(__smp_alt_instructions,
269 __smp_alt_instructions_end);
270 list_for_each_entry(mod, &smp_alt_modules, next)
271 alternatives_smp_lock(mod->locks, mod->locks_end,
272 mod->text, mod->text_end);
273 } else {
274 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
275 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
276 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
277 apply_alternatives(__smp_alt_instructions,
278 __smp_alt_instructions_end);
279 list_for_each_entry(mod, &smp_alt_modules, next)
280 alternatives_smp_unlock(mod->locks, mod->locks_end,
281 mod->text, mod->text_end);
282 }
283 spin_unlock_irqrestore(&smp_alt, flags);
284}
285
286void __init alternative_instructions(void)
287{
288 apply_alternatives(__alt_instructions, __alt_instructions_end);
289
290 /* switch to patch-once-at-boottime-only mode and free the
291 * tables in case we know the number of CPUs will never ever
292 * change */
293#ifdef CONFIG_HOTPLUG_CPU
294 if (num_possible_cpus() < 2)
295 smp_alt_once = 1;
296#else
297 smp_alt_once = 1;
298#endif
299
300 if (smp_alt_once) {
301 if (1 == num_possible_cpus()) {
302 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
303 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
304 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
305 apply_alternatives(__smp_alt_instructions,
306 __smp_alt_instructions_end);
307 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
308 _text, _etext);
309 }
310 free_init_pages("SMP alternatives",
311 (unsigned long)__smp_alt_begin,
312 (unsigned long)__smp_alt_end);
313 } else {
314 alternatives_smp_save(__smp_alt_instructions,
315 __smp_alt_instructions_end);
316 alternatives_smp_module_add(NULL, "core kernel",
317 __smp_locks, __smp_locks_end,
318 _text, _etext);
319 alternatives_smp_switch(0);
320 }
321}
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 776c90989e06..eb5279d23b7f 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -38,6 +38,7 @@
38#include <asm/i8253.h> 38#include <asm/i8253.h>
39 39
40#include <mach_apic.h> 40#include <mach_apic.h>
41#include <mach_apicdef.h>
41#include <mach_ipi.h> 42#include <mach_ipi.h>
42 43
43#include "io_ports.h" 44#include "io_ports.h"
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index f52669ecb93f..bd75629dd262 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -4,6 +4,7 @@
4#include <asm/processor.h> 4#include <asm/processor.h>
5#include <asm/msr.h> 5#include <asm/msr.h>
6#include <asm/e820.h> 6#include <asm/e820.h>
7#include <asm/mtrr.h>
7#include "cpu.h" 8#include "cpu.h"
8 9
9#ifdef CONFIG_X86_OOSTORE 10#ifdef CONFIG_X86_OOSTORE
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index e6bd095ae108..7e3d6b6a4e96 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -25,9 +25,10 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
25DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); 25DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
26EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); 26EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
27 27
28static int cachesize_override __devinitdata = -1; 28static int cachesize_override __cpuinitdata = -1;
29static int disable_x86_fxsr __devinitdata = 0; 29static int disable_x86_fxsr __cpuinitdata;
30static int disable_x86_serial_nr __devinitdata = 1; 30static int disable_x86_serial_nr __cpuinitdata = 1;
31static int disable_x86_sep __cpuinitdata;
31 32
32struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; 33struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
33 34
@@ -59,7 +60,7 @@ static int __init cachesize_setup(char *str)
59} 60}
60__setup("cachesize=", cachesize_setup); 61__setup("cachesize=", cachesize_setup);
61 62
62int __devinit get_model_name(struct cpuinfo_x86 *c) 63int __cpuinit get_model_name(struct cpuinfo_x86 *c)
63{ 64{
64 unsigned int *v; 65 unsigned int *v;
65 char *p, *q; 66 char *p, *q;
@@ -89,7 +90,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c)
89} 90}
90 91
91 92
92void __devinit display_cacheinfo(struct cpuinfo_x86 *c) 93void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
93{ 94{
94 unsigned int n, dummy, ecx, edx, l2size; 95 unsigned int n, dummy, ecx, edx, l2size;
95 96
@@ -130,7 +131,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
130/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ 131/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
131 132
132/* Look up CPU names by table lookup. */ 133/* Look up CPU names by table lookup. */
133static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) 134static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
134{ 135{
135 struct cpu_model_info *info; 136 struct cpu_model_info *info;
136 137
@@ -151,7 +152,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
151} 152}
152 153
153 154
154static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) 155static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
155{ 156{
156 char *v = c->x86_vendor_id; 157 char *v = c->x86_vendor_id;
157 int i; 158 int i;
@@ -187,6 +188,14 @@ static int __init x86_fxsr_setup(char * s)
187__setup("nofxsr", x86_fxsr_setup); 188__setup("nofxsr", x86_fxsr_setup);
188 189
189 190
191static int __init x86_sep_setup(char * s)
192{
193 disable_x86_sep = 1;
194 return 1;
195}
196__setup("nosep", x86_sep_setup);
197
198
190/* Standard macro to see if a specific flag is changeable */ 199/* Standard macro to see if a specific flag is changeable */
191static inline int flag_is_changeable_p(u32 flag) 200static inline int flag_is_changeable_p(u32 flag)
192{ 201{
@@ -210,7 +219,7 @@ static inline int flag_is_changeable_p(u32 flag)
210 219
211 220
212/* Probe for the CPUID instruction */ 221/* Probe for the CPUID instruction */
213static int __devinit have_cpuid_p(void) 222static int __cpuinit have_cpuid_p(void)
214{ 223{
215 return flag_is_changeable_p(X86_EFLAGS_ID); 224 return flag_is_changeable_p(X86_EFLAGS_ID);
216} 225}
@@ -254,7 +263,7 @@ static void __init early_cpu_detect(void)
254 } 263 }
255} 264}
256 265
257void __devinit generic_identify(struct cpuinfo_x86 * c) 266void __cpuinit generic_identify(struct cpuinfo_x86 * c)
258{ 267{
259 u32 tfms, xlvl; 268 u32 tfms, xlvl;
260 int junk; 269 int junk;
@@ -307,7 +316,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
307#endif 316#endif
308} 317}
309 318
310static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 319static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
311{ 320{
312 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { 321 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
313 /* Disable processor serial number */ 322 /* Disable processor serial number */
@@ -335,7 +344,7 @@ __setup("serialnumber", x86_serial_nr_setup);
335/* 344/*
336 * This does the hard work of actually picking apart the CPU stuff... 345 * This does the hard work of actually picking apart the CPU stuff...
337 */ 346 */
338void __devinit identify_cpu(struct cpuinfo_x86 *c) 347void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
339{ 348{
340 int i; 349 int i;
341 350
@@ -405,6 +414,10 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
405 clear_bit(X86_FEATURE_XMM, c->x86_capability); 414 clear_bit(X86_FEATURE_XMM, c->x86_capability);
406 } 415 }
407 416
417 /* SEP disabled? */
418 if (disable_x86_sep)
419 clear_bit(X86_FEATURE_SEP, c->x86_capability);
420
408 if (disable_pse) 421 if (disable_pse)
409 clear_bit(X86_FEATURE_PSE, c->x86_capability); 422 clear_bit(X86_FEATURE_PSE, c->x86_capability);
410 423
@@ -417,7 +430,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
417 else 430 else
418 /* Last resort... */ 431 /* Last resort... */
419 sprintf(c->x86_model_id, "%02x/%02x", 432 sprintf(c->x86_model_id, "%02x/%02x",
420 c->x86_vendor, c->x86_model); 433 c->x86, c->x86_model);
421 } 434 }
422 435
423 /* Now the feature flags better reflect actual CPU features! */ 436 /* Now the feature flags better reflect actual CPU features! */
@@ -453,7 +466,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
453} 466}
454 467
455#ifdef CONFIG_X86_HT 468#ifdef CONFIG_X86_HT
456void __devinit detect_ht(struct cpuinfo_x86 *c) 469void __cpuinit detect_ht(struct cpuinfo_x86 *c)
457{ 470{
458 u32 eax, ebx, ecx, edx; 471 u32 eax, ebx, ecx, edx;
459 int index_msb, core_bits; 472 int index_msb, core_bits;
@@ -500,7 +513,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c)
500} 513}
501#endif 514#endif
502 515
503void __devinit print_cpu_info(struct cpuinfo_x86 *c) 516void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
504{ 517{
505 char *vendor = NULL; 518 char *vendor = NULL;
506 519
@@ -523,7 +536,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c)
523 printk("\n"); 536 printk("\n");
524} 537}
525 538
526cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE; 539cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
527 540
528/* This is hacky. :) 541/* This is hacky. :)
529 * We're emulating future behavior. 542 * We're emulating future behavior.
@@ -570,7 +583,7 @@ void __init early_cpu_init(void)
570 * and IDT. We reload them nevertheless, this function acts as a 583 * and IDT. We reload them nevertheless, this function acts as a
571 * 'CPU state barrier', nothing should get across. 584 * 'CPU state barrier', nothing should get across.
572 */ 585 */
573void __devinit cpu_init(void) 586void __cpuinit cpu_init(void)
574{ 587{
575 int cpu = smp_processor_id(); 588 int cpu = smp_processor_id();
576 struct tss_struct * t = &per_cpu(init_tss, cpu); 589 struct tss_struct * t = &per_cpu(init_tss, cpu);
@@ -670,7 +683,7 @@ void __devinit cpu_init(void)
670} 683}
671 684
672#ifdef CONFIG_HOTPLUG_CPU 685#ifdef CONFIG_HOTPLUG_CPU
673void __devinit cpu_uninit(void) 686void __cpuinit cpu_uninit(void)
674{ 687{
675 int cpu = raw_smp_processor_id(); 688 int cpu = raw_smp_processor_id();
676 cpu_clear(cpu, cpu_initialized); 689 cpu_clear(cpu, cpu_initialized);
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e11a09207ec8..3d5110b65cc3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void)
1145{ 1145{
1146 unsigned int i, supported_cpus = 0; 1146 unsigned int i, supported_cpus = 0;
1147 1147
1148 for (i=0; i<NR_CPUS; i++) { 1148 for_each_cpu(i) {
1149 if (!cpu_online(i))
1150 continue;
1151 if (check_supported_cpu(i)) 1149 if (check_supported_cpu(i))
1152 supported_cpus++; 1150 supported_cpus++;
1153 } 1151 }
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 8c0120186b9f..5386b29bb5a5 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void);
29struct movsl_mask movsl_mask __read_mostly; 29struct movsl_mask movsl_mask __read_mostly;
30#endif 30#endif
31 31
32void __devinit early_intel_workaround(struct cpuinfo_x86 *c) 32void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
33{ 33{
34 if (c->x86_vendor != X86_VENDOR_INTEL) 34 if (c->x86_vendor != X86_VENDOR_INTEL)
35 return; 35 return;
@@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
44 * This is called before we do cpu ident work 44 * This is called before we do cpu ident work
45 */ 45 */
46 46
47int __devinit ppro_with_ram_bug(void) 47int __cpuinit ppro_with_ram_bug(void)
48{ 48{
49 /* Uses data from early_cpu_detect now */ 49 /* Uses data from early_cpu_detect now */
50 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 50 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void)
62 * P4 Xeon errata 037 workaround. 62 * P4 Xeon errata 037 workaround.
63 * Hardware prefetcher may cause stale data to be loaded into the cache. 63 * Hardware prefetcher may cause stale data to be loaded into the cache.
64 */ 64 */
65static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) 65static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
66{ 66{
67 unsigned long lo, hi; 67 unsigned long lo, hi;
68 68
@@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
81/* 81/*
82 * find out the number of processor cores on the die 82 * find out the number of processor cores on the die
83 */ 83 */
84static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) 84static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
85{ 85{
86 unsigned int eax, ebx, ecx, edx; 86 unsigned int eax, ebx, ecx, edx;
87 87
@@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
96 return 1; 96 return 1;
97} 97}
98 98
99static void __devinit init_intel(struct cpuinfo_x86 *c) 99static void __cpuinit init_intel(struct cpuinfo_x86 *c)
100{ 100{
101 unsigned int l2 = 0; 101 unsigned int l2 = 0;
102 char *p = NULL; 102 char *p = NULL;
@@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
205 return size; 205 return size;
206} 206}
207 207
208static struct cpu_dev intel_cpu_dev __devinitdata = { 208static struct cpu_dev intel_cpu_dev __cpuinitdata = {
209 .c_vendor = "Intel", 209 .c_vendor = "Intel",
210 .c_ident = { "GenuineIntel" }, 210 .c_ident = { "GenuineIntel" },
211 .c_models = { 211 .c_models = {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index ffe58cee0c48..ce61921369e5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -174,7 +174,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 174 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ 175 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
176 176
177 if (c->cpuid_level > 4) { 177 if (c->cpuid_level > 3) {
178 static int is_initialized; 178 static int is_initialized;
179 179
180 if (is_initialized == 0) { 180 if (is_initialized == 0) {
@@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
330 } 330 }
331 } 331 }
332} 332}
333static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index) 333static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
334{ 334{
335 struct _cpuid4_info *this_leaf, *sibling_leaf; 335 struct _cpuid4_info *this_leaf, *sibling_leaf;
336 int sibling; 336 int sibling;
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 89a85af33d28..5cfbd8011698 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
40 /* Other (Linux-defined) */ 40 /* Other (Linux-defined) */
41 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", 41 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
42 NULL, NULL, NULL, NULL, 42 NULL, NULL, NULL, NULL,
43 "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 43 "constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL,
44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
45 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 45 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
46 46
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index d49dbe8dc96b..e3c5fca0aa8a 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -105,7 +105,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
105 return 1; 105 return 1;
106 local_irq_disable(); 106 local_irq_disable();
107 107
108 if (!user_mode(regs)) { 108 if (!user_mode_vm(regs)) {
109 crash_fixup_ss_esp(&fixed_regs, regs); 109 crash_fixup_ss_esp(&fixed_regs, regs);
110 regs = &fixed_regs; 110 regs = &fixed_regs;
111 } 111 }
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 4d704724b2f5..cfc683f153b9 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -226,6 +226,10 @@ ENTRY(system_call)
226 pushl %eax # save orig_eax 226 pushl %eax # save orig_eax
227 SAVE_ALL 227 SAVE_ALL
228 GET_THREAD_INFO(%ebp) 228 GET_THREAD_INFO(%ebp)
229 testl $TF_MASK,EFLAGS(%esp)
230 jz no_singlestep
231 orl $_TIF_SINGLESTEP,TI_flags(%ebp)
232no_singlestep:
229 # system call tracing in operation / emulation 233 # system call tracing in operation / emulation
230 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ 234 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
231 testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 235 testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index e0b7c632efbc..3debc2e26542 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -450,7 +450,6 @@ int_msg:
450 450
451.globl boot_gdt_descr 451.globl boot_gdt_descr
452.globl idt_descr 452.globl idt_descr
453.globl cpu_gdt_descr
454 453
455 ALIGN 454 ALIGN
456# early boot GDT descriptor (must use 1:1 address mapping) 455# early boot GDT descriptor (must use 1:1 address mapping)
@@ -470,8 +469,6 @@ cpu_gdt_descr:
470 .word GDT_ENTRIES*8-1 469 .word GDT_ENTRIES*8-1
471 .long cpu_gdt_table 470 .long cpu_gdt_table
472 471
473 .fill NR_CPUS-1,8,0 # space for the other GDT descriptors
474
475/* 472/*
476 * The boot_gdt_table must mirror the equivalent in setup.S and is 473 * The boot_gdt_table must mirror the equivalent in setup.S and is
477 * used only for booting. 474 * used only for booting.
@@ -485,7 +482,7 @@ ENTRY(boot_gdt_table)
485/* 482/*
486 * The Global Descriptor Table contains 28 quadwords, per-CPU. 483 * The Global Descriptor Table contains 28 quadwords, per-CPU.
487 */ 484 */
488 .align PAGE_SIZE_asm 485 .align L1_CACHE_BYTES
489ENTRY(cpu_gdt_table) 486ENTRY(cpu_gdt_table)
490 .quad 0x0000000000000000 /* NULL descriptor */ 487 .quad 0x0000000000000000 /* NULL descriptor */
491 .quad 0x0000000000000000 /* 0x0b reserved */ 488 .quad 0x0000000000000000 /* 0x0b reserved */
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 39d9a5fa907e..311b4e7266f1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
351{ 351{
352 int i, j; 352 int i, j;
353 Dprintk("Rotating IRQs among CPUs.\n"); 353 Dprintk("Rotating IRQs among CPUs.\n");
354 for (i = 0; i < NR_CPUS; i++) { 354 for_each_online_cpu(i) {
355 for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) { 355 for (j = 0; j < NR_IRQS; j++) {
356 if (!irq_desc[j].action) 356 if (!irq_desc[j].action)
357 continue; 357 continue;
358 /* Is it a significant load ? */ 358 /* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
381 unsigned long imbalance = 0; 381 unsigned long imbalance = 0;
382 cpumask_t allowed_mask, target_cpu_mask, tmp; 382 cpumask_t allowed_mask, target_cpu_mask, tmp;
383 383
384 for (i = 0; i < NR_CPUS; i++) { 384 for_each_cpu(i) {
385 int package_index; 385 int package_index;
386 CPU_IRQ(i) = 0; 386 CPU_IRQ(i) = 0;
387 if (!cpu_online(i)) 387 if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
422 } 422 }
423 } 423 }
424 /* Find the least loaded processor package */ 424 /* Find the least loaded processor package */
425 for (i = 0; i < NR_CPUS; i++) { 425 for_each_online_cpu(i) {
426 if (!cpu_online(i))
427 continue;
428 if (i != CPU_TO_PACKAGEINDEX(i)) 426 if (i != CPU_TO_PACKAGEINDEX(i))
429 continue; 427 continue;
430 if (min_cpu_irq > CPU_IRQ(i)) { 428 if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
441 */ 439 */
442 tmp_cpu_irq = 0; 440 tmp_cpu_irq = 0;
443 tmp_loaded = -1; 441 tmp_loaded = -1;
444 for (i = 0; i < NR_CPUS; i++) { 442 for_each_online_cpu(i) {
445 if (!cpu_online(i))
446 continue;
447 if (i != CPU_TO_PACKAGEINDEX(i)) 443 if (i != CPU_TO_PACKAGEINDEX(i))
448 continue; 444 continue;
449 if (max_cpu_irq <= CPU_IRQ(i)) 445 if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
619 if (smp_num_siblings > 1 && !cpus_empty(tmp)) 615 if (smp_num_siblings > 1 && !cpus_empty(tmp))
620 physical_balance = 1; 616 physical_balance = 1;
621 617
622 for (i = 0; i < NR_CPUS; i++) { 618 for_each_online_cpu(i) {
623 if (!cpu_online(i))
624 continue;
625 irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 619 irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
626 irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); 620 irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
627 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { 621 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
638 else 632 else
639 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); 633 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
640failed: 634failed:
641 for (i = 0; i < NR_CPUS; i++) { 635 for_each_cpu(i) {
642 kfree(irq_cpu_data[i].irq_delta); 636 kfree(irq_cpu_data[i].irq_delta);
637 irq_cpu_data[i].irq_delta = NULL;
643 kfree(irq_cpu_data[i].last_irq); 638 kfree(irq_cpu_data[i].last_irq);
639 irq_cpu_data[i].last_irq = NULL;
644 } 640 }
645 return 0; 641 return 0;
646} 642}
@@ -1761,7 +1757,8 @@ static void __init setup_ioapic_ids_from_mpc(void)
1761 * Don't check I/O APIC IDs for xAPIC systems. They have 1757 * Don't check I/O APIC IDs for xAPIC systems. They have
1762 * no meaning without the serial APIC bus. 1758 * no meaning without the serial APIC bus.
1763 */ 1759 */
1764 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15)) 1760 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1761 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
1765 return; 1762 return;
1766 /* 1763 /*
1767 * This is broken; anything with a real cpu count has to 1764 * This is broken; anything with a real cpu count has to
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 694a13997637..7a59050242a7 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -84,9 +84,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
84 84
85void __kprobes arch_remove_kprobe(struct kprobe *p) 85void __kprobes arch_remove_kprobe(struct kprobe *p)
86{ 86{
87 down(&kprobe_mutex); 87 mutex_lock(&kprobe_mutex);
88 free_insn_slot(p->ainsn.insn); 88 free_insn_slot(p->ainsn.insn);
89 up(&kprobe_mutex); 89 mutex_unlock(&kprobe_mutex);
90} 90}
91 91
92static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) 92static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c
index 5149c8a621f0..470cf97e7cd3 100644
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -104,26 +104,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
104 return -ENOEXEC; 104 return -ENOEXEC;
105} 105}
106 106
107extern void apply_alternatives(void *start, void *end);
108
109int module_finalize(const Elf_Ehdr *hdr, 107int module_finalize(const Elf_Ehdr *hdr,
110 const Elf_Shdr *sechdrs, 108 const Elf_Shdr *sechdrs,
111 struct module *me) 109 struct module *me)
112{ 110{
113 const Elf_Shdr *s; 111 const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
114 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; 112 char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
115 113
116 /* look for .altinstructions to patch */
117 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 114 for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
118 void *seg; 115 if (!strcmp(".text", secstrings + s->sh_name))
119 if (strcmp(".altinstructions", secstrings + s->sh_name)) 116 text = s;
120 continue; 117 if (!strcmp(".altinstructions", secstrings + s->sh_name))
121 seg = (void *)s->sh_addr; 118 alt = s;
122 apply_alternatives(seg, seg + s->sh_size); 119 if (!strcmp(".smp_locks", secstrings + s->sh_name))
123 } 120 locks= s;
121 }
122
123 if (alt) {
124 /* patch .altinstructions */
125 void *aseg = (void *)alt->sh_addr;
126 apply_alternatives(aseg, aseg + alt->sh_size);
127 }
128 if (locks && text) {
129 void *lseg = (void *)locks->sh_addr;
130 void *tseg = (void *)text->sh_addr;
131 alternatives_smp_module_add(me, me->name,
132 lseg, lseg + locks->sh_size,
133 tseg, tseg + text->sh_size);
134 }
124 return 0; 135 return 0;
125} 136}
126 137
127void module_arch_cleanup(struct module *mod) 138void module_arch_cleanup(struct module *mod)
128{ 139{
140 alternatives_smp_module_del(mod);
129} 141}
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index e6e2f43db85e..8d8aa9d1796d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -828,6 +828,8 @@ void __init find_smp_config (void)
828 smp_scan_config(address, 0x400); 828 smp_scan_config(address, 0x400);
829} 829}
830 830
831int es7000_plat;
832
831/* -------------------------------------------------------------------------- 833/* --------------------------------------------------------------------------
832 ACPI-based MP Configuration 834 ACPI-based MP Configuration
833 -------------------------------------------------------------------------- */ 835 -------------------------------------------------------------------------- */
@@ -935,7 +937,8 @@ void __init mp_register_ioapic (
935 mp_ioapics[idx].mpc_apicaddr = address; 937 mp_ioapics[idx].mpc_apicaddr = address;
936 938
937 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 939 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
938 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) 940 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
941 && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
939 tmpid = io_apic_get_unique_id(idx, id); 942 tmpid = io_apic_get_unique_id(idx, id);
940 else 943 else
941 tmpid = id; 944 tmpid = id;
@@ -1011,8 +1014,6 @@ void __init mp_override_legacy_irq (
1011 return; 1014 return;
1012} 1015}
1013 1016
1014int es7000_plat;
1015
1016void __init mp_config_acpi_legacy_irqs (void) 1017void __init mp_config_acpi_legacy_irqs (void)
1017{ 1018{
1018 struct mpc_config_intsrc intsrc; 1019 struct mpc_config_intsrc intsrc;
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index be87c5e2ee95..9074818b9473 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
143 local_irq_enable(); 143 local_irq_enable();
144 mdelay((10*1000)/nmi_hz); // wait 10 ticks 144 mdelay((10*1000)/nmi_hz); // wait 10 ticks
145 145
146 for (cpu = 0; cpu < NR_CPUS; cpu++) { 146 for_each_cpu(cpu) {
147#ifdef CONFIG_SMP 147#ifdef CONFIG_SMP
148 /* Check cpu_callin_map here because that is set 148 /* Check cpu_callin_map here because that is set
149 after the timer is started. */ 149 after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
510 * Just reset the alert counters, (other CPUs might be 510 * Just reset the alert counters, (other CPUs might be
511 * spinning on locks we hold): 511 * spinning on locks we hold):
512 */ 512 */
513 for (i = 0; i < NR_CPUS; i++) 513 for_each_cpu(i)
514 alert_counter[i] = 0; 514 alert_counter[i] = 0;
515 515
516 /* 516 /*
@@ -543,7 +543,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
543 /* 543 /*
544 * die_nmi will return ONLY if NOTIFY_STOP happens.. 544 * die_nmi will return ONLY if NOTIFY_STOP happens..
545 */ 545 */
546 die_nmi(regs, "NMI Watchdog detected LOCKUP"); 546 die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
547 } else { 547 } else {
548 last_irq_sums[cpu] = sum; 548 last_irq_sums[cpu] = sum;
549 alert_counter[cpu] = 0; 549 alert_counter[cpu] = 0;
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 0480454ebffa..299e61674084 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -295,7 +295,7 @@ void show_regs(struct pt_regs * regs)
295 printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); 295 printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
296 print_symbol("EIP is at %s\n", regs->eip); 296 print_symbol("EIP is at %s\n", regs->eip);
297 297
298 if (user_mode(regs)) 298 if (user_mode_vm(regs))
299 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); 299 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
300 printk(" EFLAGS: %08lx %s (%s %.*s)\n", 300 printk(" EFLAGS: %08lx %s (%s %.*s)\n",
301 regs->eflags, print_tainted(), system_utsname.release, 301 regs->eflags, print_tainted(), system_utsname.release,
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 5c1fb6aada5b..506462ef36a0 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -34,10 +34,10 @@
34 34
35/* 35/*
36 * Determines which flags the user has access to [1 = access, 0 = no access]. 36 * Determines which flags the user has access to [1 = access, 0 = no access].
37 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9). 37 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
38 * Also masks reserved bits (31-22, 15, 5, 3, 1). 38 * Also masks reserved bits (31-22, 15, 5, 3, 1).
39 */ 39 */
40#define FLAG_MASK 0x00054dd5 40#define FLAG_MASK 0x00050dd5
41 41
42/* set's the trap flag. */ 42/* set's the trap flag. */
43#define TRAP_FLAG 0x100 43#define TRAP_FLAG 0x100
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index 7455ab643943..967dc74df9ee 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -110,11 +110,11 @@ asm(
110".align 4\n" 110".align 4\n"
111".globl __write_lock_failed\n" 111".globl __write_lock_failed\n"
112"__write_lock_failed:\n\t" 112"__write_lock_failed:\n\t"
113 LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" 113 LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n"
114"1: rep; nop\n\t" 114"1: rep; nop\n\t"
115 "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" 115 "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
116 "jne 1b\n\t" 116 "jne 1b\n\t"
117 LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" 117 LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t"
118 "jnz __write_lock_failed\n\t" 118 "jnz __write_lock_failed\n\t"
119 "ret" 119 "ret"
120); 120);
@@ -124,11 +124,11 @@ asm(
124".align 4\n" 124".align 4\n"
125".globl __read_lock_failed\n" 125".globl __read_lock_failed\n"
126"__read_lock_failed:\n\t" 126"__read_lock_failed:\n\t"
127 LOCK "incl (%eax)\n" 127 LOCK_PREFIX "incl (%eax)\n"
128"1: rep; nop\n\t" 128"1: rep; nop\n\t"
129 "cmpl $1,(%eax)\n\t" 129 "cmpl $1,(%eax)\n\t"
130 "js 1b\n\t" 130 "js 1b\n\t"
131 LOCK "decl (%eax)\n\t" 131 LOCK_PREFIX "decl (%eax)\n\t"
132 "js __read_lock_failed\n\t" 132 "js __read_lock_failed\n\t"
133 "ret" 133 "ret"
134); 134);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index ab62a9f4701e..2d8782960f41 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1377,101 +1377,6 @@ static void __init register_memory(void)
1377 pci_mem_start, gapstart, gapsize); 1377 pci_mem_start, gapstart, gapsize);
1378} 1378}
1379 1379
1380/* Use inline assembly to define this because the nops are defined
1381 as inline assembly strings in the include files and we cannot
1382 get them easily into strings. */
1383asm("\t.data\nintelnops: "
1384 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
1385 GENERIC_NOP7 GENERIC_NOP8);
1386asm("\t.data\nk8nops: "
1387 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
1388 K8_NOP7 K8_NOP8);
1389asm("\t.data\nk7nops: "
1390 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
1391 K7_NOP7 K7_NOP8);
1392
1393extern unsigned char intelnops[], k8nops[], k7nops[];
1394static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
1395 NULL,
1396 intelnops,
1397 intelnops + 1,
1398 intelnops + 1 + 2,
1399 intelnops + 1 + 2 + 3,
1400 intelnops + 1 + 2 + 3 + 4,
1401 intelnops + 1 + 2 + 3 + 4 + 5,
1402 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
1403 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1404};
1405static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
1406 NULL,
1407 k8nops,
1408 k8nops + 1,
1409 k8nops + 1 + 2,
1410 k8nops + 1 + 2 + 3,
1411 k8nops + 1 + 2 + 3 + 4,
1412 k8nops + 1 + 2 + 3 + 4 + 5,
1413 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
1414 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1415};
1416static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
1417 NULL,
1418 k7nops,
1419 k7nops + 1,
1420 k7nops + 1 + 2,
1421 k7nops + 1 + 2 + 3,
1422 k7nops + 1 + 2 + 3 + 4,
1423 k7nops + 1 + 2 + 3 + 4 + 5,
1424 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
1425 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
1426};
1427static struct nop {
1428 int cpuid;
1429 unsigned char **noptable;
1430} noptypes[] = {
1431 { X86_FEATURE_K8, k8_nops },
1432 { X86_FEATURE_K7, k7_nops },
1433 { -1, NULL }
1434};
1435
1436/* Replace instructions with better alternatives for this CPU type.
1437
1438 This runs before SMP is initialized to avoid SMP problems with
1439 self modifying code. This implies that assymetric systems where
1440 APs have less capabilities than the boot processor are not handled.
1441 Tough. Make sure you disable such features by hand. */
1442void apply_alternatives(void *start, void *end)
1443{
1444 struct alt_instr *a;
1445 int diff, i, k;
1446 unsigned char **noptable = intel_nops;
1447 for (i = 0; noptypes[i].cpuid >= 0; i++) {
1448 if (boot_cpu_has(noptypes[i].cpuid)) {
1449 noptable = noptypes[i].noptable;
1450 break;
1451 }
1452 }
1453 for (a = start; (void *)a < end; a++) {
1454 if (!boot_cpu_has(a->cpuid))
1455 continue;
1456 BUG_ON(a->replacementlen > a->instrlen);
1457 memcpy(a->instr, a->replacement, a->replacementlen);
1458 diff = a->instrlen - a->replacementlen;
1459 /* Pad the rest with nops */
1460 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
1461 k = diff;
1462 if (k > ASM_NOP_MAX)
1463 k = ASM_NOP_MAX;
1464 memcpy(a->instr + i, noptable[k], k);
1465 }
1466 }
1467}
1468
1469void __init alternative_instructions(void)
1470{
1471 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
1472 apply_alternatives(__alt_instructions, __alt_instructions_end);
1473}
1474
1475static char * __init machine_specific_memory_setup(void); 1380static char * __init machine_specific_memory_setup(void);
1476 1381
1477#ifdef CONFIG_MCA 1382#ifdef CONFIG_MCA
@@ -1554,6 +1459,16 @@ void __init setup_arch(char **cmdline_p)
1554 1459
1555 parse_cmdline_early(cmdline_p); 1460 parse_cmdline_early(cmdline_p);
1556 1461
1462#ifdef CONFIG_EARLY_PRINTK
1463 {
1464 char *s = strstr(*cmdline_p, "earlyprintk=");
1465 if (s) {
1466 setup_early_printk(strchr(s, '=') + 1);
1467 printk("early console enabled\n");
1468 }
1469 }
1470#endif
1471
1557 max_low_pfn = setup_memory(); 1472 max_low_pfn = setup_memory();
1558 1473
1559 /* 1474 /*
@@ -1578,19 +1493,6 @@ void __init setup_arch(char **cmdline_p)
1578 * NOTE: at this point the bootmem allocator is fully available. 1493 * NOTE: at this point the bootmem allocator is fully available.
1579 */ 1494 */
1580 1495
1581#ifdef CONFIG_EARLY_PRINTK
1582 {
1583 char *s = strstr(*cmdline_p, "earlyprintk=");
1584 if (s) {
1585 extern void setup_early_printk(char *);
1586
1587 setup_early_printk(strchr(s, '=') + 1);
1588 printk("early console enabled\n");
1589 }
1590 }
1591#endif
1592
1593
1594 dmi_scan_machine(); 1496 dmi_scan_machine();
1595 1497
1596#ifdef CONFIG_X86_GENERICARCH 1498#ifdef CONFIG_X86_GENERICARCH
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 963616d364ec..5c352c3a9e7f 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -123,7 +123,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
123 err |= __get_user(tmp, &sc->seg); \ 123 err |= __get_user(tmp, &sc->seg); \
124 loadsegment(seg,tmp); } 124 loadsegment(seg,tmp); }
125 125
126#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \ 126#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \
127 X86_EFLAGS_OF | X86_EFLAGS_DF | \
127 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ 128 X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
128 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) 129 X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
129 130
@@ -582,9 +583,6 @@ static void fastcall do_signal(struct pt_regs *regs)
582 if (!user_mode(regs)) 583 if (!user_mode(regs))
583 return; 584 return;
584 585
585 if (try_to_freeze())
586 goto no_signal;
587
588 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 586 if (test_thread_flag(TIF_RESTORE_SIGMASK))
589 oldset = &current->saved_sigmask; 587 oldset = &current->saved_sigmask;
590 else 588 else
@@ -613,7 +611,6 @@ static void fastcall do_signal(struct pt_regs *regs)
613 return; 611 return;
614 } 612 }
615 613
616no_signal:
617 /* Did we come from a system call? */ 614 /* Did we come from a system call? */
618 if (regs->orig_eax >= 0) { 615 if (regs->orig_eax >= 0) {
619 /* Restart the system call - no handlers present */ 616 /* Restart the system call - no handlers present */
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 7007e1783797..4c470e99a742 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -899,6 +899,7 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
899 unsigned short nmi_high = 0, nmi_low = 0; 899 unsigned short nmi_high = 0, nmi_low = 0;
900 900
901 ++cpucount; 901 ++cpucount;
902 alternatives_smp_switch(1);
902 903
903 /* 904 /*
904 * We can't use kernel_thread since we must avoid to 905 * We can't use kernel_thread since we must avoid to
@@ -1368,6 +1369,8 @@ void __cpu_die(unsigned int cpu)
1368 /* They ack this in play_dead by setting CPU_DEAD */ 1369 /* They ack this in play_dead by setting CPU_DEAD */
1369 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 1370 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
1370 printk ("CPU %d is now offline\n", cpu); 1371 printk ("CPU %d is now offline\n", cpu);
1372 if (1 == num_online_cpus())
1373 alternatives_smp_switch(0);
1371 return; 1374 return;
1372 } 1375 }
1373 msleep(100); 1376 msleep(100);
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 67a0e1baa28b..296355292c7c 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -41,6 +41,15 @@ int arch_register_cpu(int num){
41 parent = &node_devices[node].node; 41 parent = &node_devices[node].node;
42#endif /* CONFIG_NUMA */ 42#endif /* CONFIG_NUMA */
43 43
44 /*
45 * CPU0 cannot be offlined due to several
46 * restrictions and assumptions in kernel. This basically
47 * doesnt add a control file, one cannot attempt to offline
48 * BSP.
49 */
50 if (!num)
51 cpu_devices[num].cpu.no_control = 1;
52
44 return register_cpu(&cpu_devices[num].cpu, num, parent); 53 return register_cpu(&cpu_devices[num].cpu, num, parent);
45} 54}
46 55
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index b814dbdcc91e..de5386b01d38 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -99,6 +99,8 @@ int register_die_notifier(struct notifier_block *nb)
99{ 99{
100 int err = 0; 100 int err = 0;
101 unsigned long flags; 101 unsigned long flags;
102
103 vmalloc_sync_all();
102 spin_lock_irqsave(&die_notifier_lock, flags); 104 spin_lock_irqsave(&die_notifier_lock, flags);
103 err = notifier_chain_register(&i386die_chain, nb); 105 err = notifier_chain_register(&i386die_chain, nb);
104 spin_unlock_irqrestore(&die_notifier_lock, flags); 106 spin_unlock_irqrestore(&die_notifier_lock, flags);
@@ -112,12 +114,30 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
112 p < (void *)tinfo + THREAD_SIZE - 3; 114 p < (void *)tinfo + THREAD_SIZE - 3;
113} 115}
114 116
115static void print_addr_and_symbol(unsigned long addr, char *log_lvl) 117/*
118 * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line.
119 */
120static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl,
121 int printed)
116{ 122{
117 printk(log_lvl); 123 if (!printed)
124 printk(log_lvl);
125
126#if CONFIG_STACK_BACKTRACE_COLS == 1
118 printk(" [<%08lx>] ", addr); 127 printk(" [<%08lx>] ", addr);
128#else
129 printk(" <%08lx> ", addr);
130#endif
119 print_symbol("%s", addr); 131 print_symbol("%s", addr);
120 printk("\n"); 132
133 printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS;
134
135 if (printed)
136 printk(" ");
137 else
138 printk("\n");
139
140 return printed;
121} 141}
122 142
123static inline unsigned long print_context_stack(struct thread_info *tinfo, 143static inline unsigned long print_context_stack(struct thread_info *tinfo,
@@ -125,20 +145,24 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
125 char *log_lvl) 145 char *log_lvl)
126{ 146{
127 unsigned long addr; 147 unsigned long addr;
148 int printed = 0; /* nr of entries already printed on current line */
128 149
129#ifdef CONFIG_FRAME_POINTER 150#ifdef CONFIG_FRAME_POINTER
130 while (valid_stack_ptr(tinfo, (void *)ebp)) { 151 while (valid_stack_ptr(tinfo, (void *)ebp)) {
131 addr = *(unsigned long *)(ebp + 4); 152 addr = *(unsigned long *)(ebp + 4);
132 print_addr_and_symbol(addr, log_lvl); 153 printed = print_addr_and_symbol(addr, log_lvl, printed);
133 ebp = *(unsigned long *)ebp; 154 ebp = *(unsigned long *)ebp;
134 } 155 }
135#else 156#else
136 while (valid_stack_ptr(tinfo, stack)) { 157 while (valid_stack_ptr(tinfo, stack)) {
137 addr = *stack++; 158 addr = *stack++;
138 if (__kernel_text_address(addr)) 159 if (__kernel_text_address(addr))
139 print_addr_and_symbol(addr, log_lvl); 160 printed = print_addr_and_symbol(addr, log_lvl, printed);
140 } 161 }
141#endif 162#endif
163 if (printed)
164 printk("\n");
165
142 return ebp; 166 return ebp;
143} 167}
144 168
@@ -166,8 +190,7 @@ static void show_trace_log_lvl(struct task_struct *task,
166 stack = (unsigned long*)context->previous_esp; 190 stack = (unsigned long*)context->previous_esp;
167 if (!stack) 191 if (!stack)
168 break; 192 break;
169 printk(log_lvl); 193 printk("%s =======================\n", log_lvl);
170 printk(" =======================\n");
171 } 194 }
172} 195}
173 196
@@ -194,21 +217,17 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
194 for(i = 0; i < kstack_depth_to_print; i++) { 217 for(i = 0; i < kstack_depth_to_print; i++) {
195 if (kstack_end(stack)) 218 if (kstack_end(stack))
196 break; 219 break;
197 if (i && ((i % 8) == 0)) { 220 if (i && ((i % 8) == 0))
198 printk("\n"); 221 printk("\n%s ", log_lvl);
199 printk(log_lvl);
200 printk(" ");
201 }
202 printk("%08lx ", *stack++); 222 printk("%08lx ", *stack++);
203 } 223 }
204 printk("\n"); 224 printk("\n%sCall Trace:\n", log_lvl);
205 printk(log_lvl);
206 printk("Call Trace:\n");
207 show_trace_log_lvl(task, esp, log_lvl); 225 show_trace_log_lvl(task, esp, log_lvl);
208} 226}
209 227
210void show_stack(struct task_struct *task, unsigned long *esp) 228void show_stack(struct task_struct *task, unsigned long *esp)
211{ 229{
230 printk(" ");
212 show_stack_log_lvl(task, esp, ""); 231 show_stack_log_lvl(task, esp, "");
213} 232}
214 233
@@ -233,7 +252,7 @@ void show_registers(struct pt_regs *regs)
233 252
234 esp = (unsigned long) (&regs->esp); 253 esp = (unsigned long) (&regs->esp);
235 savesegment(ss, ss); 254 savesegment(ss, ss);
236 if (user_mode(regs)) { 255 if (user_mode_vm(regs)) {
237 in_kernel = 0; 256 in_kernel = 0;
238 esp = regs->esp; 257 esp = regs->esp;
239 ss = regs->xss & 0xffff; 258 ss = regs->xss & 0xffff;
@@ -333,6 +352,8 @@ void die(const char * str, struct pt_regs * regs, long err)
333 static int die_counter; 352 static int die_counter;
334 unsigned long flags; 353 unsigned long flags;
335 354
355 oops_enter();
356
336 if (die.lock_owner != raw_smp_processor_id()) { 357 if (die.lock_owner != raw_smp_processor_id()) {
337 console_verbose(); 358 console_verbose();
338 spin_lock_irqsave(&die.lock, flags); 359 spin_lock_irqsave(&die.lock, flags);
@@ -385,6 +406,7 @@ void die(const char * str, struct pt_regs * regs, long err)
385 ssleep(5); 406 ssleep(5);
386 panic("Fatal exception"); 407 panic("Fatal exception");
387 } 408 }
409 oops_exit();
388 do_exit(SIGSEGV); 410 do_exit(SIGSEGV);
389} 411}
390 412
@@ -623,7 +645,7 @@ void die_nmi (struct pt_regs *regs, const char *msg)
623 /* If we are in kernel we are probably nested up pretty bad 645 /* If we are in kernel we are probably nested up pretty bad
624 * and might aswell get out now while we still can. 646 * and might aswell get out now while we still can.
625 */ 647 */
626 if (!user_mode(regs)) { 648 if (!user_mode_vm(regs)) {
627 current->thread.trap_no = 2; 649 current->thread.trap_no = 2;
628 crash_kexec(regs); 650 crash_kexec(regs);
629 } 651 }
@@ -694,6 +716,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
694 716
695void set_nmi_callback(nmi_callback_t callback) 717void set_nmi_callback(nmi_callback_t callback)
696{ 718{
719 vmalloc_sync_all();
697 rcu_assign_pointer(nmi_callback, callback); 720 rcu_assign_pointer(nmi_callback, callback);
698} 721}
699EXPORT_SYMBOL_GPL(set_nmi_callback); 722EXPORT_SYMBOL_GPL(set_nmi_callback);
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 4710195b6b74..3f21c6f6466d 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -68,6 +68,26 @@ SECTIONS
68 *(.data.init_task) 68 *(.data.init_task)
69 } 69 }
70 70
71 /* might get freed after init */
72 . = ALIGN(4096);
73 __smp_alt_begin = .;
74 __smp_alt_instructions = .;
75 .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
76 *(.smp_altinstructions)
77 }
78 __smp_alt_instructions_end = .;
79 . = ALIGN(4);
80 __smp_locks = .;
81 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
82 *(.smp_locks)
83 }
84 __smp_locks_end = .;
85 .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
86 *(.smp_altinstr_replacement)
87 }
88 . = ALIGN(4096);
89 __smp_alt_end = .;
90
71 /* will be freed after init */ 91 /* will be freed after init */
72 . = ALIGN(4096); /* Init code and data */ 92 . = ALIGN(4096); /* Init code and data */
73 __init_begin = .; 93 __init_begin = .;
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 76b728159403..3b62baa6a371 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -21,6 +21,9 @@
21 * instruction clobbers %esp, the user's %esp won't even survive entry 21 * instruction clobbers %esp, the user's %esp won't even survive entry
22 * into the kernel. We store %esp in %ebp. Code in entry.S must fetch 22 * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
23 * arg6 from the stack. 23 * arg6 from the stack.
24 *
25 * You can not use this vsyscall for the clone() syscall because the
26 * three dwords on the parent stack do not get copied to the child.
24 */ 27 */
25 .text 28 .text
26 .globl __kernel_vsyscall 29 .globl __kernel_vsyscall
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index f1e3204f5dec..80566ca4a80a 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -83,6 +83,7 @@ struct es7000_oem_table {
83 struct psai psai; 83 struct psai psai;
84}; 84};
85 85
86#ifdef CONFIG_ACPI
86struct acpi_table_sdt { 87struct acpi_table_sdt {
87 unsigned long pa; 88 unsigned long pa;
88 unsigned long count; 89 unsigned long count;
@@ -99,6 +100,9 @@ struct oem_table {
99 u32 OEMTableSize; 100 u32 OEMTableSize;
100}; 101};
101 102
103extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
104#endif
105
102struct mip_reg { 106struct mip_reg {
103 unsigned long long off_0; 107 unsigned long long off_0;
104 unsigned long long off_8; 108 unsigned long long off_8;
@@ -114,7 +118,6 @@ struct mip_reg {
114#define MIP_FUNC(VALUE) (VALUE & 0xff) 118#define MIP_FUNC(VALUE) (VALUE & 0xff)
115 119
116extern int parse_unisys_oem (char *oemptr); 120extern int parse_unisys_oem (char *oemptr);
117extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
118extern void setup_unisys(void); 121extern void setup_unisys(void);
119extern int es7000_start_cpu(int cpu, unsigned long eip); 122extern int es7000_start_cpu(int cpu, unsigned long eip);
120extern void es7000_sw_apic(void); 123extern void es7000_sw_apic(void);
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index a9ab0644f403..3d0fc853516d 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -51,8 +51,6 @@ struct mip_reg *host_reg;
51int mip_port; 51int mip_port;
52unsigned long mip_addr, host_addr; 52unsigned long mip_addr, host_addr;
53 53
54#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI)
55
56/* 54/*
57 * GSI override for ES7000 platforms. 55 * GSI override for ES7000 platforms.
58 */ 56 */
@@ -76,8 +74,6 @@ es7000_rename_gsi(int ioapic, int gsi)
76 return gsi; 74 return gsi;
77} 75}
78 76
79#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */
80
81void __init 77void __init
82setup_unisys(void) 78setup_unisys(void)
83{ 79{
@@ -160,6 +156,7 @@ parse_unisys_oem (char *oemptr)
160 return es7000_plat; 156 return es7000_plat;
161} 157}
162 158
159#ifdef CONFIG_ACPI
163int __init 160int __init
164find_unisys_acpi_oem_table(unsigned long *oem_addr) 161find_unisys_acpi_oem_table(unsigned long *oem_addr)
165{ 162{
@@ -212,6 +209,7 @@ find_unisys_acpi_oem_table(unsigned long *oem_addr)
212 } 209 }
213 return -1; 210 return -1;
214} 211}
212#endif
215 213
216static void 214static void
217es7000_spin(int n) 215es7000_spin(int n)
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index cf572d9a3b6e..7f0fcf219a26 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -214,6 +214,68 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
214 214
215fastcall void do_invalid_op(struct pt_regs *, unsigned long); 215fastcall void do_invalid_op(struct pt_regs *, unsigned long);
216 216
217static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
218{
219 unsigned index = pgd_index(address);
220 pgd_t *pgd_k;
221 pud_t *pud, *pud_k;
222 pmd_t *pmd, *pmd_k;
223
224 pgd += index;
225 pgd_k = init_mm.pgd + index;
226
227 if (!pgd_present(*pgd_k))
228 return NULL;
229
230 /*
231 * set_pgd(pgd, *pgd_k); here would be useless on PAE
232 * and redundant with the set_pmd() on non-PAE. As would
233 * set_pud.
234 */
235
236 pud = pud_offset(pgd, address);
237 pud_k = pud_offset(pgd_k, address);
238 if (!pud_present(*pud_k))
239 return NULL;
240
241 pmd = pmd_offset(pud, address);
242 pmd_k = pmd_offset(pud_k, address);
243 if (!pmd_present(*pmd_k))
244 return NULL;
245 if (!pmd_present(*pmd))
246 set_pmd(pmd, *pmd_k);
247 else
248 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
249 return pmd_k;
250}
251
252/*
253 * Handle a fault on the vmalloc or module mapping area
254 *
255 * This assumes no large pages in there.
256 */
257static inline int vmalloc_fault(unsigned long address)
258{
259 unsigned long pgd_paddr;
260 pmd_t *pmd_k;
261 pte_t *pte_k;
262 /*
263 * Synchronize this task's top level page-table
264 * with the 'reference' page table.
265 *
266 * Do _not_ use "current" here. We might be inside
267 * an interrupt in the middle of a task switch..
268 */
269 pgd_paddr = read_cr3();
270 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
271 if (!pmd_k)
272 return -1;
273 pte_k = pte_offset_kernel(pmd_k, address);
274 if (!pte_present(*pte_k))
275 return -1;
276 return 0;
277}
278
217/* 279/*
218 * This routine handles page faults. It determines the address, 280 * This routine handles page faults. It determines the address,
219 * and the problem, and then passes it off to one of the appropriate 281 * and the problem, and then passes it off to one of the appropriate
@@ -223,6 +285,8 @@ fastcall void do_invalid_op(struct pt_regs *, unsigned long);
223 * bit 0 == 0 means no page found, 1 means protection fault 285 * bit 0 == 0 means no page found, 1 means protection fault
224 * bit 1 == 0 means read, 1 means write 286 * bit 1 == 0 means read, 1 means write
225 * bit 2 == 0 means kernel, 1 means user-mode 287 * bit 2 == 0 means kernel, 1 means user-mode
288 * bit 3 == 1 means use of reserved bit detected
289 * bit 4 == 1 means fault was an instruction fetch
226 */ 290 */
227fastcall void __kprobes do_page_fault(struct pt_regs *regs, 291fastcall void __kprobes do_page_fault(struct pt_regs *regs,
228 unsigned long error_code) 292 unsigned long error_code)
@@ -237,13 +301,6 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
237 /* get the address */ 301 /* get the address */
238 address = read_cr2(); 302 address = read_cr2();
239 303
240 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
241 SIGSEGV) == NOTIFY_STOP)
242 return;
243 /* It's safe to allow irq's after cr2 has been saved */
244 if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
245 local_irq_enable();
246
247 tsk = current; 304 tsk = current;
248 305
249 si_code = SEGV_MAPERR; 306 si_code = SEGV_MAPERR;
@@ -259,17 +316,29 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
259 * 316 *
260 * This verifies that the fault happens in kernel space 317 * This verifies that the fault happens in kernel space
261 * (error_code & 4) == 0, and that the fault was not a 318 * (error_code & 4) == 0, and that the fault was not a
262 * protection error (error_code & 1) == 0. 319 * protection error (error_code & 9) == 0.
263 */ 320 */
264 if (unlikely(address >= TASK_SIZE)) { 321 if (unlikely(address >= TASK_SIZE)) {
265 if (!(error_code & 5)) 322 if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
266 goto vmalloc_fault; 323 return;
267 /* 324 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
325 SIGSEGV) == NOTIFY_STOP)
326 return;
327 /*
268 * Don't take the mm semaphore here. If we fixup a prefetch 328 * Don't take the mm semaphore here. If we fixup a prefetch
269 * fault we could otherwise deadlock. 329 * fault we could otherwise deadlock.
270 */ 330 */
271 goto bad_area_nosemaphore; 331 goto bad_area_nosemaphore;
272 } 332 }
333
334 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
335 SIGSEGV) == NOTIFY_STOP)
336 return;
337
338 /* It's safe to allow irq's after cr2 has been saved and the vmalloc
339 fault has been handled. */
340 if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
341 local_irq_enable();
273 342
274 mm = tsk->mm; 343 mm = tsk->mm;
275 344
@@ -440,24 +509,31 @@ no_context:
440 509
441 bust_spinlocks(1); 510 bust_spinlocks(1);
442 511
443#ifdef CONFIG_X86_PAE 512 if (oops_may_print()) {
444 if (error_code & 16) { 513 #ifdef CONFIG_X86_PAE
445 pte_t *pte = lookup_address(address); 514 if (error_code & 16) {
515 pte_t *pte = lookup_address(address);
446 516
447 if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) 517 if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
448 printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid); 518 printk(KERN_CRIT "kernel tried to execute "
519 "NX-protected page - exploit attempt? "
520 "(uid: %d)\n", current->uid);
521 }
522 #endif
523 if (address < PAGE_SIZE)
524 printk(KERN_ALERT "BUG: unable to handle kernel NULL "
525 "pointer dereference");
526 else
527 printk(KERN_ALERT "BUG: unable to handle kernel paging"
528 " request");
529 printk(" at virtual address %08lx\n",address);
530 printk(KERN_ALERT " printing eip:\n");
531 printk("%08lx\n", regs->eip);
449 } 532 }
450#endif
451 if (address < PAGE_SIZE)
452 printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
453 else
454 printk(KERN_ALERT "Unable to handle kernel paging request");
455 printk(" at virtual address %08lx\n",address);
456 printk(KERN_ALERT " printing eip:\n");
457 printk("%08lx\n", regs->eip);
458 page = read_cr3(); 533 page = read_cr3();
459 page = ((unsigned long *) __va(page))[address >> 22]; 534 page = ((unsigned long *) __va(page))[address >> 22];
460 printk(KERN_ALERT "*pde = %08lx\n", page); 535 if (oops_may_print())
536 printk(KERN_ALERT "*pde = %08lx\n", page);
461 /* 537 /*
462 * We must not directly access the pte in the highpte 538 * We must not directly access the pte in the highpte
463 * case, the page table might be allocated in highmem. 539 * case, the page table might be allocated in highmem.
@@ -465,7 +541,7 @@ no_context:
465 * it's allocated already. 541 * it's allocated already.
466 */ 542 */
467#ifndef CONFIG_HIGHPTE 543#ifndef CONFIG_HIGHPTE
468 if (page & 1) { 544 if ((page & 1) && oops_may_print()) {
469 page &= PAGE_MASK; 545 page &= PAGE_MASK;
470 address &= 0x003ff000; 546 address &= 0x003ff000;
471 page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; 547 page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
@@ -510,51 +586,41 @@ do_sigbus:
510 tsk->thread.error_code = error_code; 586 tsk->thread.error_code = error_code;
511 tsk->thread.trap_no = 14; 587 tsk->thread.trap_no = 14;
512 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); 588 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
513 return; 589}
514
515vmalloc_fault:
516 {
517 /*
518 * Synchronize this task's top level page-table
519 * with the 'reference' page table.
520 *
521 * Do _not_ use "tsk" here. We might be inside
522 * an interrupt in the middle of a task switch..
523 */
524 int index = pgd_index(address);
525 unsigned long pgd_paddr;
526 pgd_t *pgd, *pgd_k;
527 pud_t *pud, *pud_k;
528 pmd_t *pmd, *pmd_k;
529 pte_t *pte_k;
530
531 pgd_paddr = read_cr3();
532 pgd = index + (pgd_t *)__va(pgd_paddr);
533 pgd_k = init_mm.pgd + index;
534
535 if (!pgd_present(*pgd_k))
536 goto no_context;
537
538 /*
539 * set_pgd(pgd, *pgd_k); here would be useless on PAE
540 * and redundant with the set_pmd() on non-PAE. As would
541 * set_pud.
542 */
543 590
544 pud = pud_offset(pgd, address); 591#ifndef CONFIG_X86_PAE
545 pud_k = pud_offset(pgd_k, address); 592void vmalloc_sync_all(void)
546 if (!pud_present(*pud_k)) 593{
547 goto no_context; 594 /*
548 595 * Note that races in the updates of insync and start aren't
549 pmd = pmd_offset(pud, address); 596 * problematic: insync can only get set bits added, and updates to
550 pmd_k = pmd_offset(pud_k, address); 597 * start are only improving performance (without affecting correctness
551 if (!pmd_present(*pmd_k)) 598 * if undone).
552 goto no_context; 599 */
553 set_pmd(pmd, *pmd_k); 600 static DECLARE_BITMAP(insync, PTRS_PER_PGD);
601 static unsigned long start = TASK_SIZE;
602 unsigned long address;
554 603
555 pte_k = pte_offset_kernel(pmd_k, address); 604 BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
556 if (!pte_present(*pte_k)) 605 for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
557 goto no_context; 606 if (!test_bit(pgd_index(address), insync)) {
558 return; 607 unsigned long flags;
608 struct page *page;
609
610 spin_lock_irqsave(&pgd_lock, flags);
611 for (page = pgd_list; page; page =
612 (struct page *)page->index)
613 if (!vmalloc_sync_one(page_address(page),
614 address)) {
615 BUG_ON(page != pgd_list);
616 break;
617 }
618 spin_unlock_irqrestore(&pgd_lock, flags);
619 if (!page)
620 set_bit(pgd_index(address), insync);
621 }
622 if (address == start && test_bit(pgd_index(address), insync))
623 start = address + PGDIR_SIZE;
559 } 624 }
560} 625}
626#endif
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7ba55a6e2dbc..9f66ac582a8b 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -720,21 +720,6 @@ static int noinline do_test_wp_bit(void)
720 return flag; 720 return flag;
721} 721}
722 722
723void free_initmem(void)
724{
725 unsigned long addr;
726
727 addr = (unsigned long)(&__init_begin);
728 for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
729 ClearPageReserved(virt_to_page(addr));
730 init_page_count(virt_to_page(addr));
731 memset((void *)addr, 0xcc, PAGE_SIZE);
732 free_page(addr);
733 totalram_pages++;
734 }
735 printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
736}
737
738#ifdef CONFIG_DEBUG_RODATA 723#ifdef CONFIG_DEBUG_RODATA
739 724
740extern char __start_rodata, __end_rodata; 725extern char __start_rodata, __end_rodata;
@@ -758,17 +743,31 @@ void mark_rodata_ro(void)
758} 743}
759#endif 744#endif
760 745
746void free_init_pages(char *what, unsigned long begin, unsigned long end)
747{
748 unsigned long addr;
749
750 for (addr = begin; addr < end; addr += PAGE_SIZE) {
751 ClearPageReserved(virt_to_page(addr));
752 init_page_count(virt_to_page(addr));
753 memset((void *)addr, 0xcc, PAGE_SIZE);
754 free_page(addr);
755 totalram_pages++;
756 }
757 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
758}
759
760void free_initmem(void)
761{
762 free_init_pages("unused kernel memory",
763 (unsigned long)(&__init_begin),
764 (unsigned long)(&__init_end));
765}
761 766
762#ifdef CONFIG_BLK_DEV_INITRD 767#ifdef CONFIG_BLK_DEV_INITRD
763void free_initrd_mem(unsigned long start, unsigned long end) 768void free_initrd_mem(unsigned long start, unsigned long end)
764{ 769{
765 if (start < end) 770 free_init_pages("initrd memory", start, end);
766 printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
767 for (; start < end; start += PAGE_SIZE) {
768 ClearPageReserved(virt_to_page(start));
769 init_page_count(virt_to_page(start));
770 free_page(start);
771 totalram_pages++;
772 }
773} 771}
774#endif 772#endif
773
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 0493e8b8ec49..1accce50c2c7 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
122static void free_msrs(void) 122static void free_msrs(void)
123{ 123{
124 int i; 124 int i;
125 for (i = 0; i < NR_CPUS; ++i) { 125 for_each_cpu(i) {
126 kfree(cpu_msrs[i].counters); 126 kfree(cpu_msrs[i].counters);
127 cpu_msrs[i].counters = NULL; 127 cpu_msrs[i].counters = NULL;
128 kfree(cpu_msrs[i].controls); 128 kfree(cpu_msrs[i].controls);
@@ -138,10 +138,7 @@ static int allocate_msrs(void)
138 size_t counters_size = sizeof(struct op_msr) * model->num_counters; 138 size_t counters_size = sizeof(struct op_msr) * model->num_counters;
139 139
140 int i; 140 int i;
141 for (i = 0; i < NR_CPUS; ++i) { 141 for_each_online_cpu(i) {
142 if (!cpu_online(i))
143 continue;
144
145 cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL); 142 cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
146 if (!cpu_msrs[i].counters) { 143 if (!cpu_msrs[i].counters) {
147 success = 0; 144 success = 0;
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 626cdc83668b..0e5c6ae50228 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -46,11 +46,6 @@
46#define KEYBOARD_INTR 3 /* must match with simulator! */ 46#define KEYBOARD_INTR 3 /* must match with simulator! */
47 47
48#define NR_PORTS 1 /* only one port for now */ 48#define NR_PORTS 1 /* only one port for now */
49#define SERIAL_INLINE 1
50
51#ifdef SERIAL_INLINE
52#define _INLINE_ inline
53#endif
54 49
55#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) 50#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT)
56 51
@@ -237,7 +232,7 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch)
237 local_irq_restore(flags); 232 local_irq_restore(flags);
238} 233}
239 234
240static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) 235static void transmit_chars(struct async_struct *info, int *intr_done)
241{ 236{
242 int count; 237 int count;
243 unsigned long flags; 238 unsigned long flags;
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 1ce63926a3c0..a4634b06f675 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v)
37 37
38 if (i == 0) { 38 if (i == 0) {
39 seq_printf(p, " "); 39 seq_printf(p, " ");
40 for (j=0; j<NR_CPUS; j++) 40 for_each_online_cpu(j)
41 if (cpu_online(j)) 41 seq_printf(p, "CPU%d ",j);
42 seq_printf(p, "CPU%d ",j);
43 seq_putc(p, '\n'); 42 seq_putc(p, '\n');
44 } 43 }
45 44
@@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v)
52#ifndef CONFIG_SMP 51#ifndef CONFIG_SMP
53 seq_printf(p, "%10u ", kstat_irqs(i)); 52 seq_printf(p, "%10u ", kstat_irqs(i));
54#else 53#else
55 for (j = 0; j < NR_CPUS; j++) 54 for_each_online_cpu(j)
56 if (cpu_online(j)) 55 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
57 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
58#endif 56#endif
59 seq_printf(p, " %14s", irq_desc[i].handler->typename); 57 seq_printf(p, " %14s", irq_desc[i].handler->typename);
60 seq_printf(p, " %s", action->name); 58 seq_printf(p, " %s", action->name);
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index 703cbc6dc9cc..15c16b62dff5 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ 19#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */
20#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
21#include <linux/bcd.h>
21#include <asm/bvme6000hw.h> 22#include <asm/bvme6000hw.h>
22 23
23#include <asm/io.h> 24#include <asm/io.h>
@@ -32,9 +33,6 @@
32 * ioctls. 33 * ioctls.
33 */ 34 */
34 35
35#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10)
36#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)
37
38static unsigned char days_in_mo[] = 36static unsigned char days_in_mo[] =
39{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; 37{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
40 38
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7d93992e462c..3dd76b3d2967 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -68,9 +68,8 @@ int show_interrupts(struct seq_file *p, void *v)
68 68
69 if (i == 0) { 69 if (i == 0) {
70 seq_printf(p, " "); 70 seq_printf(p, " ");
71 for (j=0; j<NR_CPUS; j++) 71 for_each_online_cpu(j)
72 if (cpu_online(j)) 72 seq_printf(p, "CPU%d ",j);
73 seq_printf(p, "CPU%d ",j);
74 seq_putc(p, '\n'); 73 seq_putc(p, '\n');
75 } 74 }
76 75
@@ -83,9 +82,8 @@ int show_interrupts(struct seq_file *p, void *v)
83#ifndef CONFIG_SMP 82#ifndef CONFIG_SMP
84 seq_printf(p, "%10u ", kstat_irqs(i)); 83 seq_printf(p, "%10u ", kstat_irqs(i));
85#else 84#else
86 for (j = 0; j < NR_CPUS; j++) 85 for_each_online_cpu(j)
87 if (cpu_online(j)) 86 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
88 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
89#endif 87#endif
90 seq_printf(p, " %14s", irq_desc[i].handler->typename); 88 seq_printf(p, " %14s", irq_desc[i].handler->typename);
91 seq_printf(p, " %s", action->name); 89 seq_printf(p, " %s", action->name);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 06ed90752424..78d171bfa331 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
167 mb(); 167 mb();
168 168
169 /* Send a message to all other CPUs and wait for them to respond */ 169 /* Send a message to all other CPUs and wait for them to respond */
170 for (i = 0; i < NR_CPUS; i++) 170 for_each_online_cpu(i)
171 if (cpu_online(i) && i != cpu) 171 if (i != cpu)
172 core_send_ipi(i, SMP_CALL_FUNCTION); 172 core_send_ipi(i, SMP_CALL_FUNCTION);
173 173
174 /* Wait for response */ 174 /* Wait for response */
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 73e5e52781d8..2854ac4c9be1 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -88,12 +88,9 @@ static inline int find_level(cpuid_t *cpunum, int irq)
88{ 88{
89 int cpu, i; 89 int cpu, i;
90 90
91 for (cpu = 0; cpu <= NR_CPUS; cpu++) { 91 for_each_online_cpu(cpu) {
92 struct slice_data *si = cpu_data[cpu].data; 92 struct slice_data *si = cpu_data[cpu].data;
93 93
94 if (!cpu_online(cpu))
95 continue;
96
97 for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) 94 for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
98 if (si->level_to_irq[i] == irq) { 95 if (si->level_to_irq[i] == irq) {
99 *cpunum = cpu; 96 *cpunum = cpu;
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 25564b7ca6bb..d6ac1c60a471 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op)
298{ 298{
299 int i; 299 int i;
300 300
301 for (i = 0; i < NR_CPUS; i++) { 301 for_each_online_cpu(i) {
302 if (cpu_online(i) && i != smp_processor_id()) 302 if (i != smp_processor_id())
303 send_IPI_single(i, op); 303 send_IPI_single(i, op);
304 } 304 }
305} 305}
@@ -643,14 +643,13 @@ int sys_cpus(int argc, char **argv)
643 if ( argc == 1 ){ 643 if ( argc == 1 ){
644 644
645#ifdef DUMP_MORE_STATE 645#ifdef DUMP_MORE_STATE
646 for(i=0; i<NR_CPUS; i++) { 646 for_each_online_cpu(i) {
647 int cpus_per_line = 4; 647 int cpus_per_line = 4;
648 if(cpu_online(i)) { 648
649 if (j++ % cpus_per_line) 649 if (j++ % cpus_per_line)
650 printk(" %3d",i); 650 printk(" %3d",i);
651 else 651 else
652 printk("\n %3d",i); 652 printk("\n %3d",i);
653 }
654 } 653 }
655 printk("\n"); 654 printk("\n");
656#else 655#else
@@ -659,9 +658,7 @@ int sys_cpus(int argc, char **argv)
659 } else if((argc==2) && !(strcmp(argv[1],"-l"))) { 658 } else if((argc==2) && !(strcmp(argv[1],"-l"))) {
660 printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n"); 659 printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
661#ifdef DUMP_MORE_STATE 660#ifdef DUMP_MORE_STATE
662 for(i=0;i<NR_CPUS;i++) { 661 for_each_online_cpu(i) {
663 if (!cpu_online(i))
664 continue;
665 if (cpu_data[i].cpuid != NO_PROC_ID) { 662 if (cpu_data[i].cpuid != NO_PROC_ID) {
666 switch(cpu_data[i].state) { 663 switch(cpu_data[i].state) {
667 case STATE_RENDEZVOUS: 664 case STATE_RENDEZVOUS:
@@ -695,9 +692,7 @@ int sys_cpus(int argc, char **argv)
695 } else if ((argc==2) && !(strcmp(argv[1],"-s"))) { 692 } else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
696#ifdef DUMP_MORE_STATE 693#ifdef DUMP_MORE_STATE
697 printk("\nCPUSTATE CPUID\n"); 694 printk("\nCPUSTATE CPUID\n");
698 for (i=0;i<NR_CPUS;i++) { 695 for_each_online_cpu(i) {
699 if (!cpu_online(i))
700 continue;
701 if (cpu_data[i].cpuid != NO_PROC_ID) { 696 if (cpu_data[i].cpuid != NO_PROC_ID) {
702 switch(cpu_data[i].state) { 697 switch(cpu_data[i].state) {
703 case STATE_RENDEZVOUS: 698 case STATE_RENDEZVOUS:
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 24dc8117b822..771a59cbd213 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -135,9 +135,8 @@ skip:
135#ifdef CONFIG_TAU_INT 135#ifdef CONFIG_TAU_INT
136 if (tau_initialized){ 136 if (tau_initialized){
137 seq_puts(p, "TAU: "); 137 seq_puts(p, "TAU: ");
138 for (j = 0; j < NR_CPUS; j++) 138 for_each_online_cpu(j)
139 if (cpu_online(j)) 139 seq_printf(p, "%10u ", tau_interrupts(j));
140 seq_printf(p, "%10u ", tau_interrupts(j));
141 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 140 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
142 } 141 }
143#endif 142#endif
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 258039fb3016..cb1fe5878e8b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -81,9 +81,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
81 81
82void __kprobes arch_remove_kprobe(struct kprobe *p) 82void __kprobes arch_remove_kprobe(struct kprobe *p)
83{ 83{
84 down(&kprobe_mutex); 84 mutex_lock(&kprobe_mutex);
85 free_insn_slot(p->ainsn.insn); 85 free_insn_slot(p->ainsn.insn);
86 up(&kprobe_mutex); 86 mutex_unlock(&kprobe_mutex);
87} 87}
88 88
89static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 89static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index be12041c0fc5..c1d62bf11f29 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -162,9 +162,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
162#if defined(CONFIG_SMP) && defined(CONFIG_PPC32) 162#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
163 unsigned long bogosum = 0; 163 unsigned long bogosum = 0;
164 int i; 164 int i;
165 for (i = 0; i < NR_CPUS; ++i) 165 for_each_online_cpu(i)
166 if (cpu_online(i)) 166 bogosum += loops_per_jiffy;
167 bogosum += loops_per_jiffy;
168 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 167 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
169 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 168 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
170#endif /* CONFIG_SMP && CONFIG_PPC32 */ 169#endif /* CONFIG_SMP && CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index db72a92943bf..dc2770df25b3 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -272,9 +272,8 @@ int __init ppc_init(void)
272 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 272 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
273 273
274 /* register CPU devices */ 274 /* register CPU devices */
275 for (i = 0; i < NR_CPUS; i++) 275 for_each_cpu(i)
276 if (cpu_possible(i)) 276 register_cpu(&cpu_devices[i], i, NULL);
277 register_cpu(&cpu_devices[i], i, NULL);
278 277
279 /* call platform init */ 278 /* call platform init */
280 if (ppc_md.init != NULL) { 279 if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6d64a9bf3474..1065d87fc279 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -191,9 +191,7 @@ static void smp_psurge_message_pass(int target, int msg)
191 if (num_online_cpus() < 2) 191 if (num_online_cpus() < 2)
192 return; 192 return;
193 193
194 for (i = 0; i < NR_CPUS; i++) { 194 for_each_online_cpu(i) {
195 if (!cpu_online(i))
196 continue;
197 if (target == MSG_ALL 195 if (target == MSG_ALL
198 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) 196 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
199 || target == i) { 197 || target == i) {
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index c08ab432e958..53e9deacee82 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -168,9 +168,8 @@ int show_cpuinfo(struct seq_file *m, void *v)
168 /* Show summary information */ 168 /* Show summary information */
169#ifdef CONFIG_SMP 169#ifdef CONFIG_SMP
170 unsigned long bogosum = 0; 170 unsigned long bogosum = 0;
171 for (i = 0; i < NR_CPUS; ++i) 171 for_each_online_cpu(i)
172 if (cpu_online(i)) 172 bogosum += cpu_data[i].loops_per_jiffy;
173 bogosum += cpu_data[i].loops_per_jiffy;
174 seq_printf(m, "total bogomips\t: %lu.%02lu\n", 173 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
175 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); 174 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
176#endif /* CONFIG_SMP */ 175#endif /* CONFIG_SMP */
@@ -712,9 +711,8 @@ int __init ppc_init(void)
712 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 711 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
713 712
714 /* register CPU devices */ 713 /* register CPU devices */
715 for (i = 0; i < NR_CPUS; i++) 714 for_each_cpu(i)
716 if (cpu_possible(i)) 715 register_cpu(&cpu_devices[i], i, NULL);
717 register_cpu(&cpu_devices[i], i, NULL);
718 716
719 /* call platform init */ 717 /* call platform init */
720 if (ppc_md.init != NULL) { 718 if (ppc_md.init != NULL) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7dbe00c76c6b..d52d6d211d9f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -799,9 +799,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
799 */ 799 */
800 print_cpu_info(&S390_lowcore.cpu_data); 800 print_cpu_info(&S390_lowcore.cpu_data);
801 801
802 for(i = 0; i < NR_CPUS; i++) { 802 for_each_cpu(i) {
803 if (!cpu_possible(i))
804 continue;
805 lowcore_ptr[i] = (struct _lowcore *) 803 lowcore_ptr[i] = (struct _lowcore *)
806 __get_free_pages(GFP_KERNEL|GFP_DMA, 804 __get_free_pages(GFP_KERNEL|GFP_DMA,
807 sizeof(void*) == 8 ? 1 : 0); 805 sizeof(void*) == 8 ? 1 : 0);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 6883c00728cb..b56e79632f24 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -35,9 +35,8 @@ int show_interrupts(struct seq_file *p, void *v)
35 35
36 if (i == 0) { 36 if (i == 0) {
37 seq_puts(p, " "); 37 seq_puts(p, " ");
38 for (j=0; j<NR_CPUS; j++) 38 for_each_online_cpu(j)
39 if (cpu_online(j)) 39 seq_printf(p, "CPU%d ",j);
40 seq_printf(p, "CPU%d ",j);
41 seq_putc(p, '\n'); 40 seq_putc(p, '\n');
42 } 41 }
43 42
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index a067a34e0b64..c0e79843f580 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -404,9 +404,8 @@ static int __init topology_init(void)
404{ 404{
405 int cpu_id; 405 int cpu_id;
406 406
407 for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++) 407 for_each_cpu(cpu_id)
408 if (cpu_possible(cpu_id)) 408 register_cpu(&cpu[cpu_id], cpu_id, NULL);
409 register_cpu(&cpu[cpu_id], cpu_id, NULL);
410 409
411 return 0; 410 return 0;
412} 411}
diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c
index 9fc2b71dbd84..d69879c0e063 100644
--- a/arch/sh64/kernel/irq.c
+++ b/arch/sh64/kernel/irq.c
@@ -53,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v)
53 53
54 if (i == 0) { 54 if (i == 0) {
55 seq_puts(p, " "); 55 seq_puts(p, " ");
56 for (j=0; j<NR_CPUS; j++) 56 for_each_online_cpu(j)
57 if (cpu_online(j)) 57 seq_printf(p, "CPU%d ",j);
58 seq_printf(p, "CPU%d ",j);
59 seq_putc(p, '\n'); 58 seq_putc(p, '\n');
60 } 59 }
61 60
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 410b9a72aba9..4c60a6ef54a9 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -184,9 +184,8 @@ int show_interrupts(struct seq_file *p, void *v)
184#ifndef CONFIG_SMP 184#ifndef CONFIG_SMP
185 seq_printf(p, "%10u ", kstat_irqs(i)); 185 seq_printf(p, "%10u ", kstat_irqs(i));
186#else 186#else
187 for (j = 0; j < NR_CPUS; j++) { 187 for_each_online_cpu(j) {
188 if (cpu_online(j)) 188 seq_printf(p, "%10u ",
189 seq_printf(p, "%10u ",
190 kstat_cpu(cpu_logical_map(j)).irqs[i]); 189 kstat_cpu(cpu_logical_map(j)).irqs[i]);
191 } 190 }
192#endif 191#endif
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index c6e721d8f477..ea5682ce7031 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -243,9 +243,8 @@ int setup_profiling_timer(unsigned int multiplier)
243 return -EINVAL; 243 return -EINVAL;
244 244
245 spin_lock_irqsave(&prof_setup_lock, flags); 245 spin_lock_irqsave(&prof_setup_lock, flags);
246 for(i = 0; i < NR_CPUS; i++) { 246 for_each_cpu(i) {
247 if (cpu_possible(i)) 247 load_profile_irq(i, lvl14_resolution / multiplier);
248 load_profile_irq(i, lvl14_resolution / multiplier);
249 prof_multiplier(i) = multiplier; 248 prof_multiplier(i) = multiplier;
250 } 249 }
251 spin_unlock_irqrestore(&prof_setup_lock, flags); 250 spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -273,13 +272,12 @@ void smp_bogo(struct seq_file *m)
273{ 272{
274 int i; 273 int i;
275 274
276 for (i = 0; i < NR_CPUS; i++) { 275 for_each_online_cpu(i) {
277 if (cpu_online(i)) 276 seq_printf(m,
278 seq_printf(m, 277 "Cpu%dBogo\t: %lu.%02lu\n",
279 "Cpu%dBogo\t: %lu.%02lu\n", 278 i,
280 i, 279 cpu_data(i).udelay_val/(500000/HZ),
281 cpu_data(i).udelay_val/(500000/HZ), 280 (cpu_data(i).udelay_val/(5000/HZ))%100);
282 (cpu_data(i).udelay_val/(5000/HZ))%100);
283 } 281 }
284} 282}
285 283
@@ -288,8 +286,6 @@ void smp_info(struct seq_file *m)
288 int i; 286 int i;
289 287
290 seq_printf(m, "State:\n"); 288 seq_printf(m, "State:\n");
291 for (i = 0; i < NR_CPUS; i++) { 289 for_each_online_cpu(i)
292 if (cpu_online(i)) 290 seq_printf(m, "CPU%d\t\t: online\n", i);
293 seq_printf(m, "CPU%d\t\t: online\n", i);
294 }
295} 291}
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 52621348a56c..cea7fc6fc6e5 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -103,11 +103,9 @@ found_it: seq_printf(p, "%3d: ", i);
103#ifndef CONFIG_SMP 103#ifndef CONFIG_SMP
104 seq_printf(p, "%10u ", kstat_irqs(i)); 104 seq_printf(p, "%10u ", kstat_irqs(i));
105#else 105#else
106 for (x = 0; x < NR_CPUS; x++) { 106 for_each_online_cpu(x)
107 if (cpu_online(x)) 107 seq_printf(p, "%10u ",
108 seq_printf(p, "%10u ", 108 kstat_cpu(cpu_logical_map(x)).irqs[i]);
109 kstat_cpu(cpu_logical_map(x)).irqs[i]);
110 }
111#endif 109#endif
112 seq_printf(p, "%c %s", 110 seq_printf(p, "%c %s",
113 (action->flags & SA_INTERRUPT) ? '+' : ' ', 111 (action->flags & SA_INTERRUPT) ? '+' : ' ',
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 4219dd2ce3a2..41bb9596be48 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -249,11 +249,9 @@ void __init smp4d_boot_cpus(void)
249 } else { 249 } else {
250 unsigned long bogosum = 0; 250 unsigned long bogosum = 0;
251 251
252 for(i = 0; i < NR_CPUS; i++) { 252 for_each_present_cpu(i) {
253 if (cpu_isset(i, cpu_present_map)) { 253 bogosum += cpu_data(i).udelay_val;
254 bogosum += cpu_data(i).udelay_val; 254 smp_highest_cpu = i;
255 smp_highest_cpu = i;
256 }
257 } 255 }
258 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); 256 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
259 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 257 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index fbbd8a474c4c..1dde312eebda 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -218,10 +218,8 @@ void __init smp4m_boot_cpus(void)
218 cpu_present_map = cpumask_of_cpu(smp_processor_id()); 218 cpu_present_map = cpumask_of_cpu(smp_processor_id());
219 } else { 219 } else {
220 unsigned long bogosum = 0; 220 unsigned long bogosum = 0;
221 for(i = 0; i < NR_CPUS; i++) { 221 for_each_present_cpu(i)
222 if (cpu_isset(i, cpu_present_map)) 222 bogosum += cpu_data(i).udelay_val;
223 bogosum += cpu_data(i).udelay_val;
224 }
225 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", 223 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
226 cpucount + 1, 224 cpucount + 1,
227 bogosum/(500000/HZ), 225 bogosum/(500000/HZ),
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 8c93ba655b33..e505a4125e35 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -117,9 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
117#ifndef CONFIG_SMP 117#ifndef CONFIG_SMP
118 seq_printf(p, "%10u ", kstat_irqs(i)); 118 seq_printf(p, "%10u ", kstat_irqs(i));
119#else 119#else
120 for (j = 0; j < NR_CPUS; j++) { 120 for_each_online_cpu(j) {
121 if (!cpu_online(j))
122 continue;
123 seq_printf(p, "%10u ", 121 seq_printf(p, "%10u ",
124 kstat_cpu(j).irqs[i]); 122 kstat_cpu(j).irqs[i]);
125 } 123 }
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 373a701c90a5..1b6e2ade1008 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -57,25 +57,21 @@ void smp_info(struct seq_file *m)
57 int i; 57 int i;
58 58
59 seq_printf(m, "State:\n"); 59 seq_printf(m, "State:\n");
60 for (i = 0; i < NR_CPUS; i++) { 60 for_each_online_cpu(i)
61 if (cpu_online(i)) 61 seq_printf(m, "CPU%d:\t\tonline\n", i);
62 seq_printf(m,
63 "CPU%d:\t\tonline\n", i);
64 }
65} 62}
66 63
67void smp_bogo(struct seq_file *m) 64void smp_bogo(struct seq_file *m)
68{ 65{
69 int i; 66 int i;
70 67
71 for (i = 0; i < NR_CPUS; i++) 68 for_each_online_cpu(i)
72 if (cpu_online(i)) 69 seq_printf(m,
73 seq_printf(m, 70 "Cpu%dBogo\t: %lu.%02lu\n"
74 "Cpu%dBogo\t: %lu.%02lu\n" 71 "Cpu%dClkTck\t: %016lx\n",
75 "Cpu%dClkTck\t: %016lx\n", 72 i, cpu_data(i).udelay_val / (500000/HZ),
76 i, cpu_data(i).udelay_val / (500000/HZ), 73 (cpu_data(i).udelay_val / (5000/HZ)) % 100,
77 (cpu_data(i).udelay_val / (5000/HZ)) % 100, 74 i, cpu_data(i).clock_tick);
78 i, cpu_data(i).clock_tick);
79} 75}
80 76
81void __init smp_store_cpu_info(int id) 77void __init smp_store_cpu_info(int id)
@@ -1282,7 +1278,7 @@ int setup_profiling_timer(unsigned int multiplier)
1282 return -EINVAL; 1278 return -EINVAL;
1283 1279
1284 spin_lock_irqsave(&prof_setup_lock, flags); 1280 spin_lock_irqsave(&prof_setup_lock, flags);
1285 for (i = 0; i < NR_CPUS; i++) 1281 for_each_cpu(i)
1286 prof_multiplier(i) = multiplier; 1282 prof_multiplier(i) = multiplier;
1287 current_tick_offset = (timer_tick_offset / multiplier); 1283 current_tick_offset = (timer_tick_offset / multiplier);
1288 spin_unlock_irqrestore(&prof_setup_lock, flags); 1284 spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1384,10 +1380,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
1384 unsigned long bogosum = 0; 1380 unsigned long bogosum = 0;
1385 int i; 1381 int i;
1386 1382
1387 for (i = 0; i < NR_CPUS; i++) { 1383 for_each_online_cpu(i)
1388 if (cpu_online(i)) 1384 bogosum += cpu_data(i).udelay_val;
1389 bogosum += cpu_data(i).udelay_val;
1390 }
1391 printk("Total of %ld processors activated " 1385 printk("Total of %ld processors activated "
1392 "(%lu.%02lu BogoMIPS).\n", 1386 "(%lu.%02lu BogoMIPS).\n",
1393 (long) num_online_cpus(), 1387 (long) num_online_cpus(),
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index ded63ee9c4fd..1539a8362b6f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1828,8 +1828,8 @@ void __flush_tlb_all(void)
1828void online_page(struct page *page) 1828void online_page(struct page *page)
1829{ 1829{
1830 ClearPageReserved(page); 1830 ClearPageReserved(page);
1831 set_page_count(page, 0); 1831 init_page_count(page);
1832 free_cold_page(page); 1832 __free_page(page);
1833 totalram_pages++; 1833 totalram_pages++;
1834 num_physpages++; 1834 num_physpages++;
1835} 1835}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 27cdf9164422..80c9c18aae94 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -491,6 +491,16 @@ void __init check_bugs(void)
491 check_devanon(); 491 check_devanon();
492} 492}
493 493
494void apply_alternatives(void *start, void *end) 494void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
495{
496}
497
498void alternatives_smp_module_add(struct module *mod, char *name,
499 void *locks, void *locks_end,
500 void *text, void *text_end)
501{
502}
503
504void alternatives_smp_module_del(struct module *mod)
495{ 505{
496} 506}
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 6dffb498ccd7..a8a6aa70d695 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -17,11 +17,8 @@
17#define VGABASE ((void __iomem *)0xffffffff800b8000UL) 17#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
18#endif 18#endif
19 19
20#define MAX_YPOS max_ypos
21#define MAX_XPOS max_xpos
22
23static int max_ypos = 25, max_xpos = 80; 20static int max_ypos = 25, max_xpos = 80;
24static int current_ypos = 1, current_xpos = 0; 21static int current_ypos = 25, current_xpos = 0;
25 22
26static void early_vga_write(struct console *con, const char *str, unsigned n) 23static void early_vga_write(struct console *con, const char *str, unsigned n)
27{ 24{
@@ -29,26 +26,26 @@ static void early_vga_write(struct console *con, const char *str, unsigned n)
29 int i, k, j; 26 int i, k, j;
30 27
31 while ((c = *str++) != '\0' && n-- > 0) { 28 while ((c = *str++) != '\0' && n-- > 0) {
32 if (current_ypos >= MAX_YPOS) { 29 if (current_ypos >= max_ypos) {
33 /* scroll 1 line up */ 30 /* scroll 1 line up */
34 for (k = 1, j = 0; k < MAX_YPOS; k++, j++) { 31 for (k = 1, j = 0; k < max_ypos; k++, j++) {
35 for (i = 0; i < MAX_XPOS; i++) { 32 for (i = 0; i < max_xpos; i++) {
36 writew(readw(VGABASE + 2*(MAX_XPOS*k + i)), 33 writew(readw(VGABASE+2*(max_xpos*k+i)),
37 VGABASE + 2*(MAX_XPOS*j + i)); 34 VGABASE + 2*(max_xpos*j + i));
38 } 35 }
39 } 36 }
40 for (i = 0; i < MAX_XPOS; i++) 37 for (i = 0; i < max_xpos; i++)
41 writew(0x720, VGABASE + 2*(MAX_XPOS*j + i)); 38 writew(0x720, VGABASE + 2*(max_xpos*j + i));
42 current_ypos = MAX_YPOS-1; 39 current_ypos = max_ypos-1;
43 } 40 }
44 if (c == '\n') { 41 if (c == '\n') {
45 current_xpos = 0; 42 current_xpos = 0;
46 current_ypos++; 43 current_ypos++;
47 } else if (c != '\r') { 44 } else if (c != '\r') {
48 writew(((0x7 << 8) | (unsigned short) c), 45 writew(((0x7 << 8) | (unsigned short) c),
49 VGABASE + 2*(MAX_XPOS*current_ypos + 46 VGABASE + 2*(max_xpos*current_ypos +
50 current_xpos++)); 47 current_xpos++));
51 if (current_xpos >= MAX_XPOS) { 48 if (current_xpos >= max_xpos) {
52 current_xpos = 0; 49 current_xpos = 0;
53 current_ypos++; 50 current_ypos++;
54 } 51 }
@@ -244,6 +241,7 @@ int __init setup_early_printk(char *opt)
244 && SCREEN_INFO.orig_video_isVGA == 1) { 241 && SCREEN_INFO.orig_video_isVGA == 1) {
245 max_xpos = SCREEN_INFO.orig_video_cols; 242 max_xpos = SCREEN_INFO.orig_video_cols;
246 max_ypos = SCREEN_INFO.orig_video_lines; 243 max_ypos = SCREEN_INFO.orig_video_lines;
244 current_ypos = SCREEN_INFO.orig_y;
247 early_console = &early_vga_console; 245 early_console = &early_vga_console;
248 } else if (!strncmp(buf, "simnow", 6)) { 246 } else if (!strncmp(buf, "simnow", 6)) {
249 simnow_init(buf + 6); 247 simnow_init(buf + 6);
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 30d2a1e545fe..d8bd0b345b1e 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v)
38 38
39 if (i == 0) { 39 if (i == 0) {
40 seq_printf(p, " "); 40 seq_printf(p, " ");
41 for (j=0; j<NR_CPUS; j++) 41 for_each_online_cpu(j)
42 if (cpu_online(j)) 42 seq_printf(p, "CPU%d ",j);
43 seq_printf(p, "CPU%d ",j);
44 seq_putc(p, '\n'); 43 seq_putc(p, '\n');
45 } 44 }
46 45
@@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
53#ifndef CONFIG_SMP 52#ifndef CONFIG_SMP
54 seq_printf(p, "%10u ", kstat_irqs(i)); 53 seq_printf(p, "%10u ", kstat_irqs(i));
55#else 54#else
56 for (j=0; j<NR_CPUS; j++) 55 for_each_online_cpu(j)
57 if (cpu_online(j)) 56 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
58 seq_printf(p, "%10u ",
59 kstat_cpu(j).irqs[i]);
60#endif 57#endif
61 seq_printf(p, " %14s", irq_desc[i].handler->typename); 58 seq_printf(p, " %14s", irq_desc[i].handler->typename);
62 59
@@ -68,15 +65,13 @@ skip:
68 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 65 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
69 } else if (i == NR_IRQS) { 66 } else if (i == NR_IRQS) {
70 seq_printf(p, "NMI: "); 67 seq_printf(p, "NMI: ");
71 for (j = 0; j < NR_CPUS; j++) 68 for_each_online_cpu(j)
72 if (cpu_online(j)) 69 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
73 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
74 seq_putc(p, '\n'); 70 seq_putc(p, '\n');
75#ifdef CONFIG_X86_LOCAL_APIC 71#ifdef CONFIG_X86_LOCAL_APIC
76 seq_printf(p, "LOC: "); 72 seq_printf(p, "LOC: ");
77 for (j = 0; j < NR_CPUS; j++) 73 for_each_online_cpu(j)
78 if (cpu_online(j)) 74 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
79 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
80 seq_putc(p, '\n'); 75 seq_putc(p, '\n');
81#endif 76#endif
82 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 77 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 8b866a8572cf..14f0ced613b6 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -222,9 +222,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
222 222
223void __kprobes arch_remove_kprobe(struct kprobe *p) 223void __kprobes arch_remove_kprobe(struct kprobe *p)
224{ 224{
225 down(&kprobe_mutex); 225 mutex_lock(&kprobe_mutex);
226 free_insn_slot(p->ainsn.insn); 226 free_insn_slot(p->ainsn.insn);
227 up(&kprobe_mutex); 227 mutex_unlock(&kprobe_mutex);
228} 228}
229 229
230static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) 230static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5bf17e41cd2d..66c009e10bac 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -162,9 +162,7 @@ int __init check_nmi_watchdog (void)
162 local_irq_enable(); 162 local_irq_enable();
163 mdelay((10*1000)/nmi_hz); // wait 10 ticks 163 mdelay((10*1000)/nmi_hz); // wait 10 ticks
164 164
165 for (cpu = 0; cpu < NR_CPUS; cpu++) { 165 for_each_online_cpu(cpu) {
166 if (!cpu_online(cpu))
167 continue;
168 if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { 166 if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
169 endflag = 1; 167 endflag = 1;
170 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", 168 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index 5876df116c92..e5f5ce7909a3 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -443,9 +443,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
443 if (!user_mode(regs)) 443 if (!user_mode(regs))
444 return 1; 444 return 1;
445 445
446 if (try_to_freeze())
447 goto no_signal;
448
449 if (!oldset) 446 if (!oldset)
450 oldset = &current->blocked; 447 oldset = &current->blocked;
451 448
@@ -463,7 +460,6 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
463 return handle_signal(signr, &info, &ka, oldset, regs); 460 return handle_signal(signr, &info, &ka, oldset, regs);
464 } 461 }
465 462
466 no_signal:
467 /* Did we come from a system call? */ 463 /* Did we come from a system call? */
468 if ((long)regs->orig_rax >= 0) { 464 if ((long)regs->orig_rax >= 0) {
469 /* Restart the system call - no handlers present */ 465 /* Restart the system call - no handlers present */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4cbf6d91571f..51f9bed455fa 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -83,9 +83,8 @@ int show_interrupts(struct seq_file *p, void *v)
83 83
84 if (i == 0) { 84 if (i == 0) {
85 seq_printf(p, " "); 85 seq_printf(p, " ");
86 for (j=0; j<NR_CPUS; j++) 86 for_each_online_cpu(j)
87 if (cpu_online(j)) 87 seq_printf(p, "CPU%d ",j);
88 seq_printf(p, "CPU%d ",j);
89 seq_putc(p, '\n'); 88 seq_putc(p, '\n');
90 } 89 }
91 90
@@ -98,9 +97,8 @@ int show_interrupts(struct seq_file *p, void *v)
98#ifndef CONFIG_SMP 97#ifndef CONFIG_SMP
99 seq_printf(p, "%10u ", kstat_irqs(i)); 98 seq_printf(p, "%10u ", kstat_irqs(i));
100#else 99#else
101 for (j = 0; j < NR_CPUS; j++) 100 for_each_online_cpu(j)
102 if (cpu_online(j)) 101 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
103 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
104#endif 102#endif
105 seq_printf(p, " %14s", irq_desc[i].handler->typename); 103 seq_printf(p, " %14s", irq_desc[i].handler->typename);
106 seq_printf(p, " %s", action->name); 104 seq_printf(p, " %s", action->name);
@@ -113,9 +111,8 @@ skip:
113 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 111 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
114 } else if (i == NR_IRQS) { 112 } else if (i == NR_IRQS) {
115 seq_printf(p, "NMI: "); 113 seq_printf(p, "NMI: ");
116 for (j = 0; j < NR_CPUS; j++) 114 for_each_online_cpu(j)
117 if (cpu_online(j)) 115 seq_printf(p, "%10u ", nmi_count(j));
118 seq_printf(p, "%10u ", nmi_count(j));
119 seq_putc(p, '\n'); 116 seq_putc(p, '\n');
120 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 117 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
121 } 118 }
diff --git a/arch/xtensa/platform-iss/console.c b/arch/xtensa/platform-iss/console.c
index 94fdfe474ac1..2a580efb58ec 100644
--- a/arch/xtensa/platform-iss/console.c
+++ b/arch/xtensa/platform-iss/console.c
@@ -31,10 +31,6 @@
31#include <linux/tty.h> 31#include <linux/tty.h>
32#include <linux/tty_flip.h> 32#include <linux/tty_flip.h>
33 33
34#ifdef SERIAL_INLINE
35#define _INLINE_ inline
36#endif
37
38#define SERIAL_MAX_NUM_LINES 1 34#define SERIAL_MAX_NUM_LINES 1
39#define SERIAL_TIMER_VALUE (20 * HZ) 35#define SERIAL_TIMER_VALUE (20 * HZ)
40 36
diff --git a/block/ioctl.c b/block/ioctl.c
index e1109491c234..35fdb7dc6512 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -42,9 +42,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
42 return -EINVAL; 42 return -EINVAL;
43 } 43 }
44 /* partition number in use? */ 44 /* partition number in use? */
45 down(&bdev->bd_sem); 45 mutex_lock(&bdev->bd_mutex);
46 if (disk->part[part - 1]) { 46 if (disk->part[part - 1]) {
47 up(&bdev->bd_sem); 47 mutex_unlock(&bdev->bd_mutex);
48 return -EBUSY; 48 return -EBUSY;
49 } 49 }
50 /* overlap? */ 50 /* overlap? */
@@ -55,13 +55,13 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
55 continue; 55 continue;
56 if (!(start+length <= s->start_sect || 56 if (!(start+length <= s->start_sect ||
57 start >= s->start_sect + s->nr_sects)) { 57 start >= s->start_sect + s->nr_sects)) {
58 up(&bdev->bd_sem); 58 mutex_unlock(&bdev->bd_mutex);
59 return -EBUSY; 59 return -EBUSY;
60 } 60 }
61 } 61 }
62 /* all seems OK */ 62 /* all seems OK */
63 add_partition(disk, part, start, length); 63 add_partition(disk, part, start, length);
64 up(&bdev->bd_sem); 64 mutex_unlock(&bdev->bd_mutex);
65 return 0; 65 return 0;
66 case BLKPG_DEL_PARTITION: 66 case BLKPG_DEL_PARTITION:
67 if (!disk->part[part-1]) 67 if (!disk->part[part-1])
@@ -71,9 +71,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
71 bdevp = bdget_disk(disk, part); 71 bdevp = bdget_disk(disk, part);
72 if (!bdevp) 72 if (!bdevp)
73 return -ENOMEM; 73 return -ENOMEM;
74 down(&bdevp->bd_sem); 74 mutex_lock(&bdevp->bd_mutex);
75 if (bdevp->bd_openers) { 75 if (bdevp->bd_openers) {
76 up(&bdevp->bd_sem); 76 mutex_unlock(&bdevp->bd_mutex);
77 bdput(bdevp); 77 bdput(bdevp);
78 return -EBUSY; 78 return -EBUSY;
79 } 79 }
@@ -81,10 +81,10 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
81 fsync_bdev(bdevp); 81 fsync_bdev(bdevp);
82 invalidate_bdev(bdevp, 0); 82 invalidate_bdev(bdevp, 0);
83 83
84 down(&bdev->bd_sem); 84 mutex_lock(&bdev->bd_mutex);
85 delete_partition(disk, part); 85 delete_partition(disk, part);
86 up(&bdev->bd_sem); 86 mutex_unlock(&bdev->bd_mutex);
87 up(&bdevp->bd_sem); 87 mutex_unlock(&bdevp->bd_mutex);
88 bdput(bdevp); 88 bdput(bdevp);
89 89
90 return 0; 90 return 0;
@@ -102,10 +102,10 @@ static int blkdev_reread_part(struct block_device *bdev)
102 return -EINVAL; 102 return -EINVAL;
103 if (!capable(CAP_SYS_ADMIN)) 103 if (!capable(CAP_SYS_ADMIN))
104 return -EACCES; 104 return -EACCES;
105 if (down_trylock(&bdev->bd_sem)) 105 if (!mutex_trylock(&bdev->bd_mutex))
106 return -EBUSY; 106 return -EBUSY;
107 res = rescan_partitions(disk, bdev); 107 res = rescan_partitions(disk, bdev);
108 up(&bdev->bd_sem); 108 mutex_unlock(&bdev->bd_mutex);
109 return res; 109 return res;
110} 110}
111 111
diff --git a/drivers/base/power/suspend.c b/drivers/base/power/suspend.c
index 8660779fb288..bdb60663f2ef 100644
--- a/drivers/base/power/suspend.c
+++ b/drivers/base/power/suspend.c
@@ -8,6 +8,7 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/vt_kern.h>
11#include <linux/device.h> 12#include <linux/device.h>
12#include "../base.h" 13#include "../base.h"
13#include "power.h" 14#include "power.h"
@@ -62,7 +63,6 @@ int suspend_device(struct device * dev, pm_message_t state)
62 return error; 63 return error;
63} 64}
64 65
65
66/** 66/**
67 * device_suspend - Save state and stop all devices in system. 67 * device_suspend - Save state and stop all devices in system.
68 * @state: Power state to put each device in. 68 * @state: Power state to put each device in.
@@ -82,6 +82,9 @@ int device_suspend(pm_message_t state)
82{ 82{
83 int error = 0; 83 int error = 0;
84 84
85 if (!is_console_suspend_safe())
86 return -EINVAL;
87
85 down(&dpm_sem); 88 down(&dpm_sem);
86 down(&dpm_list_sem); 89 down(&dpm_list_sem);
87 while (!list_empty(&dpm_active) && error == 0) { 90 while (!list_empty(&dpm_active) && error == 0) {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index cf39cf9aac25..e29b8926f80e 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3268,8 +3268,8 @@ clean2:
3268 unregister_blkdev(hba[i]->major, hba[i]->devname); 3268 unregister_blkdev(hba[i]->major, hba[i]->devname);
3269clean1: 3269clean1:
3270 release_io_mem(hba[i]); 3270 release_io_mem(hba[i]);
3271 free_hba(i);
3272 hba[i]->busy_initializing = 0; 3271 hba[i]->busy_initializing = 0;
3272 free_hba(i);
3273 return(-1); 3273 return(-1);
3274} 3274}
3275 3275
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index d23b54332d7e..fb2d0be7cdeb 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -179,6 +179,7 @@ static int print_unex = 1;
179#include <linux/devfs_fs_kernel.h> 179#include <linux/devfs_fs_kernel.h>
180#include <linux/platform_device.h> 180#include <linux/platform_device.h>
181#include <linux/buffer_head.h> /* for invalidate_buffers() */ 181#include <linux/buffer_head.h> /* for invalidate_buffers() */
182#include <linux/mutex.h>
182 183
183/* 184/*
184 * PS/2 floppies have much slower step rates than regular floppies. 185 * PS/2 floppies have much slower step rates than regular floppies.
@@ -413,7 +414,7 @@ static struct floppy_write_errors write_errors[N_DRIVE];
413static struct timer_list motor_off_timer[N_DRIVE]; 414static struct timer_list motor_off_timer[N_DRIVE];
414static struct gendisk *disks[N_DRIVE]; 415static struct gendisk *disks[N_DRIVE];
415static struct block_device *opened_bdev[N_DRIVE]; 416static struct block_device *opened_bdev[N_DRIVE];
416static DECLARE_MUTEX(open_lock); 417static DEFINE_MUTEX(open_lock);
417static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; 418static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
418 419
419/* 420/*
@@ -3333,7 +3334,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3333 if (type) { 3334 if (type) {
3334 if (!capable(CAP_SYS_ADMIN)) 3335 if (!capable(CAP_SYS_ADMIN))
3335 return -EPERM; 3336 return -EPERM;
3336 down(&open_lock); 3337 mutex_lock(&open_lock);
3337 LOCK_FDC(drive, 1); 3338 LOCK_FDC(drive, 1);
3338 floppy_type[type] = *g; 3339 floppy_type[type] = *g;
3339 floppy_type[type].name = "user format"; 3340 floppy_type[type].name = "user format";
@@ -3347,7 +3348,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3347 continue; 3348 continue;
3348 __invalidate_device(bdev); 3349 __invalidate_device(bdev);
3349 } 3350 }
3350 up(&open_lock); 3351 mutex_unlock(&open_lock);
3351 } else { 3352 } else {
3352 int oldStretch; 3353 int oldStretch;
3353 LOCK_FDC(drive, 1); 3354 LOCK_FDC(drive, 1);
@@ -3674,7 +3675,7 @@ static int floppy_release(struct inode *inode, struct file *filp)
3674{ 3675{
3675 int drive = (long)inode->i_bdev->bd_disk->private_data; 3676 int drive = (long)inode->i_bdev->bd_disk->private_data;
3676 3677
3677 down(&open_lock); 3678 mutex_lock(&open_lock);
3678 if (UDRS->fd_ref < 0) 3679 if (UDRS->fd_ref < 0)
3679 UDRS->fd_ref = 0; 3680 UDRS->fd_ref = 0;
3680 else if (!UDRS->fd_ref--) { 3681 else if (!UDRS->fd_ref--) {
@@ -3684,7 +3685,7 @@ static int floppy_release(struct inode *inode, struct file *filp)
3684 if (!UDRS->fd_ref) 3685 if (!UDRS->fd_ref)
3685 opened_bdev[drive] = NULL; 3686 opened_bdev[drive] = NULL;
3686 floppy_release_irq_and_dma(); 3687 floppy_release_irq_and_dma();
3687 up(&open_lock); 3688 mutex_unlock(&open_lock);
3688 return 0; 3689 return 0;
3689} 3690}
3690 3691
@@ -3702,7 +3703,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
3702 char *tmp; 3703 char *tmp;
3703 3704
3704 filp->private_data = (void *)0; 3705 filp->private_data = (void *)0;
3705 down(&open_lock); 3706 mutex_lock(&open_lock);
3706 old_dev = UDRS->fd_device; 3707 old_dev = UDRS->fd_device;
3707 if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev) 3708 if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev)
3708 goto out2; 3709 goto out2;
@@ -3785,7 +3786,7 @@ static int floppy_open(struct inode *inode, struct file *filp)
3785 if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE))) 3786 if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
3786 goto out; 3787 goto out;
3787 } 3788 }
3788 up(&open_lock); 3789 mutex_unlock(&open_lock);
3789 return 0; 3790 return 0;
3790out: 3791out:
3791 if (UDRS->fd_ref < 0) 3792 if (UDRS->fd_ref < 0)
@@ -3796,7 +3797,7 @@ out:
3796 opened_bdev[drive] = NULL; 3797 opened_bdev[drive] = NULL;
3797 floppy_release_irq_and_dma(); 3798 floppy_release_irq_and_dma();
3798out2: 3799out2:
3799 up(&open_lock); 3800 mutex_unlock(&open_lock);
3800 return res; 3801 return res;
3801} 3802}
3802 3803
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 0010704739e3..74bf0255e98f 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1144,7 +1144,7 @@ static int lo_ioctl(struct inode * inode, struct file * file,
1144 struct loop_device *lo = inode->i_bdev->bd_disk->private_data; 1144 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1145 int err; 1145 int err;
1146 1146
1147 down(&lo->lo_ctl_mutex); 1147 mutex_lock(&lo->lo_ctl_mutex);
1148 switch (cmd) { 1148 switch (cmd) {
1149 case LOOP_SET_FD: 1149 case LOOP_SET_FD:
1150 err = loop_set_fd(lo, file, inode->i_bdev, arg); 1150 err = loop_set_fd(lo, file, inode->i_bdev, arg);
@@ -1170,7 +1170,7 @@ static int lo_ioctl(struct inode * inode, struct file * file,
1170 default: 1170 default:
1171 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; 1171 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1172 } 1172 }
1173 up(&lo->lo_ctl_mutex); 1173 mutex_unlock(&lo->lo_ctl_mutex);
1174 return err; 1174 return err;
1175} 1175}
1176 1176
@@ -1178,9 +1178,9 @@ static int lo_open(struct inode *inode, struct file *file)
1178{ 1178{
1179 struct loop_device *lo = inode->i_bdev->bd_disk->private_data; 1179 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1180 1180
1181 down(&lo->lo_ctl_mutex); 1181 mutex_lock(&lo->lo_ctl_mutex);
1182 lo->lo_refcnt++; 1182 lo->lo_refcnt++;
1183 up(&lo->lo_ctl_mutex); 1183 mutex_unlock(&lo->lo_ctl_mutex);
1184 1184
1185 return 0; 1185 return 0;
1186} 1186}
@@ -1189,9 +1189,9 @@ static int lo_release(struct inode *inode, struct file *file)
1189{ 1189{
1190 struct loop_device *lo = inode->i_bdev->bd_disk->private_data; 1190 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1191 1191
1192 down(&lo->lo_ctl_mutex); 1192 mutex_lock(&lo->lo_ctl_mutex);
1193 --lo->lo_refcnt; 1193 --lo->lo_refcnt;
1194 up(&lo->lo_ctl_mutex); 1194 mutex_unlock(&lo->lo_ctl_mutex);
1195 1195
1196 return 0; 1196 return 0;
1197} 1197}
@@ -1233,12 +1233,12 @@ int loop_unregister_transfer(int number)
1233 xfer_funcs[n] = NULL; 1233 xfer_funcs[n] = NULL;
1234 1234
1235 for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) { 1235 for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) {
1236 down(&lo->lo_ctl_mutex); 1236 mutex_lock(&lo->lo_ctl_mutex);
1237 1237
1238 if (lo->lo_encryption == xfer) 1238 if (lo->lo_encryption == xfer)
1239 loop_release_xfer(lo); 1239 loop_release_xfer(lo);
1240 1240
1241 up(&lo->lo_ctl_mutex); 1241 mutex_unlock(&lo->lo_ctl_mutex);
1242 } 1242 }
1243 1243
1244 return 0; 1244 return 0;
@@ -1285,7 +1285,7 @@ static int __init loop_init(void)
1285 lo->lo_queue = blk_alloc_queue(GFP_KERNEL); 1285 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1286 if (!lo->lo_queue) 1286 if (!lo->lo_queue)
1287 goto out_mem4; 1287 goto out_mem4;
1288 init_MUTEX(&lo->lo_ctl_mutex); 1288 mutex_init(&lo->lo_ctl_mutex);
1289 init_completion(&lo->lo_done); 1289 init_completion(&lo->lo_done);
1290 init_completion(&lo->lo_bh_done); 1290 init_completion(&lo->lo_bh_done);
1291 lo->lo_number = i; 1291 lo->lo_number = i;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6997d8e6bfb5..a9bde30dadad 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -459,9 +459,9 @@ static void do_nbd_request(request_queue_t * q)
459 req->errors = 0; 459 req->errors = 0;
460 spin_unlock_irq(q->queue_lock); 460 spin_unlock_irq(q->queue_lock);
461 461
462 down(&lo->tx_lock); 462 mutex_lock(&lo->tx_lock);
463 if (unlikely(!lo->sock)) { 463 if (unlikely(!lo->sock)) {
464 up(&lo->tx_lock); 464 mutex_unlock(&lo->tx_lock);
465 printk(KERN_ERR "%s: Attempted send on closed socket\n", 465 printk(KERN_ERR "%s: Attempted send on closed socket\n",
466 lo->disk->disk_name); 466 lo->disk->disk_name);
467 req->errors++; 467 req->errors++;
@@ -484,7 +484,7 @@ static void do_nbd_request(request_queue_t * q)
484 } 484 }
485 485
486 lo->active_req = NULL; 486 lo->active_req = NULL;
487 up(&lo->tx_lock); 487 mutex_unlock(&lo->tx_lock);
488 wake_up_all(&lo->active_wq); 488 wake_up_all(&lo->active_wq);
489 489
490 spin_lock_irq(q->queue_lock); 490 spin_lock_irq(q->queue_lock);
@@ -534,9 +534,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
534 534
535 case NBD_CLEAR_SOCK: 535 case NBD_CLEAR_SOCK:
536 error = 0; 536 error = 0;
537 down(&lo->tx_lock); 537 mutex_lock(&lo->tx_lock);
538 lo->sock = NULL; 538 lo->sock = NULL;
539 up(&lo->tx_lock); 539 mutex_unlock(&lo->tx_lock);
540 file = lo->file; 540 file = lo->file;
541 lo->file = NULL; 541 lo->file = NULL;
542 nbd_clear_que(lo); 542 nbd_clear_que(lo);
@@ -590,7 +590,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
590 * FIXME: This code is duplicated from sys_shutdown, but 590 * FIXME: This code is duplicated from sys_shutdown, but
591 * there should be a more generic interface rather than 591 * there should be a more generic interface rather than
592 * calling socket ops directly here */ 592 * calling socket ops directly here */
593 down(&lo->tx_lock); 593 mutex_lock(&lo->tx_lock);
594 if (lo->sock) { 594 if (lo->sock) {
595 printk(KERN_WARNING "%s: shutting down socket\n", 595 printk(KERN_WARNING "%s: shutting down socket\n",
596 lo->disk->disk_name); 596 lo->disk->disk_name);
@@ -598,7 +598,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
598 SEND_SHUTDOWN|RCV_SHUTDOWN); 598 SEND_SHUTDOWN|RCV_SHUTDOWN);
599 lo->sock = NULL; 599 lo->sock = NULL;
600 } 600 }
601 up(&lo->tx_lock); 601 mutex_unlock(&lo->tx_lock);
602 file = lo->file; 602 file = lo->file;
603 lo->file = NULL; 603 lo->file = NULL;
604 nbd_clear_que(lo); 604 nbd_clear_que(lo);
@@ -683,7 +683,7 @@ static int __init nbd_init(void)
683 nbd_dev[i].flags = 0; 683 nbd_dev[i].flags = 0;
684 spin_lock_init(&nbd_dev[i].queue_lock); 684 spin_lock_init(&nbd_dev[i].queue_lock);
685 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 685 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
686 init_MUTEX(&nbd_dev[i].tx_lock); 686 mutex_init(&nbd_dev[i].tx_lock);
687 init_waitqueue_head(&nbd_dev[i].active_wq); 687 init_waitqueue_head(&nbd_dev[i].active_wq);
688 nbd_dev[i].blksize = 1024; 688 nbd_dev[i].blksize = 1024;
689 nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */ 689 nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 476a5b553f34..1d261f985f31 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -56,6 +56,7 @@
56#include <linux/seq_file.h> 56#include <linux/seq_file.h>
57#include <linux/miscdevice.h> 57#include <linux/miscdevice.h>
58#include <linux/suspend.h> 58#include <linux/suspend.h>
59#include <linux/mutex.h>
59#include <scsi/scsi_cmnd.h> 60#include <scsi/scsi_cmnd.h>
60#include <scsi/scsi_ioctl.h> 61#include <scsi/scsi_ioctl.h>
61#include <scsi/scsi.h> 62#include <scsi/scsi.h>
@@ -81,7 +82,7 @@
81static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; 82static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
82static struct proc_dir_entry *pkt_proc; 83static struct proc_dir_entry *pkt_proc;
83static int pkt_major; 84static int pkt_major;
84static struct semaphore ctl_mutex; /* Serialize open/close/setup/teardown */ 85static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
85static mempool_t *psd_pool; 86static mempool_t *psd_pool;
86 87
87 88
@@ -2018,7 +2019,7 @@ static int pkt_open(struct inode *inode, struct file *file)
2018 2019
2019 VPRINTK("pktcdvd: entering open\n"); 2020 VPRINTK("pktcdvd: entering open\n");
2020 2021
2021 down(&ctl_mutex); 2022 mutex_lock(&ctl_mutex);
2022 pd = pkt_find_dev_from_minor(iminor(inode)); 2023 pd = pkt_find_dev_from_minor(iminor(inode));
2023 if (!pd) { 2024 if (!pd) {
2024 ret = -ENODEV; 2025 ret = -ENODEV;
@@ -2044,14 +2045,14 @@ static int pkt_open(struct inode *inode, struct file *file)
2044 set_blocksize(inode->i_bdev, CD_FRAMESIZE); 2045 set_blocksize(inode->i_bdev, CD_FRAMESIZE);
2045 } 2046 }
2046 2047
2047 up(&ctl_mutex); 2048 mutex_unlock(&ctl_mutex);
2048 return 0; 2049 return 0;
2049 2050
2050out_dec: 2051out_dec:
2051 pd->refcnt--; 2052 pd->refcnt--;
2052out: 2053out:
2053 VPRINTK("pktcdvd: failed open (%d)\n", ret); 2054 VPRINTK("pktcdvd: failed open (%d)\n", ret);
2054 up(&ctl_mutex); 2055 mutex_unlock(&ctl_mutex);
2055 return ret; 2056 return ret;
2056} 2057}
2057 2058
@@ -2060,14 +2061,14 @@ static int pkt_close(struct inode *inode, struct file *file)
2060 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; 2061 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
2061 int ret = 0; 2062 int ret = 0;
2062 2063
2063 down(&ctl_mutex); 2064 mutex_lock(&ctl_mutex);
2064 pd->refcnt--; 2065 pd->refcnt--;
2065 BUG_ON(pd->refcnt < 0); 2066 BUG_ON(pd->refcnt < 0);
2066 if (pd->refcnt == 0) { 2067 if (pd->refcnt == 0) {
2067 int flush = test_bit(PACKET_WRITABLE, &pd->flags); 2068 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2068 pkt_release_dev(pd, flush); 2069 pkt_release_dev(pd, flush);
2069 } 2070 }
2070 up(&ctl_mutex); 2071 mutex_unlock(&ctl_mutex);
2071 return ret; 2072 return ret;
2072} 2073}
2073 2074
@@ -2596,21 +2597,21 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm
2596 case PKT_CTRL_CMD_SETUP: 2597 case PKT_CTRL_CMD_SETUP:
2597 if (!capable(CAP_SYS_ADMIN)) 2598 if (!capable(CAP_SYS_ADMIN))
2598 return -EPERM; 2599 return -EPERM;
2599 down(&ctl_mutex); 2600 mutex_lock(&ctl_mutex);
2600 ret = pkt_setup_dev(&ctrl_cmd); 2601 ret = pkt_setup_dev(&ctrl_cmd);
2601 up(&ctl_mutex); 2602 mutex_unlock(&ctl_mutex);
2602 break; 2603 break;
2603 case PKT_CTRL_CMD_TEARDOWN: 2604 case PKT_CTRL_CMD_TEARDOWN:
2604 if (!capable(CAP_SYS_ADMIN)) 2605 if (!capable(CAP_SYS_ADMIN))
2605 return -EPERM; 2606 return -EPERM;
2606 down(&ctl_mutex); 2607 mutex_lock(&ctl_mutex);
2607 ret = pkt_remove_dev(&ctrl_cmd); 2608 ret = pkt_remove_dev(&ctrl_cmd);
2608 up(&ctl_mutex); 2609 mutex_unlock(&ctl_mutex);
2609 break; 2610 break;
2610 case PKT_CTRL_CMD_STATUS: 2611 case PKT_CTRL_CMD_STATUS:
2611 down(&ctl_mutex); 2612 mutex_lock(&ctl_mutex);
2612 pkt_get_status(&ctrl_cmd); 2613 pkt_get_status(&ctrl_cmd);
2613 up(&ctl_mutex); 2614 mutex_unlock(&ctl_mutex);
2614 break; 2615 break;
2615 default: 2616 default:
2616 return -ENOTTY; 2617 return -ENOTTY;
@@ -2656,7 +2657,7 @@ static int __init pkt_init(void)
2656 goto out; 2657 goto out;
2657 } 2658 }
2658 2659
2659 init_MUTEX(&ctl_mutex); 2660 mutex_init(&ctl_mutex);
2660 2661
2661 pkt_proc = proc_mkdir("pktcdvd", proc_root_driver); 2662 pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
2662 2663
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index ffd6abd6d5a0..1c54f46d3f70 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -310,12 +310,12 @@ static int rd_ioctl(struct inode *inode, struct file *file,
310 * cache 310 * cache
311 */ 311 */
312 error = -EBUSY; 312 error = -EBUSY;
313 down(&bdev->bd_sem); 313 mutex_lock(&bdev->bd_mutex);
314 if (bdev->bd_openers <= 2) { 314 if (bdev->bd_openers <= 2) {
315 truncate_inode_pages(bdev->bd_inode->i_mapping, 0); 315 truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
316 error = 0; 316 error = 0;
317 } 317 }
318 up(&bdev->bd_sem); 318 mutex_unlock(&bdev->bd_mutex);
319 return error; 319 return error;
320} 320}
321 321
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 879bbc26ce96..a59876a0bfa1 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -407,7 +407,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
407 ENSURE(get_mcn, CDC_MCN); 407 ENSURE(get_mcn, CDC_MCN);
408 ENSURE(reset, CDC_RESET); 408 ENSURE(reset, CDC_RESET);
409 ENSURE(audio_ioctl, CDC_PLAY_AUDIO); 409 ENSURE(audio_ioctl, CDC_PLAY_AUDIO);
410 ENSURE(dev_ioctl, CDC_IOCTLS);
411 ENSURE(generic_packet, CDC_GENERIC_PACKET); 410 ENSURE(generic_packet, CDC_GENERIC_PACKET);
412 cdi->mc_flags = 0; 411 cdi->mc_flags = 0;
413 cdo->n_minors = 0; 412 cdo->n_minors = 0;
@@ -2196,395 +2195,586 @@ retry:
2196 return cdrom_read_cdda_old(cdi, ubuf, lba, nframes); 2195 return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
2197} 2196}
2198 2197
2199/* Just about every imaginable ioctl is supported in the Uniform layer 2198static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
2200 * these days. ATAPI / SCSI specific code now mainly resides in 2199 void __user *argp)
2201 * mmc_ioct().
2202 */
2203int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
2204 struct inode *ip, unsigned int cmd, unsigned long arg)
2205{ 2200{
2206 struct cdrom_device_ops *cdo = cdi->ops; 2201 struct cdrom_multisession ms_info;
2202 u8 requested_format;
2207 int ret; 2203 int ret;
2208 2204
2209 /* Try the generic SCSI command ioctl's first.. */ 2205 cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
2210 ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, (void __user *)arg); 2206
2211 if (ret != -ENOTTY) 2207 if (!(cdi->ops->capability & CDC_MULTI_SESSION))
2208 return -ENOSYS;
2209
2210 if (copy_from_user(&ms_info, argp, sizeof(ms_info)))
2211 return -EFAULT;
2212
2213 requested_format = ms_info.addr_format;
2214 if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
2215 return -EINVAL;
2216 ms_info.addr_format = CDROM_LBA;
2217
2218 ret = cdi->ops->get_last_session(cdi, &ms_info);
2219 if (ret)
2212 return ret; 2220 return ret;
2213 2221
2214 /* the first few commands do not deal with audio drive_info, but 2222 sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format);
2215 only with routines in cdrom device operations. */ 2223
2216 switch (cmd) { 2224 if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
2217 case CDROMMULTISESSION: { 2225 return -EFAULT;
2218 struct cdrom_multisession ms_info; 2226
2219 u_char requested_format; 2227 cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
2220 cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); 2228 return 0;
2221 if (!(cdo->capability & CDC_MULTI_SESSION)) 2229}
2222 return -ENOSYS; 2230
2223 IOCTL_IN(arg, struct cdrom_multisession, ms_info); 2231static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
2224 requested_format = ms_info.addr_format; 2232{
2225 if (!((requested_format == CDROM_MSF) || 2233 cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n");
2226 (requested_format == CDROM_LBA))) 2234
2227 return -EINVAL; 2235 if (!CDROM_CAN(CDC_OPEN_TRAY))
2228 ms_info.addr_format = CDROM_LBA; 2236 return -ENOSYS;
2229 if ((ret=cdo->get_last_session(cdi, &ms_info))) 2237 if (cdi->use_count != 1 || keeplocked)
2238 return -EBUSY;
2239 if (CDROM_CAN(CDC_LOCK)) {
2240 int ret = cdi->ops->lock_door(cdi, 0);
2241 if (ret)
2230 return ret; 2242 return ret;
2231 sanitize_format(&ms_info.addr, &ms_info.addr_format, 2243 }
2232 requested_format);
2233 IOCTL_OUT(arg, struct cdrom_multisession, ms_info);
2234 cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
2235 return 0;
2236 }
2237 2244
2238 case CDROMEJECT: { 2245 return cdi->ops->tray_move(cdi, 1);
2239 cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); 2246}
2240 if (!CDROM_CAN(CDC_OPEN_TRAY))
2241 return -ENOSYS;
2242 if (cdi->use_count != 1 || keeplocked)
2243 return -EBUSY;
2244 if (CDROM_CAN(CDC_LOCK))
2245 if ((ret=cdo->lock_door(cdi, 0)))
2246 return ret;
2247 2247
2248 return cdo->tray_move(cdi, 1); 2248static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi)
2249 } 2249{
2250 cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n");
2250 2251
2251 case CDROMCLOSETRAY: { 2252 if (!CDROM_CAN(CDC_CLOSE_TRAY))
2252 cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); 2253 return -ENOSYS;
2253 if (!CDROM_CAN(CDC_CLOSE_TRAY)) 2254 return cdi->ops->tray_move(cdi, 0);
2254 return -ENOSYS; 2255}
2255 return cdo->tray_move(cdi, 0);
2256 }
2257 2256
2258 case CDROMEJECT_SW: { 2257static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
2259 cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); 2258 unsigned long arg)
2260 if (!CDROM_CAN(CDC_OPEN_TRAY)) 2259{
2261 return -ENOSYS; 2260 cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n");
2262 if (keeplocked)
2263 return -EBUSY;
2264 cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
2265 if (arg)
2266 cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT;
2267 return 0;
2268 }
2269 2261
2270 case CDROM_MEDIA_CHANGED: { 2262 if (!CDROM_CAN(CDC_OPEN_TRAY))
2271 struct cdrom_changer_info *info; 2263 return -ENOSYS;
2272 int changed; 2264 if (keeplocked)
2265 return -EBUSY;
2273 2266
2274 cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); 2267 cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
2275 if (!CDROM_CAN(CDC_MEDIA_CHANGED)) 2268 if (arg)
2276 return -ENOSYS; 2269 cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT;
2270 return 0;
2271}
2277 2272
2278 /* cannot select disc or select current disc */ 2273static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
2279 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) 2274 unsigned long arg)
2280 return media_changed(cdi, 1); 2275{
2276 struct cdrom_changer_info *info;
2277 int ret;
2281 2278
2282 if ((unsigned int)arg >= cdi->capacity) 2279 cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n");
2283 return -EINVAL;
2284 2280
2285 info = kmalloc(sizeof(*info), GFP_KERNEL); 2281 if (!CDROM_CAN(CDC_MEDIA_CHANGED))
2286 if (!info) 2282 return -ENOSYS;
2287 return -ENOMEM;
2288 2283
2289 if ((ret = cdrom_read_mech_status(cdi, info))) { 2284 /* cannot select disc or select current disc */
2290 kfree(info); 2285 if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
2291 return ret; 2286 return media_changed(cdi, 1);
2292 }
2293 2287
2294 changed = info->slots[arg].change; 2288 if ((unsigned int)arg >= cdi->capacity)
2295 kfree(info); 2289 return -EINVAL;
2296 return changed;
2297 }
2298 2290
2299 case CDROM_SET_OPTIONS: { 2291 info = kmalloc(sizeof(*info), GFP_KERNEL);
2300 cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); 2292 if (!info)
2301 /* options need to be in sync with capability. too late for 2293 return -ENOMEM;
2302 that, so we have to check each one separately... */
2303 switch (arg) {
2304 case CDO_USE_FFLAGS:
2305 case CDO_CHECK_TYPE:
2306 break;
2307 case CDO_LOCK:
2308 if (!CDROM_CAN(CDC_LOCK))
2309 return -ENOSYS;
2310 break;
2311 case 0:
2312 return cdi->options;
2313 /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */
2314 default:
2315 if (!CDROM_CAN(arg))
2316 return -ENOSYS;
2317 }
2318 cdi->options |= (int) arg;
2319 return cdi->options;
2320 }
2321 2294
2322 case CDROM_CLEAR_OPTIONS: { 2295 ret = cdrom_read_mech_status(cdi, info);
2323 cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); 2296 if (!ret)
2324 cdi->options &= ~(int) arg; 2297 ret = info->slots[arg].change;
2325 return cdi->options; 2298 kfree(info);
2326 } 2299 return ret;
2300}
2327 2301
2328 case CDROM_SELECT_SPEED: { 2302static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
2329 cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); 2303 unsigned long arg)
2330 if (!CDROM_CAN(CDC_SELECT_SPEED)) 2304{
2331 return -ENOSYS; 2305 cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n");
2332 return cdo->select_speed(cdi, arg);
2333 }
2334 2306
2335 case CDROM_SELECT_DISC: { 2307 /*
2336 cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); 2308 * Options need to be in sync with capability.
2337 if (!CDROM_CAN(CDC_SELECT_DISC)) 2309 * Too late for that, so we have to check each one separately.
2310 */
2311 switch (arg) {
2312 case CDO_USE_FFLAGS:
2313 case CDO_CHECK_TYPE:
2314 break;
2315 case CDO_LOCK:
2316 if (!CDROM_CAN(CDC_LOCK))
2317 return -ENOSYS;
2318 break;
2319 case 0:
2320 return cdi->options;
2321 /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */
2322 default:
2323 if (!CDROM_CAN(arg))
2338 return -ENOSYS; 2324 return -ENOSYS;
2325 }
2326 cdi->options |= (int) arg;
2327 return cdi->options;
2328}
2339 2329
2340 if ((arg != CDSL_CURRENT) && (arg != CDSL_NONE)) 2330static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi,
2341 if ((int)arg >= cdi->capacity) 2331 unsigned long arg)
2342 return -EINVAL; 2332{
2343 2333 cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n");
2344 /* cdo->select_disc is a hook to allow a driver-specific
2345 * way of seleting disc. However, since there is no
2346 * equiv hook for cdrom_slot_status this may not
2347 * actually be useful...
2348 */
2349 if (cdo->select_disc != NULL)
2350 return cdo->select_disc(cdi, arg);
2351
2352 /* no driver specific select_disc(), call our own */
2353 cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n");
2354 return cdrom_select_disc(cdi, arg);
2355 }
2356 2334
2357 case CDROMRESET: { 2335 cdi->options &= ~(int) arg;
2358 if (!capable(CAP_SYS_ADMIN)) 2336 return cdi->options;
2359 return -EACCES; 2337}
2360 cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
2361 if (!CDROM_CAN(CDC_RESET))
2362 return -ENOSYS;
2363 invalidate_bdev(ip->i_bdev, 0);
2364 return cdo->reset(cdi);
2365 }
2366 2338
2367 case CDROM_LOCKDOOR: { 2339static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
2368 cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); 2340 unsigned long arg)
2369 if (!CDROM_CAN(CDC_LOCK)) 2341{
2370 return -EDRIVE_CANT_DO_THIS; 2342 cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
2371 keeplocked = arg ? 1 : 0;
2372 /* don't unlock the door on multiple opens,but allow root
2373 * to do so */
2374 if ((cdi->use_count != 1) && !arg && !capable(CAP_SYS_ADMIN))
2375 return -EBUSY;
2376 return cdo->lock_door(cdi, arg);
2377 }
2378 2343
2379 case CDROM_DEBUG: { 2344 if (!CDROM_CAN(CDC_SELECT_SPEED))
2380 if (!capable(CAP_SYS_ADMIN)) 2345 return -ENOSYS;
2381 return -EACCES; 2346 return cdi->ops->select_speed(cdi, arg);
2382 cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); 2347}
2383 debug = arg ? 1 : 0;
2384 return debug;
2385 }
2386 2348
2387 case CDROM_GET_CAPABILITY: { 2349static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
2388 cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); 2350 unsigned long arg)
2389 return (cdo->capability & ~cdi->mask); 2351{
2390 } 2352 cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n");
2353
2354 if (!CDROM_CAN(CDC_SELECT_DISC))
2355 return -ENOSYS;
2356
2357 if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
2358 if ((int)arg >= cdi->capacity)
2359 return -EINVAL;
2360 }
2361
2362 /*
2363 * ->select_disc is a hook to allow a driver-specific way of
2364 * seleting disc. However, since there is no equivalent hook for
2365 * cdrom_slot_status this may not actually be useful...
2366 */
2367 if (cdi->ops->select_disc)
2368 return cdi->ops->select_disc(cdi, arg);
2369
2370 cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n");
2371 return cdrom_select_disc(cdi, arg);
2372}
2373
2374static int cdrom_ioctl_reset(struct cdrom_device_info *cdi,
2375 struct block_device *bdev)
2376{
2377 cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
2378
2379 if (!capable(CAP_SYS_ADMIN))
2380 return -EACCES;
2381 if (!CDROM_CAN(CDC_RESET))
2382 return -ENOSYS;
2383 invalidate_bdev(bdev, 0);
2384 return cdi->ops->reset(cdi);
2385}
2386
2387static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
2388 unsigned long arg)
2389{
2390 cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl");
2391
2392 if (!CDROM_CAN(CDC_LOCK))
2393 return -EDRIVE_CANT_DO_THIS;
2394
2395 keeplocked = arg ? 1 : 0;
2396
2397 /*
2398 * Don't unlock the door on multiple opens by default, but allow
2399 * root to do so.
2400 */
2401 if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN))
2402 return -EBUSY;
2403 return cdi->ops->lock_door(cdi, arg);
2404}
2405
2406static int cdrom_ioctl_debug(struct cdrom_device_info *cdi,
2407 unsigned long arg)
2408{
2409 cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis");
2410
2411 if (!capable(CAP_SYS_ADMIN))
2412 return -EACCES;
2413 debug = arg ? 1 : 0;
2414 return debug;
2415}
2391 2416
2392/* The following function is implemented, although very few audio 2417static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi)
2418{
2419 cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
2420 return (cdi->ops->capability & ~cdi->mask);
2421}
2422
2423/*
2424 * The following function is implemented, although very few audio
2393 * discs give Universal Product Code information, which should just be 2425 * discs give Universal Product Code information, which should just be
2394 * the Medium Catalog Number on the box. Note, that the way the code 2426 * the Medium Catalog Number on the box. Note, that the way the code
2395 * is written on the CD is /not/ uniform across all discs! 2427 * is written on the CD is /not/ uniform across all discs!
2396 */ 2428 */
2397 case CDROM_GET_MCN: { 2429static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi,
2398 struct cdrom_mcn mcn; 2430 void __user *argp)
2399 cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); 2431{
2400 if (!(cdo->capability & CDC_MCN)) 2432 struct cdrom_mcn mcn;
2401 return -ENOSYS; 2433 int ret;
2402 if ((ret=cdo->get_mcn(cdi, &mcn)))
2403 return ret;
2404 IOCTL_OUT(arg, struct cdrom_mcn, mcn);
2405 cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
2406 return 0;
2407 }
2408 2434
2409 case CDROM_DRIVE_STATUS: { 2435 cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n");
2410 cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
2411 if (!(cdo->capability & CDC_DRIVE_STATUS))
2412 return -ENOSYS;
2413 if (!CDROM_CAN(CDC_SELECT_DISC))
2414 return cdo->drive_status(cdi, CDSL_CURRENT);
2415 if ((arg == CDSL_CURRENT) || (arg == CDSL_NONE))
2416 return cdo->drive_status(cdi, CDSL_CURRENT);
2417 if (((int)arg >= cdi->capacity))
2418 return -EINVAL;
2419 return cdrom_slot_status(cdi, arg);
2420 }
2421 2436
2422 /* Ok, this is where problems start. The current interface for the 2437 if (!(cdi->ops->capability & CDC_MCN))
2423 CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption 2438 return -ENOSYS;
2424 that CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunatly, 2439 ret = cdi->ops->get_mcn(cdi, &mcn);
2425 while this is often the case, it is also very common for CDs to 2440 if (ret)
2426 have some tracks with data, and some tracks with audio. Just 2441 return ret;
2427 because I feel like it, I declare the following to be the best
2428 way to cope. If the CD has ANY data tracks on it, it will be
2429 returned as a data CD. If it has any XA tracks, I will return
2430 it as that. Now I could simplify this interface by combining these
2431 returns with the above, but this more clearly demonstrates
2432 the problem with the current interface. Too bad this wasn't
2433 designed to use bitmasks... -Erik
2434
2435 Well, now we have the option CDS_MIXED: a mixed-type CD.
2436 User level programmers might feel the ioctl is not very useful.
2437 ---david
2438 */
2439 case CDROM_DISC_STATUS: {
2440 tracktype tracks;
2441 cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
2442 cdrom_count_tracks(cdi, &tracks);
2443 if (tracks.error)
2444 return(tracks.error);
2445
2446 /* Policy mode on */
2447 if (tracks.audio > 0) {
2448 if (tracks.data==0 && tracks.cdi==0 && tracks.xa==0)
2449 return CDS_AUDIO;
2450 else
2451 return CDS_MIXED;
2452 }
2453 if (tracks.cdi > 0) return CDS_XA_2_2;
2454 if (tracks.xa > 0) return CDS_XA_2_1;
2455 if (tracks.data > 0) return CDS_DATA_1;
2456 /* Policy mode off */
2457 2442
2458 cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n"); 2443 if (copy_to_user(argp, &mcn, sizeof(mcn)))
2459 return CDS_NO_INFO; 2444 return -EFAULT;
2460 } 2445 cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
2446 return 0;
2447}
2461 2448
2462 case CDROM_CHANGER_NSLOTS: { 2449static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
2463 cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); 2450 unsigned long arg)
2464 return cdi->capacity; 2451{
2465 } 2452 cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
2453
2454 if (!(cdi->ops->capability & CDC_DRIVE_STATUS))
2455 return -ENOSYS;
2456 if (!CDROM_CAN(CDC_SELECT_DISC) ||
2457 (arg == CDSL_CURRENT || arg == CDSL_NONE))
2458 return cdi->ops->drive_status(cdi, CDSL_CURRENT);
2459 if (((int)arg >= cdi->capacity))
2460 return -EINVAL;
2461 return cdrom_slot_status(cdi, arg);
2462}
2463
2464/*
2465 * Ok, this is where problems start. The current interface for the
2466 * CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption that
2467 * CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunatly, while this
2468 * is often the case, it is also very common for CDs to have some tracks
2469 * with data, and some tracks with audio. Just because I feel like it,
2470 * I declare the following to be the best way to cope. If the CD has ANY
2471 * data tracks on it, it will be returned as a data CD. If it has any XA
2472 * tracks, I will return it as that. Now I could simplify this interface
2473 * by combining these returns with the above, but this more clearly
2474 * demonstrates the problem with the current interface. Too bad this
2475 * wasn't designed to use bitmasks... -Erik
2476 *
2477 * Well, now we have the option CDS_MIXED: a mixed-type CD.
2478 * User level programmers might feel the ioctl is not very useful.
2479 * ---david
2480 */
2481static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
2482{
2483 tracktype tracks;
2484
2485 cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
2486
2487 cdrom_count_tracks(cdi, &tracks);
2488 if (tracks.error)
2489 return tracks.error;
2490
2491 /* Policy mode on */
2492 if (tracks.audio > 0) {
2493 if (!tracks.data && !tracks.cdi && !tracks.xa)
2494 return CDS_AUDIO;
2495 else
2496 return CDS_MIXED;
2466 } 2497 }
2467 2498
2468 /* use the ioctls that are implemented through the generic_packet() 2499 if (tracks.cdi > 0)
2469 interface. this may look at bit funny, but if -ENOTTY is 2500 return CDS_XA_2_2;
2470 returned that particular ioctl is not implemented and we 2501 if (tracks.xa > 0)
2471 let it go through the device specific ones. */ 2502 return CDS_XA_2_1;
2503 if (tracks.data > 0)
2504 return CDS_DATA_1;
2505 /* Policy mode off */
2506
2507 cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n");
2508 return CDS_NO_INFO;
2509}
2510
2511static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi)
2512{
2513 cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
2514 return cdi->capacity;
2515}
2516
2517static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
2518 void __user *argp)
2519{
2520 struct cdrom_subchnl q;
2521 u8 requested, back;
2522 int ret;
2523
2524 /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
2525
2526 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2527 return -ENOSYS;
2528 if (copy_from_user(&q, argp, sizeof(q)))
2529 return -EFAULT;
2530
2531 requested = q.cdsc_format;
2532 if (requested != CDROM_MSF && requested != CDROM_LBA)
2533 return -EINVAL;
2534 q.cdsc_format = CDROM_MSF;
2535
2536 ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q);
2537 if (ret)
2538 return ret;
2539
2540 back = q.cdsc_format; /* local copy */
2541 sanitize_format(&q.cdsc_absaddr, &back, requested);
2542 sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
2543
2544 if (copy_to_user(argp, &q, sizeof(q)))
2545 return -EFAULT;
2546 /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
2547 return 0;
2548}
2549
2550static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
2551 void __user *argp)
2552{
2553 struct cdrom_tochdr header;
2554 int ret;
2555
2556 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
2557
2558 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2559 return -ENOSYS;
2560 if (copy_from_user(&header, argp, sizeof(header)))
2561 return -EFAULT;
2562
2563 ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
2564 if (ret)
2565 return ret;
2566
2567 if (copy_to_user(argp, &header, sizeof(header)))
2568 return -EFAULT;
2569 /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
2570 return 0;
2571}
2572
2573static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
2574 void __user *argp)
2575{
2576 struct cdrom_tocentry entry;
2577 u8 requested_format;
2578 int ret;
2579
2580 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
2581
2582 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2583 return -ENOSYS;
2584 if (copy_from_user(&entry, argp, sizeof(entry)))
2585 return -EFAULT;
2586
2587 requested_format = entry.cdte_format;
2588 if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
2589 return -EINVAL;
2590 /* make interface to low-level uniform */
2591 entry.cdte_format = CDROM_MSF;
2592 ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry);
2593 if (ret)
2594 return ret;
2595 sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format);
2596
2597 if (copy_to_user(argp, &entry, sizeof(entry)))
2598 return -EFAULT;
2599 /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
2600 return 0;
2601}
2602
2603static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
2604 void __user *argp)
2605{
2606 struct cdrom_msf msf;
2607
2608 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
2609
2610 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2611 return -ENOSYS;
2612 if (copy_from_user(&msf, argp, sizeof(msf)))
2613 return -EFAULT;
2614 return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf);
2615}
2616
2617static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi,
2618 void __user *argp)
2619{
2620 struct cdrom_ti ti;
2621 int ret;
2622
2623 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
2624
2625 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2626 return -ENOSYS;
2627 if (copy_from_user(&ti, argp, sizeof(ti)))
2628 return -EFAULT;
2629
2630 ret = check_for_audio_disc(cdi, cdi->ops);
2631 if (ret)
2632 return ret;
2633 return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti);
2634}
2635static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
2636 void __user *argp)
2637{
2638 struct cdrom_volctrl volume;
2639
2640 cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
2641
2642 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2643 return -ENOSYS;
2644 if (copy_from_user(&volume, argp, sizeof(volume)))
2645 return -EFAULT;
2646 return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume);
2647}
2648
2649static int cdrom_ioctl_volread(struct cdrom_device_info *cdi,
2650 void __user *argp)
2651{
2652 struct cdrom_volctrl volume;
2653 int ret;
2654
2655 cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
2656
2657 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2658 return -ENOSYS;
2659
2660 ret = cdi->ops->audio_ioctl(cdi, CDROMVOLREAD, &volume);
2661 if (ret)
2662 return ret;
2663
2664 if (copy_to_user(argp, &volume, sizeof(volume)))
2665 return -EFAULT;
2666 return 0;
2667}
2668
2669static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
2670 unsigned int cmd)
2671{
2672 int ret;
2673
2674 cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
2675
2676 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2677 return -ENOSYS;
2678 ret = check_for_audio_disc(cdi, cdi->ops);
2679 if (ret)
2680 return ret;
2681 return cdi->ops->audio_ioctl(cdi, cmd, NULL);
2682}
2683
2684/*
2685 * Just about every imaginable ioctl is supported in the Uniform layer
2686 * these days.
2687 * ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
2688 */
2689int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
2690 struct inode *ip, unsigned int cmd, unsigned long arg)
2691{
2692 void __user *argp = (void __user *)arg;
2693 int ret;
2694
2695 /*
2696 * Try the generic SCSI command ioctl's first.
2697 */
2698 ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, argp);
2699 if (ret != -ENOTTY)
2700 return ret;
2701
2702 switch (cmd) {
2703 case CDROMMULTISESSION:
2704 return cdrom_ioctl_multisession(cdi, argp);
2705 case CDROMEJECT:
2706 return cdrom_ioctl_eject(cdi);
2707 case CDROMCLOSETRAY:
2708 return cdrom_ioctl_closetray(cdi);
2709 case CDROMEJECT_SW:
2710 return cdrom_ioctl_eject_sw(cdi, arg);
2711 case CDROM_MEDIA_CHANGED:
2712 return cdrom_ioctl_media_changed(cdi, arg);
2713 case CDROM_SET_OPTIONS:
2714 return cdrom_ioctl_set_options(cdi, arg);
2715 case CDROM_CLEAR_OPTIONS:
2716 return cdrom_ioctl_clear_options(cdi, arg);
2717 case CDROM_SELECT_SPEED:
2718 return cdrom_ioctl_select_speed(cdi, arg);
2719 case CDROM_SELECT_DISC:
2720 return cdrom_ioctl_select_disc(cdi, arg);
2721 case CDROMRESET:
2722 return cdrom_ioctl_reset(cdi, ip->i_bdev);
2723 case CDROM_LOCKDOOR:
2724 return cdrom_ioctl_lock_door(cdi, arg);
2725 case CDROM_DEBUG:
2726 return cdrom_ioctl_debug(cdi, arg);
2727 case CDROM_GET_CAPABILITY:
2728 return cdrom_ioctl_get_capability(cdi);
2729 case CDROM_GET_MCN:
2730 return cdrom_ioctl_get_mcn(cdi, argp);
2731 case CDROM_DRIVE_STATUS:
2732 return cdrom_ioctl_drive_status(cdi, arg);
2733 case CDROM_DISC_STATUS:
2734 return cdrom_ioctl_disc_status(cdi);
2735 case CDROM_CHANGER_NSLOTS:
2736 return cdrom_ioctl_changer_nslots(cdi);
2737 }
2738
2739 /*
2740 * Use the ioctls that are implemented through the generic_packet()
2741 * interface. this may look at bit funny, but if -ENOTTY is
2742 * returned that particular ioctl is not implemented and we
2743 * let it go through the device specific ones.
2744 */
2472 if (CDROM_CAN(CDC_GENERIC_PACKET)) { 2745 if (CDROM_CAN(CDC_GENERIC_PACKET)) {
2473 ret = mmc_ioctl(cdi, cmd, arg); 2746 ret = mmc_ioctl(cdi, cmd, arg);
2474 if (ret != -ENOTTY) { 2747 if (ret != -ENOTTY)
2475 return ret; 2748 return ret;
2476 }
2477 } 2749 }
2478 2750
2479 /* note: most of the cdinfo() calls are commented out here, 2751 /*
2480 because they fill up the sys log when CD players poll 2752 * Note: most of the cdinfo() calls are commented out here,
2481 the drive. */ 2753 * because they fill up the sys log when CD players poll
2754 * the drive.
2755 */
2482 switch (cmd) { 2756 switch (cmd) {
2483 case CDROMSUBCHNL: { 2757 case CDROMSUBCHNL:
2484 struct cdrom_subchnl q; 2758 return cdrom_ioctl_get_subchnl(cdi, argp);
2485 u_char requested, back; 2759 case CDROMREADTOCHDR:
2486 if (!CDROM_CAN(CDC_PLAY_AUDIO)) 2760 return cdrom_ioctl_read_tochdr(cdi, argp);
2487 return -ENOSYS; 2761 case CDROMREADTOCENTRY:
2488 /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ 2762 return cdrom_ioctl_read_tocentry(cdi, argp);
2489 IOCTL_IN(arg, struct cdrom_subchnl, q); 2763 case CDROMPLAYMSF:
2490 requested = q.cdsc_format; 2764 return cdrom_ioctl_play_msf(cdi, argp);
2491 if (!((requested == CDROM_MSF) || 2765 case CDROMPLAYTRKIND:
2492 (requested == CDROM_LBA))) 2766 return cdrom_ioctl_play_trkind(cdi, argp);
2493 return -EINVAL; 2767 case CDROMVOLCTRL:
2494 q.cdsc_format = CDROM_MSF; 2768 return cdrom_ioctl_volctrl(cdi, argp);
2495 if ((ret=cdo->audio_ioctl(cdi, cmd, &q))) 2769 case CDROMVOLREAD:
2496 return ret; 2770 return cdrom_ioctl_volread(cdi, argp);
2497 back = q.cdsc_format; /* local copy */
2498 sanitize_format(&q.cdsc_absaddr, &back, requested);
2499 sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
2500 IOCTL_OUT(arg, struct cdrom_subchnl, q);
2501 /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
2502 return 0;
2503 }
2504 case CDROMREADTOCHDR: {
2505 struct cdrom_tochdr header;
2506 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2507 return -ENOSYS;
2508 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
2509 IOCTL_IN(arg, struct cdrom_tochdr, header);
2510 if ((ret=cdo->audio_ioctl(cdi, cmd, &header)))
2511 return ret;
2512 IOCTL_OUT(arg, struct cdrom_tochdr, header);
2513 /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
2514 return 0;
2515 }
2516 case CDROMREADTOCENTRY: {
2517 struct cdrom_tocentry entry;
2518 u_char requested_format;
2519 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2520 return -ENOSYS;
2521 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
2522 IOCTL_IN(arg, struct cdrom_tocentry, entry);
2523 requested_format = entry.cdte_format;
2524 if (!((requested_format == CDROM_MSF) ||
2525 (requested_format == CDROM_LBA)))
2526 return -EINVAL;
2527 /* make interface to low-level uniform */
2528 entry.cdte_format = CDROM_MSF;
2529 if ((ret=cdo->audio_ioctl(cdi, cmd, &entry)))
2530 return ret;
2531 sanitize_format(&entry.cdte_addr,
2532 &entry.cdte_format, requested_format);
2533 IOCTL_OUT(arg, struct cdrom_tocentry, entry);
2534 /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
2535 return 0;
2536 }
2537 case CDROMPLAYMSF: {
2538 struct cdrom_msf msf;
2539 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2540 return -ENOSYS;
2541 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
2542 IOCTL_IN(arg, struct cdrom_msf, msf);
2543 return cdo->audio_ioctl(cdi, cmd, &msf);
2544 }
2545 case CDROMPLAYTRKIND: {
2546 struct cdrom_ti ti;
2547 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2548 return -ENOSYS;
2549 cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
2550 IOCTL_IN(arg, struct cdrom_ti, ti);
2551 CHECKAUDIO;
2552 return cdo->audio_ioctl(cdi, cmd, &ti);
2553 }
2554 case CDROMVOLCTRL: {
2555 struct cdrom_volctrl volume;
2556 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2557 return -ENOSYS;
2558 cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
2559 IOCTL_IN(arg, struct cdrom_volctrl, volume);
2560 return cdo->audio_ioctl(cdi, cmd, &volume);
2561 }
2562 case CDROMVOLREAD: {
2563 struct cdrom_volctrl volume;
2564 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2565 return -ENOSYS;
2566 cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
2567 if ((ret=cdo->audio_ioctl(cdi, cmd, &volume)))
2568 return ret;
2569 IOCTL_OUT(arg, struct cdrom_volctrl, volume);
2570 return 0;
2571 }
2572 case CDROMSTART: 2771 case CDROMSTART:
2573 case CDROMSTOP: 2772 case CDROMSTOP:
2574 case CDROMPAUSE: 2773 case CDROMPAUSE:
2575 case CDROMRESUME: { 2774 case CDROMRESUME:
2576 if (!CDROM_CAN(CDC_PLAY_AUDIO)) 2775 return cdrom_ioctl_audioctl(cdi, cmd);
2577 return -ENOSYS; 2776 }
2578 cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
2579 CHECKAUDIO;
2580 return cdo->audio_ioctl(cdi, cmd, NULL);
2581 }
2582 } /* switch */
2583 2777
2584 /* do the device specific ioctls */
2585 if (CDROM_CAN(CDC_IOCTLS))
2586 return cdo->dev_ioctl(cdi, cmd, arg);
2587
2588 return -ENOSYS; 2778 return -ENOSYS;
2589} 2779}
2590 2780
diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c
index 378e88d20757..72ffd64e8b1e 100644
--- a/drivers/cdrom/cdu31a.c
+++ b/drivers/cdrom/cdu31a.c
@@ -2668,7 +2668,7 @@ static int scd_audio_ioctl(struct cdrom_device_info *cdi,
2668 return retval; 2668 return retval;
2669} 2669}
2670 2670
2671static int scd_dev_ioctl(struct cdrom_device_info *cdi, 2671static int scd_read_audio(struct cdrom_device_info *cdi,
2672 unsigned int cmd, unsigned long arg) 2672 unsigned int cmd, unsigned long arg)
2673{ 2673{
2674 void __user *argp = (void __user *)arg; 2674 void __user *argp = (void __user *)arg;
@@ -2894,11 +2894,10 @@ static struct cdrom_device_ops scd_dops = {
2894 .get_mcn = scd_get_mcn, 2894 .get_mcn = scd_get_mcn,
2895 .reset = scd_reset, 2895 .reset = scd_reset,
2896 .audio_ioctl = scd_audio_ioctl, 2896 .audio_ioctl = scd_audio_ioctl,
2897 .dev_ioctl = scd_dev_ioctl,
2898 .capability = CDC_OPEN_TRAY | CDC_CLOSE_TRAY | CDC_LOCK | 2897 .capability = CDC_OPEN_TRAY | CDC_CLOSE_TRAY | CDC_LOCK |
2899 CDC_SELECT_SPEED | CDC_MULTI_SESSION | 2898 CDC_SELECT_SPEED | CDC_MULTI_SESSION |
2900 CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | 2899 CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO |
2901 CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS, 2900 CDC_RESET | CDC_DRIVE_STATUS,
2902 .n_minors = 1, 2901 .n_minors = 1,
2903}; 2902};
2904 2903
@@ -2936,6 +2935,9 @@ static int scd_block_ioctl(struct inode *inode, struct file *file,
2936 case CDROMCLOSETRAY: 2935 case CDROMCLOSETRAY:
2937 retval = scd_tray_move(&scd_info, 0); 2936 retval = scd_tray_move(&scd_info, 0);
2938 break; 2937 break;
2938 case CDROMREADAUDIO:
2939 retval = scd_read_audio(&scd_info, CDROMREADAUDIO, arg);
2940 break;
2939 default: 2941 default:
2940 retval = cdrom_ioctl(file, &scd_info, inode, cmd, arg); 2942 retval = cdrom_ioctl(file, &scd_info, inode, cmd, arg);
2941 } 2943 }
diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c
index ce127f7ec0f6..fad27a87ce35 100644
--- a/drivers/cdrom/cm206.c
+++ b/drivers/cdrom/cm206.c
@@ -1157,32 +1157,6 @@ static int cm206_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
1157 } 1157 }
1158} 1158}
1159 1159
1160/* Ioctl. These ioctls are specific to the cm206 driver. I have made
1161 some driver statistics accessible through ioctl calls.
1162 */
1163
1164static int cm206_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
1165 unsigned long arg)
1166{
1167 switch (cmd) {
1168#ifdef STATISTICS
1169 case CM206CTL_GET_STAT:
1170 if (arg >= NR_STATS)
1171 return -EINVAL;
1172 else
1173 return cd->stats[arg];
1174 case CM206CTL_GET_LAST_STAT:
1175 if (arg >= NR_STATS)
1176 return -EINVAL;
1177 else
1178 return cd->last_stat[arg];
1179#endif
1180 default:
1181 debug(("Unknown ioctl call 0x%x\n", cmd));
1182 return -EINVAL;
1183 }
1184}
1185
1186static int cm206_media_changed(struct cdrom_device_info *cdi, int disc_nr) 1160static int cm206_media_changed(struct cdrom_device_info *cdi, int disc_nr)
1187{ 1161{
1188 if (cd != NULL) { 1162 if (cd != NULL) {
@@ -1321,11 +1295,10 @@ static struct cdrom_device_ops cm206_dops = {
1321 .get_mcn = cm206_get_upc, 1295 .get_mcn = cm206_get_upc,
1322 .reset = cm206_reset, 1296 .reset = cm206_reset,
1323 .audio_ioctl = cm206_audio_ioctl, 1297 .audio_ioctl = cm206_audio_ioctl,
1324 .dev_ioctl = cm206_ioctl,
1325 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | 1298 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
1326 CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | 1299 CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
1327 CDC_MCN | CDC_PLAY_AUDIO | CDC_SELECT_SPEED | 1300 CDC_MCN | CDC_PLAY_AUDIO | CDC_SELECT_SPEED |
1328 CDC_IOCTLS | CDC_DRIVE_STATUS, 1301 CDC_DRIVE_STATUS,
1329 .n_minors = 1, 1302 .n_minors = 1,
1330}; 1303};
1331 1304
@@ -1350,6 +1323,21 @@ static int cm206_block_release(struct inode *inode, struct file *file)
1350static int cm206_block_ioctl(struct inode *inode, struct file *file, 1323static int cm206_block_ioctl(struct inode *inode, struct file *file,
1351 unsigned cmd, unsigned long arg) 1324 unsigned cmd, unsigned long arg)
1352{ 1325{
1326 switch (cmd) {
1327#ifdef STATISTICS
1328 case CM206CTL_GET_STAT:
1329 if (arg >= NR_STATS)
1330 return -EINVAL;
1331 return cd->stats[arg];
1332 case CM206CTL_GET_LAST_STAT:
1333 if (arg >= NR_STATS)
1334 return -EINVAL;
1335 return cd->last_stat[arg];
1336#endif
1337 default:
1338 break;
1339 }
1340
1353 return cdrom_ioctl(file, &cm206_info, inode, cmd, arg); 1341 return cdrom_ioctl(file, &cm206_info, inode, cmd, arg);
1354} 1342}
1355 1343
diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c
index 466e9c2974bd..4760f515f591 100644
--- a/drivers/cdrom/sbpcd.c
+++ b/drivers/cdrom/sbpcd.c
@@ -4160,332 +4160,6 @@ static int sbpcd_get_last_session(struct cdrom_device_info *cdi, struct cdrom_mu
4160 return 0; 4160 return 0;
4161} 4161}
4162 4162
4163/*==========================================================================*/
4164/*==========================================================================*/
4165/*
4166 * ioctl support
4167 */
4168static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4169 u_long arg)
4170{
4171 struct sbpcd_drive *p = cdi->handle;
4172 int i;
4173
4174 msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg);
4175 if (p->drv_id==-1) {
4176 msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name);
4177 return (-ENXIO); /* no such drive */
4178 }
4179 down(&ioctl_read_sem);
4180 if (p != current_drive)
4181 switch_drive(p);
4182
4183 msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd);
4184 switch (cmd) /* Sun-compatible */
4185 {
4186 case DDIOCSDBG: /* DDI Debug */
4187 if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM);
4188 i=sbpcd_dbg_ioctl(arg,1);
4189 RETURN_UP(i);
4190 case CDROMRESET: /* hard reset the drive */
4191 msg(DBG_IOC,"ioctl: CDROMRESET entered.\n");
4192 i=DriveReset();
4193 current_drive->audio_state=0;
4194 RETURN_UP(i);
4195
4196 case CDROMREADMODE1:
4197 msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n");
4198#ifdef SAFE_MIXED
4199 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
4200#endif /* SAFE_MIXED */
4201 cc_ModeSelect(CD_FRAMESIZE);
4202 cc_ModeSense();
4203 current_drive->mode=READ_M1;
4204 RETURN_UP(0);
4205
4206 case CDROMREADMODE2: /* not usable at the moment */
4207 msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n");
4208#ifdef SAFE_MIXED
4209 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
4210#endif /* SAFE_MIXED */
4211 cc_ModeSelect(CD_FRAMESIZE_RAW1);
4212 cc_ModeSense();
4213 current_drive->mode=READ_M2;
4214 RETURN_UP(0);
4215
4216 case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */
4217 msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n");
4218 if (current_drive->sbp_audsiz>0)
4219 vfree(current_drive->aud_buf);
4220 current_drive->aud_buf=NULL;
4221 current_drive->sbp_audsiz=arg;
4222
4223 if (current_drive->sbp_audsiz>16)
4224 {
4225 current_drive->sbp_audsiz = 0;
4226 RETURN_UP(current_drive->sbp_audsiz);
4227 }
4228
4229 if (current_drive->sbp_audsiz>0)
4230 {
4231 current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW);
4232 if (current_drive->aud_buf==NULL)
4233 {
4234 msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz);
4235 current_drive->sbp_audsiz=0;
4236 }
4237 else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz);
4238 }
4239 RETURN_UP(current_drive->sbp_audsiz);
4240
4241 case CDROMREADAUDIO:
4242 { /* start of CDROMREADAUDIO */
4243 int i=0, j=0, frame, block=0;
4244 u_int try=0;
4245 u_long timeout;
4246 u_char *p;
4247 u_int data_tries = 0;
4248 u_int data_waits = 0;
4249 u_int data_retrying = 0;
4250 int status_tries;
4251 int error_flag;
4252
4253 msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n");
4254 if (fam0_drive) RETURN_UP(-EINVAL);
4255 if (famL_drive) RETURN_UP(-EINVAL);
4256 if (famV_drive) RETURN_UP(-EINVAL);
4257 if (famT_drive) RETURN_UP(-EINVAL);
4258#ifdef SAFE_MIXED
4259 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
4260#endif /* SAFE_MIXED */
4261 if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL);
4262 if (copy_from_user(&read_audio, (void __user *)arg,
4263 sizeof(struct cdrom_read_audio)))
4264 RETURN_UP(-EFAULT);
4265 if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL);
4266 if (!access_ok(VERIFY_WRITE, read_audio.buf,
4267 read_audio.nframes*CD_FRAMESIZE_RAW))
4268 RETURN_UP(-EFAULT);
4269
4270 if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */
4271 block=msf2lba(&read_audio.addr.msf.minute);
4272 else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */
4273 block=read_audio.addr.lba;
4274 else RETURN_UP(-EINVAL);
4275#if 000
4276 i=cc_SetSpeed(speed_150,0,0);
4277 if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i);
4278#endif
4279 msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n",
4280 block, blk2msf(block));
4281 msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n");
4282#if OLD_BUSY
4283 while (busy_data) sbp_sleep(HZ/10); /* wait a bit */
4284 busy_audio=1;
4285#endif /* OLD_BUSY */
4286 error_flag=0;
4287 for (data_tries=5; data_tries>0; data_tries--)
4288 {
4289 msg(DBG_AUD,"data_tries=%d ...\n", data_tries);
4290 current_drive->mode=READ_AU;
4291 cc_ModeSelect(CD_FRAMESIZE_RAW);
4292 cc_ModeSense();
4293 for (status_tries=3; status_tries > 0; status_tries--)
4294 {
4295 flags_cmd_out |= f_respo3;
4296 cc_ReadStatus();
4297 if (sbp_status() != 0) break;
4298 if (st_check) cc_ReadError();
4299 sbp_sleep(1); /* wait a bit, try again */
4300 }
4301 if (status_tries == 0)
4302 {
4303 msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__);
4304 continue;
4305 }
4306 msg(DBG_AUD,"read_audio: sbp_status: ok.\n");
4307
4308 flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check;
4309 if (fam0L_drive)
4310 {
4311 flags_cmd_out |= f_lopsta | f_getsta | f_bit1;
4312 cmd_type=READ_M2;
4313 drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */
4314 drvcmd[1]=(block>>16)&0x000000ff;
4315 drvcmd[2]=(block>>8)&0x000000ff;
4316 drvcmd[3]=block&0x000000ff;
4317 drvcmd[4]=0;
4318 drvcmd[5]=read_audio.nframes; /* # of frames */
4319 drvcmd[6]=0;
4320 }
4321 else if (fam1_drive)
4322 {
4323 drvcmd[0]=CMD1_READ; /* "read frames", new drives */
4324 lba2msf(block,&drvcmd[1]); /* msf-bin format required */
4325 drvcmd[4]=0;
4326 drvcmd[5]=0;
4327 drvcmd[6]=read_audio.nframes; /* # of frames */
4328 }
4329 else if (fam2_drive)
4330 {
4331 drvcmd[0]=CMD2_READ_XA2;
4332 lba2msf(block,&drvcmd[1]); /* msf-bin format required */
4333 drvcmd[4]=0;
4334 drvcmd[5]=read_audio.nframes; /* # of frames */
4335 drvcmd[6]=0x11; /* raw mode */
4336 }
4337 else if (famT_drive) /* CD-55A: not tested yet */
4338 {
4339 }
4340 msg(DBG_AUD,"read_audio: before giving \"read\" command.\n");
4341 flags_cmd_out=f_putcmd;
4342 response_count=0;
4343 i=cmd_out();
4344 if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i);
4345 sbp_sleep(0);
4346 msg(DBG_AUD,"read_audio: after giving \"read\" command.\n");
4347 for (frame=1;frame<2 && !error_flag; frame++)
4348 {
4349 try=maxtim_data;
4350 for (timeout=jiffies+9*HZ; ; )
4351 {
4352 for ( ; try!=0;try--)
4353 {
4354 j=inb(CDi_status);
4355 if (!(j&s_not_data_ready)) break;
4356 if (!(j&s_not_result_ready)) break;
4357 if (fam0L_drive) if (j&s_attention) break;
4358 }
4359 if (try != 0 || time_after_eq(jiffies, timeout)) break;
4360 if (data_retrying == 0) data_waits++;
4361 data_retrying = 1;
4362 sbp_sleep(1);
4363 try = 1;
4364 }
4365 if (try==0)
4366 {
4367 msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n");
4368 error_flag++;
4369 break;
4370 }
4371 msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n");
4372 if (j&s_not_data_ready)
4373 {
4374 msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n");
4375 error_flag++;
4376 break;
4377 }
4378 msg(DBG_AUD,"read_audio: before reading data.\n");
4379 error_flag=0;
4380 p = current_drive->aud_buf;
4381 if (sbpro_type==1) OUT(CDo_sel_i_d,1);
4382 if (do_16bit)
4383 {
4384 u_short *p2 = (u_short *) p;
4385
4386 for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
4387 {
4388 if ((inb_p(CDi_status)&s_not_data_ready)) continue;
4389
4390 /* get one sample */
4391 *p2++ = inw_p(CDi_data);
4392 *p2++ = inw_p(CDi_data);
4393 }
4394 } else {
4395 for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
4396 {
4397 if ((inb_p(CDi_status)&s_not_data_ready)) continue;
4398
4399 /* get one sample */
4400 *p++ = inb_p(CDi_data);
4401 *p++ = inb_p(CDi_data);
4402 *p++ = inb_p(CDi_data);
4403 *p++ = inb_p(CDi_data);
4404 }
4405 }
4406 if (sbpro_type==1) OUT(CDo_sel_i_d,0);
4407 data_retrying = 0;
4408 }
4409 msg(DBG_AUD,"read_audio: after reading data.\n");
4410 if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */
4411 {
4412 msg(DBG_AUD,"read_audio: read aborted by drive\n");
4413#if 0000
4414 i=cc_DriveReset(); /* ugly fix to prevent a hang */
4415#else
4416 i=cc_ReadError();
4417#endif
4418 continue;
4419 }
4420 if (fam0L_drive)
4421 {
4422 i=maxtim_data;
4423 for (timeout=jiffies+9*HZ; time_before(jiffies, timeout); timeout--)
4424 {
4425 for ( ;i!=0;i--)
4426 {
4427 j=inb(CDi_status);
4428 if (!(j&s_not_data_ready)) break;
4429 if (!(j&s_not_result_ready)) break;
4430 if (j&s_attention) break;
4431 }
4432 if (i != 0 || time_after_eq(jiffies, timeout)) break;
4433 sbp_sleep(0);
4434 i = 1;
4435 }
4436 if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ");
4437 if (!(j&s_attention))
4438 {
4439 msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n");
4440 i=cc_DriveReset(); /* ugly fix to prevent a hang */
4441 continue;
4442 }
4443 }
4444 do
4445 {
4446 if (fam0L_drive) cc_ReadStatus();
4447 i=ResponseStatus(); /* builds status_bits, returns orig. status (old) or faked p_success (new) */
4448 if (i<0) { msg(DBG_AUD,
4449 "read_audio: cc_ReadStatus error after read: %02X\n",
4450 current_drive->status_bits);
4451 continue; /* FIXME */
4452 }
4453 }
4454 while ((fam0L_drive)&&(!st_check)&&(!(i&p_success)));
4455 if (st_check)
4456 {
4457 i=cc_ReadError();
4458 msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i);
4459 continue;
4460 }
4461 if (copy_to_user(read_audio.buf,
4462 current_drive->aud_buf,
4463 read_audio.nframes * CD_FRAMESIZE_RAW))
4464 RETURN_UP(-EFAULT);
4465 msg(DBG_AUD,"read_audio: copy_to_user done.\n");
4466 break;
4467 }
4468 cc_ModeSelect(CD_FRAMESIZE);
4469 cc_ModeSense();
4470 current_drive->mode=READ_M1;
4471#if OLD_BUSY
4472 busy_audio=0;
4473#endif /* OLD_BUSY */
4474 if (data_tries == 0)
4475 {
4476 msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__);
4477 RETURN_UP(-EIO);
4478 }
4479 msg(DBG_AUD,"read_audio: successful return.\n");
4480 RETURN_UP(0);
4481 } /* end of CDROMREADAUDIO */
4482
4483 default:
4484 msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd);
4485 RETURN_UP(-EINVAL);
4486 } /* end switch(cmd) */
4487}
4488
4489static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, 4163static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4490 void * arg) 4164 void * arg)
4491{ 4165{
@@ -4530,7 +4204,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4530 default: 4204 default:
4531 RETURN_UP(-EINVAL); 4205 RETURN_UP(-EINVAL);
4532 } 4206 }
4533 4207
4534 case CDROMRESUME: /* resume paused audio play */ 4208 case CDROMRESUME: /* resume paused audio play */
4535 msg(DBG_IOC,"ioctl: CDROMRESUME entered.\n"); 4209 msg(DBG_IOC,"ioctl: CDROMRESUME entered.\n");
4536 /* resume playing audio tracks when a previous PLAY AUDIO call has */ 4210 /* resume playing audio tracks when a previous PLAY AUDIO call has */
@@ -4544,12 +4218,12 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4544 if (i<0) RETURN_UP(-EIO); 4218 if (i<0) RETURN_UP(-EIO);
4545 current_drive->audio_state=audio_playing; 4219 current_drive->audio_state=audio_playing;
4546 RETURN_UP(0); 4220 RETURN_UP(0);
4547 4221
4548 case CDROMPLAYMSF: 4222 case CDROMPLAYMSF:
4549 msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n"); 4223 msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n");
4550#ifdef SAFE_MIXED 4224#ifdef SAFE_MIXED
4551 if (current_drive->has_data>1) RETURN_UP(-EBUSY); 4225 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
4552#endif /* SAFE_MIXED */ 4226#endif /* SAFE_MIXED */
4553 if (current_drive->audio_state==audio_playing) 4227 if (current_drive->audio_state==audio_playing)
4554 { 4228 {
4555 i=cc_Pause_Resume(1); 4229 i=cc_Pause_Resume(1);
@@ -4584,7 +4258,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4584 msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n"); 4258 msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n");
4585#ifdef SAFE_MIXED 4259#ifdef SAFE_MIXED
4586 if (current_drive->has_data>1) RETURN_UP(-EBUSY); 4260 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
4587#endif /* SAFE_MIXED */ 4261#endif /* SAFE_MIXED */
4588 if (current_drive->audio_state==audio_playing) 4262 if (current_drive->audio_state==audio_playing)
4589 { 4263 {
4590 msg(DBG_IOX,"CDROMPLAYTRKIND: already audio_playing.\n"); 4264 msg(DBG_IOX,"CDROMPLAYTRKIND: already audio_playing.\n");
@@ -4654,13 +4328,13 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4654 cc_DriveReset(); 4328 cc_DriveReset();
4655#endif 4329#endif
4656 RETURN_UP(i); 4330 RETURN_UP(i);
4657 4331
4658 case CDROMSTART: /* Spin up the drive */ 4332 case CDROMSTART: /* Spin up the drive */
4659 msg(DBG_IOC,"ioctl: CDROMSTART entered.\n"); 4333 msg(DBG_IOC,"ioctl: CDROMSTART entered.\n");
4660 cc_SpinUp(); 4334 cc_SpinUp();
4661 current_drive->audio_state=0; 4335 current_drive->audio_state=0;
4662 RETURN_UP(0); 4336 RETURN_UP(0);
4663 4337
4664 case CDROMVOLCTRL: /* Volume control */ 4338 case CDROMVOLCTRL: /* Volume control */
4665 msg(DBG_IOC,"ioctl: CDROMVOLCTRL entered.\n"); 4339 msg(DBG_IOC,"ioctl: CDROMVOLCTRL entered.\n");
4666 memcpy(&volctrl,(char *) arg,sizeof(volctrl)); 4340 memcpy(&volctrl,(char *) arg,sizeof(volctrl));
@@ -4670,7 +4344,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4670 current_drive->vol_ctrl1=volctrl.channel1; 4344 current_drive->vol_ctrl1=volctrl.channel1;
4671 i=cc_SetVolume(); 4345 i=cc_SetVolume();
4672 RETURN_UP(0); 4346 RETURN_UP(0);
4673 4347
4674 case CDROMVOLREAD: /* read Volume settings from drive */ 4348 case CDROMVOLREAD: /* read Volume settings from drive */
4675 msg(DBG_IOC,"ioctl: CDROMVOLREAD entered.\n"); 4349 msg(DBG_IOC,"ioctl: CDROMVOLREAD entered.\n");
4676 st=cc_GetVolume(); 4350 st=cc_GetVolume();
@@ -4694,7 +4368,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4694 if (i<0) { 4368 if (i<0) {
4695 j=cc_ReadError(); /* clear out error status from drive */ 4369 j=cc_ReadError(); /* clear out error status from drive */
4696 current_drive->audio_state=CDROM_AUDIO_NO_STATUS; 4370 current_drive->audio_state=CDROM_AUDIO_NO_STATUS;
4697 /* get and set the disk state here, 4371 /* get and set the disk state here,
4698 probably not the right place, but who cares! 4372 probably not the right place, but who cares!
4699 It makes it work properly! --AJK */ 4373 It makes it work properly! --AJK */
4700 if (current_drive->CD_changed==0xFF) { 4374 if (current_drive->CD_changed==0xFF) {
@@ -4715,8 +4389,8 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4715 } 4389 }
4716 } 4390 }
4717 memcpy(&SC, (void *) arg, sizeof(struct cdrom_subchnl)); 4391 memcpy(&SC, (void *) arg, sizeof(struct cdrom_subchnl));
4718 /* 4392 /*
4719 This virtual crap is very bogus! 4393 This virtual crap is very bogus!
4720 It doesn't detect when the cd is done playing audio! 4394 It doesn't detect when the cd is done playing audio!
4721 Lets do this right with proper hardware register reading! 4395 Lets do this right with proper hardware register reading!
4722 */ 4396 */
@@ -4775,7 +4449,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4775 SC.cdsc_trk,SC.cdsc_ind, 4449 SC.cdsc_trk,SC.cdsc_ind,
4776 SC.cdsc_absaddr,SC.cdsc_reladdr); 4450 SC.cdsc_absaddr,SC.cdsc_reladdr);
4777 RETURN_UP(0); 4451 RETURN_UP(0);
4778 4452
4779 default: 4453 default:
4780 msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); 4454 msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd);
4781 RETURN_UP(-EINVAL); 4455 RETURN_UP(-EINVAL);
@@ -4788,7 +4462,7 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
4788static void sbp_transfer(struct request *req) 4462static void sbp_transfer(struct request *req)
4789{ 4463{
4790 long offs; 4464 long offs;
4791 4465
4792 while ( (req->nr_sectors > 0) && 4466 while ( (req->nr_sectors > 0) &&
4793 (req->sector/4 >= current_drive->sbp_first_frame) && 4467 (req->sector/4 >= current_drive->sbp_first_frame) &&
4794 (req->sector/4 <= current_drive->sbp_last_frame) ) 4468 (req->sector/4 <= current_drive->sbp_last_frame) )
@@ -4807,11 +4481,11 @@ static void sbp_transfer(struct request *req)
4807 * 4481 *
4808 * This is a kludge so we don't need to modify end_request. 4482 * This is a kludge so we don't need to modify end_request.
4809 * We put the req we take out after INIT_REQUEST in the requests list, 4483 * We put the req we take out after INIT_REQUEST in the requests list,
4810 * so that end_request will discard it. 4484 * so that end_request will discard it.
4811 * 4485 *
4812 * The bug could be present in other block devices, perhaps we 4486 * The bug could be present in other block devices, perhaps we
4813 * should modify INIT_REQUEST and end_request instead, and 4487 * should modify INIT_REQUEST and end_request instead, and
4814 * change every block device.. 4488 * change every block device..
4815 * 4489 *
4816 * Could be a race here?? Could e.g. a timer interrupt schedule() us? 4490 * Could be a race here?? Could e.g. a timer interrupt schedule() us?
4817 * If so, we should copy end_request here, and do it right.. (or 4491 * If so, we should copy end_request here, and do it right.. (or
@@ -4883,19 +4557,19 @@ static void do_sbpcd_request(request_queue_t * q)
4883 while (busy_audio) sbp_sleep(HZ); /* wait a bit */ 4557 while (busy_audio) sbp_sleep(HZ); /* wait a bit */
4884 busy_data=1; 4558 busy_data=1;
4885#endif /* OLD_BUSY */ 4559#endif /* OLD_BUSY */
4886 4560
4887 if (p->audio_state==audio_playing) goto err_done; 4561 if (p->audio_state==audio_playing) goto err_done;
4888 if (p != current_drive) 4562 if (p != current_drive)
4889 switch_drive(p); 4563 switch_drive(p);
4890 4564
4891 block = req->sector; /* always numbered as 512-byte-pieces */ 4565 block = req->sector; /* always numbered as 512-byte-pieces */
4892 nsect = req->nr_sectors; /* always counted as 512-byte-pieces */ 4566 nsect = req->nr_sectors; /* always counted as 512-byte-pieces */
4893 4567
4894 msg(DBG_BSZ,"read sector %d (%d sectors)\n", block, nsect); 4568 msg(DBG_BSZ,"read sector %d (%d sectors)\n", block, nsect);
4895#if 0 4569#if 0
4896 msg(DBG_MUL,"read LBA %d\n", block/4); 4570 msg(DBG_MUL,"read LBA %d\n", block/4);
4897#endif 4571#endif
4898 4572
4899 sbp_transfer(req); 4573 sbp_transfer(req);
4900 /* if we satisfied the request from the buffer, we're done. */ 4574 /* if we satisfied the request from the buffer, we're done. */
4901 if (req->nr_sectors == 0) 4575 if (req->nr_sectors == 0)
@@ -4914,10 +4588,10 @@ static void do_sbpcd_request(request_queue_t * q)
4914 i=prepare(0,0); /* at moment not really a hassle check, but ... */ 4588 i=prepare(0,0); /* at moment not really a hassle check, but ... */
4915 if (i!=0) 4589 if (i!=0)
4916 msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i); 4590 msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i);
4917#endif /* FUTURE */ 4591#endif /* FUTURE */
4918 4592
4919 if (!st_spinning) cc_SpinUp(); 4593 if (!st_spinning) cc_SpinUp();
4920 4594
4921 for (data_tries=n_retries; data_tries > 0; data_tries--) 4595 for (data_tries=n_retries; data_tries > 0; data_tries--)
4922 { 4596 {
4923 for (status_tries=3; status_tries > 0; status_tries--) 4597 for (status_tries=3; status_tries > 0; status_tries--)
@@ -4940,7 +4614,7 @@ static void do_sbpcd_request(request_queue_t * q)
4940 { 4614 {
4941#ifdef SAFE_MIXED 4615#ifdef SAFE_MIXED
4942 current_drive->has_data=2; /* is really a data disk */ 4616 current_drive->has_data=2; /* is really a data disk */
4943#endif /* SAFE_MIXED */ 4617#endif /* SAFE_MIXED */
4944#ifdef DEBUG_GTL 4618#ifdef DEBUG_GTL
4945 printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n", 4619 printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n",
4946 xnr, req, req->sector, req->nr_sectors, jiffies); 4620 xnr, req, req->sector, req->nr_sectors, jiffies);
@@ -4951,7 +4625,7 @@ static void do_sbpcd_request(request_queue_t * q)
4951 goto request_loop; 4625 goto request_loop;
4952 } 4626 }
4953 } 4627 }
4954 4628
4955 err_done: 4629 err_done:
4956#if OLD_BUSY 4630#if OLD_BUSY
4957 busy_data=0; 4631 busy_data=0;
@@ -4976,7 +4650,7 @@ static void sbp_read_cmd(struct request *req)
4976 4650
4977 int i; 4651 int i;
4978 int block; 4652 int block;
4979 4653
4980 current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1; /* purge buffer */ 4654 current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1; /* purge buffer */
4981 current_drive->sbp_current = 0; 4655 current_drive->sbp_current = 0;
4982 block=req->sector/4; 4656 block=req->sector/4;
@@ -4993,7 +4667,7 @@ static void sbp_read_cmd(struct request *req)
4993 current_drive->sbp_read_frames=1; 4667 current_drive->sbp_read_frames=1;
4994 } 4668 }
4995 } 4669 }
4996 4670
4997 flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; 4671 flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check;
4998 clr_cmdbuf(); 4672 clr_cmdbuf();
4999 if (famV_drive) 4673 if (famV_drive)
@@ -5092,7 +4766,7 @@ static int sbp_data(struct request *req)
5092 int success; 4766 int success;
5093 int wait; 4767 int wait;
5094 int duration; 4768 int duration;
5095 4769
5096 error_flag=0; 4770 error_flag=0;
5097 success=0; 4771 success=0;
5098#if LONG_TIMING 4772#if LONG_TIMING
@@ -5105,12 +4779,12 @@ static int sbp_data(struct request *req)
5105 for (frame=0;frame<current_drive->sbp_read_frames&&!error_flag; frame++) 4779 for (frame=0;frame<current_drive->sbp_read_frames&&!error_flag; frame++)
5106 { 4780 {
5107 SBPCD_CLI; 4781 SBPCD_CLI;
5108 4782
5109 del_timer(&data_timer); 4783 del_timer(&data_timer);
5110 data_timer.expires=jiffies+max_latency; 4784 data_timer.expires=jiffies+max_latency;
5111 timed_out_data=0; 4785 timed_out_data=0;
5112 add_timer(&data_timer); 4786 add_timer(&data_timer);
5113 while (!timed_out_data) 4787 while (!timed_out_data)
5114 { 4788 {
5115 if (current_drive->f_multisession) try=maxtim_data*4; 4789 if (current_drive->f_multisession) try=maxtim_data*4;
5116 else try=maxtim_data; 4790 else try=maxtim_data;
@@ -5207,9 +4881,9 @@ static int sbp_data(struct request *req)
5207 else 4881 else
5208 { 4882 {
5209 sbp_sleep(1); 4883 sbp_sleep(1);
5210 OUT(CDo_sel_i_d,0); 4884 OUT(CDo_sel_i_d,0);
5211 i=inb(CDi_status); 4885 i=inb(CDi_status);
5212 } 4886 }
5213 if (!(i&s_not_data_ready)) 4887 if (!(i&s_not_data_ready))
5214 { 4888 {
5215 OUT(CDo_sel_i_d,1); 4889 OUT(CDo_sel_i_d,1);
@@ -5311,7 +4985,7 @@ static int sbp_data(struct request *req)
5311 } 4985 }
5312 SBPCD_STI; 4986 SBPCD_STI;
5313 } 4987 }
5314 4988
5315#if 0 4989#if 0
5316 if (!success) 4990 if (!success)
5317#endif 4991#endif
@@ -5370,7 +5044,326 @@ static int sbpcd_block_ioctl(struct inode *inode, struct file *file,
5370 unsigned cmd, unsigned long arg) 5044 unsigned cmd, unsigned long arg)
5371{ 5045{
5372 struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; 5046 struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
5373 return cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg); 5047 struct cdrom_device_info *cdi = p->sbpcd_infop;
5048 int ret, i;
5049
5050 ret = cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg);
5051 if (ret != -ENOSYS)
5052 return ret;
5053
5054 msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg);
5055 if (p->drv_id==-1) {
5056 msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name);
5057 return (-ENXIO); /* no such drive */
5058 }
5059 down(&ioctl_read_sem);
5060 if (p != current_drive)
5061 switch_drive(p);
5062
5063 msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd);
5064 switch (cmd) /* Sun-compatible */
5065 {
5066 case DDIOCSDBG: /* DDI Debug */
5067 if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM);
5068 i=sbpcd_dbg_ioctl(arg,1);
5069 RETURN_UP(i);
5070 case CDROMRESET: /* hard reset the drive */
5071 msg(DBG_IOC,"ioctl: CDROMRESET entered.\n");
5072 i=DriveReset();
5073 current_drive->audio_state=0;
5074 RETURN_UP(i);
5075
5076 case CDROMREADMODE1:
5077 msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n");
5078#ifdef SAFE_MIXED
5079 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
5080#endif /* SAFE_MIXED */
5081 cc_ModeSelect(CD_FRAMESIZE);
5082 cc_ModeSense();
5083 current_drive->mode=READ_M1;
5084 RETURN_UP(0);
5085
5086 case CDROMREADMODE2: /* not usable at the moment */
5087 msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n");
5088#ifdef SAFE_MIXED
5089 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
5090#endif /* SAFE_MIXED */
5091 cc_ModeSelect(CD_FRAMESIZE_RAW1);
5092 cc_ModeSense();
5093 current_drive->mode=READ_M2;
5094 RETURN_UP(0);
5095
5096 case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */
5097 msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n");
5098 if (current_drive->sbp_audsiz>0)
5099 vfree(current_drive->aud_buf);
5100 current_drive->aud_buf=NULL;
5101 current_drive->sbp_audsiz=arg;
5102
5103 if (current_drive->sbp_audsiz>16)
5104 {
5105 current_drive->sbp_audsiz = 0;
5106 RETURN_UP(current_drive->sbp_audsiz);
5107 }
5108
5109 if (current_drive->sbp_audsiz>0)
5110 {
5111 current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW);
5112 if (current_drive->aud_buf==NULL)
5113 {
5114 msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz);
5115 current_drive->sbp_audsiz=0;
5116 }
5117 else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz);
5118 }
5119 RETURN_UP(current_drive->sbp_audsiz);
5120
5121 case CDROMREADAUDIO:
5122 { /* start of CDROMREADAUDIO */
5123 int i=0, j=0, frame, block=0;
5124 u_int try=0;
5125 u_long timeout;
5126 u_char *p;
5127 u_int data_tries = 0;
5128 u_int data_waits = 0;
5129 u_int data_retrying = 0;
5130 int status_tries;
5131 int error_flag;
5132
5133 msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n");
5134 if (fam0_drive) RETURN_UP(-EINVAL);
5135 if (famL_drive) RETURN_UP(-EINVAL);
5136 if (famV_drive) RETURN_UP(-EINVAL);
5137 if (famT_drive) RETURN_UP(-EINVAL);
5138#ifdef SAFE_MIXED
5139 if (current_drive->has_data>1) RETURN_UP(-EBUSY);
5140#endif /* SAFE_MIXED */
5141 if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL);
5142 if (copy_from_user(&read_audio, (void __user *)arg,
5143 sizeof(struct cdrom_read_audio)))
5144 RETURN_UP(-EFAULT);
5145 if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL);
5146 if (!access_ok(VERIFY_WRITE, read_audio.buf,
5147 read_audio.nframes*CD_FRAMESIZE_RAW))
5148 RETURN_UP(-EFAULT);
5149
5150 if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */
5151 block=msf2lba(&read_audio.addr.msf.minute);
5152 else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */
5153 block=read_audio.addr.lba;
5154 else RETURN_UP(-EINVAL);
5155#if 000
5156 i=cc_SetSpeed(speed_150,0,0);
5157 if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i);
5158#endif
5159 msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n",
5160 block, blk2msf(block));
5161 msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n");
5162#if OLD_BUSY
5163 while (busy_data) sbp_sleep(HZ/10); /* wait a bit */
5164 busy_audio=1;
5165#endif /* OLD_BUSY */
5166 error_flag=0;
5167 for (data_tries=5; data_tries>0; data_tries--)
5168 {
5169 msg(DBG_AUD,"data_tries=%d ...\n", data_tries);
5170 current_drive->mode=READ_AU;
5171 cc_ModeSelect(CD_FRAMESIZE_RAW);
5172 cc_ModeSense();
5173 for (status_tries=3; status_tries > 0; status_tries--)
5174 {
5175 flags_cmd_out |= f_respo3;
5176 cc_ReadStatus();
5177 if (sbp_status() != 0) break;
5178 if (st_check) cc_ReadError();
5179 sbp_sleep(1); /* wait a bit, try again */
5180 }
5181 if (status_tries == 0)
5182 {
5183 msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__);
5184 continue;
5185 }
5186 msg(DBG_AUD,"read_audio: sbp_status: ok.\n");
5187
5188 flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check;
5189 if (fam0L_drive)
5190 {
5191 flags_cmd_out |= f_lopsta | f_getsta | f_bit1;
5192 cmd_type=READ_M2;
5193 drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */
5194 drvcmd[1]=(block>>16)&0x000000ff;
5195 drvcmd[2]=(block>>8)&0x000000ff;
5196 drvcmd[3]=block&0x000000ff;
5197 drvcmd[4]=0;
5198 drvcmd[5]=read_audio.nframes; /* # of frames */
5199 drvcmd[6]=0;
5200 }
5201 else if (fam1_drive)
5202 {
5203 drvcmd[0]=CMD1_READ; /* "read frames", new drives */
5204 lba2msf(block,&drvcmd[1]); /* msf-bin format required */
5205 drvcmd[4]=0;
5206 drvcmd[5]=0;
5207 drvcmd[6]=read_audio.nframes; /* # of frames */
5208 }
5209 else if (fam2_drive)
5210 {
5211 drvcmd[0]=CMD2_READ_XA2;
5212 lba2msf(block,&drvcmd[1]); /* msf-bin format required */
5213 drvcmd[4]=0;
5214 drvcmd[5]=read_audio.nframes; /* # of frames */
5215 drvcmd[6]=0x11; /* raw mode */
5216 }
5217 else if (famT_drive) /* CD-55A: not tested yet */
5218 {
5219 }
5220 msg(DBG_AUD,"read_audio: before giving \"read\" command.\n");
5221 flags_cmd_out=f_putcmd;
5222 response_count=0;
5223 i=cmd_out();
5224 if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i);
5225 sbp_sleep(0);
5226 msg(DBG_AUD,"read_audio: after giving \"read\" command.\n");
5227 for (frame=1;frame<2 && !error_flag; frame++)
5228 {
5229 try=maxtim_data;
5230 for (timeout=jiffies+9*HZ; ; )
5231 {
5232 for ( ; try!=0;try--)
5233 {
5234 j=inb(CDi_status);
5235 if (!(j&s_not_data_ready)) break;
5236 if (!(j&s_not_result_ready)) break;
5237 if (fam0L_drive) if (j&s_attention) break;
5238 }
5239 if (try != 0 || time_after_eq(jiffies, timeout)) break;
5240 if (data_retrying == 0) data_waits++;
5241 data_retrying = 1;
5242 sbp_sleep(1);
5243 try = 1;
5244 }
5245 if (try==0)
5246 {
5247 msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n");
5248 error_flag++;
5249 break;
5250 }
5251 msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n");
5252 if (j&s_not_data_ready)
5253 {
5254 msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n");
5255 error_flag++;
5256 break;
5257 }
5258 msg(DBG_AUD,"read_audio: before reading data.\n");
5259 error_flag=0;
5260 p = current_drive->aud_buf;
5261 if (sbpro_type==1) OUT(CDo_sel_i_d,1);
5262 if (do_16bit)
5263 {
5264 u_short *p2 = (u_short *) p;
5265
5266 for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
5267 {
5268 if ((inb_p(CDi_status)&s_not_data_ready)) continue;
5269
5270 /* get one sample */
5271 *p2++ = inw_p(CDi_data);
5272 *p2++ = inw_p(CDi_data);
5273 }
5274 } else {
5275 for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
5276 {
5277 if ((inb_p(CDi_status)&s_not_data_ready)) continue;
5278
5279 /* get one sample */
5280 *p++ = inb_p(CDi_data);
5281 *p++ = inb_p(CDi_data);
5282 *p++ = inb_p(CDi_data);
5283 *p++ = inb_p(CDi_data);
5284 }
5285 }
5286 if (sbpro_type==1) OUT(CDo_sel_i_d,0);
5287 data_retrying = 0;
5288 }
5289 msg(DBG_AUD,"read_audio: after reading data.\n");
5290 if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */
5291 {
5292 msg(DBG_AUD,"read_audio: read aborted by drive\n");
5293#if 0000
5294 i=cc_DriveReset(); /* ugly fix to prevent a hang */
5295#else
5296 i=cc_ReadError();
5297#endif
5298 continue;
5299 }
5300 if (fam0L_drive)
5301 {
5302 i=maxtim_data;
5303 for (timeout=jiffies+9*HZ; time_before(jiffies, timeout); timeout--)
5304 {
5305 for ( ;i!=0;i--)
5306 {
5307 j=inb(CDi_status);
5308 if (!(j&s_not_data_ready)) break;
5309 if (!(j&s_not_result_ready)) break;
5310 if (j&s_attention) break;
5311 }
5312 if (i != 0 || time_after_eq(jiffies, timeout)) break;
5313 sbp_sleep(0);
5314 i = 1;
5315 }
5316 if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ");
5317 if (!(j&s_attention))
5318 {
5319 msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n");
5320 i=cc_DriveReset(); /* ugly fix to prevent a hang */
5321 continue;
5322 }
5323 }
5324 do
5325 {
5326 if (fam0L_drive) cc_ReadStatus();
5327 i=ResponseStatus(); /* builds status_bits, returns orig. status (old) or faked p_success (new) */
5328 if (i<0) { msg(DBG_AUD,
5329 "read_audio: cc_ReadStatus error after read: %02X\n",
5330 current_drive->status_bits);
5331 continue; /* FIXME */
5332 }
5333 }
5334 while ((fam0L_drive)&&(!st_check)&&(!(i&p_success)));
5335 if (st_check)
5336 {
5337 i=cc_ReadError();
5338 msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i);
5339 continue;
5340 }
5341 if (copy_to_user(read_audio.buf,
5342 current_drive->aud_buf,
5343 read_audio.nframes * CD_FRAMESIZE_RAW))
5344 RETURN_UP(-EFAULT);
5345 msg(DBG_AUD,"read_audio: copy_to_user done.\n");
5346 break;
5347 }
5348 cc_ModeSelect(CD_FRAMESIZE);
5349 cc_ModeSense();
5350 current_drive->mode=READ_M1;
5351#if OLD_BUSY
5352 busy_audio=0;
5353#endif /* OLD_BUSY */
5354 if (data_tries == 0)
5355 {
5356 msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__);
5357 RETURN_UP(-EIO);
5358 }
5359 msg(DBG_AUD,"read_audio: successful return.\n");
5360 RETURN_UP(0);
5361 } /* end of CDROMREADAUDIO */
5362
5363 default:
5364 msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd);
5365 RETURN_UP(-EINVAL);
5366 } /* end switch(cmd) */
5374} 5367}
5375 5368
5376static int sbpcd_block_media_changed(struct gendisk *disk) 5369static int sbpcd_block_media_changed(struct gendisk *disk)
@@ -5478,10 +5471,9 @@ static struct cdrom_device_ops sbpcd_dops = {
5478 .get_mcn = sbpcd_get_mcn, 5471 .get_mcn = sbpcd_get_mcn,
5479 .reset = sbpcd_reset, 5472 .reset = sbpcd_reset,
5480 .audio_ioctl = sbpcd_audio_ioctl, 5473 .audio_ioctl = sbpcd_audio_ioctl,
5481 .dev_ioctl = sbpcd_dev_ioctl,
5482 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | 5474 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
5483 CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | 5475 CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
5484 CDC_MCN | CDC_PLAY_AUDIO | CDC_IOCTLS, 5476 CDC_MCN | CDC_PLAY_AUDIO,
5485 .n_minors = 1, 5477 .n_minors = 1,
5486}; 5478};
5487 5479
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index e27617259552..c0f817ba7adb 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -627,7 +627,7 @@ static struct cdrom_device_ops viocd_dops = {
627 .media_changed = viocd_media_changed, 627 .media_changed = viocd_media_changed,
628 .lock_door = viocd_lock_door, 628 .lock_door = viocd_lock_door,
629 .generic_packet = viocd_packet, 629 .generic_packet = viocd_packet,
630 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM 630 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
631}; 631};
632 632
633static int __init find_capability(const char *type) 633static int __init find_capability(const char *type)
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 7ac365b5d9ec..6602b3156df5 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -46,8 +46,6 @@
46 46
47/* Sanity checks */ 47/* Sanity checks */
48 48
49#define SERIAL_INLINE
50
51#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) 49#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT)
52#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ 50#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
53 tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s) 51 tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s)
@@ -95,10 +93,6 @@ static char *serial_version = "4.30";
95#include <asm/amigahw.h> 93#include <asm/amigahw.h>
96#include <asm/amigaints.h> 94#include <asm/amigaints.h>
97 95
98#ifdef SERIAL_INLINE
99#define _INLINE_ inline
100#endif
101
102#define custom amiga_custom 96#define custom amiga_custom
103static char *serial_name = "Amiga-builtin serial driver"; 97static char *serial_name = "Amiga-builtin serial driver";
104 98
@@ -253,14 +247,14 @@ static void rs_start(struct tty_struct *tty)
253 * This routine is used by the interrupt handler to schedule 247 * This routine is used by the interrupt handler to schedule
254 * processing in the software interrupt portion of the driver. 248 * processing in the software interrupt portion of the driver.
255 */ 249 */
256static _INLINE_ void rs_sched_event(struct async_struct *info, 250static void rs_sched_event(struct async_struct *info,
257 int event) 251 int event)
258{ 252{
259 info->event |= 1 << event; 253 info->event |= 1 << event;
260 tasklet_schedule(&info->tlet); 254 tasklet_schedule(&info->tlet);
261} 255}
262 256
263static _INLINE_ void receive_chars(struct async_struct *info) 257static void receive_chars(struct async_struct *info)
264{ 258{
265 int status; 259 int status;
266 int serdatr; 260 int serdatr;
@@ -349,7 +343,7 @@ out:
349 return; 343 return;
350} 344}
351 345
352static _INLINE_ void transmit_chars(struct async_struct *info) 346static void transmit_chars(struct async_struct *info)
353{ 347{
354 custom.intreq = IF_TBE; 348 custom.intreq = IF_TBE;
355 mb(); 349 mb();
@@ -389,7 +383,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info)
389 } 383 }
390} 384}
391 385
392static _INLINE_ void check_modem_status(struct async_struct *info) 386static void check_modem_status(struct async_struct *info)
393{ 387{
394 unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR); 388 unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR);
395 unsigned char dstatus; 389 unsigned char dstatus;
@@ -1959,7 +1953,7 @@ done:
1959 * number, and identifies which options were configured into this 1953 * number, and identifies which options were configured into this
1960 * driver. 1954 * driver.
1961 */ 1955 */
1962static _INLINE_ void show_serial_version(void) 1956static void show_serial_version(void)
1963{ 1957{
1964 printk(KERN_INFO "%s version %s\n", serial_name, serial_version); 1958 printk(KERN_INFO "%s version %s\n", serial_name, serial_version);
1965} 1959}
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c
index e38a5f0e07bb..5e59c0b42731 100644
--- a/drivers/char/generic_serial.c
+++ b/drivers/char/generic_serial.c
@@ -48,8 +48,8 @@ static int gs_debug;
48#define NEW_WRITE_LOCKING 1 48#define NEW_WRITE_LOCKING 1
49#if NEW_WRITE_LOCKING 49#if NEW_WRITE_LOCKING
50#define DECL /* Nothing */ 50#define DECL /* Nothing */
51#define LOCKIT down (& port->port_write_sem); 51#define LOCKIT mutex_lock(& port->port_write_mutex);
52#define RELEASEIT up (&port->port_write_sem); 52#define RELEASEIT mutex_unlock(&port->port_write_mutex);
53#else 53#else
54#define DECL unsigned long flags; 54#define DECL unsigned long flags;
55#define LOCKIT save_flags (flags);cli () 55#define LOCKIT save_flags (flags);cli ()
@@ -124,14 +124,14 @@ int gs_write(struct tty_struct * tty,
124 /* get exclusive "write" access to this port (problem 3) */ 124 /* get exclusive "write" access to this port (problem 3) */
125 /* This is not a spinlock because we can have a disk access (page 125 /* This is not a spinlock because we can have a disk access (page
126 fault) in copy_from_user */ 126 fault) in copy_from_user */
127 down (& port->port_write_sem); 127 mutex_lock(& port->port_write_mutex);
128 128
129 while (1) { 129 while (1) {
130 130
131 c = count; 131 c = count;
132 132
133 /* This is safe because we "OWN" the "head". Noone else can 133 /* This is safe because we "OWN" the "head". Noone else can
134 change the "head": we own the port_write_sem. */ 134 change the "head": we own the port_write_mutex. */
135 /* Don't overrun the end of the buffer */ 135 /* Don't overrun the end of the buffer */
136 t = SERIAL_XMIT_SIZE - port->xmit_head; 136 t = SERIAL_XMIT_SIZE - port->xmit_head;
137 if (t < c) c = t; 137 if (t < c) c = t;
@@ -153,7 +153,7 @@ int gs_write(struct tty_struct * tty,
153 count -= c; 153 count -= c;
154 total += c; 154 total += c;
155 } 155 }
156 up (& port->port_write_sem); 156 mutex_unlock(& port->port_write_mutex);
157 157
158 gs_dprintk (GS_DEBUG_WRITE, "write: interrupts are %s\n", 158 gs_dprintk (GS_DEBUG_WRITE, "write: interrupts are %s\n",
159 (port->flags & GS_TX_INTEN)?"enabled": "disabled"); 159 (port->flags & GS_TX_INTEN)?"enabled": "disabled");
@@ -214,7 +214,7 @@ int gs_write(struct tty_struct * tty,
214 c = count; 214 c = count;
215 215
216 /* This is safe because we "OWN" the "head". Noone else can 216 /* This is safe because we "OWN" the "head". Noone else can
217 change the "head": we own the port_write_sem. */ 217 change the "head": we own the port_write_mutex. */
218 /* Don't overrun the end of the buffer */ 218 /* Don't overrun the end of the buffer */
219 t = SERIAL_XMIT_SIZE - port->xmit_head; 219 t = SERIAL_XMIT_SIZE - port->xmit_head;
220 if (t < c) c = t; 220 if (t < c) c = t;
@@ -888,7 +888,7 @@ int gs_init_port(struct gs_port *port)
888 spin_lock_irqsave (&port->driver_lock, flags); 888 spin_lock_irqsave (&port->driver_lock, flags);
889 if (port->tty) 889 if (port->tty)
890 clear_bit(TTY_IO_ERROR, &port->tty->flags); 890 clear_bit(TTY_IO_ERROR, &port->tty->flags);
891 init_MUTEX(&port->port_write_sem); 891 mutex_init(&port->port_write_mutex);
892 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; 892 port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
893 spin_unlock_irqrestore(&port->driver_lock, flags); 893 spin_unlock_irqrestore(&port->driver_lock, flags);
894 gs_set_termios(port->tty, NULL); 894 gs_set_termios(port->tty, NULL);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 28c5a3193b81..ede128356af2 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -181,7 +181,6 @@ static struct tty_driver *stli_serial;
181 * is already swapping a shared buffer won't make things any worse. 181 * is already swapping a shared buffer won't make things any worse.
182 */ 182 */
183static char *stli_tmpwritebuf; 183static char *stli_tmpwritebuf;
184static DECLARE_MUTEX(stli_tmpwritesem);
185 184
186#define STLI_TXBUFSIZE 4096 185#define STLI_TXBUFSIZE 4096
187 186
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index ccad7ae94541..ede365d05387 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -132,7 +132,7 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty)
132 * We test the TTY_THROTTLED bit first so that it always 132 * We test the TTY_THROTTLED bit first so that it always
133 * indicates the current state. The decision about whether 133 * indicates the current state. The decision about whether
134 * it is worth allowing more input has been taken by the caller. 134 * it is worth allowing more input has been taken by the caller.
135 * Can sleep, may be called under the atomic_read semaphore but 135 * Can sleep, may be called under the atomic_read_lock mutex but
136 * this is not guaranteed. 136 * this is not guaranteed.
137 */ 137 */
138 138
@@ -1132,7 +1132,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
1132 * buffer, and once to drain the space from the (physical) beginning of 1132 * buffer, and once to drain the space from the (physical) beginning of
1133 * the buffer to head pointer. 1133 * the buffer to head pointer.
1134 * 1134 *
1135 * Called under the tty->atomic_read sem and with TTY_DONT_FLIP set 1135 * Called under the tty->atomic_read_lock sem and with TTY_DONT_FLIP set
1136 * 1136 *
1137 */ 1137 */
1138 1138
@@ -1262,11 +1262,11 @@ do_it_again:
1262 * Internal serialization of reads. 1262 * Internal serialization of reads.
1263 */ 1263 */
1264 if (file->f_flags & O_NONBLOCK) { 1264 if (file->f_flags & O_NONBLOCK) {
1265 if (down_trylock(&tty->atomic_read)) 1265 if (!mutex_trylock(&tty->atomic_read_lock))
1266 return -EAGAIN; 1266 return -EAGAIN;
1267 } 1267 }
1268 else { 1268 else {
1269 if (down_interruptible(&tty->atomic_read)) 1269 if (mutex_lock_interruptible(&tty->atomic_read_lock))
1270 return -ERESTARTSYS; 1270 return -ERESTARTSYS;
1271 } 1271 }
1272 1272
@@ -1393,7 +1393,7 @@ do_it_again:
1393 timeout = time; 1393 timeout = time;
1394 } 1394 }
1395 clear_bit(TTY_DONT_FLIP, &tty->flags); 1395 clear_bit(TTY_DONT_FLIP, &tty->flags);
1396 up(&tty->atomic_read); 1396 mutex_unlock(&tty->atomic_read_lock);
1397 remove_wait_queue(&tty->read_wait, &wait); 1397 remove_wait_queue(&tty->read_wait, &wait);
1398 1398
1399 if (!waitqueue_active(&tty->read_wait)) 1399 if (!waitqueue_active(&tty->read_wait))
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index ca41d62b1d9d..8865387d3448 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -27,6 +27,7 @@
27#include <linux/rwsem.h> 27#include <linux/rwsem.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
30#include <linux/mutex.h>
30 31
31#include <asm/hardware/dec21285.h> 32#include <asm/hardware/dec21285.h>
32#include <asm/io.h> 33#include <asm/io.h>
@@ -56,7 +57,7 @@ static int gbWriteEnable;
56static int gbWriteBase64Enable; 57static int gbWriteBase64Enable;
57static volatile unsigned char *FLASH_BASE; 58static volatile unsigned char *FLASH_BASE;
58static int gbFlashSize = KFLASH_SIZE; 59static int gbFlashSize = KFLASH_SIZE;
59static DECLARE_MUTEX(nwflash_sem); 60static DEFINE_MUTEX(nwflash_mutex);
60 61
61extern spinlock_t gpio_lock; 62extern spinlock_t gpio_lock;
62 63
@@ -140,7 +141,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
140 /* 141 /*
141 * We now lock against reads and writes. --rmk 142 * We now lock against reads and writes. --rmk
142 */ 143 */
143 if (down_interruptible(&nwflash_sem)) 144 if (mutex_lock_interruptible(&nwflash_mutex))
144 return -ERESTARTSYS; 145 return -ERESTARTSYS;
145 146
146 ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count); 147 ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count);
@@ -149,7 +150,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
149 *ppos += count; 150 *ppos += count;
150 } else 151 } else
151 ret = -EFAULT; 152 ret = -EFAULT;
152 up(&nwflash_sem); 153 mutex_unlock(&nwflash_mutex);
153 } 154 }
154 return ret; 155 return ret;
155} 156}
@@ -188,7 +189,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
188 /* 189 /*
189 * We now lock against reads and writes. --rmk 190 * We now lock against reads and writes. --rmk
190 */ 191 */
191 if (down_interruptible(&nwflash_sem)) 192 if (mutex_lock_interruptible(&nwflash_mutex))
192 return -ERESTARTSYS; 193 return -ERESTARTSYS;
193 194
194 written = 0; 195 written = 0;
@@ -277,7 +278,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
277 */ 278 */
278 leds_event(led_release); 279 leds_event(led_release);
279 280
280 up(&nwflash_sem); 281 mutex_unlock(&nwflash_mutex);
281 282
282 return written; 283 return written;
283} 284}
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 30e4cbe16bb0..15a7b4086524 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -19,6 +19,7 @@
19#include <linux/uio.h> 19#include <linux/uio.h>
20#include <linux/cdev.h> 20#include <linux/cdev.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/mutex.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24 25
@@ -29,7 +30,7 @@ struct raw_device_data {
29 30
30static struct class *raw_class; 31static struct class *raw_class;
31static struct raw_device_data raw_devices[MAX_RAW_MINORS]; 32static struct raw_device_data raw_devices[MAX_RAW_MINORS];
32static DECLARE_MUTEX(raw_mutex); 33static DEFINE_MUTEX(raw_mutex);
33static struct file_operations raw_ctl_fops; /* forward declaration */ 34static struct file_operations raw_ctl_fops; /* forward declaration */
34 35
35/* 36/*
@@ -53,7 +54,7 @@ static int raw_open(struct inode *inode, struct file *filp)
53 return 0; 54 return 0;
54 } 55 }
55 56
56 down(&raw_mutex); 57 mutex_lock(&raw_mutex);
57 58
58 /* 59 /*
59 * All we need to do on open is check that the device is bound. 60 * All we need to do on open is check that the device is bound.
@@ -78,7 +79,7 @@ static int raw_open(struct inode *inode, struct file *filp)
78 filp->f_dentry->d_inode->i_mapping = 79 filp->f_dentry->d_inode->i_mapping =
79 bdev->bd_inode->i_mapping; 80 bdev->bd_inode->i_mapping;
80 filp->private_data = bdev; 81 filp->private_data = bdev;
81 up(&raw_mutex); 82 mutex_unlock(&raw_mutex);
82 return 0; 83 return 0;
83 84
84out2: 85out2:
@@ -86,7 +87,7 @@ out2:
86out1: 87out1:
87 blkdev_put(bdev); 88 blkdev_put(bdev);
88out: 89out:
89 up(&raw_mutex); 90 mutex_unlock(&raw_mutex);
90 return err; 91 return err;
91} 92}
92 93
@@ -99,14 +100,14 @@ static int raw_release(struct inode *inode, struct file *filp)
99 const int minor= iminor(inode); 100 const int minor= iminor(inode);
100 struct block_device *bdev; 101 struct block_device *bdev;
101 102
102 down(&raw_mutex); 103 mutex_lock(&raw_mutex);
103 bdev = raw_devices[minor].binding; 104 bdev = raw_devices[minor].binding;
104 if (--raw_devices[minor].inuse == 0) { 105 if (--raw_devices[minor].inuse == 0) {
105 /* Here inode->i_mapping == bdev->bd_inode->i_mapping */ 106 /* Here inode->i_mapping == bdev->bd_inode->i_mapping */
106 inode->i_mapping = &inode->i_data; 107 inode->i_mapping = &inode->i_data;
107 inode->i_mapping->backing_dev_info = &default_backing_dev_info; 108 inode->i_mapping->backing_dev_info = &default_backing_dev_info;
108 } 109 }
109 up(&raw_mutex); 110 mutex_unlock(&raw_mutex);
110 111
111 bd_release(bdev); 112 bd_release(bdev);
112 blkdev_put(bdev); 113 blkdev_put(bdev);
@@ -187,9 +188,9 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
187 goto out; 188 goto out;
188 } 189 }
189 190
190 down(&raw_mutex); 191 mutex_lock(&raw_mutex);
191 if (rawdev->inuse) { 192 if (rawdev->inuse) {
192 up(&raw_mutex); 193 mutex_unlock(&raw_mutex);
193 err = -EBUSY; 194 err = -EBUSY;
194 goto out; 195 goto out;
195 } 196 }
@@ -211,11 +212,11 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
211 bind_device(&rq); 212 bind_device(&rq);
212 } 213 }
213 } 214 }
214 up(&raw_mutex); 215 mutex_unlock(&raw_mutex);
215 } else { 216 } else {
216 struct block_device *bdev; 217 struct block_device *bdev;
217 218
218 down(&raw_mutex); 219 mutex_lock(&raw_mutex);
219 bdev = rawdev->binding; 220 bdev = rawdev->binding;
220 if (bdev) { 221 if (bdev) {
221 rq.block_major = MAJOR(bdev->bd_dev); 222 rq.block_major = MAJOR(bdev->bd_dev);
@@ -223,7 +224,7 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
223 } else { 224 } else {
224 rq.block_major = rq.block_minor = 0; 225 rq.block_major = rq.block_minor = 0;
225 } 226 }
226 up(&raw_mutex); 227 mutex_unlock(&raw_mutex);
227 if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) { 228 if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
228 err = -EFAULT; 229 err = -EFAULT;
229 goto out; 230 goto out;
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index fee68cc895f8..510bd3e0e88b 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -97,7 +97,7 @@
97#include <asm/amigahw.h> 97#include <asm/amigahw.h>
98#include <linux/zorro.h> 98#include <linux/zorro.h>
99#include <asm/irq.h> 99#include <asm/irq.h>
100#include <asm/semaphore.h> 100#include <linux/mutex.h>
101 101
102#include <linux/delay.h> 102#include <linux/delay.h>
103 103
@@ -654,7 +654,7 @@ static void a2232_init_portstructs(void)
654 port->gs.closing_wait = 30 * HZ; 654 port->gs.closing_wait = 30 * HZ;
655 port->gs.rd = &a2232_real_driver; 655 port->gs.rd = &a2232_real_driver;
656#ifdef NEW_WRITE_LOCKING 656#ifdef NEW_WRITE_LOCKING
657 init_MUTEX(&(port->gs.port_write_sem)); 657 init_MUTEX(&(port->gs.port_write_mutex));
658#endif 658#endif
659 init_waitqueue_head(&port->gs.open_wait); 659 init_waitqueue_head(&port->gs.open_wait);
660 init_waitqueue_head(&port->gs.close_wait); 660 init_waitqueue_head(&port->gs.close_wait);
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 0e7d216e7eb0..b543821d8cb4 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -5,7 +5,7 @@
5 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 6 * for more details.
7 * 7 *
8 * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11/* 11/*
@@ -77,7 +77,7 @@ scdrv_open(struct inode *inode, struct file *file)
77 scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev); 77 scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev);
78 78
79 /* allocate memory for subchannel data */ 79 /* allocate memory for subchannel data */
80 sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL); 80 sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
81 if (sd == NULL) { 81 if (sd == NULL) {
82 printk("%s: couldn't allocate subchannel data\n", 82 printk("%s: couldn't allocate subchannel data\n",
83 __FUNCTION__); 83 __FUNCTION__);
@@ -85,7 +85,6 @@ scdrv_open(struct inode *inode, struct file *file)
85 } 85 }
86 86
87 /* initialize subch_data_s fields */ 87 /* initialize subch_data_s fields */
88 memset(sd, 0, sizeof (struct subch_data_s));
89 sd->sd_nasid = scd->scd_nasid; 88 sd->sd_nasid = scd->scd_nasid;
90 sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid); 89 sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid);
91 90
@@ -394,7 +393,7 @@ scdrv_init(void)
394 sprintf(devnamep, "#%d", geo_slab(geoid)); 393 sprintf(devnamep, "#%d", geo_slab(geoid));
395 394
396 /* allocate sysctl device data */ 395 /* allocate sysctl device data */
397 scd = kmalloc(sizeof (struct sysctl_data_s), 396 scd = kzalloc(sizeof (struct sysctl_data_s),
398 GFP_KERNEL); 397 GFP_KERNEL);
399 if (!scd) { 398 if (!scd) {
400 printk("%s: failed to allocate device info" 399 printk("%s: failed to allocate device info"
@@ -402,7 +401,6 @@ scdrv_init(void)
402 SYSCTL_BASENAME, devname); 401 SYSCTL_BASENAME, devname);
403 continue; 402 continue;
404 } 403 }
405 memset(scd, 0, sizeof (struct sysctl_data_s));
406 404
407 /* initialize sysctl device data fields */ 405 /* initialize sysctl device data fields */
408 scd->scd_nasid = cnodeid_to_nasid(cnode); 406 scd->scd_nasid = cnodeid_to_nasid(cnode);
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
index a4fa507eed9e..e234d50e142a 100644
--- a/drivers/char/snsc_event.c
+++ b/drivers/char/snsc_event.c
@@ -287,7 +287,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
287{ 287{
288 int rv; 288 int rv;
289 289
290 event_sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL); 290 event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
291 if (event_sd == NULL) { 291 if (event_sd == NULL) {
292 printk(KERN_WARNING "%s: couldn't allocate subchannel info" 292 printk(KERN_WARNING "%s: couldn't allocate subchannel info"
293 " for event monitoring\n", __FUNCTION__); 293 " for event monitoring\n", __FUNCTION__);
@@ -295,7 +295,6 @@ scdrv_event_init(struct sysctl_data_s *scd)
295 } 295 }
296 296
297 /* initialize subch_data_s fields */ 297 /* initialize subch_data_s fields */
298 memset(event_sd, 0, sizeof (struct subch_data_s));
299 event_sd->sd_nasid = scd->scd_nasid; 298 event_sd->sd_nasid = scd->scd_nasid;
300 spin_lock_init(&event_sd->sd_rlock); 299 spin_lock_init(&event_sd->sd_rlock);
301 300
@@ -321,5 +320,3 @@ scdrv_event_init(struct sysctl_data_s *scd)
321 return; 320 return;
322 } 321 }
323} 322}
324
325
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index bdaab6992109..3f5d6077f39c 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -148,7 +148,6 @@ static struct tty_driver *stl_serial;
148 * is already swapping a shared buffer won't make things any worse. 148 * is already swapping a shared buffer won't make things any worse.
149 */ 149 */
150static char *stl_tmpwritebuf; 150static char *stl_tmpwritebuf;
151static DECLARE_MUTEX(stl_tmpwritesem);
152 151
153/* 152/*
154 * Define a local default termios struct. All ports will be created 153 * Define a local default termios struct. All ports will be created
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index a6b4f02bdceb..3b4747230270 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -2318,7 +2318,7 @@ static int sx_init_portstructs (int nboards, int nports)
2318 port->board = board; 2318 port->board = board;
2319 port->gs.rd = &sx_real_driver; 2319 port->gs.rd = &sx_real_driver;
2320#ifdef NEW_WRITE_LOCKING 2320#ifdef NEW_WRITE_LOCKING
2321 port->gs.port_write_sem = MUTEX; 2321 port->gs.port_write_mutex = MUTEX;
2322#endif 2322#endif
2323 port->gs.driver_lock = SPIN_LOCK_UNLOCKED; 2323 port->gs.driver_lock = SPIN_LOCK_UNLOCKED;
2324 /* 2324 /*
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 53d3d066554e..76592ee1fb38 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -130,7 +130,7 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
130 130
131/* Semaphore to protect creating and releasing a tty. This is shared with 131/* Semaphore to protect creating and releasing a tty. This is shared with
132 vt.c for deeply disgusting hack reasons */ 132 vt.c for deeply disgusting hack reasons */
133DECLARE_MUTEX(tty_sem); 133DEFINE_MUTEX(tty_mutex);
134 134
135#ifdef CONFIG_UNIX98_PTYS 135#ifdef CONFIG_UNIX98_PTYS
136extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ 136extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
@@ -1188,11 +1188,11 @@ void disassociate_ctty(int on_exit)
1188 1188
1189 lock_kernel(); 1189 lock_kernel();
1190 1190
1191 down(&tty_sem); 1191 mutex_lock(&tty_mutex);
1192 tty = current->signal->tty; 1192 tty = current->signal->tty;
1193 if (tty) { 1193 if (tty) {
1194 tty_pgrp = tty->pgrp; 1194 tty_pgrp = tty->pgrp;
1195 up(&tty_sem); 1195 mutex_unlock(&tty_mutex);
1196 if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) 1196 if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
1197 tty_vhangup(tty); 1197 tty_vhangup(tty);
1198 } else { 1198 } else {
@@ -1200,7 +1200,7 @@ void disassociate_ctty(int on_exit)
1200 kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit); 1200 kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit);
1201 kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit); 1201 kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit);
1202 } 1202 }
1203 up(&tty_sem); 1203 mutex_unlock(&tty_mutex);
1204 unlock_kernel(); 1204 unlock_kernel();
1205 return; 1205 return;
1206 } 1206 }
@@ -1211,7 +1211,7 @@ void disassociate_ctty(int on_exit)
1211 } 1211 }
1212 1212
1213 /* Must lock changes to tty_old_pgrp */ 1213 /* Must lock changes to tty_old_pgrp */
1214 down(&tty_sem); 1214 mutex_lock(&tty_mutex);
1215 current->signal->tty_old_pgrp = 0; 1215 current->signal->tty_old_pgrp = 0;
1216 tty->session = 0; 1216 tty->session = 0;
1217 tty->pgrp = -1; 1217 tty->pgrp = -1;
@@ -1222,7 +1222,7 @@ void disassociate_ctty(int on_exit)
1222 p->signal->tty = NULL; 1222 p->signal->tty = NULL;
1223 } while_each_task_pid(current->signal->session, PIDTYPE_SID, p); 1223 } while_each_task_pid(current->signal->session, PIDTYPE_SID, p);
1224 read_unlock(&tasklist_lock); 1224 read_unlock(&tasklist_lock);
1225 up(&tty_sem); 1225 mutex_unlock(&tty_mutex);
1226 unlock_kernel(); 1226 unlock_kernel();
1227} 1227}
1228 1228
@@ -1306,7 +1306,7 @@ static inline ssize_t do_tty_write(
1306 ssize_t ret = 0, written = 0; 1306 ssize_t ret = 0, written = 0;
1307 unsigned int chunk; 1307 unsigned int chunk;
1308 1308
1309 if (down_interruptible(&tty->atomic_write)) { 1309 if (mutex_lock_interruptible(&tty->atomic_write_lock)) {
1310 return -ERESTARTSYS; 1310 return -ERESTARTSYS;
1311 } 1311 }
1312 1312
@@ -1329,7 +1329,7 @@ static inline ssize_t do_tty_write(
1329 if (count < chunk) 1329 if (count < chunk)
1330 chunk = count; 1330 chunk = count;
1331 1331
1332 /* write_buf/write_cnt is protected by the atomic_write semaphore */ 1332 /* write_buf/write_cnt is protected by the atomic_write_lock mutex */
1333 if (tty->write_cnt < chunk) { 1333 if (tty->write_cnt < chunk) {
1334 unsigned char *buf; 1334 unsigned char *buf;
1335 1335
@@ -1338,7 +1338,7 @@ static inline ssize_t do_tty_write(
1338 1338
1339 buf = kmalloc(chunk, GFP_KERNEL); 1339 buf = kmalloc(chunk, GFP_KERNEL);
1340 if (!buf) { 1340 if (!buf) {
1341 up(&tty->atomic_write); 1341 mutex_unlock(&tty->atomic_write_lock);
1342 return -ENOMEM; 1342 return -ENOMEM;
1343 } 1343 }
1344 kfree(tty->write_buf); 1344 kfree(tty->write_buf);
@@ -1374,7 +1374,7 @@ static inline ssize_t do_tty_write(
1374 inode->i_mtime = current_fs_time(inode->i_sb); 1374 inode->i_mtime = current_fs_time(inode->i_sb);
1375 ret = written; 1375 ret = written;
1376 } 1376 }
1377 up(&tty->atomic_write); 1377 mutex_unlock(&tty->atomic_write_lock);
1378 return ret; 1378 return ret;
1379} 1379}
1380 1380
@@ -1442,8 +1442,8 @@ static inline void tty_line_name(struct tty_driver *driver, int index, char *p)
1442 1442
1443/* 1443/*
1444 * WSH 06/09/97: Rewritten to remove races and properly clean up after a 1444 * WSH 06/09/97: Rewritten to remove races and properly clean up after a
1445 * failed open. The new code protects the open with a semaphore, so it's 1445 * failed open. The new code protects the open with a mutex, so it's
1446 * really quite straightforward. The semaphore locking can probably be 1446 * really quite straightforward. The mutex locking can probably be
1447 * relaxed for the (most common) case of reopening a tty. 1447 * relaxed for the (most common) case of reopening a tty.
1448 */ 1448 */
1449static int init_dev(struct tty_driver *driver, int idx, 1449static int init_dev(struct tty_driver *driver, int idx,
@@ -1640,7 +1640,7 @@ fast_track:
1640success: 1640success:
1641 *ret_tty = tty; 1641 *ret_tty = tty;
1642 1642
1643 /* All paths come through here to release the semaphore */ 1643 /* All paths come through here to release the mutex */
1644end_init: 1644end_init:
1645 return retval; 1645 return retval;
1646 1646
@@ -1837,7 +1837,7 @@ static void release_dev(struct file * filp)
1837 /* Guard against races with tty->count changes elsewhere and 1837 /* Guard against races with tty->count changes elsewhere and
1838 opens on /dev/tty */ 1838 opens on /dev/tty */
1839 1839
1840 down(&tty_sem); 1840 mutex_lock(&tty_mutex);
1841 tty_closing = tty->count <= 1; 1841 tty_closing = tty->count <= 1;
1842 o_tty_closing = o_tty && 1842 o_tty_closing = o_tty &&
1843 (o_tty->count <= (pty_master ? 1 : 0)); 1843 (o_tty->count <= (pty_master ? 1 : 0));
@@ -1868,7 +1868,7 @@ static void release_dev(struct file * filp)
1868 1868
1869 printk(KERN_WARNING "release_dev: %s: read/write wait queue " 1869 printk(KERN_WARNING "release_dev: %s: read/write wait queue "
1870 "active!\n", tty_name(tty, buf)); 1870 "active!\n", tty_name(tty, buf));
1871 up(&tty_sem); 1871 mutex_unlock(&tty_mutex);
1872 schedule(); 1872 schedule();
1873 } 1873 }
1874 1874
@@ -1934,7 +1934,7 @@ static void release_dev(struct file * filp)
1934 read_unlock(&tasklist_lock); 1934 read_unlock(&tasklist_lock);
1935 } 1935 }
1936 1936
1937 up(&tty_sem); 1937 mutex_unlock(&tty_mutex);
1938 1938
1939 /* check whether both sides are closing ... */ 1939 /* check whether both sides are closing ... */
1940 if (!tty_closing || (o_tty && !o_tty_closing)) 1940 if (!tty_closing || (o_tty && !o_tty_closing))
@@ -2040,11 +2040,11 @@ retry_open:
2040 index = -1; 2040 index = -1;
2041 retval = 0; 2041 retval = 0;
2042 2042
2043 down(&tty_sem); 2043 mutex_lock(&tty_mutex);
2044 2044
2045 if (device == MKDEV(TTYAUX_MAJOR,0)) { 2045 if (device == MKDEV(TTYAUX_MAJOR,0)) {
2046 if (!current->signal->tty) { 2046 if (!current->signal->tty) {
2047 up(&tty_sem); 2047 mutex_unlock(&tty_mutex);
2048 return -ENXIO; 2048 return -ENXIO;
2049 } 2049 }
2050 driver = current->signal->tty->driver; 2050 driver = current->signal->tty->driver;
@@ -2070,18 +2070,18 @@ retry_open:
2070 noctty = 1; 2070 noctty = 1;
2071 goto got_driver; 2071 goto got_driver;
2072 } 2072 }
2073 up(&tty_sem); 2073 mutex_unlock(&tty_mutex);
2074 return -ENODEV; 2074 return -ENODEV;
2075 } 2075 }
2076 2076
2077 driver = get_tty_driver(device, &index); 2077 driver = get_tty_driver(device, &index);
2078 if (!driver) { 2078 if (!driver) {
2079 up(&tty_sem); 2079 mutex_unlock(&tty_mutex);
2080 return -ENODEV; 2080 return -ENODEV;
2081 } 2081 }
2082got_driver: 2082got_driver:
2083 retval = init_dev(driver, index, &tty); 2083 retval = init_dev(driver, index, &tty);
2084 up(&tty_sem); 2084 mutex_unlock(&tty_mutex);
2085 if (retval) 2085 if (retval)
2086 return retval; 2086 return retval;
2087 2087
@@ -2167,9 +2167,9 @@ static int ptmx_open(struct inode * inode, struct file * filp)
2167 } 2167 }
2168 up(&allocated_ptys_lock); 2168 up(&allocated_ptys_lock);
2169 2169
2170 down(&tty_sem); 2170 mutex_lock(&tty_mutex);
2171 retval = init_dev(ptm_driver, index, &tty); 2171 retval = init_dev(ptm_driver, index, &tty);
2172 up(&tty_sem); 2172 mutex_unlock(&tty_mutex);
2173 2173
2174 if (retval) 2174 if (retval)
2175 goto out; 2175 goto out;
@@ -2915,8 +2915,8 @@ static void initialize_tty_struct(struct tty_struct *tty)
2915 init_waitqueue_head(&tty->write_wait); 2915 init_waitqueue_head(&tty->write_wait);
2916 init_waitqueue_head(&tty->read_wait); 2916 init_waitqueue_head(&tty->read_wait);
2917 INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); 2917 INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
2918 sema_init(&tty->atomic_read, 1); 2918 mutex_init(&tty->atomic_read_lock);
2919 sema_init(&tty->atomic_write, 1); 2919 mutex_init(&tty->atomic_write_lock);
2920 spin_lock_init(&tty->read_lock); 2920 spin_lock_init(&tty->read_lock);
2921 INIT_LIST_HEAD(&tty->tty_files); 2921 INIT_LIST_HEAD(&tty->tty_files);
2922 INIT_WORK(&tty->SAK_work, NULL, NULL); 2922 INIT_WORK(&tty->SAK_work, NULL, NULL);
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c
index d9325281e482..fd00822ac145 100644
--- a/drivers/char/vme_scc.c
+++ b/drivers/char/vme_scc.c
@@ -184,7 +184,7 @@ static void scc_init_portstructs(void)
184 port->gs.closing_wait = 30 * HZ; 184 port->gs.closing_wait = 30 * HZ;
185 port->gs.rd = &scc_real_driver; 185 port->gs.rd = &scc_real_driver;
186#ifdef NEW_WRITE_LOCKING 186#ifdef NEW_WRITE_LOCKING
187 port->gs.port_write_sem = MUTEX; 187 port->gs.port_write_mutex = MUTEX;
188#endif 188#endif
189 init_waitqueue_head(&port->gs.open_wait); 189 init_waitqueue_head(&port->gs.open_wait);
190 init_waitqueue_head(&port->gs.close_wait); 190 init_waitqueue_head(&port->gs.close_wait);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 0900d1dbee59..ca4844c527da 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2489,7 +2489,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2489} 2489}
2490 2490
2491/* 2491/*
2492 * We take tty_sem in here to prevent another thread from coming in via init_dev 2492 * We take tty_mutex in here to prevent another thread from coming in via init_dev
2493 * and taking a ref against the tty while we're in the process of forgetting 2493 * and taking a ref against the tty while we're in the process of forgetting
2494 * about it and cleaning things up. 2494 * about it and cleaning things up.
2495 * 2495 *
@@ -2497,7 +2497,7 @@ static int con_open(struct tty_struct *tty, struct file *filp)
2497 */ 2497 */
2498static void con_close(struct tty_struct *tty, struct file *filp) 2498static void con_close(struct tty_struct *tty, struct file *filp)
2499{ 2499{
2500 down(&tty_sem); 2500 mutex_lock(&tty_mutex);
2501 acquire_console_sem(); 2501 acquire_console_sem();
2502 if (tty && tty->count == 1) { 2502 if (tty && tty->count == 1) {
2503 struct vc_data *vc = tty->driver_data; 2503 struct vc_data *vc = tty->driver_data;
@@ -2507,15 +2507,15 @@ static void con_close(struct tty_struct *tty, struct file *filp)
2507 tty->driver_data = NULL; 2507 tty->driver_data = NULL;
2508 release_console_sem(); 2508 release_console_sem();
2509 vcs_remove_devfs(tty); 2509 vcs_remove_devfs(tty);
2510 up(&tty_sem); 2510 mutex_unlock(&tty_mutex);
2511 /* 2511 /*
2512 * tty_sem is released, but we still hold BKL, so there is 2512 * tty_mutex is released, but we still hold BKL, so there is
2513 * still exclusion against init_dev() 2513 * still exclusion against init_dev()
2514 */ 2514 */
2515 return; 2515 return;
2516 } 2516 }
2517 release_console_sem(); 2517 release_console_sem();
2518 up(&tty_sem); 2518 mutex_unlock(&tty_mutex);
2519} 2519}
2520 2520
2521static void vc_init(struct vc_data *vc, unsigned int rows, 2521static void vc_init(struct vc_data *vc, unsigned int rows,
@@ -2869,9 +2869,9 @@ void unblank_screen(void)
2869} 2869}
2870 2870
2871/* 2871/*
2872 * We defer the timer blanking to work queue so it can take the console semaphore 2872 * We defer the timer blanking to work queue so it can take the console mutex
2873 * (console operations can still happen at irq time, but only from printk which 2873 * (console operations can still happen at irq time, but only from printk which
2874 * has the console semaphore. Not perfect yet, but better than no locking 2874 * has the console mutex. Not perfect yet, but better than no locking
2875 */ 2875 */
2876static void blank_screen_t(unsigned long dummy) 2876static void blank_screen_t(unsigned long dummy)
2877{ 2877{
@@ -3234,6 +3234,14 @@ void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org)
3234 } 3234 }
3235} 3235}
3236 3236
3237int is_console_suspend_safe(void)
3238{
3239 /* It is unsafe to suspend devices while X has control of the
3240 * hardware. Make sure we are running on a kernel-controlled console.
3241 */
3242 return vc_cons[fg_console].d->vc_mode == KD_TEXT;
3243}
3244
3237/* 3245/*
3238 * Visible symbols for modules 3246 * Visible symbols for modules
3239 */ 3247 */
diff --git a/drivers/char/watchdog/pcwd_usb.c b/drivers/char/watchdog/pcwd_usb.c
index 1533f56baa42..2700c5c45b8a 100644
--- a/drivers/char/watchdog/pcwd_usb.c
+++ b/drivers/char/watchdog/pcwd_usb.c
@@ -42,6 +42,7 @@
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <linux/usb.h> 44#include <linux/usb.h>
45#include <linux/mutex.h>
45 46
46 47
47#ifdef CONFIG_USB_DEBUG 48#ifdef CONFIG_USB_DEBUG
@@ -143,7 +144,7 @@ struct usb_pcwd_private {
143static struct usb_pcwd_private *usb_pcwd_device; 144static struct usb_pcwd_private *usb_pcwd_device;
144 145
145/* prevent races between open() and disconnect() */ 146/* prevent races between open() and disconnect() */
146static DECLARE_MUTEX (disconnect_sem); 147static DEFINE_MUTEX(disconnect_mutex);
147 148
148/* local function prototypes */ 149/* local function prototypes */
149static int usb_pcwd_probe (struct usb_interface *interface, const struct usb_device_id *id); 150static int usb_pcwd_probe (struct usb_interface *interface, const struct usb_device_id *id);
@@ -723,7 +724,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface)
723 struct usb_pcwd_private *usb_pcwd; 724 struct usb_pcwd_private *usb_pcwd;
724 725
725 /* prevent races with open() */ 726 /* prevent races with open() */
726 down (&disconnect_sem); 727 mutex_lock(&disconnect_mutex);
727 728
728 usb_pcwd = usb_get_intfdata (interface); 729 usb_pcwd = usb_get_intfdata (interface);
729 usb_set_intfdata (interface, NULL); 730 usb_set_intfdata (interface, NULL);
@@ -749,7 +750,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface)
749 750
750 cards_found--; 751 cards_found--;
751 752
752 up (&disconnect_sem); 753 mutex_unlock(&disconnect_mutex);
753 754
754 printk(KERN_INFO PFX "USB PC Watchdog disconnected\n"); 755 printk(KERN_INFO PFX "USB PC Watchdog disconnected\n");
755} 756}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index d7125f4d9113..35897079a78d 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -26,6 +26,7 @@
26#include <linux/netlink.h> 26#include <linux/netlink.h>
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/connector.h> 28#include <linux/connector.h>
29#include <linux/mutex.h>
29 30
30#include <net/sock.h> 31#include <net/sock.h>
31 32
@@ -41,7 +42,7 @@ module_param(cn_val, uint, 0);
41MODULE_PARM_DESC(cn_idx, "Connector's main device idx."); 42MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
42MODULE_PARM_DESC(cn_val, "Connector's main device val."); 43MODULE_PARM_DESC(cn_val, "Connector's main device val.");
43 44
44static DECLARE_MUTEX(notify_lock); 45static DEFINE_MUTEX(notify_lock);
45static LIST_HEAD(notify_list); 46static LIST_HEAD(notify_list);
46 47
47static struct cn_dev cdev; 48static struct cn_dev cdev;
@@ -260,7 +261,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
260{ 261{
261 struct cn_ctl_entry *ent; 262 struct cn_ctl_entry *ent;
262 263
263 down(&notify_lock); 264 mutex_lock(&notify_lock);
264 list_for_each_entry(ent, &notify_list, notify_entry) { 265 list_for_each_entry(ent, &notify_list, notify_entry) {
265 int i; 266 int i;
266 struct cn_notify_req *req; 267 struct cn_notify_req *req;
@@ -293,7 +294,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
293 cn_netlink_send(&m, ctl->group, GFP_KERNEL); 294 cn_netlink_send(&m, ctl->group, GFP_KERNEL);
294 } 295 }
295 } 296 }
296 up(&notify_lock); 297 mutex_unlock(&notify_lock);
297} 298}
298 299
299/* 300/*
@@ -407,14 +408,14 @@ static void cn_callback(void *data)
407 if (ctl->group == 0) { 408 if (ctl->group == 0) {
408 struct cn_ctl_entry *n; 409 struct cn_ctl_entry *n;
409 410
410 down(&notify_lock); 411 mutex_lock(&notify_lock);
411 list_for_each_entry_safe(ent, n, &notify_list, notify_entry) { 412 list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
412 if (cn_ctl_msg_equals(ent->msg, ctl)) { 413 if (cn_ctl_msg_equals(ent->msg, ctl)) {
413 list_del(&ent->notify_entry); 414 list_del(&ent->notify_entry);
414 kfree(ent); 415 kfree(ent);
415 } 416 }
416 } 417 }
417 up(&notify_lock); 418 mutex_unlock(&notify_lock);
418 419
419 return; 420 return;
420 } 421 }
@@ -429,9 +430,9 @@ static void cn_callback(void *data)
429 430
430 memcpy(ent->msg, ctl, size - sizeof(*ent)); 431 memcpy(ent->msg, ctl, size - sizeof(*ent));
431 432
432 down(&notify_lock); 433 mutex_lock(&notify_lock);
433 list_add(&ent->notify_entry, &notify_list); 434 list_add(&ent->notify_entry, &notify_list);
434 up(&notify_lock); 435 mutex_unlock(&notify_lock);
435} 436}
436 437
437static int __init cn_init(void) 438static int __init cn_init(void)
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 3a4e5c5b4e1f..d6543fc4a923 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -33,6 +33,7 @@
33#include <linux/spinlock.h> 33#include <linux/spinlock.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/types.h> 35#include <linux/types.h>
36#include <linux/mutex.h>
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/semaphore.h> 38#include <asm/semaphore.h>
38 39
@@ -48,7 +49,7 @@ static u8 *smi_data_buf;
48static dma_addr_t smi_data_buf_handle; 49static dma_addr_t smi_data_buf_handle;
49static unsigned long smi_data_buf_size; 50static unsigned long smi_data_buf_size;
50static u32 smi_data_buf_phys_addr; 51static u32 smi_data_buf_phys_addr;
51static DECLARE_MUTEX(smi_data_lock); 52static DEFINE_MUTEX(smi_data_lock);
52 53
53static unsigned int host_control_action; 54static unsigned int host_control_action;
54static unsigned int host_control_smi_type; 55static unsigned int host_control_smi_type;
@@ -139,9 +140,9 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
139 buf_size = simple_strtoul(buf, NULL, 10); 140 buf_size = simple_strtoul(buf, NULL, 10);
140 141
141 /* make sure SMI data buffer is at least buf_size */ 142 /* make sure SMI data buffer is at least buf_size */
142 down(&smi_data_lock); 143 mutex_lock(&smi_data_lock);
143 ret = smi_data_buf_realloc(buf_size); 144 ret = smi_data_buf_realloc(buf_size);
144 up(&smi_data_lock); 145 mutex_unlock(&smi_data_lock);
145 if (ret) 146 if (ret)
146 return ret; 147 return ret;
147 148
@@ -154,7 +155,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos,
154 size_t max_read; 155 size_t max_read;
155 ssize_t ret; 156 ssize_t ret;
156 157
157 down(&smi_data_lock); 158 mutex_lock(&smi_data_lock);
158 159
159 if (pos >= smi_data_buf_size) { 160 if (pos >= smi_data_buf_size) {
160 ret = 0; 161 ret = 0;
@@ -165,7 +166,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos,
165 ret = min(max_read, count); 166 ret = min(max_read, count);
166 memcpy(buf, smi_data_buf + pos, ret); 167 memcpy(buf, smi_data_buf + pos, ret);
167out: 168out:
168 up(&smi_data_lock); 169 mutex_unlock(&smi_data_lock);
169 return ret; 170 return ret;
170} 171}
171 172
@@ -174,7 +175,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos,
174{ 175{
175 ssize_t ret; 176 ssize_t ret;
176 177
177 down(&smi_data_lock); 178 mutex_lock(&smi_data_lock);
178 179
179 ret = smi_data_buf_realloc(pos + count); 180 ret = smi_data_buf_realloc(pos + count);
180 if (ret) 181 if (ret)
@@ -183,7 +184,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos,
183 memcpy(smi_data_buf + pos, buf, count); 184 memcpy(smi_data_buf + pos, buf, count);
184 ret = count; 185 ret = count;
185out: 186out:
186 up(&smi_data_lock); 187 mutex_unlock(&smi_data_lock);
187 return ret; 188 return ret;
188} 189}
189 190
@@ -201,9 +202,9 @@ static ssize_t host_control_action_store(struct device *dev,
201 ssize_t ret; 202 ssize_t ret;
202 203
203 /* make sure buffer is available for host control command */ 204 /* make sure buffer is available for host control command */
204 down(&smi_data_lock); 205 mutex_lock(&smi_data_lock);
205 ret = smi_data_buf_realloc(sizeof(struct apm_cmd)); 206 ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
206 up(&smi_data_lock); 207 mutex_unlock(&smi_data_lock);
207 if (ret) 208 if (ret)
208 return ret; 209 return ret;
209 210
@@ -302,7 +303,7 @@ static ssize_t smi_request_store(struct device *dev,
302 unsigned long val = simple_strtoul(buf, NULL, 10); 303 unsigned long val = simple_strtoul(buf, NULL, 10);
303 ssize_t ret; 304 ssize_t ret;
304 305
305 down(&smi_data_lock); 306 mutex_lock(&smi_data_lock);
306 307
307 if (smi_data_buf_size < sizeof(struct smi_cmd)) { 308 if (smi_data_buf_size < sizeof(struct smi_cmd)) {
308 ret = -ENODEV; 309 ret = -ENODEV;
@@ -334,7 +335,7 @@ static ssize_t smi_request_store(struct device *dev,
334 } 335 }
335 336
336out: 337out:
337 up(&smi_data_lock); 338 mutex_unlock(&smi_data_lock);
338 return ret; 339 return ret;
339} 340}
340 341
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 3325660f7248..c7671e188017 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -313,6 +313,7 @@
313#include <linux/cdrom.h> 313#include <linux/cdrom.h>
314#include <linux/ide.h> 314#include <linux/ide.h>
315#include <linux/completion.h> 315#include <linux/completion.h>
316#include <linux/mutex.h>
316 317
317#include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */ 318#include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */
318 319
@@ -324,7 +325,7 @@
324 325
325#include "ide-cd.h" 326#include "ide-cd.h"
326 327
327static DECLARE_MUTEX(idecd_ref_sem); 328static DEFINE_MUTEX(idecd_ref_mutex);
328 329
329#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref) 330#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref)
330 331
@@ -335,11 +336,11 @@ static struct cdrom_info *ide_cd_get(struct gendisk *disk)
335{ 336{
336 struct cdrom_info *cd = NULL; 337 struct cdrom_info *cd = NULL;
337 338
338 down(&idecd_ref_sem); 339 mutex_lock(&idecd_ref_mutex);
339 cd = ide_cd_g(disk); 340 cd = ide_cd_g(disk);
340 if (cd) 341 if (cd)
341 kref_get(&cd->kref); 342 kref_get(&cd->kref);
342 up(&idecd_ref_sem); 343 mutex_unlock(&idecd_ref_mutex);
343 return cd; 344 return cd;
344} 345}
345 346
@@ -347,9 +348,9 @@ static void ide_cd_release(struct kref *);
347 348
348static void ide_cd_put(struct cdrom_info *cd) 349static void ide_cd_put(struct cdrom_info *cd)
349{ 350{
350 down(&idecd_ref_sem); 351 mutex_lock(&idecd_ref_mutex);
351 kref_put(&cd->kref, ide_cd_release); 352 kref_put(&cd->kref, ide_cd_release);
352 up(&idecd_ref_sem); 353 mutex_unlock(&idecd_ref_mutex);
353} 354}
354 355
355/**************************************************************************** 356/****************************************************************************
@@ -2471,52 +2472,6 @@ static int ide_cdrom_packet(struct cdrom_device_info *cdi,
2471} 2472}
2472 2473
2473static 2474static
2474int ide_cdrom_dev_ioctl (struct cdrom_device_info *cdi,
2475 unsigned int cmd, unsigned long arg)
2476{
2477 struct packet_command cgc;
2478 char buffer[16];
2479 int stat;
2480
2481 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
2482
2483 /* These will be moved into the Uniform layer shortly... */
2484 switch (cmd) {
2485 case CDROMSETSPINDOWN: {
2486 char spindown;
2487
2488 if (copy_from_user(&spindown, (void __user *) arg, sizeof(char)))
2489 return -EFAULT;
2490
2491 if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0)))
2492 return stat;
2493
2494 buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f);
2495
2496 return cdrom_mode_select(cdi, &cgc);
2497 }
2498
2499 case CDROMGETSPINDOWN: {
2500 char spindown;
2501
2502 if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0)))
2503 return stat;
2504
2505 spindown = buffer[11] & 0x0f;
2506
2507 if (copy_to_user((void __user *) arg, &spindown, sizeof (char)))
2508 return -EFAULT;
2509
2510 return 0;
2511 }
2512
2513 default:
2514 return -EINVAL;
2515 }
2516
2517}
2518
2519static
2520int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi, 2475int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
2521 unsigned int cmd, void *arg) 2476 unsigned int cmd, void *arg)
2522 2477
@@ -2852,12 +2807,11 @@ static struct cdrom_device_ops ide_cdrom_dops = {
2852 .get_mcn = ide_cdrom_get_mcn, 2807 .get_mcn = ide_cdrom_get_mcn,
2853 .reset = ide_cdrom_reset, 2808 .reset = ide_cdrom_reset,
2854 .audio_ioctl = ide_cdrom_audio_ioctl, 2809 .audio_ioctl = ide_cdrom_audio_ioctl,
2855 .dev_ioctl = ide_cdrom_dev_ioctl,
2856 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | 2810 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
2857 CDC_SELECT_SPEED | CDC_SELECT_DISC | 2811 CDC_SELECT_SPEED | CDC_SELECT_DISC |
2858 CDC_MULTI_SESSION | CDC_MCN | 2812 CDC_MULTI_SESSION | CDC_MCN |
2859 CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | 2813 CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET |
2860 CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R | 2814 CDC_DRIVE_STATUS | CDC_CD_R |
2861 CDC_CD_RW | CDC_DVD | CDC_DVD_R| CDC_DVD_RAM | 2815 CDC_CD_RW | CDC_DVD | CDC_DVD_R| CDC_DVD_RAM |
2862 CDC_GENERIC_PACKET | CDC_MO_DRIVE | CDC_MRW | 2816 CDC_GENERIC_PACKET | CDC_MO_DRIVE | CDC_MRW |
2863 CDC_MRW_W | CDC_RAM, 2817 CDC_MRW_W | CDC_RAM,
@@ -3367,6 +3321,45 @@ static int idecd_release(struct inode * inode, struct file * file)
3367 return 0; 3321 return 0;
3368} 3322}
3369 3323
3324static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg)
3325{
3326 struct packet_command cgc;
3327 char buffer[16];
3328 int stat;
3329 char spindown;
3330
3331 if (copy_from_user(&spindown, (void __user *)arg, sizeof(char)))
3332 return -EFAULT;
3333
3334 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
3335
3336 stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
3337 if (stat)
3338 return stat;
3339
3340 buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f);
3341 return cdrom_mode_select(cdi, &cgc);
3342}
3343
3344static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
3345{
3346 struct packet_command cgc;
3347 char buffer[16];
3348 int stat;
3349 char spindown;
3350
3351 init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
3352
3353 stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
3354 if (stat)
3355 return stat;
3356
3357 spindown = buffer[11] & 0x0f;
3358 if (copy_to_user((void __user *)arg, &spindown, sizeof (char)))
3359 return -EFAULT;
3360 return 0;
3361}
3362
3370static int idecd_ioctl (struct inode *inode, struct file *file, 3363static int idecd_ioctl (struct inode *inode, struct file *file,
3371 unsigned int cmd, unsigned long arg) 3364 unsigned int cmd, unsigned long arg)
3372{ 3365{
@@ -3374,7 +3367,16 @@ static int idecd_ioctl (struct inode *inode, struct file *file,
3374 struct cdrom_info *info = ide_cd_g(bdev->bd_disk); 3367 struct cdrom_info *info = ide_cd_g(bdev->bd_disk);
3375 int err; 3368 int err;
3376 3369
3377 err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); 3370 switch (cmd) {
3371 case CDROMSETSPINDOWN:
3372 return idecd_set_spindown(&info->devinfo, arg);
3373 case CDROMGETSPINDOWN:
3374 return idecd_get_spindown(&info->devinfo, arg);
3375 default:
3376 break;
3377 }
3378
3379 err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
3378 if (err == -EINVAL) 3380 if (err == -EINVAL)
3379 err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg); 3381 err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg);
3380 3382
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 09086b8b6486..e238b7da824b 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -60,6 +60,7 @@
60#include <linux/genhd.h> 60#include <linux/genhd.h>
61#include <linux/slab.h> 61#include <linux/slab.h>
62#include <linux/delay.h> 62#include <linux/delay.h>
63#include <linux/mutex.h>
63 64
64#define _IDE_DISK 65#define _IDE_DISK
65 66
@@ -78,7 +79,7 @@ struct ide_disk_obj {
78 struct kref kref; 79 struct kref kref;
79}; 80};
80 81
81static DECLARE_MUTEX(idedisk_ref_sem); 82static DEFINE_MUTEX(idedisk_ref_mutex);
82 83
83#define to_ide_disk(obj) container_of(obj, struct ide_disk_obj, kref) 84#define to_ide_disk(obj) container_of(obj, struct ide_disk_obj, kref)
84 85
@@ -89,11 +90,11 @@ static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
89{ 90{
90 struct ide_disk_obj *idkp = NULL; 91 struct ide_disk_obj *idkp = NULL;
91 92
92 down(&idedisk_ref_sem); 93 mutex_lock(&idedisk_ref_mutex);
93 idkp = ide_disk_g(disk); 94 idkp = ide_disk_g(disk);
94 if (idkp) 95 if (idkp)
95 kref_get(&idkp->kref); 96 kref_get(&idkp->kref);
96 up(&idedisk_ref_sem); 97 mutex_unlock(&idedisk_ref_mutex);
97 return idkp; 98 return idkp;
98} 99}
99 100
@@ -101,9 +102,9 @@ static void ide_disk_release(struct kref *);
101 102
102static void ide_disk_put(struct ide_disk_obj *idkp) 103static void ide_disk_put(struct ide_disk_obj *idkp)
103{ 104{
104 down(&idedisk_ref_sem); 105 mutex_lock(&idedisk_ref_mutex);
105 kref_put(&idkp->kref, ide_disk_release); 106 kref_put(&idkp->kref, ide_disk_release);
106 up(&idedisk_ref_sem); 107 mutex_unlock(&idedisk_ref_mutex);
107} 108}
108 109
109/* 110/*
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 1f8db9ac05d1..a53e3ce4a142 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -98,6 +98,7 @@
98#include <linux/cdrom.h> 98#include <linux/cdrom.h>
99#include <linux/ide.h> 99#include <linux/ide.h>
100#include <linux/bitops.h> 100#include <linux/bitops.h>
101#include <linux/mutex.h>
101 102
102#include <asm/byteorder.h> 103#include <asm/byteorder.h>
103#include <asm/irq.h> 104#include <asm/irq.h>
@@ -517,7 +518,7 @@ typedef struct {
517 u8 reserved[4]; 518 u8 reserved[4];
518} idefloppy_mode_parameter_header_t; 519} idefloppy_mode_parameter_header_t;
519 520
520static DECLARE_MUTEX(idefloppy_ref_sem); 521static DEFINE_MUTEX(idefloppy_ref_mutex);
521 522
522#define to_ide_floppy(obj) container_of(obj, struct ide_floppy_obj, kref) 523#define to_ide_floppy(obj) container_of(obj, struct ide_floppy_obj, kref)
523 524
@@ -528,11 +529,11 @@ static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk)
528{ 529{
529 struct ide_floppy_obj *floppy = NULL; 530 struct ide_floppy_obj *floppy = NULL;
530 531
531 down(&idefloppy_ref_sem); 532 mutex_lock(&idefloppy_ref_mutex);
532 floppy = ide_floppy_g(disk); 533 floppy = ide_floppy_g(disk);
533 if (floppy) 534 if (floppy)
534 kref_get(&floppy->kref); 535 kref_get(&floppy->kref);
535 up(&idefloppy_ref_sem); 536 mutex_unlock(&idefloppy_ref_mutex);
536 return floppy; 537 return floppy;
537} 538}
538 539
@@ -540,9 +541,9 @@ static void ide_floppy_release(struct kref *);
540 541
541static void ide_floppy_put(struct ide_floppy_obj *floppy) 542static void ide_floppy_put(struct ide_floppy_obj *floppy)
542{ 543{
543 down(&idefloppy_ref_sem); 544 mutex_lock(&idefloppy_ref_mutex);
544 kref_put(&floppy->kref, ide_floppy_release); 545 kref_put(&floppy->kref, ide_floppy_release);
545 up(&idefloppy_ref_sem); 546 mutex_unlock(&idefloppy_ref_mutex);
546} 547}
547 548
548/* 549/*
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 0101d0def7c5..ebc59064b475 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -443,6 +443,7 @@
443#include <linux/smp_lock.h> 443#include <linux/smp_lock.h>
444#include <linux/completion.h> 444#include <linux/completion.h>
445#include <linux/bitops.h> 445#include <linux/bitops.h>
446#include <linux/mutex.h>
446 447
447#include <asm/byteorder.h> 448#include <asm/byteorder.h>
448#include <asm/irq.h> 449#include <asm/irq.h>
@@ -1011,7 +1012,7 @@ typedef struct ide_tape_obj {
1011 int debug_level; 1012 int debug_level;
1012} idetape_tape_t; 1013} idetape_tape_t;
1013 1014
1014static DECLARE_MUTEX(idetape_ref_sem); 1015static DEFINE_MUTEX(idetape_ref_mutex);
1015 1016
1016static struct class *idetape_sysfs_class; 1017static struct class *idetape_sysfs_class;
1017 1018
@@ -1024,11 +1025,11 @@ static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
1024{ 1025{
1025 struct ide_tape_obj *tape = NULL; 1026 struct ide_tape_obj *tape = NULL;
1026 1027
1027 down(&idetape_ref_sem); 1028 mutex_lock(&idetape_ref_mutex);
1028 tape = ide_tape_g(disk); 1029 tape = ide_tape_g(disk);
1029 if (tape) 1030 if (tape)
1030 kref_get(&tape->kref); 1031 kref_get(&tape->kref);
1031 up(&idetape_ref_sem); 1032 mutex_unlock(&idetape_ref_mutex);
1032 return tape; 1033 return tape;
1033} 1034}
1034 1035
@@ -1036,9 +1037,9 @@ static void ide_tape_release(struct kref *);
1036 1037
1037static void ide_tape_put(struct ide_tape_obj *tape) 1038static void ide_tape_put(struct ide_tape_obj *tape)
1038{ 1039{
1039 down(&idetape_ref_sem); 1040 mutex_lock(&idetape_ref_mutex);
1040 kref_put(&tape->kref, ide_tape_release); 1041 kref_put(&tape->kref, ide_tape_release);
1041 up(&idetape_ref_sem); 1042 mutex_unlock(&idetape_ref_mutex);
1042} 1043}
1043 1044
1044/* 1045/*
@@ -1290,11 +1291,11 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
1290{ 1291{
1291 struct ide_tape_obj *tape = NULL; 1292 struct ide_tape_obj *tape = NULL;
1292 1293
1293 down(&idetape_ref_sem); 1294 mutex_lock(&idetape_ref_mutex);
1294 tape = idetape_devs[i]; 1295 tape = idetape_devs[i];
1295 if (tape) 1296 if (tape)
1296 kref_get(&tape->kref); 1297 kref_get(&tape->kref);
1297 up(&idetape_ref_sem); 1298 mutex_unlock(&idetape_ref_mutex);
1298 return tape; 1299 return tape;
1299} 1300}
1300 1301
@@ -4870,11 +4871,11 @@ static int ide_tape_probe(ide_drive_t *drive)
4870 4871
4871 drive->driver_data = tape; 4872 drive->driver_data = tape;
4872 4873
4873 down(&idetape_ref_sem); 4874 mutex_lock(&idetape_ref_mutex);
4874 for (minor = 0; idetape_devs[minor]; minor++) 4875 for (minor = 0; idetape_devs[minor]; minor++)
4875 ; 4876 ;
4876 idetape_devs[minor] = tape; 4877 idetape_devs[minor] = tape;
4877 up(&idetape_ref_sem); 4878 mutex_unlock(&idetape_ref_mutex);
4878 4879
4879 idetape_setup(drive, tape, minor); 4880 idetape_setup(drive, tape, minor);
4880 4881
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index feec40cf5900..8c4fcb9027b3 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -32,6 +32,7 @@
32#ifdef CONFIG_AVMB1_COMPAT 32#ifdef CONFIG_AVMB1_COMPAT
33#include <linux/b1lli.h> 33#include <linux/b1lli.h>
34#endif 34#endif
35#include <linux/mutex.h>
35 36
36static char *revision = "$Revision: 1.1.2.8 $"; 37static char *revision = "$Revision: 1.1.2.8 $";
37 38
@@ -66,7 +67,7 @@ LIST_HEAD(capi_drivers);
66DEFINE_RWLOCK(capi_drivers_list_lock); 67DEFINE_RWLOCK(capi_drivers_list_lock);
67 68
68static DEFINE_RWLOCK(application_lock); 69static DEFINE_RWLOCK(application_lock);
69static DECLARE_MUTEX(controller_sem); 70static DEFINE_MUTEX(controller_mutex);
70 71
71struct capi20_appl *capi_applications[CAPI_MAXAPPL]; 72struct capi20_appl *capi_applications[CAPI_MAXAPPL];
72struct capi_ctr *capi_cards[CAPI_MAXCONTR]; 73struct capi_ctr *capi_cards[CAPI_MAXCONTR];
@@ -395,20 +396,20 @@ attach_capi_ctr(struct capi_ctr *card)
395{ 396{
396 int i; 397 int i;
397 398
398 down(&controller_sem); 399 mutex_lock(&controller_mutex);
399 400
400 for (i = 0; i < CAPI_MAXCONTR; i++) { 401 for (i = 0; i < CAPI_MAXCONTR; i++) {
401 if (capi_cards[i] == NULL) 402 if (capi_cards[i] == NULL)
402 break; 403 break;
403 } 404 }
404 if (i == CAPI_MAXCONTR) { 405 if (i == CAPI_MAXCONTR) {
405 up(&controller_sem); 406 mutex_unlock(&controller_mutex);
406 printk(KERN_ERR "kcapi: out of controller slots\n"); 407 printk(KERN_ERR "kcapi: out of controller slots\n");
407 return -EBUSY; 408 return -EBUSY;
408 } 409 }
409 capi_cards[i] = card; 410 capi_cards[i] = card;
410 411
411 up(&controller_sem); 412 mutex_unlock(&controller_mutex);
412 413
413 card->nrecvctlpkt = 0; 414 card->nrecvctlpkt = 0;
414 card->nrecvdatapkt = 0; 415 card->nrecvdatapkt = 0;
@@ -531,13 +532,13 @@ u16 capi20_register(struct capi20_appl *ap)
531 532
532 write_unlock_irqrestore(&application_lock, flags); 533 write_unlock_irqrestore(&application_lock, flags);
533 534
534 down(&controller_sem); 535 mutex_lock(&controller_mutex);
535 for (i = 0; i < CAPI_MAXCONTR; i++) { 536 for (i = 0; i < CAPI_MAXCONTR; i++) {
536 if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) 537 if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING)
537 continue; 538 continue;
538 register_appl(capi_cards[i], applid, &ap->rparam); 539 register_appl(capi_cards[i], applid, &ap->rparam);
539 } 540 }
540 up(&controller_sem); 541 mutex_unlock(&controller_mutex);
541 542
542 if (showcapimsgs & 1) { 543 if (showcapimsgs & 1) {
543 printk(KERN_DEBUG "kcapi: appl %d up\n", applid); 544 printk(KERN_DEBUG "kcapi: appl %d up\n", applid);
@@ -560,13 +561,13 @@ u16 capi20_release(struct capi20_appl *ap)
560 capi_applications[ap->applid - 1] = NULL; 561 capi_applications[ap->applid - 1] = NULL;
561 write_unlock_irqrestore(&application_lock, flags); 562 write_unlock_irqrestore(&application_lock, flags);
562 563
563 down(&controller_sem); 564 mutex_lock(&controller_mutex);
564 for (i = 0; i < CAPI_MAXCONTR; i++) { 565 for (i = 0; i < CAPI_MAXCONTR; i++) {
565 if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) 566 if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING)
566 continue; 567 continue;
567 release_appl(capi_cards[i], ap->applid); 568 release_appl(capi_cards[i], ap->applid);
568 } 569 }
569 up(&controller_sem); 570 mutex_unlock(&controller_mutex);
570 571
571 flush_scheduled_work(); 572 flush_scheduled_work();
572 skb_queue_purge(&ap->recv_queue); 573 skb_queue_purge(&ap->recv_queue);
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index df9d65201819..27332506f9f7 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -25,7 +25,6 @@
25#include <linux/workqueue.h> 25#include <linux/workqueue.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#define HISAX_STATUS_BUFSIZE 4096 27#define HISAX_STATUS_BUFSIZE 4096
28#define INCLUDE_INLINE_FUNCS
29 28
30/* 29/*
31 * This structure array contains one entry per card. An entry looks 30 * This structure array contains one entry per card. An entry looks
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 110e9fd669c5..f8ca4b323331 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -108,7 +108,6 @@ static const char *ITACVer[] =
108#define ELSA_ASSIGN 4 108#define ELSA_ASSIGN 4
109 109
110#define RS_ISR_PASS_LIMIT 256 110#define RS_ISR_PASS_LIMIT 256
111#define _INLINE_ inline
112#define FLG_MODEM_ACTIVE 1 111#define FLG_MODEM_ACTIVE 1
113/* IPAC AUX */ 112/* IPAC AUX */
114#define ELSA_IPAC_LINE_LED 0x40 /* Bit 6 Gelbe LED */ 113#define ELSA_IPAC_LINE_LED 0x40 /* Bit 6 Gelbe LED */
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 690a1aae0b34..0c13795dca38 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -172,11 +172,9 @@ static struct net_device_stats *get_stats(struct net_device *dev)
172 172
173 memset(stats, 0, sizeof(struct net_device_stats)); 173 memset(stats, 0, sizeof(struct net_device_stats));
174 174
175 for (i=0; i < NR_CPUS; i++) { 175 for_each_cpu(i) {
176 struct net_device_stats *lb_stats; 176 struct net_device_stats *lb_stats;
177 177
178 if (!cpu_possible(i))
179 continue;
180 lb_stats = &per_cpu(loopback_stats, i); 178 lb_stats = &per_cpu(loopback_stats, i);
181 stats->rx_bytes += lb_stats->rx_bytes; 179 stats->rx_bytes += lb_stats->rx_bytes;
182 stats->tx_bytes += lb_stats->tx_bytes; 180 stats->tx_bytes += lb_stats->tx_bytes;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index f608c12e3e8b..b2073fce8216 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
46#include <linux/rwsem.h> 46#include <linux/rwsem.h>
47#include <linux/stddef.h> 47#include <linux/stddef.h>
48#include <linux/device.h> 48#include <linux/device.h>
49#include <linux/mutex.h>
49#include <net/slhc_vj.h> 50#include <net/slhc_vj.h>
50#include <asm/atomic.h> 51#include <asm/atomic.h>
51 52
@@ -198,11 +199,11 @@ static unsigned int cardmap_find_first_free(struct cardmap *map);
198static void cardmap_destroy(struct cardmap **map); 199static void cardmap_destroy(struct cardmap **map);
199 200
200/* 201/*
201 * all_ppp_sem protects the all_ppp_units mapping. 202 * all_ppp_mutex protects the all_ppp_units mapping.
202 * It also ensures that finding a ppp unit in the all_ppp_units map 203 * It also ensures that finding a ppp unit in the all_ppp_units map
203 * and updating its file.refcnt field is atomic. 204 * and updating its file.refcnt field is atomic.
204 */ 205 */
205static DECLARE_MUTEX(all_ppp_sem); 206static DEFINE_MUTEX(all_ppp_mutex);
206static struct cardmap *all_ppp_units; 207static struct cardmap *all_ppp_units;
207static atomic_t ppp_unit_count = ATOMIC_INIT(0); 208static atomic_t ppp_unit_count = ATOMIC_INIT(0);
208 209
@@ -804,7 +805,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
804 /* Attach to an existing ppp unit */ 805 /* Attach to an existing ppp unit */
805 if (get_user(unit, p)) 806 if (get_user(unit, p))
806 break; 807 break;
807 down(&all_ppp_sem); 808 mutex_lock(&all_ppp_mutex);
808 err = -ENXIO; 809 err = -ENXIO;
809 ppp = ppp_find_unit(unit); 810 ppp = ppp_find_unit(unit);
810 if (ppp != 0) { 811 if (ppp != 0) {
@@ -812,7 +813,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
812 file->private_data = &ppp->file; 813 file->private_data = &ppp->file;
813 err = 0; 814 err = 0;
814 } 815 }
815 up(&all_ppp_sem); 816 mutex_unlock(&all_ppp_mutex);
816 break; 817 break;
817 818
818 case PPPIOCATTCHAN: 819 case PPPIOCATTCHAN:
@@ -2446,7 +2447,7 @@ ppp_create_interface(int unit, int *retp)
2446 dev->do_ioctl = ppp_net_ioctl; 2447 dev->do_ioctl = ppp_net_ioctl;
2447 2448
2448 ret = -EEXIST; 2449 ret = -EEXIST;
2449 down(&all_ppp_sem); 2450 mutex_lock(&all_ppp_mutex);
2450 if (unit < 0) 2451 if (unit < 0)
2451 unit = cardmap_find_first_free(all_ppp_units); 2452 unit = cardmap_find_first_free(all_ppp_units);
2452 else if (cardmap_get(all_ppp_units, unit) != NULL) 2453 else if (cardmap_get(all_ppp_units, unit) != NULL)
@@ -2465,12 +2466,12 @@ ppp_create_interface(int unit, int *retp)
2465 2466
2466 atomic_inc(&ppp_unit_count); 2467 atomic_inc(&ppp_unit_count);
2467 cardmap_set(&all_ppp_units, unit, ppp); 2468 cardmap_set(&all_ppp_units, unit, ppp);
2468 up(&all_ppp_sem); 2469 mutex_unlock(&all_ppp_mutex);
2469 *retp = 0; 2470 *retp = 0;
2470 return ppp; 2471 return ppp;
2471 2472
2472out2: 2473out2:
2473 up(&all_ppp_sem); 2474 mutex_unlock(&all_ppp_mutex);
2474 free_netdev(dev); 2475 free_netdev(dev);
2475out1: 2476out1:
2476 kfree(ppp); 2477 kfree(ppp);
@@ -2500,7 +2501,7 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2500{ 2501{
2501 struct net_device *dev; 2502 struct net_device *dev;
2502 2503
2503 down(&all_ppp_sem); 2504 mutex_lock(&all_ppp_mutex);
2504 ppp_lock(ppp); 2505 ppp_lock(ppp);
2505 dev = ppp->dev; 2506 dev = ppp->dev;
2506 ppp->dev = NULL; 2507 ppp->dev = NULL;
@@ -2514,7 +2515,7 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2514 ppp->file.dead = 1; 2515 ppp->file.dead = 1;
2515 ppp->owner = NULL; 2516 ppp->owner = NULL;
2516 wake_up_interruptible(&ppp->file.rwait); 2517 wake_up_interruptible(&ppp->file.rwait);
2517 up(&all_ppp_sem); 2518 mutex_unlock(&all_ppp_mutex);
2518} 2519}
2519 2520
2520/* 2521/*
@@ -2556,7 +2557,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
2556 2557
2557/* 2558/*
2558 * Locate an existing ppp unit. 2559 * Locate an existing ppp unit.
2559 * The caller should have locked the all_ppp_sem. 2560 * The caller should have locked the all_ppp_mutex.
2560 */ 2561 */
2561static struct ppp * 2562static struct ppp *
2562ppp_find_unit(int unit) 2563ppp_find_unit(int unit)
@@ -2601,7 +2602,7 @@ ppp_connect_channel(struct channel *pch, int unit)
2601 int ret = -ENXIO; 2602 int ret = -ENXIO;
2602 int hdrlen; 2603 int hdrlen;
2603 2604
2604 down(&all_ppp_sem); 2605 mutex_lock(&all_ppp_mutex);
2605 ppp = ppp_find_unit(unit); 2606 ppp = ppp_find_unit(unit);
2606 if (ppp == 0) 2607 if (ppp == 0)
2607 goto out; 2608 goto out;
@@ -2626,7 +2627,7 @@ ppp_connect_channel(struct channel *pch, int unit)
2626 outl: 2627 outl:
2627 write_unlock_bh(&pch->upl); 2628 write_unlock_bh(&pch->upl);
2628 out: 2629 out:
2629 up(&all_ppp_sem); 2630 mutex_unlock(&all_ppp_mutex);
2630 return ret; 2631 return ret;
2631} 2632}
2632 2633
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 78193e4bbdb5..330d3869b41e 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -38,9 +38,8 @@ void free_cpu_buffers(void)
38{ 38{
39 int i; 39 int i;
40 40
41 for_each_online_cpu(i) { 41 for_each_online_cpu(i)
42 vfree(cpu_buffer[i].buffer); 42 vfree(cpu_buffer[i].buffer);
43 }
44} 43}
45 44
46int alloc_cpu_buffers(void) 45int alloc_cpu_buffers(void)
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index 5e38cd7335f7..c89c98a2cca8 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -448,11 +448,7 @@ pnpbios_parse_resource_option_data(unsigned char * p, unsigned char * end, struc
448 break; 448 break;
449 449
450 case SMALL_TAG_END: 450 case SMALL_TAG_END:
451 if (option_independent != option) 451 return p + 2;
452 printk(KERN_WARNING "PnPBIOS: Missing SMALL_TAG_ENDDEP tag\n");
453 p = p + 2;
454 return (unsigned char *)p;
455 break;
456 452
457 default: /* an unkown tag */ 453 default: /* an unkown tag */
458 len_err: 454 len_err:
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index fafeeae52675..f9930552ab54 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -151,9 +151,9 @@ dasd_ioctl_enable(struct block_device *bdev, int no, long args)
151 return -ENODEV; 151 return -ENODEV;
152 dasd_enable_device(device); 152 dasd_enable_device(device);
153 /* Formatting the dasd device can change the capacity. */ 153 /* Formatting the dasd device can change the capacity. */
154 down(&bdev->bd_sem); 154 mutex_lock(&bdev->bd_mutex);
155 i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); 155 i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9);
156 up(&bdev->bd_sem); 156 mutex_unlock(&bdev->bd_mutex);
157 return 0; 157 return 0;
158} 158}
159 159
@@ -184,9 +184,9 @@ dasd_ioctl_disable(struct block_device *bdev, int no, long args)
184 * Set i_size to zero, since read, write, etc. check against this 184 * Set i_size to zero, since read, write, etc. check against this
185 * value. 185 * value.
186 */ 186 */
187 down(&bdev->bd_sem); 187 mutex_lock(&bdev->bd_mutex);
188 i_size_write(bdev->bd_inode, 0); 188 i_size_write(bdev->bd_inode, 0);
189 up(&bdev->bd_sem); 189 mutex_unlock(&bdev->bd_mutex);
190 return 0; 190 return 0;
191} 191}
192 192
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 0cf0e4c7ac0c..39b760a24241 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -47,6 +47,7 @@
47#include <linux/ide.h> 47#include <linux/ide.h>
48#include <linux/scatterlist.h> 48#include <linux/scatterlist.h>
49#include <linux/delay.h> 49#include <linux/delay.h>
50#include <linux/mutex.h>
50 51
51#include <asm/io.h> 52#include <asm/io.h>
52#include <asm/bitops.h> 53#include <asm/bitops.h>
@@ -109,7 +110,7 @@ typedef struct ide_scsi_obj {
109 unsigned long log; /* log flags */ 110 unsigned long log; /* log flags */
110} idescsi_scsi_t; 111} idescsi_scsi_t;
111 112
112static DECLARE_MUTEX(idescsi_ref_sem); 113static DEFINE_MUTEX(idescsi_ref_mutex);
113 114
114#define ide_scsi_g(disk) \ 115#define ide_scsi_g(disk) \
115 container_of((disk)->private_data, struct ide_scsi_obj, driver) 116 container_of((disk)->private_data, struct ide_scsi_obj, driver)
@@ -118,19 +119,19 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk)
118{ 119{
119 struct ide_scsi_obj *scsi = NULL; 120 struct ide_scsi_obj *scsi = NULL;
120 121
121 down(&idescsi_ref_sem); 122 mutex_lock(&idescsi_ref_mutex);
122 scsi = ide_scsi_g(disk); 123 scsi = ide_scsi_g(disk);
123 if (scsi) 124 if (scsi)
124 scsi_host_get(scsi->host); 125 scsi_host_get(scsi->host);
125 up(&idescsi_ref_sem); 126 mutex_unlock(&idescsi_ref_mutex);
126 return scsi; 127 return scsi;
127} 128}
128 129
129static void ide_scsi_put(struct ide_scsi_obj *scsi) 130static void ide_scsi_put(struct ide_scsi_obj *scsi)
130{ 131{
131 down(&idescsi_ref_sem); 132 mutex_lock(&idescsi_ref_mutex);
132 scsi_host_put(scsi->host); 133 scsi_host_put(scsi->host);
133 up(&idescsi_ref_sem); 134 mutex_unlock(&idescsi_ref_mutex);
134} 135}
135 136
136static inline idescsi_scsi_t *scsihost_to_idescsi(struct Scsi_Host *host) 137static inline idescsi_scsi_t *scsihost_to_idescsi(struct Scsi_Host *host)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f9c1192dc15e..7c80711e18ed 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -71,7 +71,7 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR);
71#define SR_CAPABILITIES \ 71#define SR_CAPABILITIES \
72 (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \ 72 (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \
73 CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \ 73 CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \
74 CDC_PLAY_AUDIO|CDC_RESET|CDC_IOCTLS|CDC_DRIVE_STATUS| \ 74 CDC_PLAY_AUDIO|CDC_RESET|CDC_DRIVE_STATUS| \
75 CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \ 75 CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \
76 CDC_MRW|CDC_MRW_W|CDC_RAM) 76 CDC_MRW|CDC_MRW_W|CDC_RAM)
77 77
@@ -118,7 +118,6 @@ static struct cdrom_device_ops sr_dops = {
118 .get_mcn = sr_get_mcn, 118 .get_mcn = sr_get_mcn,
119 .reset = sr_reset, 119 .reset = sr_reset,
120 .audio_ioctl = sr_audio_ioctl, 120 .audio_ioctl = sr_audio_ioctl,
121 .dev_ioctl = sr_dev_ioctl,
122 .capability = SR_CAPABILITIES, 121 .capability = SR_CAPABILITIES,
123 .generic_packet = sr_packet, 122 .generic_packet = sr_packet,
124}; 123};
@@ -456,17 +455,33 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd,
456{ 455{
457 struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk); 456 struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk);
458 struct scsi_device *sdev = cd->device; 457 struct scsi_device *sdev = cd->device;
458 void __user *argp = (void __user *)arg;
459 int ret;
459 460
460 /* 461 /*
461 * Send SCSI addressing ioctls directly to mid level, send other 462 * Send SCSI addressing ioctls directly to mid level, send other
462 * ioctls to cdrom/block level. 463 * ioctls to cdrom/block level.
463 */ 464 */
464 switch (cmd) { 465 switch (cmd) {
465 case SCSI_IOCTL_GET_IDLUN: 466 case SCSI_IOCTL_GET_IDLUN:
466 case SCSI_IOCTL_GET_BUS_NUMBER: 467 case SCSI_IOCTL_GET_BUS_NUMBER:
467 return scsi_ioctl(sdev, cmd, (void __user *)arg); 468 return scsi_ioctl(sdev, cmd, argp);
468 } 469 }
469 return cdrom_ioctl(file, &cd->cdi, inode, cmd, arg); 470
471 ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg);
 472 if (ret != -ENOSYS)
473 return ret;
474
475 /*
476 * ENODEV means that we didn't recognise the ioctl, or that we
477 * cannot execute it in the current device state. In either
 478 * case fall through to scsi_ioctl, which will return ENODEV again
479 * if it doesn't recognise the ioctl
480 */
481 ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL);
482 if (ret != -ENODEV)
483 return ret;
484 return scsi_ioctl(sdev, cmd, argp);
470} 485}
471 486
472static int sr_block_media_changed(struct gendisk *disk) 487static int sr_block_media_changed(struct gendisk *disk)
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index d2bcd99c272f..d65de9621b27 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -55,7 +55,6 @@ int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
55int sr_reset(struct cdrom_device_info *); 55int sr_reset(struct cdrom_device_info *);
56int sr_select_speed(struct cdrom_device_info *cdi, int speed); 56int sr_select_speed(struct cdrom_device_info *cdi, int speed);
57int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); 57int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
58int sr_dev_ioctl(struct cdrom_device_info *, unsigned int, unsigned long);
59 58
60int sr_is_xa(Scsi_CD *); 59int sr_is_xa(Scsi_CD *);
61 60
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index b65462f76484..d1268cb46837 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -562,22 +562,3 @@ int sr_is_xa(Scsi_CD *cd)
562#endif 562#endif
563 return is_xa; 563 return is_xa;
564} 564}
565
566int sr_dev_ioctl(struct cdrom_device_info *cdi,
567 unsigned int cmd, unsigned long arg)
568{
569 Scsi_CD *cd = cdi->handle;
570 int ret;
571
572 ret = scsi_nonblockable_ioctl(cd->device, cmd,
573 (void __user *)arg, NULL);
574 /*
575 * ENODEV means that we didn't recognise the ioctl, or that we
576 * cannot execute it in the current device state. In either
577 * case fall through to scsi_ioctl, which will return ENDOEV again
578 * if it doesn't recognise the ioctl
579 */
580 if (ret != -ENODEV)
581 return ret;
582 return scsi_ioctl(cd->device, cmd, (void __user *)arg);
583}
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 7f0f35a05dca..b88a7c1158af 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -101,8 +101,6 @@ struct tty_driver *serial_driver;
101 101
102#define RS_ISR_PASS_LIMIT 256 102#define RS_ISR_PASS_LIMIT 256
103 103
104#define _INLINE_ inline
105
106static void change_speed(struct m68k_serial *info); 104static void change_speed(struct m68k_serial *info);
107 105
108/* 106/*
@@ -262,7 +260,7 @@ static void batten_down_hatches(void)
262 /* Drop into the debugger */ 260 /* Drop into the debugger */
263} 261}
264 262
265static _INLINE_ void status_handle(struct m68k_serial *info, unsigned short status) 263static void status_handle(struct m68k_serial *info, unsigned short status)
266{ 264{
267#if 0 265#if 0
268 if(status & DCD) { 266 if(status & DCD) {
@@ -289,7 +287,8 @@ static _INLINE_ void status_handle(struct m68k_serial *info, unsigned short stat
289 return; 287 return;
290} 288}
291 289
292static _INLINE_ void receive_chars(struct m68k_serial *info, struct pt_regs *regs, unsigned short rx) 290static void receive_chars(struct m68k_serial *info, struct pt_regs *regs,
291 unsigned short rx)
293{ 292{
294 struct tty_struct *tty = info->tty; 293 struct tty_struct *tty = info->tty;
295 m68328_uart *uart = &uart_addr[info->line]; 294 m68328_uart *uart = &uart_addr[info->line];
@@ -359,7 +358,7 @@ clear_and_exit:
359 return; 358 return;
360} 359}
361 360
362static _INLINE_ void transmit_chars(struct m68k_serial *info) 361static void transmit_chars(struct m68k_serial *info)
363{ 362{
364 m68328_uart *uart = &uart_addr[info->line]; 363 m68328_uart *uart = &uart_addr[info->line];
365 364
diff --git a/drivers/serial/au1x00_uart.c b/drivers/serial/au1x00_uart.c
index 29f94bbb79be..948880ac5878 100644
--- a/drivers/serial/au1x00_uart.c
+++ b/drivers/serial/au1x00_uart.c
@@ -133,13 +133,12 @@ static const struct serial_uart_config uart_config[PORT_MAX_8250+1] = {
133 { "AU1X00_UART",16, UART_CLEAR_FIFO | UART_USE_FIFO }, 133 { "AU1X00_UART",16, UART_CLEAR_FIFO | UART_USE_FIFO },
134}; 134};
135 135
136static _INLINE_ unsigned int serial_in(struct uart_8250_port *up, int offset) 136static unsigned int serial_in(struct uart_8250_port *up, int offset)
137{ 137{
138 return au_readl((unsigned long)up->port.membase + offset); 138 return au_readl((unsigned long)up->port.membase + offset);
139} 139}
140 140
141static _INLINE_ void 141static void serial_out(struct uart_8250_port *up, int offset, int value)
142serial_out(struct uart_8250_port *up, int offset, int value)
143{ 142{
144 au_writel(value, (unsigned long)up->port.membase + offset); 143 au_writel(value, (unsigned long)up->port.membase + offset);
145} 144}
@@ -237,7 +236,7 @@ static void serial8250_enable_ms(struct uart_port *port)
237 serial_out(up, UART_IER, up->ier); 236 serial_out(up, UART_IER, up->ier);
238} 237}
239 238
240static _INLINE_ void 239static void
241receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs) 240receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs)
242{ 241{
243 struct tty_struct *tty = up->port.info->tty; 242 struct tty_struct *tty = up->port.info->tty;
@@ -312,7 +311,7 @@ receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs)
312 spin_lock(&up->port.lock); 311 spin_lock(&up->port.lock);
313} 312}
314 313
315static _INLINE_ void transmit_chars(struct uart_8250_port *up) 314static void transmit_chars(struct uart_8250_port *up)
316{ 315{
317 struct circ_buf *xmit = &up->port.info->xmit; 316 struct circ_buf *xmit = &up->port.info->xmit;
318 int count; 317 int count;
@@ -346,7 +345,7 @@ static _INLINE_ void transmit_chars(struct uart_8250_port *up)
346 serial8250_stop_tx(&up->port); 345 serial8250_stop_tx(&up->port);
347} 346}
348 347
349static _INLINE_ void check_modem_status(struct uart_8250_port *up) 348static void check_modem_status(struct uart_8250_port *up)
350{ 349{
351 int status; 350 int status;
352 351
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index be12623d8544..89700141f87e 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -481,8 +481,6 @@ static char *serial_version = "$Revision: 1.25 $";
481#include "serial_compat.h" 481#include "serial_compat.h"
482#endif 482#endif
483 483
484#define _INLINE_ inline
485
486struct tty_driver *serial_driver; 484struct tty_driver *serial_driver;
487 485
488/* serial subtype definitions */ 486/* serial subtype definitions */
@@ -591,8 +589,6 @@ static void rs_throttle(struct tty_struct * tty);
591static void rs_wait_until_sent(struct tty_struct *tty, int timeout); 589static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
592static int rs_write(struct tty_struct * tty, int from_user, 590static int rs_write(struct tty_struct * tty, int from_user,
593 const unsigned char *buf, int count); 591 const unsigned char *buf, int count);
594extern _INLINE_ int rs_raw_write(struct tty_struct * tty, int from_user,
595 const unsigned char *buf, int count);
596#ifdef CONFIG_ETRAX_RS485 592#ifdef CONFIG_ETRAX_RS485
597static int e100_write_rs485(struct tty_struct * tty, int from_user, 593static int e100_write_rs485(struct tty_struct * tty, int from_user,
598 const unsigned char *buf, int count); 594 const unsigned char *buf, int count);
@@ -1538,8 +1534,7 @@ e100_enable_rxdma_irq(struct e100_serial *info)
1538 1534
1539/* the tx DMA uses only dma_descr interrupt */ 1535/* the tx DMA uses only dma_descr interrupt */
1540 1536
1541static _INLINE_ void 1537static void e100_disable_txdma_irq(struct e100_serial *info)
1542e100_disable_txdma_irq(struct e100_serial *info)
1543{ 1538{
1544#ifdef SERIAL_DEBUG_INTR 1539#ifdef SERIAL_DEBUG_INTR
1545 printk("txdma_irq(%d): 0\n",info->line); 1540 printk("txdma_irq(%d): 0\n",info->line);
@@ -1548,8 +1543,7 @@ e100_disable_txdma_irq(struct e100_serial *info)
1548 *R_IRQ_MASK2_CLR = info->irq; 1543 *R_IRQ_MASK2_CLR = info->irq;
1549} 1544}
1550 1545
1551static _INLINE_ void 1546static void e100_enable_txdma_irq(struct e100_serial *info)
1552e100_enable_txdma_irq(struct e100_serial *info)
1553{ 1547{
1554#ifdef SERIAL_DEBUG_INTR 1548#ifdef SERIAL_DEBUG_INTR
1555 printk("txdma_irq(%d): 1\n",info->line); 1549 printk("txdma_irq(%d): 1\n",info->line);
@@ -1558,8 +1552,7 @@ e100_enable_txdma_irq(struct e100_serial *info)
1558 *R_IRQ_MASK2_SET = info->irq; 1552 *R_IRQ_MASK2_SET = info->irq;
1559} 1553}
1560 1554
1561static _INLINE_ void 1555static void e100_disable_txdma_channel(struct e100_serial *info)
1562e100_disable_txdma_channel(struct e100_serial *info)
1563{ 1556{
1564 unsigned long flags; 1557 unsigned long flags;
1565 1558
@@ -1599,8 +1592,7 @@ e100_disable_txdma_channel(struct e100_serial *info)
1599} 1592}
1600 1593
1601 1594
1602static _INLINE_ void 1595static void e100_enable_txdma_channel(struct e100_serial *info)
1603e100_enable_txdma_channel(struct e100_serial *info)
1604{ 1596{
1605 unsigned long flags; 1597 unsigned long flags;
1606 1598
@@ -1625,8 +1617,7 @@ e100_enable_txdma_channel(struct e100_serial *info)
1625 restore_flags(flags); 1617 restore_flags(flags);
1626} 1618}
1627 1619
1628static _INLINE_ void 1620static void e100_disable_rxdma_channel(struct e100_serial *info)
1629e100_disable_rxdma_channel(struct e100_serial *info)
1630{ 1621{
1631 unsigned long flags; 1622 unsigned long flags;
1632 1623
@@ -1665,8 +1656,7 @@ e100_disable_rxdma_channel(struct e100_serial *info)
1665} 1656}
1666 1657
1667 1658
1668static _INLINE_ void 1659static void e100_enable_rxdma_channel(struct e100_serial *info)
1669e100_enable_rxdma_channel(struct e100_serial *info)
1670{ 1660{
1671 unsigned long flags; 1661 unsigned long flags;
1672 1662
@@ -1913,9 +1903,7 @@ rs_start(struct tty_struct *tty)
1913 * This routine is used by the interrupt handler to schedule 1903 * This routine is used by the interrupt handler to schedule
1914 * processing in the software interrupt portion of the driver. 1904 * processing in the software interrupt portion of the driver.
1915 */ 1905 */
1916static _INLINE_ void 1906static void rs_sched_event(struct e100_serial *info, int event)
1917rs_sched_event(struct e100_serial *info,
1918 int event)
1919{ 1907{
1920 if (info->event & (1 << event)) 1908 if (info->event & (1 << event))
1921 return; 1909 return;
@@ -2155,8 +2143,9 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl
2155 return 1; 2143 return 1;
2156} 2144}
2157 2145
2158extern _INLINE_ unsigned int 2146static unsigned int handle_descr_data(struct e100_serial *info,
2159handle_descr_data(struct e100_serial *info, struct etrax_dma_descr *descr, unsigned int recvl) 2147 struct etrax_dma_descr *descr,
2148 unsigned int recvl)
2160{ 2149{
2161 struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer; 2150 struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer;
2162 2151
@@ -2182,8 +2171,7 @@ handle_descr_data(struct e100_serial *info, struct etrax_dma_descr *descr, unsig
2182 return recvl; 2171 return recvl;
2183} 2172}
2184 2173
2185static _INLINE_ unsigned int 2174static unsigned int handle_all_descr_data(struct e100_serial *info)
2186handle_all_descr_data(struct e100_serial *info)
2187{ 2175{
2188 struct etrax_dma_descr *descr; 2176 struct etrax_dma_descr *descr;
2189 unsigned int recvl; 2177 unsigned int recvl;
@@ -2230,8 +2218,7 @@ handle_all_descr_data(struct e100_serial *info)
2230 return ret; 2218 return ret;
2231} 2219}
2232 2220
2233static _INLINE_ void 2221static void receive_chars_dma(struct e100_serial *info)
2234receive_chars_dma(struct e100_serial *info)
2235{ 2222{
2236 struct tty_struct *tty; 2223 struct tty_struct *tty;
2237 unsigned char rstat; 2224 unsigned char rstat;
@@ -2292,8 +2279,7 @@ receive_chars_dma(struct e100_serial *info)
2292 *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart); 2279 *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
2293} 2280}
2294 2281
2295static _INLINE_ int 2282static int start_recv_dma(struct e100_serial *info)
2296start_recv_dma(struct e100_serial *info)
2297{ 2283{
2298 struct etrax_dma_descr *descr = info->rec_descr; 2284 struct etrax_dma_descr *descr = info->rec_descr;
2299 struct etrax_recv_buffer *buffer; 2285 struct etrax_recv_buffer *buffer;
@@ -2348,11 +2334,6 @@ start_receive(struct e100_serial *info)
2348} 2334}
2349 2335
2350 2336
2351static _INLINE_ void
2352status_handle(struct e100_serial *info, unsigned short status)
2353{
2354}
2355
2356/* the bits in the MASK2 register are laid out like this: 2337/* the bits in the MASK2 register are laid out like this:
2357 DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR 2338 DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR
2358 where I is the input channel and O is the output channel for the port. 2339 where I is the input channel and O is the output channel for the port.
@@ -2454,8 +2435,7 @@ rec_interrupt(int irq, void *dev_id, struct pt_regs * regs)
2454 return IRQ_RETVAL(handled); 2435 return IRQ_RETVAL(handled);
2455} /* rec_interrupt */ 2436} /* rec_interrupt */
2456 2437
2457static _INLINE_ int 2438static int force_eop_if_needed(struct e100_serial *info)
2458force_eop_if_needed(struct e100_serial *info)
2459{ 2439{
2460 /* We check data_avail bit to determine if data has 2440 /* We check data_avail bit to determine if data has
2461 * arrived since last time 2441 * arrived since last time
@@ -2499,8 +2479,7 @@ force_eop_if_needed(struct e100_serial *info)
2499 return 1; 2479 return 1;
2500} 2480}
2501 2481
2502extern _INLINE_ void 2482static void flush_to_flip_buffer(struct e100_serial *info)
2503flush_to_flip_buffer(struct e100_serial *info)
2504{ 2483{
2505 struct tty_struct *tty; 2484 struct tty_struct *tty;
2506 struct etrax_recv_buffer *buffer; 2485 struct etrax_recv_buffer *buffer;
@@ -2611,8 +2590,7 @@ flush_to_flip_buffer(struct e100_serial *info)
2611 tty_flip_buffer_push(tty); 2590 tty_flip_buffer_push(tty);
2612} 2591}
2613 2592
2614static _INLINE_ void 2593static void check_flush_timeout(struct e100_serial *info)
2615check_flush_timeout(struct e100_serial *info)
2616{ 2594{
2617 /* Flip what we've got (if we can) */ 2595 /* Flip what we've got (if we can) */
2618 flush_to_flip_buffer(info); 2596 flush_to_flip_buffer(info);
@@ -2741,7 +2719,7 @@ TODO: The break will be delayed until an F or V character is received.
2741 2719
2742*/ 2720*/
2743 2721
2744extern _INLINE_ 2722static
2745struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info) 2723struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
2746{ 2724{
2747 unsigned long data_read; 2725 unsigned long data_read;
@@ -2875,8 +2853,7 @@ more_data:
2875 return info; 2853 return info;
2876} 2854}
2877 2855
2878extern _INLINE_ 2856static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
2879struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
2880{ 2857{
2881 unsigned char rstat; 2858 unsigned char rstat;
2882 2859
@@ -2995,7 +2972,7 @@ struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info)
2995 return info; 2972 return info;
2996} /* handle_ser_rx_interrupt */ 2973} /* handle_ser_rx_interrupt */
2997 2974
2998extern _INLINE_ void handle_ser_tx_interrupt(struct e100_serial *info) 2975static void handle_ser_tx_interrupt(struct e100_serial *info)
2999{ 2976{
3000 unsigned long flags; 2977 unsigned long flags;
3001 2978
@@ -3621,9 +3598,8 @@ rs_flush_chars(struct tty_struct *tty)
3621 restore_flags(flags); 3598 restore_flags(flags);
3622} 3599}
3623 3600
3624extern _INLINE_ int 3601static int rs_raw_write(struct tty_struct * tty, int from_user,
3625rs_raw_write(struct tty_struct * tty, int from_user, 3602 const unsigned char *buf, int count)
3626 const unsigned char *buf, int count)
3627{ 3603{
3628 int c, ret = 0; 3604 int c, ret = 0;
3629 struct e100_serial *info = (struct e100_serial *)tty->driver_data; 3605 struct e100_serial *info = (struct e100_serial *)tty->driver_data;
@@ -4710,7 +4686,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
4710 * /proc fs routines.... 4686 * /proc fs routines....
4711 */ 4687 */
4712 4688
4713extern _INLINE_ int line_info(char *buf, struct e100_serial *info) 4689static int line_info(char *buf, struct e100_serial *info)
4714{ 4690{
4715 char stat_buf[30]; 4691 char stat_buf[30];
4716 int ret; 4692 int ret;
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c
index 876bc5e027bb..e9c10c0a30fc 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/serial/m32r_sio.c
@@ -248,17 +248,17 @@ static void sio_error(int *status)
248 248
249#endif /* CONFIG_SERIAL_M32R_PLDSIO */ 249#endif /* CONFIG_SERIAL_M32R_PLDSIO */
250 250
251static _INLINE_ unsigned int sio_in(struct uart_sio_port *up, int offset) 251static unsigned int sio_in(struct uart_sio_port *up, int offset)
252{ 252{
253 return __sio_in(up->port.iobase + offset); 253 return __sio_in(up->port.iobase + offset);
254} 254}
255 255
256static _INLINE_ void sio_out(struct uart_sio_port *up, int offset, int value) 256static void sio_out(struct uart_sio_port *up, int offset, int value)
257{ 257{
258 __sio_out(value, up->port.iobase + offset); 258 __sio_out(value, up->port.iobase + offset);
259} 259}
260 260
261static _INLINE_ unsigned int serial_in(struct uart_sio_port *up, int offset) 261static unsigned int serial_in(struct uart_sio_port *up, int offset)
262{ 262{
263 if (!offset) 263 if (!offset)
264 return 0; 264 return 0;
@@ -266,8 +266,7 @@ static _INLINE_ unsigned int serial_in(struct uart_sio_port *up, int offset)
266 return __sio_in(offset); 266 return __sio_in(offset);
267} 267}
268 268
269static _INLINE_ void 269static void serial_out(struct uart_sio_port *up, int offset, int value)
270serial_out(struct uart_sio_port *up, int offset, int value)
271{ 270{
272 if (!offset) 271 if (!offset)
273 return; 272 return;
@@ -326,8 +325,8 @@ static void m32r_sio_enable_ms(struct uart_port *port)
326 serial_out(up, UART_IER, up->ier); 325 serial_out(up, UART_IER, up->ier);
327} 326}
328 327
329static _INLINE_ void receive_chars(struct uart_sio_port *up, int *status, 328static void receive_chars(struct uart_sio_port *up, int *status,
330 struct pt_regs *regs) 329 struct pt_regs *regs)
331{ 330{
332 struct tty_struct *tty = up->port.info->tty; 331 struct tty_struct *tty = up->port.info->tty;
333 unsigned char ch; 332 unsigned char ch;
@@ -400,7 +399,7 @@ static _INLINE_ void receive_chars(struct uart_sio_port *up, int *status,
400 tty_flip_buffer_push(tty); 399 tty_flip_buffer_push(tty);
401} 400}
402 401
403static _INLINE_ void transmit_chars(struct uart_sio_port *up) 402static void transmit_chars(struct uart_sio_port *up)
404{ 403{
405 struct circ_buf *xmit = &up->port.info->xmit; 404 struct circ_buf *xmit = &up->port.info->xmit;
406 int count; 405 int count;
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 7fc3d3b41d18..9fe2283d91e5 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -102,9 +102,7 @@ struct uart_sunsu_port {
102#endif 102#endif
103}; 103};
104 104
105#define _INLINE_ 105static unsigned int serial_in(struct uart_sunsu_port *up, int offset)
106
107static _INLINE_ unsigned int serial_in(struct uart_sunsu_port *up, int offset)
108{ 106{
109 offset <<= up->port.regshift; 107 offset <<= up->port.regshift;
110 108
@@ -121,8 +119,7 @@ static _INLINE_ unsigned int serial_in(struct uart_sunsu_port *up, int offset)
121 } 119 }
122} 120}
123 121
124static _INLINE_ void 122static void serial_out(struct uart_sunsu_port *up, int offset, int value)
125serial_out(struct uart_sunsu_port *up, int offset, int value)
126{ 123{
127#ifndef CONFIG_SPARC64 124#ifndef CONFIG_SPARC64
128 /* 125 /*
@@ -316,7 +313,7 @@ static void sunsu_enable_ms(struct uart_port *port)
316 spin_unlock_irqrestore(&up->port.lock, flags); 313 spin_unlock_irqrestore(&up->port.lock, flags);
317} 314}
318 315
319static _INLINE_ struct tty_struct * 316static struct tty_struct *
320receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs *regs) 317receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs *regs)
321{ 318{
322 struct tty_struct *tty = up->port.info->tty; 319 struct tty_struct *tty = up->port.info->tty;
@@ -395,7 +392,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs
395 return tty; 392 return tty;
396} 393}
397 394
398static _INLINE_ void transmit_chars(struct uart_sunsu_port *up) 395static void transmit_chars(struct uart_sunsu_port *up)
399{ 396{
400 struct circ_buf *xmit = &up->port.info->xmit; 397 struct circ_buf *xmit = &up->port.info->xmit;
401 int count; 398 int count;
@@ -431,7 +428,7 @@ static _INLINE_ void transmit_chars(struct uart_sunsu_port *up)
431 __stop_tx(up); 428 __stop_tx(up);
432} 429}
433 430
434static _INLINE_ void check_modem_status(struct uart_sunsu_port *up) 431static void check_modem_status(struct uart_sunsu_port *up)
435{ 432{
436 int status; 433 int status;
437 434
diff --git a/drivers/tc/zs.c b/drivers/tc/zs.c
index 6756d0fab6fe..2dffa8e303b2 100644
--- a/drivers/tc/zs.c
+++ b/drivers/tc/zs.c
@@ -186,8 +186,6 @@ static struct tty_driver *serial_driver;
186#define RS_STROBE_TIME 10 186#define RS_STROBE_TIME 10
187#define RS_ISR_PASS_LIMIT 256 187#define RS_ISR_PASS_LIMIT 256
188 188
189#define _INLINE_ inline
190
191static void probe_sccs(void); 189static void probe_sccs(void);
192static void change_speed(struct dec_serial *info); 190static void change_speed(struct dec_serial *info);
193static void rs_wait_until_sent(struct tty_struct *tty, int timeout); 191static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -344,14 +342,13 @@ static inline void rs_recv_clear(struct dec_zschannel *zsc)
344 * This routine is used by the interrupt handler to schedule 342 * This routine is used by the interrupt handler to schedule
345 * processing in the software interrupt portion of the driver. 343 * processing in the software interrupt portion of the driver.
346 */ 344 */
347static _INLINE_ void rs_sched_event(struct dec_serial *info, int event) 345static void rs_sched_event(struct dec_serial *info, int event)
348{ 346{
349 info->event |= 1 << event; 347 info->event |= 1 << event;
350 tasklet_schedule(&info->tlet); 348 tasklet_schedule(&info->tlet);
351} 349}
352 350
353static _INLINE_ void receive_chars(struct dec_serial *info, 351static void receive_chars(struct dec_serial *info, struct pt_regs *regs)
354 struct pt_regs *regs)
355{ 352{
356 struct tty_struct *tty = info->tty; 353 struct tty_struct *tty = info->tty;
357 unsigned char ch, stat, flag; 354 unsigned char ch, stat, flag;
@@ -441,7 +438,7 @@ static void transmit_chars(struct dec_serial *info)
441 rs_sched_event(info, RS_EVENT_WRITE_WAKEUP); 438 rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
442} 439}
443 440
444static _INLINE_ void status_handle(struct dec_serial *info) 441static void status_handle(struct dec_serial *info)
445{ 442{
446 unsigned char stat; 443 unsigned char stat;
447 444
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index ea1134eb47c8..8e8356c1c229 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -31,6 +31,7 @@
31#include <linux/poll.h> 31#include <linux/poll.h>
32#include <linux/kthread.h> 32#include <linux/kthread.h>
33#include <linux/idr.h> 33#include <linux/idr.h>
34#include <linux/mutex.h>
34 35
35#include "debug.h" 36#include "debug.h"
36#include "v9fs.h" 37#include "v9fs.h"
@@ -110,7 +111,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
110static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); 111static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
111static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16); 112static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);
112 113
113static DECLARE_MUTEX(v9fs_mux_task_lock); 114static DEFINE_MUTEX(v9fs_mux_task_lock);
114static struct workqueue_struct *v9fs_mux_wq; 115static struct workqueue_struct *v9fs_mux_wq;
115 116
116static int v9fs_mux_num; 117static int v9fs_mux_num;
@@ -166,7 +167,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
166 167
167 dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num, 168 dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
168 v9fs_mux_poll_task_num); 169 v9fs_mux_poll_task_num);
169 up(&v9fs_mux_task_lock); 170 mutex_lock(&v9fs_mux_task_lock);
170 171
171 n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1); 172 n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
172 if (n > v9fs_mux_poll_task_num) { 173 if (n > v9fs_mux_poll_task_num) {
@@ -225,7 +226,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
225 } 226 }
226 227
227 v9fs_mux_num++; 228 v9fs_mux_num++;
228 down(&v9fs_mux_task_lock); 229 mutex_unlock(&v9fs_mux_task_lock);
229 230
230 return 0; 231 return 0;
231} 232}
@@ -235,7 +236,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
235 int i; 236 int i;
236 struct v9fs_mux_poll_task *vpt; 237 struct v9fs_mux_poll_task *vpt;
237 238
238 up(&v9fs_mux_task_lock); 239 mutex_lock(&v9fs_mux_task_lock);
239 vpt = m->poll_task; 240 vpt = m->poll_task;
240 list_del(&m->mux_list); 241 list_del(&m->mux_list);
241 for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { 242 for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
@@ -252,7 +253,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
252 v9fs_mux_poll_task_num--; 253 v9fs_mux_poll_task_num--;
253 } 254 }
254 v9fs_mux_num--; 255 v9fs_mux_num--;
255 down(&v9fs_mux_task_lock); 256 mutex_unlock(&v9fs_mux_task_lock);
256} 257}
257 258
258/** 259/**
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index afebbfde6968..6af10885f9d6 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -19,11 +19,7 @@
19 * 19 *
20 * adfs regular file handling primitives 20 * adfs regular file handling primitives
21 */ 21 */
22#include <linux/errno.h>
23#include <linux/fs.h> 22#include <linux/fs.h>
24#include <linux/fcntl.h>
25#include <linux/time.h>
26#include <linux/stat.h>
27#include <linux/buffer_head.h> /* for file_fsync() */ 23#include <linux/buffer_head.h> /* for file_fsync() */
28#include <linux/adfs_fs.h> 24#include <linux/adfs_fs.h>
29 25
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 385bed09b0d8..f54c5b21f876 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -13,6 +13,7 @@
13/* Internal header file for autofs */ 13/* Internal header file for autofs */
14 14
15#include <linux/auto_fs4.h> 15#include <linux/auto_fs4.h>
16#include <linux/mutex.h>
16#include <linux/list.h> 17#include <linux/list.h>
17 18
18/* This is the range of ioctl() numbers we claim as ours */ 19/* This is the range of ioctl() numbers we claim as ours */
@@ -102,7 +103,7 @@ struct autofs_sb_info {
102 int reghost_enabled; 103 int reghost_enabled;
103 int needs_reghost; 104 int needs_reghost;
104 struct super_block *sb; 105 struct super_block *sb;
105 struct semaphore wq_sem; 106 struct mutex wq_mutex;
106 spinlock_t fs_lock; 107 spinlock_t fs_lock;
107 struct autofs_wait_queue *queues; /* Wait queue pointer */ 108 struct autofs_wait_queue *queues; /* Wait queue pointer */
108}; 109};
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 2d3082854a29..1ad98d48e550 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -269,7 +269,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
269 sbi->sb = s; 269 sbi->sb = s;
270 sbi->version = 0; 270 sbi->version = 0;
271 sbi->sub_version = 0; 271 sbi->sub_version = 0;
272 init_MUTEX(&sbi->wq_sem); 272 mutex_init(&sbi->wq_mutex);
273 spin_lock_init(&sbi->fs_lock); 273 spin_lock_init(&sbi->fs_lock);
274 sbi->queues = NULL; 274 sbi->queues = NULL;
275 s->s_blocksize = 1024; 275 s->s_blocksize = 1024;
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 394ff36ef8f1..be78e9378c03 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -178,7 +178,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
178 return -ENOENT; 178 return -ENOENT;
179 } 179 }
180 180
181 if (down_interruptible(&sbi->wq_sem)) { 181 if (mutex_lock_interruptible(&sbi->wq_mutex)) {
182 kfree(name); 182 kfree(name);
183 return -EINTR; 183 return -EINTR;
184 } 184 }
@@ -194,7 +194,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
194 /* Can't wait for an expire if there's no mount */ 194 /* Can't wait for an expire if there's no mount */
195 if (notify == NFY_NONE && !d_mountpoint(dentry)) { 195 if (notify == NFY_NONE && !d_mountpoint(dentry)) {
196 kfree(name); 196 kfree(name);
197 up(&sbi->wq_sem); 197 mutex_unlock(&sbi->wq_mutex);
198 return -ENOENT; 198 return -ENOENT;
199 } 199 }
200 200
@@ -202,7 +202,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
202 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); 202 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL);
203 if ( !wq ) { 203 if ( !wq ) {
204 kfree(name); 204 kfree(name);
205 up(&sbi->wq_sem); 205 mutex_unlock(&sbi->wq_mutex);
206 return -ENOMEM; 206 return -ENOMEM;
207 } 207 }
208 208
@@ -218,10 +218,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
218 wq->status = -EINTR; /* Status return if interrupted */ 218 wq->status = -EINTR; /* Status return if interrupted */
219 atomic_set(&wq->wait_ctr, 2); 219 atomic_set(&wq->wait_ctr, 2);
220 atomic_set(&wq->notified, 1); 220 atomic_set(&wq->notified, 1);
221 up(&sbi->wq_sem); 221 mutex_unlock(&sbi->wq_mutex);
222 } else { 222 } else {
223 atomic_inc(&wq->wait_ctr); 223 atomic_inc(&wq->wait_ctr);
224 up(&sbi->wq_sem); 224 mutex_unlock(&sbi->wq_mutex);
225 kfree(name); 225 kfree(name);
226 DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", 226 DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d",
227 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); 227 (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify);
@@ -282,19 +282,19 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok
282{ 282{
283 struct autofs_wait_queue *wq, **wql; 283 struct autofs_wait_queue *wq, **wql;
284 284
285 down(&sbi->wq_sem); 285 mutex_lock(&sbi->wq_mutex);
286 for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { 286 for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) {
287 if ( wq->wait_queue_token == wait_queue_token ) 287 if ( wq->wait_queue_token == wait_queue_token )
288 break; 288 break;
289 } 289 }
290 290
291 if ( !wq ) { 291 if ( !wq ) {
292 up(&sbi->wq_sem); 292 mutex_unlock(&sbi->wq_mutex);
293 return -EINVAL; 293 return -EINVAL;
294 } 294 }
295 295
296 *wql = wq->next; /* Unlink from chain */ 296 *wql = wq->next; /* Unlink from chain */
297 up(&sbi->wq_sem); 297 mutex_unlock(&sbi->wq_mutex);
298 kfree(wq->name); 298 kfree(wq->name);
299 wq->name = NULL; /* Do not wait on this queue */ 299 wq->name = NULL; /* Do not wait on this queue */
300 300
diff --git a/fs/bio.c b/fs/bio.c
index 1f3bb501c262..8f1d2e815c96 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1243,11 +1243,11 @@ static int __init init_bio(void)
1243 scale = 4; 1243 scale = 4;
1244 1244
1245 /* 1245 /*
1246 * scale number of entries 1246 * Limit number of entries reserved -- mempools are only used when
1247 * the system is completely unable to allocate memory, so we only
1248 * need enough to make progress.
1247 */ 1249 */
1248 bvec_pool_entries = megabytes * 2; 1250 bvec_pool_entries = 1 + scale;
1249 if (bvec_pool_entries > 256)
1250 bvec_pool_entries = 256;
1251 1251
1252 fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); 1252 fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
1253 if (!fs_bio_set) 1253 if (!fs_bio_set)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6e50346fb1ee..44d05e6e34db 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -265,8 +265,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
265 SLAB_CTOR_CONSTRUCTOR) 265 SLAB_CTOR_CONSTRUCTOR)
266 { 266 {
267 memset(bdev, 0, sizeof(*bdev)); 267 memset(bdev, 0, sizeof(*bdev));
268 sema_init(&bdev->bd_sem, 1); 268 mutex_init(&bdev->bd_mutex);
269 sema_init(&bdev->bd_mount_sem, 1); 269 mutex_init(&bdev->bd_mount_mutex);
270 INIT_LIST_HEAD(&bdev->bd_inodes); 270 INIT_LIST_HEAD(&bdev->bd_inodes);
271 INIT_LIST_HEAD(&bdev->bd_list); 271 INIT_LIST_HEAD(&bdev->bd_list);
272 inode_init_once(&ei->vfs_inode); 272 inode_init_once(&ei->vfs_inode);
@@ -574,7 +574,7 @@ static int do_open(struct block_device *bdev, struct file *file)
574 } 574 }
575 owner = disk->fops->owner; 575 owner = disk->fops->owner;
576 576
577 down(&bdev->bd_sem); 577 mutex_lock(&bdev->bd_mutex);
578 if (!bdev->bd_openers) { 578 if (!bdev->bd_openers) {
579 bdev->bd_disk = disk; 579 bdev->bd_disk = disk;
580 bdev->bd_contains = bdev; 580 bdev->bd_contains = bdev;
@@ -605,21 +605,21 @@ static int do_open(struct block_device *bdev, struct file *file)
605 if (ret) 605 if (ret)
606 goto out_first; 606 goto out_first;
607 bdev->bd_contains = whole; 607 bdev->bd_contains = whole;
608 down(&whole->bd_sem); 608 mutex_lock(&whole->bd_mutex);
609 whole->bd_part_count++; 609 whole->bd_part_count++;
610 p = disk->part[part - 1]; 610 p = disk->part[part - 1];
611 bdev->bd_inode->i_data.backing_dev_info = 611 bdev->bd_inode->i_data.backing_dev_info =
612 whole->bd_inode->i_data.backing_dev_info; 612 whole->bd_inode->i_data.backing_dev_info;
613 if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { 613 if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) {
614 whole->bd_part_count--; 614 whole->bd_part_count--;
615 up(&whole->bd_sem); 615 mutex_unlock(&whole->bd_mutex);
616 ret = -ENXIO; 616 ret = -ENXIO;
617 goto out_first; 617 goto out_first;
618 } 618 }
619 kobject_get(&p->kobj); 619 kobject_get(&p->kobj);
620 bdev->bd_part = p; 620 bdev->bd_part = p;
621 bd_set_size(bdev, (loff_t) p->nr_sects << 9); 621 bd_set_size(bdev, (loff_t) p->nr_sects << 9);
622 up(&whole->bd_sem); 622 mutex_unlock(&whole->bd_mutex);
623 } 623 }
624 } else { 624 } else {
625 put_disk(disk); 625 put_disk(disk);
@@ -633,13 +633,13 @@ static int do_open(struct block_device *bdev, struct file *file)
633 if (bdev->bd_invalidated) 633 if (bdev->bd_invalidated)
634 rescan_partitions(bdev->bd_disk, bdev); 634 rescan_partitions(bdev->bd_disk, bdev);
635 } else { 635 } else {
636 down(&bdev->bd_contains->bd_sem); 636 mutex_lock(&bdev->bd_contains->bd_mutex);
637 bdev->bd_contains->bd_part_count++; 637 bdev->bd_contains->bd_part_count++;
638 up(&bdev->bd_contains->bd_sem); 638 mutex_unlock(&bdev->bd_contains->bd_mutex);
639 } 639 }
640 } 640 }
641 bdev->bd_openers++; 641 bdev->bd_openers++;
642 up(&bdev->bd_sem); 642 mutex_unlock(&bdev->bd_mutex);
643 unlock_kernel(); 643 unlock_kernel();
644 return 0; 644 return 0;
645 645
@@ -652,7 +652,7 @@ out_first:
652 put_disk(disk); 652 put_disk(disk);
653 module_put(owner); 653 module_put(owner);
654out: 654out:
655 up(&bdev->bd_sem); 655 mutex_unlock(&bdev->bd_mutex);
656 unlock_kernel(); 656 unlock_kernel();
657 if (ret) 657 if (ret)
658 bdput(bdev); 658 bdput(bdev);
@@ -714,7 +714,7 @@ int blkdev_put(struct block_device *bdev)
714 struct inode *bd_inode = bdev->bd_inode; 714 struct inode *bd_inode = bdev->bd_inode;
715 struct gendisk *disk = bdev->bd_disk; 715 struct gendisk *disk = bdev->bd_disk;
716 716
717 down(&bdev->bd_sem); 717 mutex_lock(&bdev->bd_mutex);
718 lock_kernel(); 718 lock_kernel();
719 if (!--bdev->bd_openers) { 719 if (!--bdev->bd_openers) {
720 sync_blockdev(bdev); 720 sync_blockdev(bdev);
@@ -724,9 +724,9 @@ int blkdev_put(struct block_device *bdev)
724 if (disk->fops->release) 724 if (disk->fops->release)
725 ret = disk->fops->release(bd_inode, NULL); 725 ret = disk->fops->release(bd_inode, NULL);
726 } else { 726 } else {
727 down(&bdev->bd_contains->bd_sem); 727 mutex_lock(&bdev->bd_contains->bd_mutex);
728 bdev->bd_contains->bd_part_count--; 728 bdev->bd_contains->bd_part_count--;
729 up(&bdev->bd_contains->bd_sem); 729 mutex_unlock(&bdev->bd_contains->bd_mutex);
730 } 730 }
731 if (!bdev->bd_openers) { 731 if (!bdev->bd_openers) {
732 struct module *owner = disk->fops->owner; 732 struct module *owner = disk->fops->owner;
@@ -746,7 +746,7 @@ int blkdev_put(struct block_device *bdev)
746 bdev->bd_contains = NULL; 746 bdev->bd_contains = NULL;
747 } 747 }
748 unlock_kernel(); 748 unlock_kernel();
749 up(&bdev->bd_sem); 749 mutex_unlock(&bdev->bd_mutex);
750 bdput(bdev); 750 bdput(bdev);
751 return ret; 751 return ret;
752} 752}
diff --git a/fs/buffer.c b/fs/buffer.c
index 1d3683d496f8..0d6ca7bac6c8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -201,7 +201,7 @@ int fsync_bdev(struct block_device *bdev)
201 * freeze_bdev -- lock a filesystem and force it into a consistent state 201 * freeze_bdev -- lock a filesystem and force it into a consistent state
202 * @bdev: blockdevice to lock 202 * @bdev: blockdevice to lock
203 * 203 *
204 * This takes the block device bd_mount_sem to make sure no new mounts 204 * This takes the block device bd_mount_mutex to make sure no new mounts
205 * happen on bdev until thaw_bdev() is called. 205 * happen on bdev until thaw_bdev() is called.
206 * If a superblock is found on this device, we take the s_umount semaphore 206 * If a superblock is found on this device, we take the s_umount semaphore
207 * on it to make sure nobody unmounts until the snapshot creation is done. 207 * on it to make sure nobody unmounts until the snapshot creation is done.
@@ -210,7 +210,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
210{ 210{
211 struct super_block *sb; 211 struct super_block *sb;
212 212
213 down(&bdev->bd_mount_sem); 213 mutex_lock(&bdev->bd_mount_mutex);
214 sb = get_super(bdev); 214 sb = get_super(bdev);
215 if (sb && !(sb->s_flags & MS_RDONLY)) { 215 if (sb && !(sb->s_flags & MS_RDONLY)) {
216 sb->s_frozen = SB_FREEZE_WRITE; 216 sb->s_frozen = SB_FREEZE_WRITE;
@@ -264,7 +264,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
264 drop_super(sb); 264 drop_super(sb);
265 } 265 }
266 266
267 up(&bdev->bd_mount_sem); 267 mutex_unlock(&bdev->bd_mount_mutex);
268} 268}
269EXPORT_SYMBOL(thaw_bdev); 269EXPORT_SYMBOL(thaw_bdev);
270 270
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index fed55e3c53df..632561dd9c50 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -138,9 +138,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
138 cifs_sb = CIFS_SB(inode->i_sb); 138 cifs_sb = CIFS_SB(inode->i_sb);
139 pTcon = cifs_sb->tcon; 139 pTcon = cifs_sb->tcon;
140 140
141 down(&direntry->d_sb->s_vfs_rename_sem); 141 mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
142 full_path = build_path_from_dentry(direntry); 142 full_path = build_path_from_dentry(direntry);
143 up(&direntry->d_sb->s_vfs_rename_sem); 143 mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
144 if(full_path == NULL) { 144 if(full_path == NULL) {
145 FreeXid(xid); 145 FreeXid(xid);
146 return -ENOMEM; 146 return -ENOMEM;
@@ -317,9 +317,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
317 cifs_sb = CIFS_SB(inode->i_sb); 317 cifs_sb = CIFS_SB(inode->i_sb);
318 pTcon = cifs_sb->tcon; 318 pTcon = cifs_sb->tcon;
319 319
320 down(&direntry->d_sb->s_vfs_rename_sem); 320 mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
321 full_path = build_path_from_dentry(direntry); 321 full_path = build_path_from_dentry(direntry);
322 up(&direntry->d_sb->s_vfs_rename_sem); 322 mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
323 if(full_path == NULL) 323 if(full_path == NULL)
324 rc = -ENOMEM; 324 rc = -ENOMEM;
325 else if (pTcon->ses->capabilities & CAP_UNIX) { 325 else if (pTcon->ses->capabilities & CAP_UNIX) {
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index a7a47bb36bf3..ec4dfe9bf5ef 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -86,9 +86,9 @@ int cifs_dir_notify(struct file * file, unsigned long arg)
86 cifs_sb = CIFS_SB(file->f_dentry->d_sb); 86 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
87 pTcon = cifs_sb->tcon; 87 pTcon = cifs_sb->tcon;
88 88
89 down(&file->f_dentry->d_sb->s_vfs_rename_sem); 89 mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
90 full_path = build_path_from_dentry(file->f_dentry); 90 full_path = build_path_from_dentry(file->f_dentry);
91 up(&file->f_dentry->d_sb->s_vfs_rename_sem); 91 mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
92 92
93 if(full_path == NULL) { 93 if(full_path == NULL) {
94 rc = -ENOMEM; 94 rc = -ENOMEM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 675bd2568297..165d67426381 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -203,9 +203,9 @@ int cifs_open(struct inode *inode, struct file *file)
203 } 203 }
204 } 204 }
205 205
206 down(&inode->i_sb->s_vfs_rename_sem); 206 mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
207 full_path = build_path_from_dentry(file->f_dentry); 207 full_path = build_path_from_dentry(file->f_dentry);
208 up(&inode->i_sb->s_vfs_rename_sem); 208 mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
209 if (full_path == NULL) { 209 if (full_path == NULL) {
210 FreeXid(xid); 210 FreeXid(xid);
211 return -ENOMEM; 211 return -ENOMEM;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 59359911f481..ff93a9f81d1c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -574,9 +574,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
574 574
575 /* Unlink can be called from rename so we can not grab the sem here 575 /* Unlink can be called from rename so we can not grab the sem here
576 since we deadlock otherwise */ 576 since we deadlock otherwise */
577/* down(&direntry->d_sb->s_vfs_rename_sem);*/ 577/* mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/
578 full_path = build_path_from_dentry(direntry); 578 full_path = build_path_from_dentry(direntry);
579/* up(&direntry->d_sb->s_vfs_rename_sem);*/ 579/* mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/
580 if (full_path == NULL) { 580 if (full_path == NULL) {
581 FreeXid(xid); 581 FreeXid(xid);
582 return -ENOMEM; 582 return -ENOMEM;
@@ -718,9 +718,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
718 cifs_sb = CIFS_SB(inode->i_sb); 718 cifs_sb = CIFS_SB(inode->i_sb);
719 pTcon = cifs_sb->tcon; 719 pTcon = cifs_sb->tcon;
720 720
721 down(&inode->i_sb->s_vfs_rename_sem); 721 mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
722 full_path = build_path_from_dentry(direntry); 722 full_path = build_path_from_dentry(direntry);
723 up(&inode->i_sb->s_vfs_rename_sem); 723 mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
724 if (full_path == NULL) { 724 if (full_path == NULL) {
725 FreeXid(xid); 725 FreeXid(xid);
726 return -ENOMEM; 726 return -ENOMEM;
@@ -803,9 +803,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
803 cifs_sb = CIFS_SB(inode->i_sb); 803 cifs_sb = CIFS_SB(inode->i_sb);
804 pTcon = cifs_sb->tcon; 804 pTcon = cifs_sb->tcon;
805 805
806 down(&inode->i_sb->s_vfs_rename_sem); 806 mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
807 full_path = build_path_from_dentry(direntry); 807 full_path = build_path_from_dentry(direntry);
808 up(&inode->i_sb->s_vfs_rename_sem); 808 mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
809 if (full_path == NULL) { 809 if (full_path == NULL) {
810 FreeXid(xid); 810 FreeXid(xid);
811 return -ENOMEM; 811 return -ENOMEM;
@@ -1137,9 +1137,9 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
1137 rc = 0; 1137 rc = 0;
1138 } 1138 }
1139 1139
1140 down(&direntry->d_sb->s_vfs_rename_sem); 1140 mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
1141 full_path = build_path_from_dentry(direntry); 1141 full_path = build_path_from_dentry(direntry);
1142 up(&direntry->d_sb->s_vfs_rename_sem); 1142 mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
1143 if (full_path == NULL) { 1143 if (full_path == NULL) {
1144 FreeXid(xid); 1144 FreeXid(xid);
1145 return -ENOMEM; 1145 return -ENOMEM;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 0f99aae33162..8d0da7c87c7b 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -48,10 +48,10 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
48/* No need to check for cross device links since server will do that 48/* No need to check for cross device links since server will do that
49 BB note DFS case in future though (when we may have to check) */ 49 BB note DFS case in future though (when we may have to check) */
50 50
51 down(&inode->i_sb->s_vfs_rename_sem); 51 mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
52 fromName = build_path_from_dentry(old_file); 52 fromName = build_path_from_dentry(old_file);
53 toName = build_path_from_dentry(direntry); 53 toName = build_path_from_dentry(direntry);
54 up(&inode->i_sb->s_vfs_rename_sem); 54 mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
55 if((fromName == NULL) || (toName == NULL)) { 55 if((fromName == NULL) || (toName == NULL)) {
56 rc = -ENOMEM; 56 rc = -ENOMEM;
57 goto cifs_hl_exit; 57 goto cifs_hl_exit;
@@ -103,9 +103,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
103 103
104 xid = GetXid(); 104 xid = GetXid();
105 105
106 down(&direntry->d_sb->s_vfs_rename_sem); 106 mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);
107 full_path = build_path_from_dentry(direntry); 107 full_path = build_path_from_dentry(direntry);
108 up(&direntry->d_sb->s_vfs_rename_sem); 108 mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);
109 109
110 if (!full_path) 110 if (!full_path)
111 goto out_no_free; 111 goto out_no_free;
@@ -164,9 +164,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
164 cifs_sb = CIFS_SB(inode->i_sb); 164 cifs_sb = CIFS_SB(inode->i_sb);
165 pTcon = cifs_sb->tcon; 165 pTcon = cifs_sb->tcon;
166 166
167 down(&inode->i_sb->s_vfs_rename_sem); 167 mutex_lock(&inode->i_sb->s_vfs_rename_mutex);
168 full_path = build_path_from_dentry(direntry); 168 full_path = build_path_from_dentry(direntry);
169 up(&inode->i_sb->s_vfs_rename_sem); 169 mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);
170 170
171 if(full_path == NULL) { 171 if(full_path == NULL) {
172 FreeXid(xid); 172 FreeXid(xid);
@@ -232,9 +232,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
232 232
233/* BB would it be safe against deadlock to grab this sem 233/* BB would it be safe against deadlock to grab this sem
234 even though rename itself grabs the sem and calls lookup? */ 234 even though rename itself grabs the sem and calls lookup? */
235/* down(&inode->i_sb->s_vfs_rename_sem);*/ 235/* mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/
236 full_path = build_path_from_dentry(direntry); 236 full_path = build_path_from_dentry(direntry);
237/* up(&inode->i_sb->s_vfs_rename_sem);*/ 237/* mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/
238 238
239 if(full_path == NULL) { 239 if(full_path == NULL) {
240 FreeXid(xid); 240 FreeXid(xid);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 288cc048d37f..edb3b6eb34bc 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -404,9 +404,9 @@ static int initiate_cifs_search(const int xid, struct file *file)
404 if(pTcon == NULL) 404 if(pTcon == NULL)
405 return -EINVAL; 405 return -EINVAL;
406 406
407 down(&file->f_dentry->d_sb->s_vfs_rename_sem); 407 mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
408 full_path = build_path_from_dentry(file->f_dentry); 408 full_path = build_path_from_dentry(file->f_dentry);
409 up(&file->f_dentry->d_sb->s_vfs_rename_sem); 409 mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex);
410 410
411 if(full_path == NULL) { 411 if(full_path == NULL) {
412 return -ENOMEM; 412 return -ENOMEM;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 777e3363c2a4..3938444d87b2 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -62,9 +62,9 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name)
62 cifs_sb = CIFS_SB(sb); 62 cifs_sb = CIFS_SB(sb);
63 pTcon = cifs_sb->tcon; 63 pTcon = cifs_sb->tcon;
64 64
65 down(&sb->s_vfs_rename_sem); 65 mutex_lock(&sb->s_vfs_rename_mutex);
66 full_path = build_path_from_dentry(direntry); 66 full_path = build_path_from_dentry(direntry);
67 up(&sb->s_vfs_rename_sem); 67 mutex_unlock(&sb->s_vfs_rename_mutex);
68 if(full_path == NULL) { 68 if(full_path == NULL) {
69 FreeXid(xid); 69 FreeXid(xid);
70 return -ENOMEM; 70 return -ENOMEM;
@@ -116,9 +116,9 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name,
116 cifs_sb = CIFS_SB(sb); 116 cifs_sb = CIFS_SB(sb);
117 pTcon = cifs_sb->tcon; 117 pTcon = cifs_sb->tcon;
118 118
119 down(&sb->s_vfs_rename_sem); 119 mutex_lock(&sb->s_vfs_rename_mutex);
120 full_path = build_path_from_dentry(direntry); 120 full_path = build_path_from_dentry(direntry);
121 up(&sb->s_vfs_rename_sem); 121 mutex_unlock(&sb->s_vfs_rename_mutex);
122 if(full_path == NULL) { 122 if(full_path == NULL) {
123 FreeXid(xid); 123 FreeXid(xid);
124 return -ENOMEM; 124 return -ENOMEM;
@@ -223,9 +223,9 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name,
223 cifs_sb = CIFS_SB(sb); 223 cifs_sb = CIFS_SB(sb);
224 pTcon = cifs_sb->tcon; 224 pTcon = cifs_sb->tcon;
225 225
226 down(&sb->s_vfs_rename_sem); 226 mutex_lock(&sb->s_vfs_rename_mutex);
227 full_path = build_path_from_dentry(direntry); 227 full_path = build_path_from_dentry(direntry);
228 up(&sb->s_vfs_rename_sem); 228 mutex_unlock(&sb->s_vfs_rename_mutex);
229 if(full_path == NULL) { 229 if(full_path == NULL) {
230 FreeXid(xid); 230 FreeXid(xid);
231 return -ENOMEM; 231 return -ENOMEM;
@@ -341,9 +341,9 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size)
341 cifs_sb = CIFS_SB(sb); 341 cifs_sb = CIFS_SB(sb);
342 pTcon = cifs_sb->tcon; 342 pTcon = cifs_sb->tcon;
343 343
344 down(&sb->s_vfs_rename_sem); 344 mutex_lock(&sb->s_vfs_rename_mutex);
345 full_path = build_path_from_dentry(direntry); 345 full_path = build_path_from_dentry(direntry);
346 up(&sb->s_vfs_rename_sem); 346 mutex_unlock(&sb->s_vfs_rename_mutex);
347 if(full_path == NULL) { 347 if(full_path == NULL) {
348 FreeXid(xid); 348 FreeXid(xid);
349 return -ENOMEM; 349 return -ENOMEM;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bfb8a230bac9..14c5620b5cab 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -18,6 +18,7 @@
18#include <linux/mount.h> 18#include <linux/mount.h>
19#include <linux/tty.h> 19#include <linux/tty.h>
20#include <linux/devpts_fs.h> 20#include <linux/devpts_fs.h>
21#include <linux/parser.h>
21 22
22#define DEVPTS_SUPER_MAGIC 0x1cd1 23#define DEVPTS_SUPER_MAGIC 0x1cd1
23 24
@@ -32,39 +33,60 @@ static struct {
32 umode_t mode; 33 umode_t mode;
33} config = {.mode = 0600}; 34} config = {.mode = 0600};
34 35
36enum {
37 Opt_uid, Opt_gid, Opt_mode,
38 Opt_err
39};
40
41static match_table_t tokens = {
42 {Opt_uid, "uid=%u"},
43 {Opt_gid, "gid=%u"},
44 {Opt_mode, "mode=%o"},
45 {Opt_err, NULL}
46};
47
35static int devpts_remount(struct super_block *sb, int *flags, char *data) 48static int devpts_remount(struct super_block *sb, int *flags, char *data)
36{ 49{
37 int setuid = 0; 50 char *p;
38 int setgid = 0; 51
39 uid_t uid = 0; 52 config.setuid = 0;
40 gid_t gid = 0; 53 config.setgid = 0;
41 umode_t mode = 0600; 54 config.uid = 0;
42 char *this_char; 55 config.gid = 0;
43 56 config.mode = 0600;
44 this_char = NULL; 57
45 while ((this_char = strsep(&data, ",")) != NULL) { 58 while ((p = strsep(&data, ",")) != NULL) {
46 int n; 59 substring_t args[MAX_OPT_ARGS];
47 char dummy; 60 int token;
48 if (!*this_char) 61 int option;
62
63 if (!*p)
49 continue; 64 continue;
50 if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) { 65
51 setuid = 1; 66 token = match_token(p, tokens, args);
52 uid = n; 67 switch (token) {
53 } else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) { 68 case Opt_uid:
54 setgid = 1; 69 if (match_int(&args[0], &option))
55 gid = n; 70 return -EINVAL;
56 } else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1) 71 config.uid = option;
57 mode = n & ~S_IFMT; 72 config.setuid = 1;
58 else { 73 break;
59 printk("devpts: called with bogus options\n"); 74 case Opt_gid:
75 if (match_int(&args[0], &option))
76 return -EINVAL;
77 config.gid = option;
78 config.setgid = 1;
79 break;
80 case Opt_mode:
81 if (match_octal(&args[0], &option))
82 return -EINVAL;
83 config.mode = option & ~S_IFMT;
84 break;
85 default:
86 printk(KERN_ERR "devpts: called with bogus options\n");
60 return -EINVAL; 87 return -EINVAL;
61 } 88 }
62 } 89 }
63 config.setuid = setuid;
64 config.setgid = setgid;
65 config.uid = uid;
66 config.gid = gid;
67 config.mode = mode;
68 90
69 return 0; 91 return 0;
70} 92}
diff --git a/fs/dquot.c b/fs/dquot.c
index 1966c890b48d..acf07e581f8c 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -103,12 +103,12 @@
103 * (these locking rules also apply for S_NOQUOTA flag in the inode - note that 103 * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
104 * for altering the flag i_mutex is also needed). If operation is holding 104 * for altering the flag i_mutex is also needed). If operation is holding
105 * reference to dquot in other way (e.g. quotactl ops) it must be guarded by 105 * reference to dquot in other way (e.g. quotactl ops) it must be guarded by
106 * dqonoff_sem. 106 * dqonoff_mutex.
107 * This locking assures that: 107 * This locking assures that:
108 * a) update/access to dquot pointers in inode is serialized 108 * a) update/access to dquot pointers in inode is serialized
109 * b) everyone is guarded against invalidate_dquots() 109 * b) everyone is guarded against invalidate_dquots()
110 * 110 *
111 * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced 111 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
112 * from inodes (dquot_alloc_space() and such don't check the dq_lock). 112 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
113 * Currently dquot is locked only when it is being read to memory (or space for 113 * Currently dquot is locked only when it is being read to memory (or space for
114 * it is being allocated) on the first dqget() and when it is being released on 114 * it is being allocated) on the first dqget() and when it is being released on
@@ -118,9 +118,9 @@
118 * spinlock to internal buffers before writing. 118 * spinlock to internal buffers before writing.
119 * 119 *
120 * Lock ordering (including related VFS locks) is the following: 120 * Lock ordering (including related VFS locks) is the following:
121 * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem > 121 * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
122 * > dquot->dq_lock > dqio_sem 122 * dqio_mutex
123 * i_mutex on quota files is special (it's below dqio_sem) 123 * i_mutex on quota files is special (it's below dqio_mutex)
124 */ 124 */
125 125
126static DEFINE_SPINLOCK(dq_list_lock); 126static DEFINE_SPINLOCK(dq_list_lock);
@@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot)
281 281
282static void wait_on_dquot(struct dquot *dquot) 282static void wait_on_dquot(struct dquot *dquot)
283{ 283{
284 down(&dquot->dq_lock); 284 mutex_lock(&dquot->dq_lock);
285 up(&dquot->dq_lock); 285 mutex_unlock(&dquot->dq_lock);
286} 286}
287 287
288#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot)) 288#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
@@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot)
321 int ret = 0, ret2 = 0; 321 int ret = 0, ret2 = 0;
322 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); 322 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
323 323
324 down(&dquot->dq_lock); 324 mutex_lock(&dquot->dq_lock);
325 down(&dqopt->dqio_sem); 325 mutex_lock(&dqopt->dqio_mutex);
326 if (!test_bit(DQ_READ_B, &dquot->dq_flags)) 326 if (!test_bit(DQ_READ_B, &dquot->dq_flags))
327 ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); 327 ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
328 if (ret < 0) 328 if (ret < 0)
@@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot)
343 } 343 }
344 set_bit(DQ_ACTIVE_B, &dquot->dq_flags); 344 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
345out_iolock: 345out_iolock:
346 up(&dqopt->dqio_sem); 346 mutex_unlock(&dqopt->dqio_mutex);
347 up(&dquot->dq_lock); 347 mutex_unlock(&dquot->dq_lock);
348 return ret; 348 return ret;
349} 349}
350 350
@@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot)
356 int ret = 0, ret2 = 0; 356 int ret = 0, ret2 = 0;
357 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); 357 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
358 358
359 down(&dqopt->dqio_sem); 359 mutex_lock(&dqopt->dqio_mutex);
360 spin_lock(&dq_list_lock); 360 spin_lock(&dq_list_lock);
361 if (!clear_dquot_dirty(dquot)) { 361 if (!clear_dquot_dirty(dquot)) {
362 spin_unlock(&dq_list_lock); 362 spin_unlock(&dq_list_lock);
@@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot)
373 ret = ret2; 373 ret = ret2;
374 } 374 }
375out_sem: 375out_sem:
376 up(&dqopt->dqio_sem); 376 mutex_unlock(&dqopt->dqio_mutex);
377 return ret; 377 return ret;
378} 378}
379 379
@@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot)
385 int ret = 0, ret2 = 0; 385 int ret = 0, ret2 = 0;
386 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); 386 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
387 387
388 down(&dquot->dq_lock); 388 mutex_lock(&dquot->dq_lock);
389 /* Check whether we are not racing with some other dqget() */ 389 /* Check whether we are not racing with some other dqget() */
390 if (atomic_read(&dquot->dq_count) > 1) 390 if (atomic_read(&dquot->dq_count) > 1)
391 goto out_dqlock; 391 goto out_dqlock;
392 down(&dqopt->dqio_sem); 392 mutex_lock(&dqopt->dqio_mutex);
393 if (dqopt->ops[dquot->dq_type]->release_dqblk) { 393 if (dqopt->ops[dquot->dq_type]->release_dqblk) {
394 ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); 394 ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
395 /* Write the info */ 395 /* Write the info */
@@ -399,31 +399,57 @@ int dquot_release(struct dquot *dquot)
399 ret = ret2; 399 ret = ret2;
400 } 400 }
401 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); 401 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
402 up(&dqopt->dqio_sem); 402 mutex_unlock(&dqopt->dqio_mutex);
403out_dqlock: 403out_dqlock:
404 up(&dquot->dq_lock); 404 mutex_unlock(&dquot->dq_lock);
405 return ret; 405 return ret;
406} 406}
407 407
408/* Invalidate all dquots on the list. Note that this function is called after 408/* Invalidate all dquots on the list. Note that this function is called after
409 * quota is disabled and pointers from inodes removed so there cannot be new 409 * quota is disabled and pointers from inodes removed so there cannot be new
410 * quota users. Also because we hold dqonoff_sem there can be no quota users 410 * quota users. There can still be some users of quotas due to inodes being
411 * for this sb+type at all. */ 411 * just deleted or pruned by prune_icache() (those are not attached to any
412 * list). We have to wait for such users.
413 */
412static void invalidate_dquots(struct super_block *sb, int type) 414static void invalidate_dquots(struct super_block *sb, int type)
413{ 415{
414 struct dquot *dquot, *tmp; 416 struct dquot *dquot, *tmp;
415 417
418restart:
416 spin_lock(&dq_list_lock); 419 spin_lock(&dq_list_lock);
417 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { 420 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
418 if (dquot->dq_sb != sb) 421 if (dquot->dq_sb != sb)
419 continue; 422 continue;
420 if (dquot->dq_type != type) 423 if (dquot->dq_type != type)
421 continue; 424 continue;
422#ifdef __DQUOT_PARANOIA 425 /* Wait for dquot users */
423 if (atomic_read(&dquot->dq_count)) 426 if (atomic_read(&dquot->dq_count)) {
424 BUG(); 427 DEFINE_WAIT(wait);
425#endif 428
426 /* Quota now has no users and it has been written on last dqput() */ 429 atomic_inc(&dquot->dq_count);
430 prepare_to_wait(&dquot->dq_wait_unused, &wait,
431 TASK_UNINTERRUPTIBLE);
432 spin_unlock(&dq_list_lock);
433 /* Once dqput() wakes us up, we know it's time to free
434 * the dquot.
435 * IMPORTANT: we rely on the fact that there is always
436 * at most one process waiting for dquot to free.
437 * Otherwise dq_count would be > 1 and we would never
438 * wake up.
439 */
440 if (atomic_read(&dquot->dq_count) > 1)
441 schedule();
442 finish_wait(&dquot->dq_wait_unused, &wait);
443 dqput(dquot);
444 /* At this moment dquot() need not exist (it could be
445 * reclaimed by prune_dqcache(). Hence we must
446 * restart. */
447 goto restart;
448 }
449 /*
450 * Quota now has no users and it has been written on last
451 * dqput()
452 */
427 remove_dquot_hash(dquot); 453 remove_dquot_hash(dquot);
428 remove_free_dquot(dquot); 454 remove_free_dquot(dquot);
429 remove_inuse(dquot); 455 remove_inuse(dquot);
@@ -439,7 +465,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
439 struct quota_info *dqopt = sb_dqopt(sb); 465 struct quota_info *dqopt = sb_dqopt(sb);
440 int cnt; 466 int cnt;
441 467
442 down(&dqopt->dqonoff_sem); 468 mutex_lock(&dqopt->dqonoff_mutex);
443 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 469 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
444 if (type != -1 && cnt != type) 470 if (type != -1 && cnt != type)
445 continue; 471 continue;
@@ -474,7 +500,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
474 spin_lock(&dq_list_lock); 500 spin_lock(&dq_list_lock);
475 dqstats.syncs++; 501 dqstats.syncs++;
476 spin_unlock(&dq_list_lock); 502 spin_unlock(&dq_list_lock);
477 up(&dqopt->dqonoff_sem); 503 mutex_unlock(&dqopt->dqonoff_mutex);
478 504
479 return 0; 505 return 0;
480} 506}
@@ -515,7 +541,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
515/* 541/*
516 * Put reference to dquot 542 * Put reference to dquot
517 * NOTE: If you change this function please check whether dqput_blocks() works right... 543 * NOTE: If you change this function please check whether dqput_blocks() works right...
518 * MUST be called with either dqptr_sem or dqonoff_sem held 544 * MUST be called with either dqptr_sem or dqonoff_mutex held
519 */ 545 */
520static void dqput(struct dquot *dquot) 546static void dqput(struct dquot *dquot)
521{ 547{
@@ -540,6 +566,10 @@ we_slept:
540 if (atomic_read(&dquot->dq_count) > 1) { 566 if (atomic_read(&dquot->dq_count) > 1) {
541 /* We have more than one user... nothing to do */ 567 /* We have more than one user... nothing to do */
542 atomic_dec(&dquot->dq_count); 568 atomic_dec(&dquot->dq_count);
569 /* Releasing dquot during quotaoff phase? */
570 if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) &&
571 atomic_read(&dquot->dq_count) == 1)
572 wake_up(&dquot->dq_wait_unused);
543 spin_unlock(&dq_list_lock); 573 spin_unlock(&dq_list_lock);
544 return; 574 return;
545 } 575 }
@@ -576,11 +606,12 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
576 return NODQUOT; 606 return NODQUOT;
577 607
578 memset((caddr_t)dquot, 0, sizeof(struct dquot)); 608 memset((caddr_t)dquot, 0, sizeof(struct dquot));
579 sema_init(&dquot->dq_lock, 1); 609 mutex_init(&dquot->dq_lock);
580 INIT_LIST_HEAD(&dquot->dq_free); 610 INIT_LIST_HEAD(&dquot->dq_free);
581 INIT_LIST_HEAD(&dquot->dq_inuse); 611 INIT_LIST_HEAD(&dquot->dq_inuse);
582 INIT_HLIST_NODE(&dquot->dq_hash); 612 INIT_HLIST_NODE(&dquot->dq_hash);
583 INIT_LIST_HEAD(&dquot->dq_dirty); 613 INIT_LIST_HEAD(&dquot->dq_dirty);
614 init_waitqueue_head(&dquot->dq_wait_unused);
584 dquot->dq_sb = sb; 615 dquot->dq_sb = sb;
585 dquot->dq_type = type; 616 dquot->dq_type = type;
586 atomic_set(&dquot->dq_count, 1); 617 atomic_set(&dquot->dq_count, 1);
@@ -590,7 +621,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
590 621
591/* 622/*
592 * Get reference to dquot 623 * Get reference to dquot
593 * MUST be called with either dqptr_sem or dqonoff_sem held 624 * MUST be called with either dqptr_sem or dqonoff_mutex held
594 */ 625 */
595static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) 626static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
596{ 627{
@@ -656,7 +687,7 @@ static int dqinit_needed(struct inode *inode, int type)
656 return 0; 687 return 0;
657} 688}
658 689
659/* This routine is guarded by dqonoff_sem semaphore */ 690/* This routine is guarded by dqonoff_mutex mutex */
660static void add_dquot_ref(struct super_block *sb, int type) 691static void add_dquot_ref(struct super_block *sb, int type)
661{ 692{
662 struct list_head *p; 693 struct list_head *p;
@@ -732,13 +763,9 @@ static void drop_dquot_ref(struct super_block *sb, int type)
732{ 763{
733 LIST_HEAD(tofree_head); 764 LIST_HEAD(tofree_head);
734 765
735 /* We need to be guarded against prune_icache to reach all the
736 * inodes - otherwise some can be on the local list of prune_icache */
737 down(&iprune_sem);
738 down_write(&sb_dqopt(sb)->dqptr_sem); 766 down_write(&sb_dqopt(sb)->dqptr_sem);
739 remove_dquot_ref(sb, type, &tofree_head); 767 remove_dquot_ref(sb, type, &tofree_head);
740 up_write(&sb_dqopt(sb)->dqptr_sem); 768 up_write(&sb_dqopt(sb)->dqptr_sem);
741 up(&iprune_sem);
742 put_dquot_list(&tofree_head); 769 put_dquot_list(&tofree_head);
743} 770}
744 771
@@ -938,8 +965,8 @@ int dquot_initialize(struct inode *inode, int type)
938 unsigned int id = 0; 965 unsigned int id = 0;
939 int cnt, ret = 0; 966 int cnt, ret = 0;
940 967
941 /* First test before acquiring semaphore - solves deadlocks when we 968 /* First test before acquiring mutex - solves deadlocks when we
942 * re-enter the quota code and are already holding the semaphore */ 969 * re-enter the quota code and are already holding the mutex */
943 if (IS_NOQUOTA(inode)) 970 if (IS_NOQUOTA(inode))
944 return 0; 971 return 0;
945 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 972 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1002,8 +1029,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
1002 int cnt, ret = NO_QUOTA; 1029 int cnt, ret = NO_QUOTA;
1003 char warntype[MAXQUOTAS]; 1030 char warntype[MAXQUOTAS];
1004 1031
1005 /* First test before acquiring semaphore - solves deadlocks when we 1032 /* First test before acquiring mutex - solves deadlocks when we
1006 * re-enter the quota code and are already holding the semaphore */ 1033 * re-enter the quota code and are already holding the mutex */
1007 if (IS_NOQUOTA(inode)) { 1034 if (IS_NOQUOTA(inode)) {
1008out_add: 1035out_add:
1009 inode_add_bytes(inode, number); 1036 inode_add_bytes(inode, number);
@@ -1051,8 +1078,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number)
1051 int cnt, ret = NO_QUOTA; 1078 int cnt, ret = NO_QUOTA;
1052 char warntype[MAXQUOTAS]; 1079 char warntype[MAXQUOTAS];
1053 1080
1054 /* First test before acquiring semaphore - solves deadlocks when we 1081 /* First test before acquiring mutex - solves deadlocks when we
1055 * re-enter the quota code and are already holding the semaphore */ 1082 * re-enter the quota code and are already holding the mutex */
1056 if (IS_NOQUOTA(inode)) 1083 if (IS_NOQUOTA(inode))
1057 return QUOTA_OK; 1084 return QUOTA_OK;
1058 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1085 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
@@ -1095,8 +1122,8 @@ int dquot_free_space(struct inode *inode, qsize_t number)
1095{ 1122{
1096 unsigned int cnt; 1123 unsigned int cnt;
1097 1124
1098 /* First test before acquiring semaphore - solves deadlocks when we 1125 /* First test before acquiring mutex - solves deadlocks when we
1099 * re-enter the quota code and are already holding the semaphore */ 1126 * re-enter the quota code and are already holding the mutex */
1100 if (IS_NOQUOTA(inode)) { 1127 if (IS_NOQUOTA(inode)) {
1101out_sub: 1128out_sub:
1102 inode_sub_bytes(inode, number); 1129 inode_sub_bytes(inode, number);
@@ -1131,8 +1158,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number)
1131{ 1158{
1132 unsigned int cnt; 1159 unsigned int cnt;
1133 1160
1134 /* First test before acquiring semaphore - solves deadlocks when we 1161 /* First test before acquiring mutex - solves deadlocks when we
1135 * re-enter the quota code and are already holding the semaphore */ 1162 * re-enter the quota code and are already holding the mutex */
1136 if (IS_NOQUOTA(inode)) 1163 if (IS_NOQUOTA(inode))
1137 return QUOTA_OK; 1164 return QUOTA_OK;
1138 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1165 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1171,8 +1198,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1171 chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; 1198 chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
1172 char warntype[MAXQUOTAS]; 1199 char warntype[MAXQUOTAS];
1173 1200
1174 /* First test before acquiring semaphore - solves deadlocks when we 1201 /* First test before acquiring mutex - solves deadlocks when we
1175 * re-enter the quota code and are already holding the semaphore */ 1202 * re-enter the quota code and are already holding the mutex */
1176 if (IS_NOQUOTA(inode)) 1203 if (IS_NOQUOTA(inode))
1177 return QUOTA_OK; 1204 return QUOTA_OK;
1178 /* Clear the arrays */ 1205 /* Clear the arrays */
@@ -1266,9 +1293,9 @@ int dquot_commit_info(struct super_block *sb, int type)
1266 int ret; 1293 int ret;
1267 struct quota_info *dqopt = sb_dqopt(sb); 1294 struct quota_info *dqopt = sb_dqopt(sb);
1268 1295
1269 down(&dqopt->dqio_sem); 1296 mutex_lock(&dqopt->dqio_mutex);
1270 ret = dqopt->ops[type]->write_file_info(sb, type); 1297 ret = dqopt->ops[type]->write_file_info(sb, type);
1271 up(&dqopt->dqio_sem); 1298 mutex_unlock(&dqopt->dqio_mutex);
1272 return ret; 1299 return ret;
1273} 1300}
1274 1301
@@ -1324,7 +1351,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1324 struct inode *toputinode[MAXQUOTAS]; 1351 struct inode *toputinode[MAXQUOTAS];
1325 1352
1326 /* We need to serialize quota_off() for device */ 1353 /* We need to serialize quota_off() for device */
1327 down(&dqopt->dqonoff_sem); 1354 mutex_lock(&dqopt->dqonoff_mutex);
1328 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1355 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1329 toputinode[cnt] = NULL; 1356 toputinode[cnt] = NULL;
1330 if (type != -1 && cnt != type) 1357 if (type != -1 && cnt != type)
@@ -1353,7 +1380,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1353 dqopt->info[cnt].dqi_bgrace = 0; 1380 dqopt->info[cnt].dqi_bgrace = 0;
1354 dqopt->ops[cnt] = NULL; 1381 dqopt->ops[cnt] = NULL;
1355 } 1382 }
1356 up(&dqopt->dqonoff_sem); 1383 mutex_unlock(&dqopt->dqonoff_mutex);
1357 /* Sync the superblock so that buffers with quota data are written to 1384 /* Sync the superblock so that buffers with quota data are written to
1358 * disk (and so userspace sees correct data afterwards). */ 1385 * disk (and so userspace sees correct data afterwards). */
1359 if (sb->s_op->sync_fs) 1386 if (sb->s_op->sync_fs)
@@ -1366,7 +1393,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1366 * changes done by userspace on the next quotaon() */ 1393 * changes done by userspace on the next quotaon() */
1367 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1394 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1368 if (toputinode[cnt]) { 1395 if (toputinode[cnt]) {
1369 down(&dqopt->dqonoff_sem); 1396 mutex_lock(&dqopt->dqonoff_mutex);
1370 /* If quota was reenabled in the meantime, we have 1397 /* If quota was reenabled in the meantime, we have
1371 * nothing to do */ 1398 * nothing to do */
1372 if (!sb_has_quota_enabled(sb, cnt)) { 1399 if (!sb_has_quota_enabled(sb, cnt)) {
@@ -1378,7 +1405,7 @@ int vfs_quota_off(struct super_block *sb, int type)
1378 mark_inode_dirty(toputinode[cnt]); 1405 mark_inode_dirty(toputinode[cnt]);
1379 iput(toputinode[cnt]); 1406 iput(toputinode[cnt]);
1380 } 1407 }
1381 up(&dqopt->dqonoff_sem); 1408 mutex_unlock(&dqopt->dqonoff_mutex);
1382 } 1409 }
1383 if (sb->s_bdev) 1410 if (sb->s_bdev)
1384 invalidate_bdev(sb->s_bdev, 0); 1411 invalidate_bdev(sb->s_bdev, 0);
@@ -1419,7 +1446,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
1419 /* And now flush the block cache so that kernel sees the changes */ 1446 /* And now flush the block cache so that kernel sees the changes */
1420 invalidate_bdev(sb->s_bdev, 0); 1447 invalidate_bdev(sb->s_bdev, 0);
1421 mutex_lock(&inode->i_mutex); 1448 mutex_lock(&inode->i_mutex);
1422 down(&dqopt->dqonoff_sem); 1449 mutex_lock(&dqopt->dqonoff_mutex);
1423 if (sb_has_quota_enabled(sb, type)) { 1450 if (sb_has_quota_enabled(sb, type)) {
1424 error = -EBUSY; 1451 error = -EBUSY;
1425 goto out_lock; 1452 goto out_lock;
@@ -1444,17 +1471,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
1444 dqopt->ops[type] = fmt->qf_ops; 1471 dqopt->ops[type] = fmt->qf_ops;
1445 dqopt->info[type].dqi_format = fmt; 1472 dqopt->info[type].dqi_format = fmt;
1446 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); 1473 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
1447 down(&dqopt->dqio_sem); 1474 mutex_lock(&dqopt->dqio_mutex);
1448 if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { 1475 if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
1449 up(&dqopt->dqio_sem); 1476 mutex_unlock(&dqopt->dqio_mutex);
1450 goto out_file_init; 1477 goto out_file_init;
1451 } 1478 }
1452 up(&dqopt->dqio_sem); 1479 mutex_unlock(&dqopt->dqio_mutex);
1453 mutex_unlock(&inode->i_mutex); 1480 mutex_unlock(&inode->i_mutex);
1454 set_enable_flags(dqopt, type); 1481 set_enable_flags(dqopt, type);
1455 1482
1456 add_dquot_ref(sb, type); 1483 add_dquot_ref(sb, type);
1457 up(&dqopt->dqonoff_sem); 1484 mutex_unlock(&dqopt->dqonoff_mutex);
1458 1485
1459 return 0; 1486 return 0;
1460 1487
@@ -1462,7 +1489,7 @@ out_file_init:
1462 dqopt->files[type] = NULL; 1489 dqopt->files[type] = NULL;
1463 iput(inode); 1490 iput(inode);
1464out_lock: 1491out_lock:
1465 up(&dqopt->dqonoff_sem); 1492 mutex_unlock(&dqopt->dqonoff_mutex);
1466 if (oldflags != -1) { 1493 if (oldflags != -1) {
1467 down_write(&dqopt->dqptr_sem); 1494 down_write(&dqopt->dqptr_sem);
1468 /* Set the flags back (in the case of accidental quotaon() 1495 /* Set the flags back (in the case of accidental quotaon()
@@ -1550,14 +1577,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
1550{ 1577{
1551 struct dquot *dquot; 1578 struct dquot *dquot;
1552 1579
1553 down(&sb_dqopt(sb)->dqonoff_sem); 1580 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1554 if (!(dquot = dqget(sb, id, type))) { 1581 if (!(dquot = dqget(sb, id, type))) {
1555 up(&sb_dqopt(sb)->dqonoff_sem); 1582 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1556 return -ESRCH; 1583 return -ESRCH;
1557 } 1584 }
1558 do_get_dqblk(dquot, di); 1585 do_get_dqblk(dquot, di);
1559 dqput(dquot); 1586 dqput(dquot);
1560 up(&sb_dqopt(sb)->dqonoff_sem); 1587 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1561 return 0; 1588 return 0;
1562} 1589}
1563 1590
@@ -1619,14 +1646,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
1619{ 1646{
1620 struct dquot *dquot; 1647 struct dquot *dquot;
1621 1648
1622 down(&sb_dqopt(sb)->dqonoff_sem); 1649 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1623 if (!(dquot = dqget(sb, id, type))) { 1650 if (!(dquot = dqget(sb, id, type))) {
1624 up(&sb_dqopt(sb)->dqonoff_sem); 1651 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1625 return -ESRCH; 1652 return -ESRCH;
1626 } 1653 }
1627 do_set_dqblk(dquot, di); 1654 do_set_dqblk(dquot, di);
1628 dqput(dquot); 1655 dqput(dquot);
1629 up(&sb_dqopt(sb)->dqonoff_sem); 1656 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1630 return 0; 1657 return 0;
1631} 1658}
1632 1659
@@ -1635,9 +1662,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1635{ 1662{
1636 struct mem_dqinfo *mi; 1663 struct mem_dqinfo *mi;
1637 1664
1638 down(&sb_dqopt(sb)->dqonoff_sem); 1665 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1639 if (!sb_has_quota_enabled(sb, type)) { 1666 if (!sb_has_quota_enabled(sb, type)) {
1640 up(&sb_dqopt(sb)->dqonoff_sem); 1667 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1641 return -ESRCH; 1668 return -ESRCH;
1642 } 1669 }
1643 mi = sb_dqopt(sb)->info + type; 1670 mi = sb_dqopt(sb)->info + type;
@@ -1647,7 +1674,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1647 ii->dqi_flags = mi->dqi_flags & DQF_MASK; 1674 ii->dqi_flags = mi->dqi_flags & DQF_MASK;
1648 ii->dqi_valid = IIF_ALL; 1675 ii->dqi_valid = IIF_ALL;
1649 spin_unlock(&dq_data_lock); 1676 spin_unlock(&dq_data_lock);
1650 up(&sb_dqopt(sb)->dqonoff_sem); 1677 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1651 return 0; 1678 return 0;
1652} 1679}
1653 1680
@@ -1656,9 +1683,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1656{ 1683{
1657 struct mem_dqinfo *mi; 1684 struct mem_dqinfo *mi;
1658 1685
1659 down(&sb_dqopt(sb)->dqonoff_sem); 1686 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1660 if (!sb_has_quota_enabled(sb, type)) { 1687 if (!sb_has_quota_enabled(sb, type)) {
1661 up(&sb_dqopt(sb)->dqonoff_sem); 1688 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1662 return -ESRCH; 1689 return -ESRCH;
1663 } 1690 }
1664 mi = sb_dqopt(sb)->info + type; 1691 mi = sb_dqopt(sb)->info + type;
@@ -1673,7 +1700,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1673 mark_info_dirty(sb, type); 1700 mark_info_dirty(sb, type);
1674 /* Force write to disk */ 1701 /* Force write to disk */
1675 sb->dq_op->write_info(sb, type); 1702 sb->dq_op->write_info(sb, type);
1676 up(&sb_dqopt(sb)->dqonoff_sem); 1703 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1677 return 0; 1704 return 0;
1678} 1705}
1679 1706
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4284cd31eba6..1c2b16fda13a 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,6 +34,7 @@
34#include <linux/eventpoll.h> 34#include <linux/eventpoll.h>
35#include <linux/mount.h> 35#include <linux/mount.h>
36#include <linux/bitops.h> 36#include <linux/bitops.h>
37#include <linux/mutex.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38#include <asm/system.h> 39#include <asm/system.h>
39#include <asm/io.h> 40#include <asm/io.h>
@@ -46,7 +47,7 @@
46 * LOCKING: 47 * LOCKING:
47 * There are three level of locking required by epoll : 48 * There are three level of locking required by epoll :
48 * 49 *
49 * 1) epsem (semaphore) 50 * 1) epmutex (mutex)
50 * 2) ep->sem (rw_semaphore) 51 * 2) ep->sem (rw_semaphore)
51 * 3) ep->lock (rw_lock) 52 * 3) ep->lock (rw_lock)
52 * 53 *
@@ -67,9 +68,9 @@
67 * if a file has been pushed inside an epoll set and it is then 68 * if a file has been pushed inside an epoll set and it is then
68 * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). 69 * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
69 * It is possible to drop the "ep->sem" and to use the global 70 * It is possible to drop the "ep->sem" and to use the global
70 * semaphore "epsem" (together with "ep->lock") to have it working, 71 * semaphore "epmutex" (together with "ep->lock") to have it working,
71 * but having "ep->sem" will make the interface more scalable. 72 * but having "ep->sem" will make the interface more scalable.
72 * Events that require holding "epsem" are very rare, while for 73 * Events that require holding "epmutex" are very rare, while for
73 * normal operations the epoll private "ep->sem" will guarantee 74 * normal operations the epoll private "ep->sem" will guarantee
74 * a greater scalability. 75 * a greater scalability.
75 */ 76 */
@@ -274,7 +275,7 @@ static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
274/* 275/*
275 * This semaphore is used to serialize ep_free() and eventpoll_release_file(). 276 * This semaphore is used to serialize ep_free() and eventpoll_release_file().
276 */ 277 */
277static struct semaphore epsem; 278static struct mutex epmutex;
278 279
279/* Safe wake up implementation */ 280/* Safe wake up implementation */
280static struct poll_safewake psw; 281static struct poll_safewake psw;
@@ -451,15 +452,6 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
451} 452}
452 453
453 454
454/* Used to initialize the epoll bits inside the "struct file" */
455void eventpoll_init_file(struct file *file)
456{
457
458 INIT_LIST_HEAD(&file->f_ep_links);
459 spin_lock_init(&file->f_ep_lock);
460}
461
462
463/* 455/*
464 * This is called from eventpoll_release() to unlink files from the eventpoll 456 * This is called from eventpoll_release() to unlink files from the eventpoll
465 * interface. We need to have this facility to cleanup correctly files that are 457 * interface. We need to have this facility to cleanup correctly files that are
@@ -477,10 +469,10 @@ void eventpoll_release_file(struct file *file)
477 * cleanup path, and this means that noone is using this file anymore. 469 * cleanup path, and this means that noone is using this file anymore.
478 * The only hit might come from ep_free() but by holding the semaphore 470 * The only hit might come from ep_free() but by holding the semaphore
479 * will correctly serialize the operation. We do need to acquire 471 * will correctly serialize the operation. We do need to acquire
480 * "ep->sem" after "epsem" because ep_remove() requires it when called 472 * "ep->sem" after "epmutex" because ep_remove() requires it when called
481 * from anywhere but ep_free(). 473 * from anywhere but ep_free().
482 */ 474 */
483 down(&epsem); 475 mutex_lock(&epmutex);
484 476
485 while (!list_empty(lsthead)) { 477 while (!list_empty(lsthead)) {
486 epi = list_entry(lsthead->next, struct epitem, fllink); 478 epi = list_entry(lsthead->next, struct epitem, fllink);
@@ -492,7 +484,7 @@ void eventpoll_release_file(struct file *file)
492 up_write(&ep->sem); 484 up_write(&ep->sem);
493 } 485 }
494 486
495 up(&epsem); 487 mutex_unlock(&epmutex);
496} 488}
497 489
498 490
@@ -819,9 +811,9 @@ static void ep_free(struct eventpoll *ep)
819 * We do not need to hold "ep->sem" here because the epoll file 811 * We do not need to hold "ep->sem" here because the epoll file
820 * is on the way to be removed and no one has references to it 812 * is on the way to be removed and no one has references to it
821 * anymore. The only hit might come from eventpoll_release_file() but 813 * anymore. The only hit might come from eventpoll_release_file() but
822 * holding "epsem" is sufficent here. 814 * holding "epmutex" is sufficent here.
823 */ 815 */
824 down(&epsem); 816 mutex_lock(&epmutex);
825 817
826 /* 818 /*
827 * Walks through the whole tree by unregistering poll callbacks. 819 * Walks through the whole tree by unregistering poll callbacks.
@@ -843,7 +835,7 @@ static void ep_free(struct eventpoll *ep)
843 ep_remove(ep, epi); 835 ep_remove(ep, epi);
844 } 836 }
845 837
846 up(&epsem); 838 mutex_unlock(&epmutex);
847} 839}
848 840
849 841
@@ -1615,7 +1607,7 @@ static int __init eventpoll_init(void)
1615{ 1607{
1616 int error; 1608 int error;
1617 1609
1618 init_MUTEX(&epsem); 1610 mutex_init(&epmutex);
1619 1611
1620 /* Initialize the structure used to perform safe poll wait head wake ups */ 1612 /* Initialize the structure used to perform safe poll wait head wake ups */
1621 ep_poll_safewake_init(&psw); 1613 ep_poll_safewake_init(&psw);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index ad1432a2a62e..4ca824985321 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -36,22 +36,6 @@
36#include "acl.h" 36#include "acl.h"
37#include "xip.h" 37#include "xip.h"
38 38
39/*
40 * Couple of helper functions - make the code slightly cleaner.
41 */
42
43static inline void ext2_inc_count(struct inode *inode)
44{
45 inode->i_nlink++;
46 mark_inode_dirty(inode);
47}
48
49static inline void ext2_dec_count(struct inode *inode)
50{
51 inode->i_nlink--;
52 mark_inode_dirty(inode);
53}
54
55static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) 39static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
56{ 40{
57 int err = ext2_add_link(dentry, inode); 41 int err = ext2_add_link(dentry, inode);
@@ -59,7 +43,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
59 d_instantiate(dentry, inode); 43 d_instantiate(dentry, inode);
60 return 0; 44 return 0;
61 } 45 }
62 ext2_dec_count(inode); 46 inode_dec_link_count(inode);
63 iput(inode); 47 iput(inode);
64 return err; 48 return err;
65} 49}
@@ -201,7 +185,7 @@ out:
201 return err; 185 return err;
202 186
203out_fail: 187out_fail:
204 ext2_dec_count(inode); 188 inode_dec_link_count(inode);
205 iput (inode); 189 iput (inode);
206 goto out; 190 goto out;
207} 191}
@@ -215,7 +199,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
215 return -EMLINK; 199 return -EMLINK;
216 200
217 inode->i_ctime = CURRENT_TIME_SEC; 201 inode->i_ctime = CURRENT_TIME_SEC;
218 ext2_inc_count(inode); 202 inode_inc_link_count(inode);
219 atomic_inc(&inode->i_count); 203 atomic_inc(&inode->i_count);
220 204
221 return ext2_add_nondir(dentry, inode); 205 return ext2_add_nondir(dentry, inode);
@@ -229,7 +213,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
229 if (dir->i_nlink >= EXT2_LINK_MAX) 213 if (dir->i_nlink >= EXT2_LINK_MAX)
230 goto out; 214 goto out;
231 215
232 ext2_inc_count(dir); 216 inode_inc_link_count(dir);
233 217
234 inode = ext2_new_inode (dir, S_IFDIR | mode); 218 inode = ext2_new_inode (dir, S_IFDIR | mode);
235 err = PTR_ERR(inode); 219 err = PTR_ERR(inode);
@@ -243,7 +227,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
243 else 227 else
244 inode->i_mapping->a_ops = &ext2_aops; 228 inode->i_mapping->a_ops = &ext2_aops;
245 229
246 ext2_inc_count(inode); 230 inode_inc_link_count(inode);
247 231
248 err = ext2_make_empty(inode, dir); 232 err = ext2_make_empty(inode, dir);
249 if (err) 233 if (err)
@@ -258,11 +242,11 @@ out:
258 return err; 242 return err;
259 243
260out_fail: 244out_fail:
261 ext2_dec_count(inode); 245 inode_dec_link_count(inode);
262 ext2_dec_count(inode); 246 inode_dec_link_count(inode);
263 iput(inode); 247 iput(inode);
264out_dir: 248out_dir:
265 ext2_dec_count(dir); 249 inode_dec_link_count(dir);
266 goto out; 250 goto out;
267} 251}
268 252
@@ -282,7 +266,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry)
282 goto out; 266 goto out;
283 267
284 inode->i_ctime = dir->i_ctime; 268 inode->i_ctime = dir->i_ctime;
285 ext2_dec_count(inode); 269 inode_dec_link_count(inode);
286 err = 0; 270 err = 0;
287out: 271out:
288 return err; 272 return err;
@@ -297,8 +281,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
297 err = ext2_unlink(dir, dentry); 281 err = ext2_unlink(dir, dentry);
298 if (!err) { 282 if (!err) {
299 inode->i_size = 0; 283 inode->i_size = 0;
300 ext2_dec_count(inode); 284 inode_dec_link_count(inode);
301 ext2_dec_count(dir); 285 inode_dec_link_count(dir);
302 } 286 }
303 } 287 }
304 return err; 288 return err;
@@ -338,41 +322,41 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
338 new_de = ext2_find_entry (new_dir, new_dentry, &new_page); 322 new_de = ext2_find_entry (new_dir, new_dentry, &new_page);
339 if (!new_de) 323 if (!new_de)
340 goto out_dir; 324 goto out_dir;
341 ext2_inc_count(old_inode); 325 inode_inc_link_count(old_inode);
342 ext2_set_link(new_dir, new_de, new_page, old_inode); 326 ext2_set_link(new_dir, new_de, new_page, old_inode);
343 new_inode->i_ctime = CURRENT_TIME_SEC; 327 new_inode->i_ctime = CURRENT_TIME_SEC;
344 if (dir_de) 328 if (dir_de)
345 new_inode->i_nlink--; 329 new_inode->i_nlink--;
346 ext2_dec_count(new_inode); 330 inode_dec_link_count(new_inode);
347 } else { 331 } else {
348 if (dir_de) { 332 if (dir_de) {
349 err = -EMLINK; 333 err = -EMLINK;
350 if (new_dir->i_nlink >= EXT2_LINK_MAX) 334 if (new_dir->i_nlink >= EXT2_LINK_MAX)
351 goto out_dir; 335 goto out_dir;
352 } 336 }
353 ext2_inc_count(old_inode); 337 inode_inc_link_count(old_inode);
354 err = ext2_add_link(new_dentry, old_inode); 338 err = ext2_add_link(new_dentry, old_inode);
355 if (err) { 339 if (err) {
356 ext2_dec_count(old_inode); 340 inode_dec_link_count(old_inode);
357 goto out_dir; 341 goto out_dir;
358 } 342 }
359 if (dir_de) 343 if (dir_de)
360 ext2_inc_count(new_dir); 344 inode_inc_link_count(new_dir);
361 } 345 }
362 346
363 /* 347 /*
364 * Like most other Unix systems, set the ctime for inodes on a 348 * Like most other Unix systems, set the ctime for inodes on a
365 * rename. 349 * rename.
366 * ext2_dec_count() will mark the inode dirty. 350 * inode_dec_link_count() will mark the inode dirty.
367 */ 351 */
368 old_inode->i_ctime = CURRENT_TIME_SEC; 352 old_inode->i_ctime = CURRENT_TIME_SEC;
369 353
370 ext2_delete_entry (old_de, old_page); 354 ext2_delete_entry (old_de, old_page);
371 ext2_dec_count(old_inode); 355 inode_dec_link_count(old_inode);
372 356
373 if (dir_de) { 357 if (dir_de) {
374 ext2_set_link(old_inode, dir_de, dir_page, new_dir); 358 ext2_set_link(old_inode, dir_de, dir_page, new_dir);
375 ext2_dec_count(old_dir); 359 inode_dec_link_count(old_dir);
376 } 360 }
377 return 0; 361 return 0;
378 362
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 832867aef3dc..773459164bb2 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -95,11 +95,10 @@ static int ext3_readdir(struct file * filp,
95 void * dirent, filldir_t filldir) 95 void * dirent, filldir_t filldir)
96{ 96{
97 int error = 0; 97 int error = 0;
98 unsigned long offset, blk; 98 unsigned long offset;
99 int i, num, stored; 99 int i, stored;
100 struct buffer_head * bh, * tmp, * bha[16]; 100 struct ext3_dir_entry_2 *de;
101 struct ext3_dir_entry_2 * de; 101 struct super_block *sb;
102 struct super_block * sb;
103 int err; 102 int err;
104 struct inode *inode = filp->f_dentry->d_inode; 103 struct inode *inode = filp->f_dentry->d_inode;
105 int ret = 0; 104 int ret = 0;
@@ -124,12 +123,29 @@ static int ext3_readdir(struct file * filp,
124 } 123 }
125#endif 124#endif
126 stored = 0; 125 stored = 0;
127 bh = NULL;
128 offset = filp->f_pos & (sb->s_blocksize - 1); 126 offset = filp->f_pos & (sb->s_blocksize - 1);
129 127
130 while (!error && !stored && filp->f_pos < inode->i_size) { 128 while (!error && !stored && filp->f_pos < inode->i_size) {
131 blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb); 129 unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
132 bh = ext3_bread(NULL, inode, blk, 0, &err); 130 struct buffer_head map_bh;
131 struct buffer_head *bh = NULL;
132
133 map_bh.b_state = 0;
134 err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
135 if (!err) {
136 page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
137 &filp->f_ra,
138 filp,
139 map_bh.b_blocknr >>
140 (PAGE_CACHE_SHIFT - inode->i_blkbits),
141 1);
142 bh = ext3_bread(NULL, inode, blk, 0, &err);
143 }
144
145 /*
146 * We ignore I/O errors on directories so users have a chance
147 * of recovering data when there's a bad sector
148 */
133 if (!bh) { 149 if (!bh) {
134 ext3_error (sb, "ext3_readdir", 150 ext3_error (sb, "ext3_readdir",
135 "directory #%lu contains a hole at offset %lu", 151 "directory #%lu contains a hole at offset %lu",
@@ -138,26 +154,6 @@ static int ext3_readdir(struct file * filp,
138 continue; 154 continue;
139 } 155 }
140 156
141 /*
142 * Do the readahead
143 */
144 if (!offset) {
145 for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0;
146 i > 0; i--) {
147 tmp = ext3_getblk (NULL, inode, ++blk, 0, &err);
148 if (tmp && !buffer_uptodate(tmp) &&
149 !buffer_locked(tmp))
150 bha[num++] = tmp;
151 else
152 brelse (tmp);
153 }
154 if (num) {
155 ll_rw_block (READA, num, bha);
156 for (i = 0; i < num; i++)
157 brelse (bha[i]);
158 }
159 }
160
161revalidate: 157revalidate:
162 /* If the dir block has changed since the last call to 158 /* If the dir block has changed since the last call to
163 * readdir(2), then we might be pointing to an invalid 159 * readdir(2), then we might be pointing to an invalid
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 98e78345ead9..59098ea56711 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -37,9 +37,9 @@ static int ext3_release_file (struct inode * inode, struct file * filp)
37 if ((filp->f_mode & FMODE_WRITE) && 37 if ((filp->f_mode & FMODE_WRITE) &&
38 (atomic_read(&inode->i_writecount) == 1)) 38 (atomic_read(&inode->i_writecount) == 1))
39 { 39 {
40 down(&EXT3_I(inode)->truncate_sem); 40 mutex_lock(&EXT3_I(inode)->truncate_mutex);
41 ext3_discard_reservation(inode); 41 ext3_discard_reservation(inode);
42 up(&EXT3_I(inode)->truncate_sem); 42 mutex_unlock(&EXT3_I(inode)->truncate_mutex);
43 } 43 }
44 if (is_dx(inode) && filp->private_data) 44 if (is_dx(inode) && filp->private_data)
45 ext3_htree_free_dir_info(filp->private_data); 45 ext3_htree_free_dir_info(filp->private_data);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 0384e539b88f..2c361377e0a5 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -671,7 +671,7 @@ err_out:
671 * The BKL may not be held on entry here. Be sure to take it early. 671 * The BKL may not be held on entry here. Be sure to take it early.
672 */ 672 */
673 673
674static int 674int
675ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, 675ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
676 struct buffer_head *bh_result, int create, int extend_disksize) 676 struct buffer_head *bh_result, int create, int extend_disksize)
677{ 677{
@@ -702,7 +702,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
702 if (!create || err == -EIO) 702 if (!create || err == -EIO)
703 goto cleanup; 703 goto cleanup;
704 704
705 down(&ei->truncate_sem); 705 mutex_lock(&ei->truncate_mutex);
706 706
707 /* 707 /*
708 * If the indirect block is missing while we are reading 708 * If the indirect block is missing while we are reading
@@ -723,7 +723,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
723 } 723 }
724 partial = ext3_get_branch(inode, depth, offsets, chain, &err); 724 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
725 if (!partial) { 725 if (!partial) {
726 up(&ei->truncate_sem); 726 mutex_unlock(&ei->truncate_mutex);
727 if (err) 727 if (err)
728 goto cleanup; 728 goto cleanup;
729 clear_buffer_new(bh_result); 729 clear_buffer_new(bh_result);
@@ -759,13 +759,13 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
759 err = ext3_splice_branch(handle, inode, iblock, chain, 759 err = ext3_splice_branch(handle, inode, iblock, chain,
760 partial, left); 760 partial, left);
761 /* 761 /*
762 * i_disksize growing is protected by truncate_sem. Don't forget to 762 * i_disksize growing is protected by truncate_mutex. Don't forget to
763 * protect it if you're about to implement concurrent 763 * protect it if you're about to implement concurrent
764 * ext3_get_block() -bzzz 764 * ext3_get_block() -bzzz
765 */ 765 */
766 if (!err && extend_disksize && inode->i_size > ei->i_disksize) 766 if (!err && extend_disksize && inode->i_size > ei->i_disksize)
767 ei->i_disksize = inode->i_size; 767 ei->i_disksize = inode->i_size;
768 up(&ei->truncate_sem); 768 mutex_unlock(&ei->truncate_mutex);
769 if (err) 769 if (err)
770 goto cleanup; 770 goto cleanup;
771 771
@@ -1227,7 +1227,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1227 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... 1227 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1228 * 1228 *
1229 * Same applies to ext3_get_block(). We will deadlock on various things like 1229 * Same applies to ext3_get_block(). We will deadlock on various things like
1230 * lock_journal and i_truncate_sem. 1230 * lock_journal and i_truncate_mutex.
1231 * 1231 *
1232 * Setting PF_MEMALLOC here doesn't work - too many internal memory 1232 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1233 * allocations fail. 1233 * allocations fail.
@@ -2161,7 +2161,7 @@ void ext3_truncate(struct inode * inode)
2161 * From here we block out all ext3_get_block() callers who want to 2161 * From here we block out all ext3_get_block() callers who want to
2162 * modify the block allocation tree. 2162 * modify the block allocation tree.
2163 */ 2163 */
2164 down(&ei->truncate_sem); 2164 mutex_lock(&ei->truncate_mutex);
2165 2165
2166 if (n == 1) { /* direct blocks */ 2166 if (n == 1) { /* direct blocks */
2167 ext3_free_data(handle, inode, NULL, i_data+offsets[0], 2167 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
@@ -2228,7 +2228,7 @@ do_indirects:
2228 2228
2229 ext3_discard_reservation(inode); 2229 ext3_discard_reservation(inode);
2230 2230
2231 up(&ei->truncate_sem); 2231 mutex_unlock(&ei->truncate_mutex);
2232 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 2232 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2233 ext3_mark_inode_dirty(handle, inode); 2233 ext3_mark_inode_dirty(handle, inode);
2234 2234
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 556cd5510078..aaf1da17b6d4 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -182,7 +182,7 @@ flags_err:
182 * need to allocate reservation structure for this inode 182 * need to allocate reservation structure for this inode
183 * before set the window size 183 * before set the window size
184 */ 184 */
185 down(&ei->truncate_sem); 185 mutex_lock(&ei->truncate_mutex);
186 if (!ei->i_block_alloc_info) 186 if (!ei->i_block_alloc_info)
187 ext3_init_block_alloc_info(inode); 187 ext3_init_block_alloc_info(inode);
188 188
@@ -190,7 +190,7 @@ flags_err:
190 struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; 190 struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
191 rsv->rsv_goal_size = rsv_window_size; 191 rsv->rsv_goal_size = rsv_window_size;
192 } 192 }
193 up(&ei->truncate_sem); 193 mutex_unlock(&ei->truncate_mutex);
194 return 0; 194 return 0;
195 } 195 }
196 case EXT3_IOC_GROUP_EXTEND: { 196 case EXT3_IOC_GROUP_EXTEND: {
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 56bf76586019..efe5b20d7a5a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -472,7 +472,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
472#ifdef CONFIG_EXT3_FS_XATTR 472#ifdef CONFIG_EXT3_FS_XATTR
473 init_rwsem(&ei->xattr_sem); 473 init_rwsem(&ei->xattr_sem);
474#endif 474#endif
475 init_MUTEX(&ei->truncate_sem); 475 mutex_init(&ei->truncate_mutex);
476 inode_init_once(&ei->vfs_inode); 476 inode_init_once(&ei->vfs_inode);
477 } 477 }
478} 478}
@@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf)
2382 * Process 1 Process 2 2382 * Process 1 Process 2
2383 * ext3_create() quota_sync() 2383 * ext3_create() quota_sync()
2384 * journal_start() write_dquot() 2384 * journal_start() write_dquot()
2385 * DQUOT_INIT() down(dqio_sem) 2385 * DQUOT_INIT() down(dqio_mutex)
2386 * down(dqio_sem) journal_start() 2386 * down(dqio_mutex) journal_start()
2387 * 2387 *
2388 */ 2388 */
2389 2389
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index a1a9e0451217..ab171ea8e869 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -267,19 +267,19 @@ static struct fatent_operations fat32_ops = {
267 267
268static inline void lock_fat(struct msdos_sb_info *sbi) 268static inline void lock_fat(struct msdos_sb_info *sbi)
269{ 269{
270 down(&sbi->fat_lock); 270 mutex_lock(&sbi->fat_lock);
271} 271}
272 272
273static inline void unlock_fat(struct msdos_sb_info *sbi) 273static inline void unlock_fat(struct msdos_sb_info *sbi)
274{ 274{
275 up(&sbi->fat_lock); 275 mutex_unlock(&sbi->fat_lock);
276} 276}
277 277
278void fat_ent_access_init(struct super_block *sb) 278void fat_ent_access_init(struct super_block *sb)
279{ 279{
280 struct msdos_sb_info *sbi = MSDOS_SB(sb); 280 struct msdos_sb_info *sbi = MSDOS_SB(sb);
281 281
282 init_MUTEX(&sbi->fat_lock); 282 mutex_init(&sbi->fat_lock);
283 283
284 switch (sbi->fat_bits) { 284 switch (sbi->fat_bits) {
285 case 32: 285 case 32:
diff --git a/fs/fcntl.c b/fs/fcntl.c
index dc4a7007f4e7..03c789560fb8 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -73,8 +73,8 @@ repeat:
73 * orig_start..fdt->next_fd 73 * orig_start..fdt->next_fd
74 */ 74 */
75 start = orig_start; 75 start = orig_start;
76 if (start < fdt->next_fd) 76 if (start < files->next_fd)
77 start = fdt->next_fd; 77 start = files->next_fd;
78 78
79 newfd = start; 79 newfd = start;
80 if (start < fdt->max_fdset) { 80 if (start < fdt->max_fdset) {
@@ -102,9 +102,8 @@ repeat:
102 * we reacquire the fdtable pointer and use it while holding 102 * we reacquire the fdtable pointer and use it while holding
103 * the lock, no one can free it during that time. 103 * the lock, no one can free it during that time.
104 */ 104 */
105 fdt = files_fdtable(files); 105 if (start <= files->next_fd)
106 if (start <= fdt->next_fd) 106 files->next_fd = newfd + 1;
107 fdt->next_fd = newfd + 1;
108 107
109 error = newfd; 108 error = newfd;
110 109
diff --git a/fs/file.c b/fs/file.c
index cea7cbea11d0..bbc743314730 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -125,7 +125,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
125 kmem_cache_free(files_cachep, fdt->free_files); 125 kmem_cache_free(files_cachep, fdt->free_files);
126 return; 126 return;
127 } 127 }
128 if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) { 128 if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE &&
129 fdt->max_fds <= NR_OPEN_DEFAULT) {
129 /* 130 /*
130 * The fdtable was embedded 131 * The fdtable was embedded
131 */ 132 */
@@ -155,8 +156,9 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
155 156
156void free_fdtable(struct fdtable *fdt) 157void free_fdtable(struct fdtable *fdt)
157{ 158{
158 if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE || 159 if (fdt->free_files ||
159 fdt->max_fds > NR_OPEN_DEFAULT) 160 fdt->max_fdset > EMBEDDED_FD_SET_SIZE ||
161 fdt->max_fds > NR_OPEN_DEFAULT)
160 call_rcu(&fdt->rcu, free_fdtable_rcu); 162 call_rcu(&fdt->rcu, free_fdtable_rcu);
161} 163}
162 164
@@ -199,7 +201,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt)
199 (nfdt->max_fds - fdt->max_fds) * 201 (nfdt->max_fds - fdt->max_fds) *
200 sizeof(struct file *)); 202 sizeof(struct file *));
201 } 203 }
202 nfdt->next_fd = fdt->next_fd;
203} 204}
204 205
205/* 206/*
@@ -220,11 +221,9 @@ fd_set * alloc_fdset(int num)
220 221
221void free_fdset(fd_set *array, int num) 222void free_fdset(fd_set *array, int num)
222{ 223{
223 int size = num / 8; 224 if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */
224
225 if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */
226 return; 225 return;
227 else if (size <= PAGE_SIZE) 226 else if (num <= 8 * PAGE_SIZE)
228 kfree(array); 227 kfree(array);
229 else 228 else
230 vfree(array); 229 vfree(array);
@@ -237,22 +236,17 @@ static struct fdtable *alloc_fdtable(int nr)
237 fd_set *new_openset = NULL, *new_execset = NULL; 236 fd_set *new_openset = NULL, *new_execset = NULL;
238 struct file **new_fds; 237 struct file **new_fds;
239 238
240 fdt = kmalloc(sizeof(*fdt), GFP_KERNEL); 239 fdt = kzalloc(sizeof(*fdt), GFP_KERNEL);
241 if (!fdt) 240 if (!fdt)
242 goto out; 241 goto out;
243 memset(fdt, 0, sizeof(*fdt));
244 242
245 nfds = __FD_SETSIZE; 243 nfds = 8 * L1_CACHE_BYTES;
246 /* Expand to the max in easy steps */ 244 /* Expand to the max in easy steps */
247 do { 245 while (nfds <= nr) {
248 if (nfds < (PAGE_SIZE * 8)) 246 nfds = nfds * 2;
249 nfds = PAGE_SIZE * 8; 247 if (nfds > NR_OPEN)
250 else { 248 nfds = NR_OPEN;
251 nfds = nfds * 2; 249 }
252 if (nfds > NR_OPEN)
253 nfds = NR_OPEN;
254 }
255 } while (nfds <= nr);
256 250
257 new_openset = alloc_fdset(nfds); 251 new_openset = alloc_fdset(nfds);
258 new_execset = alloc_fdset(nfds); 252 new_execset = alloc_fdset(nfds);
diff --git a/fs/file_table.c b/fs/file_table.c
index 44fabeaa9415..bcea1998b4de 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -88,6 +88,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp,
88 */ 88 */
89struct file *get_empty_filp(void) 89struct file *get_empty_filp(void)
90{ 90{
91 struct task_struct *tsk;
91 static int old_max; 92 static int old_max;
92 struct file * f; 93 struct file * f;
93 94
@@ -112,13 +113,14 @@ struct file *get_empty_filp(void)
112 if (security_file_alloc(f)) 113 if (security_file_alloc(f))
113 goto fail_sec; 114 goto fail_sec;
114 115
115 eventpoll_init_file(f); 116 tsk = current;
117 INIT_LIST_HEAD(&f->f_u.fu_list);
116 atomic_set(&f->f_count, 1); 118 atomic_set(&f->f_count, 1);
117 f->f_uid = current->fsuid;
118 f->f_gid = current->fsgid;
119 rwlock_init(&f->f_owner.lock); 119 rwlock_init(&f->f_owner.lock);
120 f->f_uid = tsk->fsuid;
121 f->f_gid = tsk->fsgid;
122 eventpoll_init_file(f);
120 /* f->f_version: 0 */ 123 /* f->f_version: 0 */
121 INIT_LIST_HEAD(&f->f_u.fu_list);
122 return f; 124 return f;
123 125
124over: 126over:
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 6628c3b352cb..4c6473ab3b34 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -9,6 +9,7 @@
9//#define DBG 9//#define DBG
10//#define DEBUG_LOCKS 10//#define DEBUG_LOCKS
11 11
12#include <linux/mutex.h>
12#include <linux/pagemap.h> 13#include <linux/pagemap.h>
13#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
14#include <linux/hpfs_fs.h> 15#include <linux/hpfs_fs.h>
@@ -57,8 +58,8 @@ struct hpfs_inode_info {
57 unsigned i_ea_uid : 1; /* file's uid is stored in ea */ 58 unsigned i_ea_uid : 1; /* file's uid is stored in ea */
58 unsigned i_ea_gid : 1; /* file's gid is stored in ea */ 59 unsigned i_ea_gid : 1; /* file's gid is stored in ea */
59 unsigned i_dirty : 1; 60 unsigned i_dirty : 1;
60 struct semaphore i_sem; 61 struct mutex i_mutex;
61 struct semaphore i_parent; 62 struct mutex i_parent_mutex;
62 loff_t **i_rddir_off; 63 loff_t **i_rddir_off;
63 struct inode vfs_inode; 64 struct inode vfs_inode;
64}; 65};
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index e3d17e9ea6c1..56f2c338c4d9 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -186,9 +186,9 @@ void hpfs_write_inode(struct inode *i)
186 kfree(hpfs_inode->i_rddir_off); 186 kfree(hpfs_inode->i_rddir_off);
187 hpfs_inode->i_rddir_off = NULL; 187 hpfs_inode->i_rddir_off = NULL;
188 } 188 }
189 down(&hpfs_inode->i_parent); 189 mutex_lock(&hpfs_inode->i_parent_mutex);
190 if (!i->i_nlink) { 190 if (!i->i_nlink) {
191 up(&hpfs_inode->i_parent); 191 mutex_unlock(&hpfs_inode->i_parent_mutex);
192 return; 192 return;
193 } 193 }
194 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); 194 parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
@@ -199,14 +199,14 @@ void hpfs_write_inode(struct inode *i)
199 hpfs_read_inode(parent); 199 hpfs_read_inode(parent);
200 unlock_new_inode(parent); 200 unlock_new_inode(parent);
201 } 201 }
202 down(&hpfs_inode->i_sem); 202 mutex_lock(&hpfs_inode->i_mutex);
203 hpfs_write_inode_nolock(i); 203 hpfs_write_inode_nolock(i);
204 up(&hpfs_inode->i_sem); 204 mutex_unlock(&hpfs_inode->i_mutex);
205 iput(parent); 205 iput(parent);
206 } else { 206 } else {
207 mark_inode_dirty(i); 207 mark_inode_dirty(i);
208 } 208 }
209 up(&hpfs_inode->i_parent); 209 mutex_unlock(&hpfs_inode->i_parent_mutex);
210} 210}
211 211
212void hpfs_write_inode_nolock(struct inode *i) 212void hpfs_write_inode_nolock(struct inode *i)
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 8ff8fc433fc1..a03abb12c610 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -60,7 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
60 if (dee.read_only) 60 if (dee.read_only)
61 result->i_mode &= ~0222; 61 result->i_mode &= ~0222;
62 62
63 down(&hpfs_i(dir)->i_sem); 63 mutex_lock(&hpfs_i(dir)->i_mutex);
64 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 64 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
65 if (r == 1) 65 if (r == 1)
66 goto bail3; 66 goto bail3;
@@ -101,11 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
101 hpfs_write_inode_nolock(result); 101 hpfs_write_inode_nolock(result);
102 } 102 }
103 d_instantiate(dentry, result); 103 d_instantiate(dentry, result);
104 up(&hpfs_i(dir)->i_sem); 104 mutex_unlock(&hpfs_i(dir)->i_mutex);
105 unlock_kernel(); 105 unlock_kernel();
106 return 0; 106 return 0;
107bail3: 107bail3:
108 up(&hpfs_i(dir)->i_sem); 108 mutex_unlock(&hpfs_i(dir)->i_mutex);
109 iput(result); 109 iput(result);
110bail2: 110bail2:
111 hpfs_brelse4(&qbh0); 111 hpfs_brelse4(&qbh0);
@@ -168,7 +168,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
168 result->i_data.a_ops = &hpfs_aops; 168 result->i_data.a_ops = &hpfs_aops;
169 hpfs_i(result)->mmu_private = 0; 169 hpfs_i(result)->mmu_private = 0;
170 170
171 down(&hpfs_i(dir)->i_sem); 171 mutex_lock(&hpfs_i(dir)->i_mutex);
172 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 172 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
173 if (r == 1) 173 if (r == 1)
174 goto bail2; 174 goto bail2;
@@ -193,12 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
193 hpfs_write_inode_nolock(result); 193 hpfs_write_inode_nolock(result);
194 } 194 }
195 d_instantiate(dentry, result); 195 d_instantiate(dentry, result);
196 up(&hpfs_i(dir)->i_sem); 196 mutex_unlock(&hpfs_i(dir)->i_mutex);
197 unlock_kernel(); 197 unlock_kernel();
198 return 0; 198 return 0;
199 199
200bail2: 200bail2:
201 up(&hpfs_i(dir)->i_sem); 201 mutex_unlock(&hpfs_i(dir)->i_mutex);
202 iput(result); 202 iput(result);
203bail1: 203bail1:
204 brelse(bh); 204 brelse(bh);
@@ -254,7 +254,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
254 result->i_blocks = 1; 254 result->i_blocks = 1;
255 init_special_inode(result, mode, rdev); 255 init_special_inode(result, mode, rdev);
256 256
257 down(&hpfs_i(dir)->i_sem); 257 mutex_lock(&hpfs_i(dir)->i_mutex);
258 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 258 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
259 if (r == 1) 259 if (r == 1)
260 goto bail2; 260 goto bail2;
@@ -271,12 +271,12 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
271 271
272 hpfs_write_inode_nolock(result); 272 hpfs_write_inode_nolock(result);
273 d_instantiate(dentry, result); 273 d_instantiate(dentry, result);
274 up(&hpfs_i(dir)->i_sem); 274 mutex_unlock(&hpfs_i(dir)->i_mutex);
275 brelse(bh); 275 brelse(bh);
276 unlock_kernel(); 276 unlock_kernel();
277 return 0; 277 return 0;
278bail2: 278bail2:
279 up(&hpfs_i(dir)->i_sem); 279 mutex_unlock(&hpfs_i(dir)->i_mutex);
280 iput(result); 280 iput(result);
281bail1: 281bail1:
282 brelse(bh); 282 brelse(bh);
@@ -333,7 +333,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
333 result->i_op = &page_symlink_inode_operations; 333 result->i_op = &page_symlink_inode_operations;
334 result->i_data.a_ops = &hpfs_symlink_aops; 334 result->i_data.a_ops = &hpfs_symlink_aops;
335 335
336 down(&hpfs_i(dir)->i_sem); 336 mutex_lock(&hpfs_i(dir)->i_mutex);
337 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 337 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0);
338 if (r == 1) 338 if (r == 1)
339 goto bail2; 339 goto bail2;
@@ -352,11 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
352 352
353 hpfs_write_inode_nolock(result); 353 hpfs_write_inode_nolock(result);
354 d_instantiate(dentry, result); 354 d_instantiate(dentry, result);
355 up(&hpfs_i(dir)->i_sem); 355 mutex_unlock(&hpfs_i(dir)->i_mutex);
356 unlock_kernel(); 356 unlock_kernel();
357 return 0; 357 return 0;
358bail2: 358bail2:
359 up(&hpfs_i(dir)->i_sem); 359 mutex_unlock(&hpfs_i(dir)->i_mutex);
360 iput(result); 360 iput(result);
361bail1: 361bail1:
362 brelse(bh); 362 brelse(bh);
@@ -382,8 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
382 lock_kernel(); 382 lock_kernel();
383 hpfs_adjust_length((char *)name, &len); 383 hpfs_adjust_length((char *)name, &len);
384again: 384again:
385 down(&hpfs_i(inode)->i_parent); 385 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
386 down(&hpfs_i(dir)->i_sem); 386 mutex_lock(&hpfs_i(dir)->i_mutex);
387 err = -ENOENT; 387 err = -ENOENT;
388 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 388 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
389 if (!de) 389 if (!de)
@@ -410,8 +410,8 @@ again:
410 if (rep++) 410 if (rep++)
411 break; 411 break;
412 412
413 up(&hpfs_i(dir)->i_sem); 413 mutex_unlock(&hpfs_i(dir)->i_mutex);
414 up(&hpfs_i(inode)->i_parent); 414 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
415 d_drop(dentry); 415 d_drop(dentry);
416 spin_lock(&dentry->d_lock); 416 spin_lock(&dentry->d_lock);
417 if (atomic_read(&dentry->d_count) > 1 || 417 if (atomic_read(&dentry->d_count) > 1 ||
@@ -442,8 +442,8 @@ again:
442out1: 442out1:
443 hpfs_brelse4(&qbh); 443 hpfs_brelse4(&qbh);
444out: 444out:
445 up(&hpfs_i(dir)->i_sem); 445 mutex_unlock(&hpfs_i(dir)->i_mutex);
446 up(&hpfs_i(inode)->i_parent); 446 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
447 unlock_kernel(); 447 unlock_kernel();
448 return err; 448 return err;
449} 449}
@@ -463,8 +463,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
463 463
464 hpfs_adjust_length((char *)name, &len); 464 hpfs_adjust_length((char *)name, &len);
465 lock_kernel(); 465 lock_kernel();
466 down(&hpfs_i(inode)->i_parent); 466 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
467 down(&hpfs_i(dir)->i_sem); 467 mutex_lock(&hpfs_i(dir)->i_mutex);
468 err = -ENOENT; 468 err = -ENOENT;
469 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 469 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh);
470 if (!de) 470 if (!de)
@@ -502,8 +502,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
502out1: 502out1:
503 hpfs_brelse4(&qbh); 503 hpfs_brelse4(&qbh);
504out: 504out:
505 up(&hpfs_i(dir)->i_sem); 505 mutex_unlock(&hpfs_i(dir)->i_mutex);
506 up(&hpfs_i(inode)->i_parent); 506 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
507 unlock_kernel(); 507 unlock_kernel();
508 return err; 508 return err;
509} 509}
@@ -565,12 +565,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
565 565
566 lock_kernel(); 566 lock_kernel();
567 /* order doesn't matter, due to VFS exclusion */ 567 /* order doesn't matter, due to VFS exclusion */
568 down(&hpfs_i(i)->i_parent); 568 mutex_lock(&hpfs_i(i)->i_parent_mutex);
569 if (new_inode) 569 if (new_inode)
570 down(&hpfs_i(new_inode)->i_parent); 570 mutex_lock(&hpfs_i(new_inode)->i_parent_mutex);
571 down(&hpfs_i(old_dir)->i_sem); 571 mutex_lock(&hpfs_i(old_dir)->i_mutex);
572 if (new_dir != old_dir) 572 if (new_dir != old_dir)
573 down(&hpfs_i(new_dir)->i_sem); 573 mutex_lock(&hpfs_i(new_dir)->i_mutex);
574 574
575 /* Erm? Moving over the empty non-busy directory is perfectly legal */ 575 /* Erm? Moving over the empty non-busy directory is perfectly legal */
576 if (new_inode && S_ISDIR(new_inode->i_mode)) { 576 if (new_inode && S_ISDIR(new_inode->i_mode)) {
@@ -650,11 +650,11 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
650 hpfs_decide_conv(i, (char *)new_name, new_len); 650 hpfs_decide_conv(i, (char *)new_name, new_len);
651end1: 651end1:
652 if (old_dir != new_dir) 652 if (old_dir != new_dir)
653 up(&hpfs_i(new_dir)->i_sem); 653 mutex_unlock(&hpfs_i(new_dir)->i_mutex);
654 up(&hpfs_i(old_dir)->i_sem); 654 mutex_unlock(&hpfs_i(old_dir)->i_mutex);
655 up(&hpfs_i(i)->i_parent); 655 mutex_unlock(&hpfs_i(i)->i_parent_mutex);
656 if (new_inode) 656 if (new_inode)
657 up(&hpfs_i(new_inode)->i_parent); 657 mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex);
658 unlock_kernel(); 658 unlock_kernel();
659 return err; 659 return err;
660} 660}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 63e88d7e2c3b..9488a794076e 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -181,8 +181,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
181 181
182 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 182 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
183 SLAB_CTOR_CONSTRUCTOR) { 183 SLAB_CTOR_CONSTRUCTOR) {
184 init_MUTEX(&ei->i_sem); 184 mutex_init(&ei->i_mutex);
185 init_MUTEX(&ei->i_parent); 185 mutex_init(&ei->i_parent_mutex);
186 inode_init_once(&ei->vfs_inode); 186 inode_init_once(&ei->vfs_inode);
187 } 187 }
188} 188}
diff --git a/fs/inode.c b/fs/inode.c
index d0be6159eb7f..25967b67903d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable;
84DEFINE_SPINLOCK(inode_lock); 84DEFINE_SPINLOCK(inode_lock);
85 85
86/* 86/*
87 * iprune_sem provides exclusion between the kswapd or try_to_free_pages 87 * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
88 * icache shrinking path, and the umount path. Without this exclusion, 88 * icache shrinking path, and the umount path. Without this exclusion,
89 * by the time prune_icache calls iput for the inode whose pages it has 89 * by the time prune_icache calls iput for the inode whose pages it has
90 * been invalidating, or by the time it calls clear_inode & destroy_inode 90 * been invalidating, or by the time it calls clear_inode & destroy_inode
91 * from its final dispose_list, the struct super_block they refer to 91 * from its final dispose_list, the struct super_block they refer to
92 * (for inode->i_sb->s_op) may already have been freed and reused. 92 * (for inode->i_sb->s_op) may already have been freed and reused.
93 */ 93 */
94DECLARE_MUTEX(iprune_sem); 94DEFINE_MUTEX(iprune_mutex);
95 95
96/* 96/*
97 * Statistics gathering.. 97 * Statistics gathering..
@@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode)
206 i_size_ordered_init(inode); 206 i_size_ordered_init(inode);
207#ifdef CONFIG_INOTIFY 207#ifdef CONFIG_INOTIFY
208 INIT_LIST_HEAD(&inode->inotify_watches); 208 INIT_LIST_HEAD(&inode->inotify_watches);
209 sema_init(&inode->inotify_sem, 1); 209 mutex_init(&inode->inotify_mutex);
210#endif 210#endif
211} 211}
212 212
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
319 /* 319 /*
320 * We can reschedule here without worrying about the list's 320 * We can reschedule here without worrying about the list's
321 * consistency because the per-sb list of inodes must not 321 * consistency because the per-sb list of inodes must not
322 * change during umount anymore, and because iprune_sem keeps 322 * change during umount anymore, and because iprune_mutex keeps
323 * shrink_icache_memory() away. 323 * shrink_icache_memory() away.
324 */ 324 */
325 cond_resched_lock(&inode_lock); 325 cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb)
355 int busy; 355 int busy;
356 LIST_HEAD(throw_away); 356 LIST_HEAD(throw_away);
357 357
358 down(&iprune_sem); 358 mutex_lock(&iprune_mutex);
359 spin_lock(&inode_lock); 359 spin_lock(&inode_lock);
360 inotify_unmount_inodes(&sb->s_inodes); 360 inotify_unmount_inodes(&sb->s_inodes);
361 busy = invalidate_list(&sb->s_inodes, &throw_away); 361 busy = invalidate_list(&sb->s_inodes, &throw_away);
362 spin_unlock(&inode_lock); 362 spin_unlock(&inode_lock);
363 363
364 dispose_list(&throw_away); 364 dispose_list(&throw_away);
365 up(&iprune_sem); 365 mutex_unlock(&iprune_mutex);
366 366
367 return busy; 367 return busy;
368} 368}
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev)
377 if (sb) { 377 if (sb) {
378 /* 378 /*
379 * no need to lock the super, get_super holds the 379 * no need to lock the super, get_super holds the
380 * read semaphore so the filesystem cannot go away 380 * read mutex so the filesystem cannot go away
381 * under us (->put_super runs with the write lock 381 * under us (->put_super runs with the write lock
382 * hold). 382 * hold).
383 */ 383 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
423 int nr_scanned; 423 int nr_scanned;
424 unsigned long reap = 0; 424 unsigned long reap = 0;
425 425
426 down(&iprune_sem); 426 mutex_lock(&iprune_mutex);
427 spin_lock(&inode_lock); 427 spin_lock(&inode_lock);
428 for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { 428 for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
429 struct inode *inode; 429 struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
459 spin_unlock(&inode_lock); 459 spin_unlock(&inode_lock);
460 460
461 dispose_list(&freeable); 461 dispose_list(&freeable);
462 up(&iprune_sem); 462 mutex_unlock(&iprune_mutex);
463 463
464 if (current_is_kswapd()) 464 if (current_is_kswapd())
465 mod_page_state(kswapd_inodesteal, reap); 465 mod_page_state(kswapd_inodesteal, reap);
diff --git a/fs/inotify.c b/fs/inotify.c
index 3041503bde02..0ee39ef591c6 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -54,10 +54,10 @@ int inotify_max_queued_events;
54 * Lock ordering: 54 * Lock ordering:
55 * 55 *
56 * dentry->d_lock (used to keep d_move() away from dentry->d_parent) 56 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
57 * iprune_sem (synchronize shrink_icache_memory()) 57 * iprune_mutex (synchronize shrink_icache_memory())
58 * inode_lock (protects the super_block->s_inodes list) 58 * inode_lock (protects the super_block->s_inodes list)
59 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) 59 * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
60 * inotify_dev->sem (protects inotify_device and watches->d_list) 60 * inotify_dev->mutex (protects inotify_device and watches->d_list)
61 */ 61 */
62 62
63/* 63/*
@@ -79,12 +79,12 @@ int inotify_max_queued_events;
79/* 79/*
80 * struct inotify_device - represents an inotify instance 80 * struct inotify_device - represents an inotify instance
81 * 81 *
82 * This structure is protected by the semaphore 'sem'. 82 * This structure is protected by the mutex 'mutex'.
83 */ 83 */
84struct inotify_device { 84struct inotify_device {
85 wait_queue_head_t wq; /* wait queue for i/o */ 85 wait_queue_head_t wq; /* wait queue for i/o */
86 struct idr idr; /* idr mapping wd -> watch */ 86 struct idr idr; /* idr mapping wd -> watch */
87 struct semaphore sem; /* protects this bad boy */ 87 struct mutex mutex; /* protects this bad boy */
88 struct list_head events; /* list of queued events */ 88 struct list_head events; /* list of queued events */
89 struct list_head watches; /* list of watches */ 89 struct list_head watches; /* list of watches */
90 atomic_t count; /* reference count */ 90 atomic_t count; /* reference count */
@@ -101,7 +101,7 @@ struct inotify_device {
101 * device. In read(), this list is walked and all events that can fit in the 101 * device. In read(), this list is walked and all events that can fit in the
102 * buffer are returned. 102 * buffer are returned.
103 * 103 *
104 * Protected by dev->sem of the device in which we are queued. 104 * Protected by dev->mutex of the device in which we are queued.
105 */ 105 */
106struct inotify_kernel_event { 106struct inotify_kernel_event {
107 struct inotify_event event; /* the user-space event */ 107 struct inotify_event event; /* the user-space event */
@@ -112,8 +112,8 @@ struct inotify_kernel_event {
112/* 112/*
113 * struct inotify_watch - represents a watch request on a specific inode 113 * struct inotify_watch - represents a watch request on a specific inode
114 * 114 *
115 * d_list is protected by dev->sem of the associated watch->dev. 115 * d_list is protected by dev->mutex of the associated watch->dev.
116 * i_list and mask are protected by inode->inotify_sem of the associated inode. 116 * i_list and mask are protected by inode->inotify_mutex of the associated inode.
117 * dev, inode, and wd are never written to once the watch is created. 117 * dev, inode, and wd are never written to once the watch is created.
118 */ 118 */
119struct inotify_watch { 119struct inotify_watch {
@@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
261/* 261/*
262 * inotify_dev_get_event - return the next event in the given dev's queue 262 * inotify_dev_get_event - return the next event in the given dev's queue
263 * 263 *
264 * Caller must hold dev->sem. 264 * Caller must hold dev->mutex.
265 */ 265 */
266static inline struct inotify_kernel_event * 266static inline struct inotify_kernel_event *
267inotify_dev_get_event(struct inotify_device *dev) 267inotify_dev_get_event(struct inotify_device *dev)
@@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev)
272/* 272/*
273 * inotify_dev_queue_event - add a new event to the given device 273 * inotify_dev_queue_event - add a new event to the given device
274 * 274 *
275 * Caller must hold dev->sem. Can sleep (calls kernel_event()). 275 * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
276 */ 276 */
277static void inotify_dev_queue_event(struct inotify_device *dev, 277static void inotify_dev_queue_event(struct inotify_device *dev,
278 struct inotify_watch *watch, u32 mask, 278 struct inotify_watch *watch, u32 mask,
@@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev,
315/* 315/*
316 * remove_kevent - cleans up and ultimately frees the given kevent 316 * remove_kevent - cleans up and ultimately frees the given kevent
317 * 317 *
318 * Caller must hold dev->sem. 318 * Caller must hold dev->mutex.
319 */ 319 */
320static void remove_kevent(struct inotify_device *dev, 320static void remove_kevent(struct inotify_device *dev,
321 struct inotify_kernel_event *kevent) 321 struct inotify_kernel_event *kevent)
@@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev,
332/* 332/*
333 * inotify_dev_event_dequeue - destroy an event on the given device 333 * inotify_dev_event_dequeue - destroy an event on the given device
334 * 334 *
335 * Caller must hold dev->sem. 335 * Caller must hold dev->mutex.
336 */ 336 */
337static void inotify_dev_event_dequeue(struct inotify_device *dev) 337static void inotify_dev_event_dequeue(struct inotify_device *dev)
338{ 338{
@@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev)
346/* 346/*
347 * inotify_dev_get_wd - returns the next WD for use by the given dev 347 * inotify_dev_get_wd - returns the next WD for use by the given dev
348 * 348 *
349 * Callers must hold dev->sem. This function can sleep. 349 * Callers must hold dev->mutex. This function can sleep.
350 */ 350 */
351static int inotify_dev_get_wd(struct inotify_device *dev, 351static int inotify_dev_get_wd(struct inotify_device *dev,
352 struct inotify_watch *watch) 352 struct inotify_watch *watch)
@@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd,
383/* 383/*
384 * create_watch - creates a watch on the given device. 384 * create_watch - creates a watch on the given device.
385 * 385 *
386 * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. 386 * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
387 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. 387 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
388 */ 388 */
389static struct inotify_watch *create_watch(struct inotify_device *dev, 389static struct inotify_watch *create_watch(struct inotify_device *dev,
@@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
434/* 434/*
435 * inotify_find_dev - find the watch associated with the given inode and dev 435 * inotify_find_dev - find the watch associated with the given inode and dev
436 * 436 *
437 * Callers must hold inode->inotify_sem. 437 * Callers must hold inode->inotify_mutex.
438 */ 438 */
439static struct inotify_watch *inode_find_dev(struct inode *inode, 439static struct inotify_watch *inode_find_dev(struct inode *inode,
440 struct inotify_device *dev) 440 struct inotify_device *dev)
@@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
469 * the IN_IGNORED event to the given device signifying that the inode is no 469 * the IN_IGNORED event to the given device signifying that the inode is no
470 * longer watched. 470 * longer watched.
471 * 471 *
472 * Callers must hold both inode->inotify_sem and dev->sem. We drop a 472 * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
473 * reference to the inode before returning. 473 * reference to the inode before returning.
474 * 474 *
475 * The inode is not iput() so as to remain atomic. If the inode needs to be 475 * The inode is not iput() so as to remain atomic. If the inode needs to be
@@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
507 if (!inotify_inode_watched(inode)) 507 if (!inotify_inode_watched(inode))
508 return; 508 return;
509 509
510 down(&inode->inotify_sem); 510 mutex_lock(&inode->inotify_mutex);
511 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { 511 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
512 u32 watch_mask = watch->mask; 512 u32 watch_mask = watch->mask;
513 if (watch_mask & mask) { 513 if (watch_mask & mask) {
514 struct inotify_device *dev = watch->dev; 514 struct inotify_device *dev = watch->dev;
515 get_inotify_watch(watch); 515 get_inotify_watch(watch);
516 down(&dev->sem); 516 mutex_lock(&dev->mutex);
517 inotify_dev_queue_event(dev, watch, mask, cookie, name); 517 inotify_dev_queue_event(dev, watch, mask, cookie, name);
518 if (watch_mask & IN_ONESHOT) 518 if (watch_mask & IN_ONESHOT)
519 remove_watch_no_event(watch, dev); 519 remove_watch_no_event(watch, dev);
520 up(&dev->sem); 520 mutex_unlock(&dev->mutex);
521 put_inotify_watch(watch); 521 put_inotify_watch(watch);
522 } 522 }
523 } 523 }
524 up(&inode->inotify_sem); 524 mutex_unlock(&inode->inotify_mutex);
525} 525}
526EXPORT_SYMBOL_GPL(inotify_inode_queue_event); 526EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
527 527
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie);
569 * @list: list of inodes being unmounted (sb->s_inodes) 569 * @list: list of inodes being unmounted (sb->s_inodes)
570 * 570 *
571 * Called with inode_lock held, protecting the unmounting super block's list 571 * Called with inode_lock held, protecting the unmounting super block's list
572 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. 572 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
573 * We temporarily drop inode_lock, however, and CAN block. 573 * We temporarily drop inode_lock, however, and CAN block.
574 */ 574 */
575void inotify_unmount_inodes(struct list_head *list) 575void inotify_unmount_inodes(struct list_head *list)
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list)
618 * We can safely drop inode_lock here because we hold 618 * We can safely drop inode_lock here because we hold
619 * references on both inode and next_i. Also no new inodes 619 * references on both inode and next_i. Also no new inodes
620 * will be added since the umount has begun. Finally, 620 * will be added since the umount has begun. Finally,
621 * iprune_sem keeps shrink_icache_memory() away. 621 * iprune_mutex keeps shrink_icache_memory() away.
622 */ 622 */
623 spin_unlock(&inode_lock); 623 spin_unlock(&inode_lock);
624 624
@@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list)
626 iput(need_iput_tmp); 626 iput(need_iput_tmp);
627 627
628 /* for each watch, send IN_UNMOUNT and then remove it */ 628 /* for each watch, send IN_UNMOUNT and then remove it */
629 down(&inode->inotify_sem); 629 mutex_lock(&inode->inotify_mutex);
630 watches = &inode->inotify_watches; 630 watches = &inode->inotify_watches;
631 list_for_each_entry_safe(watch, next_w, watches, i_list) { 631 list_for_each_entry_safe(watch, next_w, watches, i_list) {
632 struct inotify_device *dev = watch->dev; 632 struct inotify_device *dev = watch->dev;
633 down(&dev->sem); 633 mutex_lock(&dev->mutex);
634 inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); 634 inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL);
635 remove_watch(watch, dev); 635 remove_watch(watch, dev);
636 up(&dev->sem); 636 mutex_unlock(&dev->mutex);
637 } 637 }
638 up(&inode->inotify_sem); 638 mutex_unlock(&inode->inotify_mutex);
639 iput(inode); 639 iput(inode);
640 640
641 spin_lock(&inode_lock); 641 spin_lock(&inode_lock);
@@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode)
651{ 651{
652 struct inotify_watch *watch, *next; 652 struct inotify_watch *watch, *next;
653 653
654 down(&inode->inotify_sem); 654 mutex_lock(&inode->inotify_mutex);
655 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { 655 list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
656 struct inotify_device *dev = watch->dev; 656 struct inotify_device *dev = watch->dev;
657 down(&dev->sem); 657 mutex_lock(&dev->mutex);
658 remove_watch(watch, dev); 658 remove_watch(watch, dev);
659 up(&dev->sem); 659 mutex_unlock(&dev->mutex);
660 } 660 }
661 up(&inode->inotify_sem); 661 mutex_unlock(&inode->inotify_mutex);
662} 662}
663EXPORT_SYMBOL_GPL(inotify_inode_is_dead); 663EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
664 664
@@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
670 int ret = 0; 670 int ret = 0;
671 671
672 poll_wait(file, &dev->wq, wait); 672 poll_wait(file, &dev->wq, wait);
673 down(&dev->sem); 673 mutex_lock(&dev->mutex);
674 if (!list_empty(&dev->events)) 674 if (!list_empty(&dev->events))
675 ret = POLLIN | POLLRDNORM; 675 ret = POLLIN | POLLRDNORM;
676 up(&dev->sem); 676 mutex_unlock(&dev->mutex);
677 677
678 return ret; 678 return ret;
679} 679}
@@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
695 695
696 prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); 696 prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
697 697
698 down(&dev->sem); 698 mutex_lock(&dev->mutex);
699 events = !list_empty(&dev->events); 699 events = !list_empty(&dev->events);
700 up(&dev->sem); 700 mutex_unlock(&dev->mutex);
701 if (events) { 701 if (events) {
702 ret = 0; 702 ret = 0;
703 break; 703 break;
@@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
720 if (ret) 720 if (ret)
721 return ret; 721 return ret;
722 722
723 down(&dev->sem); 723 mutex_lock(&dev->mutex);
724 while (1) { 724 while (1) {
725 struct inotify_kernel_event *kevent; 725 struct inotify_kernel_event *kevent;
726 726
@@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
750 750
751 remove_kevent(dev, kevent); 751 remove_kevent(dev, kevent);
752 } 752 }
753 up(&dev->sem); 753 mutex_unlock(&dev->mutex);
754 754
755 return ret; 755 return ret;
756} 756}
@@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file)
763 * Destroy all of the watches on this device. Unfortunately, not very 763 * Destroy all of the watches on this device. Unfortunately, not very
764 * pretty. We cannot do a simple iteration over the list, because we 764 * pretty. We cannot do a simple iteration over the list, because we
765 * do not know the inode until we iterate to the watch. But we need to 765 * do not know the inode until we iterate to the watch. But we need to
766 * hold inode->inotify_sem before dev->sem. The following works. 766 * hold inode->inotify_mutex before dev->mutex. The following works.
767 */ 767 */
768 while (1) { 768 while (1) {
769 struct inotify_watch *watch; 769 struct inotify_watch *watch;
770 struct list_head *watches; 770 struct list_head *watches;
771 struct inode *inode; 771 struct inode *inode;
772 772
773 down(&dev->sem); 773 mutex_lock(&dev->mutex);
774 watches = &dev->watches; 774 watches = &dev->watches;
775 if (list_empty(watches)) { 775 if (list_empty(watches)) {
776 up(&dev->sem); 776 mutex_unlock(&dev->mutex);
777 break; 777 break;
778 } 778 }
779 watch = list_entry(watches->next, struct inotify_watch, d_list); 779 watch = list_entry(watches->next, struct inotify_watch, d_list);
780 get_inotify_watch(watch); 780 get_inotify_watch(watch);
781 up(&dev->sem); 781 mutex_unlock(&dev->mutex);
782 782
783 inode = watch->inode; 783 inode = watch->inode;
784 down(&inode->inotify_sem); 784 mutex_lock(&inode->inotify_mutex);
785 down(&dev->sem); 785 mutex_lock(&dev->mutex);
786 remove_watch_no_event(watch, dev); 786 remove_watch_no_event(watch, dev);
787 up(&dev->sem); 787 mutex_unlock(&dev->mutex);
788 up(&inode->inotify_sem); 788 mutex_unlock(&inode->inotify_mutex);
789 put_inotify_watch(watch); 789 put_inotify_watch(watch);
790 } 790 }
791 791
792 /* destroy all of the events on this device */ 792 /* destroy all of the events on this device */
793 down(&dev->sem); 793 mutex_lock(&dev->mutex);
794 while (!list_empty(&dev->events)) 794 while (!list_empty(&dev->events))
795 inotify_dev_event_dequeue(dev); 795 inotify_dev_event_dequeue(dev);
796 up(&dev->sem); 796 mutex_unlock(&dev->mutex);
797 797
798 /* free this device: the put matching the get in inotify_init() */ 798 /* free this device: the put matching the get in inotify_init() */
799 put_inotify_dev(dev); 799 put_inotify_dev(dev);
@@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd)
811 struct inotify_watch *watch; 811 struct inotify_watch *watch;
812 struct inode *inode; 812 struct inode *inode;
813 813
814 down(&dev->sem); 814 mutex_lock(&dev->mutex);
815 watch = idr_find(&dev->idr, wd); 815 watch = idr_find(&dev->idr, wd);
816 if (unlikely(!watch)) { 816 if (unlikely(!watch)) {
817 up(&dev->sem); 817 mutex_unlock(&dev->mutex);
818 return -EINVAL; 818 return -EINVAL;
819 } 819 }
820 get_inotify_watch(watch); 820 get_inotify_watch(watch);
821 inode = watch->inode; 821 inode = watch->inode;
822 up(&dev->sem); 822 mutex_unlock(&dev->mutex);
823 823
824 down(&inode->inotify_sem); 824 mutex_lock(&inode->inotify_mutex);
825 down(&dev->sem); 825 mutex_lock(&dev->mutex);
826 826
827 /* make sure that we did not race */ 827 /* make sure that we did not race */
828 watch = idr_find(&dev->idr, wd); 828 watch = idr_find(&dev->idr, wd);
829 if (likely(watch)) 829 if (likely(watch))
830 remove_watch(watch, dev); 830 remove_watch(watch, dev);
831 831
832 up(&dev->sem); 832 mutex_unlock(&dev->mutex);
833 up(&inode->inotify_sem); 833 mutex_unlock(&inode->inotify_mutex);
834 put_inotify_watch(watch); 834 put_inotify_watch(watch);
835 835
836 return 0; 836 return 0;
@@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void)
905 INIT_LIST_HEAD(&dev->events); 905 INIT_LIST_HEAD(&dev->events);
906 INIT_LIST_HEAD(&dev->watches); 906 INIT_LIST_HEAD(&dev->watches);
907 init_waitqueue_head(&dev->wq); 907 init_waitqueue_head(&dev->wq);
908 sema_init(&dev->sem, 1); 908 mutex_init(&dev->mutex);
909 dev->event_count = 0; 909 dev->event_count = 0;
910 dev->queue_size = 0; 910 dev->queue_size = 0;
911 dev->max_events = inotify_max_queued_events; 911 dev->max_events = inotify_max_queued_events;
@@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
960 inode = nd.dentry->d_inode; 960 inode = nd.dentry->d_inode;
961 dev = filp->private_data; 961 dev = filp->private_data;
962 962
963 down(&inode->inotify_sem); 963 mutex_lock(&inode->inotify_mutex);
964 down(&dev->sem); 964 mutex_lock(&dev->mutex);
965 965
966 if (mask & IN_MASK_ADD) 966 if (mask & IN_MASK_ADD)
967 mask_add = 1; 967 mask_add = 1;
@@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
998 list_add(&watch->i_list, &inode->inotify_watches); 998 list_add(&watch->i_list, &inode->inotify_watches);
999 ret = watch->wd; 999 ret = watch->wd;
1000out: 1000out:
1001 up(&dev->sem); 1001 mutex_unlock(&dev->mutex);
1002 up(&inode->inotify_sem); 1002 mutex_unlock(&inode->inotify_mutex);
1003 path_release(&nd); 1003 path_release(&nd);
1004fput_and_out: 1004fput_and_out:
1005 fput_light(filp, fput_needed); 1005 fput_light(filp, fput_needed);
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 543ed543d1e5..3f5102b069db 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -85,7 +85,7 @@ void __log_wait_for_space(journal_t *journal)
85 if (journal->j_flags & JFS_ABORT) 85 if (journal->j_flags & JFS_ABORT)
86 return; 86 return;
87 spin_unlock(&journal->j_state_lock); 87 spin_unlock(&journal->j_state_lock);
88 down(&journal->j_checkpoint_sem); 88 mutex_lock(&journal->j_checkpoint_mutex);
89 89
90 /* 90 /*
91 * Test again, another process may have checkpointed while we 91 * Test again, another process may have checkpointed while we
@@ -98,7 +98,7 @@ void __log_wait_for_space(journal_t *journal)
98 log_do_checkpoint(journal); 98 log_do_checkpoint(journal);
99 spin_lock(&journal->j_state_lock); 99 spin_lock(&journal->j_state_lock);
100 } 100 }
101 up(&journal->j_checkpoint_sem); 101 mutex_unlock(&journal->j_checkpoint_mutex);
102 } 102 }
103} 103}
104 104
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index e4b516ac4989..95a628d8cac8 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -659,8 +659,8 @@ static journal_t * journal_init_common (void)
659 init_waitqueue_head(&journal->j_wait_checkpoint); 659 init_waitqueue_head(&journal->j_wait_checkpoint);
660 init_waitqueue_head(&journal->j_wait_commit); 660 init_waitqueue_head(&journal->j_wait_commit);
661 init_waitqueue_head(&journal->j_wait_updates); 661 init_waitqueue_head(&journal->j_wait_updates);
662 init_MUTEX(&journal->j_barrier); 662 mutex_init(&journal->j_barrier);
663 init_MUTEX(&journal->j_checkpoint_sem); 663 mutex_init(&journal->j_checkpoint_mutex);
664 spin_lock_init(&journal->j_revoke_lock); 664 spin_lock_init(&journal->j_revoke_lock);
665 spin_lock_init(&journal->j_list_lock); 665 spin_lock_init(&journal->j_list_lock);
666 spin_lock_init(&journal->j_state_lock); 666 spin_lock_init(&journal->j_state_lock);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index ca917973c2c0..5fc40888f4cf 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -455,7 +455,7 @@ void journal_lock_updates(journal_t *journal)
455 * to make sure that we serialise special journal-locked operations 455 * to make sure that we serialise special journal-locked operations
456 * too. 456 * too.
457 */ 457 */
458 down(&journal->j_barrier); 458 mutex_lock(&journal->j_barrier);
459} 459}
460 460
461/** 461/**
@@ -470,7 +470,7 @@ void journal_unlock_updates (journal_t *journal)
470{ 470{
471 J_ASSERT(journal->j_barrier_count != 0); 471 J_ASSERT(journal->j_barrier_count != 0);
472 472
473 up(&journal->j_barrier); 473 mutex_unlock(&journal->j_barrier);
474 spin_lock(&journal->j_state_lock); 474 spin_lock(&journal->j_state_lock);
475 --journal->j_barrier_count; 475 --journal->j_barrier_count;
476 spin_unlock(&journal->j_state_lock); 476 spin_unlock(&journal->j_state_lock);
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index fc3855a1aef3..890d7ff7456d 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -42,7 +42,7 @@
42#include <linux/quotaops.h> 42#include <linux/quotaops.h>
43#include <linux/highmem.h> 43#include <linux/highmem.h>
44#include <linux/vfs.h> 44#include <linux/vfs.h>
45#include <asm/semaphore.h> 45#include <linux/mutex.h>
46#include <asm/byteorder.h> 46#include <asm/byteorder.h>
47#include <asm/uaccess.h> 47#include <asm/uaccess.h>
48 48
@@ -203,7 +203,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
203 fmc = c->fmc; 203 fmc = c->fmc;
204 204
205 D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); 205 D3(printk (KERN_NOTICE "notify_change(): down biglock\n"));
206 down(&fmc->biglock); 206 mutex_lock(&fmc->biglock);
207 207
208 f = jffs_find_file(c, inode->i_ino); 208 f = jffs_find_file(c, inode->i_ino);
209 209
@@ -211,7 +211,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
211 printk("jffs_setattr(): Invalid inode number: %lu\n", 211 printk("jffs_setattr(): Invalid inode number: %lu\n",
212 inode->i_ino); 212 inode->i_ino);
213 D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); 213 D3(printk (KERN_NOTICE "notify_change(): up biglock\n"));
214 up(&fmc->biglock); 214 mutex_unlock(&fmc->biglock);
215 res = -EINVAL; 215 res = -EINVAL;
216 goto out; 216 goto out;
217 }); 217 });
@@ -232,7 +232,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
232 if (!(new_node = jffs_alloc_node())) { 232 if (!(new_node = jffs_alloc_node())) {
233 D(printk("jffs_setattr(): Allocation failed!\n")); 233 D(printk("jffs_setattr(): Allocation failed!\n"));
234 D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); 234 D3(printk (KERN_NOTICE "notify_change(): up biglock\n"));
235 up(&fmc->biglock); 235 mutex_unlock(&fmc->biglock);
236 res = -ENOMEM; 236 res = -ENOMEM;
237 goto out; 237 goto out;
238 } 238 }
@@ -319,7 +319,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
319 D(printk("jffs_notify_change(): The write failed!\n")); 319 D(printk("jffs_notify_change(): The write failed!\n"));
320 jffs_free_node(new_node); 320 jffs_free_node(new_node);
321 D3(printk (KERN_NOTICE "n_c(): up biglock\n")); 321 D3(printk (KERN_NOTICE "n_c(): up biglock\n"));
322 up(&c->fmc->biglock); 322 mutex_unlock(&c->fmc->biglock);
323 goto out; 323 goto out;
324 } 324 }
325 325
@@ -327,7 +327,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr)
327 327
328 mark_inode_dirty(inode); 328 mark_inode_dirty(inode);
329 D3(printk (KERN_NOTICE "n_c(): up biglock\n")); 329 D3(printk (KERN_NOTICE "n_c(): up biglock\n"));
330 up(&c->fmc->biglock); 330 mutex_unlock(&c->fmc->biglock);
331out: 331out:
332 unlock_kernel(); 332 unlock_kernel();
333 return res; 333 return res;
@@ -461,7 +461,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
461 goto jffs_rename_end; 461 goto jffs_rename_end;
462 } 462 }
463 D3(printk (KERN_NOTICE "rename(): down biglock\n")); 463 D3(printk (KERN_NOTICE "rename(): down biglock\n"));
464 down(&c->fmc->biglock); 464 mutex_lock(&c->fmc->biglock);
465 /* Create a node and initialize as much as needed. */ 465 /* Create a node and initialize as much as needed. */
466 result = -ENOMEM; 466 result = -ENOMEM;
467 if (!(node = jffs_alloc_node())) { 467 if (!(node = jffs_alloc_node())) {
@@ -555,7 +555,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry,
555 555
556jffs_rename_end: 556jffs_rename_end:
557 D3(printk (KERN_NOTICE "rename(): up biglock\n")); 557 D3(printk (KERN_NOTICE "rename(): up biglock\n"));
558 up(&c->fmc->biglock); 558 mutex_unlock(&c->fmc->biglock);
559 unlock_kernel(); 559 unlock_kernel();
560 return result; 560 return result;
561} /* jffs_rename() */ 561} /* jffs_rename() */
@@ -574,14 +574,14 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
574 int ddino; 574 int ddino;
575 lock_kernel(); 575 lock_kernel();
576 D3(printk (KERN_NOTICE "readdir(): down biglock\n")); 576 D3(printk (KERN_NOTICE "readdir(): down biglock\n"));
577 down(&c->fmc->biglock); 577 mutex_lock(&c->fmc->biglock);
578 578
579 D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); 579 D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp));
580 if (filp->f_pos == 0) { 580 if (filp->f_pos == 0) {
581 D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); 581 D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino));
582 if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { 582 if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) {
583 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 583 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
584 up(&c->fmc->biglock); 584 mutex_unlock(&c->fmc->biglock);
585 unlock_kernel(); 585 unlock_kernel();
586 return 0; 586 return 0;
587 } 587 }
@@ -598,7 +598,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
598 D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); 598 D3(printk("jffs_readdir(): \"..\" %u\n", ddino));
599 if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { 599 if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) {
600 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 600 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
601 up(&c->fmc->biglock); 601 mutex_unlock(&c->fmc->biglock);
602 unlock_kernel(); 602 unlock_kernel();
603 return 0; 603 return 0;
604 } 604 }
@@ -617,7 +617,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
617 if (filldir(dirent, f->name, f->nsize, 617 if (filldir(dirent, f->name, f->nsize,
618 filp->f_pos , f->ino, DT_UNKNOWN) < 0) { 618 filp->f_pos , f->ino, DT_UNKNOWN) < 0) {
619 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 619 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
620 up(&c->fmc->biglock); 620 mutex_unlock(&c->fmc->biglock);
621 unlock_kernel(); 621 unlock_kernel();
622 return 0; 622 return 0;
623 } 623 }
@@ -627,7 +627,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir)
627 } while(f && f->deleted); 627 } while(f && f->deleted);
628 } 628 }
629 D3(printk (KERN_NOTICE "readdir(): up biglock\n")); 629 D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
630 up(&c->fmc->biglock); 630 mutex_unlock(&c->fmc->biglock);
631 unlock_kernel(); 631 unlock_kernel();
632 return filp->f_pos; 632 return filp->f_pos;
633} /* jffs_readdir() */ 633} /* jffs_readdir() */
@@ -660,7 +660,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
660 }); 660 });
661 661
662 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 662 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
663 down(&c->fmc->biglock); 663 mutex_lock(&c->fmc->biglock);
664 664
665 r = -ENAMETOOLONG; 665 r = -ENAMETOOLONG;
666 if (len > JFFS_MAX_NAME_LEN) { 666 if (len > JFFS_MAX_NAME_LEN) {
@@ -683,31 +683,31 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
683 683
684 if ((len == 1) && (name[0] == '.')) { 684 if ((len == 1) && (name[0] == '.')) {
685 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 685 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
686 up(&c->fmc->biglock); 686 mutex_unlock(&c->fmc->biglock);
687 if (!(inode = iget(dir->i_sb, d->ino))) { 687 if (!(inode = iget(dir->i_sb, d->ino))) {
688 D(printk("jffs_lookup(): . iget() ==> NULL\n")); 688 D(printk("jffs_lookup(): . iget() ==> NULL\n"));
689 goto jffs_lookup_end_no_biglock; 689 goto jffs_lookup_end_no_biglock;
690 } 690 }
691 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 691 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
692 down(&c->fmc->biglock); 692 mutex_lock(&c->fmc->biglock);
693 } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { 693 } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) {
694 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 694 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
695 up(&c->fmc->biglock); 695 mutex_unlock(&c->fmc->biglock);
696 if (!(inode = iget(dir->i_sb, d->pino))) { 696 if (!(inode = iget(dir->i_sb, d->pino))) {
697 D(printk("jffs_lookup(): .. iget() ==> NULL\n")); 697 D(printk("jffs_lookup(): .. iget() ==> NULL\n"));
698 goto jffs_lookup_end_no_biglock; 698 goto jffs_lookup_end_no_biglock;
699 } 699 }
700 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 700 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
701 down(&c->fmc->biglock); 701 mutex_lock(&c->fmc->biglock);
702 } else if ((f = jffs_find_child(d, name, len))) { 702 } else if ((f = jffs_find_child(d, name, len))) {
703 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 703 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
704 up(&c->fmc->biglock); 704 mutex_unlock(&c->fmc->biglock);
705 if (!(inode = iget(dir->i_sb, f->ino))) { 705 if (!(inode = iget(dir->i_sb, f->ino))) {
706 D(printk("jffs_lookup(): iget() ==> NULL\n")); 706 D(printk("jffs_lookup(): iget() ==> NULL\n"));
707 goto jffs_lookup_end_no_biglock; 707 goto jffs_lookup_end_no_biglock;
708 } 708 }
709 D3(printk (KERN_NOTICE "lookup(): down biglock\n")); 709 D3(printk (KERN_NOTICE "lookup(): down biglock\n"));
710 down(&c->fmc->biglock); 710 mutex_lock(&c->fmc->biglock);
711 } else { 711 } else {
712 D3(printk("jffs_lookup(): Couldn't find the file. " 712 D3(printk("jffs_lookup(): Couldn't find the file. "
713 "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", 713 "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n",
@@ -717,13 +717,13 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
717 717
718 d_add(dentry, inode); 718 d_add(dentry, inode);
719 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 719 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
720 up(&c->fmc->biglock); 720 mutex_unlock(&c->fmc->biglock);
721 unlock_kernel(); 721 unlock_kernel();
722 return NULL; 722 return NULL;
723 723
724jffs_lookup_end: 724jffs_lookup_end:
725 D3(printk (KERN_NOTICE "lookup(): up biglock\n")); 725 D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
726 up(&c->fmc->biglock); 726 mutex_unlock(&c->fmc->biglock);
727 727
728jffs_lookup_end_no_biglock: 728jffs_lookup_end_no_biglock:
729 unlock_kernel(); 729 unlock_kernel();
@@ -753,7 +753,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
753 ClearPageError(page); 753 ClearPageError(page);
754 754
755 D3(printk (KERN_NOTICE "readpage(): down biglock\n")); 755 D3(printk (KERN_NOTICE "readpage(): down biglock\n"));
756 down(&c->fmc->biglock); 756 mutex_lock(&c->fmc->biglock);
757 757
758 read_len = 0; 758 read_len = 0;
759 result = 0; 759 result = 0;
@@ -782,7 +782,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
782 kunmap(page); 782 kunmap(page);
783 783
784 D3(printk (KERN_NOTICE "readpage(): up biglock\n")); 784 D3(printk (KERN_NOTICE "readpage(): up biglock\n"));
785 up(&c->fmc->biglock); 785 mutex_unlock(&c->fmc->biglock);
786 786
787 if (result) { 787 if (result) {
788 SetPageError(page); 788 SetPageError(page);
@@ -839,7 +839,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
839 839
840 c = dir_f->c; 840 c = dir_f->c;
841 D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); 841 D3(printk (KERN_NOTICE "mkdir(): down biglock\n"));
842 down(&c->fmc->biglock); 842 mutex_lock(&c->fmc->biglock);
843 843
844 dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) 844 dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX)
845 & ~current->fs->umask); 845 & ~current->fs->umask);
@@ -906,7 +906,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
906 result = 0; 906 result = 0;
907jffs_mkdir_end: 907jffs_mkdir_end:
908 D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); 908 D3(printk (KERN_NOTICE "mkdir(): up biglock\n"));
909 up(&c->fmc->biglock); 909 mutex_unlock(&c->fmc->biglock);
910 unlock_kernel(); 910 unlock_kernel();
911 return result; 911 return result;
912} /* jffs_mkdir() */ 912} /* jffs_mkdir() */
@@ -921,10 +921,10 @@ jffs_rmdir(struct inode *dir, struct dentry *dentry)
921 D3(printk("***jffs_rmdir()\n")); 921 D3(printk("***jffs_rmdir()\n"));
922 D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); 922 D3(printk (KERN_NOTICE "rmdir(): down biglock\n"));
923 lock_kernel(); 923 lock_kernel();
924 down(&c->fmc->biglock); 924 mutex_lock(&c->fmc->biglock);
925 ret = jffs_remove(dir, dentry, S_IFDIR); 925 ret = jffs_remove(dir, dentry, S_IFDIR);
926 D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); 926 D3(printk (KERN_NOTICE "rmdir(): up biglock\n"));
927 up(&c->fmc->biglock); 927 mutex_unlock(&c->fmc->biglock);
928 unlock_kernel(); 928 unlock_kernel();
929 return ret; 929 return ret;
930} 930}
@@ -940,10 +940,10 @@ jffs_unlink(struct inode *dir, struct dentry *dentry)
940 lock_kernel(); 940 lock_kernel();
941 D3(printk("***jffs_unlink()\n")); 941 D3(printk("***jffs_unlink()\n"));
942 D3(printk (KERN_NOTICE "unlink(): down biglock\n")); 942 D3(printk (KERN_NOTICE "unlink(): down biglock\n"));
943 down(&c->fmc->biglock); 943 mutex_lock(&c->fmc->biglock);
944 ret = jffs_remove(dir, dentry, 0); 944 ret = jffs_remove(dir, dentry, 0);
945 D3(printk (KERN_NOTICE "unlink(): up biglock\n")); 945 D3(printk (KERN_NOTICE "unlink(): up biglock\n"));
946 up(&c->fmc->biglock); 946 mutex_unlock(&c->fmc->biglock);
947 unlock_kernel(); 947 unlock_kernel();
948 return ret; 948 return ret;
949} 949}
@@ -1086,7 +1086,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
1086 c = dir_f->c; 1086 c = dir_f->c;
1087 1087
1088 D3(printk (KERN_NOTICE "mknod(): down biglock\n")); 1088 D3(printk (KERN_NOTICE "mknod(): down biglock\n"));
1089 down(&c->fmc->biglock); 1089 mutex_lock(&c->fmc->biglock);
1090 1090
1091 /* Create and initialize a new node. */ 1091 /* Create and initialize a new node. */
1092 if (!(node = jffs_alloc_node())) { 1092 if (!(node = jffs_alloc_node())) {
@@ -1152,7 +1152,7 @@ jffs_mknod_err:
1152 1152
1153jffs_mknod_end: 1153jffs_mknod_end:
1154 D3(printk (KERN_NOTICE "mknod(): up biglock\n")); 1154 D3(printk (KERN_NOTICE "mknod(): up biglock\n"));
1155 up(&c->fmc->biglock); 1155 mutex_unlock(&c->fmc->biglock);
1156 unlock_kernel(); 1156 unlock_kernel();
1157 return result; 1157 return result;
1158} /* jffs_mknod() */ 1158} /* jffs_mknod() */
@@ -1203,7 +1203,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1203 return -ENOMEM; 1203 return -ENOMEM;
1204 } 1204 }
1205 D3(printk (KERN_NOTICE "symlink(): down biglock\n")); 1205 D3(printk (KERN_NOTICE "symlink(): down biglock\n"));
1206 down(&c->fmc->biglock); 1206 mutex_lock(&c->fmc->biglock);
1207 1207
1208 node->data_offset = 0; 1208 node->data_offset = 0;
1209 node->removed_size = 0; 1209 node->removed_size = 0;
@@ -1253,7 +1253,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1253 d_instantiate(dentry, inode); 1253 d_instantiate(dentry, inode);
1254 jffs_symlink_end: 1254 jffs_symlink_end:
1255 D3(printk (KERN_NOTICE "symlink(): up biglock\n")); 1255 D3(printk (KERN_NOTICE "symlink(): up biglock\n"));
1256 up(&c->fmc->biglock); 1256 mutex_unlock(&c->fmc->biglock);
1257 unlock_kernel(); 1257 unlock_kernel();
1258 return err; 1258 return err;
1259} /* jffs_symlink() */ 1259} /* jffs_symlink() */
@@ -1306,7 +1306,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode,
1306 return -ENOMEM; 1306 return -ENOMEM;
1307 } 1307 }
1308 D3(printk (KERN_NOTICE "create(): down biglock\n")); 1308 D3(printk (KERN_NOTICE "create(): down biglock\n"));
1309 down(&c->fmc->biglock); 1309 mutex_lock(&c->fmc->biglock);
1310 1310
1311 node->data_offset = 0; 1311 node->data_offset = 0;
1312 node->removed_size = 0; 1312 node->removed_size = 0;
@@ -1359,7 +1359,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode,
1359 d_instantiate(dentry, inode); 1359 d_instantiate(dentry, inode);
1360 jffs_create_end: 1360 jffs_create_end:
1361 D3(printk (KERN_NOTICE "create(): up biglock\n")); 1361 D3(printk (KERN_NOTICE "create(): up biglock\n"));
1362 up(&c->fmc->biglock); 1362 mutex_unlock(&c->fmc->biglock);
1363 unlock_kernel(); 1363 unlock_kernel();
1364 return err; 1364 return err;
1365} /* jffs_create() */ 1365} /* jffs_create() */
@@ -1423,7 +1423,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
1423 thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); 1423 thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count);
1424 1424
1425 D3(printk (KERN_NOTICE "file_write(): down biglock\n")); 1425 D3(printk (KERN_NOTICE "file_write(): down biglock\n"));
1426 down(&c->fmc->biglock); 1426 mutex_lock(&c->fmc->biglock);
1427 1427
1428 /* Urgh. POSIX says we can do short writes if we feel like it. 1428 /* Urgh. POSIX says we can do short writes if we feel like it.
1429 * In practice, we can't. Nothing will cope. So we loop until 1429 * In practice, we can't. Nothing will cope. So we loop until
@@ -1511,7 +1511,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
1511 } 1511 }
1512 out: 1512 out:
1513 D3(printk (KERN_NOTICE "file_write(): up biglock\n")); 1513 D3(printk (KERN_NOTICE "file_write(): up biglock\n"));
1514 up(&c->fmc->biglock); 1514 mutex_unlock(&c->fmc->biglock);
1515 1515
1516 /* Fix things in the real inode. */ 1516 /* Fix things in the real inode. */
1517 if (pos > inode->i_size) { 1517 if (pos > inode->i_size) {
@@ -1567,7 +1567,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1567 return -EIO; 1567 return -EIO;
1568 } 1568 }
1569 D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); 1569 D3(printk (KERN_NOTICE "ioctl(): down biglock\n"));
1570 down(&c->fmc->biglock); 1570 mutex_lock(&c->fmc->biglock);
1571 1571
1572 switch (cmd) { 1572 switch (cmd) {
1573 case JFFS_PRINT_HASH: 1573 case JFFS_PRINT_HASH:
@@ -1609,7 +1609,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
1609 ret = -ENOTTY; 1609 ret = -ENOTTY;
1610 } 1610 }
1611 D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); 1611 D3(printk (KERN_NOTICE "ioctl(): up biglock\n"));
1612 up(&c->fmc->biglock); 1612 mutex_unlock(&c->fmc->biglock);
1613 return ret; 1613 return ret;
1614} /* jffs_ioctl() */ 1614} /* jffs_ioctl() */
1615 1615
@@ -1685,12 +1685,12 @@ jffs_read_inode(struct inode *inode)
1685 } 1685 }
1686 c = (struct jffs_control *)inode->i_sb->s_fs_info; 1686 c = (struct jffs_control *)inode->i_sb->s_fs_info;
1687 D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); 1687 D3(printk (KERN_NOTICE "read_inode(): down biglock\n"));
1688 down(&c->fmc->biglock); 1688 mutex_lock(&c->fmc->biglock);
1689 if (!(f = jffs_find_file(c, inode->i_ino))) { 1689 if (!(f = jffs_find_file(c, inode->i_ino))) {
1690 D(printk("jffs_read_inode(): No such inode (%lu).\n", 1690 D(printk("jffs_read_inode(): No such inode (%lu).\n",
1691 inode->i_ino)); 1691 inode->i_ino));
1692 D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); 1692 D3(printk (KERN_NOTICE "read_inode(): up biglock\n"));
1693 up(&c->fmc->biglock); 1693 mutex_unlock(&c->fmc->biglock);
1694 return; 1694 return;
1695 } 1695 }
1696 inode->u.generic_ip = (void *)f; 1696 inode->u.generic_ip = (void *)f;
@@ -1732,7 +1732,7 @@ jffs_read_inode(struct inode *inode)
1732 } 1732 }
1733 1733
1734 D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); 1734 D3(printk (KERN_NOTICE "read_inode(): up biglock\n"));
1735 up(&c->fmc->biglock); 1735 mutex_unlock(&c->fmc->biglock);
1736} 1736}
1737 1737
1738 1738
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index ce7b54b0b2b7..0ef207dfaf6f 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -62,7 +62,7 @@
62#include <linux/fs.h> 62#include <linux/fs.h>
63#include <linux/stat.h> 63#include <linux/stat.h>
64#include <linux/pagemap.h> 64#include <linux/pagemap.h>
65#include <asm/semaphore.h> 65#include <linux/mutex.h>
66#include <asm/byteorder.h> 66#include <asm/byteorder.h>
67#include <linux/smp_lock.h> 67#include <linux/smp_lock.h>
68#include <linux/time.h> 68#include <linux/time.h>
@@ -3416,7 +3416,7 @@ jffs_garbage_collect_thread(void *ptr)
3416 D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); 3416 D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n"));
3417 3417
3418 D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); 3418 D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n"));
3419 down(&fmc->biglock); 3419 mutex_lock(&fmc->biglock);
3420 3420
3421 D1(printk("***jffs_garbage_collect_thread(): round #%u, " 3421 D1(printk("***jffs_garbage_collect_thread(): round #%u, "
3422 "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); 3422 "fmc->dirty_size = %u\n", i++, fmc->dirty_size));
@@ -3447,6 +3447,6 @@ jffs_garbage_collect_thread(void *ptr)
3447 3447
3448 gc_end: 3448 gc_end:
3449 D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); 3449 D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n"));
3450 up(&fmc->biglock); 3450 mutex_unlock(&fmc->biglock);
3451 } /* for (;;) */ 3451 } /* for (;;) */
3452} /* jffs_garbage_collect_thread() */ 3452} /* jffs_garbage_collect_thread() */
diff --git a/fs/jffs/jffs_fm.c b/fs/jffs/jffs_fm.c
index 6da13b309bd1..7d8ca1aeace2 100644
--- a/fs/jffs/jffs_fm.c
+++ b/fs/jffs/jffs_fm.c
@@ -139,7 +139,7 @@ jffs_build_begin(struct jffs_control *c, int unit)
139 fmc->tail = NULL; 139 fmc->tail = NULL;
140 fmc->head_extra = NULL; 140 fmc->head_extra = NULL;
141 fmc->tail_extra = NULL; 141 fmc->tail_extra = NULL;
142 init_MUTEX(&fmc->biglock); 142 mutex_init(&fmc->biglock);
143 return fmc; 143 return fmc;
144} 144}
145 145
diff --git a/fs/jffs/jffs_fm.h b/fs/jffs/jffs_fm.h
index f64151e74122..c794d923df2a 100644
--- a/fs/jffs/jffs_fm.h
+++ b/fs/jffs/jffs_fm.h
@@ -20,10 +20,11 @@
20#ifndef __LINUX_JFFS_FM_H__ 20#ifndef __LINUX_JFFS_FM_H__
21#define __LINUX_JFFS_FM_H__ 21#define __LINUX_JFFS_FM_H__
22 22
23#include <linux/config.h>
23#include <linux/types.h> 24#include <linux/types.h>
24#include <linux/jffs.h> 25#include <linux/jffs.h>
25#include <linux/mtd/mtd.h> 26#include <linux/mtd/mtd.h>
26#include <linux/config.h> 27#include <linux/mutex.h>
27 28
28/* The alignment between two nodes in the flash memory. */ 29/* The alignment between two nodes in the flash memory. */
29#define JFFS_ALIGN_SIZE 4 30#define JFFS_ALIGN_SIZE 4
@@ -97,7 +98,7 @@ struct jffs_fmcontrol
97 struct jffs_fm *tail; 98 struct jffs_fm *tail;
98 struct jffs_fm *head_extra; 99 struct jffs_fm *head_extra;
99 struct jffs_fm *tail_extra; 100 struct jffs_fm *tail_extra;
100 struct semaphore biglock; 101 struct mutex biglock;
101}; 102};
102 103
103/* Notice the two members head_extra and tail_extra in the jffs_control 104/* Notice the two members head_extra and tail_extra in the jffs_control
diff --git a/fs/libfs.c b/fs/libfs.c
index 71fd08fa4103..4fdeaceb892c 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -7,6 +7,8 @@
7#include <linux/pagemap.h> 7#include <linux/pagemap.h>
8#include <linux/mount.h> 8#include <linux/mount.h>
9#include <linux/vfs.h> 9#include <linux/vfs.h>
10#include <linux/mutex.h>
11
10#include <asm/uaccess.h> 12#include <asm/uaccess.h>
11 13
12int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, 14int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -530,7 +532,7 @@ struct simple_attr {
530 char set_buf[24]; 532 char set_buf[24];
531 void *data; 533 void *data;
532 const char *fmt; /* format for read operation */ 534 const char *fmt; /* format for read operation */
533 struct semaphore sem; /* protects access to these buffers */ 535 struct mutex mutex; /* protects access to these buffers */
534}; 536};
535 537
536/* simple_attr_open is called by an actual attribute open file operation 538/* simple_attr_open is called by an actual attribute open file operation
@@ -549,7 +551,7 @@ int simple_attr_open(struct inode *inode, struct file *file,
549 attr->set = set; 551 attr->set = set;
550 attr->data = inode->u.generic_ip; 552 attr->data = inode->u.generic_ip;
551 attr->fmt = fmt; 553 attr->fmt = fmt;
552 init_MUTEX(&attr->sem); 554 mutex_init(&attr->mutex);
553 555
554 file->private_data = attr; 556 file->private_data = attr;
555 557
@@ -575,7 +577,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
575 if (!attr->get) 577 if (!attr->get)
576 return -EACCES; 578 return -EACCES;
577 579
578 down(&attr->sem); 580 mutex_lock(&attr->mutex);
579 if (*ppos) /* continued read */ 581 if (*ppos) /* continued read */
580 size = strlen(attr->get_buf); 582 size = strlen(attr->get_buf);
581 else /* first read */ 583 else /* first read */
@@ -584,7 +586,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
584 (unsigned long long)attr->get(attr->data)); 586 (unsigned long long)attr->get(attr->data));
585 587
586 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); 588 ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
587 up(&attr->sem); 589 mutex_unlock(&attr->mutex);
588 return ret; 590 return ret;
589} 591}
590 592
@@ -602,7 +604,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
602 if (!attr->set) 604 if (!attr->set)
603 return -EACCES; 605 return -EACCES;
604 606
605 down(&attr->sem); 607 mutex_lock(&attr->mutex);
606 ret = -EFAULT; 608 ret = -EFAULT;
607 size = min(sizeof(attr->set_buf) - 1, len); 609 size = min(sizeof(attr->set_buf) - 1, len);
608 if (copy_from_user(attr->set_buf, buf, size)) 610 if (copy_from_user(attr->set_buf, buf, size))
@@ -613,7 +615,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
613 val = simple_strtol(attr->set_buf, NULL, 0); 615 val = simple_strtol(attr->set_buf, NULL, 0);
614 attr->set(attr->data, val); 616 attr->set(attr->data, val);
615out: 617out:
616 up(&attr->sem); 618 mutex_unlock(&attr->mutex);
617 return ret; 619 return ret;
618} 620}
619 621
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index b25bca5bdb57..5b6a4540a05b 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -6,18 +6,6 @@
6 6
7#include "minix.h" 7#include "minix.h"
8 8
9static inline void inc_count(struct inode *inode)
10{
11 inode->i_nlink++;
12 mark_inode_dirty(inode);
13}
14
15static inline void dec_count(struct inode *inode)
16{
17 inode->i_nlink--;
18 mark_inode_dirty(inode);
19}
20
21static int add_nondir(struct dentry *dentry, struct inode *inode) 9static int add_nondir(struct dentry *dentry, struct inode *inode)
22{ 10{
23 int err = minix_add_link(dentry, inode); 11 int err = minix_add_link(dentry, inode);
@@ -25,7 +13,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode)
25 d_instantiate(dentry, inode); 13 d_instantiate(dentry, inode);
26 return 0; 14 return 0;
27 } 15 }
28 dec_count(inode); 16 inode_dec_link_count(inode);
29 iput(inode); 17 iput(inode);
30 return err; 18 return err;
31} 19}
@@ -125,7 +113,7 @@ out:
125 return err; 113 return err;
126 114
127out_fail: 115out_fail:
128 dec_count(inode); 116 inode_dec_link_count(inode);
129 iput(inode); 117 iput(inode);
130 goto out; 118 goto out;
131} 119}
@@ -139,7 +127,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
139 return -EMLINK; 127 return -EMLINK;
140 128
141 inode->i_ctime = CURRENT_TIME_SEC; 129 inode->i_ctime = CURRENT_TIME_SEC;
142 inc_count(inode); 130 inode_inc_link_count(inode);
143 atomic_inc(&inode->i_count); 131 atomic_inc(&inode->i_count);
144 return add_nondir(dentry, inode); 132 return add_nondir(dentry, inode);
145} 133}
@@ -152,7 +140,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
152 if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) 140 if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max)
153 goto out; 141 goto out;
154 142
155 inc_count(dir); 143 inode_inc_link_count(dir);
156 144
157 inode = minix_new_inode(dir, &err); 145 inode = minix_new_inode(dir, &err);
158 if (!inode) 146 if (!inode)
@@ -163,7 +151,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
163 inode->i_mode |= S_ISGID; 151 inode->i_mode |= S_ISGID;
164 minix_set_inode(inode, 0); 152 minix_set_inode(inode, 0);
165 153
166 inc_count(inode); 154 inode_inc_link_count(inode);
167 155
168 err = minix_make_empty(inode, dir); 156 err = minix_make_empty(inode, dir);
169 if (err) 157 if (err)
@@ -178,11 +166,11 @@ out:
178 return err; 166 return err;
179 167
180out_fail: 168out_fail:
181 dec_count(inode); 169 inode_dec_link_count(inode);
182 dec_count(inode); 170 inode_dec_link_count(inode);
183 iput(inode); 171 iput(inode);
184out_dir: 172out_dir:
185 dec_count(dir); 173 inode_dec_link_count(dir);
186 goto out; 174 goto out;
187} 175}
188 176
@@ -202,7 +190,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry)
202 goto end_unlink; 190 goto end_unlink;
203 191
204 inode->i_ctime = dir->i_ctime; 192 inode->i_ctime = dir->i_ctime;
205 dec_count(inode); 193 inode_dec_link_count(inode);
206end_unlink: 194end_unlink:
207 return err; 195 return err;
208} 196}
@@ -215,8 +203,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
215 if (minix_empty_dir(inode)) { 203 if (minix_empty_dir(inode)) {
216 err = minix_unlink(dir, dentry); 204 err = minix_unlink(dir, dentry);
217 if (!err) { 205 if (!err) {
218 dec_count(dir); 206 inode_dec_link_count(dir);
219 dec_count(inode); 207 inode_dec_link_count(inode);
220 } 208 }
221 } 209 }
222 return err; 210 return err;
@@ -257,34 +245,34 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
257 new_de = minix_find_entry(new_dentry, &new_page); 245 new_de = minix_find_entry(new_dentry, &new_page);
258 if (!new_de) 246 if (!new_de)
259 goto out_dir; 247 goto out_dir;
260 inc_count(old_inode); 248 inode_inc_link_count(old_inode);
261 minix_set_link(new_de, new_page, old_inode); 249 minix_set_link(new_de, new_page, old_inode);
262 new_inode->i_ctime = CURRENT_TIME_SEC; 250 new_inode->i_ctime = CURRENT_TIME_SEC;
263 if (dir_de) 251 if (dir_de)
264 new_inode->i_nlink--; 252 new_inode->i_nlink--;
265 dec_count(new_inode); 253 inode_dec_link_count(new_inode);
266 } else { 254 } else {
267 if (dir_de) { 255 if (dir_de) {
268 err = -EMLINK; 256 err = -EMLINK;
269 if (new_dir->i_nlink >= info->s_link_max) 257 if (new_dir->i_nlink >= info->s_link_max)
270 goto out_dir; 258 goto out_dir;
271 } 259 }
272 inc_count(old_inode); 260 inode_inc_link_count(old_inode);
273 err = minix_add_link(new_dentry, old_inode); 261 err = minix_add_link(new_dentry, old_inode);
274 if (err) { 262 if (err) {
275 dec_count(old_inode); 263 inode_dec_link_count(old_inode);
276 goto out_dir; 264 goto out_dir;
277 } 265 }
278 if (dir_de) 266 if (dir_de)
279 inc_count(new_dir); 267 inode_inc_link_count(new_dir);
280 } 268 }
281 269
282 minix_delete_entry(old_de, old_page); 270 minix_delete_entry(old_de, old_page);
283 dec_count(old_inode); 271 inode_dec_link_count(old_inode);
284 272
285 if (dir_de) { 273 if (dir_de) {
286 minix_set_link(dir_de, dir_page, new_dir); 274 minix_set_link(dir_de, dir_page, new_dir);
287 dec_count(old_dir); 275 inode_dec_link_count(old_dir);
288 } 276 }
289 return 0; 277 return 0;
290 278
diff --git a/fs/namei.c b/fs/namei.c
index 8dc2b038d5d9..c72b940797fc 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -104,7 +104,7 @@
104 */ 104 */
105/* 105/*
106 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) 106 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
107 * implemented. Let's see if raised priority of ->s_vfs_rename_sem gives 107 * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives
108 * any extra contention... 108 * any extra contention...
109 */ 109 */
110 110
@@ -1422,7 +1422,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
1422 return NULL; 1422 return NULL;
1423 } 1423 }
1424 1424
1425 down(&p1->d_inode->i_sb->s_vfs_rename_sem); 1425 mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
1426 1426
1427 for (p = p1; p->d_parent != p; p = p->d_parent) { 1427 for (p = p1; p->d_parent != p; p = p->d_parent) {
1428 if (p->d_parent == p2) { 1428 if (p->d_parent == p2) {
@@ -1450,7 +1450,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
1450 mutex_unlock(&p1->d_inode->i_mutex); 1450 mutex_unlock(&p1->d_inode->i_mutex);
1451 if (p1 != p2) { 1451 if (p1 != p2) {
1452 mutex_unlock(&p2->d_inode->i_mutex); 1452 mutex_unlock(&p2->d_inode->i_mutex);
1453 up(&p1->d_inode->i_sb->s_vfs_rename_sem); 1453 mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
1454 } 1454 }
1455} 1455}
1456 1456
@@ -2277,17 +2277,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
2277 * a) we can get into loop creation. Check is done in is_subdir(). 2277 * a) we can get into loop creation. Check is done in is_subdir().
2278 * b) race potential - two innocent renames can create a loop together. 2278 * b) race potential - two innocent renames can create a loop together.
2279 * That's where 4.4 screws up. Current fix: serialization on 2279 * That's where 4.4 screws up. Current fix: serialization on
2280 * sb->s_vfs_rename_sem. We might be more accurate, but that's another 2280 * sb->s_vfs_rename_mutex. We might be more accurate, but that's another
2281 * story. 2281 * story.
2282 * c) we have to lock _three_ objects - parents and victim (if it exists). 2282 * c) we have to lock _three_ objects - parents and victim (if it exists).
2283 * And that - after we got ->i_mutex on parents (until then we don't know 2283 * And that - after we got ->i_mutex on parents (until then we don't know
2284 * whether the target exists). Solution: try to be smart with locking 2284 * whether the target exists). Solution: try to be smart with locking
2285 * order for inodes. We rely on the fact that tree topology may change 2285 * order for inodes. We rely on the fact that tree topology may change
2286 * only under ->s_vfs_rename_sem _and_ that parent of the object we 2286 * only under ->s_vfs_rename_mutex _and_ that parent of the object we
2287 * move will be locked. Thus we can rank directories by the tree 2287 * move will be locked. Thus we can rank directories by the tree
2288 * (ancestors first) and rank all non-directories after them. 2288 * (ancestors first) and rank all non-directories after them.
2289 * That works since everybody except rename does "lock parent, lookup, 2289 * That works since everybody except rename does "lock parent, lookup,
2290 * lock child" and rename is under ->s_vfs_rename_sem. 2290 * lock child" and rename is under ->s_vfs_rename_mutex.
2291 * HOWEVER, it relies on the assumption that any object with ->lookup() 2291 * HOWEVER, it relies on the assumption that any object with ->lookup()
2292 * has no more than 1 dentry. If "hybrid" objects will ever appear, 2292 * has no more than 1 dentry. If "hybrid" objects will ever appear,
2293 * we'd better make sure that there's no link(2) for them. 2293 * we'd better make sure that there's no link(2) for them.
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 973b444d6914..ebdad8f6398f 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -46,7 +46,7 @@ int ncp_make_open(struct inode *inode, int right)
46 NCP_FINFO(inode)->volNumber, 46 NCP_FINFO(inode)->volNumber,
47 NCP_FINFO(inode)->dirEntNum); 47 NCP_FINFO(inode)->dirEntNum);
48 error = -EACCES; 48 error = -EACCES;
49 down(&NCP_FINFO(inode)->open_sem); 49 mutex_lock(&NCP_FINFO(inode)->open_mutex);
50 if (!atomic_read(&NCP_FINFO(inode)->opened)) { 50 if (!atomic_read(&NCP_FINFO(inode)->opened)) {
51 struct ncp_entry_info finfo; 51 struct ncp_entry_info finfo;
52 int result; 52 int result;
@@ -93,7 +93,7 @@ int ncp_make_open(struct inode *inode, int right)
93 } 93 }
94 94
95out_unlock: 95out_unlock:
96 up(&NCP_FINFO(inode)->open_sem); 96 mutex_unlock(&NCP_FINFO(inode)->open_mutex);
97out: 97out:
98 return error; 98 return error;
99} 99}
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index d277a58bd128..0b521d3d97ce 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -63,7 +63,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
63 63
64 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 64 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
65 SLAB_CTOR_CONSTRUCTOR) { 65 SLAB_CTOR_CONSTRUCTOR) {
66 init_MUTEX(&ei->open_sem); 66 mutex_init(&ei->open_mutex);
67 inode_init_once(&ei->vfs_inode); 67 inode_init_once(&ei->vfs_inode);
68 } 68 }
69} 69}
@@ -520,7 +520,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
520 } 520 }
521 521
522/* server->lock = 0; */ 522/* server->lock = 0; */
523 init_MUTEX(&server->sem); 523 mutex_init(&server->mutex);
524 server->packet = NULL; 524 server->packet = NULL;
525/* server->buffer_size = 0; */ 525/* server->buffer_size = 0; */
526/* server->conn_status = 0; */ 526/* server->conn_status = 0; */
@@ -557,7 +557,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
557 server->dentry_ttl = 0; /* no caching */ 557 server->dentry_ttl = 0; /* no caching */
558 558
559 INIT_LIST_HEAD(&server->tx.requests); 559 INIT_LIST_HEAD(&server->tx.requests);
560 init_MUTEX(&server->rcv.creq_sem); 560 mutex_init(&server->rcv.creq_mutex);
561 server->tx.creq = NULL; 561 server->tx.creq = NULL;
562 server->rcv.creq = NULL; 562 server->rcv.creq = NULL;
563 server->data_ready = sock->sk->sk_data_ready; 563 server->data_ready = sock->sk->sk_data_ready;
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index c755e1848a42..d9ebf6439f59 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -291,7 +291,7 @@ ncp_make_closed(struct inode *inode)
291 int err; 291 int err;
292 292
293 err = 0; 293 err = 0;
294 down(&NCP_FINFO(inode)->open_sem); 294 mutex_lock(&NCP_FINFO(inode)->open_mutex);
295 if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { 295 if (atomic_read(&NCP_FINFO(inode)->opened) == 1) {
296 atomic_set(&NCP_FINFO(inode)->opened, 0); 296 atomic_set(&NCP_FINFO(inode)->opened, 0);
297 err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); 297 err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle);
@@ -301,7 +301,7 @@ ncp_make_closed(struct inode *inode)
301 NCP_FINFO(inode)->volNumber, 301 NCP_FINFO(inode)->volNumber,
302 NCP_FINFO(inode)->dirEntNum, err); 302 NCP_FINFO(inode)->dirEntNum, err);
303 } 303 }
304 up(&NCP_FINFO(inode)->open_sem); 304 mutex_unlock(&NCP_FINFO(inode)->open_mutex);
305 return err; 305 return err;
306} 306}
307 307
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 6593a5ca88ba..8783eb7ec641 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -171,9 +171,9 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req
171 171
172static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) 172static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
173{ 173{
174 down(&server->rcv.creq_sem); 174 mutex_lock(&server->rcv.creq_mutex);
175 __ncp_abort_request(server, req, err); 175 __ncp_abort_request(server, req, err);
176 up(&server->rcv.creq_sem); 176 mutex_unlock(&server->rcv.creq_mutex);
177} 177}
178 178
179static inline void __ncptcp_abort(struct ncp_server *server) 179static inline void __ncptcp_abort(struct ncp_server *server)
@@ -303,20 +303,20 @@ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_req
303 303
304static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) 304static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
305{ 305{
306 down(&server->rcv.creq_sem); 306 mutex_lock(&server->rcv.creq_mutex);
307 if (!ncp_conn_valid(server)) { 307 if (!ncp_conn_valid(server)) {
308 up(&server->rcv.creq_sem); 308 mutex_unlock(&server->rcv.creq_mutex);
309 printk(KERN_ERR "ncpfs: tcp: Server died\n"); 309 printk(KERN_ERR "ncpfs: tcp: Server died\n");
310 return -EIO; 310 return -EIO;
311 } 311 }
312 if (server->tx.creq || server->rcv.creq) { 312 if (server->tx.creq || server->rcv.creq) {
313 req->status = RQ_QUEUED; 313 req->status = RQ_QUEUED;
314 list_add_tail(&req->req, &server->tx.requests); 314 list_add_tail(&req->req, &server->tx.requests);
315 up(&server->rcv.creq_sem); 315 mutex_unlock(&server->rcv.creq_mutex);
316 return 0; 316 return 0;
317 } 317 }
318 __ncp_start_request(server, req); 318 __ncp_start_request(server, req);
319 up(&server->rcv.creq_sem); 319 mutex_unlock(&server->rcv.creq_mutex);
320 return 0; 320 return 0;
321} 321}
322 322
@@ -400,7 +400,7 @@ void ncpdgram_rcv_proc(void *s)
400 info_server(server, 0, server->unexpected_packet.data, result); 400 info_server(server, 0, server->unexpected_packet.data, result);
401 continue; 401 continue;
402 } 402 }
403 down(&server->rcv.creq_sem); 403 mutex_lock(&server->rcv.creq_mutex);
404 req = server->rcv.creq; 404 req = server->rcv.creq;
405 if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && 405 if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
406 server->connection == get_conn_number(&reply)))) { 406 server->connection == get_conn_number(&reply)))) {
@@ -430,11 +430,11 @@ void ncpdgram_rcv_proc(void *s)
430 server->rcv.creq = NULL; 430 server->rcv.creq = NULL;
431 ncp_finish_request(req, result); 431 ncp_finish_request(req, result);
432 __ncp_next_request(server); 432 __ncp_next_request(server);
433 up(&server->rcv.creq_sem); 433 mutex_unlock(&server->rcv.creq_mutex);
434 continue; 434 continue;
435 } 435 }
436 } 436 }
437 up(&server->rcv.creq_sem); 437 mutex_unlock(&server->rcv.creq_mutex);
438 } 438 }
439drop:; 439drop:;
440 _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); 440 _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
@@ -472,9 +472,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
472void ncpdgram_timeout_proc(void *s) 472void ncpdgram_timeout_proc(void *s)
473{ 473{
474 struct ncp_server *server = s; 474 struct ncp_server *server = s;
475 down(&server->rcv.creq_sem); 475 mutex_lock(&server->rcv.creq_mutex);
476 __ncpdgram_timeout_proc(server); 476 __ncpdgram_timeout_proc(server);
477 up(&server->rcv.creq_sem); 477 mutex_unlock(&server->rcv.creq_mutex);
478} 478}
479 479
480static inline void ncp_init_req(struct ncp_request_reply* req) 480static inline void ncp_init_req(struct ncp_request_reply* req)
@@ -657,18 +657,18 @@ void ncp_tcp_rcv_proc(void *s)
657{ 657{
658 struct ncp_server *server = s; 658 struct ncp_server *server = s;
659 659
660 down(&server->rcv.creq_sem); 660 mutex_lock(&server->rcv.creq_mutex);
661 __ncptcp_rcv_proc(server); 661 __ncptcp_rcv_proc(server);
662 up(&server->rcv.creq_sem); 662 mutex_unlock(&server->rcv.creq_mutex);
663} 663}
664 664
665void ncp_tcp_tx_proc(void *s) 665void ncp_tcp_tx_proc(void *s)
666{ 666{
667 struct ncp_server *server = s; 667 struct ncp_server *server = s;
668 668
669 down(&server->rcv.creq_sem); 669 mutex_lock(&server->rcv.creq_mutex);
670 __ncptcp_try_send(server); 670 __ncptcp_try_send(server);
671 up(&server->rcv.creq_sem); 671 mutex_unlock(&server->rcv.creq_mutex);
672} 672}
673 673
674static int do_ncp_rpc_call(struct ncp_server *server, int size, 674static int do_ncp_rpc_call(struct ncp_server *server, int size,
@@ -833,7 +833,7 @@ int ncp_disconnect(struct ncp_server *server)
833 833
834void ncp_lock_server(struct ncp_server *server) 834void ncp_lock_server(struct ncp_server *server)
835{ 835{
836 down(&server->sem); 836 mutex_lock(&server->mutex);
837 if (server->lock) 837 if (server->lock)
838 printk(KERN_WARNING "ncp_lock_server: was locked!\n"); 838 printk(KERN_WARNING "ncp_lock_server: was locked!\n");
839 server->lock = 1; 839 server->lock = 1;
@@ -846,5 +846,5 @@ void ncp_unlock_server(struct ncp_server *server)
846 return; 846 return;
847 } 847 }
848 server->lock = 0; 848 server->lock = 0;
849 up(&server->sem); 849 mutex_unlock(&server->mutex);
850} 850}
diff --git a/fs/open.c b/fs/open.c
index 70e0230d8e77..1091dadd6c38 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -973,7 +973,7 @@ repeat:
973 fdt = files_fdtable(files); 973 fdt = files_fdtable(files);
974 fd = find_next_zero_bit(fdt->open_fds->fds_bits, 974 fd = find_next_zero_bit(fdt->open_fds->fds_bits,
975 fdt->max_fdset, 975 fdt->max_fdset,
976 fdt->next_fd); 976 files->next_fd);
977 977
978 /* 978 /*
979 * N.B. For clone tasks sharing a files structure, this test 979 * N.B. For clone tasks sharing a files structure, this test
@@ -998,7 +998,7 @@ repeat:
998 998
999 FD_SET(fd, fdt->open_fds); 999 FD_SET(fd, fdt->open_fds);
1000 FD_CLR(fd, fdt->close_on_exec); 1000 FD_CLR(fd, fdt->close_on_exec);
1001 fdt->next_fd = fd + 1; 1001 files->next_fd = fd + 1;
1002#if 1 1002#if 1
1003 /* Sanity check */ 1003 /* Sanity check */
1004 if (fdt->fd[fd] != NULL) { 1004 if (fdt->fd[fd] != NULL) {
@@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd)
1019{ 1019{
1020 struct fdtable *fdt = files_fdtable(files); 1020 struct fdtable *fdt = files_fdtable(files);
1021 __FD_CLR(fd, fdt->open_fds); 1021 __FD_CLR(fd, fdt->open_fds);
1022 if (fd < fdt->next_fd) 1022 if (fd < files->next_fd)
1023 fdt->next_fd = fd; 1023 files->next_fd = fd;
1024} 1024}
1025 1025
1026void fastcall put_unused_fd(unsigned int fd) 1026void fastcall put_unused_fd(unsigned int fd)
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 1d24fead51a6..826c131994c3 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -312,7 +312,7 @@ static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos)
312 case BLK_HDR: 312 case BLK_HDR:
313 info->state = BLK_LIST; 313 info->state = BLK_LIST;
314 (*pos)++; 314 (*pos)++;
315 break; 315 /*fallthrough*/
316 case BLK_LIST: 316 case BLK_LIST:
317 if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { 317 if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) {
318 /* 318 /*
diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c
index b471315e24ef..c33963fded9e 100644
--- a/fs/qnx4/file.c
+++ b/fs/qnx4/file.c
@@ -12,10 +12,7 @@
12 * 27-06-1998 by Frank Denis : file overwriting. 12 * 27-06-1998 by Frank Denis : file overwriting.
13 */ 13 */
14 14
15#include <linux/config.h>
16#include <linux/types.h>
17#include <linux/fs.h> 15#include <linux/fs.h>
18#include <linux/time.h>
19#include <linux/qnx4_fs.h> 16#include <linux/qnx4_fs.h>
20 17
21/* 18/*
diff --git a/fs/quota.c b/fs/quota.c
index ba9e0bf32f67..d6a2be826e29 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type)
170 170
171 /* Now when everything is written we can discard the pagecache so 171 /* Now when everything is written we can discard the pagecache so
172 * that userspace sees the changes. We need i_mutex and so we could 172 * that userspace sees the changes. We need i_mutex and so we could
173 * not do it inside dqonoff_sem. Moreover we need to be carefull 173 * not do it inside dqonoff_mutex. Moreover we need to be carefull
174 * about races with quotaoff() (that is the reason why we have own 174 * about races with quotaoff() (that is the reason why we have own
175 * reference to inode). */ 175 * reference to inode). */
176 down(&sb_dqopt(sb)->dqonoff_sem); 176 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
177 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 177 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
178 discard[cnt] = NULL; 178 discard[cnt] = NULL;
179 if (type != -1 && cnt != type) 179 if (type != -1 && cnt != type)
@@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
182 continue; 182 continue;
183 discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); 183 discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
184 } 184 }
185 up(&sb_dqopt(sb)->dqonoff_sem); 185 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
186 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 186 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
187 if (discard[cnt]) { 187 if (discard[cnt]) {
188 mutex_lock(&discard[cnt]->i_mutex); 188 mutex_lock(&discard[cnt]->i_mutex);
diff --git a/fs/quota_v2.c b/fs/quota_v2.c
index b4199ec3ece4..c519a583e681 100644
--- a/fs/quota_v2.c
+++ b/fs/quota_v2.c
@@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot)
394 ssize_t ret; 394 ssize_t ret;
395 struct v2_disk_dqblk ddquot, empty; 395 struct v2_disk_dqblk ddquot, empty;
396 396
397 /* dq_off is guarded by dqio_sem */ 397 /* dq_off is guarded by dqio_mutex */
398 if (!dquot->dq_off) 398 if (!dquot->dq_off)
399 if ((ret = dq_insert_tree(dquot)) < 0) { 399 if ((ret = dq_insert_tree(dquot)) < 0) {
400 printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); 400 printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret);
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 2115383dcc8d..6ada2095b9ac 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -24,18 +24,7 @@
24 * caches is sufficient. 24 * caches is sufficient.
25 */ 25 */
26 26
27#include <linux/module.h>
28#include <linux/fs.h> 27#include <linux/fs.h>
29#include <linux/pagemap.h>
30#include <linux/highmem.h>
31#include <linux/init.h>
32#include <linux/string.h>
33#include <linux/smp_lock.h>
34#include <linux/backing-dev.h>
35#include <linux/ramfs.h>
36
37#include <asm/uaccess.h>
38#include "internal.h"
39 28
40struct address_space_operations ramfs_aops = { 29struct address_space_operations ramfs_aops = {
41 .readpage = simple_readpage, 30 .readpage = simple_readpage,
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 7c40570b71dc..555b9ac04c25 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -37,7 +37,7 @@ int seq_open(struct file *file, struct seq_operations *op)
37 file->private_data = p; 37 file->private_data = p;
38 } 38 }
39 memset(p, 0, sizeof(*p)); 39 memset(p, 0, sizeof(*p));
40 sema_init(&p->sem, 1); 40 mutex_init(&p->lock);
41 p->op = op; 41 p->op = op;
42 42
43 /* 43 /*
@@ -71,7 +71,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
71 void *p; 71 void *p;
72 int err = 0; 72 int err = 0;
73 73
74 down(&m->sem); 74 mutex_lock(&m->lock);
75 /* 75 /*
76 * seq_file->op->..m_start/m_stop/m_next may do special actions 76 * seq_file->op->..m_start/m_stop/m_next may do special actions
77 * or optimisations based on the file->f_version, so we want to 77 * or optimisations based on the file->f_version, so we want to
@@ -164,7 +164,7 @@ Done:
164 else 164 else
165 *ppos += copied; 165 *ppos += copied;
166 file->f_version = m->version; 166 file->f_version = m->version;
167 up(&m->sem); 167 mutex_unlock(&m->lock);
168 return copied; 168 return copied;
169Enomem: 169Enomem:
170 err = -ENOMEM; 170 err = -ENOMEM;
@@ -237,7 +237,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
237 struct seq_file *m = (struct seq_file *)file->private_data; 237 struct seq_file *m = (struct seq_file *)file->private_data;
238 long long retval = -EINVAL; 238 long long retval = -EINVAL;
239 239
240 down(&m->sem); 240 mutex_lock(&m->lock);
241 m->version = file->f_version; 241 m->version = file->f_version;
242 switch (origin) { 242 switch (origin) {
243 case 1: 243 case 1:
@@ -260,7 +260,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
260 } 260 }
261 } 261 }
262 } 262 }
263 up(&m->sem); 263 mutex_unlock(&m->lock);
264 file->f_version = m->version; 264 file->f_version = m->version;
265 return retval; 265 return retval;
266} 266}
diff --git a/fs/super.c b/fs/super.c
index e20b5580afd5..425861cb1caa 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -76,9 +76,9 @@ static struct super_block *alloc_super(void)
76 down_write(&s->s_umount); 76 down_write(&s->s_umount);
77 s->s_count = S_BIAS; 77 s->s_count = S_BIAS;
78 atomic_set(&s->s_active, 1); 78 atomic_set(&s->s_active, 1);
79 sema_init(&s->s_vfs_rename_sem,1); 79 mutex_init(&s->s_vfs_rename_mutex);
80 sema_init(&s->s_dquot.dqio_sem, 1); 80 mutex_init(&s->s_dquot.dqio_mutex);
81 sema_init(&s->s_dquot.dqonoff_sem, 1); 81 mutex_init(&s->s_dquot.dqonoff_mutex);
82 init_rwsem(&s->s_dquot.dqptr_sem); 82 init_rwsem(&s->s_dquot.dqptr_sem);
83 init_waitqueue_head(&s->s_wait_unfrozen); 83 init_waitqueue_head(&s->s_wait_unfrozen);
84 s->s_maxbytes = MAX_NON_LFS; 84 s->s_maxbytes = MAX_NON_LFS;
@@ -693,9 +693,9 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
693 * will protect the lockfs code from trying to start a snapshot 693 * will protect the lockfs code from trying to start a snapshot
694 * while we are mounting 694 * while we are mounting
695 */ 695 */
696 down(&bdev->bd_mount_sem); 696 mutex_lock(&bdev->bd_mount_mutex);
697 s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); 697 s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
698 up(&bdev->bd_mount_sem); 698 mutex_unlock(&bdev->bd_mount_mutex);
699 if (IS_ERR(s)) 699 if (IS_ERR(s))
700 goto out; 700 goto out;
701 701
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 7f0e4b53085e..b8a73f716fbe 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -16,18 +16,6 @@
16#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
17#include "sysv.h" 17#include "sysv.h"
18 18
19static inline void inc_count(struct inode *inode)
20{
21 inode->i_nlink++;
22 mark_inode_dirty(inode);
23}
24
25static inline void dec_count(struct inode *inode)
26{
27 inode->i_nlink--;
28 mark_inode_dirty(inode);
29}
30
31static int add_nondir(struct dentry *dentry, struct inode *inode) 19static int add_nondir(struct dentry *dentry, struct inode *inode)
32{ 20{
33 int err = sysv_add_link(dentry, inode); 21 int err = sysv_add_link(dentry, inode);
@@ -35,7 +23,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode)
35 d_instantiate(dentry, inode); 23 d_instantiate(dentry, inode);
36 return 0; 24 return 0;
37 } 25 }
38 dec_count(inode); 26 inode_dec_link_count(inode);
39 iput(inode); 27 iput(inode);
40 return err; 28 return err;
41} 29}
@@ -124,7 +112,7 @@ out:
124 return err; 112 return err;
125 113
126out_fail: 114out_fail:
127 dec_count(inode); 115 inode_dec_link_count(inode);
128 iput(inode); 116 iput(inode);
129 goto out; 117 goto out;
130} 118}
@@ -138,7 +126,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir,
138 return -EMLINK; 126 return -EMLINK;
139 127
140 inode->i_ctime = CURRENT_TIME_SEC; 128 inode->i_ctime = CURRENT_TIME_SEC;
141 inc_count(inode); 129 inode_inc_link_count(inode);
142 atomic_inc(&inode->i_count); 130 atomic_inc(&inode->i_count);
143 131
144 return add_nondir(dentry, inode); 132 return add_nondir(dentry, inode);
@@ -151,7 +139,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
151 139
152 if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) 140 if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max)
153 goto out; 141 goto out;
154 inc_count(dir); 142 inode_inc_link_count(dir);
155 143
156 inode = sysv_new_inode(dir, S_IFDIR|mode); 144 inode = sysv_new_inode(dir, S_IFDIR|mode);
157 err = PTR_ERR(inode); 145 err = PTR_ERR(inode);
@@ -160,7 +148,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode)
160 148
161 sysv_set_inode(inode, 0); 149 sysv_set_inode(inode, 0);
162 150
163 inc_count(inode); 151 inode_inc_link_count(inode);
164 152
165 err = sysv_make_empty(inode, dir); 153 err = sysv_make_empty(inode, dir);
166 if (err) 154 if (err)
@@ -175,11 +163,11 @@ out:
175 return err; 163 return err;
176 164
177out_fail: 165out_fail:
178 dec_count(inode); 166 inode_dec_link_count(inode);
179 dec_count(inode); 167 inode_dec_link_count(inode);
180 iput(inode); 168 iput(inode);
181out_dir: 169out_dir:
182 dec_count(dir); 170 inode_dec_link_count(dir);
183 goto out; 171 goto out;
184} 172}
185 173
@@ -199,7 +187,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry)
199 goto out; 187 goto out;
200 188
201 inode->i_ctime = dir->i_ctime; 189 inode->i_ctime = dir->i_ctime;
202 dec_count(inode); 190 inode_dec_link_count(inode);
203out: 191out:
204 return err; 192 return err;
205} 193}
@@ -213,8 +201,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
213 err = sysv_unlink(dir, dentry); 201 err = sysv_unlink(dir, dentry);
214 if (!err) { 202 if (!err) {
215 inode->i_size = 0; 203 inode->i_size = 0;
216 dec_count(inode); 204 inode_dec_link_count(inode);
217 dec_count(dir); 205 inode_dec_link_count(dir);
218 } 206 }
219 } 207 }
220 return err; 208 return err;
@@ -258,34 +246,34 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
258 new_de = sysv_find_entry(new_dentry, &new_page); 246 new_de = sysv_find_entry(new_dentry, &new_page);
259 if (!new_de) 247 if (!new_de)
260 goto out_dir; 248 goto out_dir;
261 inc_count(old_inode); 249 inode_inc_link_count(old_inode);
262 sysv_set_link(new_de, new_page, old_inode); 250 sysv_set_link(new_de, new_page, old_inode);
263 new_inode->i_ctime = CURRENT_TIME_SEC; 251 new_inode->i_ctime = CURRENT_TIME_SEC;
264 if (dir_de) 252 if (dir_de)
265 new_inode->i_nlink--; 253 new_inode->i_nlink--;
266 dec_count(new_inode); 254 inode_dec_link_count(new_inode);
267 } else { 255 } else {
268 if (dir_de) { 256 if (dir_de) {
269 err = -EMLINK; 257 err = -EMLINK;
270 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 258 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
271 goto out_dir; 259 goto out_dir;
272 } 260 }
273 inc_count(old_inode); 261 inode_inc_link_count(old_inode);
274 err = sysv_add_link(new_dentry, old_inode); 262 err = sysv_add_link(new_dentry, old_inode);
275 if (err) { 263 if (err) {
276 dec_count(old_inode); 264 inode_dec_link_count(old_inode);
277 goto out_dir; 265 goto out_dir;
278 } 266 }
279 if (dir_de) 267 if (dir_de)
280 inc_count(new_dir); 268 inode_inc_link_count(new_dir);
281 } 269 }
282 270
283 sysv_delete_entry(old_de, old_page); 271 sysv_delete_entry(old_de, old_page);
284 dec_count(old_inode); 272 inode_dec_link_count(old_inode);
285 273
286 if (dir_de) { 274 if (dir_de) {
287 sysv_set_link(dir_de, dir_page, new_dir); 275 sysv_set_link(dir_de, dir_page, new_dir);
288 dec_count(old_dir); 276 inode_dec_link_count(old_dir);
289 } 277 }
290 return 0; 278 return 0;
291 279
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 201049ac8a96..ea521f846d97 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -152,7 +152,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb,
152 int bitmap_nr; 152 int bitmap_nr;
153 unsigned long overflow; 153 unsigned long overflow;
154 154
155 down(&sbi->s_alloc_sem); 155 mutex_lock(&sbi->s_alloc_mutex);
156 if (bloc.logicalBlockNum < 0 || 156 if (bloc.logicalBlockNum < 0 ||
157 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) 157 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
158 { 158 {
@@ -211,7 +211,7 @@ error_return:
211 sb->s_dirt = 1; 211 sb->s_dirt = 1;
212 if (UDF_SB_LVIDBH(sb)) 212 if (UDF_SB_LVIDBH(sb))
213 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 213 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
214 up(&sbi->s_alloc_sem); 214 mutex_unlock(&sbi->s_alloc_mutex);
215 return; 215 return;
216} 216}
217 217
@@ -226,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb,
226 int nr_groups, bitmap_nr; 226 int nr_groups, bitmap_nr;
227 struct buffer_head *bh; 227 struct buffer_head *bh;
228 228
229 down(&sbi->s_alloc_sem); 229 mutex_lock(&sbi->s_alloc_mutex);
230 if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) 230 if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
231 goto out; 231 goto out;
232 232
@@ -275,7 +275,7 @@ out:
275 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 275 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
276 } 276 }
277 sb->s_dirt = 1; 277 sb->s_dirt = 1;
278 up(&sbi->s_alloc_sem); 278 mutex_unlock(&sbi->s_alloc_mutex);
279 return alloc_count; 279 return alloc_count;
280} 280}
281 281
@@ -291,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block * sb,
291 int newblock = 0; 291 int newblock = 0;
292 292
293 *err = -ENOSPC; 293 *err = -ENOSPC;
294 down(&sbi->s_alloc_sem); 294 mutex_lock(&sbi->s_alloc_mutex);
295 295
296repeat: 296repeat:
297 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) 297 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
@@ -364,7 +364,7 @@ repeat:
364 } 364 }
365 if (i >= (nr_groups*2)) 365 if (i >= (nr_groups*2))
366 { 366 {
367 up(&sbi->s_alloc_sem); 367 mutex_unlock(&sbi->s_alloc_mutex);
368 return newblock; 368 return newblock;
369 } 369 }
370 if (bit < sb->s_blocksize << 3) 370 if (bit < sb->s_blocksize << 3)
@@ -373,7 +373,7 @@ repeat:
373 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); 373 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
374 if (bit >= sb->s_blocksize << 3) 374 if (bit >= sb->s_blocksize << 3)
375 { 375 {
376 up(&sbi->s_alloc_sem); 376 mutex_unlock(&sbi->s_alloc_mutex);
377 return 0; 377 return 0;
378 } 378 }
379 379
@@ -387,7 +387,7 @@ got_block:
387 */ 387 */
388 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) 388 if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
389 { 389 {
390 up(&sbi->s_alloc_sem); 390 mutex_unlock(&sbi->s_alloc_mutex);
391 *err = -EDQUOT; 391 *err = -EDQUOT;
392 return 0; 392 return 0;
393 } 393 }
@@ -410,13 +410,13 @@ got_block:
410 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 410 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
411 } 411 }
412 sb->s_dirt = 1; 412 sb->s_dirt = 1;
413 up(&sbi->s_alloc_sem); 413 mutex_unlock(&sbi->s_alloc_mutex);
414 *err = 0; 414 *err = 0;
415 return newblock; 415 return newblock;
416 416
417error_return: 417error_return:
418 *err = -EIO; 418 *err = -EIO;
419 up(&sbi->s_alloc_sem); 419 mutex_unlock(&sbi->s_alloc_mutex);
420 return 0; 420 return 0;
421} 421}
422 422
@@ -433,7 +433,7 @@ static void udf_table_free_blocks(struct super_block * sb,
433 int8_t etype; 433 int8_t etype;
434 int i; 434 int i;
435 435
436 down(&sbi->s_alloc_sem); 436 mutex_lock(&sbi->s_alloc_mutex);
437 if (bloc.logicalBlockNum < 0 || 437 if (bloc.logicalBlockNum < 0 ||
438 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) 438 (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
439 { 439 {
@@ -666,7 +666,7 @@ static void udf_table_free_blocks(struct super_block * sb,
666 666
667error_return: 667error_return:
668 sb->s_dirt = 1; 668 sb->s_dirt = 1;
669 up(&sbi->s_alloc_sem); 669 mutex_unlock(&sbi->s_alloc_mutex);
670 return; 670 return;
671} 671}
672 672
@@ -692,7 +692,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
692 else 692 else
693 return 0; 693 return 0;
694 694
695 down(&sbi->s_alloc_sem); 695 mutex_lock(&sbi->s_alloc_mutex);
696 extoffset = sizeof(struct unallocSpaceEntry); 696 extoffset = sizeof(struct unallocSpaceEntry);
697 bloc = UDF_I_LOCATION(table); 697 bloc = UDF_I_LOCATION(table);
698 698
@@ -736,7 +736,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
736 mark_buffer_dirty(UDF_SB_LVIDBH(sb)); 736 mark_buffer_dirty(UDF_SB_LVIDBH(sb));
737 sb->s_dirt = 1; 737 sb->s_dirt = 1;
738 } 738 }
739 up(&sbi->s_alloc_sem); 739 mutex_unlock(&sbi->s_alloc_mutex);
740 return alloc_count; 740 return alloc_count;
741} 741}
742 742
@@ -761,7 +761,7 @@ static int udf_table_new_block(struct super_block * sb,
761 else 761 else
762 return newblock; 762 return newblock;
763 763
764 down(&sbi->s_alloc_sem); 764 mutex_lock(&sbi->s_alloc_mutex);
765 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) 765 if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
766 goal = 0; 766 goal = 0;
767 767
@@ -811,7 +811,7 @@ static int udf_table_new_block(struct super_block * sb,
811 if (spread == 0xFFFFFFFF) 811 if (spread == 0xFFFFFFFF)
812 { 812 {
813 udf_release_data(goal_bh); 813 udf_release_data(goal_bh);
814 up(&sbi->s_alloc_sem); 814 mutex_unlock(&sbi->s_alloc_mutex);
815 return 0; 815 return 0;
816 } 816 }
817 817
@@ -827,7 +827,7 @@ static int udf_table_new_block(struct super_block * sb,
827 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) 827 if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
828 { 828 {
829 udf_release_data(goal_bh); 829 udf_release_data(goal_bh);
830 up(&sbi->s_alloc_sem); 830 mutex_unlock(&sbi->s_alloc_mutex);
831 *err = -EDQUOT; 831 *err = -EDQUOT;
832 return 0; 832 return 0;
833 } 833 }
@@ -846,7 +846,7 @@ static int udf_table_new_block(struct super_block * sb,
846 } 846 }
847 847
848 sb->s_dirt = 1; 848 sb->s_dirt = 1;
849 up(&sbi->s_alloc_sem); 849 mutex_unlock(&sbi->s_alloc_mutex);
850 *err = 0; 850 *err = 0;
851 return newblock; 851 return newblock;
852} 852}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index c9b707b470ca..3873c672cb4c 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -42,7 +42,7 @@ void udf_free_inode(struct inode * inode)
42 42
43 clear_inode(inode); 43 clear_inode(inode);
44 44
45 down(&sbi->s_alloc_sem); 45 mutex_lock(&sbi->s_alloc_mutex);
46 if (sbi->s_lvidbh) { 46 if (sbi->s_lvidbh) {
47 if (S_ISDIR(inode->i_mode)) 47 if (S_ISDIR(inode->i_mode))
48 UDF_SB_LVIDIU(sb)->numDirs = 48 UDF_SB_LVIDIU(sb)->numDirs =
@@ -53,7 +53,7 @@ void udf_free_inode(struct inode * inode)
53 53
54 mark_buffer_dirty(sbi->s_lvidbh); 54 mark_buffer_dirty(sbi->s_lvidbh);
55 } 55 }
56 up(&sbi->s_alloc_sem); 56 mutex_unlock(&sbi->s_alloc_mutex);
57 57
58 udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); 58 udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
59} 59}
@@ -83,7 +83,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
83 return NULL; 83 return NULL;
84 } 84 }
85 85
86 down(&sbi->s_alloc_sem); 86 mutex_lock(&sbi->s_alloc_mutex);
87 UDF_I_UNIQUE(inode) = 0; 87 UDF_I_UNIQUE(inode) = 0;
88 UDF_I_LENEXTENTS(inode) = 0; 88 UDF_I_LENEXTENTS(inode) = 0;
89 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; 89 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
@@ -148,7 +148,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err)
148 UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); 148 UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
149 insert_inode_hash(inode); 149 insert_inode_hash(inode);
150 mark_inode_dirty(inode); 150 mark_inode_dirty(inode);
151 up(&sbi->s_alloc_sem); 151 mutex_unlock(&sbi->s_alloc_mutex);
152 152
153 if (DQUOT_ALLOC_INODE(inode)) 153 if (DQUOT_ALLOC_INODE(inode))
154 { 154 {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 368d8f81fe54..9303c50c5d55 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1515,7 +1515,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1515 sb->s_fs_info = sbi; 1515 sb->s_fs_info = sbi;
1516 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); 1516 memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info));
1517 1517
1518 init_MUTEX(&sbi->s_alloc_sem); 1518 mutex_init(&sbi->s_alloc_mutex);
1519 1519
1520 if (!udf_parse_options((char *)options, &uopt)) 1520 if (!udf_parse_options((char *)options, &uopt))
1521 goto error_out; 1521 goto error_out;
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index ed69d7fe1b5d..62ad481810ef 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -23,18 +23,8 @@
23 * ext2 fs regular file handling primitives 23 * ext2 fs regular file handling primitives
24 */ 24 */
25 25
26#include <asm/uaccess.h>
27#include <asm/system.h>
28
29#include <linux/errno.h>
30#include <linux/fs.h> 26#include <linux/fs.h>
31#include <linux/ufs_fs.h> 27#include <linux/ufs_fs.h>
32#include <linux/fcntl.h>
33#include <linux/time.h>
34#include <linux/stat.h>
35#include <linux/mm.h>
36#include <linux/pagemap.h>
37#include <linux/smp_lock.h>
38 28
39/* 29/*
40 * We have mostly NULL's here: the current defaults are ok for 30 * We have mostly NULL's here: the current defaults are ok for
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 2958cde7d3d6..8d5f98a01c74 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -43,18 +43,6 @@
43#define UFSD(x) 43#define UFSD(x)
44#endif 44#endif
45 45
46static inline void ufs_inc_count(struct inode *inode)
47{
48 inode->i_nlink++;
49 mark_inode_dirty(inode);
50}
51
52static inline void ufs_dec_count(struct inode *inode)
53{
54 inode->i_nlink--;
55 mark_inode_dirty(inode);
56}
57
58static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) 46static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
59{ 47{
60 int err = ufs_add_link(dentry, inode); 48 int err = ufs_add_link(dentry, inode);
@@ -62,7 +50,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
62 d_instantiate(dentry, inode); 50 d_instantiate(dentry, inode);
63 return 0; 51 return 0;
64 } 52 }
65 ufs_dec_count(inode); 53 inode_dec_link_count(inode);
66 iput(inode); 54 iput(inode);
67 return err; 55 return err;
68} 56}
@@ -173,7 +161,7 @@ out:
173 return err; 161 return err;
174 162
175out_fail: 163out_fail:
176 ufs_dec_count(inode); 164 inode_dec_link_count(inode);
177 iput(inode); 165 iput(inode);
178 goto out; 166 goto out;
179} 167}
@@ -191,7 +179,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
191 } 179 }
192 180
193 inode->i_ctime = CURRENT_TIME_SEC; 181 inode->i_ctime = CURRENT_TIME_SEC;
194 ufs_inc_count(inode); 182 inode_inc_link_count(inode);
195 atomic_inc(&inode->i_count); 183 atomic_inc(&inode->i_count);
196 184
197 error = ufs_add_nondir(dentry, inode); 185 error = ufs_add_nondir(dentry, inode);
@@ -208,7 +196,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
208 goto out; 196 goto out;
209 197
210 lock_kernel(); 198 lock_kernel();
211 ufs_inc_count(dir); 199 inode_inc_link_count(dir);
212 200
213 inode = ufs_new_inode(dir, S_IFDIR|mode); 201 inode = ufs_new_inode(dir, S_IFDIR|mode);
214 err = PTR_ERR(inode); 202 err = PTR_ERR(inode);
@@ -218,7 +206,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
218 inode->i_op = &ufs_dir_inode_operations; 206 inode->i_op = &ufs_dir_inode_operations;
219 inode->i_fop = &ufs_dir_operations; 207 inode->i_fop = &ufs_dir_operations;
220 208
221 ufs_inc_count(inode); 209 inode_inc_link_count(inode);
222 210
223 err = ufs_make_empty(inode, dir); 211 err = ufs_make_empty(inode, dir);
224 if (err) 212 if (err)
@@ -234,11 +222,11 @@ out:
234 return err; 222 return err;
235 223
236out_fail: 224out_fail:
237 ufs_dec_count(inode); 225 inode_dec_link_count(inode);
238 ufs_dec_count(inode); 226 inode_dec_link_count(inode);
239 iput (inode); 227 iput (inode);
240out_dir: 228out_dir:
241 ufs_dec_count(dir); 229 inode_dec_link_count(dir);
242 unlock_kernel(); 230 unlock_kernel();
243 goto out; 231 goto out;
244} 232}
@@ -260,7 +248,7 @@ static int ufs_unlink(struct inode * dir, struct dentry *dentry)
260 goto out; 248 goto out;
261 249
262 inode->i_ctime = dir->i_ctime; 250 inode->i_ctime = dir->i_ctime;
263 ufs_dec_count(inode); 251 inode_dec_link_count(inode);
264 err = 0; 252 err = 0;
265out: 253out:
266 unlock_kernel(); 254 unlock_kernel();
@@ -277,8 +265,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
277 err = ufs_unlink(dir, dentry); 265 err = ufs_unlink(dir, dentry);
278 if (!err) { 266 if (!err) {
279 inode->i_size = 0; 267 inode->i_size = 0;
280 ufs_dec_count(inode); 268 inode_dec_link_count(inode);
281 ufs_dec_count(dir); 269 inode_dec_link_count(dir);
282 } 270 }
283 } 271 }
284 unlock_kernel(); 272 unlock_kernel();
@@ -319,35 +307,35 @@ static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry,
319 new_de = ufs_find_entry (new_dentry, &new_bh); 307 new_de = ufs_find_entry (new_dentry, &new_bh);
320 if (!new_de) 308 if (!new_de)
321 goto out_dir; 309 goto out_dir;
322 ufs_inc_count(old_inode); 310 inode_inc_link_count(old_inode);
323 ufs_set_link(new_dir, new_de, new_bh, old_inode); 311 ufs_set_link(new_dir, new_de, new_bh, old_inode);
324 new_inode->i_ctime = CURRENT_TIME_SEC; 312 new_inode->i_ctime = CURRENT_TIME_SEC;
325 if (dir_de) 313 if (dir_de)
326 new_inode->i_nlink--; 314 new_inode->i_nlink--;
327 ufs_dec_count(new_inode); 315 inode_dec_link_count(new_inode);
328 } else { 316 } else {
329 if (dir_de) { 317 if (dir_de) {
330 err = -EMLINK; 318 err = -EMLINK;
331 if (new_dir->i_nlink >= UFS_LINK_MAX) 319 if (new_dir->i_nlink >= UFS_LINK_MAX)
332 goto out_dir; 320 goto out_dir;
333 } 321 }
334 ufs_inc_count(old_inode); 322 inode_inc_link_count(old_inode);
335 err = ufs_add_link(new_dentry, old_inode); 323 err = ufs_add_link(new_dentry, old_inode);
336 if (err) { 324 if (err) {
337 ufs_dec_count(old_inode); 325 inode_dec_link_count(old_inode);
338 goto out_dir; 326 goto out_dir;
339 } 327 }
340 if (dir_de) 328 if (dir_de)
341 ufs_inc_count(new_dir); 329 inode_inc_link_count(new_dir);
342 } 330 }
343 331
344 ufs_delete_entry (old_dir, old_de, old_bh); 332 ufs_delete_entry (old_dir, old_de, old_bh);
345 333
346 ufs_dec_count(old_inode); 334 inode_dec_link_count(old_inode);
347 335
348 if (dir_de) { 336 if (dir_de) {
349 ufs_set_link(old_inode, dir_de, dir_bh, new_dir); 337 ufs_set_link(old_inode, dir_de, dir_bh, new_dir);
350 ufs_dec_count(old_dir); 338 inode_dec_link_count(old_dir);
351 } 339 }
352 unlock_kernel(); 340 unlock_kernel();
353 return 0; 341 return 0;
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 8955720a2c6b..713e6a7505d0 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -62,18 +62,15 @@ xfs_read_xfsstats(
62 while (j < xstats[i].endpoint) { 62 while (j < xstats[i].endpoint) {
63 val = 0; 63 val = 0;
64 /* sum over all cpus */ 64 /* sum over all cpus */
65 for (c = 0; c < NR_CPUS; c++) { 65 for_each_cpu(c)
66 if (!cpu_possible(c)) continue;
67 val += *(((__u32*)&per_cpu(xfsstats, c) + j)); 66 val += *(((__u32*)&per_cpu(xfsstats, c) + j));
68 }
69 len += sprintf(buffer + len, " %u", val); 67 len += sprintf(buffer + len, " %u", val);
70 j++; 68 j++;
71 } 69 }
72 buffer[len++] = '\n'; 70 buffer[len++] = '\n';
73 } 71 }
74 /* extra precision counters */ 72 /* extra precision counters */
75 for (i = 0; i < NR_CPUS; i++) { 73 for_each_cpu(i) {
76 if (!cpu_possible(i)) continue;
77 xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; 74 xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
78 xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; 75 xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
79 xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; 76 xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index a02564972420..7079cc837210 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler(
38 38
39 if (!ret && write && *valp) { 39 if (!ret && write && *valp) {
40 printk("XFS Clearing xfsstats\n"); 40 printk("XFS Clearing xfsstats\n");
41 for (c = 0; c < NR_CPUS; c++) { 41 for_each_cpu(c) {
42 if (!cpu_possible(c)) continue;
43 preempt_disable(); 42 preempt_disable();
44 /* save vn_active, it's a universal truth! */ 43 /* save vn_active, it's a universal truth! */
45 vn_active = per_cpu(xfsstats, c).vn_active; 44 vn_active = per_cpu(xfsstats, c).vn_active;
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 6f92482cc96c..0c017fc181c1 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -231,9 +231,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
231{ 231{
232 int i; 232 int i;
233 233
234 for (i = 0; i < NR_CPUS; i++) 234 for_each_online_cpu(i)
235 if (cpu_online(i)) 235 mm->context[i] = 0;
236 mm->context[i] = 0;
237 if (tsk != current) 236 if (tsk != current)
238 task_thread_info(tsk)->pcb.ptbr 237 task_thread_info(tsk)->pcb.ptbr
239 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; 238 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
index eb740e280d9c..420ccde6b916 100644
--- a/include/asm-alpha/topology.h
+++ b/include/asm-alpha/topology.h
@@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node)
27 cpumask_t node_cpu_mask = CPU_MASK_NONE; 27 cpumask_t node_cpu_mask = CPU_MASK_NONE;
28 int cpu; 28 int cpu;
29 29
30 for(cpu = 0; cpu < NR_CPUS; cpu++) { 30 for_each_online_cpu(cpu) {
31 if (cpu_online(cpu) && (cpu_to_node(cpu) == node)) 31 if (cpu_to_node(cpu) == node)
32 cpu_set(cpu, node_cpu_mask); 32 cpu_set(cpu, node_cpu_mask);
33 } 33 }
34 34
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 400c2b41896e..1a565a9d2fa7 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -7,7 +7,7 @@
7#ifdef CONFIG_BUG 7#ifdef CONFIG_BUG
8#ifndef HAVE_ARCH_BUG 8#ifndef HAVE_ARCH_BUG
9#define BUG() do { \ 9#define BUG() do { \
10 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ 10 printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
11 panic("BUG!"); \ 11 panic("BUG!"); \
12} while (0) 12} while (0)
13#endif 13#endif
@@ -19,7 +19,7 @@
19#ifndef HAVE_ARCH_WARN_ON 19#ifndef HAVE_ARCH_WARN_ON
20#define WARN_ON(condition) do { \ 20#define WARN_ON(condition) do { \
21 if (unlikely((condition)!=0)) { \ 21 if (unlikely((condition)!=0)) { \
22 printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ 22 printk("BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __FUNCTION__); \
23 dump_stack(); \ 23 dump_stack(); \
24 } \ 24 } \
25} while (0) 25} while (0)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 9044aeb37828..78cf45547e31 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -19,10 +19,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
19#define percpu_modcopy(pcpudst, src, size) \ 19#define percpu_modcopy(pcpudst, src, size) \
20do { \ 20do { \
21 unsigned int __i; \ 21 unsigned int __i; \
22 for (__i = 0; __i < NR_CPUS; __i++) \ 22 for_each_cpu(__i) \
23 if (cpu_possible(__i)) \ 23 memcpy((pcpudst)+__per_cpu_offset[__i], \
24 memcpy((pcpudst)+__per_cpu_offset[__i], \ 24 (src), (size)); \
25 (src), (size)); \
26} while (0) 25} while (0)
27#else /* ! SMP */ 26#else /* ! SMP */
28 27
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
new file mode 100644
index 000000000000..e201decea0c9
--- /dev/null
+++ b/include/asm-i386/alternative.h
@@ -0,0 +1,129 @@
1#ifndef _I386_ALTERNATIVE_H
2#define _I386_ALTERNATIVE_H
3
4#ifdef __KERNEL__
5
6struct alt_instr {
7 u8 *instr; /* original instruction */
8 u8 *replacement;
9 u8 cpuid; /* cpuid bit set for replacement */
10 u8 instrlen; /* length of original instruction */
11 u8 replacementlen; /* length of new instruction, <= instrlen */
12 u8 pad;
13};
14
15extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
16
17struct module;
18extern void alternatives_smp_module_add(struct module *mod, char *name,
19 void *locks, void *locks_end,
20 void *text, void *text_end);
21extern void alternatives_smp_module_del(struct module *mod);
22extern void alternatives_smp_switch(int smp);
23
24#endif
25
26/*
27 * Alternative instructions for different CPU types or capabilities.
28 *
29 * This allows to use optimized instructions even on generic binary
30 * kernels.
31 *
32 * length of oldinstr must be longer or equal the length of newinstr
33 * It can be padded with nops as needed.
34 *
35 * For non barrier like inlines please define new variants
36 * without volatile and memory clobber.
37 */
38#define alternative(oldinstr, newinstr, feature) \
39 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
40 ".section .altinstructions,\"a\"\n" \
41 " .align 4\n" \
42 " .long 661b\n" /* label */ \
43 " .long 663f\n" /* new instruction */ \
44 " .byte %c0\n" /* feature bit */ \
45 " .byte 662b-661b\n" /* sourcelen */ \
46 " .byte 664f-663f\n" /* replacementlen */ \
47 ".previous\n" \
48 ".section .altinstr_replacement,\"ax\"\n" \
49 "663:\n\t" newinstr "\n664:\n" /* replacement */\
50 ".previous" :: "i" (feature) : "memory")
51
52/*
53 * Alternative inline assembly with input.
54 *
55 * Pecularities:
56 * No memory clobber here.
57 * Argument numbers start with 1.
58 * Best is to use constraints that are fixed size (like (%1) ... "r")
59 * If you use variable sized constraints like "m" or "g" in the
60 * replacement maake sure to pad to the worst case length.
61 */
62#define alternative_input(oldinstr, newinstr, feature, input...) \
63 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
64 ".section .altinstructions,\"a\"\n" \
65 " .align 4\n" \
66 " .long 661b\n" /* label */ \
67 " .long 663f\n" /* new instruction */ \
68 " .byte %c0\n" /* feature bit */ \
69 " .byte 662b-661b\n" /* sourcelen */ \
70 " .byte 664f-663f\n" /* replacementlen */ \
71 ".previous\n" \
72 ".section .altinstr_replacement,\"ax\"\n" \
73 "663:\n\t" newinstr "\n664:\n" /* replacement */\
74 ".previous" :: "i" (feature), ##input)
75
76/*
77 * Alternative inline assembly for SMP.
78 *
79 * alternative_smp() takes two versions (SMP first, UP second) and is
80 * for more complex stuff such as spinlocks.
81 *
82 * The LOCK_PREFIX macro defined here replaces the LOCK and
83 * LOCK_PREFIX macros used everywhere in the source tree.
84 *
85 * SMP alternatives use the same data structures as the other
86 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
87 * UP system running a SMP kernel. The existing apply_alternatives()
88 * works fine for patching a SMP kernel for UP.
89 *
90 * The SMP alternative tables can be kept after boot and contain both
91 * UP and SMP versions of the instructions to allow switching back to
92 * SMP at runtime, when hotplugging in a new CPU, which is especially
93 * useful in virtualized environments.
94 *
95 * The very common lock prefix is handled as special case in a
96 * separate table which is a pure address list without replacement ptr
97 * and size information. That keeps the table sizes small.
98 */
99
100#ifdef CONFIG_SMP
101#define alternative_smp(smpinstr, upinstr, args...) \
102 asm volatile ("661:\n\t" smpinstr "\n662:\n" \
103 ".section .smp_altinstructions,\"a\"\n" \
104 " .align 4\n" \
105 " .long 661b\n" /* label */ \
106 " .long 663f\n" /* new instruction */ \
107 " .byte 0x68\n" /* X86_FEATURE_UP */ \
108 " .byte 662b-661b\n" /* sourcelen */ \
109 " .byte 664f-663f\n" /* replacementlen */ \
110 ".previous\n" \
111 ".section .smp_altinstr_replacement,\"awx\"\n" \
112 "663:\n\t" upinstr "\n" /* replacement */ \
113 "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
114 ".previous" : args)
115
116#define LOCK_PREFIX \
117 ".section .smp_locks,\"a\"\n" \
118 " .align 4\n" \
119 " .long 661f\n" /* address */ \
120 ".previous\n" \
121 "661:\n\tlock; "
122
123#else /* ! CONFIG_SMP */
124#define alternative_smp(smpinstr, upinstr, args...) \
125 asm volatile (upinstr : args)
126#define LOCK_PREFIX ""
127#endif
128
129#endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h
index 28b96a6fb9fa..238cf4275b96 100644
--- a/include/asm-i386/arch_hooks.h
+++ b/include/asm-i386/arch_hooks.h
@@ -24,4 +24,7 @@ extern void trap_init_hook(void);
24extern void time_init_hook(void); 24extern void time_init_hook(void);
25extern void mca_nmi_hook(void); 25extern void mca_nmi_hook(void);
26 26
27extern int setup_early_printk(char *);
28extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
29
27#endif 30#endif
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index de649d3aa2d4..22d80ece95cb 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -10,12 +10,6 @@
10 * resource counting etc.. 10 * resource counting etc..
11 */ 11 */
12 12
13#ifdef CONFIG_SMP
14#define LOCK "lock ; "
15#else
16#define LOCK ""
17#endif
18
19/* 13/*
20 * Make sure gcc doesn't try to be clever and move things around 14 * Make sure gcc doesn't try to be clever and move things around
21 * on us. We need to use _exactly_ the address the user gave us, 15 * on us. We need to use _exactly_ the address the user gave us,
@@ -52,7 +46,7 @@ typedef struct { volatile int counter; } atomic_t;
52static __inline__ void atomic_add(int i, atomic_t *v) 46static __inline__ void atomic_add(int i, atomic_t *v)
53{ 47{
54 __asm__ __volatile__( 48 __asm__ __volatile__(
55 LOCK "addl %1,%0" 49 LOCK_PREFIX "addl %1,%0"
56 :"=m" (v->counter) 50 :"=m" (v->counter)
57 :"ir" (i), "m" (v->counter)); 51 :"ir" (i), "m" (v->counter));
58} 52}
@@ -67,7 +61,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
67static __inline__ void atomic_sub(int i, atomic_t *v) 61static __inline__ void atomic_sub(int i, atomic_t *v)
68{ 62{
69 __asm__ __volatile__( 63 __asm__ __volatile__(
70 LOCK "subl %1,%0" 64 LOCK_PREFIX "subl %1,%0"
71 :"=m" (v->counter) 65 :"=m" (v->counter)
72 :"ir" (i), "m" (v->counter)); 66 :"ir" (i), "m" (v->counter));
73} 67}
@@ -86,7 +80,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
86 unsigned char c; 80 unsigned char c;
87 81
88 __asm__ __volatile__( 82 __asm__ __volatile__(
89 LOCK "subl %2,%0; sete %1" 83 LOCK_PREFIX "subl %2,%0; sete %1"
90 :"=m" (v->counter), "=qm" (c) 84 :"=m" (v->counter), "=qm" (c)
91 :"ir" (i), "m" (v->counter) : "memory"); 85 :"ir" (i), "m" (v->counter) : "memory");
92 return c; 86 return c;
@@ -101,7 +95,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
101static __inline__ void atomic_inc(atomic_t *v) 95static __inline__ void atomic_inc(atomic_t *v)
102{ 96{
103 __asm__ __volatile__( 97 __asm__ __volatile__(
104 LOCK "incl %0" 98 LOCK_PREFIX "incl %0"
105 :"=m" (v->counter) 99 :"=m" (v->counter)
106 :"m" (v->counter)); 100 :"m" (v->counter));
107} 101}
@@ -115,7 +109,7 @@ static __inline__ void atomic_inc(atomic_t *v)
115static __inline__ void atomic_dec(atomic_t *v) 109static __inline__ void atomic_dec(atomic_t *v)
116{ 110{
117 __asm__ __volatile__( 111 __asm__ __volatile__(
118 LOCK "decl %0" 112 LOCK_PREFIX "decl %0"
119 :"=m" (v->counter) 113 :"=m" (v->counter)
120 :"m" (v->counter)); 114 :"m" (v->counter));
121} 115}
@@ -133,7 +127,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
133 unsigned char c; 127 unsigned char c;
134 128
135 __asm__ __volatile__( 129 __asm__ __volatile__(
136 LOCK "decl %0; sete %1" 130 LOCK_PREFIX "decl %0; sete %1"
137 :"=m" (v->counter), "=qm" (c) 131 :"=m" (v->counter), "=qm" (c)
138 :"m" (v->counter) : "memory"); 132 :"m" (v->counter) : "memory");
139 return c != 0; 133 return c != 0;
@@ -152,7 +146,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
152 unsigned char c; 146 unsigned char c;
153 147
154 __asm__ __volatile__( 148 __asm__ __volatile__(
155 LOCK "incl %0; sete %1" 149 LOCK_PREFIX "incl %0; sete %1"
156 :"=m" (v->counter), "=qm" (c) 150 :"=m" (v->counter), "=qm" (c)
157 :"m" (v->counter) : "memory"); 151 :"m" (v->counter) : "memory");
158 return c != 0; 152 return c != 0;
@@ -172,7 +166,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
172 unsigned char c; 166 unsigned char c;
173 167
174 __asm__ __volatile__( 168 __asm__ __volatile__(
175 LOCK "addl %2,%0; sets %1" 169 LOCK_PREFIX "addl %2,%0; sets %1"
176 :"=m" (v->counter), "=qm" (c) 170 :"=m" (v->counter), "=qm" (c)
177 :"ir" (i), "m" (v->counter) : "memory"); 171 :"ir" (i), "m" (v->counter) : "memory");
178 return c; 172 return c;
@@ -195,7 +189,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
195 /* Modern 486+ processor */ 189 /* Modern 486+ processor */
196 __i = i; 190 __i = i;
197 __asm__ __volatile__( 191 __asm__ __volatile__(
198 LOCK "xaddl %0, %1;" 192 LOCK_PREFIX "xaddl %0, %1;"
199 :"=r"(i) 193 :"=r"(i)
200 :"m"(v->counter), "0"(i)); 194 :"m"(v->counter), "0"(i));
201 return i + __i; 195 return i + __i;
@@ -231,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
231({ \ 225({ \
232 int c, old; \ 226 int c, old; \
233 c = atomic_read(v); \ 227 c = atomic_read(v); \
234 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 228 for (;;) { \
229 if (unlikely(c == (u))) \
230 break; \
231 old = atomic_cmpxchg((v), c, c + (a)); \
232 if (likely(old == c)) \
233 break; \
235 c = old; \ 234 c = old; \
235 } \
236 c != (u); \ 236 c != (u); \
237}) 237})
238#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 238#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
@@ -242,11 +242,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
242 242
243/* These are x86-specific, used by some header files */ 243/* These are x86-specific, used by some header files */
244#define atomic_clear_mask(mask, addr) \ 244#define atomic_clear_mask(mask, addr) \
245__asm__ __volatile__(LOCK "andl %0,%1" \ 245__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
246: : "r" (~(mask)),"m" (*addr) : "memory") 246: : "r" (~(mask)),"m" (*addr) : "memory")
247 247
248#define atomic_set_mask(mask, addr) \ 248#define atomic_set_mask(mask, addr) \
249__asm__ __volatile__(LOCK "orl %0,%1" \ 249__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
250: : "r" (mask),"m" (*(addr)) : "memory") 250: : "r" (mask),"m" (*(addr)) : "memory")
251 251
252/* Atomic operations are already serializing on x86 */ 252/* Atomic operations are already serializing on x86 */
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 88e6ca248cd7..7d20b95edb3b 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -7,6 +7,7 @@
7 7
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <asm/alternative.h>
10 11
11/* 12/*
12 * These have to be done with inline assembly: that way the bit-setting 13 * These have to be done with inline assembly: that way the bit-setting
@@ -16,12 +17,6 @@
16 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). 17 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
17 */ 18 */
18 19
19#ifdef CONFIG_SMP
20#define LOCK_PREFIX "lock ; "
21#else
22#define LOCK_PREFIX ""
23#endif
24
25#define ADDR (*(volatile long *) addr) 20#define ADDR (*(volatile long *) addr)
26 21
27/** 22/**
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 615911e5bd24..ca15c9c665cf 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,4 +10,6 @@
10#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) 10#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 11#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12 12
13#define __read_mostly __attribute__((__section__(".data.read_mostly")))
14
13#endif 15#endif
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index c4ec2a4d8fdf..5c0b5876b931 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -70,6 +70,7 @@
70#define X86_FEATURE_P3 (3*32+ 6) /* P3 */ 70#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
71#define X86_FEATURE_P4 (3*32+ 7) /* P4 */ 71#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
72#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ 72#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
73#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
73 74
74/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 75/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
75#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ 76#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 56211414fc95..6312c3e79814 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -18,7 +18,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
18{ 18{
19 do_timer(regs); 19 do_timer(regs);
20#ifndef CONFIG_SMP 20#ifndef CONFIG_SMP
21 update_process_times(user_mode(regs)); 21 update_process_times(user_mode_vm(regs));
22#endif 22#endif
23/* 23/*
24 * In the SMP case we use the local APIC timer interrupt to do the 24 * In the SMP case we use the local APIC timer interrupt to do the
diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h
index 4a0637a3e208..99f66be240be 100644
--- a/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -30,7 +30,8 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
30 return 0; 30 return 0;
31} 31}
32 32
33static inline int es7000_check_dsdt() 33#ifdef CONFIG_ACPI
34static inline int es7000_check_dsdt(void)
34{ 35{
35 struct acpi_table_header *header = NULL; 36 struct acpi_table_header *header = NULL;
36 if(!acpi_get_table_header_early(ACPI_DSDT, &header)) 37 if(!acpi_get_table_header_early(ACPI_DSDT, &header))
@@ -54,6 +55,11 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
54 } 55 }
55 return 0; 56 return 0;
56} 57}
57 58#else
59static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
60{
61 return 0;
62}
63#endif
58 64
59#endif /* __ASM_MACH_MPPARSE_H */ 65#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
index 92d638fc8b11..95568e6ca91c 100644
--- a/include/asm-i386/mach-visws/do_timer.h
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -11,7 +11,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
11 11
12 do_timer(regs); 12 do_timer(regs);
13#ifndef CONFIG_SMP 13#ifndef CONFIG_SMP
14 update_process_times(user_mode(regs)); 14 update_process_times(user_mode_vm(regs));
15#endif 15#endif
16/* 16/*
17 * In the SMP case we use the local APIC timer interrupt to do the 17 * In the SMP case we use the local APIC timer interrupt to do the
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index ae510e5d0d78..eaf518098981 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -5,7 +5,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
5{ 5{
6 do_timer(regs); 6 do_timer(regs);
7#ifndef CONFIG_SMP 7#ifndef CONFIG_SMP
8 update_process_times(user_mode(regs)); 8 update_process_times(user_mode_vm(regs));
9#endif 9#endif
10 10
11 voyager_timer_interrupt(regs); 11 voyager_timer_interrupt(regs);
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index 64a0b8e6afeb..62113d3bfdc2 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -22,7 +22,6 @@ extern int mp_bus_id_to_type [MAX_MP_BUSSES];
22extern int mp_irq_entries; 22extern int mp_irq_entries;
23extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; 23extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
24extern int mpc_default_type; 24extern int mpc_default_type;
25extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
26extern unsigned long mp_lapic_addr; 25extern unsigned long mp_lapic_addr;
27extern int pic_mode; 26extern int pic_mode;
28extern int using_apic_timer; 27extern int using_apic_timer;
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 5b6ceda68c5f..64cf937c7e33 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/config.h> 26#include <linux/config.h>
27#include <linux/ioctl.h> 27#include <linux/ioctl.h>
28#include <linux/errno.h>
28 29
29#define MTRR_IOCTL_BASE 'M' 30#define MTRR_IOCTL_BASE 'M'
30 31
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 9b2199e829f3..05a538531229 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -9,6 +9,8 @@
9#ifndef _ASM_MUTEX_H 9#ifndef _ASM_MUTEX_H
10#define _ASM_MUTEX_H 10#define _ASM_MUTEX_H
11 11
12#include "asm/alternative.h"
13
12/** 14/**
13 * __mutex_fastpath_lock - try to take the lock by moving the count 15 * __mutex_fastpath_lock - try to take the lock by moving the count
14 * from 1 to a 0 value 16 * from 1 to a 0 value
@@ -27,7 +29,7 @@ do { \
27 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 29 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
28 \ 30 \
29 __asm__ __volatile__( \ 31 __asm__ __volatile__( \
30 LOCK " decl (%%eax) \n" \ 32 LOCK_PREFIX " decl (%%eax) \n" \
31 " js 2f \n" \ 33 " js 2f \n" \
32 "1: \n" \ 34 "1: \n" \
33 \ 35 \
@@ -83,7 +85,7 @@ do { \
83 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ 85 typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
84 \ 86 \
85 __asm__ __volatile__( \ 87 __asm__ __volatile__( \
86 LOCK " incl (%%eax) \n" \ 88 LOCK_PREFIX " incl (%%eax) \n" \
87 " jle 2f \n" \ 89 " jle 2f \n" \
88 "1: \n" \ 90 "1: \n" \
89 \ 91 \
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 74ef721b534d..27bde973abc7 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -61,4 +61,6 @@ static inline int pte_exec_kernel(pte_t pte)
61#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) 61#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
62#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 62#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
63 63
64void vmalloc_sync_all(void);
65
64#endif /* _I386_PGTABLE_2LEVEL_H */ 66#endif /* _I386_PGTABLE_2LEVEL_H */
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index f1a8b454920a..36a5aa63cbbf 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -152,4 +152,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
152 152
153#define __pmd_free_tlb(tlb, x) do { } while (0) 153#define __pmd_free_tlb(tlb, x) do { } while (0)
154 154
155#define vmalloc_sync_all() ((void)0)
156
155#endif /* _I386_PGTABLE_3LEVEL_H */ 157#endif /* _I386_PGTABLE_3LEVEL_H */
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index b57cc7afdf7e..94f00195d543 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -21,21 +21,23 @@
21#define RW_LOCK_BIAS_STR "0x01000000" 21#define RW_LOCK_BIAS_STR "0x01000000"
22 22
23#define __build_read_lock_ptr(rw, helper) \ 23#define __build_read_lock_ptr(rw, helper) \
24 asm volatile(LOCK "subl $1,(%0)\n\t" \ 24 alternative_smp("lock; subl $1,(%0)\n\t" \
25 "jns 1f\n" \ 25 "jns 1f\n" \
26 "call " helper "\n\t" \ 26 "call " helper "\n\t" \
27 "1:\n" \ 27 "1:\n", \
28 ::"a" (rw) : "memory") 28 "subl $1,(%0)\n\t", \
29 :"a" (rw) : "memory")
29 30
30#define __build_read_lock_const(rw, helper) \ 31#define __build_read_lock_const(rw, helper) \
31 asm volatile(LOCK "subl $1,%0\n\t" \ 32 alternative_smp("lock; subl $1,%0\n\t" \
32 "jns 1f\n" \ 33 "jns 1f\n" \
33 "pushl %%eax\n\t" \ 34 "pushl %%eax\n\t" \
34 "leal %0,%%eax\n\t" \ 35 "leal %0,%%eax\n\t" \
35 "call " helper "\n\t" \ 36 "call " helper "\n\t" \
36 "popl %%eax\n\t" \ 37 "popl %%eax\n\t" \
37 "1:\n" \ 38 "1:\n", \
38 :"=m" (*(volatile int *)rw) : : "memory") 39 "subl $1,%0\n\t", \
40 "=m" (*(volatile int *)rw) : : "memory")
39 41
40#define __build_read_lock(rw, helper) do { \ 42#define __build_read_lock(rw, helper) do { \
41 if (__builtin_constant_p(rw)) \ 43 if (__builtin_constant_p(rw)) \
@@ -45,21 +47,23 @@
45 } while (0) 47 } while (0)
46 48
47#define __build_write_lock_ptr(rw, helper) \ 49#define __build_write_lock_ptr(rw, helper) \
48 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \ 50 alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
49 "jz 1f\n" \ 51 "jz 1f\n" \
50 "call " helper "\n\t" \ 52 "call " helper "\n\t" \
51 "1:\n" \ 53 "1:\n", \
52 ::"a" (rw) : "memory") 54 "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t", \
55 :"a" (rw) : "memory")
53 56
54#define __build_write_lock_const(rw, helper) \ 57#define __build_write_lock_const(rw, helper) \
55 asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \ 58 alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
56 "jz 1f\n" \ 59 "jz 1f\n" \
57 "pushl %%eax\n\t" \ 60 "pushl %%eax\n\t" \
58 "leal %0,%%eax\n\t" \ 61 "leal %0,%%eax\n\t" \
59 "call " helper "\n\t" \ 62 "call " helper "\n\t" \
60 "popl %%eax\n\t" \ 63 "popl %%eax\n\t" \
61 "1:\n" \ 64 "1:\n", \
62 :"=m" (*(volatile int *)rw) : : "memory") 65 "subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
66 "=m" (*(volatile int *)rw) : : "memory")
63 67
64#define __build_write_lock(rw, helper) do { \ 68#define __build_write_lock(rw, helper) do { \
65 if (__builtin_constant_p(rw)) \ 69 if (__builtin_constant_p(rw)) \
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index 6a42b2142fd6..f7a0f310c524 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -99,7 +99,7 @@ static inline void down(struct semaphore * sem)
99 might_sleep(); 99 might_sleep();
100 __asm__ __volatile__( 100 __asm__ __volatile__(
101 "# atomic down operation\n\t" 101 "# atomic down operation\n\t"
102 LOCK "decl %0\n\t" /* --sem->count */ 102 LOCK_PREFIX "decl %0\n\t" /* --sem->count */
103 "js 2f\n" 103 "js 2f\n"
104 "1:\n" 104 "1:\n"
105 LOCK_SECTION_START("") 105 LOCK_SECTION_START("")
@@ -123,7 +123,7 @@ static inline int down_interruptible(struct semaphore * sem)
123 might_sleep(); 123 might_sleep();
124 __asm__ __volatile__( 124 __asm__ __volatile__(
125 "# atomic interruptible down operation\n\t" 125 "# atomic interruptible down operation\n\t"
126 LOCK "decl %1\n\t" /* --sem->count */ 126 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
127 "js 2f\n\t" 127 "js 2f\n\t"
128 "xorl %0,%0\n" 128 "xorl %0,%0\n"
129 "1:\n" 129 "1:\n"
@@ -148,7 +148,7 @@ static inline int down_trylock(struct semaphore * sem)
148 148
149 __asm__ __volatile__( 149 __asm__ __volatile__(
150 "# atomic interruptible down operation\n\t" 150 "# atomic interruptible down operation\n\t"
151 LOCK "decl %1\n\t" /* --sem->count */ 151 LOCK_PREFIX "decl %1\n\t" /* --sem->count */
152 "js 2f\n\t" 152 "js 2f\n\t"
153 "xorl %0,%0\n" 153 "xorl %0,%0\n"
154 "1:\n" 154 "1:\n"
@@ -173,7 +173,7 @@ static inline void up(struct semaphore * sem)
173{ 173{
174 __asm__ __volatile__( 174 __asm__ __volatile__(
175 "# atomic up operation\n\t" 175 "# atomic up operation\n\t"
176 LOCK "incl %0\n\t" /* ++sem->count */ 176 LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
177 "jle 2f\n" 177 "jle 2f\n"
178 "1:\n" 178 "1:\n"
179 LOCK_SECTION_START("") 179 LOCK_SECTION_START("")
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 23604350cdf4..d76b7693cf1d 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -35,31 +35,41 @@
35#define __raw_spin_lock_string_flags \ 35#define __raw_spin_lock_string_flags \
36 "\n1:\t" \ 36 "\n1:\t" \
37 "lock ; decb %0\n\t" \ 37 "lock ; decb %0\n\t" \
38 "jns 4f\n\t" \ 38 "jns 5f\n" \
39 "2:\t" \ 39 "2:\t" \
40 "testl $0x200, %1\n\t" \ 40 "testl $0x200, %1\n\t" \
41 "jz 3f\n\t" \ 41 "jz 4f\n\t" \
42 "sti\n\t" \ 42 "sti\n" \
43 "3:\t" \ 43 "3:\t" \
44 "rep;nop\n\t" \ 44 "rep;nop\n\t" \
45 "cmpb $0, %0\n\t" \ 45 "cmpb $0, %0\n\t" \
46 "jle 3b\n\t" \ 46 "jle 3b\n\t" \
47 "cli\n\t" \ 47 "cli\n\t" \
48 "jmp 1b\n" \ 48 "jmp 1b\n" \
49 "4:\n\t" 49 "4:\t" \
50 "rep;nop\n\t" \
51 "cmpb $0, %0\n\t" \
52 "jg 1b\n\t" \
53 "jmp 4b\n" \
54 "5:\n\t"
55
56#define __raw_spin_lock_string_up \
57 "\n\tdecb %0"
50 58
51static inline void __raw_spin_lock(raw_spinlock_t *lock) 59static inline void __raw_spin_lock(raw_spinlock_t *lock)
52{ 60{
53 __asm__ __volatile__( 61 alternative_smp(
54 __raw_spin_lock_string 62 __raw_spin_lock_string,
55 :"=m" (lock->slock) : : "memory"); 63 __raw_spin_lock_string_up,
64 "=m" (lock->slock) : : "memory");
56} 65}
57 66
58static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 67static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
59{ 68{
60 __asm__ __volatile__( 69 alternative_smp(
61 __raw_spin_lock_string_flags 70 __raw_spin_lock_string_flags,
62 :"=m" (lock->slock) : "r" (flags) : "memory"); 71 __raw_spin_lock_string_up,
72 "=m" (lock->slock) : "r" (flags) : "memory");
63} 73}
64 74
65static inline int __raw_spin_trylock(raw_spinlock_t *lock) 75static inline int __raw_spin_trylock(raw_spinlock_t *lock)
@@ -178,12 +188,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
178 188
179static inline void __raw_read_unlock(raw_rwlock_t *rw) 189static inline void __raw_read_unlock(raw_rwlock_t *rw)
180{ 190{
181 asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory"); 191 asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
182} 192}
183 193
184static inline void __raw_write_unlock(raw_rwlock_t *rw) 194static inline void __raw_write_unlock(raw_rwlock_t *rw)
185{ 195{
186 asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0" 196 asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
187 : "=m" (rw->lock) : : "memory"); 197 : "=m" (rw->lock) : : "memory");
188} 198}
189 199
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 399145a247f2..d0d8d7448d88 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -352,67 +352,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
352 352
353#endif 353#endif
354 354
355#ifdef __KERNEL__
356struct alt_instr {
357 __u8 *instr; /* original instruction */
358 __u8 *replacement;
359 __u8 cpuid; /* cpuid bit set for replacement */
360 __u8 instrlen; /* length of original instruction */
361 __u8 replacementlen; /* length of new instruction, <= instrlen */
362 __u8 pad;
363};
364#endif
365
366/*
367 * Alternative instructions for different CPU types or capabilities.
368 *
369 * This allows to use optimized instructions even on generic binary
370 * kernels.
371 *
372 * length of oldinstr must be longer or equal the length of newinstr
373 * It can be padded with nops as needed.
374 *
375 * For non barrier like inlines please define new variants
376 * without volatile and memory clobber.
377 */
378#define alternative(oldinstr, newinstr, feature) \
379 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
380 ".section .altinstructions,\"a\"\n" \
381 " .align 4\n" \
382 " .long 661b\n" /* label */ \
383 " .long 663f\n" /* new instruction */ \
384 " .byte %c0\n" /* feature bit */ \
385 " .byte 662b-661b\n" /* sourcelen */ \
386 " .byte 664f-663f\n" /* replacementlen */ \
387 ".previous\n" \
388 ".section .altinstr_replacement,\"ax\"\n" \
389 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
390 ".previous" :: "i" (feature) : "memory")
391
392/*
393 * Alternative inline assembly with input.
394 *
395 * Pecularities:
396 * No memory clobber here.
397 * Argument numbers start with 1.
398 * Best is to use constraints that are fixed size (like (%1) ... "r")
399 * If you use variable sized constraints like "m" or "g" in the
400 * replacement maake sure to pad to the worst case length.
401 */
402#define alternative_input(oldinstr, newinstr, feature, input...) \
403 asm volatile ("661:\n\t" oldinstr "\n662:\n" \
404 ".section .altinstructions,\"a\"\n" \
405 " .align 4\n" \
406 " .long 661b\n" /* label */ \
407 " .long 663f\n" /* new instruction */ \
408 " .byte %c0\n" /* feature bit */ \
409 " .byte 662b-661b\n" /* sourcelen */ \
410 " .byte 664f-663f\n" /* replacementlen */ \
411 ".previous\n" \
412 ".section .altinstr_replacement,\"ax\"\n" \
413 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
414 ".previous" :: "i" (feature), ##input)
415
416/* 355/*
417 * Force strict CPU ordering. 356 * Force strict CPU ordering.
418 * And yes, this is required on UP too when we're talking 357 * And yes, this is required on UP too when we're talking
@@ -558,5 +497,6 @@ static inline void sched_cacheflush(void)
558} 497}
559 498
560extern unsigned long arch_align_stack(unsigned long sp); 499extern unsigned long arch_align_stack(unsigned long sp);
500extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
561 501
562#endif 502#endif
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 3f1337c34208..371457b1ceb6 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -197,13 +197,15 @@ extern void __put_user_8(void);
197 197
198#define put_user(x,ptr) \ 198#define put_user(x,ptr) \
199({ int __ret_pu; \ 199({ int __ret_pu; \
200 __typeof__(*(ptr)) __pu_val; \
200 __chk_user_ptr(ptr); \ 201 __chk_user_ptr(ptr); \
202 __pu_val = x; \
201 switch(sizeof(*(ptr))) { \ 203 switch(sizeof(*(ptr))) { \
202 case 1: __put_user_1(x, ptr); break; \ 204 case 1: __put_user_1(__pu_val, ptr); break; \
203 case 2: __put_user_2(x, ptr); break; \ 205 case 2: __put_user_2(__pu_val, ptr); break; \
204 case 4: __put_user_4(x, ptr); break; \ 206 case 4: __put_user_4(__pu_val, ptr); break; \
205 case 8: __put_user_8(x, ptr); break; \ 207 case 8: __put_user_8(__pu_val, ptr); break; \
206 default:__put_user_X(x, ptr); break; \ 208 default:__put_user_X(__pu_val, ptr); break; \
207 } \ 209 } \
208 __ret_pu; \ 210 __ret_pu; \
209}) 211})
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index dc81a55dd94d..d8afd0e3b81a 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -347,9 +347,9 @@ __syscall_return(type,__res); \
347type name(type1 arg1) \ 347type name(type1 arg1) \
348{ \ 348{ \
349long __res; \ 349long __res; \
350__asm__ volatile ("int $0x80" \ 350__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
351 : "=a" (__res) \ 351 : "=a" (__res) \
352 : "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \ 352 : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
353__syscall_return(type,__res); \ 353__syscall_return(type,__res); \
354} 354}
355 355
@@ -357,9 +357,10 @@ __syscall_return(type,__res); \
357type name(type1 arg1,type2 arg2) \ 357type name(type1 arg1,type2 arg2) \
358{ \ 358{ \
359long __res; \ 359long __res; \
360__asm__ volatile ("int $0x80" \ 360__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
361 : "=a" (__res) \ 361 : "=a" (__res) \
362 : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \ 362 : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
363 : "memory"); \
363__syscall_return(type,__res); \ 364__syscall_return(type,__res); \
364} 365}
365 366
@@ -367,9 +368,9 @@ __syscall_return(type,__res); \
367type name(type1 arg1,type2 arg2,type3 arg3) \ 368type name(type1 arg1,type2 arg2,type3 arg3) \
368{ \ 369{ \
369long __res; \ 370long __res; \
370__asm__ volatile ("int $0x80" \ 371__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
371 : "=a" (__res) \ 372 : "=a" (__res) \
372 : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ 373 : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
373 "d" ((long)(arg3)) : "memory"); \ 374 "d" ((long)(arg3)) : "memory"); \
374__syscall_return(type,__res); \ 375__syscall_return(type,__res); \
375} 376}
@@ -378,9 +379,9 @@ __syscall_return(type,__res); \
378type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \ 379type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
379{ \ 380{ \
380long __res; \ 381long __res; \
381__asm__ volatile ("int $0x80" \ 382__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
382 : "=a" (__res) \ 383 : "=a" (__res) \
383 : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ 384 : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
384 "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \ 385 "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
385__syscall_return(type,__res); \ 386__syscall_return(type,__res); \
386} 387}
@@ -390,10 +391,12 @@ __syscall_return(type,__res); \
390type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \ 391type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
391{ \ 392{ \
392long __res; \ 393long __res; \
393__asm__ volatile ("int $0x80" \ 394__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
395 "int $0x80 ; pop %%ebx" \
394 : "=a" (__res) \ 396 : "=a" (__res) \
395 : "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ 397 : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
396 "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \ 398 "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
399 : "memory"); \
397__syscall_return(type,__res); \ 400__syscall_return(type,__res); \
398} 401}
399 402
@@ -402,11 +405,14 @@ __syscall_return(type,__res); \
402type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \ 405type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
403{ \ 406{ \
404long __res; \ 407long __res; \
405__asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \ 408 struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
409__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
410 "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
411 "pop %%ebx ; pop %%ebp" \
406 : "=a" (__res) \ 412 : "=a" (__res) \
407 : "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \ 413 : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
408 "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \ 414 "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
409 "0" ((long)(arg6)) : "memory"); \ 415 : "memory"); \
410__syscall_return(type,__res); \ 416__syscall_return(type,__res); \
411} 417}
412 418
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index d3e0dfa99e1f..569ec7574baf 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
95({ \ 95({ \
96 int c, old; \ 96 int c, old; \
97 c = atomic_read(v); \ 97 c = atomic_read(v); \
98 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 98 for (;;) { \
99 if (unlikely(c == (u))) \
100 break; \
101 old = atomic_cmpxchg((v), c, c + (a)); \
102 if (likely(old == c)) \
103 break; \
99 c = old; \ 104 c = old; \
105 } \
100 c != (u); \ 106 c != (u); \
101}) 107})
102#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 108#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
index 40dd25195d65..f0a104db8f20 100644
--- a/include/asm-ia64/cache.h
+++ b/include/asm-ia64/cache.h
@@ -25,4 +25,6 @@
25# define SMP_CACHE_BYTES (1 << 3) 25# define SMP_CACHE_BYTES (1 << 3)
26#endif 26#endif
27 27
28#define __read_mostly __attribute__((__section__(".data.read_mostly")))
29
28#endif /* _ASM_IA64_CACHE_H */ 30#endif /* _ASM_IA64_CACHE_H */
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 862e497c2645..732d696d31a6 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -175,8 +175,14 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
175({ \ 175({ \
176 int c, old; \ 176 int c, old; \
177 c = atomic_read(v); \ 177 c = atomic_read(v); \
178 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 178 for (;;) { \
179 if (unlikely(c == (u))) \
180 break; \
181 old = atomic_cmpxchg((v), c, c + (a)); \
182 if (likely(old == c)) \
183 break; \
179 c = old; \ 184 c = old; \
185 } \
180 c != (u); \ 186 c != (u); \
181}) 187})
182#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 188#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-parisc/cache.h b/include/asm-parisc/cache.h
index 93f179f13ce8..ae50f8e12eed 100644
--- a/include/asm-parisc/cache.h
+++ b/include/asm-parisc/cache.h
@@ -29,6 +29,8 @@
29 29
30#define SMP_CACHE_BYTES L1_CACHE_BYTES 30#define SMP_CACHE_BYTES L1_CACHE_BYTES
31 31
32#define __read_mostly __attribute__((__section__(".data.read_mostly")))
33
32extern void flush_data_cache_local(void *); /* flushes local data-cache only */ 34extern void flush_data_cache_local(void *); /* flushes local data-cache only */
33extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */ 35extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
34#ifdef CONFIG_SMP 36#ifdef CONFIG_SMP
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index e31922c50e53..464301cd0d03 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -27,10 +27,9 @@
27#define percpu_modcopy(pcpudst, src, size) \ 27#define percpu_modcopy(pcpudst, src, size) \
28do { \ 28do { \
29 unsigned int __i; \ 29 unsigned int __i; \
30 for (__i = 0; __i < NR_CPUS; __i++) \ 30 for_each_cpu(__i) \
31 if (cpu_possible(__i)) \ 31 memcpy((pcpudst)+__per_cpu_offset(__i), \
32 memcpy((pcpudst)+__per_cpu_offset(__i), \ 32 (src), (size)); \
33 (src), (size)); \
34} while (0) 33} while (0)
35 34
36extern void setup_per_cpu_areas(void); 35extern void setup_per_cpu_areas(void);
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index be6fefe223d6..de1d9926aa60 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -89,10 +89,15 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
89static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 89static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
90{ 90{
91 int c, old; 91 int c, old;
92
93 c = atomic_read(v); 92 c = atomic_read(v);
94 while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) 93 for (;;) {
94 if (unlikely(c == u))
95 break;
96 old = atomic_cmpxchg(v, c, c + a);
97 if (likely(old == c))
98 break;
95 c = old; 99 c = old;
100 }
96 return c != u; 101 return c != u;
97} 102}
98 103
@@ -167,10 +172,15 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
167 long long a, long long u) 172 long long a, long long u)
168{ 173{
169 long long c, old; 174 long long c, old;
170
171 c = atomic64_read(v); 175 c = atomic64_read(v);
172 while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c) 176 for (;;) {
177 if (unlikely(c == u))
178 break;
179 old = atomic64_cmpxchg(v, c, c + a);
180 if (likely(old == c))
181 break;
173 c = old; 182 c = old;
183 }
174 return c != u; 184 return c != u;
175} 185}
176 186
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 123fcaca295e..e10ed87094f0 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -46,10 +46,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
46#define percpu_modcopy(pcpudst, src, size) \ 46#define percpu_modcopy(pcpudst, src, size) \
47do { \ 47do { \
48 unsigned int __i; \ 48 unsigned int __i; \
49 for (__i = 0; __i < NR_CPUS; __i++) \ 49 for_each_cpu(__i) \
50 if (cpu_possible(__i)) \ 50 memcpy((pcpudst)+__per_cpu_offset[__i], \
51 memcpy((pcpudst)+__per_cpu_offset[__i], \ 51 (src), (size)); \
52 (src), (size)); \
53} while (0) 52} while (0)
54 53
55#else /* ! SMP */ 54#else /* ! SMP */
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 25256bdc8aae..468eb48d8142 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -78,9 +78,15 @@ extern int atomic64_sub_ret(int, atomic64_t *);
78({ \ 78({ \
79 int c, old; \ 79 int c, old; \
80 c = atomic_read(v); \ 80 c = atomic_read(v); \
81 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 81 for (;;) { \
82 if (unlikely(c == (u))) \
83 break; \
84 old = atomic_cmpxchg((v), c, c + (a)); \
85 if (likely(old == c)) \
86 break; \
82 c = old; \ 87 c = old; \
83 c != (u); \ 88 } \
89 likely(c != (u)); \
84}) 90})
85#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 91#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
86 92
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index f7d35a2ae9b8..e9df17acedde 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -13,4 +13,6 @@
13#define SMP_CACHE_BYTES_SHIFT 6 13#define SMP_CACHE_BYTES_SHIFT 6
14#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */ 14#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
15 15
16#define __read_mostly __attribute__((__section__(".data.read_mostly")))
17
16#endif 18#endif
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index aea4e51e7cd1..82032e159a76 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -26,10 +26,9 @@ register unsigned long __local_per_cpu_offset asm("g5");
26#define percpu_modcopy(pcpudst, src, size) \ 26#define percpu_modcopy(pcpudst, src, size) \
27do { \ 27do { \
28 unsigned int __i; \ 28 unsigned int __i; \
29 for (__i = 0; __i < NR_CPUS; __i++) \ 29 for_each_cpu(__i) \
30 if (cpu_possible(__i)) \ 30 memcpy((pcpudst)+__per_cpu_offset(__i), \
31 memcpy((pcpudst)+__per_cpu_offset(__i), \ 31 (src), (size)); \
32 (src), (size)); \
33} while (0) 32} while (0)
34#else /* ! SMP */ 33#else /* ! SMP */
35 34
diff --git a/include/asm-um/alternative.h b/include/asm-um/alternative.h
new file mode 100644
index 000000000000..b6434396bd42
--- /dev/null
+++ b/include/asm-um/alternative.h
@@ -0,0 +1,6 @@
1#ifndef __UM_ALTERNATIVE_H
2#define __UM_ALTERNATIVE_H
3
4#include "asm/arch/alternative.h"
5
6#endif
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e772..cecbf7baa6aa 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
405({ \ 405({ \
406 int c, old; \ 406 int c, old; \
407 c = atomic_read(v); \ 407 c = atomic_read(v); \
408 while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 408 for (;;) { \
409 if (unlikely(c == (u))) \
410 break; \
411 old = atomic_cmpxchg((v), c, c + (a)); \
412 if (likely(old == c)) \
413 break; \
409 c = old; \ 414 c = old; \
415 } \
410 c != (u); \ 416 c != (u); \
411}) 417})
412#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 418#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 263f0a211ed7..c8043a16152e 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -20,6 +20,8 @@
20 __attribute__((__section__(".data.page_aligned"))) 20 __attribute__((__section__(".data.page_aligned")))
21#endif 21#endif
22 22
23#define __read_mostly __attribute__((__section__(".data.read_mostly")))
24
23#endif 25#endif
24 26
25#endif 27#endif
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 29a6b0408f75..4405b4adeaba 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,10 +26,9 @@
26#define percpu_modcopy(pcpudst, src, size) \ 26#define percpu_modcopy(pcpudst, src, size) \
27do { \ 27do { \
28 unsigned int __i; \ 28 unsigned int __i; \
29 for (__i = 0; __i < NR_CPUS; __i++) \ 29 for_each_cpu(__i) \
30 if (cpu_possible(__i)) \ 30 memcpy((pcpudst)+__per_cpu_offset(__i), \
31 memcpy((pcpudst)+__per_cpu_offset(__i), \ 31 (src), (size)); \
32 (src), (size)); \
33} while (0) 32} while (0)
34 33
35extern void setup_per_cpu_areas(void); 34extern void setup_per_cpu_areas(void);
diff --git a/include/linux/cache.h b/include/linux/cache.h
index d22e632f41fb..cc4b3aafad9a 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,9 +13,7 @@
13#define SMP_CACHE_BYTES L1_CACHE_BYTES 13#define SMP_CACHE_BYTES L1_CACHE_BYTES
14#endif 14#endif
15 15
16#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC) 16#ifndef __read_mostly
17#define __read_mostly __attribute__((__section__(".data.read_mostly")))
18#else
19#define __read_mostly 17#define __read_mostly
20#endif 18#endif
21 19
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index b68fdf1f3156..3c9b0bc05123 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -378,7 +378,6 @@ struct cdrom_generic_command
378#define CDC_MEDIA_CHANGED 0x80 /* media changed */ 378#define CDC_MEDIA_CHANGED 0x80 /* media changed */
379#define CDC_PLAY_AUDIO 0x100 /* audio functions */ 379#define CDC_PLAY_AUDIO 0x100 /* audio functions */
380#define CDC_RESET 0x200 /* hard reset device */ 380#define CDC_RESET 0x200 /* hard reset device */
381#define CDC_IOCTLS 0x400 /* driver has non-standard ioctls */
382#define CDC_DRIVE_STATUS 0x800 /* driver implements drive status */ 381#define CDC_DRIVE_STATUS 0x800 /* driver implements drive status */
383#define CDC_GENERIC_PACKET 0x1000 /* driver implements generic packets */ 382#define CDC_GENERIC_PACKET 0x1000 /* driver implements generic packets */
384#define CDC_CD_R 0x2000 /* drive is a CD-R */ 383#define CDC_CD_R 0x2000 /* drive is a CD-R */
@@ -974,9 +973,7 @@ struct cdrom_device_ops {
974 int (*reset) (struct cdrom_device_info *); 973 int (*reset) (struct cdrom_device_info *);
975 /* play stuff */ 974 /* play stuff */
976 int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); 975 int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
977 /* dev-specific */ 976
978 int (*dev_ioctl) (struct cdrom_device_info *,
979 unsigned int, unsigned long);
980/* driver specifications */ 977/* driver specifications */
981 const int capability; /* capability flags */ 978 const int capability; /* capability flags */
982 int n_minors; /* number of active minor devices */ 979 int n_minors; /* number of active minor devices */
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 1289f0ec4c00..1e4bdfcf83a2 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -52,7 +52,12 @@ struct file;
52#ifdef CONFIG_EPOLL 52#ifdef CONFIG_EPOLL
53 53
54/* Used to initialize the epoll bits inside the "struct file" */ 54/* Used to initialize the epoll bits inside the "struct file" */
55void eventpoll_init_file(struct file *file); 55static inline void eventpoll_init_file(struct file *file)
56{
57 INIT_LIST_HEAD(&file->f_ep_links);
58 spin_lock_init(&file->f_ep_lock);
59}
60
56 61
57/* Used to release the epoll bits inside the "struct file" */ 62/* Used to release the epoll bits inside the "struct file" */
58void eventpoll_release_file(struct file *file); 63void eventpoll_release_file(struct file *file);
@@ -85,7 +90,6 @@ static inline void eventpoll_release(struct file *file)
85 eventpoll_release_file(file); 90 eventpoll_release_file(file);
86} 91}
87 92
88
89#else 93#else
90 94
91static inline void eventpoll_init_file(struct file *file) {} 95static inline void eventpoll_init_file(struct file *file) {}
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index c0272d73ab20..e7239f2f97a1 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -772,9 +772,12 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
772 772
773 773
774/* inode.c */ 774/* inode.c */
775extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); 775int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
776extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); 776struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
777extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); 777struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
778int ext3_get_block_handle(handle_t *handle, struct inode *inode,
779 sector_t iblock, struct buffer_head *bh_result, int create,
780 int extend_disksize);
778 781
779extern void ext3_read_inode (struct inode *); 782extern void ext3_read_inode (struct inode *);
780extern int ext3_write_inode (struct inode *, int); 783extern int ext3_write_inode (struct inode *, int);
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index e71dd98dbcae..7abf90147180 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -19,6 +19,7 @@
19#include <linux/rwsem.h> 19#include <linux/rwsem.h>
20#include <linux/rbtree.h> 20#include <linux/rbtree.h>
21#include <linux/seqlock.h> 21#include <linux/seqlock.h>
22#include <linux/mutex.h>
22 23
23struct ext3_reserve_window { 24struct ext3_reserve_window {
24 __u32 _rsv_start; /* First byte reserved */ 25 __u32 _rsv_start; /* First byte reserved */
@@ -122,16 +123,16 @@ struct ext3_inode_info {
122 __u16 i_extra_isize; 123 __u16 i_extra_isize;
123 124
124 /* 125 /*
125 * truncate_sem is for serialising ext3_truncate() against 126 * truncate_mutex is for serialising ext3_truncate() against
126 * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's 127 * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
127 * data tree are chopped off during truncate. We can't do that in 128 * data tree are chopped off during truncate. We can't do that in
128 * ext3 because whenever we perform intermediate commits during 129 * ext3 because whenever we perform intermediate commits during
129 * truncate, the inode and all the metadata blocks *must* be in a 130 * truncate, the inode and all the metadata blocks *must* be in a
130 * consistent state which allows truncation of the orphans to restart 131 * consistent state which allows truncation of the orphans to restart
131 * during recovery. Hence we must fix the get_block-vs-truncate race 132 * during recovery. Hence we must fix the get_block-vs-truncate race
132 * by other means, so we have truncate_sem. 133 * by other means, so we have truncate_mutex.
133 */ 134 */
134 struct semaphore truncate_sem; 135 struct mutex truncate_mutex;
135 struct inode vfs_inode; 136 struct inode vfs_inode;
136}; 137};
137 138
diff --git a/include/linux/file.h b/include/linux/file.h
index 9901b850f2e4..9f7c2513866f 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,6 +10,7 @@
10#include <linux/compiler.h> 10#include <linux/compiler.h>
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/rcupdate.h> 12#include <linux/rcupdate.h>
13#include <linux/types.h>
13 14
14/* 15/*
15 * The default fd array needs to be at least BITS_PER_LONG, 16 * The default fd array needs to be at least BITS_PER_LONG,
@@ -17,10 +18,22 @@
17 */ 18 */
18#define NR_OPEN_DEFAULT BITS_PER_LONG 19#define NR_OPEN_DEFAULT BITS_PER_LONG
19 20
21/*
22 * The embedded_fd_set is a small fd_set,
23 * suitable for most tasks (which open <= BITS_PER_LONG files)
24 */
25struct embedded_fd_set {
26 unsigned long fds_bits[1];
27};
28
29/*
30 * More than this number of fds: we use a separately allocated fd_set
31 */
32#define EMBEDDED_FD_SET_SIZE (BITS_PER_BYTE * sizeof(struct embedded_fd_set))
33
20struct fdtable { 34struct fdtable {
21 unsigned int max_fds; 35 unsigned int max_fds;
22 int max_fdset; 36 int max_fdset;
23 int next_fd;
24 struct file ** fd; /* current fd array */ 37 struct file ** fd; /* current fd array */
25 fd_set *close_on_exec; 38 fd_set *close_on_exec;
26 fd_set *open_fds; 39 fd_set *open_fds;
@@ -33,13 +46,20 @@ struct fdtable {
33 * Open file table structure 46 * Open file table structure
34 */ 47 */
35struct files_struct { 48struct files_struct {
49 /*
50 * read mostly part
51 */
36 atomic_t count; 52 atomic_t count;
37 struct fdtable *fdt; 53 struct fdtable *fdt;
38 struct fdtable fdtab; 54 struct fdtable fdtab;
39 fd_set close_on_exec_init; 55 /*
40 fd_set open_fds_init; 56 * written part on a separate cache line in SMP
57 */
58 spinlock_t file_lock ____cacheline_aligned_in_smp;
59 int next_fd;
60 struct embedded_fd_set close_on_exec_init;
61 struct embedded_fd_set open_fds_init;
41 struct file * fd_array[NR_OPEN_DEFAULT]; 62 struct file * fd_array[NR_OPEN_DEFAULT];
42 spinlock_t file_lock; /* Protects concurrent writers. Nests inside tsk->alloc_lock */
43}; 63};
44 64
45#define files_fdtable(files) (rcu_dereference((files)->fdt)) 65#define files_fdtable(files) (rcu_dereference((files)->fdt))
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 128d0082522c..f9c9dea636d0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -397,8 +397,8 @@ struct block_device {
397 dev_t bd_dev; /* not a kdev_t - it's a search key */ 397 dev_t bd_dev; /* not a kdev_t - it's a search key */
398 struct inode * bd_inode; /* will die */ 398 struct inode * bd_inode; /* will die */
399 int bd_openers; 399 int bd_openers;
400 struct semaphore bd_sem; /* open/close mutex */ 400 struct mutex bd_mutex; /* open/close mutex */
401 struct semaphore bd_mount_sem; /* mount mutex */ 401 struct mutex bd_mount_mutex; /* mount mutex */
402 struct list_head bd_inodes; 402 struct list_head bd_inodes;
403 void * bd_holder; 403 void * bd_holder;
404 int bd_holders; 404 int bd_holders;
@@ -509,7 +509,7 @@ struct inode {
509 509
510#ifdef CONFIG_INOTIFY 510#ifdef CONFIG_INOTIFY
511 struct list_head inotify_watches; /* watches on this inode */ 511 struct list_head inotify_watches; /* watches on this inode */
512 struct semaphore inotify_sem; /* protects the watches list */ 512 struct mutex inotify_mutex; /* protects the watches list */
513#endif 513#endif
514 514
515 unsigned long i_state; 515 unsigned long i_state;
@@ -847,7 +847,7 @@ struct super_block {
847 * The next field is for VFS *only*. No filesystems have any business 847 * The next field is for VFS *only*. No filesystems have any business
848 * even looking at it. You had been warned. 848 * even looking at it. You had been warned.
849 */ 849 */
850 struct semaphore s_vfs_rename_sem; /* Kludge */ 850 struct mutex s_vfs_rename_mutex; /* Kludge */
851 851
852 /* Granuality of c/m/atime in ns. 852 /* Granuality of c/m/atime in ns.
853 Cannot be worse than a second */ 853 Cannot be worse than a second */
@@ -1115,6 +1115,18 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
1115 __mark_inode_dirty(inode, I_DIRTY_SYNC); 1115 __mark_inode_dirty(inode, I_DIRTY_SYNC);
1116} 1116}
1117 1117
1118static inline void inode_inc_link_count(struct inode *inode)
1119{
1120 inode->i_nlink++;
1121 mark_inode_dirty(inode);
1122}
1123
1124static inline void inode_dec_link_count(struct inode *inode)
1125{
1126 inode->i_nlink--;
1127 mark_inode_dirty(inode);
1128}
1129
1118extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry); 1130extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
1119static inline void file_accessed(struct file *file) 1131static inline void file_accessed(struct file *file)
1120{ 1132{
@@ -1534,7 +1546,7 @@ extern void destroy_inode(struct inode *);
1534extern struct inode *new_inode(struct super_block *); 1546extern struct inode *new_inode(struct super_block *);
1535extern int remove_suid(struct dentry *); 1547extern int remove_suid(struct dentry *);
1536extern void remove_dquot_ref(struct super_block *, int, struct list_head *); 1548extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
1537extern struct semaphore iprune_sem; 1549extern struct mutex iprune_mutex;
1538 1550
1539extern void __insert_inode_hash(struct inode *, unsigned long hashval); 1551extern void __insert_inode_hash(struct inode *, unsigned long hashval);
1540extern void remove_inode_hash(struct inode *); 1552extern void remove_inode_hash(struct inode *);
diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h
index 0abe9d9a0069..652611a4bdcd 100644
--- a/include/linux/generic_serial.h
+++ b/include/linux/generic_serial.h
@@ -12,6 +12,8 @@
12#ifndef GENERIC_SERIAL_H 12#ifndef GENERIC_SERIAL_H
13#define GENERIC_SERIAL_H 13#define GENERIC_SERIAL_H
14 14
15#include <linux/mutex.h>
16
15struct real_driver { 17struct real_driver {
16 void (*disable_tx_interrupts) (void *); 18 void (*disable_tx_interrupts) (void *);
17 void (*enable_tx_interrupts) (void *); 19 void (*enable_tx_interrupts) (void *);
@@ -34,7 +36,7 @@ struct gs_port {
34 int xmit_head; 36 int xmit_head;
35 int xmit_tail; 37 int xmit_tail;
36 int xmit_cnt; 38 int xmit_cnt;
37 struct semaphore port_write_sem; 39 struct mutex port_write_mutex;
38 int flags; 40 int flags;
39 wait_queue_head_t open_wait; 41 wait_queue_head_t open_wait;
40 wait_queue_head_t close_wait; 42 wait_queue_head_t close_wait;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index eef5ccdcd731..fd647fde5ec1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -149,22 +149,16 @@ struct disk_attribute {
149({ \ 149({ \
150 typeof(gendiskp->dkstats->field) res = 0; \ 150 typeof(gendiskp->dkstats->field) res = 0; \
151 int i; \ 151 int i; \
152 for (i=0; i < NR_CPUS; i++) { \ 152 for_each_cpu(i) \
153 if (!cpu_possible(i)) \
154 continue; \
155 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \ 153 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
156 } \
157 res; \ 154 res; \
158}) 155})
159 156
160static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { 157static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
161 int i; 158 int i;
162 for (i=0; i < NR_CPUS; i++) { 159 for_each_cpu(i)
163 if (cpu_possible(i)) { 160 memset(per_cpu_ptr(gendiskp->dkstats, i), value,
164 memset(per_cpu_ptr(gendiskp->dkstats, i), value, 161 sizeof (struct disk_stats));
165 sizeof (struct disk_stats));
166 }
167 }
168} 162}
169 163
170#else 164#else
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dcfd2ecccb5d..92146f3b7423 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -7,11 +7,10 @@
7#define INIT_FDTABLE \ 7#define INIT_FDTABLE \
8{ \ 8{ \
9 .max_fds = NR_OPEN_DEFAULT, \ 9 .max_fds = NR_OPEN_DEFAULT, \
10 .max_fdset = __FD_SETSIZE, \ 10 .max_fdset = EMBEDDED_FD_SET_SIZE, \
11 .next_fd = 0, \
12 .fd = &init_files.fd_array[0], \ 11 .fd = &init_files.fd_array[0], \
13 .close_on_exec = &init_files.close_on_exec_init, \ 12 .close_on_exec = (fd_set *)&init_files.close_on_exec_init, \
14 .open_fds = &init_files.open_fds_init, \ 13 .open_fds = (fd_set *)&init_files.open_fds_init, \
15 .rcu = RCU_HEAD_INIT, \ 14 .rcu = RCU_HEAD_INIT, \
16 .free_files = NULL, \ 15 .free_files = NULL, \
17 .next = NULL, \ 16 .next = NULL, \
@@ -20,9 +19,10 @@
20#define INIT_FILES \ 19#define INIT_FILES \
21{ \ 20{ \
22 .count = ATOMIC_INIT(1), \ 21 .count = ATOMIC_INIT(1), \
23 .file_lock = SPIN_LOCK_UNLOCKED, \
24 .fdt = &init_files.fdtab, \ 22 .fdt = &init_files.fdtab, \
25 .fdtab = INIT_FDTABLE, \ 23 .fdtab = INIT_FDTABLE, \
24 .file_lock = SPIN_LOCK_UNLOCKED, \
25 .next_fd = 0, \
26 .close_on_exec_init = { { 0, } }, \ 26 .close_on_exec_init = { { 0, } }, \
27 .open_fds_init = { { 0, } }, \ 27 .open_fds_init = { { 0, } }, \
28 .fd_array = { NULL, } \ 28 .fd_array = { NULL, } \
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 41ee79962bb2..2ccbfb6340ba 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -28,6 +28,7 @@
28#include <linux/journal-head.h> 28#include <linux/journal-head.h>
29#include <linux/stddef.h> 29#include <linux/stddef.h>
30#include <linux/bit_spinlock.h> 30#include <linux/bit_spinlock.h>
31#include <linux/mutex.h>
31#include <asm/semaphore.h> 32#include <asm/semaphore.h>
32#endif 33#endif
33 34
@@ -575,7 +576,7 @@ struct transaction_s
575 * @j_wait_checkpoint: Wait queue to trigger checkpointing 576 * @j_wait_checkpoint: Wait queue to trigger checkpointing
576 * @j_wait_commit: Wait queue to trigger commit 577 * @j_wait_commit: Wait queue to trigger commit
577 * @j_wait_updates: Wait queue to wait for updates to complete 578 * @j_wait_updates: Wait queue to wait for updates to complete
578 * @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints 579 * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
579 * @j_head: Journal head - identifies the first unused block in the journal 580 * @j_head: Journal head - identifies the first unused block in the journal
580 * @j_tail: Journal tail - identifies the oldest still-used block in the 581 * @j_tail: Journal tail - identifies the oldest still-used block in the
581 * journal. 582 * journal.
@@ -645,7 +646,7 @@ struct journal_s
645 int j_barrier_count; 646 int j_barrier_count;
646 647
647 /* The barrier lock itself */ 648 /* The barrier lock itself */
648 struct semaphore j_barrier; 649 struct mutex j_barrier;
649 650
650 /* 651 /*
651 * Transactions: The current running transaction... 652 * Transactions: The current running transaction...
@@ -687,7 +688,7 @@ struct journal_s
687 wait_queue_head_t j_wait_updates; 688 wait_queue_head_t j_wait_updates;
688 689
689 /* Semaphore for locking against concurrent checkpoints */ 690 /* Semaphore for locking against concurrent checkpoints */
690 struct semaphore j_checkpoint_sem; 691 struct mutex j_checkpoint_mutex;
691 692
692 /* 693 /*
693 * Journal head: identifies the first unused block in the journal. 694 * Journal head: identifies the first unused block in the journal.
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3b507bf05d09..bb6e7ddee2fd 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -91,6 +91,9 @@ extern struct notifier_block *panic_notifier_list;
91extern long (*panic_blink)(long time); 91extern long (*panic_blink)(long time);
92NORET_TYPE void panic(const char * fmt, ...) 92NORET_TYPE void panic(const char * fmt, ...)
93 __attribute__ ((NORET_AND format (printf, 1, 2))); 93 __attribute__ ((NORET_AND format (printf, 1, 2)));
94extern void oops_enter(void);
95extern void oops_exit(void);
96extern int oops_may_print(void);
94fastcall NORET_TYPE void do_exit(long error_code) 97fastcall NORET_TYPE void do_exit(long error_code)
95 ATTRIB_NORET; 98 ATTRIB_NORET;
96NORET_TYPE void complete_and_exit(struct completion *, long) 99NORET_TYPE void complete_and_exit(struct completion *, long)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 669756bc20a2..778adc0fa640 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -36,6 +36,7 @@
36#include <linux/percpu.h> 36#include <linux/percpu.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/rcupdate.h> 38#include <linux/rcupdate.h>
39#include <linux/mutex.h>
39 40
40#ifdef CONFIG_KPROBES 41#ifdef CONFIG_KPROBES
41#include <asm/kprobes.h> 42#include <asm/kprobes.h>
@@ -152,7 +153,7 @@ struct kretprobe_instance {
152}; 153};
153 154
154extern spinlock_t kretprobe_lock; 155extern spinlock_t kretprobe_lock;
155extern struct semaphore kprobe_mutex; 156extern struct mutex kprobe_mutex;
156extern int arch_prepare_kprobe(struct kprobe *p); 157extern int arch_prepare_kprobe(struct kprobe *p);
157extern void arch_arm_kprobe(struct kprobe *p); 158extern void arch_arm_kprobe(struct kprobe *p);
158extern void arch_disarm_kprobe(struct kprobe *p); 159extern void arch_disarm_kprobe(struct kprobe *p);
diff --git a/include/linux/loop.h b/include/linux/loop.h
index f96506782ebe..e76c7611d6cc 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -17,6 +17,7 @@
17#include <linux/bio.h> 17#include <linux/bio.h>
18#include <linux/blkdev.h> 18#include <linux/blkdev.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/mutex.h>
20 21
21/* Possible states of device */ 22/* Possible states of device */
22enum { 23enum {
@@ -60,7 +61,7 @@ struct loop_device {
60 int lo_state; 61 int lo_state;
61 struct completion lo_done; 62 struct completion lo_done;
62 struct completion lo_bh_done; 63 struct completion lo_bh_done;
63 struct semaphore lo_ctl_mutex; 64 struct mutex lo_ctl_mutex;
64 int lo_pending; 65 int lo_pending;
65 66
66 request_queue_t *lo_queue; 67 request_queue_t *lo_queue;
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 8bcd9450d926..779e6a5744c7 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -184,6 +184,7 @@ struct fat_slot_info {
184#include <linux/string.h> 184#include <linux/string.h>
185#include <linux/nls.h> 185#include <linux/nls.h>
186#include <linux/fs.h> 186#include <linux/fs.h>
187#include <linux/mutex.h>
187 188
188struct fat_mount_options { 189struct fat_mount_options {
189 uid_t fs_uid; 190 uid_t fs_uid;
@@ -226,7 +227,7 @@ struct msdos_sb_info {
226 unsigned long max_cluster; /* maximum cluster number */ 227 unsigned long max_cluster; /* maximum cluster number */
227 unsigned long root_cluster; /* first cluster of the root directory */ 228 unsigned long root_cluster; /* first cluster of the root directory */
228 unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */ 229 unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
229 struct semaphore fat_lock; 230 struct mutex fat_lock;
230 unsigned int prev_free; /* previously allocated cluster number */ 231 unsigned int prev_free; /* previously allocated cluster number */
231 unsigned int free_clusters; /* -1 if undefined */ 232 unsigned int free_clusters; /* -1 if undefined */
232 struct fat_mount_options options; 233 struct fat_mount_options options;
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index f95d51fae733..a6ce409ec6fc 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -38,6 +38,7 @@ enum {
38#ifdef __KERNEL__ 38#ifdef __KERNEL__
39 39
40#include <linux/wait.h> 40#include <linux/wait.h>
41#include <linux/mutex.h>
41 42
42/* values for flags field */ 43/* values for flags field */
43#define NBD_READ_ONLY 0x0001 44#define NBD_READ_ONLY 0x0001
@@ -57,7 +58,7 @@ struct nbd_device {
57 struct request *active_req; 58 struct request *active_req;
58 wait_queue_head_t active_wq; 59 wait_queue_head_t active_wq;
59 60
60 struct semaphore tx_lock; 61 struct mutex tx_lock;
61 struct gendisk *disk; 62 struct gendisk *disk;
62 int blksize; 63 int blksize;
63 u64 bytesize; 64 u64 bytesize;
diff --git a/include/linux/ncp_fs_i.h b/include/linux/ncp_fs_i.h
index 415be1ec6f98..bdb4c8ae6924 100644
--- a/include/linux/ncp_fs_i.h
+++ b/include/linux/ncp_fs_i.h
@@ -19,7 +19,7 @@ struct ncp_inode_info {
19 __le32 DosDirNum; 19 __le32 DosDirNum;
20 __u8 volNumber; 20 __u8 volNumber;
21 __le32 nwattr; 21 __le32 nwattr;
22 struct semaphore open_sem; 22 struct mutex open_mutex;
23 atomic_t opened; 23 atomic_t opened;
24 int access; 24 int access;
25 int flags; 25 int flags;
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h
index cf858eb80f0b..b089d9506283 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/include/linux/ncp_fs_sb.h
@@ -11,6 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/ncp_mount.h> 12#include <linux/ncp_mount.h>
13#include <linux/net.h> 13#include <linux/net.h>
14#include <linux/mutex.h>
14 15
15#ifdef __KERNEL__ 16#ifdef __KERNEL__
16 17
@@ -51,7 +52,7 @@ struct ncp_server {
51 receive replies */ 52 receive replies */
52 53
53 int lock; /* To prevent mismatch in protocols. */ 54 int lock; /* To prevent mismatch in protocols. */
54 struct semaphore sem; 55 struct mutex mutex;
55 56
56 int current_size; /* for packet preparation */ 57 int current_size; /* for packet preparation */
57 int has_subfunction; 58 int has_subfunction;
@@ -96,7 +97,7 @@ struct ncp_server {
96 struct { 97 struct {
97 struct work_struct tq; /* STREAM/DGRAM: data/error ready */ 98 struct work_struct tq; /* STREAM/DGRAM: data/error ready */
98 struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */ 99 struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */
99 struct semaphore creq_sem; /* DGRAM only: lock accesses to rcv.creq */ 100 struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */
100 101
101 unsigned int state; /* STREAM only: receiver state */ 102 unsigned int state; /* STREAM only: receiver state */
102 struct { 103 struct {
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 5be87ba3b7ac..6df2585c0169 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -188,6 +188,8 @@ extern void device_power_up(void);
188extern void device_resume(void); 188extern void device_resume(void);
189 189
190#ifdef CONFIG_PM 190#ifdef CONFIG_PM
191extern suspend_disk_method_t pm_disk_mode;
192
191extern int device_suspend(pm_message_t state); 193extern int device_suspend(pm_message_t state);
192 194
193#define device_set_wakeup_enable(dev,val) \ 195#define device_set_wakeup_enable(dev,val) \
@@ -215,7 +217,6 @@ static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state)
215 217
216static inline void dpm_runtime_resume(struct device * dev) 218static inline void dpm_runtime_resume(struct device * dev)
217{ 219{
218
219} 220}
220 221
221#endif 222#endif
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 026969a5595c..1f2fea6640a4 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -14,6 +14,7 @@
14 14
15struct proc_dir_entry; 15struct proc_dir_entry;
16struct pt_regs; 16struct pt_regs;
17struct notifier_block;
17 18
18/* init basic kernel profiler */ 19/* init basic kernel profiler */
19void __init profile_init(void); 20void __init profile_init(void);
@@ -32,7 +33,6 @@ enum profile_type {
32 33
33#ifdef CONFIG_PROFILING 34#ifdef CONFIG_PROFILING
34 35
35struct notifier_block;
36struct task_struct; 36struct task_struct;
37struct mm_struct; 37struct mm_struct;
38 38
diff --git a/include/linux/quota.h b/include/linux/quota.h
index f33aeb22c26a..8dc2d04a103f 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -38,6 +38,7 @@
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/types.h> 39#include <linux/types.h>
40#include <linux/spinlock.h> 40#include <linux/spinlock.h>
41#include <linux/mutex.h>
41 42
42#define __DQUOT_VERSION__ "dquot_6.5.1" 43#define __DQUOT_VERSION__ "dquot_6.5.1"
43#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 44#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1
@@ -215,7 +216,7 @@ struct dquot {
215 struct list_head dq_inuse; /* List of all quotas */ 216 struct list_head dq_inuse; /* List of all quotas */
216 struct list_head dq_free; /* Free list element */ 217 struct list_head dq_free; /* Free list element */
217 struct list_head dq_dirty; /* List of dirty dquots */ 218 struct list_head dq_dirty; /* List of dirty dquots */
218 struct semaphore dq_lock; /* dquot IO lock */ 219 struct mutex dq_lock; /* dquot IO lock */
219 atomic_t dq_count; /* Use count */ 220 atomic_t dq_count; /* Use count */
220 wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */ 221 wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
221 struct super_block *dq_sb; /* superblock this applies to */ 222 struct super_block *dq_sb; /* superblock this applies to */
@@ -285,8 +286,8 @@ struct quota_format_type {
285 286
286struct quota_info { 287struct quota_info {
287 unsigned int flags; /* Flags for diskquotas on this device */ 288 unsigned int flags; /* Flags for diskquotas on this device */
288 struct semaphore dqio_sem; /* lock device while I/O in progress */ 289 struct mutex dqio_mutex; /* lock device while I/O in progress */
289 struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */ 290 struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
290 struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ 291 struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
291 struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ 292 struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
292 struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ 293 struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
index 9d5494aaac0f..3009c813d83d 100644
--- a/include/linux/raid/raid1.h
+++ b/include/linux/raid/raid1.h
@@ -130,6 +130,6 @@ struct r1bio_s {
130 * with failure when last write completes (and all failed). 130 * with failure when last write completes (and all failed).
131 * Record that bi_end_io was called with this flag... 131 * Record that bi_end_io was called with this flag...
132 */ 132 */
133#define R1BIO_Returned 4 133#define R1BIO_Returned 6
134 134
135#endif 135#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c2ec6c77874e..5673008b61e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -113,8 +113,6 @@ struct rcu_data {
113 113
114DECLARE_PER_CPU(struct rcu_data, rcu_data); 114DECLARE_PER_CPU(struct rcu_data, rcu_data);
115DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); 115DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
116extern struct rcu_ctrlblk rcu_ctrlblk;
117extern struct rcu_ctrlblk rcu_bh_ctrlblk;
118 116
119/* 117/*
120 * Increment the quiescent state counter. 118 * Increment the quiescent state counter.
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 850a974ee505..b95f6eb7254c 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,7 +4,7 @@
4 4
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <asm/semaphore.h> 7#include <linux/mutex.h>
8 8
9struct seq_operations; 9struct seq_operations;
10struct file; 10struct file;
@@ -19,7 +19,7 @@ struct seq_file {
19 size_t count; 19 size_t count;
20 loff_t index; 20 loff_t index;
21 loff_t version; 21 loff_t version;
22 struct semaphore sem; 22 struct mutex lock;
23 struct seq_operations *op; 23 struct seq_operations *op;
24 void *private; 24 void *private;
25}; 25};
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 12415dd94451..54eac8a39a4c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -234,14 +234,15 @@ extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *v
234/* linux/mm/swapfile.c */ 234/* linux/mm/swapfile.c */
235extern long total_swap_pages; 235extern long total_swap_pages;
236extern unsigned int nr_swapfiles; 236extern unsigned int nr_swapfiles;
237extern struct swap_info_struct swap_info[];
238extern void si_swapinfo(struct sysinfo *); 237extern void si_swapinfo(struct sysinfo *);
239extern swp_entry_t get_swap_page(void); 238extern swp_entry_t get_swap_page(void);
240extern swp_entry_t get_swap_page_of_type(int type); 239extern swp_entry_t get_swap_page_of_type(int);
241extern int swap_duplicate(swp_entry_t); 240extern int swap_duplicate(swp_entry_t);
242extern int valid_swaphandles(swp_entry_t, unsigned long *); 241extern int valid_swaphandles(swp_entry_t, unsigned long *);
243extern void swap_free(swp_entry_t); 242extern void swap_free(swp_entry_t);
244extern void free_swap_and_cache(swp_entry_t); 243extern void free_swap_and_cache(swp_entry_t);
244extern int swap_type_of(dev_t);
245extern unsigned int count_swap_pages(int, int);
245extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); 246extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
246extern struct swap_info_struct *get_swap_info_struct(unsigned); 247extern struct swap_info_struct *get_swap_info_struct(unsigned);
247extern int can_share_swap_page(struct page *); 248extern int can_share_swap_page(struct page *);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index f45cd74e6f24..f13f49afe198 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -24,6 +24,7 @@
24#include <linux/tty_driver.h> 24#include <linux/tty_driver.h>
25#include <linux/tty_ldisc.h> 25#include <linux/tty_ldisc.h>
26#include <linux/screen_info.h> 26#include <linux/screen_info.h>
27#include <linux/mutex.h>
27 28
28#include <asm/system.h> 29#include <asm/system.h>
29 30
@@ -231,8 +232,8 @@ struct tty_struct {
231 int canon_data; 232 int canon_data;
232 unsigned long canon_head; 233 unsigned long canon_head;
233 unsigned int canon_column; 234 unsigned int canon_column;
234 struct semaphore atomic_read; 235 struct mutex atomic_read_lock;
235 struct semaphore atomic_write; 236 struct mutex atomic_write_lock;
236 unsigned char *write_buf; 237 unsigned char *write_buf;
237 int write_cnt; 238 int write_cnt;
238 spinlock_t read_lock; 239 spinlock_t read_lock;
@@ -319,8 +320,7 @@ extern void tty_ldisc_put(int);
319extern void tty_wakeup(struct tty_struct *tty); 320extern void tty_wakeup(struct tty_struct *tty);
320extern void tty_ldisc_flush(struct tty_struct *tty); 321extern void tty_ldisc_flush(struct tty_struct *tty);
321 322
322struct semaphore; 323extern struct mutex tty_mutex;
323extern struct semaphore tty_sem;
324 324
325/* n_tty.c */ 325/* n_tty.c */
326extern struct tty_ldisc tty_ldisc_N_TTY; 326extern struct tty_ldisc tty_ldisc_N_TTY;
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 222faf97d5f9..0c6169fff366 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -7,14 +7,8 @@ extern int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *c
7extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size); 7extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
8extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size); 8extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);
9 9
10#ifdef INCLUDE_INLINE_FUNCS 10static inline int tty_insert_flip_char(struct tty_struct *tty,
11#define _INLINE_ extern 11 unsigned char ch, char flag)
12#else
13#define _INLINE_ static __inline__
14#endif
15
16_INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
17 unsigned char ch, char flag)
18{ 12{
19 struct tty_buffer *tb = tty->buf.tail; 13 struct tty_buffer *tb = tty->buf.tail;
20 if (tb && tb->active && tb->used < tb->size) { 14 if (tb && tb->active && tb->used < tb->size) {
@@ -25,7 +19,7 @@ _INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
25 return tty_insert_flip_string_flags(tty, &ch, &flag, 1); 19 return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
26} 20}
27 21
28_INLINE_ void tty_schedule_flip(struct tty_struct *tty) 22static inline void tty_schedule_flip(struct tty_struct *tty)
29{ 23{
30 unsigned long flags; 24 unsigned long flags;
31 spin_lock_irqsave(&tty->buf.lock, flags); 25 spin_lock_irqsave(&tty->buf.lock, flags);
diff --git a/include/linux/udf_fs_sb.h b/include/linux/udf_fs_sb.h
index b15ff2e99c91..80ae9ef940dc 100644
--- a/include/linux/udf_fs_sb.h
+++ b/include/linux/udf_fs_sb.h
@@ -13,7 +13,7 @@
13#ifndef _UDF_FS_SB_H 13#ifndef _UDF_FS_SB_H
14#define _UDF_FS_SB_H 1 14#define _UDF_FS_SB_H 1
15 15
16#include <asm/semaphore.h> 16#include <linux/mutex.h>
17 17
18#pragma pack(1) 18#pragma pack(1)
19 19
@@ -111,7 +111,7 @@ struct udf_sb_info
111 /* VAT inode */ 111 /* VAT inode */
112 struct inode *s_vat; 112 struct inode *s_vat;
113 113
114 struct semaphore s_alloc_sem; 114 struct mutex s_alloc_mutex;
115}; 115};
116 116
117#endif /* _UDF_FS_SB_H */ 117#endif /* _UDF_FS_SB_H */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index fab5aed8ca31..530ae3f4248c 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -73,6 +73,11 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
73int vt_waitactive(int vt); 73int vt_waitactive(int vt);
74void change_console(struct vc_data *new_vc); 74void change_console(struct vc_data *new_vc);
75void reset_vc(struct vc_data *vc); 75void reset_vc(struct vc_data *vc);
76#ifdef CONFIG_VT
77int is_console_suspend_safe(void);
78#else
79static inline int is_console_suspend_safe(void) { return 1; }
80#endif
76 81
77/* 82/*
78 * vc_screen.c shares this temporary buffer with the console write code so that 83 * vc_screen.c shares this temporary buffer with the console write code so that
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index a05cabd0fd10..405f9031af87 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -56,6 +56,7 @@ static void __init handle_initrd(void)
56 sys_chroot("."); 56 sys_chroot(".");
57 mount_devfs_fs (); 57 mount_devfs_fs ();
58 58
59 current->flags |= PF_NOFREEZE;
59 pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD); 60 pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
60 if (pid > 0) { 61 if (pid > 0) {
61 while (pid != sys_wait4(-1, NULL, 0, NULL)) 62 while (pid != sys_wait4(-1, NULL, 0, NULL))
diff --git a/init/main.c b/init/main.c
index 4c194c47395f..2714e0e7cfec 100644
--- a/init/main.c
+++ b/init/main.c
@@ -325,7 +325,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }
325#else 325#else
326 326
327#ifdef __GENERIC_PER_CPU 327#ifdef __GENERIC_PER_CPU
328unsigned long __per_cpu_offset[NR_CPUS]; 328unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
329 329
330EXPORT_SYMBOL(__per_cpu_offset); 330EXPORT_SYMBOL(__per_cpu_offset);
331 331
@@ -333,6 +333,7 @@ static void __init setup_per_cpu_areas(void)
333{ 333{
334 unsigned long size, i; 334 unsigned long size, i;
335 char *ptr; 335 char *ptr;
336 unsigned long nr_possible_cpus = num_possible_cpus();
336 337
337 /* Copy section for each CPU (we discard the original) */ 338 /* Copy section for each CPU (we discard the original) */
338 size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); 339 size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
@@ -340,12 +341,12 @@ static void __init setup_per_cpu_areas(void)
340 if (size < PERCPU_ENOUGH_ROOM) 341 if (size < PERCPU_ENOUGH_ROOM)
341 size = PERCPU_ENOUGH_ROOM; 342 size = PERCPU_ENOUGH_ROOM;
342#endif 343#endif
344 ptr = alloc_bootmem(size * nr_possible_cpus);
343 345
344 ptr = alloc_bootmem(size * NR_CPUS); 346 for_each_cpu(i) {
345
346 for (i = 0; i < NR_CPUS; i++, ptr += size) {
347 __per_cpu_offset[i] = ptr - __per_cpu_start; 347 __per_cpu_offset[i] = ptr - __per_cpu_start;
348 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); 348 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
349 ptr += size;
349 } 350 }
350} 351}
351#endif /* !__GENERIC_PER_CPU */ 352#endif /* !__GENERIC_PER_CPU */
@@ -438,6 +439,15 @@ void __init parse_early_param(void)
438 * Activate the first processor. 439 * Activate the first processor.
439 */ 440 */
440 441
442static void __init boot_cpu_init(void)
443{
444 int cpu = smp_processor_id();
445 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
446 cpu_set(cpu, cpu_online_map);
447 cpu_set(cpu, cpu_present_map);
448 cpu_set(cpu, cpu_possible_map);
449}
450
441asmlinkage void __init start_kernel(void) 451asmlinkage void __init start_kernel(void)
442{ 452{
443 char * command_line; 453 char * command_line;
@@ -447,17 +457,13 @@ asmlinkage void __init start_kernel(void)
447 * enable them 457 * enable them
448 */ 458 */
449 lock_kernel(); 459 lock_kernel();
460 boot_cpu_init();
450 page_address_init(); 461 page_address_init();
451 printk(KERN_NOTICE); 462 printk(KERN_NOTICE);
452 printk(linux_banner); 463 printk(linux_banner);
453 setup_arch(&command_line); 464 setup_arch(&command_line);
454 setup_per_cpu_areas(); 465 setup_per_cpu_areas();
455 466 smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
456 /*
457 * Mark the boot cpu "online" so that it can call console drivers in
458 * printk() and can access its per-cpu storage.
459 */
460 smp_prepare_boot_cpu();
461 467
462 /* 468 /*
463 * Set up the scheduler prior starting any interrupts (such as the 469 * Set up the scheduler prior starting any interrupts (such as the
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 12815d3f1a05..c86ee051b734 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -53,7 +53,7 @@
53 53
54#include <asm/uaccess.h> 54#include <asm/uaccess.h>
55#include <asm/atomic.h> 55#include <asm/atomic.h>
56#include <asm/semaphore.h> 56#include <linux/mutex.h>
57 57
58#define CPUSET_SUPER_MAGIC 0x27e0eb 58#define CPUSET_SUPER_MAGIC 0x27e0eb
59 59
@@ -168,63 +168,57 @@ static struct vfsmount *cpuset_mount;
168static struct super_block *cpuset_sb; 168static struct super_block *cpuset_sb;
169 169
170/* 170/*
171 * We have two global cpuset semaphores below. They can nest. 171 * We have two global cpuset mutexes below. They can nest.
172 * It is ok to first take manage_sem, then nest callback_sem. We also 172 * It is ok to first take manage_mutex, then nest callback_mutex. We also
173 * require taking task_lock() when dereferencing a tasks cpuset pointer. 173 * require taking task_lock() when dereferencing a tasks cpuset pointer.
174 * See "The task_lock() exception", at the end of this comment. 174 * See "The task_lock() exception", at the end of this comment.
175 * 175 *
176 * A task must hold both semaphores to modify cpusets. If a task 176 * A task must hold both mutexes to modify cpusets. If a task
177 * holds manage_sem, then it blocks others wanting that semaphore, 177 * holds manage_mutex, then it blocks others wanting that mutex,
178 * ensuring that it is the only task able to also acquire callback_sem 178 * ensuring that it is the only task able to also acquire callback_mutex
179 * and be able to modify cpusets. It can perform various checks on 179 * and be able to modify cpusets. It can perform various checks on
180 * the cpuset structure first, knowing nothing will change. It can 180 * the cpuset structure first, knowing nothing will change. It can
181 * also allocate memory while just holding manage_sem. While it is 181 * also allocate memory while just holding manage_mutex. While it is
182 * performing these checks, various callback routines can briefly 182 * performing these checks, various callback routines can briefly
183 * acquire callback_sem to query cpusets. Once it is ready to make 183 * acquire callback_mutex to query cpusets. Once it is ready to make
184 * the changes, it takes callback_sem, blocking everyone else. 184 * the changes, it takes callback_mutex, blocking everyone else.
185 * 185 *
186 * Calls to the kernel memory allocator can not be made while holding 186 * Calls to the kernel memory allocator can not be made while holding
187 * callback_sem, as that would risk double tripping on callback_sem 187 * callback_mutex, as that would risk double tripping on callback_mutex
188 * from one of the callbacks into the cpuset code from within 188 * from one of the callbacks into the cpuset code from within
189 * __alloc_pages(). 189 * __alloc_pages().
190 * 190 *
191 * If a task is only holding callback_sem, then it has read-only 191 * If a task is only holding callback_mutex, then it has read-only
192 * access to cpusets. 192 * access to cpusets.
193 * 193 *
194 * The task_struct fields mems_allowed and mems_generation may only 194 * The task_struct fields mems_allowed and mems_generation may only
195 * be accessed in the context of that task, so require no locks. 195 * be accessed in the context of that task, so require no locks.
196 * 196 *
197 * Any task can increment and decrement the count field without lock. 197 * Any task can increment and decrement the count field without lock.
198 * So in general, code holding manage_sem or callback_sem can't rely 198 * So in general, code holding manage_mutex or callback_mutex can't rely
199 * on the count field not changing. However, if the count goes to 199 * on the count field not changing. However, if the count goes to
200 * zero, then only attach_task(), which holds both semaphores, can 200 * zero, then only attach_task(), which holds both mutexes, can
201 * increment it again. Because a count of zero means that no tasks 201 * increment it again. Because a count of zero means that no tasks
202 * are currently attached, therefore there is no way a task attached 202 * are currently attached, therefore there is no way a task attached
203 * to that cpuset can fork (the other way to increment the count). 203 * to that cpuset can fork (the other way to increment the count).
204 * So code holding manage_sem or callback_sem can safely assume that 204 * So code holding manage_mutex or callback_mutex can safely assume that
205 * if the count is zero, it will stay zero. Similarly, if a task 205 * if the count is zero, it will stay zero. Similarly, if a task
206 * holds manage_sem or callback_sem on a cpuset with zero count, it 206 * holds manage_mutex or callback_mutex on a cpuset with zero count, it
207 * knows that the cpuset won't be removed, as cpuset_rmdir() needs 207 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
208 * both of those semaphores. 208 * both of those mutexes.
209 *
210 * A possible optimization to improve parallelism would be to make
211 * callback_sem a R/W semaphore (rwsem), allowing the callback routines
212 * to proceed in parallel, with read access, until the holder of
213 * manage_sem needed to take this rwsem for exclusive write access
214 * and modify some cpusets.
215 * 209 *
216 * The cpuset_common_file_write handler for operations that modify 210 * The cpuset_common_file_write handler for operations that modify
217 * the cpuset hierarchy holds manage_sem across the entire operation, 211 * the cpuset hierarchy holds manage_mutex across the entire operation,
218 * single threading all such cpuset modifications across the system. 212 * single threading all such cpuset modifications across the system.
219 * 213 *
220 * The cpuset_common_file_read() handlers only hold callback_sem across 214 * The cpuset_common_file_read() handlers only hold callback_mutex across
221 * small pieces of code, such as when reading out possibly multi-word 215 * small pieces of code, such as when reading out possibly multi-word
222 * cpumasks and nodemasks. 216 * cpumasks and nodemasks.
223 * 217 *
224 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't 218 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
225 * (usually) take either semaphore. These are the two most performance 219 * (usually) take either mutex. These are the two most performance
226 * critical pieces of code here. The exception occurs on cpuset_exit(), 220 * critical pieces of code here. The exception occurs on cpuset_exit(),
227 * when a task in a notify_on_release cpuset exits. Then manage_sem 221 * when a task in a notify_on_release cpuset exits. Then manage_mutex
228 * is taken, and if the cpuset count is zero, a usermode call made 222 * is taken, and if the cpuset count is zero, a usermode call made
229 * to /sbin/cpuset_release_agent with the name of the cpuset (path 223 * to /sbin/cpuset_release_agent with the name of the cpuset (path
230 * relative to the root of cpuset file system) as the argument. 224 * relative to the root of cpuset file system) as the argument.
@@ -242,9 +236,9 @@ static struct super_block *cpuset_sb;
242 * 236 *
243 * The need for this exception arises from the action of attach_task(), 237 * The need for this exception arises from the action of attach_task(),
244 * which overwrites one tasks cpuset pointer with another. It does 238 * which overwrites one tasks cpuset pointer with another. It does
245 * so using both semaphores, however there are several performance 239 * so using both mutexes, however there are several performance
246 * critical places that need to reference task->cpuset without the 240 * critical places that need to reference task->cpuset without the
247 * expense of grabbing a system global semaphore. Therefore except as 241 * expense of grabbing a system global mutex. Therefore except as
248 * noted below, when dereferencing or, as in attach_task(), modifying 242 * noted below, when dereferencing or, as in attach_task(), modifying
249 * a tasks cpuset pointer we use task_lock(), which acts on a spinlock 243 * a tasks cpuset pointer we use task_lock(), which acts on a spinlock
250 * (task->alloc_lock) already in the task_struct routinely used for 244 * (task->alloc_lock) already in the task_struct routinely used for
@@ -256,8 +250,8 @@ static struct super_block *cpuset_sb;
256 * the routine cpuset_update_task_memory_state(). 250 * the routine cpuset_update_task_memory_state().
257 */ 251 */
258 252
259static DECLARE_MUTEX(manage_sem); 253static DEFINE_MUTEX(manage_mutex);
260static DECLARE_MUTEX(callback_sem); 254static DEFINE_MUTEX(callback_mutex);
261 255
262/* 256/*
263 * A couple of forward declarations required, due to cyclic reference loop: 257 * A couple of forward declarations required, due to cyclic reference loop:
@@ -432,7 +426,7 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
432} 426}
433 427
434/* 428/*
435 * Call with manage_sem held. Writes path of cpuset into buf. 429 * Call with manage_mutex held. Writes path of cpuset into buf.
436 * Returns 0 on success, -errno on error. 430 * Returns 0 on success, -errno on error.
437 */ 431 */
438 432
@@ -484,11 +478,11 @@ static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
484 * status of the /sbin/cpuset_release_agent task, so no sense holding 478 * status of the /sbin/cpuset_release_agent task, so no sense holding
485 * our caller up for that. 479 * our caller up for that.
486 * 480 *
487 * When we had only one cpuset semaphore, we had to call this 481 * When we had only one cpuset mutex, we had to call this
488 * without holding it, to avoid deadlock when call_usermodehelper() 482 * without holding it, to avoid deadlock when call_usermodehelper()
489 * allocated memory. With two locks, we could now call this while 483 * allocated memory. With two locks, we could now call this while
490 * holding manage_sem, but we still don't, so as to minimize 484 * holding manage_mutex, but we still don't, so as to minimize
491 * the time manage_sem is held. 485 * the time manage_mutex is held.
492 */ 486 */
493 487
494static void cpuset_release_agent(const char *pathbuf) 488static void cpuset_release_agent(const char *pathbuf)
@@ -520,15 +514,15 @@ static void cpuset_release_agent(const char *pathbuf)
520 * cs is notify_on_release() and now both the user count is zero and 514 * cs is notify_on_release() and now both the user count is zero and
521 * the list of children is empty, prepare cpuset path in a kmalloc'd 515 * the list of children is empty, prepare cpuset path in a kmalloc'd
522 * buffer, to be returned via ppathbuf, so that the caller can invoke 516 * buffer, to be returned via ppathbuf, so that the caller can invoke
523 * cpuset_release_agent() with it later on, once manage_sem is dropped. 517 * cpuset_release_agent() with it later on, once manage_mutex is dropped.
524 * Call here with manage_sem held. 518 * Call here with manage_mutex held.
525 * 519 *
526 * This check_for_release() routine is responsible for kmalloc'ing 520 * This check_for_release() routine is responsible for kmalloc'ing
527 * pathbuf. The above cpuset_release_agent() is responsible for 521 * pathbuf. The above cpuset_release_agent() is responsible for
528 * kfree'ing pathbuf. The caller of these routines is responsible 522 * kfree'ing pathbuf. The caller of these routines is responsible
529 * for providing a pathbuf pointer, initialized to NULL, then 523 * for providing a pathbuf pointer, initialized to NULL, then
530 * calling check_for_release() with manage_sem held and the address 524 * calling check_for_release() with manage_mutex held and the address
531 * of the pathbuf pointer, then dropping manage_sem, then calling 525 * of the pathbuf pointer, then dropping manage_mutex, then calling
532 * cpuset_release_agent() with pathbuf, as set by check_for_release(). 526 * cpuset_release_agent() with pathbuf, as set by check_for_release().
533 */ 527 */
534 528
@@ -559,7 +553,7 @@ static void check_for_release(struct cpuset *cs, char **ppathbuf)
559 * One way or another, we guarantee to return some non-empty subset 553 * One way or another, we guarantee to return some non-empty subset
560 * of cpu_online_map. 554 * of cpu_online_map.
561 * 555 *
562 * Call with callback_sem held. 556 * Call with callback_mutex held.
563 */ 557 */
564 558
565static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) 559static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
@@ -583,7 +577,7 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
583 * One way or another, we guarantee to return some non-empty subset 577 * One way or another, we guarantee to return some non-empty subset
584 * of node_online_map. 578 * of node_online_map.
585 * 579 *
586 * Call with callback_sem held. 580 * Call with callback_mutex held.
587 */ 581 */
588 582
589static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) 583static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
@@ -608,12 +602,12 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
608 * current->cpuset if a task has its memory placement changed. 602 * current->cpuset if a task has its memory placement changed.
609 * Do not call this routine if in_interrupt(). 603 * Do not call this routine if in_interrupt().
610 * 604 *
611 * Call without callback_sem or task_lock() held. May be called 605 * Call without callback_mutex or task_lock() held. May be called
612 * with or without manage_sem held. Doesn't need task_lock to guard 606 * with or without manage_mutex held. Doesn't need task_lock to guard
613 * against another task changing a non-NULL cpuset pointer to NULL, 607 * against another task changing a non-NULL cpuset pointer to NULL,
614 * as that is only done by a task on itself, and if the current task 608 * as that is only done by a task on itself, and if the current task
615 * is here, it is not simultaneously in the exit code NULL'ing its 609 * is here, it is not simultaneously in the exit code NULL'ing its
616 * cpuset pointer. This routine also might acquire callback_sem and 610 * cpuset pointer. This routine also might acquire callback_mutex and
617 * current->mm->mmap_sem during call. 611 * current->mm->mmap_sem during call.
618 * 612 *
619 * Reading current->cpuset->mems_generation doesn't need task_lock 613 * Reading current->cpuset->mems_generation doesn't need task_lock
@@ -658,13 +652,13 @@ void cpuset_update_task_memory_state(void)
658 } 652 }
659 653
660 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { 654 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
661 down(&callback_sem); 655 mutex_lock(&callback_mutex);
662 task_lock(tsk); 656 task_lock(tsk);
663 cs = tsk->cpuset; /* Maybe changed when task not locked */ 657 cs = tsk->cpuset; /* Maybe changed when task not locked */
664 guarantee_online_mems(cs, &tsk->mems_allowed); 658 guarantee_online_mems(cs, &tsk->mems_allowed);
665 tsk->cpuset_mems_generation = cs->mems_generation; 659 tsk->cpuset_mems_generation = cs->mems_generation;
666 task_unlock(tsk); 660 task_unlock(tsk);
667 up(&callback_sem); 661 mutex_unlock(&callback_mutex);
668 mpol_rebind_task(tsk, &tsk->mems_allowed); 662 mpol_rebind_task(tsk, &tsk->mems_allowed);
669 } 663 }
670} 664}
@@ -674,7 +668,7 @@ void cpuset_update_task_memory_state(void)
674 * 668 *
675 * One cpuset is a subset of another if all its allowed CPUs and 669 * One cpuset is a subset of another if all its allowed CPUs and
676 * Memory Nodes are a subset of the other, and its exclusive flags 670 * Memory Nodes are a subset of the other, and its exclusive flags
677 * are only set if the other's are set. Call holding manage_sem. 671 * are only set if the other's are set. Call holding manage_mutex.
678 */ 672 */
679 673
680static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) 674static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -692,7 +686,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
692 * If we replaced the flag and mask values of the current cpuset 686 * If we replaced the flag and mask values of the current cpuset
693 * (cur) with those values in the trial cpuset (trial), would 687 * (cur) with those values in the trial cpuset (trial), would
694 * our various subset and exclusive rules still be valid? Presumes 688 * our various subset and exclusive rules still be valid? Presumes
695 * manage_sem held. 689 * manage_mutex held.
696 * 690 *
697 * 'cur' is the address of an actual, in-use cpuset. Operations 691 * 'cur' is the address of an actual, in-use cpuset. Operations
698 * such as list traversal that depend on the actual address of the 692 * such as list traversal that depend on the actual address of the
@@ -746,7 +740,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
746 * exclusive child cpusets 740 * exclusive child cpusets
747 * Build these two partitions by calling partition_sched_domains 741 * Build these two partitions by calling partition_sched_domains
748 * 742 *
749 * Call with manage_sem held. May nest a call to the 743 * Call with manage_mutex held. May nest a call to the
750 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 744 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
751 */ 745 */
752 746
@@ -792,7 +786,7 @@ static void update_cpu_domains(struct cpuset *cur)
792} 786}
793 787
794/* 788/*
795 * Call with manage_sem held. May take callback_sem during call. 789 * Call with manage_mutex held. May take callback_mutex during call.
796 */ 790 */
797 791
798static int update_cpumask(struct cpuset *cs, char *buf) 792static int update_cpumask(struct cpuset *cs, char *buf)
@@ -811,9 +805,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
811 if (retval < 0) 805 if (retval < 0)
812 return retval; 806 return retval;
813 cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); 807 cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
814 down(&callback_sem); 808 mutex_lock(&callback_mutex);
815 cs->cpus_allowed = trialcs.cpus_allowed; 809 cs->cpus_allowed = trialcs.cpus_allowed;
816 up(&callback_sem); 810 mutex_unlock(&callback_mutex);
817 if (is_cpu_exclusive(cs) && !cpus_unchanged) 811 if (is_cpu_exclusive(cs) && !cpus_unchanged)
818 update_cpu_domains(cs); 812 update_cpu_domains(cs);
819 return 0; 813 return 0;
@@ -827,7 +821,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
827 * the cpuset is marked 'memory_migrate', migrate the tasks 821 * the cpuset is marked 'memory_migrate', migrate the tasks
828 * pages to the new memory. 822 * pages to the new memory.
829 * 823 *
830 * Call with manage_sem held. May take callback_sem during call. 824 * Call with manage_mutex held. May take callback_mutex during call.
831 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, 825 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
832 * lock each such tasks mm->mmap_sem, scan its vma's and rebind 826 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
833 * their mempolicies to the cpusets new mems_allowed. 827 * their mempolicies to the cpusets new mems_allowed.
@@ -862,11 +856,11 @@ static int update_nodemask(struct cpuset *cs, char *buf)
862 if (retval < 0) 856 if (retval < 0)
863 goto done; 857 goto done;
864 858
865 down(&callback_sem); 859 mutex_lock(&callback_mutex);
866 cs->mems_allowed = trialcs.mems_allowed; 860 cs->mems_allowed = trialcs.mems_allowed;
867 atomic_inc(&cpuset_mems_generation); 861 atomic_inc(&cpuset_mems_generation);
868 cs->mems_generation = atomic_read(&cpuset_mems_generation); 862 cs->mems_generation = atomic_read(&cpuset_mems_generation);
869 up(&callback_sem); 863 mutex_unlock(&callback_mutex);
870 864
871 set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */ 865 set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */
872 866
@@ -922,7 +916,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
922 * tasklist_lock. Forks can happen again now - the mpol_copy() 916 * tasklist_lock. Forks can happen again now - the mpol_copy()
923 * cpuset_being_rebound check will catch such forks, and rebind 917 * cpuset_being_rebound check will catch such forks, and rebind
924 * their vma mempolicies too. Because we still hold the global 918 * their vma mempolicies too. Because we still hold the global
925 * cpuset manage_sem, we know that no other rebind effort will 919 * cpuset manage_mutex, we know that no other rebind effort will
926 * be contending for the global variable cpuset_being_rebound. 920 * be contending for the global variable cpuset_being_rebound.
927 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 921 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
928 * is idempotent. Also migrate pages in each mm to new nodes. 922 * is idempotent. Also migrate pages in each mm to new nodes.
@@ -948,7 +942,7 @@ done:
948} 942}
949 943
950/* 944/*
951 * Call with manage_sem held. 945 * Call with manage_mutex held.
952 */ 946 */
953 947
954static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) 948static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
@@ -967,7 +961,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
967 * cs: the cpuset to update 961 * cs: the cpuset to update
968 * buf: the buffer where we read the 0 or 1 962 * buf: the buffer where we read the 0 or 1
969 * 963 *
970 * Call with manage_sem held. 964 * Call with manage_mutex held.
971 */ 965 */
972 966
973static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) 967static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
@@ -989,12 +983,12 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
989 return err; 983 return err;
990 cpu_exclusive_changed = 984 cpu_exclusive_changed =
991 (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); 985 (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
992 down(&callback_sem); 986 mutex_lock(&callback_mutex);
993 if (turning_on) 987 if (turning_on)
994 set_bit(bit, &cs->flags); 988 set_bit(bit, &cs->flags);
995 else 989 else
996 clear_bit(bit, &cs->flags); 990 clear_bit(bit, &cs->flags);
997 up(&callback_sem); 991 mutex_unlock(&callback_mutex);
998 992
999 if (cpu_exclusive_changed) 993 if (cpu_exclusive_changed)
1000 update_cpu_domains(cs); 994 update_cpu_domains(cs);
@@ -1104,7 +1098,7 @@ static int fmeter_getrate(struct fmeter *fmp)
1104 * writing the path of the old cpuset in 'ppathbuf' if it needs to be 1098 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
1105 * notified on release. 1099 * notified on release.
1106 * 1100 *
1107 * Call holding manage_sem. May take callback_sem and task_lock of 1101 * Call holding manage_mutex. May take callback_mutex and task_lock of
1108 * the task 'pid' during call. 1102 * the task 'pid' during call.
1109 */ 1103 */
1110 1104
@@ -1144,13 +1138,13 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
1144 get_task_struct(tsk); 1138 get_task_struct(tsk);
1145 } 1139 }
1146 1140
1147 down(&callback_sem); 1141 mutex_lock(&callback_mutex);
1148 1142
1149 task_lock(tsk); 1143 task_lock(tsk);
1150 oldcs = tsk->cpuset; 1144 oldcs = tsk->cpuset;
1151 if (!oldcs) { 1145 if (!oldcs) {
1152 task_unlock(tsk); 1146 task_unlock(tsk);
1153 up(&callback_sem); 1147 mutex_unlock(&callback_mutex);
1154 put_task_struct(tsk); 1148 put_task_struct(tsk);
1155 return -ESRCH; 1149 return -ESRCH;
1156 } 1150 }
@@ -1164,7 +1158,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
1164 from = oldcs->mems_allowed; 1158 from = oldcs->mems_allowed;
1165 to = cs->mems_allowed; 1159 to = cs->mems_allowed;
1166 1160
1167 up(&callback_sem); 1161 mutex_unlock(&callback_mutex);
1168 1162
1169 mm = get_task_mm(tsk); 1163 mm = get_task_mm(tsk);
1170 if (mm) { 1164 if (mm) {
@@ -1221,7 +1215,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
1221 } 1215 }
1222 buffer[nbytes] = 0; /* nul-terminate */ 1216 buffer[nbytes] = 0; /* nul-terminate */
1223 1217
1224 down(&manage_sem); 1218 mutex_lock(&manage_mutex);
1225 1219
1226 if (is_removed(cs)) { 1220 if (is_removed(cs)) {
1227 retval = -ENODEV; 1221 retval = -ENODEV;
@@ -1264,7 +1258,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
1264 if (retval == 0) 1258 if (retval == 0)
1265 retval = nbytes; 1259 retval = nbytes;
1266out2: 1260out2:
1267 up(&manage_sem); 1261 mutex_unlock(&manage_mutex);
1268 cpuset_release_agent(pathbuf); 1262 cpuset_release_agent(pathbuf);
1269out1: 1263out1:
1270 kfree(buffer); 1264 kfree(buffer);
@@ -1304,9 +1298,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1304{ 1298{
1305 cpumask_t mask; 1299 cpumask_t mask;
1306 1300
1307 down(&callback_sem); 1301 mutex_lock(&callback_mutex);
1308 mask = cs->cpus_allowed; 1302 mask = cs->cpus_allowed;
1309 up(&callback_sem); 1303 mutex_unlock(&callback_mutex);
1310 1304
1311 return cpulist_scnprintf(page, PAGE_SIZE, mask); 1305 return cpulist_scnprintf(page, PAGE_SIZE, mask);
1312} 1306}
@@ -1315,9 +1309,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1315{ 1309{
1316 nodemask_t mask; 1310 nodemask_t mask;
1317 1311
1318 down(&callback_sem); 1312 mutex_lock(&callback_mutex);
1319 mask = cs->mems_allowed; 1313 mask = cs->mems_allowed;
1320 up(&callback_sem); 1314 mutex_unlock(&callback_mutex);
1321 1315
1322 return nodelist_scnprintf(page, PAGE_SIZE, mask); 1316 return nodelist_scnprintf(page, PAGE_SIZE, mask);
1323} 1317}
@@ -1598,7 +1592,7 @@ static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
1598 * Handle an open on 'tasks' file. Prepare a buffer listing the 1592 * Handle an open on 'tasks' file. Prepare a buffer listing the
1599 * process id's of tasks currently attached to the cpuset being opened. 1593 * process id's of tasks currently attached to the cpuset being opened.
1600 * 1594 *
1601 * Does not require any specific cpuset semaphores, and does not take any. 1595 * Does not require any specific cpuset mutexes, and does not take any.
1602 */ 1596 */
1603static int cpuset_tasks_open(struct inode *unused, struct file *file) 1597static int cpuset_tasks_open(struct inode *unused, struct file *file)
1604{ 1598{
@@ -1754,7 +1748,7 @@ static int cpuset_populate_dir(struct dentry *cs_dentry)
1754 * name: name of the new cpuset. Will be strcpy'ed. 1748 * name: name of the new cpuset. Will be strcpy'ed.
1755 * mode: mode to set on new inode 1749 * mode: mode to set on new inode
1756 * 1750 *
1757 * Must be called with the semaphore on the parent inode held 1751 * Must be called with the mutex on the parent inode held
1758 */ 1752 */
1759 1753
1760static long cpuset_create(struct cpuset *parent, const char *name, int mode) 1754static long cpuset_create(struct cpuset *parent, const char *name, int mode)
@@ -1766,7 +1760,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
1766 if (!cs) 1760 if (!cs)
1767 return -ENOMEM; 1761 return -ENOMEM;
1768 1762
1769 down(&manage_sem); 1763 mutex_lock(&manage_mutex);
1770 cpuset_update_task_memory_state(); 1764 cpuset_update_task_memory_state();
1771 cs->flags = 0; 1765 cs->flags = 0;
1772 if (notify_on_release(parent)) 1766 if (notify_on_release(parent))
@@ -1782,28 +1776,28 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
1782 1776
1783 cs->parent = parent; 1777 cs->parent = parent;
1784 1778
1785 down(&callback_sem); 1779 mutex_lock(&callback_mutex);
1786 list_add(&cs->sibling, &cs->parent->children); 1780 list_add(&cs->sibling, &cs->parent->children);
1787 number_of_cpusets++; 1781 number_of_cpusets++;
1788 up(&callback_sem); 1782 mutex_unlock(&callback_mutex);
1789 1783
1790 err = cpuset_create_dir(cs, name, mode); 1784 err = cpuset_create_dir(cs, name, mode);
1791 if (err < 0) 1785 if (err < 0)
1792 goto err; 1786 goto err;
1793 1787
1794 /* 1788 /*
1795 * Release manage_sem before cpuset_populate_dir() because it 1789 * Release manage_mutex before cpuset_populate_dir() because it
1796 * will down() this new directory's i_mutex and if we race with 1790 * will down() this new directory's i_mutex and if we race with
1797 * another mkdir, we might deadlock. 1791 * another mkdir, we might deadlock.
1798 */ 1792 */
1799 up(&manage_sem); 1793 mutex_unlock(&manage_mutex);
1800 1794
1801 err = cpuset_populate_dir(cs->dentry); 1795 err = cpuset_populate_dir(cs->dentry);
1802 /* If err < 0, we have a half-filled directory - oh well ;) */ 1796 /* If err < 0, we have a half-filled directory - oh well ;) */
1803 return 0; 1797 return 0;
1804err: 1798err:
1805 list_del(&cs->sibling); 1799 list_del(&cs->sibling);
1806 up(&manage_sem); 1800 mutex_unlock(&manage_mutex);
1807 kfree(cs); 1801 kfree(cs);
1808 return err; 1802 return err;
1809} 1803}
@@ -1825,18 +1819,18 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1825 1819
1826 /* the vfs holds both inode->i_mutex already */ 1820 /* the vfs holds both inode->i_mutex already */
1827 1821
1828 down(&manage_sem); 1822 mutex_lock(&manage_mutex);
1829 cpuset_update_task_memory_state(); 1823 cpuset_update_task_memory_state();
1830 if (atomic_read(&cs->count) > 0) { 1824 if (atomic_read(&cs->count) > 0) {
1831 up(&manage_sem); 1825 mutex_unlock(&manage_mutex);
1832 return -EBUSY; 1826 return -EBUSY;
1833 } 1827 }
1834 if (!list_empty(&cs->children)) { 1828 if (!list_empty(&cs->children)) {
1835 up(&manage_sem); 1829 mutex_unlock(&manage_mutex);
1836 return -EBUSY; 1830 return -EBUSY;
1837 } 1831 }
1838 parent = cs->parent; 1832 parent = cs->parent;
1839 down(&callback_sem); 1833 mutex_lock(&callback_mutex);
1840 set_bit(CS_REMOVED, &cs->flags); 1834 set_bit(CS_REMOVED, &cs->flags);
1841 if (is_cpu_exclusive(cs)) 1835 if (is_cpu_exclusive(cs))
1842 update_cpu_domains(cs); 1836 update_cpu_domains(cs);
@@ -1848,10 +1842,10 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1848 cpuset_d_remove_dir(d); 1842 cpuset_d_remove_dir(d);
1849 dput(d); 1843 dput(d);
1850 number_of_cpusets--; 1844 number_of_cpusets--;
1851 up(&callback_sem); 1845 mutex_unlock(&callback_mutex);
1852 if (list_empty(&parent->children)) 1846 if (list_empty(&parent->children))
1853 check_for_release(parent, &pathbuf); 1847 check_for_release(parent, &pathbuf);
1854 up(&manage_sem); 1848 mutex_unlock(&manage_mutex);
1855 cpuset_release_agent(pathbuf); 1849 cpuset_release_agent(pathbuf);
1856 return 0; 1850 return 0;
1857} 1851}
@@ -1960,19 +1954,19 @@ void cpuset_fork(struct task_struct *child)
1960 * Description: Detach cpuset from @tsk and release it. 1954 * Description: Detach cpuset from @tsk and release it.
1961 * 1955 *
1962 * Note that cpusets marked notify_on_release force every task in 1956 * Note that cpusets marked notify_on_release force every task in
1963 * them to take the global manage_sem semaphore when exiting. 1957 * them to take the global manage_mutex mutex when exiting.
1964 * This could impact scaling on very large systems. Be reluctant to 1958 * This could impact scaling on very large systems. Be reluctant to
1965 * use notify_on_release cpusets where very high task exit scaling 1959 * use notify_on_release cpusets where very high task exit scaling
1966 * is required on large systems. 1960 * is required on large systems.
1967 * 1961 *
1968 * Don't even think about derefencing 'cs' after the cpuset use count 1962 * Don't even think about derefencing 'cs' after the cpuset use count
1969 * goes to zero, except inside a critical section guarded by manage_sem 1963 * goes to zero, except inside a critical section guarded by manage_mutex
1970 * or callback_sem. Otherwise a zero cpuset use count is a license to 1964 * or callback_mutex. Otherwise a zero cpuset use count is a license to
1971 * any other task to nuke the cpuset immediately, via cpuset_rmdir(). 1965 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
1972 * 1966 *
1973 * This routine has to take manage_sem, not callback_sem, because 1967 * This routine has to take manage_mutex, not callback_mutex, because
1974 * it is holding that semaphore while calling check_for_release(), 1968 * it is holding that mutex while calling check_for_release(),
1975 * which calls kmalloc(), so can't be called holding callback__sem(). 1969 * which calls kmalloc(), so can't be called holding callback_mutex().
1976 * 1970 *
1977 * We don't need to task_lock() this reference to tsk->cpuset, 1971 * We don't need to task_lock() this reference to tsk->cpuset,
1978 * because tsk is already marked PF_EXITING, so attach_task() won't 1972 * because tsk is already marked PF_EXITING, so attach_task() won't
@@ -2022,10 +2016,10 @@ void cpuset_exit(struct task_struct *tsk)
2022 if (notify_on_release(cs)) { 2016 if (notify_on_release(cs)) {
2023 char *pathbuf = NULL; 2017 char *pathbuf = NULL;
2024 2018
2025 down(&manage_sem); 2019 mutex_lock(&manage_mutex);
2026 if (atomic_dec_and_test(&cs->count)) 2020 if (atomic_dec_and_test(&cs->count))
2027 check_for_release(cs, &pathbuf); 2021 check_for_release(cs, &pathbuf);
2028 up(&manage_sem); 2022 mutex_unlock(&manage_mutex);
2029 cpuset_release_agent(pathbuf); 2023 cpuset_release_agent(pathbuf);
2030 } else { 2024 } else {
2031 atomic_dec(&cs->count); 2025 atomic_dec(&cs->count);
@@ -2046,11 +2040,11 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
2046{ 2040{
2047 cpumask_t mask; 2041 cpumask_t mask;
2048 2042
2049 down(&callback_sem); 2043 mutex_lock(&callback_mutex);
2050 task_lock(tsk); 2044 task_lock(tsk);
2051 guarantee_online_cpus(tsk->cpuset, &mask); 2045 guarantee_online_cpus(tsk->cpuset, &mask);
2052 task_unlock(tsk); 2046 task_unlock(tsk);
2053 up(&callback_sem); 2047 mutex_unlock(&callback_mutex);
2054 2048
2055 return mask; 2049 return mask;
2056} 2050}
@@ -2074,11 +2068,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2074{ 2068{
2075 nodemask_t mask; 2069 nodemask_t mask;
2076 2070
2077 down(&callback_sem); 2071 mutex_lock(&callback_mutex);
2078 task_lock(tsk); 2072 task_lock(tsk);
2079 guarantee_online_mems(tsk->cpuset, &mask); 2073 guarantee_online_mems(tsk->cpuset, &mask);
2080 task_unlock(tsk); 2074 task_unlock(tsk);
2081 up(&callback_sem); 2075 mutex_unlock(&callback_mutex);
2082 2076
2083 return mask; 2077 return mask;
2084} 2078}
@@ -2104,7 +2098,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
2104 2098
2105/* 2099/*
2106 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive 2100 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
2107 * ancestor to the specified cpuset. Call holding callback_sem. 2101 * ancestor to the specified cpuset. Call holding callback_mutex.
2108 * If no ancestor is mem_exclusive (an unusual configuration), then 2102 * If no ancestor is mem_exclusive (an unusual configuration), then
2109 * returns the root cpuset. 2103 * returns the root cpuset.
2110 */ 2104 */
@@ -2131,12 +2125,12 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2131 * GFP_KERNEL allocations are not so marked, so can escape to the 2125 * GFP_KERNEL allocations are not so marked, so can escape to the
2132 * nearest mem_exclusive ancestor cpuset. 2126 * nearest mem_exclusive ancestor cpuset.
2133 * 2127 *
2134 * Scanning up parent cpusets requires callback_sem. The __alloc_pages() 2128 * Scanning up parent cpusets requires callback_mutex. The __alloc_pages()
2135 * routine only calls here with __GFP_HARDWALL bit _not_ set if 2129 * routine only calls here with __GFP_HARDWALL bit _not_ set if
2136 * it's a GFP_KERNEL allocation, and all nodes in the current tasks 2130 * it's a GFP_KERNEL allocation, and all nodes in the current tasks
2137 * mems_allowed came up empty on the first pass over the zonelist. 2131 * mems_allowed came up empty on the first pass over the zonelist.
2138 * So only GFP_KERNEL allocations, if all nodes in the cpuset are 2132 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
2139 * short of memory, might require taking the callback_sem semaphore. 2133 * short of memory, might require taking the callback_mutex mutex.
2140 * 2134 *
2141 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() 2135 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
2142 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing 2136 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
@@ -2171,31 +2165,31 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
2171 return 1; 2165 return 1;
2172 2166
2173 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 2167 /* Not hardwall and node outside mems_allowed: scan up cpusets */
2174 down(&callback_sem); 2168 mutex_lock(&callback_mutex);
2175 2169
2176 task_lock(current); 2170 task_lock(current);
2177 cs = nearest_exclusive_ancestor(current->cpuset); 2171 cs = nearest_exclusive_ancestor(current->cpuset);
2178 task_unlock(current); 2172 task_unlock(current);
2179 2173
2180 allowed = node_isset(node, cs->mems_allowed); 2174 allowed = node_isset(node, cs->mems_allowed);
2181 up(&callback_sem); 2175 mutex_unlock(&callback_mutex);
2182 return allowed; 2176 return allowed;
2183} 2177}
2184 2178
2185/** 2179/**
2186 * cpuset_lock - lock out any changes to cpuset structures 2180 * cpuset_lock - lock out any changes to cpuset structures
2187 * 2181 *
2188 * The out of memory (oom) code needs to lock down cpusets 2182 * The out of memory (oom) code needs to mutex_lock cpusets
2189 * from being changed while it scans the tasklist looking for a 2183 * from being changed while it scans the tasklist looking for a
2190 * task in an overlapping cpuset. Expose callback_sem via this 2184 * task in an overlapping cpuset. Expose callback_mutex via this
2191 * cpuset_lock() routine, so the oom code can lock it, before 2185 * cpuset_lock() routine, so the oom code can lock it, before
2192 * locking the task list. The tasklist_lock is a spinlock, so 2186 * locking the task list. The tasklist_lock is a spinlock, so
2193 * must be taken inside callback_sem. 2187 * must be taken inside callback_mutex.
2194 */ 2188 */
2195 2189
2196void cpuset_lock(void) 2190void cpuset_lock(void)
2197{ 2191{
2198 down(&callback_sem); 2192 mutex_lock(&callback_mutex);
2199} 2193}
2200 2194
2201/** 2195/**
@@ -2206,7 +2200,7 @@ void cpuset_lock(void)
2206 2200
2207void cpuset_unlock(void) 2201void cpuset_unlock(void)
2208{ 2202{
2209 up(&callback_sem); 2203 mutex_unlock(&callback_mutex);
2210} 2204}
2211 2205
2212/** 2206/**
@@ -2218,7 +2212,7 @@ void cpuset_unlock(void)
2218 * determine if task @p's memory usage might impact the memory 2212 * determine if task @p's memory usage might impact the memory
2219 * available to the current task. 2213 * available to the current task.
2220 * 2214 *
2221 * Call while holding callback_sem. 2215 * Call while holding callback_mutex.
2222 **/ 2216 **/
2223 2217
2224int cpuset_excl_nodes_overlap(const struct task_struct *p) 2218int cpuset_excl_nodes_overlap(const struct task_struct *p)
@@ -2289,7 +2283,7 @@ void __cpuset_memory_pressure_bump(void)
2289 * - Used for /proc/<pid>/cpuset. 2283 * - Used for /proc/<pid>/cpuset.
2290 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 2284 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2291 * doesn't really matter if tsk->cpuset changes after we read it, 2285 * doesn't really matter if tsk->cpuset changes after we read it,
2292 * and we take manage_sem, keeping attach_task() from changing it 2286 * and we take manage_mutex, keeping attach_task() from changing it
2293 * anyway. 2287 * anyway.
2294 */ 2288 */
2295 2289
@@ -2305,7 +2299,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
2305 return -ENOMEM; 2299 return -ENOMEM;
2306 2300
2307 tsk = m->private; 2301 tsk = m->private;
2308 down(&manage_sem); 2302 mutex_lock(&manage_mutex);
2309 cs = tsk->cpuset; 2303 cs = tsk->cpuset;
2310 if (!cs) { 2304 if (!cs) {
2311 retval = -EINVAL; 2305 retval = -EINVAL;
@@ -2318,7 +2312,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v)
2318 seq_puts(m, buf); 2312 seq_puts(m, buf);
2319 seq_putc(m, '\n'); 2313 seq_putc(m, '\n');
2320out: 2314out:
2321 up(&manage_sem); 2315 mutex_unlock(&manage_mutex);
2322 kfree(buf); 2316 kfree(buf);
2323 return retval; 2317 return retval;
2324} 2318}
diff --git a/kernel/exit.c b/kernel/exit.c
index d1e8d500a7e1..8037405e136e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -345,9 +345,9 @@ void daemonize(const char *name, ...)
345 exit_mm(current); 345 exit_mm(current);
346 346
347 set_special_pids(1, 1); 347 set_special_pids(1, 1);
348 down(&tty_sem); 348 mutex_lock(&tty_mutex);
349 current->signal->tty = NULL; 349 current->signal->tty = NULL;
350 up(&tty_sem); 350 mutex_unlock(&tty_mutex);
351 351
352 /* Block and flush all signals */ 352 /* Block and flush all signals */
353 sigfillset(&blocked); 353 sigfillset(&blocked);
diff --git a/kernel/fork.c b/kernel/fork.c
index 9bd7b65ee418..c79ae0b19a49 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -607,12 +607,12 @@ static struct files_struct *alloc_files(void)
607 atomic_set(&newf->count, 1); 607 atomic_set(&newf->count, 1);
608 608
609 spin_lock_init(&newf->file_lock); 609 spin_lock_init(&newf->file_lock);
610 newf->next_fd = 0;
610 fdt = &newf->fdtab; 611 fdt = &newf->fdtab;
611 fdt->next_fd = 0;
612 fdt->max_fds = NR_OPEN_DEFAULT; 612 fdt->max_fds = NR_OPEN_DEFAULT;
613 fdt->max_fdset = __FD_SETSIZE; 613 fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
614 fdt->close_on_exec = &newf->close_on_exec_init; 614 fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
615 fdt->open_fds = &newf->open_fds_init; 615 fdt->open_fds = (fd_set *)&newf->open_fds_init;
616 fdt->fd = &newf->fd_array[0]; 616 fdt->fd = &newf->fd_array[0];
617 INIT_RCU_HEAD(&fdt->rcu); 617 INIT_RCU_HEAD(&fdt->rcu);
618 fdt->free_files = NULL; 618 fdt->free_files = NULL;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fef1af8a73ce..1fb9f753ef60 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,7 +48,7 @@
48static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 48static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
49static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 49static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
50 50
51DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ 51DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
52DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ 52DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
53static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 53static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
54 54
@@ -460,7 +460,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
460 } 460 }
461 461
462 p->nmissed = 0; 462 p->nmissed = 0;
463 down(&kprobe_mutex); 463 mutex_lock(&kprobe_mutex);
464 old_p = get_kprobe(p->addr); 464 old_p = get_kprobe(p->addr);
465 if (old_p) { 465 if (old_p) {
466 ret = register_aggr_kprobe(old_p, p); 466 ret = register_aggr_kprobe(old_p, p);
@@ -477,7 +477,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
477 arch_arm_kprobe(p); 477 arch_arm_kprobe(p);
478 478
479out: 479out:
480 up(&kprobe_mutex); 480 mutex_unlock(&kprobe_mutex);
481 481
482 if (ret && probed_mod) 482 if (ret && probed_mod)
483 module_put(probed_mod); 483 module_put(probed_mod);
@@ -496,10 +496,10 @@ void __kprobes unregister_kprobe(struct kprobe *p)
496 struct kprobe *old_p, *list_p; 496 struct kprobe *old_p, *list_p;
497 int cleanup_p; 497 int cleanup_p;
498 498
499 down(&kprobe_mutex); 499 mutex_lock(&kprobe_mutex);
500 old_p = get_kprobe(p->addr); 500 old_p = get_kprobe(p->addr);
501 if (unlikely(!old_p)) { 501 if (unlikely(!old_p)) {
502 up(&kprobe_mutex); 502 mutex_unlock(&kprobe_mutex);
503 return; 503 return;
504 } 504 }
505 if (p != old_p) { 505 if (p != old_p) {
@@ -507,7 +507,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
507 if (list_p == p) 507 if (list_p == p)
508 /* kprobe p is a valid probe */ 508 /* kprobe p is a valid probe */
509 goto valid_p; 509 goto valid_p;
510 up(&kprobe_mutex); 510 mutex_unlock(&kprobe_mutex);
511 return; 511 return;
512 } 512 }
513valid_p: 513valid_p:
@@ -523,7 +523,7 @@ valid_p:
523 cleanup_p = 0; 523 cleanup_p = 0;
524 } 524 }
525 525
526 up(&kprobe_mutex); 526 mutex_unlock(&kprobe_mutex);
527 527
528 synchronize_sched(); 528 synchronize_sched();
529 if (p->mod_refcounted && 529 if (p->mod_refcounted &&
diff --git a/kernel/kthread.c b/kernel/kthread.c
index e75950a1092c..6a5373868a98 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -12,6 +12,7 @@
12#include <linux/unistd.h> 12#include <linux/unistd.h>
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/mutex.h>
15#include <asm/semaphore.h> 16#include <asm/semaphore.h>
16 17
17/* 18/*
@@ -41,7 +42,7 @@ struct kthread_stop_info
41 42
42/* Thread stopping is done by setthing this var: lock serializes 43/* Thread stopping is done by setthing this var: lock serializes
43 * multiple kthread_stop calls. */ 44 * multiple kthread_stop calls. */
44static DECLARE_MUTEX(kthread_stop_lock); 45static DEFINE_MUTEX(kthread_stop_lock);
45static struct kthread_stop_info kthread_stop_info; 46static struct kthread_stop_info kthread_stop_info;
46 47
47int kthread_should_stop(void) 48int kthread_should_stop(void)
@@ -173,7 +174,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
173{ 174{
174 int ret; 175 int ret;
175 176
176 down(&kthread_stop_lock); 177 mutex_lock(&kthread_stop_lock);
177 178
178 /* It could exit after stop_info.k set, but before wake_up_process. */ 179 /* It could exit after stop_info.k set, but before wake_up_process. */
179 get_task_struct(k); 180 get_task_struct(k);
@@ -194,7 +195,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
194 wait_for_completion(&kthread_stop_info.done); 195 wait_for_completion(&kthread_stop_info.done);
195 kthread_stop_info.k = NULL; 196 kthread_stop_info.k = NULL;
196 ret = kthread_stop_info.err; 197 ret = kthread_stop_info.err;
197 up(&kthread_stop_lock); 198 mutex_unlock(&kthread_stop_lock);
198 199
199 return ret; 200 return ret;
200} 201}
diff --git a/kernel/module.c b/kernel/module.c
index 77764f22f021..fb404299082e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -39,6 +39,7 @@
39#include <linux/device.h> 39#include <linux/device.h>
40#include <linux/string.h> 40#include <linux/string.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/mutex.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
43#include <asm/semaphore.h> 44#include <asm/semaphore.h>
44#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
@@ -60,18 +61,18 @@
60static DEFINE_SPINLOCK(modlist_lock); 61static DEFINE_SPINLOCK(modlist_lock);
61 62
62/* List of modules, protected by module_mutex AND modlist_lock */ 63/* List of modules, protected by module_mutex AND modlist_lock */
63static DECLARE_MUTEX(module_mutex); 64static DEFINE_MUTEX(module_mutex);
64static LIST_HEAD(modules); 65static LIST_HEAD(modules);
65 66
66static DECLARE_MUTEX(notify_mutex); 67static DEFINE_MUTEX(notify_mutex);
67static struct notifier_block * module_notify_list; 68static struct notifier_block * module_notify_list;
68 69
69int register_module_notifier(struct notifier_block * nb) 70int register_module_notifier(struct notifier_block * nb)
70{ 71{
71 int err; 72 int err;
72 down(&notify_mutex); 73 mutex_lock(&notify_mutex);
73 err = notifier_chain_register(&module_notify_list, nb); 74 err = notifier_chain_register(&module_notify_list, nb);
74 up(&notify_mutex); 75 mutex_unlock(&notify_mutex);
75 return err; 76 return err;
76} 77}
77EXPORT_SYMBOL(register_module_notifier); 78EXPORT_SYMBOL(register_module_notifier);
@@ -79,9 +80,9 @@ EXPORT_SYMBOL(register_module_notifier);
79int unregister_module_notifier(struct notifier_block * nb) 80int unregister_module_notifier(struct notifier_block * nb)
80{ 81{
81 int err; 82 int err;
82 down(&notify_mutex); 83 mutex_lock(&notify_mutex);
83 err = notifier_chain_unregister(&module_notify_list, nb); 84 err = notifier_chain_unregister(&module_notify_list, nb);
84 up(&notify_mutex); 85 mutex_unlock(&notify_mutex);
85 return err; 86 return err;
86} 87}
87EXPORT_SYMBOL(unregister_module_notifier); 88EXPORT_SYMBOL(unregister_module_notifier);
@@ -601,7 +602,7 @@ static void free_module(struct module *mod);
601static void wait_for_zero_refcount(struct module *mod) 602static void wait_for_zero_refcount(struct module *mod)
602{ 603{
603 /* Since we might sleep for some time, drop the semaphore first */ 604 /* Since we might sleep for some time, drop the semaphore first */
604 up(&module_mutex); 605 mutex_unlock(&module_mutex);
605 for (;;) { 606 for (;;) {
606 DEBUGP("Looking at refcount...\n"); 607 DEBUGP("Looking at refcount...\n");
607 set_current_state(TASK_UNINTERRUPTIBLE); 608 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -610,7 +611,7 @@ static void wait_for_zero_refcount(struct module *mod)
610 schedule(); 611 schedule();
611 } 612 }
612 current->state = TASK_RUNNING; 613 current->state = TASK_RUNNING;
613 down(&module_mutex); 614 mutex_lock(&module_mutex);
614} 615}
615 616
616asmlinkage long 617asmlinkage long
@@ -627,7 +628,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
627 return -EFAULT; 628 return -EFAULT;
628 name[MODULE_NAME_LEN-1] = '\0'; 629 name[MODULE_NAME_LEN-1] = '\0';
629 630
630 if (down_interruptible(&module_mutex) != 0) 631 if (mutex_lock_interruptible(&module_mutex) != 0)
631 return -EINTR; 632 return -EINTR;
632 633
633 mod = find_module(name); 634 mod = find_module(name);
@@ -676,14 +677,14 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
676 677
677 /* Final destruction now noone is using it. */ 678 /* Final destruction now noone is using it. */
678 if (mod->exit != NULL) { 679 if (mod->exit != NULL) {
679 up(&module_mutex); 680 mutex_unlock(&module_mutex);
680 mod->exit(); 681 mod->exit();
681 down(&module_mutex); 682 mutex_lock(&module_mutex);
682 } 683 }
683 free_module(mod); 684 free_module(mod);
684 685
685 out: 686 out:
686 up(&module_mutex); 687 mutex_unlock(&module_mutex);
687 return ret; 688 return ret;
688} 689}
689 690
@@ -1972,13 +1973,13 @@ sys_init_module(void __user *umod,
1972 return -EPERM; 1973 return -EPERM;
1973 1974
1974 /* Only one module load at a time, please */ 1975 /* Only one module load at a time, please */
1975 if (down_interruptible(&module_mutex) != 0) 1976 if (mutex_lock_interruptible(&module_mutex) != 0)
1976 return -EINTR; 1977 return -EINTR;
1977 1978
1978 /* Do all the hard work */ 1979 /* Do all the hard work */
1979 mod = load_module(umod, len, uargs); 1980 mod = load_module(umod, len, uargs);
1980 if (IS_ERR(mod)) { 1981 if (IS_ERR(mod)) {
1981 up(&module_mutex); 1982 mutex_unlock(&module_mutex);
1982 return PTR_ERR(mod); 1983 return PTR_ERR(mod);
1983 } 1984 }
1984 1985
@@ -1987,11 +1988,11 @@ sys_init_module(void __user *umod,
1987 stop_machine_run(__link_module, mod, NR_CPUS); 1988 stop_machine_run(__link_module, mod, NR_CPUS);
1988 1989
1989 /* Drop lock so they can recurse */ 1990 /* Drop lock so they can recurse */
1990 up(&module_mutex); 1991 mutex_unlock(&module_mutex);
1991 1992
1992 down(&notify_mutex); 1993 mutex_lock(&notify_mutex);
1993 notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod); 1994 notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod);
1994 up(&notify_mutex); 1995 mutex_unlock(&notify_mutex);
1995 1996
1996 /* Start the module */ 1997 /* Start the module */
1997 if (mod->init != NULL) 1998 if (mod->init != NULL)
@@ -2006,15 +2007,15 @@ sys_init_module(void __user *umod,
2006 mod->name); 2007 mod->name);
2007 else { 2008 else {
2008 module_put(mod); 2009 module_put(mod);
2009 down(&module_mutex); 2010 mutex_lock(&module_mutex);
2010 free_module(mod); 2011 free_module(mod);
2011 up(&module_mutex); 2012 mutex_unlock(&module_mutex);
2012 } 2013 }
2013 return ret; 2014 return ret;
2014 } 2015 }
2015 2016
2016 /* Now it's a first class citizen! */ 2017 /* Now it's a first class citizen! */
2017 down(&module_mutex); 2018 mutex_lock(&module_mutex);
2018 mod->state = MODULE_STATE_LIVE; 2019 mod->state = MODULE_STATE_LIVE;
2019 /* Drop initial reference. */ 2020 /* Drop initial reference. */
2020 module_put(mod); 2021 module_put(mod);
@@ -2022,7 +2023,7 @@ sys_init_module(void __user *umod,
2022 mod->module_init = NULL; 2023 mod->module_init = NULL;
2023 mod->init_size = 0; 2024 mod->init_size = 0;
2024 mod->init_text_size = 0; 2025 mod->init_text_size = 0;
2025 up(&module_mutex); 2026 mutex_unlock(&module_mutex);
2026 2027
2027 return 0; 2028 return 0;
2028} 2029}
@@ -2112,7 +2113,7 @@ struct module *module_get_kallsym(unsigned int symnum,
2112{ 2113{
2113 struct module *mod; 2114 struct module *mod;
2114 2115
2115 down(&module_mutex); 2116 mutex_lock(&module_mutex);
2116 list_for_each_entry(mod, &modules, list) { 2117 list_for_each_entry(mod, &modules, list) {
2117 if (symnum < mod->num_symtab) { 2118 if (symnum < mod->num_symtab) {
2118 *value = mod->symtab[symnum].st_value; 2119 *value = mod->symtab[symnum].st_value;
@@ -2120,12 +2121,12 @@ struct module *module_get_kallsym(unsigned int symnum,
2120 strncpy(namebuf, 2121 strncpy(namebuf,
2121 mod->strtab + mod->symtab[symnum].st_name, 2122 mod->strtab + mod->symtab[symnum].st_name,
2122 127); 2123 127);
2123 up(&module_mutex); 2124 mutex_unlock(&module_mutex);
2124 return mod; 2125 return mod;
2125 } 2126 }
2126 symnum -= mod->num_symtab; 2127 symnum -= mod->num_symtab;
2127 } 2128 }
2128 up(&module_mutex); 2129 mutex_unlock(&module_mutex);
2129 return NULL; 2130 return NULL;
2130} 2131}
2131 2132
@@ -2168,7 +2169,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
2168 struct list_head *i; 2169 struct list_head *i;
2169 loff_t n = 0; 2170 loff_t n = 0;
2170 2171
2171 down(&module_mutex); 2172 mutex_lock(&module_mutex);
2172 list_for_each(i, &modules) { 2173 list_for_each(i, &modules) {
2173 if (n++ == *pos) 2174 if (n++ == *pos)
2174 break; 2175 break;
@@ -2189,7 +2190,7 @@ static void *m_next(struct seq_file *m, void *p, loff_t *pos)
2189 2190
2190static void m_stop(struct seq_file *m, void *p) 2191static void m_stop(struct seq_file *m, void *p)
2191{ 2192{
2192 up(&module_mutex); 2193 mutex_unlock(&module_mutex);
2193} 2194}
2194 2195
2195static int m_show(struct seq_file *m, void *p) 2196static int m_show(struct seq_file *m, void *p)
diff --git a/kernel/panic.c b/kernel/panic.c
index 126dc43f1c74..acd95adddb93 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -20,10 +20,13 @@
20#include <linux/nmi.h> 20#include <linux/nmi.h>
21#include <linux/kexec.h> 21#include <linux/kexec.h>
22 22
23int panic_timeout;
24int panic_on_oops; 23int panic_on_oops;
25int tainted; 24int tainted;
25static int pause_on_oops;
26static int pause_on_oops_flag;
27static DEFINE_SPINLOCK(pause_on_oops_lock);
26 28
29int panic_timeout;
27EXPORT_SYMBOL(panic_timeout); 30EXPORT_SYMBOL(panic_timeout);
28 31
29struct notifier_block *panic_notifier_list; 32struct notifier_block *panic_notifier_list;
@@ -174,3 +177,95 @@ void add_taint(unsigned flag)
174 tainted |= flag; 177 tainted |= flag;
175} 178}
176EXPORT_SYMBOL(add_taint); 179EXPORT_SYMBOL(add_taint);
180
181static int __init pause_on_oops_setup(char *str)
182{
183 pause_on_oops = simple_strtoul(str, NULL, 0);
184 return 1;
185}
186__setup("pause_on_oops=", pause_on_oops_setup);
187
188static void spin_msec(int msecs)
189{
190 int i;
191
192 for (i = 0; i < msecs; i++) {
193 touch_nmi_watchdog();
194 mdelay(1);
195 }
196}
197
198/*
199 * It just happens that oops_enter() and oops_exit() are identically
200 * implemented...
201 */
202static void do_oops_enter_exit(void)
203{
204 unsigned long flags;
205 static int spin_counter;
206
207 if (!pause_on_oops)
208 return;
209
210 spin_lock_irqsave(&pause_on_oops_lock, flags);
211 if (pause_on_oops_flag == 0) {
212 /* This CPU may now print the oops message */
213 pause_on_oops_flag = 1;
214 } else {
215 /* We need to stall this CPU */
216 if (!spin_counter) {
217 /* This CPU gets to do the counting */
218 spin_counter = pause_on_oops;
219 do {
220 spin_unlock(&pause_on_oops_lock);
221 spin_msec(MSEC_PER_SEC);
222 spin_lock(&pause_on_oops_lock);
223 } while (--spin_counter);
224 pause_on_oops_flag = 0;
225 } else {
226 /* This CPU waits for a different one */
227 while (spin_counter) {
228 spin_unlock(&pause_on_oops_lock);
229 spin_msec(1);
230 spin_lock(&pause_on_oops_lock);
231 }
232 }
233 }
234 spin_unlock_irqrestore(&pause_on_oops_lock, flags);
235}
236
237/*
238 * Return true if the calling CPU is allowed to print oops-related info. This
239 * is a bit racy..
240 */
241int oops_may_print(void)
242{
243 return pause_on_oops_flag == 0;
244}
245
246/*
247 * Called when the architecture enters its oops handler, before it prints
248 * anything. If this is the first CPU to oops, and it's oopsing the first time
249 * then let it proceed.
250 *
251 * This is all enabled by the pause_on_oops kernel boot option. We do all this
252 * to ensure that oopses don't scroll off the screen. It has the side-effect
253 * of preventing later-oopsing CPUs from mucking up the display, too.
254 *
255 * It turns out that the CPU which is allowed to print ends up pausing for the
256 * right duration, whereas all the other CPUs pause for twice as long: once in
257 * oops_enter(), once in oops_exit().
258 */
259void oops_enter(void)
260{
261 do_oops_enter_exit();
262}
263
264/*
265 * Called when the architecture exits its oops handler, after printing
266 * everything.
267 */
268void oops_exit(void)
269{
270 do_oops_enter_exit();
271}
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index fa895fc2ecf5..9944379360b5 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -35,6 +35,7 @@
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/time.h> 37#include <linux/time.h>
38#include <linux/mutex.h>
38 39
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/semaphore.h> 41#include <asm/semaphore.h>
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 04be7d0d96a7..8d0af3d37a4b 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -5,7 +5,7 @@ endif
5 5
6obj-y := main.o process.o console.o 6obj-y := main.o process.o console.o
7obj-$(CONFIG_PM_LEGACY) += pm.o 7obj-$(CONFIG_PM_LEGACY) += pm.o
8obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o 8obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o swap.o user.o
9 9
10obj-$(CONFIG_SUSPEND_SMP) += smp.o 10obj-$(CONFIG_SUSPEND_SMP) += smp.o
11 11
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0b43847dc980..81d4d982f3f0 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,17 +22,6 @@
22#include "power.h" 22#include "power.h"
23 23
24 24
25extern suspend_disk_method_t pm_disk_mode;
26
27extern int swsusp_shrink_memory(void);
28extern int swsusp_suspend(void);
29extern int swsusp_write(struct pbe *pblist, unsigned int nr_pages);
30extern int swsusp_check(void);
31extern int swsusp_read(struct pbe **pblist_ptr);
32extern void swsusp_close(void);
33extern int swsusp_resume(void);
34
35
36static int noresume = 0; 25static int noresume = 0;
37char resume_file[256] = CONFIG_PM_STD_PARTITION; 26char resume_file[256] = CONFIG_PM_STD_PARTITION;
38dev_t swsusp_resume_device; 27dev_t swsusp_resume_device;
@@ -70,10 +59,6 @@ static void power_down(suspend_disk_method_t mode)
70 while(1); 59 while(1);
71} 60}
72 61
73
74static int in_suspend __nosavedata = 0;
75
76
77static inline void platform_finish(void) 62static inline void platform_finish(void)
78{ 63{
79 if (pm_disk_mode == PM_DISK_PLATFORM) { 64 if (pm_disk_mode == PM_DISK_PLATFORM) {
@@ -87,7 +72,6 @@ static int prepare_processes(void)
87 int error; 72 int error;
88 73
89 pm_prepare_console(); 74 pm_prepare_console();
90 sys_sync();
91 disable_nonboot_cpus(); 75 disable_nonboot_cpus();
92 76
93 if (freeze_processes()) { 77 if (freeze_processes()) {
@@ -145,7 +129,7 @@ int pm_suspend_disk(void)
145 if (in_suspend) { 129 if (in_suspend) {
146 device_resume(); 130 device_resume();
147 pr_debug("PM: writing image.\n"); 131 pr_debug("PM: writing image.\n");
148 error = swsusp_write(pagedir_nosave, nr_copy_pages); 132 error = swsusp_write();
149 if (!error) 133 if (!error)
150 power_down(pm_disk_mode); 134 power_down(pm_disk_mode);
151 else { 135 else {
@@ -216,7 +200,7 @@ static int software_resume(void)
216 200
217 pr_debug("PM: Reading swsusp image.\n"); 201 pr_debug("PM: Reading swsusp image.\n");
218 202
219 if ((error = swsusp_read(&pagedir_nosave))) { 203 if ((error = swsusp_read())) {
220 swsusp_free(); 204 swsusp_free();
221 goto Thaw; 205 goto Thaw;
222 } 206 }
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 9cb235cba4a9..ee371f50ccaa 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -103,7 +103,7 @@ static int suspend_prepare(suspend_state_t state)
103} 103}
104 104
105 105
106static int suspend_enter(suspend_state_t state) 106int suspend_enter(suspend_state_t state)
107{ 107{
108 int error = 0; 108 int error = 0;
109 unsigned long flags; 109 unsigned long flags;
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 33c508e857dd..0f6908cce1dd 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -25,6 +25,7 @@
25#include <linux/pm.h> 25#include <linux/pm.h>
26#include <linux/pm_legacy.h> 26#include <linux/pm_legacy.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/mutex.h>
28 29
29int pm_active; 30int pm_active;
30 31
@@ -40,7 +41,7 @@ int pm_active;
40 * until a resume but that will be fine. 41 * until a resume but that will be fine.
41 */ 42 */
42 43
43static DECLARE_MUTEX(pm_devs_lock); 44static DEFINE_MUTEX(pm_devs_lock);
44static LIST_HEAD(pm_devs); 45static LIST_HEAD(pm_devs);
45 46
46/** 47/**
@@ -67,9 +68,9 @@ struct pm_dev *pm_register(pm_dev_t type,
67 dev->id = id; 68 dev->id = id;
68 dev->callback = callback; 69 dev->callback = callback;
69 70
70 down(&pm_devs_lock); 71 mutex_lock(&pm_devs_lock);
71 list_add(&dev->entry, &pm_devs); 72 list_add(&dev->entry, &pm_devs);
72 up(&pm_devs_lock); 73 mutex_unlock(&pm_devs_lock);
73 } 74 }
74 return dev; 75 return dev;
75} 76}
@@ -85,9 +86,9 @@ struct pm_dev *pm_register(pm_dev_t type,
85void pm_unregister(struct pm_dev *dev) 86void pm_unregister(struct pm_dev *dev)
86{ 87{
87 if (dev) { 88 if (dev) {
88 down(&pm_devs_lock); 89 mutex_lock(&pm_devs_lock);
89 list_del(&dev->entry); 90 list_del(&dev->entry);
90 up(&pm_devs_lock); 91 mutex_unlock(&pm_devs_lock);
91 92
92 kfree(dev); 93 kfree(dev);
93 } 94 }
@@ -118,7 +119,7 @@ void pm_unregister_all(pm_callback callback)
118 if (!callback) 119 if (!callback)
119 return; 120 return;
120 121
121 down(&pm_devs_lock); 122 mutex_lock(&pm_devs_lock);
122 entry = pm_devs.next; 123 entry = pm_devs.next;
123 while (entry != &pm_devs) { 124 while (entry != &pm_devs) {
124 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); 125 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
@@ -126,7 +127,7 @@ void pm_unregister_all(pm_callback callback)
126 if (dev->callback == callback) 127 if (dev->callback == callback)
127 __pm_unregister(dev); 128 __pm_unregister(dev);
128 } 129 }
129 up(&pm_devs_lock); 130 mutex_unlock(&pm_devs_lock);
130} 131}
131 132
132/** 133/**
@@ -234,7 +235,7 @@ int pm_send_all(pm_request_t rqst, void *data)
234{ 235{
235 struct list_head *entry; 236 struct list_head *entry;
236 237
237 down(&pm_devs_lock); 238 mutex_lock(&pm_devs_lock);
238 entry = pm_devs.next; 239 entry = pm_devs.next;
239 while (entry != &pm_devs) { 240 while (entry != &pm_devs) {
240 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); 241 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
@@ -246,13 +247,13 @@ int pm_send_all(pm_request_t rqst, void *data)
246 */ 247 */
247 if (rqst == PM_SUSPEND) 248 if (rqst == PM_SUSPEND)
248 pm_undo_all(dev); 249 pm_undo_all(dev);
249 up(&pm_devs_lock); 250 mutex_unlock(&pm_devs_lock);
250 return status; 251 return status;
251 } 252 }
252 } 253 }
253 entry = entry->next; 254 entry = entry->next;
254 } 255 }
255 up(&pm_devs_lock); 256 mutex_unlock(&pm_devs_lock);
256 return 0; 257 return 0;
257} 258}
258 259
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 388dba680841..f06f12f21767 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -8,6 +8,7 @@ struct swsusp_info {
8 int cpus; 8 int cpus;
9 unsigned long image_pages; 9 unsigned long image_pages;
10 unsigned long pages; 10 unsigned long pages;
11 unsigned long size;
11} __attribute__((aligned(PAGE_SIZE))); 12} __attribute__((aligned(PAGE_SIZE)));
12 13
13 14
@@ -37,21 +38,79 @@ extern struct subsystem power_subsys;
37/* References to section boundaries */ 38/* References to section boundaries */
38extern const void __nosave_begin, __nosave_end; 39extern const void __nosave_begin, __nosave_end;
39 40
40extern unsigned int nr_copy_pages;
41extern struct pbe *pagedir_nosave; 41extern struct pbe *pagedir_nosave;
42 42
43/* Preferred image size in bytes (default 500 MB) */ 43/* Preferred image size in bytes (default 500 MB) */
44extern unsigned long image_size; 44extern unsigned long image_size;
45extern int in_suspend;
46extern dev_t swsusp_resume_device;
45 47
46extern asmlinkage int swsusp_arch_suspend(void); 48extern asmlinkage int swsusp_arch_suspend(void);
47extern asmlinkage int swsusp_arch_resume(void); 49extern asmlinkage int swsusp_arch_resume(void);
48 50
49extern unsigned int count_data_pages(void); 51extern unsigned int count_data_pages(void);
50extern void free_pagedir(struct pbe *pblist); 52
51extern void release_eaten_pages(void); 53struct snapshot_handle {
52extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed); 54 loff_t offset;
55 unsigned int page;
56 unsigned int page_offset;
57 unsigned int prev;
58 struct pbe *pbe;
59 void *buffer;
60 unsigned int buf_offset;
61};
62
63#define data_of(handle) ((handle).buffer + (handle).buf_offset)
64
65extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
66extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
67int snapshot_image_loaded(struct snapshot_handle *handle);
68
69#define SNAPSHOT_IOC_MAGIC '3'
70#define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1)
71#define SNAPSHOT_UNFREEZE _IO(SNAPSHOT_IOC_MAGIC, 2)
72#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
73#define SNAPSHOT_ATOMIC_RESTORE _IO(SNAPSHOT_IOC_MAGIC, 4)
74#define SNAPSHOT_FREE _IO(SNAPSHOT_IOC_MAGIC, 5)
75#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
76#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
77#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
78#define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9)
79#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
80#define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11)
81#define SNAPSHOT_IOC_MAXNR 11
82
83/**
84 * The bitmap is used for tracing allocated swap pages
85 *
86 * The entire bitmap consists of a number of bitmap_page
87 * structures linked with the help of the .next member.
88 * Thus each page can be allocated individually, so we only
89 * need to make 0-order memory allocations to create
90 * the bitmap.
91 */
92
93#define BITMAP_PAGE_SIZE (PAGE_SIZE - sizeof(void *))
94#define BITMAP_PAGE_CHUNKS (BITMAP_PAGE_SIZE / sizeof(long))
95#define BITS_PER_CHUNK (sizeof(long) * 8)
96#define BITMAP_PAGE_BITS (BITMAP_PAGE_CHUNKS * BITS_PER_CHUNK)
97
98struct bitmap_page {
99 unsigned long chunks[BITMAP_PAGE_CHUNKS];
100 struct bitmap_page *next;
101};
102
103extern void free_bitmap(struct bitmap_page *bitmap);
104extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
105extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
106extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
107
108extern int swsusp_check(void);
109extern int swsusp_shrink_memory(void);
53extern void swsusp_free(void); 110extern void swsusp_free(void);
54extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed); 111extern int swsusp_suspend(void);
55extern unsigned int snapshot_nr_pages(void); 112extern int swsusp_resume(void);
56extern struct pbe *snapshot_pblist(void); 113extern int swsusp_read(void);
57extern void snapshot_pblist_set(struct pbe *pblist); 114extern int swsusp_write(void);
115extern void swsusp_close(void);
116extern int suspend_enter(suspend_state_t state);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 28de118f7a0b..8ac7c35fad77 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -12,11 +12,12 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/suspend.h> 13#include <linux/suspend.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/syscalls.h>
15 16
16/* 17/*
17 * Timeout for stopping processes 18 * Timeout for stopping processes
18 */ 19 */
19#define TIMEOUT (6 * HZ) 20#define TIMEOUT (20 * HZ)
20 21
21 22
22static inline int freezeable(struct task_struct * p) 23static inline int freezeable(struct task_struct * p)
@@ -54,38 +55,62 @@ void refrigerator(void)
54 current->state = save; 55 current->state = save;
55} 56}
56 57
58static inline void freeze_process(struct task_struct *p)
59{
60 unsigned long flags;
61
62 if (!freezing(p)) {
63 freeze(p);
64 spin_lock_irqsave(&p->sighand->siglock, flags);
65 signal_wake_up(p, 0);
66 spin_unlock_irqrestore(&p->sighand->siglock, flags);
67 }
68}
69
57/* 0 = success, else # of processes that we failed to stop */ 70/* 0 = success, else # of processes that we failed to stop */
58int freeze_processes(void) 71int freeze_processes(void)
59{ 72{
60 int todo; 73 int todo, nr_user, user_frozen;
61 unsigned long start_time; 74 unsigned long start_time;
62 struct task_struct *g, *p; 75 struct task_struct *g, *p;
63 unsigned long flags; 76 unsigned long flags;
64 77
65 printk( "Stopping tasks: " ); 78 printk( "Stopping tasks: " );
66 start_time = jiffies; 79 start_time = jiffies;
80 user_frozen = 0;
67 do { 81 do {
68 todo = 0; 82 nr_user = todo = 0;
69 read_lock(&tasklist_lock); 83 read_lock(&tasklist_lock);
70 do_each_thread(g, p) { 84 do_each_thread(g, p) {
71 if (!freezeable(p)) 85 if (!freezeable(p))
72 continue; 86 continue;
73 if (frozen(p)) 87 if (frozen(p))
74 continue; 88 continue;
75 89 if (p->mm && !(p->flags & PF_BORROWED_MM)) {
76 freeze(p); 90 /* The task is a user-space one.
77 spin_lock_irqsave(&p->sighand->siglock, flags); 91 * Freeze it unless there's a vfork completion
78 signal_wake_up(p, 0); 92 * pending
79 spin_unlock_irqrestore(&p->sighand->siglock, flags); 93 */
80 todo++; 94 if (!p->vfork_done)
95 freeze_process(p);
96 nr_user++;
97 } else {
98 /* Freeze only if the user space is frozen */
99 if (user_frozen)
100 freeze_process(p);
101 todo++;
102 }
81 } while_each_thread(g, p); 103 } while_each_thread(g, p);
82 read_unlock(&tasklist_lock); 104 read_unlock(&tasklist_lock);
105 todo += nr_user;
106 if (!user_frozen && !nr_user) {
107 sys_sync();
108 start_time = jiffies;
109 }
110 user_frozen = !nr_user;
83 yield(); /* Yield is okay here */ 111 yield(); /* Yield is okay here */
84 if (todo && time_after(jiffies, start_time + TIMEOUT)) { 112 if (todo && time_after(jiffies, start_time + TIMEOUT))
85 printk( "\n" );
86 printk(KERN_ERR " stopping tasks failed (%d tasks remaining)\n", todo );
87 break; 113 break;
88 }
89 } while(todo); 114 } while(todo);
90 115
91 /* This does not unfreeze processes that are already frozen 116 /* This does not unfreeze processes that are already frozen
@@ -94,8 +119,14 @@ int freeze_processes(void)
94 * but it cleans up leftover PF_FREEZE requests. 119 * but it cleans up leftover PF_FREEZE requests.
95 */ 120 */
96 if (todo) { 121 if (todo) {
122 printk( "\n" );
123 printk(KERN_ERR " stopping tasks timed out "
124 "after %d seconds (%d tasks remaining):\n",
125 TIMEOUT / HZ, todo);
97 read_lock(&tasklist_lock); 126 read_lock(&tasklist_lock);
98 do_each_thread(g, p) 127 do_each_thread(g, p) {
128 if (freezeable(p) && !frozen(p))
129 printk(KERN_ERR " %s\n", p->comm);
99 if (freezing(p)) { 130 if (freezing(p)) {
100 pr_debug(" clean up: %s\n", p->comm); 131 pr_debug(" clean up: %s\n", p->comm);
101 p->flags &= ~PF_FREEZE; 132 p->flags &= ~PF_FREEZE;
@@ -103,7 +134,7 @@ int freeze_processes(void)
103 recalc_sigpending_tsk(p); 134 recalc_sigpending_tsk(p);
104 spin_unlock_irqrestore(&p->sighand->siglock, flags); 135 spin_unlock_irqrestore(&p->sighand->siglock, flags);
105 } 136 }
106 while_each_thread(g, p); 137 } while_each_thread(g, p);
107 read_unlock(&tasklist_lock); 138 read_unlock(&tasklist_lock);
108 return todo; 139 return todo;
109 } 140 }
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 8d5a5986d621..c5863d02c89e 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12 12
13#include <linux/version.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/mm.h> 15#include <linux/mm.h>
15#include <linux/suspend.h> 16#include <linux/suspend.h>
@@ -34,7 +35,9 @@
34#include "power.h" 35#include "power.h"
35 36
36struct pbe *pagedir_nosave; 37struct pbe *pagedir_nosave;
37unsigned int nr_copy_pages; 38static unsigned int nr_copy_pages;
39static unsigned int nr_meta_pages;
40static unsigned long *buffer;
38 41
39#ifdef CONFIG_HIGHMEM 42#ifdef CONFIG_HIGHMEM
40unsigned int count_highmem_pages(void) 43unsigned int count_highmem_pages(void)
@@ -80,7 +83,7 @@ static int save_highmem_zone(struct zone *zone)
80 void *kaddr; 83 void *kaddr;
81 unsigned long pfn = zone_pfn + zone->zone_start_pfn; 84 unsigned long pfn = zone_pfn + zone->zone_start_pfn;
82 85
83 if (!(pfn%1000)) 86 if (!(pfn%10000))
84 printk("."); 87 printk(".");
85 if (!pfn_valid(pfn)) 88 if (!pfn_valid(pfn))
86 continue; 89 continue;
@@ -119,13 +122,15 @@ int save_highmem(void)
119 struct zone *zone; 122 struct zone *zone;
120 int res = 0; 123 int res = 0;
121 124
122 pr_debug("swsusp: Saving Highmem\n"); 125 pr_debug("swsusp: Saving Highmem");
126 drain_local_pages();
123 for_each_zone (zone) { 127 for_each_zone (zone) {
124 if (is_highmem(zone)) 128 if (is_highmem(zone))
125 res = save_highmem_zone(zone); 129 res = save_highmem_zone(zone);
126 if (res) 130 if (res)
127 return res; 131 return res;
128 } 132 }
133 printk("\n");
129 return 0; 134 return 0;
130} 135}
131 136
@@ -235,7 +240,7 @@ static void copy_data_pages(struct pbe *pblist)
235 * free_pagedir - free pages allocated with alloc_pagedir() 240 * free_pagedir - free pages allocated with alloc_pagedir()
236 */ 241 */
237 242
238void free_pagedir(struct pbe *pblist) 243static void free_pagedir(struct pbe *pblist)
239{ 244{
240 struct pbe *pbe; 245 struct pbe *pbe;
241 246
@@ -301,7 +306,7 @@ struct eaten_page {
301 306
302static struct eaten_page *eaten_pages = NULL; 307static struct eaten_page *eaten_pages = NULL;
303 308
304void release_eaten_pages(void) 309static void release_eaten_pages(void)
305{ 310{
306 struct eaten_page *p, *q; 311 struct eaten_page *p, *q;
307 312
@@ -376,7 +381,6 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
376 if (!nr_pages) 381 if (!nr_pages)
377 return NULL; 382 return NULL;
378 383
379 pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
380 pblist = alloc_image_page(gfp_mask, safe_needed); 384 pblist = alloc_image_page(gfp_mask, safe_needed);
381 /* FIXME: rewrite this ugly loop */ 385 /* FIXME: rewrite this ugly loop */
382 for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages; 386 for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
@@ -388,7 +392,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
388 free_pagedir(pblist); 392 free_pagedir(pblist);
389 pblist = NULL; 393 pblist = NULL;
390 } else 394 } else
391 create_pbe_list(pblist, nr_pages); 395 create_pbe_list(pblist, nr_pages);
392 return pblist; 396 return pblist;
393} 397}
394 398
@@ -414,6 +418,10 @@ void swsusp_free(void)
414 } 418 }
415 } 419 }
416 } 420 }
421 nr_copy_pages = 0;
422 nr_meta_pages = 0;
423 pagedir_nosave = NULL;
424 buffer = NULL;
417} 425}
418 426
419 427
@@ -437,7 +445,7 @@ static int enough_free_mem(unsigned int nr_pages)
437 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); 445 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
438} 446}
439 447
440int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed) 448static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
441{ 449{
442 struct pbe *p; 450 struct pbe *p;
443 451
@@ -504,7 +512,318 @@ asmlinkage int swsusp_save(void)
504 */ 512 */
505 513
506 nr_copy_pages = nr_pages; 514 nr_copy_pages = nr_pages;
515 nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;
507 516
508 printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages); 517 printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages);
509 return 0; 518 return 0;
510} 519}
520
521static void init_header(struct swsusp_info *info)
522{
523 memset(info, 0, sizeof(struct swsusp_info));
524 info->version_code = LINUX_VERSION_CODE;
525 info->num_physpages = num_physpages;
526 memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
527 info->cpus = num_online_cpus();
528 info->image_pages = nr_copy_pages;
529 info->pages = nr_copy_pages + nr_meta_pages + 1;
530 info->size = info->pages;
531 info->size <<= PAGE_SHIFT;
532}
533
534/**
535 * pack_orig_addresses - the .orig_address fields of the PBEs from the
536 * list starting at @pbe are stored in the array @buf[] (1 page)
537 */
538
539static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
540{
541 int j;
542
543 for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
544 buf[j] = pbe->orig_address;
545 pbe = pbe->next;
546 }
547 if (!pbe)
548 for (; j < PAGE_SIZE / sizeof(long); j++)
549 buf[j] = 0;
550 return pbe;
551}
552
553/**
554 * snapshot_read_next - used for reading the system memory snapshot.
555 *
556 * On the first call to it @handle should point to a zeroed
557 * snapshot_handle structure. The structure gets updated and a pointer
558 * to it should be passed to this function every next time.
559 *
560 * The @count parameter should contain the number of bytes the caller
561 * wants to read from the snapshot. It must not be zero.
562 *
563 * On success the function returns a positive number. Then, the caller
564 * is allowed to read up to the returned number of bytes from the memory
565 * location computed by the data_of() macro. The number returned
566 * may be smaller than @count, but this only happens if the read would
567 * cross a page boundary otherwise.
568 *
569 * The function returns 0 to indicate the end of data stream condition,
570 * and a negative number is returned on error. In such cases the
571 * structure pointed to by @handle is not updated and should not be used
572 * any more.
573 */
574
575int snapshot_read_next(struct snapshot_handle *handle, size_t count)
576{
577 if (handle->page > nr_meta_pages + nr_copy_pages)
578 return 0;
579 if (!buffer) {
580 /* This makes the buffer be freed by swsusp_free() */
581 buffer = alloc_image_page(GFP_ATOMIC, 0);
582 if (!buffer)
583 return -ENOMEM;
584 }
585 if (!handle->offset) {
586 init_header((struct swsusp_info *)buffer);
587 handle->buffer = buffer;
588 handle->pbe = pagedir_nosave;
589 }
590 if (handle->prev < handle->page) {
591 if (handle->page <= nr_meta_pages) {
592 handle->pbe = pack_orig_addresses(buffer, handle->pbe);
593 if (!handle->pbe)
594 handle->pbe = pagedir_nosave;
595 } else {
596 handle->buffer = (void *)handle->pbe->address;
597 handle->pbe = handle->pbe->next;
598 }
599 handle->prev = handle->page;
600 }
601 handle->buf_offset = handle->page_offset;
602 if (handle->page_offset + count >= PAGE_SIZE) {
603 count = PAGE_SIZE - handle->page_offset;
604 handle->page_offset = 0;
605 handle->page++;
606 } else {
607 handle->page_offset += count;
608 }
609 handle->offset += count;
610 return count;
611}
612
613/**
614 * mark_unsafe_pages - mark the pages that cannot be used for storing
615 * the image during resume, because they conflict with the pages that
616 * had been used before suspend
617 */
618
619static int mark_unsafe_pages(struct pbe *pblist)
620{
621 struct zone *zone;
622 unsigned long zone_pfn;
623 struct pbe *p;
624
625 if (!pblist) /* a sanity check */
626 return -EINVAL;
627
628 /* Clear page flags */
629 for_each_zone (zone) {
630 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
631 if (pfn_valid(zone_pfn + zone->zone_start_pfn))
632 ClearPageNosaveFree(pfn_to_page(zone_pfn +
633 zone->zone_start_pfn));
634 }
635
636 /* Mark orig addresses */
637 for_each_pbe (p, pblist) {
638 if (virt_addr_valid(p->orig_address))
639 SetPageNosaveFree(virt_to_page(p->orig_address));
640 else
641 return -EFAULT;
642 }
643
644 return 0;
645}
646
647static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
648{
649 /* We assume both lists contain the same number of elements */
650 while (src) {
651 dst->orig_address = src->orig_address;
652 dst = dst->next;
653 src = src->next;
654 }
655}
656
657static int check_header(struct swsusp_info *info)
658{
659 char *reason = NULL;
660
661 if (info->version_code != LINUX_VERSION_CODE)
662 reason = "kernel version";
663 if (info->num_physpages != num_physpages)
664 reason = "memory size";
665 if (strcmp(info->uts.sysname,system_utsname.sysname))
666 reason = "system type";
667 if (strcmp(info->uts.release,system_utsname.release))
668 reason = "kernel release";
669 if (strcmp(info->uts.version,system_utsname.version))
670 reason = "version";
671 if (strcmp(info->uts.machine,system_utsname.machine))
672 reason = "machine";
673 if (reason) {
674 printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
675 return -EPERM;
676 }
677 return 0;
678}
679
680/**
681 * load header - check the image header and copy data from it
682 */
683
684static int load_header(struct snapshot_handle *handle,
685 struct swsusp_info *info)
686{
687 int error;
688 struct pbe *pblist;
689
690 error = check_header(info);
691 if (!error) {
692 pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
693 if (!pblist)
694 return -ENOMEM;
695 pagedir_nosave = pblist;
696 handle->pbe = pblist;
697 nr_copy_pages = info->image_pages;
698 nr_meta_pages = info->pages - info->image_pages - 1;
699 }
700 return error;
701}
702
703/**
704 * unpack_orig_addresses - copy the elements of @buf[] (1 page) to
705 * the PBEs in the list starting at @pbe
706 */
707
708static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
709 struct pbe *pbe)
710{
711 int j;
712
713 for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
714 pbe->orig_address = buf[j];
715 pbe = pbe->next;
716 }
717 return pbe;
718}
719
720/**
721 * create_image - use metadata contained in the PBE list
722 * pointed to by pagedir_nosave to mark the pages that will
723 * be overwritten in the process of restoring the system
724 * memory state from the image and allocate memory for
725 * the image avoiding these pages
726 */
727
728static int create_image(struct snapshot_handle *handle)
729{
730 int error = 0;
731 struct pbe *p, *pblist;
732
733 p = pagedir_nosave;
734 error = mark_unsafe_pages(p);
735 if (!error) {
736 pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
737 if (pblist)
738 copy_page_backup_list(pblist, p);
739 free_pagedir(p);
740 if (!pblist)
741 error = -ENOMEM;
742 }
743 if (!error)
744 error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
745 if (!error) {
746 release_eaten_pages();
747 pagedir_nosave = pblist;
748 } else {
749 pagedir_nosave = NULL;
750 handle->pbe = NULL;
751 nr_copy_pages = 0;
752 nr_meta_pages = 0;
753 }
754 return error;
755}
756
757/**
758 * snapshot_write_next - used for writing the system memory snapshot.
759 *
760 * On the first call to it @handle should point to a zeroed
761 * snapshot_handle structure. The structure gets updated and a pointer
762 * to it should be passed to this function every next time.
763 *
764 * The @count parameter should contain the number of bytes the caller
765 * wants to write to the image. It must not be zero.
766 *
767 * On success the function returns a positive number. Then, the caller
768 * is allowed to write up to the returned number of bytes to the memory
769 * location computed by the data_of() macro. The number returned
770 * may be smaller than @count, but this only happens if the write would
771 * cross a page boundary otherwise.
772 *
773 * The function returns 0 to indicate the "end of file" condition,
774 * and a negative number is returned on error. In such cases the
775 * structure pointed to by @handle is not updated and should not be used
776 * any more.
777 */
778
779int snapshot_write_next(struct snapshot_handle *handle, size_t count)
780{
781 int error = 0;
782
783 if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
784 return 0;
785 if (!buffer) {
786 /* This makes the buffer be freed by swsusp_free() */
787 buffer = alloc_image_page(GFP_ATOMIC, 0);
788 if (!buffer)
789 return -ENOMEM;
790 }
791 if (!handle->offset)
792 handle->buffer = buffer;
793 if (handle->prev < handle->page) {
794 if (!handle->prev) {
795 error = load_header(handle, (struct swsusp_info *)buffer);
796 if (error)
797 return error;
798 } else if (handle->prev <= nr_meta_pages) {
799 handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
800 if (!handle->pbe) {
801 error = create_image(handle);
802 if (error)
803 return error;
804 handle->pbe = pagedir_nosave;
805 handle->buffer = (void *)handle->pbe->address;
806 }
807 } else {
808 handle->pbe = handle->pbe->next;
809 handle->buffer = (void *)handle->pbe->address;
810 }
811 handle->prev = handle->page;
812 }
813 handle->buf_offset = handle->page_offset;
814 if (handle->page_offset + count >= PAGE_SIZE) {
815 count = PAGE_SIZE - handle->page_offset;
816 handle->page_offset = 0;
817 handle->page++;
818 } else {
819 handle->page_offset += count;
820 }
821 handle->offset += count;
822 return count;
823}
824
825int snapshot_image_loaded(struct snapshot_handle *handle)
826{
827 return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
828 handle->page <= nr_meta_pages + nr_copy_pages);
829}
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
new file mode 100644
index 000000000000..9177f3f73a6c
--- /dev/null
+++ b/kernel/power/swap.c
@@ -0,0 +1,544 @@
1/*
2 * linux/kernel/power/swap.c
3 *
4 * This file provides functions for reading the suspend image from
5 * and writing it to a swap partition.
6 *
7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
9 *
10 * This file is released under the GPLv2.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/smp_lock.h>
16#include <linux/file.h>
17#include <linux/utsname.h>
18#include <linux/version.h>
19#include <linux/delay.h>
20#include <linux/bitops.h>
21#include <linux/genhd.h>
22#include <linux/device.h>
23#include <linux/buffer_head.h>
24#include <linux/bio.h>
25#include <linux/swap.h>
26#include <linux/swapops.h>
27#include <linux/pm.h>
28
29#include "power.h"
30
31extern char resume_file[];
32
33#define SWSUSP_SIG "S1SUSPEND"
34
/*
 * Overlay for the first PAGE_SIZE bytes of the resume swap partition,
 * sharing space with the regular swap signature.  While a suspend image
 * is present, .sig holds SWSUSP_SIG, .orig_sig preserves the original
 * swap signature (restored by swsusp_check()) and .image points to the
 * first swap_map_page of the saved image.
 */
static struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
	swp_entry_t image;	/* swap location of the first swap_map_page */
	char orig_sig[10];	/* saved original swap signature */
	char sig[10];		/* SWSUSP_SIG while an image is present */
} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
41
42/*
43 * Saving part...
44 */
45
46static unsigned short root_swap = 0xffff;
47
/**
 *	mark_swapfiles - write the suspend signature into the swap header
 *	@start:	swap entry of the first swap_map_page of the image
 *
 *	Read the first page of the resume swap partition; if it carries a
 *	valid swap signature, save it in .orig_sig so swsusp_check() can
 *	restore it later, replace it with SWSUSP_SIG, record @start and
 *	write the page back.  Returns 0 on success, -ENODEV if the
 *	partition is not swap space, or the error from the write.
 */
static int mark_swapfiles(swp_entry_t start)
{
	int error;

	/* NOTE(review): the result of this READ is not checked; if it
	 * fails, the signature compare below fails and -ENODEV is
	 * returned, so the error is not silently ignored */
	rw_swap_page_sync(READ,
			  swp_entry(root_swap, 0),
			  virt_to_page((unsigned long)&swsusp_header));
	if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
	    !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
		memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
		memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
		swsusp_header.image = start;
		error = rw_swap_page_sync(WRITE,
					  swp_entry(root_swap, 0),
					  virt_to_page((unsigned long)
						       &swsusp_header));
	} else {
		pr_debug("swsusp: Partition is not swap space.\n");
		error = -ENODEV;
	}
	return error;
}
70
71/**
72 * swsusp_swap_check - check if the resume device is a swap device
73 * and get its index (if so)
74 */
75
76static int swsusp_swap_check(void) /* This is called before saving image */
77{
78 int res = swap_type_of(swsusp_resume_device);
79
80 if (res >= 0) {
81 root_swap = res;
82 return 0;
83 }
84 return res;
85}
86
87/**
88 * write_page - Write one page to given swap location.
89 * @buf: Address we're writing.
90 * @offset: Offset of the swap page we're writing to.
91 */
92
93static int write_page(void *buf, unsigned long offset)
94{
95 swp_entry_t entry;
96 int error = -ENOSPC;
97
98 if (offset) {
99 entry = swp_entry(root_swap, offset);
100 error = rw_swap_page_sync(WRITE, entry, virt_to_page(buf));
101 }
102 return error;
103}
104
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we also only need to use one swap_map_page structure
 * at a time.
 */

/* Entries per map page; the last slot of the page holds .next_swap */
#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(long) - 1)

struct swap_map_page {
	unsigned long entries[MAP_PAGE_ENTRIES]; /* swap offsets of data pages */
	unsigned long next_swap;	/* swap offset of next map page, 0 = last */
};

/**
 *	The swap_map_handle structure is used for handling swap in
 *	a file-alike way
 */

struct swap_map_handle {
	struct swap_map_page *cur;	/* in-memory copy of the current map page */
	unsigned long cur_swap;		/* swap offset of the current map page */
	struct bitmap_page *bitmap;	/* tracks allocated swap pages (writer only) */
	unsigned int k;			/* next index in cur->entries[] */
};
138
139static void release_swap_writer(struct swap_map_handle *handle)
140{
141 if (handle->cur)
142 free_page((unsigned long)handle->cur);
143 handle->cur = NULL;
144 if (handle->bitmap)
145 free_bitmap(handle->bitmap);
146 handle->bitmap = NULL;
147}
148
149static int get_swap_writer(struct swap_map_handle *handle)
150{
151 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
152 if (!handle->cur)
153 return -ENOMEM;
154 handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0));
155 if (!handle->bitmap) {
156 release_swap_writer(handle);
157 return -ENOMEM;
158 }
159 handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap);
160 if (!handle->cur_swap) {
161 release_swap_writer(handle);
162 return -ENOSPC;
163 }
164 handle->k = 0;
165 return 0;
166}
167
/*
 * swap_write_page - write one data page to a freshly allocated swap slot
 * and record that slot in the current swap map page.  When the map page
 * fills up, link it to a newly allocated slot, flush it to swap and
 * start a fresh one.  Returns 0 on success or a negative error code
 * (-ENOSPC when swap runs out).
 */
static int swap_write_page(struct swap_map_handle *handle, void *buf)
{
	int error;
	unsigned long offset;

	if (!handle->cur)
		return -EINVAL;
	/* If allocation failed, offset is 0 and write_page() returns
	 * -ENOSPC, so the failure is not lost */
	offset = alloc_swap_page(root_swap, handle->bitmap);
	error = write_page(buf, offset);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		/* Map page full: allocate the next map slot first so the
		 * link can be stored before the page is flushed */
		offset = alloc_swap_page(root_swap, handle->bitmap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap);
		if (error)
			return error;
		memset(handle->cur, 0, PAGE_SIZE);
		handle->cur_swap = offset;
		handle->k = 0;
	}
	return 0;
}
194
195static int flush_swap_writer(struct swap_map_handle *handle)
196{
197 if (handle->cur && handle->cur_swap)
198 return write_page(handle->cur, handle->cur_swap);
199 else
200 return -EINVAL;
201}
202
203/**
204 * save_image - save the suspend image data
205 */
206
207static int save_image(struct swap_map_handle *handle,
208 struct snapshot_handle *snapshot,
209 unsigned int nr_pages)
210{
211 unsigned int m;
212 int ret;
213 int error = 0;
214
215 printk("Saving image data pages (%u pages) ... ", nr_pages);
216 m = nr_pages / 100;
217 if (!m)
218 m = 1;
219 nr_pages = 0;
220 do {
221 ret = snapshot_read_next(snapshot, PAGE_SIZE);
222 if (ret > 0) {
223 error = swap_write_page(handle, data_of(*snapshot));
224 if (error)
225 break;
226 if (!(nr_pages % m))
227 printk("\b\b\b\b%3d%%", nr_pages / m);
228 nr_pages++;
229 }
230 } while (ret > 0);
231 if (!error)
232 printk("\b\b\b\bdone\n");
233 return error;
234}
235
236/**
237 * enough_swap - Make sure we have enough swap to save the image.
238 *
239 * Returns TRUE or FALSE after checking the total amount of swap
240 * space avaiable from the resume partition.
241 */
242
243static int enough_swap(unsigned int nr_pages)
244{
245 unsigned int free_swap = count_swap_pages(root_swap, 1);
246
247 pr_debug("swsusp: free swap pages: %u\n", free_swap);
248 return free_swap > (nr_pages + PAGES_FOR_IO +
249 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
250}
251
/**
 *	swsusp_write - Write entire image and metadata.
 *
 *	It is important _NOT_ to umount filesystems at this point. We want
 *	them synced (in case something goes wrong) but we DO not want to mark
 *	filesystem clean: it is not. (And it does not matter, if we resume
 *	correctly, we'll mark system clean, anyway.)
 *
 *	Returns 0 on success or a negative error code; on error every swap
 *	page allocated for the image is freed again.
 */

int swsusp_write(void)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long start;
	int error;

	if ((error = swsusp_swap_check())) {
		printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n");
		return error;
	}
	/* The first snapshot page is the image header; a short read here
	 * means the snapshot is broken */
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot, PAGE_SIZE);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	if (!enough_swap(header->pages)) {
		printk(KERN_ERR "swsusp: Not enough free swap\n");
		return -ENOSPC;
	}
	error = get_swap_writer(&handle);
	if (!error) {
		/* Remember where the map starts so mark_swapfiles() can
		 * record it in the swap header */
		start = handle.cur_swap;
		error = swap_write_page(&handle, header);
	}
	if (!error)
		error = save_image(&handle, &snapshot, header->pages - 1);
	if (!error) {
		flush_swap_writer(&handle);
		printk("S");
		error = mark_swapfiles(swp_entry(root_swap, start));
		printk("|\n");
	}
	/* On any failure, give back every swap page recorded in the bitmap */
	if (error)
		free_all_swap_pages(root_swap, handle.bitmap);
	release_swap_writer(&handle);
	return error;
}
300
301/*
302 * Using bio to read from swap.
303 * This code requires a bit more work than just using buffer heads
304 * but, it is the recommended way for 2.5/2.6.
305 * The following are to signal the beginning and end of I/O. Bios
306 * finish asynchronously, while we want them to happen synchronously.
307 * A simple atomic_t, and a wait loop take care of this problem.
308 */
309
310static atomic_t io_done = ATOMIC_INIT(0);
311
/*
 * end_io - bio completion callback for the synchronous I/O issued by
 * submit(): clears the io_done flag that submit() spins on.  A failed
 * transfer is fatal because the image cannot be recovered.
 */
static int end_io(struct bio *bio, unsigned int num, int err)
{
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		panic("I/O error reading memory image");
	atomic_set(&io_done, 0);
	return 0;
}
319
320static struct block_device *resume_bdev;
321
/**
 *	submit - submit BIO request.
 *	@rw:	READ or WRITE.
 *	@page_off: physical offset of page.
 *	@page:	page we're reading or writing.
 *
 *	Straight from the textbook - allocate and initialize the bio,
 *	submit it and busy-wait (yielding) until end_io() signals
 *	completion.  Returns 0 on success, -ENOMEM or -EFAULT on failure.
 */

static int submit(int rw, pgoff_t page_off, void *page)
{
	int error = 0;
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;
	/* Convert the page offset to a 512-byte sector number */
	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
	bio->bi_bdev = resume_bdev;
	bio->bi_end_io = end_io;

	if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) {
		printk("swsusp: ERROR: adding page to bio at %ld\n",page_off);
		error = -EFAULT;
		goto Done;
	}

	/* end_io() clears io_done when the transfer finishes */
	atomic_set(&io_done, 1);
	submit_bio(rw | (1 << BIO_RW_SYNC), bio);
	while (atomic_read(&io_done))
		yield();
	/* NOTE(review): pages are marked dirty after a READ, i.e. after the
	 * device wrote into them -- presumably intentional, despite the
	 * older comment above mentioning writes; confirm against the block
	 * layer's bio_set_pages_dirty() contract */
	if (rw == READ)
		bio_set_pages_dirty(bio);
 Done:
	bio_put(bio);
	return error;
}
361
/* Read one page at swap offset @page_off into @page, synchronously. */
static int bio_read_page(pgoff_t page_off, void *page)
{
	return submit(READ, page_off, page);
}
366
/* Write one page from @page to swap offset @page_off, synchronously. */
static int bio_write_page(pgoff_t page_off, void *page)
{
	return submit(WRITE, page_off, page);
}
371
372/**
373 * The following functions allow us to read data using a swap map
374 * in a file-alike way
375 */
376
377static void release_swap_reader(struct swap_map_handle *handle)
378{
379 if (handle->cur)
380 free_page((unsigned long)handle->cur);
381 handle->cur = NULL;
382}
383
384static int get_swap_reader(struct swap_map_handle *handle,
385 swp_entry_t start)
386{
387 int error;
388
389 if (!swp_offset(start))
390 return -EINVAL;
391 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
392 if (!handle->cur)
393 return -ENOMEM;
394 error = bio_read_page(swp_offset(start), handle->cur);
395 if (error) {
396 release_swap_reader(handle);
397 return error;
398 }
399 handle->k = 0;
400 return 0;
401}
402
/*
 * swap_read_page - read the next image page (as recorded by the swap
 * map) into @buf.  When the current map page is exhausted, the next one
 * is loaded from swap, or the reader is released if this was the last
 * map page.  Returns 0 on success or a negative error code.
 */
static int swap_read_page(struct swap_map_handle *handle, void *buf)
{
	unsigned long offset;
	int error;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	/* A zero entry means the map is shorter than expected */
	if (!offset)
		return -EFAULT;
	error = bio_read_page(offset, buf);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		offset = handle->cur->next_swap;
		if (!offset)
			/* Last map page consumed: tear down the reader */
			release_swap_reader(handle);
		else
			/* Overwrite the in-memory map with the next one */
			error = bio_read_page(offset, handle->cur);
	}
	return error;
}
426
427/**
428 * load_image - load the image using the swap map handle
429 * @handle and the snapshot handle @snapshot
430 * (assume there are @nr_pages pages to load)
431 */
432
433static int load_image(struct swap_map_handle *handle,
434 struct snapshot_handle *snapshot,
435 unsigned int nr_pages)
436{
437 unsigned int m;
438 int ret;
439 int error = 0;
440
441 printk("Loading image data pages (%u pages) ... ", nr_pages);
442 m = nr_pages / 100;
443 if (!m)
444 m = 1;
445 nr_pages = 0;
446 do {
447 ret = snapshot_write_next(snapshot, PAGE_SIZE);
448 if (ret > 0) {
449 error = swap_read_page(handle, data_of(*snapshot));
450 if (error)
451 break;
452 if (!(nr_pages % m))
453 printk("\b\b\b\b%3d%%", nr_pages / m);
454 nr_pages++;
455 }
456 } while (ret > 0);
457 if (!error)
458 printk("\b\b\b\bdone\n");
459 if (!snapshot_image_loaded(snapshot))
460 error = -ENODATA;
461 return error;
462}
463
/*
 * swsusp_read - read the entire suspend image from the resume device.
 *
 * Expects resume_bdev to have been opened by swsusp_check() and
 * swsusp_header.image to point at the first swap_map_page.  Releases
 * the block device before returning.  Returns 0 on success or a
 * negative error code.
 */
int swsusp_read(void)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	if (IS_ERR(resume_bdev)) {
		pr_debug("swsusp: block device not initialised\n");
		return PTR_ERR(resume_bdev);
	}

	/* The first snapshot page is reserved for the image header */
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot, PAGE_SIZE);
	if (error < PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, swsusp_header.image);
	if (!error)
		/* First mapped page is the header itself */
		error = swap_read_page(&handle, header);
	if (!error)
		error = load_image(&handle, &snapshot, header->pages - 1);
	release_swap_reader(&handle);

	blkdev_put(resume_bdev);

	if (!error)
		pr_debug("swsusp: Reading resume file was successful\n");
	else
		pr_debug("swsusp: Error %d resuming\n", error);
	return error;
}
496
497/**
498 * swsusp_check - Check for swsusp signature in the resume device
499 */
500
501int swsusp_check(void)
502{
503 int error;
504
505 resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
506 if (!IS_ERR(resume_bdev)) {
507 set_blocksize(resume_bdev, PAGE_SIZE);
508 memset(&swsusp_header, 0, sizeof(swsusp_header));
509 if ((error = bio_read_page(0, &swsusp_header)))
510 return error;
511 if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
512 memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
513 /* Reset swap signature now */
514 error = bio_write_page(0, &swsusp_header);
515 } else {
516 return -EINVAL;
517 }
518 if (error)
519 blkdev_put(resume_bdev);
520 else
521 pr_debug("swsusp: Signature found, resuming\n");
522 } else {
523 error = PTR_ERR(resume_bdev);
524 }
525
526 if (error)
527 pr_debug("swsusp: Error %d check for resume file\n", error);
528
529 return error;
530}
531
532/**
533 * swsusp_close - close swap device.
534 */
535
536void swsusp_close(void)
537{
538 if (IS_ERR(resume_bdev)) {
539 pr_debug("swsusp: block device not initialised\n");
540 return;
541 }
542
543 blkdev_put(resume_bdev);
544}
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 2d9d08f72f76..c4016cbbd3e0 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -31,41 +31,24 @@
31 * Fixed runaway init 31 * Fixed runaway init
32 * 32 *
33 * Rafael J. Wysocki <rjw@sisk.pl> 33 * Rafael J. Wysocki <rjw@sisk.pl>
34 * Added the swap map data structure and reworked the handling of swap 34 * Reworked the freeing of memory and the handling of swap
35 * 35 *
36 * More state savers are welcome. Especially for the scsi layer... 36 * More state savers are welcome. Especially for the scsi layer...
37 * 37 *
38 * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt 38 * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt
39 */ 39 */
40 40
41#include <linux/module.h>
42#include <linux/mm.h> 41#include <linux/mm.h>
43#include <linux/suspend.h> 42#include <linux/suspend.h>
44#include <linux/smp_lock.h>
45#include <linux/file.h>
46#include <linux/utsname.h>
47#include <linux/version.h>
48#include <linux/delay.h>
49#include <linux/bitops.h>
50#include <linux/spinlock.h> 43#include <linux/spinlock.h>
51#include <linux/genhd.h>
52#include <linux/kernel.h> 44#include <linux/kernel.h>
53#include <linux/major.h> 45#include <linux/major.h>
54#include <linux/swap.h> 46#include <linux/swap.h>
55#include <linux/pm.h> 47#include <linux/pm.h>
56#include <linux/device.h>
57#include <linux/buffer_head.h>
58#include <linux/swapops.h> 48#include <linux/swapops.h>
59#include <linux/bootmem.h> 49#include <linux/bootmem.h>
60#include <linux/syscalls.h> 50#include <linux/syscalls.h>
61#include <linux/highmem.h> 51#include <linux/highmem.h>
62#include <linux/bio.h>
63
64#include <asm/uaccess.h>
65#include <asm/mmu_context.h>
66#include <asm/pgtable.h>
67#include <asm/tlbflush.h>
68#include <asm/io.h>
69 52
70#include "power.h" 53#include "power.h"
71 54
@@ -77,6 +60,8 @@
77 */ 60 */
78unsigned long image_size = 500 * 1024 * 1024; 61unsigned long image_size = 500 * 1024 * 1024;
79 62
63int in_suspend __nosavedata = 0;
64
80#ifdef CONFIG_HIGHMEM 65#ifdef CONFIG_HIGHMEM
81unsigned int count_highmem_pages(void); 66unsigned int count_highmem_pages(void);
82int save_highmem(void); 67int save_highmem(void);
@@ -87,471 +72,97 @@ static int restore_highmem(void) { return 0; }
87static unsigned int count_highmem_pages(void) { return 0; } 72static unsigned int count_highmem_pages(void) { return 0; }
88#endif 73#endif
89 74
90extern char resume_file[];
91
92#define SWSUSP_SIG "S1SUSPEND"
93
94static struct swsusp_header {
95 char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
96 swp_entry_t image;
97 char orig_sig[10];
98 char sig[10];
99} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
100
101static struct swsusp_info swsusp_info;
102
103/*
104 * Saving part...
105 */
106
107static unsigned short root_swap = 0xffff;
108
109static int mark_swapfiles(swp_entry_t start)
110{
111 int error;
112
113 rw_swap_page_sync(READ,
114 swp_entry(root_swap, 0),
115 virt_to_page((unsigned long)&swsusp_header));
116 if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
117 !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
118 memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
119 memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
120 swsusp_header.image = start;
121 error = rw_swap_page_sync(WRITE,
122 swp_entry(root_swap, 0),
123 virt_to_page((unsigned long)
124 &swsusp_header));
125 } else {
126 pr_debug("swsusp: Partition is not swap space.\n");
127 error = -ENODEV;
128 }
129 return error;
130}
131
132/*
133 * Check whether the swap device is the specified resume
134 * device, irrespective of whether they are specified by
135 * identical names.
136 *
137 * (Thus, device inode aliasing is allowed. You can say /dev/hda4
138 * instead of /dev/ide/host0/bus0/target0/lun0/part4 [if using devfs]
139 * and they'll be considered the same device. This is *necessary* for
140 * devfs, since the resume code can only recognize the form /dev/hda4,
141 * but the suspend code would see the long name.)
142 */
143static inline int is_resume_device(const struct swap_info_struct *swap_info)
144{
145 struct file *file = swap_info->swap_file;
146 struct inode *inode = file->f_dentry->d_inode;
147
148 return S_ISBLK(inode->i_mode) &&
149 swsusp_resume_device == MKDEV(imajor(inode), iminor(inode));
150}
151
152static int swsusp_swap_check(void) /* This is called before saving image */
153{
154 int i;
155
156 spin_lock(&swap_lock);
157 for (i = 0; i < MAX_SWAPFILES; i++) {
158 if (!(swap_info[i].flags & SWP_WRITEOK))
159 continue;
160 if (!swsusp_resume_device || is_resume_device(swap_info + i)) {
161 spin_unlock(&swap_lock);
162 root_swap = i;
163 return 0;
164 }
165 }
166 spin_unlock(&swap_lock);
167 return -ENODEV;
168}
169
170/**
171 * write_page - Write one page to a fresh swap location.
172 * @addr: Address we're writing.
173 * @loc: Place to store the entry we used.
174 *
175 * Allocate a new swap entry and 'sync' it. Note we discard -EIO
176 * errors. That is an artifact left over from swsusp. It did not
177 * check the return of rw_swap_page_sync() at all, since most pages
178 * written back to swap would return -EIO.
179 * This is a partial improvement, since we will at least return other
180 * errors, though we need to eventually fix the damn code.
181 */
182static int write_page(unsigned long addr, swp_entry_t *loc)
183{
184 swp_entry_t entry;
185 int error = -ENOSPC;
186
187 entry = get_swap_page_of_type(root_swap);
188 if (swp_offset(entry)) {
189 error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr));
190 if (!error || error == -EIO)
191 *loc = entry;
192 }
193 return error;
194}
195
196/** 75/**
197 * Swap map-handling functions 76 * The following functions are used for tracing the allocated
198 * 77 * swap pages, so that they can be freed in case of an error.
199 * The swap map is a data structure used for keeping track of each page
200 * written to the swap. It consists of many swap_map_page structures
201 * that contain each an array of MAP_PAGE_SIZE swap entries.
202 * These structures are linked together with the help of either the
203 * .next (in memory) or the .next_swap (in swap) member.
204 * 78 *
205 * The swap map is created during suspend. At that time we need to keep 79 * The functions operate on a linked bitmap structure defined
206 * it in memory, because we have to free all of the allocated swap 80 * in power.h
207 * entries if an error occurs. The memory needed is preallocated
208 * so that we know in advance if there's enough of it.
209 *
210 * The first swap_map_page structure is filled with the swap entries that
211 * correspond to the first MAP_PAGE_SIZE data pages written to swap and
212 * so on. After the all of the data pages have been written, the order
213 * of the swap_map_page structures in the map is reversed so that they
214 * can be read from swap in the original order. This causes the data
215 * pages to be loaded in exactly the same order in which they have been
216 * saved.
217 *
218 * During resume we only need to use one swap_map_page structure
219 * at a time, which means that we only need to use two memory pages for
220 * reading the image - one for reading the swap_map_page structures
221 * and the second for reading the data pages from swap.
222 */ 81 */
223 82
224#define MAP_PAGE_SIZE ((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \ 83void free_bitmap(struct bitmap_page *bitmap)
225 / sizeof(swp_entry_t))
226
227struct swap_map_page {
228 swp_entry_t entries[MAP_PAGE_SIZE];
229 swp_entry_t next_swap;
230 struct swap_map_page *next;
231};
232
233static inline void free_swap_map(struct swap_map_page *swap_map)
234{ 84{
235 struct swap_map_page *swp; 85 struct bitmap_page *bp;
236 86
237 while (swap_map) { 87 while (bitmap) {
238 swp = swap_map->next; 88 bp = bitmap->next;
239 free_page((unsigned long)swap_map); 89 free_page((unsigned long)bitmap);
240 swap_map = swp; 90 bitmap = bp;
241 } 91 }
242} 92}
243 93
244static struct swap_map_page *alloc_swap_map(unsigned int nr_pages) 94struct bitmap_page *alloc_bitmap(unsigned int nr_bits)
245{ 95{
246 struct swap_map_page *swap_map, *swp; 96 struct bitmap_page *bitmap, *bp;
247 unsigned n = 0; 97 unsigned int n;
248 98
249 if (!nr_pages) 99 if (!nr_bits)
250 return NULL; 100 return NULL;
251 101
252 pr_debug("alloc_swap_map(): nr_pages = %d\n", nr_pages); 102 bitmap = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
253 swap_map = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); 103 bp = bitmap;
254 swp = swap_map; 104 for (n = BITMAP_PAGE_BITS; n < nr_bits; n += BITMAP_PAGE_BITS) {
255 for (n = MAP_PAGE_SIZE; n < nr_pages; n += MAP_PAGE_SIZE) { 105 bp->next = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
256 swp->next = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); 106 bp = bp->next;
257 swp = swp->next; 107 if (!bp) {
258 if (!swp) { 108 free_bitmap(bitmap);
259 free_swap_map(swap_map);
260 return NULL; 109 return NULL;
261 } 110 }
262 } 111 }
263 return swap_map; 112 return bitmap;
264} 113}
265 114
266/** 115static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
267 * reverse_swap_map - reverse the order of pages in the swap map
268 * @swap_map
269 */
270
271static inline struct swap_map_page *reverse_swap_map(struct swap_map_page *swap_map)
272{
273 struct swap_map_page *prev, *next;
274
275 prev = NULL;
276 while (swap_map) {
277 next = swap_map->next;
278 swap_map->next = prev;
279 prev = swap_map;
280 swap_map = next;
281 }
282 return prev;
283}
284
285/**
286 * free_swap_map_entries - free the swap entries allocated to store
287 * the swap map @swap_map (this is only called in case of an error)
288 */
289static inline void free_swap_map_entries(struct swap_map_page *swap_map)
290{
291 while (swap_map) {
292 if (swap_map->next_swap.val)
293 swap_free(swap_map->next_swap);
294 swap_map = swap_map->next;
295 }
296}
297
298/**
299 * save_swap_map - save the swap map used for tracing the data pages
300 * stored in the swap
301 */
302
303static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start)
304{
305 swp_entry_t entry = (swp_entry_t){0};
306 int error;
307
308 while (swap_map) {
309 swap_map->next_swap = entry;
310 if ((error = write_page((unsigned long)swap_map, &entry)))
311 return error;
312 swap_map = swap_map->next;
313 }
314 *start = entry;
315 return 0;
316}
317
318/**
319 * free_image_entries - free the swap entries allocated to store
320 * the image data pages (this is only called in case of an error)
321 */
322
323static inline void free_image_entries(struct swap_map_page *swp)
324{ 116{
325 unsigned k; 117 unsigned int n;
326 118
327 while (swp) { 119 n = BITMAP_PAGE_BITS;
328 for (k = 0; k < MAP_PAGE_SIZE; k++) 120 while (bitmap && n <= bit) {
329 if (swp->entries[k].val) 121 n += BITMAP_PAGE_BITS;
330 swap_free(swp->entries[k]); 122 bitmap = bitmap->next;
331 swp = swp->next;
332 } 123 }
333} 124 if (!bitmap)
334 125 return -EINVAL;
335/** 126 n -= BITMAP_PAGE_BITS;
336 * The swap_map_handle structure is used for handling the swap map in 127 bit -= n;
337 * a file-alike way 128 n = 0;
338 */ 129 while (bit >= BITS_PER_CHUNK) {
339 130 bit -= BITS_PER_CHUNK;
340struct swap_map_handle { 131 n++;
341 struct swap_map_page *cur;
342 unsigned int k;
343};
344
345static inline void init_swap_map_handle(struct swap_map_handle *handle,
346 struct swap_map_page *map)
347{
348 handle->cur = map;
349 handle->k = 0;
350}
351
352static inline int swap_map_write_page(struct swap_map_handle *handle,
353 unsigned long addr)
354{
355 int error;
356
357 error = write_page(addr, handle->cur->entries + handle->k);
358 if (error)
359 return error;
360 if (++handle->k >= MAP_PAGE_SIZE) {
361 handle->cur = handle->cur->next;
362 handle->k = 0;
363 } 132 }
133 bitmap->chunks[n] |= (1UL << bit);
364 return 0; 134 return 0;
365} 135}
366 136
367/** 137unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap)
368 * save_image_data - save the data pages pointed to by the PBEs
369 * from the list @pblist using the swap map handle @handle
370 * (assume there are @nr_pages data pages to save)
371 */
372
373static int save_image_data(struct pbe *pblist,
374 struct swap_map_handle *handle,
375 unsigned int nr_pages)
376{
377 unsigned int m;
378 struct pbe *p;
379 int error = 0;
380
381 printk("Saving image data pages (%u pages) ... ", nr_pages);
382 m = nr_pages / 100;
383 if (!m)
384 m = 1;
385 nr_pages = 0;
386 for_each_pbe (p, pblist) {
387 error = swap_map_write_page(handle, p->address);
388 if (error)
389 break;
390 if (!(nr_pages % m))
391 printk("\b\b\b\b%3d%%", nr_pages / m);
392 nr_pages++;
393 }
394 if (!error)
395 printk("\b\b\b\bdone\n");
396 return error;
397}
398
399static void dump_info(void)
400{
401 pr_debug(" swsusp: Version: %u\n",swsusp_info.version_code);
402 pr_debug(" swsusp: Num Pages: %ld\n",swsusp_info.num_physpages);
403 pr_debug(" swsusp: UTS Sys: %s\n",swsusp_info.uts.sysname);
404 pr_debug(" swsusp: UTS Node: %s\n",swsusp_info.uts.nodename);
405 pr_debug(" swsusp: UTS Release: %s\n",swsusp_info.uts.release);
406 pr_debug(" swsusp: UTS Version: %s\n",swsusp_info.uts.version);
407 pr_debug(" swsusp: UTS Machine: %s\n",swsusp_info.uts.machine);
408 pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname);
409 pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus);
410 pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages);
411 pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages);
412}
413
414static void init_header(unsigned int nr_pages)
415{
416 memset(&swsusp_info, 0, sizeof(swsusp_info));
417 swsusp_info.version_code = LINUX_VERSION_CODE;
418 swsusp_info.num_physpages = num_physpages;
419 memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname));
420
421 swsusp_info.cpus = num_online_cpus();
422 swsusp_info.image_pages = nr_pages;
423 swsusp_info.pages = nr_pages +
424 ((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
425}
426
427/**
428 * pack_orig_addresses - the .orig_address fields of the PBEs from the
429 * list starting at @pbe are stored in the array @buf[] (1 page)
430 */
431
432static inline struct pbe *pack_orig_addresses(unsigned long *buf,
433 struct pbe *pbe)
434{
435 int j;
436
437 for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
438 buf[j] = pbe->orig_address;
439 pbe = pbe->next;
440 }
441 if (!pbe)
442 for (; j < PAGE_SIZE / sizeof(long); j++)
443 buf[j] = 0;
444 return pbe;
445}
446
447/**
448 * save_image_metadata - save the .orig_address fields of the PBEs
449 * from the list @pblist using the swap map handle @handle
450 */
451
452static int save_image_metadata(struct pbe *pblist,
453 struct swap_map_handle *handle)
454{ 138{
455 unsigned long *buf; 139 unsigned long offset;
456 unsigned int n = 0;
457 struct pbe *p;
458 int error = 0;
459 140
460 printk("Saving image metadata ... "); 141 offset = swp_offset(get_swap_page_of_type(swap));
461 buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC); 142 if (offset) {
462 if (!buf) 143 if (bitmap_set(bitmap, offset)) {
463 return -ENOMEM; 144 swap_free(swp_entry(swap, offset));
464 p = pblist; 145 offset = 0;
465 while (p) { 146 }
466 p = pack_orig_addresses(buf, p);
467 error = swap_map_write_page(handle, (unsigned long)buf);
468 if (error)
469 break;
470 n++;
471 } 147 }
472 free_page((unsigned long)buf); 148 return offset;
473 if (!error)
474 printk("done (%u pages saved)\n", n);
475 return error;
476} 149}
477 150
478/** 151void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
479 * enough_swap - Make sure we have enough swap to save the image.
480 *
481 * Returns TRUE or FALSE after checking the total amount of swap
482 * space avaiable from the resume partition.
483 */
484
485static int enough_swap(unsigned int nr_pages)
486{ 152{
487 unsigned int free_swap = swap_info[root_swap].pages - 153 unsigned int bit, n;
488 swap_info[root_swap].inuse_pages; 154 unsigned long test;
489
490 pr_debug("swsusp: free swap pages: %u\n", free_swap);
491 return free_swap > (nr_pages + PAGES_FOR_IO +
492 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
493}
494 155
495/** 156 bit = 0;
496 * swsusp_write - Write entire image and metadata. 157 while (bitmap) {
497 * 158 for (n = 0; n < BITMAP_PAGE_CHUNKS; n++)
498 * It is important _NOT_ to umount filesystems at this point. We want 159 for (test = 1UL; test; test <<= 1) {
499 * them synced (in case something goes wrong) but we DO not want to mark 160 if (bitmap->chunks[n] & test)
500 * filesystem clean: it is not. (And it does not matter, if we resume 161 swap_free(swp_entry(swap, bit));
501 * correctly, we'll mark system clean, anyway.) 162 bit++;
502 */ 163 }
503 164 bitmap = bitmap->next;
504int swsusp_write(struct pbe *pblist, unsigned int nr_pages)
505{
506 struct swap_map_page *swap_map;
507 struct swap_map_handle handle;
508 swp_entry_t start;
509 int error;
510
511 if ((error = swsusp_swap_check())) {
512 printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n");
513 return error;
514 }
515 if (!enough_swap(nr_pages)) {
516 printk(KERN_ERR "swsusp: Not enough free swap\n");
517 return -ENOSPC;
518 } 165 }
519
520 init_header(nr_pages);
521 swap_map = alloc_swap_map(swsusp_info.pages);
522 if (!swap_map)
523 return -ENOMEM;
524 init_swap_map_handle(&handle, swap_map);
525
526 error = swap_map_write_page(&handle, (unsigned long)&swsusp_info);
527 if (!error)
528 error = save_image_metadata(pblist, &handle);
529 if (!error)
530 error = save_image_data(pblist, &handle, nr_pages);
531 if (error)
532 goto Free_image_entries;
533
534 swap_map = reverse_swap_map(swap_map);
535 error = save_swap_map(swap_map, &start);
536 if (error)
537 goto Free_map_entries;
538
539 dump_info();
540 printk( "S" );
541 error = mark_swapfiles(start);
542 printk( "|\n" );
543 if (error)
544 goto Free_map_entries;
545
546Free_swap_map:
547 free_swap_map(swap_map);
548 return error;
549
550Free_map_entries:
551 free_swap_map_entries(swap_map);
552Free_image_entries:
553 free_image_entries(swap_map);
554 goto Free_swap_map;
555} 166}
556 167
557/** 168/**
@@ -660,379 +271,3 @@ int swsusp_resume(void)
660 local_irq_enable(); 271 local_irq_enable();
661 return error; 272 return error;
662} 273}
663
664/**
665 * mark_unsafe_pages - mark the pages that cannot be used for storing
666 * the image during resume, because they conflict with the pages that
667 * had been used before suspend
668 */
669
670static void mark_unsafe_pages(struct pbe *pblist)
671{
672 struct zone *zone;
673 unsigned long zone_pfn;
674 struct pbe *p;
675
676 if (!pblist) /* a sanity check */
677 return;
678
679 /* Clear page flags */
680 for_each_zone (zone) {
681 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
682 if (pfn_valid(zone_pfn + zone->zone_start_pfn))
683 ClearPageNosaveFree(pfn_to_page(zone_pfn +
684 zone->zone_start_pfn));
685 }
686
687 /* Mark orig addresses */
688 for_each_pbe (p, pblist)
689 SetPageNosaveFree(virt_to_page(p->orig_address));
690
691}
692
693static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
694{
695 /* We assume both lists contain the same number of elements */
696 while (src) {
697 dst->orig_address = src->orig_address;
698 dst = dst->next;
699 src = src->next;
700 }
701}
702
703/*
704 * Using bio to read from swap.
705 * This code requires a bit more work than just using buffer heads
706 * but, it is the recommended way for 2.5/2.6.
707 * The following are to signal the beginning and end of I/O. Bios
708 * finish asynchronously, while we want them to happen synchronously.
709 * A simple atomic_t, and a wait loop take care of this problem.
710 */
711
712static atomic_t io_done = ATOMIC_INIT(0);
713
714static int end_io(struct bio *bio, unsigned int num, int err)
715{
716 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
717 panic("I/O error reading memory image");
718 atomic_set(&io_done, 0);
719 return 0;
720}
721
722static struct block_device *resume_bdev;
723
724/**
725 * submit - submit BIO request.
726 * @rw: READ or WRITE.
727 * @off physical offset of page.
728 * @page: page we're reading or writing.
729 *
730 * Straight from the textbook - allocate and initialize the bio.
731 * If we're writing, make sure the page is marked as dirty.
732 * Then submit it and wait.
733 */
734
735static int submit(int rw, pgoff_t page_off, void *page)
736{
737 int error = 0;
738 struct bio *bio;
739
740 bio = bio_alloc(GFP_ATOMIC, 1);
741 if (!bio)
742 return -ENOMEM;
743 bio->bi_sector = page_off * (PAGE_SIZE >> 9);
744 bio->bi_bdev = resume_bdev;
745 bio->bi_end_io = end_io;
746
747 if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) {
748 printk("swsusp: ERROR: adding page to bio at %ld\n",page_off);
749 error = -EFAULT;
750 goto Done;
751 }
752
753
754 atomic_set(&io_done, 1);
755 submit_bio(rw | (1 << BIO_RW_SYNC), bio);
756 while (atomic_read(&io_done))
757 yield();
758 if (rw == READ)
759 bio_set_pages_dirty(bio);
760 Done:
761 bio_put(bio);
762 return error;
763}
764
765static int bio_read_page(pgoff_t page_off, void *page)
766{
767 return submit(READ, page_off, page);
768}
769
770static int bio_write_page(pgoff_t page_off, void *page)
771{
772 return submit(WRITE, page_off, page);
773}
774
775/**
776 * The following functions allow us to read data using a swap map
777 * in a file-alike way
778 */
779
780static inline void release_swap_map_reader(struct swap_map_handle *handle)
781{
782 if (handle->cur)
783 free_page((unsigned long)handle->cur);
784 handle->cur = NULL;
785}
786
787static inline int get_swap_map_reader(struct swap_map_handle *handle,
788 swp_entry_t start)
789{
790 int error;
791
792 if (!swp_offset(start))
793 return -EINVAL;
794 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
795 if (!handle->cur)
796 return -ENOMEM;
797 error = bio_read_page(swp_offset(start), handle->cur);
798 if (error) {
799 release_swap_map_reader(handle);
800 return error;
801 }
802 handle->k = 0;
803 return 0;
804}
805
806static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf)
807{
808 unsigned long offset;
809 int error;
810
811 if (!handle->cur)
812 return -EINVAL;
813 offset = swp_offset(handle->cur->entries[handle->k]);
814 if (!offset)
815 return -EINVAL;
816 error = bio_read_page(offset, buf);
817 if (error)
818 return error;
819 if (++handle->k >= MAP_PAGE_SIZE) {
820 handle->k = 0;
821 offset = swp_offset(handle->cur->next_swap);
822 if (!offset)
823 release_swap_map_reader(handle);
824 else
825 error = bio_read_page(offset, handle->cur);
826 }
827 return error;
828}
829
830static int check_header(void)
831{
832 char *reason = NULL;
833
834 dump_info();
835 if (swsusp_info.version_code != LINUX_VERSION_CODE)
836 reason = "kernel version";
837 if (swsusp_info.num_physpages != num_physpages)
838 reason = "memory size";
839 if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname))
840 reason = "system type";
841 if (strcmp(swsusp_info.uts.release,system_utsname.release))
842 reason = "kernel release";
843 if (strcmp(swsusp_info.uts.version,system_utsname.version))
844 reason = "version";
845 if (strcmp(swsusp_info.uts.machine,system_utsname.machine))
846 reason = "machine";
847 if (reason) {
848 printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
849 return -EPERM;
850 }
851 return 0;
852}
853
854/**
855 * load_image_data - load the image data using the swap map handle
856 * @handle and store them using the page backup list @pblist
857 * (assume there are @nr_pages pages to load)
858 */
859
860static int load_image_data(struct pbe *pblist,
861 struct swap_map_handle *handle,
862 unsigned int nr_pages)
863{
864 int error;
865 unsigned int m;
866 struct pbe *p;
867
868 if (!pblist)
869 return -EINVAL;
870 printk("Loading image data pages (%u pages) ... ", nr_pages);
871 m = nr_pages / 100;
872 if (!m)
873 m = 1;
874 nr_pages = 0;
875 p = pblist;
876 while (p) {
877 error = swap_map_read_page(handle, (void *)p->address);
878 if (error)
879 break;
880 p = p->next;
881 if (!(nr_pages % m))
882 printk("\b\b\b\b%3d%%", nr_pages / m);
883 nr_pages++;
884 }
885 if (!error)
886 printk("\b\b\b\bdone\n");
887 return error;
888}
889
890/**
891 * unpack_orig_addresses - copy the elements of @buf[] (1 page) to
892 * the PBEs in the list starting at @pbe
893 */
894
895static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
896 struct pbe *pbe)
897{
898 int j;
899
900 for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
901 pbe->orig_address = buf[j];
902 pbe = pbe->next;
903 }
904 return pbe;
905}
906
907/**
908 * load_image_metadata - load the image metadata using the swap map
909 * handle @handle and put them into the PBEs in the list @pblist
910 */
911
912static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle)
913{
914 struct pbe *p;
915 unsigned long *buf;
916 unsigned int n = 0;
917 int error = 0;
918
919 printk("Loading image metadata ... ");
920 buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
921 if (!buf)
922 return -ENOMEM;
923 p = pblist;
924 while (p) {
925 error = swap_map_read_page(handle, buf);
926 if (error)
927 break;
928 p = unpack_orig_addresses(buf, p);
929 n++;
930 }
931 free_page((unsigned long)buf);
932 if (!error)
933 printk("done (%u pages loaded)\n", n);
934 return error;
935}
936
937int swsusp_read(struct pbe **pblist_ptr)
938{
939 int error;
940 struct pbe *p, *pblist;
941 struct swap_map_handle handle;
942 unsigned int nr_pages;
943
944 if (IS_ERR(resume_bdev)) {
945 pr_debug("swsusp: block device not initialised\n");
946 return PTR_ERR(resume_bdev);
947 }
948
949 error = get_swap_map_reader(&handle, swsusp_header.image);
950 if (!error)
951 error = swap_map_read_page(&handle, &swsusp_info);
952 if (!error)
953 error = check_header();
954 if (error)
955 return error;
956 nr_pages = swsusp_info.image_pages;
957 p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0);
958 if (!p)
959 return -ENOMEM;
960 error = load_image_metadata(p, &handle);
961 if (!error) {
962 mark_unsafe_pages(p);
963 pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
964 if (pblist)
965 copy_page_backup_list(pblist, p);
966 free_pagedir(p);
967 if (!pblist)
968 error = -ENOMEM;
969
970 /* Allocate memory for the image and read the data from swap */
971 if (!error)
972 error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
973 if (!error) {
974 release_eaten_pages();
975 error = load_image_data(pblist, &handle, nr_pages);
976 }
977 if (!error)
978 *pblist_ptr = pblist;
979 }
980 release_swap_map_reader(&handle);
981
982 blkdev_put(resume_bdev);
983
984 if (!error)
985 pr_debug("swsusp: Reading resume file was successful\n");
986 else
987 pr_debug("swsusp: Error %d resuming\n", error);
988 return error;
989}
990
991/**
992 * swsusp_check - Check for swsusp signature in the resume device
993 */
994
995int swsusp_check(void)
996{
997 int error;
998
999 resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
1000 if (!IS_ERR(resume_bdev)) {
1001 set_blocksize(resume_bdev, PAGE_SIZE);
1002 memset(&swsusp_header, 0, sizeof(swsusp_header));
1003 if ((error = bio_read_page(0, &swsusp_header)))
1004 return error;
1005 if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
1006 memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
1007 /* Reset swap signature now */
1008 error = bio_write_page(0, &swsusp_header);
1009 } else {
1010 return -EINVAL;
1011 }
1012 if (error)
1013 blkdev_put(resume_bdev);
1014 else
1015 pr_debug("swsusp: Signature found, resuming\n");
1016 } else {
1017 error = PTR_ERR(resume_bdev);
1018 }
1019
1020 if (error)
1021 pr_debug("swsusp: Error %d check for resume file\n", error);
1022
1023 return error;
1024}
1025
1026/**
1027 * swsusp_close - close swap device.
1028 */
1029
1030void swsusp_close(void)
1031{
1032 if (IS_ERR(resume_bdev)) {
1033 pr_debug("swsusp: block device not initialised\n");
1034 return;
1035 }
1036
1037 blkdev_put(resume_bdev);
1038}
diff --git a/kernel/power/user.c b/kernel/power/user.c
new file mode 100644
index 000000000000..3f1539fbe48a
--- /dev/null
+++ b/kernel/power/user.c
@@ -0,0 +1,333 @@
1/*
2 * linux/kernel/power/user.c
3 *
4 * This file provides the user space interface for software suspend/resume.
5 *
6 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
7 *
8 * This file is released under the GPLv2.
9 *
10 */
11
12#include <linux/suspend.h>
13#include <linux/syscalls.h>
14#include <linux/string.h>
15#include <linux/device.h>
16#include <linux/miscdevice.h>
17#include <linux/mm.h>
18#include <linux/swap.h>
19#include <linux/swapops.h>
20#include <linux/pm.h>
21#include <linux/fs.h>
22
23#include <asm/uaccess.h>
24
25#include "power.h"
26
27#define SNAPSHOT_MINOR 231
28
29static struct snapshot_data {
30 struct snapshot_handle handle;
31 int swap;
32 struct bitmap_page *bitmap;
33 int mode;
34 char frozen;
35 char ready;
36} snapshot_state;
37
38static atomic_t device_available = ATOMIC_INIT(1);
39
40static int snapshot_open(struct inode *inode, struct file *filp)
41{
42 struct snapshot_data *data;
43
44 if (!atomic_add_unless(&device_available, -1, 0))
45 return -EBUSY;
46
47 if ((filp->f_flags & O_ACCMODE) == O_RDWR)
48 return -ENOSYS;
49
50 nonseekable_open(inode, filp);
51 data = &snapshot_state;
52 filp->private_data = data;
53 memset(&data->handle, 0, sizeof(struct snapshot_handle));
54 if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
55 data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device) : -1;
56 data->mode = O_RDONLY;
57 } else {
58 data->swap = -1;
59 data->mode = O_WRONLY;
60 }
61 data->bitmap = NULL;
62 data->frozen = 0;
63 data->ready = 0;
64
65 return 0;
66}
67
68static int snapshot_release(struct inode *inode, struct file *filp)
69{
70 struct snapshot_data *data;
71
72 swsusp_free();
73 data = filp->private_data;
74 free_all_swap_pages(data->swap, data->bitmap);
75 free_bitmap(data->bitmap);
76 if (data->frozen) {
77 down(&pm_sem);
78 thaw_processes();
79 enable_nonboot_cpus();
80 up(&pm_sem);
81 }
82 atomic_inc(&device_available);
83 return 0;
84}
85
86static ssize_t snapshot_read(struct file *filp, char __user *buf,
87 size_t count, loff_t *offp)
88{
89 struct snapshot_data *data;
90 ssize_t res;
91
92 data = filp->private_data;
93 res = snapshot_read_next(&data->handle, count);
94 if (res > 0) {
95 if (copy_to_user(buf, data_of(data->handle), res))
96 res = -EFAULT;
97 else
98 *offp = data->handle.offset;
99 }
100 return res;
101}
102
103static ssize_t snapshot_write(struct file *filp, const char __user *buf,
104 size_t count, loff_t *offp)
105{
106 struct snapshot_data *data;
107 ssize_t res;
108
109 data = filp->private_data;
110 res = snapshot_write_next(&data->handle, count);
111 if (res > 0) {
112 if (copy_from_user(data_of(data->handle), buf, res))
113 res = -EFAULT;
114 else
115 *offp = data->handle.offset;
116 }
117 return res;
118}
119
120static int snapshot_ioctl(struct inode *inode, struct file *filp,
121 unsigned int cmd, unsigned long arg)
122{
123 int error = 0;
124 struct snapshot_data *data;
125 loff_t offset, avail;
126
127 if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
128 return -ENOTTY;
129 if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
130 return -ENOTTY;
131 if (!capable(CAP_SYS_ADMIN))
132 return -EPERM;
133
134 data = filp->private_data;
135
136 switch (cmd) {
137
138 case SNAPSHOT_FREEZE:
139 if (data->frozen)
140 break;
141 down(&pm_sem);
142 disable_nonboot_cpus();
143 if (freeze_processes()) {
144 thaw_processes();
145 enable_nonboot_cpus();
146 error = -EBUSY;
147 }
148 up(&pm_sem);
149 if (!error)
150 data->frozen = 1;
151 break;
152
153 case SNAPSHOT_UNFREEZE:
154 if (!data->frozen)
155 break;
156 down(&pm_sem);
157 thaw_processes();
158 enable_nonboot_cpus();
159 up(&pm_sem);
160 data->frozen = 0;
161 break;
162
163 case SNAPSHOT_ATOMIC_SNAPSHOT:
164 if (data->mode != O_RDONLY || !data->frozen || data->ready) {
165 error = -EPERM;
166 break;
167 }
168 down(&pm_sem);
169 /* Free memory before shutting down devices. */
170 error = swsusp_shrink_memory();
171 if (!error) {
172 error = device_suspend(PMSG_FREEZE);
173 if (!error) {
174 in_suspend = 1;
175 error = swsusp_suspend();
176 device_resume();
177 }
178 }
179 up(&pm_sem);
180 if (!error)
181 error = put_user(in_suspend, (unsigned int __user *)arg);
182 if (!error)
183 data->ready = 1;
184 break;
185
186 case SNAPSHOT_ATOMIC_RESTORE:
187 if (data->mode != O_WRONLY || !data->frozen ||
188 !snapshot_image_loaded(&data->handle)) {
189 error = -EPERM;
190 break;
191 }
192 down(&pm_sem);
193 pm_prepare_console();
194 error = device_suspend(PMSG_FREEZE);
195 if (!error) {
196 error = swsusp_resume();
197 device_resume();
198 }
199 pm_restore_console();
200 up(&pm_sem);
201 break;
202
203 case SNAPSHOT_FREE:
204 swsusp_free();
205 memset(&data->handle, 0, sizeof(struct snapshot_handle));
206 data->ready = 0;
207 break;
208
209 case SNAPSHOT_SET_IMAGE_SIZE:
210 image_size = arg;
211 break;
212
213 case SNAPSHOT_AVAIL_SWAP:
214 avail = count_swap_pages(data->swap, 1);
215 avail <<= PAGE_SHIFT;
216 error = put_user(avail, (loff_t __user *)arg);
217 break;
218
219 case SNAPSHOT_GET_SWAP_PAGE:
220 if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
221 error = -ENODEV;
222 break;
223 }
224 if (!data->bitmap) {
225 data->bitmap = alloc_bitmap(count_swap_pages(data->swap, 0));
226 if (!data->bitmap) {
227 error = -ENOMEM;
228 break;
229 }
230 }
231 offset = alloc_swap_page(data->swap, data->bitmap);
232 if (offset) {
233 offset <<= PAGE_SHIFT;
234 error = put_user(offset, (loff_t __user *)arg);
235 } else {
236 error = -ENOSPC;
237 }
238 break;
239
240 case SNAPSHOT_FREE_SWAP_PAGES:
241 if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
242 error = -ENODEV;
243 break;
244 }
245 free_all_swap_pages(data->swap, data->bitmap);
246 free_bitmap(data->bitmap);
247 data->bitmap = NULL;
248 break;
249
250 case SNAPSHOT_SET_SWAP_FILE:
251 if (!data->bitmap) {
252 /*
253 * User space encodes device types as two-byte values,
254 * so we need to recode them
255 */
256 if (old_decode_dev(arg)) {
257 data->swap = swap_type_of(old_decode_dev(arg));
258 if (data->swap < 0)
259 error = -ENODEV;
260 } else {
261 data->swap = -1;
262 error = -EINVAL;
263 }
264 } else {
265 error = -EPERM;
266 }
267 break;
268
269 case SNAPSHOT_S2RAM:
270 if (!data->frozen) {
271 error = -EPERM;
272 break;
273 }
274
275 if (down_trylock(&pm_sem)) {
276 error = -EBUSY;
277 break;
278 }
279
280 if (pm_ops->prepare) {
281 error = pm_ops->prepare(PM_SUSPEND_MEM);
282 if (error)
283 goto OutS3;
284 }
285
286 /* Put devices to sleep */
287 error = device_suspend(PMSG_SUSPEND);
288 if (error) {
289 printk(KERN_ERR "Failed to suspend some devices.\n");
290 } else {
291 /* Enter S3, system is already frozen */
292 suspend_enter(PM_SUSPEND_MEM);
293
294 /* Wake up devices */
295 device_resume();
296 }
297
298 if (pm_ops->finish)
299 pm_ops->finish(PM_SUSPEND_MEM);
300
301OutS3:
302 up(&pm_sem);
303 break;
304
305 default:
306 error = -ENOTTY;
307
308 }
309
310 return error;
311}
312
313static struct file_operations snapshot_fops = {
314 .open = snapshot_open,
315 .release = snapshot_release,
316 .read = snapshot_read,
317 .write = snapshot_write,
318 .llseek = no_llseek,
319 .ioctl = snapshot_ioctl,
320};
321
322static struct miscdevice snapshot_device = {
323 .minor = SNAPSHOT_MINOR,
324 .name = "snapshot",
325 .fops = &snapshot_fops,
326};
327
328static int __init snapshot_device_init(void)
329{
330 return misc_register(&snapshot_device);
331};
332
333device_initcall(snapshot_device_init);
diff --git a/kernel/profile.c b/kernel/profile.c
index f89248e6d704..ad81f799a9b4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -23,6 +23,7 @@
23#include <linux/cpu.h> 23#include <linux/cpu.h>
24#include <linux/profile.h> 24#include <linux/profile.h>
25#include <linux/highmem.h> 25#include <linux/highmem.h>
26#include <linux/mutex.h>
26#include <asm/sections.h> 27#include <asm/sections.h>
27#include <asm/semaphore.h> 28#include <asm/semaphore.h>
28 29
@@ -44,7 +45,7 @@ static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
44#ifdef CONFIG_SMP 45#ifdef CONFIG_SMP
45static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 46static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
46static DEFINE_PER_CPU(int, cpu_profile_flip); 47static DEFINE_PER_CPU(int, cpu_profile_flip);
47static DECLARE_MUTEX(profile_flip_mutex); 48static DEFINE_MUTEX(profile_flip_mutex);
48#endif /* CONFIG_SMP */ 49#endif /* CONFIG_SMP */
49 50
50static int __init profile_setup(char * str) 51static int __init profile_setup(char * str)
@@ -243,7 +244,7 @@ static void profile_flip_buffers(void)
243{ 244{
244 int i, j, cpu; 245 int i, j, cpu;
245 246
246 down(&profile_flip_mutex); 247 mutex_lock(&profile_flip_mutex);
247 j = per_cpu(cpu_profile_flip, get_cpu()); 248 j = per_cpu(cpu_profile_flip, get_cpu());
248 put_cpu(); 249 put_cpu();
249 on_each_cpu(__profile_flip_buffers, NULL, 0, 1); 250 on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
@@ -259,14 +260,14 @@ static void profile_flip_buffers(void)
259 hits[i].hits = hits[i].pc = 0; 260 hits[i].hits = hits[i].pc = 0;
260 } 261 }
261 } 262 }
262 up(&profile_flip_mutex); 263 mutex_unlock(&profile_flip_mutex);
263} 264}
264 265
265static void profile_discard_flip_buffers(void) 266static void profile_discard_flip_buffers(void)
266{ 267{
267 int i, cpu; 268 int i, cpu;
268 269
269 down(&profile_flip_mutex); 270 mutex_lock(&profile_flip_mutex);
270 i = per_cpu(cpu_profile_flip, get_cpu()); 271 i = per_cpu(cpu_profile_flip, get_cpu());
271 put_cpu(); 272 put_cpu();
272 on_each_cpu(__profile_flip_buffers, NULL, 0, 1); 273 on_each_cpu(__profile_flip_buffers, NULL, 0, 1);
@@ -274,7 +275,7 @@ static void profile_discard_flip_buffers(void)
274 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; 275 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
275 memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); 276 memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
276 } 277 }
277 up(&profile_flip_mutex); 278 mutex_unlock(&profile_flip_mutex);
278} 279}
279 280
280void profile_hit(int type, void *__pc) 281void profile_hit(int type, void *__pc)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index fedf5e369755..6df1559b1c02 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -47,15 +47,16 @@
47#include <linux/notifier.h> 47#include <linux/notifier.h>
48#include <linux/rcupdate.h> 48#include <linux/rcupdate.h>
49#include <linux/cpu.h> 49#include <linux/cpu.h>
50#include <linux/mutex.h>
50 51
51/* Definition for rcupdate control block. */ 52/* Definition for rcupdate control block. */
52struct rcu_ctrlblk rcu_ctrlblk = { 53static struct rcu_ctrlblk rcu_ctrlblk = {
53 .cur = -300, 54 .cur = -300,
54 .completed = -300, 55 .completed = -300,
55 .lock = SPIN_LOCK_UNLOCKED, 56 .lock = SPIN_LOCK_UNLOCKED,
56 .cpumask = CPU_MASK_NONE, 57 .cpumask = CPU_MASK_NONE,
57}; 58};
58struct rcu_ctrlblk rcu_bh_ctrlblk = { 59static struct rcu_ctrlblk rcu_bh_ctrlblk = {
59 .cur = -300, 60 .cur = -300,
60 .completed = -300, 61 .completed = -300,
61 .lock = SPIN_LOCK_UNLOCKED, 62 .lock = SPIN_LOCK_UNLOCKED,
@@ -75,7 +76,7 @@ static int rsinterval = 1000;
75#endif 76#endif
76 77
77static atomic_t rcu_barrier_cpu_count; 78static atomic_t rcu_barrier_cpu_count;
78static struct semaphore rcu_barrier_sema; 79static DEFINE_MUTEX(rcu_barrier_mutex);
79static struct completion rcu_barrier_completion; 80static struct completion rcu_barrier_completion;
80 81
81#ifdef CONFIG_SMP 82#ifdef CONFIG_SMP
@@ -207,13 +208,13 @@ static void rcu_barrier_func(void *notused)
207void rcu_barrier(void) 208void rcu_barrier(void)
208{ 209{
209 BUG_ON(in_interrupt()); 210 BUG_ON(in_interrupt());
210 /* Take cpucontrol semaphore to protect against CPU hotplug */ 211 /* Take cpucontrol mutex to protect against CPU hotplug */
211 down(&rcu_barrier_sema); 212 mutex_lock(&rcu_barrier_mutex);
212 init_completion(&rcu_barrier_completion); 213 init_completion(&rcu_barrier_completion);
213 atomic_set(&rcu_barrier_cpu_count, 0); 214 atomic_set(&rcu_barrier_cpu_count, 0);
214 on_each_cpu(rcu_barrier_func, NULL, 0, 1); 215 on_each_cpu(rcu_barrier_func, NULL, 0, 1);
215 wait_for_completion(&rcu_barrier_completion); 216 wait_for_completion(&rcu_barrier_completion);
216 up(&rcu_barrier_sema); 217 mutex_unlock(&rcu_barrier_mutex);
217} 218}
218EXPORT_SYMBOL_GPL(rcu_barrier); 219EXPORT_SYMBOL_GPL(rcu_barrier);
219 220
@@ -549,7 +550,6 @@ static struct notifier_block __devinitdata rcu_nb = {
549 */ 550 */
550void __init rcu_init(void) 551void __init rcu_init(void)
551{ 552{
552 sema_init(&rcu_barrier_sema, 1);
553 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, 553 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
554 (void *)(long)smp_processor_id()); 554 (void *)(long)smp_processor_id());
555 /* Register notifier for non-boot CPUs */ 555 /* Register notifier for non-boot CPUs */
diff --git a/kernel/sched.c b/kernel/sched.c
index 6b6e0d70eb30..7ffaabd64f89 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -237,6 +237,7 @@ struct runqueue {
237 237
238 task_t *migration_thread; 238 task_t *migration_thread;
239 struct list_head migration_queue; 239 struct list_head migration_queue;
240 int cpu;
240#endif 241#endif
241 242
242#ifdef CONFIG_SCHEDSTATS 243#ifdef CONFIG_SCHEDSTATS
@@ -1654,6 +1655,9 @@ unsigned long nr_iowait(void)
1654/* 1655/*
1655 * double_rq_lock - safely lock two runqueues 1656 * double_rq_lock - safely lock two runqueues
1656 * 1657 *
1658 * We must take them in cpu order to match code in
1659 * dependent_sleeper and wake_dependent_sleeper.
1660 *
1657 * Note this does not disable interrupts like task_rq_lock, 1661 * Note this does not disable interrupts like task_rq_lock,
1658 * you need to do so manually before calling. 1662 * you need to do so manually before calling.
1659 */ 1663 */
@@ -1665,7 +1669,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1665 spin_lock(&rq1->lock); 1669 spin_lock(&rq1->lock);
1666 __acquire(rq2->lock); /* Fake it out ;) */ 1670 __acquire(rq2->lock); /* Fake it out ;) */
1667 } else { 1671 } else {
1668 if (rq1 < rq2) { 1672 if (rq1->cpu < rq2->cpu) {
1669 spin_lock(&rq1->lock); 1673 spin_lock(&rq1->lock);
1670 spin_lock(&rq2->lock); 1674 spin_lock(&rq2->lock);
1671 } else { 1675 } else {
@@ -1701,7 +1705,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
1701 __acquires(this_rq->lock) 1705 __acquires(this_rq->lock)
1702{ 1706{
1703 if (unlikely(!spin_trylock(&busiest->lock))) { 1707 if (unlikely(!spin_trylock(&busiest->lock))) {
1704 if (busiest < this_rq) { 1708 if (busiest->cpu < this_rq->cpu) {
1705 spin_unlock(&this_rq->lock); 1709 spin_unlock(&this_rq->lock);
1706 spin_lock(&busiest->lock); 1710 spin_lock(&busiest->lock);
1707 spin_lock(&this_rq->lock); 1711 spin_lock(&this_rq->lock);
@@ -2869,7 +2873,7 @@ asmlinkage void __sched schedule(void)
2869 */ 2873 */
2870 if (likely(!current->exit_state)) { 2874 if (likely(!current->exit_state)) {
2871 if (unlikely(in_atomic())) { 2875 if (unlikely(in_atomic())) {
2872 printk(KERN_ERR "scheduling while atomic: " 2876 printk(KERN_ERR "BUG: scheduling while atomic: "
2873 "%s/0x%08x/%d\n", 2877 "%s/0x%08x/%d\n",
2874 current->comm, preempt_count(), current->pid); 2878 current->comm, preempt_count(), current->pid);
2875 dump_stack(); 2879 dump_stack();
@@ -6029,6 +6033,7 @@ void __init sched_init(void)
6029 rq->push_cpu = 0; 6033 rq->push_cpu = 0;
6030 rq->migration_thread = NULL; 6034 rq->migration_thread = NULL;
6031 INIT_LIST_HEAD(&rq->migration_queue); 6035 INIT_LIST_HEAD(&rq->migration_queue);
6036 rq->cpu = i;
6032#endif 6037#endif
6033 atomic_set(&rq->nr_iowait, 0); 6038 atomic_set(&rq->nr_iowait, 0);
6034 6039
@@ -6069,7 +6074,7 @@ void __might_sleep(char *file, int line)
6069 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 6074 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6070 return; 6075 return;
6071 prev_jiffy = jiffies; 6076 prev_jiffy = jiffies;
6072 printk(KERN_ERR "Debug: sleeping function called from invalid" 6077 printk(KERN_ERR "BUG: sleeping function called from invalid"
6073 " context at %s:%d\n", file, line); 6078 " context at %s:%d\n", file, line);
6074 printk("in_atomic():%d, irqs_disabled():%d\n", 6079 printk("in_atomic():%d, irqs_disabled():%d\n",
6075 in_atomic(), irqs_disabled()); 6080 in_atomic(), irqs_disabled());
diff --git a/kernel/signal.c b/kernel/signal.c
index ea154104a00b..75f7341b0c39 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1922,6 +1922,8 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1922 sigset_t *mask = &current->blocked; 1922 sigset_t *mask = &current->blocked;
1923 int signr = 0; 1923 int signr = 0;
1924 1924
1925 try_to_freeze();
1926
1925relock: 1927relock:
1926 spin_lock_irq(&current->sighand->siglock); 1928 spin_lock_irq(&current->sighand->siglock);
1927 for (;;) { 1929 for (;;) {
@@ -2099,10 +2101,11 @@ long do_no_restart_syscall(struct restart_block *param)
2099int sigprocmask(int how, sigset_t *set, sigset_t *oldset) 2101int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2100{ 2102{
2101 int error; 2103 int error;
2102 sigset_t old_block;
2103 2104
2104 spin_lock_irq(&current->sighand->siglock); 2105 spin_lock_irq(&current->sighand->siglock);
2105 old_block = current->blocked; 2106 if (oldset)
2107 *oldset = current->blocked;
2108
2106 error = 0; 2109 error = 0;
2107 switch (how) { 2110 switch (how) {
2108 case SIG_BLOCK: 2111 case SIG_BLOCK:
@@ -2119,8 +2122,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2119 } 2122 }
2120 recalc_sigpending(); 2123 recalc_sigpending();
2121 spin_unlock_irq(&current->sighand->siglock); 2124 spin_unlock_irq(&current->sighand->siglock);
2122 if (oldset) 2125
2123 *oldset = old_block;
2124 return error; 2126 return error;
2125} 2127}
2126 2128
@@ -2307,7 +2309,6 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
2307 2309
2308 timeout = schedule_timeout_interruptible(timeout); 2310 timeout = schedule_timeout_interruptible(timeout);
2309 2311
2310 try_to_freeze();
2311 spin_lock_irq(&current->sighand->siglock); 2312 spin_lock_irq(&current->sighand->siglock);
2312 sig = dequeue_signal(current, &these, &info); 2313 sig = dequeue_signal(current, &these, &info);
2313 current->blocked = current->real_blocked; 2314 current->blocked = current->real_blocked;
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 0375fcd5921d..d1b810782bc4 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -179,16 +179,16 @@ EXPORT_SYMBOL(_write_lock);
179#define BUILD_LOCK_OPS(op, locktype) \ 179#define BUILD_LOCK_OPS(op, locktype) \
180void __lockfunc _##op##_lock(locktype##_t *lock) \ 180void __lockfunc _##op##_lock(locktype##_t *lock) \
181{ \ 181{ \
182 preempt_disable(); \
183 for (;;) { \ 182 for (;;) { \
183 preempt_disable(); \
184 if (likely(_raw_##op##_trylock(lock))) \ 184 if (likely(_raw_##op##_trylock(lock))) \
185 break; \ 185 break; \
186 preempt_enable(); \ 186 preempt_enable(); \
187 \
187 if (!(lock)->break_lock) \ 188 if (!(lock)->break_lock) \
188 (lock)->break_lock = 1; \ 189 (lock)->break_lock = 1; \
189 while (!op##_can_lock(lock) && (lock)->break_lock) \ 190 while (!op##_can_lock(lock) && (lock)->break_lock) \
190 cpu_relax(); \ 191 cpu_relax(); \
191 preempt_disable(); \
192 } \ 192 } \
193 (lock)->break_lock = 0; \ 193 (lock)->break_lock = 0; \
194} \ 194} \
@@ -199,19 +199,18 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
199{ \ 199{ \
200 unsigned long flags; \ 200 unsigned long flags; \
201 \ 201 \
202 preempt_disable(); \
203 for (;;) { \ 202 for (;;) { \
203 preempt_disable(); \
204 local_irq_save(flags); \ 204 local_irq_save(flags); \
205 if (likely(_raw_##op##_trylock(lock))) \ 205 if (likely(_raw_##op##_trylock(lock))) \
206 break; \ 206 break; \
207 local_irq_restore(flags); \ 207 local_irq_restore(flags); \
208 \
209 preempt_enable(); \ 208 preempt_enable(); \
209 \
210 if (!(lock)->break_lock) \ 210 if (!(lock)->break_lock) \
211 (lock)->break_lock = 1; \ 211 (lock)->break_lock = 1; \
212 while (!op##_can_lock(lock) && (lock)->break_lock) \ 212 while (!op##_can_lock(lock) && (lock)->break_lock) \
213 cpu_relax(); \ 213 cpu_relax(); \
214 preempt_disable(); \
215 } \ 214 } \
216 (lock)->break_lock = 0; \ 215 (lock)->break_lock = 0; \
217 return flags; \ 216 return flags; \
diff --git a/kernel/sys.c b/kernel/sys.c
index f91218a5463e..c0fcad9f826c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1227,7 +1227,7 @@ asmlinkage long sys_setsid(void)
1227 struct pid *pid; 1227 struct pid *pid;
1228 int err = -EPERM; 1228 int err = -EPERM;
1229 1229
1230 down(&tty_sem); 1230 mutex_lock(&tty_mutex);
1231 write_lock_irq(&tasklist_lock); 1231 write_lock_irq(&tasklist_lock);
1232 1232
1233 pid = find_pid(PIDTYPE_PGID, group_leader->pid); 1233 pid = find_pid(PIDTYPE_PGID, group_leader->pid);
@@ -1241,7 +1241,7 @@ asmlinkage long sys_setsid(void)
1241 err = process_group(group_leader); 1241 err = process_group(group_leader);
1242out: 1242out:
1243 write_unlock_irq(&tasklist_lock); 1243 write_unlock_irq(&tasklist_lock);
1244 up(&tty_sem); 1244 mutex_unlock(&tty_mutex);
1245 return err; 1245 return err;
1246} 1246}
1247 1247
@@ -1677,9 +1677,6 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1677 * a lot simpler! (Which we're not doing right now because we're not 1677 * a lot simpler! (Which we're not doing right now because we're not
1678 * measuring them yet). 1678 * measuring them yet).
1679 * 1679 *
1680 * This expects to be called with tasklist_lock read-locked or better,
1681 * and the siglock not locked. It may momentarily take the siglock.
1682 *
1683 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have 1680 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1684 * races with threads incrementing their own counters. But since word 1681 * races with threads incrementing their own counters. But since word
1685 * reads are atomic, we either get new values or old values and we don't 1682 * reads are atomic, we either get new values or old values and we don't
@@ -1687,6 +1684,25 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1687 * the c* fields from p->signal from races with exit.c updating those 1684 * the c* fields from p->signal from races with exit.c updating those
1688 * fields when reaping, so a sample either gets all the additions of a 1685 * fields when reaping, so a sample either gets all the additions of a
1689 * given child after it's reaped, or none so this sample is before reaping. 1686 * given child after it's reaped, or none so this sample is before reaping.
1687 *
1688 * tasklist_lock locking optimisation:
1689 * If we are current and single threaded, we do not need to take the tasklist
1690 * lock or the siglock. No one else can take our signal_struct away,
1691 * no one else can reap the children to update signal->c* counters, and
1692 * no one else can race with the signal-> fields.
1693 * If we do not take the tasklist_lock, the signal-> fields could be read
1694 * out of order while another thread was just exiting. So we place a
1695 * read memory barrier when we avoid the lock. On the writer side,
1696 * write memory barrier is implied in __exit_signal as __exit_signal releases
1697 * the siglock spinlock after updating the signal-> fields.
1698 *
1699 * We don't really need the siglock when we access the non c* fields
1700 * of the signal_struct (for RUSAGE_SELF) even in multithreaded
1701 * case, since we take the tasklist lock for read and the non c* signal->
1702 * fields are updated only in __exit_signal, which is called with
1703 * tasklist_lock taken for write, hence these two threads cannot execute
1704 * concurrently.
1705 *
1690 */ 1706 */
1691 1707
1692static void k_getrusage(struct task_struct *p, int who, struct rusage *r) 1708static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
@@ -1694,13 +1710,23 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1694 struct task_struct *t; 1710 struct task_struct *t;
1695 unsigned long flags; 1711 unsigned long flags;
1696 cputime_t utime, stime; 1712 cputime_t utime, stime;
1713 int need_lock = 0;
1697 1714
1698 memset((char *) r, 0, sizeof *r); 1715 memset((char *) r, 0, sizeof *r);
1716 utime = stime = cputime_zero;
1699 1717
1700 if (unlikely(!p->signal)) 1718 if (p != current || !thread_group_empty(p))
1701 return; 1719 need_lock = 1;
1702 1720
1703 utime = stime = cputime_zero; 1721 if (need_lock) {
1722 read_lock(&tasklist_lock);
1723 if (unlikely(!p->signal)) {
1724 read_unlock(&tasklist_lock);
1725 return;
1726 }
1727 } else
1728 /* See locking comments above */
1729 smp_rmb();
1704 1730
1705 switch (who) { 1731 switch (who) {
1706 case RUSAGE_BOTH: 1732 case RUSAGE_BOTH:
@@ -1740,6 +1766,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1740 BUG(); 1766 BUG();
1741 } 1767 }
1742 1768
1769 if (need_lock)
1770 read_unlock(&tasklist_lock);
1743 cputime_to_timeval(utime, &r->ru_utime); 1771 cputime_to_timeval(utime, &r->ru_utime);
1744 cputime_to_timeval(stime, &r->ru_stime); 1772 cputime_to_timeval(stime, &r->ru_stime);
1745} 1773}
@@ -1747,9 +1775,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1747int getrusage(struct task_struct *p, int who, struct rusage __user *ru) 1775int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1748{ 1776{
1749 struct rusage r; 1777 struct rusage r;
1750 read_lock(&tasklist_lock);
1751 k_getrusage(p, who, &r); 1778 k_getrusage(p, who, &r);
1752 read_unlock(&tasklist_lock);
1753 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; 1779 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1754} 1780}
1755 1781
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index f5fef948a415..f8ac9fa95de1 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -44,12 +44,13 @@
44#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/rslib.h> 45#include <linux/rslib.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/mutex.h>
47#include <asm/semaphore.h> 48#include <asm/semaphore.h>
48 49
49/* This list holds all currently allocated rs control structures */ 50/* This list holds all currently allocated rs control structures */
50static LIST_HEAD (rslist); 51static LIST_HEAD (rslist);
51/* Protection for the list */ 52/* Protection for the list */
52static DECLARE_MUTEX(rslistlock); 53static DEFINE_MUTEX(rslistlock);
53 54
54/** 55/**
55 * rs_init - Initialize a Reed-Solomon codec 56 * rs_init - Initialize a Reed-Solomon codec
@@ -161,7 +162,7 @@ errrs:
161 */ 162 */
162void free_rs(struct rs_control *rs) 163void free_rs(struct rs_control *rs)
163{ 164{
164 down(&rslistlock); 165 mutex_lock(&rslistlock);
165 rs->users--; 166 rs->users--;
166 if(!rs->users) { 167 if(!rs->users) {
167 list_del(&rs->list); 168 list_del(&rs->list);
@@ -170,7 +171,7 @@ void free_rs(struct rs_control *rs)
170 kfree(rs->genpoly); 171 kfree(rs->genpoly);
171 kfree(rs); 172 kfree(rs);
172 } 173 }
173 up(&rslistlock); 174 mutex_unlock(&rslistlock);
174} 175}
175 176
176/** 177/**
@@ -201,7 +202,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
201 if (nroots < 0 || nroots >= (1<<symsize)) 202 if (nroots < 0 || nroots >= (1<<symsize))
202 return NULL; 203 return NULL;
203 204
204 down(&rslistlock); 205 mutex_lock(&rslistlock);
205 206
206 /* Walk through the list and look for a matching entry */ 207 /* Walk through the list and look for a matching entry */
207 list_for_each(tmp, &rslist) { 208 list_for_each(tmp, &rslist) {
@@ -228,7 +229,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
228 list_add(&rs->list, &rslist); 229 list_add(&rs->list, &rslist);
229 } 230 }
230out: 231out:
231 up(&rslistlock); 232 mutex_unlock(&rslistlock);
232 return rs; 233 return rs;
233} 234}
234 235
diff --git a/mm/readahead.c b/mm/readahead.c
index 301b36c4a0ce..0f142a40984b 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -555,6 +555,7 @@ recheck:
555out: 555out:
556 return ra->prev_page + 1; 556 return ra->prev_page + 1;
557} 557}
558EXPORT_SYMBOL_GPL(page_cache_readahead);
558 559
559/* 560/*
560 * handle_ra_miss() is called when it is known that a page which should have 561 * handle_ra_miss() is called when it is known that a page which should have
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 365ed6ff182d..39aa9d129612 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -45,7 +45,7 @@ static const char Unused_offset[] = "Unused swap offset entry ";
45 45
46struct swap_list_t swap_list = {-1, -1}; 46struct swap_list_t swap_list = {-1, -1};
47 47
48struct swap_info_struct swap_info[MAX_SWAPFILES]; 48static struct swap_info_struct swap_info[MAX_SWAPFILES];
49 49
50static DEFINE_MUTEX(swapon_mutex); 50static DEFINE_MUTEX(swapon_mutex);
51 51
@@ -417,6 +417,61 @@ void free_swap_and_cache(swp_entry_t entry)
417 } 417 }
418} 418}
419 419
420#ifdef CONFIG_SOFTWARE_SUSPEND
421/*
422 * Find the swap type that corresponds to given device (if any)
423 *
424 * This is needed for software suspend and is done in such a way that inode
425 * aliasing is allowed.
426 */
427int swap_type_of(dev_t device)
428{
429 int i;
430
431 spin_lock(&swap_lock);
432 for (i = 0; i < nr_swapfiles; i++) {
433 struct inode *inode;
434
435 if (!(swap_info[i].flags & SWP_WRITEOK))
436 continue;
437 if (!device) {
438 spin_unlock(&swap_lock);
439 return i;
440 }
441 inode = swap_info->swap_file->f_dentry->d_inode;
442 if (S_ISBLK(inode->i_mode) &&
443 device == MKDEV(imajor(inode), iminor(inode))) {
444 spin_unlock(&swap_lock);
445 return i;
446 }
447 }
448 spin_unlock(&swap_lock);
449 return -ENODEV;
450}
451
452/*
453 * Return either the total number of swap pages of given type, or the number
454 * of free pages of that type (depending on @free)
455 *
456 * This is needed for software suspend
457 */
458unsigned int count_swap_pages(int type, int free)
459{
460 unsigned int n = 0;
461
462 if (type < nr_swapfiles) {
463 spin_lock(&swap_lock);
464 if (swap_info[type].flags & SWP_WRITEOK) {
465 n = swap_info[type].pages;
466 if (free)
467 n -= swap_info[type].inuse_pages;
468 }
469 spin_unlock(&swap_lock);
470 }
471 return n;
472}
473#endif
474
420/* 475/*
421 * No need to decide whether this PTE shares the swap entry with others, 476 * No need to decide whether this PTE shares the swap entry with others,
422 * just let do_wp_page work it out if a write is requested later - to 477 * just let do_wp_page work it out if a write is requested later - to
diff --git a/security/seclvl.c b/security/seclvl.c
index 8529ea6f7aa8..441beaf1bbc1 100644
--- a/security/seclvl.c
+++ b/security/seclvl.c
@@ -8,6 +8,7 @@
8 * Copyright (c) 2001 WireX Communications, Inc <chris@wirex.com> 8 * Copyright (c) 2001 WireX Communications, Inc <chris@wirex.com>
9 * Copyright (c) 2001 Greg Kroah-Hartman <greg@kroah.com> 9 * Copyright (c) 2001 Greg Kroah-Hartman <greg@kroah.com>
10 * Copyright (c) 2002 International Business Machines <robb@austin.ibm.com> 10 * Copyright (c) 2002 International Business Machines <robb@austin.ibm.com>
11 * Copyright (c) 2006 Davi E. M. Arnaut <davi.arnaut@gmail.com>
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
@@ -31,6 +32,7 @@
31#include <linux/kobject.h> 32#include <linux/kobject.h>
32#include <linux/crypto.h> 33#include <linux/crypto.h>
33#include <asm/scatterlist.h> 34#include <asm/scatterlist.h>
35#include <linux/scatterlist.h>
34#include <linux/gfp.h> 36#include <linux/gfp.h>
35#include <linux/sysfs.h> 37#include <linux/sysfs.h>
36 38
@@ -194,35 +196,27 @@ static unsigned char hashedPassword[SHA1_DIGEST_SIZE];
194 * people... 196 * people...
195 */ 197 */
196static int 198static int
197plaintext_to_sha1(unsigned char *hash, const char *plaintext, int len) 199plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len)
198{ 200{
199 char *pgVirtAddr;
200 struct crypto_tfm *tfm; 201 struct crypto_tfm *tfm;
201 struct scatterlist sg[1]; 202 struct scatterlist sg;
202 if (len > PAGE_SIZE) { 203 if (len > PAGE_SIZE) {
203 seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d " 204 seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d "
204 "characters). Largest possible is %lu " 205 "characters). Largest possible is %lu "
205 "bytes.\n", len, PAGE_SIZE); 206 "bytes.\n", len, PAGE_SIZE);
206 return -ENOMEM; 207 return -EINVAL;
207 } 208 }
208 tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP); 209 tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
209 if (tfm == NULL) { 210 if (tfm == NULL) {
210 seclvl_printk(0, KERN_ERR, 211 seclvl_printk(0, KERN_ERR,
211 "Failed to load transform for SHA1\n"); 212 "Failed to load transform for SHA1\n");
212 return -ENOSYS; 213 return -EINVAL;
213 } 214 }
214 // Just get a new page; don't play around with page boundaries 215 sg_init_one(&sg, (u8 *)plaintext, len);
215 // and scatterlists.
216 pgVirtAddr = (char *)__get_free_page(GFP_KERNEL);
217 sg[0].page = virt_to_page(pgVirtAddr);
218 sg[0].offset = 0;
219 sg[0].length = len;
220 strncpy(pgVirtAddr, plaintext, len);
221 crypto_digest_init(tfm); 216 crypto_digest_init(tfm);
222 crypto_digest_update(tfm, sg, 1); 217 crypto_digest_update(tfm, &sg, 1);
223 crypto_digest_final(tfm, hash); 218 crypto_digest_final(tfm, hash);
224 crypto_free_tfm(tfm); 219 crypto_free_tfm(tfm);
225 free_page((unsigned long)pgVirtAddr);
226 return 0; 220 return 0;
227} 221}
228 222
@@ -234,11 +228,9 @@ static ssize_t
234passwd_write_file(struct file * file, const char __user * buf, 228passwd_write_file(struct file * file, const char __user * buf,
235 size_t count, loff_t *ppos) 229 size_t count, loff_t *ppos)
236{ 230{
237 int i; 231 char *p;
238 unsigned char tmp[SHA1_DIGEST_SIZE];
239 char *page;
240 int rc;
241 int len; 232 int len;
233 unsigned char tmp[SHA1_DIGEST_SIZE];
242 234
243 if (!*passwd && !*sha1_passwd) { 235 if (!*passwd && !*sha1_passwd) {
244 seclvl_printk(0, KERN_ERR, "Attempt to password-unlock the " 236 seclvl_printk(0, KERN_ERR, "Attempt to password-unlock the "
@@ -251,38 +243,39 @@ passwd_write_file(struct file * file, const char __user * buf,
251 return -EINVAL; 243 return -EINVAL;
252 } 244 }
253 245
254 if (count < 0 || count >= PAGE_SIZE) 246 if (count >= PAGE_SIZE)
255 return -EINVAL; 247 return -EINVAL;
256 if (*ppos != 0) 248 if (*ppos != 0)
257 return -EINVAL; 249 return -EINVAL;
258 page = (char *)get_zeroed_page(GFP_KERNEL); 250 p = kmalloc(count, GFP_KERNEL);
259 if (!page) 251 if (!p)
260 return -ENOMEM; 252 return -ENOMEM;
261 len = -EFAULT; 253 len = -EFAULT;
262 if (copy_from_user(page, buf, count)) 254 if (copy_from_user(p, buf, count))
263 goto out; 255 goto out;
264 256
265 len = strlen(page); 257 len = count;
266 /* ``echo "secret" > seclvl/passwd'' includes a newline */ 258 /* ``echo "secret" > seclvl/passwd'' includes a newline */
267 if (page[len - 1] == '\n') 259 if (p[len - 1] == '\n')
268 len--; 260 len--;
269 /* Hash the password, then compare the hashed values */ 261 /* Hash the password, then compare the hashed values */
270 if ((rc = plaintext_to_sha1(tmp, page, len))) { 262 if ((len = plaintext_to_sha1(tmp, p, len))) {
271 seclvl_printk(0, KERN_ERR, "Error hashing password: rc = " 263 seclvl_printk(0, KERN_ERR, "Error hashing password: rc = "
272 "[%d]\n", rc); 264 "[%d]\n", len);
273 return rc; 265 goto out;
274 }
275 for (i = 0; i < SHA1_DIGEST_SIZE; i++) {
276 if (hashedPassword[i] != tmp[i])
277 return -EPERM;
278 } 266 }
267
268 len = -EPERM;
269 if (memcmp(hashedPassword, tmp, SHA1_DIGEST_SIZE))
270 goto out;
271
279 seclvl_printk(0, KERN_INFO, 272 seclvl_printk(0, KERN_INFO,
280 "Password accepted; seclvl reduced to 0.\n"); 273 "Password accepted; seclvl reduced to 0.\n");
281 seclvl = 0; 274 seclvl = 0;
282 len = count; 275 len = count;
283 276
284out: 277out:
285 free_page((unsigned long)page); 278 kfree (p);
286 return len; 279 return len;
287} 280}
288 281
@@ -295,13 +288,11 @@ static struct file_operations passwd_file_ops = {
295 */ 288 */
296static int seclvl_ptrace(struct task_struct *parent, struct task_struct *child) 289static int seclvl_ptrace(struct task_struct *parent, struct task_struct *child)
297{ 290{
298 if (seclvl >= 0) { 291 if (seclvl >= 0 && child->pid == 1) {
299 if (child->pid == 1) { 292 seclvl_printk(1, KERN_WARNING, "Attempt to ptrace "
300 seclvl_printk(1, KERN_WARNING, "Attempt to ptrace " 293 "the init process dissallowed in "
301 "the init process dissallowed in " 294 "secure level %d\n", seclvl);
302 "secure level %d\n", seclvl); 295 return -EPERM;
303 return -EPERM;
304 }
305 } 296 }
306 return 0; 297 return 0;
307} 298}
@@ -312,55 +303,54 @@ static int seclvl_ptrace(struct task_struct *parent, struct task_struct *child)
312 */ 303 */
313static int seclvl_capable(struct task_struct *tsk, int cap) 304static int seclvl_capable(struct task_struct *tsk, int cap)
314{ 305{
306 int rc = 0;
307
315 /* init can do anything it wants */ 308 /* init can do anything it wants */
316 if (tsk->pid == 1) 309 if (tsk->pid == 1)
317 return 0; 310 return 0;
318 311
319 switch (seclvl) { 312 if (seclvl > 0) {
320 case 2: 313 rc = -EPERM;
321 /* fall through */ 314
322 case 1: 315 if (cap == CAP_LINUX_IMMUTABLE)
323 if (cap == CAP_LINUX_IMMUTABLE) {
324 seclvl_printk(1, KERN_WARNING, "Attempt to modify " 316 seclvl_printk(1, KERN_WARNING, "Attempt to modify "
325 "the IMMUTABLE and/or APPEND extended " 317 "the IMMUTABLE and/or APPEND extended "
326 "attribute on a file with the IMMUTABLE " 318 "attribute on a file with the IMMUTABLE "
327 "and/or APPEND extended attribute set " 319 "and/or APPEND extended attribute set "
328 "denied in seclvl [%d]\n", seclvl); 320 "denied in seclvl [%d]\n", seclvl);
329 return -EPERM; 321 else if (cap == CAP_SYS_RAWIO)
330 } else if (cap == CAP_SYS_RAWIO) { // Somewhat broad...
331 seclvl_printk(1, KERN_WARNING, "Attempt to perform " 322 seclvl_printk(1, KERN_WARNING, "Attempt to perform "
332 "raw I/O while in secure level [%d] " 323 "raw I/O while in secure level [%d] "
333 "denied\n", seclvl); 324 "denied\n", seclvl);
334 return -EPERM; 325 else if (cap == CAP_NET_ADMIN)
335 } else if (cap == CAP_NET_ADMIN) {
336 seclvl_printk(1, KERN_WARNING, "Attempt to perform " 326 seclvl_printk(1, KERN_WARNING, "Attempt to perform "
337 "network administrative task while " 327 "network administrative task while "
338 "in secure level [%d] denied\n", seclvl); 328 "in secure level [%d] denied\n", seclvl);
339 return -EPERM; 329 else if (cap == CAP_SETUID)
340 } else if (cap == CAP_SETUID) {
341 seclvl_printk(1, KERN_WARNING, "Attempt to setuid " 330 seclvl_printk(1, KERN_WARNING, "Attempt to setuid "
342 "while in secure level [%d] denied\n", 331 "while in secure level [%d] denied\n",
343 seclvl); 332 seclvl);
344 return -EPERM; 333 else if (cap == CAP_SETGID)
345 } else if (cap == CAP_SETGID) {
346 seclvl_printk(1, KERN_WARNING, "Attempt to setgid " 334 seclvl_printk(1, KERN_WARNING, "Attempt to setgid "
347 "while in secure level [%d] denied\n", 335 "while in secure level [%d] denied\n",
348 seclvl); 336 seclvl);
349 } else if (cap == CAP_SYS_MODULE) { 337 else if (cap == CAP_SYS_MODULE)
350 seclvl_printk(1, KERN_WARNING, "Attempt to perform " 338 seclvl_printk(1, KERN_WARNING, "Attempt to perform "
351 "a module operation while in secure " 339 "a module operation while in secure "
352 "level [%d] denied\n", seclvl); 340 "level [%d] denied\n", seclvl);
353 return -EPERM; 341 else
354 } 342 rc = 0;
355 break;
356 default:
357 break;
358 } 343 }
359 /* from dummy.c */ 344
360 if (cap_is_fs_cap(cap) ? tsk->fsuid == 0 : tsk->euid == 0) 345 if (!rc) {
361 return 0; /* capability granted */ 346 if (!(cap_is_fs_cap(cap) ? tsk->fsuid == 0 : tsk->euid == 0))
362 seclvl_printk(1, KERN_WARNING, "Capability denied\n"); 347 rc = -EPERM;
363 return -EPERM; /* capability denied */ 348 }
349
350 if (rc)
351 seclvl_printk(1, KERN_WARNING, "Capability denied\n");
352
353 return rc;
364} 354}
365 355
366/** 356/**
@@ -466,12 +456,9 @@ static int seclvl_inode_setattr(struct dentry *dentry, struct iattr *iattr)
466static void seclvl_file_free_security(struct file *filp) 456static void seclvl_file_free_security(struct file *filp)
467{ 457{
468 struct dentry *dentry = filp->f_dentry; 458 struct dentry *dentry = filp->f_dentry;
469 struct inode *inode = NULL;
470 459
471 if (dentry) { 460 if (dentry)
472 inode = dentry->d_inode; 461 seclvl_bd_release(dentry->d_inode);
473 seclvl_bd_release(inode);
474 }
475} 462}
476 463
477/** 464/**
@@ -479,9 +466,7 @@ static void seclvl_file_free_security(struct file *filp)
479 */ 466 */
480static int seclvl_umount(struct vfsmount *mnt, int flags) 467static int seclvl_umount(struct vfsmount *mnt, int flags)
481{ 468{
482 if (current->pid == 1) 469 if (current->pid != 1 && seclvl == 2) {
483 return 0;
484 if (seclvl == 2) {
485 seclvl_printk(1, KERN_WARNING, "Attempt to unmount in secure " 470 seclvl_printk(1, KERN_WARNING, "Attempt to unmount in secure "
486 "level %d\n", seclvl); 471 "level %d\n", seclvl);
487 return -EPERM; 472 return -EPERM;
@@ -505,8 +490,9 @@ static struct security_operations seclvl_ops = {
505static int processPassword(void) 490static int processPassword(void)
506{ 491{
507 int rc = 0; 492 int rc = 0;
508 hashedPassword[0] = '\0';
509 if (*passwd) { 493 if (*passwd) {
494 char *p;
495
510 if (*sha1_passwd) { 496 if (*sha1_passwd) {
511 seclvl_printk(0, KERN_ERR, "Error: Both " 497 seclvl_printk(0, KERN_ERR, "Error: Both "
512 "passwd and sha1_passwd " 498 "passwd and sha1_passwd "
@@ -514,12 +500,16 @@ static int processPassword(void)
514 "exclusive.\n"); 500 "exclusive.\n");
515 return -EINVAL; 501 return -EINVAL;
516 } 502 }
517 if ((rc = plaintext_to_sha1(hashedPassword, passwd, 503
518 strlen(passwd)))) { 504 p = kstrdup(passwd, GFP_KERNEL);
505 if (p == NULL)
506 return -ENOMEM;
507
508 if ((rc = plaintext_to_sha1(hashedPassword, p, strlen(p))))
519 seclvl_printk(0, KERN_ERR, "Error: SHA1 support not " 509 seclvl_printk(0, KERN_ERR, "Error: SHA1 support not "
520 "in kernel\n"); 510 "in kernel\n");
521 return rc; 511
522 } 512 kfree (p);
523 /* All static data goes to the BSS, which zero's the 513 /* All static data goes to the BSS, which zero's the
524 * plaintext password out for us. */ 514 * plaintext password out for us. */
525 } else if (*sha1_passwd) { // Base 16 515 } else if (*sha1_passwd) { // Base 16
@@ -542,7 +532,7 @@ static int processPassword(void)
542 sha1_passwd[i + 2] = tmp; 532 sha1_passwd[i + 2] = tmp;
543 } 533 }
544 } 534 }
545 return 0; 535 return rc;
546} 536}
547 537
548/** 538/**
@@ -552,28 +542,46 @@ struct dentry *dir_ino, *seclvl_ino, *passwd_ino;
552 542
553static int seclvlfs_register(void) 543static int seclvlfs_register(void)
554{ 544{
545 int rc = 0;
546
555 dir_ino = securityfs_create_dir("seclvl", NULL); 547 dir_ino = securityfs_create_dir("seclvl", NULL);
556 if (!dir_ino) 548
557 return -EFAULT; 549 if (IS_ERR(dir_ino))
550 return PTR_ERR(dir_ino);
558 551
559 seclvl_ino = securityfs_create_file("seclvl", S_IRUGO | S_IWUSR, 552 seclvl_ino = securityfs_create_file("seclvl", S_IRUGO | S_IWUSR,
560 dir_ino, &seclvl, &seclvl_file_ops); 553 dir_ino, &seclvl, &seclvl_file_ops);
561 if (!seclvl_ino) 554 if (IS_ERR(seclvl_ino)) {
555 rc = PTR_ERR(seclvl_ino);
562 goto out_deldir; 556 goto out_deldir;
557 }
563 if (*passwd || *sha1_passwd) { 558 if (*passwd || *sha1_passwd) {
564 passwd_ino = securityfs_create_file("passwd", S_IRUGO | S_IWUSR, 559 passwd_ino = securityfs_create_file("passwd", S_IRUGO | S_IWUSR,
565 dir_ino, NULL, &passwd_file_ops); 560 dir_ino, NULL, &passwd_file_ops);
566 if (!passwd_ino) 561 if (IS_ERR(passwd_ino)) {
562 rc = PTR_ERR(passwd_ino);
567 goto out_delf; 563 goto out_delf;
564 }
568 } 565 }
569 return 0; 566 return rc;
567
568out_delf:
569 securityfs_remove(seclvl_ino);
570 570
571out_deldir: 571out_deldir:
572 securityfs_remove(dir_ino); 572 securityfs_remove(dir_ino);
573out_delf: 573
574 return rc;
575}
576
577static void seclvlfs_unregister(void)
578{
574 securityfs_remove(seclvl_ino); 579 securityfs_remove(seclvl_ino);
575 580
576 return -EFAULT; 581 if (*passwd || *sha1_passwd)
582 securityfs_remove(passwd_ino);
583
584 securityfs_remove(dir_ino);
577} 585}
578 586
579/** 587/**
@@ -582,6 +590,8 @@ out_delf:
582static int __init seclvl_init(void) 590static int __init seclvl_init(void)
583{ 591{
584 int rc = 0; 592 int rc = 0;
593 static char once;
594
585 if (verbosity < 0 || verbosity > 1) { 595 if (verbosity < 0 || verbosity > 1) {
586 printk(KERN_ERR "Error: bad verbosity [%d]; only 0 or 1 " 596 printk(KERN_ERR "Error: bad verbosity [%d]; only 0 or 1 "
587 "are valid values\n", verbosity); 597 "are valid values\n", verbosity);
@@ -600,6 +610,11 @@ static int __init seclvl_init(void)
600 "module parameter(s): rc = [%d]\n", rc); 610 "module parameter(s): rc = [%d]\n", rc);
601 goto exit; 611 goto exit;
602 } 612 }
613
614 if ((rc = seclvlfs_register())) {
615 seclvl_printk(0, KERN_ERR, "Error registering with sysfs\n");
616 goto exit;
617 }
603 /* register ourselves with the security framework */ 618 /* register ourselves with the security framework */
604 if (register_security(&seclvl_ops)) { 619 if (register_security(&seclvl_ops)) {
605 seclvl_printk(0, KERN_ERR, 620 seclvl_printk(0, KERN_ERR,
@@ -611,20 +626,24 @@ static int __init seclvl_init(void)
611 seclvl_printk(0, KERN_ERR, "seclvl: Failure " 626 seclvl_printk(0, KERN_ERR, "seclvl: Failure "
612 "registering with primary security " 627 "registering with primary security "
613 "module.\n"); 628 "module.\n");
629 seclvlfs_unregister();
614 goto exit; 630 goto exit;
615 } /* if primary module registered */ 631 } /* if primary module registered */
616 secondary = 1; 632 secondary = 1;
617 } /* if we registered ourselves with the security framework */ 633 } /* if we registered ourselves with the security framework */
618 if ((rc = seclvlfs_register())) { 634
619 seclvl_printk(0, KERN_ERR, "Error registering with sysfs\n");
620 goto exit;
621 }
622 seclvl_printk(0, KERN_INFO, "seclvl: Successfully initialized.\n"); 635 seclvl_printk(0, KERN_INFO, "seclvl: Successfully initialized.\n");
636
637 if (once) {
638 once = 1;
639 seclvl_printk(0, KERN_INFO, "seclvl is going away. It has been "
640 "buggy for ages. Also, be warned that "
641 "Securelevels are useless.");
642 }
623 exit: 643 exit:
624 if (rc) { 644 if (rc)
625 printk(KERN_ERR "seclvl: Error during initialization: rc = " 645 printk(KERN_ERR "seclvl: Error during initialization: rc = "
626 "[%d]\n", rc); 646 "[%d]\n", rc);
627 }
628 return rc; 647 return rc;
629} 648}
630 649
@@ -633,17 +652,14 @@ static int __init seclvl_init(void)
633 */ 652 */
634static void __exit seclvl_exit(void) 653static void __exit seclvl_exit(void)
635{ 654{
636 securityfs_remove(seclvl_ino); 655 seclvlfs_unregister();
637 if (*passwd || *sha1_passwd) 656
638 securityfs_remove(passwd_ino); 657 if (secondary)
639 securityfs_remove(dir_ino);
640 if (secondary == 1) {
641 mod_unreg_security(MY_NAME, &seclvl_ops); 658 mod_unreg_security(MY_NAME, &seclvl_ops);
642 } else if (unregister_security(&seclvl_ops)) { 659 else if (unregister_security(&seclvl_ops))
643 seclvl_printk(0, KERN_INFO, 660 seclvl_printk(0, KERN_INFO,
644 "seclvl: Failure unregistering with the " 661 "seclvl: Failure unregistering with the "
645 "kernel\n"); 662 "kernel\n");
646 }
647} 663}
648 664
649module_init(seclvl_init); 665module_init(seclvl_init);
diff --git a/sound/oss/ac97_codec.c b/sound/oss/ac97_codec.c
index fd25aca25120..972327c97644 100644
--- a/sound/oss/ac97_codec.c
+++ b/sound/oss/ac97_codec.c
@@ -55,7 +55,7 @@
55#include <linux/pci.h> 55#include <linux/pci.h>
56#include <linux/ac97_codec.h> 56#include <linux/ac97_codec.h>
57#include <asm/uaccess.h> 57#include <asm/uaccess.h>
58#include <asm/semaphore.h> 58#include <linux/mutex.h>
59 59
60#define CODEC_ID_BUFSZ 14 60#define CODEC_ID_BUFSZ 14
61 61
@@ -304,7 +304,7 @@ static const unsigned int ac97_oss_rm[] = {
304 304
305static LIST_HEAD(codecs); 305static LIST_HEAD(codecs);
306static LIST_HEAD(codec_drivers); 306static LIST_HEAD(codec_drivers);
307static DECLARE_MUTEX(codec_sem); 307static DEFINE_MUTEX(codec_mutex);
308 308
309/* reads the given OSS mixer from the ac97 the caller must have insured that the ac97 knows 309/* reads the given OSS mixer from the ac97 the caller must have insured that the ac97 knows
310 about that given mixer, and should be holding a spinlock for the card */ 310 about that given mixer, and should be holding a spinlock for the card */
@@ -769,9 +769,9 @@ void ac97_release_codec(struct ac97_codec *codec)
769{ 769{
770 /* Remove from the list first, we don't want to be 770 /* Remove from the list first, we don't want to be
771 "rediscovered" */ 771 "rediscovered" */
772 down(&codec_sem); 772 mutex_lock(&codec_mutex);
773 list_del(&codec->list); 773 list_del(&codec->list);
774 up(&codec_sem); 774 mutex_unlock(&codec_mutex);
775 /* 775 /*
776 * The driver needs to deal with internal 776 * The driver needs to deal with internal
777 * locking to avoid accidents here. 777 * locking to avoid accidents here.
@@ -889,7 +889,7 @@ int ac97_probe_codec(struct ac97_codec *codec)
889 * callbacks. 889 * callbacks.
890 */ 890 */
891 891
892 down(&codec_sem); 892 mutex_lock(&codec_mutex);
893 list_add(&codec->list, &codecs); 893 list_add(&codec->list, &codecs);
894 894
895 list_for_each(l, &codec_drivers) { 895 list_for_each(l, &codec_drivers) {
@@ -903,7 +903,7 @@ int ac97_probe_codec(struct ac97_codec *codec)
903 } 903 }
904 } 904 }
905 905
906 up(&codec_sem); 906 mutex_unlock(&codec_mutex);
907 return 1; 907 return 1;
908} 908}
909 909
@@ -1439,7 +1439,7 @@ int ac97_register_driver(struct ac97_driver *driver)
1439 struct list_head *l; 1439 struct list_head *l;
1440 struct ac97_codec *c; 1440 struct ac97_codec *c;
1441 1441
1442 down(&codec_sem); 1442 mutex_lock(&codec_mutex);
1443 INIT_LIST_HEAD(&driver->list); 1443 INIT_LIST_HEAD(&driver->list);
1444 list_add(&driver->list, &codec_drivers); 1444 list_add(&driver->list, &codec_drivers);
1445 1445
@@ -1452,7 +1452,7 @@ int ac97_register_driver(struct ac97_driver *driver)
1452 continue; 1452 continue;
1453 c->driver = driver; 1453 c->driver = driver;
1454 } 1454 }
1455 up(&codec_sem); 1455 mutex_unlock(&codec_mutex);
1456 return 0; 1456 return 0;
1457} 1457}
1458 1458
@@ -1471,7 +1471,7 @@ void ac97_unregister_driver(struct ac97_driver *driver)
1471 struct list_head *l; 1471 struct list_head *l;
1472 struct ac97_codec *c; 1472 struct ac97_codec *c;
1473 1473
1474 down(&codec_sem); 1474 mutex_lock(&codec_mutex);
1475 list_del_init(&driver->list); 1475 list_del_init(&driver->list);
1476 1476
1477 list_for_each(l, &codecs) 1477 list_for_each(l, &codecs)
@@ -1483,7 +1483,7 @@ void ac97_unregister_driver(struct ac97_driver *driver)
1483 } 1483 }
1484 } 1484 }
1485 1485
1486 up(&codec_sem); 1486 mutex_unlock(&codec_mutex);
1487} 1487}
1488 1488
1489EXPORT_SYMBOL_GPL(ac97_unregister_driver); 1489EXPORT_SYMBOL_GPL(ac97_unregister_driver);
@@ -1494,14 +1494,14 @@ static int swap_headphone(int remove_master)
1494 struct ac97_codec *c; 1494 struct ac97_codec *c;
1495 1495
1496 if (remove_master) { 1496 if (remove_master) {
1497 down(&codec_sem); 1497 mutex_lock(&codec_mutex);
1498 list_for_each(l, &codecs) 1498 list_for_each(l, &codecs)
1499 { 1499 {
1500 c = list_entry(l, struct ac97_codec, list); 1500 c = list_entry(l, struct ac97_codec, list);
1501 if (supported_mixer(c, SOUND_MIXER_PHONEOUT)) 1501 if (supported_mixer(c, SOUND_MIXER_PHONEOUT))
1502 c->supported_mixers &= ~SOUND_MASK_PHONEOUT; 1502 c->supported_mixers &= ~SOUND_MASK_PHONEOUT;
1503 } 1503 }
1504 up(&codec_sem); 1504 mutex_unlock(&codec_mutex);
1505 } else 1505 } else
1506 ac97_hw[SOUND_MIXER_PHONEOUT].offset = AC97_MASTER_VOL_STEREO; 1506 ac97_hw[SOUND_MIXER_PHONEOUT].offset = AC97_MASTER_VOL_STEREO;
1507 1507
diff --git a/sound/oss/aci.c b/sound/oss/aci.c
index 3928c2802cc4..3bfac375dbdb 100644
--- a/sound/oss/aci.c
+++ b/sound/oss/aci.c
@@ -56,7 +56,8 @@
56#include <linux/module.h> 56#include <linux/module.h>
57#include <linux/proc_fs.h> 57#include <linux/proc_fs.h>
58#include <linux/slab.h> 58#include <linux/slab.h>
59#include <asm/semaphore.h> 59#include <linux/mutex.h>
60
60#include <asm/io.h> 61#include <asm/io.h>
61#include <asm/uaccess.h> 62#include <asm/uaccess.h>
62#include "sound_config.h" 63#include "sound_config.h"
@@ -79,7 +80,7 @@ static int aci_micpreamp=3; /* microphone preamp-level that can't be *
79 * checked with ACI versions prior to 0xb0 */ 80 * checked with ACI versions prior to 0xb0 */
80 81
81static int mixer_device; 82static int mixer_device;
82static struct semaphore aci_sem; 83static struct mutex aci_mutex;
83 84
84#ifdef MODULE 85#ifdef MODULE
85static int reset; 86static int reset;
@@ -212,7 +213,7 @@ int aci_rw_cmd(int write1, int write2, int write3)
212 int write[] = {write1, write2, write3}; 213 int write[] = {write1, write2, write3};
213 int read = -EINTR, i; 214 int read = -EINTR, i;
214 215
215 if (down_interruptible(&aci_sem)) 216 if (mutex_lock_interruptible(&aci_mutex))
216 goto out; 217 goto out;
217 218
218 for (i=0; i<3; i++) { 219 for (i=0; i<3; i++) {
@@ -227,7 +228,7 @@ int aci_rw_cmd(int write1, int write2, int write3)
227 } 228 }
228 229
229 read = aci_rawread(); 230 read = aci_rawread();
230out_up: up(&aci_sem); 231out_up: mutex_unlock(&aci_mutex);
231out: return read; 232out: return read;
232} 233}
233 234
@@ -603,7 +604,7 @@ static int __init attach_aci(void)
603 char *boardname; 604 char *boardname;
604 int i, rc = -EBUSY; 605 int i, rc = -EBUSY;
605 606
606 init_MUTEX(&aci_sem); 607 mutex_init(&aci_mutex);
607 608
608 outb(0xE3, 0xf8f); /* Write MAD16 password */ 609 outb(0xE3, 0xf8f); /* Write MAD16 password */
609 aci_port = (inb(0xf90) & 0x10) ? 610 aci_port = (inb(0xf90) & 0x10) ?
diff --git a/sound/oss/ad1889.c b/sound/oss/ad1889.c
index a0d73f343100..54dabf862802 100644
--- a/sound/oss/ad1889.c
+++ b/sound/oss/ad1889.c
@@ -38,6 +38,7 @@
38#include <linux/ac97_codec.h> 38#include <linux/ac97_codec.h>
39#include <linux/sound.h> 39#include <linux/sound.h>
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/mutex.h>
41 42
42#include <asm/delay.h> 43#include <asm/delay.h>
43#include <asm/io.h> 44#include <asm/io.h>
@@ -238,7 +239,7 @@ static ad1889_dev_t *ad1889_alloc_dev(struct pci_dev *pci)
238 239
239 for (i = 0; i < AD_MAX_STATES; i++) { 240 for (i = 0; i < AD_MAX_STATES; i++) {
240 dev->state[i].card = dev; 241 dev->state[i].card = dev;
241 init_MUTEX(&dev->state[i].sem); 242 mutex_init(&dev->state[i].mutex);
242 init_waitqueue_head(&dev->state[i].dmabuf.wait); 243 init_waitqueue_head(&dev->state[i].dmabuf.wait);
243 } 244 }
244 245
@@ -461,7 +462,7 @@ static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t
461 ssize_t ret = 0; 462 ssize_t ret = 0;
462 DECLARE_WAITQUEUE(wait, current); 463 DECLARE_WAITQUEUE(wait, current);
463 464
464 down(&state->sem); 465 mutex_lock(&state->mutex);
465#if 0 466#if 0
466 if (dmabuf->mapped) { 467 if (dmabuf->mapped) {
467 ret = -ENXIO; 468 ret = -ENXIO;
@@ -546,7 +547,7 @@ static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t
546err2: 547err2:
547 remove_wait_queue(&state->dmabuf.wait, &wait); 548 remove_wait_queue(&state->dmabuf.wait, &wait);
548err1: 549err1:
549 up(&state->sem); 550 mutex_unlock(&state->mutex);
550 return ret; 551 return ret;
551} 552}
552 553
diff --git a/sound/oss/ad1889.h b/sound/oss/ad1889.h
index e04affce1dd1..861b3213f30b 100644
--- a/sound/oss/ad1889.h
+++ b/sound/oss/ad1889.h
@@ -100,7 +100,7 @@ typedef struct ad1889_state {
100 unsigned int subdivision; 100 unsigned int subdivision;
101 } dmabuf; 101 } dmabuf;
102 102
103 struct semaphore sem; 103 struct mutex mutex;
104} ad1889_state_t; 104} ad1889_state_t;
105 105
106typedef struct ad1889_dev { 106typedef struct ad1889_dev {
diff --git a/sound/oss/ali5455.c b/sound/oss/ali5455.c
index 9c9e6c0410f2..62bb936b1f3d 100644
--- a/sound/oss/ali5455.c
+++ b/sound/oss/ali5455.c
@@ -64,6 +64,8 @@
64#include <linux/smp_lock.h> 64#include <linux/smp_lock.h>
65#include <linux/ac97_codec.h> 65#include <linux/ac97_codec.h>
66#include <linux/interrupt.h> 66#include <linux/interrupt.h>
67#include <linux/mutex.h>
68
67#include <asm/uaccess.h> 69#include <asm/uaccess.h>
68 70
69#ifndef PCI_DEVICE_ID_ALI_5455 71#ifndef PCI_DEVICE_ID_ALI_5455
@@ -234,7 +236,7 @@ struct ali_state {
234 struct ali_card *card; /* Card info */ 236 struct ali_card *card; /* Card info */
235 237
236 /* single open lock mechanism, only used for recording */ 238 /* single open lock mechanism, only used for recording */
237 struct semaphore open_sem; 239 struct mutex open_mutex;
238 wait_queue_head_t open_wait; 240 wait_queue_head_t open_wait;
239 241
240 /* file mode */ 242 /* file mode */
@@ -2807,7 +2809,7 @@ found_virt:
2807 state->card = card; 2809 state->card = card;
2808 state->magic = ALI5455_STATE_MAGIC; 2810 state->magic = ALI5455_STATE_MAGIC;
2809 init_waitqueue_head(&dmabuf->wait); 2811 init_waitqueue_head(&dmabuf->wait);
2810 init_MUTEX(&state->open_sem); 2812 mutex_init(&state->open_mutex);
2811 file->private_data = state; 2813 file->private_data = state;
2812 dmabuf->trigger = 0; 2814 dmabuf->trigger = 0;
2813 /* allocate hardware channels */ 2815 /* allocate hardware channels */
@@ -3359,7 +3361,7 @@ static void __devinit ali_configure_clocking(void)
3359 state->card = card; 3361 state->card = card;
3360 state->magic = ALI5455_STATE_MAGIC; 3362 state->magic = ALI5455_STATE_MAGIC;
3361 init_waitqueue_head(&dmabuf->wait); 3363 init_waitqueue_head(&dmabuf->wait);
3362 init_MUTEX(&state->open_sem); 3364 mutex_init(&state->open_mutex);
3363 dmabuf->fmt = ALI5455_FMT_STEREO | ALI5455_FMT_16BIT; 3365 dmabuf->fmt = ALI5455_FMT_STEREO | ALI5455_FMT_16BIT;
3364 dmabuf->trigger = PCM_ENABLE_OUTPUT; 3366 dmabuf->trigger = PCM_ENABLE_OUTPUT;
3365 ali_set_dac_rate(state, 48000); 3367 ali_set_dac_rate(state, 48000);
diff --git a/sound/oss/au1000.c b/sound/oss/au1000.c
index c407de86cbb6..fe54de25aafc 100644
--- a/sound/oss/au1000.c
+++ b/sound/oss/au1000.c
@@ -68,6 +68,8 @@
68#include <linux/smp_lock.h> 68#include <linux/smp_lock.h>
69#include <linux/ac97_codec.h> 69#include <linux/ac97_codec.h>
70#include <linux/interrupt.h> 70#include <linux/interrupt.h>
71#include <linux/mutex.h>
72
71#include <asm/io.h> 73#include <asm/io.h>
72#include <asm/uaccess.h> 74#include <asm/uaccess.h>
73#include <asm/mach-au1x00/au1000.h> 75#include <asm/mach-au1x00/au1000.h>
@@ -120,8 +122,8 @@ struct au1000_state {
120 int no_vra; // do not use VRA 122 int no_vra; // do not use VRA
121 123
122 spinlock_t lock; 124 spinlock_t lock;
123 struct semaphore open_sem; 125 struct mutex open_mutex;
124 struct semaphore sem; 126 struct mutex sem;
125 mode_t open_mode; 127 mode_t open_mode;
126 wait_queue_head_t open_wait; 128 wait_queue_head_t open_wait;
127 129
@@ -1106,7 +1108,7 @@ static ssize_t au1000_read(struct file *file, char *buffer,
1106 1108
1107 count *= db->cnt_factor; 1109 count *= db->cnt_factor;
1108 1110
1109 down(&s->sem); 1111 mutex_lock(&s->sem);
1110 add_wait_queue(&db->wait, &wait); 1112 add_wait_queue(&db->wait, &wait);
1111 1113
1112 while (count > 0) { 1114 while (count > 0) {
@@ -1125,14 +1127,14 @@ static ssize_t au1000_read(struct file *file, char *buffer,
1125 ret = -EAGAIN; 1127 ret = -EAGAIN;
1126 goto out; 1128 goto out;
1127 } 1129 }
1128 up(&s->sem); 1130 mutex_unlock(&s->sem);
1129 schedule(); 1131 schedule();
1130 if (signal_pending(current)) { 1132 if (signal_pending(current)) {
1131 if (!ret) 1133 if (!ret)
1132 ret = -ERESTARTSYS; 1134 ret = -ERESTARTSYS;
1133 goto out2; 1135 goto out2;
1134 } 1136 }
1135 down(&s->sem); 1137 mutex_lock(&s->sem);
1136 } 1138 }
1137 } while (avail <= 0); 1139 } while (avail <= 0);
1138 1140
@@ -1159,7 +1161,7 @@ static ssize_t au1000_read(struct file *file, char *buffer,
1159 } // while (count > 0) 1161 } // while (count > 0)
1160 1162
1161out: 1163out:
1162 up(&s->sem); 1164 mutex_unlock(&s->sem);
1163out2: 1165out2:
1164 remove_wait_queue(&db->wait, &wait); 1166 remove_wait_queue(&db->wait, &wait);
1165 set_current_state(TASK_RUNNING); 1167 set_current_state(TASK_RUNNING);
@@ -1187,7 +1189,7 @@ static ssize_t au1000_write(struct file *file, const char *buffer,
1187 1189
1188 count *= db->cnt_factor; 1190 count *= db->cnt_factor;
1189 1191
1190 down(&s->sem); 1192 mutex_lock(&s->sem);
1191 add_wait_queue(&db->wait, &wait); 1193 add_wait_queue(&db->wait, &wait);
1192 1194
1193 while (count > 0) { 1195 while (count > 0) {
@@ -1204,14 +1206,14 @@ static ssize_t au1000_write(struct file *file, const char *buffer,
1204 ret = -EAGAIN; 1206 ret = -EAGAIN;
1205 goto out; 1207 goto out;
1206 } 1208 }
1207 up(&s->sem); 1209 mutex_unlock(&s->sem);
1208 schedule(); 1210 schedule();
1209 if (signal_pending(current)) { 1211 if (signal_pending(current)) {
1210 if (!ret) 1212 if (!ret)
1211 ret = -ERESTARTSYS; 1213 ret = -ERESTARTSYS;
1212 goto out2; 1214 goto out2;
1213 } 1215 }
1214 down(&s->sem); 1216 mutex_lock(&s->sem);
1215 } 1217 }
1216 } while (avail <= 0); 1218 } while (avail <= 0);
1217 1219
@@ -1240,7 +1242,7 @@ static ssize_t au1000_write(struct file *file, const char *buffer,
1240 } // while (count > 0) 1242 } // while (count > 0)
1241 1243
1242out: 1244out:
1243 up(&s->sem); 1245 mutex_unlock(&s->sem);
1244out2: 1246out2:
1245 remove_wait_queue(&db->wait, &wait); 1247 remove_wait_queue(&db->wait, &wait);
1246 set_current_state(TASK_RUNNING); 1248 set_current_state(TASK_RUNNING);
@@ -1298,7 +1300,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma)
1298 dbg("%s", __FUNCTION__); 1300 dbg("%s", __FUNCTION__);
1299 1301
1300 lock_kernel(); 1302 lock_kernel();
1301 down(&s->sem); 1303 mutex_lock(&s->sem);
1302 if (vma->vm_flags & VM_WRITE) 1304 if (vma->vm_flags & VM_WRITE)
1303 db = &s->dma_dac; 1305 db = &s->dma_dac;
1304 else if (vma->vm_flags & VM_READ) 1306 else if (vma->vm_flags & VM_READ)
@@ -1324,7 +1326,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma)
1324 vma->vm_flags &= ~VM_IO; 1326 vma->vm_flags &= ~VM_IO;
1325 db->mapped = 1; 1327 db->mapped = 1;
1326out: 1328out:
1327 up(&s->sem); 1329 mutex_unlock(&s->sem);
1328 unlock_kernel(); 1330 unlock_kernel();
1329 return ret; 1331 return ret;
1330} 1332}
@@ -1829,21 +1831,21 @@ static int au1000_open(struct inode *inode, struct file *file)
1829 1831
1830 file->private_data = s; 1832 file->private_data = s;
1831 /* wait for device to become free */ 1833 /* wait for device to become free */
1832 down(&s->open_sem); 1834 mutex_lock(&s->open_mutex);
1833 while (s->open_mode & file->f_mode) { 1835 while (s->open_mode & file->f_mode) {
1834 if (file->f_flags & O_NONBLOCK) { 1836 if (file->f_flags & O_NONBLOCK) {
1835 up(&s->open_sem); 1837 mutex_unlock(&s->open_mutex);
1836 return -EBUSY; 1838 return -EBUSY;
1837 } 1839 }
1838 add_wait_queue(&s->open_wait, &wait); 1840 add_wait_queue(&s->open_wait, &wait);
1839 __set_current_state(TASK_INTERRUPTIBLE); 1841 __set_current_state(TASK_INTERRUPTIBLE);
1840 up(&s->open_sem); 1842 mutex_unlock(&s->open_mutex);
1841 schedule(); 1843 schedule();
1842 remove_wait_queue(&s->open_wait, &wait); 1844 remove_wait_queue(&s->open_wait, &wait);
1843 set_current_state(TASK_RUNNING); 1845 set_current_state(TASK_RUNNING);
1844 if (signal_pending(current)) 1846 if (signal_pending(current))
1845 return -ERESTARTSYS; 1847 return -ERESTARTSYS;
1846 down(&s->open_sem); 1848 mutex_lock(&s->open_mutex);
1847 } 1849 }
1848 1850
1849 stop_dac(s); 1851 stop_dac(s);
@@ -1879,8 +1881,8 @@ static int au1000_open(struct inode *inode, struct file *file)
1879 } 1881 }
1880 1882
1881 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 1883 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
1882 up(&s->open_sem); 1884 mutex_unlock(&s->open_mutex);
1883 init_MUTEX(&s->sem); 1885 mutex_init(&s->sem);
1884 return nonseekable_open(inode, file); 1886 return nonseekable_open(inode, file);
1885} 1887}
1886 1888
@@ -1896,7 +1898,7 @@ static int au1000_release(struct inode *inode, struct file *file)
1896 lock_kernel(); 1898 lock_kernel();
1897 } 1899 }
1898 1900
1899 down(&s->open_sem); 1901 mutex_lock(&s->open_mutex);
1900 if (file->f_mode & FMODE_WRITE) { 1902 if (file->f_mode & FMODE_WRITE) {
1901 stop_dac(s); 1903 stop_dac(s);
1902 dealloc_dmabuf(s, &s->dma_dac); 1904 dealloc_dmabuf(s, &s->dma_dac);
@@ -1906,7 +1908,7 @@ static int au1000_release(struct inode *inode, struct file *file)
1906 dealloc_dmabuf(s, &s->dma_adc); 1908 dealloc_dmabuf(s, &s->dma_adc);
1907 } 1909 }
1908 s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); 1910 s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE));
1909 up(&s->open_sem); 1911 mutex_unlock(&s->open_mutex);
1910 wake_up(&s->open_wait); 1912 wake_up(&s->open_wait);
1911 unlock_kernel(); 1913 unlock_kernel();
1912 return 0; 1914 return 0;
@@ -1996,7 +1998,7 @@ static int __devinit au1000_probe(void)
1996 init_waitqueue_head(&s->dma_adc.wait); 1998 init_waitqueue_head(&s->dma_adc.wait);
1997 init_waitqueue_head(&s->dma_dac.wait); 1999 init_waitqueue_head(&s->dma_dac.wait);
1998 init_waitqueue_head(&s->open_wait); 2000 init_waitqueue_head(&s->open_wait);
1999 init_MUTEX(&s->open_sem); 2001 mutex_init(&s->open_mutex);
2000 spin_lock_init(&s->lock); 2002 spin_lock_init(&s->lock);
2001 s->codec.private_data = s; 2003 s->codec.private_data = s;
2002 s->codec.id = 0; 2004 s->codec.id = 0;
diff --git a/sound/oss/au1550_ac97.c b/sound/oss/au1550_ac97.c
index bdee0502f3e2..6a4956b8025d 100644
--- a/sound/oss/au1550_ac97.c
+++ b/sound/oss/au1550_ac97.c
@@ -52,6 +52,8 @@
52#include <linux/spinlock.h> 52#include <linux/spinlock.h>
53#include <linux/smp_lock.h> 53#include <linux/smp_lock.h>
54#include <linux/ac97_codec.h> 54#include <linux/ac97_codec.h>
55#include <linux/mutex.h>
56
55#include <asm/io.h> 57#include <asm/io.h>
56#include <asm/uaccess.h> 58#include <asm/uaccess.h>
57#include <asm/hardirq.h> 59#include <asm/hardirq.h>
@@ -90,8 +92,8 @@ static struct au1550_state {
90 int no_vra; /* do not use VRA */ 92 int no_vra; /* do not use VRA */
91 93
92 spinlock_t lock; 94 spinlock_t lock;
93 struct semaphore open_sem; 95 struct mutex open_mutex;
94 struct semaphore sem; 96 struct mutex sem;
95 mode_t open_mode; 97 mode_t open_mode;
96 wait_queue_head_t open_wait; 98 wait_queue_head_t open_wait;
97 99
@@ -1044,7 +1046,7 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
1044 1046
1045 count *= db->cnt_factor; 1047 count *= db->cnt_factor;
1046 1048
1047 down(&s->sem); 1049 mutex_lock(&s->sem);
1048 add_wait_queue(&db->wait, &wait); 1050 add_wait_queue(&db->wait, &wait);
1049 1051
1050 while (count > 0) { 1052 while (count > 0) {
@@ -1064,14 +1066,14 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
1064 ret = -EAGAIN; 1066 ret = -EAGAIN;
1065 goto out; 1067 goto out;
1066 } 1068 }
1067 up(&s->sem); 1069 mutex_unlock(&s->sem);
1068 schedule(); 1070 schedule();
1069 if (signal_pending(current)) { 1071 if (signal_pending(current)) {
1070 if (!ret) 1072 if (!ret)
1071 ret = -ERESTARTSYS; 1073 ret = -ERESTARTSYS;
1072 goto out2; 1074 goto out2;
1073 } 1075 }
1074 down(&s->sem); 1076 mutex_lock(&s->sem);
1075 } 1077 }
1076 } while (avail <= 0); 1078 } while (avail <= 0);
1077 1079
@@ -1099,7 +1101,7 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
1099 } /* while (count > 0) */ 1101 } /* while (count > 0) */
1100 1102
1101out: 1103out:
1102 up(&s->sem); 1104 mutex_unlock(&s->sem);
1103out2: 1105out2:
1104 remove_wait_queue(&db->wait, &wait); 1106 remove_wait_queue(&db->wait, &wait);
1105 set_current_state(TASK_RUNNING); 1107 set_current_state(TASK_RUNNING);
@@ -1125,7 +1127,7 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos)
1125 1127
1126 count *= db->cnt_factor; 1128 count *= db->cnt_factor;
1127 1129
1128 down(&s->sem); 1130 mutex_lock(&s->sem);
1129 add_wait_queue(&db->wait, &wait); 1131 add_wait_queue(&db->wait, &wait);
1130 1132
1131 while (count > 0) { 1133 while (count > 0) {
@@ -1143,14 +1145,14 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos)
1143 ret = -EAGAIN; 1145 ret = -EAGAIN;
1144 goto out; 1146 goto out;
1145 } 1147 }
1146 up(&s->sem); 1148 mutex_unlock(&s->sem);
1147 schedule(); 1149 schedule();
1148 if (signal_pending(current)) { 1150 if (signal_pending(current)) {
1149 if (!ret) 1151 if (!ret)
1150 ret = -ERESTARTSYS; 1152 ret = -ERESTARTSYS;
1151 goto out2; 1153 goto out2;
1152 } 1154 }
1153 down(&s->sem); 1155 mutex_lock(&s->sem);
1154 } 1156 }
1155 } while (avail <= 0); 1157 } while (avail <= 0);
1156 1158
@@ -1196,7 +1198,7 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos)
1196 } /* while (count > 0) */ 1198 } /* while (count > 0) */
1197 1199
1198out: 1200out:
1199 up(&s->sem); 1201 mutex_unlock(&s->sem);
1200out2: 1202out2:
1201 remove_wait_queue(&db->wait, &wait); 1203 remove_wait_queue(&db->wait, &wait);
1202 set_current_state(TASK_RUNNING); 1204 set_current_state(TASK_RUNNING);
@@ -1253,7 +1255,7 @@ au1550_mmap(struct file *file, struct vm_area_struct *vma)
1253 int ret = 0; 1255 int ret = 0;
1254 1256
1255 lock_kernel(); 1257 lock_kernel();
1256 down(&s->sem); 1258 mutex_lock(&s->sem);
1257 if (vma->vm_flags & VM_WRITE) 1259 if (vma->vm_flags & VM_WRITE)
1258 db = &s->dma_dac; 1260 db = &s->dma_dac;
1259 else if (vma->vm_flags & VM_READ) 1261 else if (vma->vm_flags & VM_READ)
@@ -1279,7 +1281,7 @@ au1550_mmap(struct file *file, struct vm_area_struct *vma)
1279 vma->vm_flags &= ~VM_IO; 1281 vma->vm_flags &= ~VM_IO;
1280 db->mapped = 1; 1282 db->mapped = 1;
1281out: 1283out:
1282 up(&s->sem); 1284 mutex_unlock(&s->sem);
1283 unlock_kernel(); 1285 unlock_kernel();
1284 return ret; 1286 return ret;
1285} 1287}
@@ -1790,21 +1792,21 @@ au1550_open(struct inode *inode, struct file *file)
1790 1792
1791 file->private_data = s; 1793 file->private_data = s;
1792 /* wait for device to become free */ 1794 /* wait for device to become free */
1793 down(&s->open_sem); 1795 mutex_lock(&s->open_mutex);
1794 while (s->open_mode & file->f_mode) { 1796 while (s->open_mode & file->f_mode) {
1795 if (file->f_flags & O_NONBLOCK) { 1797 if (file->f_flags & O_NONBLOCK) {
1796 up(&s->open_sem); 1798 mutex_unlock(&s->open_mutex);
1797 return -EBUSY; 1799 return -EBUSY;
1798 } 1800 }
1799 add_wait_queue(&s->open_wait, &wait); 1801 add_wait_queue(&s->open_wait, &wait);
1800 __set_current_state(TASK_INTERRUPTIBLE); 1802 __set_current_state(TASK_INTERRUPTIBLE);
1801 up(&s->open_sem); 1803 mutex_unlock(&s->open_mutex);
1802 schedule(); 1804 schedule();
1803 remove_wait_queue(&s->open_wait, &wait); 1805 remove_wait_queue(&s->open_wait, &wait);
1804 set_current_state(TASK_RUNNING); 1806 set_current_state(TASK_RUNNING);
1805 if (signal_pending(current)) 1807 if (signal_pending(current))
1806 return -ERESTARTSYS; 1808 return -ERESTARTSYS;
1807 down(&s->open_sem); 1809 mutex_lock(&s->open_mutex);
1808 } 1810 }
1809 1811
1810 stop_dac(s); 1812 stop_dac(s);
@@ -1840,8 +1842,8 @@ au1550_open(struct inode *inode, struct file *file)
1840 } 1842 }
1841 1843
1842 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 1844 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
1843 up(&s->open_sem); 1845 mutex_unlock(&s->open_mutex);
1844 init_MUTEX(&s->sem); 1846 mutex_init(&s->sem);
1845 return 0; 1847 return 0;
1846} 1848}
1847 1849
@@ -1858,7 +1860,7 @@ au1550_release(struct inode *inode, struct file *file)
1858 lock_kernel(); 1860 lock_kernel();
1859 } 1861 }
1860 1862
1861 down(&s->open_sem); 1863 mutex_lock(&s->open_mutex);
1862 if (file->f_mode & FMODE_WRITE) { 1864 if (file->f_mode & FMODE_WRITE) {
1863 stop_dac(s); 1865 stop_dac(s);
1864 kfree(s->dma_dac.rawbuf); 1866 kfree(s->dma_dac.rawbuf);
@@ -1870,7 +1872,7 @@ au1550_release(struct inode *inode, struct file *file)
1870 s->dma_adc.rawbuf = NULL; 1872 s->dma_adc.rawbuf = NULL;
1871 } 1873 }
1872 s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); 1874 s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE));
1873 up(&s->open_sem); 1875 mutex_unlock(&s->open_mutex);
1874 wake_up(&s->open_wait); 1876 wake_up(&s->open_wait);
1875 unlock_kernel(); 1877 unlock_kernel();
1876 return 0; 1878 return 0;
@@ -1902,7 +1904,7 @@ au1550_probe(void)
1902 init_waitqueue_head(&s->dma_adc.wait); 1904 init_waitqueue_head(&s->dma_adc.wait);
1903 init_waitqueue_head(&s->dma_dac.wait); 1905 init_waitqueue_head(&s->dma_dac.wait);
1904 init_waitqueue_head(&s->open_wait); 1906 init_waitqueue_head(&s->open_wait);
1905 init_MUTEX(&s->open_sem); 1907 mutex_init(&s->open_mutex);
1906 spin_lock_init(&s->lock); 1908 spin_lock_init(&s->lock);
1907 1909
1908 s->codec = ac97_alloc_codec(); 1910 s->codec = ac97_alloc_codec();
diff --git a/sound/oss/btaudio.c b/sound/oss/btaudio.c
index 4007a5680acb..bfe3b534ef30 100644
--- a/sound/oss/btaudio.c
+++ b/sound/oss/btaudio.c
@@ -32,6 +32,8 @@
32#include <linux/soundcard.h> 32#include <linux/soundcard.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/kdev_t.h> 34#include <linux/kdev_t.h>
35#include <linux/mutex.h>
36
35#include <asm/uaccess.h> 37#include <asm/uaccess.h>
36#include <asm/io.h> 38#include <asm/io.h>
37 39
@@ -108,7 +110,7 @@ struct btaudio {
108 110
109 /* locking */ 111 /* locking */
110 int users; 112 int users;
111 struct semaphore lock; 113 struct mutex lock;
112 114
113 /* risc instructions */ 115 /* risc instructions */
114 unsigned int risc_size; 116 unsigned int risc_size;
@@ -440,7 +442,7 @@ static struct file_operations btaudio_mixer_fops = {
440static int btaudio_dsp_open(struct inode *inode, struct file *file, 442static int btaudio_dsp_open(struct inode *inode, struct file *file,
441 struct btaudio *bta, int analog) 443 struct btaudio *bta, int analog)
442{ 444{
443 down(&bta->lock); 445 mutex_lock(&bta->lock);
444 if (bta->users) 446 if (bta->users)
445 goto busy; 447 goto busy;
446 bta->users++; 448 bta->users++;
@@ -452,11 +454,11 @@ static int btaudio_dsp_open(struct inode *inode, struct file *file,
452 bta->read_count = 0; 454 bta->read_count = 0;
453 bta->sampleshift = 0; 455 bta->sampleshift = 0;
454 456
455 up(&bta->lock); 457 mutex_unlock(&bta->lock);
456 return 0; 458 return 0;
457 459
458 busy: 460 busy:
459 up(&bta->lock); 461 mutex_unlock(&bta->lock);
460 return -EBUSY; 462 return -EBUSY;
461} 463}
462 464
@@ -496,11 +498,11 @@ static int btaudio_dsp_release(struct inode *inode, struct file *file)
496{ 498{
497 struct btaudio *bta = file->private_data; 499 struct btaudio *bta = file->private_data;
498 500
499 down(&bta->lock); 501 mutex_lock(&bta->lock);
500 if (bta->recording) 502 if (bta->recording)
501 stop_recording(bta); 503 stop_recording(bta);
502 bta->users--; 504 bta->users--;
503 up(&bta->lock); 505 mutex_unlock(&bta->lock);
504 return 0; 506 return 0;
505} 507}
506 508
@@ -513,7 +515,7 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer,
513 DECLARE_WAITQUEUE(wait, current); 515 DECLARE_WAITQUEUE(wait, current);
514 516
515 add_wait_queue(&bta->readq, &wait); 517 add_wait_queue(&bta->readq, &wait);
516 down(&bta->lock); 518 mutex_lock(&bta->lock);
517 while (swcount > 0) { 519 while (swcount > 0) {
518 if (0 == bta->read_count) { 520 if (0 == bta->read_count) {
519 if (!bta->recording) { 521 if (!bta->recording) {
@@ -528,10 +530,10 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer,
528 ret = -EAGAIN; 530 ret = -EAGAIN;
529 break; 531 break;
530 } 532 }
531 up(&bta->lock); 533 mutex_unlock(&bta->lock);
532 current->state = TASK_INTERRUPTIBLE; 534 current->state = TASK_INTERRUPTIBLE;
533 schedule(); 535 schedule();
534 down(&bta->lock); 536 mutex_lock(&bta->lock);
535 if(signal_pending(current)) { 537 if(signal_pending(current)) {
536 if (0 == ret) 538 if (0 == ret)
537 ret = -EINTR; 539 ret = -EINTR;
@@ -604,7 +606,7 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer,
604 if (bta->read_offset == bta->buf_size) 606 if (bta->read_offset == bta->buf_size)
605 bta->read_offset = 0; 607 bta->read_offset = 0;
606 } 608 }
607 up(&bta->lock); 609 mutex_unlock(&bta->lock);
608 remove_wait_queue(&bta->readq, &wait); 610 remove_wait_queue(&bta->readq, &wait);
609 current->state = TASK_RUNNING; 611 current->state = TASK_RUNNING;
610 return ret; 612 return ret;
@@ -651,10 +653,10 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file,
651 bta->decimation = 0; 653 bta->decimation = 0;
652 } 654 }
653 if (bta->recording) { 655 if (bta->recording) {
654 down(&bta->lock); 656 mutex_lock(&bta->lock);
655 stop_recording(bta); 657 stop_recording(bta);
656 start_recording(bta); 658 start_recording(bta);
657 up(&bta->lock); 659 mutex_unlock(&bta->lock);
658 } 660 }
659 /* fall through */ 661 /* fall through */
660 case SOUND_PCM_READ_RATE: 662 case SOUND_PCM_READ_RATE:
@@ -716,10 +718,10 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file,
716 else 718 else
717 bta->bits = 16; 719 bta->bits = 16;
718 if (bta->recording) { 720 if (bta->recording) {
719 down(&bta->lock); 721 mutex_lock(&bta->lock);
720 stop_recording(bta); 722 stop_recording(bta);
721 start_recording(bta); 723 start_recording(bta);
722 up(&bta->lock); 724 mutex_unlock(&bta->lock);
723 } 725 }
724 } 726 }
725 if (debug) 727 if (debug)
@@ -736,9 +738,9 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file,
736 738
737 case SNDCTL_DSP_RESET: 739 case SNDCTL_DSP_RESET:
738 if (bta->recording) { 740 if (bta->recording) {
739 down(&bta->lock); 741 mutex_lock(&bta->lock);
740 stop_recording(bta); 742 stop_recording(bta);
741 up(&bta->lock); 743 mutex_unlock(&bta->lock);
742 } 744 }
743 return 0; 745 return 0;
744 case SNDCTL_DSP_GETBLKSIZE: 746 case SNDCTL_DSP_GETBLKSIZE:
@@ -941,7 +943,7 @@ static int __devinit btaudio_probe(struct pci_dev *pci_dev,
941 if (rate) 943 if (rate)
942 bta->rate = rate; 944 bta->rate = rate;
943 945
944 init_MUTEX(&bta->lock); 946 mutex_init(&bta->lock);
945 init_waitqueue_head(&bta->readq); 947 init_waitqueue_head(&bta->readq);
946 948
947 if (-1 != latency) { 949 if (-1 != latency) {
diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c
index 7cfbb08db537..1fbd5137f6d7 100644
--- a/sound/oss/cmpci.c
+++ b/sound/oss/cmpci.c
@@ -138,6 +138,8 @@
138#endif 138#endif
139#ifdef CONFIG_SOUND_CMPCI_JOYSTICK 139#ifdef CONFIG_SOUND_CMPCI_JOYSTICK
140#include <linux/gameport.h> 140#include <linux/gameport.h>
141#include <linux/mutex.h>
142
141#endif 143#endif
142 144
143/* --------------------------------------------------------------------- */ 145/* --------------------------------------------------------------------- */
@@ -392,7 +394,7 @@ struct cm_state {
392 unsigned char fmt, enable; 394 unsigned char fmt, enable;
393 395
394 spinlock_t lock; 396 spinlock_t lock;
395 struct semaphore open_sem; 397 struct mutex open_mutex;
396 mode_t open_mode; 398 mode_t open_mode;
397 wait_queue_head_t open_wait; 399 wait_queue_head_t open_wait;
398 400
@@ -2825,21 +2827,21 @@ static int cm_open(struct inode *inode, struct file *file)
2825 VALIDATE_STATE(s); 2827 VALIDATE_STATE(s);
2826 file->private_data = s; 2828 file->private_data = s;
2827 /* wait for device to become free */ 2829 /* wait for device to become free */
2828 down(&s->open_sem); 2830 mutex_lock(&s->open_mutex);
2829 while (s->open_mode & file->f_mode) { 2831 while (s->open_mode & file->f_mode) {
2830 if (file->f_flags & O_NONBLOCK) { 2832 if (file->f_flags & O_NONBLOCK) {
2831 up(&s->open_sem); 2833 mutex_unlock(&s->open_mutex);
2832 return -EBUSY; 2834 return -EBUSY;
2833 } 2835 }
2834 add_wait_queue(&s->open_wait, &wait); 2836 add_wait_queue(&s->open_wait, &wait);
2835 __set_current_state(TASK_INTERRUPTIBLE); 2837 __set_current_state(TASK_INTERRUPTIBLE);
2836 up(&s->open_sem); 2838 mutex_unlock(&s->open_mutex);
2837 schedule(); 2839 schedule();
2838 remove_wait_queue(&s->open_wait, &wait); 2840 remove_wait_queue(&s->open_wait, &wait);
2839 set_current_state(TASK_RUNNING); 2841 set_current_state(TASK_RUNNING);
2840 if (signal_pending(current)) 2842 if (signal_pending(current))
2841 return -ERESTARTSYS; 2843 return -ERESTARTSYS;
2842 down(&s->open_sem); 2844 mutex_lock(&s->open_mutex);
2843 } 2845 }
2844 if (file->f_mode & FMODE_READ) { 2846 if (file->f_mode & FMODE_READ) {
2845 s->status &= ~DO_BIGENDIAN_R; 2847 s->status &= ~DO_BIGENDIAN_R;
@@ -2867,7 +2869,7 @@ static int cm_open(struct inode *inode, struct file *file)
2867 } 2869 }
2868 set_fmt(s, fmtm, fmts); 2870 set_fmt(s, fmtm, fmts);
2869 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 2871 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
2870 up(&s->open_sem); 2872 mutex_unlock(&s->open_mutex);
2871 return nonseekable_open(inode, file); 2873 return nonseekable_open(inode, file);
2872} 2874}
2873 2875
@@ -2879,7 +2881,7 @@ static int cm_release(struct inode *inode, struct file *file)
2879 lock_kernel(); 2881 lock_kernel();
2880 if (file->f_mode & FMODE_WRITE) 2882 if (file->f_mode & FMODE_WRITE)
2881 drain_dac(s, file->f_flags & O_NONBLOCK); 2883 drain_dac(s, file->f_flags & O_NONBLOCK);
2882 down(&s->open_sem); 2884 mutex_lock(&s->open_mutex);
2883 if (file->f_mode & FMODE_WRITE) { 2885 if (file->f_mode & FMODE_WRITE) {
2884 stop_dac(s); 2886 stop_dac(s);
2885 2887
@@ -2903,7 +2905,7 @@ static int cm_release(struct inode *inode, struct file *file)
2903 s->status &= ~DO_BIGENDIAN_R; 2905 s->status &= ~DO_BIGENDIAN_R;
2904 } 2906 }
2905 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); 2907 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE));
2906 up(&s->open_sem); 2908 mutex_unlock(&s->open_mutex);
2907 wake_up(&s->open_wait); 2909 wake_up(&s->open_wait);
2908 unlock_kernel(); 2910 unlock_kernel();
2909 return 0; 2911 return 0;
@@ -3080,7 +3082,7 @@ static int __devinit cm_probe(struct pci_dev *pcidev, const struct pci_device_id
3080 init_waitqueue_head(&s->dma_adc.wait); 3082 init_waitqueue_head(&s->dma_adc.wait);
3081 init_waitqueue_head(&s->dma_dac.wait); 3083 init_waitqueue_head(&s->dma_dac.wait);
3082 init_waitqueue_head(&s->open_wait); 3084 init_waitqueue_head(&s->open_wait);
3083 init_MUTEX(&s->open_sem); 3085 mutex_init(&s->open_mutex);
3084 spin_lock_init(&s->lock); 3086 spin_lock_init(&s->lock);
3085 s->magic = CM_MAGIC; 3087 s->magic = CM_MAGIC;
3086 s->dev = pcidev; 3088 s->dev = pcidev;
diff --git a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c
index 0720365f6438..0004442f9b7e 100644
--- a/sound/oss/cs4281/cs4281m.c
+++ b/sound/oss/cs4281/cs4281m.c
@@ -245,9 +245,9 @@ struct cs4281_state {
245 void *tmpbuff; // tmp buffer for sample conversions 245 void *tmpbuff; // tmp buffer for sample conversions
246 unsigned ena; 246 unsigned ena;
247 spinlock_t lock; 247 spinlock_t lock;
248 struct semaphore open_sem; 248 struct mutex open_sem;
249 struct semaphore open_sem_adc; 249 struct mutex open_sem_adc;
250 struct semaphore open_sem_dac; 250 struct mutex open_sem_dac;
251 mode_t open_mode; 251 mode_t open_mode;
252 wait_queue_head_t open_wait; 252 wait_queue_head_t open_wait;
253 wait_queue_head_t open_wait_adc; 253 wait_queue_head_t open_wait_adc;
@@ -3598,20 +3598,20 @@ static int cs4281_release(struct inode *inode, struct file *file)
3598 3598
3599 if (file->f_mode & FMODE_WRITE) { 3599 if (file->f_mode & FMODE_WRITE) {
3600 drain_dac(s, file->f_flags & O_NONBLOCK); 3600 drain_dac(s, file->f_flags & O_NONBLOCK);
3601 down(&s->open_sem_dac); 3601 mutex_lock(&s->open_sem_dac);
3602 stop_dac(s); 3602 stop_dac(s);
3603 dealloc_dmabuf(s, &s->dma_dac); 3603 dealloc_dmabuf(s, &s->dma_dac);
3604 s->open_mode &= ~FMODE_WRITE; 3604 s->open_mode &= ~FMODE_WRITE;
3605 up(&s->open_sem_dac); 3605 mutex_unlock(&s->open_sem_dac);
3606 wake_up(&s->open_wait_dac); 3606 wake_up(&s->open_wait_dac);
3607 } 3607 }
3608 if (file->f_mode & FMODE_READ) { 3608 if (file->f_mode & FMODE_READ) {
3609 drain_adc(s, file->f_flags & O_NONBLOCK); 3609 drain_adc(s, file->f_flags & O_NONBLOCK);
3610 down(&s->open_sem_adc); 3610 mutex_lock(&s->open_sem_adc);
3611 stop_adc(s); 3611 stop_adc(s);
3612 dealloc_dmabuf(s, &s->dma_adc); 3612 dealloc_dmabuf(s, &s->dma_adc);
3613 s->open_mode &= ~FMODE_READ; 3613 s->open_mode &= ~FMODE_READ;
3614 up(&s->open_sem_adc); 3614 mutex_unlock(&s->open_sem_adc);
3615 wake_up(&s->open_wait_adc); 3615 wake_up(&s->open_wait_adc);
3616 } 3616 }
3617 return 0; 3617 return 0;
@@ -3651,33 +3651,33 @@ static int cs4281_open(struct inode *inode, struct file *file)
3651 return -ENODEV; 3651 return -ENODEV;
3652 } 3652 }
3653 if (file->f_mode & FMODE_WRITE) { 3653 if (file->f_mode & FMODE_WRITE) {
3654 down(&s->open_sem_dac); 3654 mutex_lock(&s->open_sem_dac);
3655 while (s->open_mode & FMODE_WRITE) { 3655 while (s->open_mode & FMODE_WRITE) {
3656 if (file->f_flags & O_NONBLOCK) { 3656 if (file->f_flags & O_NONBLOCK) {
3657 up(&s->open_sem_dac); 3657 mutex_unlock(&s->open_sem_dac);
3658 return -EBUSY; 3658 return -EBUSY;
3659 } 3659 }
3660 up(&s->open_sem_dac); 3660 mutex_unlock(&s->open_sem_dac);
3661 interruptible_sleep_on(&s->open_wait_dac); 3661 interruptible_sleep_on(&s->open_wait_dac);
3662 3662
3663 if (signal_pending(current)) 3663 if (signal_pending(current))
3664 return -ERESTARTSYS; 3664 return -ERESTARTSYS;
3665 down(&s->open_sem_dac); 3665 mutex_lock(&s->open_sem_dac);
3666 } 3666 }
3667 } 3667 }
3668 if (file->f_mode & FMODE_READ) { 3668 if (file->f_mode & FMODE_READ) {
3669 down(&s->open_sem_adc); 3669 mutex_lock(&s->open_sem_adc);
3670 while (s->open_mode & FMODE_READ) { 3670 while (s->open_mode & FMODE_READ) {
3671 if (file->f_flags & O_NONBLOCK) { 3671 if (file->f_flags & O_NONBLOCK) {
3672 up(&s->open_sem_adc); 3672 mutex_unlock(&s->open_sem_adc);
3673 return -EBUSY; 3673 return -EBUSY;
3674 } 3674 }
3675 up(&s->open_sem_adc); 3675 mutex_unlock(&s->open_sem_adc);
3676 interruptible_sleep_on(&s->open_wait_adc); 3676 interruptible_sleep_on(&s->open_wait_adc);
3677 3677
3678 if (signal_pending(current)) 3678 if (signal_pending(current))
3679 return -ERESTARTSYS; 3679 return -ERESTARTSYS;
3680 down(&s->open_sem_adc); 3680 mutex_lock(&s->open_sem_adc);
3681 } 3681 }
3682 } 3682 }
3683 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 3683 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
@@ -3691,7 +3691,7 @@ static int cs4281_open(struct inode *inode, struct file *file)
3691 s->ena &= ~FMODE_READ; 3691 s->ena &= ~FMODE_READ;
3692 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = 3692 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags =
3693 s->dma_adc.subdivision = 0; 3693 s->dma_adc.subdivision = 0;
3694 up(&s->open_sem_adc); 3694 mutex_unlock(&s->open_sem_adc);
3695 3695
3696 if (prog_dmabuf_adc(s)) { 3696 if (prog_dmabuf_adc(s)) {
3697 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR 3697 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
@@ -3711,7 +3711,7 @@ static int cs4281_open(struct inode *inode, struct file *file)
3711 s->ena &= ~FMODE_WRITE; 3711 s->ena &= ~FMODE_WRITE;
3712 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = 3712 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags =
3713 s->dma_dac.subdivision = 0; 3713 s->dma_dac.subdivision = 0;
3714 up(&s->open_sem_dac); 3714 mutex_unlock(&s->open_sem_dac);
3715 3715
3716 if (prog_dmabuf_dac(s)) { 3716 if (prog_dmabuf_dac(s)) {
3717 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR 3717 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
@@ -3978,17 +3978,17 @@ static int cs4281_midi_open(struct inode *inode, struct file *file)
3978 VALIDATE_STATE(s); 3978 VALIDATE_STATE(s);
3979 file->private_data = s; 3979 file->private_data = s;
3980 // wait for device to become free 3980 // wait for device to become free
3981 down(&s->open_sem); 3981 mutex_lock(&s->open_sem);
3982 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { 3982 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
3983 if (file->f_flags & O_NONBLOCK) { 3983 if (file->f_flags & O_NONBLOCK) {
3984 up(&s->open_sem); 3984 mutex_unlock(&s->open_sem);
3985 return -EBUSY; 3985 return -EBUSY;
3986 } 3986 }
3987 up(&s->open_sem); 3987 mutex_unlock(&s->open_sem);
3988 interruptible_sleep_on(&s->open_wait); 3988 interruptible_sleep_on(&s->open_wait);
3989 if (signal_pending(current)) 3989 if (signal_pending(current))
3990 return -ERESTARTSYS; 3990 return -ERESTARTSYS;
3991 down(&s->open_sem); 3991 mutex_lock(&s->open_sem);
3992 } 3992 }
3993 spin_lock_irqsave(&s->lock, flags); 3993 spin_lock_irqsave(&s->lock, flags);
3994 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 3994 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -4018,7 +4018,7 @@ static int cs4281_midi_open(struct inode *inode, struct file *file)
4018 (file-> 4018 (file->
4019 f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | 4019 f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ |
4020 FMODE_MIDI_WRITE); 4020 FMODE_MIDI_WRITE);
4021 up(&s->open_sem); 4021 mutex_unlock(&s->open_sem);
4022 return nonseekable_open(inode, file); 4022 return nonseekable_open(inode, file);
4023} 4023}
4024 4024
@@ -4057,7 +4057,7 @@ static int cs4281_midi_release(struct inode *inode, struct file *file)
4057 remove_wait_queue(&s->midi.owait, &wait); 4057 remove_wait_queue(&s->midi.owait, &wait);
4058 current->state = TASK_RUNNING; 4058 current->state = TASK_RUNNING;
4059 } 4059 }
4060 down(&s->open_sem); 4060 mutex_lock(&s->open_sem);
4061 s->open_mode &= 4061 s->open_mode &=
4062 (~(file->f_mode << FMODE_MIDI_SHIFT)) & (FMODE_MIDI_READ | 4062 (~(file->f_mode << FMODE_MIDI_SHIFT)) & (FMODE_MIDI_READ |
4063 FMODE_MIDI_WRITE); 4063 FMODE_MIDI_WRITE);
@@ -4067,7 +4067,7 @@ static int cs4281_midi_release(struct inode *inode, struct file *file)
4067 del_timer(&s->midi.timer); 4067 del_timer(&s->midi.timer);
4068 } 4068 }
4069 spin_unlock_irqrestore(&s->lock, flags); 4069 spin_unlock_irqrestore(&s->lock, flags);
4070 up(&s->open_sem); 4070 mutex_unlock(&s->open_sem);
4071 wake_up(&s->open_wait); 4071 wake_up(&s->open_wait);
4072 return 0; 4072 return 0;
4073} 4073}
@@ -4300,9 +4300,9 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev,
4300 init_waitqueue_head(&s->open_wait_dac); 4300 init_waitqueue_head(&s->open_wait_dac);
4301 init_waitqueue_head(&s->midi.iwait); 4301 init_waitqueue_head(&s->midi.iwait);
4302 init_waitqueue_head(&s->midi.owait); 4302 init_waitqueue_head(&s->midi.owait);
4303 init_MUTEX(&s->open_sem); 4303 mutex_init(&s->open_sem);
4304 init_MUTEX(&s->open_sem_adc); 4304 mutex_init(&s->open_sem_adc);
4305 init_MUTEX(&s->open_sem_dac); 4305 mutex_init(&s->open_sem_dac);
4306 spin_lock_init(&s->lock); 4306 spin_lock_init(&s->lock);
4307 s->pBA0phys = pci_resource_start(pcidev, 0); 4307 s->pBA0phys = pci_resource_start(pcidev, 0);
4308 s->pBA1phys = pci_resource_start(pcidev, 1); 4308 s->pBA1phys = pci_resource_start(pcidev, 1);
diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c
index 58e25c82eaf2..53881bc91bba 100644
--- a/sound/oss/cs46xx.c
+++ b/sound/oss/cs46xx.c
@@ -90,6 +90,7 @@
90#include <linux/init.h> 90#include <linux/init.h>
91#include <linux/poll.h> 91#include <linux/poll.h>
92#include <linux/ac97_codec.h> 92#include <linux/ac97_codec.h>
93#include <linux/mutex.h>
93 94
94#include <asm/io.h> 95#include <asm/io.h>
95#include <asm/dma.h> 96#include <asm/dma.h>
@@ -238,7 +239,7 @@ struct cs_state {
238 struct cs_card *card; /* Card info */ 239 struct cs_card *card; /* Card info */
239 240
240 /* single open lock mechanism, only used for recording */ 241 /* single open lock mechanism, only used for recording */
241 struct semaphore open_sem; 242 struct mutex open_mutex;
242 wait_queue_head_t open_wait; 243 wait_queue_head_t open_wait;
243 244
244 /* file mode */ 245 /* file mode */
@@ -297,7 +298,7 @@ struct cs_state {
297 unsigned subdivision; 298 unsigned subdivision;
298 } dmabuf; 299 } dmabuf;
299 /* Guard against mmap/write/read races */ 300 /* Guard against mmap/write/read races */
300 struct semaphore sem; 301 struct mutex sem;
301}; 302};
302 303
303struct cs_card { 304struct cs_card {
@@ -375,7 +376,7 @@ struct cs_card {
375 unsigned char ibuf[CS_MIDIINBUF]; 376 unsigned char ibuf[CS_MIDIINBUF];
376 unsigned char obuf[CS_MIDIOUTBUF]; 377 unsigned char obuf[CS_MIDIOUTBUF];
377 mode_t open_mode; 378 mode_t open_mode;
378 struct semaphore open_sem; 379 struct mutex open_mutex;
379 } midi; 380 } midi;
380 struct cs46xx_pm pm; 381 struct cs46xx_pm pm;
381}; 382};
@@ -1428,9 +1429,9 @@ static int prog_dmabuf(struct cs_state *state)
1428{ 1429{
1429 int ret; 1430 int ret;
1430 1431
1431 down(&state->sem); 1432 mutex_lock(&state->sem);
1432 ret = __prog_dmabuf(state); 1433 ret = __prog_dmabuf(state);
1433 up(&state->sem); 1434 mutex_unlock(&state->sem);
1434 1435
1435 return ret; 1436 return ret;
1436} 1437}
@@ -1831,17 +1832,17 @@ static int cs_midi_open(struct inode *inode, struct file *file)
1831 1832
1832 file->private_data = card; 1833 file->private_data = card;
1833 /* wait for device to become free */ 1834 /* wait for device to become free */
1834 down(&card->midi.open_sem); 1835 mutex_lock(&card->midi.open_mutex);
1835 while (card->midi.open_mode & file->f_mode) { 1836 while (card->midi.open_mode & file->f_mode) {
1836 if (file->f_flags & O_NONBLOCK) { 1837 if (file->f_flags & O_NONBLOCK) {
1837 up(&card->midi.open_sem); 1838 mutex_unlock(&card->midi.open_mutex);
1838 return -EBUSY; 1839 return -EBUSY;
1839 } 1840 }
1840 up(&card->midi.open_sem); 1841 mutex_unlock(&card->midi.open_mutex);
1841 interruptible_sleep_on(&card->midi.open_wait); 1842 interruptible_sleep_on(&card->midi.open_wait);
1842 if (signal_pending(current)) 1843 if (signal_pending(current))
1843 return -ERESTARTSYS; 1844 return -ERESTARTSYS;
1844 down(&card->midi.open_sem); 1845 mutex_lock(&card->midi.open_mutex);
1845 } 1846 }
1846 spin_lock_irqsave(&card->midi.lock, flags); 1847 spin_lock_irqsave(&card->midi.lock, flags);
1847 if (!(card->midi.open_mode & (FMODE_READ | FMODE_WRITE))) { 1848 if (!(card->midi.open_mode & (FMODE_READ | FMODE_WRITE))) {
@@ -1859,7 +1860,7 @@ static int cs_midi_open(struct inode *inode, struct file *file)
1859 } 1860 }
1860 spin_unlock_irqrestore(&card->midi.lock, flags); 1861 spin_unlock_irqrestore(&card->midi.lock, flags);
1861 card->midi.open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE)); 1862 card->midi.open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE));
1862 up(&card->midi.open_sem); 1863 mutex_unlock(&card->midi.open_mutex);
1863 return 0; 1864 return 0;
1864} 1865}
1865 1866
@@ -1891,9 +1892,9 @@ static int cs_midi_release(struct inode *inode, struct file *file)
1891 remove_wait_queue(&card->midi.owait, &wait); 1892 remove_wait_queue(&card->midi.owait, &wait);
1892 current->state = TASK_RUNNING; 1893 current->state = TASK_RUNNING;
1893 } 1894 }
1894 down(&card->midi.open_sem); 1895 mutex_lock(&card->midi.open_mutex);
1895 card->midi.open_mode &= (~(file->f_mode & (FMODE_READ | FMODE_WRITE))); 1896 card->midi.open_mode &= (~(file->f_mode & (FMODE_READ | FMODE_WRITE)));
1896 up(&card->midi.open_sem); 1897 mutex_unlock(&card->midi.open_mutex);
1897 wake_up(&card->midi.open_wait); 1898 wake_up(&card->midi.open_wait);
1898 return 0; 1899 return 0;
1899} 1900}
@@ -2081,7 +2082,7 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
2081 if (!access_ok(VERIFY_WRITE, buffer, count)) 2082 if (!access_ok(VERIFY_WRITE, buffer, count))
2082 return -EFAULT; 2083 return -EFAULT;
2083 2084
2084 down(&state->sem); 2085 mutex_lock(&state->sem);
2085 if (!dmabuf->ready && (ret = __prog_dmabuf(state))) 2086 if (!dmabuf->ready && (ret = __prog_dmabuf(state)))
2086 goto out2; 2087 goto out2;
2087 2088
@@ -2114,13 +2115,13 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
2114 if (!ret) ret = -EAGAIN; 2115 if (!ret) ret = -EAGAIN;
2115 goto out; 2116 goto out;
2116 } 2117 }
2117 up(&state->sem); 2118 mutex_unlock(&state->sem);
2118 schedule(); 2119 schedule();
2119 if (signal_pending(current)) { 2120 if (signal_pending(current)) {
2120 if(!ret) ret = -ERESTARTSYS; 2121 if(!ret) ret = -ERESTARTSYS;
2121 goto out; 2122 goto out;
2122 } 2123 }
2123 down(&state->sem); 2124 mutex_lock(&state->sem);
2124 if (dmabuf->mapped) 2125 if (dmabuf->mapped)
2125 { 2126 {
2126 if(!ret) 2127 if(!ret)
@@ -2155,7 +2156,7 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
2155out: 2156out:
2156 remove_wait_queue(&state->dmabuf.wait, &wait); 2157 remove_wait_queue(&state->dmabuf.wait, &wait);
2157out2: 2158out2:
2158 up(&state->sem); 2159 mutex_unlock(&state->sem);
2159 set_current_state(TASK_RUNNING); 2160 set_current_state(TASK_RUNNING);
2160 CS_DBGOUT(CS_WAVE_READ | CS_FUNCTION, 4, 2161 CS_DBGOUT(CS_WAVE_READ | CS_FUNCTION, 4,
2161 printk("cs46xx: cs_read()- %zd\n",ret) ); 2162 printk("cs46xx: cs_read()- %zd\n",ret) );
@@ -2184,7 +2185,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
2184 return -EFAULT; 2185 return -EFAULT;
2185 dmabuf = &state->dmabuf; 2186 dmabuf = &state->dmabuf;
2186 2187
2187 down(&state->sem); 2188 mutex_lock(&state->sem);
2188 if (dmabuf->mapped) 2189 if (dmabuf->mapped)
2189 { 2190 {
2190 ret = -ENXIO; 2191 ret = -ENXIO;
@@ -2240,13 +2241,13 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
2240 if (!ret) ret = -EAGAIN; 2241 if (!ret) ret = -EAGAIN;
2241 goto out; 2242 goto out;
2242 } 2243 }
2243 up(&state->sem); 2244 mutex_unlock(&state->sem);
2244 schedule(); 2245 schedule();
2245 if (signal_pending(current)) { 2246 if (signal_pending(current)) {
2246 if(!ret) ret = -ERESTARTSYS; 2247 if(!ret) ret = -ERESTARTSYS;
2247 goto out; 2248 goto out;
2248 } 2249 }
2249 down(&state->sem); 2250 mutex_lock(&state->sem);
2250 if (dmabuf->mapped) 2251 if (dmabuf->mapped)
2251 { 2252 {
2252 if(!ret) 2253 if(!ret)
@@ -2278,7 +2279,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
2278 start_dac(state); 2279 start_dac(state);
2279 } 2280 }
2280out: 2281out:
2281 up(&state->sem); 2282 mutex_unlock(&state->sem);
2282 remove_wait_queue(&state->dmabuf.wait, &wait); 2283 remove_wait_queue(&state->dmabuf.wait, &wait);
2283 set_current_state(TASK_RUNNING); 2284 set_current_state(TASK_RUNNING);
2284 2285
@@ -2411,7 +2412,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
2411 goto out; 2412 goto out;
2412 } 2413 }
2413 2414
2414 down(&state->sem); 2415 mutex_lock(&state->sem);
2415 dmabuf = &state->dmabuf; 2416 dmabuf = &state->dmabuf;
2416 if (cs4x_pgoff(vma) != 0) 2417 if (cs4x_pgoff(vma) != 0)
2417 { 2418 {
@@ -2438,7 +2439,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
2438 2439
2439 CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_mmap()-\n") ); 2440 CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_mmap()-\n") );
2440out: 2441out:
2441 up(&state->sem); 2442 mutex_unlock(&state->sem);
2442 return ret; 2443 return ret;
2443} 2444}
2444 2445
@@ -3200,7 +3201,7 @@ static int cs_open(struct inode *inode, struct file *file)
3200 if (state == NULL) 3201 if (state == NULL)
3201 return -ENOMEM; 3202 return -ENOMEM;
3202 memset(state, 0, sizeof(struct cs_state)); 3203 memset(state, 0, sizeof(struct cs_state));
3203 init_MUTEX(&state->sem); 3204 mutex_init(&state->sem);
3204 dmabuf = &state->dmabuf; 3205 dmabuf = &state->dmabuf;
3205 dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 3206 dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
3206 if(dmabuf->pbuf==NULL) 3207 if(dmabuf->pbuf==NULL)
@@ -3241,10 +3242,10 @@ static int cs_open(struct inode *inode, struct file *file)
3241 state->virt = 0; 3242 state->virt = 0;
3242 state->magic = CS_STATE_MAGIC; 3243 state->magic = CS_STATE_MAGIC;
3243 init_waitqueue_head(&dmabuf->wait); 3244 init_waitqueue_head(&dmabuf->wait);
3244 init_MUTEX(&state->open_sem); 3245 mutex_init(&state->open_mutex);
3245 file->private_data = card; 3246 file->private_data = card;
3246 3247
3247 down(&state->open_sem); 3248 mutex_lock(&state->open_mutex);
3248 3249
3249 /* set default sample format. According to OSS Programmer's Guide /dev/dsp 3250 /* set default sample format. According to OSS Programmer's Guide /dev/dsp
3250 should be default to unsigned 8-bits, mono, with sample rate 8kHz and 3251 should be default to unsigned 8-bits, mono, with sample rate 8kHz and
@@ -3260,7 +3261,7 @@ static int cs_open(struct inode *inode, struct file *file)
3260 cs_set_divisor(dmabuf); 3261 cs_set_divisor(dmabuf);
3261 3262
3262 state->open_mode |= FMODE_READ; 3263 state->open_mode |= FMODE_READ;
3263 up(&state->open_sem); 3264 mutex_unlock(&state->open_mutex);
3264 } 3265 }
3265 if(file->f_mode & FMODE_WRITE) 3266 if(file->f_mode & FMODE_WRITE)
3266 { 3267 {
@@ -3271,7 +3272,7 @@ static int cs_open(struct inode *inode, struct file *file)
3271 if (state == NULL) 3272 if (state == NULL)
3272 return -ENOMEM; 3273 return -ENOMEM;
3273 memset(state, 0, sizeof(struct cs_state)); 3274 memset(state, 0, sizeof(struct cs_state));
3274 init_MUTEX(&state->sem); 3275 mutex_init(&state->sem);
3275 dmabuf = &state->dmabuf; 3276 dmabuf = &state->dmabuf;
3276 dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 3277 dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
3277 if(dmabuf->pbuf==NULL) 3278 if(dmabuf->pbuf==NULL)
@@ -3312,10 +3313,10 @@ static int cs_open(struct inode *inode, struct file *file)
3312 state->virt = 1; 3313 state->virt = 1;
3313 state->magic = CS_STATE_MAGIC; 3314 state->magic = CS_STATE_MAGIC;
3314 init_waitqueue_head(&dmabuf->wait); 3315 init_waitqueue_head(&dmabuf->wait);
3315 init_MUTEX(&state->open_sem); 3316 mutex_init(&state->open_mutex);
3316 file->private_data = card; 3317 file->private_data = card;
3317 3318
3318 down(&state->open_sem); 3319 mutex_lock(&state->open_mutex);
3319 3320
3320 /* set default sample format. According to OSS Programmer's Guide /dev/dsp 3321 /* set default sample format. According to OSS Programmer's Guide /dev/dsp
3321 should be default to unsigned 8-bits, mono, with sample rate 8kHz and 3322 should be default to unsigned 8-bits, mono, with sample rate 8kHz and
@@ -3331,7 +3332,7 @@ static int cs_open(struct inode *inode, struct file *file)
3331 cs_set_divisor(dmabuf); 3332 cs_set_divisor(dmabuf);
3332 3333
3333 state->open_mode |= FMODE_WRITE; 3334 state->open_mode |= FMODE_WRITE;
3334 up(&state->open_sem); 3335 mutex_unlock(&state->open_mutex);
3335 if((ret = prog_dmabuf(state))) 3336 if((ret = prog_dmabuf(state)))
3336 return ret; 3337 return ret;
3337 } 3338 }
@@ -3363,14 +3364,14 @@ static int cs_release(struct inode *inode, struct file *file)
3363 cs_clear_tail(state); 3364 cs_clear_tail(state);
3364 drain_dac(state, file->f_flags & O_NONBLOCK); 3365 drain_dac(state, file->f_flags & O_NONBLOCK);
3365 /* stop DMA state machine and free DMA buffers/channels */ 3366 /* stop DMA state machine and free DMA buffers/channels */
3366 down(&state->open_sem); 3367 mutex_lock(&state->open_mutex);
3367 stop_dac(state); 3368 stop_dac(state);
3368 dealloc_dmabuf(state); 3369 dealloc_dmabuf(state);
3369 state->card->free_pcm_channel(state->card, dmabuf->channel->num); 3370 state->card->free_pcm_channel(state->card, dmabuf->channel->num);
3370 free_page((unsigned long)state->dmabuf.pbuf); 3371 free_page((unsigned long)state->dmabuf.pbuf);
3371 3372
3372 /* we're covered by the open_sem */ 3373 /* we're covered by the open_mutex */
3373 up(&state->open_sem); 3374 mutex_unlock(&state->open_mutex);
3374 state->card->states[state->virt] = NULL; 3375 state->card->states[state->virt] = NULL;
3375 state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); 3376 state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
3376 3377
@@ -3395,14 +3396,14 @@ static int cs_release(struct inode *inode, struct file *file)
3395 { 3396 {
3396 CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n") ); 3397 CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n") );
3397 dmabuf = &state->dmabuf; 3398 dmabuf = &state->dmabuf;
3398 down(&state->open_sem); 3399 mutex_lock(&state->open_mutex);
3399 stop_adc(state); 3400 stop_adc(state);
3400 dealloc_dmabuf(state); 3401 dealloc_dmabuf(state);
3401 state->card->free_pcm_channel(state->card, dmabuf->channel->num); 3402 state->card->free_pcm_channel(state->card, dmabuf->channel->num);
3402 free_page((unsigned long)state->dmabuf.pbuf); 3403 free_page((unsigned long)state->dmabuf.pbuf);
3403 3404
3404 /* we're covered by the open_sem */ 3405 /* we're covered by the open_mutex */
3405 up(&state->open_sem); 3406 mutex_unlock(&state->open_mutex);
3406 state->card->states[state->virt] = NULL; 3407 state->card->states[state->virt] = NULL;
3407 state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); 3408 state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
3408 3409
@@ -5507,7 +5508,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
5507 } 5508 }
5508 5509
5509 init_waitqueue_head(&card->midi.open_wait); 5510 init_waitqueue_head(&card->midi.open_wait);
5510 init_MUTEX(&card->midi.open_sem); 5511 mutex_init(&card->midi.open_mutex);
5511 init_waitqueue_head(&card->midi.iwait); 5512 init_waitqueue_head(&card->midi.iwait);
5512 init_waitqueue_head(&card->midi.owait); 5513 init_waitqueue_head(&card->midi.owait);
5513 cs461x_pokeBA0(card, BA0_MIDCR, MIDCR_MRST); 5514 cs461x_pokeBA0(card, BA0_MIDCR, MIDCR_MRST);
diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c
index 74f975676ccb..a17375141c3a 100644
--- a/sound/oss/dmasound/dmasound_awacs.c
+++ b/sound/oss/dmasound/dmasound_awacs.c
@@ -80,7 +80,7 @@
80#include <linux/kmod.h> 80#include <linux/kmod.h>
81#include <linux/interrupt.h> 81#include <linux/interrupt.h>
82#include <linux/input.h> 82#include <linux/input.h>
83#include <asm/semaphore.h> 83#include <linux/mutex.h>
84#ifdef CONFIG_ADB_CUDA 84#ifdef CONFIG_ADB_CUDA
85#include <linux/cuda.h> 85#include <linux/cuda.h>
86#endif 86#endif
@@ -130,7 +130,7 @@ static struct resource awacs_rsrc[3];
130static char awacs_name[64]; 130static char awacs_name[64];
131static int awacs_revision; 131static int awacs_revision;
132static int awacs_sleeping; 132static int awacs_sleeping;
133static DECLARE_MUTEX(dmasound_sem); 133static DEFINE_MUTEX(dmasound_mutex);
134 134
135static int sound_device_id; /* exists after iMac revA */ 135static int sound_device_id; /* exists after iMac revA */
136static int hw_can_byteswap = 1 ; /* most pmac sound h/w can */ 136static int hw_can_byteswap = 1 ; /* most pmac sound h/w can */
@@ -312,11 +312,11 @@ extern int daca_enter_sleep(void);
312extern int daca_leave_sleep(void); 312extern int daca_leave_sleep(void);
313 313
314#define TRY_LOCK() \ 314#define TRY_LOCK() \
315 if ((rc = down_interruptible(&dmasound_sem)) != 0) \ 315 if ((rc = mutex_lock_interruptible(&dmasound_mutex)) != 0) \
316 return rc; 316 return rc;
317#define LOCK() down(&dmasound_sem); 317#define LOCK() mutex_lock(&dmasound_mutex);
318 318
319#define UNLOCK() up(&dmasound_sem); 319#define UNLOCK() mutex_unlock(&dmasound_mutex);
320 320
321/* We use different versions that the ones provided in dmasound.h 321/* We use different versions that the ones provided in dmasound.h
322 * 322 *
diff --git a/sound/oss/emu10k1/hwaccess.h b/sound/oss/emu10k1/hwaccess.h
index 104223a192aa..85e27bda694b 100644
--- a/sound/oss/emu10k1/hwaccess.h
+++ b/sound/oss/emu10k1/hwaccess.h
@@ -181,7 +181,7 @@ struct emu10k1_card
181 struct emu10k1_mpuout *mpuout; 181 struct emu10k1_mpuout *mpuout;
182 struct emu10k1_mpuin *mpuin; 182 struct emu10k1_mpuin *mpuin;
183 183
184 struct semaphore open_sem; 184 struct mutex open_sem;
185 mode_t open_mode; 185 mode_t open_mode;
186 wait_queue_head_t open_wait; 186 wait_queue_head_t open_wait;
187 187
diff --git a/sound/oss/emu10k1/main.c b/sound/oss/emu10k1/main.c
index 23241cbdd90f..0cd44a6f7ac0 100644
--- a/sound/oss/emu10k1/main.c
+++ b/sound/oss/emu10k1/main.c
@@ -1320,7 +1320,7 @@ static int __devinit emu10k1_probe(struct pci_dev *pci_dev, const struct pci_dev
1320 card->is_aps = (subsysvid == EMU_APS_SUBID); 1320 card->is_aps = (subsysvid == EMU_APS_SUBID);
1321 1321
1322 spin_lock_init(&card->lock); 1322 spin_lock_init(&card->lock);
1323 init_MUTEX(&card->open_sem); 1323 mutex_init(&card->open_sem);
1324 card->open_mode = 0; 1324 card->open_mode = 0;
1325 init_waitqueue_head(&card->open_wait); 1325 init_waitqueue_head(&card->open_wait);
1326 1326
diff --git a/sound/oss/emu10k1/midi.c b/sound/oss/emu10k1/midi.c
index b40b5f97aace..959a96794dba 100644
--- a/sound/oss/emu10k1/midi.c
+++ b/sound/oss/emu10k1/midi.c
@@ -110,21 +110,21 @@ match:
110#endif 110#endif
111 111
112 /* Wait for device to become free */ 112 /* Wait for device to become free */
113 down(&card->open_sem); 113 mutex_lock(&card->open_sem);
114 while (card->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { 114 while (card->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
115 if (file->f_flags & O_NONBLOCK) { 115 if (file->f_flags & O_NONBLOCK) {
116 up(&card->open_sem); 116 mutex_unlock(&card->open_sem);
117 return -EBUSY; 117 return -EBUSY;
118 } 118 }
119 119
120 up(&card->open_sem); 120 mutex_unlock(&card->open_sem);
121 interruptible_sleep_on(&card->open_wait); 121 interruptible_sleep_on(&card->open_wait);
122 122
123 if (signal_pending(current)) { 123 if (signal_pending(current)) {
124 return -ERESTARTSYS; 124 return -ERESTARTSYS;
125 } 125 }
126 126
127 down(&card->open_sem); 127 mutex_lock(&card->open_sem);
128 } 128 }
129 129
130 if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL) 130 if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL)
@@ -183,7 +183,7 @@ match:
183 183
184 card->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); 184 card->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
185 185
186 up(&card->open_sem); 186 mutex_unlock(&card->open_sem);
187 187
188 return nonseekable_open(inode, file); 188 return nonseekable_open(inode, file);
189} 189}
@@ -234,9 +234,9 @@ static int emu10k1_midi_release(struct inode *inode, struct file *file)
234 234
235 kfree(midi_dev); 235 kfree(midi_dev);
236 236
237 down(&card->open_sem); 237 mutex_lock(&card->open_sem);
238 card->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE)); 238 card->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE));
239 up(&card->open_sem); 239 mutex_unlock(&card->open_sem);
240 wake_up_interruptible(&card->open_wait); 240 wake_up_interruptible(&card->open_wait);
241 241
242 unlock_kernel(); 242 unlock_kernel();
diff --git a/sound/oss/es1370.c b/sound/oss/es1370.c
index ae55c536613a..094f569cc6e0 100644
--- a/sound/oss/es1370.c
+++ b/sound/oss/es1370.c
@@ -157,6 +157,7 @@
157#include <linux/gameport.h> 157#include <linux/gameport.h>
158#include <linux/wait.h> 158#include <linux/wait.h>
159#include <linux/dma-mapping.h> 159#include <linux/dma-mapping.h>
160#include <linux/mutex.h>
160 161
161#include <asm/io.h> 162#include <asm/io.h>
162#include <asm/page.h> 163#include <asm/page.h>
@@ -346,7 +347,7 @@ struct es1370_state {
346 unsigned sctrl; 347 unsigned sctrl;
347 348
348 spinlock_t lock; 349 spinlock_t lock;
349 struct semaphore open_sem; 350 struct mutex open_mutex;
350 mode_t open_mode; 351 mode_t open_mode;
351 wait_queue_head_t open_wait; 352 wait_queue_head_t open_wait;
352 353
@@ -393,7 +394,7 @@ struct es1370_state {
393 struct gameport *gameport; 394 struct gameport *gameport;
394#endif 395#endif
395 396
396 struct semaphore sem; 397 struct mutex mutex;
397}; 398};
398 399
399/* --------------------------------------------------------------------- */ 400/* --------------------------------------------------------------------- */
@@ -1159,7 +1160,7 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count,
1159 return -ENXIO; 1160 return -ENXIO;
1160 if (!access_ok(VERIFY_WRITE, buffer, count)) 1161 if (!access_ok(VERIFY_WRITE, buffer, count))
1161 return -EFAULT; 1162 return -EFAULT;
1162 down(&s->sem); 1163 mutex_lock(&s->mutex);
1163 if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s))) 1164 if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
1164 goto out; 1165 goto out;
1165 1166
@@ -1183,14 +1184,14 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count,
1183 ret = -EAGAIN; 1184 ret = -EAGAIN;
1184 goto out; 1185 goto out;
1185 } 1186 }
1186 up(&s->sem); 1187 mutex_unlock(&s->mutex);
1187 schedule(); 1188 schedule();
1188 if (signal_pending(current)) { 1189 if (signal_pending(current)) {
1189 if (!ret) 1190 if (!ret)
1190 ret = -ERESTARTSYS; 1191 ret = -ERESTARTSYS;
1191 goto out; 1192 goto out;
1192 } 1193 }
1193 down(&s->sem); 1194 mutex_lock(&s->mutex);
1194 if (s->dma_adc.mapped) 1195 if (s->dma_adc.mapped)
1195 { 1196 {
1196 ret = -ENXIO; 1197 ret = -ENXIO;
@@ -1215,7 +1216,7 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count,
1215 start_adc(s); 1216 start_adc(s);
1216 } 1217 }
1217out: 1218out:
1218 up(&s->sem); 1219 mutex_unlock(&s->mutex);
1219 remove_wait_queue(&s->dma_adc.wait, &wait); 1220 remove_wait_queue(&s->dma_adc.wait, &wait);
1220 set_current_state(TASK_RUNNING); 1221 set_current_state(TASK_RUNNING);
1221 return ret; 1222 return ret;
@@ -1235,7 +1236,7 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t
1235 return -ENXIO; 1236 return -ENXIO;
1236 if (!access_ok(VERIFY_READ, buffer, count)) 1237 if (!access_ok(VERIFY_READ, buffer, count))
1237 return -EFAULT; 1238 return -EFAULT;
1238 down(&s->sem); 1239 mutex_lock(&s->mutex);
1239 if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s))) 1240 if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s)))
1240 goto out; 1241 goto out;
1241 ret = 0; 1242 ret = 0;
@@ -1263,14 +1264,14 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t
1263 ret = -EAGAIN; 1264 ret = -EAGAIN;
1264 goto out; 1265 goto out;
1265 } 1266 }
1266 up(&s->sem); 1267 mutex_unlock(&s->mutex);
1267 schedule(); 1268 schedule();
1268 if (signal_pending(current)) { 1269 if (signal_pending(current)) {
1269 if (!ret) 1270 if (!ret)
1270 ret = -ERESTARTSYS; 1271 ret = -ERESTARTSYS;
1271 goto out; 1272 goto out;
1272 } 1273 }
1273 down(&s->sem); 1274 mutex_lock(&s->mutex);
1274 if (s->dma_dac2.mapped) 1275 if (s->dma_dac2.mapped)
1275 { 1276 {
1276 ret = -ENXIO; 1277 ret = -ENXIO;
@@ -1296,7 +1297,7 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t
1296 start_dac2(s); 1297 start_dac2(s);
1297 } 1298 }
1298out: 1299out:
1299 up(&s->sem); 1300 mutex_unlock(&s->mutex);
1300 remove_wait_queue(&s->dma_dac2.wait, &wait); 1301 remove_wait_queue(&s->dma_dac2.wait, &wait);
1301 set_current_state(TASK_RUNNING); 1302 set_current_state(TASK_RUNNING);
1302 return ret; 1303 return ret;
@@ -1348,7 +1349,7 @@ static int es1370_mmap(struct file *file, struct vm_area_struct *vma)
1348 1349
1349 VALIDATE_STATE(s); 1350 VALIDATE_STATE(s);
1350 lock_kernel(); 1351 lock_kernel();
1351 down(&s->sem); 1352 mutex_lock(&s->mutex);
1352 if (vma->vm_flags & VM_WRITE) { 1353 if (vma->vm_flags & VM_WRITE) {
1353 if ((ret = prog_dmabuf_dac2(s)) != 0) { 1354 if ((ret = prog_dmabuf_dac2(s)) != 0) {
1354 goto out; 1355 goto out;
@@ -1380,7 +1381,7 @@ static int es1370_mmap(struct file *file, struct vm_area_struct *vma)
1380 } 1381 }
1381 db->mapped = 1; 1382 db->mapped = 1;
1382out: 1383out:
1383 up(&s->sem); 1384 mutex_unlock(&s->mutex);
1384 unlock_kernel(); 1385 unlock_kernel();
1385 return ret; 1386 return ret;
1386} 1387}
@@ -1752,21 +1753,21 @@ static int es1370_open(struct inode *inode, struct file *file)
1752 VALIDATE_STATE(s); 1753 VALIDATE_STATE(s);
1753 file->private_data = s; 1754 file->private_data = s;
1754 /* wait for device to become free */ 1755 /* wait for device to become free */
1755 down(&s->open_sem); 1756 mutex_lock(&s->open_mutex);
1756 while (s->open_mode & file->f_mode) { 1757 while (s->open_mode & file->f_mode) {
1757 if (file->f_flags & O_NONBLOCK) { 1758 if (file->f_flags & O_NONBLOCK) {
1758 up(&s->open_sem); 1759 mutex_unlock(&s->open_mutex);
1759 return -EBUSY; 1760 return -EBUSY;
1760 } 1761 }
1761 add_wait_queue(&s->open_wait, &wait); 1762 add_wait_queue(&s->open_wait, &wait);
1762 __set_current_state(TASK_INTERRUPTIBLE); 1763 __set_current_state(TASK_INTERRUPTIBLE);
1763 up(&s->open_sem); 1764 mutex_unlock(&s->open_mutex);
1764 schedule(); 1765 schedule();
1765 remove_wait_queue(&s->open_wait, &wait); 1766 remove_wait_queue(&s->open_wait, &wait);
1766 set_current_state(TASK_RUNNING); 1767 set_current_state(TASK_RUNNING);
1767 if (signal_pending(current)) 1768 if (signal_pending(current))
1768 return -ERESTARTSYS; 1769 return -ERESTARTSYS;
1769 down(&s->open_sem); 1770 mutex_lock(&s->open_mutex);
1770 } 1771 }
1771 spin_lock_irqsave(&s->lock, flags); 1772 spin_lock_irqsave(&s->lock, flags);
1772 if (!(s->open_mode & (FMODE_READ|FMODE_WRITE))) 1773 if (!(s->open_mode & (FMODE_READ|FMODE_WRITE)))
@@ -1793,8 +1794,8 @@ static int es1370_open(struct inode *inode, struct file *file)
1793 outl(s->ctrl, s->io+ES1370_REG_CONTROL); 1794 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
1794 spin_unlock_irqrestore(&s->lock, flags); 1795 spin_unlock_irqrestore(&s->lock, flags);
1795 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 1796 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
1796 up(&s->open_sem); 1797 mutex_unlock(&s->open_mutex);
1797 init_MUTEX(&s->sem); 1798 mutex_init(&s->mutex);
1798 return nonseekable_open(inode, file); 1799 return nonseekable_open(inode, file);
1799} 1800}
1800 1801
@@ -1806,7 +1807,7 @@ static int es1370_release(struct inode *inode, struct file *file)
1806 lock_kernel(); 1807 lock_kernel();
1807 if (file->f_mode & FMODE_WRITE) 1808 if (file->f_mode & FMODE_WRITE)
1808 drain_dac2(s, file->f_flags & O_NONBLOCK); 1809 drain_dac2(s, file->f_flags & O_NONBLOCK);
1809 down(&s->open_sem); 1810 mutex_lock(&s->open_mutex);
1810 if (file->f_mode & FMODE_WRITE) { 1811 if (file->f_mode & FMODE_WRITE) {
1811 stop_dac2(s); 1812 stop_dac2(s);
1812 synchronize_irq(s->irq); 1813 synchronize_irq(s->irq);
@@ -1818,7 +1819,7 @@ static int es1370_release(struct inode *inode, struct file *file)
1818 } 1819 }
1819 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); 1820 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE));
1820 wake_up(&s->open_wait); 1821 wake_up(&s->open_wait);
1821 up(&s->open_sem); 1822 mutex_unlock(&s->open_mutex);
1822 unlock_kernel(); 1823 unlock_kernel();
1823 return 0; 1824 return 0;
1824} 1825}
@@ -2198,21 +2199,21 @@ static int es1370_open_dac(struct inode *inode, struct file *file)
2198 return -EINVAL; 2199 return -EINVAL;
2199 file->private_data = s; 2200 file->private_data = s;
2200 /* wait for device to become free */ 2201 /* wait for device to become free */
2201 down(&s->open_sem); 2202 mutex_lock(&s->open_mutex);
2202 while (s->open_mode & FMODE_DAC) { 2203 while (s->open_mode & FMODE_DAC) {
2203 if (file->f_flags & O_NONBLOCK) { 2204 if (file->f_flags & O_NONBLOCK) {
2204 up(&s->open_sem); 2205 mutex_unlock(&s->open_mutex);
2205 return -EBUSY; 2206 return -EBUSY;
2206 } 2207 }
2207 add_wait_queue(&s->open_wait, &wait); 2208 add_wait_queue(&s->open_wait, &wait);
2208 __set_current_state(TASK_INTERRUPTIBLE); 2209 __set_current_state(TASK_INTERRUPTIBLE);
2209 up(&s->open_sem); 2210 mutex_unlock(&s->open_mutex);
2210 schedule(); 2211 schedule();
2211 remove_wait_queue(&s->open_wait, &wait); 2212 remove_wait_queue(&s->open_wait, &wait);
2212 set_current_state(TASK_RUNNING); 2213 set_current_state(TASK_RUNNING);
2213 if (signal_pending(current)) 2214 if (signal_pending(current))
2214 return -ERESTARTSYS; 2215 return -ERESTARTSYS;
2215 down(&s->open_sem); 2216 mutex_lock(&s->open_mutex);
2216 } 2217 }
2217 s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0; 2218 s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0;
2218 s->dma_dac1.enabled = 1; 2219 s->dma_dac1.enabled = 1;
@@ -2227,7 +2228,7 @@ static int es1370_open_dac(struct inode *inode, struct file *file)
2227 outl(s->ctrl, s->io+ES1370_REG_CONTROL); 2228 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
2228 spin_unlock_irqrestore(&s->lock, flags); 2229 spin_unlock_irqrestore(&s->lock, flags);
2229 s->open_mode |= FMODE_DAC; 2230 s->open_mode |= FMODE_DAC;
2230 up(&s->open_sem); 2231 mutex_unlock(&s->open_mutex);
2231 return nonseekable_open(inode, file); 2232 return nonseekable_open(inode, file);
2232} 2233}
2233 2234
@@ -2238,12 +2239,12 @@ static int es1370_release_dac(struct inode *inode, struct file *file)
2238 VALIDATE_STATE(s); 2239 VALIDATE_STATE(s);
2239 lock_kernel(); 2240 lock_kernel();
2240 drain_dac1(s, file->f_flags & O_NONBLOCK); 2241 drain_dac1(s, file->f_flags & O_NONBLOCK);
2241 down(&s->open_sem); 2242 mutex_lock(&s->open_mutex);
2242 stop_dac1(s); 2243 stop_dac1(s);
2243 dealloc_dmabuf(s, &s->dma_dac1); 2244 dealloc_dmabuf(s, &s->dma_dac1);
2244 s->open_mode &= ~FMODE_DAC; 2245 s->open_mode &= ~FMODE_DAC;
2245 wake_up(&s->open_wait); 2246 wake_up(&s->open_wait);
2246 up(&s->open_sem); 2247 mutex_unlock(&s->open_mutex);
2247 unlock_kernel(); 2248 unlock_kernel();
2248 return 0; 2249 return 0;
2249} 2250}
@@ -2430,21 +2431,21 @@ static int es1370_midi_open(struct inode *inode, struct file *file)
2430 VALIDATE_STATE(s); 2431 VALIDATE_STATE(s);
2431 file->private_data = s; 2432 file->private_data = s;
2432 /* wait for device to become free */ 2433 /* wait for device to become free */
2433 down(&s->open_sem); 2434 mutex_lock(&s->open_mutex);
2434 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { 2435 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
2435 if (file->f_flags & O_NONBLOCK) { 2436 if (file->f_flags & O_NONBLOCK) {
2436 up(&s->open_sem); 2437 mutex_unlock(&s->open_mutex);
2437 return -EBUSY; 2438 return -EBUSY;
2438 } 2439 }
2439 add_wait_queue(&s->open_wait, &wait); 2440 add_wait_queue(&s->open_wait, &wait);
2440 __set_current_state(TASK_INTERRUPTIBLE); 2441 __set_current_state(TASK_INTERRUPTIBLE);
2441 up(&s->open_sem); 2442 mutex_unlock(&s->open_mutex);
2442 schedule(); 2443 schedule();
2443 remove_wait_queue(&s->open_wait, &wait); 2444 remove_wait_queue(&s->open_wait, &wait);
2444 set_current_state(TASK_RUNNING); 2445 set_current_state(TASK_RUNNING);
2445 if (signal_pending(current)) 2446 if (signal_pending(current))
2446 return -ERESTARTSYS; 2447 return -ERESTARTSYS;
2447 down(&s->open_sem); 2448 mutex_lock(&s->open_mutex);
2448 } 2449 }
2449 spin_lock_irqsave(&s->lock, flags); 2450 spin_lock_irqsave(&s->lock, flags);
2450 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 2451 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -2465,7 +2466,7 @@ static int es1370_midi_open(struct inode *inode, struct file *file)
2465 es1370_handle_midi(s); 2466 es1370_handle_midi(s);
2466 spin_unlock_irqrestore(&s->lock, flags); 2467 spin_unlock_irqrestore(&s->lock, flags);
2467 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); 2468 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
2468 up(&s->open_sem); 2469 mutex_unlock(&s->open_mutex);
2469 return nonseekable_open(inode, file); 2470 return nonseekable_open(inode, file);
2470} 2471}
2471 2472
@@ -2499,7 +2500,7 @@ static int es1370_midi_release(struct inode *inode, struct file *file)
2499 remove_wait_queue(&s->midi.owait, &wait); 2500 remove_wait_queue(&s->midi.owait, &wait);
2500 set_current_state(TASK_RUNNING); 2501 set_current_state(TASK_RUNNING);
2501 } 2502 }
2502 down(&s->open_sem); 2503 mutex_lock(&s->open_mutex);
2503 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); 2504 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE));
2504 spin_lock_irqsave(&s->lock, flags); 2505 spin_lock_irqsave(&s->lock, flags);
2505 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 2506 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -2508,7 +2509,7 @@ static int es1370_midi_release(struct inode *inode, struct file *file)
2508 } 2509 }
2509 spin_unlock_irqrestore(&s->lock, flags); 2510 spin_unlock_irqrestore(&s->lock, flags);
2510 wake_up(&s->open_wait); 2511 wake_up(&s->open_wait);
2511 up(&s->open_sem); 2512 mutex_unlock(&s->open_mutex);
2512 unlock_kernel(); 2513 unlock_kernel();
2513 return 0; 2514 return 0;
2514} 2515}
@@ -2638,7 +2639,7 @@ static int __devinit es1370_probe(struct pci_dev *pcidev, const struct pci_devic
2638 init_waitqueue_head(&s->open_wait); 2639 init_waitqueue_head(&s->open_wait);
2639 init_waitqueue_head(&s->midi.iwait); 2640 init_waitqueue_head(&s->midi.iwait);
2640 init_waitqueue_head(&s->midi.owait); 2641 init_waitqueue_head(&s->midi.owait);
2641 init_MUTEX(&s->open_sem); 2642 mutex_init(&s->open_mutex);
2642 spin_lock_init(&s->lock); 2643 spin_lock_init(&s->lock);
2643 s->magic = ES1370_MAGIC; 2644 s->magic = ES1370_MAGIC;
2644 s->dev = pcidev; 2645 s->dev = pcidev;
diff --git a/sound/oss/es1371.c b/sound/oss/es1371.c
index 5c697f162579..4400c8538686 100644
--- a/sound/oss/es1371.c
+++ b/sound/oss/es1371.c
@@ -129,6 +129,7 @@
129#include <linux/gameport.h> 129#include <linux/gameport.h>
130#include <linux/wait.h> 130#include <linux/wait.h>
131#include <linux/dma-mapping.h> 131#include <linux/dma-mapping.h>
132#include <linux/mutex.h>
132 133
133#include <asm/io.h> 134#include <asm/io.h>
134#include <asm/page.h> 135#include <asm/page.h>
@@ -419,7 +420,7 @@ struct es1371_state {
419 unsigned dac1rate, dac2rate, adcrate; 420 unsigned dac1rate, dac2rate, adcrate;
420 421
421 spinlock_t lock; 422 spinlock_t lock;
422 struct semaphore open_sem; 423 struct mutex open_mutex;
423 mode_t open_mode; 424 mode_t open_mode;
424 wait_queue_head_t open_wait; 425 wait_queue_head_t open_wait;
425 426
@@ -462,7 +463,7 @@ struct es1371_state {
462 struct gameport *gameport; 463 struct gameport *gameport;
463#endif 464#endif
464 465
465 struct semaphore sem; 466 struct mutex sem;
466}; 467};
467 468
468/* --------------------------------------------------------------------- */ 469/* --------------------------------------------------------------------- */
@@ -1346,7 +1347,7 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count,
1346 return -ENXIO; 1347 return -ENXIO;
1347 if (!access_ok(VERIFY_WRITE, buffer, count)) 1348 if (!access_ok(VERIFY_WRITE, buffer, count))
1348 return -EFAULT; 1349 return -EFAULT;
1349 down(&s->sem); 1350 mutex_lock(&s->sem);
1350 if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s))) 1351 if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
1351 goto out2; 1352 goto out2;
1352 1353
@@ -1370,14 +1371,14 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count,
1370 ret = -EAGAIN; 1371 ret = -EAGAIN;
1371 goto out; 1372 goto out;
1372 } 1373 }
1373 up(&s->sem); 1374 mutex_unlock(&s->sem);
1374 schedule(); 1375 schedule();
1375 if (signal_pending(current)) { 1376 if (signal_pending(current)) {
1376 if (!ret) 1377 if (!ret)
1377 ret = -ERESTARTSYS; 1378 ret = -ERESTARTSYS;
1378 goto out2; 1379 goto out2;
1379 } 1380 }
1380 down(&s->sem); 1381 mutex_lock(&s->sem);
1381 if (s->dma_adc.mapped) 1382 if (s->dma_adc.mapped)
1382 { 1383 {
1383 ret = -ENXIO; 1384 ret = -ENXIO;
@@ -1402,7 +1403,7 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count,
1402 start_adc(s); 1403 start_adc(s);
1403 } 1404 }
1404out: 1405out:
1405 up(&s->sem); 1406 mutex_unlock(&s->sem);
1406out2: 1407out2:
1407 remove_wait_queue(&s->dma_adc.wait, &wait); 1408 remove_wait_queue(&s->dma_adc.wait, &wait);
1408 set_current_state(TASK_RUNNING); 1409 set_current_state(TASK_RUNNING);
@@ -1423,7 +1424,7 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t
1423 return -ENXIO; 1424 return -ENXIO;
1424 if (!access_ok(VERIFY_READ, buffer, count)) 1425 if (!access_ok(VERIFY_READ, buffer, count))
1425 return -EFAULT; 1426 return -EFAULT;
1426 down(&s->sem); 1427 mutex_lock(&s->sem);
1427 if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s))) 1428 if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s)))
1428 goto out3; 1429 goto out3;
1429 ret = 0; 1430 ret = 0;
@@ -1451,14 +1452,14 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t
1451 ret = -EAGAIN; 1452 ret = -EAGAIN;
1452 goto out; 1453 goto out;
1453 } 1454 }
1454 up(&s->sem); 1455 mutex_unlock(&s->sem);
1455 schedule(); 1456 schedule();
1456 if (signal_pending(current)) { 1457 if (signal_pending(current)) {
1457 if (!ret) 1458 if (!ret)
1458 ret = -ERESTARTSYS; 1459 ret = -ERESTARTSYS;
1459 goto out2; 1460 goto out2;
1460 } 1461 }
1461 down(&s->sem); 1462 mutex_lock(&s->sem);
1462 if (s->dma_dac2.mapped) 1463 if (s->dma_dac2.mapped)
1463 { 1464 {
1464 ret = -ENXIO; 1465 ret = -ENXIO;
@@ -1484,7 +1485,7 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t
1484 start_dac2(s); 1485 start_dac2(s);
1485 } 1486 }
1486out: 1487out:
1487 up(&s->sem); 1488 mutex_unlock(&s->sem);
1488out2: 1489out2:
1489 remove_wait_queue(&s->dma_dac2.wait, &wait); 1490 remove_wait_queue(&s->dma_dac2.wait, &wait);
1490out3: 1491out3:
@@ -1538,7 +1539,7 @@ static int es1371_mmap(struct file *file, struct vm_area_struct *vma)
1538 1539
1539 VALIDATE_STATE(s); 1540 VALIDATE_STATE(s);
1540 lock_kernel(); 1541 lock_kernel();
1541 down(&s->sem); 1542 mutex_lock(&s->sem);
1542 1543
1543 if (vma->vm_flags & VM_WRITE) { 1544 if (vma->vm_flags & VM_WRITE) {
1544 if ((ret = prog_dmabuf_dac2(s)) != 0) { 1545 if ((ret = prog_dmabuf_dac2(s)) != 0) {
@@ -1571,7 +1572,7 @@ static int es1371_mmap(struct file *file, struct vm_area_struct *vma)
1571 } 1572 }
1572 db->mapped = 1; 1573 db->mapped = 1;
1573out: 1574out:
1574 up(&s->sem); 1575 mutex_unlock(&s->sem);
1575 unlock_kernel(); 1576 unlock_kernel();
1576 return ret; 1577 return ret;
1577} 1578}
@@ -1938,21 +1939,21 @@ static int es1371_open(struct inode *inode, struct file *file)
1938 VALIDATE_STATE(s); 1939 VALIDATE_STATE(s);
1939 file->private_data = s; 1940 file->private_data = s;
1940 /* wait for device to become free */ 1941 /* wait for device to become free */
1941 down(&s->open_sem); 1942 mutex_lock(&s->open_mutex);
1942 while (s->open_mode & file->f_mode) { 1943 while (s->open_mode & file->f_mode) {
1943 if (file->f_flags & O_NONBLOCK) { 1944 if (file->f_flags & O_NONBLOCK) {
1944 up(&s->open_sem); 1945 mutex_unlock(&s->open_mutex);
1945 return -EBUSY; 1946 return -EBUSY;
1946 } 1947 }
1947 add_wait_queue(&s->open_wait, &wait); 1948 add_wait_queue(&s->open_wait, &wait);
1948 __set_current_state(TASK_INTERRUPTIBLE); 1949 __set_current_state(TASK_INTERRUPTIBLE);
1949 up(&s->open_sem); 1950 mutex_unlock(&s->open_mutex);
1950 schedule(); 1951 schedule();
1951 remove_wait_queue(&s->open_wait, &wait); 1952 remove_wait_queue(&s->open_wait, &wait);
1952 set_current_state(TASK_RUNNING); 1953 set_current_state(TASK_RUNNING);
1953 if (signal_pending(current)) 1954 if (signal_pending(current))
1954 return -ERESTARTSYS; 1955 return -ERESTARTSYS;
1955 down(&s->open_sem); 1956 mutex_lock(&s->open_mutex);
1956 } 1957 }
1957 if (file->f_mode & FMODE_READ) { 1958 if (file->f_mode & FMODE_READ) {
1958 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; 1959 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0;
@@ -1982,8 +1983,8 @@ static int es1371_open(struct inode *inode, struct file *file)
1982 outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL); 1983 outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL);
1983 spin_unlock_irqrestore(&s->lock, flags); 1984 spin_unlock_irqrestore(&s->lock, flags);
1984 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 1985 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
1985 up(&s->open_sem); 1986 mutex_unlock(&s->open_mutex);
1986 init_MUTEX(&s->sem); 1987 mutex_init(&s->sem);
1987 return nonseekable_open(inode, file); 1988 return nonseekable_open(inode, file);
1988} 1989}
1989 1990
@@ -1995,7 +1996,7 @@ static int es1371_release(struct inode *inode, struct file *file)
1995 lock_kernel(); 1996 lock_kernel();
1996 if (file->f_mode & FMODE_WRITE) 1997 if (file->f_mode & FMODE_WRITE)
1997 drain_dac2(s, file->f_flags & O_NONBLOCK); 1998 drain_dac2(s, file->f_flags & O_NONBLOCK);
1998 down(&s->open_sem); 1999 mutex_lock(&s->open_mutex);
1999 if (file->f_mode & FMODE_WRITE) { 2000 if (file->f_mode & FMODE_WRITE) {
2000 stop_dac2(s); 2001 stop_dac2(s);
2001 dealloc_dmabuf(s, &s->dma_dac2); 2002 dealloc_dmabuf(s, &s->dma_dac2);
@@ -2005,7 +2006,7 @@ static int es1371_release(struct inode *inode, struct file *file)
2005 dealloc_dmabuf(s, &s->dma_adc); 2006 dealloc_dmabuf(s, &s->dma_adc);
2006 } 2007 }
2007 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); 2008 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE));
2008 up(&s->open_sem); 2009 mutex_unlock(&s->open_mutex);
2009 wake_up(&s->open_wait); 2010 wake_up(&s->open_wait);
2010 unlock_kernel(); 2011 unlock_kernel();
2011 return 0; 2012 return 0;
@@ -2377,21 +2378,21 @@ static int es1371_open_dac(struct inode *inode, struct file *file)
2377 return -EINVAL; 2378 return -EINVAL;
2378 file->private_data = s; 2379 file->private_data = s;
2379 /* wait for device to become free */ 2380 /* wait for device to become free */
2380 down(&s->open_sem); 2381 mutex_lock(&s->open_mutex);
2381 while (s->open_mode & FMODE_DAC) { 2382 while (s->open_mode & FMODE_DAC) {
2382 if (file->f_flags & O_NONBLOCK) { 2383 if (file->f_flags & O_NONBLOCK) {
2383 up(&s->open_sem); 2384 mutex_unlock(&s->open_mutex);
2384 return -EBUSY; 2385 return -EBUSY;
2385 } 2386 }
2386 add_wait_queue(&s->open_wait, &wait); 2387 add_wait_queue(&s->open_wait, &wait);
2387 __set_current_state(TASK_INTERRUPTIBLE); 2388 __set_current_state(TASK_INTERRUPTIBLE);
2388 up(&s->open_sem); 2389 mutex_unlock(&s->open_mutex);
2389 schedule(); 2390 schedule();
2390 remove_wait_queue(&s->open_wait, &wait); 2391 remove_wait_queue(&s->open_wait, &wait);
2391 set_current_state(TASK_RUNNING); 2392 set_current_state(TASK_RUNNING);
2392 if (signal_pending(current)) 2393 if (signal_pending(current))
2393 return -ERESTARTSYS; 2394 return -ERESTARTSYS;
2394 down(&s->open_sem); 2395 mutex_lock(&s->open_mutex);
2395 } 2396 }
2396 s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0; 2397 s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0;
2397 s->dma_dac1.enabled = 1; 2398 s->dma_dac1.enabled = 1;
@@ -2405,7 +2406,7 @@ static int es1371_open_dac(struct inode *inode, struct file *file)
2405 outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL); 2406 outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL);
2406 spin_unlock_irqrestore(&s->lock, flags); 2407 spin_unlock_irqrestore(&s->lock, flags);
2407 s->open_mode |= FMODE_DAC; 2408 s->open_mode |= FMODE_DAC;
2408 up(&s->open_sem); 2409 mutex_unlock(&s->open_mutex);
2409 return nonseekable_open(inode, file); 2410 return nonseekable_open(inode, file);
2410} 2411}
2411 2412
@@ -2416,11 +2417,11 @@ static int es1371_release_dac(struct inode *inode, struct file *file)
2416 VALIDATE_STATE(s); 2417 VALIDATE_STATE(s);
2417 lock_kernel(); 2418 lock_kernel();
2418 drain_dac1(s, file->f_flags & O_NONBLOCK); 2419 drain_dac1(s, file->f_flags & O_NONBLOCK);
2419 down(&s->open_sem); 2420 mutex_lock(&s->open_mutex);
2420 stop_dac1(s); 2421 stop_dac1(s);
2421 dealloc_dmabuf(s, &s->dma_dac1); 2422 dealloc_dmabuf(s, &s->dma_dac1);
2422 s->open_mode &= ~FMODE_DAC; 2423 s->open_mode &= ~FMODE_DAC;
2423 up(&s->open_sem); 2424 mutex_unlock(&s->open_mutex);
2424 wake_up(&s->open_wait); 2425 wake_up(&s->open_wait);
2425 unlock_kernel(); 2426 unlock_kernel();
2426 return 0; 2427 return 0;
@@ -2608,21 +2609,21 @@ static int es1371_midi_open(struct inode *inode, struct file *file)
2608 VALIDATE_STATE(s); 2609 VALIDATE_STATE(s);
2609 file->private_data = s; 2610 file->private_data = s;
2610 /* wait for device to become free */ 2611 /* wait for device to become free */
2611 down(&s->open_sem); 2612 mutex_lock(&s->open_mutex);
2612 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { 2613 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
2613 if (file->f_flags & O_NONBLOCK) { 2614 if (file->f_flags & O_NONBLOCK) {
2614 up(&s->open_sem); 2615 mutex_unlock(&s->open_mutex);
2615 return -EBUSY; 2616 return -EBUSY;
2616 } 2617 }
2617 add_wait_queue(&s->open_wait, &wait); 2618 add_wait_queue(&s->open_wait, &wait);
2618 __set_current_state(TASK_INTERRUPTIBLE); 2619 __set_current_state(TASK_INTERRUPTIBLE);
2619 up(&s->open_sem); 2620 mutex_unlock(&s->open_mutex);
2620 schedule(); 2621 schedule();
2621 remove_wait_queue(&s->open_wait, &wait); 2622 remove_wait_queue(&s->open_wait, &wait);
2622 set_current_state(TASK_RUNNING); 2623 set_current_state(TASK_RUNNING);
2623 if (signal_pending(current)) 2624 if (signal_pending(current))
2624 return -ERESTARTSYS; 2625 return -ERESTARTSYS;
2625 down(&s->open_sem); 2626 mutex_lock(&s->open_mutex);
2626 } 2627 }
2627 spin_lock_irqsave(&s->lock, flags); 2628 spin_lock_irqsave(&s->lock, flags);
2628 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 2629 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -2643,7 +2644,7 @@ static int es1371_midi_open(struct inode *inode, struct file *file)
2643 es1371_handle_midi(s); 2644 es1371_handle_midi(s);
2644 spin_unlock_irqrestore(&s->lock, flags); 2645 spin_unlock_irqrestore(&s->lock, flags);
2645 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); 2646 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
2646 up(&s->open_sem); 2647 mutex_unlock(&s->open_mutex);
2647 return nonseekable_open(inode, file); 2648 return nonseekable_open(inode, file);
2648} 2649}
2649 2650
@@ -2676,7 +2677,7 @@ static int es1371_midi_release(struct inode *inode, struct file *file)
2676 remove_wait_queue(&s->midi.owait, &wait); 2677 remove_wait_queue(&s->midi.owait, &wait);
2677 set_current_state(TASK_RUNNING); 2678 set_current_state(TASK_RUNNING);
2678 } 2679 }
2679 down(&s->open_sem); 2680 mutex_lock(&s->open_mutex);
2680 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); 2681 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE));
2681 spin_lock_irqsave(&s->lock, flags); 2682 spin_lock_irqsave(&s->lock, flags);
2682 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 2683 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -2684,7 +2685,7 @@ static int es1371_midi_release(struct inode *inode, struct file *file)
2684 outl(s->ctrl, s->io+ES1371_REG_CONTROL); 2685 outl(s->ctrl, s->io+ES1371_REG_CONTROL);
2685 } 2686 }
2686 spin_unlock_irqrestore(&s->lock, flags); 2687 spin_unlock_irqrestore(&s->lock, flags);
2687 up(&s->open_sem); 2688 mutex_unlock(&s->open_mutex);
2688 wake_up(&s->open_wait); 2689 wake_up(&s->open_wait);
2689 unlock_kernel(); 2690 unlock_kernel();
2690 return 0; 2691 return 0;
@@ -2884,7 +2885,7 @@ static int __devinit es1371_probe(struct pci_dev *pcidev, const struct pci_devic
2884 init_waitqueue_head(&s->open_wait); 2885 init_waitqueue_head(&s->open_wait);
2885 init_waitqueue_head(&s->midi.iwait); 2886 init_waitqueue_head(&s->midi.iwait);
2886 init_waitqueue_head(&s->midi.owait); 2887 init_waitqueue_head(&s->midi.owait);
2887 init_MUTEX(&s->open_sem); 2888 mutex_init(&s->open_mutex);
2888 spin_lock_init(&s->lock); 2889 spin_lock_init(&s->lock);
2889 s->magic = ES1371_MAGIC; 2890 s->magic = ES1371_MAGIC;
2890 s->dev = pcidev; 2891 s->dev = pcidev;
diff --git a/sound/oss/esssolo1.c b/sound/oss/esssolo1.c
index 849b59f67ef5..78d3e29ce968 100644
--- a/sound/oss/esssolo1.c
+++ b/sound/oss/esssolo1.c
@@ -105,6 +105,8 @@
105#include <linux/gameport.h> 105#include <linux/gameport.h>
106#include <linux/wait.h> 106#include <linux/wait.h>
107#include <linux/dma-mapping.h> 107#include <linux/dma-mapping.h>
108#include <linux/mutex.h>
109
108 110
109#include <asm/io.h> 111#include <asm/io.h>
110#include <asm/page.h> 112#include <asm/page.h>
@@ -191,7 +193,7 @@ struct solo1_state {
191 unsigned ena; 193 unsigned ena;
192 194
193 spinlock_t lock; 195 spinlock_t lock;
194 struct semaphore open_sem; 196 struct mutex open_mutex;
195 mode_t open_mode; 197 mode_t open_mode;
196 wait_queue_head_t open_wait; 198 wait_queue_head_t open_wait;
197 199
@@ -1581,7 +1583,7 @@ static int solo1_release(struct inode *inode, struct file *file)
1581 lock_kernel(); 1583 lock_kernel();
1582 if (file->f_mode & FMODE_WRITE) 1584 if (file->f_mode & FMODE_WRITE)
1583 drain_dac(s, file->f_flags & O_NONBLOCK); 1585 drain_dac(s, file->f_flags & O_NONBLOCK);
1584 down(&s->open_sem); 1586 mutex_lock(&s->open_mutex);
1585 if (file->f_mode & FMODE_WRITE) { 1587 if (file->f_mode & FMODE_WRITE) {
1586 stop_dac(s); 1588 stop_dac(s);
1587 outb(0, s->iobase+6); /* disable DMA */ 1589 outb(0, s->iobase+6); /* disable DMA */
@@ -1595,7 +1597,7 @@ static int solo1_release(struct inode *inode, struct file *file)
1595 } 1597 }
1596 s->open_mode &= ~(FMODE_READ | FMODE_WRITE); 1598 s->open_mode &= ~(FMODE_READ | FMODE_WRITE);
1597 wake_up(&s->open_wait); 1599 wake_up(&s->open_wait);
1598 up(&s->open_sem); 1600 mutex_unlock(&s->open_mutex);
1599 unlock_kernel(); 1601 unlock_kernel();
1600 return 0; 1602 return 0;
1601} 1603}
@@ -1624,21 +1626,21 @@ static int solo1_open(struct inode *inode, struct file *file)
1624 VALIDATE_STATE(s); 1626 VALIDATE_STATE(s);
1625 file->private_data = s; 1627 file->private_data = s;
1626 /* wait for device to become free */ 1628 /* wait for device to become free */
1627 down(&s->open_sem); 1629 mutex_lock(&s->open_mutex);
1628 while (s->open_mode & (FMODE_READ | FMODE_WRITE)) { 1630 while (s->open_mode & (FMODE_READ | FMODE_WRITE)) {
1629 if (file->f_flags & O_NONBLOCK) { 1631 if (file->f_flags & O_NONBLOCK) {
1630 up(&s->open_sem); 1632 mutex_unlock(&s->open_mutex);
1631 return -EBUSY; 1633 return -EBUSY;
1632 } 1634 }
1633 add_wait_queue(&s->open_wait, &wait); 1635 add_wait_queue(&s->open_wait, &wait);
1634 __set_current_state(TASK_INTERRUPTIBLE); 1636 __set_current_state(TASK_INTERRUPTIBLE);
1635 up(&s->open_sem); 1637 mutex_unlock(&s->open_mutex);
1636 schedule(); 1638 schedule();
1637 remove_wait_queue(&s->open_wait, &wait); 1639 remove_wait_queue(&s->open_wait, &wait);
1638 set_current_state(TASK_RUNNING); 1640 set_current_state(TASK_RUNNING);
1639 if (signal_pending(current)) 1641 if (signal_pending(current))
1640 return -ERESTARTSYS; 1642 return -ERESTARTSYS;
1641 down(&s->open_sem); 1643 mutex_lock(&s->open_mutex);
1642 } 1644 }
1643 s->fmt = AFMT_U8; 1645 s->fmt = AFMT_U8;
1644 s->channels = 1; 1646 s->channels = 1;
@@ -1650,7 +1652,7 @@ static int solo1_open(struct inode *inode, struct file *file)
1650 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; 1652 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0;
1651 s->dma_dac.enabled = 1; 1653 s->dma_dac.enabled = 1;
1652 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 1654 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
1653 up(&s->open_sem); 1655 mutex_unlock(&s->open_mutex);
1654 prog_codec(s); 1656 prog_codec(s);
1655 return nonseekable_open(inode, file); 1657 return nonseekable_open(inode, file);
1656} 1658}
@@ -1911,21 +1913,21 @@ static int solo1_midi_open(struct inode *inode, struct file *file)
1911 VALIDATE_STATE(s); 1913 VALIDATE_STATE(s);
1912 file->private_data = s; 1914 file->private_data = s;
1913 /* wait for device to become free */ 1915 /* wait for device to become free */
1914 down(&s->open_sem); 1916 mutex_lock(&s->open_mutex);
1915 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { 1917 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
1916 if (file->f_flags & O_NONBLOCK) { 1918 if (file->f_flags & O_NONBLOCK) {
1917 up(&s->open_sem); 1919 mutex_unlock(&s->open_mutex);
1918 return -EBUSY; 1920 return -EBUSY;
1919 } 1921 }
1920 add_wait_queue(&s->open_wait, &wait); 1922 add_wait_queue(&s->open_wait, &wait);
1921 __set_current_state(TASK_INTERRUPTIBLE); 1923 __set_current_state(TASK_INTERRUPTIBLE);
1922 up(&s->open_sem); 1924 mutex_unlock(&s->open_mutex);
1923 schedule(); 1925 schedule();
1924 remove_wait_queue(&s->open_wait, &wait); 1926 remove_wait_queue(&s->open_wait, &wait);
1925 set_current_state(TASK_RUNNING); 1927 set_current_state(TASK_RUNNING);
1926 if (signal_pending(current)) 1928 if (signal_pending(current))
1927 return -ERESTARTSYS; 1929 return -ERESTARTSYS;
1928 down(&s->open_sem); 1930 mutex_lock(&s->open_mutex);
1929 } 1931 }
1930 spin_lock_irqsave(&s->lock, flags); 1932 spin_lock_irqsave(&s->lock, flags);
1931 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 1933 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -1951,7 +1953,7 @@ static int solo1_midi_open(struct inode *inode, struct file *file)
1951 } 1953 }
1952 spin_unlock_irqrestore(&s->lock, flags); 1954 spin_unlock_irqrestore(&s->lock, flags);
1953 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); 1955 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
1954 up(&s->open_sem); 1956 mutex_unlock(&s->open_mutex);
1955 return nonseekable_open(inode, file); 1957 return nonseekable_open(inode, file);
1956} 1958}
1957 1959
@@ -1985,7 +1987,7 @@ static int solo1_midi_release(struct inode *inode, struct file *file)
1985 remove_wait_queue(&s->midi.owait, &wait); 1987 remove_wait_queue(&s->midi.owait, &wait);
1986 set_current_state(TASK_RUNNING); 1988 set_current_state(TASK_RUNNING);
1987 } 1989 }
1988 down(&s->open_sem); 1990 mutex_lock(&s->open_mutex);
1989 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); 1991 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE));
1990 spin_lock_irqsave(&s->lock, flags); 1992 spin_lock_irqsave(&s->lock, flags);
1991 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 1993 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -1994,7 +1996,7 @@ static int solo1_midi_release(struct inode *inode, struct file *file)
1994 } 1996 }
1995 spin_unlock_irqrestore(&s->lock, flags); 1997 spin_unlock_irqrestore(&s->lock, flags);
1996 wake_up(&s->open_wait); 1998 wake_up(&s->open_wait);
1997 up(&s->open_sem); 1999 mutex_unlock(&s->open_mutex);
1998 unlock_kernel(); 2000 unlock_kernel();
1999 return 0; 2001 return 0;
2000} 2002}
@@ -2132,24 +2134,24 @@ static int solo1_dmfm_open(struct inode *inode, struct file *file)
2132 VALIDATE_STATE(s); 2134 VALIDATE_STATE(s);
2133 file->private_data = s; 2135 file->private_data = s;
2134 /* wait for device to become free */ 2136 /* wait for device to become free */
2135 down(&s->open_sem); 2137 mutex_lock(&s->open_mutex);
2136 while (s->open_mode & FMODE_DMFM) { 2138 while (s->open_mode & FMODE_DMFM) {
2137 if (file->f_flags & O_NONBLOCK) { 2139 if (file->f_flags & O_NONBLOCK) {
2138 up(&s->open_sem); 2140 mutex_unlock(&s->open_mutex);
2139 return -EBUSY; 2141 return -EBUSY;
2140 } 2142 }
2141 add_wait_queue(&s->open_wait, &wait); 2143 add_wait_queue(&s->open_wait, &wait);
2142 __set_current_state(TASK_INTERRUPTIBLE); 2144 __set_current_state(TASK_INTERRUPTIBLE);
2143 up(&s->open_sem); 2145 mutex_unlock(&s->open_mutex);
2144 schedule(); 2146 schedule();
2145 remove_wait_queue(&s->open_wait, &wait); 2147 remove_wait_queue(&s->open_wait, &wait);
2146 set_current_state(TASK_RUNNING); 2148 set_current_state(TASK_RUNNING);
2147 if (signal_pending(current)) 2149 if (signal_pending(current))
2148 return -ERESTARTSYS; 2150 return -ERESTARTSYS;
2149 down(&s->open_sem); 2151 mutex_lock(&s->open_mutex);
2150 } 2152 }
2151 if (!request_region(s->sbbase, FMSYNTH_EXTENT, "ESS Solo1")) { 2153 if (!request_region(s->sbbase, FMSYNTH_EXTENT, "ESS Solo1")) {
2152 up(&s->open_sem); 2154 mutex_unlock(&s->open_mutex);
2153 printk(KERN_ERR "solo1: FM synth io ports in use, opl3 loaded?\n"); 2155 printk(KERN_ERR "solo1: FM synth io ports in use, opl3 loaded?\n");
2154 return -EBUSY; 2156 return -EBUSY;
2155 } 2157 }
@@ -2161,7 +2163,7 @@ static int solo1_dmfm_open(struct inode *inode, struct file *file)
2161 outb(5, s->sbbase+2); 2163 outb(5, s->sbbase+2);
2162 outb(1, s->sbbase+3); /* enable OPL3 */ 2164 outb(1, s->sbbase+3); /* enable OPL3 */
2163 s->open_mode |= FMODE_DMFM; 2165 s->open_mode |= FMODE_DMFM;
2164 up(&s->open_sem); 2166 mutex_unlock(&s->open_mutex);
2165 return nonseekable_open(inode, file); 2167 return nonseekable_open(inode, file);
2166} 2168}
2167 2169
@@ -2172,7 +2174,7 @@ static int solo1_dmfm_release(struct inode *inode, struct file *file)
2172 2174
2173 VALIDATE_STATE(s); 2175 VALIDATE_STATE(s);
2174 lock_kernel(); 2176 lock_kernel();
2175 down(&s->open_sem); 2177 mutex_lock(&s->open_mutex);
2176 s->open_mode &= ~FMODE_DMFM; 2178 s->open_mode &= ~FMODE_DMFM;
2177 for (regb = 0xb0; regb < 0xb9; regb++) { 2179 for (regb = 0xb0; regb < 0xb9; regb++) {
2178 outb(regb, s->sbbase); 2180 outb(regb, s->sbbase);
@@ -2182,7 +2184,7 @@ static int solo1_dmfm_release(struct inode *inode, struct file *file)
2182 } 2184 }
2183 release_region(s->sbbase, FMSYNTH_EXTENT); 2185 release_region(s->sbbase, FMSYNTH_EXTENT);
2184 wake_up(&s->open_wait); 2186 wake_up(&s->open_wait);
2185 up(&s->open_sem); 2187 mutex_unlock(&s->open_mutex);
2186 unlock_kernel(); 2188 unlock_kernel();
2187 return 0; 2189 return 0;
2188} 2190}
@@ -2362,7 +2364,7 @@ static int __devinit solo1_probe(struct pci_dev *pcidev, const struct pci_device
2362 init_waitqueue_head(&s->open_wait); 2364 init_waitqueue_head(&s->open_wait);
2363 init_waitqueue_head(&s->midi.iwait); 2365 init_waitqueue_head(&s->midi.iwait);
2364 init_waitqueue_head(&s->midi.owait); 2366 init_waitqueue_head(&s->midi.owait);
2365 init_MUTEX(&s->open_sem); 2367 mutex_init(&s->open_mutex);
2366 spin_lock_init(&s->lock); 2368 spin_lock_init(&s->lock);
2367 s->magic = SOLO1_MAGIC; 2369 s->magic = SOLO1_MAGIC;
2368 s->dev = pcidev; 2370 s->dev = pcidev;
diff --git a/sound/oss/forte.c b/sound/oss/forte.c
index 8406bc90c4ff..0294eec8ad90 100644
--- a/sound/oss/forte.c
+++ b/sound/oss/forte.c
@@ -43,6 +43,7 @@
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44 44
45#include <linux/proc_fs.h> 45#include <linux/proc_fs.h>
46#include <linux/mutex.h>
46 47
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48#include <asm/io.h> 49#include <asm/io.h>
@@ -185,7 +186,7 @@ struct forte_chip {
185 unsigned long iobase; 186 unsigned long iobase;
186 int irq; 187 int irq;
187 188
188 struct semaphore open_sem; /* Device access */ 189 struct mutex open_mutex; /* Device access */
189 spinlock_t lock; /* State */ 190 spinlock_t lock; /* State */
190 191
191 spinlock_t ac97_lock; 192 spinlock_t ac97_lock;
@@ -1242,13 +1243,13 @@ forte_dsp_open (struct inode *inode, struct file *file)
1242 struct forte_chip *chip = forte; /* FIXME: HACK FROM HELL! */ 1243 struct forte_chip *chip = forte; /* FIXME: HACK FROM HELL! */
1243 1244
1244 if (file->f_flags & O_NONBLOCK) { 1245 if (file->f_flags & O_NONBLOCK) {
1245 if (down_trylock (&chip->open_sem)) { 1246 if (!mutex_trylock(&chip->open_mutex)) {
1246 DPRINTK ("%s: returning -EAGAIN\n", __FUNCTION__); 1247 DPRINTK ("%s: returning -EAGAIN\n", __FUNCTION__);
1247 return -EAGAIN; 1248 return -EAGAIN;
1248 } 1249 }
1249 } 1250 }
1250 else { 1251 else {
1251 if (down_interruptible (&chip->open_sem)) { 1252 if (mutex_lock_interruptible(&chip->open_mutex)) {
1252 DPRINTK ("%s: returning -ERESTARTSYS\n", __FUNCTION__); 1253 DPRINTK ("%s: returning -ERESTARTSYS\n", __FUNCTION__);
1253 return -ERESTARTSYS; 1254 return -ERESTARTSYS;
1254 } 1255 }
@@ -1302,7 +1303,7 @@ forte_dsp_release (struct inode *inode, struct file *file)
1302 spin_unlock_irq (&chip->lock); 1303 spin_unlock_irq (&chip->lock);
1303 } 1304 }
1304 1305
1305 up (&chip->open_sem); 1306 mutex_unlock(&chip->open_mutex);
1306 1307
1307 return ret; 1308 return ret;
1308} 1309}
@@ -2011,7 +2012,7 @@ forte_probe (struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
2011 memset (chip, 0, sizeof (struct forte_chip)); 2012 memset (chip, 0, sizeof (struct forte_chip));
2012 chip->pci_dev = pci_dev; 2013 chip->pci_dev = pci_dev;
2013 2014
2014 init_MUTEX(&chip->open_sem); 2015 mutex_init(&chip->open_mutex);
2015 spin_lock_init (&chip->lock); 2016 spin_lock_init (&chip->lock);
2016 spin_lock_init (&chip->ac97_lock); 2017 spin_lock_init (&chip->ac97_lock);
2017 2018
diff --git a/sound/oss/hal2.c b/sound/oss/hal2.c
index afe97c4ce069..dd4f59d30a3a 100644
--- a/sound/oss/hal2.c
+++ b/sound/oss/hal2.c
@@ -32,6 +32,8 @@
32#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
33#include <linux/sound.h> 33#include <linux/sound.h>
34#include <linux/soundcard.h> 34#include <linux/soundcard.h>
35#include <linux/mutex.h>
36
35 37
36#include <asm/io.h> 38#include <asm/io.h>
37#include <asm/sgi/hpc3.h> 39#include <asm/sgi/hpc3.h>
@@ -92,7 +94,7 @@ struct hal2_codec {
92 94
93 wait_queue_head_t dma_wait; 95 wait_queue_head_t dma_wait;
94 spinlock_t lock; 96 spinlock_t lock;
95 struct semaphore sem; 97 struct mutex sem;
96 98
97 int usecount; /* recording and playback are 99 int usecount; /* recording and playback are
98 * independent */ 100 * independent */
@@ -1178,7 +1180,7 @@ static ssize_t hal2_read(struct file *file, char *buffer,
1178 1180
1179 if (!count) 1181 if (!count)
1180 return 0; 1182 return 0;
1181 if (down_interruptible(&adc->sem)) 1183 if (mutex_lock_interruptible(&adc->sem))
1182 return -EINTR; 1184 return -EINTR;
1183 if (file->f_flags & O_NONBLOCK) { 1185 if (file->f_flags & O_NONBLOCK) {
1184 err = hal2_get_buffer(hal2, buffer, count); 1186 err = hal2_get_buffer(hal2, buffer, count);
@@ -1217,7 +1219,7 @@ static ssize_t hal2_read(struct file *file, char *buffer,
1217 } 1219 }
1218 } while (count > 0 && err >= 0); 1220 } while (count > 0 && err >= 0);
1219 } 1221 }
1220 up(&adc->sem); 1222 mutex_unlock(&adc->sem);
1221 1223
1222 return err; 1224 return err;
1223} 1225}
@@ -1232,7 +1234,7 @@ static ssize_t hal2_write(struct file *file, const char *buffer,
1232 1234
1233 if (!count) 1235 if (!count)
1234 return 0; 1236 return 0;
1235 if (down_interruptible(&dac->sem)) 1237 if (mutex_lock_interruptible(&dac->sem))
1236 return -EINTR; 1238 return -EINTR;
1237 if (file->f_flags & O_NONBLOCK) { 1239 if (file->f_flags & O_NONBLOCK) {
1238 err = hal2_add_buffer(hal2, buf, count); 1240 err = hal2_add_buffer(hal2, buf, count);
@@ -1271,7 +1273,7 @@ static ssize_t hal2_write(struct file *file, const char *buffer,
1271 } 1273 }
1272 } while (count > 0 && err >= 0); 1274 } while (count > 0 && err >= 0);
1273 } 1275 }
1274 up(&dac->sem); 1276 mutex_unlock(&dac->sem);
1275 1277
1276 return err; 1278 return err;
1277} 1279}
@@ -1356,20 +1358,20 @@ static int hal2_release(struct inode *inode, struct file *file)
1356 if (file->f_mode & FMODE_READ) { 1358 if (file->f_mode & FMODE_READ) {
1357 struct hal2_codec *adc = &hal2->adc; 1359 struct hal2_codec *adc = &hal2->adc;
1358 1360
1359 down(&adc->sem); 1361 mutex_lock(&adc->sem);
1360 hal2_stop_adc(hal2); 1362 hal2_stop_adc(hal2);
1361 hal2_free_adc_dmabuf(adc); 1363 hal2_free_adc_dmabuf(adc);
1362 adc->usecount--; 1364 adc->usecount--;
1363 up(&adc->sem); 1365 mutex_unlock(&adc->sem);
1364 } 1366 }
1365 if (file->f_mode & FMODE_WRITE) { 1367 if (file->f_mode & FMODE_WRITE) {
1366 struct hal2_codec *dac = &hal2->dac; 1368 struct hal2_codec *dac = &hal2->dac;
1367 1369
1368 down(&dac->sem); 1370 mutex_lock(&dac->sem);
1369 hal2_sync_dac(hal2); 1371 hal2_sync_dac(hal2);
1370 hal2_free_dac_dmabuf(dac); 1372 hal2_free_dac_dmabuf(dac);
1371 dac->usecount--; 1373 dac->usecount--;
1372 up(&dac->sem); 1374 mutex_unlock(&dac->sem);
1373 } 1375 }
1374 1376
1375 return 0; 1377 return 0;
@@ -1400,7 +1402,7 @@ static void hal2_init_codec(struct hal2_codec *codec, struct hpc3_regs *hpc3,
1400 codec->pbus.pbusnr = index; 1402 codec->pbus.pbusnr = index;
1401 codec->pbus.pbus = &hpc3->pbdma[index]; 1403 codec->pbus.pbus = &hpc3->pbdma[index];
1402 init_waitqueue_head(&codec->dma_wait); 1404 init_waitqueue_head(&codec->dma_wait);
1403 init_MUTEX(&codec->sem); 1405 mutex_init(&codec->sem);
1404 spin_lock_init(&codec->lock); 1406 spin_lock_init(&codec->lock);
1405} 1407}
1406 1408
diff --git a/sound/oss/i810_audio.c b/sound/oss/i810_audio.c
index abc242abd5b1..dd2b871cdac5 100644
--- a/sound/oss/i810_audio.c
+++ b/sound/oss/i810_audio.c
@@ -100,6 +100,8 @@
100#include <linux/smp_lock.h> 100#include <linux/smp_lock.h>
101#include <linux/ac97_codec.h> 101#include <linux/ac97_codec.h>
102#include <linux/bitops.h> 102#include <linux/bitops.h>
103#include <linux/mutex.h>
104
103#include <asm/uaccess.h> 105#include <asm/uaccess.h>
104 106
105#define DRIVER_VERSION "1.01" 107#define DRIVER_VERSION "1.01"
@@ -331,7 +333,7 @@ struct i810_state {
331 struct i810_card *card; /* Card info */ 333 struct i810_card *card; /* Card info */
332 334
333 /* single open lock mechanism, only used for recording */ 335 /* single open lock mechanism, only used for recording */
334 struct semaphore open_sem; 336 struct mutex open_mutex;
335 wait_queue_head_t open_wait; 337 wait_queue_head_t open_wait;
336 338
337 /* file mode */ 339 /* file mode */
@@ -2597,7 +2599,7 @@ found_virt:
2597 state->card = card; 2599 state->card = card;
2598 state->magic = I810_STATE_MAGIC; 2600 state->magic = I810_STATE_MAGIC;
2599 init_waitqueue_head(&dmabuf->wait); 2601 init_waitqueue_head(&dmabuf->wait);
2600 init_MUTEX(&state->open_sem); 2602 mutex_init(&state->open_mutex);
2601 file->private_data = state; 2603 file->private_data = state;
2602 dmabuf->trigger = 0; 2604 dmabuf->trigger = 0;
2603 2605
@@ -3213,7 +3215,7 @@ static void __devinit i810_configure_clocking (void)
3213 state->card = card; 3215 state->card = card;
3214 state->magic = I810_STATE_MAGIC; 3216 state->magic = I810_STATE_MAGIC;
3215 init_waitqueue_head(&dmabuf->wait); 3217 init_waitqueue_head(&dmabuf->wait);
3216 init_MUTEX(&state->open_sem); 3218 mutex_init(&state->open_mutex);
3217 dmabuf->fmt = I810_FMT_STEREO | I810_FMT_16BIT; 3219 dmabuf->fmt = I810_FMT_STEREO | I810_FMT_16BIT;
3218 dmabuf->trigger = PCM_ENABLE_OUTPUT; 3220 dmabuf->trigger = PCM_ENABLE_OUTPUT;
3219 i810_set_spdif_output(state, -1, 0); 3221 i810_set_spdif_output(state, -1, 0);
diff --git a/sound/oss/ite8172.c b/sound/oss/ite8172.c
index 8fd2f9a9e668..ffcb910f5c3e 100644
--- a/sound/oss/ite8172.c
+++ b/sound/oss/ite8172.c
@@ -71,6 +71,8 @@
71#include <linux/smp_lock.h> 71#include <linux/smp_lock.h>
72#include <linux/ac97_codec.h> 72#include <linux/ac97_codec.h>
73#include <linux/interrupt.h> 73#include <linux/interrupt.h>
74#include <linux/mutex.h>
75
74#include <asm/io.h> 76#include <asm/io.h>
75#include <asm/dma.h> 77#include <asm/dma.h>
76#include <asm/uaccess.h> 78#include <asm/uaccess.h>
@@ -304,7 +306,7 @@ struct it8172_state {
304 unsigned dacrate, adcrate; 306 unsigned dacrate, adcrate;
305 307
306 spinlock_t lock; 308 spinlock_t lock;
307 struct semaphore open_sem; 309 struct mutex open_mutex;
308 mode_t open_mode; 310 mode_t open_mode;
309 wait_queue_head_t open_wait; 311 wait_queue_head_t open_wait;
310 312
@@ -1801,21 +1803,21 @@ static int it8172_open(struct inode *inode, struct file *file)
1801 } 1803 }
1802 file->private_data = s; 1804 file->private_data = s;
1803 /* wait for device to become free */ 1805 /* wait for device to become free */
1804 down(&s->open_sem); 1806 mutex_lock(&s->open_mutex);
1805 while (s->open_mode & file->f_mode) { 1807 while (s->open_mode & file->f_mode) {
1806 if (file->f_flags & O_NONBLOCK) { 1808 if (file->f_flags & O_NONBLOCK) {
1807 up(&s->open_sem); 1809 mutex_unlock(&s->open_mutex);
1808 return -EBUSY; 1810 return -EBUSY;
1809 } 1811 }
1810 add_wait_queue(&s->open_wait, &wait); 1812 add_wait_queue(&s->open_wait, &wait);
1811 __set_current_state(TASK_INTERRUPTIBLE); 1813 __set_current_state(TASK_INTERRUPTIBLE);
1812 up(&s->open_sem); 1814 mutex_unlock(&s->open_mutex);
1813 schedule(); 1815 schedule();
1814 remove_wait_queue(&s->open_wait, &wait); 1816 remove_wait_queue(&s->open_wait, &wait);
1815 set_current_state(TASK_RUNNING); 1817 set_current_state(TASK_RUNNING);
1816 if (signal_pending(current)) 1818 if (signal_pending(current))
1817 return -ERESTARTSYS; 1819 return -ERESTARTSYS;
1818 down(&s->open_sem); 1820 mutex_lock(&s->open_mutex);
1819 } 1821 }
1820 1822
1821 spin_lock_irqsave(&s->lock, flags); 1823 spin_lock_irqsave(&s->lock, flags);
@@ -1850,7 +1852,7 @@ static int it8172_open(struct inode *inode, struct file *file)
1850 spin_unlock_irqrestore(&s->lock, flags); 1852 spin_unlock_irqrestore(&s->lock, flags);
1851 1853
1852 s->open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE)); 1854 s->open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE));
1853 up(&s->open_sem); 1855 mutex_unlock(&s->open_mutex);
1854 return nonseekable_open(inode, file); 1856 return nonseekable_open(inode, file);
1855} 1857}
1856 1858
@@ -1864,7 +1866,7 @@ static int it8172_release(struct inode *inode, struct file *file)
1864 lock_kernel(); 1866 lock_kernel();
1865 if (file->f_mode & FMODE_WRITE) 1867 if (file->f_mode & FMODE_WRITE)
1866 drain_dac(s, file->f_flags & O_NONBLOCK); 1868 drain_dac(s, file->f_flags & O_NONBLOCK);
1867 down(&s->open_sem); 1869 mutex_lock(&s->open_mutex);
1868 if (file->f_mode & FMODE_WRITE) { 1870 if (file->f_mode & FMODE_WRITE) {
1869 stop_dac(s); 1871 stop_dac(s);
1870 dealloc_dmabuf(s, &s->dma_dac); 1872 dealloc_dmabuf(s, &s->dma_dac);
@@ -1874,7 +1876,7 @@ static int it8172_release(struct inode *inode, struct file *file)
1874 dealloc_dmabuf(s, &s->dma_adc); 1876 dealloc_dmabuf(s, &s->dma_adc);
1875 } 1877 }
1876 s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); 1878 s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE));
1877 up(&s->open_sem); 1879 mutex_unlock(&s->open_mutex);
1878 wake_up(&s->open_wait); 1880 wake_up(&s->open_wait);
1879 unlock_kernel(); 1881 unlock_kernel();
1880 return 0; 1882 return 0;
@@ -1997,7 +1999,7 @@ static int __devinit it8172_probe(struct pci_dev *pcidev,
1997 init_waitqueue_head(&s->dma_adc.wait); 1999 init_waitqueue_head(&s->dma_adc.wait);
1998 init_waitqueue_head(&s->dma_dac.wait); 2000 init_waitqueue_head(&s->dma_dac.wait);
1999 init_waitqueue_head(&s->open_wait); 2001 init_waitqueue_head(&s->open_wait);
2000 init_MUTEX(&s->open_sem); 2002 mutex_init(&s->open_mutex);
2001 spin_lock_init(&s->lock); 2003 spin_lock_init(&s->lock);
2002 s->dev = pcidev; 2004 s->dev = pcidev;
2003 s->io = pci_resource_start(pcidev, 0); 2005 s->io = pci_resource_start(pcidev, 0);
diff --git a/sound/oss/maestro.c b/sound/oss/maestro.c
index d4b569acf764..e647f2f86279 100644
--- a/sound/oss/maestro.c
+++ b/sound/oss/maestro.c
@@ -223,6 +223,8 @@
223#include <linux/reboot.h> 223#include <linux/reboot.h>
224#include <linux/bitops.h> 224#include <linux/bitops.h>
225#include <linux/wait.h> 225#include <linux/wait.h>
226#include <linux/mutex.h>
227
226 228
227#include <asm/current.h> 229#include <asm/current.h>
228#include <asm/dma.h> 230#include <asm/dma.h>
@@ -397,7 +399,7 @@ struct ess_state {
397 /* this locks around the oss state in the driver */ 399 /* this locks around the oss state in the driver */
398 spinlock_t lock; 400 spinlock_t lock;
399 /* only let 1 be opening at a time */ 401 /* only let 1 be opening at a time */
400 struct semaphore open_sem; 402 struct mutex open_mutex;
401 wait_queue_head_t open_wait; 403 wait_queue_head_t open_wait;
402 mode_t open_mode; 404 mode_t open_mode;
403 405
@@ -3020,26 +3022,26 @@ ess_open(struct inode *inode, struct file *file)
3020 VALIDATE_STATE(s); 3022 VALIDATE_STATE(s);
3021 file->private_data = s; 3023 file->private_data = s;
3022 /* wait for device to become free */ 3024 /* wait for device to become free */
3023 down(&s->open_sem); 3025 mutex_lock(&s->open_mutex);
3024 while (s->open_mode & file->f_mode) { 3026 while (s->open_mode & file->f_mode) {
3025 if (file->f_flags & O_NONBLOCK) { 3027 if (file->f_flags & O_NONBLOCK) {
3026 up(&s->open_sem); 3028 mutex_unlock(&s->open_mutex);
3027 return -EWOULDBLOCK; 3029 return -EWOULDBLOCK;
3028 } 3030 }
3029 up(&s->open_sem); 3031 mutex_unlock(&s->open_mutex);
3030 interruptible_sleep_on(&s->open_wait); 3032 interruptible_sleep_on(&s->open_wait);
3031 if (signal_pending(current)) 3033 if (signal_pending(current))
3032 return -ERESTARTSYS; 3034 return -ERESTARTSYS;
3033 down(&s->open_sem); 3035 mutex_lock(&s->open_mutex);
3034 } 3036 }
3035 3037
3036 /* under semaphore.. */ 3038 /* under semaphore.. */
3037 if ((s->card->dmapages==NULL) && allocate_buffers(s)) { 3039 if ((s->card->dmapages==NULL) && allocate_buffers(s)) {
3038 up(&s->open_sem); 3040 mutex_unlock(&s->open_mutex);
3039 return -ENOMEM; 3041 return -ENOMEM;
3040 } 3042 }
3041 3043
3042 /* we're covered by the open_sem */ 3044 /* we're covered by the open_mutex */
3043 if( ! s->card->dsps_open ) { 3045 if( ! s->card->dsps_open ) {
3044 maestro_power(s->card,ACPI_D0); 3046 maestro_power(s->card,ACPI_D0);
3045 start_bob(s); 3047 start_bob(s);
@@ -3076,7 +3078,7 @@ ess_open(struct inode *inode, struct file *file)
3076 set_fmt(s, fmtm, fmts); 3078 set_fmt(s, fmtm, fmts);
3077 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 3079 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
3078 3080
3079 up(&s->open_sem); 3081 mutex_unlock(&s->open_mutex);
3080 return nonseekable_open(inode, file); 3082 return nonseekable_open(inode, file);
3081} 3083}
3082 3084
@@ -3089,7 +3091,7 @@ ess_release(struct inode *inode, struct file *file)
3089 lock_kernel(); 3091 lock_kernel();
3090 if (file->f_mode & FMODE_WRITE) 3092 if (file->f_mode & FMODE_WRITE)
3091 drain_dac(s, file->f_flags & O_NONBLOCK); 3093 drain_dac(s, file->f_flags & O_NONBLOCK);
3092 down(&s->open_sem); 3094 mutex_lock(&s->open_mutex);
3093 if (file->f_mode & FMODE_WRITE) { 3095 if (file->f_mode & FMODE_WRITE) {
3094 stop_dac(s); 3096 stop_dac(s);
3095 } 3097 }
@@ -3098,7 +3100,7 @@ ess_release(struct inode *inode, struct file *file)
3098 } 3100 }
3099 3101
3100 s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); 3102 s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
3101 /* we're covered by the open_sem */ 3103 /* we're covered by the open_mutex */
3102 M_printk("maestro: %d dsps now alive\n",s->card->dsps_open-1); 3104 M_printk("maestro: %d dsps now alive\n",s->card->dsps_open-1);
3103 if( --s->card->dsps_open <= 0) { 3105 if( --s->card->dsps_open <= 0) {
3104 s->card->dsps_open = 0; 3106 s->card->dsps_open = 0;
@@ -3106,7 +3108,7 @@ ess_release(struct inode *inode, struct file *file)
3106 free_buffers(s); 3108 free_buffers(s);
3107 maestro_power(s->card,ACPI_D2); 3109 maestro_power(s->card,ACPI_D2);
3108 } 3110 }
3109 up(&s->open_sem); 3111 mutex_unlock(&s->open_mutex);
3110 wake_up(&s->open_wait); 3112 wake_up(&s->open_wait);
3111 unlock_kernel(); 3113 unlock_kernel();
3112 return 0; 3114 return 0;
@@ -3466,7 +3468,7 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid)
3466 init_waitqueue_head(&s->dma_dac.wait); 3468 init_waitqueue_head(&s->dma_dac.wait);
3467 init_waitqueue_head(&s->open_wait); 3469 init_waitqueue_head(&s->open_wait);
3468 spin_lock_init(&s->lock); 3470 spin_lock_init(&s->lock);
3469 init_MUTEX(&s->open_sem); 3471 mutex_init(&s->open_mutex);
3470 s->magic = ESS_STATE_MAGIC; 3472 s->magic = ESS_STATE_MAGIC;
3471 3473
3472 s->apu[0] = 6*i; 3474 s->apu[0] = 6*i;
diff --git a/sound/oss/maestro3.c b/sound/oss/maestro3.c
index f3dec70fcb9b..66044aff2586 100644
--- a/sound/oss/maestro3.c
+++ b/sound/oss/maestro3.c
@@ -144,6 +144,8 @@
144#include <linux/spinlock.h> 144#include <linux/spinlock.h>
145#include <linux/ac97_codec.h> 145#include <linux/ac97_codec.h>
146#include <linux/wait.h> 146#include <linux/wait.h>
147#include <linux/mutex.h>
148
147 149
148#include <asm/io.h> 150#include <asm/io.h>
149#include <asm/dma.h> 151#include <asm/dma.h>
@@ -205,7 +207,7 @@ struct m3_state {
205 when irqhandler uses s->lock 207 when irqhandler uses s->lock
206 and m3_assp_read uses card->lock ? 208 and m3_assp_read uses card->lock ?
207 */ 209 */
208 struct semaphore open_sem; 210 struct mutex open_mutex;
209 wait_queue_head_t open_wait; 211 wait_queue_head_t open_wait;
210 mode_t open_mode; 212 mode_t open_mode;
211 213
@@ -2013,17 +2015,17 @@ static int m3_open(struct inode *inode, struct file *file)
2013 file->private_data = s; 2015 file->private_data = s;
2014 2016
2015 /* wait for device to become free */ 2017 /* wait for device to become free */
2016 down(&s->open_sem); 2018 mutex_lock(&s->open_mutex);
2017 while (s->open_mode & file->f_mode) { 2019 while (s->open_mode & file->f_mode) {
2018 if (file->f_flags & O_NONBLOCK) { 2020 if (file->f_flags & O_NONBLOCK) {
2019 up(&s->open_sem); 2021 mutex_unlock(&s->open_mutex);
2020 return -EWOULDBLOCK; 2022 return -EWOULDBLOCK;
2021 } 2023 }
2022 up(&s->open_sem); 2024 mutex_unlock(&s->open_mutex);
2023 interruptible_sleep_on(&s->open_wait); 2025 interruptible_sleep_on(&s->open_wait);
2024 if (signal_pending(current)) 2026 if (signal_pending(current))
2025 return -ERESTARTSYS; 2027 return -ERESTARTSYS;
2026 down(&s->open_sem); 2028 mutex_lock(&s->open_mutex);
2027 } 2029 }
2028 2030
2029 spin_lock_irqsave(&c->lock, flags); 2031 spin_lock_irqsave(&c->lock, flags);
@@ -2047,7 +2049,7 @@ static int m3_open(struct inode *inode, struct file *file)
2047 set_fmt(s, fmtm, fmts); 2049 set_fmt(s, fmtm, fmts);
2048 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 2050 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
2049 2051
2050 up(&s->open_sem); 2052 mutex_unlock(&s->open_mutex);
2051 spin_unlock_irqrestore(&c->lock, flags); 2053 spin_unlock_irqrestore(&c->lock, flags);
2052 return nonseekable_open(inode, file); 2054 return nonseekable_open(inode, file);
2053} 2055}
@@ -2062,7 +2064,7 @@ static int m3_release(struct inode *inode, struct file *file)
2062 if (file->f_mode & FMODE_WRITE) 2064 if (file->f_mode & FMODE_WRITE)
2063 drain_dac(s, file->f_flags & O_NONBLOCK); 2065 drain_dac(s, file->f_flags & O_NONBLOCK);
2064 2066
2065 down(&s->open_sem); 2067 mutex_lock(&s->open_mutex);
2066 spin_lock_irqsave(&card->lock, flags); 2068 spin_lock_irqsave(&card->lock, flags);
2067 2069
2068 if (file->f_mode & FMODE_WRITE) { 2070 if (file->f_mode & FMODE_WRITE) {
@@ -2083,7 +2085,7 @@ static int m3_release(struct inode *inode, struct file *file)
2083 s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); 2085 s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
2084 2086
2085 spin_unlock_irqrestore(&card->lock, flags); 2087 spin_unlock_irqrestore(&card->lock, flags);
2086 up(&s->open_sem); 2088 mutex_unlock(&s->open_mutex);
2087 wake_up(&s->open_wait); 2089 wake_up(&s->open_wait);
2088 2090
2089 return 0; 2091 return 0;
@@ -2679,7 +2681,7 @@ static int __devinit m3_probe(struct pci_dev *pci_dev, const struct pci_device_i
2679 init_waitqueue_head(&s->dma_adc.wait); 2681 init_waitqueue_head(&s->dma_adc.wait);
2680 init_waitqueue_head(&s->dma_dac.wait); 2682 init_waitqueue_head(&s->dma_dac.wait);
2681 init_waitqueue_head(&s->open_wait); 2683 init_waitqueue_head(&s->open_wait);
2682 init_MUTEX(&(s->open_sem)); 2684 mutex_init(&(s->open_mutex));
2683 s->magic = M3_STATE_MAGIC; 2685 s->magic = M3_STATE_MAGIC;
2684 2686
2685 m3_assp_client_init(s); 2687 m3_assp_client_init(s);
diff --git a/sound/oss/nec_vrc5477.c b/sound/oss/nec_vrc5477.c
index fbb9170e8e0a..21c1954d9108 100644
--- a/sound/oss/nec_vrc5477.c
+++ b/sound/oss/nec_vrc5477.c
@@ -78,6 +78,8 @@
78#include <linux/spinlock.h> 78#include <linux/spinlock.h>
79#include <linux/smp_lock.h> 79#include <linux/smp_lock.h>
80#include <linux/ac97_codec.h> 80#include <linux/ac97_codec.h>
81#include <linux/mutex.h>
82
81#include <asm/io.h> 83#include <asm/io.h>
82#include <asm/dma.h> 84#include <asm/dma.h>
83#include <asm/uaccess.h> 85#include <asm/uaccess.h>
@@ -198,7 +200,7 @@ struct vrc5477_ac97_state {
198 unsigned short extended_status; 200 unsigned short extended_status;
199 201
200 spinlock_t lock; 202 spinlock_t lock;
201 struct semaphore open_sem; 203 struct mutex open_mutex;
202 mode_t open_mode; 204 mode_t open_mode;
203 wait_queue_head_t open_wait; 205 wait_queue_head_t open_wait;
204 206
@@ -1617,22 +1619,22 @@ static int vrc5477_ac97_open(struct inode *inode, struct file *file)
1617 file->private_data = s; 1619 file->private_data = s;
1618 1620
1619 /* wait for device to become free */ 1621 /* wait for device to become free */
1620 down(&s->open_sem); 1622 mutex_lock(&s->open_mutex);
1621 while (s->open_mode & file->f_mode) { 1623 while (s->open_mode & file->f_mode) {
1622 1624
1623 if (file->f_flags & O_NONBLOCK) { 1625 if (file->f_flags & O_NONBLOCK) {
1624 up(&s->open_sem); 1626 mutex_unlock(&s->open_mutex);
1625 return -EBUSY; 1627 return -EBUSY;
1626 } 1628 }
1627 add_wait_queue(&s->open_wait, &wait); 1629 add_wait_queue(&s->open_wait, &wait);
1628 __set_current_state(TASK_INTERRUPTIBLE); 1630 __set_current_state(TASK_INTERRUPTIBLE);
1629 up(&s->open_sem); 1631 mutex_unlock(&s->open_mutex);
1630 schedule(); 1632 schedule();
1631 remove_wait_queue(&s->open_wait, &wait); 1633 remove_wait_queue(&s->open_wait, &wait);
1632 set_current_state(TASK_RUNNING); 1634 set_current_state(TASK_RUNNING);
1633 if (signal_pending(current)) 1635 if (signal_pending(current))
1634 return -ERESTARTSYS; 1636 return -ERESTARTSYS;
1635 down(&s->open_sem); 1637 mutex_lock(&s->open_mutex);
1636 } 1638 }
1637 1639
1638 spin_lock_irqsave(&s->lock, flags); 1640 spin_lock_irqsave(&s->lock, flags);
@@ -1659,7 +1661,7 @@ static int vrc5477_ac97_open(struct inode *inode, struct file *file)
1659 bailout: 1661 bailout:
1660 spin_unlock_irqrestore(&s->lock, flags); 1662 spin_unlock_irqrestore(&s->lock, flags);
1661 1663
1662 up(&s->open_sem); 1664 mutex_unlock(&s->open_mutex);
1663 return ret; 1665 return ret;
1664} 1666}
1665 1667
@@ -1671,7 +1673,7 @@ static int vrc5477_ac97_release(struct inode *inode, struct file *file)
1671 lock_kernel(); 1673 lock_kernel();
1672 if (file->f_mode & FMODE_WRITE) 1674 if (file->f_mode & FMODE_WRITE)
1673 drain_dac(s, file->f_flags & O_NONBLOCK); 1675 drain_dac(s, file->f_flags & O_NONBLOCK);
1674 down(&s->open_sem); 1676 mutex_lock(&s->open_mutex);
1675 if (file->f_mode & FMODE_WRITE) { 1677 if (file->f_mode & FMODE_WRITE) {
1676 stop_dac(s); 1678 stop_dac(s);
1677 dealloc_dmabuf(s, &s->dma_dac); 1679 dealloc_dmabuf(s, &s->dma_dac);
@@ -1681,7 +1683,7 @@ static int vrc5477_ac97_release(struct inode *inode, struct file *file)
1681 dealloc_dmabuf(s, &s->dma_adc); 1683 dealloc_dmabuf(s, &s->dma_adc);
1682 } 1684 }
1683 s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); 1685 s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
1684 up(&s->open_sem); 1686 mutex_unlock(&s->open_mutex);
1685 wake_up(&s->open_wait); 1687 wake_up(&s->open_wait);
1686 unlock_kernel(); 1688 unlock_kernel();
1687 return 0; 1689 return 0;
@@ -1867,7 +1869,7 @@ static int __devinit vrc5477_ac97_probe(struct pci_dev *pcidev,
1867 init_waitqueue_head(&s->dma_adc.wait); 1869 init_waitqueue_head(&s->dma_adc.wait);
1868 init_waitqueue_head(&s->dma_dac.wait); 1870 init_waitqueue_head(&s->dma_dac.wait);
1869 init_waitqueue_head(&s->open_wait); 1871 init_waitqueue_head(&s->open_wait);
1870 init_MUTEX(&s->open_sem); 1872 mutex_init(&s->open_mutex);
1871 spin_lock_init(&s->lock); 1873 spin_lock_init(&s->lock);
1872 1874
1873 s->dev = pcidev; 1875 s->dev = pcidev;
diff --git a/sound/oss/rme96xx.c b/sound/oss/rme96xx.c
index faa0b7919b65..a1ec9d131ab3 100644
--- a/sound/oss/rme96xx.c
+++ b/sound/oss/rme96xx.c
@@ -58,6 +58,7 @@ TODO:
58#include <linux/interrupt.h> 58#include <linux/interrupt.h>
59#include <linux/poll.h> 59#include <linux/poll.h>
60#include <linux/wait.h> 60#include <linux/wait.h>
61#include <linux/mutex.h>
61 62
62#include <asm/dma.h> 63#include <asm/dma.h>
63#include <asm/page.h> 64#include <asm/page.h>
@@ -326,7 +327,7 @@ typedef struct _rme96xx_info {
326 327
327 /* waiting and locking */ 328 /* waiting and locking */
328 wait_queue_head_t wait; 329 wait_queue_head_t wait;
329 struct semaphore open_sem; 330 struct mutex open_mutex;
330 wait_queue_head_t open_wait; 331 wait_queue_head_t open_wait;
331 332
332 } dma[RME96xx_MAX_DEVS]; 333 } dma[RME96xx_MAX_DEVS];
@@ -842,7 +843,7 @@ static void busmaster_free(void* ptr,int size) {
842 843
843static int rme96xx_dmabuf_init(rme96xx_info * s,struct dmabuf* dma,int ioffset,int ooffset) { 844static int rme96xx_dmabuf_init(rme96xx_info * s,struct dmabuf* dma,int ioffset,int ooffset) {
844 845
845 init_MUTEX(&dma->open_sem); 846 mutex_init(&dma->open_mutex);
846 init_waitqueue_head(&dma->open_wait); 847 init_waitqueue_head(&dma->open_wait);
847 init_waitqueue_head(&dma->wait); 848 init_waitqueue_head(&dma->wait);
848 dma->s = s; 849 dma->s = s;
@@ -1469,21 +1470,21 @@ static int rme96xx_open(struct inode *in, struct file *f)
1469 dma = &s->dma[devnum]; 1470 dma = &s->dma[devnum];
1470 f->private_data = dma; 1471 f->private_data = dma;
1471 /* wait for device to become free */ 1472 /* wait for device to become free */
1472 down(&dma->open_sem); 1473 mutex_lock(&dma->open_mutex);
1473 while (dma->open_mode & f->f_mode) { 1474 while (dma->open_mode & f->f_mode) {
1474 if (f->f_flags & O_NONBLOCK) { 1475 if (f->f_flags & O_NONBLOCK) {
1475 up(&dma->open_sem); 1476 mutex_unlock(&dma->open_mutex);
1476 return -EBUSY; 1477 return -EBUSY;
1477 } 1478 }
1478 add_wait_queue(&dma->open_wait, &wait); 1479 add_wait_queue(&dma->open_wait, &wait);
1479 __set_current_state(TASK_INTERRUPTIBLE); 1480 __set_current_state(TASK_INTERRUPTIBLE);
1480 up(&dma->open_sem); 1481 mutex_unlock(&dma->open_mutex);
1481 schedule(); 1482 schedule();
1482 remove_wait_queue(&dma->open_wait, &wait); 1483 remove_wait_queue(&dma->open_wait, &wait);
1483 set_current_state(TASK_RUNNING); 1484 set_current_state(TASK_RUNNING);
1484 if (signal_pending(current)) 1485 if (signal_pending(current))
1485 return -ERESTARTSYS; 1486 return -ERESTARTSYS;
1486 down(&dma->open_sem); 1487 mutex_lock(&dma->open_mutex);
1487 } 1488 }
1488 1489
1489 COMM ("hardware open") 1490 COMM ("hardware open")
@@ -1492,7 +1493,7 @@ static int rme96xx_open(struct inode *in, struct file *f)
1492 1493
1493 dma->open_mode |= (f->f_mode & (FMODE_READ | FMODE_WRITE)); 1494 dma->open_mode |= (f->f_mode & (FMODE_READ | FMODE_WRITE));
1494 dma->opened = 1; 1495 dma->opened = 1;
1495 up(&dma->open_sem); 1496 mutex_unlock(&dma->open_mutex);
1496 1497
1497 DBG(printk("device num %d open finished\n",devnum)); 1498 DBG(printk("device num %d open finished\n",devnum));
1498 return 0; 1499 return 0;
@@ -1524,7 +1525,7 @@ static int rme96xx_release(struct inode *in, struct file *file)
1524 } 1525 }
1525 1526
1526 wake_up(&dma->open_wait); 1527 wake_up(&dma->open_wait);
1527 up(&dma->open_sem); 1528 mutex_unlock(&dma->open_mutex);
1528 1529
1529 return 0; 1530 return 0;
1530} 1531}
diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c
index 71b05e2f6977..69a4b8778b51 100644
--- a/sound/oss/sonicvibes.c
+++ b/sound/oss/sonicvibes.c
@@ -116,6 +116,8 @@
116#include <linux/spinlock.h> 116#include <linux/spinlock.h>
117#include <linux/smp_lock.h> 117#include <linux/smp_lock.h>
118#include <linux/gameport.h> 118#include <linux/gameport.h>
119#include <linux/mutex.h>
120
119 121
120#include <asm/io.h> 122#include <asm/io.h>
121#include <asm/uaccess.h> 123#include <asm/uaccess.h>
@@ -328,7 +330,7 @@ struct sv_state {
328 unsigned char fmt, enable; 330 unsigned char fmt, enable;
329 331
330 spinlock_t lock; 332 spinlock_t lock;
331 struct semaphore open_sem; 333 struct mutex open_mutex;
332 mode_t open_mode; 334 mode_t open_mode;
333 wait_queue_head_t open_wait; 335 wait_queue_head_t open_wait;
334 336
@@ -1922,21 +1924,21 @@ static int sv_open(struct inode *inode, struct file *file)
1922 VALIDATE_STATE(s); 1924 VALIDATE_STATE(s);
1923 file->private_data = s; 1925 file->private_data = s;
1924 /* wait for device to become free */ 1926 /* wait for device to become free */
1925 down(&s->open_sem); 1927 mutex_lock(&s->open_mutex);
1926 while (s->open_mode & file->f_mode) { 1928 while (s->open_mode & file->f_mode) {
1927 if (file->f_flags & O_NONBLOCK) { 1929 if (file->f_flags & O_NONBLOCK) {
1928 up(&s->open_sem); 1930 mutex_unlock(&s->open_mutex);
1929 return -EBUSY; 1931 return -EBUSY;
1930 } 1932 }
1931 add_wait_queue(&s->open_wait, &wait); 1933 add_wait_queue(&s->open_wait, &wait);
1932 __set_current_state(TASK_INTERRUPTIBLE); 1934 __set_current_state(TASK_INTERRUPTIBLE);
1933 up(&s->open_sem); 1935 mutex_unlock(&s->open_mutex);
1934 schedule(); 1936 schedule();
1935 remove_wait_queue(&s->open_wait, &wait); 1937 remove_wait_queue(&s->open_wait, &wait);
1936 set_current_state(TASK_RUNNING); 1938 set_current_state(TASK_RUNNING);
1937 if (signal_pending(current)) 1939 if (signal_pending(current))
1938 return -ERESTARTSYS; 1940 return -ERESTARTSYS;
1939 down(&s->open_sem); 1941 mutex_lock(&s->open_mutex);
1940 } 1942 }
1941 if (file->f_mode & FMODE_READ) { 1943 if (file->f_mode & FMODE_READ) {
1942 fmtm &= ~((SV_CFMT_STEREO | SV_CFMT_16BIT) << SV_CFMT_CSHIFT); 1944 fmtm &= ~((SV_CFMT_STEREO | SV_CFMT_16BIT) << SV_CFMT_CSHIFT);
@@ -1956,7 +1958,7 @@ static int sv_open(struct inode *inode, struct file *file)
1956 } 1958 }
1957 set_fmt(s, fmtm, fmts); 1959 set_fmt(s, fmtm, fmts);
1958 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 1960 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
1959 up(&s->open_sem); 1961 mutex_unlock(&s->open_mutex);
1960 return nonseekable_open(inode, file); 1962 return nonseekable_open(inode, file);
1961} 1963}
1962 1964
@@ -1968,7 +1970,7 @@ static int sv_release(struct inode *inode, struct file *file)
1968 lock_kernel(); 1970 lock_kernel();
1969 if (file->f_mode & FMODE_WRITE) 1971 if (file->f_mode & FMODE_WRITE)
1970 drain_dac(s, file->f_flags & O_NONBLOCK); 1972 drain_dac(s, file->f_flags & O_NONBLOCK);
1971 down(&s->open_sem); 1973 mutex_lock(&s->open_mutex);
1972 if (file->f_mode & FMODE_WRITE) { 1974 if (file->f_mode & FMODE_WRITE) {
1973 stop_dac(s); 1975 stop_dac(s);
1974 dealloc_dmabuf(s, &s->dma_dac); 1976 dealloc_dmabuf(s, &s->dma_dac);
@@ -1979,7 +1981,7 @@ static int sv_release(struct inode *inode, struct file *file)
1979 } 1981 }
1980 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); 1982 s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE));
1981 wake_up(&s->open_wait); 1983 wake_up(&s->open_wait);
1982 up(&s->open_sem); 1984 mutex_unlock(&s->open_mutex);
1983 unlock_kernel(); 1985 unlock_kernel();
1984 return 0; 1986 return 0;
1985} 1987}
@@ -2167,21 +2169,21 @@ static int sv_midi_open(struct inode *inode, struct file *file)
2167 VALIDATE_STATE(s); 2169 VALIDATE_STATE(s);
2168 file->private_data = s; 2170 file->private_data = s;
2169 /* wait for device to become free */ 2171 /* wait for device to become free */
2170 down(&s->open_sem); 2172 mutex_lock(&s->open_mutex);
2171 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { 2173 while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
2172 if (file->f_flags & O_NONBLOCK) { 2174 if (file->f_flags & O_NONBLOCK) {
2173 up(&s->open_sem); 2175 mutex_unlock(&s->open_mutex);
2174 return -EBUSY; 2176 return -EBUSY;
2175 } 2177 }
2176 add_wait_queue(&s->open_wait, &wait); 2178 add_wait_queue(&s->open_wait, &wait);
2177 __set_current_state(TASK_INTERRUPTIBLE); 2179 __set_current_state(TASK_INTERRUPTIBLE);
2178 up(&s->open_sem); 2180 mutex_unlock(&s->open_mutex);
2179 schedule(); 2181 schedule();
2180 remove_wait_queue(&s->open_wait, &wait); 2182 remove_wait_queue(&s->open_wait, &wait);
2181 set_current_state(TASK_RUNNING); 2183 set_current_state(TASK_RUNNING);
2182 if (signal_pending(current)) 2184 if (signal_pending(current))
2183 return -ERESTARTSYS; 2185 return -ERESTARTSYS;
2184 down(&s->open_sem); 2186 mutex_lock(&s->open_mutex);
2185 } 2187 }
2186 spin_lock_irqsave(&s->lock, flags); 2188 spin_lock_irqsave(&s->lock, flags);
2187 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 2189 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -2210,7 +2212,7 @@ static int sv_midi_open(struct inode *inode, struct file *file)
2210 } 2212 }
2211 spin_unlock_irqrestore(&s->lock, flags); 2213 spin_unlock_irqrestore(&s->lock, flags);
2212 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); 2214 s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
2213 up(&s->open_sem); 2215 mutex_unlock(&s->open_mutex);
2214 return nonseekable_open(inode, file); 2216 return nonseekable_open(inode, file);
2215} 2217}
2216 2218
@@ -2248,7 +2250,7 @@ static int sv_midi_release(struct inode *inode, struct file *file)
2248 remove_wait_queue(&s->midi.owait, &wait); 2250 remove_wait_queue(&s->midi.owait, &wait);
2249 set_current_state(TASK_RUNNING); 2251 set_current_state(TASK_RUNNING);
2250 } 2252 }
2251 down(&s->open_sem); 2253 mutex_lock(&s->open_mutex);
2252 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); 2254 s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE));
2253 spin_lock_irqsave(&s->lock, flags); 2255 spin_lock_irqsave(&s->lock, flags);
2254 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 2256 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
@@ -2257,7 +2259,7 @@ static int sv_midi_release(struct inode *inode, struct file *file)
2257 } 2259 }
2258 spin_unlock_irqrestore(&s->lock, flags); 2260 spin_unlock_irqrestore(&s->lock, flags);
2259 wake_up(&s->open_wait); 2261 wake_up(&s->open_wait);
2260 up(&s->open_sem); 2262 mutex_unlock(&s->open_mutex);
2261 unlock_kernel(); 2263 unlock_kernel();
2262 return 0; 2264 return 0;
2263} 2265}
@@ -2388,21 +2390,21 @@ static int sv_dmfm_open(struct inode *inode, struct file *file)
2388 VALIDATE_STATE(s); 2390 VALIDATE_STATE(s);
2389 file->private_data = s; 2391 file->private_data = s;
2390 /* wait for device to become free */ 2392 /* wait for device to become free */
2391 down(&s->open_sem); 2393 mutex_lock(&s->open_mutex);
2392 while (s->open_mode & FMODE_DMFM) { 2394 while (s->open_mode & FMODE_DMFM) {
2393 if (file->f_flags & O_NONBLOCK) { 2395 if (file->f_flags & O_NONBLOCK) {
2394 up(&s->open_sem); 2396 mutex_unlock(&s->open_mutex);
2395 return -EBUSY; 2397 return -EBUSY;
2396 } 2398 }
2397 add_wait_queue(&s->open_wait, &wait); 2399 add_wait_queue(&s->open_wait, &wait);
2398 __set_current_state(TASK_INTERRUPTIBLE); 2400 __set_current_state(TASK_INTERRUPTIBLE);
2399 up(&s->open_sem); 2401 mutex_unlock(&s->open_mutex);
2400 schedule(); 2402 schedule();
2401 remove_wait_queue(&s->open_wait, &wait); 2403 remove_wait_queue(&s->open_wait, &wait);
2402 set_current_state(TASK_RUNNING); 2404 set_current_state(TASK_RUNNING);
2403 if (signal_pending(current)) 2405 if (signal_pending(current))
2404 return -ERESTARTSYS; 2406 return -ERESTARTSYS;
2405 down(&s->open_sem); 2407 mutex_lock(&s->open_mutex);
2406 } 2408 }
2407 /* init the stuff */ 2409 /* init the stuff */
2408 outb(1, s->iosynth); 2410 outb(1, s->iosynth);
@@ -2412,7 +2414,7 @@ static int sv_dmfm_open(struct inode *inode, struct file *file)
2412 outb(5, s->iosynth+2); 2414 outb(5, s->iosynth+2);
2413 outb(1, s->iosynth+3); /* enable OPL3 */ 2415 outb(1, s->iosynth+3); /* enable OPL3 */
2414 s->open_mode |= FMODE_DMFM; 2416 s->open_mode |= FMODE_DMFM;
2415 up(&s->open_sem); 2417 mutex_unlock(&s->open_mutex);
2416 return nonseekable_open(inode, file); 2418 return nonseekable_open(inode, file);
2417} 2419}
2418 2420
@@ -2423,7 +2425,7 @@ static int sv_dmfm_release(struct inode *inode, struct file *file)
2423 2425
2424 VALIDATE_STATE(s); 2426 VALIDATE_STATE(s);
2425 lock_kernel(); 2427 lock_kernel();
2426 down(&s->open_sem); 2428 mutex_lock(&s->open_mutex);
2427 s->open_mode &= ~FMODE_DMFM; 2429 s->open_mode &= ~FMODE_DMFM;
2428 for (regb = 0xb0; regb < 0xb9; regb++) { 2430 for (regb = 0xb0; regb < 0xb9; regb++) {
2429 outb(regb, s->iosynth); 2431 outb(regb, s->iosynth);
@@ -2432,7 +2434,7 @@ static int sv_dmfm_release(struct inode *inode, struct file *file)
2432 outb(0, s->iosynth+3); 2434 outb(0, s->iosynth+3);
2433 } 2435 }
2434 wake_up(&s->open_wait); 2436 wake_up(&s->open_wait);
2435 up(&s->open_sem); 2437 mutex_unlock(&s->open_mutex);
2436 unlock_kernel(); 2438 unlock_kernel();
2437 return 0; 2439 return 0;
2438} 2440}
@@ -2582,7 +2584,7 @@ static int __devinit sv_probe(struct pci_dev *pcidev, const struct pci_device_id
2582 init_waitqueue_head(&s->open_wait); 2584 init_waitqueue_head(&s->open_wait);
2583 init_waitqueue_head(&s->midi.iwait); 2585 init_waitqueue_head(&s->midi.iwait);
2584 init_waitqueue_head(&s->midi.owait); 2586 init_waitqueue_head(&s->midi.owait);
2585 init_MUTEX(&s->open_sem); 2587 mutex_init(&s->open_mutex);
2586 spin_lock_init(&s->lock); 2588 spin_lock_init(&s->lock);
2587 s->magic = SV_MAGIC; 2589 s->magic = SV_MAGIC;
2588 s->dev = pcidev; 2590 s->dev = pcidev;
diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
index df4d3771fa84..dce9016cbcfd 100644
--- a/sound/oss/swarm_cs4297a.c
+++ b/sound/oss/swarm_cs4297a.c
@@ -76,6 +76,7 @@
76#include <linux/init.h> 76#include <linux/init.h>
77#include <linux/poll.h> 77#include <linux/poll.h>
78#include <linux/smp_lock.h> 78#include <linux/smp_lock.h>
79#include <linux/mutex.h>
79 80
80#include <asm/byteorder.h> 81#include <asm/byteorder.h>
81#include <asm/dma.h> 82#include <asm/dma.h>
@@ -291,9 +292,9 @@ struct cs4297a_state {
291 unsigned conversion:1; // conversion from 16 to 8 bit in progress 292 unsigned conversion:1; // conversion from 16 to 8 bit in progress
292 unsigned ena; 293 unsigned ena;
293 spinlock_t lock; 294 spinlock_t lock;
294 struct semaphore open_sem; 295 struct mutex open_mutex;
295 struct semaphore open_sem_adc; 296 struct mutex open_sem_adc;
296 struct semaphore open_sem_dac; 297 struct mutex open_sem_dac;
297 mode_t open_mode; 298 mode_t open_mode;
298 wait_queue_head_t open_wait; 299 wait_queue_head_t open_wait;
299 wait_queue_head_t open_wait_adc; 300 wait_queue_head_t open_wait_adc;
@@ -2352,20 +2353,20 @@ static int cs4297a_release(struct inode *inode, struct file *file)
2352 2353
2353 if (file->f_mode & FMODE_WRITE) { 2354 if (file->f_mode & FMODE_WRITE) {
2354 drain_dac(s, file->f_flags & O_NONBLOCK); 2355 drain_dac(s, file->f_flags & O_NONBLOCK);
2355 down(&s->open_sem_dac); 2356 mutex_lock(&s->open_sem_dac);
2356 stop_dac(s); 2357 stop_dac(s);
2357 dealloc_dmabuf(s, &s->dma_dac); 2358 dealloc_dmabuf(s, &s->dma_dac);
2358 s->open_mode &= ~FMODE_WRITE; 2359 s->open_mode &= ~FMODE_WRITE;
2359 up(&s->open_sem_dac); 2360 mutex_unlock(&s->open_sem_dac);
2360 wake_up(&s->open_wait_dac); 2361 wake_up(&s->open_wait_dac);
2361 } 2362 }
2362 if (file->f_mode & FMODE_READ) { 2363 if (file->f_mode & FMODE_READ) {
2363 drain_adc(s, file->f_flags & O_NONBLOCK); 2364 drain_adc(s, file->f_flags & O_NONBLOCK);
2364 down(&s->open_sem_adc); 2365 mutex_lock(&s->open_sem_adc);
2365 stop_adc(s); 2366 stop_adc(s);
2366 dealloc_dmabuf(s, &s->dma_adc); 2367 dealloc_dmabuf(s, &s->dma_adc);
2367 s->open_mode &= ~FMODE_READ; 2368 s->open_mode &= ~FMODE_READ;
2368 up(&s->open_sem_adc); 2369 mutex_unlock(&s->open_sem_adc);
2369 wake_up(&s->open_wait_adc); 2370 wake_up(&s->open_wait_adc);
2370 } 2371 }
2371 return 0; 2372 return 0;
@@ -2413,37 +2414,37 @@ static int cs4297a_open(struct inode *inode, struct file *file)
2413 ; 2414 ;
2414 } 2415 }
2415 2416
2416 down(&s->open_sem_dac); 2417 mutex_lock(&s->open_sem_dac);
2417 while (s->open_mode & FMODE_WRITE) { 2418 while (s->open_mode & FMODE_WRITE) {
2418 if (file->f_flags & O_NONBLOCK) { 2419 if (file->f_flags & O_NONBLOCK) {
2419 up(&s->open_sem_dac); 2420 mutex_unlock(&s->open_sem_dac);
2420 return -EBUSY; 2421 return -EBUSY;
2421 } 2422 }
2422 up(&s->open_sem_dac); 2423 mutex_unlock(&s->open_sem_dac);
2423 interruptible_sleep_on(&s->open_wait_dac); 2424 interruptible_sleep_on(&s->open_wait_dac);
2424 2425
2425 if (signal_pending(current)) { 2426 if (signal_pending(current)) {
2426 printk("open - sig pending\n"); 2427 printk("open - sig pending\n");
2427 return -ERESTARTSYS; 2428 return -ERESTARTSYS;
2428 } 2429 }
2429 down(&s->open_sem_dac); 2430 mutex_lock(&s->open_sem_dac);
2430 } 2431 }
2431 } 2432 }
2432 if (file->f_mode & FMODE_READ) { 2433 if (file->f_mode & FMODE_READ) {
2433 down(&s->open_sem_adc); 2434 mutex_lock(&s->open_sem_adc);
2434 while (s->open_mode & FMODE_READ) { 2435 while (s->open_mode & FMODE_READ) {
2435 if (file->f_flags & O_NONBLOCK) { 2436 if (file->f_flags & O_NONBLOCK) {
2436 up(&s->open_sem_adc); 2437 mutex_unlock(&s->open_sem_adc);
2437 return -EBUSY; 2438 return -EBUSY;
2438 } 2439 }
2439 up(&s->open_sem_adc); 2440 mutex_unlock(&s->open_sem_adc);
2440 interruptible_sleep_on(&s->open_wait_adc); 2441 interruptible_sleep_on(&s->open_wait_adc);
2441 2442
2442 if (signal_pending(current)) { 2443 if (signal_pending(current)) {
2443 printk("open - sig pending\n"); 2444 printk("open - sig pending\n");
2444 return -ERESTARTSYS; 2445 return -ERESTARTSYS;
2445 } 2446 }
2446 down(&s->open_sem_adc); 2447 mutex_lock(&s->open_sem_adc);
2447 } 2448 }
2448 } 2449 }
2449 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 2450 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
@@ -2456,7 +2457,7 @@ static int cs4297a_open(struct inode *inode, struct file *file)
2456 s->ena &= ~FMODE_READ; 2457 s->ena &= ~FMODE_READ;
2457 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = 2458 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags =
2458 s->dma_adc.subdivision = 0; 2459 s->dma_adc.subdivision = 0;
2459 up(&s->open_sem_adc); 2460 mutex_unlock(&s->open_sem_adc);
2460 2461
2461 if (prog_dmabuf_adc(s)) { 2462 if (prog_dmabuf_adc(s)) {
2462 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR 2463 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
@@ -2474,7 +2475,7 @@ static int cs4297a_open(struct inode *inode, struct file *file)
2474 s->ena &= ~FMODE_WRITE; 2475 s->ena &= ~FMODE_WRITE;
2475 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = 2476 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags =
2476 s->dma_dac.subdivision = 0; 2477 s->dma_dac.subdivision = 0;
2477 up(&s->open_sem_dac); 2478 mutex_unlock(&s->open_sem_dac);
2478 2479
2479 if (prog_dmabuf_dac(s)) { 2480 if (prog_dmabuf_dac(s)) {
2480 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR 2481 CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
@@ -2631,8 +2632,8 @@ static int __init cs4297a_init(void)
2631 init_waitqueue_head(&s->open_wait); 2632 init_waitqueue_head(&s->open_wait);
2632 init_waitqueue_head(&s->open_wait_adc); 2633 init_waitqueue_head(&s->open_wait_adc);
2633 init_waitqueue_head(&s->open_wait_dac); 2634 init_waitqueue_head(&s->open_wait_dac);
2634 init_MUTEX(&s->open_sem_adc); 2635 mutex_init(&s->open_sem_adc);
2635 init_MUTEX(&s->open_sem_dac); 2636 mutex_init(&s->open_sem_dac);
2636 spin_lock_init(&s->lock); 2637 spin_lock_init(&s->lock);
2637 2638
2638 s->irq = K_INT_SER_1; 2639 s->irq = K_INT_SER_1;
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index a21c663e7e12..e61a454a8150 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -190,7 +190,7 @@
190 * 190 *
191 * Lock order (high->low) 191 * Lock order (high->low)
192 * lock - hardware lock 192 * lock - hardware lock
193 * open_sem - guard opens 193 * open_mutex - guard opens
194 * sem - guard dmabuf, write re-entry etc 194 * sem - guard dmabuf, write re-entry etc
195 */ 195 */
196 196
@@ -216,6 +216,8 @@
216#include <linux/pm.h> 216#include <linux/pm.h>
217#include <linux/gameport.h> 217#include <linux/gameport.h>
218#include <linux/kernel.h> 218#include <linux/kernel.h>
219#include <linux/mutex.h>
220
219#include <asm/uaccess.h> 221#include <asm/uaccess.h>
220#include <asm/io.h> 222#include <asm/io.h>
221#include <asm/dma.h> 223#include <asm/dma.h>
@@ -349,7 +351,7 @@ struct trident_state {
349 unsigned chans_num; 351 unsigned chans_num;
350 unsigned long fmt_flag; 352 unsigned long fmt_flag;
351 /* Guard against mmap/write/read races */ 353 /* Guard against mmap/write/read races */
352 struct semaphore sem; 354 struct mutex sem;
353 355
354}; 356};
355 357
@@ -402,7 +404,7 @@ struct trident_card {
402 struct trident_card *next; 404 struct trident_card *next;
403 405
404 /* single open lock mechanism, only used for recording */ 406 /* single open lock mechanism, only used for recording */
405 struct semaphore open_sem; 407 struct mutex open_mutex;
406 408
407 /* The trident has a certain amount of cross channel interaction 409 /* The trident has a certain amount of cross channel interaction
408 so we use a single per card lock */ 410 so we use a single per card lock */
@@ -1881,7 +1883,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
1881 if (!access_ok(VERIFY_WRITE, buffer, count)) 1883 if (!access_ok(VERIFY_WRITE, buffer, count))
1882 return -EFAULT; 1884 return -EFAULT;
1883 1885
1884 down(&state->sem); 1886 mutex_lock(&state->sem);
1885 if (!dmabuf->ready && (ret = prog_dmabuf_record(state))) 1887 if (!dmabuf->ready && (ret = prog_dmabuf_record(state)))
1886 goto out; 1888 goto out;
1887 1889
@@ -1913,7 +1915,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
1913 goto out; 1915 goto out;
1914 } 1916 }
1915 1917
1916 up(&state->sem); 1918 mutex_unlock(&state->sem);
1917 /* No matter how much space left in the buffer, */ 1919 /* No matter how much space left in the buffer, */
1918 /* we have to wait until CSO == ESO/2 or CSO == ESO */ 1920 /* we have to wait until CSO == ESO/2 or CSO == ESO */
1919 /* when address engine interrupts */ 1921 /* when address engine interrupts */
@@ -1940,7 +1942,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
1940 ret = -ERESTARTSYS; 1942 ret = -ERESTARTSYS;
1941 goto out; 1943 goto out;
1942 } 1944 }
1943 down(&state->sem); 1945 mutex_lock(&state->sem);
1944 if (dmabuf->mapped) { 1946 if (dmabuf->mapped) {
1945 if (!ret) 1947 if (!ret)
1946 ret = -ENXIO; 1948 ret = -ENXIO;
@@ -1968,7 +1970,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
1968 start_adc(state); 1970 start_adc(state);
1969 } 1971 }
1970out: 1972out:
1971 up(&state->sem); 1973 mutex_unlock(&state->sem);
1972 return ret; 1974 return ret;
1973} 1975}
1974 1976
@@ -1996,7 +1998,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
1996 * Guard against an mmap or ioctl while writing 1998 * Guard against an mmap or ioctl while writing
1997 */ 1999 */
1998 2000
1999 down(&state->sem); 2001 mutex_lock(&state->sem);
2000 2002
2001 if (dmabuf->mapped) { 2003 if (dmabuf->mapped) {
2002 ret = -ENXIO; 2004 ret = -ENXIO;
@@ -2045,7 +2047,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2045 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2); 2047 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
2046 tmo >>= sample_shift[dmabuf->fmt]; 2048 tmo >>= sample_shift[dmabuf->fmt];
2047 unlock_set_fmt(state); 2049 unlock_set_fmt(state);
2048 up(&state->sem); 2050 mutex_unlock(&state->sem);
2049 2051
2050 /* There are two situations when sleep_on_timeout */ 2052 /* There are two situations when sleep_on_timeout */
2051 /* returns, one is when the interrupt is serviced */ 2053 /* returns, one is when the interrupt is serviced */
@@ -2073,7 +2075,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2073 ret = -ERESTARTSYS; 2075 ret = -ERESTARTSYS;
2074 goto out_nolock; 2076 goto out_nolock;
2075 } 2077 }
2076 down(&state->sem); 2078 mutex_lock(&state->sem);
2077 if (dmabuf->mapped) { 2079 if (dmabuf->mapped) {
2078 if (!ret) 2080 if (!ret)
2079 ret = -ENXIO; 2081 ret = -ENXIO;
@@ -2131,7 +2133,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2131 start_dac(state); 2133 start_dac(state);
2132 } 2134 }
2133out: 2135out:
2134 up(&state->sem); 2136 mutex_unlock(&state->sem);
2135out_nolock: 2137out_nolock:
2136 return ret; 2138 return ret;
2137} 2139}
@@ -2152,24 +2154,24 @@ trident_poll(struct file *file, struct poll_table_struct *wait)
2152 * prog_dmabuf events 2154 * prog_dmabuf events
2153 */ 2155 */
2154 2156
2155 down(&state->sem); 2157 mutex_lock(&state->sem);
2156 2158
2157 if (file->f_mode & FMODE_WRITE) { 2159 if (file->f_mode & FMODE_WRITE) {
2158 if (!dmabuf->ready && prog_dmabuf_playback(state)) { 2160 if (!dmabuf->ready && prog_dmabuf_playback(state)) {
2159 up(&state->sem); 2161 mutex_unlock(&state->sem);
2160 return 0; 2162 return 0;
2161 } 2163 }
2162 poll_wait(file, &dmabuf->wait, wait); 2164 poll_wait(file, &dmabuf->wait, wait);
2163 } 2165 }
2164 if (file->f_mode & FMODE_READ) { 2166 if (file->f_mode & FMODE_READ) {
2165 if (!dmabuf->ready && prog_dmabuf_record(state)) { 2167 if (!dmabuf->ready && prog_dmabuf_record(state)) {
2166 up(&state->sem); 2168 mutex_unlock(&state->sem);
2167 return 0; 2169 return 0;
2168 } 2170 }
2169 poll_wait(file, &dmabuf->wait, wait); 2171 poll_wait(file, &dmabuf->wait, wait);
2170 } 2172 }
2171 2173
2172 up(&state->sem); 2174 mutex_unlock(&state->sem);
2173 2175
2174 spin_lock_irqsave(&state->card->lock, flags); 2176 spin_lock_irqsave(&state->card->lock, flags);
2175 trident_update_ptr(state); 2177 trident_update_ptr(state);
@@ -2207,7 +2209,7 @@ trident_mmap(struct file *file, struct vm_area_struct *vma)
2207 * a read or write against an mmap. 2209 * a read or write against an mmap.
2208 */ 2210 */
2209 2211
2210 down(&state->sem); 2212 mutex_lock(&state->sem);
2211 2213
2212 if (vma->vm_flags & VM_WRITE) { 2214 if (vma->vm_flags & VM_WRITE) {
2213 if ((ret = prog_dmabuf_playback(state)) != 0) 2215 if ((ret = prog_dmabuf_playback(state)) != 0)
@@ -2232,7 +2234,7 @@ trident_mmap(struct file *file, struct vm_area_struct *vma)
2232 dmabuf->mapped = 1; 2234 dmabuf->mapped = 1;
2233 ret = 0; 2235 ret = 0;
2234out: 2236out:
2235 up(&state->sem); 2237 mutex_unlock(&state->sem);
2236 return ret; 2238 return ret;
2237} 2239}
2238 2240
@@ -2429,15 +2431,15 @@ trident_ioctl(struct inode *inode, struct file *file,
2429 unlock_set_fmt(state); 2431 unlock_set_fmt(state);
2430 break; 2432 break;
2431 } 2433 }
2432 down(&state->card->open_sem); 2434 mutex_lock(&state->card->open_mutex);
2433 ret = ali_allocate_other_states_resources(state, 6); 2435 ret = ali_allocate_other_states_resources(state, 6);
2434 if (ret < 0) { 2436 if (ret < 0) {
2435 up(&state->card->open_sem); 2437 mutex_unlock(&state->card->open_mutex);
2436 unlock_set_fmt(state); 2438 unlock_set_fmt(state);
2437 break; 2439 break;
2438 } 2440 }
2439 state->card->multi_channel_use_count++; 2441 state->card->multi_channel_use_count++;
2440 up(&state->card->open_sem); 2442 mutex_unlock(&state->card->open_mutex);
2441 } else 2443 } else
2442 val = 2; /*yield to 2-channels */ 2444 val = 2; /*yield to 2-channels */
2443 } else 2445 } else
@@ -2727,11 +2729,11 @@ trident_open(struct inode *inode, struct file *file)
2727 2729
2728 /* find an available virtual channel (instance of /dev/dsp) */ 2730 /* find an available virtual channel (instance of /dev/dsp) */
2729 while (card != NULL) { 2731 while (card != NULL) {
2730 down(&card->open_sem); 2732 mutex_lock(&card->open_mutex);
2731 if (file->f_mode & FMODE_READ) { 2733 if (file->f_mode & FMODE_READ) {
2732 /* Skip opens on cards that are in 6 channel mode */ 2734 /* Skip opens on cards that are in 6 channel mode */
2733 if (card->multi_channel_use_count > 0) { 2735 if (card->multi_channel_use_count > 0) {
2734 up(&card->open_sem); 2736 mutex_unlock(&card->open_mutex);
2735 card = card->next; 2737 card = card->next;
2736 continue; 2738 continue;
2737 } 2739 }
@@ -2740,16 +2742,16 @@ trident_open(struct inode *inode, struct file *file)
2740 if (card->states[i] == NULL) { 2742 if (card->states[i] == NULL) {
2741 state = card->states[i] = kmalloc(sizeof(*state), GFP_KERNEL); 2743 state = card->states[i] = kmalloc(sizeof(*state), GFP_KERNEL);
2742 if (state == NULL) { 2744 if (state == NULL) {
2743 up(&card->open_sem); 2745 mutex_unlock(&card->open_mutex);
2744 return -ENOMEM; 2746 return -ENOMEM;
2745 } 2747 }
2746 memset(state, 0, sizeof(*state)); 2748 memset(state, 0, sizeof(*state));
2747 init_MUTEX(&state->sem); 2749 mutex_init(&state->sem);
2748 dmabuf = &state->dmabuf; 2750 dmabuf = &state->dmabuf;
2749 goto found_virt; 2751 goto found_virt;
2750 } 2752 }
2751 } 2753 }
2752 up(&card->open_sem); 2754 mutex_unlock(&card->open_mutex);
2753 card = card->next; 2755 card = card->next;
2754 } 2756 }
2755 /* no more virtual channel avaiable */ 2757 /* no more virtual channel avaiable */
@@ -2816,7 +2818,7 @@ trident_open(struct inode *inode, struct file *file)
2816 } 2818 }
2817 2819
2818 state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 2820 state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
2819 up(&card->open_sem); 2821 mutex_unlock(&card->open_mutex);
2820 2822
2821 pr_debug("trident: open virtual channel %d, hard channel %d\n", 2823 pr_debug("trident: open virtual channel %d, hard channel %d\n",
2822 state->virt, dmabuf->channel->num); 2824 state->virt, dmabuf->channel->num);
@@ -2845,7 +2847,7 @@ trident_release(struct inode *inode, struct file *file)
2845 state->virt, dmabuf->channel->num); 2847 state->virt, dmabuf->channel->num);
2846 2848
2847 /* stop DMA state machine and free DMA buffers/channels */ 2849 /* stop DMA state machine and free DMA buffers/channels */
2848 down(&card->open_sem); 2850 mutex_lock(&card->open_mutex);
2849 2851
2850 if (file->f_mode & FMODE_WRITE) { 2852 if (file->f_mode & FMODE_WRITE) {
2851 stop_dac(state); 2853 stop_dac(state);
@@ -2878,8 +2880,8 @@ trident_release(struct inode *inode, struct file *file)
2878 card->states[state->virt] = NULL; 2880 card->states[state->virt] = NULL;
2879 kfree(state); 2881 kfree(state);
2880 2882
2881 /* we're covered by the open_sem */ 2883 /* we're covered by the open_mutex */
2882 up(&card->open_sem); 2884 mutex_unlock(&card->open_mutex);
2883 2885
2884 return 0; 2886 return 0;
2885} 2887}
@@ -4405,7 +4407,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4405 card->banks[BANK_B].addresses = &bank_b_addrs; 4407 card->banks[BANK_B].addresses = &bank_b_addrs;
4406 card->banks[BANK_B].bitmap = 0UL; 4408 card->banks[BANK_B].bitmap = 0UL;
4407 4409
4408 init_MUTEX(&card->open_sem); 4410 mutex_init(&card->open_mutex);
4409 spin_lock_init(&card->lock); 4411 spin_lock_init(&card->lock);
4410 init_timer(&card->timer); 4412 init_timer(&card->timer);
4411 4413
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
index 83edda93f0b4..1a921ee71aba 100644
--- a/sound/oss/via82cxxx_audio.c
+++ b/sound/oss/via82cxxx_audio.c
@@ -38,7 +38,8 @@
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <asm/io.h> 39#include <asm/io.h>
40#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <asm/semaphore.h> 41#include <linux/mutex.h>
42
42#include "sound_config.h" 43#include "sound_config.h"
43#include "dev_table.h" 44#include "dev_table.h"
44#include "mpu401.h" 45#include "mpu401.h"
@@ -311,8 +312,8 @@ struct via_info {
311 312
312 int mixer_vol; /* 8233/35 volume - not yet implemented */ 313 int mixer_vol; /* 8233/35 volume - not yet implemented */
313 314
314 struct semaphore syscall_sem; 315 struct mutex syscall_mutex;
315 struct semaphore open_sem; 316 struct mutex open_mutex;
316 317
317 /* The 8233/8235 have 4 DX audio channels, two record and 318 /* The 8233/8235 have 4 DX audio channels, two record and
318 one six channel out. We bind ch_in to DX 1, ch_out to multichannel 319 one six channel out. We bind ch_in to DX 1, ch_out to multichannel
@@ -505,10 +506,10 @@ static inline int via_syscall_down (struct via_info *card, int nonblock)
505 nonblock = 0; 506 nonblock = 0;
506 507
507 if (nonblock) { 508 if (nonblock) {
508 if (down_trylock (&card->syscall_sem)) 509 if (!mutex_trylock(&card->syscall_mutex))
509 return -EAGAIN; 510 return -EAGAIN;
510 } else { 511 } else {
511 if (down_interruptible (&card->syscall_sem)) 512 if (mutex_lock_interruptible(&card->syscall_mutex))
512 return -ERESTARTSYS; 513 return -ERESTARTSYS;
513 } 514 }
514 515
@@ -1609,7 +1610,7 @@ static int via_mixer_ioctl (struct inode *inode, struct file *file, unsigned int
1609#endif 1610#endif
1610 rc = codec->mixer_ioctl(codec, cmd, arg); 1611 rc = codec->mixer_ioctl(codec, cmd, arg);
1611 1612
1612 up (&card->syscall_sem); 1613 mutex_unlock(&card->syscall_mutex);
1613 1614
1614out: 1615out:
1615 DPRINTK ("EXIT, returning %d\n", rc); 1616 DPRINTK ("EXIT, returning %d\n", rc);
@@ -2228,7 +2229,7 @@ static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma)
2228 if (wr) 2229 if (wr)
2229 card->ch_out.is_mapped = 1; 2230 card->ch_out.is_mapped = 1;
2230 2231
2231 up (&card->syscall_sem); 2232 mutex_unlock(&card->syscall_mutex);
2232 rc = 0; 2233 rc = 0;
2233 2234
2234out: 2235out:
@@ -2256,7 +2257,7 @@ handle_one_block:
2256 /* Thomas Sailer: 2257 /* Thomas Sailer:
2257 * But also to ourselves, release semaphore if we do so */ 2258 * But also to ourselves, release semaphore if we do so */
2258 if (need_resched()) { 2259 if (need_resched()) {
2259 up(&card->syscall_sem); 2260 mutex_unlock(&card->syscall_mutex);
2260 schedule (); 2261 schedule ();
2261 ret = via_syscall_down (card, nonblock); 2262 ret = via_syscall_down (card, nonblock);
2262 if (ret) 2263 if (ret)
@@ -2286,7 +2287,7 @@ handle_one_block:
2286 break; 2287 break;
2287 } 2288 }
2288 2289
2289 up(&card->syscall_sem); 2290 mutex_unlock(&card->syscall_mutex);
2290 2291
2291 DPRINTK ("Sleeping on block %d\n", n); 2292 DPRINTK ("Sleeping on block %d\n", n);
2292 schedule(); 2293 schedule();
@@ -2402,7 +2403,7 @@ static ssize_t via_dsp_read(struct file *file, char __user *buffer, size_t count
2402 rc = via_dsp_do_read (card, buffer, count, nonblock); 2403 rc = via_dsp_do_read (card, buffer, count, nonblock);
2403 2404
2404out_up: 2405out_up:
2405 up (&card->syscall_sem); 2406 mutex_unlock(&card->syscall_mutex);
2406out: 2407out:
2407 DPRINTK ("EXIT, returning %ld\n",(long) rc); 2408 DPRINTK ("EXIT, returning %ld\n",(long) rc);
2408 return rc; 2409 return rc;
@@ -2426,7 +2427,7 @@ handle_one_block:
2426 /* Thomas Sailer: 2427 /* Thomas Sailer:
2427 * But also to ourselves, release semaphore if we do so */ 2428 * But also to ourselves, release semaphore if we do so */
2428 if (need_resched()) { 2429 if (need_resched()) {
2429 up(&card->syscall_sem); 2430 mutex_unlock(&card->syscall_mutex);
2430 schedule (); 2431 schedule ();
2431 ret = via_syscall_down (card, nonblock); 2432 ret = via_syscall_down (card, nonblock);
2432 if (ret) 2433 if (ret)
@@ -2456,7 +2457,7 @@ handle_one_block:
2456 break; 2457 break;
2457 } 2458 }
2458 2459
2459 up(&card->syscall_sem); 2460 mutex_unlock(&card->syscall_mutex);
2460 2461
2461 DPRINTK ("Sleeping on page %d, tmp==%d, ir==%d\n", n, tmp, chan->is_record); 2462 DPRINTK ("Sleeping on page %d, tmp==%d, ir==%d\n", n, tmp, chan->is_record);
2462 schedule(); 2463 schedule();
@@ -2585,7 +2586,7 @@ static ssize_t via_dsp_write(struct file *file, const char __user *buffer, size_
2585 rc = via_dsp_do_write (card, buffer, count, nonblock); 2586 rc = via_dsp_do_write (card, buffer, count, nonblock);
2586 2587
2587out_up: 2588out_up:
2588 up (&card->syscall_sem); 2589 mutex_unlock(&card->syscall_mutex);
2589out: 2590out:
2590 DPRINTK ("EXIT, returning %ld\n",(long) rc); 2591 DPRINTK ("EXIT, returning %ld\n",(long) rc);
2591 return rc; 2592 return rc;
@@ -2634,7 +2635,7 @@ static unsigned int via_dsp_poll(struct file *file, struct poll_table_struct *wa
2634 * Sleeps until all playback has been flushed to the audio 2635 * Sleeps until all playback has been flushed to the audio
2635 * hardware. 2636 * hardware.
2636 * 2637 *
2637 * Locking: inside card->syscall_sem 2638 * Locking: inside card->syscall_mutex
2638 */ 2639 */
2639 2640
2640static int via_dsp_drain_playback (struct via_info *card, 2641static int via_dsp_drain_playback (struct via_info *card,
@@ -2692,7 +2693,7 @@ static int via_dsp_drain_playback (struct via_info *card,
2692 printk (KERN_ERR "sleeping but not active\n"); 2693 printk (KERN_ERR "sleeping but not active\n");
2693#endif 2694#endif
2694 2695
2695 up(&card->syscall_sem); 2696 mutex_unlock(&card->syscall_mutex);
2696 2697
2697 DPRINTK ("sleeping, nbufs=%d\n", atomic_read (&chan->n_frags)); 2698 DPRINTK ("sleeping, nbufs=%d\n", atomic_read (&chan->n_frags));
2698 schedule(); 2699 schedule();
@@ -2748,7 +2749,7 @@ out:
2748 * 2749 *
2749 * Handles SNDCTL_DSP_GETISPACE and SNDCTL_DSP_GETOSPACE. 2750 * Handles SNDCTL_DSP_GETISPACE and SNDCTL_DSP_GETOSPACE.
2750 * 2751 *
2751 * Locking: inside card->syscall_sem 2752 * Locking: inside card->syscall_mutex
2752 */ 2753 */
2753 2754
2754static int via_dsp_ioctl_space (struct via_info *card, 2755static int via_dsp_ioctl_space (struct via_info *card,
@@ -2793,7 +2794,7 @@ static int via_dsp_ioctl_space (struct via_info *card,
2793 * 2794 *
2794 * Handles SNDCTL_DSP_GETIPTR and SNDCTL_DSP_GETOPTR. 2795 * Handles SNDCTL_DSP_GETIPTR and SNDCTL_DSP_GETOPTR.
2795 * 2796 *
2796 * Locking: inside card->syscall_sem 2797 * Locking: inside card->syscall_mutex
2797 */ 2798 */
2798 2799
2799static int via_dsp_ioctl_ptr (struct via_info *card, 2800static int via_dsp_ioctl_ptr (struct via_info *card,
@@ -3221,7 +3222,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
3221 break; 3222 break;
3222 } 3223 }
3223 3224
3224 up (&card->syscall_sem); 3225 mutex_unlock(&card->syscall_mutex);
3225 DPRINTK ("EXIT, returning %d\n", rc); 3226 DPRINTK ("EXIT, returning %d\n", rc);
3226 return rc; 3227 return rc;
3227} 3228}
@@ -3264,12 +3265,12 @@ static int via_dsp_open (struct inode *inode, struct file *file)
3264 3265
3265match: 3266match:
3266 if (nonblock) { 3267 if (nonblock) {
3267 if (down_trylock (&card->open_sem)) { 3268 if (!mutex_trylock(&card->open_mutex)) {
3268 DPRINTK ("EXIT, returning -EAGAIN\n"); 3269 DPRINTK ("EXIT, returning -EAGAIN\n");
3269 return -EAGAIN; 3270 return -EAGAIN;
3270 } 3271 }
3271 } else { 3272 } else {
3272 if (down_interruptible (&card->open_sem)) { 3273 if (mutex_lock_interruptible(&card->open_mutex)) {
3273 DPRINTK ("EXIT, returning -ERESTARTSYS\n"); 3274 DPRINTK ("EXIT, returning -ERESTARTSYS\n");
3274 return -ERESTARTSYS; 3275 return -ERESTARTSYS;
3275 } 3276 }
@@ -3355,8 +3356,8 @@ static int via_dsp_release(struct inode *inode, struct file *file)
3355 via_chan_buffer_free (card, &card->ch_in); 3356 via_chan_buffer_free (card, &card->ch_in);
3356 } 3357 }
3357 3358
3358 up (&card->syscall_sem); 3359 mutex_unlock(&card->syscall_mutex);
3359 up (&card->open_sem); 3360 mutex_unlock(&card->open_mutex);
3360 3361
3361 DPRINTK ("EXIT, returning 0\n"); 3362 DPRINTK ("EXIT, returning 0\n");
3362 return 0; 3363 return 0;
@@ -3414,8 +3415,8 @@ static int __devinit via_init_one (struct pci_dev *pdev, const struct pci_device
3414 card->card_num = via_num_cards++; 3415 card->card_num = via_num_cards++;
3415 spin_lock_init (&card->lock); 3416 spin_lock_init (&card->lock);
3416 spin_lock_init (&card->ac97_lock); 3417 spin_lock_init (&card->ac97_lock);
3417 init_MUTEX (&card->syscall_sem); 3418 mutex_init(&card->syscall_mutex);
3418 init_MUTEX (&card->open_sem); 3419 mutex_init(&card->open_mutex);
3419 3420
3420 /* we must init these now, in case the intr handler needs them */ 3421 /* we must init these now, in case the intr handler needs them */
3421 via_chan_init_defaults (card, &card->ch_out); 3422 via_chan_init_defaults (card, &card->ch_out);
diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c
index 265423054caf..b372e88e857f 100644
--- a/sound/oss/vwsnd.c
+++ b/sound/oss/vwsnd.c
@@ -94,7 +94,7 @@
94 * Open will block until the previous client has closed the 94 * Open will block until the previous client has closed the
95 * device, unless O_NONBLOCK is specified. 95 * device, unless O_NONBLOCK is specified.
96 * 96 *
97 * The semaphore devc->io_sema serializes PCM I/O syscalls. This 97 * The semaphore devc->io_mutex serializes PCM I/O syscalls. This
98 * is unnecessary in Linux 2.2, because the kernel lock 98 * is unnecessary in Linux 2.2, because the kernel lock
99 * serializes read, write, and ioctl globally, but it's there, 99 * serializes read, write, and ioctl globally, but it's there,
100 * ready for the brave, new post-kernel-lock world. 100 * ready for the brave, new post-kernel-lock world.
@@ -105,7 +105,7 @@
105 * area it owns and update its pointers. See pcm_output() and 105 * area it owns and update its pointers. See pcm_output() and
106 * pcm_input() for most of the gory stuff. 106 * pcm_input() for most of the gory stuff.
107 * 107 *
108 * devc->mix_sema serializes all mixer ioctls. This is also 108 * devc->mix_mutex serializes all mixer ioctls. This is also
109 * redundant because of the kernel lock. 109 * redundant because of the kernel lock.
110 * 110 *
111 * The lowest level lock is lith->lithium_lock. It is a 111 * The lowest level lock is lith->lithium_lock. It is a
@@ -148,7 +148,8 @@
148#include <linux/smp_lock.h> 148#include <linux/smp_lock.h>
149#include <linux/wait.h> 149#include <linux/wait.h>
150#include <linux/interrupt.h> 150#include <linux/interrupt.h>
151#include <asm/semaphore.h> 151#include <linux/mutex.h>
152
152#include <asm/mach-visws/cobalt.h> 153#include <asm/mach-visws/cobalt.h>
153 154
154#include "sound_config.h" 155#include "sound_config.h"
@@ -1447,11 +1448,11 @@ typedef enum vwsnd_port_flags {
1447 * 1448 *
1448 * port->lock protects: hwstate, flags, swb_[iu]_avail. 1449 * port->lock protects: hwstate, flags, swb_[iu]_avail.
1449 * 1450 *
1450 * devc->io_sema protects: swstate, sw_*, swb_[iu]_idx. 1451 * devc->io_mutex protects: swstate, sw_*, swb_[iu]_idx.
1451 * 1452 *
1452 * everything else is only written by open/release or 1453 * everything else is only written by open/release or
1453 * pcm_{setup,shutdown}(), which are serialized by a 1454 * pcm_{setup,shutdown}(), which are serialized by a
1454 * combination of devc->open_sema and devc->io_sema. 1455 * combination of devc->open_mutex and devc->io_mutex.
1455 */ 1456 */
1456 1457
1457typedef struct vwsnd_port { 1458typedef struct vwsnd_port {
@@ -1507,9 +1508,9 @@ typedef struct vwsnd_dev {
1507 int audio_minor; /* minor number of audio device */ 1508 int audio_minor; /* minor number of audio device */
1508 int mixer_minor; /* minor number of mixer device */ 1509 int mixer_minor; /* minor number of mixer device */
1509 1510
1510 struct semaphore open_sema; 1511 struct mutex open_mutex;
1511 struct semaphore io_sema; 1512 struct mutex io_mutex;
1512 struct semaphore mix_sema; 1513 struct mutex mix_mutex;
1513 mode_t open_mode; 1514 mode_t open_mode;
1514 wait_queue_head_t open_wait; 1515 wait_queue_head_t open_wait;
1515 1516
@@ -1633,7 +1634,7 @@ static __inline__ unsigned int swb_inc_i(vwsnd_port_t *port, int inc)
1633 * mode-setting ioctls have been done, but before the first I/O is 1634 * mode-setting ioctls have been done, but before the first I/O is
1634 * done. 1635 * done.
1635 * 1636 *
1636 * Locking: called with devc->io_sema held. 1637 * Locking: called with devc->io_mutex held.
1637 * 1638 *
1638 * Returns 0 on success, -errno on failure. 1639 * Returns 0 on success, -errno on failure.
1639 */ 1640 */
@@ -2319,9 +2320,9 @@ static ssize_t vwsnd_audio_read(struct file *file,
2319 vwsnd_dev_t *devc = file->private_data; 2320 vwsnd_dev_t *devc = file->private_data;
2320 ssize_t ret; 2321 ssize_t ret;
2321 2322
2322 down(&devc->io_sema); 2323 mutex_lock(&devc->io_mutex);
2323 ret = vwsnd_audio_do_read(file, buffer, count, ppos); 2324 ret = vwsnd_audio_do_read(file, buffer, count, ppos);
2324 up(&devc->io_sema); 2325 mutex_unlock(&devc->io_mutex);
2325 return ret; 2326 return ret;
2326} 2327}
2327 2328
@@ -2394,9 +2395,9 @@ static ssize_t vwsnd_audio_write(struct file *file,
2394 vwsnd_dev_t *devc = file->private_data; 2395 vwsnd_dev_t *devc = file->private_data;
2395 ssize_t ret; 2396 ssize_t ret;
2396 2397
2397 down(&devc->io_sema); 2398 mutex_lock(&devc->io_mutex);
2398 ret = vwsnd_audio_do_write(file, buffer, count, ppos); 2399 ret = vwsnd_audio_do_write(file, buffer, count, ppos);
2399 up(&devc->io_sema); 2400 mutex_unlock(&devc->io_mutex);
2400 return ret; 2401 return ret;
2401} 2402}
2402 2403
@@ -2891,9 +2892,9 @@ static int vwsnd_audio_ioctl(struct inode *inode,
2891 vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data; 2892 vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data;
2892 int ret; 2893 int ret;
2893 2894
2894 down(&devc->io_sema); 2895 mutex_lock(&devc->io_mutex);
2895 ret = vwsnd_audio_do_ioctl(inode, file, cmd, arg); 2896 ret = vwsnd_audio_do_ioctl(inode, file, cmd, arg);
2896 up(&devc->io_sema); 2897 mutex_unlock(&devc->io_mutex);
2897 return ret; 2898 return ret;
2898} 2899}
2899 2900
@@ -2929,9 +2930,9 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
2929 return -ENODEV; 2930 return -ENODEV;
2930 } 2931 }
2931 2932
2932 down(&devc->open_sema); 2933 mutex_lock(&devc->open_mutex);
2933 while (devc->open_mode & file->f_mode) { 2934 while (devc->open_mode & file->f_mode) {
2934 up(&devc->open_sema); 2935 mutex_unlock(&devc->open_mutex);
2935 if (file->f_flags & O_NONBLOCK) { 2936 if (file->f_flags & O_NONBLOCK) {
2936 DEC_USE_COUNT; 2937 DEC_USE_COUNT;
2937 return -EBUSY; 2938 return -EBUSY;
@@ -2941,10 +2942,10 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
2941 DEC_USE_COUNT; 2942 DEC_USE_COUNT;
2942 return -ERESTARTSYS; 2943 return -ERESTARTSYS;
2943 } 2944 }
2944 down(&devc->open_sema); 2945 mutex_lock(&devc->open_mutex);
2945 } 2946 }
2946 devc->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); 2947 devc->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
2947 up(&devc->open_sema); 2948 mutex_unlock(&devc->open_mutex);
2948 2949
2949 /* get default sample format from minor number. */ 2950 /* get default sample format from minor number. */
2950 2951
@@ -2960,7 +2961,7 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
2960 2961
2961 /* Initialize vwsnd_ports. */ 2962 /* Initialize vwsnd_ports. */
2962 2963
2963 down(&devc->io_sema); 2964 mutex_lock(&devc->io_mutex);
2964 { 2965 {
2965 if (file->f_mode & FMODE_READ) { 2966 if (file->f_mode & FMODE_READ) {
2966 devc->rport.swstate = SW_INITIAL; 2967 devc->rport.swstate = SW_INITIAL;
@@ -2987,7 +2988,7 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
2987 devc->wport.frag_count = 0; 2988 devc->wport.frag_count = 0;
2988 } 2989 }
2989 } 2990 }
2990 up(&devc->io_sema); 2991 mutex_unlock(&devc->io_mutex);
2991 2992
2992 file->private_data = devc; 2993 file->private_data = devc;
2993 DBGRV(); 2994 DBGRV();
@@ -3005,7 +3006,7 @@ static int vwsnd_audio_release(struct inode *inode, struct file *file)
3005 int err = 0; 3006 int err = 0;
3006 3007
3007 lock_kernel(); 3008 lock_kernel();
3008 down(&devc->io_sema); 3009 mutex_lock(&devc->io_mutex);
3009 { 3010 {
3010 DBGEV("(inode=0x%p, file=0x%p)\n", inode, file); 3011 DBGEV("(inode=0x%p, file=0x%p)\n", inode, file);
3011 3012
@@ -3022,13 +3023,13 @@ static int vwsnd_audio_release(struct inode *inode, struct file *file)
3022 if (wport) 3023 if (wport)
3023 wport->swstate = SW_OFF; 3024 wport->swstate = SW_OFF;
3024 } 3025 }
3025 up(&devc->io_sema); 3026 mutex_unlock(&devc->io_mutex);
3026 3027
3027 down(&devc->open_sema); 3028 mutex_lock(&devc->open_mutex);
3028 { 3029 {
3029 devc->open_mode &= ~file->f_mode; 3030 devc->open_mode &= ~file->f_mode;
3030 } 3031 }
3031 up(&devc->open_sema); 3032 mutex_unlock(&devc->open_mutex);
3032 wake_up(&devc->open_wait); 3033 wake_up(&devc->open_wait);
3033 DEC_USE_COUNT; 3034 DEC_USE_COUNT;
3034 DBGR(); 3035 DBGR();
@@ -3213,7 +3214,7 @@ static int vwsnd_mixer_ioctl(struct inode *ioctl,
3213 3214
3214 DBGEV("(devc=0x%p, cmd=0x%x, arg=0x%lx)\n", devc, cmd, arg); 3215 DBGEV("(devc=0x%p, cmd=0x%x, arg=0x%lx)\n", devc, cmd, arg);
3215 3216
3216 down(&devc->mix_sema); 3217 mutex_lock(&devc->mix_mutex);
3217 { 3218 {
3218 if ((cmd & ~nrmask) == MIXER_READ(0)) 3219 if ((cmd & ~nrmask) == MIXER_READ(0))
3219 retval = mixer_read_ioctl(devc, nr, (void __user *) arg); 3220 retval = mixer_read_ioctl(devc, nr, (void __user *) arg);
@@ -3222,7 +3223,7 @@ static int vwsnd_mixer_ioctl(struct inode *ioctl,
3222 else 3223 else
3223 retval = -EINVAL; 3224 retval = -EINVAL;
3224 } 3225 }
3225 up(&devc->mix_sema); 3226 mutex_unlock(&devc->mix_mutex);
3226 return retval; 3227 return retval;
3227} 3228}
3228 3229
@@ -3376,9 +3377,9 @@ static int __init attach_vwsnd(struct address_info *hw_config)
3376 3377
3377 /* Initialize as much of *devc as possible */ 3378 /* Initialize as much of *devc as possible */
3378 3379
3379 init_MUTEX(&devc->open_sema); 3380 mutex_init(&devc->open_mutex);
3380 init_MUTEX(&devc->io_sema); 3381 mutex_init(&devc->io_mutex);
3381 init_MUTEX(&devc->mix_sema); 3382 mutex_init(&devc->mix_mutex);
3382 devc->open_mode = 0; 3383 devc->open_mode = 0;
3383 spin_lock_init(&devc->rport.lock); 3384 spin_lock_init(&devc->rport.lock);
3384 init_waitqueue_head(&devc->rport.queue); 3385 init_waitqueue_head(&devc->rport.queue);
diff --git a/sound/oss/ymfpci.c b/sound/oss/ymfpci.c
index f8bd72e46f57..bf90c124a7e6 100644
--- a/sound/oss/ymfpci.c
+++ b/sound/oss/ymfpci.c
@@ -1918,10 +1918,10 @@ static int ymf_open(struct inode *inode, struct file *file)
1918 if (unit == NULL) 1918 if (unit == NULL)
1919 return -ENODEV; 1919 return -ENODEV;
1920 1920
1921 down(&unit->open_sem); 1921 mutex_lock(&unit->open_mutex);
1922 1922
1923 if ((state = ymf_state_alloc(unit)) == NULL) { 1923 if ((state = ymf_state_alloc(unit)) == NULL) {
1924 up(&unit->open_sem); 1924 mutex_unlock(&unit->open_mutex);
1925 return -ENOMEM; 1925 return -ENOMEM;
1926 } 1926 }
1927 list_add_tail(&state->chain, &unit->states); 1927 list_add_tail(&state->chain, &unit->states);
@@ -1956,7 +1956,7 @@ static int ymf_open(struct inode *inode, struct file *file)
1956 ymfpci_writeb(unit, YDSXGR_TIMERCTRL, 1956 ymfpci_writeb(unit, YDSXGR_TIMERCTRL,
1957 (YDSXGR_TIMERCTRL_TEN|YDSXGR_TIMERCTRL_TIEN)); 1957 (YDSXGR_TIMERCTRL_TEN|YDSXGR_TIMERCTRL_TIEN));
1958#endif 1958#endif
1959 up(&unit->open_sem); 1959 mutex_unlock(&unit->open_mutex);
1960 1960
1961 return nonseekable_open(inode, file); 1961 return nonseekable_open(inode, file);
1962 1962
@@ -1974,7 +1974,7 @@ out_nodma:
1974 list_del(&state->chain); 1974 list_del(&state->chain);
1975 kfree(state); 1975 kfree(state);
1976 1976
1977 up(&unit->open_sem); 1977 mutex_unlock(&unit->open_mutex);
1978 return err; 1978 return err;
1979} 1979}
1980 1980
@@ -1987,7 +1987,7 @@ static int ymf_release(struct inode *inode, struct file *file)
1987 ymfpci_writeb(unit, YDSXGR_TIMERCTRL, 0); 1987 ymfpci_writeb(unit, YDSXGR_TIMERCTRL, 0);
1988#endif 1988#endif
1989 1989
1990 down(&unit->open_sem); 1990 mutex_lock(&unit->open_mutex);
1991 1991
1992 /* 1992 /*
1993 * XXX Solve the case of O_NONBLOCK close - don't deallocate here. 1993 * XXX Solve the case of O_NONBLOCK close - don't deallocate here.
@@ -2004,7 +2004,7 @@ static int ymf_release(struct inode *inode, struct file *file)
2004 file->private_data = NULL; /* Can you tell I programmed Solaris */ 2004 file->private_data = NULL; /* Can you tell I programmed Solaris */
2005 kfree(state); 2005 kfree(state);
2006 2006
2007 up(&unit->open_sem); 2007 mutex_unlock(&unit->open_mutex);
2008 2008
2009 return 0; 2009 return 0;
2010} 2010}
@@ -2532,7 +2532,7 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi
2532 spin_lock_init(&codec->reg_lock); 2532 spin_lock_init(&codec->reg_lock);
2533 spin_lock_init(&codec->voice_lock); 2533 spin_lock_init(&codec->voice_lock);
2534 spin_lock_init(&codec->ac97_lock); 2534 spin_lock_init(&codec->ac97_lock);
2535 init_MUTEX(&codec->open_sem); 2535 mutex_init(&codec->open_mutex);
2536 INIT_LIST_HEAD(&codec->states); 2536 INIT_LIST_HEAD(&codec->states);
2537 codec->pci = pcidev; 2537 codec->pci = pcidev;
2538 2538
diff --git a/sound/oss/ymfpci.h b/sound/oss/ymfpci.h
index f810a100c641..ac1785f2b7e7 100644
--- a/sound/oss/ymfpci.h
+++ b/sound/oss/ymfpci.h
@@ -22,6 +22,7 @@
22 * 22 *
23 */ 23 */
24#include <linux/config.h> 24#include <linux/config.h>
25#include <linux/mutex.h>
25 26
26/* 27/*
27 * Direct registers 28 * Direct registers
@@ -279,7 +280,7 @@ struct ymf_unit {
279 280
280 /* soundcore stuff */ 281 /* soundcore stuff */
281 int dev_audio; 282 int dev_audio;
282 struct semaphore open_sem; 283 struct mutex open_mutex;
283 284
284 struct list_head ymf_devs; 285 struct list_head ymf_devs;
285 struct list_head states; /* List of states for this unit */ 286 struct list_head states; /* List of states for this unit */