-rw-r--r--CREDITS6
-rw-r--r--Documentation/DocBook/kernel-api.tmpl44
-rw-r--r--Documentation/RCU/checklist.txt44
-rw-r--r--Documentation/RCU/whatisRCU.txt12
-rw-r--r--Documentation/devices.txt7
-rw-r--r--Documentation/filesystems/fuse.txt118
-rw-r--r--Documentation/filesystems/ramfs-rootfs-initramfs.txt146
-rw-r--r--Documentation/kdump/kdump.txt420
-rw-r--r--Documentation/memory-barriers.txt34
-rw-r--r--Documentation/rtc.txt7
-rw-r--r--Documentation/sysrq.txt5
-rw-r--r--arch/arm/kernel/bios32.c1
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-power.c3
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-power.c3
-rw-r--r--arch/cris/arch-v32/drivers/pci/bios.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c6
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c6
-rw-r--r--arch/i386/kernel/crash.c3
-rw-r--r--arch/i386/kernel/doublefault.c3
-rw-r--r--arch/i386/kernel/setup.c19
-rw-r--r--arch/i386/kernel/smpboot.c14
-rw-r--r--arch/i386/lib/usercopy.c119
-rw-r--r--arch/i386/mach-default/setup.c43
-rw-r--r--arch/i386/mach-visws/setup.c49
-rw-r--r--arch/i386/mach-voyager/setup.c74
-rw-r--r--arch/i386/pci/i386.c2
-rw-r--r--arch/ia64/kernel/topology.c2
-rw-r--r--arch/ia64/pci/pci.c3
-rw-r--r--arch/m68k/amiga/amiga_ksyms.c2
-rw-r--r--arch/m68k/amiga/amiints.c384
-rw-r--r--arch/m68k/amiga/cia.c156
-rw-r--r--arch/m68k/amiga/config.c15
-rw-r--r--arch/m68k/apollo/Makefile2
-rw-r--r--arch/m68k/apollo/config.c24
-rw-r--r--arch/m68k/apollo/dn_ints.c137
-rw-r--r--arch/m68k/atari/ataints.c278
-rw-r--r--arch/m68k/atari/config.c11
-rw-r--r--arch/m68k/bvme6000/Makefile2
-rw-r--r--arch/m68k/bvme6000/bvmeints.c160
-rw-r--r--arch/m68k/bvme6000/config.c21
-rw-r--r--arch/m68k/hp300/Makefile2
-rw-r--r--arch/m68k/hp300/config.c11
-rw-r--r--arch/m68k/hp300/ints.c175
-rw-r--r--arch/m68k/hp300/ints.h9
-rw-r--r--arch/m68k/hp300/time.c3
-rw-r--r--arch/m68k/kernel/Makefile4
-rw-r--r--arch/m68k/kernel/dma.c129
-rw-r--r--arch/m68k/kernel/entry.S100
-rw-r--r--arch/m68k/kernel/ints.c378
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c2
-rw-r--r--arch/m68k/kernel/setup.c3
-rw-r--r--arch/m68k/kernel/traps.c180
-rw-r--r--arch/m68k/mac/baboon.c2
-rw-r--r--arch/m68k/mac/config.c20
-rw-r--r--arch/m68k/mac/iop.c2
-rw-r--r--arch/m68k/mac/macints.c503
-rw-r--r--arch/m68k/mac/oss.c14
-rw-r--r--arch/m68k/mac/psc.c10
-rw-r--r--arch/m68k/mac/via.c18
-rw-r--r--arch/m68k/mm/kmap.c6
-rw-r--r--arch/m68k/mvme147/147ints.c145
-rw-r--r--arch/m68k/mvme147/Makefile2
-rw-r--r--arch/m68k/mvme147/config.c22
-rw-r--r--arch/m68k/mvme16x/16xints.c149
-rw-r--r--arch/m68k/mvme16x/Makefile2
-rw-r--r--arch/m68k/mvme16x/config.c23
-rw-r--r--arch/m68k/q40/config.c13
-rw-r--r--arch/m68k/q40/q40ints.c481
-rw-r--r--arch/m68k/sun3/config.c8
-rw-r--r--arch/m68k/sun3/sun3ints.c208
-rw-r--r--arch/m68k/sun3x/config.c7
-rw-r--r--arch/mips/kernel/irixsig.c3
-rw-r--r--arch/mips/kernel/sysirix.c2
-rw-r--r--arch/powerpc/kernel/pci_32.c1
-rw-r--r--arch/powerpc/kernel/pci_64.c1
-rw-r--r--arch/powerpc/kernel/proc_ppc64.c2
-rw-r--r--arch/powerpc/kernel/traps.c15
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c4
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c2
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c270
-rw-r--r--arch/powerpc/xmon/xmon.c3
-rw-r--r--arch/ppc/kernel/pci.c1
-rw-r--r--arch/s390/kernel/setup.c15
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/um/include/sysdep-x86_64/syscalls.h2
-rw-r--r--arch/x86_64/kernel/setup.c2
-rw-r--r--arch/xtensa/kernel/pci.c12
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/block/cciss.c3216
-rw-r--r--drivers/block/cciss.h2
-rw-r--r--drivers/block/cpqarray.c9
-rw-r--r--drivers/block/loop.c24
-rw-r--r--drivers/block/nbd.c34
-rw-r--r--drivers/cdrom/mcdx.c2
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/applicom.c40
-rw-r--r--drivers/char/cyclades.c7
-rw-r--r--drivers/char/esp.c4
-rw-r--r--drivers/char/ip2/ip2main.c12
-rw-r--r--drivers/char/isicom.c4
-rw-r--r--drivers/char/keyboard.c1
-rw-r--r--drivers/char/mmtimer.c1
-rw-r--r--drivers/char/mxser.c836
-rw-r--r--drivers/char/n_r3964.c3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c4
-rw-r--r--drivers/char/rocket.c62
-rw-r--r--drivers/char/specialix.c4
-rw-r--r--drivers/char/synclink_gt.c119
-rw-r--r--drivers/char/synclinkmp.c4
-rw-r--r--drivers/firmware/Kconfig3
-rw-r--r--drivers/firmware/dmi_scan.c13
-rw-r--r--drivers/ide/ide-cd.c120
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-io.c57
-rw-r--r--drivers/ide/ide-tape.c49
-rw-r--r--drivers/ieee1394/Kconfig2
-rw-r--r--drivers/ieee1394/nodemgr.c5
-rw-r--r--drivers/input/touchscreen/ads7846.c3
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c6
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/ledtrig-heartbeat.c118
-rw-r--r--drivers/macintosh/Kconfig19
-rw-r--r--drivers/macintosh/Makefile1
-rw-r--r--drivers/macintosh/adbhid.c28
-rw-r--r--drivers/macintosh/via-pmu-backlight.c150
-rw-r--r--drivers/macintosh/via-pmu.c120
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/sun3lance.c2
-rw-r--r--drivers/net/wan/sdla.c4
-rw-r--r--drivers/oprofile/buffer_sync.c8
-rw-r--r--drivers/oprofile/event_buffer.c12
-rw-r--r--drivers/oprofile/event_buffer.h4
-rw-r--r--drivers/oprofile/oprof.c26
-rw-r--r--drivers/parport/Kconfig12
-rw-r--r--drivers/parport/Makefile3
-rw-r--r--drivers/parport/daisy.c2
-rw-r--r--drivers/parport/parport_ax88796.c443
-rw-r--r--drivers/parport/share.c2
-rw-r--r--drivers/pnp/card.c48
-rw-r--r--drivers/rtc/Kconfig91
-rw-r--r--drivers/rtc/Makefile8
-rw-r--r--drivers/rtc/class.c1
-rw-r--r--drivers/rtc/interface.c22
-rw-r--r--drivers/rtc/rtc-at91.c407
-rw-r--r--drivers/rtc/rtc-dev.c131
-rw-r--r--drivers/rtc/rtc-ds1307.c388
-rw-r--r--drivers/rtc/rtc-ds1553.c414
-rw-r--r--drivers/rtc/rtc-ds1742.c259
-rw-r--r--drivers/rtc/rtc-lib.c19
-rw-r--r--drivers/rtc/rtc-max6902.c286
-rw-r--r--drivers/rtc/rtc-pcf8583.c394
-rw-r--r--drivers/rtc/rtc-pl031.c233
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-v3020.c264
-rw-r--r--drivers/rtc/rtc-vr41xx.c8
-rw-r--r--drivers/s390/char/sclp_quiesce.c3
-rw-r--r--drivers/sbus/char/flash.c1
-rw-r--r--drivers/sbus/char/vfc_dev.c2
-rw-r--r--drivers/scsi/advansys.c6
-rw-r--r--drivers/scsi/mac_esp.c7
-rw-r--r--drivers/scsi/mac_scsi.c7
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/sun3x_esp.c8
-rw-r--r--drivers/scsi/wd7000.c2
-rw-r--r--drivers/video/Kconfig56
-rw-r--r--drivers/video/aty/Makefile1
-rw-r--r--drivers/video/aty/aty128fb.c322
-rw-r--r--drivers/video/aty/atyfb.h1
-rw-r--r--drivers/video/aty/atyfb_base.c178
-rw-r--r--drivers/video/aty/radeon_backlight.c247
-rw-r--r--drivers/video/aty/radeon_base.c140
-rw-r--r--drivers/video/aty/radeonfb.h9
-rw-r--r--drivers/video/chipsfb.c30
-rw-r--r--drivers/video/fbsysfs.c88
-rw-r--r--drivers/video/igafb.c3
-rw-r--r--drivers/video/nvidia/Makefile3
-rw-r--r--drivers/video/nvidia/nv_backlight.c175
-rw-r--r--drivers/video/nvidia/nv_proto.h10
-rw-r--r--drivers/video/nvidia/nvidia.c95
-rw-r--r--drivers/video/riva/fbdev.c222
-rw-r--r--drivers/video/tridentfb.c6
-rw-r--r--fs/9p/vfs_inode.c12
-rw-r--r--fs/Kconfig13
-rw-r--r--fs/affs/super.c12
-rw-r--r--fs/autofs4/expire.c6
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/eventpoll.c17
-rw-r--r--fs/ext2/Makefile2
-rw-r--r--fs/ext2/balloc.c22
-rw-r--r--fs/ext2/bitmap.c32
-rw-r--r--fs/ext2/dir.c3
-rw-r--r--fs/ext2/fsync.c2
-rw-r--r--fs/ext2/ialloc.c3
-rw-r--r--fs/ext2/super.c3
-rw-r--r--fs/ext3/balloc.c242
-rw-r--r--fs/ext3/ialloc.c10
-rw-r--r--fs/ext3/inode.c57
-rw-r--r--fs/ext3/ioctl.c2
-rw-r--r--fs/ext3/namei.c4
-rw-r--r--fs/ext3/resize.c81
-rw-r--r--fs/ext3/super.c46
-rw-r--r--fs/ext3/xattr.c27
-rw-r--r--fs/freevxfs/vxfs.h4
-rw-r--r--fs/freevxfs/vxfs_fshead.c12
-rw-r--r--fs/fuse/Makefile2
-rw-r--r--fs/fuse/control.c218
-rw-r--r--fs/fuse/dev.c418
-rw-r--r--fs/fuse/dir.c56
-rw-r--r--fs/fuse/file.c206
-rw-r--r--fs/fuse/fuse_i.h135
-rw-r--r--fs/fuse/inode.c178
-rw-r--r--fs/jbd/recovery.c1
-rw-r--r--fs/namei.c6
-rw-r--r--fs/ntfs/file.c26
-rw-r--r--fs/open.c2
-rw-r--r--fs/openpromfs/inode.c44
-rw-r--r--fs/select.c7
-rw-r--r--fs/smbfs/smbiod.c26
-rw-r--r--fs/ufs/balloc.c448
-rw-r--r--fs/ufs/cylinder.c49
-rw-r--r--fs/ufs/dir.c1000
-rw-r--r--fs/ufs/file.c21
-rw-r--r--fs/ufs/ialloc.c63
-rw-r--r--fs/ufs/inode.c267
-rw-r--r--fs/ufs/namei.c84
-rw-r--r--fs/ufs/super.c429
-rw-r--r--fs/ufs/truncate.c104
-rw-r--r--fs/ufs/util.c48
-rw-r--r--fs/ufs/util.h107
-rw-r--r--include/asm-alpha/floppy.h5
-rw-r--r--include/asm-arm/floppy.h2
-rw-r--r--include/asm-arm26/floppy.h2
-rw-r--r--include/asm-generic/bug.h13
-rw-r--r--include/asm-generic/percpu.h2
-rw-r--r--include/asm-i386/floppy.h5
-rw-r--r--include/asm-i386/mach-default/setup_arch.h (renamed from include/asm-i386/mach-default/setup_arch_pre.h)0
-rw-r--r--include/asm-i386/mach-default/setup_arch_post.h40
-rw-r--r--include/asm-i386/mach-visws/setup_arch.h (renamed from include/asm-i386/mach-visws/setup_arch_pre.h)0
-rw-r--r--include/asm-i386/mach-visws/setup_arch_post.h49
-rw-r--r--include/asm-i386/mach-voyager/setup_arch.h (renamed from include/asm-i386/mach-voyager/setup_arch_pre.h)2
-rw-r--r--include/asm-i386/mach-voyager/setup_arch_post.h73
-rw-r--r--include/asm-i386/setup.h15
-rw-r--r--include/asm-i386/uaccess.h50
-rw-r--r--include/asm-ia64/percpu.h2
-rw-r--r--include/asm-m68k/amigaints.h96
-rw-r--r--include/asm-m68k/apollohw.h4
-rw-r--r--include/asm-m68k/atari_stdma.h2
-rw-r--r--include/asm-m68k/atariints.h11
-rw-r--r--include/asm-m68k/bvme6000hw.h30
-rw-r--r--include/asm-m68k/cacheflush.h40
-rw-r--r--include/asm-m68k/dma-mapping.h90
-rw-r--r--include/asm-m68k/irq.h106
-rw-r--r--include/asm-m68k/mac_oss.h10
-rw-r--r--include/asm-m68k/machdep.h6
-rw-r--r--include/asm-m68k/macintosh.h10
-rw-r--r--include/asm-m68k/macints.h14
-rw-r--r--include/asm-m68k/mvme147hw.h44
-rw-r--r--include/asm-m68k/mvme16xhw.h40
-rw-r--r--include/asm-m68k/scatterlist.h9
-rw-r--r--include/asm-m68k/signal.h19
-rw-r--r--include/asm-m68k/sun3ints.h22
-rw-r--r--include/asm-m68k/traps.h7
-rw-r--r--include/asm-m68k/uaccess.h234
-rw-r--r--include/asm-mips/compat.h3
-rw-r--r--include/asm-mips/mach-generic/floppy.h2
-rw-r--r--include/asm-mips/mach-jazz/floppy.h2
-rw-r--r--include/asm-parisc/floppy.h6
-rw-r--r--include/asm-powerpc/backlight.h30
-rw-r--r--include/asm-powerpc/floppy.h3
-rw-r--r--include/asm-powerpc/percpu.h2
-rw-r--r--include/asm-ppc/floppy.h6
-rw-r--r--include/asm-s390/percpu.h2
-rw-r--r--include/asm-sh/floppy.h7
-rw-r--r--include/asm-sparc64/percpu.h2
-rw-r--r--include/asm-x86_64/floppy.h6
-rw-r--r--include/asm-x86_64/percpu.h2
-rw-r--r--include/linux/acct.h8
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/console.h1
-rw-r--r--include/linux/cpumask.h4
-rw-r--r--include/linux/eventpoll.h2
-rw-r--r--include/linux/ext3_fs.h31
-rw-r--r--include/linux/ext3_fs_i.h16
-rw-r--r--include/linux/fb.h23
-rw-r--r--include/linux/fcntl.h1
-rw-r--r--include/linux/fuse.h36
-rw-r--r--include/linux/hrtimer.h3
-rw-r--r--include/linux/ide.h3
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kthread.h65
-rw-r--r--include/linux/ktime.h8
-rw-r--r--include/linux/list.h134
-rw-r--r--include/linux/loop.h2
-rw-r--r--include/linux/migrate.h11
-rw-r--r--include/linux/mm.h3
-rw-r--r--include/linux/nbd.h12
-rw-r--r--include/linux/parport.h5
-rw-r--r--include/linux/pmu.h4
-rw-r--r--include/linux/reboot.h4
-rw-r--r--include/linux/resource.h4
-rw-r--r--include/linux/rtc-v3020.h35
-rw-r--r--include/linux/rtc.h12
-rw-r--r--include/linux/sched.h11
-rw-r--r--include/linux/synclink.h5
-rw-r--r--include/linux/sysctl.h4
-rw-r--r--include/linux/ufs_fs.h104
-rw-r--r--include/linux/ufs_fs_i.h1
-rw-r--r--init/Kconfig3
-rw-r--r--kernel/acct.c115
-rw-r--r--kernel/compat.c7
-rw-r--r--kernel/exit.c9
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/hrtimer.c15
-rw-r--r--kernel/kthread.c61
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/power/Kconfig2
-rw-r--r--kernel/power/disk.c2
-rw-r--r--kernel/power/main.c4
-rw-r--r--kernel/printk.c52
-rw-r--r--kernel/sched.c6
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/softlockup.c4
-rw-r--r--kernel/stop_machine.c17
-rw-r--r--kernel/sys.c10
-rw-r--r--kernel/sysctl.c11
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/workqueue.c30
-rw-r--r--lib/bitmap.c31
-rw-r--r--lib/crc-ccitt.c6
-rw-r--r--lib/crc16.c10
-rw-r--r--lib/crc32.c54
-rw-r--r--lib/idr.c16
-rw-r--r--lib/libcrc32c.c2
-rw-r--r--lib/radix-tree.c2
-rw-r--r--lib/reed_solomon/reed_solomon.c11
-rw-r--r--lib/vsprintf.c88
-rw-r--r--mm/filemap.c36
-rw-r--r--mm/filemap.h26
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/migrate.c30
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/pdflush.c15
-rw-r--r--mm/readahead.c16
-rw-r--r--mm/rmap.c9
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/route.c2
-rwxr-xr-xscripts/bloat-o-meter3
-rwxr-xr-xscripts/checkstack.pl14
-rwxr-xr-xscripts/kernel-doc19
-rw-r--r--sound/oss/Kconfig5
-rw-r--r--sound/oss/au1550_ac97.c9
-rw-r--r--sound/oss/emu10k1/midi.c2
-rw-r--r--sound/oss/msnd.c2
354 files changed, 13501 insertions, 9087 deletions
diff --git a/CREDITS b/CREDITS
index 9bf714a1c7d9..1d35f10ec3b2 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1573,12 +1573,8 @@ S: 160 00 Praha 6
 S: Czech Republic
 
 N: Niels Kristian Bech Jensen
-E: nkbj@image.dk
-W: http://www.image.dk/~nkbj
+E: nkbj1970@hotmail.com
 D: Miscellaneous kernel updates and fixes.
-S: Dr. Holsts Vej 34, lejl. 164
-S: DK-8230 Åbyhøj
-S: Denmark
 
 N: Michael K. Johnson
 E: johnsonm@redhat.com
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 31b727ceb127..3630a0d7695f 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -62,6 +62,8 @@
  <sect1><title>Internal Functions</title>
 !Ikernel/exit.c
 !Ikernel/signal.c
+!Iinclude/linux/kthread.h
+!Ekernel/kthread.c
  </sect1>
 
  <sect1><title>Kernel objects manipulation</title>
@@ -114,6 +116,29 @@ X!Ilib/string.c
  </sect1>
  </chapter>
 
+ <chapter id="kernel-lib">
+ <title>Basic Kernel Library Functions</title>
+
+ <para>
+ The Linux kernel provides more basic utility functions.
+ </para>
+
+ <sect1><title>Bitmap Operations</title>
+!Elib/bitmap.c
+!Ilib/bitmap.c
+ </sect1>
+
+ <sect1><title>Command-line Parsing</title>
+!Elib/cmdline.c
+ </sect1>
+
+ <sect1><title>CRC Functions</title>
+!Elib/crc16.c
+!Elib/crc32.c
+!Elib/crc-ccitt.c
+ </sect1>
+ </chapter>
+
  <chapter id="mm">
  <title>Memory Management in Linux</title>
  <sect1><title>The Slab Cache</title>
@@ -281,12 +306,13 @@ X!Ekernel/module.c
  <sect1><title>MTRR Handling</title>
 !Earch/i386/kernel/cpu/mtrr/main.c
  </sect1>
+
  <sect1><title>PCI Support Library</title>
 !Edrivers/pci/pci.c
 !Edrivers/pci/pci-driver.c
 !Edrivers/pci/remove.c
 !Edrivers/pci/pci-acpi.c
-<!-- kerneldoc does not understand to __devinit
+<!-- kerneldoc does not understand __devinit
 X!Edrivers/pci/search.c
  -->
 !Edrivers/pci/msi.c
@@ -315,6 +341,13 @@ X!Earch/i386/kernel/mca.c
  </sect1>
  </chapter>
 
+ <chapter id="firmware">
+ <title>Firmware Interfaces</title>
+ <sect1><title>DMI Interfaces</title>
+!Edrivers/firmware/dmi_scan.c
+ </sect1>
+ </chapter>
+
  <chapter id="devfs">
  <title>The Device File System</title>
 !Efs/devfs/base.c
@@ -403,7 +436,6 @@ X!Edrivers/pnp/system.c
  </sect1>
  </chapter>
 
-
  <chapter id="blkdev">
  <title>Block Devices</title>
 !Eblock/ll_rw_blk.c
@@ -414,6 +446,14 @@ X!Edrivers/pnp/system.c
 !Edrivers/char/misc.c
  </chapter>
 
+ <chapter id="parportdev">
+ <title>Parallel Port Devices</title>
+!Iinclude/linux/parport.h
+!Edrivers/parport/ieee1284.c
+!Edrivers/parport/share.c
+!Idrivers/parport/daisy.c
+ </chapter>
+
  <chapter id="viddev">
  <title>Video4Linux</title>
 !Edrivers/media/video/videodev.c
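
As a quick taste of the helpers the new "Basic Kernel Library Functions" chapter documents, here is a small, hypothetical demo module (not part of this patch): crc32_le(), bitmap_zero(), set_bit() and test_bit() are real kernel APIs, while the module itself and its names are only illustrative.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/crc32.h>

static int __init libdemo_init(void)
{
	DECLARE_BITMAP(mask, 64);
	const unsigned char buf[] = "abc";
	u32 crc;

	bitmap_zero(mask, 64);
	set_bit(3, mask);

	/* Seed value is arbitrary here; callers pick their own convention. */
	crc = crc32_le(~0, buf, sizeof(buf) - 1);

	printk(KERN_INFO "bit 3 set: %d, crc32_le(\"abc\") = %08x\n",
	       test_bit(3, mask), crc);
	return 0;
}

static void __exit libdemo_exit(void)
{
}

module_init(libdemo_init);
module_exit(libdemo_exit);
MODULE_LICENSE("GPL");
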
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 49e27cc19385..1d50cf0c905e 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -144,9 +144,47 @@ over a rather long period of time, but improvements are always welcome!
  whether the increased speed is worth it.
 
 8. Although synchronize_rcu() is a bit slower than is call_rcu(),
- it usually results in simpler code. So, unless update performance
- is important or the updaters cannot block, synchronize_rcu()
- should be used in preference to call_rcu().
+ it usually results in simpler code. So, unless update
+ performance is critically important or the updaters cannot block,
+ synchronize_rcu() should be used in preference to call_rcu().
+
+ An especially important property of the synchronize_rcu()
+ primitive is that it automatically self-limits: if grace periods
+ are delayed for whatever reason, then the synchronize_rcu()
+ primitive will correspondingly delay updates. In contrast,
+ code using call_rcu() should explicitly limit update rate in
+ cases where grace periods are delayed, as failing to do so can
+ result in excessive realtime latencies or even OOM conditions.
+
+ Ways of gaining this self-limiting property when using call_rcu()
+ include:
+
+ a. Keeping a count of the number of data-structure elements
+ used by the RCU-protected data structure, including those
+ waiting for a grace period to elapse. Enforce a limit
+ on this number, stalling updates as needed to allow
+ previously deferred frees to complete.
+
+ Alternatively, limit only the number awaiting deferred
+ free rather than the total number of elements.
+
+ b. Limiting update rate. For example, if updates occur only
+ once per hour, then no explicit rate limiting is required,
+ unless your system is already badly broken. The dcache
+ subsystem takes this approach -- updates are guarded
+ by a global lock, limiting their rate.
+
+ c. Trusted update -- if updates can only be done manually by
+ superuser or some other trusted user, then it might not
+ be necessary to automatically limit them. The theory
+ here is that superuser already has lots of ways to crash
+ the machine.
+
+ d. Use call_rcu_bh() rather than call_rcu(), in order to take
+ advantage of call_rcu_bh()'s faster grace periods.
+
+ e. Periodically invoke synchronize_rcu(), permitting a limited
+ number of updates per grace period.
 
 9. All RCU list-traversal primitives, which include
  list_for_each_rcu(), list_for_each_entry_rcu(),
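
To make approach (a) concrete, here is a minimal sketch assuming a list of dynamically allocated elements: call_rcu(), synchronize_rcu(), kfree() and the atomic helpers are real kernel APIs, while struct my_elem, MY_MAX_DEFERRED and the surrounding policy are invented for illustration.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/atomic.h>	/* <asm/atomic.h> on 2.6-era trees */

#define MY_MAX_DEFERRED	1000

struct my_elem {
	struct list_head list;
	struct rcu_head rcu;
	int data;
};

static atomic_t my_deferred_count = ATOMIC_INIT(0);

static void my_elem_free_rcu(struct rcu_head *head)
{
	struct my_elem *e = container_of(head, struct my_elem, rcu);

	kfree(e);
	atomic_dec(&my_deferred_count);
}

/* Updater side: called after unlinking "e" from the RCU-protected list. */
static void my_elem_defer_free(struct my_elem *e)
{
	if (atomic_inc_return(&my_deferred_count) > MY_MAX_DEFERRED) {
		/*
		 * Too many frees outstanding: stall this (blockable)
		 * updater for a grace period so the queued callbacks
		 * get a chance to drain.
		 */
		synchronize_rcu();
	}
	call_rcu(&e->rcu, my_elem_free_rcu);
}

The point is only the shape of the self-limiting: an updater that has queued too many deferred frees waits out a grace period before queueing more.
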
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 6e459420ee9f..4f41a60e5111 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -184,7 +184,17 @@ synchronize_rcu()
  blocking, it registers a function and argument which are invoked
  after all ongoing RCU read-side critical sections have completed.
  This callback variant is particularly useful in situations where
- it is illegal to block.
+ it is illegal to block or where update-side performance is
+ critically important.
+
+ However, the call_rcu() API should not be used lightly, as use
+ of the synchronize_rcu() API generally results in simpler code.
+ In addition, the synchronize_rcu() API has the nice property
+ of automatically limiting update rate should grace periods
+ be delayed. This property results in system resilience in face
+ of denial-of-service attacks. Code using call_rcu() should limit
+ update rate in order to gain this same sort of resilience. See
+ checklist.txt for some approaches to limiting the update rate.
 
 rcu_assign_pointer()
 
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index b2f593fc76ca..4aaf68fafebe 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -3,7 +3,7 @@
 
  Maintained by Torben Mathiasen <device@lanana.org>
 
- Last revised: 01 March 2006
+ Last revised: 15 May 2006
 
 This list is the Linux Device List, the official registry of allocated
 device numbers and /dev directory nodes for the Linux operating
@@ -2791,6 +2791,7 @@ Your cooperation is appreciated.
  170 = /dev/ttyNX0 Hilscher netX serial port 0
  ...
  185 = /dev/ttyNX15 Hilscher netX serial port 15
+ 186 = /dev/ttyJ0 JTAG1 DCC protocol based serial port emulation
 
 205 char Low-density serial ports (alternate device)
  0 = /dev/culu0 Callout device for ttyLU0
@@ -3108,6 +3109,10 @@ Your cooperation is appreciated.
  ...
  240 = /dev/rfdp 16th RFD FTL layer
 
+257 char Phoenix Technologies Cryptographic Services Driver
+ 0 = /dev/ptlsec Crypto Services Driver
+
+
 
  **** ADDITIONAL /dev DIRECTORY ENTRIES
 
diff --git a/Documentation/filesystems/fuse.txt b/Documentation/filesystems/fuse.txt
index 33f74310d161..a584f05403a4 100644
--- a/Documentation/filesystems/fuse.txt
+++ b/Documentation/filesystems/fuse.txt
@@ -18,6 +18,14 @@ Non-privileged mount (or user mount):
 user. NOTE: this is not the same as mounts allowed with the "user"
 option in /etc/fstab, which is not discussed here.
 
+Filesystem connection:
+
+ A connection between the filesystem daemon and the kernel. The
+ connection exists until either the daemon dies, or the filesystem is
+ umounted. Note that detaching (or lazy umounting) the filesystem
+ does _not_ break the connection, in this case it will exist until
+ the last reference to the filesystem is released.
+
 Mount owner:
 
  The user who does the mounting.
@@ -86,16 +94,20 @@ Mount options
  The default is infinite. Note that the size of read requests is
  limited anyway to 32 pages (which is 128kbyte on i386).
 
-Sysfs
-~~~~~
+Control filesystem
+~~~~~~~~~~~~~~~~~~
+
+There's a control filesystem for FUSE, which can be mounted by:
 
-FUSE sets up the following hierarchy in sysfs:
+ mount -t fusectl none /sys/fs/fuse/connections
 
- /sys/fs/fuse/connections/N/
+Mounting it under the '/sys/fs/fuse/connections' directory makes it
+backwards compatible with earlier versions.
 
-where N is an increasing number allocated to each new connection.
+Under the fuse control filesystem each connection has a directory
+named by a unique number.
 
-For each connection the following attributes are defined:
+For each connection the following files exist within this directory:
 
  'waiting'
 
@@ -110,7 +122,47 @@ For each connection the following attributes are defined:
  connection. This means that all waiting requests will be aborted an
  error returned for all aborted and new requests.
 
-Only a privileged user may read or write these attributes.
+Only the owner of the mount may read or write these files.
+
+Interrupting filesystem operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If a process issuing a FUSE filesystem request is interrupted, the
+following will happen:
+
+ 1) If the request is not yet sent to userspace AND the signal is
+ fatal (SIGKILL or unhandled fatal signal), then the request is
+ dequeued and returns immediately.
+
+ 2) If the request is not yet sent to userspace AND the signal is not
+ fatal, then an 'interrupted' flag is set for the request. When
+ the request has been successfully transfered to userspace and
+ this flag is set, an INTERRUPT request is queued.
+
+ 3) If the request is already sent to userspace, then an INTERRUPT
+ request is queued.
+
+INTERRUPT requests take precedence over other requests, so the
+userspace filesystem will receive queued INTERRUPTs before any others.
+
+The userspace filesystem may ignore the INTERRUPT requests entirely,
+or may honor them by sending a reply to the _original_ request, with
+the error set to EINTR.
+
+It is also possible that there's a race between processing the
+original request and it's INTERRUPT request. There are two possibilities:
+
+ 1) The INTERRUPT request is processed before the original request is
+ processed
+
+ 2) The INTERRUPT request is processed after the original request has
+ been answered
+
+If the filesystem cannot find the original request, it should wait for
+some timeout and/or a number of new requests to arrive, after which it
+should reply to the INTERRUPT request with an EAGAIN error. In case
+1) the INTERRUPT request will be requeued. In case 2) the INTERRUPT
+reply will be ignored.
 
 Aborting a filesystem connection
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -139,8 +191,8 @@ the filesystem. There are several ways to do this:
  - Use forced umount (umount -f). Works in all cases but only if
  filesystem is still attached (it hasn't been lazy unmounted)
 
- - Abort filesystem through the sysfs interface. Most powerful
- method, always works.
+ - Abort filesystem through the FUSE control filesystem. Most
+ powerful method, always works.
 
 How do non-privileged mounts work?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -304,25 +356,7 @@ Scenario 1 - Simple deadlock
  | | for "file"]
  | | *DEADLOCK*
 
-The solution for this is to allow requests to be interrupted while
-they are in userspace:
-
- | [interrupted by signal] |
- | <fuse_unlink() |
- | [release semaphore] | [semaphore acquired]
- | <sys_unlink() |
- | | >fuse_unlink()
- | | [queue req on fc->pending]
- | | [wake up fc->waitq]
- | | [sleep on req->waitq]
-
-If the filesystem daemon was single threaded, this will stop here,
-since there's no other thread to dequeue and execute the request.
-In this case the solution is to kill the FUSE daemon as well. If
-there are multiple serving threads, you just have to kill them as
-long as any remain.
-
-Moral: a filesystem which deadlocks, can soon find itself dead.
+The solution for this is to allow the filesystem to be aborted.
 
 Scenario 2 - Tricky deadlock
 ----------------------------
@@ -355,24 +389,14 @@ but is caused by a pagefault.
  | | [lock page]
  | | * DEADLOCK *
 
-Solution is again to let the the request be interrupted (not
-elaborated further).
-
-An additional problem is that while the write buffer is being
-copied to the request, the request must not be interrupted. This
-is because the destination address of the copy may not be valid
-after the request is interrupted.
-
-This is solved with doing the copy atomically, and allowing
-interruption while the page(s) belonging to the write buffer are
-faulted with get_user_pages(). The 'req->locked' flag indicates
-when the copy is taking place, and interruption is delayed until
-this flag is unset.
+Solution is basically the same as above.
 
-Scenario 3 - Tricky deadlock with asynchronous read
----------------------------------------------------
+An additional problem is that while the write buffer is being copied
+to the request, the request must not be interrupted/aborted. This is
+because the destination address of the copy may not be valid after the
+request has returned.
 
-The same situation as above, except thread-1 will wait on page lock
-and hence it will be uninterruptible as well. The solution is to
-abort the connection with forced umount (if mount is attached) or
-through the abort attribute in sysfs.
+This is solved with doing the copy atomically, and allowing abort
+while the page(s) belonging to the write buffer are faulted with
+get_user_pages(). The 'req->locked' flag indicates when the copy is
+taking place, and abort is delayed until this flag is unset.
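
The interrupt rules above reduce to one decision in the daemon. The following stand-alone sketch (plain C with a made-up request table, not the libfuse API) shows that decision: answer the original request with EINTR if it is still outstanding, otherwise answer the INTERRUPT itself with EAGAIN so the kernel can requeue or drop it.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* One entry per request the daemon has received but not yet answered. */
struct pending_req {
	uint64_t unique;	/* kernel-assigned request id, 0 = free slot */
	int	 answered;
};

#define TABLE_SIZE 64
static struct pending_req table[TABLE_SIZE];

static struct pending_req *find_request(uint64_t unique)
{
	for (size_t i = 0; i < TABLE_SIZE; i++)
		if (table[i].unique == unique && !table[i].answered)
			return &table[i];
	return NULL;
}

/*
 * Called when an INTERRUPT targeting 'target' arrives.  Returns the errno
 * to send and sets *answer_original to say whether the reply goes to the
 * original request or to the INTERRUPT itself.
 */
static int handle_interrupt(uint64_t target, int *answer_original)
{
	struct pending_req *orig = find_request(target);

	if (orig) {
		orig->answered = 1;
		*answer_original = 1;	/* honor it: fail the original */
		return EINTR;
	}

	/*
	 * Original unknown: it has either not arrived yet (case 1) or was
	 * already answered (case 2).  A real daemon would wait a bit first,
	 * then reply EAGAIN to the INTERRUPT; the kernel requeues it in
	 * case 1 and ignores the reply in case 2.
	 */
	*answer_original = 0;
	return EAGAIN;
}
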
diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.txt b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
index 60ab61e54e8a..25981e2e51be 100644
--- a/Documentation/filesystems/ramfs-rootfs-initramfs.txt
+++ b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
@@ -70,11 +70,13 @@ tmpfs mounts. See Documentation/filesystems/tmpfs.txt for more information.
 What is rootfs?
 ---------------
 
-Rootfs is a special instance of ramfs, which is always present in 2.6 systems.
-(It's used internally as the starting and stopping point for searches of the
-kernel's doubly-linked list of mount points.)
+Rootfs is a special instance of ramfs (or tmpfs, if that's enabled), which is
+always present in 2.6 systems. You can't unmount rootfs for approximately the
+same reason you can't kill the init process; rather than having special code
+to check for and handle an empty list, it's smaller and simpler for the kernel
+to just make sure certain lists can't become empty.
 
-Most systems just mount another filesystem over it and ignore it. The
+Most systems just mount another filesystem over rootfs and ignore it. The
 amount of space an empty instance of ramfs takes up is tiny.
 
 What is initramfs?
@@ -92,14 +94,16 @@ out of that.
 
 All this differs from the old initrd in several ways:
 
- - The old initrd was a separate file, while the initramfs archive is linked
- into the linux kernel image. (The directory linux-*/usr is devoted to
- generating this archive during the build.)
+ - The old initrd was always a separate file, while the initramfs archive is
+ linked into the linux kernel image. (The directory linux-*/usr is devoted
+ to generating this archive during the build.)
 
  - The old initrd file was a gzipped filesystem image (in some file format,
- such as ext2, that had to be built into the kernel), while the new
+ such as ext2, that needed a driver built into the kernel), while the new
  initramfs archive is a gzipped cpio archive (like tar only simpler,
- see cpio(1) and Documentation/early-userspace/buffer-format.txt).
+ see cpio(1) and Documentation/early-userspace/buffer-format.txt). The
+ kernel's cpio extraction code is not only extremely small, it's also
+ __init data that can be discarded during the boot process.
 
  - The program run by the old initrd (which was called /initrd, not /init) did
  some setup and then returned to the kernel, while the init program from
@@ -124,13 +128,14 @@ Populating initramfs:
 
 The 2.6 kernel build process always creates a gzipped cpio format initramfs
 archive and links it into the resulting kernel binary. By default, this
-archive is empty (consuming 134 bytes on x86). The config option
-CONFIG_INITRAMFS_SOURCE (for some reason buried under devices->block devices
-in menuconfig, and living in usr/Kconfig) can be used to specify a source for
-the initramfs archive, which will automatically be incorporated into the
-resulting binary. This option can point to an existing gzipped cpio archive, a
-directory containing files to be archived, or a text file specification such
-as the following example:
+archive is empty (consuming 134 bytes on x86).
+
+The config option CONFIG_INITRAMFS_SOURCE (for some reason buried under
+devices->block devices in menuconfig, and living in usr/Kconfig) can be used
+to specify a source for the initramfs archive, which will automatically be
+incorporated into the resulting binary. This option can point to an existing
+gzipped cpio archive, a directory containing files to be archived, or a text
+file specification such as the following example:
 
  dir /dev 755 0 0
  nod /dev/console 644 0 0 c 5 1
@@ -146,23 +151,84 @@ as the following example:
 Run "usr/gen_init_cpio" (after the kernel build) to get a usage message
 documenting the above file format.
 
-One advantage of the text file is that root access is not required to
+One advantage of the configuration file is that root access is not required to
 set permissions or create device nodes in the new archive. (Note that those
 two example "file" entries expect to find files named "init.sh" and "busybox" in
 a directory called "initramfs", under the linux-2.6.* directory. See
 Documentation/early-userspace/README for more details.)
 
-The kernel does not depend on external cpio tools, gen_init_cpio is created
-from usr/gen_init_cpio.c which is entirely self-contained, and the kernel's
-boot-time extractor is also (obviously) self-contained. However, if you _do_
-happen to have cpio installed, the following command line can extract the
-generated cpio image back into its component files:
+The kernel does not depend on external cpio tools. If you specify a
+directory instead of a configuration file, the kernel's build infrastructure
+creates a configuration file from that directory (usr/Makefile calls
+scripts/gen_initramfs_list.sh), and proceeds to package up that directory
+using the config file (by feeding it to usr/gen_init_cpio, which is created
+from usr/gen_init_cpio.c). The kernel's build-time cpio creation code is
+entirely self-contained, and the kernel's boot-time extractor is also
+(obviously) self-contained.
+
+The one thing you might need external cpio utilities installed for is creating
+or extracting your own preprepared cpio files to feed to the kernel build
+(instead of a config file or directory).
+
+The following command line can extract a cpio image (either by the above script
+or by the kernel build) back into its component files:
 
  cpio -i -d -H newc -F initramfs_data.cpio --no-absolute-filenames
 
+The following shell script can create a prebuilt cpio archive you can
+use in place of the above config file:
+
+ #!/bin/sh
+
+ # Copyright 2006 Rob Landley <rob@landley.net> and TimeSys Corporation.
+ # Licensed under GPL version 2
+
+ if [ $# -ne 2 ]
+ then
+ echo "usage: mkinitramfs directory imagename.cpio.gz"
+ exit 1
+ fi
+
+ if [ -d "$1" ]
+ then
+ echo "creating $2 from $1"
+ (cd "$1"; find . | cpio -o -H newc | gzip) > "$2"
+ else
+ echo "First argument must be a directory"
+ exit 1
+ fi
+
+Note: The cpio man page contains some bad advice that will break your initramfs
+archive if you follow it. It says "A typical way to generate the list
+of filenames is with the find command; you should give find the -depth option
+to minimize problems with permissions on directories that are unwritable or not
+searchable." Don't do this when creating initramfs.cpio.gz images, it won't
+work. The Linux kernel cpio extractor won't create files in a directory that
+doesn't exist, so the directory entries must go before the files that go in
+those directories. The above script gets them in the right order.
+
+External initramfs images:
+--------------------------
+
+If the kernel has initrd support enabled, an external cpio.gz archive can also
+be passed into a 2.6 kernel in place of an initrd. In this case, the kernel
+will autodetect the type (initramfs, not initrd) and extract the external cpio
+archive into rootfs before trying to run /init.
+
+This has the memory efficiency advantages of initramfs (no ramdisk block
+device) but the separate packaging of initrd (which is nice if you have
+non-GPL code you'd like to run from initramfs, without conflating it with
+the GPL licensed Linux kernel binary).
+
+It can also be used to supplement the kernel's built-in initamfs image. The
+files in the external archive will overwrite any conflicting files in
+the built-in initramfs archive. Some distributors also prefer to customize
+a single kernel image with task-specific initramfs images, without recompiling.
+
 Contents of initramfs:
 ----------------------
 
+An initramfs archive is a complete self-contained root filesystem for Linux.
 If you don't already understand what shared libraries, devices, and paths
 you need to get a minimal root filesystem up and running, here are some
 references:
@@ -176,13 +242,36 @@ code against, along with some related utilities. It is BSD licensed.
 
 I use uClibc (http://www.uclibc.org) and busybox (http://www.busybox.net)
 myself. These are LGPL and GPL, respectively. (A self-contained initramfs
-package is planned for the busybox 1.2 release.)
+package is planned for the busybox 1.3 release.)
 
 In theory you could use glibc, but that's not well suited for small embedded
 uses like this. (A "hello world" program statically linked against glibc is
 over 400k. With uClibc it's 7k. Also note that glibc dlopens libnss to do
 name lookups, even when otherwise statically linked.)
 
+A good first step is to get initramfs to run a statically linked "hello world"
+program as init, and test it under an emulator like qemu (www.qemu.org) or
+User Mode Linux, like so:
+
+ cat > hello.c << EOF
+ #include <stdio.h>
+ #include <unistd.h>
+
+ int main(int argc, char *argv[])
+ {
+ printf("Hello world!\n");
+ sleep(999999999);
+ }
+ EOF
+ gcc -static hello2.c -o init
+ echo init | cpio -o -H newc | gzip > test.cpio.gz
+ # Testing external initramfs using the initrd loading mechanism.
+ qemu -kernel /boot/vmlinuz -initrd test.cpio.gz /dev/zero
+
+When debugging a normal root filesystem, it's nice to be able to boot with
+"init=/bin/sh". The initramfs equivalent is "rdinit=/bin/sh", and it's
+just as useful.
+
 Why cpio rather than tar?
 -------------------------
 
@@ -241,7 +330,7 @@ the above threads) is:
 Future directions:
 ------------------
 
-Today (2.6.14), initramfs is always compiled in, but not always used. The
+Today (2.6.16), initramfs is always compiled in, but not always used. The
 kernel falls back to legacy boot code that is reached only if initramfs does
 not contain an /init program. The fallback is legacy code, there to ensure a
 smooth transition and allowing early boot functionality to gradually move to
@@ -258,8 +347,9 @@ and so on.
 
 This kind of complexity (which inevitably includes policy) is rightly handled
 in userspace. Both klibc and busybox/uClibc are working on simple initramfs
-packages to drop into a kernel build, and when standard solutions are ready
-and widely deployed, the kernel's legacy early boot code will become obsolete
-and a candidate for the feature removal schedule.
+packages to drop into a kernel build.
 
-But that's a while off yet.
+The klibc package has now been accepted into Andrew Morton's 2.6.17-mm tree.
+The kernel's current early boot code (partition detection, etc) will probably
+be migrated into a default initramfs, automatically created and used by the
+kernel build.
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 212cf3c21abf..08bafa8c1caa 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -1,155 +1,325 @@
1Documentation for kdump - the kexec-based crash dumping solution 1================================================================
2Documentation for Kdump - The kexec-based Crash Dumping Solution
2================================================================ 3================================================================
3 4
4DESIGN 5This document includes overview, setup and installation, and analysis
5====== 6information.
6 7
7Kdump uses kexec to reboot to a second kernel whenever a dump needs to be 8Overview
8taken. This second kernel is booted with very little memory. The first kernel 9========
9reserves the section of memory that the second kernel uses. This ensures that
10on-going DMA from the first kernel does not corrupt the second kernel.
11 10
12All the necessary information about Core image is encoded in ELF format and 11Kdump uses kexec to quickly boot to a dump-capture kernel whenever a
13stored in reserved area of memory before crash. Physical address of start of 12dump of the system kernel's memory needs to be taken (for example, when
14ELF header is passed to new kernel through command line parameter elfcorehdr=. 13the system panics). The system kernel's memory image is preserved across
14the reboot and is accessible to the dump-capture kernel.
15 15
16On i386, the first 640 KB of physical memory is needed to boot, irrespective 16You can use common Linux commands, such as cp and scp, to copy the
17of where the kernel loads. Hence, this region is backed up by kexec just before 17memory image to a dump file on the local disk, or across the network to
18rebooting into the new kernel. 18a remote system.
19 19
20In the second kernel, "old memory" can be accessed in two ways. 20Kdump and kexec are currently supported on the x86, x86_64, and ppc64
21architectures.
21 22
22- The first one is through a /dev/oldmem device interface. A capture utility 23When the system kernel boots, it reserves a small section of memory for
23 can read the device file and write out the memory in raw format. This is raw 24the dump-capture kernel. This ensures that ongoing Direct Memory Access
24 dump of memory and analysis/capture tool should be intelligent enough to 25(DMA) from the system kernel does not corrupt the dump-capture kernel.
25 determine where to look for the right information. ELF headers (elfcorehdr=) 26The kexec -p command loads the dump-capture kernel into this reserved
26 can become handy here. 27memory.
27 28
28- The second interface is through /proc/vmcore. This exports the dump as an ELF 29On x86 machines, the first 640 KB of physical memory is needed to boot,
29 format file which can be written out using any file copy command 30regardless of where the kernel loads. Therefore, kexec backs up this
30 (cp, scp, etc). Further, gdb can be used to perform limited debugging on 31region just before rebooting into the dump-capture kernel.
31 the dump file. This method ensures methods ensure that there is correct
32 ordering of the dump pages (corresponding to the first 640 KB that has been
33 relocated).
34 32
35SETUP 33All of the necessary information about the system kernel's core image is
36===== 34encoded in the ELF format, and stored in a reserved area of memory
35before a crash. The physical address of the start of the ELF header is
36passed to the dump-capture kernel through the elfcorehdr= boot
37parameter.
38
39With the dump-capture kernel, you can access the memory image, or "old
40memory," in two ways:
41
42- Through a /dev/oldmem device interface. A capture utility can read the
43 device file and write out the memory in raw format. This is a raw dump
44 of memory. Analysis and capture tools must be intelligent enough to
45 determine where to look for the right information.
46
47- Through /proc/vmcore. This exports the dump as an ELF-format file that
48 you can write out using file copy commands such as cp or scp. Further,
49 you can use analysis tools such as the GNU Debugger (GDB) and the Crash
50 tool to debug the dump file. This method ensures that the dump pages are
51 correctly ordered.
52
53
54Setup and Installation
55======================
56
57Install kexec-tools and the Kdump patch
58---------------------------------------
59
601) Login as the root user.
61
622) Download the kexec-tools user-space package from the following URL:
63
64 http://www.xmission.com/~ebiederm/files/kexec/kexec-tools-1.101.tar.gz
65
663) Unpack the tarball with the tar command, as follows:
67
68 tar xvpzf kexec-tools-1.101.tar.gz
69
704) Download the latest consolidated Kdump patch from the following URL:
71
72 http://lse.sourceforge.net/kdump/
73
74 (This location is being used until all the user-space Kdump patches
75 are integrated with the kexec-tools package.)
76
775) Change to the kexec-tools-1.101 directory, as follows:
78
79 cd kexec-tools-1.101
80
816) Apply the consolidated patch to the kexec-tools-1.101 source tree
82 with the patch command, as follows. (Modify the path to the downloaded
83 patch as necessary.)
84
85 patch -p1 < /path-to-kdump-patch/kexec-tools-1.101-kdump.patch
86
877) Configure the package, as follows:
88
89 ./configure
90
918) Compile the package, as follows:
92
93 make
94
959) Install the package, as follows:
96
97 make install
98
99
100Download and build the system and dump-capture kernels
101------------------------------------------------------
102
103Download the mainline (vanilla) kernel source code (2.6.13-rc1 or newer)
104from http://www.kernel.org. Two kernels must be built: a system kernel
105and a dump-capture kernel. Use the following steps to configure these
106kernels with the necessary kexec and Kdump features:
107
108System kernel
109-------------
110
1111) Enable "kexec system call" in "Processor type and features."
112
113 CONFIG_KEXEC=y
114
1152) Enable "sysfs file system support" in "Filesystem" -> "Pseudo
116 filesystems." This is usually enabled by default.
117
118 CONFIG_SYSFS=y
119
120 Note that "sysfs file system support" might not appear in the "Pseudo
121 filesystems" menu if "Configure standard kernel features (for small
122 systems)" is not enabled in "General Setup." In this case, check the
123 .config file itself to ensure that sysfs is turned on, as follows:
124
125 grep 'CONFIG_SYSFS' .config
126
1273) Enable "Compile the kernel with debug info" in "Kernel hacking."
128
129 CONFIG_DEBUG_INFO=Y
130
131 This causes the kernel to be built with debug symbols. The dump
132 analysis tools require a vmlinux with debug symbols in order to read
133 and analyze a dump file.
134
1354) Make and install the kernel and its modules. Update the boot loader
136 (such as grub, yaboot, or lilo) configuration files as necessary.
137
1385) Boot the system kernel with the boot parameter "crashkernel=Y@X",
139 where Y specifies how much memory to reserve for the dump-capture kernel
140 and X specifies the beginning of this reserved memory. For example,
141 "crashkernel=64M@16M" tells the system kernel to reserve 64 MB of memory
142 starting at physical address 0x01000000 for the dump-capture kernel.
143
144 On x86 and x86_64, use "crashkernel=64M@16M".
145
146 On ppc64, use "crashkernel=128M@32M".
147
148
149The dump-capture kernel
150-----------------------
37 151
381) Download the upstream kexec-tools userspace package from 1521) Under "General setup," append "-kdump" to the current string in
39 http://www.xmission.com/~ebiederm/files/kexec/kexec-tools-1.101.tar.gz. 153 "Local version."
40 154
41 Apply the latest consolidated kdump patch on top of kexec-tools-1.101 1552) On x86, enable high memory support under "Processor type and
42 from http://lse.sourceforge.net/kdump/. This arrangment has been made 156 features":
43 till all the userspace patches supporting kdump are integrated with 157
44 upstream kexec-tools userspace. 158 CONFIG_HIGHMEM64G=y
45 159 or
462) Download and build the appropriate (2.6.13-rc1 onwards) vanilla kernels. 160 CONFIG_HIGHMEM4G
47 Two kernels need to be built in order to get this feature working. 161
48 Following are the steps to properly configure the two kernels specific 1623) On x86 and x86_64, disable symmetric multi-processing support
49 to kexec and kdump features: 163 under "Processor type and features":
50 164
51 A) First kernel or regular kernel: 165 CONFIG_SMP=n
52 ---------------------------------- 166 (If CONFIG_SMP=y, then specify maxcpus=1 on the kernel command line
53 a) Enable "kexec system call" feature (in Processor type and features). 167 when loading the dump-capture kernel, see section "Load the Dump-capture
54 CONFIG_KEXEC=y 168 Kernel".)
55 b) Enable "sysfs file system support" (in Pseudo filesystems). 169
56 CONFIG_SYSFS=y 1704) On ppc64, disable NUMA support and enable EMBEDDED support:
57 c) make 171
58 d) Boot into first kernel with the command line parameter "crashkernel=Y@X". 172 CONFIG_NUMA=n
59 Use appropriate values for X and Y. Y denotes how much memory to reserve 173 CONFIG_EMBEDDED=y
60 for the second kernel, and X denotes at what physical address the 174 CONFIG_EEH=N for the dump-capture kernel
61 reserved memory section starts. For example: "crashkernel=64M@16M". 175
62 1765) Enable "kernel crash dumps" support under "Processor type and
63 177 features":
64 B) Second kernel or dump capture kernel: 178
65 --------------------------------------- 179 CONFIG_CRASH_DUMP=y
66 a) For i386 architecture enable Highmem support 180
67 CONFIG_HIGHMEM=y 1816) Use a suitable value for "Physical address where the kernel is
68 b) Enable "kernel crash dumps" feature (under "Processor type and features") 182 loaded" (under "Processor type and features"). This only appears when
69 CONFIG_CRASH_DUMP=y 183 "kernel crash dumps" is enabled. By default this value is 0x1000000
70 c) Make sure a suitable value for "Physical address where the kernel is 184 (16MB). It should be the same as X in the "crashkernel=Y@X" boot
71 loaded" (under "Processor type and features"). By default this value 185 parameter discussed above.
72 is 0x1000000 (16MB) and it should be same as X (See option d above), 186
73 e.g., 16 MB or 0x1000000. 187 On x86 and x86_64, use "CONFIG_PHYSICAL_START=0x1000000".
74 CONFIG_PHYSICAL_START=0x1000000 188
75 d) Enable "/proc/vmcore support" (Optional, under "Pseudo filesystems"). 189 On ppc64 the value is automatically set at 32MB when
76 CONFIG_PROC_VMCORE=y 190 CONFIG_CRASH_DUMP is set.
77 191
783) After booting to regular kernel or first kernel, load the second kernel 1926) Optionally enable "/proc/vmcore support" under "Filesystems" ->
79 using the following command: 193 "Pseudo filesystems".
80 194
81 kexec -p <second-kernel> --args-linux --elf32-core-headers 195 CONFIG_PROC_VMCORE=y
82 --append="root=<root-dev> init 1 irqpoll maxcpus=1" 196 (CONFIG_PROC_VMCORE is set by default when CONFIG_CRASH_DUMP is selected.)
83 197
84 Notes: 1987) Make and install the kernel and its modules. DO NOT add this kernel
85 ====== 199 to the boot loader configuration files.
86 i) <second-kernel> has to be a vmlinux image ie uncompressed elf image. 200
87 bzImage will not work, as of now. 201
88 ii) --args-linux has to be speicfied as if kexec it loading an elf image, 202Load the Dump-capture Kernel
89 it needs to know that the arguments supplied are of linux type. 203============================
90 iii) By default ELF headers are stored in ELF64 format to support systems 204
91 with more than 4GB memory. Option --elf32-core-headers forces generation 205After booting to the system kernel, load the dump-capture kernel using
92 of ELF32 headers. The reason for this option being, as of now gdb can 206the following command:
93 not open vmcore file with ELF64 headers on a 32 bit systems. So ELF32 207
94 headers can be used if one has non-PAE systems and hence memory less 208 kexec -p <dump-capture-kernel> \
95 than 4GB. 209 --initrd=<initrd-for-dump-capture-kernel> --args-linux \
96 iv) Specify "irqpoll" as command line parameter. This reduces driver 210 --append="root=<root-dev> init 1 irqpoll"
97 initialization failures in second kernel due to shared interrupts. 211
98 v) <root-dev> needs to be specified in a format corresponding to the root 212
99 device name in the output of mount command. 213Notes on loading the dump-capture kernel:
100 vi) If you have built the drivers required to mount root file system as 214
101 modules in <second-kernel>, then, specify 215* <dump-capture-kernel> must be a vmlinux image (that is, an
102 --initrd=<initrd-for-second-kernel>. 216 uncompressed ELF image). bzImage does not work at this time.
103 vii) Specify maxcpus=1 as, if during first kernel run, if panic happens on 217
104 non-boot cpus, second kernel doesn't seem to be boot up all the cpus. 218* By default, the ELF headers are stored in ELF64 format to support
105 The other option is to always built the second kernel without SMP 219 systems with more than 4GB memory. The --elf32-core-headers option can
106 support ie CONFIG_SMP=n 220 be used to force the generation of ELF32 headers. This is necessary
107 221 because GDB currently cannot open vmcore files with ELF64 headers on
1084) After successfully loading the second kernel as above, if a panic occurs 222 32-bit systems. ELF32 headers can be used on non-PAE systems (that is,
109 system reboots into the second kernel. A module can be written to force 223 less than 4GB of memory).
110 the panic or "ALT-SysRq-c" can be used initiate a crash dump for testing 224
111 purposes. 225* The "irqpoll" boot parameter reduces driver initialization failures
112 226 due to shared interrupts in the dump-capture kernel.
1135) Once the second kernel has booted, write out the dump file using 227
228* You must specify <root-dev> in the format corresponding to the root
229  device name in the output of the mount command.
230
231* "init 1" boots the dump-capture kernel into single-user mode without
232 networking. If you want networking, use "init 3."
233
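As an illustration only, a filled-in invocation might look like the
following (the kernel image path, initrd path and root device are
placeholder values, not taken from this document):

   # Hypothetical paths; substitute your own dump-capture kernel,
   # initrd and root device.
   kexec -p /boot/vmlinux-kdump \
        --initrd=/boot/initrd-kdump.img --args-linux \
        --append="root=/dev/sda1 init 1 irqpoll"

Add --elf32-core-headers to the command line if the dump is to be
analyzed with a 32-bit GDB, as noted above.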
234
235Kernel Panic
236============
237
238After successfully loading the dump-capture kernel as previously
239described, the system will reboot into the dump-capture kernel if a
240system crash is triggered. Trigger points are located in panic(),
241die(), die_nmi() and in the sysrq handler (ALT-SysRq-c).
242
243The following conditions will execute a crash trigger point:
244
245If a hard lockup is detected and "NMI watchdog" is configured, the system
246will boot into the dump-capture kernel ( die_nmi() ).
247
248If die() is called in a thread with pid 0 or 1, if die() is called inside
249interrupt context, or if die() is called while panic_on_oops is set, the
250system will boot into the dump-capture kernel.
251
252On powerpc systems, when a soft-reset is generated, die() is called by all CPUs and the system will boot into the dump-capture kernel.
253
254For testing purposes, you can trigger a crash by using "ALT-SysRq-c",
255"echo c > /proc/sysrq-trigger", or by writing a module that forces a panic.
256
257Write Out the Dump File
258=======================
259
260After the dump-capture kernel is booted, write out the dump file with
261the following command:
114 262
115 cp /proc/vmcore <dump-file> 263 cp /proc/vmcore <dump-file>
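For example (the destination path below is only an illustration):

   # Copy the dump to persistent storage before rebooting.
   cp /proc/vmcore /var/crash/vmcore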
116 264
117 Dump memory can also be accessed as a /dev/oldmem device for a linear/raw 265You can also access dumped memory as a /dev/oldmem device for a linear
118 view. To create the device, type: 266and raw view. To create the device, use the following command:
119 267
120 mknod /dev/oldmem c 1 12 268 mknod /dev/oldmem c 1 12
121 269
122 Use "dd" with suitable options for count, bs and skip to access specific 270Use the dd command with suitable options for count, bs, and skip to
123 portions of the dump. 271access specific portions of the dump.
124 272
125 Entire memory: dd if=/dev/oldmem of=oldmem.001 273To see the entire memory, use the following command:
126 274
275 dd if=/dev/oldmem of=oldmem.001
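As a sketch of extracting only part of the dump (the sizes and offsets
here are arbitrary examples):

   # First 16 MB of old memory:
   dd if=/dev/oldmem of=lowmem.dump bs=1M count=16
   # 64 MB starting at the 256 MB boundary:
   dd if=/dev/oldmem of=mid.dump bs=1M skip=256 count=64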
127 276
128ANALYSIS 277
278Analysis
129======== 279========
130Limited analysis can be done using gdb on the dump file copied out of
131/proc/vmcore. Use vmlinux built with -g and run
132 280
133 gdb vmlinux <dump-file> 281Before analyzing the dump image, you should reboot into a stable kernel.
282
283You can do limited analysis using GDB on the dump file copied out of
284/proc/vmcore. Use the debug vmlinux built with -g and run the following
285command:
286
287 gdb vmlinux <dump-file>
134 288
135Stack trace for the task on processor 0, register display, memory display 289Stack trace for the task on processor 0, register display, and memory
136work fine. 290display work fine.
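A minimal session might look like the following; "bt", "info registers"
and "x" are standard GDB commands, and <dump-file> and <kernel-address>
are placeholders:

   gdb vmlinux <dump-file>
   (gdb) bt
   (gdb) info registers
   (gdb) x/16xw <kernel-address>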
137 291
138Note: gdb cannot analyse core files generated in ELF64 format for i386. 292Note: GDB cannot analyze core files generated in ELF64 format for x86.
293On systems with a maximum of 4GB of memory, you can generate
294ELF32-format headers by passing the --elf32-core-headers option to kexec
295when loading the dump-capture kernel.
139 296
140Latest "crash" (crash-4.0-2.18) as available on Dave Anderson's site 297You can also use the Crash utility to analyze dump files in Kdump
141http://people.redhat.com/~anderson/ works well with kdump format. 298format. Crash is available on Dave Anderson's site at the following URL:
142 299
300 http://people.redhat.com/~anderson/
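Assuming the Crash utility is installed and a vmlinux built with debug
information is available, a typical invocation is along the lines of:

   crash vmlinux <dump-file>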
301
302
303To Do
304=====
143 305
144TODO 3061) Provide a kernel pages filtering mechanism, so that the core file size is not
145==== 307 excessive on systems with huge memory banks.
1461) Provide a kernel pages filtering mechanism so that core file size is not
147 insane on systems having huge memory banks.
1482) Relocatable kernel can help in maintaining multiple kernels for crashdump
149 and same kernel as the first kernel can be used to capture the dump.
150 308
3092) A relocatable kernel would avoid the need to maintain multiple kernels for
310 crash dump, so that the same kernel as the system kernel can be used to
311 capture the dump.
151 312
152CONTACT 313
314Contact
153======= 315=======
316
154Vivek Goyal (vgoyal@in.ibm.com) 317Vivek Goyal (vgoyal@in.ibm.com)
155Maneesh Soni (maneesh@in.ibm.com) 318Maneesh Soni (maneesh@in.ibm.com)
319
320
321Trademark
322=========
323
324Linux is a trademark of Linus Torvalds in the United States, other
325countries, or both.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 4710845dbac4..cf0d5416a4c3 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -262,9 +262,14 @@ What is required is some way of intervening to instruct the compiler and the
262CPU to restrict the order. 262CPU to restrict the order.
263 263
264Memory barriers are such interventions. They impose a perceived partial 264Memory barriers are such interventions. They impose a perceived partial
265ordering between the memory operations specified on either side of the barrier. 265ordering over the memory operations on either side of the barrier.
266They request that the sequence of memory events generated appears to other 266
267parts of the system as if the barrier is effective on that CPU. 267Such enforcement is important because the CPUs and other devices in a system
268can use a variety of tricks to improve performance - including reordering,
269deferral and combination of memory operations; speculative loads; speculative
270branch prediction and various types of caching. Memory barriers are used to
271override or suppress these tricks, allowing the code to sanely control the
272interaction of multiple CPUs and/or devices.
268 273
269 274
270VARIETIES OF MEMORY BARRIER 275VARIETIES OF MEMORY BARRIER
@@ -282,7 +287,7 @@ Memory barriers come in four basic varieties:
282 A write barrier is a partial ordering on stores only; it is not required 287 A write barrier is a partial ordering on stores only; it is not required
283 to have any effect on loads. 288 to have any effect on loads.
284 289
285 A CPU can be viewed as as commiting a sequence of store operations to the 290 A CPU can be viewed as committing a sequence of store operations to the
286 memory system as time progresses. All stores before a write barrier will 291 memory system as time progresses. All stores before a write barrier will
287 occur in the sequence _before_ all the stores after the write barrier. 292 occur in the sequence _before_ all the stores after the write barrier.
288 293
@@ -413,7 +418,7 @@ There are certain things that the Linux kernel memory barriers do not guarantee:
413 indirect effect will be the order in which the second CPU sees the effects 418 indirect effect will be the order in which the second CPU sees the effects
414 of the first CPU's accesses occur, but see the next point: 419 of the first CPU's accesses occur, but see the next point:
415 420
416 (*) There is no guarantee that the a CPU will see the correct order of effects 421 (*) There is no guarantee that a CPU will see the correct order of effects
417 from a second CPU's accesses, even _if_ the second CPU uses a memory 422 from a second CPU's accesses, even _if_ the second CPU uses a memory
418 barrier, unless the first CPU _also_ uses a matching memory barrier (see 423 barrier, unless the first CPU _also_ uses a matching memory barrier (see
419 the subsection on "SMP Barrier Pairing"). 424 the subsection on "SMP Barrier Pairing").
@@ -461,8 +466,8 @@ Whilst this may seem like a failure of coherency or causality maintenance, it
461isn't, and this behaviour can be observed on certain real CPUs (such as the DEC 466isn't, and this behaviour can be observed on certain real CPUs (such as the DEC
462Alpha). 467Alpha).
463 468
464To deal with this, a data dependency barrier must be inserted between the 469To deal with this, a data dependency barrier or better must be inserted
465address load and the data load: 470between the address load and the data load:
466 471
467 CPU 1 CPU 2 472 CPU 1 CPU 2
468 =============== =============== 473 =============== ===============
@@ -484,7 +489,7 @@ lines. The pointer P might be stored in an odd-numbered cache line, and the
484variable B might be stored in an even-numbered cache line. Then, if the 489variable B might be stored in an even-numbered cache line. Then, if the
485even-numbered bank of the reading CPU's cache is extremely busy while the 490even-numbered bank of the reading CPU's cache is extremely busy while the
486odd-numbered bank is idle, one can see the new value of the pointer P (&B), 491odd-numbered bank is idle, one can see the new value of the pointer P (&B),
487but the old value of the variable B (1). 492but the old value of the variable B (2).
488 493
489 494
490Another example of where data dependency barriers might by required is where a 495Another example of where data dependency barriers might by required is where a
@@ -744,7 +749,7 @@ some effectively random order, despite the write barrier issued by CPU 1:
744 : : 749 : :
745 750
746 751
747If, however, a read barrier were to be placed between the load of E and the 752If, however, a read barrier were to be placed between the load of B and the
748load of A on CPU 2: 753load of A on CPU 2:
749 754
750 CPU 1 CPU 2 755 CPU 1 CPU 2
@@ -1461,9 +1466,8 @@ instruction itself is complete.
1461 1466
1462On a UP system - where this wouldn't be a problem - the smp_mb() is just a 1467On a UP system - where this wouldn't be a problem - the smp_mb() is just a
1463compiler barrier, thus making sure the compiler emits the instructions in the 1468compiler barrier, thus making sure the compiler emits the instructions in the
1464right order without actually intervening in the CPU. Since there there's only 1469right order without actually intervening in the CPU. Since there's only one
1465one CPU, that CPU's dependency ordering logic will take care of everything 1470CPU, that CPU's dependency ordering logic will take care of everything else.
1466else.
1467 1471
1468 1472
1469ATOMIC OPERATIONS 1473ATOMIC OPERATIONS
@@ -1640,9 +1644,9 @@ functions:
1640 1644
1641 The PCI bus, amongst others, defines an I/O space concept - which on such 1645 The PCI bus, amongst others, defines an I/O space concept - which on such
1642 CPUs as i386 and x86_64 cpus readily maps to the CPU's concept of I/O 1646 CPUs as i386 and x86_64 cpus readily maps to the CPU's concept of I/O
1643 space. However, it may also mapped as a virtual I/O space in the CPU's 1647 space. However, it may also be mapped as a virtual I/O space in the CPU's
1644 memory map, particularly on those CPUs that don't support alternate 1648 memory map, particularly on those CPUs that don't support alternate I/O
1645 I/O spaces. 1649 spaces.
1646 1650
1647 Accesses to this space may be fully synchronous (as on i386), but 1651 Accesses to this space may be fully synchronous (as on i386), but
1648 intermediary bridges (such as the PCI host bridge) may not fully honour 1652 intermediary bridges (such as the PCI host bridge) may not fully honour
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index 95d17b3e2eee..2a58f985795a 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -44,8 +44,10 @@ normal timer interrupt, which is 100Hz.
44Programming and/or enabling interrupt frequencies greater than 64Hz is 44Programming and/or enabling interrupt frequencies greater than 64Hz is
45only allowed by root. This is perhaps a bit conservative, but we don't want 45only allowed by root. This is perhaps a bit conservative, but we don't want
46an evil user generating lots of IRQs on a slow 386sx-16, where it might have 46an evil user generating lots of IRQs on a slow 386sx-16, where it might have
47a negative impact on performance. Note that the interrupt handler is only 47a negative impact on performance. This 64Hz limit can be changed by writing
48a few lines of code to minimize any possibility of this effect. 48a different value to /proc/sys/dev/rtc/max-user-freq. Note that the
49interrupt handler is only a few lines of code to minimize any possibility
50of this effect.
49 51
50Also, if the kernel time is synchronized with an external source, the 52Also, if the kernel time is synchronized with an external source, the
51kernel will write the time back to the CMOS clock every 11 minutes. In 53kernel will write the time back to the CMOS clock every 11 minutes. In
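For example, a root user could raise the limit with something like the
following (the value 1024 is only an illustration):

   echo 1024 > /proc/sys/dev/rtc/max-user-freq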
@@ -81,6 +83,7 @@ that will be using this driver.
81 */ 83 */
82 84
83#include <stdio.h> 85#include <stdio.h>
86#include <stdlib.h>
84#include <linux/rtc.h> 87#include <linux/rtc.h>
85#include <sys/ioctl.h> 88#include <sys/ioctl.h>
86#include <sys/time.h> 89#include <sys/time.h>
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index ad0bedf678b3..e0188a23fd5e 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -115,8 +115,9 @@ trojan program is running at console and which could grab your password
115when you would try to login. It will kill all programs on given console 115when you would try to login. It will kill all programs on given console
116and thus letting you make sure that the login prompt you see is actually 116and thus letting you make sure that the login prompt you see is actually
117the one from init, not some trojan program. 117the one from init, not some trojan program.
118IMPORTANT:In its true form it is not a true SAK like the one in :IMPORTANT 118IMPORTANT: In its true form it is not a true SAK like the one in a :IMPORTANT
119IMPORTANT:c2 compliant systems, and it should be mistook as such. :IMPORTANT 119IMPORTANT: c2 compliant system, and it should not be mistaken as :IMPORTANT
120IMPORTANT: such. :IMPORTANT
120 It seems other find it useful as (System Attention Key) which is 121 It seems other find it useful as (System Attention Key) which is
121useful when you want to exit a program that will not let you switch consoles. 122useful when you want to exit a program that will not let you switch consoles.
122(For example, X or a svgalib program.) 123(For example, X or a svgalib program.)
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index de606dfa8db9..302fc1401547 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -702,7 +702,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
702 /* 702 /*
703 * Mark this as IO 703 * Mark this as IO
704 */ 704 */
705 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
706 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 705 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
707 706
708 if (remap_pfn_range(vma, vma->vm_start, phys, 707 if (remap_pfn_range(vma, vma->vm_start, phys,
diff --git a/arch/arm/mach-ixp4xx/nas100d-power.c b/arch/arm/mach-ixp4xx/nas100d-power.c
index 99d333d7ebdd..a3745ed37f9f 100644
--- a/arch/arm/mach-ixp4xx/nas100d-power.c
+++ b/arch/arm/mach-ixp4xx/nas100d-power.c
@@ -20,11 +20,10 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/reboot.h> 21#include <linux/reboot.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/reboot.h>
23 24
24#include <asm/mach-types.h> 25#include <asm/mach-types.h>
25 26
26extern void ctrl_alt_del(void);
27
28static irqreturn_t nas100d_reset_handler(int irq, void *dev_id, struct pt_regs *regs) 27static irqreturn_t nas100d_reset_handler(int irq, void *dev_id, struct pt_regs *regs)
29{ 28{
30 /* Signal init to do the ctrlaltdel action, this will bypass init if 29 /* Signal init to do the ctrlaltdel action, this will bypass init if
diff --git a/arch/arm/mach-ixp4xx/nslu2-power.c b/arch/arm/mach-ixp4xx/nslu2-power.c
index d80c362bc539..6d38e97142cc 100644
--- a/arch/arm/mach-ixp4xx/nslu2-power.c
+++ b/arch/arm/mach-ixp4xx/nslu2-power.c
@@ -20,11 +20,10 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/reboot.h> 21#include <linux/reboot.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/reboot.h>
23 24
24#include <asm/mach-types.h> 25#include <asm/mach-types.h>
25 26
26extern void ctrl_alt_del(void);
27
28static irqreturn_t nslu2_power_handler(int irq, void *dev_id, struct pt_regs *regs) 27static irqreturn_t nslu2_power_handler(int irq, void *dev_id, struct pt_regs *regs)
29{ 28{
30 /* Signal init to do the ctrlaltdel action, this will bypass init if 29 /* Signal init to do the ctrlaltdel action, this will bypass init if
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index 24bc149889b6..1e9d062103ae 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -27,8 +27,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
27 /* Leave vm_pgoff as-is, the PCI space address is the physical 27 /* Leave vm_pgoff as-is, the PCI space address is the physical
28 * address on this platform. 28 * address on this platform.
29 */ 29 */
30 vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
31
32 prot = pgprot_val(vma->vm_page_prot); 30 prot = pgprot_val(vma->vm_page_prot);
33 vma->vm_page_prot = __pgprot(prot); 31 vma->vm_page_prot = __pgprot(prot);
34 32
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 05668e3598c0..5fd65325b81a 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -371,11 +371,11 @@ static int acpi_cpufreq_early_init_acpi(void)
371 371
372 dprintk("acpi_cpufreq_early_init\n"); 372 dprintk("acpi_cpufreq_early_init\n");
373 373
374 for_each_cpu(i) { 374 for_each_possible_cpu(i) {
375 data = kzalloc(sizeof(struct acpi_processor_performance), 375 data = kzalloc(sizeof(struct acpi_processor_performance),
376 GFP_KERNEL); 376 GFP_KERNEL);
377 if (!data) { 377 if (!data) {
378 for_each_cpu(j) { 378 for_each_possible_cpu(j) {
379 kfree(acpi_perf_data[j]); 379 kfree(acpi_perf_data[j]);
380 acpi_perf_data[j] = NULL; 380 acpi_perf_data[j] = NULL;
381 } 381 }
@@ -584,7 +584,7 @@ acpi_cpufreq_exit (void)
584 584
585 cpufreq_unregister_driver(&acpi_cpufreq_driver); 585 cpufreq_unregister_driver(&acpi_cpufreq_driver);
586 586
587 for_each_cpu(i) { 587 for_each_possible_cpu(i) {
588 kfree(acpi_perf_data[i]); 588 kfree(acpi_perf_data[i]);
589 acpi_perf_data[i] = NULL; 589 acpi_perf_data[i] = NULL;
590 } 590 }
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 31c3a5baaa7f..f7e4356f6820 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -361,11 +361,11 @@ static int centrino_cpu_early_init_acpi(void)
361 unsigned int i, j; 361 unsigned int i, j;
362 struct acpi_processor_performance *data; 362 struct acpi_processor_performance *data;
363 363
364 for_each_cpu(i) { 364 for_each_possible_cpu(i) {
365 data = kzalloc(sizeof(struct acpi_processor_performance), 365 data = kzalloc(sizeof(struct acpi_processor_performance),
366 GFP_KERNEL); 366 GFP_KERNEL);
367 if (!data) { 367 if (!data) {
368 for_each_cpu(j) { 368 for_each_possible_cpu(j) {
369 kfree(acpi_perf_data[j]); 369 kfree(acpi_perf_data[j]);
370 acpi_perf_data[j] = NULL; 370 acpi_perf_data[j] = NULL;
371 } 371 }
@@ -805,7 +805,7 @@ static void __exit centrino_exit(void)
805 cpufreq_unregister_driver(&centrino_driver); 805 cpufreq_unregister_driver(&centrino_driver);
806 806
807#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI 807#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
808 for_each_cpu(j) { 808 for_each_possible_cpu(j) {
809 kfree(acpi_perf_data[j]); 809 kfree(acpi_perf_data[j]);
810 acpi_perf_data[j] = NULL; 810 acpi_perf_data[j] = NULL;
811 } 811 }
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 2b0cfce24a61..21dc1bbb8067 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -114,7 +114,8 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
114 atomic_dec(&waiting_for_crash_ipi); 114 atomic_dec(&waiting_for_crash_ipi);
115 /* Assume hlt works */ 115 /* Assume hlt works */
116 halt(); 116 halt();
117 for(;;); 117 for (;;)
118 cpu_relax();
118 119
119 return 1; 120 return 1;
120} 121}
diff --git a/arch/i386/kernel/doublefault.c b/arch/i386/kernel/doublefault.c
index 5edb1d379add..b4d14c2eb345 100644
--- a/arch/i386/kernel/doublefault.c
+++ b/arch/i386/kernel/doublefault.c
@@ -44,7 +44,8 @@ static void doublefault_fn(void)
44 } 44 }
45 } 45 }
46 46
47 for (;;) /* nothing */; 47 for (;;)
48 cpu_relax();
48} 49}
49 50
50struct tss_struct doublefault_tss __cacheline_aligned = { 51struct tss_struct doublefault_tss __cacheline_aligned = {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index e6023970aa40..6c1639836e06 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -61,7 +61,7 @@
61#include <asm/io_apic.h> 61#include <asm/io_apic.h>
62#include <asm/ist.h> 62#include <asm/ist.h>
63#include <asm/io.h> 63#include <asm/io.h>
64#include "setup_arch_pre.h" 64#include <setup_arch.h>
65#include <bios_ebda.h> 65#include <bios_ebda.h>
66 66
67/* Forward Declaration. */ 67/* Forward Declaration. */
@@ -411,8 +411,8 @@ static void __init limit_regions(unsigned long long size)
411 } 411 }
412} 412}
413 413
414static void __init add_memory_region(unsigned long long start, 414void __init add_memory_region(unsigned long long start,
415 unsigned long long size, int type) 415 unsigned long long size, int type)
416{ 416{
417 int x; 417 int x;
418 418
@@ -475,7 +475,7 @@ static struct change_member *change_point[2*E820MAX] __initdata;
475static struct e820entry *overlap_list[E820MAX] __initdata; 475static struct e820entry *overlap_list[E820MAX] __initdata;
476static struct e820entry new_bios[E820MAX] __initdata; 476static struct e820entry new_bios[E820MAX] __initdata;
477 477
478static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) 478int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
479{ 479{
480 struct change_member *change_tmp; 480 struct change_member *change_tmp;
481 unsigned long current_type, last_type; 481 unsigned long current_type, last_type;
@@ -644,7 +644,7 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
644 * thinkpad 560x, for example, does not cooperate with the memory 644 * thinkpad 560x, for example, does not cooperate with the memory
645 * detection code.) 645 * detection code.)
646 */ 646 */
647static int __init copy_e820_map(struct e820entry * biosmap, int nr_map) 647int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
648{ 648{
649 /* Only one memory region (or negative)? Ignore it */ 649 /* Only one memory region (or negative)? Ignore it */
650 if (nr_map < 2) 650 if (nr_map < 2)
@@ -702,12 +702,6 @@ static inline void copy_edd(void)
702} 702}
703#endif 703#endif
704 704
705/*
706 * Do NOT EVER look at the BIOS memory size location.
707 * It does not work on many machines.
708 */
709#define LOWMEMSIZE() (0x9f000)
710
711static void __init parse_cmdline_early (char ** cmdline_p) 705static void __init parse_cmdline_early (char ** cmdline_p)
712{ 706{
713 char c = ' ', *to = command_line, *from = saved_command_line; 707 char c = ' ', *to = command_line, *from = saved_command_line;
@@ -1424,8 +1418,6 @@ static void __init register_memory(void)
1424 pci_mem_start, gapstart, gapsize); 1418 pci_mem_start, gapstart, gapsize);
1425} 1419}
1426 1420
1427static char * __init machine_specific_memory_setup(void);
1428
1429#ifdef CONFIG_MCA 1421#ifdef CONFIG_MCA
1430static void set_mca_bus(int x) 1422static void set_mca_bus(int x)
1431{ 1423{
@@ -1708,7 +1700,6 @@ static __init int add_pcspkr(void)
1708} 1700}
1709device_initcall(add_pcspkr); 1701device_initcall(add_pcspkr);
1710 1702
1711#include "setup_arch_post.h"
1712/* 1703/*
1713 * Local Variables: 1704 * Local Variables:
1714 * mode:c 1705 * mode:c
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 825b2b4ca721..bd0ca5c9f053 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -257,7 +257,7 @@ static void __init synchronize_tsc_bp (void)
257 * all APs synchronize but they loop on '== num_cpus' 257 * all APs synchronize but they loop on '== num_cpus'
258 */ 258 */
259 while (atomic_read(&tsc_count_start) != num_booting_cpus()-1) 259 while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
260 mb(); 260 cpu_relax();
261 atomic_set(&tsc_count_stop, 0); 261 atomic_set(&tsc_count_stop, 0);
262 wmb(); 262 wmb();
263 /* 263 /*
@@ -276,7 +276,7 @@ static void __init synchronize_tsc_bp (void)
276 * Wait for all APs to leave the synchronization point: 276 * Wait for all APs to leave the synchronization point:
277 */ 277 */
278 while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1) 278 while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
279 mb(); 279 cpu_relax();
280 atomic_set(&tsc_count_start, 0); 280 atomic_set(&tsc_count_start, 0);
281 wmb(); 281 wmb();
282 atomic_inc(&tsc_count_stop); 282 atomic_inc(&tsc_count_stop);
@@ -333,19 +333,21 @@ static void __init synchronize_tsc_ap (void)
333 * this gets called, so we first wait for the BP to 333 * this gets called, so we first wait for the BP to
334 * finish SMP initialization: 334 * finish SMP initialization:
335 */ 335 */
336 while (!atomic_read(&tsc_start_flag)) mb(); 336 while (!atomic_read(&tsc_start_flag))
337 cpu_relax();
337 338
338 for (i = 0; i < NR_LOOPS; i++) { 339 for (i = 0; i < NR_LOOPS; i++) {
339 atomic_inc(&tsc_count_start); 340 atomic_inc(&tsc_count_start);
340 while (atomic_read(&tsc_count_start) != num_booting_cpus()) 341 while (atomic_read(&tsc_count_start) != num_booting_cpus())
341 mb(); 342 cpu_relax();
342 343
343 rdtscll(tsc_values[smp_processor_id()]); 344 rdtscll(tsc_values[smp_processor_id()]);
344 if (i == NR_LOOPS-1) 345 if (i == NR_LOOPS-1)
345 write_tsc(0, 0); 346 write_tsc(0, 0);
346 347
347 atomic_inc(&tsc_count_stop); 348 atomic_inc(&tsc_count_stop);
348 while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb(); 349 while (atomic_read(&tsc_count_stop) != num_booting_cpus())
350 cpu_relax();
349 } 351 }
350} 352}
351#undef NR_LOOPS 353#undef NR_LOOPS
@@ -1433,7 +1435,7 @@ int __devinit __cpu_up(unsigned int cpu)
1433 /* Unleash the CPU! */ 1435 /* Unleash the CPU! */
1434 cpu_set(cpu, smp_commenced_mask); 1436 cpu_set(cpu, smp_commenced_mask);
1435 while (!cpu_isset(cpu, cpu_online_map)) 1437 while (!cpu_isset(cpu, cpu_online_map))
1436 mb(); 1438 cpu_relax();
1437 return 0; 1439 return 0;
1438} 1440}
1439 1441
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 6979297ce278..c5aa65f7c02a 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -528,6 +528,97 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
528 return size; 528 return size;
529} 529}
530 530
531static unsigned long __copy_user_intel_nocache(void *to,
532 const void __user *from, unsigned long size)
533{
534 int d0, d1;
535
536 __asm__ __volatile__(
537 " .align 2,0x90\n"
538 "0: movl 32(%4), %%eax\n"
539 " cmpl $67, %0\n"
540 " jbe 2f\n"
541 "1: movl 64(%4), %%eax\n"
542 " .align 2,0x90\n"
543 "2: movl 0(%4), %%eax\n"
544 "21: movl 4(%4), %%edx\n"
545 " movnti %%eax, 0(%3)\n"
546 " movnti %%edx, 4(%3)\n"
547 "3: movl 8(%4), %%eax\n"
548 "31: movl 12(%4),%%edx\n"
549 " movnti %%eax, 8(%3)\n"
550 " movnti %%edx, 12(%3)\n"
551 "4: movl 16(%4), %%eax\n"
552 "41: movl 20(%4), %%edx\n"
553 " movnti %%eax, 16(%3)\n"
554 " movnti %%edx, 20(%3)\n"
555 "10: movl 24(%4), %%eax\n"
556 "51: movl 28(%4), %%edx\n"
557 " movnti %%eax, 24(%3)\n"
558 " movnti %%edx, 28(%3)\n"
559 "11: movl 32(%4), %%eax\n"
560 "61: movl 36(%4), %%edx\n"
561 " movnti %%eax, 32(%3)\n"
562 " movnti %%edx, 36(%3)\n"
563 "12: movl 40(%4), %%eax\n"
564 "71: movl 44(%4), %%edx\n"
565 " movnti %%eax, 40(%3)\n"
566 " movnti %%edx, 44(%3)\n"
567 "13: movl 48(%4), %%eax\n"
568 "81: movl 52(%4), %%edx\n"
569 " movnti %%eax, 48(%3)\n"
570 " movnti %%edx, 52(%3)\n"
571 "14: movl 56(%4), %%eax\n"
572 "91: movl 60(%4), %%edx\n"
573 " movnti %%eax, 56(%3)\n"
574 " movnti %%edx, 60(%3)\n"
575 " addl $-64, %0\n"
576 " addl $64, %4\n"
577 " addl $64, %3\n"
578 " cmpl $63, %0\n"
579 " ja 0b\n"
580 " sfence \n"
581 "5: movl %0, %%eax\n"
582 " shrl $2, %0\n"
583 " andl $3, %%eax\n"
584 " cld\n"
585 "6: rep; movsl\n"
586 " movl %%eax,%0\n"
587 "7: rep; movsb\n"
588 "8:\n"
589 ".section .fixup,\"ax\"\n"
590 "9: lea 0(%%eax,%0,4),%0\n"
591 "16: jmp 8b\n"
592 ".previous\n"
593 ".section __ex_table,\"a\"\n"
594 " .align 4\n"
595 " .long 0b,16b\n"
596 " .long 1b,16b\n"
597 " .long 2b,16b\n"
598 " .long 21b,16b\n"
599 " .long 3b,16b\n"
600 " .long 31b,16b\n"
601 " .long 4b,16b\n"
602 " .long 41b,16b\n"
603 " .long 10b,16b\n"
604 " .long 51b,16b\n"
605 " .long 11b,16b\n"
606 " .long 61b,16b\n"
607 " .long 12b,16b\n"
608 " .long 71b,16b\n"
609 " .long 13b,16b\n"
610 " .long 81b,16b\n"
611 " .long 14b,16b\n"
612 " .long 91b,16b\n"
613 " .long 6b,9b\n"
614 " .long 7b,16b\n"
615 ".previous"
616 : "=&c"(size), "=&D" (d0), "=&S" (d1)
617 : "1"(to), "2"(from), "0"(size)
618 : "eax", "edx", "memory");
619 return size;
620}
621
531#else 622#else
532 623
533/* 624/*
@@ -694,6 +785,19 @@ unsigned long __copy_from_user_ll(void *to, const void __user *from,
694} 785}
695EXPORT_SYMBOL(__copy_from_user_ll); 786EXPORT_SYMBOL(__copy_from_user_ll);
696 787
788unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
789 unsigned long n)
790{
791 BUG_ON((long)n < 0);
792 if (movsl_is_ok(to, from, n))
793 __copy_user(to, from, n);
794 else
795 n = __copy_user_intel((void __user *)to,
796 (const void *)from, n);
797 return n;
798}
799EXPORT_SYMBOL(__copy_from_user_ll_nozero);
800
697unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, 801unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
698 unsigned long n) 802 unsigned long n)
699{ 803{
@@ -709,6 +813,21 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
709 return n; 813 return n;
710} 814}
711 815
816unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
817 unsigned long n)
818{
819 BUG_ON((long)n < 0);
820#ifdef CONFIG_X86_INTEL_USERCOPY
821 if ( n > 64 && cpu_has_xmm2)
822 n = __copy_user_intel_nocache(to, from, n);
823 else
824 __copy_user(to, from, n);
825#else
826 __copy_user(to, from, n);
827#endif
828 return n;
829}
830
712/** 831/**
713 * copy_to_user: - Copy a block of data into user space. 832 * copy_to_user: - Copy a block of data into user space.
714 * @to: Destination address, in user space. 833 * @to: Destination address, in user space.
diff --git a/arch/i386/mach-default/setup.c b/arch/i386/mach-default/setup.c
index b4a7455c6993..004837c58793 100644
--- a/arch/i386/mach-default/setup.c
+++ b/arch/i386/mach-default/setup.c
@@ -8,6 +8,8 @@
8#include <linux/interrupt.h> 8#include <linux/interrupt.h>
9#include <asm/acpi.h> 9#include <asm/acpi.h>
10#include <asm/arch_hooks.h> 10#include <asm/arch_hooks.h>
11#include <asm/e820.h>
12#include <asm/setup.h>
11 13
12#ifdef CONFIG_HOTPLUG_CPU 14#ifdef CONFIG_HOTPLUG_CPU
13#define DEFAULT_SEND_IPI (1) 15#define DEFAULT_SEND_IPI (1)
@@ -130,3 +132,44 @@ static int __init print_ipi_mode(void)
130} 132}
131 133
132late_initcall(print_ipi_mode); 134late_initcall(print_ipi_mode);
135
136/**
137 * machine_specific_memory_setup - Hook for machine specific memory setup.
138 *
139 * Description:
140 * This is included late in kernel/setup.c so that it can make
141 * use of all of the static functions.
142 **/
143
144char * __init machine_specific_memory_setup(void)
145{
146 char *who;
147
148
149 who = "BIOS-e820";
150
151 /*
152 * Try to copy the BIOS-supplied E820-map.
153 *
154 * Otherwise fake a memory map; one section from 0k->640k,
155 * the next section from 1mb->appropriate_mem_k
156 */
157 sanitize_e820_map(E820_MAP, &E820_MAP_NR);
158 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
159 unsigned long mem_size;
160
161 /* compare results from other methods and take the greater */
162 if (ALT_MEM_K < EXT_MEM_K) {
163 mem_size = EXT_MEM_K;
164 who = "BIOS-88";
165 } else {
166 mem_size = ALT_MEM_K;
167 who = "BIOS-e801";
168 }
169
170 e820.nr_map = 0;
171 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
172 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
173 }
174 return who;
175}
diff --git a/arch/i386/mach-visws/setup.c b/arch/i386/mach-visws/setup.c
index 07fac7e749c7..8a9e1a6f745d 100644
--- a/arch/i386/mach-visws/setup.c
+++ b/arch/i386/mach-visws/setup.c
@@ -10,6 +10,8 @@
10#include <asm/fixmap.h> 10#include <asm/fixmap.h>
11#include <asm/arch_hooks.h> 11#include <asm/arch_hooks.h>
12#include <asm/io.h> 12#include <asm/io.h>
13#include <asm/e820.h>
14#include <asm/setup.h>
13#include "cobalt.h" 15#include "cobalt.h"
14#include "piix4.h" 16#include "piix4.h"
15 17
@@ -133,3 +135,50 @@ void __init time_init_hook(void)
133 /* Wire cpu IDT entry to s/w handler (and Cobalt APIC to IDT) */ 135 /* Wire cpu IDT entry to s/w handler (and Cobalt APIC to IDT) */
134 setup_irq(0, &irq0); 136 setup_irq(0, &irq0);
135} 137}
138
139/* Hook for machine specific memory setup. */
140
141#define MB (1024 * 1024)
142
143static unsigned long sgivwfb_mem_phys;
144static unsigned long sgivwfb_mem_size;
145
146long long mem_size __initdata = 0;
147
148char * __init machine_specific_memory_setup(void)
149{
150 long long gfx_mem_size = 8 * MB;
151
152 mem_size = ALT_MEM_K;
153
154 if (!mem_size) {
155 printk(KERN_WARNING "Bootloader didn't set memory size, upgrade it !\n");
156 mem_size = 128 * MB;
157 }
158
159 /*
160 * this hardcodes the graphics memory to 8 MB
161 * it really should be sized dynamically (or at least
162 * set as a boot param)
163 */
164 if (!sgivwfb_mem_size) {
165 printk(KERN_WARNING "Defaulting to 8 MB framebuffer size\n");
166 sgivwfb_mem_size = 8 * MB;
167 }
168
169 /*
170 * Trim to nearest MB
171 */
172 sgivwfb_mem_size &= ~((1 << 20) - 1);
173 sgivwfb_mem_phys = mem_size - gfx_mem_size;
174
175 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
176 add_memory_region(HIGH_MEMORY, mem_size - sgivwfb_mem_size - HIGH_MEMORY, E820_RAM);
177 add_memory_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED);
178
179 return "PROM";
180
181 /* Remove gcc warnings */
182 (void) sanitize_e820_map(NULL, NULL);
183 (void) copy_e820_map(NULL, 0);
184}
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index 7d8a3acb9441..0e225054e222 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -7,6 +7,9 @@
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <asm/acpi.h> 8#include <asm/acpi.h>
9#include <asm/arch_hooks.h> 9#include <asm/arch_hooks.h>
10#include <asm/voyager.h>
11#include <asm/e820.h>
12#include <asm/setup.h>
10 13
11void __init pre_intr_init_hook(void) 14void __init pre_intr_init_hook(void)
12{ 15{
@@ -45,3 +48,74 @@ void __init time_init_hook(void)
45{ 48{
46 setup_irq(0, &irq0); 49 setup_irq(0, &irq0);
47} 50}
51
52/* Hook for machine specific memory setup. */
53
54char * __init machine_specific_memory_setup(void)
55{
56 char *who;
57
58 who = "NOT VOYAGER";
59
60 if(voyager_level == 5) {
61 __u32 addr, length;
62 int i;
63
64 who = "Voyager-SUS";
65
66 e820.nr_map = 0;
67 for(i=0; voyager_memory_detect(i, &addr, &length); i++) {
68 add_memory_region(addr, length, E820_RAM);
69 }
70 return who;
71 } else if(voyager_level == 4) {
72 __u32 tom;
73 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
74 /* select the DINO config space */
75 outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
76 /* Read DINO top of memory register */
77 tom = ((inb(catbase + 0x4) & 0xf0) << 16)
78 + ((inb(catbase + 0x5) & 0x7f) << 24);
79
80 if(inb(catbase) != VOYAGER_DINO) {
81 printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
82 tom = (EXT_MEM_K)<<10;
83 }
84 who = "Voyager-TOM";
85 add_memory_region(0, 0x9f000, E820_RAM);
86 /* map from 1M to top of memory */
87 add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM);
88 /* FIXME: Should check the ASICs to see if I need to
89 * take out the 8M window. Just do it at the moment
90 * */
91 add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED);
92 return who;
93 }
94
95 who = "BIOS-e820";
96
97 /*
98 * Try to copy the BIOS-supplied E820-map.
99 *
100 * Otherwise fake a memory map; one section from 0k->640k,
101 * the next section from 1mb->appropriate_mem_k
102 */
103 sanitize_e820_map(E820_MAP, &E820_MAP_NR);
104 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
105 unsigned long mem_size;
106
107 /* compare results from other methods and take the greater */
108 if (ALT_MEM_K < EXT_MEM_K) {
109 mem_size = EXT_MEM_K;
110 who = "BIOS-88";
111 } else {
112 mem_size = ALT_MEM_K;
113 who = "BIOS-e801";
114 }
115
116 e820.nr_map = 0;
117 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
118 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
119 }
120 return who;
121}
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 7852827a599b..a151f7a99f5e 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -285,8 +285,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
285 /* Leave vm_pgoff as-is, the PCI space address is the physical 285 /* Leave vm_pgoff as-is, the PCI space address is the physical
286 * address on this platform. 286 * address on this platform.
287 */ 287 */
288 vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
289
290 prot = pgprot_val(vma->vm_page_prot); 288 prot = pgprot_val(vma->vm_page_prot);
291 if (boot_cpu_data.x86 > 3) 289 if (boot_cpu_data.x86 > 3)
292 prot |= _PAGE_PCD | _PAGE_PWT; 290 prot |= _PAGE_PCD | _PAGE_PWT;
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 4f3a16b37f8f..879edb51d1e0 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -166,7 +166,7 @@ static void cache_shared_cpu_map_setup( unsigned int cpu,
166 166
167 num_shared = (int) csi.num_shared; 167 num_shared = (int) csi.num_shared;
168 do { 168 do {
169 for_each_cpu(j) 169 for_each_possible_cpu(j)
170 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id 170 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
171 && cpu_data(j)->core_id == csi.log1_cid 171 && cpu_data(j)->core_id == csi.log1_cid
172 && cpu_data(j)->thread_id == csi.log1_tid) 172 && cpu_data(j)->thread_id == csi.log1_tid)
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 61dd8608da4f..77375a55da31 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -602,8 +602,6 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
602 * Leave vm_pgoff as-is, the PCI space address is the physical 602 * Leave vm_pgoff as-is, the PCI space address is the physical
603 * address on this platform. 603 * address on this platform.
604 */ 604 */
605 vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
606
607 if (write_combine && efi_range_is_wc(vma->vm_start, 605 if (write_combine && efi_range_is_wc(vma->vm_start,
608 vma->vm_end - vma->vm_start)) 606 vma->vm_end - vma->vm_start))
609 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 607 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -666,7 +664,6 @@ pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
666 664
667 vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT; 665 vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
668 vma->vm_page_prot = prot; 666 vma->vm_page_prot = prot;
669 vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
670 667
671 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 668 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
672 size, vma->vm_page_prot)) 669 size, vma->vm_page_prot))
diff --git a/arch/m68k/amiga/amiga_ksyms.c b/arch/m68k/amiga/amiga_ksyms.c
index b7bd84c73ea7..8f2e0587ae2f 100644
--- a/arch/m68k/amiga/amiga_ksyms.c
+++ b/arch/m68k/amiga/amiga_ksyms.c
@@ -23,8 +23,6 @@ EXPORT_SYMBOL(amiga_chip_avail);
23EXPORT_SYMBOL(amiga_chip_size); 23EXPORT_SYMBOL(amiga_chip_size);
24EXPORT_SYMBOL(amiga_audio_period); 24EXPORT_SYMBOL(amiga_audio_period);
25EXPORT_SYMBOL(amiga_audio_min_period); 25EXPORT_SYMBOL(amiga_audio_min_period);
26EXPORT_SYMBOL(amiga_do_irq);
27EXPORT_SYMBOL(amiga_do_irq_list);
28 26
29#ifdef CONFIG_AMIGA_PCMCIA 27#ifdef CONFIG_AMIGA_PCMCIA
30 EXPORT_SYMBOL(pcmcia_reset); 28 EXPORT_SYMBOL(pcmcia_reset);
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index b0aa61bf8700..f9403f4640a1 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -35,61 +35,30 @@
35 * /Jes 35 * /Jes
36 */ 36 */
37 37
38#include <linux/types.h>
39#include <linux/kernel.h>
40#include <linux/sched.h>
41#include <linux/kernel_stat.h>
42#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/interrupt.h>
43#include <linux/errno.h> 40#include <linux/errno.h>
44#include <linux/seq_file.h>
45 41
46#include <asm/system.h>
47#include <asm/irq.h> 42#include <asm/irq.h>
48#include <asm/traps.h> 43#include <asm/traps.h>
49#include <asm/amigahw.h> 44#include <asm/amigahw.h>
50#include <asm/amigaints.h> 45#include <asm/amigaints.h>
51#include <asm/amipcmcia.h> 46#include <asm/amipcmcia.h>
52 47
53extern int cia_request_irq(struct ciabase *base,int irq, 48static void amiga_enable_irq(unsigned int irq);
54 irqreturn_t (*handler)(int, void *, struct pt_regs *), 49static void amiga_disable_irq(unsigned int irq);
55 unsigned long flags, const char *devname, void *dev_id); 50static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp);
56extern void cia_free_irq(struct ciabase *base, unsigned int irq, void *dev_id); 51static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp);
57extern void cia_init_IRQ(struct ciabase *base); 52static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp);
58extern int cia_get_irq_list(struct ciabase *base, struct seq_file *p); 53static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp);
59 54
60/* irq node variables for amiga interrupt sources */ 55static struct irq_controller amiga_irq_controller = {
61static irq_node_t *ami_irq_list[AMI_STD_IRQS]; 56 .name = "amiga",
62 57 .lock = SPIN_LOCK_UNLOCKED,
63static unsigned short amiga_intena_vals[AMI_STD_IRQS] = { 58 .enable = amiga_enable_irq,
64 [IRQ_AMIGA_VERTB] = IF_VERTB, 59 .disable = amiga_disable_irq,
65 [IRQ_AMIGA_COPPER] = IF_COPER,
66 [IRQ_AMIGA_AUD0] = IF_AUD0,
67 [IRQ_AMIGA_AUD1] = IF_AUD1,
68 [IRQ_AMIGA_AUD2] = IF_AUD2,
69 [IRQ_AMIGA_AUD3] = IF_AUD3,
70 [IRQ_AMIGA_BLIT] = IF_BLIT,
71 [IRQ_AMIGA_DSKSYN] = IF_DSKSYN,
72 [IRQ_AMIGA_DSKBLK] = IF_DSKBLK,
73 [IRQ_AMIGA_RBF] = IF_RBF,
74 [IRQ_AMIGA_TBE] = IF_TBE,
75 [IRQ_AMIGA_SOFT] = IF_SOFT,
76 [IRQ_AMIGA_PORTS] = IF_PORTS,
77 [IRQ_AMIGA_EXTER] = IF_EXTER
78};
79static const unsigned char ami_servers[AMI_STD_IRQS] = {
80 [IRQ_AMIGA_VERTB] = 1,
81 [IRQ_AMIGA_PORTS] = 1,
82 [IRQ_AMIGA_EXTER] = 1
83}; 60};
84 61
85static short ami_ablecount[AMI_IRQS];
86
87static irqreturn_t ami_badint(int irq, void *dev_id, struct pt_regs *fp)
88{
89 num_spurious += 1;
90 return IRQ_NONE;
91}
92
93/* 62/*
94 * void amiga_init_IRQ(void) 63 * void amiga_init_IRQ(void)
95 * 64 *
@@ -103,23 +72,12 @@ static irqreturn_t ami_badint(int irq, void *dev_id, struct pt_regs *fp)
103 72
104void __init amiga_init_IRQ(void) 73void __init amiga_init_IRQ(void)
105{ 74{
106 int i; 75 request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL);
76 request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL);
77 request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL);
78 request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL);
107 79
108 /* initialize handlers */ 80 m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS);
109 for (i = 0; i < AMI_STD_IRQS; i++) {
110 if (ami_servers[i]) {
111 ami_irq_list[i] = NULL;
112 } else {
113 ami_irq_list[i] = new_irq_node();
114 ami_irq_list[i]->handler = ami_badint;
115 ami_irq_list[i]->flags = 0;
116 ami_irq_list[i]->dev_id = NULL;
117 ami_irq_list[i]->devname = NULL;
118 ami_irq_list[i]->next = NULL;
119 }
120 }
121 for (i = 0; i < AMI_IRQS; i++)
122 ami_ablecount[i] = 0;
123 81
124 /* turn off PCMCIA interrupts */ 82 /* turn off PCMCIA interrupts */
125 if (AMIGAHW_PRESENT(PCMCIA)) 83 if (AMIGAHW_PRESENT(PCMCIA))
@@ -134,249 +92,21 @@ void __init amiga_init_IRQ(void)
134 cia_init_IRQ(&ciab_base); 92 cia_init_IRQ(&ciab_base);
135} 93}
136 94
137static inline int amiga_insert_irq(irq_node_t **list, irq_node_t *node)
138{
139 unsigned long flags;
140 irq_node_t *cur;
141
142 if (!node->dev_id)
143 printk("%s: Warning: dev_id of %s is zero\n",
144 __FUNCTION__, node->devname);
145
146 local_irq_save(flags);
147
148 cur = *list;
149
150 if (node->flags & SA_INTERRUPT) {
151 if (node->flags & SA_SHIRQ)
152 return -EBUSY;
153 /*
154 * There should never be more than one
155 */
156 while (cur && cur->flags & SA_INTERRUPT) {
157 list = &cur->next;
158 cur = cur->next;
159 }
160 } else {
161 while (cur) {
162 list = &cur->next;
163 cur = cur->next;
164 }
165 }
166
167 node->next = cur;
168 *list = node;
169
170 local_irq_restore(flags);
171 return 0;
172}
173
174static inline void amiga_delete_irq(irq_node_t **list, void *dev_id)
175{
176 unsigned long flags;
177 irq_node_t *node;
178
179 local_irq_save(flags);
180
181 for (node = *list; node; list = &node->next, node = *list) {
182 if (node->dev_id == dev_id) {
183 *list = node->next;
184 /* Mark it as free. */
185 node->handler = NULL;
186 local_irq_restore(flags);
187 return;
188 }
189 }
190 local_irq_restore(flags);
191 printk ("%s: tried to remove invalid irq\n", __FUNCTION__);
192}
193
194/*
195 * amiga_request_irq : add an interrupt service routine for a particular
196 * machine specific interrupt source.
197 * If the addition was successful, it returns 0.
198 */
199
200int amiga_request_irq(unsigned int irq,
201 irqreturn_t (*handler)(int, void *, struct pt_regs *),
202 unsigned long flags, const char *devname, void *dev_id)
203{
204 irq_node_t *node;
205 int error = 0;
206
207 if (irq >= AMI_IRQS) {
208 printk ("%s: Unknown IRQ %d from %s\n", __FUNCTION__,
209 irq, devname);
210 return -ENXIO;
211 }
212
213 if (irq >= IRQ_AMIGA_AUTO)
214 return cpu_request_irq(irq - IRQ_AMIGA_AUTO, handler,
215 flags, devname, dev_id);
216
217 if (irq >= IRQ_AMIGA_CIAB)
218 return cia_request_irq(&ciab_base, irq - IRQ_AMIGA_CIAB,
219 handler, flags, devname, dev_id);
220
221 if (irq >= IRQ_AMIGA_CIAA)
222 return cia_request_irq(&ciaa_base, irq - IRQ_AMIGA_CIAA,
223 handler, flags, devname, dev_id);
224
225 /*
226 * IRQ_AMIGA_PORTS & IRQ_AMIGA_EXTER defaults to shared,
227 * we could add a check here for the SA_SHIRQ flag but all drivers
228 * should be aware of sharing anyway.
229 */
230 if (ami_servers[irq]) {
231 if (!(node = new_irq_node()))
232 return -ENOMEM;
233 node->handler = handler;
234 node->flags = flags;
235 node->dev_id = dev_id;
236 node->devname = devname;
237 node->next = NULL;
238 error = amiga_insert_irq(&ami_irq_list[irq], node);
239 } else {
240 ami_irq_list[irq]->handler = handler;
241 ami_irq_list[irq]->flags = flags;
242 ami_irq_list[irq]->dev_id = dev_id;
243 ami_irq_list[irq]->devname = devname;
244 }
245
246 /* enable the interrupt */
247 if (irq < IRQ_AMIGA_PORTS && !ami_ablecount[irq])
248 amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
249
250 return error;
251}
252
253void amiga_free_irq(unsigned int irq, void *dev_id)
254{
255 if (irq >= AMI_IRQS) {
256 printk ("%s: Unknown IRQ %d\n", __FUNCTION__, irq);
257 return;
258 }
259
260 if (irq >= IRQ_AMIGA_AUTO)
261 cpu_free_irq(irq - IRQ_AMIGA_AUTO, dev_id);
262
263 if (irq >= IRQ_AMIGA_CIAB) {
264 cia_free_irq(&ciab_base, irq - IRQ_AMIGA_CIAB, dev_id);
265 return;
266 }
267
268 if (irq >= IRQ_AMIGA_CIAA) {
269 cia_free_irq(&ciaa_base, irq - IRQ_AMIGA_CIAA, dev_id);
270 return;
271 }
272
273 if (ami_servers[irq]) {
274 amiga_delete_irq(&ami_irq_list[irq], dev_id);
275 /* if server list empty, disable the interrupt */
276 if (!ami_irq_list[irq] && irq < IRQ_AMIGA_PORTS)
277 amiga_custom.intena = amiga_intena_vals[irq];
278 } else {
279 if (ami_irq_list[irq]->dev_id != dev_id)
280 printk("%s: removing probably wrong IRQ %d from %s\n",
281 __FUNCTION__, irq, ami_irq_list[irq]->devname);
282 ami_irq_list[irq]->handler = ami_badint;
283 ami_irq_list[irq]->flags = 0;
284 ami_irq_list[irq]->dev_id = NULL;
285 ami_irq_list[irq]->devname = NULL;
286 amiga_custom.intena = amiga_intena_vals[irq];
287 }
288}
289
290/* 95/*
291 * Enable/disable a particular machine specific interrupt source. 96 * Enable/disable a particular machine specific interrupt source.
292 * Note that this may affect other interrupts in case of a shared interrupt. 97 * Note that this may affect other interrupts in case of a shared interrupt.
293 * This function should only be called for a _very_ short time to change some 98 * This function should only be called for a _very_ short time to change some
294 * internal data, that may not be changed by the interrupt at the same time. 99 * internal data, that may not be changed by the interrupt at the same time.
295 * ami_(enable|disable)_irq calls may also be nested.
296 */ 100 */
297 101
298void amiga_enable_irq(unsigned int irq) 102static void amiga_enable_irq(unsigned int irq)
299{
300 if (irq >= AMI_IRQS) {
301 printk("%s: Unknown IRQ %d\n", __FUNCTION__, irq);
302 return;
303 }
304
305 if (--ami_ablecount[irq])
306 return;
307
308 /* No action for auto-vector interrupts */
309 if (irq >= IRQ_AMIGA_AUTO){
310 printk("%s: Trying to enable auto-vector IRQ %i\n",
311 __FUNCTION__, irq - IRQ_AMIGA_AUTO);
312 return;
313 }
314
315 if (irq >= IRQ_AMIGA_CIAB) {
316 cia_set_irq(&ciab_base, (1 << (irq - IRQ_AMIGA_CIAB)));
317 cia_able_irq(&ciab_base, CIA_ICR_SETCLR |
318 (1 << (irq - IRQ_AMIGA_CIAB)));
319 return;
320 }
321
322 if (irq >= IRQ_AMIGA_CIAA) {
323 cia_set_irq(&ciaa_base, (1 << (irq - IRQ_AMIGA_CIAA)));
324 cia_able_irq(&ciaa_base, CIA_ICR_SETCLR |
325 (1 << (irq - IRQ_AMIGA_CIAA)));
326 return;
327 }
328
329 /* enable the interrupt */
330 amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
331}
332
333void amiga_disable_irq(unsigned int irq)
334{
335 if (irq >= AMI_IRQS) {
336 printk("%s: Unknown IRQ %d\n", __FUNCTION__, irq);
337 return;
338 }
339
340 if (ami_ablecount[irq]++)
341 return;
342
343 /* No action for auto-vector interrupts */
344 if (irq >= IRQ_AMIGA_AUTO) {
345 printk("%s: Trying to disable auto-vector IRQ %i\n",
346 __FUNCTION__, irq - IRQ_AMIGA_AUTO);
347 return;
348 }
349
350 if (irq >= IRQ_AMIGA_CIAB) {
351 cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB));
352 return;
353 }
354
355 if (irq >= IRQ_AMIGA_CIAA) {
356 cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA));
357 return;
358 }
359
360 /* disable the interrupt */
361 amiga_custom.intena = amiga_intena_vals[irq];
362}
363
364inline void amiga_do_irq(int irq, struct pt_regs *fp)
365{ 103{
366 kstat_cpu(0).irqs[SYS_IRQS + irq]++; 104 amiga_custom.intena = IF_SETCLR | (1 << (irq - IRQ_USER));
367 ami_irq_list[irq]->handler(irq, ami_irq_list[irq]->dev_id, fp);
368} 105}
369 106
370void amiga_do_irq_list(int irq, struct pt_regs *fp) 107static void amiga_disable_irq(unsigned int irq)
371{ 108{
372 irq_node_t *node; 109 amiga_custom.intena = 1 << (irq - IRQ_USER);
373
374 kstat_cpu(0).irqs[SYS_IRQS + irq]++;
375
376 amiga_custom.intreq = amiga_intena_vals[irq];
377
378 for (node = ami_irq_list[irq]; node; node = node->next)
379 node->handler(irq, node->dev_id, fp);
380} 110}
381 111
382/* 112/*
@@ -390,19 +120,19 @@ static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
390 /* if serial transmit buffer empty, interrupt */ 120 /* if serial transmit buffer empty, interrupt */
391 if (ints & IF_TBE) { 121 if (ints & IF_TBE) {
392 amiga_custom.intreq = IF_TBE; 122 amiga_custom.intreq = IF_TBE;
393 amiga_do_irq(IRQ_AMIGA_TBE, fp); 123 m68k_handle_int(IRQ_AMIGA_TBE, fp);
394 } 124 }
395 125
396 /* if floppy disk transfer complete, interrupt */ 126 /* if floppy disk transfer complete, interrupt */
397 if (ints & IF_DSKBLK) { 127 if (ints & IF_DSKBLK) {
398 amiga_custom.intreq = IF_DSKBLK; 128 amiga_custom.intreq = IF_DSKBLK;
399 amiga_do_irq(IRQ_AMIGA_DSKBLK, fp); 129 m68k_handle_int(IRQ_AMIGA_DSKBLK, fp);
400 } 130 }
401 131
402 /* if software interrupt set, interrupt */ 132 /* if software interrupt set, interrupt */
403 if (ints & IF_SOFT) { 133 if (ints & IF_SOFT) {
404 amiga_custom.intreq = IF_SOFT; 134 amiga_custom.intreq = IF_SOFT;
405 amiga_do_irq(IRQ_AMIGA_SOFT, fp); 135 m68k_handle_int(IRQ_AMIGA_SOFT, fp);
406 } 136 }
407 return IRQ_HANDLED; 137 return IRQ_HANDLED;
408} 138}
@@ -414,18 +144,20 @@ static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
414 /* if a blitter interrupt */ 144 /* if a blitter interrupt */
415 if (ints & IF_BLIT) { 145 if (ints & IF_BLIT) {
416 amiga_custom.intreq = IF_BLIT; 146 amiga_custom.intreq = IF_BLIT;
417 amiga_do_irq(IRQ_AMIGA_BLIT, fp); 147 m68k_handle_int(IRQ_AMIGA_BLIT, fp);
418 } 148 }
419 149
420 /* if a copper interrupt */ 150 /* if a copper interrupt */
421 if (ints & IF_COPER) { 151 if (ints & IF_COPER) {
422 amiga_custom.intreq = IF_COPER; 152 amiga_custom.intreq = IF_COPER;
423 amiga_do_irq(IRQ_AMIGA_COPPER, fp); 153 m68k_handle_int(IRQ_AMIGA_COPPER, fp);
424 } 154 }
425 155
426 /* if a vertical blank interrupt */ 156 /* if a vertical blank interrupt */
427 if (ints & IF_VERTB) 157 if (ints & IF_VERTB) {
428 amiga_do_irq_list(IRQ_AMIGA_VERTB, fp); 158 amiga_custom.intreq = IF_VERTB;
159 m68k_handle_int(IRQ_AMIGA_VERTB, fp);
160 }
429 return IRQ_HANDLED; 161 return IRQ_HANDLED;
430} 162}
431 163
@@ -436,25 +168,25 @@ static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
436 /* if audio 0 interrupt */ 168 /* if audio 0 interrupt */
437 if (ints & IF_AUD0) { 169 if (ints & IF_AUD0) {
438 amiga_custom.intreq = IF_AUD0; 170 amiga_custom.intreq = IF_AUD0;
439 amiga_do_irq(IRQ_AMIGA_AUD0, fp); 171 m68k_handle_int(IRQ_AMIGA_AUD0, fp);
440 } 172 }
441 173
442 /* if audio 1 interrupt */ 174 /* if audio 1 interrupt */
443 if (ints & IF_AUD1) { 175 if (ints & IF_AUD1) {
444 amiga_custom.intreq = IF_AUD1; 176 amiga_custom.intreq = IF_AUD1;
445 amiga_do_irq(IRQ_AMIGA_AUD1, fp); 177 m68k_handle_int(IRQ_AMIGA_AUD1, fp);
446 } 178 }
447 179
448 /* if audio 2 interrupt */ 180 /* if audio 2 interrupt */
449 if (ints & IF_AUD2) { 181 if (ints & IF_AUD2) {
450 amiga_custom.intreq = IF_AUD2; 182 amiga_custom.intreq = IF_AUD2;
451 amiga_do_irq(IRQ_AMIGA_AUD2, fp); 183 m68k_handle_int(IRQ_AMIGA_AUD2, fp);
452 } 184 }
453 185
454 /* if audio 3 interrupt */ 186 /* if audio 3 interrupt */
455 if (ints & IF_AUD3) { 187 if (ints & IF_AUD3) {
456 amiga_custom.intreq = IF_AUD3; 188 amiga_custom.intreq = IF_AUD3;
457 amiga_do_irq(IRQ_AMIGA_AUD3, fp); 189 m68k_handle_int(IRQ_AMIGA_AUD3, fp);
458 } 190 }
459 return IRQ_HANDLED; 191 return IRQ_HANDLED;
460} 192}
@@ -466,55 +198,13 @@ static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
466 /* if serial receive buffer full interrupt */ 198 /* if serial receive buffer full interrupt */
467 if (ints & IF_RBF) { 199 if (ints & IF_RBF) {
468 /* acknowledge of IF_RBF must be done by the serial interrupt */ 200 /* acknowledge of IF_RBF must be done by the serial interrupt */
469 amiga_do_irq(IRQ_AMIGA_RBF, fp); 201 m68k_handle_int(IRQ_AMIGA_RBF, fp);
470 } 202 }
471 203
472 /* if a disk sync interrupt */ 204 /* if a disk sync interrupt */
473 if (ints & IF_DSKSYN) { 205 if (ints & IF_DSKSYN) {
474 amiga_custom.intreq = IF_DSKSYN; 206 amiga_custom.intreq = IF_DSKSYN;
475 amiga_do_irq(IRQ_AMIGA_DSKSYN, fp); 207 m68k_handle_int(IRQ_AMIGA_DSKSYN, fp);
476 } 208 }
477 return IRQ_HANDLED; 209 return IRQ_HANDLED;
478} 210}
479
480static irqreturn_t ami_int7(int irq, void *dev_id, struct pt_regs *fp)
481{
482 panic ("level 7 interrupt received\n");
483}
484
485irqreturn_t (*amiga_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
486 [0] = ami_badint,
487 [1] = ami_int1,
488 [2] = ami_badint,
489 [3] = ami_int3,
490 [4] = ami_int4,
491 [5] = ami_int5,
492 [6] = ami_badint,
493 [7] = ami_int7
494};
495
496int show_amiga_interrupts(struct seq_file *p, void *v)
497{
498 int i;
499 irq_node_t *node;
500
501 for (i = 0; i < AMI_STD_IRQS; i++) {
502 if (!(node = ami_irq_list[i]))
503 continue;
504 seq_printf(p, "ami %2d: %10u ", i,
505 kstat_cpu(0).irqs[SYS_IRQS + i]);
506 do {
507 if (node->flags & SA_INTERRUPT)
508 seq_puts(p, "F ");
509 else
510 seq_puts(p, " ");
511 seq_printf(p, "%s\n", node->devname);
512 if ((node = node->next))
513 seq_puts(p, " ");
514 } while (node);
515 }
516
517 cia_get_irq_list(&ciaa_base, p);
518 cia_get_irq_list(&ciab_base, p);
519 return 0;
520}
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 9476eb9440f5..0956e45399e5 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -29,21 +29,18 @@ struct ciabase {
29 unsigned short int_mask; 29 unsigned short int_mask;
30 int handler_irq, cia_irq, server_irq; 30 int handler_irq, cia_irq, server_irq;
31 char *name; 31 char *name;
32 irq_handler_t irq_list[CIA_IRQS];
33} ciaa_base = { 32} ciaa_base = {
34 .cia = &ciaa, 33 .cia = &ciaa,
35 .int_mask = IF_PORTS, 34 .int_mask = IF_PORTS,
36 .handler_irq = IRQ_AMIGA_AUTO_2, 35 .handler_irq = IRQ_AMIGA_PORTS,
37 .cia_irq = IRQ_AMIGA_CIAA, 36 .cia_irq = IRQ_AMIGA_CIAA,
38 .server_irq = IRQ_AMIGA_PORTS, 37 .name = "CIAA"
39 .name = "CIAA handler"
40}, ciab_base = { 38}, ciab_base = {
41 .cia = &ciab, 39 .cia = &ciab,
42 .int_mask = IF_EXTER, 40 .int_mask = IF_EXTER,
43 .handler_irq = IRQ_AMIGA_AUTO_6, 41 .handler_irq = IRQ_AMIGA_EXTER,
44 .cia_irq = IRQ_AMIGA_CIAB, 42 .cia_irq = IRQ_AMIGA_CIAB,
45 .server_irq = IRQ_AMIGA_EXTER, 43 .name = "CIAB"
46 .name = "CIAB handler"
47}; 44};
48 45
49/* 46/*
@@ -66,13 +63,11 @@ unsigned char cia_set_irq(struct ciabase *base, unsigned char mask)
66 63
67/* 64/*
68 * Enable or disable CIA interrupts, return old interrupt mask, 65 * Enable or disable CIA interrupts, return old interrupt mask,
69 * interrupts will only be enabled if a handler exists
70 */ 66 */
71 67
72unsigned char cia_able_irq(struct ciabase *base, unsigned char mask) 68unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
73{ 69{
74 unsigned char old, tmp; 70 unsigned char old;
75 int i;
76 71
77 old = base->icr_mask; 72 old = base->icr_mask;
78 base->icr_data |= base->cia->icr; 73 base->icr_data |= base->cia->icr;
@@ -82,99 +77,104 @@ unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
82 else 77 else
83 base->icr_mask &= ~mask; 78 base->icr_mask &= ~mask;
84 base->icr_mask &= CIA_ICR_ALL; 79 base->icr_mask &= CIA_ICR_ALL;
85 for (i = 0, tmp = 1; i < CIA_IRQS; i++, tmp <<= 1) {
86 if ((tmp & base->icr_mask) && !base->irq_list[i].handler) {
87 base->icr_mask &= ~tmp;
88 base->cia->icr = tmp;
89 }
90 }
91 if (base->icr_data & base->icr_mask) 80 if (base->icr_data & base->icr_mask)
92 amiga_custom.intreq = IF_SETCLR | base->int_mask; 81 amiga_custom.intreq = IF_SETCLR | base->int_mask;
93 return old; 82 return old;
94} 83}
95 84
96int cia_request_irq(struct ciabase *base, unsigned int irq,
97 irqreturn_t (*handler)(int, void *, struct pt_regs *),
98 unsigned long flags, const char *devname, void *dev_id)
99{
100 unsigned char mask;
101
102 base->irq_list[irq].handler = handler;
103 base->irq_list[irq].flags = flags;
104 base->irq_list[irq].dev_id = dev_id;
105 base->irq_list[irq].devname = devname;
106
107 /* enable the interrupt */
108 mask = 1 << irq;
109 cia_set_irq(base, mask);
110 cia_able_irq(base, CIA_ICR_SETCLR | mask);
111 return 0;
112}
113
114void cia_free_irq(struct ciabase *base, unsigned int irq, void *dev_id)
115{
116 if (base->irq_list[irq].dev_id != dev_id)
117 printk("%s: removing probably wrong IRQ %i from %s\n",
118 __FUNCTION__, base->cia_irq + irq,
119 base->irq_list[irq].devname);
120
121 base->irq_list[irq].handler = NULL;
122 base->irq_list[irq].flags = 0;
123
124 cia_able_irq(base, 1 << irq);
125}
126
127static irqreturn_t cia_handler(int irq, void *dev_id, struct pt_regs *fp) 85static irqreturn_t cia_handler(int irq, void *dev_id, struct pt_regs *fp)
128{ 86{
129 struct ciabase *base = (struct ciabase *)dev_id; 87 struct ciabase *base = (struct ciabase *)dev_id;
130 int mach_irq, i; 88 int mach_irq;
131 unsigned char ints; 89 unsigned char ints;
132 90
133 mach_irq = base->cia_irq; 91 mach_irq = base->cia_irq;
134 irq = SYS_IRQS + mach_irq;
135 ints = cia_set_irq(base, CIA_ICR_ALL); 92 ints = cia_set_irq(base, CIA_ICR_ALL);
136 amiga_custom.intreq = base->int_mask; 93 amiga_custom.intreq = base->int_mask;
137 for (i = 0; i < CIA_IRQS; i++, irq++, mach_irq++) { 94 for (; ints; mach_irq++, ints >>= 1) {
138 if (ints & 1) { 95 if (ints & 1)
139 kstat_cpu(0).irqs[irq]++; 96 m68k_handle_int(mach_irq, fp);
140 base->irq_list[i].handler(mach_irq, base->irq_list[i].dev_id, fp);
141 }
142 ints >>= 1;
143 } 97 }
144 amiga_do_irq_list(base->server_irq, fp);
145 return IRQ_HANDLED; 98 return IRQ_HANDLED;
146} 99}
147 100
148void __init cia_init_IRQ(struct ciabase *base) 101static void cia_enable_irq(unsigned int irq)
149{ 102{
150 int i; 103 unsigned char mask;
151 104
152 /* init isr handlers */ 105 if (irq >= IRQ_AMIGA_CIAB) {
153 for (i = 0; i < CIA_IRQS; i++) { 106 mask = 1 << (irq - IRQ_AMIGA_CIAB);
154 base->irq_list[i].handler = NULL; 107 cia_set_irq(&ciab_base, mask);
155 base->irq_list[i].flags = 0; 108 cia_able_irq(&ciab_base, CIA_ICR_SETCLR | mask);
109 } else {
110 mask = 1 << (irq - IRQ_AMIGA_CIAA);
111 cia_set_irq(&ciaa_base, mask);
112 cia_able_irq(&ciaa_base, CIA_ICR_SETCLR | mask);
156 } 113 }
114}
157 115
158 /* clear any pending interrupt and turn off all interrupts */ 116static void cia_disable_irq(unsigned int irq)
159 cia_set_irq(base, CIA_ICR_ALL); 117{
160 cia_able_irq(base, CIA_ICR_ALL); 118 if (irq >= IRQ_AMIGA_CIAB)
119 cia_able_irq(&ciab_base, 1 << (irq - IRQ_AMIGA_CIAB));
120 else
121 cia_able_irq(&ciaa_base, 1 << (irq - IRQ_AMIGA_CIAA));
122}
161 123
162 /* install CIA handler */ 124static struct irq_controller cia_irq_controller = {
163 request_irq(base->handler_irq, cia_handler, 0, base->name, base); 125 .name = "cia",
126 .lock = SPIN_LOCK_UNLOCKED,
127 .enable = cia_enable_irq,
128 .disable = cia_disable_irq,
129};
130
131/*
132 * Override auto irq 2 & 6 and use them as general chain
133 * for external interrupts, we link the CIA interrupt sources
134 * into this chain.
135 */
164 136
165 amiga_custom.intena = IF_SETCLR | base->int_mask; 137static void auto_enable_irq(unsigned int irq)
138{
139 switch (irq) {
140 case IRQ_AUTO_2:
141 amiga_custom.intena = IF_SETCLR | IF_PORTS;
142 break;
143 case IRQ_AUTO_6:
144 amiga_custom.intena = IF_SETCLR | IF_EXTER;
145 break;
146 }
166} 147}
167 148
168int cia_get_irq_list(struct ciabase *base, struct seq_file *p) 149static void auto_disable_irq(unsigned int irq)
169{ 150{
170 int i, j; 151 switch (irq) {
171 152 case IRQ_AUTO_2:
172 j = base->cia_irq; 153 amiga_custom.intena = IF_PORTS;
173 for (i = 0; i < CIA_IRQS; i++) { 154 break;
174 seq_printf(p, "cia %2d: %10d ", j + i, 155 case IRQ_AUTO_6:
175 kstat_cpu(0).irqs[SYS_IRQS + j + i]); 156 amiga_custom.intena = IF_EXTER;
176 seq_puts(p, " "); 157 break;
177 seq_printf(p, "%s\n", base->irq_list[i].devname);
178 } 158 }
179 return 0; 159}
160
161static struct irq_controller auto_irq_controller = {
162 .name = "auto",
163 .lock = SPIN_LOCK_UNLOCKED,
164 .enable = auto_enable_irq,
165 .disable = auto_disable_irq,
166};
167
168void __init cia_init_IRQ(struct ciabase *base)
169{
170 m68k_setup_irq_controller(&cia_irq_controller, base->cia_irq, CIA_IRQS);
171
172 /* clear any pending interrupt and turn off all interrupts */
173 cia_set_irq(base, CIA_ICR_ALL);
174 cia_able_irq(base, CIA_ICR_ALL);
175
176 /* override auto int and install CIA handler */
177 m68k_setup_irq_controller(&auto_irq_controller, base->handler_irq, 1);
178 m68k_irq_startup(base->handler_irq);
179 request_irq(base->handler_irq, cia_handler, SA_SHIRQ, base->name, base);
180} 180}
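
With cia_request_irq()/cia_free_irq() removed, users of the CIA interrupt sources go through the generic request_irq()/free_irq() path, and the new "cia" irq_controller supplies the enable/disable hooks. A minimal, hedged sketch of a caller after this change — the handler, device name and cookie are invented for illustration; IRQ_AMIGA_CIAB and the pt_regs-style handler signature are taken from the patch itself:

#include <linux/interrupt.h>
#include <linux/init.h>
#include <asm/irq.h>
#include <asm/amigaints.h>

static int my_cookie;	/* hypothetical dev_id */

/* handler signature of this kernel generation, as used throughout the patch */
static irqreturn_t my_cia_handler(int irq, void *dev_id, struct pt_regs *fp)
{
	/* service the device here */
	return IRQ_HANDLED;
}

static int __init my_cia_attach(void)
{
	/* IRQ_AMIGA_CIAB + n selects ICR bit n of CIA-B; cia_enable_irq()
	 * above sets the matching mask bit for this source */
	return request_irq(IRQ_AMIGA_CIAB, my_cia_handler, 0,
			   "my-cia-device", &my_cookie);
}

static void my_cia_detach(void)
{
	free_irq(IRQ_AMIGA_CIAB, &my_cookie);
}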
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 12e3706fe02c..b5b8a416a07a 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -87,17 +87,8 @@ extern char m68k_debug_device[];
87static void amiga_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)); 87static void amiga_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
88/* amiga specific irq functions */ 88/* amiga specific irq functions */
89extern void amiga_init_IRQ (void); 89extern void amiga_init_IRQ (void);
90extern irqreturn_t (*amiga_default_handler[]) (int, void *, struct pt_regs *);
91extern int amiga_request_irq (unsigned int irq,
92 irqreturn_t (*handler)(int, void *, struct pt_regs *),
93 unsigned long flags, const char *devname,
94 void *dev_id);
95extern void amiga_free_irq (unsigned int irq, void *dev_id);
96extern void amiga_enable_irq (unsigned int);
97extern void amiga_disable_irq (unsigned int);
98static void amiga_get_model(char *model); 90static void amiga_get_model(char *model);
99static int amiga_get_hardware_list(char *buffer); 91static int amiga_get_hardware_list(char *buffer);
100extern int show_amiga_interrupts (struct seq_file *, void *);
101/* amiga specific timer functions */ 92/* amiga specific timer functions */
102static unsigned long amiga_gettimeoffset (void); 93static unsigned long amiga_gettimeoffset (void);
103static int a3000_hwclk (int, struct rtc_time *); 94static int a3000_hwclk (int, struct rtc_time *);
@@ -392,14 +383,8 @@ void __init config_amiga(void)
392 383
393 mach_sched_init = amiga_sched_init; 384 mach_sched_init = amiga_sched_init;
394 mach_init_IRQ = amiga_init_IRQ; 385 mach_init_IRQ = amiga_init_IRQ;
395 mach_default_handler = &amiga_default_handler;
396 mach_request_irq = amiga_request_irq;
397 mach_free_irq = amiga_free_irq;
398 enable_irq = amiga_enable_irq;
399 disable_irq = amiga_disable_irq;
400 mach_get_model = amiga_get_model; 386 mach_get_model = amiga_get_model;
401 mach_get_hardware_list = amiga_get_hardware_list; 387 mach_get_hardware_list = amiga_get_hardware_list;
402 mach_get_irq_list = show_amiga_interrupts;
403 mach_gettimeoffset = amiga_gettimeoffset; 388 mach_gettimeoffset = amiga_gettimeoffset;
404 if (AMIGAHW_PRESENT(A3000_CLK)){ 389 if (AMIGAHW_PRESENT(A3000_CLK)){
405 mach_hwclk = a3000_hwclk; 390 mach_hwclk = a3000_hwclk;
diff --git a/arch/m68k/apollo/Makefile b/arch/m68k/apollo/Makefile
index 39264f3b6ad6..76a057962c38 100644
--- a/arch/m68k/apollo/Makefile
+++ b/arch/m68k/apollo/Makefile
@@ -2,4 +2,4 @@
2# Makefile for Linux arch/m68k/amiga source directory 2# Makefile for Linux arch/m68k/amiga source directory
3# 3#
4 4
5obj-y := config.o dn_ints.o dma.o 5obj-y := config.o dn_ints.o
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index d401962d9b25..99c70978aafa 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -28,11 +28,6 @@ u_long apollo_model;
28 28
29extern void dn_sched_init(irqreturn_t (*handler)(int,void *,struct pt_regs *)); 29extern void dn_sched_init(irqreturn_t (*handler)(int,void *,struct pt_regs *));
30extern void dn_init_IRQ(void); 30extern void dn_init_IRQ(void);
31extern int dn_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
32extern void dn_free_irq(unsigned int irq, void *dev_id);
33extern void dn_enable_irq(unsigned int);
34extern void dn_disable_irq(unsigned int);
35extern int show_dn_interrupts(struct seq_file *, void *);
36extern unsigned long dn_gettimeoffset(void); 31extern unsigned long dn_gettimeoffset(void);
37extern int dn_dummy_hwclk(int, struct rtc_time *); 32extern int dn_dummy_hwclk(int, struct rtc_time *);
38extern int dn_dummy_set_clock_mmss(unsigned long); 33extern int dn_dummy_set_clock_mmss(unsigned long);
@@ -40,13 +35,11 @@ extern void dn_dummy_reset(void);
40extern void dn_dummy_waitbut(void); 35extern void dn_dummy_waitbut(void);
41extern struct fb_info *dn_fb_init(long *); 36extern struct fb_info *dn_fb_init(long *);
42extern void dn_dummy_debug_init(void); 37extern void dn_dummy_debug_init(void);
43extern void dn_dummy_video_setup(char *,int *);
44extern irqreturn_t dn_process_int(int irq, struct pt_regs *fp); 38extern irqreturn_t dn_process_int(int irq, struct pt_regs *fp);
45#ifdef CONFIG_HEARTBEAT 39#ifdef CONFIG_HEARTBEAT
46static void dn_heartbeat(int on); 40static void dn_heartbeat(int on);
47#endif 41#endif
48static irqreturn_t dn_timer_int(int irq,void *, struct pt_regs *); 42static irqreturn_t dn_timer_int(int irq,void *, struct pt_regs *);
49static irqreturn_t (*sched_timer_handler)(int, void *, struct pt_regs *)=NULL;
50static void dn_get_model(char *model); 43static void dn_get_model(char *model);
51static const char *apollo_models[] = { 44static const char *apollo_models[] = {
52 [APOLLO_DN3000-APOLLO_DN3000] = "DN3000 (Otter)", 45 [APOLLO_DN3000-APOLLO_DN3000] = "DN3000 (Otter)",
@@ -164,17 +157,10 @@ void config_apollo(void) {
164 157
165 mach_sched_init=dn_sched_init; /* */ 158 mach_sched_init=dn_sched_init; /* */
166 mach_init_IRQ=dn_init_IRQ; 159 mach_init_IRQ=dn_init_IRQ;
167 mach_default_handler=NULL;
168 mach_request_irq = dn_request_irq;
169 mach_free_irq = dn_free_irq;
170 enable_irq = dn_enable_irq;
171 disable_irq = dn_disable_irq;
172 mach_get_irq_list = show_dn_interrupts;
173 mach_gettimeoffset = dn_gettimeoffset; 160 mach_gettimeoffset = dn_gettimeoffset;
174 mach_max_dma_address = 0xffffffff; 161 mach_max_dma_address = 0xffffffff;
175 mach_hwclk = dn_dummy_hwclk; /* */ 162 mach_hwclk = dn_dummy_hwclk; /* */
176 mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */ 163 mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */
177 mach_process_int = dn_process_int;
178 mach_reset = dn_dummy_reset; /* */ 164 mach_reset = dn_dummy_reset; /* */
179#ifdef CONFIG_HEARTBEAT 165#ifdef CONFIG_HEARTBEAT
180 mach_heartbeat = dn_heartbeat; 166 mach_heartbeat = dn_heartbeat;
@@ -189,11 +175,13 @@ void config_apollo(void) {
189 175
190} 176}
191 177
192irqreturn_t dn_timer_int(int irq, void *dev_id, struct pt_regs *fp) { 178irqreturn_t dn_timer_int(int irq, void *dev_id, struct pt_regs *fp)
179{
180 irqreturn_t (*timer_handler)(int, void *, struct pt_regs *) = dev_id;
193 181
194 volatile unsigned char x; 182 volatile unsigned char x;
195 183
196 sched_timer_handler(irq,dev_id,fp); 184 timer_handler(irq, dev_id, fp);
197 185
198 x=*(volatile unsigned char *)(timer+3); 186 x=*(volatile unsigned char *)(timer+3);
199 x=*(volatile unsigned char *)(timer+5); 187 x=*(volatile unsigned char *)(timer+5);
@@ -217,9 +205,7 @@ void dn_sched_init(irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
217 printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); 205 printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
218#endif 206#endif
219 207
220 sched_timer_handler=timer_routine; 208 request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine);
221 request_irq(0,dn_timer_int,0,NULL,NULL);
222
223} 209}
224 210
225unsigned long dn_gettimeoffset(void) { 211unsigned long dn_gettimeoffset(void) {
diff --git a/arch/m68k/apollo/dn_ints.c b/arch/m68k/apollo/dn_ints.c
index a31259359a12..9fe07803797b 100644
--- a/arch/m68k/apollo/dn_ints.c
+++ b/arch/m68k/apollo/dn_ints.c
@@ -1,125 +1,44 @@
1#include <linux/types.h> 1#include <linux/interrupt.h>
2#include <linux/kernel.h>
3#include <linux/jiffies.h>
4#include <linux/kernel_stat.h>
5#include <linux/timer.h>
6 2
7#include <asm/system.h>
8#include <asm/irq.h> 3#include <asm/irq.h>
9#include <asm/traps.h> 4#include <asm/traps.h>
10#include <asm/page.h>
11#include <asm/machdep.h>
12#include <asm/apollohw.h> 5#include <asm/apollohw.h>
13#include <asm/errno.h>
14 6
15static irq_handler_t dn_irqs[16]; 7void dn_process_int(unsigned int irq, struct pt_regs *fp)
16
17irqreturn_t dn_process_int(int irq, struct pt_regs *fp)
18{ 8{
19 irqreturn_t res = IRQ_NONE; 9 m68k_handle_int(irq, fp);
20
21 if(dn_irqs[irq-160].handler) {
22 res = dn_irqs[irq-160].handler(irq,dn_irqs[irq-160].dev_id,fp);
23 } else {
24 printk("spurious irq %d occurred\n",irq);
25 }
26
27 *(volatile unsigned char *)(pica)=0x20;
28 *(volatile unsigned char *)(picb)=0x20;
29
30 return res;
31}
32
33void dn_init_IRQ(void) {
34
35 int i;
36
37 for(i=0;i<16;i++) {
38 dn_irqs[i].handler=NULL;
39 dn_irqs[i].flags=IRQ_FLG_STD;
40 dn_irqs[i].dev_id=NULL;
41 dn_irqs[i].devname=NULL;
42 }
43
44}
45
46int dn_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id) {
47
48 if((irq<0) || (irq>15)) {
49 printk("Trying to request invalid IRQ\n");
50 return -ENXIO;
51 }
52
53 if(!dn_irqs[irq].handler) {
54 dn_irqs[irq].handler=handler;
55 dn_irqs[irq].flags=IRQ_FLG_STD;
56 dn_irqs[irq].dev_id=dev_id;
57 dn_irqs[irq].devname=devname;
58 if(irq<8)
59 *(volatile unsigned char *)(pica+1)&=~(1<<irq);
60 else
61 *(volatile unsigned char *)(picb+1)&=~(1<<(irq-8));
62
63 return 0;
64 }
65 else {
66 printk("Trying to request already assigned irq %d\n",irq);
67 return -ENXIO;
68 }
69
70}
71
72void dn_free_irq(unsigned int irq, void *dev_id) {
73
74 if((irq<0) || (irq>15)) {
75 printk("Trying to free invalid IRQ\n");
76 return ;
77 }
78
79 if(irq<8)
80 *(volatile unsigned char *)(pica+1)|=(1<<irq);
81 else
82 *(volatile unsigned char *)(picb+1)|=(1<<(irq-8));
83
84 dn_irqs[irq].handler=NULL;
85 dn_irqs[irq].flags=IRQ_FLG_STD;
86 dn_irqs[irq].dev_id=NULL;
87 dn_irqs[irq].devname=NULL;
88
89 return ;
90
91}
92
93void dn_enable_irq(unsigned int irq) {
94
95 printk("dn enable irq\n");
96
97}
98
99void dn_disable_irq(unsigned int irq) {
100
101 printk("dn disable irq\n");
102 10
11 *(volatile unsigned char *)(pica)=0x20;
12 *(volatile unsigned char *)(picb)=0x20;
103} 13}
104 14
105int show_dn_interrupts(struct seq_file *p, void *v) { 15int apollo_irq_startup(unsigned int irq)
106 16{
107 printk("dn get irq list\n"); 17 if (irq < 8)
108 18 *(volatile unsigned char *)(pica+1) &= ~(1 << irq);
109 return 0; 19 else
110 20 *(volatile unsigned char *)(picb+1) &= ~(1 << (irq - 8));
21 return 0;
111} 22}
112 23
113struct fb_info *dn_dummy_fb_init(long *mem_start) { 24void apollo_irq_shutdown(unsigned int irq)
114 25{
115 printk("fb init\n"); 26 if (irq < 8)
116 27 *(volatile unsigned char *)(pica+1) |= (1 << irq);
117 return NULL; 28 else
118 29 *(volatile unsigned char *)(picb+1) |= (1 << (irq - 8));
119} 30}
120 31
121void dn_dummy_video_setup(char *options,int *ints) { 32static struct irq_controller apollo_irq_controller = {
33 .name = "apollo",
34 .lock = SPIN_LOCK_UNLOCKED,
35 .startup = apollo_irq_startup,
36 .shutdown = apollo_irq_shutdown,
37};
122 38
123 printk("no video yet\n");
124 39
40void dn_init_IRQ(void)
41{
42 m68k_setup_user_interrupt(VEC_USER + 96, 16, dn_process_int);
43 m68k_setup_irq_controller(&apollo_irq_controller, IRQ_APOLLO, 16);
125} 44}
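
The Apollo conversion is the smallest instance of the pattern this patch applies everywhere: a struct irq_controller with startup/shutdown (or enable/disable) hooks, registered for a vector range with m68k_setup_irq_controller(), plus m68k_setup_user_interrupt() to route user vectors into the common dispatcher. A hedged sketch of the same pattern for an imaginary board — the "foo" names and the IRQ base are placeholders; the structure fields and setup calls are the ones used in this diff:

#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/traps.h>

#define FOO_IRQ_BASE	IRQ_USER	/* assumed base of the board's range */
#define FOO_NUM_IRQS	8

static void foo_enable_irq(unsigned int irq)
{
	/* unmask bit (irq - FOO_IRQ_BASE) in the board's interrupt mask */
}

static void foo_disable_irq(unsigned int irq)
{
	/* mask bit (irq - FOO_IRQ_BASE) again */
}

static struct irq_controller foo_irq_controller = {
	.name    = "foo",
	.lock    = SPIN_LOCK_UNLOCKED,
	.enable  = foo_enable_irq,
	.disable = foo_disable_irq,
};

void __init foo_init_IRQ(void)
{
	/* a NULL handler leaves user vectors on m68k_handle_int(),
	 * as on Atari and BVME6000 elsewhere in this patch */
	m68k_setup_user_interrupt(VEC_USER, FOO_NUM_IRQS, NULL);
	m68k_setup_irq_controller(&foo_irq_controller, FOO_IRQ_BASE, FOO_NUM_IRQS);
}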
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 076f47917842..ece13cbf9950 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -104,6 +104,7 @@
104 * the sr copy in the frame. 104 * the sr copy in the frame.
105 */ 105 */
106 106
107#if 0
107 108
108#define NUM_INT_SOURCES (8 + NUM_ATARI_SOURCES) 109#define NUM_INT_SOURCES (8 + NUM_ATARI_SOURCES)
109 110
@@ -133,13 +134,6 @@ static struct irqhandler irq_handler[NUM_INT_SOURCES];
133 */ 134 */
134static struct irqparam irq_param[NUM_INT_SOURCES]; 135static struct irqparam irq_param[NUM_INT_SOURCES];
135 136
136/*
137 * Bitmap for free interrupt vector numbers
138 * (new vectors starting from 0x70 can be allocated by
139 * atari_register_vme_int())
140 */
141static int free_vme_vec_bitmap;
142
143/* check for valid int number (complex, sigh...) */ 137/* check for valid int number (complex, sigh...) */
144#define IS_VALID_INTNO(n) \ 138#define IS_VALID_INTNO(n) \
145 ((n) > 0 && \ 139 ((n) > 0 && \
@@ -301,6 +295,14 @@ __asm__ (__ALIGN_STR "\n"
301); 295);
302 for (;;); 296 for (;;);
303} 297}
298#endif
299
300/*
301 * Bitmap for free interrupt vector numbers
302 * (new vectors starting from 0x70 can be allocated by
303 * atari_register_vme_int())
304 */
305static int free_vme_vec_bitmap;
304 306
305/* GK: 307/* GK:
306 * HBL IRQ handler for Falcon. Nobody needs it :-) 308 * HBL IRQ handler for Falcon. Nobody needs it :-)
@@ -313,13 +315,34 @@ __ALIGN_STR "\n\t"
313 "orw #0x200,%sp@\n\t" /* set saved ipl to 2 */ 315 "orw #0x200,%sp@\n\t" /* set saved ipl to 2 */
314 "rte"); 316 "rte");
315 317
316/* Defined in entry.S; only increments 'num_spurious' */ 318extern void atari_microwire_cmd(int cmd);
317asmlinkage void bad_interrupt(void);
318
319extern void atari_microwire_cmd( int cmd );
320 319
321extern int atari_SCC_reset_done; 320extern int atari_SCC_reset_done;
322 321
322static int atari_startup_irq(unsigned int irq)
323{
324 m68k_irq_startup(irq);
325 atari_turnon_irq(irq);
326 atari_enable_irq(irq);
327 return 0;
328}
329
330static void atari_shutdown_irq(unsigned int irq)
331{
332 atari_disable_irq(irq);
333 atari_turnoff_irq(irq);
334 m68k_irq_shutdown(irq);
335}
336
337static struct irq_controller atari_irq_controller = {
338 .name = "atari",
339 .lock = SPIN_LOCK_UNLOCKED,
340 .startup = atari_startup_irq,
341 .shutdown = atari_shutdown_irq,
342 .enable = atari_enable_irq,
343 .disable = atari_disable_irq,
344};
345
323/* 346/*
324 * void atari_init_IRQ (void) 347 * void atari_init_IRQ (void)
325 * 348 *
@@ -333,12 +356,8 @@ extern int atari_SCC_reset_done;
333 356
334void __init atari_init_IRQ(void) 357void __init atari_init_IRQ(void)
335{ 358{
336 int i; 359 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
337 360 m68k_setup_irq_controller(&atari_irq_controller, 1, NUM_ATARI_SOURCES - 1);
338 /* initialize the vector table */
339 for (i = 0; i < NUM_INT_SOURCES; ++i) {
340 vectors[IRQ_SOURCE_TO_VECTOR(i)] = bad_interrupt;
341 }
342 361
343 /* Initialize the MFP(s) */ 362 /* Initialize the MFP(s) */
344 363
@@ -378,8 +397,7 @@ void __init atari_init_IRQ(void)
378 * enabled in VME mask 397 * enabled in VME mask
379 */ 398 */
380 tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */ 399 tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */
381 } 400 } else {
382 else {
383 /* If no SCU and no Hades, the HSYNC interrupt needs to be 401 /* If no SCU and no Hades, the HSYNC interrupt needs to be
384 * disabled this way. (Else _inthandler in kernel/sys_call.S 402 * disabled this way. (Else _inthandler in kernel/sys_call.S
385 * gets overruns) 403 * gets overruns)
@@ -404,184 +422,6 @@ void __init atari_init_IRQ(void)
404} 422}
405 423
406 424
407static irqreturn_t atari_call_irq_list( int irq, void *dev_id, struct pt_regs *fp )
408{
409 irq_node_t *node;
410
411 for (node = (irq_node_t *)dev_id; node; node = node->next)
412 node->handler(irq, node->dev_id, fp);
413 return IRQ_HANDLED;
414}
415
416
417/*
418 * atari_request_irq : add an interrupt service routine for a particular
419 * machine specific interrupt source.
420 * If the addition was successful, it returns 0.
421 */
422
423int atari_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
424 unsigned long flags, const char *devname, void *dev_id)
425{
426 int vector;
427 unsigned long oflags = flags;
428
429 /*
430 * The following is a hack to make some PCI card drivers work,
431 * which set the SA_SHIRQ flag.
432 */
433
434 flags &= ~SA_SHIRQ;
435
436 if (flags == SA_INTERRUPT) {
437 printk ("%s: SA_INTERRUPT changed to IRQ_TYPE_SLOW for %s\n",
438 __FUNCTION__, devname);
439 flags = IRQ_TYPE_SLOW;
440 }
441 if (flags < IRQ_TYPE_SLOW || flags > IRQ_TYPE_PRIO) {
442 printk ("%s: Bad irq type 0x%lx <0x%lx> requested from %s\n",
443 __FUNCTION__, flags, oflags, devname);
444 return -EINVAL;
445 }
446 if (!IS_VALID_INTNO(irq)) {
447 printk ("%s: Unknown irq %d requested from %s\n",
448 __FUNCTION__, irq, devname);
449 return -ENXIO;
450 }
451 vector = IRQ_SOURCE_TO_VECTOR(irq);
452
453 /*
454 * Check type/source combination: slow ints are (currently)
455 * only possible for MFP-interrupts.
456 */
457 if (flags == IRQ_TYPE_SLOW &&
458 (irq < STMFP_SOURCE_BASE || irq >= SCC_SOURCE_BASE)) {
459 printk ("%s: Slow irq requested for non-MFP source %d from %s\n",
460 __FUNCTION__, irq, devname);
461 return -EINVAL;
462 }
463
464 if (vectors[vector] == bad_interrupt) {
465 /* int has no handler yet */
466 irq_handler[irq].handler = handler;
467 irq_handler[irq].dev_id = dev_id;
468 irq_param[irq].flags = flags;
469 irq_param[irq].devname = devname;
470 vectors[vector] =
471 (flags == IRQ_TYPE_SLOW) ? slow_handlers[irq-STMFP_SOURCE_BASE] :
472 (flags == IRQ_TYPE_FAST) ? atari_fast_irq_handler :
473 atari_prio_irq_handler;
474 /* If MFP int, also enable and umask it */
475 atari_turnon_irq(irq);
476 atari_enable_irq(irq);
477
478 return 0;
479 }
480 else if (irq_param[irq].flags == flags) {
481 /* old handler is of same type -> handlers can be chained */
482 irq_node_t *node;
483 unsigned long flags;
484
485 local_irq_save(flags);
486
487 if (irq_handler[irq].handler != atari_call_irq_list) {
488 /* Only one handler yet, make a node for this first one */
489 if (!(node = new_irq_node()))
490 return -ENOMEM;
491 node->handler = irq_handler[irq].handler;
492 node->dev_id = irq_handler[irq].dev_id;
493 node->devname = irq_param[irq].devname;
494 node->next = NULL;
495
496 irq_handler[irq].handler = atari_call_irq_list;
497 irq_handler[irq].dev_id = node;
498 irq_param[irq].devname = "chained";
499 }
500
501 if (!(node = new_irq_node()))
502 return -ENOMEM;
503 node->handler = handler;
504 node->dev_id = dev_id;
505 node->devname = devname;
506 /* new handlers are put in front of the queue */
507 node->next = irq_handler[irq].dev_id;
508 irq_handler[irq].dev_id = node;
509
510 local_irq_restore(flags);
511 return 0;
512 } else {
513 printk ("%s: Irq %d allocated by other type int (call from %s)\n",
514 __FUNCTION__, irq, devname);
515 return -EBUSY;
516 }
517}
518
519void atari_free_irq(unsigned int irq, void *dev_id)
520{
521 unsigned long flags;
522 int vector;
523 irq_node_t **list, *node;
524
525 if (!IS_VALID_INTNO(irq)) {
526 printk("%s: Unknown irq %d\n", __FUNCTION__, irq);
527 return;
528 }
529
530 vector = IRQ_SOURCE_TO_VECTOR(irq);
531 if (vectors[vector] == bad_interrupt)
532 goto not_found;
533
534 local_irq_save(flags);
535
536 if (irq_handler[irq].handler != atari_call_irq_list) {
537 /* It's the only handler for the interrupt */
538 if (irq_handler[irq].dev_id != dev_id) {
539 local_irq_restore(flags);
540 goto not_found;
541 }
542 irq_handler[irq].handler = NULL;
543 irq_handler[irq].dev_id = NULL;
544 irq_param[irq].devname = NULL;
545 vectors[vector] = bad_interrupt;
546 /* If MFP int, also disable it */
547 atari_disable_irq(irq);
548 atari_turnoff_irq(irq);
549
550 local_irq_restore(flags);
551 return;
552 }
553
554 /* The interrupt is chained, find the irq on the list */
555 for(list = (irq_node_t **)&irq_handler[irq].dev_id; *list; list = &(*list)->next) {
556 if ((*list)->dev_id == dev_id) break;
557 }
558 if (!*list) {
559 local_irq_restore(flags);
560 goto not_found;
561 }
562
563 (*list)->handler = NULL; /* Mark it as free for reallocation */
564 *list = (*list)->next;
565
566 /* If there's now only one handler, unchain the interrupt, i.e. plug in
567 * the handler directly again and omit atari_call_irq_list */
568 node = (irq_node_t *)irq_handler[irq].dev_id;
569 if (node && !node->next) {
570 irq_handler[irq].handler = node->handler;
571 irq_handler[irq].dev_id = node->dev_id;
572 irq_param[irq].devname = node->devname;
573 node->handler = NULL; /* Mark it as free for reallocation */
574 }
575
576 local_irq_restore(flags);
577 return;
578
579not_found:
580 printk("%s: tried to remove invalid irq\n", __FUNCTION__);
581 return;
582}
583
584
585/* 425/*
586 * atari_register_vme_int() returns the number of a free interrupt vector for 426 * atari_register_vme_int() returns the number of a free interrupt vector for
587 * hardware with a programmable int vector (probably a VME board). 427 * hardware with a programmable int vector (probably a VME board).
@@ -591,58 +431,24 @@ unsigned long atari_register_vme_int(void)
591{ 431{
592 int i; 432 int i;
593 433
594 for(i = 0; i < 32; i++) 434 for (i = 0; i < 32; i++)
595 if((free_vme_vec_bitmap & (1 << i)) == 0) 435 if ((free_vme_vec_bitmap & (1 << i)) == 0)
596 break; 436 break;
597 437
598 if(i == 16) 438 if (i == 16)
599 return 0; 439 return 0;
600 440
601 free_vme_vec_bitmap |= 1 << i; 441 free_vme_vec_bitmap |= 1 << i;
602 return (VME_SOURCE_BASE + i); 442 return VME_SOURCE_BASE + i;
603} 443}
604 444
605 445
606void atari_unregister_vme_int(unsigned long irq) 446void atari_unregister_vme_int(unsigned long irq)
607{ 447{
608 if(irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) { 448 if (irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) {
609 irq -= VME_SOURCE_BASE; 449 irq -= VME_SOURCE_BASE;
610 free_vme_vec_bitmap &= ~(1 << irq); 450 free_vme_vec_bitmap &= ~(1 << irq);
611 } 451 }
612} 452}
613 453
614 454
615int show_atari_interrupts(struct seq_file *p, void *v)
616{
617 int i;
618
619 for (i = 0; i < NUM_INT_SOURCES; ++i) {
620 if (vectors[IRQ_SOURCE_TO_VECTOR(i)] == bad_interrupt)
621 continue;
622 if (i < STMFP_SOURCE_BASE)
623 seq_printf(p, "auto %2d: %10u ",
624 i, kstat_cpu(0).irqs[i]);
625 else
626 seq_printf(p, "vec $%02x: %10u ",
627 IRQ_SOURCE_TO_VECTOR(i),
628 kstat_cpu(0).irqs[i]);
629
630 if (irq_handler[i].handler != atari_call_irq_list) {
631 seq_printf(p, "%s\n", irq_param[i].devname);
632 }
633 else {
634 irq_node_t *n;
635 for( n = (irq_node_t *)irq_handler[i].dev_id; n; n = n->next ) {
636 seq_printf(p, "%s\n", n->devname);
637 if (n->next)
638 seq_puts(p, " " );
639 }
640 }
641 }
642 if (num_spurious)
643 seq_printf(p, "spurio.: %10u\n", num_spurious);
644
645 return 0;
646}
647
648
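
atari_register_vme_int() hands out a free vector from the VME range and atari_unregister_vme_int() returns it; with atari_request_irq() gone, a VME board driver would pair those helpers with the generic request_irq(). A sketch under those assumptions — the handler and device name are invented; the two helpers, IRQ_SOURCE_TO_VECTOR() and the handler signature appear in the code above:

#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/irq.h>
#include <asm/atariints.h>

static unsigned long vme_irq;

static irqreturn_t my_vme_handler(int irq, void *dev_id, struct pt_regs *fp)
{
	/* acknowledge the board here */
	return IRQ_HANDLED;
}

static int __init my_vme_attach(void)
{
	vme_irq = atari_register_vme_int();	/* returns 0 if no vector is free */
	if (!vme_irq)
		return -EBUSY;
	if (request_irq(vme_irq, my_vme_handler, 0, "my-vme-board", NULL)) {
		atari_unregister_vme_int(vme_irq);
		return -EBUSY;
	}
	/* program the board to raise vector IRQ_SOURCE_TO_VECTOR(vme_irq) */
	return 0;
}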
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 1012b08e5522..727289acad7e 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -57,12 +57,6 @@ static int atari_get_hardware_list(char *buffer);
57 57
58/* atari specific irq functions */ 58/* atari specific irq functions */
59extern void atari_init_IRQ (void); 59extern void atari_init_IRQ (void);
60extern int atari_request_irq (unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
61 unsigned long flags, const char *devname, void *dev_id);
62extern void atari_free_irq (unsigned int irq, void *dev_id);
63extern void atari_enable_irq (unsigned int);
64extern void atari_disable_irq (unsigned int);
65extern int show_atari_interrupts (struct seq_file *, void *);
66extern void atari_mksound( unsigned int count, unsigned int ticks ); 60extern void atari_mksound( unsigned int count, unsigned int ticks );
67#ifdef CONFIG_HEARTBEAT 61#ifdef CONFIG_HEARTBEAT
68static void atari_heartbeat( int on ); 62static void atari_heartbeat( int on );
@@ -232,13 +226,8 @@ void __init config_atari(void)
232 226
233 mach_sched_init = atari_sched_init; 227 mach_sched_init = atari_sched_init;
234 mach_init_IRQ = atari_init_IRQ; 228 mach_init_IRQ = atari_init_IRQ;
235 mach_request_irq = atari_request_irq;
236 mach_free_irq = atari_free_irq;
237 enable_irq = atari_enable_irq;
238 disable_irq = atari_disable_irq;
239 mach_get_model = atari_get_model; 229 mach_get_model = atari_get_model;
240 mach_get_hardware_list = atari_get_hardware_list; 230 mach_get_hardware_list = atari_get_hardware_list;
241 mach_get_irq_list = show_atari_interrupts;
242 mach_gettimeoffset = atari_gettimeoffset; 231 mach_gettimeoffset = atari_gettimeoffset;
243 mach_reset = atari_reset; 232 mach_reset = atari_reset;
244 mach_max_dma_address = 0xffffff; 233 mach_max_dma_address = 0xffffff;
diff --git a/arch/m68k/bvme6000/Makefile b/arch/m68k/bvme6000/Makefile
index 2348e6ceed1e..d8174004fe2f 100644
--- a/arch/m68k/bvme6000/Makefile
+++ b/arch/m68k/bvme6000/Makefile
@@ -2,4 +2,4 @@
2# Makefile for Linux arch/m68k/bvme6000 source directory 2# Makefile for Linux arch/m68k/bvme6000 source directory
3# 3#
4 4
5obj-y := config.o bvmeints.o rtc.o 5obj-y := config.o rtc.o
diff --git a/arch/m68k/bvme6000/bvmeints.c b/arch/m68k/bvme6000/bvmeints.c
deleted file mode 100644
index 298a8df02664..000000000000
--- a/arch/m68k/bvme6000/bvmeints.c
+++ /dev/null
@@ -1,160 +0,0 @@
1/*
2 * arch/m68k/bvme6000/bvmeints.c
3 *
4 * Copyright (C) 1997 Richard Hirst [richard@sleepie.demon.co.uk]
5 *
6 * based on amiints.c -- Amiga Linux interrupt handling code
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file README.legal in the main directory of this archive
10 * for more details.
11 *
12 */
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/seq_file.h>
18
19#include <asm/ptrace.h>
20#include <asm/system.h>
21#include <asm/irq.h>
22#include <asm/traps.h>
23
24static irqreturn_t bvme6000_defhand (int irq, void *dev_id, struct pt_regs *fp);
25
26/*
27 * This should ideally be 4 elements only, for speed.
28 */
29
30static struct {
31 irqreturn_t (*handler)(int, void *, struct pt_regs *);
32 unsigned long flags;
33 void *dev_id;
34 const char *devname;
35 unsigned count;
36} irq_tab[256];
37
38/*
39 * void bvme6000_init_IRQ (void)
40 *
41 * Parameters: None
42 *
43 * Returns: Nothing
44 *
45 * This function is called during kernel startup to initialize
46 * the bvme6000 IRQ handling routines.
47 */
48
49void bvme6000_init_IRQ (void)
50{
51 int i;
52
53 for (i = 0; i < 256; i++) {
54 irq_tab[i].handler = bvme6000_defhand;
55 irq_tab[i].flags = IRQ_FLG_STD;
56 irq_tab[i].dev_id = NULL;
57 irq_tab[i].devname = NULL;
58 irq_tab[i].count = 0;
59 }
60}
61
62int bvme6000_request_irq(unsigned int irq,
63 irqreturn_t (*handler)(int, void *, struct pt_regs *),
64 unsigned long flags, const char *devname, void *dev_id)
65{
66 if (irq > 255) {
67 printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
68 return -ENXIO;
69 }
70#if 0
71 /* Nothing special about auto-vectored devices for the BVME6000,
72 * but treat it specially to avoid changes elsewhere.
73 */
74
75 if (irq >= VEC_INT1 && irq <= VEC_INT7)
76 return cpu_request_irq(irq - VEC_SPUR, handler, flags,
77 devname, dev_id);
78#endif
79 if (!(irq_tab[irq].flags & IRQ_FLG_STD)) {
80 if (irq_tab[irq].flags & IRQ_FLG_LOCK) {
81 printk("%s: IRQ %d from %s is not replaceable\n",
82 __FUNCTION__, irq, irq_tab[irq].devname);
83 return -EBUSY;
84 }
85 if (flags & IRQ_FLG_REPLACE) {
86 printk("%s: %s can't replace IRQ %d from %s\n",
87 __FUNCTION__, devname, irq, irq_tab[irq].devname);
88 return -EBUSY;
89 }
90 }
91 irq_tab[irq].handler = handler;
92 irq_tab[irq].flags = flags;
93 irq_tab[irq].dev_id = dev_id;
94 irq_tab[irq].devname = devname;
95 return 0;
96}
97
98void bvme6000_free_irq(unsigned int irq, void *dev_id)
99{
100 if (irq > 255) {
101 printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
102 return;
103 }
104#if 0
105 if (irq >= VEC_INT1 && irq <= VEC_INT7) {
106 cpu_free_irq(irq - VEC_SPUR, dev_id);
107 return;
108 }
109#endif
110 if (irq_tab[irq].dev_id != dev_id)
111 printk("%s: Removing probably wrong IRQ %d from %s\n",
112 __FUNCTION__, irq, irq_tab[irq].devname);
113
114 irq_tab[irq].handler = bvme6000_defhand;
115 irq_tab[irq].flags = IRQ_FLG_STD;
116 irq_tab[irq].dev_id = NULL;
117 irq_tab[irq].devname = NULL;
118}
119
120irqreturn_t bvme6000_process_int (unsigned long vec, struct pt_regs *fp)
121{
122 if (vec > 255) {
123 printk ("bvme6000_process_int: Illegal vector %ld", vec);
124 return IRQ_NONE;
125 } else {
126 irq_tab[vec].count++;
127 irq_tab[vec].handler(vec, irq_tab[vec].dev_id, fp);
128 return IRQ_HANDLED;
129 }
130}
131
132int show_bvme6000_interrupts(struct seq_file *p, void *v)
133{
134 int i;
135
136 for (i = 0; i < 256; i++) {
137 if (irq_tab[i].count)
138 seq_printf(p, "Vec 0x%02x: %8d %s\n",
139 i, irq_tab[i].count,
140 irq_tab[i].devname ? irq_tab[i].devname : "free");
141 }
142 return 0;
143}
144
145
146static irqreturn_t bvme6000_defhand (int irq, void *dev_id, struct pt_regs *fp)
147{
148 printk ("Unknown interrupt 0x%02x\n", irq);
149 return IRQ_NONE;
150}
151
152void bvme6000_enable_irq (unsigned int irq)
153{
154}
155
156
157void bvme6000_disable_irq (unsigned int irq)
158{
159}
160
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index c90cb5fcc8ef..d1e916ae55a8 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -36,15 +36,8 @@
36#include <asm/machdep.h> 36#include <asm/machdep.h>
37#include <asm/bvme6000hw.h> 37#include <asm/bvme6000hw.h>
38 38
39extern irqreturn_t bvme6000_process_int (int level, struct pt_regs *regs);
40extern void bvme6000_init_IRQ (void);
41extern void bvme6000_free_irq (unsigned int, void *);
42extern int show_bvme6000_interrupts(struct seq_file *, void *);
43extern void bvme6000_enable_irq (unsigned int);
44extern void bvme6000_disable_irq (unsigned int);
45static void bvme6000_get_model(char *model); 39static void bvme6000_get_model(char *model);
46static int bvme6000_get_hardware_list(char *buffer); 40static int bvme6000_get_hardware_list(char *buffer);
47extern int bvme6000_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
48extern void bvme6000_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)); 41extern void bvme6000_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
49extern unsigned long bvme6000_gettimeoffset (void); 42extern unsigned long bvme6000_gettimeoffset (void);
50extern int bvme6000_hwclk (int, struct rtc_time *); 43extern int bvme6000_hwclk (int, struct rtc_time *);
@@ -100,6 +93,14 @@ static int bvme6000_get_hardware_list(char *buffer)
100 return 0; 93 return 0;
101} 94}
102 95
96/*
97 * This function is called during kernel startup to initialize
98 * the bvme6000 IRQ handling routines.
99 */
100static void bvme6000_init_IRQ(void)
101{
102 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
103}
103 104
104void __init config_bvme6000(void) 105void __init config_bvme6000(void)
105{ 106{
@@ -127,12 +128,6 @@ void __init config_bvme6000(void)
127 mach_hwclk = bvme6000_hwclk; 128 mach_hwclk = bvme6000_hwclk;
128 mach_set_clock_mmss = bvme6000_set_clock_mmss; 129 mach_set_clock_mmss = bvme6000_set_clock_mmss;
129 mach_reset = bvme6000_reset; 130 mach_reset = bvme6000_reset;
130 mach_free_irq = bvme6000_free_irq;
131 mach_process_int = bvme6000_process_int;
132 mach_get_irq_list = show_bvme6000_interrupts;
133 mach_request_irq = bvme6000_request_irq;
134 enable_irq = bvme6000_enable_irq;
135 disable_irq = bvme6000_disable_irq;
136 mach_get_model = bvme6000_get_model; 131 mach_get_model = bvme6000_get_model;
137 mach_get_hardware_list = bvme6000_get_hardware_list; 132 mach_get_hardware_list = bvme6000_get_hardware_list;
138 133
diff --git a/arch/m68k/hp300/Makefile b/arch/m68k/hp300/Makefile
index 89b6317899e3..288b9c67c9bf 100644
--- a/arch/m68k/hp300/Makefile
+++ b/arch/m68k/hp300/Makefile
@@ -2,4 +2,4 @@
2# Makefile for Linux arch/m68k/hp300 source directory 2# Makefile for Linux arch/m68k/hp300 source directory
3# 3#
4 4
5obj-y := ksyms.o config.o ints.o time.o reboot.o 5obj-y := ksyms.o config.o time.o reboot.o
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index 6d129eef370f..2ef271cd818b 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -21,7 +21,6 @@
21#include <asm/hp300hw.h> 21#include <asm/hp300hw.h>
22#include <asm/rtc.h> 22#include <asm/rtc.h>
23 23
24#include "ints.h"
25#include "time.h" 24#include "time.h"
26 25
27unsigned long hp300_model; 26unsigned long hp300_model;
@@ -64,8 +63,6 @@ static char *hp300_models[] __initdata = {
64static char hp300_model_name[13] = "HP9000/"; 63static char hp300_model_name[13] = "HP9000/";
65 64
66extern void hp300_reset(void); 65extern void hp300_reset(void);
67extern irqreturn_t (*hp300_default_handler[])(int, void *, struct pt_regs *);
68extern int show_hp300_interrupts(struct seq_file *, void *);
69#ifdef CONFIG_SERIAL_8250_CONSOLE 66#ifdef CONFIG_SERIAL_8250_CONSOLE
70extern int hp300_setup_serial_console(void) __init; 67extern int hp300_setup_serial_console(void) __init;
71#endif 68#endif
@@ -245,16 +242,16 @@ static unsigned int hp300_get_ss(void)
245 hp300_rtc_read(RTC_REG_SEC2); 242 hp300_rtc_read(RTC_REG_SEC2);
246} 243}
247 244
245static void __init hp300_init_IRQ(void)
246{
247}
248
248void __init config_hp300(void) 249void __init config_hp300(void)
249{ 250{
250 mach_sched_init = hp300_sched_init; 251 mach_sched_init = hp300_sched_init;
251 mach_init_IRQ = hp300_init_IRQ; 252 mach_init_IRQ = hp300_init_IRQ;
252 mach_request_irq = hp300_request_irq;
253 mach_free_irq = hp300_free_irq;
254 mach_get_model = hp300_get_model; 253 mach_get_model = hp300_get_model;
255 mach_get_irq_list = show_hp300_interrupts;
256 mach_gettimeoffset = hp300_gettimeoffset; 254 mach_gettimeoffset = hp300_gettimeoffset;
257 mach_default_handler = &hp300_default_handler;
258 mach_hwclk = hp300_hwclk; 255 mach_hwclk = hp300_hwclk;
259 mach_get_ss = hp300_get_ss; 256 mach_get_ss = hp300_get_ss;
260 mach_reset = hp300_reset; 257 mach_reset = hp300_reset;
diff --git a/arch/m68k/hp300/ints.c b/arch/m68k/hp300/ints.c
deleted file mode 100644
index 0c5bb403e893..000000000000
--- a/arch/m68k/hp300/ints.c
+++ /dev/null
@@ -1,175 +0,0 @@
1/*
2 * linux/arch/m68k/hp300/ints.c
3 *
4 * Copyright (C) 1998 Philip Blundell <philb@gnu.org>
5 *
6 * This file contains the HP300-specific interrupt handling.
7 * We only use the autovector interrupts, and therefore we need to
8 * maintain lists of devices sharing each ipl.
9 * [ipl list code added by Peter Maydell <pmaydell@chiark.greenend.org.uk> 06/1998]
10 */
11
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/init.h>
15#include <linux/sched.h>
16#include <linux/kernel_stat.h>
17#include <linux/interrupt.h>
18#include <linux/spinlock.h>
19#include <asm/machdep.h>
20#include <asm/irq.h>
21#include <asm/io.h>
22#include <asm/system.h>
23#include <asm/traps.h>
24#include <asm/ptrace.h>
25#include <asm/errno.h>
26#include "ints.h"
27
28/* Each ipl has a linked list of interrupt service routines.
29 * Service routines are added via hp300_request_irq() and removed
30 * via hp300_free_irq(). The device driver should set IRQ_FLG_FAST
31 * if it needs to be serviced early (eg FIFOless UARTs); this will
32 * cause it to be added at the front of the queue rather than
33 * the back.
34 * Currently IRQ_FLG_SLOW and flags=0 are treated identically; if
35 * we needed three levels of priority we could distinguish them
36 * but this strikes me as mildly ugly...
37 */
38
39/* we start with no entries in any list */
40static irq_node_t *hp300_irq_list[HP300_NUM_IRQS];
41
42static spinlock_t irqlist_lock;
43
44/* This handler receives all interrupts, dispatching them to the registered handlers */
45static irqreturn_t hp300_int_handler(int irq, void *dev_id, struct pt_regs *fp)
46{
47 irq_node_t *t;
48 /* We just give every handler on the chain an opportunity to handle
49 * the interrupt, in priority order.
50 */
51 for(t = hp300_irq_list[irq]; t; t=t->next)
52 t->handler(irq, t->dev_id, fp);
53 /* We could put in some accounting routines, checks for stray interrupts,
54 * etc, in here. Note that currently we can't tell whether or not
55 * a handler handles the interrupt, though.
56 */
57 return IRQ_HANDLED;
58}
59
60static irqreturn_t hp300_badint(int irq, void *dev_id, struct pt_regs *fp)
61{
62 num_spurious += 1;
63 return IRQ_NONE;
64}
65
66irqreturn_t (*hp300_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
67 [0] = hp300_badint,
68 [1] = hp300_int_handler,
69 [2] = hp300_int_handler,
70 [3] = hp300_int_handler,
71 [4] = hp300_int_handler,
72 [5] = hp300_int_handler,
73 [6] = hp300_int_handler,
74 [7] = hp300_int_handler
75};
76
77/* dev_id had better be unique to each handler because it's the only way we have
78 * to distinguish handlers when removing them...
79 *
80 * It would be pretty easy to support IRQ_FLG_LOCK (handler is not replacable)
81 * and IRQ_FLG_REPLACE (handler replaces existing one with this dev_id)
82 * if we wanted to. IRQ_FLG_FAST is needed for devices where interrupt latency
83 * matters (eg the dreaded FIFOless UART...)
84 */
85int hp300_request_irq(unsigned int irq,
86 irqreturn_t (*handler) (int, void *, struct pt_regs *),
87 unsigned long flags, const char *devname, void *dev_id)
88{
89 irq_node_t *t, *n = new_irq_node();
90
91 if (!n) /* oops, no free nodes */
92 return -ENOMEM;
93
94 spin_lock_irqsave(&irqlist_lock, flags);
95
96 if (!hp300_irq_list[irq]) {
97 /* no list yet */
98 hp300_irq_list[irq] = n;
99 n->next = NULL;
100 } else if (flags & IRQ_FLG_FAST) {
101 /* insert at head of list */
102 n->next = hp300_irq_list[irq];
103 hp300_irq_list[irq] = n;
104 } else {
105 /* insert at end of list */
106 for(t = hp300_irq_list[irq]; t->next; t = t->next)
107 /* do nothing */;
108 n->next = NULL;
109 t->next = n;
110 }
111
112 /* Fill in n appropriately */
113 n->handler = handler;
114 n->flags = flags;
115 n->dev_id = dev_id;
116 n->devname = devname;
117 spin_unlock_irqrestore(&irqlist_lock, flags);
118 return 0;
119}
120
121void hp300_free_irq(unsigned int irq, void *dev_id)
122{
123 irq_node_t *t;
124 unsigned long flags;
125
126 spin_lock_irqsave(&irqlist_lock, flags);
127
128 t = hp300_irq_list[irq];
129 if (!t) /* no handlers at all for that IRQ */
130 {
131 printk(KERN_ERR "hp300_free_irq: attempt to remove nonexistent handler for IRQ %d\n", irq);
132 spin_unlock_irqrestore(&irqlist_lock, flags);
133 return;
134 }
135
136 if (t->dev_id == dev_id)
137 { /* removing first handler on chain */
138 t->flags = IRQ_FLG_STD; /* we probably don't really need these */
139 t->dev_id = NULL;
140 t->devname = NULL;
141 t->handler = NULL; /* frees this irq_node_t */
142 hp300_irq_list[irq] = t->next;
143 spin_unlock_irqrestore(&irqlist_lock, flags);
144 return;
145 }
146
147 /* OK, must be removing from middle of the chain */
148
149 for (t = hp300_irq_list[irq]; t->next && t->next->dev_id != dev_id; t = t->next)
150 /* do nothing */;
151 if (!t->next)
152 {
153 printk(KERN_ERR "hp300_free_irq: attempt to remove nonexistent handler for IRQ %d\n", irq);
154 spin_unlock_irqrestore(&irqlist_lock, flags);
155 return;
156 }
157 /* remove the entry after t: */
158 t->next->flags = IRQ_FLG_STD;
159 t->next->dev_id = NULL;
160 t->next->devname = NULL;
161 t->next->handler = NULL;
162 t->next = t->next->next;
163
164 spin_unlock_irqrestore(&irqlist_lock, flags);
165}
166
167int show_hp300_interrupts(struct seq_file *p, void *v)
168{
169 return 0;
170}
171
172void __init hp300_init_IRQ(void)
173{
174 spin_lock_init(&irqlist_lock);
175}
diff --git a/arch/m68k/hp300/ints.h b/arch/m68k/hp300/ints.h
deleted file mode 100644
index 8cfabe2f3840..000000000000
--- a/arch/m68k/hp300/ints.h
+++ /dev/null
@@ -1,9 +0,0 @@
1extern void hp300_init_IRQ(void);
2extern void (*hp300_handlers[8])(int, void *, struct pt_regs *);
3extern void hp300_free_irq(unsigned int irq, void *dev_id);
4extern int hp300_request_irq(unsigned int irq,
5 irqreturn_t (*handler) (int, void *, struct pt_regs *),
6 unsigned long flags, const char *devname, void *dev_id);
7
8/* number of interrupts, includes 0 (what's that?) */
9#define HP300_NUM_IRQS 8
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index 8da5b1b31e61..7df05662b277 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -18,7 +18,6 @@
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/traps.h> 19#include <asm/traps.h>
20#include <asm/blinken.h> 20#include <asm/blinken.h>
21#include "ints.h"
22 21
23/* Clock hardware definitions */ 22/* Clock hardware definitions */
24 23
@@ -71,7 +70,7 @@ void __init hp300_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *
71 70
72 asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE)); 71 asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
73 72
74 cpu_request_irq(6, hp300_tick, IRQ_FLG_STD, "timer tick", vector); 73 request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector);
75 74
76 out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */ 75 out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
77 out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */ 76 out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 458925c471a1..dae609797dc0 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -9,8 +9,8 @@ else
9endif 9endif
10extra-y += vmlinux.lds 10extra-y += vmlinux.lds
11 11
12obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \ 12obj-y := entry.o process.o traps.o ints.o dma.o signal.o ptrace.o \
13 sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o 13 sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o
14 14
15obj-$(CONFIG_PCI) += bios32.o 15obj-$(CONFIG_PCI) += bios32.o
16obj-$(CONFIG_MODULES) += module.o 16obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
new file mode 100644
index 000000000000..fc449f8b2045
--- /dev/null
+++ b/arch/m68k/kernel/dma.c
@@ -0,0 +1,129 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#undef DEBUG
8
9#include <linux/dma-mapping.h>
10#include <linux/device.h>
11#include <linux/kernel.h>
12#include <linux/vmalloc.h>
13
14#include <asm/pgalloc.h>
15#include <asm/scatterlist.h>
16
17void *dma_alloc_coherent(struct device *dev, size_t size,
18 dma_addr_t *handle, int flag)
19{
20 struct page *page, **map;
21 pgprot_t pgprot;
22 void *addr;
23 int i, order;
24
25 pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
26
27 size = PAGE_ALIGN(size);
28 order = get_order(size);
29
30 page = alloc_pages(flag, order);
31 if (!page)
32 return NULL;
33
34 *handle = page_to_phys(page);
35 map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
36 if (!map) {
37 __free_pages(page, order);
38 return NULL;
39 }
40 split_page(page, order);
41
42 order = 1 << order;
43 size >>= PAGE_SHIFT;
44 map[0] = page;
45 for (i = 1; i < size; i++)
46 map[i] = page + i;
47 for (; i < order; i++)
48 __free_page(page + i);
49 pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
50 if (CPU_IS_040_OR_060)
51 pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
52 else
53 pgprot_val(pgprot) |= _PAGE_NOCACHE030;
54 addr = vmap(map, size, flag, pgprot);
55 kfree(map);
56
57 return addr;
58}
59EXPORT_SYMBOL(dma_alloc_coherent);
60
61void dma_free_coherent(struct device *dev, size_t size,
62 void *addr, dma_addr_t handle)
63{
64 pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
65 vfree(addr);
66}
67EXPORT_SYMBOL(dma_free_coherent);
68
69inline void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
70 enum dma_data_direction dir)
71{
72 switch (dir) {
73 case DMA_TO_DEVICE:
74 cache_push(handle, size);
75 break;
76 case DMA_FROM_DEVICE:
77 cache_clear(handle, size);
78 break;
79 default:
80 if (printk_ratelimit())
81 printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
82 break;
83 }
84}
85EXPORT_SYMBOL(dma_sync_single_for_device);
86
87void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
88 enum dma_data_direction dir)
89{
90 int i;
91
92 for (i = 0; i < nents; sg++, i++)
93 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
94}
95EXPORT_SYMBOL(dma_sync_sg_for_device);
96
97dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
98 enum dma_data_direction dir)
99{
100 dma_addr_t handle = virt_to_bus(addr);
101
102 dma_sync_single_for_device(dev, handle, size, dir);
103 return handle;
104}
105EXPORT_SYMBOL(dma_map_single);
106
107dma_addr_t dma_map_page(struct device *dev, struct page *page,
108 unsigned long offset, size_t size,
109 enum dma_data_direction dir)
110{
111 dma_addr_t handle = page_to_phys(page) + offset;
112
113 dma_sync_single_for_device(dev, handle, size, dir);
114 return handle;
115}
116EXPORT_SYMBOL(dma_map_page);
117
118int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
119 enum dma_data_direction dir)
120{
121 int i;
122
123 for (i = 0; i < nents; sg++, i++) {
124 sg->dma_address = page_to_phys(sg->page) + sg->offset;
125 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
126 }
127 return nents;
128}
129EXPORT_SYMBOL(dma_map_sg);
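
The new arch/m68k/kernel/dma.c provides the coherent and streaming entry points of the generic DMA API. A small, hedged usage sketch — the device pointer, buffer and sizes are placeholders; dma_alloc_coherent(), dma_free_coherent() and dma_map_single() have the signatures defined above, while dma_unmap_single() is assumed from the generic DMA API and is not part of this file:

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mm.h>

static void *desc_ring;
static dma_addr_t desc_dma, buf_dma;

static int my_setup_dma(struct device *dev, void *buf, size_t len)
{
	/* permanently mapped, cache-inhibited memory (vmap'ed with
	 * _PAGE_NOCACHE_S/_PAGE_NOCACHE030 as in dma_alloc_coherent() above) */
	desc_ring = dma_alloc_coherent(dev, PAGE_SIZE, &desc_dma, GFP_KERNEL);
	if (!desc_ring)
		return -ENOMEM;

	/* streaming mapping: cache_push() for DMA_TO_DEVICE, per
	 * dma_sync_single_for_device() above */
	buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	return 0;
}

static void my_teardown_dma(struct device *dev, void *buf, size_t len)
{
	dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, desc_ring, desc_dma);
}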
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 522079f8c2ba..449b62b30f45 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -45,9 +45,11 @@
45#include <asm/asm-offsets.h> 45#include <asm/asm-offsets.h>
46 46
47.globl system_call, buserr, trap, resume 47.globl system_call, buserr, trap, resume
48.globl inthandler, sys_call_table 48.globl sys_call_table
49.globl sys_fork, sys_clone, sys_vfork 49.globl sys_fork, sys_clone, sys_vfork
50.globl ret_from_interrupt, bad_interrupt 50.globl ret_from_interrupt, bad_interrupt
51.globl auto_irqhandler_fixup
52.globl user_irqvec_fixup, user_irqhandler_fixup
51 53
52.text 54.text
53ENTRY(buserr) 55ENTRY(buserr)
@@ -191,65 +193,29 @@ do_delayed_trace:
191 jbra resume_userspace 193 jbra resume_userspace
192 194
193 195
194#if 0 196/* This is the main interrupt handler for autovector interrupts */
195#ifdef CONFIG_AMIGA
196ami_inthandler:
197 addql #1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
198 SAVE_ALL_INT
199 GET_CURRENT(%d0)
200
201 bfextu %sp@(PT_VECTOR){#4,#12},%d0
202 movel %d0,%a0
203 addql #1,%a0@(kstat+STAT_IRQ-VECOFF(VEC_SPUR))
204 movel %a0@(autoirq_list-VECOFF(VEC_SPUR)),%a0
205
206| amiga vector int handler get the req mask instead of irq vector
207 lea CUSTOMBASE,%a1
208 movew %a1@(C_INTREQR),%d0
209 andw %a1@(C_INTENAR),%d0
210
211| prepare stack (push frame pointer, dev_id & req mask)
212 pea %sp@
213 movel %a0@(IRQ_DEVID),%sp@-
214 movel %d0,%sp@-
215 pea %pc@(ret_from_interrupt:w)
216 jbra @(IRQ_HANDLER,%a0)@(0)
217
218ENTRY(nmi_handler)
219 rte
220#endif
221#endif
222 197
223/* 198ENTRY(auto_inthandler)
224** This is the main interrupt handler, responsible for calling process_int()
225*/
226inthandler:
227 SAVE_ALL_INT 199 SAVE_ALL_INT
228 GET_CURRENT(%d0) 200 GET_CURRENT(%d0)
229 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) 201 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
230 | put exception # in d0 202 | put exception # in d0
231 bfextu %sp@(PT_VECTOR){#4,#10},%d0 203 bfextu %sp@(PT_VECTOR){#4,#10},%d0
204 subw #VEC_SPUR,%d0
232 205
233 movel %sp,%sp@- 206 movel %sp,%sp@-
234 movel %d0,%sp@- | put vector # on stack 207 movel %d0,%sp@- | put vector # on stack
235#if defined(MACH_Q40_ONLY) && defined(CONFIG_BLK_DEV_FD) 208auto_irqhandler_fixup = . + 2
236 btstb #4,0xff000000 | Q40 floppy needs very special treatment ... 209 jsr m68k_handle_int | process the IRQ
237 jbeq 1f 210 addql #8,%sp | pop parameters off stack
238 btstb #3,0xff000004
239 jbeq 1f
240 jbsr floppy_hardint
241 jbra 3f
2421:
243#endif
244 jbsr process_int | process the IRQ
2453: addql #8,%sp | pop parameters off stack
246 211
247ret_from_interrupt: 212ret_from_interrupt:
248 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) 213 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
249 jeq 1f 214 jeq ret_from_last_interrupt
2502: 2152: RESTORE_ALL
251 RESTORE_ALL 216
2521: 217 ALIGN
218ret_from_last_interrupt:
253 moveq #(~ALLOWINT>>8)&0xff,%d0 219 moveq #(~ALLOWINT>>8)&0xff,%d0
254 andb %sp@(PT_SR),%d0 220 andb %sp@(PT_SR),%d0
255 jne 2b 221 jne 2b
@@ -260,12 +226,42 @@ ret_from_interrupt:
260 pea ret_from_exception 226 pea ret_from_exception
261 jra do_softirq 227 jra do_softirq
262 228
229/* Handler for user defined interrupt vectors */
230
231ENTRY(user_inthandler)
232 SAVE_ALL_INT
233 GET_CURRENT(%d0)
234 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
235 | put exception # in d0
236 bfextu %sp@(PT_VECTOR){#4,#10},%d0
237user_irqvec_fixup = . + 2
238 subw #VEC_USER,%d0
239
240 movel %sp,%sp@-
241 movel %d0,%sp@- | put vector # on stack
242user_irqhandler_fixup = . + 2
243 jsr m68k_handle_int | process the IRQ
244 addql #8,%sp | pop parameters off stack
245
246 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
247 jeq ret_from_last_interrupt
248 RESTORE_ALL
263 249
264/* Handler for uninitialized and spurious interrupts */ 250/* Handler for uninitialized and spurious interrupts */
265 251
266bad_interrupt: 252ENTRY(bad_inthandler)
267 addql #1,num_spurious 253 SAVE_ALL_INT
268 rte 254 GET_CURRENT(%d0)
255 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
256
257 movel %sp,%sp@-
258 jsr handle_badint
259 addql #4,%sp
260
261 subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
262 jeq ret_from_last_interrupt
263 RESTORE_ALL
264
269 265
270ENTRY(sys_fork) 266ENTRY(sys_fork)
271 SAVE_SWITCH_STACK 267 SAVE_SWITCH_STACK
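
Note on the fixup labels above: `auto_irqhandler_fixup = . + 2`, `user_irqvec_fixup = . + 2` and `user_irqhandler_fixup = . + 2` each point at the operand word of the instruction that follows, so boot-time C code can retarget the dispatch without touching the assembly. A minimal sketch of how they are consumed, mirroring the ints.c change further down (not additional kernel code):

    /* the labels are exported as data arrays overlaying the instruction stream */
    extern u32 auto_irqhandler_fixup[];
    extern u16 user_irqvec_fixup[];

    static void patch_auto_dispatch(void (*handler)(unsigned int, struct pt_regs *))
    {
            *auto_irqhandler_fixup = (u32)handler;  /* retarget the jsr in auto_inthandler */
            flush_icache();                         /* drop any cached copy of the old operand */
    }
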
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 4b85514792e7..5a8344b93547 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -39,47 +39,40 @@
39#include <asm/traps.h> 39#include <asm/traps.h>
40#include <asm/page.h> 40#include <asm/page.h>
41#include <asm/machdep.h> 41#include <asm/machdep.h>
42#include <asm/cacheflush.h>
42 43
43#ifdef CONFIG_Q40 44#ifdef CONFIG_Q40
44#include <asm/q40ints.h> 45#include <asm/q40ints.h>
45#endif 46#endif
46 47
48extern u32 auto_irqhandler_fixup[];
49extern u32 user_irqhandler_fixup[];
50extern u16 user_irqvec_fixup[];
51
47/* table for system interrupt handlers */ 52/* table for system interrupt handlers */
48static irq_handler_t irq_list[SYS_IRQS]; 53static struct irq_node *irq_list[NR_IRQS];
49 54static struct irq_controller *irq_controller[NR_IRQS];
50static const char *default_names[SYS_IRQS] = { 55static int irq_depth[NR_IRQS];
51 [0] = "spurious int", 56
52 [1] = "int1 handler", 57static int m68k_first_user_vec;
53 [2] = "int2 handler", 58
54 [3] = "int3 handler", 59static struct irq_controller auto_irq_controller = {
55 [4] = "int4 handler", 60 .name = "auto",
56 [5] = "int5 handler", 61 .lock = SPIN_LOCK_UNLOCKED,
57 [6] = "int6 handler", 62 .startup = m68k_irq_startup,
58 [7] = "int7 handler" 63 .shutdown = m68k_irq_shutdown,
59}; 64};
60 65
61/* The number of spurious interrupts */ 66static struct irq_controller user_irq_controller = {
62volatile unsigned int num_spurious; 67 .name = "user",
68 .lock = SPIN_LOCK_UNLOCKED,
69 .startup = m68k_irq_startup,
70 .shutdown = m68k_irq_shutdown,
71};
63 72
64#define NUM_IRQ_NODES 100 73#define NUM_IRQ_NODES 100
65static irq_node_t nodes[NUM_IRQ_NODES]; 74static irq_node_t nodes[NUM_IRQ_NODES];
66 75
67static void dummy_enable_irq(unsigned int irq);
68static void dummy_disable_irq(unsigned int irq);
69static int dummy_request_irq(unsigned int irq,
70 irqreturn_t (*handler) (int, void *, struct pt_regs *),
71 unsigned long flags, const char *devname, void *dev_id);
72static void dummy_free_irq(unsigned int irq, void *dev_id);
73
74void (*enable_irq) (unsigned int) = dummy_enable_irq;
75void (*disable_irq) (unsigned int) = dummy_disable_irq;
76
77int (*mach_request_irq) (unsigned int, irqreturn_t (*)(int, void *, struct pt_regs *),
78 unsigned long, const char *, void *) = dummy_request_irq;
79void (*mach_free_irq) (unsigned int, void *) = dummy_free_irq;
80
81void init_irq_proc(void);
82
83/* 76/*
84 * void init_IRQ(void) 77 * void init_IRQ(void)
85 * 78 *
@@ -101,18 +94,70 @@ void __init init_IRQ(void)
101 hardirq_mask_is_broken(); 94 hardirq_mask_is_broken();
102 } 95 }
103 96
104 for (i = 0; i < SYS_IRQS; i++) { 97 for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
105 if (mach_default_handler) 98 irq_controller[i] = &auto_irq_controller;
106 irq_list[i].handler = (*mach_default_handler)[i]; 99
107 irq_list[i].flags = 0; 100 mach_init_IRQ();
108 irq_list[i].dev_id = NULL; 101}
109 irq_list[i].devname = default_names[i]; 102
110 } 103/**
104 * m68k_setup_auto_interrupt
105 * @handler: called from auto vector interrupts
106 *
 107 * Set up a handler to be called for auto vector interrupts instead of the
 108 * standard m68k_handle_int(); it will be called with irq numbers in the
 109 * range IRQ_AUTO_1 to IRQ_AUTO_7.
110 */
111void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
112{
113 if (handler)
114 *auto_irqhandler_fixup = (u32)handler;
115 flush_icache();
116}
111 117
112 for (i = 0; i < NUM_IRQ_NODES; i++) 118/**
113 nodes[i].handler = NULL; 119 * m68k_setup_user_interrupt
120 * @vec: first user vector interrupt to handle
121 * @cnt: number of active user vector interrupts
122 * @handler: called from user vector interrupts
123 *
 124 * Set up user vector interrupts. This includes activating the specified
 125 * range of interrupts; only then can these interrupts be requested (note:
 126 * this differs from auto vector interrupts). An optional handler can be
 127 * installed to be called instead of the default m68k_handle_int(); it will
 128 * be called with irq numbers starting from IRQ_USER.
129 */
130void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
131 void (*handler)(unsigned int, struct pt_regs *))
132{
133 int i;
134
135 m68k_first_user_vec = vec;
136 for (i = 0; i < cnt; i++)
137 irq_controller[IRQ_USER + i] = &user_irq_controller;
138 *user_irqvec_fixup = vec - IRQ_USER;
139 if (handler)
140 *user_irqhandler_fixup = (u32)handler;
141 flush_icache();
142}
143
144/**
145 * m68k_setup_irq_controller
146 * @contr: irq controller which controls specified irq
147 * @irq: first irq to be managed by the controller
148 *
 149 * Change the controller for the specified range of irqs, which will then be
 150 * used to manage them. Auto/user irqs already have a default controller,
 151 * which can also be replaced, but the replacement should probably still use
 152 * m68k_irq_startup/m68k_irq_shutdown.
153 */
154void m68k_setup_irq_controller(struct irq_controller *contr, unsigned int irq,
155 unsigned int cnt)
156{
157 int i;
114 158
115 mach_init_IRQ (); 159 for (i = 0; i < cnt; i++)
160 irq_controller[irq + i] = contr;
116} 161}
117 162
118irq_node_t *new_irq_node(void) 163irq_node_t *new_irq_node(void)
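
The three helpers above are the platform-facing half of the new framework. A hedged sketch of how a platform's mach_init_IRQ might use them (the foo_* names and the vector count are hypothetical; compare the mvme147 and mac conversions later in this patch):

    static void foo_enable(unsigned int irq)  { /* unmask the line in hardware */ }
    static void foo_disable(unsigned int irq) { /* mask the line in hardware */ }

    static struct irq_controller foo_irq_controller = {
            .name    = "foo",
            .lock    = SPIN_LOCK_UNLOCKED,
            .enable  = foo_enable,
            .disable = foo_disable,
    };

    void __init foo_init_IRQ(void)
    {
            /* accept 64 user vectors starting at VEC_USER, default dispatch */
            m68k_setup_user_interrupt(VEC_USER, 64, NULL);
            /* let foo_irq_controller mask/unmask IRQ_USER..IRQ_USER+63 */
            m68k_setup_irq_controller(&foo_irq_controller, IRQ_USER, 64);
    }
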
@@ -120,84 +165,183 @@ irq_node_t *new_irq_node(void)
120 irq_node_t *node; 165 irq_node_t *node;
121 short i; 166 short i;
122 167
123 for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--) 168 for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--) {
124 if (!node->handler) 169 if (!node->handler) {
170 memset(node, 0, sizeof(*node));
125 return node; 171 return node;
172 }
173 }
126 174
127 printk ("new_irq_node: out of nodes\n"); 175 printk ("new_irq_node: out of nodes\n");
128 return NULL; 176 return NULL;
129} 177}
130 178
131/* 179int setup_irq(unsigned int irq, struct irq_node *node)
132 * We will keep these functions until I have convinced Linus to move 180{
133 * the declaration of them from include/linux/sched.h to 181 struct irq_controller *contr;
134 * include/asm/irq.h. 182 struct irq_node **prev;
135 */ 183 unsigned long flags;
184
185 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
186 printk("%s: Incorrect IRQ %d from %s\n",
187 __FUNCTION__, irq, node->devname);
188 return -ENXIO;
189 }
190
191 spin_lock_irqsave(&contr->lock, flags);
192
193 prev = irq_list + irq;
194 if (*prev) {
195 /* Can't share interrupts unless both agree to */
196 if (!((*prev)->flags & node->flags & SA_SHIRQ)) {
197 spin_unlock_irqrestore(&contr->lock, flags);
198 return -EBUSY;
199 }
200 while (*prev)
201 prev = &(*prev)->next;
202 }
203
204 if (!irq_list[irq]) {
205 if (contr->startup)
206 contr->startup(irq);
207 else
208 contr->enable(irq);
209 }
210 node->next = NULL;
211 *prev = node;
212
213 spin_unlock_irqrestore(&contr->lock, flags);
214
215 return 0;
216}
217
136int request_irq(unsigned int irq, 218int request_irq(unsigned int irq,
137 irqreturn_t (*handler) (int, void *, struct pt_regs *), 219 irqreturn_t (*handler) (int, void *, struct pt_regs *),
138 unsigned long flags, const char *devname, void *dev_id) 220 unsigned long flags, const char *devname, void *dev_id)
139{ 221{
140 return mach_request_irq(irq, handler, flags, devname, dev_id); 222 struct irq_node *node;
223 int res;
224
225 node = new_irq_node();
226 if (!node)
227 return -ENOMEM;
228
229 node->handler = handler;
230 node->flags = flags;
231 node->dev_id = dev_id;
232 node->devname = devname;
233
234 res = setup_irq(irq, node);
235 if (res)
236 node->handler = NULL;
237
238 return res;
141} 239}
142 240
143EXPORT_SYMBOL(request_irq); 241EXPORT_SYMBOL(request_irq);
144 242
145void free_irq(unsigned int irq, void *dev_id) 243void free_irq(unsigned int irq, void *dev_id)
146{ 244{
147 mach_free_irq(irq, dev_id); 245 struct irq_controller *contr;
246 struct irq_node **p, *node;
247 unsigned long flags;
248
249 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
250 printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
251 return;
252 }
253
254 spin_lock_irqsave(&contr->lock, flags);
255
256 p = irq_list + irq;
257 while ((node = *p)) {
258 if (node->dev_id == dev_id)
259 break;
260 p = &node->next;
261 }
262
263 if (node) {
264 *p = node->next;
265 node->handler = NULL;
266 } else
267 printk("%s: Removing probably wrong IRQ %d\n",
268 __FUNCTION__, irq);
269
270 if (!irq_list[irq]) {
271 if (contr->shutdown)
272 contr->shutdown(irq);
273 else
274 contr->disable(irq);
275 }
276
277 spin_unlock_irqrestore(&contr->lock, flags);
148} 278}
149 279
150EXPORT_SYMBOL(free_irq); 280EXPORT_SYMBOL(free_irq);
151 281
152int cpu_request_irq(unsigned int irq, 282void enable_irq(unsigned int irq)
153 irqreturn_t (*handler)(int, void *, struct pt_regs *),
154 unsigned long flags, const char *devname, void *dev_id)
155{ 283{
156 if (irq < IRQ1 || irq > IRQ7) { 284 struct irq_controller *contr;
157 printk("%s: Incorrect IRQ %d from %s\n", 285 unsigned long flags;
158 __FUNCTION__, irq, devname);
159 return -ENXIO;
160 }
161 286
162#if 0 287 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
163 if (!(irq_list[irq].flags & IRQ_FLG_STD)) { 288 printk("%s: Incorrect IRQ %d\n",
164 if (irq_list[irq].flags & IRQ_FLG_LOCK) { 289 __FUNCTION__, irq);
165 printk("%s: IRQ %d from %s is not replaceable\n", 290 return;
166 __FUNCTION__, irq, irq_list[irq].devname);
167 return -EBUSY;
168 }
169 if (!(flags & IRQ_FLG_REPLACE)) {
170 printk("%s: %s can't replace IRQ %d from %s\n",
171 __FUNCTION__, devname, irq, irq_list[irq].devname);
172 return -EBUSY;
173 }
174 } 291 }
175#endif
176 292
177 irq_list[irq].handler = handler; 293 spin_lock_irqsave(&contr->lock, flags);
178 irq_list[irq].flags = flags; 294 if (irq_depth[irq]) {
179 irq_list[irq].dev_id = dev_id; 295 if (!--irq_depth[irq]) {
180 irq_list[irq].devname = devname; 296 if (contr->enable)
181 return 0; 297 contr->enable(irq);
298 }
299 } else
300 WARN_ON(1);
301 spin_unlock_irqrestore(&contr->lock, flags);
182} 302}
183 303
184void cpu_free_irq(unsigned int irq, void *dev_id) 304EXPORT_SYMBOL(enable_irq);
305
306void disable_irq(unsigned int irq)
185{ 307{
186 if (irq < IRQ1 || irq > IRQ7) { 308 struct irq_controller *contr;
187 printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq); 309 unsigned long flags;
310
311 if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
312 printk("%s: Incorrect IRQ %d\n",
313 __FUNCTION__, irq);
188 return; 314 return;
189 } 315 }
190 316
191 if (irq_list[irq].dev_id != dev_id) 317 spin_lock_irqsave(&contr->lock, flags);
192 printk("%s: Removing probably wrong IRQ %d from %s\n", 318 if (!irq_depth[irq]++) {
193 __FUNCTION__, irq, irq_list[irq].devname); 319 if (contr->disable)
320 contr->disable(irq);
321 }
322 spin_unlock_irqrestore(&contr->lock, flags);
323}
194 324
195 irq_list[irq].handler = (*mach_default_handler)[irq]; 325EXPORT_SYMBOL(disable_irq);
196 irq_list[irq].flags = 0; 326
197 irq_list[irq].dev_id = NULL; 327int m68k_irq_startup(unsigned int irq)
198 irq_list[irq].devname = default_names[irq]; 328{
329 if (irq <= IRQ_AUTO_7)
330 vectors[VEC_SPUR + irq] = auto_inthandler;
331 else
332 vectors[m68k_first_user_vec + irq - IRQ_USER] = user_inthandler;
333 return 0;
199} 334}
200 335
336void m68k_irq_shutdown(unsigned int irq)
337{
338 if (irq <= IRQ_AUTO_7)
339 vectors[VEC_SPUR + irq] = bad_inthandler;
340 else
341 vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
342}
343
344
201/* 345/*
202 * Do we need these probe functions on the m68k? 346 * Do we need these probe functions on the m68k?
203 * 347 *
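
setup_irq() above only allows sharing when every requester passes SA_SHIRQ, and enable_irq()/disable_irq() now nest through irq_depth[]. A short driver-side sketch of those semantics (the foo device, handler and irq number are made up):

    #include <linux/interrupt.h>
    #include <asm/irq.h>

    static irqreturn_t foo_handler(int irq, void *dev_id, struct pt_regs *regs)
    {
            return IRQ_HANDLED;
    }

    static int foo_attach(void *dev)
    {
            /* sharing works only if every requester on this line passes SA_SHIRQ */
            int err = request_irq(IRQ_USER + 3, foo_handler, SA_SHIRQ, "foo", dev);
            if (err)
                    return err;             /* -EBUSY, -ENXIO or -ENOMEM from above */

            disable_irq(IRQ_USER + 3);      /* irq_depth 0 -> 1, controller ->disable() runs */
            disable_irq(IRQ_USER + 3);      /* nested: depth 1 -> 2, no hardware change */
            enable_irq(IRQ_USER + 3);       /* depth 2 -> 1, line stays masked */
            enable_irq(IRQ_USER + 3);       /* depth 1 -> 0, controller ->enable() runs */
            return 0;
    }
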
@@ -225,58 +369,50 @@ int probe_irq_off (unsigned long irqs)
225 369
226EXPORT_SYMBOL(probe_irq_off); 370EXPORT_SYMBOL(probe_irq_off);
227 371
228static void dummy_enable_irq(unsigned int irq) 372unsigned int irq_canonicalize(unsigned int irq)
229{
230 printk("calling uninitialized enable_irq()\n");
231}
232
233static void dummy_disable_irq(unsigned int irq)
234{ 373{
235 printk("calling uninitialized disable_irq()\n"); 374#ifdef CONFIG_Q40
375 if (MACH_IS_Q40 && irq == 11)
376 irq = 10;
377#endif
378 return irq;
236} 379}
237 380
238static int dummy_request_irq(unsigned int irq, 381EXPORT_SYMBOL(irq_canonicalize);
239 irqreturn_t (*handler) (int, void *, struct pt_regs *),
240 unsigned long flags, const char *devname, void *dev_id)
241{
242 printk("calling uninitialized request_irq()\n");
243 return 0;
244}
245 382
246static void dummy_free_irq(unsigned int irq, void *dev_id) 383asmlinkage void m68k_handle_int(unsigned int irq, struct pt_regs *regs)
247{ 384{
248 printk("calling uninitialized disable_irq()\n"); 385 struct irq_node *node;
386
387 kstat_cpu(0).irqs[irq]++;
388 node = irq_list[irq];
389 do {
390 node->handler(irq, node->dev_id, regs);
391 node = node->next;
392 } while (node);
249} 393}
250 394
251asmlinkage void process_int(unsigned long vec, struct pt_regs *fp) 395asmlinkage void handle_badint(struct pt_regs *regs)
252{ 396{
253 if (vec >= VEC_INT1 && vec <= VEC_INT7 && !MACH_IS_BVME6000) { 397 kstat_cpu(0).irqs[0]++;
254 vec -= VEC_SPUR; 398 printk("unexpected interrupt from %u\n", regs->vector);
255 kstat_cpu(0).irqs[vec]++;
256 irq_list[vec].handler(vec, irq_list[vec].dev_id, fp);
257 } else {
258 if (mach_process_int)
259 mach_process_int(vec, fp);
260 else
261 panic("Can't process interrupt vector %ld\n", vec);
262 return;
263 }
264} 399}
265 400
266int show_interrupts(struct seq_file *p, void *v) 401int show_interrupts(struct seq_file *p, void *v)
267{ 402{
403 struct irq_controller *contr;
404 struct irq_node *node;
268 int i = *(loff_t *) v; 405 int i = *(loff_t *) v;
269 406
270 /* autovector interrupts */ 407 /* autovector interrupts */
271 if (i < SYS_IRQS) { 408 if (irq_list[i]) {
272 if (mach_default_handler) { 409 contr = irq_controller[i];
273 seq_printf(p, "auto %2d: %10u ", i, 410 node = irq_list[i];
274 i ? kstat_cpu(0).irqs[i] : num_spurious); 411 seq_printf(p, "%-8s %3u: %10u %s", contr->name, i, kstat_cpu(0).irqs[i], node->devname);
275 seq_puts(p, " "); 412 while ((node = node->next))
276 seq_printf(p, "%s\n", irq_list[i].devname); 413 seq_printf(p, ", %s", node->devname);
277 } 414 seq_puts(p, "\n");
278 } else if (i == SYS_IRQS) 415 }
279 mach_get_irq_list(p, v);
280 return 0; 416 return 0;
281} 417}
282 418
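
m68k_handle_int() is also the entry point for cascaded sources: a machine-level handler demultiplexes its hardware status and re-enters the generic path once per source, which is what the mac baboon/via/psc handlers below are converted to. A rough sketch of that pattern (foo_pending() and IRQ_FOO_BASE are hypothetical; the loop mirrors via1_irq()):

    static irqreturn_t foo_cascade(int irq, void *dev_id, struct pt_regs *regs)
    {
            unsigned int events = foo_pending();    /* read the chip's pending bits */
            int i, irq_bit;

            for (i = 0, irq_bit = 1; i < 8; i++, irq_bit <<= 1)
                    if (events & irq_bit)
                            m68k_handle_int(IRQ_FOO_BASE + i, regs);  /* walk the chained irq_nodes */
            return IRQ_HANDLED;
    }
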
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 5b7952ea2bae..1f5e1b5aeda4 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -57,8 +57,6 @@ EXPORT_SYMBOL(dump_thread);
57EXPORT_SYMBOL(strnlen); 57EXPORT_SYMBOL(strnlen);
58EXPORT_SYMBOL(strrchr); 58EXPORT_SYMBOL(strrchr);
59EXPORT_SYMBOL(strstr); 59EXPORT_SYMBOL(strstr);
60EXPORT_SYMBOL(enable_irq);
61EXPORT_SYMBOL(disable_irq);
62EXPORT_SYMBOL(kernel_thread); 60EXPORT_SYMBOL(kernel_thread);
63#ifdef CONFIG_VME 61#ifdef CONFIG_VME
64EXPORT_SYMBOL(vme_brdtype); 62EXPORT_SYMBOL(vme_brdtype);
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 750d5b3c971f..214a95f9f3ac 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -68,11 +68,8 @@ char m68k_debug_device[6] = "";
68void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *)) __initdata = NULL; 68void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *)) __initdata = NULL;
69/* machine dependent irq functions */ 69/* machine dependent irq functions */
70void (*mach_init_IRQ) (void) __initdata = NULL; 70void (*mach_init_IRQ) (void) __initdata = NULL;
71irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *);
72void (*mach_get_model) (char *model); 71void (*mach_get_model) (char *model);
73int (*mach_get_hardware_list) (char *buffer); 72int (*mach_get_hardware_list) (char *buffer);
74int (*mach_get_irq_list) (struct seq_file *, void *);
75irqreturn_t (*mach_process_int) (int, struct pt_regs *);
76/* machine dependent timer functions */ 73/* machine dependent timer functions */
77unsigned long (*mach_gettimeoffset) (void); 74unsigned long (*mach_gettimeoffset) (void);
78int (*mach_hwclk) (int, struct rtc_time*); 75int (*mach_hwclk) (int, struct rtc_time*);
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 837a88709902..e86de7b061cd 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -45,7 +45,6 @@
45asmlinkage void system_call(void); 45asmlinkage void system_call(void);
46asmlinkage void buserr(void); 46asmlinkage void buserr(void);
47asmlinkage void trap(void); 47asmlinkage void trap(void);
48asmlinkage void inthandler(void);
49asmlinkage void nmihandler(void); 48asmlinkage void nmihandler(void);
50#ifdef CONFIG_M68KFPU_EMU 49#ifdef CONFIG_M68KFPU_EMU
51asmlinkage void fpu_emu(void); 50asmlinkage void fpu_emu(void);
@@ -53,51 +52,7 @@ asmlinkage void fpu_emu(void);
53 52
54e_vector vectors[256] = { 53e_vector vectors[256] = {
55 [VEC_BUSERR] = buserr, 54 [VEC_BUSERR] = buserr,
56 [VEC_ADDRERR] = trap,
57 [VEC_ILLEGAL] = trap,
58 [VEC_ZERODIV] = trap,
59 [VEC_CHK] = trap,
60 [VEC_TRAP] = trap,
61 [VEC_PRIV] = trap,
62 [VEC_TRACE] = trap,
63 [VEC_LINE10] = trap,
64 [VEC_LINE11] = trap,
65 [VEC_RESV12] = trap,
66 [VEC_COPROC] = trap,
67 [VEC_FORMAT] = trap,
68 [VEC_UNINT] = trap,
69 [VEC_RESV16] = trap,
70 [VEC_RESV17] = trap,
71 [VEC_RESV18] = trap,
72 [VEC_RESV19] = trap,
73 [VEC_RESV20] = trap,
74 [VEC_RESV21] = trap,
75 [VEC_RESV22] = trap,
76 [VEC_RESV23] = trap,
77 [VEC_SPUR] = inthandler,
78 [VEC_INT1] = inthandler,
79 [VEC_INT2] = inthandler,
80 [VEC_INT3] = inthandler,
81 [VEC_INT4] = inthandler,
82 [VEC_INT5] = inthandler,
83 [VEC_INT6] = inthandler,
84 [VEC_INT7] = inthandler,
85 [VEC_SYS] = system_call, 55 [VEC_SYS] = system_call,
86 [VEC_TRAP1] = trap,
87 [VEC_TRAP2] = trap,
88 [VEC_TRAP3] = trap,
89 [VEC_TRAP4] = trap,
90 [VEC_TRAP5] = trap,
91 [VEC_TRAP6] = trap,
92 [VEC_TRAP7] = trap,
93 [VEC_TRAP8] = trap,
94 [VEC_TRAP9] = trap,
95 [VEC_TRAP10] = trap,
96 [VEC_TRAP11] = trap,
97 [VEC_TRAP12] = trap,
98 [VEC_TRAP13] = trap,
99 [VEC_TRAP14] = trap,
100 [VEC_TRAP15] = trap,
101}; 56};
102 57
103/* nmi handler for the Amiga */ 58/* nmi handler for the Amiga */
@@ -132,12 +87,15 @@ void __init trap_init (void)
132{ 87{
133 int i; 88 int i;
134 89
135 for (i = 48; i < 64; i++) 90 for (i = VEC_SPUR; i <= VEC_INT7; i++)
91 vectors[i] = bad_inthandler;
92
93 for (i = 0; i < VEC_USER; i++)
136 if (!vectors[i]) 94 if (!vectors[i])
137 vectors[i] = trap; 95 vectors[i] = trap;
138 96
139 for (i = 64; i < 256; i++) 97 for (i = VEC_USER; i < 256; i++)
140 vectors[i] = inthandler; 98 vectors[i] = bad_inthandler;
141 99
142#ifdef CONFIG_M68KFPU_EMU 100#ifdef CONFIG_M68KFPU_EMU
143 if (FPU_IS_EMU) 101 if (FPU_IS_EMU)
@@ -927,66 +885,88 @@ void show_trace(unsigned long *stack)
927void show_registers(struct pt_regs *regs) 885void show_registers(struct pt_regs *regs)
928{ 886{
929 struct frame *fp = (struct frame *)regs; 887 struct frame *fp = (struct frame *)regs;
888 mm_segment_t old_fs = get_fs();
889 u16 c, *cp;
930 unsigned long addr; 890 unsigned long addr;
931 int i; 891 int i;
932 892
893 print_modules();
894 printk("PC: [<%08lx>]",regs->pc);
895 print_symbol(" %s", regs->pc);
896 printk("\nSR: %04x SP: %p a2: %08lx\n",
897 regs->sr, regs, regs->a2);
898 printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
899 regs->d0, regs->d1, regs->d2, regs->d3);
900 printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
901 regs->d4, regs->d5, regs->a0, regs->a1);
902
903 printk("Process %s (pid: %d, task=%p)\n",
904 current->comm, current->pid, current);
933 addr = (unsigned long)&fp->un; 905 addr = (unsigned long)&fp->un;
934 printk("Frame format=%X ", fp->ptregs.format); 906 printk("Frame format=%X ", regs->format);
935 switch (fp->ptregs.format) { 907 switch (regs->format) {
936 case 0x2: 908 case 0x2:
937 printk("instr addr=%08lx\n", fp->un.fmt2.iaddr); 909 printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
938 addr += sizeof(fp->un.fmt2); 910 addr += sizeof(fp->un.fmt2);
939 break; 911 break;
940 case 0x3: 912 case 0x3:
941 printk("eff addr=%08lx\n", fp->un.fmt3.effaddr); 913 printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
942 addr += sizeof(fp->un.fmt3); 914 addr += sizeof(fp->un.fmt3);
943 break; 915 break;
944 case 0x4: 916 case 0x4:
945 printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n" 917 printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
946 : "eff addr=%08lx pc=%08lx\n"), 918 : "eff addr=%08lx pc=%08lx\n"),
947 fp->un.fmt4.effaddr, fp->un.fmt4.pc); 919 fp->un.fmt4.effaddr, fp->un.fmt4.pc);
948 addr += sizeof(fp->un.fmt4); 920 addr += sizeof(fp->un.fmt4);
949 break; 921 break;
950 case 0x7: 922 case 0x7:
951 printk("eff addr=%08lx ssw=%04x faddr=%08lx\n", 923 printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
952 fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr); 924 fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
953 printk("wb 1 stat/addr/data: %04x %08lx %08lx\n", 925 printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
954 fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0); 926 fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
955 printk("wb 2 stat/addr/data: %04x %08lx %08lx\n", 927 printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
956 fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d); 928 fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
957 printk("wb 3 stat/addr/data: %04x %08lx %08lx\n", 929 printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
958 fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d); 930 fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
959 printk("push data: %08lx %08lx %08lx %08lx\n", 931 printk("push data: %08lx %08lx %08lx %08lx\n",
960 fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2, 932 fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
961 fp->un.fmt7.pd3); 933 fp->un.fmt7.pd3);
962 addr += sizeof(fp->un.fmt7); 934 addr += sizeof(fp->un.fmt7);
963 break; 935 break;
964 case 0x9: 936 case 0x9:
965 printk("instr addr=%08lx\n", fp->un.fmt9.iaddr); 937 printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
966 addr += sizeof(fp->un.fmt9); 938 addr += sizeof(fp->un.fmt9);
967 break; 939 break;
968 case 0xa: 940 case 0xa:
969 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", 941 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
970 fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb, 942 fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
971 fp->un.fmta.daddr, fp->un.fmta.dobuf); 943 fp->un.fmta.daddr, fp->un.fmta.dobuf);
972 addr += sizeof(fp->un.fmta); 944 addr += sizeof(fp->un.fmta);
973 break; 945 break;
974 case 0xb: 946 case 0xb:
975 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n", 947 printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
976 fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb, 948 fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
977 fp->un.fmtb.daddr, fp->un.fmtb.dobuf); 949 fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
978 printk("baddr=%08lx dibuf=%08lx ver=%x\n", 950 printk("baddr=%08lx dibuf=%08lx ver=%x\n",
979 fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver); 951 fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
980 addr += sizeof(fp->un.fmtb); 952 addr += sizeof(fp->un.fmtb);
981 break; 953 break;
982 default: 954 default:
983 printk("\n"); 955 printk("\n");
984 } 956 }
985 show_stack(NULL, (unsigned long *)addr); 957 show_stack(NULL, (unsigned long *)addr);
986 958
987 printk("Code: "); 959 printk("Code:");
988 for (i = 0; i < 10; i++) 960 set_fs(KERNEL_DS);
989 printk("%04x ", 0xffff & ((short *) fp->ptregs.pc)[i]); 961 cp = (u16 *)regs->pc;
962 for (i = -8; i < 16; i++) {
963 if (get_user(c, cp + i) && i >= 0) {
964 printk(" Bad PC value.");
965 break;
966 }
967 printk(i ? " %04x" : " <%04x>", c);
968 }
969 set_fs(old_fs);
990 printk ("\n"); 970 printk ("\n");
991} 971}
992 972
@@ -1190,19 +1170,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
1190 1170
1191 console_verbose(); 1171 console_verbose();
1192 printk("%s: %08x\n",str,nr); 1172 printk("%s: %08x\n",str,nr);
1193 print_modules(); 1173 show_registers(fp);
1194 printk("PC: [<%08lx>]",fp->pc);
1195 print_symbol(" %s\n", fp->pc);
1196 printk("\nSR: %04x SP: %p a2: %08lx\n",
1197 fp->sr, fp, fp->a2);
1198 printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
1199 fp->d0, fp->d1, fp->d2, fp->d3);
1200 printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
1201 fp->d4, fp->d5, fp->a0, fp->a1);
1202
1203 printk("Process %s (pid: %d, stackpage=%08lx)\n",
1204 current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
1205 show_stack(NULL, (unsigned long *)fp);
1206 do_exit(SIGSEGV); 1174 do_exit(SIGSEGV);
1207} 1175}
1208 1176
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index b19b7dd9bd21..6eaa881793d1 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -81,7 +81,7 @@ irqreturn_t baboon_irq(int irq, void *dev_id, struct pt_regs *regs)
81 for (i = 0, irq_bit = 1 ; i < 3 ; i++, irq_bit <<= 1) { 81 for (i = 0, irq_bit = 1 ; i < 3 ; i++, irq_bit <<= 1) {
82 if (events & irq_bit/* & baboon_active*/) { 82 if (events & irq_bit/* & baboon_active*/) {
83 baboon_active &= ~irq_bit; 83 baboon_active &= ~irq_bit;
84 mac_do_irq_list(IRQ_BABOON_0 + i, regs); 84 m68k_handle_int(IRQ_BABOON_0 + i, regs);
85 baboon_active |= irq_bit; 85 baboon_active |= irq_bit;
86 baboon->mb_ifr &= ~irq_bit; 86 baboon->mb_ifr &= ~irq_bit;
87 } 87 }
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 19dce75711b1..5a9990e436bb 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -94,20 +94,6 @@ static void mac_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *))
94 via_init_clock(vector); 94 via_init_clock(vector);
95} 95}
96 96
97extern irqreturn_t mac_default_handler(int, void *, struct pt_regs *);
98
99irqreturn_t (*mac_handlers[8])(int, void *, struct pt_regs *)=
100{
101 mac_default_handler,
102 mac_default_handler,
103 mac_default_handler,
104 mac_default_handler,
105 mac_default_handler,
106 mac_default_handler,
107 mac_default_handler,
108 mac_default_handler
109};
110
111/* 97/*
112 * Parse a Macintosh-specific record in the bootinfo 98 * Parse a Macintosh-specific record in the bootinfo
113 */ 99 */
@@ -183,13 +169,7 @@ void __init config_mac(void)
183 169
184 mach_sched_init = mac_sched_init; 170 mach_sched_init = mac_sched_init;
185 mach_init_IRQ = mac_init_IRQ; 171 mach_init_IRQ = mac_init_IRQ;
186 mach_request_irq = mac_request_irq;
187 mach_free_irq = mac_free_irq;
188 enable_irq = mac_enable_irq;
189 disable_irq = mac_disable_irq;
190 mach_get_model = mac_get_model; 172 mach_get_model = mac_get_model;
191 mach_default_handler = &mac_handlers;
192 mach_get_irq_list = show_mac_interrupts;
193 mach_gettimeoffset = mac_gettimeoffset; 173 mach_gettimeoffset = mac_gettimeoffset;
194#warning move to adb/via init 174#warning move to adb/via init
195#if 0 175#if 0
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 9179a3798407..4c8ece7e64a3 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -317,7 +317,7 @@ void __init iop_register_interrupts(void)
317{ 317{
318 if (iop_ism_present) { 318 if (iop_ism_present) {
319 if (oss_present) { 319 if (oss_present) {
320 cpu_request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq, 320 request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
321 IRQ_FLG_LOCK, "ISM IOP", 321 IRQ_FLG_LOCK, "ISM IOP",
322 (void *) IOP_NUM_ISM); 322 (void *) IOP_NUM_ISM);
323 oss_irq_enable(IRQ_MAC_ADB); 323 oss_irq_enable(IRQ_MAC_ADB);
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 7a1600bd195d..694b14bb0de1 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -137,14 +137,6 @@
137#define DEBUG_SPURIOUS 137#define DEBUG_SPURIOUS
138#define SHUTUP_SONIC 138#define SHUTUP_SONIC
139 139
140/*
141 * The mac_irq_list array is an array of linked lists of irq_node_t nodes.
142 * Each node contains one handler to be called whenever the interrupt
143 * occurs, with fast handlers listed before slow handlers.
144 */
145
146irq_node_t *mac_irq_list[NUM_MAC_SOURCES];
147
148/* SCC interrupt mask */ 140/* SCC interrupt mask */
149 141
150static int scc_mask; 142static int scc_mask;
@@ -209,8 +201,8 @@ extern int baboon_irq_pending(int);
209 * SCC interrupt routines 201 * SCC interrupt routines
210 */ 202 */
211 203
212static void scc_irq_enable(int); 204static void scc_irq_enable(unsigned int);
213static void scc_irq_disable(int); 205static void scc_irq_disable(unsigned int);
214 206
215/* 207/*
216 * console_loglevel determines NMI handler function 208 * console_loglevel determines NMI handler function
@@ -221,21 +213,25 @@ irqreturn_t mac_debug_handler(int, void *, struct pt_regs *);
221 213
222/* #define DEBUG_MACINTS */ 214/* #define DEBUG_MACINTS */
223 215
216static void mac_enable_irq(unsigned int irq);
217static void mac_disable_irq(unsigned int irq);
218
219static struct irq_controller mac_irq_controller = {
220 .name = "mac",
221 .lock = SPIN_LOCK_UNLOCKED,
222 .enable = mac_enable_irq,
223 .disable = mac_disable_irq,
224};
225
224void mac_init_IRQ(void) 226void mac_init_IRQ(void)
225{ 227{
226 int i;
227
228#ifdef DEBUG_MACINTS 228#ifdef DEBUG_MACINTS
229 printk("mac_init_IRQ(): Setting things up...\n"); 229 printk("mac_init_IRQ(): Setting things up...\n");
230#endif 230#endif
231 /* Initialize the IRQ handler lists. Initially each list is empty, */
232
233 for (i = 0; i < NUM_MAC_SOURCES; i++) {
234 mac_irq_list[i] = NULL;
235 }
236
237 scc_mask = 0; 231 scc_mask = 0;
238 232
233 m68k_setup_irq_controller(&mac_irq_controller, IRQ_USER,
234 NUM_MAC_SOURCES - IRQ_USER);
239 /* Make sure the SONIC interrupt is cleared or things get ugly */ 235 /* Make sure the SONIC interrupt is cleared or things get ugly */
240#ifdef SHUTUP_SONIC 236#ifdef SHUTUP_SONIC
241 printk("Killing onboard sonic... "); 237 printk("Killing onboard sonic... ");
@@ -252,15 +248,16 @@ void mac_init_IRQ(void)
252 * at levels 1-7. Most of the work is done elsewhere. 248 * at levels 1-7. Most of the work is done elsewhere.
253 */ 249 */
254 250
255 if (oss_present) { 251 if (oss_present)
256 oss_register_interrupts(); 252 oss_register_interrupts();
257 } else { 253 else
258 via_register_interrupts(); 254 via_register_interrupts();
259 } 255 if (psc_present)
260 if (psc_present) psc_register_interrupts(); 256 psc_register_interrupts();
261 if (baboon_present) baboon_register_interrupts(); 257 if (baboon_present)
258 baboon_register_interrupts();
262 iop_register_interrupts(); 259 iop_register_interrupts();
263 cpu_request_irq(7, mac_nmi_handler, IRQ_FLG_LOCK, "NMI", 260 request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
264 mac_nmi_handler); 261 mac_nmi_handler);
265#ifdef DEBUG_MACINTS 262#ifdef DEBUG_MACINTS
266 printk("mac_init_IRQ(): Done!\n"); 263 printk("mac_init_IRQ(): Done!\n");
@@ -268,104 +265,6 @@ void mac_init_IRQ(void)
268} 265}
269 266
270/* 267/*
271 * Routines to work with irq_node_t's on linked lists lifted from
272 * the Amiga code written by Roman Zippel.
273 */
274
275static inline void mac_insert_irq(irq_node_t **list, irq_node_t *node)
276{
277 unsigned long flags;
278 irq_node_t *cur;
279
280 if (!node->dev_id)
281 printk("%s: Warning: dev_id of %s is zero\n",
282 __FUNCTION__, node->devname);
283
284 local_irq_save(flags);
285
286 cur = *list;
287
288 if (node->flags & IRQ_FLG_FAST) {
289 node->flags &= ~IRQ_FLG_SLOW;
290 while (cur && cur->flags & IRQ_FLG_FAST) {
291 list = &cur->next;
292 cur = cur->next;
293 }
294 } else if (node->flags & IRQ_FLG_SLOW) {
295 while (cur) {
296 list = &cur->next;
297 cur = cur->next;
298 }
299 } else {
300 while (cur && !(cur->flags & IRQ_FLG_SLOW)) {
301 list = &cur->next;
302 cur = cur->next;
303 }
304 }
305
306 node->next = cur;
307 *list = node;
308
309 local_irq_restore(flags);
310}
311
312static inline void mac_delete_irq(irq_node_t **list, void *dev_id)
313{
314 unsigned long flags;
315 irq_node_t *node;
316
317 local_irq_save(flags);
318
319 for (node = *list; node; list = &node->next, node = *list) {
320 if (node->dev_id == dev_id) {
321 *list = node->next;
322 /* Mark it as free. */
323 node->handler = NULL;
324 local_irq_restore(flags);
325 return;
326 }
327 }
328 local_irq_restore(flags);
329 printk ("%s: tried to remove invalid irq\n", __FUNCTION__);
330}
331
332/*
333 * Call all the handlers for a given interrupt. Fast handlers are called
334 * first followed by slow handlers.
335 *
336 * This code taken from the original Amiga code written by Roman Zippel.
337 */
338
339void mac_do_irq_list(int irq, struct pt_regs *fp)
340{
341 irq_node_t *node, *slow_nodes;
342 unsigned long flags;
343
344 kstat_cpu(0).irqs[irq]++;
345
346#ifdef DEBUG_SPURIOUS
347 if (!mac_irq_list[irq] && (console_loglevel > 7)) {
348 printk("mac_do_irq_list: spurious interrupt %d!\n", irq);
349 return;
350 }
351#endif
352
353 /* serve first fast and normal handlers */
354 for (node = mac_irq_list[irq];
355 node && (!(node->flags & IRQ_FLG_SLOW));
356 node = node->next)
357 node->handler(irq, node->dev_id, fp);
358 if (!node) return;
359 local_save_flags(flags);
360 local_irq_restore((flags & ~0x0700) | (fp->sr & 0x0700));
361 /* if slow handlers exists, serve them now */
362 slow_nodes = node;
363 for (; node; node = node->next) {
364 node->handler(irq, node->dev_id, fp);
365 }
366}
367
368/*
369 * mac_enable_irq - enable an interrupt source 268 * mac_enable_irq - enable an interrupt source
370 * mac_disable_irq - disable an interrupt source 269 * mac_disable_irq - disable an interrupt source
371 * mac_clear_irq - clears a pending interrupt 270 * mac_clear_irq - clears a pending interrupt
@@ -374,276 +273,124 @@ void mac_do_irq_list(int irq, struct pt_regs *fp)
374 * These routines are just dispatchers to the VIA/OSS/PSC routines. 273 * These routines are just dispatchers to the VIA/OSS/PSC routines.
375 */ 274 */
376 275
377void mac_enable_irq (unsigned int irq) 276static void mac_enable_irq(unsigned int irq)
378{ 277{
379 int irq_src = IRQ_SRC(irq); 278 int irq_src = IRQ_SRC(irq);
380 279
381 switch(irq_src) { 280 switch(irq_src) {
382 case 1: via_irq_enable(irq); 281 case 1:
383 break; 282 via_irq_enable(irq);
384 case 2: 283 break;
385 case 7: if (oss_present) { 284 case 2:
386 oss_irq_enable(irq); 285 case 7:
387 } else { 286 if (oss_present)
388 via_irq_enable(irq); 287 oss_irq_enable(irq);
389 } 288 else
390 break; 289 via_irq_enable(irq);
391 case 3: 290 break;
392 case 4: 291 case 3:
393 case 5: 292 case 4:
394 case 6: if (psc_present) { 293 case 5:
395 psc_irq_enable(irq); 294 case 6:
396 } else if (oss_present) { 295 if (psc_present)
397 oss_irq_enable(irq); 296 psc_irq_enable(irq);
398 } else if (irq_src == 4) { 297 else if (oss_present)
399 scc_irq_enable(irq); 298 oss_irq_enable(irq);
400 } 299 else if (irq_src == 4)
401 break; 300 scc_irq_enable(irq);
402 case 8: if (baboon_present) { 301 break;
403 baboon_irq_enable(irq); 302 case 8:
404 } 303 if (baboon_present)
405 break; 304 baboon_irq_enable(irq);
305 break;
406 } 306 }
407} 307}
408 308
409void mac_disable_irq (unsigned int irq) 309static void mac_disable_irq(unsigned int irq)
410{ 310{
411 int irq_src = IRQ_SRC(irq); 311 int irq_src = IRQ_SRC(irq);
412 312
413 switch(irq_src) { 313 switch(irq_src) {
414 case 1: via_irq_disable(irq); 314 case 1:
415 break; 315 via_irq_disable(irq);
416 case 2: 316 break;
417 case 7: if (oss_present) { 317 case 2:
418 oss_irq_disable(irq); 318 case 7:
419 } else { 319 if (oss_present)
420 via_irq_disable(irq); 320 oss_irq_disable(irq);
421 } 321 else
422 break; 322 via_irq_disable(irq);
423 case 3: 323 break;
424 case 4: 324 case 3:
425 case 5: 325 case 4:
426 case 6: if (psc_present) { 326 case 5:
427 psc_irq_disable(irq); 327 case 6:
428 } else if (oss_present) { 328 if (psc_present)
429 oss_irq_disable(irq); 329 psc_irq_disable(irq);
430 } else if (irq_src == 4) { 330 else if (oss_present)
431 scc_irq_disable(irq); 331 oss_irq_disable(irq);
432 } 332 else if (irq_src == 4)
433 break; 333 scc_irq_disable(irq);
434 case 8: if (baboon_present) { 334 break;
435 baboon_irq_disable(irq); 335 case 8:
436 } 336 if (baboon_present)
437 break; 337 baboon_irq_disable(irq);
338 break;
438 } 339 }
439} 340}
440 341
441void mac_clear_irq( unsigned int irq ) 342void mac_clear_irq(unsigned int irq)
442{ 343{
443 switch(IRQ_SRC(irq)) { 344 switch(IRQ_SRC(irq)) {
444 case 1: via_irq_clear(irq); 345 case 1:
445 break; 346 via_irq_clear(irq);
446 case 2: 347 break;
447 case 7: if (oss_present) { 348 case 2:
448 oss_irq_clear(irq); 349 case 7:
449 } else { 350 if (oss_present)
450 via_irq_clear(irq); 351 oss_irq_clear(irq);
451 } 352 else
452 break; 353 via_irq_clear(irq);
453 case 3: 354 break;
454 case 4: 355 case 3:
455 case 5: 356 case 4:
456 case 6: if (psc_present) { 357 case 5:
457 psc_irq_clear(irq); 358 case 6:
458 } else if (oss_present) { 359 if (psc_present)
459 oss_irq_clear(irq); 360 psc_irq_clear(irq);
460 } 361 else if (oss_present)
461 break; 362 oss_irq_clear(irq);
462 case 8: if (baboon_present) { 363 break;
463 baboon_irq_clear(irq); 364 case 8:
464 } 365 if (baboon_present)
465 break; 366 baboon_irq_clear(irq);
367 break;
466 } 368 }
467} 369}
468 370
469int mac_irq_pending( unsigned int irq ) 371int mac_irq_pending(unsigned int irq)
470{ 372{
471 switch(IRQ_SRC(irq)) { 373 switch(IRQ_SRC(irq)) {
472 case 1: return via_irq_pending(irq); 374 case 1:
473 case 2: 375 return via_irq_pending(irq);
474 case 7: if (oss_present) { 376 case 2:
475 return oss_irq_pending(irq); 377 case 7:
476 } else { 378 if (oss_present)
477 return via_irq_pending(irq); 379 return oss_irq_pending(irq);
478 } 380 else
479 case 3: 381 return via_irq_pending(irq);
480 case 4: 382 case 3:
481 case 5: 383 case 4:
482 case 6: if (psc_present) { 384 case 5:
483 return psc_irq_pending(irq); 385 case 6:
484 } else if (oss_present) { 386 if (psc_present)
485 return oss_irq_pending(irq); 387 return psc_irq_pending(irq);
486 } 388 else if (oss_present)
487 } 389 return oss_irq_pending(irq);
488 return 0;
489}
490
491/*
492 * Add an interrupt service routine to an interrupt source.
493 * Returns 0 on success.
494 *
495 * FIXME: You can register interrupts on nonexistent source (ie PSC4 on a
496 * non-PSC machine). We should return -EINVAL in those cases.
497 */
498
499int mac_request_irq(unsigned int irq,
500 irqreturn_t (*handler)(int, void *, struct pt_regs *),
501 unsigned long flags, const char *devname, void *dev_id)
502{
503 irq_node_t *node;
504
505#ifdef DEBUG_MACINTS
506 printk ("%s: irq %d requested for %s\n", __FUNCTION__, irq, devname);
507#endif
508
509 if (irq < VIA1_SOURCE_BASE) {
510 return cpu_request_irq(irq, handler, flags, devname, dev_id);
511 } 390 }
512
513 if (irq >= NUM_MAC_SOURCES) {
514 printk ("%s: unknown irq %d requested by %s\n",
515 __FUNCTION__, irq, devname);
516 }
517
518 /* Get a node and stick it onto the right list */
519
520 if (!(node = new_irq_node())) return -ENOMEM;
521
522 node->handler = handler;
523 node->flags = flags;
524 node->dev_id = dev_id;
525 node->devname = devname;
526 node->next = NULL;
527 mac_insert_irq(&mac_irq_list[irq], node);
528
529 /* Now enable the IRQ source */
530
531 mac_enable_irq(irq);
532
533 return 0; 391 return 0;
534} 392}
535 393
536/*
537 * Removes an interrupt service routine from an interrupt source.
538 */
539
540void mac_free_irq(unsigned int irq, void *dev_id)
541{
542#ifdef DEBUG_MACINTS
543 printk ("%s: irq %d freed by %p\n", __FUNCTION__, irq, dev_id);
544#endif
545
546 if (irq < VIA1_SOURCE_BASE) {
547 cpu_free_irq(irq, dev_id);
548 return;
549 }
550
551 if (irq >= NUM_MAC_SOURCES) {
552 printk ("%s: unknown irq %d freed\n",
553 __FUNCTION__, irq);
554 return;
555 }
556
557 mac_delete_irq(&mac_irq_list[irq], dev_id);
558
559 /* If the list for this interrupt is */
560 /* empty then disable the source. */
561
562 if (!mac_irq_list[irq]) {
563 mac_disable_irq(irq);
564 }
565}
566
567/*
568 * Generate a pretty listing for /proc/interrupts
569 *
570 * By the time we're called the autovector interrupt list has already been
571 * generated, so we just need to do the machspec interrupts.
572 *
573 * 990506 (jmt) - rewritten to handle chained machspec interrupt handlers.
574 * Also removed display of num_spurious it is already
575 * displayed for us as autovector irq 0.
576 */
577
578int show_mac_interrupts(struct seq_file *p, void *v)
579{
580 int i;
581 irq_node_t *node;
582 char *base;
583
584 /* Don't do Nubus interrupts in this loop; we do them separately */
585 /* below so that we can print slot numbers instead of IRQ numbers */
586
587 for (i = VIA1_SOURCE_BASE ; i < NUM_MAC_SOURCES ; ++i) {
588
589 /* Nonexistant interrupt or nothing registered; skip it. */
590
591 if ((node = mac_irq_list[i]) == NULL) continue;
592 if (node->flags & IRQ_FLG_STD) continue;
593
594 base = "";
595 switch(IRQ_SRC(i)) {
596 case 1: base = "via1";
597 break;
598 case 2: if (oss_present) {
599 base = "oss";
600 } else {
601 base = "via2";
602 }
603 break;
604 case 3:
605 case 4:
606 case 5:
607 case 6: if (psc_present) {
608 base = "psc";
609 } else if (oss_present) {
610 base = "oss";
611 } else {
612 if (IRQ_SRC(i) == 4) base = "scc";
613 }
614 break;
615 case 7: base = "nbus";
616 break;
617 case 8: base = "bbn";
618 break;
619 }
620 seq_printf(p, "%4s %2d: %10u ", base, i, kstat_cpu(0).irqs[i]);
621
622 do {
623 if (node->flags & IRQ_FLG_FAST) {
624 seq_puts(p, "F ");
625 } else if (node->flags & IRQ_FLG_SLOW) {
626 seq_puts(p, "S ");
627 } else {
628 seq_puts(p, " ");
629 }
630 seq_printf(p, "%s\n", node->devname);
631 if ((node = node->next)) {
632 seq_puts(p, " ");
633 }
634 } while(node);
635
636 }
637 return 0;
638}
639
640void mac_default_handler(int irq, void *dev_id, struct pt_regs *regs)
641{
642#ifdef DEBUG_SPURIOUS
643 printk("Unexpected IRQ %d on device %p\n", irq, dev_id);
644#endif
645}
646
647static int num_debug[8]; 394static int num_debug[8];
648 395
649irqreturn_t mac_debug_handler(int irq, void *dev_id, struct pt_regs *regs) 396irqreturn_t mac_debug_handler(int irq, void *dev_id, struct pt_regs *regs)
@@ -683,7 +430,7 @@ irqreturn_t mac_nmi_handler(int irq, void *dev_id, struct pt_regs *fp)
683 while (nmi_hold == 1) 430 while (nmi_hold == 1)
684 udelay(1000); 431 udelay(1000);
685 432
686 if ( console_loglevel >= 8 ) { 433 if (console_loglevel >= 8) {
687#if 0 434#if 0
688 show_state(); 435 show_state();
689 printk("PC: %08lx\nSR: %04x SP: %p\n", fp->pc, fp->sr, fp); 436 printk("PC: %08lx\nSR: %04x SP: %p\n", fp->pc, fp->sr, fp);
@@ -712,14 +459,16 @@ irqreturn_t mac_nmi_handler(int irq, void *dev_id, struct pt_regs *fp)
712 * done in hardware (only the PSC can do that.) 459 * done in hardware (only the PSC can do that.)
713 */ 460 */
714 461
715static void scc_irq_enable(int irq) { 462static void scc_irq_enable(unsigned int irq)
716 int irq_idx = IRQ_IDX(irq); 463{
464 int irq_idx = IRQ_IDX(irq);
717 465
718 scc_mask |= (1 << irq_idx); 466 scc_mask |= (1 << irq_idx);
719} 467}
720 468
721static void scc_irq_disable(int irq) { 469static void scc_irq_disable(unsigned int irq)
722 int irq_idx = IRQ_IDX(irq); 470{
471 int irq_idx = IRQ_IDX(irq);
723 472
724 scc_mask &= ~(1 << irq_idx); 473 scc_mask &= ~(1 << irq_idx);
725} 474}
@@ -754,6 +503,8 @@ void mac_scc_dispatch(int irq, void *dev_id, struct pt_regs *regs)
754 /* and since they're autovector interrupts they */ 503 /* and since they're autovector interrupts they */
755 /* pretty much kill the system. */ 504 /* pretty much kill the system. */
756 505
757 if (reg & 0x38) mac_do_irq_list(IRQ_SCCA, regs); 506 if (reg & 0x38)
758 if (reg & 0x07) mac_do_irq_list(IRQ_SCCB, regs); 507 m68k_handle_int(IRQ_SCCA, regs);
508 if (reg & 0x07)
509 m68k_handle_int(IRQ_SCCB, regs);
759} 510}
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 333547692724..63e04365191f 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -67,15 +67,15 @@ void __init oss_init(void)
67 67
68void __init oss_register_interrupts(void) 68void __init oss_register_interrupts(void)
69{ 69{
70 cpu_request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK, 70 request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
71 "scsi", (void *) oss); 71 "scsi", (void *) oss);
72 cpu_request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK, 72 request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
73 "scc", mac_scc_dispatch); 73 "scc", mac_scc_dispatch);
74 cpu_request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK, 74 request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
75 "nubus", (void *) oss); 75 "nubus", (void *) oss);
76 cpu_request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK, 76 request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
77 "sound", (void *) oss); 77 "sound", (void *) oss);
78 cpu_request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK, 78 request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
79 "via1", (void *) via1); 79 "via1", (void *) via1);
80} 80}
81 81
@@ -113,7 +113,7 @@ irqreturn_t oss_irq(int irq, void *dev_id, struct pt_regs *regs)
113 oss->irq_pending &= ~OSS_IP_SOUND; 113 oss->irq_pending &= ~OSS_IP_SOUND;
114 } else if (events & OSS_IP_SCSI) { 114 } else if (events & OSS_IP_SCSI) {
115 oss->irq_level[OSS_SCSI] = OSS_IRQLEV_DISABLED; 115 oss->irq_level[OSS_SCSI] = OSS_IRQLEV_DISABLED;
116 mac_do_irq_list(IRQ_MAC_SCSI, regs); 116 m68k_handle_int(IRQ_MAC_SCSI, regs);
117 oss->irq_pending &= ~OSS_IP_SCSI; 117 oss->irq_pending &= ~OSS_IP_SCSI;
118 oss->irq_level[OSS_SCSI] = OSS_IRQLEV_SCSI; 118 oss->irq_level[OSS_SCSI] = OSS_IRQLEV_SCSI;
119 } else { 119 } else {
@@ -146,7 +146,7 @@ irqreturn_t oss_nubus_irq(int irq, void *dev_id, struct pt_regs *regs)
146 for (i = 0, irq_bit = 1 ; i < 6 ; i++, irq_bit <<= 1) { 146 for (i = 0, irq_bit = 1 ; i < 6 ; i++, irq_bit <<= 1) {
147 if (events & irq_bit) { 147 if (events & irq_bit) {
148 oss->irq_level[i] = OSS_IRQLEV_DISABLED; 148 oss->irq_level[i] = OSS_IRQLEV_DISABLED;
149 mac_do_irq_list(NUBUS_SOURCE_BASE + i, regs); 149 m68k_handle_int(NUBUS_SOURCE_BASE + i, regs);
150 oss->irq_pending &= ~irq_bit; 150 oss->irq_pending &= ~irq_bit;
151 oss->irq_level[i] = OSS_IRQLEV_NUBUS; 151 oss->irq_level[i] = OSS_IRQLEV_NUBUS;
152 } 152 }
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index e72384e43a1e..e26218091755 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -117,10 +117,10 @@ void __init psc_init(void)
117 117
118void __init psc_register_interrupts(void) 118void __init psc_register_interrupts(void)
119{ 119{
120 cpu_request_irq(3, psc_irq, IRQ_FLG_LOCK, "psc3", (void *) 0x30); 120 request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30);
121 cpu_request_irq(4, psc_irq, IRQ_FLG_LOCK, "psc4", (void *) 0x40); 121 request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40);
122 cpu_request_irq(5, psc_irq, IRQ_FLG_LOCK, "psc5", (void *) 0x50); 122 request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50);
123 cpu_request_irq(6, psc_irq, IRQ_FLG_LOCK, "psc6", (void *) 0x60); 123 request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60);
124} 124}
125 125
126/* 126/*
@@ -149,7 +149,7 @@ irqreturn_t psc_irq(int irq, void *dev_id, struct pt_regs *regs)
149 for (i = 0, irq_bit = 1 ; i < 4 ; i++, irq_bit <<= 1) { 149 for (i = 0, irq_bit = 1 ; i < 4 ; i++, irq_bit <<= 1) {
150 if (events & irq_bit) { 150 if (events & irq_bit) {
151 psc_write_byte(pIER, irq_bit); 151 psc_write_byte(pIER, irq_bit);
152 mac_do_irq_list(base_irq + i, regs); 152 m68k_handle_int(base_irq + i, regs);
153 psc_write_byte(pIFR, irq_bit); 153 psc_write_byte(pIFR, irq_bit);
154 psc_write_byte(pIER, irq_bit | 0x80); 154 psc_write_byte(pIER, irq_bit | 0x80);
155 } 155 }
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index a6e3814c8666..c4aa345d544e 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -253,21 +253,21 @@ void __init via_init_clock(irqreturn_t (*func)(int, void *, struct pt_regs *))
253void __init via_register_interrupts(void) 253void __init via_register_interrupts(void)
254{ 254{
255 if (via_alt_mapping) { 255 if (via_alt_mapping) {
256 cpu_request_irq(IRQ_AUTO_1, via1_irq, 256 request_irq(IRQ_AUTO_1, via1_irq,
257 IRQ_FLG_LOCK|IRQ_FLG_FAST, "software", 257 IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
258 (void *) via1); 258 (void *) via1);
259 cpu_request_irq(IRQ_AUTO_6, via1_irq, 259 request_irq(IRQ_AUTO_6, via1_irq,
260 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1", 260 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
261 (void *) via1); 261 (void *) via1);
262 } else { 262 } else {
263 cpu_request_irq(IRQ_AUTO_1, via1_irq, 263 request_irq(IRQ_AUTO_1, via1_irq,
264 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1", 264 IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
265 (void *) via1); 265 (void *) via1);
266 } 266 }
267 cpu_request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST, 267 request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
268 "via2", (void *) via2); 268 "via2", (void *) via2);
269 if (!psc_present) { 269 if (!psc_present) {
270 cpu_request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK, 270 request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
271 "scc", mac_scc_dispatch); 271 "scc", mac_scc_dispatch);
272 } 272 }
273 request_irq(IRQ_MAC_NUBUS, via_nubus_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST, 273 request_irq(IRQ_MAC_NUBUS, via_nubus_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
@@ -424,7 +424,7 @@ irqreturn_t via1_irq(int irq, void *dev_id, struct pt_regs *regs)
424 for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1) 424 for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1)
425 if (events & irq_bit) { 425 if (events & irq_bit) {
426 via1[vIER] = irq_bit; 426 via1[vIER] = irq_bit;
427 mac_do_irq_list(VIA1_SOURCE_BASE + i, regs); 427 m68k_handle_int(VIA1_SOURCE_BASE + i, regs);
428 via1[vIFR] = irq_bit; 428 via1[vIFR] = irq_bit;
429 via1[vIER] = irq_bit | 0x80; 429 via1[vIER] = irq_bit | 0x80;
430 } 430 }
@@ -439,7 +439,7 @@ irqreturn_t via1_irq(int irq, void *dev_id, struct pt_regs *regs)
439 /* No, it won't be set. that's why we're doing this. */ 439 /* No, it won't be set. that's why we're doing this. */
440 via_irq_disable(IRQ_MAC_NUBUS); 440 via_irq_disable(IRQ_MAC_NUBUS);
441 via_irq_clear(IRQ_MAC_NUBUS); 441 via_irq_clear(IRQ_MAC_NUBUS);
442 mac_do_irq_list(IRQ_MAC_NUBUS, regs); 442 m68k_handle_int(IRQ_MAC_NUBUS, regs);
443 via_irq_enable(IRQ_MAC_NUBUS); 443 via_irq_enable(IRQ_MAC_NUBUS);
444 } 444 }
445#endif 445#endif
@@ -459,7 +459,7 @@ irqreturn_t via2_irq(int irq, void *dev_id, struct pt_regs *regs)
459 if (events & irq_bit) { 459 if (events & irq_bit) {
460 via2[gIER] = irq_bit; 460 via2[gIER] = irq_bit;
461 via2[gIFR] = irq_bit | rbv_clear; 461 via2[gIFR] = irq_bit | rbv_clear;
462 mac_do_irq_list(VIA2_SOURCE_BASE + i, regs); 462 m68k_handle_int(VIA2_SOURCE_BASE + i, regs);
463 via2[gIER] = irq_bit | 0x80; 463 via2[gIER] = irq_bit | 0x80;
464 } 464 }
465 return IRQ_HANDLED; 465 return IRQ_HANDLED;
@@ -481,7 +481,7 @@ irqreturn_t via_nubus_irq(int irq, void *dev_id, struct pt_regs *regs)
481 for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1) { 481 for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1) {
482 if (events & irq_bit) { 482 if (events & irq_bit) {
483 via_irq_disable(NUBUS_SOURCE_BASE + i); 483 via_irq_disable(NUBUS_SOURCE_BASE + i);
484 mac_do_irq_list(NUBUS_SOURCE_BASE + i, regs); 484 m68k_handle_int(NUBUS_SOURCE_BASE + i, regs);
485 via_irq_enable(NUBUS_SOURCE_BASE + i); 485 via_irq_enable(NUBUS_SOURCE_BASE + i);
486 } 486 }
487 } 487 }
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 85ad19a0ac79..43ffab048724 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -259,13 +259,15 @@ void __iounmap(void *addr, unsigned long size)
259 259
260 if (CPU_IS_020_OR_030) { 260 if (CPU_IS_020_OR_030) {
261 int pmd_off = (virtaddr/PTRTREESIZE) & 15; 261 int pmd_off = (virtaddr/PTRTREESIZE) & 15;
262 int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
262 263
263 if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) { 264 if (pmd_type == _PAGE_PRESENT) {
264 pmd_dir->pmd[pmd_off] = 0; 265 pmd_dir->pmd[pmd_off] = 0;
265 virtaddr += PTRTREESIZE; 266 virtaddr += PTRTREESIZE;
266 size -= PTRTREESIZE; 267 size -= PTRTREESIZE;
267 continue; 268 continue;
268 } 269 } else if (pmd_type == 0)
270 continue;
269 } 271 }
270 272
271 if (pmd_bad(*pmd_dir)) { 273 if (pmd_bad(*pmd_dir)) {
diff --git a/arch/m68k/mvme147/147ints.c b/arch/m68k/mvme147/147ints.c
deleted file mode 100644
index 69a744ee35a3..000000000000
--- a/arch/m68k/mvme147/147ints.c
+++ /dev/null
@@ -1,145 +0,0 @@
1/*
2 * arch/m68k/mvme147/147ints.c
3 *
4 * Copyright (C) 1997 Richard Hirst [richard@sleepie.demon.co.uk]
5 *
6 * based on amiints.c -- Amiga Linux interrupt handling code
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file README.legal in the main directory of this archive
10 * for more details.
11 *
12 */
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/seq_file.h>
18
19#include <asm/ptrace.h>
20#include <asm/system.h>
21#include <asm/irq.h>
22#include <asm/traps.h>
23
24static irqreturn_t mvme147_defhand (int irq, void *dev_id, struct pt_regs *fp);
25
26/*
27 * This should ideally be 4 elements only, for speed.
28 */
29
30static struct {
31 irqreturn_t (*handler)(int, void *, struct pt_regs *);
32 unsigned long flags;
33 void *dev_id;
34 const char *devname;
35 unsigned count;
36} irq_tab[256];
37
38/*
39 * void mvme147_init_IRQ (void)
40 *
41 * Parameters: None
42 *
43 * Returns: Nothing
44 *
45 * This function is called during kernel startup to initialize
46 * the mvme147 IRQ handling routines.
47 */
48
49void mvme147_init_IRQ (void)
50{
51 int i;
52
53 for (i = 0; i < 256; i++) {
54 irq_tab[i].handler = mvme147_defhand;
55 irq_tab[i].flags = IRQ_FLG_STD;
56 irq_tab[i].dev_id = NULL;
57 irq_tab[i].devname = NULL;
58 irq_tab[i].count = 0;
59 }
60}
61
62int mvme147_request_irq(unsigned int irq,
63 irqreturn_t (*handler)(int, void *, struct pt_regs *),
64 unsigned long flags, const char *devname, void *dev_id)
65{
66 if (irq > 255) {
67 printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
68 return -ENXIO;
69 }
70 if (!(irq_tab[irq].flags & IRQ_FLG_STD)) {
71 if (irq_tab[irq].flags & IRQ_FLG_LOCK) {
72 printk("%s: IRQ %d from %s is not replaceable\n",
73 __FUNCTION__, irq, irq_tab[irq].devname);
74 return -EBUSY;
75 }
76 if (flags & IRQ_FLG_REPLACE) {
77 printk("%s: %s can't replace IRQ %d from %s\n",
78 __FUNCTION__, devname, irq, irq_tab[irq].devname);
79 return -EBUSY;
80 }
81 }
82 irq_tab[irq].handler = handler;
83 irq_tab[irq].flags = flags;
84 irq_tab[irq].dev_id = dev_id;
85 irq_tab[irq].devname = devname;
86 return 0;
87}
88
89void mvme147_free_irq(unsigned int irq, void *dev_id)
90{
91 if (irq > 255) {
92 printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
93 return;
94 }
95 if (irq_tab[irq].dev_id != dev_id)
96 printk("%s: Removing probably wrong IRQ %d from %s\n",
97 __FUNCTION__, irq, irq_tab[irq].devname);
98
99 irq_tab[irq].handler = mvme147_defhand;
100 irq_tab[irq].flags = IRQ_FLG_STD;
101 irq_tab[irq].dev_id = NULL;
102 irq_tab[irq].devname = NULL;
103}
104
105irqreturn_t mvme147_process_int (unsigned long vec, struct pt_regs *fp)
106{
107 if (vec > 255) {
108 printk ("mvme147_process_int: Illegal vector %ld\n", vec);
109 return IRQ_NONE;
110 } else {
111 irq_tab[vec].count++;
112 irq_tab[vec].handler(vec, irq_tab[vec].dev_id, fp);
113 return IRQ_HANDLED;
114 }
115}
116
117int show_mvme147_interrupts (struct seq_file *p, void *v)
118{
119 int i;
120
121 for (i = 0; i < 256; i++) {
122 if (irq_tab[i].count)
123 seq_printf(p, "Vec 0x%02x: %8d %s\n",
124 i, irq_tab[i].count,
125 irq_tab[i].devname ? irq_tab[i].devname : "free");
126 }
127 return 0;
128}
129
130
131static irqreturn_t mvme147_defhand (int irq, void *dev_id, struct pt_regs *fp)
132{
133 printk ("Unknown interrupt 0x%02x\n", irq);
134 return IRQ_NONE;
135}
136
137void mvme147_enable_irq (unsigned int irq)
138{
139}
140
141
142void mvme147_disable_irq (unsigned int irq)
143{
144}
145
diff --git a/arch/m68k/mvme147/Makefile b/arch/m68k/mvme147/Makefile
index f0153ed3efa5..a36d38dbfbbc 100644
--- a/arch/m68k/mvme147/Makefile
+++ b/arch/m68k/mvme147/Makefile
@@ -2,4 +2,4 @@
2# Makefile for Linux arch/m68k/mvme147 source directory 2# Makefile for Linux arch/m68k/mvme147 source directory
3# 3#
4 4
5obj-y := config.o 147ints.o 5obj-y := config.o
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 0fcf9720c2fe..0cd0e5bddcee 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -36,15 +36,8 @@
36#include <asm/mvme147hw.h> 36#include <asm/mvme147hw.h>
37 37
38 38
39extern irqreturn_t mvme147_process_int (int level, struct pt_regs *regs);
40extern void mvme147_init_IRQ (void);
41extern void mvme147_free_irq (unsigned int, void *);
42extern int show_mvme147_interrupts (struct seq_file *, void *);
43extern void mvme147_enable_irq (unsigned int);
44extern void mvme147_disable_irq (unsigned int);
45static void mvme147_get_model(char *model); 39static void mvme147_get_model(char *model);
46static int mvme147_get_hardware_list(char *buffer); 40static int mvme147_get_hardware_list(char *buffer);
47extern int mvme147_request_irq (unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
48extern void mvme147_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)); 41extern void mvme147_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
49extern unsigned long mvme147_gettimeoffset (void); 42extern unsigned long mvme147_gettimeoffset (void);
50extern int mvme147_hwclk (int, struct rtc_time *); 43extern int mvme147_hwclk (int, struct rtc_time *);
@@ -91,6 +84,15 @@ static int mvme147_get_hardware_list(char *buffer)
91 return 0; 84 return 0;
92} 85}
93 86
87/*
88 * This function is called during kernel startup to initialize
89 * the mvme147 IRQ handling routines.
90 */
91
92void mvme147_init_IRQ(void)
93{
94 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
95}
94 96
95void __init config_mvme147(void) 97void __init config_mvme147(void)
96{ 98{
@@ -101,12 +103,6 @@ void __init config_mvme147(void)
101 mach_hwclk = mvme147_hwclk; 103 mach_hwclk = mvme147_hwclk;
102 mach_set_clock_mmss = mvme147_set_clock_mmss; 104 mach_set_clock_mmss = mvme147_set_clock_mmss;
103 mach_reset = mvme147_reset; 105 mach_reset = mvme147_reset;
104 mach_free_irq = mvme147_free_irq;
105 mach_process_int = mvme147_process_int;
106 mach_get_irq_list = show_mvme147_interrupts;
107 mach_request_irq = mvme147_request_irq;
108 enable_irq = mvme147_enable_irq;
109 disable_irq = mvme147_disable_irq;
110 mach_get_model = mvme147_get_model; 106 mach_get_model = mvme147_get_model;
111 mach_get_hardware_list = mvme147_get_hardware_list; 107 mach_get_hardware_list = mvme147_get_hardware_list;
112 108
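
With mvme147_request_irq() and friends gone, MVME147 drivers are expected to go through the common m68k interrupt layer. The fragment below is an illustrative sketch only, not code from this patch: MY_VEC_IRQ and my_card_interrupt are invented names, and the handler keeps the pt_regs-passing signature still used throughout this tree.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

/* MY_VEC_IRQ: placeholder for whatever IRQ number the board's vector maps
 * to under m68k_setup_user_interrupt(); not defined by this patch. */

static irqreturn_t my_card_interrupt(int irq, void *dev_id, struct pt_regs *fp)
{
	/* acknowledge the board, then do the real work */
	return IRQ_HANDLED;
}

static int __init my_card_init(void)
{
	/* the generic code now does the vector bookkeeping that
	 * mvme147_request_irq() used to do by hand */
	if (request_irq(MY_VEC_IRQ, my_card_interrupt, 0, "my_card", NULL))
		return -EBUSY;
	return 0;
}

The matching free_irq() replaces the per-board free helper removed above.
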
diff --git a/arch/m68k/mvme16x/16xints.c b/arch/m68k/mvme16x/16xints.c
deleted file mode 100644
index 793ef735b59c..000000000000
--- a/arch/m68k/mvme16x/16xints.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * arch/m68k/mvme16x/16xints.c
3 *
4 * Copyright (C) 1995 Richard Hirst [richard@sleepie.demon.co.uk]
5 *
6 * based on amiints.c -- Amiga Linux interrupt handling code
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file README.legal in the main directory of this archive
10 * for more details.
11 *
12 */
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/seq_file.h>
18
19#include <asm/system.h>
20#include <asm/ptrace.h>
21#include <asm/irq.h>
22
23static irqreturn_t mvme16x_defhand (int irq, void *dev_id, struct pt_regs *fp);
24
25/*
26 * This should ideally be 4 elements only, for speed.
27 */
28
29static struct {
30 irqreturn_t (*handler)(int, void *, struct pt_regs *);
31 unsigned long flags;
32 void *dev_id;
33 const char *devname;
34 unsigned count;
35} irq_tab[192];
36
37/*
38 * void mvme16x_init_IRQ (void)
39 *
40 * Parameters: None
41 *
42 * Returns: Nothing
43 *
44 * This function is called during kernel startup to initialize
45 * the mvme16x IRQ handling routines. Should probably ensure
46 * that the base vectors for the VMEChip2 and PCCChip2 are valid.
47 */
48
49void mvme16x_init_IRQ (void)
50{
51 int i;
52
53 for (i = 0; i < 192; i++) {
54 irq_tab[i].handler = mvme16x_defhand;
55 irq_tab[i].flags = IRQ_FLG_STD;
56 irq_tab[i].dev_id = NULL;
57 irq_tab[i].devname = NULL;
58 irq_tab[i].count = 0;
59 }
60}
61
62int mvme16x_request_irq(unsigned int irq,
63 irqreturn_t (*handler)(int, void *, struct pt_regs *),
64 unsigned long flags, const char *devname, void *dev_id)
65{
66 if (irq < 64 || irq > 255) {
67 printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
68 return -ENXIO;
69 }
70
71 if (!(irq_tab[irq-64].flags & IRQ_FLG_STD)) {
72 if (irq_tab[irq-64].flags & IRQ_FLG_LOCK) {
73 printk("%s: IRQ %d from %s is not replaceable\n",
74 __FUNCTION__, irq, irq_tab[irq-64].devname);
75 return -EBUSY;
76 }
77 if (flags & IRQ_FLG_REPLACE) {
78 printk("%s: %s can't replace IRQ %d from %s\n",
79 __FUNCTION__, devname, irq, irq_tab[irq-64].devname);
80 return -EBUSY;
81 }
82 }
83 irq_tab[irq-64].handler = handler;
84 irq_tab[irq-64].flags = flags;
85 irq_tab[irq-64].dev_id = dev_id;
86 irq_tab[irq-64].devname = devname;
87 return 0;
88}
89
90void mvme16x_free_irq(unsigned int irq, void *dev_id)
91{
92 if (irq < 64 || irq > 255) {
93 printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
94 return;
95 }
96
97 if (irq_tab[irq-64].dev_id != dev_id)
98 printk("%s: Removing probably wrong IRQ %d from %s\n",
99 __FUNCTION__, irq, irq_tab[irq-64].devname);
100
101 irq_tab[irq-64].handler = mvme16x_defhand;
102 irq_tab[irq-64].flags = IRQ_FLG_STD;
103 irq_tab[irq-64].dev_id = NULL;
104 irq_tab[irq-64].devname = NULL;
105}
106
107irqreturn_t mvme16x_process_int (unsigned long vec, struct pt_regs *fp)
108{
109 if (vec < 64 || vec > 255) {
110 printk ("mvme16x_process_int: Illegal vector %ld", vec);
111 return IRQ_NONE;
112 } else {
113 irq_tab[vec-64].count++;
114 irq_tab[vec-64].handler(vec, irq_tab[vec-64].dev_id, fp);
115 return IRQ_HANDLED;
116 }
117}
118
119int show_mvme16x_interrupts (struct seq_file *p, void *v)
120{
121 int i;
122
123 for (i = 0; i < 192; i++) {
124 if (irq_tab[i].count)
125 seq_printf(p, "Vec 0x%02x: %8d %s\n",
126 i+64, irq_tab[i].count,
127 irq_tab[i].devname ? irq_tab[i].devname : "free");
128 }
129 return 0;
130}
131
132
133static irqreturn_t mvme16x_defhand (int irq, void *dev_id, struct pt_regs *fp)
134{
135 printk ("Unknown interrupt 0x%02x\n", irq);
136 return IRQ_NONE;
137}
138
139
140void mvme16x_enable_irq (unsigned int irq)
141{
142}
143
144
145void mvme16x_disable_irq (unsigned int irq)
146{
147}
148
149
diff --git a/arch/m68k/mvme16x/Makefile b/arch/m68k/mvme16x/Makefile
index 5129f56b64a3..950e82f21640 100644
--- a/arch/m68k/mvme16x/Makefile
+++ b/arch/m68k/mvme16x/Makefile
@@ -2,4 +2,4 @@
2# Makefile for Linux arch/m68k/mvme16x source directory 2# Makefile for Linux arch/m68k/mvme16x source directory
3# 3#
4 4
5obj-y := config.o 16xints.o rtc.o mvme16x_ksyms.o 5obj-y := config.o rtc.o mvme16x_ksyms.o
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 26ce81c1337d..ce2727ed1bc0 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -40,15 +40,8 @@ extern t_bdid mvme_bdid;
40 40
41static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE; 41static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE;
42 42
43extern irqreturn_t mvme16x_process_int (int level, struct pt_regs *regs);
44extern void mvme16x_init_IRQ (void);
45extern void mvme16x_free_irq (unsigned int, void *);
46extern int show_mvme16x_interrupts (struct seq_file *, void *);
47extern void mvme16x_enable_irq (unsigned int);
48extern void mvme16x_disable_irq (unsigned int);
49static void mvme16x_get_model(char *model); 43static void mvme16x_get_model(char *model);
50static int mvme16x_get_hardware_list(char *buffer); 44static int mvme16x_get_hardware_list(char *buffer);
51extern int mvme16x_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
52extern void mvme16x_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)); 45extern void mvme16x_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
53extern unsigned long mvme16x_gettimeoffset (void); 46extern unsigned long mvme16x_gettimeoffset (void);
54extern int mvme16x_hwclk (int, struct rtc_time *); 47extern int mvme16x_hwclk (int, struct rtc_time *);
@@ -120,6 +113,16 @@ static int mvme16x_get_hardware_list(char *buffer)
120 return (len); 113 return (len);
121} 114}
122 115
116/*
117 * This function is called during kernel startup to initialize
118 * the mvme16x IRQ handling routines. Should probably ensure
119 * that the base vectors for the VMEChip2 and PCCChip2 are valid.
120 */
121
122static void mvme16x_init_IRQ (void)
123{
124 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
125}
123 126
124#define pcc2chip ((volatile u_char *)0xfff42000) 127#define pcc2chip ((volatile u_char *)0xfff42000)
125#define PccSCCMICR 0x1d 128#define PccSCCMICR 0x1d
@@ -138,12 +141,6 @@ void __init config_mvme16x(void)
138 mach_hwclk = mvme16x_hwclk; 141 mach_hwclk = mvme16x_hwclk;
139 mach_set_clock_mmss = mvme16x_set_clock_mmss; 142 mach_set_clock_mmss = mvme16x_set_clock_mmss;
140 mach_reset = mvme16x_reset; 143 mach_reset = mvme16x_reset;
141 mach_free_irq = mvme16x_free_irq;
142 mach_process_int = mvme16x_process_int;
143 mach_get_irq_list = show_mvme16x_interrupts;
144 mach_request_irq = mvme16x_request_irq;
145 enable_irq = mvme16x_enable_irq;
146 disable_irq = mvme16x_disable_irq;
147 mach_get_model = mvme16x_get_model; 144 mach_get_model = mvme16x_get_model;
148 mach_get_hardware_list = mvme16x_get_hardware_list; 145 mach_get_hardware_list = mvme16x_get_hardware_list;
149 146
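
A quick sanity check on the 192 passed to m68k_setup_user_interrupt() by both VME conversions (a reading of the numbers rather than text from the patch, and it assumes VEC_USER is the first 68000 user interrupt vector, i.e. vector 64): the processor's exception table has 256 vectors and the first 64 are reserved/auto vectors, so 256 - 64 = 192 user vectors, numbered 64 through 255. That is exactly the range the deleted 16xints.c served with its 192-entry irq_tab[] indexed by irq - 64, so handing (VEC_USER, 192) to the generic layer covers the same vector space as before.
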
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 5e0f9b04d45e..efa52d302d67 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -37,15 +37,9 @@
37#include <asm/q40_master.h> 37#include <asm/q40_master.h>
38 38
39extern irqreturn_t q40_process_int (int level, struct pt_regs *regs); 39extern irqreturn_t q40_process_int (int level, struct pt_regs *regs);
40extern irqreturn_t (*q40_default_handler[]) (int, void *, struct pt_regs *); /* added just for debugging */
41extern void q40_init_IRQ (void); 40extern void q40_init_IRQ (void);
42extern void q40_free_irq (unsigned int, void *);
43extern int show_q40_interrupts (struct seq_file *, void *);
44extern void q40_enable_irq (unsigned int);
45extern void q40_disable_irq (unsigned int);
46static void q40_get_model(char *model); 41static void q40_get_model(char *model);
47static int q40_get_hardware_list(char *buffer); 42static int q40_get_hardware_list(char *buffer);
48extern int q40_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), unsigned long flags, const char *devname, void *dev_id);
49extern void q40_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)); 43extern void q40_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
50 44
51extern unsigned long q40_gettimeoffset (void); 45extern unsigned long q40_gettimeoffset (void);
@@ -175,13 +169,6 @@ void __init config_q40(void)
175 mach_set_clock_mmss = q40_set_clock_mmss; 169 mach_set_clock_mmss = q40_set_clock_mmss;
176 170
177 mach_reset = q40_reset; 171 mach_reset = q40_reset;
178 mach_free_irq = q40_free_irq;
179 mach_process_int = q40_process_int;
180 mach_get_irq_list = show_q40_interrupts;
181 mach_request_irq = q40_request_irq;
182 enable_irq = q40_enable_irq;
183 disable_irq = q40_disable_irq;
184 mach_default_handler = &q40_default_handler;
185 mach_get_model = q40_get_model; 172 mach_get_model = q40_get_model;
186 mach_get_hardware_list = q40_get_hardware_list; 173 mach_get_hardware_list = q40_get_hardware_list;
187 174
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index f8ecc2664fe6..472f41c4158b 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -14,13 +14,8 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/string.h>
18#include <linux/sched.h>
19#include <linux/seq_file.h>
20#include <linux/interrupt.h> 17#include <linux/interrupt.h>
21#include <linux/hardirq.h>
22 18
23#include <asm/rtc.h>
24#include <asm/ptrace.h> 19#include <asm/ptrace.h>
25#include <asm/system.h> 20#include <asm/system.h>
26#include <asm/irq.h> 21#include <asm/irq.h>
@@ -39,29 +34,37 @@
39 * 34 *
40*/ 35*/
41 36
42extern int ints_inited; 37static void q40_irq_handler(unsigned int, struct pt_regs *fp);
38static void q40_enable_irq(unsigned int);
39static void q40_disable_irq(unsigned int);
43 40
41unsigned short q40_ablecount[35];
42unsigned short q40_state[35];
44 43
45irqreturn_t q40_irq2_handler (int, void *, struct pt_regs *fp); 44static int q40_irq_startup(unsigned int irq)
46 45{
47 46 /* test for ISA ints not implemented by HW */
48static irqreturn_t q40_defhand (int irq, void *dev_id, struct pt_regs *fp); 47 switch (irq) {
49static irqreturn_t default_handler(int lev, void *dev_id, struct pt_regs *regs); 48 case 1: case 2: case 8: case 9:
50 49 case 11: case 12: case 13:
51 50 printk("%s: ISA IRQ %d not implemented by HW\n", __FUNCTION__, irq);
52#define DEVNAME_SIZE 24 51 return -ENXIO;
52 }
53 return 0;
54}
53 55
54static struct q40_irq_node { 56static void q40_irq_shutdown(unsigned int irq)
55 irqreturn_t (*handler)(int, void *, struct pt_regs *); 57{
56 unsigned long flags; 58}
57 void *dev_id;
58 /* struct q40_irq_node *next;*/
59 char devname[DEVNAME_SIZE];
60 unsigned count;
61 unsigned short state;
62} irq_tab[Q40_IRQ_MAX+1];
63 59
64short unsigned q40_ablecount[Q40_IRQ_MAX+1]; 60static struct irq_controller q40_irq_controller = {
61 .name = "q40",
62 .lock = SPIN_LOCK_UNLOCKED,
63 .startup = q40_irq_startup,
64 .shutdown = q40_irq_shutdown,
65 .enable = q40_enable_irq,
66 .disable = q40_disable_irq,
67};
65 68
66/* 69/*
67 * void q40_init_IRQ (void) 70 * void q40_init_IRQ (void)
@@ -74,139 +77,29 @@ short unsigned q40_ablecount[Q40_IRQ_MAX+1];
74 * the q40 IRQ handling routines. 77 * the q40 IRQ handling routines.
75 */ 78 */
76 79
77static int disabled=0; 80static int disabled;
78 81
79void q40_init_IRQ (void) 82void q40_init_IRQ(void)
80{ 83{
81 int i; 84 m68k_setup_irq_controller(&q40_irq_controller, 1, Q40_IRQ_MAX);
82
83 disabled=0;
84 for (i = 0; i <= Q40_IRQ_MAX; i++) {
85 irq_tab[i].handler = q40_defhand;
86 irq_tab[i].flags = 0;
87 irq_tab[i].dev_id = NULL;
88 /* irq_tab[i].next = NULL;*/
89 irq_tab[i].devname[0] = 0;
90 irq_tab[i].count = 0;
91 irq_tab[i].state =0;
92 q40_ablecount[i]=0; /* all enabled */
93 }
94 85
95 /* setup handler for ISA ints */ 86 /* setup handler for ISA ints */
96 cpu_request_irq(IRQ2, q40_irq2_handler, 0, "q40 ISA and master chip", 87 m68k_setup_auto_interrupt(q40_irq_handler);
97 NULL); 88
89 m68k_irq_startup(IRQ_AUTO_2);
90 m68k_irq_startup(IRQ_AUTO_4);
98 91
99 /* now enable some ints.. */ 92 /* now enable some ints.. */
100 master_outb(1,EXT_ENABLE_REG); /* ISA IRQ 5-15 */ 93 master_outb(1, EXT_ENABLE_REG); /* ISA IRQ 5-15 */
101 94
102 /* make sure keyboard IRQ is disabled */ 95 /* make sure keyboard IRQ is disabled */
103 master_outb(0,KEY_IRQ_ENABLE_REG); 96 master_outb(0, KEY_IRQ_ENABLE_REG);
104} 97}
105 98
106int q40_request_irq(unsigned int irq,
107 irqreturn_t (*handler)(int, void *, struct pt_regs *),
108 unsigned long flags, const char *devname, void *dev_id)
109{
110 /*printk("q40_request_irq %d, %s\n",irq,devname);*/
111
112 if (irq > Q40_IRQ_MAX || (irq>15 && irq<32)) {
113 printk("%s: Incorrect IRQ %d from %s\n", __FUNCTION__, irq, devname);
114 return -ENXIO;
115 }
116
117 /* test for ISA ints not implemented by HW */
118 switch (irq)
119 {
120 case 1: case 2: case 8: case 9:
121 case 12: case 13:
122 printk("%s: ISA IRQ %d from %s not implemented by HW\n", __FUNCTION__, irq, devname);
123 return -ENXIO;
124 case 11:
125 printk("warning IRQ 10 and 11 not distinguishable\n");
126 irq=10;
127 default:
128 ;
129 }
130
131 if (irq<Q40_IRQ_SAMPLE)
132 {
133 if (irq_tab[irq].dev_id != NULL)
134 {
135 printk("%s: IRQ %d from %s is not replaceable\n",
136 __FUNCTION__, irq, irq_tab[irq].devname);
137 return -EBUSY;
138 }
139 /*printk("IRQ %d set to handler %p\n",irq,handler);*/
140 if (dev_id==NULL)
141 {
142 printk("WARNING: dev_id == NULL in request_irq\n");
143 dev_id=(void*)1;
144 }
145 irq_tab[irq].handler = handler;
146 irq_tab[irq].flags = flags;
147 irq_tab[irq].dev_id = dev_id;
148 strlcpy(irq_tab[irq].devname,devname,sizeof(irq_tab[irq].devname));
149 irq_tab[irq].state = 0;
150 return 0;
151 }
152 else {
153 /* Q40_IRQ_SAMPLE :somewhat special actions required here ..*/
154 cpu_request_irq(4, handler, flags, devname, dev_id);
155 cpu_request_irq(6, handler, flags, devname, dev_id);
156 return 0;
157 }
158}
159
160void q40_free_irq(unsigned int irq, void *dev_id)
161{
162 if (irq > Q40_IRQ_MAX || (irq>15 && irq<32)) {
163 printk("%s: Incorrect IRQ %d, dev_id %x \n", __FUNCTION__, irq, (unsigned)dev_id);
164 return;
165 }
166
167 /* test for ISA ints not implemented by HW */
168 switch (irq)
169 {
170 case 1: case 2: case 8: case 9:
171 case 12: case 13:
172 printk("%s: ISA IRQ %d from %x invalid\n", __FUNCTION__, irq, (unsigned)dev_id);
173 return;
174 case 11: irq=10;
175 default:
176 ;
177 }
178
179 if (irq<Q40_IRQ_SAMPLE)
180 {
181 if (irq_tab[irq].dev_id != dev_id)
182 printk("%s: Removing probably wrong IRQ %d from %s\n",
183 __FUNCTION__, irq, irq_tab[irq].devname);
184
185 irq_tab[irq].handler = q40_defhand;
186 irq_tab[irq].flags = 0;
187 irq_tab[irq].dev_id = NULL;
188 /* irq_tab[irq].devname = NULL; */
189 /* do not reset state !! */
190 }
191 else
192 { /* == Q40_IRQ_SAMPLE */
193 cpu_free_irq(4, dev_id);
194 cpu_free_irq(6, dev_id);
195 }
196}
197
198
199irqreturn_t q40_process_int (int level, struct pt_regs *fp)
200{
201 printk("unexpected interrupt vec=%x, pc=%lx, d0=%lx, d0_orig=%lx, d1=%lx, d2=%lx\n",
202 level, fp->pc, fp->d0, fp->orig_d0, fp->d1, fp->d2);
203 printk("\tIIRQ_REG = %x, EIRQ_REG = %x\n",master_inb(IIRQ_REG),master_inb(EIRQ_REG));
204 return IRQ_HANDLED;
205}
206 99
207/* 100/*
208 * this stuff doesn't really belong here.. 101 * this stuff doesn't really belong here..
209*/ 102 */
210 103
211int ql_ticks; /* 200Hz ticks since last jiffie */ 104int ql_ticks; /* 200Hz ticks since last jiffie */
212static int sound_ticks; 105static int sound_ticks;
@@ -215,54 +108,53 @@ static int sound_ticks;
215 108
216void q40_mksound(unsigned int hz, unsigned int ticks) 109void q40_mksound(unsigned int hz, unsigned int ticks)
217{ 110{
218 /* for now ignore hz, except that hz==0 switches off sound */ 111 /* for now ignore hz, except that hz==0 switches off sound */
219 /* simply alternate the ampl (128-SVOL)-(128+SVOL)-..-.. at 200Hz */ 112 /* simply alternate the ampl (128-SVOL)-(128+SVOL)-..-.. at 200Hz */
220 if (hz==0) 113 if (hz == 0) {
221 { 114 if (sound_ticks)
222 if (sound_ticks) 115 sound_ticks = 1;
223 sound_ticks=1; 116
224 117 *DAC_LEFT = 128;
225 *DAC_LEFT=128; 118 *DAC_RIGHT = 128;
226 *DAC_RIGHT=128; 119
227 120 return;
228 return; 121 }
229 } 122 /* sound itself is done in q40_timer_int */
230 /* sound itself is done in q40_timer_int */ 123 if (sound_ticks == 0)
231 if (sound_ticks == 0) sound_ticks=1000; /* pretty long beep */ 124 sound_ticks = 1000; /* pretty long beep */
232 sound_ticks=ticks<<1; 125 sound_ticks = ticks << 1;
233} 126}
234 127
235static irqreturn_t (*q40_timer_routine)(int, void *, struct pt_regs *); 128static irqreturn_t (*q40_timer_routine)(int, void *, struct pt_regs *);
236 129
237static irqreturn_t q40_timer_int (int irq, void * dev, struct pt_regs * regs) 130static irqreturn_t q40_timer_int (int irq, void * dev, struct pt_regs * regs)
238{ 131{
239 ql_ticks = ql_ticks ? 0 : 1; 132 ql_ticks = ql_ticks ? 0 : 1;
240 if (sound_ticks) 133 if (sound_ticks) {
241 { 134 unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL;
242 unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL; 135 sound_ticks--;
243 sound_ticks--; 136 *DAC_LEFT=sval;
244 *DAC_LEFT=sval; 137 *DAC_RIGHT=sval;
245 *DAC_RIGHT=sval; 138 }
246 } 139
247 140 if (!ql_ticks)
248 if (!ql_ticks) 141 q40_timer_routine(irq, dev, regs);
249 q40_timer_routine(irq, dev, regs); 142 return IRQ_HANDLED;
250 return IRQ_HANDLED;
251} 143}
252 144
253void q40_sched_init (irqreturn_t (*timer_routine)(int, void *, struct pt_regs *)) 145void q40_sched_init (irqreturn_t (*timer_routine)(int, void *, struct pt_regs *))
254{ 146{
255 int timer_irq; 147 int timer_irq;
256 148
257 q40_timer_routine = timer_routine; 149 q40_timer_routine = timer_routine;
258 timer_irq=Q40_IRQ_FRAME; 150 timer_irq = Q40_IRQ_FRAME;
259 151
260 if (request_irq(timer_irq, q40_timer_int, 0, 152 if (request_irq(timer_irq, q40_timer_int, 0,
261 "timer", q40_timer_int)) 153 "timer", q40_timer_int))
262 panic ("Couldn't register timer int"); 154 panic("Couldn't register timer int");
263 155
264 master_outb(-1,FRAME_CLEAR_REG); 156 master_outb(-1, FRAME_CLEAR_REG);
265 master_outb( 1,FRAME_RATE_REG); 157 master_outb( 1, FRAME_RATE_REG);
266} 158}
267 159
268 160
@@ -308,169 +200,132 @@ static int mext_disabled=0; /* ext irq disabled by master chip? */
308static int aliased_irq=0; /* how many times inside handler ?*/ 200static int aliased_irq=0; /* how many times inside handler ?*/
309 201
310 202
311/* got level 2 interrupt, dispatch to ISA or keyboard/timer IRQs */ 203/* got interrupt, dispatch to ISA or keyboard/timer IRQs */
312irqreturn_t q40_irq2_handler (int vec, void *devname, struct pt_regs *fp) 204static void q40_irq_handler(unsigned int irq, struct pt_regs *fp)
313{ 205{
314 unsigned mir, mer; 206 unsigned mir, mer;
315 int irq,i; 207 int i;
316 208
317//repeat: 209//repeat:
318 mir=master_inb(IIRQ_REG); 210 mir = master_inb(IIRQ_REG);
319 if (mir&Q40_IRQ_FRAME_MASK) { 211#ifdef CONFIG_BLK_DEV_FD
320 irq_tab[Q40_IRQ_FRAME].count++; 212 if ((mir & Q40_IRQ_EXT_MASK) &&
321 irq_tab[Q40_IRQ_FRAME].handler(Q40_IRQ_FRAME,irq_tab[Q40_IRQ_FRAME].dev_id,fp); 213 (master_inb(EIRQ_REG) & Q40_IRQ6_MASK)) {
322 master_outb(-1,FRAME_CLEAR_REG); 214 floppy_hardint();
323 } 215 return;
324 if ((mir&Q40_IRQ_SER_MASK) || (mir&Q40_IRQ_EXT_MASK)) { 216 }
325 mer=master_inb(EIRQ_REG); 217#endif
326 for (i=0; eirqs[i].mask; i++) { 218 switch (irq) {
327 if (mer&(eirqs[i].mask)) { 219 case 4:
328 irq=eirqs[i].irq; 220 case 6:
221 m68k_handle_int(Q40_IRQ_SAMPLE, fp);
222 return;
223 }
224 if (mir & Q40_IRQ_FRAME_MASK) {
225 m68k_handle_int(Q40_IRQ_FRAME, fp);
226 master_outb(-1, FRAME_CLEAR_REG);
227 }
228 if ((mir & Q40_IRQ_SER_MASK) || (mir & Q40_IRQ_EXT_MASK)) {
229 mer = master_inb(EIRQ_REG);
230 for (i = 0; eirqs[i].mask; i++) {
231 if (mer & eirqs[i].mask) {
232 irq = eirqs[i].irq;
329/* 233/*
330 * There is a little mess wrt which IRQ really caused this irq request. The 234 * There is a little mess wrt which IRQ really caused this irq request. The
331 * main problem is that IIRQ_REG and EIRQ_REG reflect the state when they 235 * main problem is that IIRQ_REG and EIRQ_REG reflect the state when they
332 * are read - which is long after the request came in. In theory IRQs should 236 * are read - which is long after the request came in. In theory IRQs should
333 * not just go away but they occasionally do 237 * not just go away but they occasionally do
334 */ 238 */
335 if (irq>4 && irq<=15 && mext_disabled) { 239 if (irq > 4 && irq <= 15 && mext_disabled) {
336 /*aliased_irq++;*/ 240 /*aliased_irq++;*/
337 goto iirq; 241 goto iirq;
338 } 242 }
339 if (irq_tab[irq].handler == q40_defhand ) { 243 if (q40_state[irq] & IRQ_INPROGRESS) {
340 printk("handler for IRQ %d not defined\n",irq); 244 /* some handlers do local_irq_enable() for irq latency reasons, */
341 continue; /* ignore uninited INTs :-( */ 245 /* however reentering an active irq handler is not permitted */
342 }
343 if ( irq_tab[irq].state & IRQ_INPROGRESS ) {
344 /* some handlers do local_irq_enable() for irq latency reasons, */
345 /* however reentering an active irq handler is not permitted */
346#ifdef IP_USE_DISABLE 246#ifdef IP_USE_DISABLE
347 /* in theory this is the better way to do it because it still */ 247 /* in theory this is the better way to do it because it still */
348 /* lets through eg the serial irqs, unfortunately it crashes */ 248 /* lets through eg the serial irqs, unfortunately it crashes */
349 disable_irq(irq); 249 disable_irq(irq);
350 disabled=1; 250 disabled = 1;
351#else 251#else
352 /*printk("IRQ_INPROGRESS detected for irq %d, disabling - %s disabled\n",irq,disabled ? "already" : "not yet"); */ 252 /*printk("IRQ_INPROGRESS detected for irq %d, disabling - %s disabled\n",
353 fp->sr = (((fp->sr) & (~0x700))+0x200); 253 irq, disabled ? "already" : "not yet"); */
354 disabled=1; 254 fp->sr = (((fp->sr) & (~0x700))+0x200);
255 disabled = 1;
355#endif 256#endif
356 goto iirq; 257 goto iirq;
357 } 258 }
358 irq_tab[irq].count++; 259 q40_state[irq] |= IRQ_INPROGRESS;
359 irq_tab[irq].state |= IRQ_INPROGRESS; 260 m68k_handle_int(irq, fp);
360 irq_tab[irq].handler(irq,irq_tab[irq].dev_id,fp); 261 q40_state[irq] &= ~IRQ_INPROGRESS;
361 irq_tab[irq].state &= ~IRQ_INPROGRESS; 262
362 263 /* naively enable everything, if that fails then */
363 /* naively enable everything, if that fails then */ 264 /* this function will be reentered immediately thus */
364 /* this function will be reentered immediately thus */ 265 /* getting another chance to disable the IRQ */
365 /* getting another chance to disable the IRQ */ 266
366 267 if (disabled) {
367 if ( disabled ) {
368#ifdef IP_USE_DISABLE 268#ifdef IP_USE_DISABLE
369 if (irq>4){ 269 if (irq > 4) {
370 disabled=0; 270 disabled = 0;
371 enable_irq(irq);} 271 enable_irq(irq);
272 }
372#else 273#else
373 disabled=0; 274 disabled = 0;
374 /*printk("reenabling irq %d\n",irq); */ 275 /*printk("reenabling irq %d\n", irq); */
375#endif 276#endif
376 } 277 }
377// used to do 'goto repeat;' here, this delayed bh processing too long 278// used to do 'goto repeat;' here, this delayed bh processing too long
378 return IRQ_HANDLED; 279 return;
379 } 280 }
380 } 281 }
381 if (mer && ccleirq>0 && !aliased_irq) 282 if (mer && ccleirq > 0 && !aliased_irq) {
382 printk("ISA interrupt from unknown source? EIRQ_REG = %x\n",mer),ccleirq--; 283 printk("ISA interrupt from unknown source? EIRQ_REG = %x\n",mer);
383 } 284 ccleirq--;
384 iirq: 285 }
385 mir=master_inb(IIRQ_REG);
386 /* should test whether keyboard irq is really enabled, doing it in defhand */
387 if (mir&Q40_IRQ_KEYB_MASK) {
388 irq_tab[Q40_IRQ_KEYBOARD].count++;
389 irq_tab[Q40_IRQ_KEYBOARD].handler(Q40_IRQ_KEYBOARD,irq_tab[Q40_IRQ_KEYBOARD].dev_id,fp);
390 }
391 return IRQ_HANDLED;
392}
393
394int show_q40_interrupts (struct seq_file *p, void *v)
395{
396 int i;
397
398 for (i = 0; i <= Q40_IRQ_MAX; i++) {
399 if (irq_tab[i].count)
400 seq_printf(p, "%sIRQ %02d: %8d %s%s\n",
401 (i<=15) ? "ISA-" : " " ,
402 i, irq_tab[i].count,
403 irq_tab[i].devname[0] ? irq_tab[i].devname : "?",
404 irq_tab[i].handler == q40_defhand ?
405 " (now unassigned)" : "");
406 } 286 }
407 return 0; 287 iirq:
408} 288 mir = master_inb(IIRQ_REG);
409 289 /* should test whether keyboard irq is really enabled, doing it in defhand */
290 if (mir & Q40_IRQ_KEYB_MASK)
291 m68k_handle_int(Q40_IRQ_KEYBOARD, fp);
410 292
411static irqreturn_t q40_defhand (int irq, void *dev_id, struct pt_regs *fp) 293 return;
412{
413 if (irq!=Q40_IRQ_KEYBOARD)
414 printk ("Unknown q40 interrupt %d\n", irq);
415 else master_outb(-1,KEYBOARD_UNLOCK_REG);
416 return IRQ_NONE;
417} 294}
418static irqreturn_t default_handler(int lev, void *dev_id, struct pt_regs *regs)
419{
420 printk ("Uninitialised interrupt level %d\n", lev);
421 return IRQ_NONE;
422}
423
424irqreturn_t (*q40_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
425 [0] = default_handler,
426 [1] = default_handler,
427 [2] = default_handler,
428 [3] = default_handler,
429 [4] = default_handler,
430 [5] = default_handler,
431 [6] = default_handler,
432 [7] = default_handler
433};
434
435 295
436void q40_enable_irq (unsigned int irq) 296void q40_enable_irq(unsigned int irq)
437{ 297{
438 if ( irq>=5 && irq<=15 ) 298 if (irq >= 5 && irq <= 15) {
439 { 299 mext_disabled--;
440 mext_disabled--; 300 if (mext_disabled > 0)
441 if (mext_disabled>0) 301 printk("q40_enable_irq : nested disable/enable\n");
442 printk("q40_enable_irq : nested disable/enable\n"); 302 if (mext_disabled == 0)
443 if (mext_disabled==0) 303 master_outb(1, EXT_ENABLE_REG);
444 master_outb(1,EXT_ENABLE_REG); 304 }
445 }
446} 305}
447 306
448 307
449void q40_disable_irq (unsigned int irq) 308void q40_disable_irq(unsigned int irq)
450{ 309{
451 /* disable ISA IRQs : only do something if the driver has been 310 /* disable ISA IRQs : only do something if the driver has been
452 * verified to be Q40 "compatible" - right now IDE, NE2K 311 * verified to be Q40 "compatible" - right now IDE, NE2K
453 * Any driver should not attempt to sleep across disable_irq !! 312 * Any driver should not attempt to sleep across disable_irq !!
454 */ 313 */
455 314
456 if ( irq>=5 && irq<=15 ) { 315 if (irq >= 5 && irq <= 15) {
457 master_outb(0,EXT_ENABLE_REG); 316 master_outb(0, EXT_ENABLE_REG);
458 mext_disabled++; 317 mext_disabled++;
459 if (mext_disabled>1) printk("disable_irq nesting count %d\n",mext_disabled); 318 if (mext_disabled > 1)
460 } 319 printk("disable_irq nesting count %d\n",mext_disabled);
320 }
461} 321}
462 322
463unsigned long q40_probe_irq_on (void) 323unsigned long q40_probe_irq_on(void)
464{ 324{
465 printk("irq probing not working - reconfigure the driver to avoid this\n"); 325 printk("irq probing not working - reconfigure the driver to avoid this\n");
466 return -1; 326 return -1;
467} 327}
468int q40_probe_irq_off (unsigned long irqs) 328int q40_probe_irq_off(unsigned long irqs)
469{ 329{
470 return -1; 330 return -1;
471} 331}
472/*
473 * Local variables:
474 * compile-command: "m68k-linux-gcc -D__KERNEL__ -I/home/rz/lx/linux-2.2.6/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -m68040 -c -o q40ints.o q40ints.c"
475 * End:
476 */
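
For readers new to the interface being introduced, the board-side pattern reduces to roughly the skeleton below. It is a stripped-down illustration, not code from this patch: the "myboard" names and MYBOARD_IRQ_MAX are invented, while the structure fields and the setup call appear in the q40 hunks above, and the generic m68k_irq_startup/m68k_irq_shutdown helpers are the ones the sun3 conversion below plugs in as well.

static void myboard_enable_irq(unsigned int irq)
{
	/* unmask the line in the board's interrupt controller */
}

static void myboard_disable_irq(unsigned int irq)
{
	/* mask the line in the board's interrupt controller */
}

static struct irq_controller myboard_irq_controller = {
	.name		= "myboard",
	.lock		= SPIN_LOCK_UNLOCKED,
	.startup	= m68k_irq_startup,	/* generic helpers, as sun3 uses */
	.shutdown	= m68k_irq_shutdown,
	.enable		= myboard_enable_irq,
	.disable	= myboard_disable_irq,
};

void __init myboard_init_IRQ(void)
{
	/* register the controller for this board's IRQ range,
	 * same argument pattern as the q40 call above */
	m68k_setup_irq_controller(&myboard_irq_controller, 1, MYBOARD_IRQ_MAX);
}

After that, drivers use the normal request_irq() path and interrupts are delivered by the common code calling m68k_handle_int(), as the rewritten q40_irq_handler() shows.
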
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index f1ca0dfbaa67..553c304aa2c5 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -36,7 +36,6 @@ extern char _text, _end;
36char sun3_reserved_pmeg[SUN3_PMEGS_NUM]; 36char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
37 37
38extern unsigned long sun3_gettimeoffset(void); 38extern unsigned long sun3_gettimeoffset(void);
39extern int show_sun3_interrupts (struct seq_file *, void *);
40extern void sun3_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *)); 39extern void sun3_sched_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
41extern void sun3_get_model (char* model); 40extern void sun3_get_model (char* model);
42extern void idprom_init (void); 41extern void idprom_init (void);
@@ -147,13 +146,6 @@ void __init config_sun3(void)
147 146
148 mach_sched_init = sun3_sched_init; 147 mach_sched_init = sun3_sched_init;
149 mach_init_IRQ = sun3_init_IRQ; 148 mach_init_IRQ = sun3_init_IRQ;
150 mach_default_handler = &sun3_default_handler;
151 mach_request_irq = sun3_request_irq;
152 mach_free_irq = sun3_free_irq;
153 enable_irq = sun3_enable_irq;
154 disable_irq = sun3_disable_irq;
155 mach_process_int = sun3_process_int;
156 mach_get_irq_list = show_sun3_interrupts;
157 mach_reset = sun3_reboot; 149 mach_reset = sun3_reboot;
158 mach_gettimeoffset = sun3_gettimeoffset; 150 mach_gettimeoffset = sun3_gettimeoffset;
159 mach_get_model = sun3_get_model; 151 mach_get_model = sun3_get_model;
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index e62a033cd493..0912435e9e90 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -19,7 +19,6 @@
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20 20
21extern void sun3_leds (unsigned char); 21extern void sun3_leds (unsigned char);
22static irqreturn_t sun3_inthandle(int irq, void *dev_id, struct pt_regs *fp);
23 22
24void sun3_disable_interrupts(void) 23void sun3_disable_interrupts(void)
25{ 24{
@@ -40,48 +39,30 @@ int led_pattern[8] = {
40 39
41volatile unsigned char* sun3_intreg; 40volatile unsigned char* sun3_intreg;
42 41
43void sun3_insert_irq(irq_node_t **list, irq_node_t *node)
44{
45}
46
47void sun3_delete_irq(irq_node_t **list, void *dev_id)
48{
49}
50
51void sun3_enable_irq(unsigned int irq) 42void sun3_enable_irq(unsigned int irq)
52{ 43{
53 *sun3_intreg |= (1<<irq); 44 *sun3_intreg |= (1 << irq);
54} 45}
55 46
56void sun3_disable_irq(unsigned int irq) 47void sun3_disable_irq(unsigned int irq)
57{ 48{
58 *sun3_intreg &= ~(1<<irq); 49 *sun3_intreg &= ~(1 << irq);
59}
60
61inline void sun3_do_irq(int irq, struct pt_regs *fp)
62{
63 kstat_cpu(0).irqs[SYS_IRQS + irq]++;
64 *sun3_intreg &= ~(1<<irq);
65 *sun3_intreg |= (1<<irq);
66} 50}
67 51
68static irqreturn_t sun3_int7(int irq, void *dev_id, struct pt_regs *fp) 52static irqreturn_t sun3_int7(int irq, void *dev_id, struct pt_regs *fp)
69{ 53{
70 sun3_do_irq(irq,fp); 54 *sun3_intreg |= (1 << irq);
71 if(!(kstat_cpu(0).irqs[SYS_IRQS + irq] % 2000)) 55 if (!(kstat_cpu(0).irqs[irq] % 2000))
72 sun3_leds(led_pattern[(kstat_cpu(0).irqs[SYS_IRQS+irq]%16000) 56 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 16000) / 2000]);
73 /2000]);
74 return IRQ_HANDLED; 57 return IRQ_HANDLED;
75} 58}
76 59
77static irqreturn_t sun3_int5(int irq, void *dev_id, struct pt_regs *fp) 60static irqreturn_t sun3_int5(int irq, void *dev_id, struct pt_regs *fp)
78{ 61{
79 kstat_cpu(0).irqs[SYS_IRQS + irq]++;
80#ifdef CONFIG_SUN3 62#ifdef CONFIG_SUN3
81 intersil_clear(); 63 intersil_clear();
82#endif 64#endif
83 *sun3_intreg &= ~(1<<irq); 65 *sun3_intreg |= (1 << irq);
84 *sun3_intreg |= (1<<irq);
85#ifdef CONFIG_SUN3 66#ifdef CONFIG_SUN3
86 intersil_clear(); 67 intersil_clear();
87#endif 68#endif
@@ -89,65 +70,8 @@ static irqreturn_t sun3_int5(int irq, void *dev_id, struct pt_regs *fp)
89#ifndef CONFIG_SMP 70#ifndef CONFIG_SMP
90 update_process_times(user_mode(fp)); 71 update_process_times(user_mode(fp));
91#endif 72#endif
92 if(!(kstat_cpu(0).irqs[SYS_IRQS + irq] % 20)) 73 if (!(kstat_cpu(0).irqs[irq] % 20))
93 sun3_leds(led_pattern[(kstat_cpu(0).irqs[SYS_IRQS+irq]%160) 74 sun3_leds(led_pattern[(kstat_cpu(0).irqs[irq] % 160) / 20]);
94 /20]);
95 return IRQ_HANDLED;
96}
97
98/* handle requested ints, excepting 5 and 7, which always do the same
99 thing */
100irqreturn_t (*sun3_default_handler[SYS_IRQS])(int, void *, struct pt_regs *) = {
101 [0] = sun3_inthandle,
102 [1] = sun3_inthandle,
103 [2] = sun3_inthandle,
104 [3] = sun3_inthandle,
105 [4] = sun3_inthandle,
106 [5] = sun3_int5,
107 [6] = sun3_inthandle,
108 [7] = sun3_int7
109};
110
111static const char *dev_names[SYS_IRQS] = {
112 [5] = "timer",
113 [7] = "int7 handler"
114};
115static void *dev_ids[SYS_IRQS];
116static irqreturn_t (*sun3_inthandler[SYS_IRQS])(int, void *, struct pt_regs *) = {
117 [5] = sun3_int5,
118 [7] = sun3_int7
119};
120static irqreturn_t (*sun3_vechandler[SUN3_INT_VECS])(int, void *, struct pt_regs *);
121static void *vec_ids[SUN3_INT_VECS];
122static const char *vec_names[SUN3_INT_VECS];
123static int vec_ints[SUN3_INT_VECS];
124
125
126int show_sun3_interrupts(struct seq_file *p, void *v)
127{
128 int i;
129
130 for(i = 0; i < (SUN3_INT_VECS-1); i++) {
131 if(sun3_vechandler[i] != NULL) {
132 seq_printf(p, "vec %3d: %10u %s\n", i+64,
133 vec_ints[i],
134 (vec_names[i]) ? vec_names[i] :
135 "sun3_vechandler");
136 }
137 }
138
139 return 0;
140}
141
142static irqreturn_t sun3_inthandle(int irq, void *dev_id, struct pt_regs *fp)
143{
144 if(sun3_inthandler[irq] == NULL)
145 panic ("bad interrupt %d received (id %p)\n",irq, dev_id);
146
147 kstat_cpu(0).irqs[SYS_IRQS + irq]++;
148 *sun3_intreg &= ~(1<<irq);
149
150 sun3_inthandler[irq](irq, dev_ids[irq], fp);
151 return IRQ_HANDLED; 75 return IRQ_HANDLED;
152} 76}
153 77
@@ -157,109 +81,31 @@ static irqreturn_t sun3_vec255(int irq, void *dev_id, struct pt_regs *fp)
157 return IRQ_HANDLED; 81 return IRQ_HANDLED;
158} 82}
159 83
160void sun3_init_IRQ(void) 84static void sun3_inthandle(unsigned int irq, struct pt_regs *fp)
161{ 85{
162 int i; 86 *sun3_intreg &= ~(1 << irq);
163
164 *sun3_intreg = 1;
165
166 for(i = 0; i < SYS_IRQS; i++)
167 {
168 if(dev_names[i])
169 cpu_request_irq(i, sun3_default_handler[i], 0,
170 dev_names[i], NULL);
171 }
172
173 for(i = 0; i < 192; i++)
174 sun3_vechandler[i] = NULL;
175
176 sun3_vechandler[191] = sun3_vec255;
177}
178
179int sun3_request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
180 unsigned long flags, const char *devname, void *dev_id)
181{
182
183 if(irq < SYS_IRQS) {
184 if(sun3_inthandler[irq] != NULL) {
185 printk("sun3_request_irq: request for irq %d -- already taken!\n", irq);
186 return 1;
187 }
188
189 sun3_inthandler[irq] = handler;
190 dev_ids[irq] = dev_id;
191 dev_names[irq] = devname;
192
193 /* setting devname would be nice */
194 cpu_request_irq(irq, sun3_default_handler[irq], 0, devname,
195 NULL);
196
197 return 0;
198 } else {
199 if((irq >= 64) && (irq <= 255)) {
200 int vec;
201
202 vec = irq - 64;
203 if(sun3_vechandler[vec] != NULL) {
204 printk("sun3_request_irq: request for vec %d -- already taken!\n", irq);
205 return 1;
206 }
207
208 sun3_vechandler[vec] = handler;
209 vec_ids[vec] = dev_id;
210 vec_names[vec] = devname;
211 vec_ints[vec] = 0;
212
213 return 0;
214 }
215 }
216
217 printk("sun3_request_irq: invalid irq %d\n", irq);
218 return 1;
219 87
88 m68k_handle_int(irq, fp);
220} 89}
221 90
222void sun3_free_irq(unsigned int irq, void *dev_id) 91static struct irq_controller sun3_irq_controller = {
223{ 92 .name = "sun3",
224 93 .lock = SPIN_LOCK_UNLOCKED,
225 if(irq < SYS_IRQS) { 94 .startup = m68k_irq_startup,
226 if(sun3_inthandler[irq] == NULL) 95 .shutdown = m68k_irq_shutdown,
227 panic("sun3_free_int: attempt to free unused irq %d\n", irq); 96 .enable = sun3_enable_irq,
228 if(dev_ids[irq] != dev_id) 97 .disable = sun3_disable_irq,
229 panic("sun3_free_int: incorrect dev_id for irq %d\n", irq); 98};
230
231 sun3_inthandler[irq] = NULL;
232 return;
233 } else if((irq >= 64) && (irq <= 255)) {
234 int vec;
235
236 vec = irq - 64;
237 if(sun3_vechandler[vec] == NULL)
238 panic("sun3_free_int: attempt to free unused vector %d\n", irq);
239 if(vec_ids[irq] != dev_id)
240 panic("sun3_free_int: incorrect dev_id for vec %d\n", irq);
241
242 sun3_vechandler[vec] = NULL;
243 return;
244 } else {
245 panic("sun3_free_irq: invalid irq %d\n", irq);
246 }
247}
248 99
249irqreturn_t sun3_process_int(int irq, struct pt_regs *regs) 100void sun3_init_IRQ(void)
250{ 101{
102 *sun3_intreg = 1;
251 103
252 if((irq >= 64) && (irq <= 255)) { 104 m68k_setup_auto_interrupt(sun3_inthandle);
253 int vec; 105 m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7);
254 106 m68k_setup_user_interrupt(VEC_USER, 192, NULL);
255 vec = irq - 64;
256 if(sun3_vechandler[vec] == NULL)
257 panic ("bad interrupt vector %d received\n",irq);
258 107
259 vec_ints[vec]++; 108 request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL);
260 return sun3_vechandler[vec](irq, vec_ids[vec], regs); 109 request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL);
261 } else { 110 request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL);
262 panic("sun3_process_int: unable to handle interrupt vector %d\n",
263 irq);
264 }
265} 111}
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 0920f5d33606..52fb17408869 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -52,17 +52,10 @@ void __init config_sun3x(void)
52 52
53 sun3x_prom_init(); 53 sun3x_prom_init();
54 54
55 mach_get_irq_list = show_sun3_interrupts;
56 mach_max_dma_address = 0xffffffff; /* we can DMA anywhere, whee */ 55 mach_max_dma_address = 0xffffffff; /* we can DMA anywhere, whee */
57 56
58 mach_default_handler = &sun3_default_handler;
59 mach_sched_init = sun3x_sched_init; 57 mach_sched_init = sun3x_sched_init;
60 mach_init_IRQ = sun3_init_IRQ; 58 mach_init_IRQ = sun3_init_IRQ;
61 enable_irq = sun3_enable_irq;
62 disable_irq = sun3_disable_irq;
63 mach_request_irq = sun3_request_irq;
64 mach_free_irq = sun3_free_irq;
65 mach_process_int = sun3_process_int;
66 59
67 mach_gettimeoffset = sun3x_gettimeoffset; 60 mach_gettimeoffset = sun3x_gettimeoffset;
68 mach_reset = sun3x_reboot; 61 mach_reset = sun3x_reboot;
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index a9bf6cc3abd1..676e868d26fb 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -13,6 +13,7 @@
13#include <linux/smp_lock.h> 13#include <linux/smp_lock.h>
14#include <linux/time.h> 14#include <linux/time.h>
15#include <linux/ptrace.h> 15#include <linux/ptrace.h>
16#include <linux/resource.h>
16 17
17#include <asm/ptrace.h> 18#include <asm/ptrace.h>
18#include <asm/uaccess.h> 19#include <asm/uaccess.h>
@@ -540,8 +541,6 @@ out:
540#define IRIX_P_PGID 2 541#define IRIX_P_PGID 2
541#define IRIX_P_ALL 7 542#define IRIX_P_ALL 7
542 543
543extern int getrusage(struct task_struct *, int, struct rusage __user *);
544
545#define W_EXITED 1 544#define W_EXITED 1
546#define W_TRAPPED 2 545#define W_TRAPPED 2
547#define W_STOPPED 4 546#define W_STOPPED 4
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 19e1ef43eb4b..1137dd6ea7aa 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -31,6 +31,7 @@
31#include <linux/socket.h> 31#include <linux/socket.h>
32#include <linux/security.h> 32#include <linux/security.h>
33#include <linux/syscalls.h> 33#include <linux/syscalls.h>
34#include <linux/resource.h>
34 35
35#include <asm/ptrace.h> 36#include <asm/ptrace.h>
36#include <asm/page.h> 37#include <asm/page.h>
@@ -235,7 +236,6 @@ asmlinkage int irix_prctl(unsigned option, ...)
235#undef DEBUG_PROCGRPS 236#undef DEBUG_PROCGRPS
236 237
237extern unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt); 238extern unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt);
238extern int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
239extern char *prom_getenv(char *name); 239extern char *prom_getenv(char *name);
240extern long prom_setenv(char *name, char *value); 240extern long prom_setenv(char *name, char *value);
241 241
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index c858eb4bef17..b5431ccf1147 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -1654,7 +1654,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1654 return -EINVAL; 1654 return -EINVAL;
1655 1655
1656 vma->vm_pgoff = offset >> PAGE_SHIFT; 1656 vma->vm_pgoff = offset >> PAGE_SHIFT;
1657 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
1658 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, 1657 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1659 vma->vm_page_prot, 1658 vma->vm_page_prot,
1660 mmap_state, write_combine); 1659 mmap_state, write_combine);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 5ad87c426bed..247937dd8b73 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -877,7 +877,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
877 return -EINVAL; 877 return -EINVAL;
878 878
879 vma->vm_pgoff = offset >> PAGE_SHIFT; 879 vma->vm_pgoff = offset >> PAGE_SHIFT;
880 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
881 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, 880 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
882 vma->vm_page_prot, 881 vma->vm_page_prot,
883 mmap_state, write_combine); 882 mmap_state, write_combine);
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index 2b87f82df135..2ab8f2be911e 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -115,8 +115,6 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
115{ 115{
116 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); 116 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
117 117
118 vma->vm_flags |= VM_SHM | VM_LOCKED;
119
120 if ((vma->vm_end - vma->vm_start) > dp->size) 118 if ((vma->vm_end - vma->vm_start) > dp->size)
121 return -EINVAL; 119 return -EINVAL;
122 120
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 91a6e04d9741..52f5659534f4 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/kprobes.h> 33#include <linux/kprobes.h>
34#include <linux/kexec.h> 34#include <linux/kexec.h>
35#include <linux/backlight.h>
35 36
36#include <asm/kdebug.h> 37#include <asm/kdebug.h>
37#include <asm/pgtable.h> 38#include <asm/pgtable.h>
@@ -105,10 +106,18 @@ int die(const char *str, struct pt_regs *regs, long err)
105 spin_lock_irq(&die_lock); 106 spin_lock_irq(&die_lock);
106 bust_spinlocks(1); 107 bust_spinlocks(1);
107#ifdef CONFIG_PMAC_BACKLIGHT 108#ifdef CONFIG_PMAC_BACKLIGHT
108 if (machine_is(powermac)) { 109 mutex_lock(&pmac_backlight_mutex);
109 set_backlight_enable(1); 110 if (machine_is(powermac) && pmac_backlight) {
110 set_backlight_level(BACKLIGHT_MAX); 111 struct backlight_properties *props;
112
113 down(&pmac_backlight->sem);
114 props = pmac_backlight->props;
115 props->brightness = props->max_brightness;
116 props->power = FB_BLANK_UNBLANK;
117 props->update_status(pmac_backlight);
118 up(&pmac_backlight->sem);
111 } 119 }
120 mutex_unlock(&pmac_backlight_mutex);
112#endif 121#endif
113 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); 122 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
114#ifdef CONFIG_PREEMPT 123#ifdef CONFIG_PREEMPT
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 2dfde61c8412..ce696c1cca75 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -89,7 +89,7 @@ void __init cbe_regs_init(void)
89 struct device_node *cpu; 89 struct device_node *cpu;
90 90
91 /* Build local fast map of CPUs */ 91 /* Build local fast map of CPUs */
92 for_each_cpu(i) 92 for_each_possible_cpu(i)
93 cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL); 93 cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
94 94
95 /* Find maps for each device tree CPU */ 95 /* Find maps for each device tree CPU */
@@ -110,7 +110,7 @@ void __init cbe_regs_init(void)
110 return; 110 return;
111 } 111 }
112 map->cpu_node = cpu; 112 map->cpu_node = cpu;
113 for_each_cpu(i) 113 for_each_possible_cpu(i)
114 if (cbe_thread_map[i].cpu_node == cpu) 114 if (cbe_thread_map[i].cpu_node == cpu)
115 cbe_thread_map[i].regs = map; 115 cbe_thread_map[i].regs = map;
116 116
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index f4e2d8805c9e..1bbf822b4efc 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -180,7 +180,7 @@ static int setup_iic_hardcoded(void)
180 unsigned long regs; 180 unsigned long regs;
181 struct iic *iic; 181 struct iic *iic;
182 182
183 for_each_cpu(cpu) { 183 for_each_possible_cpu(cpu) {
184 iic = &per_cpu(iic, cpu); 184 iic = &per_cpu(iic, cpu);
185 nodeid = cpu/2; 185 nodeid = cpu/2;
186 186
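
for_each_cpu() walked the same possible-CPU mask under an older, more ambiguous name; the switch to for_each_possible_cpu() only makes explicit that the loop visits every CPU that may ever come online, not just those online right now. A minimal illustration, with my_counter as a hypothetical per-CPU variable rather than anything from this patch:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void reset_all_counters(void)
{
	int cpu;

	/* touch the slot of every possible CPU, online or not */
	for_each_possible_cpu(cpu)
		per_cpu(my_counter, cpu) = 0;
}
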
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index 8be2f7d071f0..498b042e1837 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -3,200 +3,148 @@
3 * Contains support for the backlight. 3 * Contains support for the backlight.
4 * 4 *
5 * Copyright (C) 2000 Benjamin Herrenschmidt 5 * Copyright (C) 2000 Benjamin Herrenschmidt
6 * Copyright (C) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
6 * 7 *
7 */ 8 */
8 9
9#include <linux/config.h> 10#include <linux/config.h>
10#include <linux/kernel.h> 11#include <linux/kernel.h>
11#include <linux/module.h> 12#include <linux/fb.h>
12#include <linux/stddef.h> 13#include <linux/backlight.h>
13#include <linux/reboot.h>
14#include <linux/nvram.h>
15#include <linux/console.h>
16#include <asm/sections.h>
17#include <asm/ptrace.h>
18#include <asm/io.h>
19#include <asm/pgtable.h>
20#include <asm/system.h>
21#include <asm/prom.h> 14#include <asm/prom.h>
22#include <asm/machdep.h>
23#include <asm/nvram.h>
24#include <asm/backlight.h> 15#include <asm/backlight.h>
25 16
26#include <linux/adb.h> 17#define OLD_BACKLIGHT_MAX 15
27#include <linux/pmu.h>
28 18
29static struct backlight_controller *backlighter; 19/* Protect the pmac_backlight variable */
30static void* backlighter_data; 20DEFINE_MUTEX(pmac_backlight_mutex);
31static int backlight_autosave;
32static int backlight_level = BACKLIGHT_MAX;
33static int backlight_enabled = 1;
34static int backlight_req_level = -1;
35static int backlight_req_enable = -1;
36 21
37static void backlight_callback(void *); 22/* Main backlight storage
38static DECLARE_WORK(backlight_work, backlight_callback, NULL); 23 *
24 * Backlight drivers in this variable are required to have the "props"
25 * attribute set and to have an update_status function.
26 *
27 * We can only store one backlight here, but since Apple laptops have only one
28 * internal display, it doesn't matter. Other backlight drivers can be used
29 * independently.
30 *
31 * Lock ordering:
32 * pmac_backlight_mutex (global, main backlight)
33 * pmac_backlight->sem (backlight class)
34 */
35struct backlight_device *pmac_backlight;
39 36
40void register_backlight_controller(struct backlight_controller *ctrler, 37int pmac_has_backlight_type(const char *type)
41 void *data, char *type)
42{ 38{
43 struct device_node* bk_node; 39 struct device_node* bk_node = find_devices("backlight");
44 char *prop; 40
45 int valid = 0;
46
47 /* There's already a matching controller, bail out */
48 if (backlighter != NULL)
49 return;
50
51 bk_node = find_devices("backlight");
52
53#ifdef CONFIG_ADB_PMU
54 /* Special case for the old PowerBook since I can't test on it */
55 backlight_autosave = machine_is_compatible("AAPL,3400/2400")
56 || machine_is_compatible("AAPL,3500");
57 if ((backlight_autosave
58 || machine_is_compatible("AAPL,PowerBook1998")
59 || machine_is_compatible("PowerBook1,1"))
60 && !strcmp(type, "pmu"))
61 valid = 1;
62#endif
63 if (bk_node) { 41 if (bk_node) {
64 prop = get_property(bk_node, "backlight-control", NULL); 42 char *prop = get_property(bk_node, "backlight-control", NULL);
65 if (prop && !strncmp(prop, type, strlen(type))) 43 if (prop && strncmp(prop, type, strlen(type)) == 0)
66 valid = 1; 44 return 1;
67 }
68 if (!valid)
69 return;
70 backlighter = ctrler;
71 backlighter_data = data;
72
73 if (bk_node && !backlight_autosave)
74 prop = get_property(bk_node, "bklt", NULL);
75 else
76 prop = NULL;
77 if (prop) {
78 backlight_level = ((*prop)+1) >> 1;
79 if (backlight_level > BACKLIGHT_MAX)
80 backlight_level = BACKLIGHT_MAX;
81 } 45 }
82 46
83#ifdef CONFIG_ADB_PMU 47 return 0;
84 if (backlight_autosave) {
85 struct adb_request req;
86 pmu_request(&req, NULL, 2, 0xd9, 0);
87 while (!req.complete)
88 pmu_poll();
89 backlight_level = req.reply[0] >> 4;
90 }
91#endif
92 acquire_console_sem();
93 if (!backlighter->set_enable(1, backlight_level, data))
94 backlight_enabled = 1;
95 release_console_sem();
96
97 printk(KERN_INFO "Registered \"%s\" backlight controller,"
98 "level: %d/15\n", type, backlight_level);
99} 48}
100EXPORT_SYMBOL(register_backlight_controller);
101 49
102void unregister_backlight_controller(struct backlight_controller 50int pmac_backlight_curve_lookup(struct fb_info *info, int value)
103 *ctrler, void *data)
104{ 51{
105 /* We keep the current backlight level (for now) */ 52 int level = (FB_BACKLIGHT_LEVELS - 1);
106 if (ctrler == backlighter && data == backlighter_data) 53
107 backlighter = NULL; 54 if (info && info->bl_dev) {
55 int i, max = 0;
56
57 /* Look for biggest value */
58 for (i = 0; i < FB_BACKLIGHT_LEVELS; i++)
59 max = max((int)info->bl_curve[i], max);
60
61 /* Look for nearest value */
62 for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) {
63 int diff = abs(info->bl_curve[i] - value);
64 if (diff < max) {
65 max = diff;
66 level = i;
67 }
68 }
69
70 }
71
72 return level;
108} 73}
109EXPORT_SYMBOL(unregister_backlight_controller);
110 74
111static int __set_backlight_enable(int enable) 75static void pmac_backlight_key(int direction)
112{ 76{
113 int rc; 77 mutex_lock(&pmac_backlight_mutex);
114 78 if (pmac_backlight) {
115 if (!backlighter) 79 struct backlight_properties *props;
116 return -ENODEV; 80 int brightness;
117 acquire_console_sem(); 81
118 rc = backlighter->set_enable(enable, backlight_level, 82 down(&pmac_backlight->sem);
119 backlighter_data); 83 props = pmac_backlight->props;
120 if (!rc) 84
121 backlight_enabled = enable; 85 brightness = props->brightness +
122 release_console_sem(); 86 ((direction?-1:1) * (props->max_brightness / 15));
123 return rc; 87
88 if (brightness < 0)
89 brightness = 0;
90 else if (brightness > props->max_brightness)
91 brightness = props->max_brightness;
92
93 props->brightness = brightness;
94 props->update_status(pmac_backlight);
95
96 up(&pmac_backlight->sem);
97 }
98 mutex_unlock(&pmac_backlight_mutex);
124} 99}
125int set_backlight_enable(int enable) 100
101void pmac_backlight_key_up()
126{ 102{
127 if (!backlighter) 103 pmac_backlight_key(0);
128 return -ENODEV;
129 backlight_req_enable = enable;
130 schedule_work(&backlight_work);
131 return 0;
132} 104}
133 105
134EXPORT_SYMBOL(set_backlight_enable); 106void pmac_backlight_key_down()
135
136int get_backlight_enable(void)
137{ 107{
138 if (!backlighter) 108 pmac_backlight_key(1);
139 return -ENODEV;
140 return backlight_enabled;
141} 109}
142EXPORT_SYMBOL(get_backlight_enable);
143 110
144static int __set_backlight_level(int level) 111int pmac_backlight_set_legacy_brightness(int brightness)
145{ 112{
146 int rc = 0; 113 int error = -ENXIO;
147 114
148 if (!backlighter) 115 mutex_lock(&pmac_backlight_mutex);
149 return -ENODEV; 116 if (pmac_backlight) {
150 if (level < BACKLIGHT_MIN) 117 struct backlight_properties *props;
151 level = BACKLIGHT_OFF; 118
152 if (level > BACKLIGHT_MAX) 119 down(&pmac_backlight->sem);
153 level = BACKLIGHT_MAX; 120 props = pmac_backlight->props;
154 acquire_console_sem(); 121 props->brightness = brightness *
155 if (backlight_enabled) 122 props->max_brightness / OLD_BACKLIGHT_MAX;
156 rc = backlighter->set_level(level, backlighter_data); 123 props->update_status(pmac_backlight);
157 if (!rc) 124 up(&pmac_backlight->sem);
158 backlight_level = level; 125
159 release_console_sem(); 126 error = 0;
160 if (!rc && !backlight_autosave) {
161 level <<=1;
162 if (level & 0x10)
163 level |= 0x01;
164 // -- todo: save to property "bklt"
165 } 127 }
166 return rc; 128 mutex_unlock(&pmac_backlight_mutex);
129
130 return error;
167} 131}
168int set_backlight_level(int level) 132
133int pmac_backlight_get_legacy_brightness()
169{ 134{
170 if (!backlighter) 135 int result = -ENXIO;
171 return -ENODEV;
172 backlight_req_level = level;
173 schedule_work(&backlight_work);
174 return 0;
175}
176 136
177EXPORT_SYMBOL(set_backlight_level); 137 mutex_lock(&pmac_backlight_mutex);
138 if (pmac_backlight) {
139 struct backlight_properties *props;
178 140
179int get_backlight_level(void) 141 down(&pmac_backlight->sem);
180{ 142 props = pmac_backlight->props;
181 if (!backlighter) 143 result = props->brightness *
182 return -ENODEV; 144 OLD_BACKLIGHT_MAX / props->max_brightness;
183 return backlight_level; 145 up(&pmac_backlight->sem);
184} 146 }
185EXPORT_SYMBOL(get_backlight_level); 147 mutex_unlock(&pmac_backlight_mutex);
186 148
187static void backlight_callback(void *dummy) 149 return result;
188{
189 int level, enable;
190
191 do {
192 level = backlight_req_level;
193 enable = backlight_req_enable;
194 mb();
195
196 if (level >= 0)
197 __set_backlight_level(level);
198 if (enable >= 0)
199 __set_backlight_enable(enable);
200 } while(cmpxchg(&backlight_req_level, level, -1) != level ||
201 cmpxchg(&backlight_req_enable, enable, -1) != enable);
202} 150}
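
How a particular backlight driver becomes pmac_backlight is outside this file, but given the rules spelled out in the comment at the top (props set, update_status present, the pointer guarded by pmac_backlight_mutex), a driver that has already registered a struct backlight_device with the generic backlight class would presumably install itself along these lines. This is an illustrative sketch only; the function name is invented and no such helper exists in this patch.

static void my_publish_pmac_backlight(struct backlight_device *bd)
{
	mutex_lock(&pmac_backlight_mutex);
	if (!pmac_backlight)
		pmac_backlight = bd;	/* become the machine's main backlight */
	mutex_unlock(&pmac_backlight_mutex);
}

On removal the driver would clear the pointer again under the same mutex, so pmac_backlight_key() and the legacy brightness helpers never see a stale device.
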
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 4735b41c113c..0741df8c41b7 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -26,9 +26,6 @@
26#include <asm/prom.h> 26#include <asm/prom.h>
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/xmon.h> 28#include <asm/xmon.h>
29#ifdef CONFIG_PMAC_BACKLIGHT
30#include <asm/backlight.h>
31#endif
32#include <asm/processor.h> 29#include <asm/processor.h>
33#include <asm/pgtable.h> 30#include <asm/pgtable.h>
34#include <asm/mmu.h> 31#include <asm/mmu.h>
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 809673a36f7a..d20accf9650d 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -1032,7 +1032,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
1032 return -EINVAL; 1032 return -EINVAL;
1033 1033
1034 vma->vm_pgoff = offset >> PAGE_SHIFT; 1034 vma->vm_pgoff = offset >> PAGE_SHIFT;
1035 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
1036 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, 1035 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
1037 vma->vm_page_prot, 1036 vma->vm_page_prot,
1038 mmap_state, write_combine); 1037 mmap_state, write_combine);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0a04e4a564b2..b282034452a4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -47,6 +47,7 @@
47#include <asm/irq.h> 47#include <asm/irq.h>
48#include <asm/page.h> 48#include <asm/page.h>
49#include <asm/ptrace.h> 49#include <asm/ptrace.h>
50#include <asm/sections.h>
50 51
51/* 52/*
52 * Machine setup.. 53 * Machine setup..
@@ -66,11 +67,6 @@ unsigned long __initdata zholes_size[MAX_NR_ZONES];
66static unsigned long __initdata memory_end; 67static unsigned long __initdata memory_end;
67 68
68/* 69/*
69 * Setup options
70 */
71extern int _text,_etext, _edata, _end;
72
73/*
74 * This is set up by the setup-routine at boot-time 70 * This is set up by the setup-routine at boot-time
75 * for S390 need to find out, what we have to setup 71 * for S390 need to find out, what we have to setup
76 * using address 0x10400 ... 72 * using address 0x10400 ...
@@ -80,15 +76,11 @@ extern int _text,_etext, _edata, _end;
80 76
81static struct resource code_resource = { 77static struct resource code_resource = {
82 .name = "Kernel code", 78 .name = "Kernel code",
83 .start = (unsigned long) &_text,
84 .end = (unsigned long) &_etext - 1,
85 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 79 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
86}; 80};
87 81
88static struct resource data_resource = { 82static struct resource data_resource = {
89 .name = "Kernel data", 83 .name = "Kernel data",
90 .start = (unsigned long) &_etext,
91 .end = (unsigned long) &_edata - 1,
92 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 84 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
93}; 85};
94 86
@@ -422,6 +414,11 @@ setup_resources(void)
422 struct resource *res; 414 struct resource *res;
423 int i; 415 int i;
424 416
417 code_resource.start = (unsigned long) &_text;
418 code_resource.end = (unsigned long) &_etext - 1;
419 data_resource.start = (unsigned long) &_etext;
420 data_resource.end = (unsigned long) &_edata - 1;
421
425 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 422 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
426 res = alloc_bootmem_low(sizeof(struct resource)); 423 res = alloc_bootmem_low(sizeof(struct resource));
427 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 424 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
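The s390 change above stops open-coding `extern int _text, _etext, ...` and instead takes the section symbols from &lt;asm/sections.h&gt;, filling the resource bounds in setup_resources() at run time. A minimal user-space sketch of that pattern, with dummy symbols standing in for the ones the kernel linker script provides:

```c
#include <stdio.h>

struct resource {
	const char *name;
	unsigned long start;
	unsigned long end;
};

/* Dummy stand-ins for the _text/_etext/_edata symbols declared by
 * <asm/sections.h>; in the real kernel their addresses come from the
 * linker script, so the computed ranges are only meaningful there. */
static char _text[1], _etext[1], _edata[1];

static struct resource code_resource = { .name = "Kernel code" };
static struct resource data_resource = { .name = "Kernel data" };

static void setup_resources(void)
{
	/* same move as the s390 hunk: bounds are filled in at run time
	 * rather than in the static initializers */
	code_resource.start = (unsigned long) &_text;
	code_resource.end = (unsigned long) &_etext - 1;
	data_resource.start = (unsigned long) &_etext;
	data_resource.end = (unsigned long) &_edata - 1;
}

int main(void)
{
	setup_resources();
	printf("%s starts at %#lx\n", code_resource.name, code_resource.start);
	printf("%s starts at %#lx\n", data_resource.name, data_resource.start);
	return 0;
}
```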
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 6d7173fc55a3..79149314ed04 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -300,8 +300,6 @@ void mconsole_reboot(struct mc_request *req)
300 machine_restart(NULL); 300 machine_restart(NULL);
301} 301}
302 302
303extern void ctrl_alt_del(void);
304
305void mconsole_cad(struct mc_request *req) 303void mconsole_cad(struct mc_request *req)
306{ 304{
307 mconsole_reply(req, "", 0, 0); 305 mconsole_reply(req, "", 0, 0);
diff --git a/arch/um/include/sysdep-x86_64/syscalls.h b/arch/um/include/sysdep-x86_64/syscalls.h
index e06f83e80f4a..5e86aa047b2b 100644
--- a/arch/um/include/sysdep-x86_64/syscalls.h
+++ b/arch/um/include/sysdep-x86_64/syscalls.h
@@ -12,8 +12,6 @@
12 12
13typedef long syscall_handler_t(void); 13typedef long syscall_handler_t(void);
14 14
15extern syscall_handler_t *ia32_sys_call_table[];
16
17extern syscall_handler_t *sys_call_table[]; 15extern syscall_handler_t *sys_call_table[];
18 16
19#define EXECUTE_SYSCALL(syscall, regs) \ 17#define EXECUTE_SYSCALL(syscall, regs) \
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index b8d5116d7371..fdb82658b1a1 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -823,7 +823,7 @@ void __init setup_arch(char **cmdline_p)
823#endif 823#endif
824#ifdef CONFIG_KEXEC 824#ifdef CONFIG_KEXEC
825 if (crashk_res.start != crashk_res.end) { 825 if (crashk_res.start != crashk_res.end) {
826 reserve_bootmem(crashk_res.start, 826 reserve_bootmem_generic(crashk_res.start,
827 crashk_res.end - crashk_res.start + 1); 827 crashk_res.end - crashk_res.start + 1);
828 } 828 }
829#endif 829#endif
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index de19501aa809..c6f471b9eaa0 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -350,17 +350,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
350} 350}
351 351
352/* 352/*
353 * Set vm_flags of VMA, as appropriate for this architecture, for a pci device
354 * mapping.
355 */
356static __inline__ void
357__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
358 enum pci_mmap_state mmap_state)
359{
360 vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
361}
362
363/*
364 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci 353 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
365 * device mapping. 354 * device mapping.
366 */ 355 */
@@ -399,7 +388,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
399 if (ret < 0) 388 if (ret < 0)
400 return ret; 389 return ret;
401 390
402 __pci_mmap_set_flags(dev, vma, mmap_state);
403 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); 391 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
404 392
405 ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 393 ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 2a8af685926f..2641597c6549 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -64,6 +64,7 @@
64#include <linux/buffer_head.h> 64#include <linux/buffer_head.h>
65#include <linux/blkdev.h> 65#include <linux/blkdev.h>
66#include <linux/elevator.h> 66#include <linux/elevator.h>
67#include <linux/interrupt.h>
67 68
68#include <asm/setup.h> 69#include <asm/setup.h>
69#include <asm/uaccess.h> 70#include <asm/uaccess.h>
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 25c3c4a5da81..39b0f53186e8 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -34,7 +34,7 @@
34#include <linux/blkpg.h> 34#include <linux/blkpg.h>
35#include <linux/timer.h> 35#include <linux/timer.h>
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/hdreg.h> 38#include <linux/hdreg.h>
39#include <linux/spinlock.h> 39#include <linux/spinlock.h>
40#include <linux/compat.h> 40#include <linux/compat.h>
@@ -64,143 +64,129 @@ MODULE_LICENSE("GPL");
64 64
65/* define the PCI info for the cards we can control */ 65/* define the PCI info for the cards we can control */
66static const struct pci_device_id cciss_pci_device_id[] = { 66static const struct pci_device_id cciss_pci_device_id[] = {
 67 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
 68 		0x0E11, 0x4070, 0, 0, 0},
 69 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
 70 		0x0E11, 0x4080, 0, 0, 0},
 71 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
 72 		0x0E11, 0x4082, 0, 0, 0},
 73 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
 74 		0x0E11, 0x4083, 0, 0, 0},
 75 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
 76 		0x0E11, 0x409A, 0, 0, 0},
 77 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
 78 		0x0E11, 0x409B, 0, 0, 0},
 79 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
 80 		0x0E11, 0x409C, 0, 0, 0},
 81 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
 82 		0x0E11, 0x409D, 0, 0, 0},
 83 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
 84 		0x0E11, 0x4091, 0, 0, 0},
 85 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
 86 		0x103C, 0x3225, 0, 0, 0},
 87 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
 88 		0x103c, 0x3223, 0, 0, 0},
 89 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
 90 		0x103c, 0x3234, 0, 0, 0},
 91 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
 92 		0x103c, 0x3235, 0, 0, 0},
 93 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
 94 		0x103c, 0x3211, 0, 0, 0},
 95 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
 96 		0x103c, 0x3212, 0, 0, 0},
 97 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
 98 		0x103c, 0x3213, 0, 0, 0},
 99 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
100 		0x103c, 0x3214, 0, 0, 0},
101 	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
102 		0x103c, 0x3215, 0, 0, 0},
103 	{0,}
104 };
105 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
106 
107 #define NR_PRODUCTS ARRAY_SIZE(products)
108 
 67 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
 68 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
 69 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
 70 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
 71 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
 72 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
 73 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
 74 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
 75 	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
 76 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
 77 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
 78 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
 79 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
 80 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
 81 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
 82 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
 83 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
 84 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
 85 	{0,}
 86 };
 87 
 88 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
 89 
109/* board_id = Subsystem Device ID & Vendor ID 90/* board_id = Subsystem Device ID & Vendor ID
110 * product = Marketing Name for the board 91 * product = Marketing Name for the board
111 * access = Address of the struct of function pointers 92 * access = Address of the struct of function pointers
112 */ 93 */
113static struct board_type products[] = { 94static struct board_type products[] = {
114 { 0x40700E11, "Smart Array 5300", &SA5_access }, 95 {0x40700E11, "Smart Array 5300", &SA5_access},
115 { 0x40800E11, "Smart Array 5i", &SA5B_access}, 96 {0x40800E11, "Smart Array 5i", &SA5B_access},
116 { 0x40820E11, "Smart Array 532", &SA5B_access}, 97 {0x40820E11, "Smart Array 532", &SA5B_access},
117 { 0x40830E11, "Smart Array 5312", &SA5B_access}, 98 {0x40830E11, "Smart Array 5312", &SA5B_access},
118 { 0x409A0E11, "Smart Array 641", &SA5_access}, 99 {0x409A0E11, "Smart Array 641", &SA5_access},
119 { 0x409B0E11, "Smart Array 642", &SA5_access}, 100 {0x409B0E11, "Smart Array 642", &SA5_access},
120 { 0x409C0E11, "Smart Array 6400", &SA5_access}, 101 {0x409C0E11, "Smart Array 6400", &SA5_access},
121 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access}, 102 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
122 { 0x40910E11, "Smart Array 6i", &SA5_access}, 103 {0x40910E11, "Smart Array 6i", &SA5_access},
123 { 0x3225103C, "Smart Array P600", &SA5_access}, 104 {0x3225103C, "Smart Array P600", &SA5_access},
124 { 0x3223103C, "Smart Array P800", &SA5_access}, 105 {0x3223103C, "Smart Array P800", &SA5_access},
125 { 0x3234103C, "Smart Array P400", &SA5_access}, 106 {0x3234103C, "Smart Array P400", &SA5_access},
126 { 0x3235103C, "Smart Array P400i", &SA5_access}, 107 {0x3235103C, "Smart Array P400i", &SA5_access},
127 { 0x3211103C, "Smart Array E200i", &SA5_access}, 108 {0x3211103C, "Smart Array E200i", &SA5_access},
128 { 0x3212103C, "Smart Array E200", &SA5_access}, 109 {0x3212103C, "Smart Array E200", &SA5_access},
129 { 0x3213103C, "Smart Array E200i", &SA5_access}, 110 {0x3213103C, "Smart Array E200i", &SA5_access},
130 { 0x3214103C, "Smart Array E200i", &SA5_access}, 111 {0x3214103C, "Smart Array E200i", &SA5_access},
131 { 0x3215103C, "Smart Array E200i", &SA5_access}, 112 {0x3215103C, "Smart Array E200i", &SA5_access},
132}; 113};
133 114
134/* How long to wait (in millesconds) for board to go into simple mode */ 115/* How long to wait (in milliseconds) for board to go into simple mode */
135#define MAX_CONFIG_WAIT 30000 116#define MAX_CONFIG_WAIT 30000
136#define MAX_IOCTL_CONFIG_WAIT 1000 117#define MAX_IOCTL_CONFIG_WAIT 1000
137 118
138/*define how many times we will try a command because of bus resets */ 119/*define how many times we will try a command because of bus resets */
139#define MAX_CMD_RETRIES 3 120#define MAX_CMD_RETRIES 3
140 121
141#define READ_AHEAD 1024 122#define READ_AHEAD 1024
142#define NR_CMDS 384 /* #commands that can be outstanding */ 123#define NR_CMDS 384 /* #commands that can be outstanding */
143#define MAX_CTLR 32 124#define MAX_CTLR 32
144 125
145/* Originally cciss driver only supports 8 major numbers */ 126/* Originally cciss driver only supports 8 major numbers */
146#define MAX_CTLR_ORIG 8 127#define MAX_CTLR_ORIG 8
147 128
148
149static ctlr_info_t *hba[MAX_CTLR]; 129static ctlr_info_t *hba[MAX_CTLR];
150 130
151static void do_cciss_request(request_queue_t *q); 131static void do_cciss_request(request_queue_t *q);
152static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs); 132static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs);
153static int cciss_open(struct inode *inode, struct file *filep); 133static int cciss_open(struct inode *inode, struct file *filep);
154static int cciss_release(struct inode *inode, struct file *filep); 134static int cciss_release(struct inode *inode, struct file *filep);
155static int cciss_ioctl(struct inode *inode, struct file *filep, 135static int cciss_ioctl(struct inode *inode, struct file *filep,
156 unsigned int cmd, unsigned long arg); 136 unsigned int cmd, unsigned long arg);
157static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 137static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
158 138
159static int revalidate_allvol(ctlr_info_t *host); 139static int revalidate_allvol(ctlr_info_t *host);
160static int cciss_revalidate(struct gendisk *disk); 140static int cciss_revalidate(struct gendisk *disk);
161static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk); 141static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
162static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all); 142static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
143 int clear_all);
163 144
164static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf, 145static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
165 int withirq, unsigned int *total_size, unsigned int *block_size); 146 int withirq, unsigned int *total_size,
166static void cciss_geometry_inquiry(int ctlr, int logvol, 147 unsigned int *block_size);
167 int withirq, unsigned int total_size, 148static void cciss_geometry_inquiry(int ctlr, int logvol, int withirq,
168 unsigned int block_size, InquiryData_struct *inq_buff, 149 unsigned int total_size,
169 drive_info_struct *drv); 150 unsigned int block_size,
151 InquiryData_struct *inq_buff,
152 drive_info_struct *drv);
170static void cciss_getgeometry(int cntl_num); 153static void cciss_getgeometry(int cntl_num);
171static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32); 154static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
172static void start_io( ctlr_info_t *h); 155 __u32);
173static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size, 156static void start_io(ctlr_info_t *h);
174 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code, 157static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
175 unsigned char *scsi3addr, int cmd_type); 158 unsigned int use_unit_num, unsigned int log_unit,
176static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, 159 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
177 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code, 160static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
178 int cmd_type); 161 unsigned int use_unit_num, unsigned int log_unit,
162 __u8 page_code, int cmd_type);
179 163
180static void fail_all_cmds(unsigned long ctlr); 164static void fail_all_cmds(unsigned long ctlr);
181 165
182#ifdef CONFIG_PROC_FS 166#ifdef CONFIG_PROC_FS
183static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 167static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
184 int length, int *eof, void *data); 168 int length, int *eof, void *data);
185static void cciss_procinit(int i); 169static void cciss_procinit(int i);
186#else 170#else
187static void cciss_procinit(int i) {} 171static void cciss_procinit(int i)
188#endif /* CONFIG_PROC_FS */ 172{
173}
174#endif /* CONFIG_PROC_FS */
189 175
190#ifdef CONFIG_COMPAT 176#ifdef CONFIG_COMPAT
191static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg); 177static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
192#endif 178#endif
193 179
194static struct block_device_operations cciss_fops = { 180static struct block_device_operations cciss_fops = {
195 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
196 .open = cciss_open, 182 .open = cciss_open,
197 .release = cciss_release, 183 .release = cciss_release,
198 .ioctl = cciss_ioctl, 184 .ioctl = cciss_ioctl,
199 .getgeo = cciss_getgeo, 185 .getgeo = cciss_getgeo,
200#ifdef CONFIG_COMPAT 186#ifdef CONFIG_COMPAT
201 .compat_ioctl = cciss_compat_ioctl, 187 .compat_ioctl = cciss_compat_ioctl,
202#endif 188#endif
203 .revalidate_disk= cciss_revalidate, 189 .revalidate_disk = cciss_revalidate,
204}; 190};
205 191
206/* 192/*
@@ -208,28 +194,29 @@ static struct block_device_operations cciss_fops = {
208 */ 194 */
209static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c) 195static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
210{ 196{
211 if (*Qptr == NULL) { 197 if (*Qptr == NULL) {
212 *Qptr = c; 198 *Qptr = c;
213 c->next = c->prev = c; 199 c->next = c->prev = c;
214 } else { 200 } else {
215 c->prev = (*Qptr)->prev; 201 c->prev = (*Qptr)->prev;
216 c->next = (*Qptr); 202 c->next = (*Qptr);
217 (*Qptr)->prev->next = c; 203 (*Qptr)->prev->next = c;
218 (*Qptr)->prev = c; 204 (*Qptr)->prev = c;
219 } 205 }
220} 206}
221 207
222static inline CommandList_struct *removeQ(CommandList_struct **Qptr, 208static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
223 CommandList_struct *c) 209 CommandList_struct *c)
224{ 210{
225 if (c && c->next != c) { 211 if (c && c->next != c) {
226 if (*Qptr == c) *Qptr = c->next; 212 if (*Qptr == c)
227 c->prev->next = c->next; 213 *Qptr = c->next;
228 c->next->prev = c->prev; 214 c->prev->next = c->next;
229 } else { 215 c->next->prev = c->prev;
230 *Qptr = NULL; 216 } else {
231 } 217 *Qptr = NULL;
232 return c; 218 }
219 return c;
233} 220}
234 221
235#include "cciss_scsi.c" /* For SCSI tape support */ 222#include "cciss_scsi.c" /* For SCSI tape support */
@@ -242,23 +229,24 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
242#define ENG_GIG 1000000000 229#define ENG_GIG 1000000000
243#define ENG_GIG_FACTOR (ENG_GIG/512) 230#define ENG_GIG_FACTOR (ENG_GIG/512)
244#define RAID_UNKNOWN 6 231#define RAID_UNKNOWN 6
245static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG", 232static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
246 "UNKNOWN"}; 233 "UNKNOWN"
234};
247 235
248static struct proc_dir_entry *proc_cciss; 236static struct proc_dir_entry *proc_cciss;
249 237
250static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 238static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
251 int length, int *eof, void *data) 239 int length, int *eof, void *data)
252{ 240{
253 off_t pos = 0; 241 off_t pos = 0;
254 off_t len = 0; 242 off_t len = 0;
255 int size, i, ctlr; 243 int size, i, ctlr;
256 ctlr_info_t *h = (ctlr_info_t*)data; 244 ctlr_info_t *h = (ctlr_info_t *) data;
257 drive_info_struct *drv; 245 drive_info_struct *drv;
258 unsigned long flags; 246 unsigned long flags;
259 sector_t vol_sz, vol_sz_frac; 247 sector_t vol_sz, vol_sz_frac;
260 248
261 ctlr = h->ctlr; 249 ctlr = h->ctlr;
262 250
263 /* prevent displaying bogus info during configuration 251 /* prevent displaying bogus info during configuration
264 * or deconfiguration of a logical volume 252 * or deconfiguration of a logical volume
@@ -266,35 +254,35 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
266 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 254 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
267 if (h->busy_configuring) { 255 if (h->busy_configuring) {
268 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 256 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
269 return -EBUSY; 257 return -EBUSY;
270 } 258 }
271 h->busy_configuring = 1; 259 h->busy_configuring = 1;
272 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 260 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
273 261
274 size = sprintf(buffer, "%s: HP %s Controller\n" 262 size = sprintf(buffer, "%s: HP %s Controller\n"
275 "Board ID: 0x%08lx\n" 263 "Board ID: 0x%08lx\n"
276 "Firmware Version: %c%c%c%c\n" 264 "Firmware Version: %c%c%c%c\n"
277 "IRQ: %d\n" 265 "IRQ: %d\n"
278 "Logical drives: %d\n" 266 "Logical drives: %d\n"
279 "Current Q depth: %d\n" 267 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n" 268 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n" 269 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n" 270 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n", 271 "Max SG entries since init: %d\n\n",
284 h->devname, 272 h->devname,
285 h->product_name, 273 h->product_name,
286 (unsigned long)h->board_id, 274 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3], 275 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
288 (unsigned int)h->intr[SIMPLE_MODE_INT], 276 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
289 h->num_luns, 277 h->num_luns, h->Qdepth, h->commands_outstanding,
290 h->Qdepth, h->commands_outstanding, 278 h->maxQsinceinit, h->max_outstanding, h->maxSG);
291 h->maxQsinceinit, h->max_outstanding, h->maxSG); 279
292 280 pos += size;
293 pos += size; len += size; 281 len += size;
294 cciss_proc_tape_report(ctlr, buffer, &pos, &len); 282 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
295 for(i=0; i<=h->highest_lun; i++) { 283 for (i = 0; i <= h->highest_lun; i++) {
296 284
297 drv = &h->drv[i]; 285 drv = &h->drv[i];
298 if (drv->heads == 0) 286 if (drv->heads == 0)
299 continue; 287 continue;
300 288
@@ -305,25 +293,26 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
305 293
306 if (drv->raid_level > 5) 294 if (drv->raid_level > 5)
307 drv->raid_level = RAID_UNKNOWN; 295 drv->raid_level = RAID_UNKNOWN;
308 size = sprintf(buffer+len, "cciss/c%dd%d:" 296 size = sprintf(buffer + len, "cciss/c%dd%d:"
309 "\t%4u.%02uGB\tRAID %s\n", 297 "\t%4u.%02uGB\tRAID %s\n",
310 ctlr, i, (int)vol_sz, (int)vol_sz_frac, 298 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
311 raid_label[drv->raid_level]); 299 raid_label[drv->raid_level]);
312 pos += size; len += size; 300 pos += size;
313 } 301 len += size;
314 302 }
315 *eof = 1; 303
316 *start = buffer+offset; 304 *eof = 1;
317 len -= offset; 305 *start = buffer + offset;
318 if (len>length) 306 len -= offset;
319 len = length; 307 if (len > length)
308 len = length;
320 h->busy_configuring = 0; 309 h->busy_configuring = 0;
321 return len; 310 return len;
322} 311}
323 312
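For the size figures printed above, ENG_GIG_FACTOR converts a 512-byte sector count into decimal ("engineering") gigabytes. A rough stand-alone sketch of that conversion; the sector count is an invented example, and the elided vol_sz/vol_sz_frac lines of the hunk are only approximated here:

```c
#include <stdio.h>

#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG / 512)	/* 1953125 sectors per decimal GB */

int main(void)
{
	unsigned long long nr_blocks = 143305920ULL;	/* example 512-byte sectors */
	unsigned long long vol_sz = nr_blocks / ENG_GIG_FACTOR;
	unsigned long long vol_sz_frac =
		nr_blocks % ENG_GIG_FACTOR * 100 / ENG_GIG_FACTOR;

	/* prints "cciss/c0d0:	  73.37GB" for the example figure */
	printf("cciss/c0d0:\t%4llu.%02lluGB\n", vol_sz, vol_sz_frac);
	return 0;
}
```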
324static int 313static int
325cciss_proc_write(struct file *file, const char __user *buffer, 314cciss_proc_write(struct file *file, const char __user *buffer,
326 unsigned long count, void *data) 315 unsigned long count, void *data)
327{ 316{
328 unsigned char cmd[80]; 317 unsigned char cmd[80];
329 int len; 318 int len;
@@ -332,20 +321,23 @@ cciss_proc_write(struct file *file, const char __user *buffer,
332 int rc; 321 int rc;
333#endif 322#endif
334 323
335 if (count > sizeof(cmd)-1) return -EINVAL; 324 if (count > sizeof(cmd) - 1)
336 if (copy_from_user(cmd, buffer, count)) return -EFAULT; 325 return -EINVAL;
326 if (copy_from_user(cmd, buffer, count))
327 return -EFAULT;
337 cmd[count] = '\0'; 328 cmd[count] = '\0';
338 len = strlen(cmd); // above 3 lines ensure safety 329 len = strlen(cmd); // above 3 lines ensure safety
339 if (len && cmd[len-1] == '\n') 330 if (len && cmd[len - 1] == '\n')
340 cmd[--len] = '\0'; 331 cmd[--len] = '\0';
341# ifdef CONFIG_CISS_SCSI_TAPE 332# ifdef CONFIG_CISS_SCSI_TAPE
342 if (strcmp("engage scsi", cmd)==0) { 333 if (strcmp("engage scsi", cmd) == 0) {
343 rc = cciss_engage_scsi(h->ctlr); 334 rc = cciss_engage_scsi(h->ctlr);
344 if (rc != 0) return -rc; 335 if (rc != 0)
345 return count; 336 return -rc;
346 } 337 return count;
347 /* might be nice to have "disengage" too, but it's not 338 }
348 safely possible. (only 1 module use count, lock issues.) */ 339 /* might be nice to have "disengage" too, but it's not
340 safely possible. (only 1 module use count, lock issues.) */
349# endif 341# endif
350 return -EINVAL; 342 return -EINVAL;
351} 343}
@@ -358,116 +350,113 @@ static void __devinit cciss_procinit(int i)
358{ 350{
359 struct proc_dir_entry *pde; 351 struct proc_dir_entry *pde;
360 352
361 if (proc_cciss == NULL) { 353 if (proc_cciss == NULL) {
362 proc_cciss = proc_mkdir("cciss", proc_root_driver); 354 proc_cciss = proc_mkdir("cciss", proc_root_driver);
363 if (!proc_cciss) 355 if (!proc_cciss)
364 return; 356 return;
365 } 357 }
366 358
367 pde = create_proc_read_entry(hba[i]->devname, 359 pde = create_proc_read_entry(hba[i]->devname,
368 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, 360 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
369 proc_cciss, cciss_proc_get_info, hba[i]); 361 proc_cciss, cciss_proc_get_info, hba[i]);
370 pde->write_proc = cciss_proc_write; 362 pde->write_proc = cciss_proc_write;
371} 363}
372#endif /* CONFIG_PROC_FS */ 364#endif /* CONFIG_PROC_FS */
373 365
374/* 366/*
375 * For operations that cannot sleep, a command block is allocated at init, 367 * For operations that cannot sleep, a command block is allocated at init,
376 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 368 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
377 * which ones are free or in use. For operations that can wait for kmalloc 369 * which ones are free or in use. For operations that can wait for kmalloc
378 * to possible sleep, this routine can be called with get_from_pool set to 0. 370 * to possible sleep, this routine can be called with get_from_pool set to 0.
379 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was. 371 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
380 */ 372 */
381static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool) 373static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
382{ 374{
383 CommandList_struct *c; 375 CommandList_struct *c;
384 int i; 376 int i;
385 u64bit temp64; 377 u64bit temp64;
386 dma_addr_t cmd_dma_handle, err_dma_handle; 378 dma_addr_t cmd_dma_handle, err_dma_handle;
387 379
388 if (!get_from_pool) 380 if (!get_from_pool) {
389 { 381 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
390 c = (CommandList_struct *) pci_alloc_consistent( 382 sizeof(CommandList_struct), &cmd_dma_handle);
391 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle); 383 if (c == NULL)
392 if(c==NULL) 384 return NULL;
393 return NULL;
394 memset(c, 0, sizeof(CommandList_struct)); 385 memset(c, 0, sizeof(CommandList_struct));
395 386
396 c->cmdindex = -1; 387 c->cmdindex = -1;
397 388
398 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent( 389 c->err_info = (ErrorInfo_struct *)
399 h->pdev, sizeof(ErrorInfo_struct), 390 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
400 &err_dma_handle); 391 &err_dma_handle);
401 392
402 if (c->err_info == NULL) 393 if (c->err_info == NULL) {
403 { 394 pci_free_consistent(h->pdev,
404 pci_free_consistent(h->pdev,
405 sizeof(CommandList_struct), c, cmd_dma_handle); 395 sizeof(CommandList_struct), c, cmd_dma_handle);
406 return NULL; 396 return NULL;
407 } 397 }
408 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 398 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
409 } else /* get it out of the controllers pool */ 399 } else { /* get it out of the controllers pool */
410 { 400
411 do { 401 do {
412 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS); 402 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
413 if (i == NR_CMDS) 403 if (i == NR_CMDS)
414 return NULL; 404 return NULL;
415 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0); 405 } while (test_and_set_bit
406 (i & (BITS_PER_LONG - 1),
407 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
416#ifdef CCISS_DEBUG 408#ifdef CCISS_DEBUG
417 printk(KERN_DEBUG "cciss: using command buffer %d\n", i); 409 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
418#endif 410#endif
419 c = h->cmd_pool + i; 411 c = h->cmd_pool + i;
420 memset(c, 0, sizeof(CommandList_struct)); 412 memset(c, 0, sizeof(CommandList_struct));
421 cmd_dma_handle = h->cmd_pool_dhandle 413 cmd_dma_handle = h->cmd_pool_dhandle
422 + i*sizeof(CommandList_struct); 414 + i * sizeof(CommandList_struct);
423 c->err_info = h->errinfo_pool + i; 415 c->err_info = h->errinfo_pool + i;
424 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 416 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
425 err_dma_handle = h->errinfo_pool_dhandle 417 err_dma_handle = h->errinfo_pool_dhandle
426 + i*sizeof(ErrorInfo_struct); 418 + i * sizeof(ErrorInfo_struct);
427 h->nr_allocs++; 419 h->nr_allocs++;
428 420
429 c->cmdindex = i; 421 c->cmdindex = i;
430 } 422 }
431 423
432 c->busaddr = (__u32) cmd_dma_handle; 424 c->busaddr = (__u32) cmd_dma_handle;
433 temp64.val = (__u64) err_dma_handle; 425 temp64.val = (__u64) err_dma_handle;
434 c->ErrDesc.Addr.lower = temp64.val32.lower; 426 c->ErrDesc.Addr.lower = temp64.val32.lower;
435 c->ErrDesc.Addr.upper = temp64.val32.upper; 427 c->ErrDesc.Addr.upper = temp64.val32.upper;
436 c->ErrDesc.Len = sizeof(ErrorInfo_struct); 428 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
437
438 c->ctlr = h->ctlr;
439 return c;
440
441 429
430 c->ctlr = h->ctlr;
431 return c;
442} 432}
443 433
444/* 434/*
445 * Frees a command block that was previously allocated with cmd_alloc(). 435 * Frees a command block that was previously allocated with cmd_alloc().
446 */ 436 */
447static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool) 437static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
448{ 438{
449 int i; 439 int i;
450 u64bit temp64; 440 u64bit temp64;
451 441
452 if( !got_from_pool) 442 if (!got_from_pool) {
453 {
454 temp64.val32.lower = c->ErrDesc.Addr.lower; 443 temp64.val32.lower = c->ErrDesc.Addr.lower;
455 temp64.val32.upper = c->ErrDesc.Addr.upper; 444 temp64.val32.upper = c->ErrDesc.Addr.upper;
456 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), 445 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
457 c->err_info, (dma_addr_t) temp64.val); 446 c->err_info, (dma_addr_t) temp64.val);
458 pci_free_consistent(h->pdev, sizeof(CommandList_struct), 447 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
459 c, (dma_addr_t) c->busaddr); 448 c, (dma_addr_t) c->busaddr);
460 } else 449 } else {
461 {
462 i = c - h->cmd_pool; 450 i = c - h->cmd_pool;
463 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)); 451 clear_bit(i & (BITS_PER_LONG - 1),
464 h->nr_frees++; 452 h->cmd_pool_bits + (i / BITS_PER_LONG));
465 } 453 h->nr_frees++;
454 }
466} 455}
467 456
468static inline ctlr_info_t *get_host(struct gendisk *disk) 457static inline ctlr_info_t *get_host(struct gendisk *disk)
469{ 458{
470 return disk->queue->queuedata; 459 return disk->queue->queuedata;
471} 460}
472 461
473static inline drive_info_struct *get_drv(struct gendisk *disk) 462static inline drive_info_struct *get_drv(struct gendisk *disk)
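The comment above cmd_alloc()/cmd_free() describes a pool of preallocated command blocks tracked by a bitmap. A simplified, non-atomic user-space sketch of that bookkeeping; the kernel code uses find_first_zero_bit() and test_and_set_bit() in a retry loop so concurrent allocators cannot claim the same slot:

```c
#include <stdio.h>
#include <limits.h>

#define NR_CMDS 8
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long cmd_pool_bits[(NR_CMDS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int cmd_alloc_index(void)
{
	int i;

	for (i = 0; i < NR_CMDS; i++) {
		unsigned long *word = &cmd_pool_bits[i / BITS_PER_LONG];
		unsigned long mask = 1UL << (i % BITS_PER_LONG);

		if (!(*word & mask)) {	/* first free slot */
			*word |= mask;	/* the driver uses test_and_set_bit() here */
			return i;
		}
	}
	return -1;			/* pool exhausted */
}

static void cmd_free_index(int i)
{
	cmd_pool_bits[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
	int a = cmd_alloc_index();
	int b = cmd_alloc_index();

	printf("allocated slots %d and %d\n", a, b);
	cmd_free_index(a);
	printf("slot %d is handed out again: %d\n", a, cmd_alloc_index());
	return 0;
}
```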
@@ -485,7 +474,7 @@ static int cciss_open(struct inode *inode, struct file *filep)
485 474
486#ifdef CCISS_DEBUG 475#ifdef CCISS_DEBUG
487 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name); 476 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
488#endif /* CCISS_DEBUG */ 477#endif /* CCISS_DEBUG */
489 478
490 if (host->busy_initializing || drv->busy_configuring) 479 if (host->busy_initializing || drv->busy_configuring)
491 return -EBUSY; 480 return -EBUSY;
@@ -498,10 +487,10 @@ static int cciss_open(struct inode *inode, struct file *filep)
498 * for "raw controller". 487 * for "raw controller".
499 */ 488 */
500 if (drv->nr_blocks == 0) { 489 if (drv->nr_blocks == 0) {
501 if (iminor(inode) != 0) { /* not node 0? */ 490 if (iminor(inode) != 0) { /* not node 0? */
502 /* if not node 0 make sure it is a partition = 0 */ 491 /* if not node 0 make sure it is a partition = 0 */
503 if (iminor(inode) & 0x0f) { 492 if (iminor(inode) & 0x0f) {
504 return -ENXIO; 493 return -ENXIO;
505 /* if it is, make sure we have a LUN ID */ 494 /* if it is, make sure we have a LUN ID */
506 } else if (drv->LunID == 0) { 495 } else if (drv->LunID == 0) {
507 return -ENXIO; 496 return -ENXIO;
@@ -514,6 +503,7 @@ static int cciss_open(struct inode *inode, struct file *filep)
514 host->usage_count++; 503 host->usage_count++;
515 return 0; 504 return 0;
516} 505}
506
517/* 507/*
518 * Close. Sync first. 508 * Close. Sync first.
519 */ 509 */
@@ -523,8 +513,9 @@ static int cciss_release(struct inode *inode, struct file *filep)
523 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk); 513 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
524 514
525#ifdef CCISS_DEBUG 515#ifdef CCISS_DEBUG
526 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name); 516 printk(KERN_DEBUG "cciss_release %s\n",
527#endif /* CCISS_DEBUG */ 517 inode->i_bdev->bd_disk->disk_name);
518#endif /* CCISS_DEBUG */
528 519
529 drv->usage_count--; 520 drv->usage_count--;
530 host->usage_count--; 521 host->usage_count--;
@@ -542,8 +533,10 @@ static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
542 return ret; 533 return ret;
543} 534}
544 535
545static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg); 536static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
546static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg); 537 unsigned long arg);
538static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
539 unsigned long arg);
547 540
548static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg) 541static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
549{ 542{
@@ -575,19 +568,26 @@ static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
575 } 568 }
576} 569}
577 570
578static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg) 571static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
572 unsigned long arg)
579{ 573{
580 IOCTL32_Command_struct __user *arg32 = 574 IOCTL32_Command_struct __user *arg32 =
581 (IOCTL32_Command_struct __user *) arg; 575 (IOCTL32_Command_struct __user *) arg;
582 IOCTL_Command_struct arg64; 576 IOCTL_Command_struct arg64;
583 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 577 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
584 int err; 578 int err;
585 u32 cp; 579 u32 cp;
586 580
587 err = 0; 581 err = 0;
588 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); 582 err |=
589 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); 583 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
590 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); 584 sizeof(arg64.LUN_info));
585 err |=
586 copy_from_user(&arg64.Request, &arg32->Request,
587 sizeof(arg64.Request));
588 err |=
589 copy_from_user(&arg64.error_info, &arg32->error_info,
590 sizeof(arg64.error_info));
591 err |= get_user(arg64.buf_size, &arg32->buf_size); 591 err |= get_user(arg64.buf_size, &arg32->buf_size);
592 err |= get_user(cp, &arg32->buf); 592 err |= get_user(cp, &arg32->buf);
593 arg64.buf = compat_ptr(cp); 593 arg64.buf = compat_ptr(cp);
@@ -596,28 +596,38 @@ static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long ar
596 if (err) 596 if (err)
597 return -EFAULT; 597 return -EFAULT;
598 598
599 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p); 599 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
600 if (err) 600 if (err)
601 return err; 601 return err;
602 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); 602 err |=
603 copy_in_user(&arg32->error_info, &p->error_info,
604 sizeof(arg32->error_info));
603 if (err) 605 if (err)
604 return -EFAULT; 606 return -EFAULT;
605 return err; 607 return err;
606} 608}
607 609
608static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg) 610static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
611 unsigned long arg)
609{ 612{
610 BIG_IOCTL32_Command_struct __user *arg32 = 613 BIG_IOCTL32_Command_struct __user *arg32 =
611 (BIG_IOCTL32_Command_struct __user *) arg; 614 (BIG_IOCTL32_Command_struct __user *) arg;
612 BIG_IOCTL_Command_struct arg64; 615 BIG_IOCTL_Command_struct arg64;
613 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); 616 BIG_IOCTL_Command_struct __user *p =
617 compat_alloc_user_space(sizeof(arg64));
614 int err; 618 int err;
615 u32 cp; 619 u32 cp;
616 620
617 err = 0; 621 err = 0;
618 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); 622 err |=
619 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); 623 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
620 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); 624 sizeof(arg64.LUN_info));
625 err |=
626 copy_from_user(&arg64.Request, &arg32->Request,
627 sizeof(arg64.Request));
628 err |=
629 copy_from_user(&arg64.error_info, &arg32->error_info,
630 sizeof(arg64.error_info));
621 err |= get_user(arg64.buf_size, &arg32->buf_size); 631 err |= get_user(arg64.buf_size, &arg32->buf_size);
622 err |= get_user(arg64.malloc_size, &arg32->malloc_size); 632 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
623 err |= get_user(cp, &arg32->buf); 633 err |= get_user(cp, &arg32->buf);
@@ -625,12 +635,14 @@ static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned
625 err |= copy_to_user(p, &arg64, sizeof(arg64)); 635 err |= copy_to_user(p, &arg64, sizeof(arg64));
626 636
627 if (err) 637 if (err)
628 return -EFAULT; 638 return -EFAULT;
629 639
630 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p); 640 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
631 if (err) 641 if (err)
632 return err; 642 return err;
633 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); 643 err |=
644 copy_in_user(&arg32->error_info, &p->error_info,
645 sizeof(arg32->error_info));
634 if (err) 646 if (err)
635 return -EFAULT; 647 return -EFAULT;
636 return err; 648 return err;
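The compat helpers reshaped above follow one pattern: copy the fixed-width fields out of the 32-bit layout, widen the 32-bit buffer handle with compat_ptr(), and forward the native struct to do_ioctl(). A stand-alone sketch of that field-by-field translation; the struct and field names are invented for the example and the compat_alloc_user_space() round trip is omitted:

```c
#include <stdio.h>
#include <stdint.h>

struct ioctl32_cmd {
	uint32_t buf;		/* 32-bit user pointer as seen from a compat task */
	uint32_t buf_size;
};

struct ioctl_cmd {
	void *buf;		/* native pointer expected by the 64-bit path */
	uint32_t buf_size;
};

static void translate(const struct ioctl32_cmd *in, struct ioctl_cmd *out)
{
	/* compat_ptr() in the kernel; a plain cast is enough to show the idea */
	out->buf = (void *)(uintptr_t)in->buf;
	out->buf_size = in->buf_size;
}

int main(void)
{
	/* made-up 32-bit handle, not a real mapping */
	struct ioctl32_cmd arg32 = { 0x10000u, 64 };
	struct ioctl_cmd arg64;

	translate(&arg32, &arg64);
	printf("translated %u-byte buffer at %p\n", arg64.buf_size, arg64.buf);
	return 0;
}
```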
@@ -651,10 +663,10 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
651} 663}
652 664
653/* 665/*
654 * ioctl 666 * ioctl
655 */ 667 */
656static int cciss_ioctl(struct inode *inode, struct file *filep, 668static int cciss_ioctl(struct inode *inode, struct file *filep,
657 unsigned int cmd, unsigned long arg) 669 unsigned int cmd, unsigned long arg)
658{ 670{
659 struct block_device *bdev = inode->i_bdev; 671 struct block_device *bdev = inode->i_bdev;
660 struct gendisk *disk = bdev->bd_disk; 672 struct gendisk *disk = bdev->bd_disk;
@@ -665,171 +677,193 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
665 677
666#ifdef CCISS_DEBUG 678#ifdef CCISS_DEBUG
667 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg); 679 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
668#endif /* CCISS_DEBUG */ 680#endif /* CCISS_DEBUG */
669 681
670 switch(cmd) { 682 switch (cmd) {
671 case CCISS_GETPCIINFO: 683 case CCISS_GETPCIINFO:
672 { 684 {
673 cciss_pci_info_struct pciinfo; 685 cciss_pci_info_struct pciinfo;
674 686
675 if (!arg) return -EINVAL; 687 if (!arg)
676 pciinfo.domain = pci_domain_nr(host->pdev->bus); 688 return -EINVAL;
677 pciinfo.bus = host->pdev->bus->number; 689 pciinfo.domain = pci_domain_nr(host->pdev->bus);
678 pciinfo.dev_fn = host->pdev->devfn; 690 pciinfo.bus = host->pdev->bus->number;
679 pciinfo.board_id = host->board_id; 691 pciinfo.dev_fn = host->pdev->devfn;
680 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct ))) 692 pciinfo.board_id = host->board_id;
681 return -EFAULT; 693 if (copy_to_user
682 return(0); 694 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
683 } 695 return -EFAULT;
696 return 0;
697 }
684 case CCISS_GETINTINFO: 698 case CCISS_GETINTINFO:
685 { 699 {
686 cciss_coalint_struct intinfo; 700 cciss_coalint_struct intinfo;
687 if (!arg) return -EINVAL; 701 if (!arg)
688 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay); 702 return -EINVAL;
689 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount); 703 intinfo.delay =
690 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct ))) 704 readl(&host->cfgtable->HostWrite.CoalIntDelay);
691 return -EFAULT; 705 intinfo.count =
692 return(0); 706 readl(&host->cfgtable->HostWrite.CoalIntCount);
693 } 707 if (copy_to_user
708 (argp, &intinfo, sizeof(cciss_coalint_struct)))
709 return -EFAULT;
710 return 0;
711 }
694 case CCISS_SETINTINFO: 712 case CCISS_SETINTINFO:
695 {
696 cciss_coalint_struct intinfo;
697 unsigned long flags;
698 int i;
699
700 if (!arg) return -EINVAL;
701 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
702 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
703 return -EFAULT;
704 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
705
706 { 713 {
707// printk("cciss_ioctl: delay and count cannot be 0\n"); 714 cciss_coalint_struct intinfo;
708 return( -EINVAL); 715 unsigned long flags;
716 int i;
717
718 if (!arg)
719 return -EINVAL;
720 if (!capable(CAP_SYS_ADMIN))
721 return -EPERM;
722 if (copy_from_user
723 (&intinfo, argp, sizeof(cciss_coalint_struct)))
724 return -EFAULT;
725 if ((intinfo.delay == 0) && (intinfo.count == 0))
726 {
727// printk("cciss_ioctl: delay and count cannot be 0\n");
728 return -EINVAL;
729 }
730 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
731 /* Update the field, and then ring the doorbell */
732 writel(intinfo.delay,
733 &(host->cfgtable->HostWrite.CoalIntDelay));
734 writel(intinfo.count,
735 &(host->cfgtable->HostWrite.CoalIntCount));
736 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
737
738 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
739 if (!(readl(host->vaddr + SA5_DOORBELL)
740 & CFGTBL_ChangeReq))
741 break;
742 /* delay and try again */
743 udelay(1000);
744 }
745 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
746 if (i >= MAX_IOCTL_CONFIG_WAIT)
747 return -EAGAIN;
748 return 0;
709 } 749 }
710 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
711 /* Update the field, and then ring the doorbell */
712 writel( intinfo.delay,
713 &(host->cfgtable->HostWrite.CoalIntDelay));
714 writel( intinfo.count,
715 &(host->cfgtable->HostWrite.CoalIntCount));
716 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
717
718 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
719 if (!(readl(host->vaddr + SA5_DOORBELL)
720 & CFGTBL_ChangeReq))
721 break;
722 /* delay and try again */
723 udelay(1000);
724 }
725 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
726 if (i >= MAX_IOCTL_CONFIG_WAIT)
727 return -EAGAIN;
728 return(0);
729 }
730 case CCISS_GETNODENAME: 750 case CCISS_GETNODENAME:
731 { 751 {
732 NodeName_type NodeName; 752 NodeName_type NodeName;
733 int i; 753 int i;
734 754
735 if (!arg) return -EINVAL; 755 if (!arg)
736 for(i=0;i<16;i++) 756 return -EINVAL;
737 NodeName[i] = readb(&host->cfgtable->ServerName[i]); 757 for (i = 0; i < 16; i++)
738 if (copy_to_user(argp, NodeName, sizeof( NodeName_type))) 758 NodeName[i] =
739 return -EFAULT; 759 readb(&host->cfgtable->ServerName[i]);
740 return(0); 760 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
741 } 761 return -EFAULT;
762 return 0;
763 }
742 case CCISS_SETNODENAME: 764 case CCISS_SETNODENAME:
743 { 765 {
744 NodeName_type NodeName; 766 NodeName_type NodeName;
745 unsigned long flags; 767 unsigned long flags;
746 int i; 768 int i;
747 769
748 if (!arg) return -EINVAL; 770 if (!arg)
749 if (!capable(CAP_SYS_ADMIN)) return -EPERM; 771 return -EINVAL;
750 772 if (!capable(CAP_SYS_ADMIN))
751 if (copy_from_user(NodeName, argp, sizeof( NodeName_type))) 773 return -EPERM;
752 return -EFAULT; 774
753 775 if (copy_from_user
754 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 776 (NodeName, argp, sizeof(NodeName_type)))
755 777 return -EFAULT;
756 /* Update the field, and then ring the doorbell */ 778
757 for(i=0;i<16;i++) 779 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
758 writeb( NodeName[i], &host->cfgtable->ServerName[i]); 780
759 781 /* Update the field, and then ring the doorbell */
760 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); 782 for (i = 0; i < 16; i++)
761 783 writeb(NodeName[i],
762 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) { 784 &host->cfgtable->ServerName[i]);
763 if (!(readl(host->vaddr + SA5_DOORBELL) 785
764 & CFGTBL_ChangeReq)) 786 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
765 break; 787
766 /* delay and try again */ 788 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
767 udelay(1000); 789 if (!(readl(host->vaddr + SA5_DOORBELL)
768 } 790 & CFGTBL_ChangeReq))
769 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 791 break;
770 if (i >= MAX_IOCTL_CONFIG_WAIT) 792 /* delay and try again */
771 return -EAGAIN; 793 udelay(1000);
772 return(0); 794 }
773 } 795 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
796 if (i >= MAX_IOCTL_CONFIG_WAIT)
797 return -EAGAIN;
798 return 0;
799 }
774 800
775 case CCISS_GETHEARTBEAT: 801 case CCISS_GETHEARTBEAT:
776 { 802 {
777 Heartbeat_type heartbeat; 803 Heartbeat_type heartbeat;
778 804
779 if (!arg) return -EINVAL; 805 if (!arg)
780 heartbeat = readl(&host->cfgtable->HeartBeat); 806 return -EINVAL;
781 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type))) 807 heartbeat = readl(&host->cfgtable->HeartBeat);
782 return -EFAULT; 808 if (copy_to_user
783 return(0); 809 (argp, &heartbeat, sizeof(Heartbeat_type)))
784 } 810 return -EFAULT;
811 return 0;
812 }
785 case CCISS_GETBUSTYPES: 813 case CCISS_GETBUSTYPES:
786 { 814 {
787 BusTypes_type BusTypes; 815 BusTypes_type BusTypes;
788 816
789 if (!arg) return -EINVAL; 817 if (!arg)
790 BusTypes = readl(&host->cfgtable->BusTypes); 818 return -EINVAL;
791 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) )) 819 BusTypes = readl(&host->cfgtable->BusTypes);
792 return -EFAULT; 820 if (copy_to_user
793 return(0); 821 (argp, &BusTypes, sizeof(BusTypes_type)))
794 } 822 return -EFAULT;
823 return 0;
824 }
795 case CCISS_GETFIRMVER: 825 case CCISS_GETFIRMVER:
796 { 826 {
797 FirmwareVer_type firmware; 827 FirmwareVer_type firmware;
798 828
799 if (!arg) return -EINVAL; 829 if (!arg)
800 memcpy(firmware, host->firm_ver, 4); 830 return -EINVAL;
831 memcpy(firmware, host->firm_ver, 4);
801 832
802 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type))) 833 if (copy_to_user
803 return -EFAULT; 834 (argp, firmware, sizeof(FirmwareVer_type)))
804 return(0); 835 return -EFAULT;
805 } 836 return 0;
806 case CCISS_GETDRIVVER: 837 }
807 { 838 case CCISS_GETDRIVVER:
808 DriverVer_type DriverVer = DRIVER_VERSION; 839 {
840 DriverVer_type DriverVer = DRIVER_VERSION;
809 841
810 if (!arg) return -EINVAL; 842 if (!arg)
843 return -EINVAL;
811 844
812 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) )) 845 if (copy_to_user
813 return -EFAULT; 846 (argp, &DriverVer, sizeof(DriverVer_type)))
814 return(0); 847 return -EFAULT;
815 } 848 return 0;
849 }
816 850
817 case CCISS_REVALIDVOLS: 851 case CCISS_REVALIDVOLS:
818 if (bdev != bdev->bd_contains || drv != host->drv) 852 if (bdev != bdev->bd_contains || drv != host->drv)
819 return -ENXIO; 853 return -ENXIO;
820 return revalidate_allvol(host); 854 return revalidate_allvol(host);
821 855
822 case CCISS_GETLUNINFO: { 856 case CCISS_GETLUNINFO:{
823 LogvolInfo_struct luninfo; 857 LogvolInfo_struct luninfo;
824 858
825 luninfo.LunID = drv->LunID; 859 luninfo.LunID = drv->LunID;
826 luninfo.num_opens = drv->usage_count; 860 luninfo.num_opens = drv->usage_count;
827 luninfo.num_parts = 0; 861 luninfo.num_parts = 0;
828 if (copy_to_user(argp, &luninfo, 862 if (copy_to_user(argp, &luninfo,
829 sizeof(LogvolInfo_struct))) 863 sizeof(LogvolInfo_struct)))
830 return -EFAULT; 864 return -EFAULT;
831 return(0); 865 return 0;
832 } 866 }
833 case CCISS_DEREGDISK: 867 case CCISS_DEREGDISK:
834 return rebuild_lun_table(host, disk); 868 return rebuild_lun_table(host, disk);
835 869
@@ -837,278 +871,284 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
837 return rebuild_lun_table(host, NULL); 871 return rebuild_lun_table(host, NULL);
838 872
839 case CCISS_PASSTHRU: 873 case CCISS_PASSTHRU:
840 {
841 IOCTL_Command_struct iocommand;
842 CommandList_struct *c;
843 char *buff = NULL;
844 u64bit temp64;
845 unsigned long flags;
846 DECLARE_COMPLETION(wait);
847
848 if (!arg) return -EINVAL;
849
850 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
851
852 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
853 return -EFAULT;
854 if((iocommand.buf_size < 1) &&
855 (iocommand.Request.Type.Direction != XFER_NONE))
856 {
857 return -EINVAL;
858 }
859#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
860 /* Check kmalloc limits */
861 if(iocommand.buf_size > 128000)
862 return -EINVAL;
863#endif
864 if(iocommand.buf_size > 0)
865 {
866 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
867 if( buff == NULL)
868 return -EFAULT;
869 }
870 if (iocommand.Request.Type.Direction == XFER_WRITE)
871 {
872 /* Copy the data into the buffer we created */
873 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
874 {
875 kfree(buff);
876 return -EFAULT;
877 }
878 } else {
879 memset(buff, 0, iocommand.buf_size);
880 }
881 if ((c = cmd_alloc(host , 0)) == NULL)
882 {
883 kfree(buff);
884 return -ENOMEM;
885 }
886 // Fill in the command type
887 c->cmd_type = CMD_IOCTL_PEND;
888 // Fill in Command Header
889 c->Header.ReplyQueue = 0; // unused in simple mode
890 if( iocommand.buf_size > 0) // buffer to fill
891 { 874 {
892 c->Header.SGList = 1; 875 IOCTL_Command_struct iocommand;
893 c->Header.SGTotal= 1; 876 CommandList_struct *c;
894 } else // no buffers to fill 877 char *buff = NULL;
895 { 878 u64bit temp64;
896 c->Header.SGList = 0; 879 unsigned long flags;
897 c->Header.SGTotal= 0; 880 DECLARE_COMPLETION(wait);
898 }
899 c->Header.LUN = iocommand.LUN_info;
900 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
901
902 // Fill in Request block
903 c->Request = iocommand.Request;
904
905 // Fill in the scatter gather information
906 if (iocommand.buf_size > 0 )
907 {
908 temp64.val = pci_map_single( host->pdev, buff,
909 iocommand.buf_size,
910 PCI_DMA_BIDIRECTIONAL);
911 c->SG[0].Addr.lower = temp64.val32.lower;
912 c->SG[0].Addr.upper = temp64.val32.upper;
913 c->SG[0].Len = iocommand.buf_size;
914 c->SG[0].Ext = 0; // we are not chaining
915 }
916 c->waiting = &wait;
917 881
918 /* Put the request on the tail of the request queue */ 882 if (!arg)
919 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 883 return -EINVAL;
920 addQ(&host->reqQ, c);
921 host->Qdepth++;
922 start_io(host);
923 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
924 884
925 wait_for_completion(&wait); 885 if (!capable(CAP_SYS_RAWIO))
886 return -EPERM;
926 887
927 /* unlock the buffers from DMA */ 888 if (copy_from_user
928 temp64.val32.lower = c->SG[0].Addr.lower; 889 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
929 temp64.val32.upper = c->SG[0].Addr.upper; 890 return -EFAULT;
930 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val, 891 if ((iocommand.buf_size < 1) &&
931 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 892 (iocommand.Request.Type.Direction != XFER_NONE)) {
893 return -EINVAL;
894 }
895#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
896 /* Check kmalloc limits */
897 if (iocommand.buf_size > 128000)
898 return -EINVAL;
899#endif
900 if (iocommand.buf_size > 0) {
901 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
902 if (buff == NULL)
903 return -EFAULT;
904 }
905 if (iocommand.Request.Type.Direction == XFER_WRITE) {
906 /* Copy the data into the buffer we created */
907 if (copy_from_user
908 (buff, iocommand.buf, iocommand.buf_size)) {
909 kfree(buff);
910 return -EFAULT;
911 }
912 } else {
913 memset(buff, 0, iocommand.buf_size);
914 }
915 if ((c = cmd_alloc(host, 0)) == NULL) {
916 kfree(buff);
917 return -ENOMEM;
918 }
919 // Fill in the command type
920 c->cmd_type = CMD_IOCTL_PEND;
921 // Fill in Command Header
922 c->Header.ReplyQueue = 0; // unused in simple mode
923 if (iocommand.buf_size > 0) // buffer to fill
924 {
925 c->Header.SGList = 1;
926 c->Header.SGTotal = 1;
927 } else // no buffers to fill
928 {
929 c->Header.SGList = 0;
930 c->Header.SGTotal = 0;
931 }
932 c->Header.LUN = iocommand.LUN_info;
933 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
932 934
933 /* Copy the error information out */ 935 // Fill in Request block
934 iocommand.error_info = *(c->err_info); 936 c->Request = iocommand.Request;
935 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
936 {
937 kfree(buff);
938 cmd_free(host, c, 0);
939 return( -EFAULT);
940 }
941 937
942 if (iocommand.Request.Type.Direction == XFER_READ) 938 // Fill in the scatter gather information
943 { 939 if (iocommand.buf_size > 0) {
944 /* Copy the data out of the buffer we created */ 940 temp64.val = pci_map_single(host->pdev, buff,
945 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) 941 iocommand.buf_size,
946 { 942 PCI_DMA_BIDIRECTIONAL);
947 kfree(buff); 943 c->SG[0].Addr.lower = temp64.val32.lower;
944 c->SG[0].Addr.upper = temp64.val32.upper;
945 c->SG[0].Len = iocommand.buf_size;
946 c->SG[0].Ext = 0; // we are not chaining
947 }
948 c->waiting = &wait;
949
950 /* Put the request on the tail of the request queue */
951 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
952 addQ(&host->reqQ, c);
953 host->Qdepth++;
954 start_io(host);
955 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
956
957 wait_for_completion(&wait);
958
959 /* unlock the buffers from DMA */
960 temp64.val32.lower = c->SG[0].Addr.lower;
961 temp64.val32.upper = c->SG[0].Addr.upper;
962 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
963 iocommand.buf_size,
964 PCI_DMA_BIDIRECTIONAL);
965
966 /* Copy the error information out */
967 iocommand.error_info = *(c->err_info);
968 if (copy_to_user
969 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
970 kfree(buff);
948 cmd_free(host, c, 0); 971 cmd_free(host, c, 0);
949 return -EFAULT; 972 return -EFAULT;
950 } 973 }
951 } 974
952 kfree(buff); 975 if (iocommand.Request.Type.Direction == XFER_READ) {
953 cmd_free(host, c, 0); 976 /* Copy the data out of the buffer we created */
954 return(0); 977 if (copy_to_user
955 } 978 (iocommand.buf, buff, iocommand.buf_size)) {
956 case CCISS_BIG_PASSTHRU: { 979 kfree(buff);
957 BIG_IOCTL_Command_struct *ioc; 980 cmd_free(host, c, 0);
958 CommandList_struct *c; 981 return -EFAULT;
959 unsigned char **buff = NULL; 982 }
960 int *buff_size = NULL; 983 }
961 u64bit temp64; 984 kfree(buff);
962 unsigned long flags; 985 cmd_free(host, c, 0);
963 BYTE sg_used = 0; 986 return 0;
964 int status = 0;
965 int i;
966 DECLARE_COMPLETION(wait);
967 __u32 left;
968 __u32 sz;
969 BYTE __user *data_ptr;
970
971 if (!arg)
972 return -EINVAL;
973 if (!capable(CAP_SYS_RAWIO))
974 return -EPERM;
975 ioc = (BIG_IOCTL_Command_struct *)
976 kmalloc(sizeof(*ioc), GFP_KERNEL);
977 if (!ioc) {
978 status = -ENOMEM;
979 goto cleanup1;
980 }
981 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
982 status = -EFAULT;
983 goto cleanup1;
984 } 987 }
985 if ((ioc->buf_size < 1) && 988 case CCISS_BIG_PASSTHRU:{
986 (ioc->Request.Type.Direction != XFER_NONE)) { 989 BIG_IOCTL_Command_struct *ioc;
990 CommandList_struct *c;
991 unsigned char **buff = NULL;
992 int *buff_size = NULL;
993 u64bit temp64;
994 unsigned long flags;
995 BYTE sg_used = 0;
996 int status = 0;
997 int i;
998 DECLARE_COMPLETION(wait);
999 __u32 left;
1000 __u32 sz;
1001 BYTE __user *data_ptr;
1002
1003 if (!arg)
1004 return -EINVAL;
1005 if (!capable(CAP_SYS_RAWIO))
1006 return -EPERM;
1007 ioc = (BIG_IOCTL_Command_struct *)
1008 kmalloc(sizeof(*ioc), GFP_KERNEL);
1009 if (!ioc) {
1010 status = -ENOMEM;
1011 goto cleanup1;
1012 }
1013 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1014 status = -EFAULT;
1015 goto cleanup1;
1016 }
1017 if ((ioc->buf_size < 1) &&
1018 (ioc->Request.Type.Direction != XFER_NONE)) {
987 status = -EINVAL; 1019 status = -EINVAL;
988 goto cleanup1; 1020 goto cleanup1;
989 } 1021 }
990 /* Check kmalloc limits using all SGs */ 1022 /* Check kmalloc limits using all SGs */
991 if (ioc->malloc_size > MAX_KMALLOC_SIZE) { 1023 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
992 status = -EINVAL; 1024 status = -EINVAL;
993 goto cleanup1; 1025 goto cleanup1;
994 } 1026 }
995 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { 1027 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
996 status = -EINVAL; 1028 status = -EINVAL;
997 goto cleanup1; 1029 goto cleanup1;
998 } 1030 }
999 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); 1031 buff =
1000 if (!buff) { 1032 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1001 status = -ENOMEM; 1033 if (!buff) {
1002 goto cleanup1;
1003 }
1004 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
1005 GFP_KERNEL);
1006 if (!buff_size) {
1007 status = -ENOMEM;
1008 goto cleanup1;
1009 }
1010 left = ioc->buf_size;
1011 data_ptr = ioc->buf;
1012 while (left) {
1013 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
1014 buff_size[sg_used] = sz;
1015 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1016 if (buff[sg_used] == NULL) {
1017 status = -ENOMEM; 1034 status = -ENOMEM;
1018 goto cleanup1; 1035 goto cleanup1;
1019 } 1036 }
1020 if (ioc->Request.Type.Direction == XFER_WRITE) { 1037 buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
1021 if (copy_from_user(buff[sg_used], data_ptr, sz)) { 1038 GFP_KERNEL);
1039 if (!buff_size) {
1040 status = -ENOMEM;
1041 goto cleanup1;
1042 }
1043 left = ioc->buf_size;
1044 data_ptr = ioc->buf;
1045 while (left) {
1046 sz = (left >
1047 ioc->malloc_size) ? ioc->
1048 malloc_size : left;
1049 buff_size[sg_used] = sz;
1050 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1051 if (buff[sg_used] == NULL) {
1022 status = -ENOMEM; 1052 status = -ENOMEM;
1023 goto cleanup1; 1053 goto cleanup1;
1024 } 1054 }
1055 if (ioc->Request.Type.Direction == XFER_WRITE) {
1056 if (copy_from_user
1057 (buff[sg_used], data_ptr, sz)) {
1058 status = -ENOMEM;
1059 goto cleanup1;
1060 }
1061 } else {
1062 memset(buff[sg_used], 0, sz);
1063 }
1064 left -= sz;
1065 data_ptr += sz;
1066 sg_used++;
1067 }
1068 if ((c = cmd_alloc(host, 0)) == NULL) {
1069 status = -ENOMEM;
1070 goto cleanup1;
1071 }
1072 c->cmd_type = CMD_IOCTL_PEND;
1073 c->Header.ReplyQueue = 0;
1074
1075 if (ioc->buf_size > 0) {
1076 c->Header.SGList = sg_used;
1077 c->Header.SGTotal = sg_used;
1025 } else { 1078 } else {
1026 memset(buff[sg_used], 0, sz); 1079 c->Header.SGList = 0;
1080 c->Header.SGTotal = 0;
1027 } 1081 }
1028 left -= sz; 1082 c->Header.LUN = ioc->LUN_info;
1029 data_ptr += sz; 1083 c->Header.Tag.lower = c->busaddr;
1030 sg_used++; 1084
1031 } 1085 c->Request = ioc->Request;
1032 if ((c = cmd_alloc(host , 0)) == NULL) { 1086 if (ioc->buf_size > 0) {
1033 status = -ENOMEM; 1087 int i;
1034 goto cleanup1; 1088 for (i = 0; i < sg_used; i++) {
1035 } 1089 temp64.val =
1036 c->cmd_type = CMD_IOCTL_PEND; 1090 pci_map_single(host->pdev, buff[i],
1037 c->Header.ReplyQueue = 0; 1091 buff_size[i],
1038 1092 PCI_DMA_BIDIRECTIONAL);
1039 if( ioc->buf_size > 0) { 1093 c->SG[i].Addr.lower =
1040 c->Header.SGList = sg_used; 1094 temp64.val32.lower;
1041 c->Header.SGTotal= sg_used; 1095 c->SG[i].Addr.upper =
1042 } else { 1096 temp64.val32.upper;
1043 c->Header.SGList = 0; 1097 c->SG[i].Len = buff_size[i];
1044 c->Header.SGTotal= 0; 1098 c->SG[i].Ext = 0; /* we are not chaining */
1045 } 1099 }
1046 c->Header.LUN = ioc->LUN_info; 1100 }
1047 c->Header.Tag.lower = c->busaddr; 1101 c->waiting = &wait;
1048 1102 /* Put the request on the tail of the request queue */
1049 c->Request = ioc->Request; 1103 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1050 if (ioc->buf_size > 0 ) { 1104 addQ(&host->reqQ, c);
1051 int i; 1105 host->Qdepth++;
1052 for(i=0; i<sg_used; i++) { 1106 start_io(host);
1053 temp64.val = pci_map_single( host->pdev, buff[i], 1107 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1054 buff_size[i], 1108 wait_for_completion(&wait);
1109 /* unlock the buffers from DMA */
1110 for (i = 0; i < sg_used; i++) {
1111 temp64.val32.lower = c->SG[i].Addr.lower;
1112 temp64.val32.upper = c->SG[i].Addr.upper;
1113 pci_unmap_single(host->pdev,
1114 (dma_addr_t) temp64.val, buff_size[i],
1055 PCI_DMA_BIDIRECTIONAL); 1115 PCI_DMA_BIDIRECTIONAL);
1056 c->SG[i].Addr.lower = temp64.val32.lower;
1057 c->SG[i].Addr.upper = temp64.val32.upper;
1058 c->SG[i].Len = buff_size[i];
1059 c->SG[i].Ext = 0; /* we are not chaining */
1060 } 1116 }
1061 } 1117 /* Copy the error information out */
1062 c->waiting = &wait; 1118 ioc->error_info = *(c->err_info);
1063 /* Put the request on the tail of the request queue */ 1119 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1064 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1120 cmd_free(host, c, 0);
1065 addQ(&host->reqQ, c); 1121 status = -EFAULT;
1066 host->Qdepth++; 1122 goto cleanup1;
1067 start_io(host); 1123 }
1068 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1124 if (ioc->Request.Type.Direction == XFER_READ) {
1069 wait_for_completion(&wait); 1125 /* Copy the data out of the buffer we created */
1070 /* unlock the buffers from DMA */ 1126 BYTE __user *ptr = ioc->buf;
1071 for(i=0; i<sg_used; i++) { 1127 for (i = 0; i < sg_used; i++) {
1072 temp64.val32.lower = c->SG[i].Addr.lower; 1128 if (copy_to_user
1073 temp64.val32.upper = c->SG[i].Addr.upper; 1129 (ptr, buff[i], buff_size[i])) {
1074 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val, 1130 cmd_free(host, c, 0);
1075 buff_size[i], PCI_DMA_BIDIRECTIONAL); 1131 status = -EFAULT;
1076 } 1132 goto cleanup1;
1077 /* Copy the error information out */ 1133 }
1078 ioc->error_info = *(c->err_info); 1134 ptr += buff_size[i];
1079 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1080 cmd_free(host, c, 0);
1081 status = -EFAULT;
1082 goto cleanup1;
1083 }
1084 if (ioc->Request.Type.Direction == XFER_READ) {
1085 /* Copy the data out of the buffer we created */
1086 BYTE __user *ptr = ioc->buf;
1087 for(i=0; i< sg_used; i++) {
1088 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1089 cmd_free(host, c, 0);
1090 status = -EFAULT;
1091 goto cleanup1;
1092 } 1135 }
1093 ptr += buff_size[i];
1094 } 1136 }
1137 cmd_free(host, c, 0);
1138 status = 0;
1139 cleanup1:
1140 if (buff) {
1141 for (i = 0; i < sg_used; i++)
1142 kfree(buff[i]);
1143 kfree(buff);
1144 }
1145 kfree(buff_size);
1146 kfree(ioc);
1147 return status;
1095 } 1148 }
1096 cmd_free(host, c, 0);
1097 status = 0;
1098cleanup1:
1099 if (buff) {
1100 for(i=0; i<sg_used; i++)
1101 kfree(buff[i]);
1102 kfree(buff);
1103 }
1104 kfree(buff_size);
1105 kfree(ioc);
1106 return(status);
1107 }
1108 default: 1149 default:
1109 return -ENOTTY; 1150 return -ENOTTY;
1110 } 1151 }
1111
1112} 1152}
1113 1153
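Both passthrough cases above follow the same life cycle: copy the caller's command header in with copy_from_user(), DMA-map the data with pci_map_single(), queue the command and sleep on a completion, unmap, then copy the error information (and, for reads, the data) back out. The BIG_PASSTHRU variant additionally splits the user buffer into at most MAXSGENTRIES kernel buffers of at most malloc_size bytes each, which is why it rejects buf_size > malloc_size * MAXSGENTRIES up front. A small self-contained sketch of that chunking arithmetic follows; the constants are illustrative placeholders, not the driver's values.

	#include <stdio.h>

	#define DEMO_MAXSGENTRIES 32            /* placeholder for MAXSGENTRIES */

	int main(void)
	{
		unsigned int malloc_size = 4096;        /* per-chunk allocation limit */
		unsigned int buf_size = 10000;          /* user buffer length */
		unsigned int left = buf_size, sz, sg_used = 0;

		if (buf_size > malloc_size * DEMO_MAXSGENTRIES) {
			fprintf(stderr, "buffer too large for SG table\n");
			return 1;
		}
		while (left) {
			sz = left > malloc_size ? malloc_size : left;
			printf("SG[%u]: %u bytes\n", sg_used, sz);
			left -= sz;
			sg_used++;
		}
		printf("%u scatter-gather entries used\n", sg_used);
		return 0;
	}

With a 10000-byte buffer and a 4096-byte per-chunk limit this yields three scatter-gather entries of 4096, 4096 and 1808 bytes, matching what the while (left) loop in the hunk would build.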
1114/* 1154/*
@@ -1119,7 +1159,7 @@ cleanup1:
1119 * 1159 *
1120 * Right now I'm using the getgeometry() function to do this, but this 1160 * Right now I'm using the getgeometry() function to do this, but this
1121 * function should probably be finer grained and allow you to revalidate one 1161 * function should probably be finer grained and allow you to revalidate one
1122 * particualar logical volume (instead of all of them on a particular 1162 * particular logical volume (instead of all of them on a particular
1123 * controller). 1163 * controller).
1124 */ 1164 */
1125static int revalidate_allvol(ctlr_info_t *host) 1165static int revalidate_allvol(ctlr_info_t *host)
@@ -1127,17 +1167,17 @@ static int revalidate_allvol(ctlr_info_t *host)
1127 int ctlr = host->ctlr, i; 1167 int ctlr = host->ctlr, i;
1128 unsigned long flags; 1168 unsigned long flags;
1129 1169
1130 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1170 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1131 if (host->usage_count > 1) { 1171 if (host->usage_count > 1) {
1132 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1172 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1133 printk(KERN_WARNING "cciss: Device busy for volume" 1173 printk(KERN_WARNING "cciss: Device busy for volume"
1134 " revalidation (usage=%d)\n", host->usage_count); 1174 " revalidation (usage=%d)\n", host->usage_count);
1135 return -EBUSY; 1175 return -EBUSY;
1136 } 1176 }
1137 host->usage_count++; 1177 host->usage_count++;
1138 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1178 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1139 1179
1140 for(i=0; i< NWD; i++) { 1180 for (i = 0; i < NWD; i++) {
1141 struct gendisk *disk = host->gendisk[i]; 1181 struct gendisk *disk = host->gendisk[i];
1142 if (disk) { 1182 if (disk) {
1143 request_queue_t *q = disk->queue; 1183 request_queue_t *q = disk->queue;
@@ -1149,22 +1189,22 @@ static int revalidate_allvol(ctlr_info_t *host)
1149 } 1189 }
1150 } 1190 }
1151 1191
1152 /* 1192 /*
1153 * Set the partition and block size structures for all volumes 1193 * Set the partition and block size structures for all volumes
1154 * on this controller to zero. We will reread all of this data 1194 * on this controller to zero. We will reread all of this data
1155 */ 1195 */
1156 memset(host->drv, 0, sizeof(drive_info_struct) 1196 memset(host->drv, 0, sizeof(drive_info_struct)
1157 * CISS_MAX_LUN); 1197 * CISS_MAX_LUN);
1158 /* 1198 /*
1159 * Tell the array controller not to give us any interrupts while 1199 * Tell the array controller not to give us any interrupts while
1160 * we check the new geometry. Then turn interrupts back on when 1200 * we check the new geometry. Then turn interrupts back on when
1161 * we're done. 1201 * we're done.
1162 */ 1202 */
1163 host->access.set_intr_mask(host, CCISS_INTR_OFF); 1203 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1164 cciss_getgeometry(ctlr); 1204 cciss_getgeometry(ctlr);
1165 host->access.set_intr_mask(host, CCISS_INTR_ON); 1205 host->access.set_intr_mask(host, CCISS_INTR_ON);
1166 1206
1167 /* Loop through each real device */ 1207 /* Loop through each real device */
1168 for (i = 0; i < NWD; i++) { 1208 for (i = 0; i < NWD; i++) {
1169 struct gendisk *disk = host->gendisk[i]; 1209 struct gendisk *disk = host->gendisk[i];
1170 drive_info_struct *drv = &(host->drv[i]); 1210 drive_info_struct *drv = &(host->drv[i]);
@@ -1176,8 +1216,8 @@ static int revalidate_allvol(ctlr_info_t *host)
1176 set_capacity(disk, drv->nr_blocks); 1216 set_capacity(disk, drv->nr_blocks);
1177 add_disk(disk); 1217 add_disk(disk);
1178 } 1218 }
1179 host->usage_count--; 1219 host->usage_count--;
1180 return 0; 1220 return 0;
1181} 1221}
1182 1222
1183static inline void complete_buffers(struct bio *bio, int status) 1223static inline void complete_buffers(struct bio *bio, int status)
@@ -1191,7 +1231,6 @@ static inline void complete_buffers(struct bio *bio, int status)
1191 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO); 1231 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1192 bio = xbh; 1232 bio = xbh;
1193 } 1233 }
1194
1195} 1234}
1196 1235
1197static void cciss_softirq_done(struct request *rq) 1236static void cciss_softirq_done(struct request *rq)
@@ -1209,7 +1248,7 @@ static void cciss_softirq_done(struct request *rq)
1209 1248
1210 /* command did not need to be retried */ 1249 /* command did not need to be retried */
1211 /* unmap the DMA mapping for all the scatter gather elements */ 1250 /* unmap the DMA mapping for all the scatter gather elements */
1212 for(i=0; i<cmd->Header.SGList; i++) { 1251 for (i = 0; i < cmd->Header.SGList; i++) {
1213 temp64.val32.lower = cmd->SG[i].Addr.lower; 1252 temp64.val32.lower = cmd->SG[i].Addr.lower;
1214 temp64.val32.upper = cmd->SG[i].Addr.upper; 1253 temp64.val32.upper = cmd->SG[i].Addr.upper;
1215 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir); 1254 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
@@ -1219,11 +1258,12 @@ static void cciss_softirq_done(struct request *rq)
1219 1258
1220#ifdef CCISS_DEBUG 1259#ifdef CCISS_DEBUG
1221 printk("Done with %p\n", rq); 1260 printk("Done with %p\n", rq);
1222#endif /* CCISS_DEBUG */ 1261#endif /* CCISS_DEBUG */
1223 1262
1263 add_disk_randomness(rq->rq_disk);
1224 spin_lock_irqsave(&h->lock, flags); 1264 spin_lock_irqsave(&h->lock, flags);
1225 end_that_request_last(rq, rq->errors); 1265 end_that_request_last(rq, rq->errors);
1226 cmd_free(h, cmd,1); 1266 cmd_free(h, cmd, 1);
1227 spin_unlock_irqrestore(&h->lock, flags); 1267 spin_unlock_irqrestore(&h->lock, flags);
1228} 1268}
1229 1269
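The temp64 manipulation above (and in the ioctl paths earlier) relies on the driver's u64bit union to split a 64-bit DMA handle into the two 32-bit halves the controller's SG descriptors want, and to glue them back together before pci_unmap_*(). A self-contained illustration of the same trick; the union below is a stand-in with the same shape as the driver's, and like the driver's usage it assumes the low word sits first in memory (little-endian).

	#include <stdint.h>
	#include <stdio.h>

	typedef union {
		uint64_t val;
		struct {
			uint32_t lower;         /* low 32 bits on little-endian */
			uint32_t upper;         /* high 32 bits */
		} val32;
	} u64bit_demo;

	int main(void)
	{
		u64bit_demo a;

		a.val = 0x123456789abcdef0ULL;  /* pretend DMA handle */
		printf("lower=%08x upper=%08x\n", a.val32.lower, a.val32.upper);

		/* and back again, as the unmap paths do */
		u64bit_demo b = { .val32 = { a.val32.lower, a.val32.upper } };
		printf("reassembled=%016llx\n", (unsigned long long)b.val);
		return 0;
	}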
@@ -1234,9 +1274,9 @@ static void cciss_softirq_done(struct request *rq)
1234 * will always be left registered with the kernel since it is also the 1274 * will always be left registered with the kernel since it is also the
1235 * controller node. Any changes to disk 0 will show up on the next 1275 * controller node. Any changes to disk 0 will show up on the next
1236 * reboot. 1276 * reboot.
1237*/ 1277 */
1238static void cciss_update_drive_info(int ctlr, int drv_index) 1278static void cciss_update_drive_info(int ctlr, int drv_index)
1239 { 1279{
1240 ctlr_info_t *h = hba[ctlr]; 1280 ctlr_info_t *h = hba[ctlr];
1241 struct gendisk *disk; 1281 struct gendisk *disk;
1242 ReadCapdata_struct *size_buff = NULL; 1282 ReadCapdata_struct *size_buff = NULL;
@@ -1246,13 +1286,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
1246 unsigned long flags = 0; 1286 unsigned long flags = 0;
1247 int ret = 0; 1287 int ret = 0;
1248 1288
1249 /* if the disk already exists then deregister it before proceeding*/ 1289 /* if the disk already exists then deregister it before proceeding */
1250 if (h->drv[drv_index].raid_level != -1){ 1290 if (h->drv[drv_index].raid_level != -1) {
1251 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1291 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1252 h->drv[drv_index].busy_configuring = 1; 1292 h->drv[drv_index].busy_configuring = 1;
1253 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1293 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1254 ret = deregister_disk(h->gendisk[drv_index], 1294 ret = deregister_disk(h->gendisk[drv_index],
1255 &h->drv[drv_index], 0); 1295 &h->drv[drv_index], 0);
1256 h->drv[drv_index].busy_configuring = 0; 1296 h->drv[drv_index].busy_configuring = 0;
1257 } 1297 }
1258 1298
@@ -1260,27 +1300,25 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
1260 if (ret) 1300 if (ret)
1261 return; 1301 return;
1262 1302
1263 1303 /* Get information about the disk and modify the driver structure */
1264 /* Get information about the disk and modify the driver sturcture */ 1304 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1265 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL); 1305 if (size_buff == NULL)
1266 if (size_buff == NULL)
1267 goto mem_msg; 1306 goto mem_msg;
1268 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL); 1307 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1269 if (inq_buff == NULL) 1308 if (inq_buff == NULL)
1270 goto mem_msg; 1309 goto mem_msg;
1271 1310
1272 cciss_read_capacity(ctlr, drv_index, size_buff, 1, 1311 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1273 &total_size, &block_size); 1312 &total_size, &block_size);
1274 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size, 1313 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1275 inq_buff, &h->drv[drv_index]); 1314 inq_buff, &h->drv[drv_index]);
1276 1315
1277 ++h->num_luns; 1316 ++h->num_luns;
1278 disk = h->gendisk[drv_index]; 1317 disk = h->gendisk[drv_index];
1279 set_capacity(disk, h->drv[drv_index].nr_blocks); 1318 set_capacity(disk, h->drv[drv_index].nr_blocks);
1280 1319
1281
1282 /* if it's the controller it's already added */ 1320 /* if it's the controller it's already added */
1283 if (drv_index){ 1321 if (drv_index) {
1284 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1322 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1285 1323
1286 /* Set up queue information */ 1324 /* Set up queue information */
@@ -1300,17 +1338,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
1300 disk->queue->queuedata = hba[ctlr]; 1338 disk->queue->queuedata = hba[ctlr];
1301 1339
1302 blk_queue_hardsect_size(disk->queue, 1340 blk_queue_hardsect_size(disk->queue,
1303 hba[ctlr]->drv[drv_index].block_size); 1341 hba[ctlr]->drv[drv_index].block_size);
1304 1342
1305 h->drv[drv_index].queue = disk->queue; 1343 h->drv[drv_index].queue = disk->queue;
1306 add_disk(disk); 1344 add_disk(disk);
1307 } 1345 }
1308 1346
1309freeret: 1347 freeret:
1310 kfree(size_buff); 1348 kfree(size_buff);
1311 kfree(inq_buff); 1349 kfree(inq_buff);
1312 return; 1350 return;
1313mem_msg: 1351 mem_msg:
1314 printk(KERN_ERR "cciss: out of memory\n"); 1352 printk(KERN_ERR "cciss: out of memory\n");
1315 goto freeret; 1353 goto freeret;
1316} 1354}
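cciss_update_drive_info() uses the usual goto-based cleanup: allocations are attempted in order, any failure jumps to mem_msg (which logs and falls back into freeret), and freeret frees everything unconditionally, relying on kfree(NULL) being a no-op. A minimal userspace sketch of the same shape, with plain malloc/free and hypothetical buffer sizes standing in for the ReadCapdata_struct and InquiryData_struct allocations:

	#include <stdio.h>
	#include <stdlib.h>

	static void update_info_demo(void)
	{
		char *size_buf = NULL;
		char *inq_buf = NULL;

		size_buf = malloc(64);
		if (!size_buf)
			goto mem_msg;
		inq_buf = malloc(64);
		if (!inq_buf)
			goto mem_msg;

		/* ... use the buffers ... */

	freeret:
		free(size_buf);         /* free(NULL) is harmless, like kfree(NULL) */
		free(inq_buf);
		return;
	mem_msg:
		fprintf(stderr, "out of memory\n");
		goto freeret;
	}

	int main(void) { update_info_demo(); return 0; }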
@@ -1320,13 +1358,13 @@ mem_msg:
1320 * where new drives will be added. If the index to be returned is greater 1358 * where new drives will be added. If the index to be returned is greater
1321 * than the highest_lun index for the controller then highest_lun is set 1359 * than the highest_lun index for the controller then highest_lun is set
1322 * to this new index. If there are no available indexes then -1 is returned. 1360 * to this new index. If there are no available indexes then -1 is returned.
1323*/ 1361 */
1324static int cciss_find_free_drive_index(int ctlr) 1362static int cciss_find_free_drive_index(int ctlr)
1325{ 1363{
1326 int i; 1364 int i;
1327 1365
1328 for (i=0; i < CISS_MAX_LUN; i++){ 1366 for (i = 0; i < CISS_MAX_LUN; i++) {
1329 if (hba[ctlr]->drv[i].raid_level == -1){ 1367 if (hba[ctlr]->drv[i].raid_level == -1) {
1330 if (i > hba[ctlr]->highest_lun) 1368 if (i > hba[ctlr]->highest_lun)
1331 hba[ctlr]->highest_lun = i; 1369 hba[ctlr]->highest_lun = i;
1332 return i; 1370 return i;
@@ -1336,7 +1374,7 @@ static int cciss_find_free_drive_index(int ctlr)
1336} 1374}
1337 1375
1338/* This function will add and remove logical drives from the Logical 1376/* This function will add and remove logical drives from the Logical
1339 * drive array of the controller and maintain persistancy of ordering 1377 * drive array of the controller and maintain persistency of ordering
1340 * so that mount points are preserved until the next reboot. This allows 1378 * so that mount points are preserved until the next reboot. This allows
1341 * for the removal of logical drives in the middle of the drive array 1379 * for the removal of logical drives in the middle of the drive array
1342 * without a re-ordering of those drives. 1380 * without a re-ordering of those drives.
@@ -1344,7 +1382,7 @@ static int cciss_find_free_drive_index(int ctlr)
1344 * h = The controller to perform the operations on 1382 * h = The controller to perform the operations on
1345 * del_disk = The disk to remove if specified. If the value given 1383 * del_disk = The disk to remove if specified. If the value given
1346 * is NULL then no disk is removed. 1384 * is NULL then no disk is removed.
1347*/ 1385 */
1348static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk) 1386static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1349{ 1387{
1350 int ctlr = h->ctlr; 1388 int ctlr = h->ctlr;
@@ -1361,12 +1399,12 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1361 1399
1362 /* Set busy_configuring flag for this operation */ 1400 /* Set busy_configuring flag for this operation */
1363 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1401 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1364 if (h->num_luns >= CISS_MAX_LUN){ 1402 if (h->num_luns >= CISS_MAX_LUN) {
1365 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1403 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1366 return -EINVAL; 1404 return -EINVAL;
1367 } 1405 }
1368 1406
1369 if (h->busy_configuring){ 1407 if (h->busy_configuring) {
1370 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1408 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1371 return -EBUSY; 1409 return -EBUSY;
1372 } 1410 }
@@ -1376,7 +1414,7 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1376 * and update the logical drive table. If it is not NULL then 1414 * and update the logical drive table. If it is not NULL then
1377 * we will check if the disk is in use or not. 1415 * we will check if the disk is in use or not.
1378 */ 1416 */
1379 if (del_disk != NULL){ 1417 if (del_disk != NULL) {
1380 drv = get_drv(del_disk); 1418 drv = get_drv(del_disk);
1381 drv->busy_configuring = 1; 1419 drv->busy_configuring = 1;
1382 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1420 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
@@ -1394,61 +1432,67 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1394 goto mem_msg; 1432 goto mem_msg;
1395 1433
1396 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, 1434 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1397 sizeof(ReportLunData_struct), 0, 0, 0, 1435 sizeof(ReportLunData_struct), 0,
1398 TYPE_CMD); 1436 0, 0, TYPE_CMD);
1399 1437
1400 if (return_code == IO_OK){ 1438 if (return_code == IO_OK) {
1401 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24; 1439 listlength |=
1402 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16; 1440 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1403 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8; 1441 << 24;
1404 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]); 1442 listlength |=
1405 } else{ /* reading number of logical volumes failed */ 1443 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1444 << 16;
1445 listlength |=
1446 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1447 << 8;
1448 listlength |=
1449 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1450 } else { /* reading number of logical volumes failed */
1406 printk(KERN_WARNING "cciss: report logical volume" 1451 printk(KERN_WARNING "cciss: report logical volume"
1407 " command failed\n"); 1452 " command failed\n");
1408 listlength = 0; 1453 listlength = 0;
1409 goto freeret; 1454 goto freeret;
1410 } 1455 }
1411 1456
1412 num_luns = listlength / 8; /* 8 bytes per entry */ 1457 num_luns = listlength / 8; /* 8 bytes per entry */
1413 if (num_luns > CISS_MAX_LUN){ 1458 if (num_luns > CISS_MAX_LUN) {
1414 num_luns = CISS_MAX_LUN; 1459 num_luns = CISS_MAX_LUN;
1415 printk(KERN_WARNING "cciss: more luns configured" 1460 printk(KERN_WARNING "cciss: more luns configured"
1416 " on controller than can be handled by" 1461 " on controller than can be handled by"
1417 " this driver.\n"); 1462 " this driver.\n");
1418 } 1463 }
1419 1464
1420 	/* Compare controller drive array to driver's drive array. 1465 	/* Compare controller drive array to driver's drive array.
1421 * Check for updates in the drive information and any new drives 1466 * Check for updates in the drive information and any new drives
1422 * on the controller. 1467 * on the controller.
1423 */ 1468 */
1424 for (i=0; i < num_luns; i++){ 1469 for (i = 0; i < num_luns; i++) {
1425 int j; 1470 int j;
1426 1471
1427 drv_found = 0; 1472 drv_found = 0;
1428 1473
1429 lunid = (0xff & 1474 lunid = (0xff &
1430 (unsigned int)(ld_buff->LUN[i][3])) << 24; 1475 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1431 lunid |= (0xff & 1476 lunid |= (0xff &
1432 (unsigned int)(ld_buff->LUN[i][2])) << 16; 1477 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1433 lunid |= (0xff & 1478 lunid |= (0xff &
1434 (unsigned int)(ld_buff->LUN[i][1])) << 8; 1479 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1435 lunid |= 0xff & 1480 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1436 (unsigned int)(ld_buff->LUN[i][0]);
1437 1481
1438 /* Find if the LUN is already in the drive array 1482 /* Find if the LUN is already in the drive array
1439 * of the controller. If so then update its info 1483 * of the controller. If so then update its info
1440 	 * if not in use. If it does not exist then find 1484 	 * if not in use. If it does not exist then find
1441 * the first free index and add it. 1485 * the first free index and add it.
1442 */ 1486 */
1443 for (j=0; j <= h->highest_lun; j++){ 1487 for (j = 0; j <= h->highest_lun; j++) {
1444 if (h->drv[j].LunID == lunid){ 1488 if (h->drv[j].LunID == lunid) {
1445 drv_index = j; 1489 drv_index = j;
1446 drv_found = 1; 1490 drv_found = 1;
1447 } 1491 }
1448 } 1492 }
1449 1493
1450 /* check if the drive was found already in the array */ 1494 /* check if the drive was found already in the array */
1451 if (!drv_found){ 1495 if (!drv_found) {
1452 drv_index = cciss_find_free_drive_index(ctlr); 1496 drv_index = cciss_find_free_drive_index(ctlr);
1453 if (drv_index == -1) 1497 if (drv_index == -1)
1454 goto freeret; 1498 goto freeret;
@@ -1456,18 +1500,18 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1456 } 1500 }
1457 h->drv[drv_index].LunID = lunid; 1501 h->drv[drv_index].LunID = lunid;
1458 cciss_update_drive_info(ctlr, drv_index); 1502 cciss_update_drive_info(ctlr, drv_index);
1459 } /* end for */ 1503 } /* end for */
1460 } /* end else */ 1504 } /* end else */
1461 1505
1462freeret: 1506 freeret:
1463 kfree(ld_buff); 1507 kfree(ld_buff);
1464 h->busy_configuring = 0; 1508 h->busy_configuring = 0;
1465 /* We return -1 here to tell the ACU that we have registered/updated 1509 /* We return -1 here to tell the ACU that we have registered/updated
1466 * all of the drives that we can and to keep it from calling us 1510 * all of the drives that we can and to keep it from calling us
1467 * additional times. 1511 * additional times.
1468 */ 1512 */
1469 return -1; 1513 return -1;
1470mem_msg: 1514 mem_msg:
1471 printk(KERN_ERR "cciss: out of memory\n"); 1515 printk(KERN_ERR "cciss: out of memory\n");
1472 goto freeret; 1516 goto freeret;
1473} 1517}
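rebuild_lun_table() assembles listlength byte by byte because the REPORT LUNS payload carries it big-endian: LUNListLength[0] is the most significant byte, and each LUN entry is then 8 bytes, hence listlength / 8. The lunid is built the opposite way around, from LUN[i][3] down to LUN[i][0]. A self-contained example of the shift-and-or assembly:

	#include <stdint.h>
	#include <stdio.h>

	/* LUNListLength[0] is the most-significant byte (big-endian on the wire). */
	static uint32_t be_bytes_to_u32(const uint8_t b[4])
	{
		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
	}

	int main(void)
	{
		uint8_t lun_list_length[4] = { 0x00, 0x00, 0x00, 0x18 };

		uint32_t listlength = be_bytes_to_u32(lun_list_length);
		printf("listlength=%u entries=%u\n", listlength, listlength / 8);
		return 0;
	}

Here a 24-byte list length corresponds to three logical-volume entries.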
@@ -1483,7 +1527,7 @@ mem_msg:
1483 * clear_all = This flag determines whether or not the disk information 1527 * clear_all = This flag determines whether or not the disk information
1484 * is going to be completely cleared out and the highest_lun 1528 * is going to be completely cleared out and the highest_lun
1485 * reset. Sometimes we want to clear out information about 1529 * reset. Sometimes we want to clear out information about
1486 * the disk in preperation for re-adding it. In this case 1530 * the disk in preparation for re-adding it. In this case
1487 * the highest_lun should be left unchanged and the LunID 1531 * the highest_lun should be left unchanged and the LunID
1488 * should not be cleared. 1532 * should not be cleared.
1489*/ 1533*/
@@ -1496,19 +1540,17 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1496 return -EPERM; 1540 return -EPERM;
1497 1541
1498 	/* make sure logical volume is NOT in use */ 1542 	/* make sure logical volume is NOT in use */
1499 if(clear_all || (h->gendisk[0] == disk)) { 1543 if (clear_all || (h->gendisk[0] == disk)) {
1500 if (drv->usage_count > 1) 1544 if (drv->usage_count > 1)
1501 return -EBUSY; 1545 return -EBUSY;
1502 } 1546 } else if (drv->usage_count > 0)
1503 else 1547 return -EBUSY;
1504 if( drv->usage_count > 0 )
1505 return -EBUSY;
1506 1548
1507 /* invalidate the devices and deregister the disk. If it is disk 1549 /* invalidate the devices and deregister the disk. If it is disk
1508 	 * zero do not deregister it but just zero out its values. This 1550 	 * zero do not deregister it but just zero out its values. This
1509 * allows us to delete disk zero but keep the controller registered. 1551 * allows us to delete disk zero but keep the controller registered.
1510 */ 1552 */
1511 if (h->gendisk[0] != disk){ 1553 if (h->gendisk[0] != disk) {
1512 if (disk) { 1554 if (disk) {
1513 request_queue_t *q = disk->queue; 1555 request_queue_t *q = disk->queue;
1514 if (disk->flags & GENHD_FL_UP) 1556 if (disk->flags & GENHD_FL_UP)
@@ -1530,91 +1572,90 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1530 drv->raid_level = -1; /* This can be used as a flag variable to 1572 drv->raid_level = -1; /* This can be used as a flag variable to
1531 * indicate that this element of the drive 1573 * indicate that this element of the drive
1532 * array is free. 1574 * array is free.
1533 */ 1575 */
1534 1576
1535 if (clear_all){ 1577 if (clear_all) {
1536 /* check to see if it was the last disk */ 1578 /* check to see if it was the last disk */
1537 if (drv == h->drv + h->highest_lun) { 1579 if (drv == h->drv + h->highest_lun) {
1538 	/* if so, find the new highest lun */ 1580 	/* if so, find the new highest lun */
1539 int i, newhighest =-1; 1581 int i, newhighest = -1;
1540 for(i=0; i<h->highest_lun; i++) { 1582 for (i = 0; i < h->highest_lun; i++) {
1541 /* if the disk has size > 0, it is available */ 1583 /* if the disk has size > 0, it is available */
1542 if (h->drv[i].heads) 1584 if (h->drv[i].heads)
1543 newhighest = i; 1585 newhighest = i;
1586 }
1587 h->highest_lun = newhighest;
1544 } 1588 }
1545 h->highest_lun = newhighest;
1546 }
1547 1589
1548 drv->LunID = 0; 1590 drv->LunID = 0;
1549 } 1591 }
1550 return(0); 1592 return 0;
1551} 1593}
1552 1594
1553static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, 1595static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1554 size_t size, 1596 1: address logical volume log_unit,
1555 unsigned int use_unit_num, /* 0: address the controller, 1597 2: periph device address is scsi3addr */
1556 1: address logical volume log_unit, 1598 unsigned int log_unit, __u8 page_code,
1557 2: periph device address is scsi3addr */ 1599 unsigned char *scsi3addr, int cmd_type)
1558 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1559 int cmd_type)
1560{ 1600{
1561 ctlr_info_t *h= hba[ctlr]; 1601 ctlr_info_t *h = hba[ctlr];
1562 u64bit buff_dma_handle; 1602 u64bit buff_dma_handle;
1563 int status = IO_OK; 1603 int status = IO_OK;
1564 1604
1565 c->cmd_type = CMD_IOCTL_PEND; 1605 c->cmd_type = CMD_IOCTL_PEND;
1566 c->Header.ReplyQueue = 0; 1606 c->Header.ReplyQueue = 0;
1567 if( buff != NULL) { 1607 if (buff != NULL) {
1568 c->Header.SGList = 1; 1608 c->Header.SGList = 1;
1569 c->Header.SGTotal= 1; 1609 c->Header.SGTotal = 1;
1570 } else { 1610 } else {
1571 c->Header.SGList = 0; 1611 c->Header.SGList = 0;
1572 c->Header.SGTotal= 0; 1612 c->Header.SGTotal = 0;
1573 } 1613 }
1574 c->Header.Tag.lower = c->busaddr; 1614 c->Header.Tag.lower = c->busaddr;
1575 1615
1576 c->Request.Type.Type = cmd_type; 1616 c->Request.Type.Type = cmd_type;
1577 if (cmd_type == TYPE_CMD) { 1617 if (cmd_type == TYPE_CMD) {
1578 switch(cmd) { 1618 switch (cmd) {
1579 case CISS_INQUIRY: 1619 case CISS_INQUIRY:
1580 	/* If the logical unit number is 0 then this is going 1620 	/* If the logical unit number is 0 then this is going
1581 	 to the controller, so it's a physical command 1621 	 to the controller, so it's a physical command
1582 mode = 0 target = 0. So we have nothing to write. 1622 mode = 0 target = 0. So we have nothing to write.
1583 otherwise, if use_unit_num == 1, 1623 otherwise, if use_unit_num == 1,
1584 mode = 1(volume set addressing) target = LUNID 1624 mode = 1(volume set addressing) target = LUNID
1585 otherwise, if use_unit_num == 2, 1625 otherwise, if use_unit_num == 2,
1586 mode = 0(periph dev addr) target = scsi3addr */ 1626 mode = 0(periph dev addr) target = scsi3addr */
1587 if (use_unit_num == 1) { 1627 if (use_unit_num == 1) {
1588 c->Header.LUN.LogDev.VolId= 1628 c->Header.LUN.LogDev.VolId =
1589 h->drv[log_unit].LunID; 1629 h->drv[log_unit].LunID;
1590 c->Header.LUN.LogDev.Mode = 1; 1630 c->Header.LUN.LogDev.Mode = 1;
1591 } else if (use_unit_num == 2) { 1631 } else if (use_unit_num == 2) {
1592 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8); 1632 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1633 8);
1593 c->Header.LUN.LogDev.Mode = 0; 1634 c->Header.LUN.LogDev.Mode = 0;
1594 } 1635 }
1595 /* are we trying to read a vital product page */ 1636 /* are we trying to read a vital product page */
1596 if(page_code != 0) { 1637 if (page_code != 0) {
1597 c->Request.CDB[1] = 0x01; 1638 c->Request.CDB[1] = 0x01;
1598 c->Request.CDB[2] = page_code; 1639 c->Request.CDB[2] = page_code;
1599 } 1640 }
1600 c->Request.CDBLen = 6; 1641 c->Request.CDBLen = 6;
1601 c->Request.Type.Attribute = ATTR_SIMPLE; 1642 c->Request.Type.Attribute = ATTR_SIMPLE;
1602 c->Request.Type.Direction = XFER_READ; 1643 c->Request.Type.Direction = XFER_READ;
1603 c->Request.Timeout = 0; 1644 c->Request.Timeout = 0;
1604 c->Request.CDB[0] = CISS_INQUIRY; 1645 c->Request.CDB[0] = CISS_INQUIRY;
1605 c->Request.CDB[4] = size & 0xFF; 1646 c->Request.CDB[4] = size & 0xFF;
1606 break; 1647 break;
1607 case CISS_REPORT_LOG: 1648 case CISS_REPORT_LOG:
1608 case CISS_REPORT_PHYS: 1649 case CISS_REPORT_PHYS:
1609 	/* Talking to the controller, so it's a physical command 1650 	/* Talking to the controller, so it's a physical command
1610 mode = 00 target = 0. Nothing to write. 1651 mode = 00 target = 0. Nothing to write.
1611 */ 1652 */
1612 c->Request.CDBLen = 12; 1653 c->Request.CDBLen = 12;
1613 c->Request.Type.Attribute = ATTR_SIMPLE; 1654 c->Request.Type.Attribute = ATTR_SIMPLE;
1614 c->Request.Type.Direction = XFER_READ; 1655 c->Request.Type.Direction = XFER_READ;
1615 c->Request.Timeout = 0; 1656 c->Request.Timeout = 0;
1616 c->Request.CDB[0] = cmd; 1657 c->Request.CDB[0] = cmd;
1617 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB 1658 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1618 c->Request.CDB[7] = (size >> 16) & 0xFF; 1659 c->Request.CDB[7] = (size >> 16) & 0xFF;
1619 c->Request.CDB[8] = (size >> 8) & 0xFF; 1660 c->Request.CDB[8] = (size >> 8) & 0xFF;
1620 c->Request.CDB[9] = size & 0xFF; 1661 c->Request.CDB[9] = size & 0xFF;
@@ -1628,7 +1669,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1628 c->Request.Type.Direction = XFER_READ; 1669 c->Request.Type.Direction = XFER_READ;
1629 c->Request.Timeout = 0; 1670 c->Request.Timeout = 0;
1630 c->Request.CDB[0] = cmd; 1671 c->Request.CDB[0] = cmd;
1631 break; 1672 break;
1632 case CCISS_CACHE_FLUSH: 1673 case CCISS_CACHE_FLUSH:
1633 c->Request.CDBLen = 12; 1674 c->Request.CDBLen = 12;
1634 c->Request.Type.Attribute = ATTR_SIMPLE; 1675 c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -1636,32 +1677,32 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1636 c->Request.Timeout = 0; 1677 c->Request.Timeout = 0;
1637 c->Request.CDB[0] = BMIC_WRITE; 1678 c->Request.CDB[0] = BMIC_WRITE;
1638 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 1679 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1639 break; 1680 break;
1640 default: 1681 default:
1641 printk(KERN_WARNING 1682 printk(KERN_WARNING
1642 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd); 1683 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1643 return(IO_ERROR); 1684 return IO_ERROR;
1644 } 1685 }
1645 } else if (cmd_type == TYPE_MSG) { 1686 } else if (cmd_type == TYPE_MSG) {
1646 switch (cmd) { 1687 switch (cmd) {
1647 case 0: /* ABORT message */ 1688 case 0: /* ABORT message */
1648 c->Request.CDBLen = 12; 1689 c->Request.CDBLen = 12;
1649 c->Request.Type.Attribute = ATTR_SIMPLE; 1690 c->Request.Type.Attribute = ATTR_SIMPLE;
1650 c->Request.Type.Direction = XFER_WRITE; 1691 c->Request.Type.Direction = XFER_WRITE;
1651 c->Request.Timeout = 0; 1692 c->Request.Timeout = 0;
1652 c->Request.CDB[0] = cmd; /* abort */ 1693 c->Request.CDB[0] = cmd; /* abort */
1653 c->Request.CDB[1] = 0; /* abort a command */ 1694 c->Request.CDB[1] = 0; /* abort a command */
1654 /* buff contains the tag of the command to abort */ 1695 /* buff contains the tag of the command to abort */
1655 memcpy(&c->Request.CDB[4], buff, 8); 1696 memcpy(&c->Request.CDB[4], buff, 8);
1656 break; 1697 break;
1657 case 1: /* RESET message */ 1698 case 1: /* RESET message */
1658 c->Request.CDBLen = 12; 1699 c->Request.CDBLen = 12;
1659 c->Request.Type.Attribute = ATTR_SIMPLE; 1700 c->Request.Type.Attribute = ATTR_SIMPLE;
1660 c->Request.Type.Direction = XFER_WRITE; 1701 c->Request.Type.Direction = XFER_WRITE;
1661 c->Request.Timeout = 0; 1702 c->Request.Timeout = 0;
1662 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 1703 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1663 c->Request.CDB[0] = cmd; /* reset */ 1704 c->Request.CDB[0] = cmd; /* reset */
1664 c->Request.CDB[1] = 0x04; /* reset a LUN */ 1705 c->Request.CDB[1] = 0x04; /* reset a LUN */
1665 case 3: /* No-Op message */ 1706 case 3: /* No-Op message */
1666 c->Request.CDBLen = 1; 1707 c->Request.CDBLen = 1;
1667 c->Request.Type.Attribute = ATTR_SIMPLE; 1708 c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -1671,168 +1712,164 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1671 break; 1712 break;
1672 default: 1713 default:
1673 printk(KERN_WARNING 1714 printk(KERN_WARNING
1674 "cciss%d: unknown message type %d\n", 1715 "cciss%d: unknown message type %d\n", ctlr, cmd);
1675 ctlr, cmd);
1676 return IO_ERROR; 1716 return IO_ERROR;
1677 } 1717 }
1678 } else { 1718 } else {
1679 printk(KERN_WARNING 1719 printk(KERN_WARNING
1680 "cciss%d: unknown command type %d\n", ctlr, cmd_type); 1720 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1681 return IO_ERROR; 1721 return IO_ERROR;
1682 } 1722 }
1683 /* Fill in the scatter gather information */ 1723 /* Fill in the scatter gather information */
1684 if (size > 0) { 1724 if (size > 0) {
1685 buff_dma_handle.val = (__u64) pci_map_single(h->pdev, 1725 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1686 buff, size, PCI_DMA_BIDIRECTIONAL); 1726 buff, size,
1727 PCI_DMA_BIDIRECTIONAL);
1687 c->SG[0].Addr.lower = buff_dma_handle.val32.lower; 1728 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1688 c->SG[0].Addr.upper = buff_dma_handle.val32.upper; 1729 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1689 c->SG[0].Len = size; 1730 c->SG[0].Len = size;
1690 c->SG[0].Ext = 0; /* we are not chaining */ 1731 c->SG[0].Ext = 0; /* we are not chaining */
1691 } 1732 }
1692 return status; 1733 return status;
1693} 1734}
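For CISS_INQUIRY, fill_cmd() emits a 6-byte CDB: when a vital product data page is requested, byte 1 gets the EVPD bit (0x01) and byte 2 the page code, and byte 4 carries the allocation length. A stand-alone helper showing the same layout; 0x12 is the standard SCSI INQUIRY opcode, and 0xC1 is the vendor geometry page that the geometry code below asks for.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Build a 6-byte SCSI INQUIRY CDB the way fill_cmd() lays it out. */
	static void build_inquiry_cdb(uint8_t cdb[6], uint8_t opcode,
				      uint8_t page_code, size_t size)
	{
		memset(cdb, 0, 6);
		cdb[0] = opcode;
		if (page_code != 0) {
			cdb[1] = 0x01;          /* EVPD: ask for a VPD page */
			cdb[2] = page_code;
		}
		cdb[4] = size & 0xFF;           /* allocation length */
	}

	int main(void)
	{
		uint8_t cdb[6];

		build_inquiry_cdb(cdb, 0x12 /* INQUIRY */, 0xC1, 64);
		for (int i = 0; i < 6; i++)
			printf("%02x ", cdb[i]);
		printf("\n");
		return 0;
	}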
1694static int sendcmd_withirq(__u8 cmd, 1735
1695 int ctlr, 1736static int sendcmd_withirq(__u8 cmd,
1696 void *buff, 1737 int ctlr,
1697 size_t size, 1738 void *buff,
1698 unsigned int use_unit_num, 1739 size_t size,
1699 unsigned int log_unit, 1740 unsigned int use_unit_num,
1700 __u8 page_code, 1741 unsigned int log_unit, __u8 page_code, int cmd_type)
1701 int cmd_type)
1702{ 1742{
1703 ctlr_info_t *h = hba[ctlr]; 1743 ctlr_info_t *h = hba[ctlr];
1704 CommandList_struct *c; 1744 CommandList_struct *c;
1705 u64bit buff_dma_handle; 1745 u64bit buff_dma_handle;
1706 unsigned long flags; 1746 unsigned long flags;
1707 int return_status; 1747 int return_status;
1708 DECLARE_COMPLETION(wait); 1748 DECLARE_COMPLETION(wait);
1709 1749
1710 if ((c = cmd_alloc(h , 0)) == NULL) 1750 if ((c = cmd_alloc(h, 0)) == NULL)
1711 return -ENOMEM; 1751 return -ENOMEM;
1712 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num, 1752 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1713 log_unit, page_code, NULL, cmd_type); 1753 log_unit, page_code, NULL, cmd_type);
1714 if (return_status != IO_OK) { 1754 if (return_status != IO_OK) {
1715 cmd_free(h, c, 0); 1755 cmd_free(h, c, 0);
1716 return return_status; 1756 return return_status;
1717 } 1757 }
1718resend_cmd2: 1758 resend_cmd2:
1719 c->waiting = &wait; 1759 c->waiting = &wait;
1720 1760
1721 /* Put the request on the tail of the queue and send it */ 1761 /* Put the request on the tail of the queue and send it */
1722 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1762 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1723 addQ(&h->reqQ, c); 1763 addQ(&h->reqQ, c);
1724 h->Qdepth++; 1764 h->Qdepth++;
1725 start_io(h); 1765 start_io(h);
1726 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1766 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1727 1767
1728 wait_for_completion(&wait); 1768 wait_for_completion(&wait);
1729 1769
1730 if(c->err_info->CommandStatus != 0) 1770 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1731 { /* an error has occurred */ 1771 switch (c->err_info->CommandStatus) {
1732 switch(c->err_info->CommandStatus) 1772 case CMD_TARGET_STATUS:
1733 { 1773 printk(KERN_WARNING "cciss: cmd %p has "
1734 case CMD_TARGET_STATUS: 1774 " completed with errors\n", c);
1735 printk(KERN_WARNING "cciss: cmd %p has " 1775 if (c->err_info->ScsiStatus) {
1736 " completed with errors\n", c); 1776 printk(KERN_WARNING "cciss: cmd %p "
1737 if( c->err_info->ScsiStatus) 1777 "has SCSI Status = %x\n",
1738 { 1778 c, c->err_info->ScsiStatus);
1739 printk(KERN_WARNING "cciss: cmd %p " 1779 }
1740 "has SCSI Status = %x\n",
1741 c,
1742 c->err_info->ScsiStatus);
1743 }
1744 1780
1745 break; 1781 break;
1746 case CMD_DATA_UNDERRUN: 1782 case CMD_DATA_UNDERRUN:
1747 case CMD_DATA_OVERRUN: 1783 case CMD_DATA_OVERRUN:
1748 /* expected for inquire and report lun commands */ 1784 /* expected for inquire and report lun commands */
1749 break; 1785 break;
1750 case CMD_INVALID: 1786 case CMD_INVALID:
1751 printk(KERN_WARNING "cciss: Cmd %p is " 1787 printk(KERN_WARNING "cciss: Cmd %p is "
1752 "reported invalid\n", c); 1788 "reported invalid\n", c);
1753 return_status = IO_ERROR; 1789 return_status = IO_ERROR;
1754 break; 1790 break;
1755 case CMD_PROTOCOL_ERR: 1791 case CMD_PROTOCOL_ERR:
1756 printk(KERN_WARNING "cciss: cmd %p has " 1792 printk(KERN_WARNING "cciss: cmd %p has "
1757 "protocol error \n", c); 1793 "protocol error \n", c);
1758 return_status = IO_ERROR; 1794 return_status = IO_ERROR;
1759 break;
1760case CMD_HARDWARE_ERR:
1761 printk(KERN_WARNING "cciss: cmd %p had "
1762 " hardware error\n", c);
1763 return_status = IO_ERROR;
1764 break;
1765 case CMD_CONNECTION_LOST:
1766 printk(KERN_WARNING "cciss: cmd %p had "
1767 "connection lost\n", c);
1768 return_status = IO_ERROR;
1769 break; 1795 break;
1770 case CMD_ABORTED: 1796 case CMD_HARDWARE_ERR:
1771 printk(KERN_WARNING "cciss: cmd %p was " 1797 printk(KERN_WARNING "cciss: cmd %p had "
1772 "aborted\n", c); 1798 " hardware error\n", c);
1773 return_status = IO_ERROR; 1799 return_status = IO_ERROR;
1774 break; 1800 break;
1775 case CMD_ABORT_FAILED: 1801 case CMD_CONNECTION_LOST:
1776 printk(KERN_WARNING "cciss: cmd %p reports " 1802 printk(KERN_WARNING "cciss: cmd %p had "
1777 "abort failed\n", c); 1803 "connection lost\n", c);
1778 return_status = IO_ERROR; 1804 return_status = IO_ERROR;
1779 break; 1805 break;
1780 case CMD_UNSOLICITED_ABORT: 1806 case CMD_ABORTED:
1781 printk(KERN_WARNING 1807 printk(KERN_WARNING "cciss: cmd %p was "
1782 "cciss%d: unsolicited abort %p\n", 1808 "aborted\n", c);
1783 ctlr, c); 1809 return_status = IO_ERROR;
1784 if (c->retry_count < MAX_CMD_RETRIES) { 1810 break;
1785 printk(KERN_WARNING 1811 case CMD_ABORT_FAILED:
1786 "cciss%d: retrying %p\n", 1812 printk(KERN_WARNING "cciss: cmd %p reports "
1787 ctlr, c); 1813 "abort failed\n", c);
1788 c->retry_count++; 1814 return_status = IO_ERROR;
1789 /* erase the old error information */
1790 memset(c->err_info, 0,
1791 sizeof(ErrorInfo_struct));
1792 return_status = IO_OK;
1793 INIT_COMPLETION(wait);
1794 goto resend_cmd2;
1795 }
1796 return_status = IO_ERROR;
1797 break; 1815 break;
1798 default: 1816 case CMD_UNSOLICITED_ABORT:
1799 printk(KERN_WARNING "cciss: cmd %p returned " 1817 printk(KERN_WARNING
1800 "unknown status %x\n", c, 1818 "cciss%d: unsolicited abort %p\n", ctlr, c);
1801 c->err_info->CommandStatus); 1819 if (c->retry_count < MAX_CMD_RETRIES) {
1802 return_status = IO_ERROR; 1820 printk(KERN_WARNING
1821 "cciss%d: retrying %p\n", ctlr, c);
1822 c->retry_count++;
1823 /* erase the old error information */
1824 memset(c->err_info, 0,
1825 sizeof(ErrorInfo_struct));
1826 return_status = IO_OK;
1827 INIT_COMPLETION(wait);
1828 goto resend_cmd2;
1829 }
1830 return_status = IO_ERROR;
1831 break;
1832 default:
1833 printk(KERN_WARNING "cciss: cmd %p returned "
1834 "unknown status %x\n", c,
1835 c->err_info->CommandStatus);
1836 return_status = IO_ERROR;
1803 } 1837 }
1804 } 1838 }
1805 /* unlock the buffers from DMA */ 1839 /* unlock the buffers from DMA */
1806 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 1840 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1807 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 1841 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1808 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val, 1842 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1809 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 1843 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1810 cmd_free(h, c, 0); 1844 cmd_free(h, c, 0);
1811 return(return_status); 1845 return return_status;
1812
1813} 1846}
1847
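sendcmd_withirq() wraps the same queue-and-wait pattern as the ioctl paths and then interprets err_info->CommandStatus: target status, data underrun and overrun are tolerated, most other statuses become IO_ERROR, and CMD_UNSOLICITED_ABORT clears the old error info, re-arms the completion with INIT_COMPLETION() and jumps back to resend_cmd2 until MAX_CMD_RETRIES is exhausted. A condensed restatement of that decision, using the kernel helpers exactly as they appear in the hunk (a sketch of the control flow, not standalone code):

	resend_cmd2:
		c->waiting = &wait;
		/* queue the command, start_io(), wait_for_completion() as above */

		switch (c->err_info->CommandStatus) {
		case 0:                                 /* completed cleanly */
		case CMD_DATA_UNDERRUN:
		case CMD_DATA_OVERRUN:                  /* expected for inquiry/report luns */
			return_status = IO_OK;
			break;
		case CMD_UNSOLICITED_ABORT:
			if (c->retry_count++ < MAX_CMD_RETRIES) {
				memset(c->err_info, 0, sizeof(ErrorInfo_struct));
				INIT_COMPLETION(wait);  /* re-arm and resend */
				goto resend_cmd2;
			}
			/* retries exhausted: fall through to the error case */
		default:
			return_status = IO_ERROR;
			break;
		}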
1814static void cciss_geometry_inquiry(int ctlr, int logvol, 1848static void cciss_geometry_inquiry(int ctlr, int logvol,
1815 int withirq, unsigned int total_size, 1849 int withirq, unsigned int total_size,
1816 unsigned int block_size, InquiryData_struct *inq_buff, 1850 unsigned int block_size,
1817 drive_info_struct *drv) 1851 InquiryData_struct *inq_buff,
1852 drive_info_struct *drv)
1818{ 1853{
1819 int return_code; 1854 int return_code;
1820 memset(inq_buff, 0, sizeof(InquiryData_struct)); 1855 memset(inq_buff, 0, sizeof(InquiryData_struct));
1821 if (withirq) 1856 if (withirq)
1822 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, 1857 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1823 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD); 1858 inq_buff, sizeof(*inq_buff), 1,
1859 logvol, 0xC1, TYPE_CMD);
1824 else 1860 else
1825 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff, 1861 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1826 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD); 1862 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1863 TYPE_CMD);
1827 if (return_code == IO_OK) { 1864 if (return_code == IO_OK) {
1828 if(inq_buff->data_byte[8] == 0xFF) { 1865 if (inq_buff->data_byte[8] == 0xFF) {
1829 printk(KERN_WARNING 1866 printk(KERN_WARNING
1830 "cciss: reading geometry failed, volume " 1867 "cciss: reading geometry failed, volume "
1831 "does not support reading geometry\n"); 1868 "does not support reading geometry\n");
1832 drv->block_size = block_size; 1869 drv->block_size = block_size;
1833 drv->nr_blocks = total_size; 1870 drv->nr_blocks = total_size;
1834 drv->heads = 255; 1871 drv->heads = 255;
1835 drv->sectors = 32; // Sectors per track 1872 drv->sectors = 32; // Sectors per track
1836 drv->cylinders = total_size / 255 / 32; 1873 drv->cylinders = total_size / 255 / 32;
1837 } else { 1874 } else {
1838 unsigned int t; 1875 unsigned int t;
@@ -1846,37 +1883,42 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
1846 drv->raid_level = inq_buff->data_byte[8]; 1883 drv->raid_level = inq_buff->data_byte[8];
1847 t = drv->heads * drv->sectors; 1884 t = drv->heads * drv->sectors;
1848 if (t > 1) { 1885 if (t > 1) {
1849 drv->cylinders = total_size/t; 1886 drv->cylinders = total_size / t;
1850 } 1887 }
1851 } 1888 }
1852 } else { /* Get geometry failed */ 1889 } else { /* Get geometry failed */
1853 printk(KERN_WARNING "cciss: reading geometry failed\n"); 1890 printk(KERN_WARNING "cciss: reading geometry failed\n");
1854 } 1891 }
1855 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n", 1892 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1856 drv->heads, drv->sectors, drv->cylinders); 1893 drv->heads, drv->sectors, drv->cylinders);
1857} 1894}
1895
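When the 0xC1 geometry page is not supported (data_byte[8] == 0xFF) the driver invents a geometry of 255 heads and 32 sectors per track and derives the cylinder count from the block count; when the page is valid, cylinders come from total_size divided by heads * sectors instead. A tiny numeric illustration of the fallback case:

	#include <stdio.h>

	int main(void)
	{
		unsigned int total_size = 143305920;    /* example block count */
		unsigned int heads = 255, sectors = 32; /* fallback geometry */
		unsigned int cylinders = total_size / heads / sectors;

		printf("C/H/S = %u/%u/%u\n", cylinders, heads, sectors);
		return 0;
	}

For this example block count the result is exactly 17562 cylinders.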
1858static void 1896static void
1859cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf, 1897cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1860 int withirq, unsigned int *total_size, unsigned int *block_size) 1898 int withirq, unsigned int *total_size,
1899 unsigned int *block_size)
1861{ 1900{
1862 int return_code; 1901 int return_code;
1863 memset(buf, 0, sizeof(*buf)); 1902 memset(buf, 0, sizeof(*buf));
1864 if (withirq) 1903 if (withirq)
1865 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, 1904 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1866 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD); 1905 ctlr, buf, sizeof(*buf), 1,
1906 logvol, 0, TYPE_CMD);
1867 else 1907 else
1868 return_code = sendcmd(CCISS_READ_CAPACITY, 1908 return_code = sendcmd(CCISS_READ_CAPACITY,
1869 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD); 1909 ctlr, buf, sizeof(*buf), 1, logvol, 0,
1910 NULL, TYPE_CMD);
1870 if (return_code == IO_OK) { 1911 if (return_code == IO_OK) {
1871 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1; 1912 *total_size =
1872 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0])); 1913 be32_to_cpu(*((__be32 *) & buf->total_size[0])) + 1;
1873 } else { /* read capacity command failed */ 1914 *block_size = be32_to_cpu(*((__be32 *) & buf->block_size[0]));
1915 } else { /* read capacity command failed */
1874 printk(KERN_WARNING "cciss: read capacity failed\n"); 1916 printk(KERN_WARNING "cciss: read capacity failed\n");
1875 *total_size = 0; 1917 *total_size = 0;
1876 *block_size = BLOCK_SIZE; 1918 *block_size = BLOCK_SIZE;
1877 } 1919 }
1878 printk(KERN_INFO " blocks= %u block_size= %d\n", 1920 printk(KERN_INFO " blocks= %u block_size= %d\n",
1879 *total_size, *block_size); 1921 *total_size, *block_size);
1880 return; 1922 return;
1881} 1923}
1882 1924
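cciss_read_capacity() byte-swaps the returned fields with be32_to_cpu() because the controller stores them big-endian, and the +1 turns the highest addressable block into a block count, the usual READ CAPACITY convention. A userspace stand-in for the conversion (be32_demo() plays the role of be32_to_cpu() here):

	#include <stdint.h>
	#include <stdio.h>

	/* The controller stores these fields most-significant byte first. */
	static uint32_t be32_demo(const uint8_t b[4])
	{
		return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
		       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
	}

	int main(void)
	{
		uint8_t total_size[4] = { 0x08, 0x8a, 0xac, 0xbf }; /* last LBA */
		uint8_t block_size[4] = { 0x00, 0x00, 0x02, 0x00 }; /* 512 bytes */

		/* last block address + 1 => number of blocks */
		printf("blocks=%u block_size=%u\n",
		       be32_demo(total_size) + 1, be32_demo(block_size));
		return 0;
	}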
@@ -1885,38 +1927,38 @@ static int cciss_revalidate(struct gendisk *disk)
1885 ctlr_info_t *h = get_host(disk); 1927 ctlr_info_t *h = get_host(disk);
1886 drive_info_struct *drv = get_drv(disk); 1928 drive_info_struct *drv = get_drv(disk);
1887 int logvol; 1929 int logvol;
1888 int FOUND=0; 1930 int FOUND = 0;
1889 unsigned int block_size; 1931 unsigned int block_size;
1890 unsigned int total_size; 1932 unsigned int total_size;
1891 ReadCapdata_struct *size_buff = NULL; 1933 ReadCapdata_struct *size_buff = NULL;
1892 InquiryData_struct *inq_buff = NULL; 1934 InquiryData_struct *inq_buff = NULL;
1893 1935
1894 for(logvol=0; logvol < CISS_MAX_LUN; logvol++) 1936 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
1895 { 1937 if (h->drv[logvol].LunID == drv->LunID) {
1896 if(h->drv[logvol].LunID == drv->LunID) { 1938 FOUND = 1;
1897 FOUND=1;
1898 break; 1939 break;
1899 } 1940 }
1900 } 1941 }
1901 1942
1902 if (!FOUND) return 1; 1943 if (!FOUND)
1944 return 1;
1903 1945
1904 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL); 1946 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1905 if (size_buff == NULL) 1947 if (size_buff == NULL) {
1906 { 1948 printk(KERN_WARNING "cciss: out of memory\n");
1907 printk(KERN_WARNING "cciss: out of memory\n"); 1949 return 1;
1908 return 1; 1950 }
1909 } 1951 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1910 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL); 1952 if (inq_buff == NULL) {
1911 if (inq_buff == NULL) 1953 printk(KERN_WARNING "cciss: out of memory\n");
1912 {
1913 printk(KERN_WARNING "cciss: out of memory\n");
1914 kfree(size_buff); 1954 kfree(size_buff);
1915 return 1; 1955 return 1;
1916 } 1956 }
1917 1957
1918 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size); 1958 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size,
1919 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv); 1959 &block_size);
1960 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
1961 inq_buff, drv);
1920 1962
1921 blk_queue_hardsect_size(drv->queue, drv->block_size); 1963 blk_queue_hardsect_size(drv->queue, drv->block_size);
1922 set_capacity(disk, drv->nr_blocks); 1964 set_capacity(disk, drv->nr_blocks);
@@ -1943,7 +1985,7 @@ static unsigned long pollcomplete(int ctlr)
1943 if (done == FIFO_EMPTY) 1985 if (done == FIFO_EMPTY)
1944 schedule_timeout_uninterruptible(1); 1986 schedule_timeout_uninterruptible(1);
1945 else 1987 else
1946 return (done); 1988 return done;
1947 } 1989 }
1948 /* Invalid address to tell caller we ran out of time */ 1990 /* Invalid address to tell caller we ran out of time */
1949 return 1; 1991 return 1;
@@ -1952,28 +1994,28 @@ static unsigned long pollcomplete(int ctlr)
1952static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete) 1994static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1953{ 1995{
1954 /* We get in here if sendcmd() is polling for completions 1996 /* We get in here if sendcmd() is polling for completions
1955 and gets some command back that it wasn't expecting -- 1997 and gets some command back that it wasn't expecting --
1956 something other than that which it just sent down. 1998 something other than that which it just sent down.
1957 Ordinarily, that shouldn't happen, but it can happen when 1999 Ordinarily, that shouldn't happen, but it can happen when
1958 the scsi tape stuff gets into error handling mode, and 2000 the scsi tape stuff gets into error handling mode, and
1959 starts using sendcmd() to try to abort commands and 2001 starts using sendcmd() to try to abort commands and
1960 reset tape drives. In that case, sendcmd may pick up 2002 reset tape drives. In that case, sendcmd may pick up
1961 completions of commands that were sent to logical drives 2003 completions of commands that were sent to logical drives
1962 through the block i/o system, or cciss ioctls completing, etc. 2004 through the block i/o system, or cciss ioctls completing, etc.
1963 In that case, we need to save those completions for later 2005 In that case, we need to save those completions for later
1964 processing by the interrupt handler. 2006 processing by the interrupt handler.
1965 */ 2007 */
1966 2008
1967#ifdef CONFIG_CISS_SCSI_TAPE 2009#ifdef CONFIG_CISS_SCSI_TAPE
1968 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects; 2010 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
1969 2011
1970 /* If it's not the scsi tape stuff doing error handling, (abort */ 2012 /* If it's not the scsi tape stuff doing error handling, (abort */
1971 /* or reset) then we don't expect anything weird. */ 2013 /* or reset) then we don't expect anything weird. */
1972 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) { 2014 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
1973#endif 2015#endif
1974 printk( KERN_WARNING "cciss cciss%d: SendCmd " 2016 printk(KERN_WARNING "cciss cciss%d: SendCmd "
1975 "Invalid command list address returned! (%lx)\n", 2017 "Invalid command list address returned! (%lx)\n",
1976 ctlr, complete); 2018 ctlr, complete);
1977 /* not much we can do. */ 2019 /* not much we can do. */
1978#ifdef CONFIG_CISS_SCSI_TAPE 2020#ifdef CONFIG_CISS_SCSI_TAPE
1979 return 1; 2021 return 1;
@@ -1984,7 +2026,7 @@ static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1984 if (srl->ncompletions >= (NR_CMDS + 2)) { 2026 if (srl->ncompletions >= (NR_CMDS + 2)) {
1985 /* Uh oh. No room to save it for later... */ 2027 /* Uh oh. No room to save it for later... */
1986 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, " 2028 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
1987 "reject list overflow, command lost!\n", ctlr); 2029 "reject list overflow, command lost!\n", ctlr);
1988 return 1; 2030 return 1;
1989 } 2031 }
1990 /* Save it for later */ 2032 /* Save it for later */
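add_sendcmd_reject() covers the corner case the comment above describes: while sendcmd() is polling for its own completion, a completion for some unrelated command (block I/O or a cciss ioctl) can pop out of the FIFO, so it is parked in a small fixed-size list for the interrupt handler to drain later, or dropped with a warning once that list is full. A self-contained sketch of the save-or-drop bookkeeping; the structure and limit below are hypothetical stand-ins for the driver's sendcmd_reject_list and NR_CMDS + 2.

	#include <stdio.h>

	#define MAX_SAVED 8     /* stand-in for NR_CMDS + 2 */

	struct reject_list_demo {
		int ncompletions;
		unsigned long complete[MAX_SAVED];
	};

	/* Returns 1 if the stray completion was lost, 0 if saved for later. */
	static int save_stray_completion(struct reject_list_demo *srl,
					 unsigned long complete)
	{
		if (srl->ncompletions >= MAX_SAVED) {
			fprintf(stderr, "reject list overflow, command lost!\n");
			return 1;
		}
		srl->complete[srl->ncompletions++] = complete;
		return 0;
	}

	int main(void)
	{
		struct reject_list_demo srl = { 0 };

		save_stray_completion(&srl, 0xdeadbe00);
		printf("saved %d stray completion(s)\n", srl.ncompletions);
		return 0;
	}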
@@ -1995,340 +2037,327 @@ static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
1995} 2037}
1996 2038
1997/* 2039/*
1998 * Send a command to the controller, and wait for it to complete. 2040 * Send a command to the controller, and wait for it to complete.
1999 * Only used at init time. 2041 * Only used at init time.
2000 */ 2042 */
2001static int sendcmd( 2043static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2002 __u8 cmd, 2044 1: address logical volume log_unit,
2003 int ctlr, 2045 2: periph device address is scsi3addr */
2004 void *buff, 2046 unsigned int log_unit,
2005 size_t size, 2047 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2006 unsigned int use_unit_num, /* 0: address the controller,
2007 1: address logical volume log_unit,
2008 2: periph device address is scsi3addr */
2009 unsigned int log_unit,
2010 __u8 page_code,
2011 unsigned char *scsi3addr,
2012 int cmd_type)
2013{ 2048{
2014 CommandList_struct *c; 2049 CommandList_struct *c;
2015 int i; 2050 int i;
2016 unsigned long complete; 2051 unsigned long complete;
2017 ctlr_info_t *info_p= hba[ctlr]; 2052 ctlr_info_t *info_p = hba[ctlr];
2018 u64bit buff_dma_handle; 2053 u64bit buff_dma_handle;
2019 int status, done = 0; 2054 int status, done = 0;
2020 2055
2021 if ((c = cmd_alloc(info_p, 1)) == NULL) { 2056 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2022 printk(KERN_WARNING "cciss: unable to get memory"); 2057 printk(KERN_WARNING "cciss: unable to get memory");
2023 return(IO_ERROR); 2058 return IO_ERROR;
2024 } 2059 }
2025 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num, 2060 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2026 log_unit, page_code, scsi3addr, cmd_type); 2061 log_unit, page_code, scsi3addr, cmd_type);
2027 if (status != IO_OK) { 2062 if (status != IO_OK) {
2028 cmd_free(info_p, c, 1); 2063 cmd_free(info_p, c, 1);
2029 return status; 2064 return status;
2030 } 2065 }
2031resend_cmd1: 2066 resend_cmd1:
2032 /* 2067 /*
2033 * Disable interrupt 2068 * Disable interrupt
2034 */ 2069 */
2035#ifdef CCISS_DEBUG 2070#ifdef CCISS_DEBUG
2036 printk(KERN_DEBUG "cciss: turning intr off\n"); 2071 printk(KERN_DEBUG "cciss: turning intr off\n");
2037#endif /* CCISS_DEBUG */ 2072#endif /* CCISS_DEBUG */
2038 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF); 2073 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2039 2074
2040 /* Make sure there is room in the command FIFO */ 2075 /* Make sure there is room in the command FIFO */
2041 /* Actually it should be completely empty at this time */ 2076 /* Actually it should be completely empty at this time */
2042 /* unless we are in here doing error handling for the scsi */ 2077 /* unless we are in here doing error handling for the scsi */
2043 /* tape side of the driver. */ 2078 /* tape side of the driver. */
2044 for (i = 200000; i > 0; i--) 2079 for (i = 200000; i > 0; i--) {
2045 {
2046 /* if fifo isn't full go */ 2080 /* if fifo isn't full go */
2047 if (!(info_p->access.fifo_full(info_p))) 2081 if (!(info_p->access.fifo_full(info_p))) {
2048 { 2082
2049 2083 break;
2050 break; 2084 }
2051 } 2085 udelay(10);
2052 udelay(10); 2086 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2053 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full," 2087 " waiting!\n", ctlr);
2054 " waiting!\n", ctlr); 2088 }
2055 } 2089 /*
2056 /* 2090 * Send the cmd
2057 * Send the cmd 2091 */
2058 */ 2092 info_p->access.submit_command(info_p, c);
2059 info_p->access.submit_command(info_p, c);
2060 done = 0; 2093 done = 0;
2061 do { 2094 do {
2062 complete = pollcomplete(ctlr); 2095 complete = pollcomplete(ctlr);
2063 2096
2064#ifdef CCISS_DEBUG 2097#ifdef CCISS_DEBUG
2065 printk(KERN_DEBUG "cciss: command completed\n"); 2098 printk(KERN_DEBUG "cciss: command completed\n");
2066#endif /* CCISS_DEBUG */ 2099#endif /* CCISS_DEBUG */
2067 2100
2068 if (complete == 1) { 2101 if (complete == 1) {
2069 printk( KERN_WARNING 2102 printk(KERN_WARNING
 2070 				"cciss cciss%d: SendCmd Timed out, " 2103 			       "cciss cciss%d: SendCmd Timed out, "
2071 "No command list address returned!\n", 2104 "No command list address returned!\n", ctlr);
2072 ctlr);
2073 status = IO_ERROR; 2105 status = IO_ERROR;
2074 done = 1; 2106 done = 1;
2075 break; 2107 break;
2076 } 2108 }
2077 2109
2078 /* This will need to change for direct lookup completions */ 2110 /* This will need to change for direct lookup completions */
2079 if ( (complete & CISS_ERROR_BIT) 2111 if ((complete & CISS_ERROR_BIT)
2080 && (complete & ~CISS_ERROR_BIT) == c->busaddr) 2112 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
 2081 			{ 2113 			/* if data overrun or underrun on Report command
 2082 			/* if data overrun or underrun on Report command 2114 			   ignore it
2083 ignore it 2115 */
2084 */
2085 if (((c->Request.CDB[0] == CISS_REPORT_LOG) || 2116 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2086 (c->Request.CDB[0] == CISS_REPORT_PHYS) || 2117 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2087 (c->Request.CDB[0] == CISS_INQUIRY)) && 2118 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2088 ((c->err_info->CommandStatus == 2119 ((c->err_info->CommandStatus ==
2089 CMD_DATA_OVERRUN) || 2120 CMD_DATA_OVERRUN) ||
2090 (c->err_info->CommandStatus == 2121 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2091 CMD_DATA_UNDERRUN) 2122 )) {
2092 ))
2093 {
2094 complete = c->busaddr; 2123 complete = c->busaddr;
2095 } else { 2124 } else {
2096 if (c->err_info->CommandStatus == 2125 if (c->err_info->CommandStatus ==
2097 CMD_UNSOLICITED_ABORT) { 2126 CMD_UNSOLICITED_ABORT) {
2098 printk(KERN_WARNING "cciss%d: " 2127 printk(KERN_WARNING "cciss%d: "
2099 "unsolicited abort %p\n", 2128 "unsolicited abort %p\n",
2100 ctlr, c); 2129 ctlr, c);
2101 if (c->retry_count < MAX_CMD_RETRIES) { 2130 if (c->retry_count < MAX_CMD_RETRIES) {
2102 printk(KERN_WARNING 2131 printk(KERN_WARNING
2103 "cciss%d: retrying %p\n", 2132 "cciss%d: retrying %p\n",
2104 ctlr, c); 2133 ctlr, c);
2105 c->retry_count++; 2134 c->retry_count++;
2106 /* erase the old error */ 2135 /* erase the old error */
2107 /* information */ 2136 /* information */
2108 memset(c->err_info, 0, 2137 memset(c->err_info, 0,
2109 sizeof(ErrorInfo_struct)); 2138 sizeof
2139 (ErrorInfo_struct));
2110 goto resend_cmd1; 2140 goto resend_cmd1;
2111 } else { 2141 } else {
2112 printk(KERN_WARNING 2142 printk(KERN_WARNING
2113 "cciss%d: retried %p too " 2143 "cciss%d: retried %p too "
2114 "many times\n", ctlr, c); 2144 "many times\n", ctlr, c);
2115 status = IO_ERROR; 2145 status = IO_ERROR;
2116 goto cleanup1; 2146 goto cleanup1;
2117 } 2147 }
2118 } else if (c->err_info->CommandStatus == CMD_UNABORTABLE) { 2148 } else if (c->err_info->CommandStatus ==
2119 printk(KERN_WARNING "cciss%d: command could not be aborted.\n", ctlr); 2149 CMD_UNABORTABLE) {
2150 printk(KERN_WARNING
2151 "cciss%d: command could not be aborted.\n",
2152 ctlr);
2120 status = IO_ERROR; 2153 status = IO_ERROR;
2121 goto cleanup1; 2154 goto cleanup1;
2122 } 2155 }
2123 printk(KERN_WARNING "ciss ciss%d: sendcmd" 2156 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2124 " Error %x \n", ctlr, 2157 " Error %x \n", ctlr,
2125 c->err_info->CommandStatus); 2158 c->err_info->CommandStatus);
2126 printk(KERN_WARNING "ciss ciss%d: sendcmd" 2159 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2127 " offensive info\n" 2160 " offensive info\n"
2128 " size %x\n num %x value %x\n", ctlr, 2161 " size %x\n num %x value %x\n",
2129 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size, 2162 ctlr,
2130 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num, 2163 c->err_info->MoreErrInfo.Invalid_Cmd.
2131 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value); 2164 offense_size,
2165 c->err_info->MoreErrInfo.Invalid_Cmd.
2166 offense_num,
2167 c->err_info->MoreErrInfo.Invalid_Cmd.
2168 offense_value);
2132 status = IO_ERROR; 2169 status = IO_ERROR;
2133 goto cleanup1; 2170 goto cleanup1;
2134 } 2171 }
2135 } 2172 }
2136 /* This will need changing for direct lookup completions */ 2173 /* This will need changing for direct lookup completions */
2137 if (complete != c->busaddr) { 2174 if (complete != c->busaddr) {
2138 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) { 2175 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2139 BUG(); /* we are pretty much hosed if we get here. */ 2176 BUG(); /* we are pretty much hosed if we get here. */
2140 } 2177 }
2141 continue; 2178 continue;
2142 } else 2179 } else
2143 done = 1; 2180 done = 1;
2144 } while (!done); 2181 } while (!done);
2145 2182
2146cleanup1: 2183 cleanup1:
2147 /* unlock the data buffer from DMA */ 2184 /* unlock the data buffer from DMA */
2148 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2185 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2149 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2186 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2150 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val, 2187 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2151 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2188 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2152#ifdef CONFIG_CISS_SCSI_TAPE 2189#ifdef CONFIG_CISS_SCSI_TAPE
2153 /* if we saved some commands for later, process them now. */ 2190 /* if we saved some commands for later, process them now. */
2154 if (info_p->scsi_rejects.ncompletions > 0) 2191 if (info_p->scsi_rejects.ncompletions > 0)
2155 do_cciss_intr(0, info_p, NULL); 2192 do_cciss_intr(0, info_p, NULL);
2156#endif 2193#endif
2157 cmd_free(info_p, c, 1); 2194 cmd_free(info_p, c, 1);
2158 return (status); 2195 return status;
2159} 2196}
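
sendcmd() is the polled, init-time path: interrupts are masked, the command is submitted, and completions are fished out of the FIFO by pollcomplete(), with a bounded retry on unsolicited aborts. A stripped-down skeleton of that control flow, using placeholder hooks rather than the driver's API:

        /* illustrative skeleton only; submit(), poll() and the status codes
         * are placeholders, not the cciss functions */
        enum { STAT_OK, STAT_UNSOLICITED_ABORT, STAT_ERROR };

        #define MAX_RETRIES 3

        static int send_and_wait(void (*submit)(void), int (*poll)(void))
        {
                int retries = 0;
                int status;

        resend:
                submit();               /* hand the command to the hardware */
                status = poll();        /* spin until a completion shows up */

                if (status == STAT_UNSOLICITED_ABORT && retries < MAX_RETRIES) {
                        retries++;      /* clear error info and try again */
                        goto resend;
                }
                return status == STAT_OK ? 0 : -1;
        }
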
2197
2160/* 2198/*
2161 * Map (physical) PCI mem into (virtual) kernel space 2199 * Map (physical) PCI mem into (virtual) kernel space
2162 */ 2200 */
2163static void __iomem *remap_pci_mem(ulong base, ulong size) 2201static void __iomem *remap_pci_mem(ulong base, ulong size)
2164{ 2202{
2165 ulong page_base = ((ulong) base) & PAGE_MASK; 2203 ulong page_base = ((ulong) base) & PAGE_MASK;
2166 ulong page_offs = ((ulong) base) - page_base; 2204 ulong page_offs = ((ulong) base) - page_base;
2167 void __iomem *page_remapped = ioremap(page_base, page_offs+size); 2205 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2168 2206
2169 return page_remapped ? (page_remapped + page_offs) : NULL; 2207 return page_remapped ? (page_remapped + page_offs) : NULL;
2170} 2208}
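
remap_pci_mem() rounds the physical address down to a page boundary before mapping and adds the offset back afterwards, so callers can pass an unaligned BAR address. A small sketch of the arithmetic, assuming a 4 KiB page size for the worked example:

        /* sketch of the page-alignment split, with an assumed 4 KiB page */
        #define PAGE_SIZE_EX 4096UL
        #define PAGE_MASK_EX (~(PAGE_SIZE_EX - 1))

        /* For base = 0x80001234, size = 200:
         *   page_base = 0x80001000, page_offs = 0x234
         * so the mapping covers page_offs + size bytes starting at page_base,
         * and the caller gets back mapped_base + page_offs.
         */
        static unsigned long split_base(unsigned long base, unsigned long *offs)
        {
                unsigned long page_base = base & PAGE_MASK_EX;

                *offs = base - page_base;
                return page_base;
        }
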
2171 2209
2172/* 2210/*
 2173  * Takes jobs off the Q and sends them to the hardware, then puts them on 2211  * Takes jobs off the Q and sends them to the hardware, then puts them on
2174 * the Q to wait for completion. 2212 * the Q to wait for completion.
2175 */ 2213 */
2176static void start_io( ctlr_info_t *h) 2214static void start_io(ctlr_info_t *h)
2177{ 2215{
2178 CommandList_struct *c; 2216 CommandList_struct *c;
2179 2217
2180 while(( c = h->reqQ) != NULL ) 2218 while ((c = h->reqQ) != NULL) {
2181 {
2182 /* can't do anything if fifo is full */ 2219 /* can't do anything if fifo is full */
2183 if ((h->access.fifo_full(h))) { 2220 if ((h->access.fifo_full(h))) {
2184 printk(KERN_WARNING "cciss: fifo full\n"); 2221 printk(KERN_WARNING "cciss: fifo full\n");
2185 break; 2222 break;
2186 } 2223 }
2187 2224
2188 /* Get the first entry from the Request Q */ 2225 /* Get the first entry from the Request Q */
2189 removeQ(&(h->reqQ), c); 2226 removeQ(&(h->reqQ), c);
2190 h->Qdepth--; 2227 h->Qdepth--;
2191 2228
2192 /* Tell the controller execute command */ 2229 /* Tell the controller execute command */
2193 h->access.submit_command(h, c); 2230 h->access.submit_command(h, c);
2194 2231
2195 /* Put job onto the completed Q */ 2232 /* Put job onto the completed Q */
2196 addQ (&(h->cmpQ), c); 2233 addQ(&(h->cmpQ), c);
2197 } 2234 }
2198} 2235}
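
start_io() moves commands from the software request queue into the controller FIFO and parks them on the completion queue until the interrupt handler finds them. A simplified stand-alone version of that loop (the list handling here is a plain singly linked list, not the driver's addQ/removeQ):

        /* simplified queue-to-hardware loop; cmd_t and ctlr_t are stand-ins */
        struct cmd_t {
                struct cmd_t *next;
        };

        struct ctlr_t {
                struct cmd_t *reqQ;     /* commands waiting to be sent */
                struct cmd_t *cmpQ;     /* commands sent, awaiting completion */
                int qdepth;
                int (*fifo_full)(struct ctlr_t *);
                void (*submit)(struct ctlr_t *, struct cmd_t *);
        };

        static void start_io_sketch(struct ctlr_t *h)
        {
                struct cmd_t *c;

                while ((c = h->reqQ) != NULL) {
                        if (h->fifo_full(h))
                                break;          /* hardware can't take more */
                        h->reqQ = c->next;      /* pop from the request queue */
                        h->qdepth--;
                        h->submit(h, c);        /* hand it to the controller */
                        c->next = h->cmpQ;      /* park on the completion queue */
                        h->cmpQ = c;
                }
        }
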
2236
2199/* Assumes that CCISS_LOCK(h->ctlr) is held. */ 2237/* Assumes that CCISS_LOCK(h->ctlr) is held. */
2200/* Zeros out the error record and then resends the command back */ 2238/* Zeros out the error record and then resends the command back */
2201/* to the controller */ 2239/* to the controller */
2202static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c) 2240static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2203{ 2241{
2204 /* erase the old error information */ 2242 /* erase the old error information */
2205 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 2243 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2206 2244
2207 /* add it to software queue and then send it to the controller */ 2245 /* add it to software queue and then send it to the controller */
2208 addQ(&(h->reqQ),c); 2246 addQ(&(h->reqQ), c);
2209 h->Qdepth++; 2247 h->Qdepth++;
2210 if(h->Qdepth > h->maxQsinceinit) 2248 if (h->Qdepth > h->maxQsinceinit)
2211 h->maxQsinceinit = h->Qdepth; 2249 h->maxQsinceinit = h->Qdepth;
2212 2250
2213 start_io(h); 2251 start_io(h);
2214} 2252}
2215 2253
2216/* checks the status of the job and calls complete buffers to mark all 2254/* checks the status of the job and calls complete buffers to mark all
2217 * buffers for the completed job. Note that this function does not need 2255 * buffers for the completed job. Note that this function does not need
2218 * to hold the hba/queue lock. 2256 * to hold the hba/queue lock.
2219 */ 2257 */
2220static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd, 2258static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2221 int timeout) 2259 int timeout)
2222{ 2260{
2223 int status = 1; 2261 int status = 1;
2224 int retry_cmd = 0; 2262 int retry_cmd = 0;
2225 2263
2226 if (timeout) 2264 if (timeout)
2227 status = 0; 2265 status = 0;
2228 2266
2229 if(cmd->err_info->CommandStatus != 0) 2267 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2230 { /* an error has occurred */ 2268 switch (cmd->err_info->CommandStatus) {
2231 switch(cmd->err_info->CommandStatus)
2232 {
2233 unsigned char sense_key; 2269 unsigned char sense_key;
2234 case CMD_TARGET_STATUS: 2270 case CMD_TARGET_STATUS:
2235 status = 0; 2271 status = 0;
2236 2272
2237 if( cmd->err_info->ScsiStatus == 0x02) 2273 if (cmd->err_info->ScsiStatus == 0x02) {
2238 { 2274 printk(KERN_WARNING "cciss: cmd %p "
2239 printk(KERN_WARNING "cciss: cmd %p " 2275 "has CHECK CONDITION "
2240 "has CHECK CONDITION " 2276 " byte 2 = 0x%x\n", cmd,
2241 " byte 2 = 0x%x\n", cmd, 2277 cmd->err_info->SenseInfo[2]
2242 cmd->err_info->SenseInfo[2] 2278 );
2243 ); 2279 /* check the sense key */
2244 /* check the sense key */ 2280 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2245 sense_key = 0xf & 2281 /* no status or recovered error */
2246 cmd->err_info->SenseInfo[2]; 2282 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2247 /* no status or recovered error */ 2283 status = 1;
2248 if((sense_key == 0x0) ||
2249 (sense_key == 0x1))
2250 {
2251 status = 1;
2252 }
2253 } else
2254 {
2255 printk(KERN_WARNING "cciss: cmd %p "
2256 "has SCSI Status 0x%x\n",
2257 cmd, cmd->err_info->ScsiStatus);
2258 } 2284 }
2285 } else {
2286 printk(KERN_WARNING "cciss: cmd %p "
2287 "has SCSI Status 0x%x\n",
2288 cmd, cmd->err_info->ScsiStatus);
2289 }
2259 break; 2290 break;
2260 case CMD_DATA_UNDERRUN: 2291 case CMD_DATA_UNDERRUN:
2261 printk(KERN_WARNING "cciss: cmd %p has" 2292 printk(KERN_WARNING "cciss: cmd %p has"
2262 " completed with data underrun " 2293 " completed with data underrun "
2263 "reported\n", cmd); 2294 "reported\n", cmd);
2264 break; 2295 break;
2265 case CMD_DATA_OVERRUN: 2296 case CMD_DATA_OVERRUN:
2266 printk(KERN_WARNING "cciss: cmd %p has" 2297 printk(KERN_WARNING "cciss: cmd %p has"
2267 " completed with data overrun " 2298 " completed with data overrun "
2268 "reported\n", cmd); 2299 "reported\n", cmd);
2269 break; 2300 break;
2270 case CMD_INVALID: 2301 case CMD_INVALID:
2271 printk(KERN_WARNING "cciss: cmd %p is " 2302 printk(KERN_WARNING "cciss: cmd %p is "
2272 "reported invalid\n", cmd); 2303 "reported invalid\n", cmd);
2273 status = 0; 2304 status = 0;
2274 break; 2305 break;
2275 case CMD_PROTOCOL_ERR: 2306 case CMD_PROTOCOL_ERR:
2276 printk(KERN_WARNING "cciss: cmd %p has " 2307 printk(KERN_WARNING "cciss: cmd %p has "
2277 "protocol error \n", cmd); 2308 "protocol error \n", cmd);
2278 status = 0; 2309 status = 0;
2279 break;
2280 case CMD_HARDWARE_ERR:
2281 printk(KERN_WARNING "cciss: cmd %p had "
2282 " hardware error\n", cmd);
2283 status = 0;
2284 break;
2285 case CMD_CONNECTION_LOST:
2286 printk(KERN_WARNING "cciss: cmd %p had "
2287 "connection lost\n", cmd);
2288 status=0;
2289 break; 2310 break;
2290 case CMD_ABORTED: 2311 case CMD_HARDWARE_ERR:
2291 printk(KERN_WARNING "cciss: cmd %p was " 2312 printk(KERN_WARNING "cciss: cmd %p had "
2292 "aborted\n", cmd); 2313 " hardware error\n", cmd);
2293 status=0; 2314 status = 0;
2294 break; 2315 break;
2295 case CMD_ABORT_FAILED: 2316 case CMD_CONNECTION_LOST:
2296 printk(KERN_WARNING "cciss: cmd %p reports " 2317 printk(KERN_WARNING "cciss: cmd %p had "
2297 "abort failed\n", cmd); 2318 "connection lost\n", cmd);
2298 status=0; 2319 status = 0;
2299 break; 2320 break;
2300 case CMD_UNSOLICITED_ABORT: 2321 case CMD_ABORTED:
2301 printk(KERN_WARNING "cciss%d: unsolicited " 2322 printk(KERN_WARNING "cciss: cmd %p was "
2302 "abort %p\n", h->ctlr, cmd); 2323 "aborted\n", cmd);
2303 if (cmd->retry_count < MAX_CMD_RETRIES) { 2324 status = 0;
2304 retry_cmd=1; 2325 break;
2305 printk(KERN_WARNING 2326 case CMD_ABORT_FAILED:
2306 "cciss%d: retrying %p\n", 2327 printk(KERN_WARNING "cciss: cmd %p reports "
2307 h->ctlr, cmd); 2328 "abort failed\n", cmd);
2308 cmd->retry_count++; 2329 status = 0;
2309 } else 2330 break;
2310 printk(KERN_WARNING 2331 case CMD_UNSOLICITED_ABORT:
2311 "cciss%d: %p retried too " 2332 printk(KERN_WARNING "cciss%d: unsolicited "
2312 "many times\n", h->ctlr, cmd); 2333 "abort %p\n", h->ctlr, cmd);
2313 status=0; 2334 if (cmd->retry_count < MAX_CMD_RETRIES) {
2335 retry_cmd = 1;
2336 printk(KERN_WARNING
2337 "cciss%d: retrying %p\n", h->ctlr, cmd);
2338 cmd->retry_count++;
2339 } else
2340 printk(KERN_WARNING
2341 "cciss%d: %p retried too "
2342 "many times\n", h->ctlr, cmd);
2343 status = 0;
2314 break; 2344 break;
2315 case CMD_TIMEOUT: 2345 case CMD_TIMEOUT:
2316 printk(KERN_WARNING "cciss: cmd %p timedout\n", 2346 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2317 cmd); 2347 status = 0;
2318 status=0;
2319 break; 2348 break;
2320 default: 2349 default:
2321 printk(KERN_WARNING "cciss: cmd %p returned " 2350 printk(KERN_WARNING "cciss: cmd %p returned "
2322 "unknown status %x\n", cmd, 2351 "unknown status %x\n", cmd,
2323 cmd->err_info->CommandStatus); 2352 cmd->err_info->CommandStatus);
2324 status=0; 2353 status = 0;
2325 } 2354 }
2326 } 2355 }
2327 /* We need to return this command */ 2356 /* We need to return this command */
2328 if(retry_cmd) { 2357 if (retry_cmd) {
2329 resend_cciss_cmd(h,cmd); 2358 resend_cciss_cmd(h, cmd);
2330 return; 2359 return;
2331 } 2360 }
2332 2361
2333 cmd->rq->completion_data = cmd; 2362 cmd->rq->completion_data = cmd;
2334 cmd->rq->errors = status; 2363 cmd->rq->errors = status;
@@ -2336,12 +2365,12 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
2336 blk_complete_request(cmd->rq); 2365 blk_complete_request(cmd->rq);
2337} 2366}
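
For CMD_TARGET_STATUS with SCSI status 0x02 (CHECK CONDITION), the sense key is the low nibble of sense byte 2, and keys 0x0 (no sense) and 0x1 (recovered error) are still counted as success. A sketch of just that check:

        /* sense key is the low nibble of sense byte 2; NO SENSE (0x0) and
         * RECOVERED ERROR (0x1) still count as a successful command */
        static int target_status_ok(unsigned char scsi_status,
                                    const unsigned char *sense)
        {
                unsigned char sense_key;

                if (scsi_status != 0x02)        /* not CHECK CONDITION */
                        return 0;
                sense_key = sense[2] & 0x0f;
                return sense_key == 0x0 || sense_key == 0x1;
        }
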
2338 2367
2339/* 2368/*
2340 * Get a request and submit it to the controller. 2369 * Get a request and submit it to the controller.
2341 */ 2370 */
2342static void do_cciss_request(request_queue_t *q) 2371static void do_cciss_request(request_queue_t *q)
2343{ 2372{
2344 ctlr_info_t *h= q->queuedata; 2373 ctlr_info_t *h = q->queuedata;
2345 CommandList_struct *c; 2374 CommandList_struct *c;
2346 int start_blk, seg; 2375 int start_blk, seg;
2347 struct request *creq; 2376 struct request *creq;
@@ -2352,18 +2381,18 @@ static void do_cciss_request(request_queue_t *q)
2352 2381
2353 /* We call start_io here in case there is a command waiting on the 2382 /* We call start_io here in case there is a command waiting on the
2354 * queue that has not been sent. 2383 * queue that has not been sent.
2355 */ 2384 */
2356 if (blk_queue_plugged(q)) 2385 if (blk_queue_plugged(q))
2357 goto startio; 2386 goto startio;
2358 2387
2359queue: 2388 queue:
2360 creq = elv_next_request(q); 2389 creq = elv_next_request(q);
2361 if (!creq) 2390 if (!creq)
2362 goto startio; 2391 goto startio;
2363 2392
2364 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES); 2393 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2365 2394
2366 if (( c = cmd_alloc(h, 1)) == NULL) 2395 if ((c = cmd_alloc(h, 1)) == NULL)
2367 goto full; 2396 goto full;
2368 2397
2369 blkdev_dequeue_request(creq); 2398 blkdev_dequeue_request(creq);
@@ -2372,81 +2401,82 @@ queue:
2372 2401
2373 c->cmd_type = CMD_RWREQ; 2402 c->cmd_type = CMD_RWREQ;
2374 c->rq = creq; 2403 c->rq = creq;
2375 2404
2376 /* fill in the request */ 2405 /* fill in the request */
2377 drv = creq->rq_disk->private_data; 2406 drv = creq->rq_disk->private_data;
2378 c->Header.ReplyQueue = 0; // unused in simple mode 2407 c->Header.ReplyQueue = 0; // unused in simple mode
2379 /* got command from pool, so use the command block index instead */ 2408 /* got command from pool, so use the command block index instead */
2380 /* for direct lookups. */ 2409 /* for direct lookups. */
2381 /* The first 2 bits are reserved for controller error reporting. */ 2410 /* The first 2 bits are reserved for controller error reporting. */
2382 c->Header.Tag.lower = (c->cmdindex << 3); 2411 c->Header.Tag.lower = (c->cmdindex << 3);
2383 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ 2412 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2384 c->Header.LUN.LogDev.VolId= drv->LunID; 2413 c->Header.LUN.LogDev.VolId = drv->LunID;
2385 c->Header.LUN.LogDev.Mode = 1; 2414 c->Header.LUN.LogDev.Mode = 1;
2386 c->Request.CDBLen = 10; // 12 byte commands not in FW yet; 2415 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2387 c->Request.Type.Type = TYPE_CMD; // It is a command. 2416 c->Request.Type.Type = TYPE_CMD; // It is a command.
2388 c->Request.Type.Attribute = ATTR_SIMPLE; 2417 c->Request.Type.Attribute = ATTR_SIMPLE;
2389 c->Request.Type.Direction = 2418 c->Request.Type.Direction =
2390 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE; 2419 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2391 c->Request.Timeout = 0; // Don't time out 2420 c->Request.Timeout = 0; // Don't time out
2392 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE; 2421 c->Request.CDB[0] =
2422 (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2393 start_blk = creq->sector; 2423 start_blk = creq->sector;
2394#ifdef CCISS_DEBUG 2424#ifdef CCISS_DEBUG
2395 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector, 2425 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2396 (int) creq->nr_sectors); 2426 (int)creq->nr_sectors);
2397#endif /* CCISS_DEBUG */ 2427#endif /* CCISS_DEBUG */
2398 2428
2399 seg = blk_rq_map_sg(q, creq, tmp_sg); 2429 seg = blk_rq_map_sg(q, creq, tmp_sg);
2400 2430
2401 /* get the DMA records for the setup */ 2431 /* get the DMA records for the setup */
2402 if (c->Request.Type.Direction == XFER_READ) 2432 if (c->Request.Type.Direction == XFER_READ)
2403 dir = PCI_DMA_FROMDEVICE; 2433 dir = PCI_DMA_FROMDEVICE;
2404 else 2434 else
2405 dir = PCI_DMA_TODEVICE; 2435 dir = PCI_DMA_TODEVICE;
2406 2436
2407 for (i=0; i<seg; i++) 2437 for (i = 0; i < seg; i++) {
2408 {
2409 c->SG[i].Len = tmp_sg[i].length; 2438 c->SG[i].Len = tmp_sg[i].length;
2410 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page, 2439 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2411 tmp_sg[i].offset, tmp_sg[i].length, 2440 tmp_sg[i].offset,
2412 dir); 2441 tmp_sg[i].length, dir);
2413 c->SG[i].Addr.lower = temp64.val32.lower; 2442 c->SG[i].Addr.lower = temp64.val32.lower;
2414 c->SG[i].Addr.upper = temp64.val32.upper; 2443 c->SG[i].Addr.upper = temp64.val32.upper;
2415 c->SG[i].Ext = 0; // we are not chaining 2444 c->SG[i].Ext = 0; // we are not chaining
2416 } 2445 }
2417 /* track how many SG entries we are using */ 2446 /* track how many SG entries we are using */
2418 if( seg > h->maxSG) 2447 if (seg > h->maxSG)
2419 h->maxSG = seg; 2448 h->maxSG = seg;
2420 2449
2421#ifdef CCISS_DEBUG 2450#ifdef CCISS_DEBUG
2422 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); 2451 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2423#endif /* CCISS_DEBUG */ 2452 creq->nr_sectors, seg);
2453#endif /* CCISS_DEBUG */
2424 2454
2425 c->Header.SGList = c->Header.SGTotal = seg; 2455 c->Header.SGList = c->Header.SGTotal = seg;
2426 c->Request.CDB[1]= 0; 2456 c->Request.CDB[1] = 0;
2427 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB 2457 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2428 c->Request.CDB[3]= (start_blk >> 16) & 0xff; 2458 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2429 c->Request.CDB[4]= (start_blk >> 8) & 0xff; 2459 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2430 c->Request.CDB[5]= start_blk & 0xff; 2460 c->Request.CDB[5] = start_blk & 0xff;
2431 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB 2461 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2432 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff; 2462 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2433 c->Request.CDB[8]= creq->nr_sectors & 0xff; 2463 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2434 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; 2464 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2435 2465
2436 spin_lock_irq(q->queue_lock); 2466 spin_lock_irq(q->queue_lock);
2437 2467
2438 addQ(&(h->reqQ),c); 2468 addQ(&(h->reqQ), c);
2439 h->Qdepth++; 2469 h->Qdepth++;
2440 if(h->Qdepth > h->maxQsinceinit) 2470 if (h->Qdepth > h->maxQsinceinit)
2441 h->maxQsinceinit = h->Qdepth; 2471 h->maxQsinceinit = h->Qdepth;
2442 2472
2443 goto queue; 2473 goto queue;
2444full: 2474 full:
2445 blk_stop_queue(q); 2475 blk_stop_queue(q);
2446startio: 2476 startio:
 2447 	/* We will already have the driver lock here so no need 2477 	/* We will already have the driver lock here so no need
2448 * to lock it. 2478 * to lock it.
2449 */ 2479 */
2450 start_io(h); 2480 start_io(h);
2451} 2481}
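
The request path builds a 10-byte READ/WRITE CDB by hand: a 32-bit big-endian LBA in bytes 2-5 and a 16-bit sector count in bytes 7-8. A self-contained sketch of that packing (0x28 and 0x2A are the standard READ(10)/WRITE(10) opcodes):

        #include <stdint.h>

        /* fill a 10-byte READ/WRITE CDB: big-endian LBA in bytes 2-5,
         * 16-bit sector count in bytes 7-8 */
        static void fill_rw10_cdb(uint8_t cdb[16], uint8_t opcode,
                                  uint32_t lba, uint16_t nsectors)
        {
                cdb[0] = opcode;                /* e.g. 0x28 READ(10), 0x2A WRITE(10) */
                cdb[1] = 0;
                cdb[2] = (lba >> 24) & 0xff;    /* LBA MSB */
                cdb[3] = (lba >> 16) & 0xff;
                cdb[4] = (lba >> 8) & 0xff;
                cdb[5] = lba & 0xff;            /* LBA LSB */
                cdb[6] = 0;
                cdb[7] = (nsectors >> 8) & 0xff;
                cdb[8] = nsectors & 0xff;
                cdb[9] = 0;                     /* control byte */
        }
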
2452 2482
@@ -2473,7 +2503,7 @@ static inline unsigned long get_next_completion(ctlr_info_t *h)
2473static inline int interrupt_pending(ctlr_info_t *h) 2503static inline int interrupt_pending(ctlr_info_t *h)
2474{ 2504{
2475#ifdef CONFIG_CISS_SCSI_TAPE 2505#ifdef CONFIG_CISS_SCSI_TAPE
2476 return ( h->access.intr_pending(h) 2506 return (h->access.intr_pending(h)
2477 || (h->scsi_rejects.ncompletions > 0)); 2507 || (h->scsi_rejects.ncompletions > 0));
2478#else 2508#else
2479 return h->access.intr_pending(h); 2509 return h->access.intr_pending(h);
@@ -2483,11 +2513,11 @@ static inline int interrupt_pending(ctlr_info_t *h)
2483static inline long interrupt_not_for_us(ctlr_info_t *h) 2513static inline long interrupt_not_for_us(ctlr_info_t *h)
2484{ 2514{
2485#ifdef CONFIG_CISS_SCSI_TAPE 2515#ifdef CONFIG_CISS_SCSI_TAPE
2486 return (((h->access.intr_pending(h) == 0) || 2516 return (((h->access.intr_pending(h) == 0) ||
2487 (h->interrupts_enabled == 0)) 2517 (h->interrupts_enabled == 0))
2488 && (h->scsi_rejects.ncompletions == 0)); 2518 && (h->scsi_rejects.ncompletions == 0));
2489#else 2519#else
2490 return (((h->access.intr_pending(h) == 0) || 2520 return (((h->access.intr_pending(h) == 0) ||
2491 (h->interrupts_enabled == 0))); 2521 (h->interrupts_enabled == 0)));
2492#endif 2522#endif
2493} 2523}
@@ -2509,12 +2539,14 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2509 */ 2539 */
2510 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2540 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2511 while (interrupt_pending(h)) { 2541 while (interrupt_pending(h)) {
2512 while((a = get_next_completion(h)) != FIFO_EMPTY) { 2542 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2513 a1 = a; 2543 a1 = a;
2514 if ((a & 0x04)) { 2544 if ((a & 0x04)) {
2515 a2 = (a >> 3); 2545 a2 = (a >> 3);
2516 if (a2 >= NR_CMDS) { 2546 if (a2 >= NR_CMDS) {
2517 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr); 2547 printk(KERN_WARNING
2548 "cciss: controller cciss%d failed, stopping.\n",
2549 h->ctlr);
2518 fail_all_cmds(h->ctlr); 2550 fail_all_cmds(h->ctlr);
2519 return IRQ_HANDLED; 2551 return IRQ_HANDLED;
2520 } 2552 }
@@ -2523,22 +2555,24 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2523 a = c->busaddr; 2555 a = c->busaddr;
2524 2556
2525 } else { 2557 } else {
2526 a &= ~3; 2558 a &= ~3;
2527 if ((c = h->cmpQ) == NULL) { 2559 if ((c = h->cmpQ) == NULL) {
2528 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1); 2560 printk(KERN_WARNING
2529 continue; 2561 "cciss: Completion of %08x ignored\n",
2530 } 2562 a1);
2531 while(c->busaddr != a) { 2563 continue;
2532 c = c->next; 2564 }
2533 if (c == h->cmpQ) 2565 while (c->busaddr != a) {
2534 break; 2566 c = c->next;
2535 } 2567 if (c == h->cmpQ)
2568 break;
2569 }
2536 } 2570 }
2537 /* 2571 /*
2538 * If we've found the command, take it off the 2572 * If we've found the command, take it off the
2539 * completion Q and free it 2573 * completion Q and free it
2540 */ 2574 */
2541 if (c->busaddr == a) { 2575 if (c->busaddr == a) {
2542 removeQ(&h->cmpQ, c); 2576 removeQ(&h->cmpQ, c);
2543 if (c->cmd_type == CMD_RWREQ) { 2577 if (c->cmd_type == CMD_RWREQ) {
2544 complete_command(h, c, 0); 2578 complete_command(h, c, 0);
@@ -2554,130 +2588,118 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2554 } 2588 }
2555 } 2589 }
2556 2590
2557 /* check to see if we have maxed out the number of commands that can 2591 /* check to see if we have maxed out the number of commands that can
2558 * be placed on the queue. If so then exit. We do this check here 2592 * be placed on the queue. If so then exit. We do this check here
2559 * in case the interrupt we serviced was from an ioctl and did not 2593 * in case the interrupt we serviced was from an ioctl and did not
2560 * free any new commands. 2594 * free any new commands.
2595 */
2596 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2597 goto cleanup;
2598
2599 /* We have room on the queue for more commands. Now we need to queue
2600 * them up. We will also keep track of the next queue to run so
2601 * that every queue gets a chance to be started first.
2561 */ 2602 */
2562 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) 2603 for (j = 0; j < h->highest_lun + 1; j++) {
2563 goto cleanup;
2564
2565 /* We have room on the queue for more commands. Now we need to queue
2566 * them up. We will also keep track of the next queue to run so
2567 * that every queue gets a chance to be started first.
2568 */
2569 for (j=0; j < h->highest_lun + 1; j++){
2570 int curr_queue = (start_queue + j) % (h->highest_lun + 1); 2604 int curr_queue = (start_queue + j) % (h->highest_lun + 1);
2571 /* make sure the disk has been added and the drive is real 2605 /* make sure the disk has been added and the drive is real
2572 * because this can be called from the middle of init_one. 2606 * because this can be called from the middle of init_one.
2573 */ 2607 */
2574 if(!(h->drv[curr_queue].queue) || 2608 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
2575 !(h->drv[curr_queue].heads)) 2609 continue;
2576 continue; 2610 blk_start_queue(h->gendisk[curr_queue]->queue);
2577 blk_start_queue(h->gendisk[curr_queue]->queue); 2611
2578 2612 /* check to see if we have maxed out the number of commands
2579 /* check to see if we have maxed out the number of commands 2613 * that can be placed on the queue.
2580 * that can be placed on the queue. 2614 */
2581 */ 2615 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
2582 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) 2616 if (curr_queue == start_queue) {
2583 { 2617 h->next_to_run =
2584 if (curr_queue == start_queue){ 2618 (start_queue + 1) % (h->highest_lun + 1);
2585 h->next_to_run = (start_queue + 1) % (h->highest_lun + 1); 2619 goto cleanup;
2586 goto cleanup; 2620 } else {
2587 } else { 2621 h->next_to_run = curr_queue;
2588 h->next_to_run = curr_queue; 2622 goto cleanup;
2589 goto cleanup; 2623 }
2590 } 2624 } else {
2591 } else {
2592 curr_queue = (curr_queue + 1) % (h->highest_lun + 1); 2625 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
2593 } 2626 }
2594 } 2627 }
2595 2628
2596cleanup: 2629 cleanup:
2597 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2630 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2598 return IRQ_HANDLED; 2631 return IRQ_HANDLED;
2599} 2632}
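
The interrupt handler restarts the per-volume block queues in round-robin order starting at next_to_run, so one busy logical drive cannot starve the others when the command pool runs dry. A sketch of that scheduling decision, with placeholder hooks standing in for the command-pool check and blk_start_queue():

        /* round-robin restart of per-volume queues, starting after the last
         * one serviced; kick() and pool_exhausted() are placeholders.
         * Returns the queue to start from on the next interrupt. */
        static int run_queues(int start_queue, int nqueues,
                              int (*pool_exhausted)(void), void (*kick)(int q))
        {
                int j;

                for (j = 0; j < nqueues; j++) {
                        int curr = (start_queue + j) % nqueues;

                        kick(curr);             /* let this volume issue I/O */
                        if (pool_exhausted()) {
                                /* out of command blocks: remember where to
                                 * resume so every queue gets a turn */
                                if (curr == start_queue)
                                        return (start_queue + 1) % nqueues;
                                return curr;
                        }
                }
                return start_queue;
        }
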
2600/* 2633
2601 * We cannot read the structure directly, for portablity we must use 2634/*
2635 * We cannot read the structure directly, for portability we must use
2602 * the io functions. 2636 * the io functions.
2603 * This is for debug only. 2637 * This is for debug only.
2604 */ 2638 */
2605#ifdef CCISS_DEBUG 2639#ifdef CCISS_DEBUG
2606static void print_cfg_table( CfgTable_struct *tb) 2640static void print_cfg_table(CfgTable_struct *tb)
2607{ 2641{
2608 int i; 2642 int i;
2609 char temp_name[17]; 2643 char temp_name[17];
2610 2644
2611 printk("Controller Configuration information\n"); 2645 printk("Controller Configuration information\n");
2612 printk("------------------------------------\n"); 2646 printk("------------------------------------\n");
2613 for(i=0;i<4;i++) 2647 for (i = 0; i < 4; i++)
2614 temp_name[i] = readb(&(tb->Signature[i])); 2648 temp_name[i] = readb(&(tb->Signature[i]));
2615 temp_name[4]='\0'; 2649 temp_name[4] = '\0';
2616 printk(" Signature = %s\n", temp_name); 2650 printk(" Signature = %s\n", temp_name);
2617 printk(" Spec Number = %d\n", readl(&(tb->SpecValence))); 2651 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2618 printk(" Transport methods supported = 0x%x\n", 2652 printk(" Transport methods supported = 0x%x\n",
2619 readl(&(tb-> TransportSupport))); 2653 readl(&(tb->TransportSupport)));
2620 printk(" Transport methods active = 0x%x\n", 2654 printk(" Transport methods active = 0x%x\n",
2621 readl(&(tb->TransportActive))); 2655 readl(&(tb->TransportActive)));
2622 printk(" Requested transport Method = 0x%x\n", 2656 printk(" Requested transport Method = 0x%x\n",
2623 readl(&(tb->HostWrite.TransportRequest))); 2657 readl(&(tb->HostWrite.TransportRequest)));
2624 printk(" Coalese Interrupt Delay = 0x%x\n", 2658 printk(" Coalesce Interrupt Delay = 0x%x\n",
2625 readl(&(tb->HostWrite.CoalIntDelay))); 2659 readl(&(tb->HostWrite.CoalIntDelay)));
2626 printk(" Coalese Interrupt Count = 0x%x\n", 2660 printk(" Coalesce Interrupt Count = 0x%x\n",
2627 readl(&(tb->HostWrite.CoalIntCount))); 2661 readl(&(tb->HostWrite.CoalIntCount)));
2628 printk(" Max outstanding commands = 0x%d\n", 2662 printk(" Max outstanding commands = 0x%d\n",
2629 readl(&(tb->CmdsOutMax))); 2663 readl(&(tb->CmdsOutMax)));
2630 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes))); 2664 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2631 for(i=0;i<16;i++) 2665 for (i = 0; i < 16; i++)
2632 temp_name[i] = readb(&(tb->ServerName[i])); 2666 temp_name[i] = readb(&(tb->ServerName[i]));
2633 temp_name[16] = '\0'; 2667 temp_name[16] = '\0';
2634 printk(" Server Name = %s\n", temp_name); 2668 printk(" Server Name = %s\n", temp_name);
2635 printk(" Heartbeat Counter = 0x%x\n\n\n", 2669 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2636 readl(&(tb->HeartBeat)));
2637}
2638#endif /* CCISS_DEBUG */
2639
2640static void release_io_mem(ctlr_info_t *c)
2641{
2642 /* if IO mem was not protected do nothing */
2643 if( c->io_mem_addr == 0)
2644 return;
2645 release_region(c->io_mem_addr, c->io_mem_length);
2646 c->io_mem_addr = 0;
2647 c->io_mem_length = 0;
2648} 2670}
2671#endif /* CCISS_DEBUG */
2649 2672
2650static int find_PCI_BAR_index(struct pci_dev *pdev, 2673static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2651 unsigned long pci_bar_addr)
2652{ 2674{
2653 int i, offset, mem_type, bar_type; 2675 int i, offset, mem_type, bar_type;
2654 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ 2676 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2655 return 0; 2677 return 0;
2656 offset = 0; 2678 offset = 0;
2657 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) { 2679 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2658 bar_type = pci_resource_flags(pdev, i) & 2680 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2659 PCI_BASE_ADDRESS_SPACE;
2660 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) 2681 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2661 offset += 4; 2682 offset += 4;
2662 else { 2683 else {
2663 mem_type = pci_resource_flags(pdev, i) & 2684 mem_type = pci_resource_flags(pdev, i) &
2664 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 2685 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2665 switch (mem_type) { 2686 switch (mem_type) {
2666 case PCI_BASE_ADDRESS_MEM_TYPE_32: 2687 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2667 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 2688 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2668 offset += 4; /* 32 bit */ 2689 offset += 4; /* 32 bit */
2669 break; 2690 break;
2670 case PCI_BASE_ADDRESS_MEM_TYPE_64: 2691 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2671 offset += 8; 2692 offset += 8;
2672 break; 2693 break;
2673 default: /* reserved in PCI 2.2 */ 2694 default: /* reserved in PCI 2.2 */
2674 printk(KERN_WARNING "Base address is invalid\n"); 2695 printk(KERN_WARNING
2675 return -1; 2696 "Base address is invalid\n");
2697 return -1;
2676 break; 2698 break;
2677 } 2699 }
2678 } 2700 }
2679 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) 2701 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2680 return i+1; 2702 return i + 1;
2681 } 2703 }
2682 return -1; 2704 return -1;
2683} 2705}
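
find_PCI_BAR_index() works because BARs are laid out contiguously in config space: I/O and 32-bit memory BARs occupy 4 bytes each, 64-bit memory BARs occupy 8, so summing sizes until the target offset is reached yields the resource index. A sketch of that accounting, independent of the PCI API:

        /* map a config-space BAR offset back to a resource index; each BAR
         * is 4 bytes except 64-bit memory BARs, which take 8 */
        enum bar_kind { BAR_IO, BAR_MEM32, BAR_MEM64 };

        static int bar_index_from_offset(const enum bar_kind *bars, int nbars,
                                         int wanted_offset)
        {
                int i, offset = 0;

                if (wanted_offset == 0)         /* BAR 0 always sits first */
                        return 0;
                for (i = 0; i < nbars; i++) {
                        offset += (bars[i] == BAR_MEM64) ? 8 : 4;
                        if (offset == wanted_offset)
                                return i + 1;
                }
                return -1;                      /* offset never matched */
        }
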
@@ -2686,53 +2708,54 @@ static int find_PCI_BAR_index(struct pci_dev *pdev,
2686 * controllers that are capable. If not, we use IO-APIC mode. 2708 * controllers that are capable. If not, we use IO-APIC mode.
2687 */ 2709 */
2688 2710
2689static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id) 2711static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2712 struct pci_dev *pdev, __u32 board_id)
2690{ 2713{
2691#ifdef CONFIG_PCI_MSI 2714#ifdef CONFIG_PCI_MSI
2692 int err; 2715 int err;
2693 struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1}, 2716 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2694 {0,2}, {0,3}}; 2717 {0, 2}, {0, 3}
2718 };
2695 2719
2696 /* Some boards advertise MSI but don't really support it */ 2720 /* Some boards advertise MSI but don't really support it */
2697 if ((board_id == 0x40700E11) || 2721 if ((board_id == 0x40700E11) ||
2698 (board_id == 0x40800E11) || 2722 (board_id == 0x40800E11) ||
2699 (board_id == 0x40820E11) || 2723 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2700 (board_id == 0x40830E11))
2701 goto default_int_mode; 2724 goto default_int_mode;
2702 2725
2703 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { 2726 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2704 err = pci_enable_msix(pdev, cciss_msix_entries, 4); 2727 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2705 if (!err) { 2728 if (!err) {
2706 c->intr[0] = cciss_msix_entries[0].vector; 2729 c->intr[0] = cciss_msix_entries[0].vector;
2707 c->intr[1] = cciss_msix_entries[1].vector; 2730 c->intr[1] = cciss_msix_entries[1].vector;
2708 c->intr[2] = cciss_msix_entries[2].vector; 2731 c->intr[2] = cciss_msix_entries[2].vector;
2709 c->intr[3] = cciss_msix_entries[3].vector; 2732 c->intr[3] = cciss_msix_entries[3].vector;
2710 c->msix_vector = 1; 2733 c->msix_vector = 1;
2711 return; 2734 return;
2712 } 2735 }
2713 if (err > 0) { 2736 if (err > 0) {
2714 printk(KERN_WARNING "cciss: only %d MSI-X vectors " 2737 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2715 "available\n", err); 2738 "available\n", err);
2716 } else { 2739 } else {
2717 printk(KERN_WARNING "cciss: MSI-X init failed %d\n", 2740 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2718 err); 2741 err);
2719 } 2742 }
2720 } 2743 }
2721 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) { 2744 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2722 if (!pci_enable_msi(pdev)) { 2745 if (!pci_enable_msi(pdev)) {
2723 c->intr[SIMPLE_MODE_INT] = pdev->irq; 2746 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2724 c->msi_vector = 1; 2747 c->msi_vector = 1;
2725 return; 2748 return;
2726 } else { 2749 } else {
2727 printk(KERN_WARNING "cciss: MSI init failed\n"); 2750 printk(KERN_WARNING "cciss: MSI init failed\n");
2728 c->intr[SIMPLE_MODE_INT] = pdev->irq; 2751 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2729 return; 2752 return;
2730 } 2753 }
2731 } 2754 }
2732default_int_mode: 2755 default_int_mode:
2733#endif /* CONFIG_PCI_MSI */ 2756#endif /* CONFIG_PCI_MSI */
2734 /* if we get here we're going to use the default interrupt mode */ 2757 /* if we get here we're going to use the default interrupt mode */
2735 c->intr[SIMPLE_MODE_INT] = pdev->irq; 2758 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2736 return; 2759 return;
2737} 2760}
2738 2761
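
cciss_interrupt_mode() tries the interrupt mechanisms in order of preference: MSI-X with four vectors, then plain MSI, then the legacy line interrupt; a short board-id blacklist skips controllers that advertise MSI but do not implement it. A sketch of the fallback order only (the enable hooks are placeholders, not the real pci_enable_msix()/pci_enable_msi() signatures):

        enum int_mode { MODE_MSIX, MODE_MSI, MODE_INTX };

        /* pick the best interrupt mode the device and platform support */
        static enum int_mode pick_int_mode(int has_msix, int has_msi,
                                           int (*enable_msix)(void),
                                           int (*enable_msi)(void))
        {
                if (has_msix && enable_msix() == 0)
                        return MODE_MSIX;       /* got all four vectors */
                if (has_msi && enable_msi() == 0)
                        return MODE_MSI;        /* single MSI vector */
                return MODE_INTX;               /* legacy IO-APIC/line interrupt */
        }
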
@@ -2743,58 +2766,40 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2743 __u64 cfg_offset; 2766 __u64 cfg_offset;
2744 __u32 cfg_base_addr; 2767 __u32 cfg_base_addr;
2745 __u64 cfg_base_addr_index; 2768 __u64 cfg_base_addr_index;
2746 int i; 2769 int i, err;
2747 2770
2748 /* check to see if controller has been disabled */ 2771 /* check to see if controller has been disabled */
2749 /* BEFORE trying to enable it */ 2772 /* BEFORE trying to enable it */
2750 (void) pci_read_config_word(pdev, PCI_COMMAND,&command); 2773 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2751 if(!(command & 0x02)) 2774 if (!(command & 0x02)) {
2752 { 2775 printk(KERN_WARNING
2753 printk(KERN_WARNING "cciss: controller appears to be disabled\n"); 2776 "cciss: controller appears to be disabled\n");
2754 return(-1); 2777 return -ENODEV;
2755 } 2778 }
2756 2779
2757 if (pci_enable_device(pdev)) 2780 err = pci_enable_device(pdev);
2758 { 2781 if (err) {
2759 printk(KERN_ERR "cciss: Unable to Enable PCI device\n"); 2782 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2760 return( -1); 2783 return err;
2784 }
2785
2786 err = pci_request_regions(pdev, "cciss");
2787 if (err) {
2788 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2789 "aborting\n");
2790 goto err_out_disable_pdev;
2761 } 2791 }
2762 2792
2763 subsystem_vendor_id = pdev->subsystem_vendor; 2793 subsystem_vendor_id = pdev->subsystem_vendor;
2764 subsystem_device_id = pdev->subsystem_device; 2794 subsystem_device_id = pdev->subsystem_device;
2765 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | 2795 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2766 subsystem_vendor_id); 2796 subsystem_vendor_id);
2767
2768 /* search for our IO range so we can protect it */
2769 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2770 {
2771 /* is this an IO range */
2772 if( pci_resource_flags(pdev, i) & 0x01 ) {
2773 c->io_mem_addr = pci_resource_start(pdev, i);
2774 c->io_mem_length = pci_resource_end(pdev, i) -
2775 pci_resource_start(pdev, i) +1;
2776#ifdef CCISS_DEBUG
2777 printk("IO value found base_addr[%d] %lx %lx\n", i,
2778 c->io_mem_addr, c->io_mem_length);
2779#endif /* CCISS_DEBUG */
2780 /* register the IO range */
2781 if(!request_region( c->io_mem_addr,
2782 c->io_mem_length, "cciss"))
2783 {
2784 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2785 c->io_mem_addr, c->io_mem_length);
2786 c->io_mem_addr= 0;
2787 c->io_mem_length = 0;
2788 }
2789 break;
2790 }
2791 }
2792 2797
2793#ifdef CCISS_DEBUG 2798#ifdef CCISS_DEBUG
2794 printk("command = %x\n", command); 2799 printk("command = %x\n", command);
2795 printk("irq = %x\n", pdev->irq); 2800 printk("irq = %x\n", pdev->irq);
2796 printk("board_id = %x\n", board_id); 2801 printk("board_id = %x\n", board_id);
2797#endif /* CCISS_DEBUG */ 2802#endif /* CCISS_DEBUG */
2798 2803
2799/* If the kernel supports MSI/MSI-X we will try to enable that functionality, 2804/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2800 * else we use the IO-APIC interrupt assigned to us by system ROM. 2805 * else we use the IO-APIC interrupt assigned to us by system ROM.
@@ -2803,27 +2808,28 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2803 2808
2804 /* 2809 /*
2805 * Memory base addr is first addr , the second points to the config 2810 * Memory base addr is first addr , the second points to the config
2806 * table 2811 * table
2807 */ 2812 */
2808 2813
2809 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */ 2814 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2810#ifdef CCISS_DEBUG 2815#ifdef CCISS_DEBUG
2811 printk("address 0 = %x\n", c->paddr); 2816 printk("address 0 = %x\n", c->paddr);
2812#endif /* CCISS_DEBUG */ 2817#endif /* CCISS_DEBUG */
2813 c->vaddr = remap_pci_mem(c->paddr, 200); 2818 c->vaddr = remap_pci_mem(c->paddr, 200);
2814 2819
2815 /* Wait for the board to become ready. (PCI hotplug needs this.) 2820 /* Wait for the board to become ready. (PCI hotplug needs this.)
2816 * We poll for up to 120 secs, once per 100ms. */ 2821 * We poll for up to 120 secs, once per 100ms. */
2817 for (i=0; i < 1200; i++) { 2822 for (i = 0; i < 1200; i++) {
2818 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET); 2823 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2819 if (scratchpad == CCISS_FIRMWARE_READY) 2824 if (scratchpad == CCISS_FIRMWARE_READY)
2820 break; 2825 break;
2821 set_current_state(TASK_INTERRUPTIBLE); 2826 set_current_state(TASK_INTERRUPTIBLE);
2822 schedule_timeout(HZ / 10); /* wait 100ms */ 2827 schedule_timeout(HZ / 10); /* wait 100ms */
2823 } 2828 }
2824 if (scratchpad != CCISS_FIRMWARE_READY) { 2829 if (scratchpad != CCISS_FIRMWARE_READY) {
2825 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); 2830 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2826 return -1; 2831 err = -ENODEV;
2832 goto err_out_free_res;
2827 } 2833 }
2828 2834
2829 /* get the address index number */ 2835 /* get the address index number */
@@ -2831,103 +2837,108 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2831 cfg_base_addr &= (__u32) 0x0000ffff; 2837 cfg_base_addr &= (__u32) 0x0000ffff;
2832#ifdef CCISS_DEBUG 2838#ifdef CCISS_DEBUG
2833 printk("cfg base address = %x\n", cfg_base_addr); 2839 printk("cfg base address = %x\n", cfg_base_addr);
2834#endif /* CCISS_DEBUG */ 2840#endif /* CCISS_DEBUG */
2835 cfg_base_addr_index = 2841 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2836 find_PCI_BAR_index(pdev, cfg_base_addr);
2837#ifdef CCISS_DEBUG 2842#ifdef CCISS_DEBUG
2838 printk("cfg base address index = %x\n", cfg_base_addr_index); 2843 printk("cfg base address index = %x\n", cfg_base_addr_index);
2839#endif /* CCISS_DEBUG */ 2844#endif /* CCISS_DEBUG */
2840 if (cfg_base_addr_index == -1) { 2845 if (cfg_base_addr_index == -1) {
2841 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); 2846 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2842 release_io_mem(c); 2847 err = -ENODEV;
2843 return -1; 2848 goto err_out_free_res;
2844 } 2849 }
2845 2850
2846 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); 2851 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2847#ifdef CCISS_DEBUG 2852#ifdef CCISS_DEBUG
2848 printk("cfg offset = %x\n", cfg_offset); 2853 printk("cfg offset = %x\n", cfg_offset);
2849#endif /* CCISS_DEBUG */ 2854#endif /* CCISS_DEBUG */
2850 c->cfgtable = remap_pci_mem(pci_resource_start(pdev, 2855 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2851 cfg_base_addr_index) + cfg_offset, 2856 cfg_base_addr_index) +
2852 sizeof(CfgTable_struct)); 2857 cfg_offset, sizeof(CfgTable_struct));
2853 c->board_id = board_id; 2858 c->board_id = board_id;
2854 2859
2855#ifdef CCISS_DEBUG 2860#ifdef CCISS_DEBUG
2856 print_cfg_table(c->cfgtable); 2861 print_cfg_table(c->cfgtable);
2857#endif /* CCISS_DEBUG */ 2862#endif /* CCISS_DEBUG */
2858 2863
2859 for(i=0; i<NR_PRODUCTS; i++) { 2864 for (i = 0; i < ARRAY_SIZE(products); i++) {
2860 if (board_id == products[i].board_id) { 2865 if (board_id == products[i].board_id) {
2861 c->product_name = products[i].product_name; 2866 c->product_name = products[i].product_name;
2862 c->access = *(products[i].access); 2867 c->access = *(products[i].access);
2863 break; 2868 break;
2864 } 2869 }
2865 } 2870 }
2866 if (i == NR_PRODUCTS) { 2871 if (i == ARRAY_SIZE(products)) {
2867 printk(KERN_WARNING "cciss: Sorry, I don't know how" 2872 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2868 " to access the Smart Array controller %08lx\n", 2873 " to access the Smart Array controller %08lx\n",
2869 (unsigned long)board_id); 2874 (unsigned long)board_id);
2870 return -1; 2875 err = -ENODEV;
2871 } 2876 goto err_out_free_res;
2872 if ( (readb(&c->cfgtable->Signature[0]) != 'C') || 2877 }
2873 (readb(&c->cfgtable->Signature[1]) != 'I') || 2878 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2874 (readb(&c->cfgtable->Signature[2]) != 'S') || 2879 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2875 (readb(&c->cfgtable->Signature[3]) != 'S') ) 2880 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2876 { 2881 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2877 printk("Does not appear to be a valid CISS config table\n"); 2882 printk("Does not appear to be a valid CISS config table\n");
2878 return -1; 2883 err = -ENODEV;
2884 goto err_out_free_res;
2879 } 2885 }
2880
2881#ifdef CONFIG_X86 2886#ifdef CONFIG_X86
2882{ 2887 {
2883 /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 2888 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2884 __u32 prefetch; 2889 __u32 prefetch;
2885 prefetch = readl(&(c->cfgtable->SCSI_Prefetch)); 2890 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2886 prefetch |= 0x100; 2891 prefetch |= 0x100;
2887 writel(prefetch, &(c->cfgtable->SCSI_Prefetch)); 2892 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2888} 2893 }
2889#endif 2894#endif
2890 2895
2891#ifdef CCISS_DEBUG 2896#ifdef CCISS_DEBUG
2892 printk("Trying to put board into Simple mode\n"); 2897 printk("Trying to put board into Simple mode\n");
2893#endif /* CCISS_DEBUG */ 2898#endif /* CCISS_DEBUG */
2894 c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); 2899 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2895 /* Update the field, and then ring the doorbell */ 2900 /* Update the field, and then ring the doorbell */
2896 writel( CFGTBL_Trans_Simple, 2901 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2897 &(c->cfgtable->HostWrite.TransportRequest)); 2902 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2898 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2899 2903
2900 /* under certain very rare conditions, this can take awhile. 2904 /* under certain very rare conditions, this can take awhile.
2901 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 2905 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2902 * as we enter this code.) */ 2906 * as we enter this code.) */
2903 for(i=0;i<MAX_CONFIG_WAIT;i++) { 2907 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
2904 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) 2908 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2905 break; 2909 break;
2906 /* delay and try again */ 2910 /* delay and try again */
2907 set_current_state(TASK_INTERRUPTIBLE); 2911 set_current_state(TASK_INTERRUPTIBLE);
2908 schedule_timeout(10); 2912 schedule_timeout(10);
2909 } 2913 }
2910 2914
2911#ifdef CCISS_DEBUG 2915#ifdef CCISS_DEBUG
2912 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL)); 2916 printk(KERN_DEBUG "I counter got to %d %x\n", i,
2913#endif /* CCISS_DEBUG */ 2917 readl(c->vaddr + SA5_DOORBELL));
2918#endif /* CCISS_DEBUG */
2914#ifdef CCISS_DEBUG 2919#ifdef CCISS_DEBUG
2915 print_cfg_table(c->cfgtable); 2920 print_cfg_table(c->cfgtable);
2916#endif /* CCISS_DEBUG */ 2921#endif /* CCISS_DEBUG */
2917 2922
2918 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) 2923 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
2919 {
2920 printk(KERN_WARNING "cciss: unable to get board into" 2924 printk(KERN_WARNING "cciss: unable to get board into"
2921 " simple mode\n"); 2925 " simple mode\n");
2922 return -1; 2926 err = -ENODEV;
2927 goto err_out_free_res;
2923 } 2928 }
2924 return 0; 2929 return 0;
2925 2930
2931 err_out_free_res:
2932 pci_release_regions(pdev);
2933
2934 err_out_disable_pdev:
2935 pci_disable_device(pdev);
2936 return err;
2926} 2937}
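
The reworked cciss_pci_init() adopts the usual acquire-in-order, release-in-reverse error unwind: a pci_request_regions() failure falls through to pci_disable_device(), and later failures release the regions first. A generic sketch of that pattern with placeholder hooks:

        /* acquire in order, release in reverse on failure; the hooks stand in
         * for pci_enable_device()/pci_request_regions() and their undo calls */
        static int init_with_unwind(int (*enable_dev)(void),
                                    void (*disable_dev)(void),
                                    int (*request_regions)(void),
                                    void (*release_regions)(void),
                                    int (*bring_up_board)(void))
        {
                int err;

                err = enable_dev();
                if (err)
                        return err;

                err = request_regions();
                if (err)
                        goto err_out_disable;

                err = bring_up_board();
                if (err)
                        goto err_out_release;

                return 0;

        err_out_release:
                release_regions();      /* undo the region reservation */
        err_out_disable:
                disable_dev();          /* undo the device enable */
                return err;
        }

Keeping the labels in reverse acquisition order means each additional resource only needs one extra goto target.
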
2927 2938
2928/* 2939/*
2929 * Gets information about the local volumes attached to the controller. 2940 * Gets information about the local volumes attached to the controller.
2930 */ 2941 */
2931static void cciss_getgeometry(int cntl_num) 2942static void cciss_getgeometry(int cntl_num)
2932{ 2943{
2933 ReportLunData_struct *ld_buff; 2944 ReportLunData_struct *ld_buff;
@@ -2938,102 +2949,102 @@ static void cciss_getgeometry(int cntl_num)
2938 int listlength = 0; 2949 int listlength = 0;
2939 __u32 lunid = 0; 2950 __u32 lunid = 0;
2940 int block_size; 2951 int block_size;
2941 int total_size; 2952 int total_size;
2942 2953
2943 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); 2954 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2944 if (ld_buff == NULL) 2955 if (ld_buff == NULL) {
2945 { 2956 printk(KERN_ERR "cciss: out of memory\n");
2957 return;
2958 }
2959 size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2960 if (size_buff == NULL) {
2946 printk(KERN_ERR "cciss: out of memory\n"); 2961 printk(KERN_ERR "cciss: out of memory\n");
2962 kfree(ld_buff);
2947 return; 2963 return;
2948 } 2964 }
2949 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL); 2965 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2950 if (size_buff == NULL) 2966 if (inq_buff == NULL) {
2951 { 2967 printk(KERN_ERR "cciss: out of memory\n");
2952 printk(KERN_ERR "cciss: out of memory\n");
2953 kfree(ld_buff); 2968 kfree(ld_buff);
2954 return;
2955 }
2956 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2957 if (inq_buff == NULL)
2958 {
2959 printk(KERN_ERR "cciss: out of memory\n");
2960 kfree(ld_buff);
2961 kfree(size_buff); 2969 kfree(size_buff);
2962 return; 2970 return;
2963 } 2971 }
2964 /* Get the firmware version */ 2972 /* Get the firmware version */
2965 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff, 2973 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2966 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD); 2974 sizeof(InquiryData_struct), 0, 0, 0, NULL,
2967 if (return_code == IO_OK) 2975 TYPE_CMD);
2968 { 2976 if (return_code == IO_OK) {
2969 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32]; 2977 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2970 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33]; 2978 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2971 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34]; 2979 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2972 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35]; 2980 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2973 } else /* send command failed */ 2981 } else { /* send command failed */
2974 { 2982
2975 printk(KERN_WARNING "cciss: unable to determine firmware" 2983 printk(KERN_WARNING "cciss: unable to determine firmware"
2976 " version of controller\n"); 2984 " version of controller\n");
2977 } 2985 }
2978 /* Get the number of logical volumes */ 2986 /* Get the number of logical volumes */
2979 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff, 2987 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2980 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD); 2988 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
2989 TYPE_CMD);
2981 2990
2982 if( return_code == IO_OK) 2991 if (return_code == IO_OK) {
2983 {
2984#ifdef CCISS_DEBUG 2992#ifdef CCISS_DEBUG
2985 printk("LUN Data\n--------------------------\n"); 2993 printk("LUN Data\n--------------------------\n");
2986#endif /* CCISS_DEBUG */ 2994#endif /* CCISS_DEBUG */
2987 2995
2988 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24; 2996 listlength |=
2989 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16; 2997 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2990 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8; 2998 listlength |=
2999 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3000 listlength |=
3001 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2991 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]); 3002 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2992 } else /* reading number of logical volumes failed */ 3003 } else { /* reading number of logical volumes failed */
2993 { 3004
2994 printk(KERN_WARNING "cciss: report logical volume" 3005 printk(KERN_WARNING "cciss: report logical volume"
2995 " command failed\n"); 3006 " command failed\n");
2996 listlength = 0; 3007 listlength = 0;
2997 } 3008 }
 2998 	hba[cntl_num]->num_luns = listlength / 8;	// 8 bytes per entry 3009 	hba[cntl_num]->num_luns = listlength / 8;	// 8 bytes per entry
2999 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) 3010 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3000 { 3011 printk(KERN_ERR
3001 printk(KERN_ERR "ciss: only %d number of logical volumes supported\n", 3012 "ciss: only %d number of logical volumes supported\n",
3002 CISS_MAX_LUN); 3013 CISS_MAX_LUN);
3003 hba[cntl_num]->num_luns = CISS_MAX_LUN; 3014 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3004 } 3015 }
3005#ifdef CCISS_DEBUG 3016#ifdef CCISS_DEBUG
3006 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0], 3017 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3007 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2], 3018 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3008 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns); 3019 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3009#endif /* CCISS_DEBUG */ 3020 hba[cntl_num]->num_luns);
3010 3021#endif /* CCISS_DEBUG */
3011 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1; 3022
3012// for(i=0; i< hba[cntl_num]->num_luns; i++) 3023 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3013 for(i=0; i < CISS_MAX_LUN; i++) 3024// for(i=0; i< hba[cntl_num]->num_luns; i++)
3014 { 3025 for (i = 0; i < CISS_MAX_LUN; i++) {
3015 if (i < hba[cntl_num]->num_luns){ 3026 if (i < hba[cntl_num]->num_luns) {
3016 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) 3027 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3017 << 24; 3028 << 24;
3018 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) 3029 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3019 << 16; 3030 << 16;
3020 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) 3031 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3021 << 8; 3032 << 8;
3022 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]); 3033 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3023 3034
3024 hba[cntl_num]->drv[i].LunID = lunid; 3035 hba[cntl_num]->drv[i].LunID = lunid;
3025
3026 3036
3027#ifdef CCISS_DEBUG 3037#ifdef CCISS_DEBUG
3028 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i, 3038 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3029 ld_buff->LUN[i][0], ld_buff->LUN[i][1], 3039 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3030 ld_buff->LUN[i][2], ld_buff->LUN[i][3], 3040 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3031 hba[cntl_num]->drv[i].LunID); 3041 hba[cntl_num]->drv[i].LunID);
3032#endif /* CCISS_DEBUG */ 3042#endif /* CCISS_DEBUG */
3033 cciss_read_capacity(cntl_num, i, size_buff, 0, 3043 cciss_read_capacity(cntl_num, i, size_buff, 0,
3034 &total_size, &block_size); 3044 &total_size, &block_size);
3035 cciss_geometry_inquiry(cntl_num, i, 0, total_size, 3045 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3036 block_size, inq_buff, &hba[cntl_num]->drv[i]); 3046 block_size, inq_buff,
3047 &hba[cntl_num]->drv[i]);
3037 } else { 3048 } else {
3038 /* initialize raid_level to indicate a free space */ 3049 /* initialize raid_level to indicate a free space */
3039 hba[cntl_num]->drv[i].raid_level = -1; 3050 hba[cntl_num]->drv[i].raid_level = -1;
@@ -3042,7 +3053,7 @@ static void cciss_getgeometry(int cntl_num)
3042 kfree(ld_buff); 3053 kfree(ld_buff);
3043 kfree(size_buff); 3054 kfree(size_buff);
3044 kfree(inq_buff); 3055 kfree(inq_buff);
3045} 3056}
3046 3057
3047/* Function to find the first free pointer into our hba[] array */ 3058/* Function to find the first free pointer into our hba[] array */
3048/* Returns -1 if no free entries are left. */ 3059/* Returns -1 if no free entries are left. */
@@ -3056,7 +3067,7 @@ static int alloc_cciss_hba(void)
3056 goto out; 3067 goto out;
3057 } 3068 }
3058 3069
3059 for(i=0; i< MAX_CTLR; i++) { 3070 for (i = 0; i < MAX_CTLR; i++) {
3060 if (!hba[i]) { 3071 if (!hba[i]) {
3061 ctlr_info_t *p; 3072 ctlr_info_t *p;
3062 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); 3073 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
@@ -3069,11 +3080,11 @@ static int alloc_cciss_hba(void)
3069 } 3080 }
3070 } 3081 }
3071 printk(KERN_WARNING "cciss: This driver supports a maximum" 3082 printk(KERN_WARNING "cciss: This driver supports a maximum"
3072 " of %d controllers.\n", MAX_CTLR); 3083 " of %d controllers.\n", MAX_CTLR);
3073 goto out; 3084 goto out;
3074Enomem: 3085 Enomem:
3075 printk(KERN_ERR "cciss: out of memory.\n"); 3086 printk(KERN_ERR "cciss: out of memory.\n");
3076out: 3087 out:
3077 while (n--) 3088 while (n--)
3078 put_disk(disk[n]); 3089 put_disk(disk[n]);
3079 return -1; 3090 return -1;
@@ -3096,20 +3107,17 @@ static void free_hba(int i)
3096 * returns the number of block devices registered. 3107 * returns the number of block devices registered.
3097 */ 3108 */
3098static int __devinit cciss_init_one(struct pci_dev *pdev, 3109static int __devinit cciss_init_one(struct pci_dev *pdev,
3099 const struct pci_device_id *ent) 3110 const struct pci_device_id *ent)
3100{ 3111{
3101 request_queue_t *q; 3112 request_queue_t *q;
3102 int i; 3113 int i;
3103 int j; 3114 int j;
3104 int rc; 3115 int rc;
3116 int dac;
3105 3117
3106 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
3107 " bus %d dev %d func %d\n",
3108 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
3109 PCI_FUNC(pdev->devfn));
3110 i = alloc_cciss_hba(); 3118 i = alloc_cciss_hba();
3111 if(i < 0) 3119 if (i < 0)
3112 return (-1); 3120 return -1;
3113 3121
3114 hba[i]->busy_initializing = 1; 3122 hba[i]->busy_initializing = 1;
3115 3123
@@ -3122,11 +3130,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3122 3130
3123 /* configure PCI DMA stuff */ 3131 /* configure PCI DMA stuff */
3124 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) 3132 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3125 printk("cciss: using DAC cycles\n"); 3133 dac = 1;
3126 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) 3134 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3127 printk("cciss: not using DAC cycles\n"); 3135 dac = 0;
3128 else { 3136 else {
3129 printk("cciss: no suitable DMA available\n"); 3137 printk(KERN_ERR "cciss: no suitable DMA available\n");
3130 goto clean1; 3138 goto clean1;
3131 } 3139 }
3132 3140
@@ -3138,60 +3146,69 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3138 if (i < MAX_CTLR_ORIG) 3146 if (i < MAX_CTLR_ORIG)
3139 hba[i]->major = COMPAQ_CISS_MAJOR + i; 3147 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3140 rc = register_blkdev(hba[i]->major, hba[i]->devname); 3148 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3141 if(rc == -EBUSY || rc == -EINVAL) { 3149 if (rc == -EBUSY || rc == -EINVAL) {
3142 printk(KERN_ERR 3150 printk(KERN_ERR
3143 "cciss: Unable to get major number %d for %s " 3151 "cciss: Unable to get major number %d for %s "
3144 "on hba %d\n", hba[i]->major, hba[i]->devname, i); 3152 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3145 goto clean1; 3153 goto clean1;
3146 } 3154 } else {
3147 else {
3148 if (i >= MAX_CTLR_ORIG) 3155 if (i >= MAX_CTLR_ORIG)
3149 hba[i]->major = rc; 3156 hba[i]->major = rc;
3150 } 3157 }
3151 3158
3152 /* make sure the board interrupts are off */ 3159 /* make sure the board interrupts are off */
3153 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF); 3160 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3154 if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr, 3161 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3155 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, 3162 SA_INTERRUPT | SA_SHIRQ, hba[i]->devname, hba[i])) {
3156 hba[i]->devname, hba[i])) {
3157 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n", 3163 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3158 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname); 3164 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3159 goto clean2; 3165 goto clean2;
3160 } 3166 }
3161 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL); 3167
3162 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent( 3168 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3163 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), 3169 hba[i]->devname, pdev->device, pci_name(pdev),
3164 &(hba[i]->cmd_pool_dhandle)); 3170 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3165 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent( 3171
3166 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct), 3172 hba[i]->cmd_pool_bits =
3167 &(hba[i]->errinfo_pool_dhandle)); 3173 kmalloc(((NR_CMDS + BITS_PER_LONG -
3168 if((hba[i]->cmd_pool_bits == NULL) 3174 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3169 || (hba[i]->cmd_pool == NULL) 3175 hba[i]->cmd_pool = (CommandList_struct *)
3170 || (hba[i]->errinfo_pool == NULL)) { 3176 pci_alloc_consistent(hba[i]->pdev,
3171 printk( KERN_ERR "cciss: out of memory"); 3177 NR_CMDS * sizeof(CommandList_struct),
3178 &(hba[i]->cmd_pool_dhandle));
3179 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3180 pci_alloc_consistent(hba[i]->pdev,
3181 NR_CMDS * sizeof(ErrorInfo_struct),
3182 &(hba[i]->errinfo_pool_dhandle));
3183 if ((hba[i]->cmd_pool_bits == NULL)
3184 || (hba[i]->cmd_pool == NULL)
3185 || (hba[i]->errinfo_pool == NULL)) {
3186 printk(KERN_ERR "cciss: out of memory");
3172 goto clean4; 3187 goto clean4;
3173 } 3188 }
3174#ifdef CONFIG_CISS_SCSI_TAPE 3189#ifdef CONFIG_CISS_SCSI_TAPE
3175 hba[i]->scsi_rejects.complete = 3190 hba[i]->scsi_rejects.complete =
3176 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) * 3191 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3177 (NR_CMDS + 5), GFP_KERNEL); 3192 (NR_CMDS + 5), GFP_KERNEL);
3178 if (hba[i]->scsi_rejects.complete == NULL) { 3193 if (hba[i]->scsi_rejects.complete == NULL) {
3179 printk( KERN_ERR "cciss: out of memory"); 3194 printk(KERN_ERR "cciss: out of memory");
3180 goto clean4; 3195 goto clean4;
3181 } 3196 }
3182#endif 3197#endif
3183 spin_lock_init(&hba[i]->lock); 3198 spin_lock_init(&hba[i]->lock);
3184 3199
3185 /* Initialize the pdev driver private data. 3200 /* Initialize the pdev driver private data.
3186 have it point to hba[i]. */ 3201 have it point to hba[i]. */
3187 pci_set_drvdata(pdev, hba[i]); 3202 pci_set_drvdata(pdev, hba[i]);
3188 /* command and error info recs zeroed out before 3203 /* command and error info recs zeroed out before
3189 they are used */ 3204 they are used */
3190 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long)); 3205 memset(hba[i]->cmd_pool_bits, 0,
3206 ((NR_CMDS + BITS_PER_LONG -
3207 1) / BITS_PER_LONG) * sizeof(unsigned long));
3191 3208
3192#ifdef CCISS_DEBUG 3209#ifdef CCISS_DEBUG
3193 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i); 3210 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3194#endif /* CCISS_DEBUG */ 3211#endif /* CCISS_DEBUG */
3195 3212
3196 cciss_getgeometry(i); 3213 cciss_getgeometry(i);
3197 3214
@@ -3203,15 +3220,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3203 cciss_procinit(i); 3220 cciss_procinit(i);
3204 hba[i]->busy_initializing = 0; 3221 hba[i]->busy_initializing = 0;
3205 3222
3206 for(j=0; j < NWD; j++) { /* mfm */ 3223 for (j = 0; j < NWD; j++) { /* mfm */
3207 drive_info_struct *drv = &(hba[i]->drv[j]); 3224 drive_info_struct *drv = &(hba[i]->drv[j]);
3208 struct gendisk *disk = hba[i]->gendisk[j]; 3225 struct gendisk *disk = hba[i]->gendisk[j];
3209 3226
3210 q = blk_init_queue(do_cciss_request, &hba[i]->lock); 3227 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3211 if (!q) { 3228 if (!q) {
3212 printk(KERN_ERR 3229 printk(KERN_ERR
3213 "cciss: unable to allocate queue for disk %d\n", 3230 "cciss: unable to allocate queue for disk %d\n",
3214 j); 3231 j);
3215 break; 3232 break;
3216 } 3233 }
3217 drv->queue = q; 3234 drv->queue = q;
@@ -3240,92 +3257,87 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3240 disk->driverfs_dev = &pdev->dev; 3257 disk->driverfs_dev = &pdev->dev;
3241 /* we must register the controller even if no disks exist */ 3258 /* we must register the controller even if no disks exist */
3242 /* this is for the online array utilities */ 3259 /* this is for the online array utilities */
3243 if(!drv->heads && j) 3260 if (!drv->heads && j)
3244 continue; 3261 continue;
3245 blk_queue_hardsect_size(q, drv->block_size); 3262 blk_queue_hardsect_size(q, drv->block_size);
3246 set_capacity(disk, drv->nr_blocks); 3263 set_capacity(disk, drv->nr_blocks);
3247 add_disk(disk); 3264 add_disk(disk);
3248 } 3265 }
3249 3266
3250 return(1); 3267 return 1;
3251 3268
3252clean4: 3269 clean4:
3253#ifdef CONFIG_CISS_SCSI_TAPE 3270#ifdef CONFIG_CISS_SCSI_TAPE
3254 kfree(hba[i]->scsi_rejects.complete); 3271 kfree(hba[i]->scsi_rejects.complete);
3255#endif 3272#endif
3256 kfree(hba[i]->cmd_pool_bits); 3273 kfree(hba[i]->cmd_pool_bits);
3257 if(hba[i]->cmd_pool) 3274 if (hba[i]->cmd_pool)
3258 pci_free_consistent(hba[i]->pdev, 3275 pci_free_consistent(hba[i]->pdev,
3259 NR_CMDS * sizeof(CommandList_struct), 3276 NR_CMDS * sizeof(CommandList_struct),
3260 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); 3277 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3261 if(hba[i]->errinfo_pool) 3278 if (hba[i]->errinfo_pool)
3262 pci_free_consistent(hba[i]->pdev, 3279 pci_free_consistent(hba[i]->pdev,
3263 NR_CMDS * sizeof( ErrorInfo_struct), 3280 NR_CMDS * sizeof(ErrorInfo_struct),
3264 hba[i]->errinfo_pool, 3281 hba[i]->errinfo_pool,
3265 hba[i]->errinfo_pool_dhandle); 3282 hba[i]->errinfo_pool_dhandle);
3266 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); 3283 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3267clean2: 3284 clean2:
3268 unregister_blkdev(hba[i]->major, hba[i]->devname); 3285 unregister_blkdev(hba[i]->major, hba[i]->devname);
3269clean1: 3286 clean1:
3270 release_io_mem(hba[i]);
3271 hba[i]->busy_initializing = 0; 3287 hba[i]->busy_initializing = 0;
3272 free_hba(i); 3288 free_hba(i);
3273 return(-1); 3289 return -1;
3274} 3290}
3275 3291
3276static void __devexit cciss_remove_one (struct pci_dev *pdev) 3292static void __devexit cciss_remove_one(struct pci_dev *pdev)
3277{ 3293{
3278 ctlr_info_t *tmp_ptr; 3294 ctlr_info_t *tmp_ptr;
3279 int i, j; 3295 int i, j;
3280 char flush_buf[4]; 3296 char flush_buf[4];
3281 int return_code; 3297 int return_code;
3282 3298
3283 if (pci_get_drvdata(pdev) == NULL) 3299 if (pci_get_drvdata(pdev) == NULL) {
3284 { 3300 printk(KERN_ERR "cciss: Unable to remove device \n");
3285 printk( KERN_ERR "cciss: Unable to remove device \n");
3286 return; 3301 return;
3287 } 3302 }
3288 tmp_ptr = pci_get_drvdata(pdev); 3303 tmp_ptr = pci_get_drvdata(pdev);
3289 i = tmp_ptr->ctlr; 3304 i = tmp_ptr->ctlr;
3290 if (hba[i] == NULL) 3305 if (hba[i] == NULL) {
3291 {
3292 printk(KERN_ERR "cciss: device appears to " 3306 printk(KERN_ERR "cciss: device appears to "
3293 "already be removed \n"); 3307 "already be removed \n");
3294 return; 3308 return;
3295 } 3309 }
3296 /* Turn board interrupts off and send the flush cache command */ 3310 /* Turn board interrupts off and send the flush cache command */
3297 /* sendcmd will turn off interrupt, and send the flush... 3311 /* sendcmd will turn off interrupt, and send the flush...
3298 * To write all data in the battery backed cache to disks */ 3312 * To write all data in the battery backed cache to disks */
3299 memset(flush_buf, 0, 4); 3313 memset(flush_buf, 0, 4);
3300 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, 3314 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3301 TYPE_CMD); 3315 TYPE_CMD);
3302 if(return_code != IO_OK) 3316 if (return_code != IO_OK) {
3303 { 3317 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3304 printk(KERN_WARNING "Error Flushing cache on controller %d\n", 3318 i);
3305 i);
3306 } 3319 }
3307 free_irq(hba[i]->intr[2], hba[i]); 3320 free_irq(hba[i]->intr[2], hba[i]);
3308 3321
3309#ifdef CONFIG_PCI_MSI 3322#ifdef CONFIG_PCI_MSI
3310 if (hba[i]->msix_vector) 3323 if (hba[i]->msix_vector)
3311 pci_disable_msix(hba[i]->pdev); 3324 pci_disable_msix(hba[i]->pdev);
3312 else if (hba[i]->msi_vector) 3325 else if (hba[i]->msi_vector)
3313 pci_disable_msi(hba[i]->pdev); 3326 pci_disable_msi(hba[i]->pdev);
3314#endif /* CONFIG_PCI_MSI */ 3327#endif /* CONFIG_PCI_MSI */
3315 3328
3316 pci_set_drvdata(pdev, NULL);
3317 iounmap(hba[i]->vaddr); 3329 iounmap(hba[i]->vaddr);
3318 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */ 3330 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3319 unregister_blkdev(hba[i]->major, hba[i]->devname); 3331 unregister_blkdev(hba[i]->major, hba[i]->devname);
3320 remove_proc_entry(hba[i]->devname, proc_cciss); 3332 remove_proc_entry(hba[i]->devname, proc_cciss);
3321 3333
3322 /* remove it from the disk list */ 3334 /* remove it from the disk list */
3323 for (j = 0; j < NWD; j++) { 3335 for (j = 0; j < NWD; j++) {
3324 struct gendisk *disk = hba[i]->gendisk[j]; 3336 struct gendisk *disk = hba[i]->gendisk[j];
3325 if (disk) { 3337 if (disk) {
3326 request_queue_t *q = disk->queue; 3338 request_queue_t *q = disk->queue;
3327 3339
3328 if (disk->flags & GENHD_FL_UP) 3340 if (disk->flags & GENHD_FL_UP)
3329 del_gendisk(disk); 3341 del_gendisk(disk);
3330 if (q) 3342 if (q)
3331 blk_cleanup_queue(q); 3343 blk_cleanup_queue(q);
@@ -3334,26 +3346,28 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
3334 3346
3335 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), 3347 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3336 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); 3348 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3337 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct), 3349 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3338 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); 3350 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3339 kfree(hba[i]->cmd_pool_bits); 3351 kfree(hba[i]->cmd_pool_bits);
3340#ifdef CONFIG_CISS_SCSI_TAPE 3352#ifdef CONFIG_CISS_SCSI_TAPE
3341 kfree(hba[i]->scsi_rejects.complete); 3353 kfree(hba[i]->scsi_rejects.complete);
3342#endif 3354#endif
3343 release_io_mem(hba[i]); 3355 pci_release_regions(pdev);
3356 pci_disable_device(pdev);
3357 pci_set_drvdata(pdev, NULL);
3344 free_hba(i); 3358 free_hba(i);
3345} 3359}
3346 3360
3347static struct pci_driver cciss_pci_driver = { 3361static struct pci_driver cciss_pci_driver = {
3348 .name = "cciss", 3362 .name = "cciss",
3349 .probe = cciss_init_one, 3363 .probe = cciss_init_one,
3350 .remove = __devexit_p(cciss_remove_one), 3364 .remove = __devexit_p(cciss_remove_one),
3351 .id_table = cciss_pci_device_id, /* id_table */ 3365 .id_table = cciss_pci_device_id, /* id_table */
3352}; 3366};
3353 3367
3354/* 3368/*
3355 * This is it. Register the PCI driver information for the cards we control 3369 * This is it. Register the PCI driver information for the cards we control
3356 * the OS will call our registered routines when it finds one of our cards. 3370 * the OS will call our registered routines when it finds one of our cards.
3357 */ 3371 */
3358static int __init cciss_init(void) 3372static int __init cciss_init(void)
3359{ 3373{
@@ -3369,12 +3383,10 @@ static void __exit cciss_cleanup(void)
3369 3383
3370 pci_unregister_driver(&cciss_pci_driver); 3384 pci_unregister_driver(&cciss_pci_driver);
3371 /* double check that all controller entrys have been removed */ 3385 /* double check that all controller entrys have been removed */
3372 for (i=0; i< MAX_CTLR; i++) 3386 for (i = 0; i < MAX_CTLR; i++) {
3373 { 3387 if (hba[i] != NULL) {
3374 if (hba[i] != NULL)
3375 {
3376 printk(KERN_WARNING "cciss: had to remove" 3388 printk(KERN_WARNING "cciss: had to remove"
3377 " controller %d\n", i); 3389 " controller %d\n", i);
3378 cciss_remove_one(hba[i]->pdev); 3390 cciss_remove_one(hba[i]->pdev);
3379 } 3391 }
3380 } 3392 }
@@ -3389,21 +3401,21 @@ static void fail_all_cmds(unsigned long ctlr)
3389 unsigned long flags; 3401 unsigned long flags;
3390 3402
3391 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr); 3403 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3392 h->alive = 0; /* the controller apparently died... */ 3404 h->alive = 0; /* the controller apparently died... */
3393 3405
3394 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 3406 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3395 3407
3396 pci_disable_device(h->pdev); /* Make sure it is really dead. */ 3408 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3397 3409
3398 /* move everything off the request queue onto the completed queue */ 3410 /* move everything off the request queue onto the completed queue */
3399 while( (c = h->reqQ) != NULL ) { 3411 while ((c = h->reqQ) != NULL) {
3400 removeQ(&(h->reqQ), c); 3412 removeQ(&(h->reqQ), c);
3401 h->Qdepth--; 3413 h->Qdepth--;
3402 addQ (&(h->cmpQ), c); 3414 addQ(&(h->cmpQ), c);
3403 } 3415 }
3404 3416
3405 /* Now, fail everything on the completed queue with a HW error */ 3417 /* Now, fail everything on the completed queue with a HW error */
3406 while( (c = h->cmpQ) != NULL ) { 3418 while ((c = h->cmpQ) != NULL) {
3407 removeQ(&h->cmpQ, c); 3419 removeQ(&h->cmpQ, c);
3408 c->err_info->CommandStatus = CMD_HARDWARE_ERR; 3420 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3409 if (c->cmd_type == CMD_RWREQ) { 3421 if (c->cmd_type == CMD_RWREQ) {
@@ -3411,8 +3423,8 @@ static void fail_all_cmds(unsigned long ctlr)
3411 } else if (c->cmd_type == CMD_IOCTL_PEND) 3423 } else if (c->cmd_type == CMD_IOCTL_PEND)
3412 complete(c->waiting); 3424 complete(c->waiting);
3413#ifdef CONFIG_CISS_SCSI_TAPE 3425#ifdef CONFIG_CISS_SCSI_TAPE
3414 else if (c->cmd_type == CMD_SCSI) 3426 else if (c->cmd_type == CMD_SCSI)
3415 complete_scsi_command(c, 0, 0); 3427 complete_scsi_command(c, 0, 0);
3416#endif 3428#endif
3417 } 3429 }
3418 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 3430 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
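
The cciss.c hunks above fold the two DAC printk()s into one summary line keyed off a new dac flag, drop SA_SAMPLE_RANDOM from request_irq(), and retire the driver-private release_io_mem()/io_mem_* bookkeeping in favour of the generic PCI region helpers. A minimal sketch of that probe/remove shape follows; example_probe/example_remove and the error labels are illustrative names, not the driver's own code:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_probe(struct pci_dev *pdev)
{
	int dac;

	if (pci_enable_device(pdev))
		return -ENODEV;
	if (pci_request_regions(pdev, "example"))
		goto err_disable;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		dac = 1;			/* 64-bit DAC cycles available */
	else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		dac = 0;			/* fall back to 32-bit DMA */
	else
		goto err_release;		/* no usable DMA at all */

	printk(KERN_INFO "example: %s IRQ %d%s using DAC\n",
	       pci_name(pdev), pdev->irq, dac ? "" : " not");
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return -ENODEV;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);		/* replaces release_io_mem() */
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);		/* cleared last, as in cciss_remove_one() */
}

Clearing drvdata only at the end of teardown mirrors the reordering in cciss_remove_one(), where pci_set_drvdata(pdev, NULL) now follows pci_release_regions()/pci_disable_device() instead of preceding the gendisk cleanup.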
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index b24fc0553ccf..868e0d862b0d 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -60,8 +60,6 @@ struct ctlr_info
60 __u32 board_id; 60 __u32 board_id;
61 void __iomem *vaddr; 61 void __iomem *vaddr;
62 unsigned long paddr; 62 unsigned long paddr;
63 unsigned long io_mem_addr;
64 unsigned long io_mem_length;
65 CfgTable_struct __iomem *cfgtable; 63 CfgTable_struct __iomem *cfgtable;
66 int interrupts_enabled; 64 int interrupts_enabled;
67 int major; 65 int major;
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b6ea2f0c7276..5eb6fb7b5cfa 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -392,7 +392,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
392} 392}
393 393
394/* pdev is NULL for eisa */ 394/* pdev is NULL for eisa */
395static int cpqarray_register_ctlr( int i, struct pci_dev *pdev) 395static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
396{ 396{
397 request_queue_t *q; 397 request_queue_t *q;
398 int j; 398 int j;
@@ -410,8 +410,7 @@ static int cpqarray_register_ctlr( int i, struct pci_dev *pdev)
410 } 410 }
411 hba[i]->access.set_intr_mask(hba[i], 0); 411 hba[i]->access.set_intr_mask(hba[i], 0);
412 if (request_irq(hba[i]->intr, do_ida_intr, 412 if (request_irq(hba[i]->intr, do_ida_intr,
413 SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM, 413 SA_INTERRUPT|SA_SHIRQ, hba[i]->devname, hba[i]))
414 hba[i]->devname, hba[i]))
415 { 414 {
416 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n", 415 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
417 hba[i]->intr, hba[i]->devname); 416 hba[i]->intr, hba[i]->devname);
@@ -745,7 +744,7 @@ __setup("smart2=", cpqarray_setup);
745/* 744/*
746 * Find an EISA controller's signature. Set up an hba if we find it. 745 * Find an EISA controller's signature. Set up an hba if we find it.
747 */ 746 */
748static int cpqarray_eisa_detect(void) 747static int __init cpqarray_eisa_detect(void)
749{ 748{
750 int i=0, j; 749 int i=0, j;
751 __u32 board_id; 750 __u32 board_id;
@@ -1036,6 +1035,8 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
1036 1035
1037 complete_buffers(cmd->rq->bio, ok); 1036 complete_buffers(cmd->rq->bio, ok);
1038 1037
1038 add_disk_randomness(cmd->rq->rq_disk);
1039
1039 DBGPX(printk("Done with %p\n", cmd->rq);); 1040 DBGPX(printk("Done with %p\n", cmd->rq););
1040 end_that_request_last(cmd->rq, ok ? 1 : -EIO); 1041 end_that_request_last(cmd->rq, ok ? 1 : -EIO);
1041} 1042}
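
Both array drivers stop passing SA_SAMPLE_RANDOM to request_irq(); cpqarray compensates by crediting entropy once per completed disk request instead of on every interrupt. Roughly, and assuming the usual add_disk_randomness() declaration in <linux/genhd.h> for this kernel series (completion helper name below is illustrative):

#include <linux/blkdev.h>
#include <linux/genhd.h>

static void example_complete(struct request *rq, int ok)
{
	/* bios already finished via end_that_request_first() */
	add_disk_randomness(rq->rq_disk);	/* credit entropy per request */
	end_that_request_last(rq, ok ? 1 : -EIO);
}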
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3c74ea729fc7..9dc294a74953 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -74,6 +74,7 @@
74#include <linux/completion.h> 74#include <linux/completion.h>
75#include <linux/highmem.h> 75#include <linux/highmem.h>
76#include <linux/gfp.h> 76#include <linux/gfp.h>
77#include <linux/kthread.h>
77 78
78#include <asm/uaccess.h> 79#include <asm/uaccess.h>
79 80
@@ -578,8 +579,6 @@ static int loop_thread(void *data)
578 struct loop_device *lo = data; 579 struct loop_device *lo = data;
579 struct bio *bio; 580 struct bio *bio;
580 581
581 daemonize("loop%d", lo->lo_number);
582
583 /* 582 /*
584 * loop can be used in an encrypted device, 583 * loop can be used in an encrypted device,
585 * hence, it mustn't be stopped at all 584 * hence, it mustn't be stopped at all
@@ -592,11 +591,6 @@ static int loop_thread(void *data)
592 lo->lo_state = Lo_bound; 591 lo->lo_state = Lo_bound;
593 lo->lo_pending = 1; 592 lo->lo_pending = 1;
594 593
595 /*
596 * complete it, we are running
597 */
598 complete(&lo->lo_done);
599
600 for (;;) { 594 for (;;) {
601 int pending; 595 int pending;
602 596
@@ -629,7 +623,6 @@ static int loop_thread(void *data)
629 break; 623 break;
630 } 624 }
631 625
632 complete(&lo->lo_done);
633 return 0; 626 return 0;
634} 627}
635 628
@@ -746,6 +739,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
746 unsigned lo_blocksize; 739 unsigned lo_blocksize;
747 int lo_flags = 0; 740 int lo_flags = 0;
748 int error; 741 int error;
742 struct task_struct *tsk;
749 loff_t size; 743 loff_t size;
750 744
751 /* This is safe, since we have a reference from open(). */ 745 /* This is safe, since we have a reference from open(). */
@@ -839,10 +833,11 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
839 833
840 set_blocksize(bdev, lo_blocksize); 834 set_blocksize(bdev, lo_blocksize);
841 835
842 error = kernel_thread(loop_thread, lo, CLONE_KERNEL); 836 tsk = kthread_run(loop_thread, lo, "loop%d", lo->lo_number);
843 if (error < 0) 837 if (IS_ERR(tsk)) {
838 error = PTR_ERR(tsk);
844 goto out_putf; 839 goto out_putf;
845 wait_for_completion(&lo->lo_done); 840 }
846 return 0; 841 return 0;
847 842
848 out_putf: 843 out_putf:
@@ -898,6 +893,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
898 if (lo->lo_state != Lo_bound) 893 if (lo->lo_state != Lo_bound)
899 return -ENXIO; 894 return -ENXIO;
900 895
896 if (!lo->lo_thread)
897 return -EINVAL;
898
901 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ 899 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
902 return -EBUSY; 900 return -EBUSY;
903 901
@@ -911,7 +909,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
911 complete(&lo->lo_bh_done); 909 complete(&lo->lo_bh_done);
912 spin_unlock_irq(&lo->lo_lock); 910 spin_unlock_irq(&lo->lo_lock);
913 911
914 wait_for_completion(&lo->lo_done); 912 kthread_stop(lo->lo_thread);
915 913
916 lo->lo_backing_file = NULL; 914 lo->lo_backing_file = NULL;
917 915
@@ -924,6 +922,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
924 lo->lo_sizelimit = 0; 922 lo->lo_sizelimit = 0;
925 lo->lo_encrypt_key_size = 0; 923 lo->lo_encrypt_key_size = 0;
926 lo->lo_flags = 0; 924 lo->lo_flags = 0;
925 lo->lo_thread = NULL;
927 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); 926 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
928 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); 927 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
929 memset(lo->lo_file_name, 0, LO_NAME_SIZE); 928 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -1288,7 +1287,6 @@ static int __init loop_init(void)
1288 if (!lo->lo_queue) 1287 if (!lo->lo_queue)
1289 goto out_mem4; 1288 goto out_mem4;
1290 mutex_init(&lo->lo_ctl_mutex); 1289 mutex_init(&lo->lo_ctl_mutex);
1291 init_completion(&lo->lo_done);
1292 init_completion(&lo->lo_bh_done); 1290 init_completion(&lo->lo_bh_done);
1293 lo->lo_number = i; 1291 lo->lo_number = i;
1294 spin_lock_init(&lo->lo_lock); 1292 spin_lock_init(&lo->lo_lock);
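
The loop.c changes replace the kernel_thread()/daemonize() pair and the lo_done completion handshake with the kthread API: kthread_run() both creates and wakes the worker, and kthread_stop() waits for it to exit. A generic sketch of that conversion, assuming made-up names (worker, struct my_dev) rather than loop.c's own structures:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

struct my_dev {				/* stand-in, not struct loop_device */
	int id;
	struct task_struct *thread;
};

static int worker(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		/* ... service one batch of work for 'data' ... */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int start_worker(struct my_dev *dev)
{
	struct task_struct *tsk;

	tsk = kthread_run(worker, dev, "worker%d", dev->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);	/* no completion handshake needed */
	dev->thread = tsk;
	return 0;
}

static void stop_worker(struct my_dev *dev)
{
	kthread_stop(dev->thread);	/* blocks until worker() returns */
	dev->thread = NULL;
}

loop.c additionally tracks the worker in lo->lo_thread, which lets loop_clr_fd() refuse to clear a device that has no bound thread and stop the right task.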
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 8bca4905d7f7..7f554f2ed079 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -7,39 +7,9 @@
7 * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz> 7 * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
8 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> 8 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
9 * 9 *
10 * (part of code stolen from loop.c) 10 * This file is released under GPLv2 or later.
11 * 11 *
12 * 97-3-25 compiled 0-th version, not yet tested it 12 * (part of code stolen from loop.c)
13 * (it did not work, BTW) (later that day) HEY! it works!
14 * (bit later) hmm, not that much... 2:00am next day:
15 * yes, it works, but it gives something like 50kB/sec
16 * 97-4-01 complete rewrite to make it possible for many requests at
17 * once to be processed
18 * 97-4-11 Making protocol independent of endianity etc.
19 * 97-9-13 Cosmetic changes
20 * 98-5-13 Attempt to make 64-bit-clean on 64-bit machines
21 * 99-1-11 Attempt to make 64-bit-clean on 32-bit machines <ankry@mif.pg.gda.pl>
22 * 01-2-27 Fix to store proper blockcount for kernel (calculated using
23 * BLOCK_SIZE_BITS, not device blocksize) <aga@permonline.ru>
24 * 01-3-11 Make nbd work with new Linux block layer code. It now supports
25 * plugging like all the other block devices. Also added in MSG_MORE to
26 * reduce number of partial TCP segments sent. <steve@chygwyn.com>
27 * 01-12-6 Fix deadlock condition by making queue locks independent of
28 * the transmit lock. <steve@chygwyn.com>
29 * 02-10-11 Allow hung xmit to be aborted via SIGKILL & various fixes.
30 * <Paul.Clements@SteelEye.com> <James.Bottomley@SteelEye.com>
31 * 03-06-22 Make nbd work with new linux 2.5 block layer design. This fixes
32 * memory corruption from module removal and possible memory corruption
33 * from sending/receiving disk data. <ldl@aros.net>
34 * 03-06-23 Cosmetic changes. <ldl@aros.net>
35 * 03-06-23 Enhance diagnostics support. <ldl@aros.net>
36 * 03-06-24 Remove unneeded blksize_bits field from nbd_device struct.
37 * <ldl@aros.net>
38 * 03-06-24 Cleanup PARANOIA usage & code. <ldl@aros.net>
39 * 04-02-19 Remove PARANOIA, plus various cleanups (Paul Clements)
40 * possible FIXME: make set_sock / set_blksize / set_size / do_it one syscall
41 * why not: would need access_ok and friends, would share yet another
42 * structure with userland
43 */ 13 */
44 14
45#include <linux/major.h> 15#include <linux/major.h>
diff --git a/drivers/cdrom/mcdx.c b/drivers/cdrom/mcdx.c
index a0b580c22d80..0f6e7aab8d2c 100644
--- a/drivers/cdrom/mcdx.c
+++ b/drivers/cdrom/mcdx.c
@@ -1006,7 +1006,7 @@ static int mcdx_talk(struct s_drive_stuff *stuffp,
1006 1006
1007/* MODULE STUFF ***********************************************************/ 1007/* MODULE STUFF ***********************************************************/
1008 1008
1009int __mcdx_init(void) 1009static int __init __mcdx_init(void)
1010{ 1010{
1011 int i; 1011 int i;
1012 int drives = 0; 1012 int drives = 0;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 78d928f9d9f1..63f28d169b36 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -865,6 +865,7 @@ config SONYPI
865config TANBAC_TB0219 865config TANBAC_TB0219
866 tristate "TANBAC TB0219 base board support" 866 tristate "TANBAC TB0219 base board support"
867 depends TANBAC_TB022X 867 depends TANBAC_TB022X
868 select GPIO_VR41XX
868 869
869menu "Ftape, the floppy tape device driver" 870menu "Ftape, the floppy tape device driver"
870 871
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index a370e7a0bad5..9275d5e52e6d 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -166,11 +166,7 @@ static int ac_register_board(unsigned long physloc, void __iomem *loc,
166 return boardno + 1; 166 return boardno + 1;
167} 167}
168 168
169#ifdef MODULE 169static void __exit applicom_exit(void)
170
171#define applicom_init init_module
172
173void cleanup_module(void)
174{ 170{
175 unsigned int i; 171 unsigned int i;
176 172
@@ -188,9 +184,7 @@ void cleanup_module(void)
188 } 184 }
189} 185}
190 186
191#endif /* MODULE */ 187static int __init applicom_init(void)
192
193int __init applicom_init(void)
194{ 188{
195 int i, numisa = 0; 189 int i, numisa = 0;
196 struct pci_dev *dev = NULL; 190 struct pci_dev *dev = NULL;
@@ -355,10 +349,9 @@ out:
355 return ret; 349 return ret;
356} 350}
357 351
352module_init(applicom_init);
353module_exit(applicom_exit);
358 354
359#ifndef MODULE
360__initcall(applicom_init);
361#endif
362 355
363static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos) 356static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
364{ 357{
@@ -851,28 +844,3 @@ static int ac_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
851 return 0; 844 return 0;
852} 845}
853 846
854#ifndef MODULE
855static int __init applicom_setup(char *str)
856{
857 int ints[4];
858
859 (void) get_options(str, 4, ints);
860
861 if (ints[0] > 2) {
862 printk(KERN_WARNING "Too many arguments to 'applicom=', expected mem,irq only.\n");
863 }
864
865 if (ints[0] < 2) {
866 printk(KERN_INFO"applicom numargs: %d\n", ints[0]);
867 return 0;
868 }
869
870 mem = ints[1];
871 irq = ints[2];
872 return 1;
873}
874
875__setup("applicom=", applicom_setup);
876
877#endif /* MODULE */
878
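
applicom.c moves from the old "#define applicom_init init_module" / cleanup_module arrangement (plus a conditional __initcall and the boot-time "applicom=" setup hook) to the standard pairing below, which behaves the same whether the driver is built in or modular. Sketch only, with placeholder bodies:

#include <linux/module.h>
#include <linux/init.h>

static int __init example_init(void)
{
	/* probe the hardware, register the char device, set up IRQs */
	return 0;
}

static void __exit example_exit(void)
{
	/* unregister the device and release board resources */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");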
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index cc7acf877dc0..122e7a72a4e1 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -2833,9 +2833,8 @@ cy_write(struct tty_struct * tty, const unsigned char *buf, int count)
2833 return 0; 2833 return 0;
2834 } 2834 }
2835 2835
2836 if (!tty || !info->xmit_buf || !tmp_buf){ 2836 if (!info->xmit_buf || !tmp_buf)
2837 return 0; 2837 return 0;
2838 }
2839 2838
2840 CY_LOCK(info, flags); 2839 CY_LOCK(info, flags);
2841 while (1) { 2840 while (1) {
@@ -2884,7 +2883,7 @@ cy_put_char(struct tty_struct *tty, unsigned char ch)
2884 if (serial_paranoia_check(info, tty->name, "cy_put_char")) 2883 if (serial_paranoia_check(info, tty->name, "cy_put_char"))
2885 return; 2884 return;
2886 2885
2887 if (!tty || !info->xmit_buf) 2886 if (!info->xmit_buf)
2888 return; 2887 return;
2889 2888
2890 CY_LOCK(info, flags); 2889 CY_LOCK(info, flags);
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 09dc4b01232c..922174d527ae 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -1212,7 +1212,7 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch)
1212 if (serial_paranoia_check(info, tty->name, "rs_put_char")) 1212 if (serial_paranoia_check(info, tty->name, "rs_put_char"))
1213 return; 1213 return;
1214 1214
1215 if (!tty || !info->xmit_buf) 1215 if (!info->xmit_buf)
1216 return; 1216 return;
1217 1217
1218 spin_lock_irqsave(&info->lock, flags); 1218 spin_lock_irqsave(&info->lock, flags);
@@ -1256,7 +1256,7 @@ static int rs_write(struct tty_struct * tty,
1256 if (serial_paranoia_check(info, tty->name, "rs_write")) 1256 if (serial_paranoia_check(info, tty->name, "rs_write"))
1257 return 0; 1257 return 0;
1258 1258
1259 if (!tty || !info->xmit_buf) 1259 if (!info->xmit_buf)
1260 return 0; 1260 return 0;
1261 1261
1262 while (1) { 1262 while (1) {
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 03db1cb3fa95..9ab33c3d359f 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -305,7 +305,7 @@ static struct class *ip2_class;
305 305
306// Some functions to keep track of what irq's we have 306// Some functions to keep track of what irq's we have
307 307
308static int __init 308static int
309is_valid_irq(int irq) 309is_valid_irq(int irq)
310{ 310{
311 int *i = Valid_Irqs; 311 int *i = Valid_Irqs;
@@ -316,14 +316,14 @@ is_valid_irq(int irq)
316 return (*i); 316 return (*i);
317} 317}
318 318
319static void __init 319static void
320mark_requested_irq( char irq ) 320mark_requested_irq( char irq )
321{ 321{
322 rirqs[iindx++] = irq; 322 rirqs[iindx++] = irq;
323} 323}
324 324
325#ifdef MODULE 325#ifdef MODULE
326static int __init 326static int
327clear_requested_irq( char irq ) 327clear_requested_irq( char irq )
328{ 328{
329 int i; 329 int i;
@@ -337,7 +337,7 @@ clear_requested_irq( char irq )
337} 337}
338#endif 338#endif
339 339
340static int __init 340static int
341have_requested_irq( char irq ) 341have_requested_irq( char irq )
342{ 342{
343 // array init to zeros so 0 irq will not be requested as a side effect 343 // array init to zeros so 0 irq will not be requested as a side effect
@@ -818,7 +818,7 @@ EXPORT_SYMBOL(ip2_loadmain);
818/* the board, the channel structures are initialized, and the board details */ 818/* the board, the channel structures are initialized, and the board details */
819/* are reported on the console. */ 819/* are reported on the console. */
820/******************************************************************************/ 820/******************************************************************************/
821static void __init 821static void
822ip2_init_board( int boardnum ) 822ip2_init_board( int boardnum )
823{ 823{
824 int i; 824 int i;
@@ -961,7 +961,7 @@ err_initialize:
961/* EISA motherboard, or no valid board ID is selected it returns 0. Otherwise */ 961/* EISA motherboard, or no valid board ID is selected it returns 0. Otherwise */
962/* it returns the base address of the controller. */ 962/* it returns the base address of the controller. */
963/******************************************************************************/ 963/******************************************************************************/
964static unsigned short __init 964static unsigned short
965find_eisa_board( int start_slot ) 965find_eisa_board( int start_slot )
966{ 966{
967 int i, j; 967 int i, j;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index e9ebabaf8cb0..efaaa1937ab6 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1145,7 +1145,7 @@ static int isicom_write(struct tty_struct *tty, const unsigned char *buf,
1145 if (isicom_paranoia_check(port, tty->name, "isicom_write")) 1145 if (isicom_paranoia_check(port, tty->name, "isicom_write"))
1146 return 0; 1146 return 0;
1147 1147
1148 if (!tty || !port->xmit_buf) 1148 if (!port->xmit_buf)
1149 return 0; 1149 return 0;
1150 1150
1151 spin_lock_irqsave(&card->card_lock, flags); 1151 spin_lock_irqsave(&card->card_lock, flags);
@@ -1180,7 +1180,7 @@ static void isicom_put_char(struct tty_struct *tty, unsigned char ch)
1180 if (isicom_paranoia_check(port, tty->name, "isicom_put_char")) 1180 if (isicom_paranoia_check(port, tty->name, "isicom_put_char"))
1181 return; 1181 return;
1182 1182
1183 if (!tty || !port->xmit_buf) 1183 if (!port->xmit_buf)
1184 return; 1184 return;
1185 1185
1186 spin_lock_irqsave(&card->card_lock, flags); 1186 spin_lock_irqsave(&card->card_lock, flags);
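
The cyclades, esp and isicom hunks all delete the same redundant test: the tty core does not call a driver's write() or put_char() with a NULL tty, so only the driver's own transmit buffer needs checking. Schematically (example_port and example_write are illustrative, not any of these drivers' types):

#include <linux/tty.h>

struct example_port {			/* illustrative per-port state */
	unsigned char *xmit_buf;
};

static int example_write(struct tty_struct *tty, const unsigned char *buf,
			 int count)
{
	struct example_port *port = tty->driver_data;

	if (!port->xmit_buf)		/* port not fully initialised yet */
		return 0;
	/* ... copy up to 'count' bytes from 'buf' into port->xmit_buf ... */
	return count;
}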
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 5755b7e5f187..edd996f6fb87 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -39,6 +39,7 @@
39#include <linux/vt_kern.h> 39#include <linux/vt_kern.h>
40#include <linux/sysrq.h> 40#include <linux/sysrq.h>
41#include <linux/input.h> 41#include <linux/input.h>
42#include <linux/reboot.h>
42 43
43static void kbd_disconnect(struct input_handle *handle); 44static void kbd_disconnect(struct input_handle *handle);
44extern void ctrl_alt_del(void); 45extern void ctrl_alt_del(void);
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 1b05fa688996..d65b3109318a 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -329,7 +329,6 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
329 if (PAGE_SIZE > (1 << 16)) 329 if (PAGE_SIZE > (1 << 16))
330 return -ENOSYS; 330 return -ENOSYS;
331 331
332 vma->vm_flags |= (VM_IO | VM_SHM | VM_LOCKED );
333 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 332 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
334 333
335 mmtimer_addr = __pa(RTC_COUNTER_ADDR); 334 mmtimer_addr = __pa(RTC_COUNTER_ADDR);
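
mmtimer.c stops OR-ing VM_IO | VM_SHM | VM_LOCKED into vma->vm_flags by hand; remap_pfn_range() marks the VMA itself, and VM_SHM is on its way out of the kernel. A sketch of an mmap handler in that style, with example_phys standing in for the real register page address:

#include <linux/mm.h>
#include <linux/fs.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long example_phys = 0;	/* placeholder physical address */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vma->vm_start, example_phys >> PAGE_SHIFT,
			    PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}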
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 0fb2fb9fb024..645d9d713aec 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -9,7 +9,7 @@
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or 11 * the Free Software Foundation; either version 2 of the License, or
12* (at your option) any later version. 12 * (at your option) any later version.
13 * 13 *
14 * This program is distributed in the hope that it will be useful, 14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -71,8 +71,8 @@
71#define MXSERMAJOR 174 71#define MXSERMAJOR 174
72#define MXSERCUMAJOR 175 72#define MXSERCUMAJOR 175
73 73
74#define MXSER_EVENT_TXLOW 1 74#define MXSER_EVENT_TXLOW 1
75#define MXSER_EVENT_HANGUP 2 75#define MXSER_EVENT_HANGUP 2
76 76
77#define MXSER_BOARDS 4 /* Max. boards */ 77#define MXSER_BOARDS 4 /* Max. boards */
78#define MXSER_PORTS 32 /* Max. ports */ 78#define MXSER_PORTS 32 /* Max. ports */
@@ -92,7 +92,8 @@
92#define UART_MCR_AFE 0x20 92#define UART_MCR_AFE 0x20
93#define UART_LSR_SPECIAL 0x1E 93#define UART_LSR_SPECIAL 0x1E
94 94
95#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK|IXON|IXOFF)) 95#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK|\
96 IXON|IXOFF))
96 97
97#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) 98#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT)
98 99
@@ -152,27 +153,27 @@ static char *mxser_brdname[] = {
152}; 153};
153 154
154static int mxser_numports[] = { 155static int mxser_numports[] = {
155 8, // C168-ISA 156 8, /* C168-ISA */
156 4, // C104-ISA 157 4, /* C104-ISA */
157 4, // CI104J 158 4, /* CI104J */
158 8, // C168-PCI 159 8, /* C168-PCI */
159 4, // C104-PCI 160 4, /* C104-PCI */
160 2, // C102-ISA 161 2, /* C102-ISA */
161 2, // CI132 162 2, /* CI132 */
162 4, // CI134 163 4, /* CI134 */
163 2, // CP132 164 2, /* CP132 */
164 4, // CP114 165 4, /* CP114 */
165 4, // CT114 166 4, /* CT114 */
166 2, // CP102 167 2, /* CP102 */
167 4, // CP104U 168 4, /* CP104U */
168 8, // CP168U 169 8, /* CP168U */
169 2, // CP132U 170 2, /* CP132U */
170 4, // CP134U 171 4, /* CP134U */
171 4, // CP104JU 172 4, /* CP104JU */
172 8, // RC7000 173 8, /* RC7000 */
173 8, // CP118U 174 8, /* CP118U */
174 2, // CP102UL 175 2, /* CP102UL */
175 2, // CP102U 176 2, /* CP102U */
176}; 177};
177 178
178#define UART_TYPE_NUM 2 179#define UART_TYPE_NUM 2
@@ -182,7 +183,7 @@ static const unsigned int Gmoxa_uart_id[UART_TYPE_NUM] = {
182 MOXA_MUST_MU860_HWID 183 MOXA_MUST_MU860_HWID
183}; 184};
184 185
185// This is only for PCI 186/* This is only for PCI */
186#define UART_INFO_NUM 3 187#define UART_INFO_NUM 3
187struct mxpciuart_info { 188struct mxpciuart_info {
188 int type; 189 int type;
@@ -231,7 +232,7 @@ MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
231typedef struct _moxa_pci_info { 232typedef struct _moxa_pci_info {
232 unsigned short busNum; 233 unsigned short busNum;
233 unsigned short devNum; 234 unsigned short devNum;
234 struct pci_dev *pdev; // add by Victor Yu. 06-23-2003 235 struct pci_dev *pdev; /* add by Victor Yu. 06-23-2003 */
235} moxa_pci_info; 236} moxa_pci_info;
236 237
237static int ioaddr[MXSER_BOARDS] = { 0, 0, 0, 0 }; 238static int ioaddr[MXSER_BOARDS] = { 0, 0, 0, 0 };
@@ -280,6 +281,7 @@ struct mxser_mon_ext {
280 int fifo[32]; 281 int fifo[32];
281 int iftype[32]; 282 int iftype[32];
282}; 283};
284
283struct mxser_hwconf { 285struct mxser_hwconf {
284 int board_type; 286 int board_type;
285 int ports; 287 int ports;
@@ -290,9 +292,9 @@ struct mxser_hwconf {
290 int ioaddr[MXSER_PORTS_PER_BOARD]; 292 int ioaddr[MXSER_PORTS_PER_BOARD];
291 int baud_base[MXSER_PORTS_PER_BOARD]; 293 int baud_base[MXSER_PORTS_PER_BOARD];
292 moxa_pci_info pciInfo; 294 moxa_pci_info pciInfo;
293 int IsMoxaMustChipFlag; // add by Victor Yu. 08-30-2002 295 int IsMoxaMustChipFlag; /* add by Victor Yu. 08-30-2002 */
294 int MaxCanSetBaudRate[MXSER_PORTS_PER_BOARD]; // add by Victor Yu. 09-04-2002 296 int MaxCanSetBaudRate[MXSER_PORTS_PER_BOARD]; /* add by Victor Yu. 09-04-2002 */
295 int opmode_ioaddr[MXSER_PORTS_PER_BOARD]; // add by Victor Yu. 01-05-2004 297 int opmode_ioaddr[MXSER_PORTS_PER_BOARD]; /* add by Victor Yu. 01-05-2004 */
296}; 298};
297 299
298struct mxser_struct { 300struct mxser_struct {
@@ -334,9 +336,9 @@ struct mxser_struct {
334 wait_queue_head_t delta_msr_wait; 336 wait_queue_head_t delta_msr_wait;
335 struct async_icount icount; /* kernel counters for the 4 input interrupts */ 337 struct async_icount icount; /* kernel counters for the 4 input interrupts */
336 int timeout; 338 int timeout;
337 int IsMoxaMustChipFlag; // add by Victor Yu. 08-30-2002 339 int IsMoxaMustChipFlag; /* add by Victor Yu. 08-30-2002 */
338 int MaxCanSetBaudRate; // add by Victor Yu. 09-04-2002 340 int MaxCanSetBaudRate; /* add by Victor Yu. 09-04-2002 */
339 int opmode_ioaddr; // add by Victor Yu. 01-05-2004 341 int opmode_ioaddr; /* add by Victor Yu. 01-05-2004 */
340 unsigned char stop_rx; 342 unsigned char stop_rx;
341 unsigned char ldisc_stop_rx; 343 unsigned char ldisc_stop_rx;
342 long realbaud; 344 long realbaud;
@@ -345,7 +347,6 @@ struct mxser_struct {
345 spinlock_t slock; 347 spinlock_t slock;
346}; 348};
347 349
348
349struct mxser_mstatus { 350struct mxser_mstatus {
350 tcflag_t cflag; 351 tcflag_t cflag;
351 int cts; 352 int cts;
@@ -358,7 +359,7 @@ static struct mxser_mstatus GMStatus[MXSER_PORTS];
358 359
359static int mxserBoardCAP[MXSER_BOARDS] = { 360static int mxserBoardCAP[MXSER_BOARDS] = {
360 0, 0, 0, 0 361 0, 0, 0, 0
361 /* 0x180, 0x280, 0x200, 0x320 */ 362 /* 0x180, 0x280, 0x200, 0x320 */
362}; 363};
363 364
364static struct tty_driver *mxvar_sdriver; 365static struct tty_driver *mxvar_sdriver;
@@ -386,7 +387,7 @@ static struct mxser_hwconf mxsercfg[MXSER_BOARDS];
386static void mxser_getcfg(int board, struct mxser_hwconf *hwconf); 387static void mxser_getcfg(int board, struct mxser_hwconf *hwconf);
387static int mxser_init(void); 388static int mxser_init(void);
388 389
389//static void mxser_poll(unsigned long); 390/* static void mxser_poll(unsigned long); */
390static int mxser_get_ISA_conf(int, struct mxser_hwconf *); 391static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
391static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); 392static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
392static void mxser_do_softint(void *); 393static void mxser_do_softint(void *);
@@ -440,18 +441,18 @@ static int CheckIsMoxaMust(int io)
440 SET_MOXA_MUST_XON1_VALUE(io, 0x11); 441 SET_MOXA_MUST_XON1_VALUE(io, 0x11);
441 if ((hwid = inb(io + UART_MCR)) != 0) { 442 if ((hwid = inb(io + UART_MCR)) != 0) {
442 outb(oldmcr, io + UART_MCR); 443 outb(oldmcr, io + UART_MCR);
443 return (MOXA_OTHER_UART); 444 return MOXA_OTHER_UART;
444 } 445 }
445 446
446 GET_MOXA_MUST_HARDWARE_ID(io, &hwid); 447 GET_MOXA_MUST_HARDWARE_ID(io, &hwid);
447 for (i = 0; i < UART_TYPE_NUM; i++) { 448 for (i = 0; i < UART_TYPE_NUM; i++) {
448 if (hwid == Gmoxa_uart_id[i]) 449 if (hwid == Gmoxa_uart_id[i])
449 return (int) hwid; 450 return (int)hwid;
450 } 451 }
451 return MOXA_OTHER_UART; 452 return MOXA_OTHER_UART;
452} 453}
453 454
454// above is modified by Victor Yu. 08-15-2002 455/* above is modified by Victor Yu. 08-15-2002 */
455 456
456static struct tty_operations mxser_ops = { 457static struct tty_operations mxser_ops = {
457 .open = mxser_open, 458 .open = mxser_open,
@@ -504,7 +505,6 @@ static void __exit mxser_module_exit(void)
504 else 505 else
505 printk(KERN_ERR "Couldn't unregister MOXA Smartio/Industio family serial driver\n"); 506 printk(KERN_ERR "Couldn't unregister MOXA Smartio/Industio family serial driver\n");
506 507
507
508 for (i = 0; i < MXSER_BOARDS; i++) { 508 for (i = 0; i < MXSER_BOARDS; i++) {
509 struct pci_dev *pdev; 509 struct pci_dev *pdev;
510 510
@@ -513,7 +513,7 @@ static void __exit mxser_module_exit(void)
513 else { 513 else {
514 pdev = mxsercfg[i].pciInfo.pdev; 514 pdev = mxsercfg[i].pciInfo.pdev;
515 free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]); 515 free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]);
516 if (pdev != NULL) { //PCI 516 if (pdev != NULL) { /* PCI */
517 release_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); 517 release_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
518 release_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3)); 518 release_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
519 } else { 519 } else {
@@ -524,7 +524,6 @@ static void __exit mxser_module_exit(void)
524 } 524 }
525 if (verbose) 525 if (verbose)
526 printk(KERN_DEBUG "Done.\n"); 526 printk(KERN_DEBUG "Done.\n");
527
528} 527}
529 528
530static void process_txrx_fifo(struct mxser_struct *info) 529static void process_txrx_fifo(struct mxser_struct *info)
@@ -558,8 +557,10 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
558 n = board * MXSER_PORTS_PER_BOARD; 557 n = board * MXSER_PORTS_PER_BOARD;
559 info = &mxvar_table[n]; 558 info = &mxvar_table[n];
560 /*if (verbose) */ { 559 /*if (verbose) */ {
561 printk(KERN_DEBUG " ttyM%d - ttyM%d ", n, n + hwconf->ports - 1); 560 printk(KERN_DEBUG " ttyM%d - ttyM%d ",
562 printk(" max. baud rate = %d bps.\n", hwconf->MaxCanSetBaudRate[0]); 561 n, n + hwconf->ports - 1);
562 printk(" max. baud rate = %d bps.\n",
563 hwconf->MaxCanSetBaudRate[0]);
563 } 564 }
564 565
565 for (i = 0; i < hwconf->ports; i++, n++, info++) { 566 for (i = 0; i < hwconf->ports; i++, n++, info++) {
@@ -568,12 +569,12 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
568 info->irq = hwconf->irq; 569 info->irq = hwconf->irq;
569 info->vector = hwconf->vector; 570 info->vector = hwconf->vector;
570 info->vectormask = hwconf->vector_mask; 571 info->vectormask = hwconf->vector_mask;
571 info->opmode_ioaddr = hwconf->opmode_ioaddr[i]; // add by Victor Yu. 01-05-2004 572 info->opmode_ioaddr = hwconf->opmode_ioaddr[i]; /* add by Victor Yu. 01-05-2004 */
572 info->stop_rx = 0; 573 info->stop_rx = 0;
573 info->ldisc_stop_rx = 0; 574 info->ldisc_stop_rx = 0;
574 575
575 info->IsMoxaMustChipFlag = hwconf->IsMoxaMustChipFlag; 576 info->IsMoxaMustChipFlag = hwconf->IsMoxaMustChipFlag;
576 //Enhance mode enabled here 577 /* Enhance mode enabled here */
577 if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) { 578 if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
578 ENABLE_MOXA_MUST_ENCHANCE_MODE(info->base); 579 ENABLE_MOXA_MUST_ENCHANCE_MODE(info->base);
579 } 580 }
@@ -606,22 +607,25 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
606 607
607 /* before set INT ISR, disable all int */ 608 /* before set INT ISR, disable all int */
608 for (i = 0; i < hwconf->ports; i++) { 609 for (i = 0; i < hwconf->ports; i++) {
609 outb(inb(hwconf->ioaddr[i] + UART_IER) & 0xf0, hwconf->ioaddr[i] + UART_IER); 610 outb(inb(hwconf->ioaddr[i] + UART_IER) & 0xf0,
611 hwconf->ioaddr[i] + UART_IER);
610 } 612 }
611 613
612 n = board * MXSER_PORTS_PER_BOARD; 614 n = board * MXSER_PORTS_PER_BOARD;
613 info = &mxvar_table[n]; 615 info = &mxvar_table[n];
614 616
615 retval = request_irq(hwconf->irq, mxser_interrupt, IRQ_T(info), "mxser", info); 617 retval = request_irq(hwconf->irq, mxser_interrupt, IRQ_T(info),
618 "mxser", info);
616 if (retval) { 619 if (retval) {
617 printk(KERN_ERR "Board %d: %s", board, mxser_brdname[hwconf->board_type - 1]); 620 printk(KERN_ERR "Board %d: %s",
618 printk(" Request irq fail,IRQ (%d) may be conflit with another device.\n", info->irq); 621 board, mxser_brdname[hwconf->board_type - 1]);
622 printk(" Request irq failed, IRQ (%d) may conflict with"
623 " another device.\n", info->irq);
619 return retval; 624 return retval;
620 } 625 }
621 return 0; 626 return 0;
622} 627}
623 628
624
625static void mxser_getcfg(int board, struct mxser_hwconf *hwconf) 629static void mxser_getcfg(int board, struct mxser_hwconf *hwconf)
626{ 630{
627 mxsercfg[board] = *hwconf; 631 mxsercfg[board] = *hwconf;
@@ -631,26 +635,27 @@ static void mxser_getcfg(int board, struct mxser_hwconf *hwconf)
631static int mxser_get_PCI_conf(int busnum, int devnum, int board_type, struct mxser_hwconf *hwconf) 635static int mxser_get_PCI_conf(int busnum, int devnum, int board_type, struct mxser_hwconf *hwconf)
632{ 636{
633 int i, j; 637 int i, j;
634// unsigned int val; 638 /* unsigned int val; */
635 unsigned int ioaddress; 639 unsigned int ioaddress;
636 struct pci_dev *pdev = hwconf->pciInfo.pdev; 640 struct pci_dev *pdev = hwconf->pciInfo.pdev;
637 641
638 //io address 642 /* io address */
639 hwconf->board_type = board_type; 643 hwconf->board_type = board_type;
640 hwconf->ports = mxser_numports[board_type - 1]; 644 hwconf->ports = mxser_numports[board_type - 1];
641 ioaddress = pci_resource_start(pdev, 2); 645 ioaddress = pci_resource_start(pdev, 2);
642 request_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2), "mxser(IO)"); 646 request_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2),
647 "mxser(IO)");
643 648
644 for (i = 0; i < hwconf->ports; i++) { 649 for (i = 0; i < hwconf->ports; i++)
645 hwconf->ioaddr[i] = ioaddress + 8 * i; 650 hwconf->ioaddr[i] = ioaddress + 8 * i;
646 }
647 651
648 //vector 652 /* vector */
649 ioaddress = pci_resource_start(pdev, 3); 653 ioaddress = pci_resource_start(pdev, 3);
650 request_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3), "mxser(vector)"); 654 request_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3),
655 "mxser(vector)");
651 hwconf->vector = ioaddress; 656 hwconf->vector = ioaddress;
652 657
653 //irq 658 /* irq */
654 hwconf->irq = hwconf->pciInfo.pdev->irq; 659 hwconf->irq = hwconf->pciInfo.pdev->irq;
655 660
656 hwconf->IsMoxaMustChipFlag = CheckIsMoxaMust(hwconf->ioaddr[0]); 661 hwconf->IsMoxaMustChipFlag = CheckIsMoxaMust(hwconf->ioaddr[0]);
@@ -663,7 +668,7 @@ static int mxser_get_PCI_conf(int busnum, int devnum, int board_type, struct mxs
663 if (Gpci_uart_info[j].type == hwconf->IsMoxaMustChipFlag) { 668 if (Gpci_uart_info[j].type == hwconf->IsMoxaMustChipFlag) {
664 hwconf->MaxCanSetBaudRate[i] = Gpci_uart_info[j].max_baud; 669 hwconf->MaxCanSetBaudRate[i] = Gpci_uart_info[j].max_baud;
665 670
666 //exception....CP-102 671 /* exception....CP-102 */
667 if (board_type == MXSER_BOARD_CP102) 672 if (board_type == MXSER_BOARD_CP102)
668 hwconf->MaxCanSetBaudRate[i] = 921600; 673 hwconf->MaxCanSetBaudRate[i] = 921600;
669 break; 674 break;
@@ -678,15 +683,15 @@ static int mxser_get_PCI_conf(int busnum, int devnum, int board_type, struct mxs
678 else 683 else
679 hwconf->opmode_ioaddr[i] = ioaddress + 0x0c; 684 hwconf->opmode_ioaddr[i] = ioaddress + 0x0c;
680 } 685 }
681 outb(0, ioaddress + 4); // default set to RS232 mode 686 outb(0, ioaddress + 4); /* default set to RS232 mode */
682 outb(0, ioaddress + 0x0c); //default set to RS232 mode 687 outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
683 } 688 }
684 689
685 for (i = 0; i < hwconf->ports; i++) { 690 for (i = 0; i < hwconf->ports; i++) {
686 hwconf->vector_mask |= (1 << i); 691 hwconf->vector_mask |= (1 << i);
687 hwconf->baud_base[i] = 921600; 692 hwconf->baud_base[i] = 921600;
688 } 693 }
689 return (0); 694 return 0;
690} 695}
691#endif 696#endif
692 697
@@ -707,7 +712,8 @@ static int mxser_init(void)
707 mxsercfg[i].board_type = -1; 712 mxsercfg[i].board_type = -1;
708 } 713 }
709 714
710 printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n", MXSER_VERSION); 715 printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
716 MXSER_VERSION);
711 717
712 /* Initialize the tty_driver structure */ 718 /* Initialize the tty_driver structure */
713 memset(mxvar_sdriver, 0, sizeof(struct tty_driver)); 719 memset(mxvar_sdriver, 0, sizeof(struct tty_driver));
@@ -719,7 +725,7 @@ static int mxser_init(void)
719 mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL; 725 mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
720 mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL; 726 mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
721 mxvar_sdriver->init_termios = tty_std_termios; 727 mxvar_sdriver->init_termios = tty_std_termios;
722 mxvar_sdriver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; 728 mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
723 mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW; 729 mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW;
724 tty_set_operations(mxvar_sdriver, &mxser_ops); 730 tty_set_operations(mxvar_sdriver, &mxser_ops);
725 mxvar_sdriver->ttys = mxvar_tty; 731 mxvar_sdriver->ttys = mxvar_tty;
@@ -739,23 +745,29 @@ static int mxser_init(void)
739 /* Start finding ISA boards here */ 745 /* Start finding ISA boards here */
740 for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) { 746 for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
741 int cap; 747 int cap;
748
742 if (!(cap = mxserBoardCAP[b])) 749 if (!(cap = mxserBoardCAP[b]))
743 continue; 750 continue;
744 751
745 retval = mxser_get_ISA_conf(cap, &hwconf); 752 retval = mxser_get_ISA_conf(cap, &hwconf);
746 753
747 if (retval != 0) 754 if (retval != 0)
748 printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n", mxser_brdname[hwconf.board_type - 1], ioaddr[b]); 755 printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n",
756 mxser_brdname[hwconf.board_type - 1], ioaddr[b]);
749 757
750 if (retval <= 0) { 758 if (retval <= 0) {
751 if (retval == MXSER_ERR_IRQ) 759 if (retval == MXSER_ERR_IRQ)
752 printk(KERN_ERR "Invalid interrupt number,board not configured\n"); 760 printk(KERN_ERR "Invalid interrupt number, "
761 "board not configured\n");
753 else if (retval == MXSER_ERR_IRQ_CONFLIT) 762 else if (retval == MXSER_ERR_IRQ_CONFLIT)
754 printk(KERN_ERR "Invalid interrupt number,board not configured\n"); 763 printk(KERN_ERR "Invalid interrupt number, "
764 "board not configured\n");
755 else if (retval == MXSER_ERR_VECTOR) 765 else if (retval == MXSER_ERR_VECTOR)
756 printk(KERN_ERR "Invalid interrupt vector,board not configured\n"); 766 printk(KERN_ERR "Invalid interrupt vector, "
767 "board not configured\n");
757 else if (retval == MXSER_ERR_IOADDR) 768 else if (retval == MXSER_ERR_IOADDR)
758 printk(KERN_ERR "Invalid I/O address,board not configured\n"); 769 printk(KERN_ERR "Invalid I/O address, "
770 "board not configured\n");
759 771
760 continue; 772 continue;
761 } 773 }
@@ -765,35 +777,43 @@ static int mxser_init(void)
765 hwconf.pciInfo.pdev = NULL; 777 hwconf.pciInfo.pdev = NULL;
766 778
767 mxser_getcfg(m, &hwconf); 779 mxser_getcfg(m, &hwconf);
768 //init mxsercfg first, or mxsercfg data is not correct on ISR. 780 /*
769 //mxser_initbrd will hook ISR. 781 * init mxsercfg first,
782 * or mxsercfg data is not correct on ISR.
783 */
784 /* mxser_initbrd will hook ISR. */
770 if (mxser_initbrd(m, &hwconf) < 0) 785 if (mxser_initbrd(m, &hwconf) < 0)
771 continue; 786 continue;
772 787
773
774 m++; 788 m++;
775 } 789 }
776 790
777 /* Start finding ISA boards from module arg */ 791 /* Start finding ISA boards from module arg */
778 for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) { 792 for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
779 int cap; 793 int cap;
794
780 if (!(cap = ioaddr[b])) 795 if (!(cap = ioaddr[b]))
781 continue; 796 continue;
782 797
783 retval = mxser_get_ISA_conf(cap, &hwconf); 798 retval = mxser_get_ISA_conf(cap, &hwconf);
784 799
785 if (retval != 0) 800 if (retval != 0)
786 printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n", mxser_brdname[hwconf.board_type - 1], ioaddr[b]); 801 printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n",
802 mxser_brdname[hwconf.board_type - 1], ioaddr[b]);
787 803
788 if (retval <= 0) { 804 if (retval <= 0) {
789 if (retval == MXSER_ERR_IRQ) 805 if (retval == MXSER_ERR_IRQ)
790 printk(KERN_ERR "Invalid interrupt number,board not configured\n"); 806 printk(KERN_ERR "Invalid interrupt number, "
807 "board not configured\n");
791 else if (retval == MXSER_ERR_IRQ_CONFLIT) 808 else if (retval == MXSER_ERR_IRQ_CONFLIT)
792 printk(KERN_ERR "Invalid interrupt number,board not configured\n"); 809 printk(KERN_ERR "Invalid interrupt number, "
810 "board not configured\n");
793 else if (retval == MXSER_ERR_VECTOR) 811 else if (retval == MXSER_ERR_VECTOR)
794 printk(KERN_ERR "Invalid interrupt vector,board not configured\n"); 812 printk(KERN_ERR "Invalid interrupt vector, "
813 "board not configured\n");
795 else if (retval == MXSER_ERR_IOADDR) 814 else if (retval == MXSER_ERR_IOADDR)
796 printk(KERN_ERR "Invalid I/O address,board not configured\n"); 815 printk(KERN_ERR "Invalid I/O address, "
816 "board not configured\n");
797 817
798 continue; 818 continue;
799 } 819 }
@@ -803,8 +823,11 @@ static int mxser_init(void)
803 hwconf.pciInfo.pdev = NULL; 823 hwconf.pciInfo.pdev = NULL;
804 824
805 mxser_getcfg(m, &hwconf); 825 mxser_getcfg(m, &hwconf);
806 //init mxsercfg first, or mxsercfg data is not correct on ISR. 826 /*
807 //mxser_initbrd will hook ISR. 827 * init mxsercfg first,
828 * or mxsercfg data is not correct on ISR.
829 */
830 /* mxser_initbrd will hook ISR. */
808 if (mxser_initbrd(m, &hwconf) < 0) 831 if (mxser_initbrd(m, &hwconf) < 0)
809 continue; 832 continue;
810 833
@@ -817,7 +840,8 @@ static int mxser_init(void)
817 index = 0; 840 index = 0;
818 b = 0; 841 b = 0;
819 while (b < n) { 842 while (b < n) {
820 pdev = pci_find_device(mxser_pcibrds[b].vendor, mxser_pcibrds[b].device, pdev); 843 pdev = pci_find_device(mxser_pcibrds[b].vendor,
844 mxser_pcibrds[b].device, pdev);
821 if (pdev == NULL) { 845 if (pdev == NULL) {
822 b++; 846 b++;
823 continue; 847 continue;
@@ -825,30 +849,48 @@ static int mxser_init(void)
825 hwconf.pciInfo.busNum = busnum = pdev->bus->number; 849 hwconf.pciInfo.busNum = busnum = pdev->bus->number;
826 hwconf.pciInfo.devNum = devnum = PCI_SLOT(pdev->devfn) << 3; 850 hwconf.pciInfo.devNum = devnum = PCI_SLOT(pdev->devfn) << 3;
827 hwconf.pciInfo.pdev = pdev; 851 hwconf.pciInfo.pdev = pdev;
828 printk(KERN_INFO "Found MOXA %s board(BusNo=%d,DevNo=%d)\n", mxser_brdname[(int) (mxser_pcibrds[b].driver_data) - 1], busnum, devnum >> 3); 852 printk(KERN_INFO "Found MOXA %s board(BusNo=%d,DevNo=%d)\n",
853 mxser_brdname[(int) (mxser_pcibrds[b].driver_data) - 1],
854 busnum, devnum >> 3);
829 index++; 855 index++;
830 if (m >= MXSER_BOARDS) { 856 if (m >= MXSER_BOARDS)
831 printk(KERN_ERR "Too many Smartio/Industio family boards find (maximum %d),board not configured\n", MXSER_BOARDS); 857 printk(KERN_ERR
832 } else { 858 "Too many Smartio/Industio family boards find "
859 "(maximum %d), board not configured\n",
860 MXSER_BOARDS);
861 else {
833 if (pci_enable_device(pdev)) { 862 if (pci_enable_device(pdev)) {
834 printk(KERN_ERR "Moxa SmartI/O PCI enable fail !\n"); 863 printk(KERN_ERR "Moxa SmartI/O PCI enable "
864 "fail !\n");
835 continue; 865 continue;
836 } 866 }
837 retval = mxser_get_PCI_conf(busnum, devnum, (int) mxser_pcibrds[b].driver_data, &hwconf); 867 retval = mxser_get_PCI_conf(busnum, devnum,
868 (int)mxser_pcibrds[b].driver_data,
869 &hwconf);
838 if (retval < 0) { 870 if (retval < 0) {
839 if (retval == MXSER_ERR_IRQ) 871 if (retval == MXSER_ERR_IRQ)
840 printk(KERN_ERR "Invalid interrupt number,board not configured\n"); 872 printk(KERN_ERR
873 "Invalid interrupt number, "
874 "board not configured\n");
841 else if (retval == MXSER_ERR_IRQ_CONFLIT) 875 else if (retval == MXSER_ERR_IRQ_CONFLIT)
842 printk(KERN_ERR "Invalid interrupt number,board not configured\n"); 876 printk(KERN_ERR
877 "Invalid interrupt number, "
878 "board not configured\n");
843 else if (retval == MXSER_ERR_VECTOR) 879 else if (retval == MXSER_ERR_VECTOR)
844 printk(KERN_ERR "Invalid interrupt vector,board not configured\n"); 880 printk(KERN_ERR
881 "Invalid interrupt vector, "
882 "board not configured\n");
845 else if (retval == MXSER_ERR_IOADDR) 883 else if (retval == MXSER_ERR_IOADDR)
846 printk(KERN_ERR "Invalid I/O address,board not configured\n"); 884 printk(KERN_ERR
885 "Invalid I/O address, "
886 "board not configured\n");
847 continue; 887 continue;
848 } 888 }
849 mxser_getcfg(m, &hwconf); 889 mxser_getcfg(m, &hwconf);
850 //init mxsercfg first, or mxsercfg data is not correct on ISR. 890 /* init mxsercfg first,
851 //mxser_initbrd will hook ISR. 891 * or mxsercfg data is not correct on ISR.
892 */
893 /* mxser_initbrd will hook ISR. */
852 if (mxser_initbrd(m, &hwconf) < 0) 894 if (mxser_initbrd(m, &hwconf) < 0)
853 continue; 895 continue;
854 m++; 896 m++;
@@ -858,7 +900,8 @@ static int mxser_init(void)
858 900
859 retval = tty_register_driver(mxvar_sdriver); 901 retval = tty_register_driver(mxvar_sdriver);
860 if (retval) { 902 if (retval) {
861 printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family driver !\n"); 903 printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family"
904 " driver !\n");
862 put_tty_driver(mxvar_sdriver); 905 put_tty_driver(mxvar_sdriver);
863 906
864 for (i = 0; i < MXSER_BOARDS; i++) { 907 for (i = 0; i < MXSER_BOARDS; i++) {
@@ -866,7 +909,7 @@ static int mxser_init(void)
866 continue; 909 continue;
867 else { 910 else {
868 free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]); 911 free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]);
869 //todo: release io, vector 912 /* todo: release io, vector */
870 } 913 }
871 } 914 }
872 return retval; 915 return retval;
@@ -877,7 +920,7 @@ static int mxser_init(void)
877 920
878static void mxser_do_softint(void *private_) 921static void mxser_do_softint(void *private_)
879{ 922{
880 struct mxser_struct *info = (struct mxser_struct *) private_; 923 struct mxser_struct *info = private_;
881 struct tty_struct *tty; 924 struct tty_struct *tty;
882 925
883 tty = info->tty; 926 tty = info->tty;
@@ -926,7 +969,7 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
926 return -ENODEV; 969 return -ENODEV;
927 info = mxvar_table + line; 970 info = mxvar_table + line;
928 if (!info->base) 971 if (!info->base)
929 return (-ENODEV); 972 return -ENODEV;
930 973
931 tty->driver_data = info; 974 tty->driver_data = info;
932 info->tty = tty; 975 info->tty = tty;
@@ -935,11 +978,11 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
935 */ 978 */
936 retval = mxser_startup(info); 979 retval = mxser_startup(info);
937 if (retval) 980 if (retval)
938 return (retval); 981 return retval;
939 982
940 retval = mxser_block_til_ready(tty, filp, info); 983 retval = mxser_block_til_ready(tty, filp, info);
941 if (retval) 984 if (retval)
942 return (retval); 985 return retval;
943 986
944 info->count++; 987 info->count++;
945 988
@@ -955,11 +998,12 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
955 info->pgrp = process_group(current); 998 info->pgrp = process_group(current);
956 clear_bit(TTY_DONT_FLIP, &tty->flags); 999 clear_bit(TTY_DONT_FLIP, &tty->flags);
957 1000
958 //status = mxser_get_msr(info->base, 0, info->port); 1001 /*
959 //mxser_check_modem_status(info, status); 1002 status = mxser_get_msr(info->base, 0, info->port);
1003 mxser_check_modem_status(info, status);
1004 */
960 1005
961/* unmark here for very high baud rate (ex. 921600 bps) used 1006/* unmark here for very high baud rate (ex. 921600 bps) used */
962*/
963 tty->low_latency = 1; 1007 tty->low_latency = 1;
964 return 0; 1008 return 0;
965} 1009}
@@ -972,7 +1016,7 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
972 */ 1016 */
973static void mxser_close(struct tty_struct *tty, struct file *filp) 1017static void mxser_close(struct tty_struct *tty, struct file *filp)
974{ 1018{
975 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1019 struct mxser_struct *info = tty->driver_data;
976 1020
977 unsigned long timeout; 1021 unsigned long timeout;
978 unsigned long flags; 1022 unsigned long flags;
@@ -997,11 +1041,13 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
997 * one, we've got real problems, since it means the 1041 * one, we've got real problems, since it means the
998 * serial port won't be shutdown. 1042 * serial port won't be shutdown.
999 */ 1043 */
1000 printk(KERN_ERR "mxser_close: bad serial port count; tty->count is 1, " "info->count is %d\n", info->count); 1044 printk(KERN_ERR "mxser_close: bad serial port count; "
1045 "tty->count is 1, info->count is %d\n", info->count);
1001 info->count = 1; 1046 info->count = 1;
1002 } 1047 }
1003 if (--info->count < 0) { 1048 if (--info->count < 0) {
1004 printk(KERN_ERR "mxser_close: bad serial port count for ttys%d: %d\n", info->port, info->count); 1049 printk(KERN_ERR "mxser_close: bad serial port count for "
1050 "ttys%d: %d\n", info->port, info->count);
1005 info->count = 0; 1051 info->count = 0;
1006 } 1052 }
1007 if (info->count) { 1053 if (info->count) {
@@ -1056,7 +1102,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
1056 1102
1057 ld = tty_ldisc_ref(tty); 1103 ld = tty_ldisc_ref(tty);
1058 if (ld) { 1104 if (ld) {
1059 if(ld->flush_buffer) 1105 if (ld->flush_buffer)
1060 ld->flush_buffer(tty); 1106 ld->flush_buffer(tty);
1061 tty_ldisc_deref(ld); 1107 tty_ldisc_deref(ld);
1062 } 1108 }
@@ -1078,31 +1124,34 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
1078static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count) 1124static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
1079{ 1125{
1080 int c, total = 0; 1126 int c, total = 0;
1081 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1127 struct mxser_struct *info = tty->driver_data;
1082 unsigned long flags; 1128 unsigned long flags;
1083 1129
1084 if (!tty || !info->xmit_buf) 1130 if (!info->xmit_buf)
1085 return (0); 1131 return 0;
1086 1132
1087 while (1) { 1133 while (1) {
1088 c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, SERIAL_XMIT_SIZE - info->xmit_head)); 1134 c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
1135 SERIAL_XMIT_SIZE - info->xmit_head));
1089 if (c <= 0) 1136 if (c <= 0)
1090 break; 1137 break;
1091 1138
1092 memcpy(info->xmit_buf + info->xmit_head, buf, c); 1139 memcpy(info->xmit_buf + info->xmit_head, buf, c);
1093 spin_lock_irqsave(&info->slock, flags); 1140 spin_lock_irqsave(&info->slock, flags);
1094 info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE - 1); 1141 info->xmit_head = (info->xmit_head + c) &
1142 (SERIAL_XMIT_SIZE - 1);
1095 info->xmit_cnt += c; 1143 info->xmit_cnt += c;
1096 spin_unlock_irqrestore(&info->slock, flags); 1144 spin_unlock_irqrestore(&info->slock, flags);
1097 1145
1098 buf += c; 1146 buf += c;
1099 count -= c; 1147 count -= c;
1100 total += c; 1148 total += c;
1101
1102 } 1149 }
1103 1150
1104 if (info->xmit_cnt && !tty->stopped && !(info->IER & UART_IER_THRI)) { 1151 if (info->xmit_cnt && !tty->stopped && !(info->IER & UART_IER_THRI)) {
1105 if (!tty->hw_stopped || (info->type == PORT_16550A) || (info->IsMoxaMustChipFlag)) { 1152 if (!tty->hw_stopped ||
1153 (info->type == PORT_16550A) ||
1154 (info->IsMoxaMustChipFlag)) {
1106 spin_lock_irqsave(&info->slock, flags); 1155 spin_lock_irqsave(&info->slock, flags);
1107 info->IER |= UART_IER_THRI; 1156 info->IER |= UART_IER_THRI;
1108 outb(info->IER, info->base + UART_IER); 1157 outb(info->IER, info->base + UART_IER);
@@ -1114,10 +1163,10 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
1114 1163
1115static void mxser_put_char(struct tty_struct *tty, unsigned char ch) 1164static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
1116{ 1165{
1117 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1166 struct mxser_struct *info = tty->driver_data;
1118 unsigned long flags; 1167 unsigned long flags;
1119 1168
1120 if (!tty || !info->xmit_buf) 1169 if (!info->xmit_buf)
1121 return; 1170 return;
1122 1171
1123 if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1) 1172 if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
@@ -1129,7 +1178,9 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
1129 info->xmit_cnt++; 1178 info->xmit_cnt++;
1130 spin_unlock_irqrestore(&info->slock, flags); 1179 spin_unlock_irqrestore(&info->slock, flags);
1131 if (!tty->stopped && !(info->IER & UART_IER_THRI)) { 1180 if (!tty->stopped && !(info->IER & UART_IER_THRI)) {
1132 if (!tty->hw_stopped || (info->type == PORT_16550A) || info->IsMoxaMustChipFlag) { 1181 if (!tty->hw_stopped ||
1182 (info->type == PORT_16550A) ||
1183 info->IsMoxaMustChipFlag) {
1133 spin_lock_irqsave(&info->slock, flags); 1184 spin_lock_irqsave(&info->slock, flags);
1134 info->IER |= UART_IER_THRI; 1185 info->IER |= UART_IER_THRI;
1135 outb(info->IER, info->base + UART_IER); 1186 outb(info->IER, info->base + UART_IER);
@@ -1141,10 +1192,16 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
1141 1192
1142static void mxser_flush_chars(struct tty_struct *tty) 1193static void mxser_flush_chars(struct tty_struct *tty)
1143{ 1194{
1144 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1195 struct mxser_struct *info = tty->driver_data;
1145 unsigned long flags; 1196 unsigned long flags;
1146 1197
1147 if (info->xmit_cnt <= 0 || tty->stopped || !info->xmit_buf || (tty->hw_stopped && (info->type != PORT_16550A) && (!info->IsMoxaMustChipFlag))) 1198 if (info->xmit_cnt <= 0 ||
1199 tty->stopped ||
1200 !info->xmit_buf ||
1201 (tty->hw_stopped &&
1202 (info->type != PORT_16550A) &&
1203 (!info->IsMoxaMustChipFlag)
1204 ))
1148 return; 1205 return;
1149 1206
1150 spin_lock_irqsave(&info->slock, flags); 1207 spin_lock_irqsave(&info->slock, flags);
@@ -1157,24 +1214,24 @@ static void mxser_flush_chars(struct tty_struct *tty)
1157 1214
1158static int mxser_write_room(struct tty_struct *tty) 1215static int mxser_write_room(struct tty_struct *tty)
1159{ 1216{
1160 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1217 struct mxser_struct *info = tty->driver_data;
1161 int ret; 1218 int ret;
1162 1219
1163 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; 1220 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
1164 if (ret < 0) 1221 if (ret < 0)
1165 ret = 0; 1222 ret = 0;
1166 return (ret); 1223 return ret;
1167} 1224}
1168 1225
1169static int mxser_chars_in_buffer(struct tty_struct *tty) 1226static int mxser_chars_in_buffer(struct tty_struct *tty)
1170{ 1227{
1171 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1228 struct mxser_struct *info = tty->driver_data;
1172 return info->xmit_cnt; 1229 return info->xmit_cnt;
1173} 1230}
1174 1231
1175static void mxser_flush_buffer(struct tty_struct *tty) 1232static void mxser_flush_buffer(struct tty_struct *tty)
1176{ 1233{
1177 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1234 struct mxser_struct *info = tty->driver_data;
1178 char fcr; 1235 char fcr;
1179 unsigned long flags; 1236 unsigned long flags;
1180 1237
@@ -1184,7 +1241,8 @@ static void mxser_flush_buffer(struct tty_struct *tty)
1184 1241
1185 /* below added by shinhay */ 1242 /* below added by shinhay */
1186 fcr = inb(info->base + UART_FCR); 1243 fcr = inb(info->base + UART_FCR);
1187 outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), info->base + UART_FCR); 1244 outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
1245 info->base + UART_FCR);
1188 outb(fcr, info->base + UART_FCR); 1246 outb(fcr, info->base + UART_FCR);
1189 1247
1190 spin_unlock_irqrestore(&info->slock, flags); 1248 spin_unlock_irqrestore(&info->slock, flags);
@@ -1197,7 +1255,7 @@ static void mxser_flush_buffer(struct tty_struct *tty)
1197 1255
1198static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) 1256static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
1199{ 1257{
1200 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1258 struct mxser_struct *info = tty->driver_data;
1201 int retval; 1259 int retval;
1202 struct async_icount cprev, cnow; /* kernel counter temps */ 1260 struct async_icount cprev, cnow; /* kernel counter temps */
1203 struct serial_icounter_struct __user *p_cuser; 1261 struct serial_icounter_struct __user *p_cuser;
@@ -1206,9 +1264,9 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1206 void __user *argp = (void __user *)arg; 1264 void __user *argp = (void __user *)arg;
1207 1265
1208 if (tty->index == MXSER_PORTS) 1266 if (tty->index == MXSER_PORTS)
1209 return (mxser_ioctl_special(cmd, argp)); 1267 return mxser_ioctl_special(cmd, argp);
1210 1268
1211 // following add by Victor Yu. 01-05-2004 1269 /* following add by Victor Yu. 01-05-2004 */
1212 if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) { 1270 if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) {
1213 int opmode, p; 1271 int opmode, p;
1214 static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f }; 1272 static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f };
@@ -1219,7 +1277,10 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1219 if (cmd == MOXA_SET_OP_MODE) { 1277 if (cmd == MOXA_SET_OP_MODE) {
1220 if (get_user(opmode, (int __user *) argp)) 1278 if (get_user(opmode, (int __user *) argp))
1221 return -EFAULT; 1279 return -EFAULT;
1222 if (opmode != RS232_MODE && opmode != RS485_2WIRE_MODE && opmode != RS422_MODE && opmode != RS485_4WIRE_MODE) 1280 if (opmode != RS232_MODE &&
1281 opmode != RS485_2WIRE_MODE &&
1282 opmode != RS422_MODE &&
1283 opmode != RS485_4WIRE_MODE)
1223 return -EFAULT; 1284 return -EFAULT;
1224 mask = ModeMask[p]; 1285 mask = ModeMask[p];
1225 shiftbit = p * 2; 1286 shiftbit = p * 2;
@@ -1236,36 +1297,36 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1236 } 1297 }
1237 return 0; 1298 return 0;
1238 } 1299 }
1239 // above add by Victor Yu. 01-05-2004 1300 /* above add by Victor Yu. 01-05-2004 */
1240 1301
1241 if ((cmd != TIOCGSERIAL) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 1302 if ((cmd != TIOCGSERIAL) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
1242 if (tty->flags & (1 << TTY_IO_ERROR)) 1303 if (tty->flags & (1 << TTY_IO_ERROR))
1243 return (-EIO); 1304 return -EIO;
1244 } 1305 }
1245 switch (cmd) { 1306 switch (cmd) {
1246 case TCSBRK: /* SVID version: non-zero arg --> no break */ 1307 case TCSBRK: /* SVID version: non-zero arg --> no break */
1247 retval = tty_check_change(tty); 1308 retval = tty_check_change(tty);
1248 if (retval) 1309 if (retval)
1249 return (retval); 1310 return retval;
1250 tty_wait_until_sent(tty, 0); 1311 tty_wait_until_sent(tty, 0);
1251 if (!arg) 1312 if (!arg)
1252 mxser_send_break(info, HZ / 4); /* 1/4 second */ 1313 mxser_send_break(info, HZ / 4); /* 1/4 second */
1253 return (0); 1314 return 0;
1254 case TCSBRKP: /* support for POSIX tcsendbreak() */ 1315 case TCSBRKP: /* support for POSIX tcsendbreak() */
1255 retval = tty_check_change(tty); 1316 retval = tty_check_change(tty);
1256 if (retval) 1317 if (retval)
1257 return (retval); 1318 return retval;
1258 tty_wait_until_sent(tty, 0); 1319 tty_wait_until_sent(tty, 0);
1259 mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4); 1320 mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
1260 return (0); 1321 return 0;
1261 case TIOCGSOFTCAR: 1322 case TIOCGSOFTCAR:
1262 return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp); 1323 return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
1263 case TIOCSSOFTCAR: 1324 case TIOCSSOFTCAR:
1264 if (get_user(templ, (unsigned long __user *) argp)) 1325 if (get_user(templ, (unsigned long __user *) argp))
1265 return -EFAULT; 1326 return -EFAULT;
1266 arg = templ; 1327 arg = templ;
1267 tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0)); 1328 tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
1268 return (0); 1329 return 0;
1269 case TIOCGSERIAL: 1330 case TIOCGSERIAL:
1270 return mxser_get_serial_info(info, argp); 1331 return mxser_get_serial_info(info, argp);
1271 case TIOCSSERIAL: 1332 case TIOCSSERIAL:
@@ -1278,7 +1339,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1278 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) 1339 * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
1279 * Caller should use TIOCGICOUNT to see which one it was 1340 * Caller should use TIOCGICOUNT to see which one it was
1280 */ 1341 */
1281 case TIOCMIWAIT:{ 1342 case TIOCMIWAIT: {
1282 DECLARE_WAITQUEUE(wait, current); 1343 DECLARE_WAITQUEUE(wait, current);
1283 int ret; 1344 int ret;
1284 spin_lock_irqsave(&info->slock, flags); 1345 spin_lock_irqsave(&info->slock, flags);
@@ -1292,7 +1353,14 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1292 spin_unlock_irqrestore(&info->slock, flags); 1353 spin_unlock_irqrestore(&info->slock, flags);
1293 1354
1294 set_current_state(TASK_INTERRUPTIBLE); 1355 set_current_state(TASK_INTERRUPTIBLE);
1295 if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { 1356 if (((arg & TIOCM_RNG) &&
1357 (cnow.rng != cprev.rng)) ||
1358 ((arg & TIOCM_DSR) &&
1359 (cnow.dsr != cprev.dsr)) ||
1360 ((arg & TIOCM_CD) &&
1361 (cnow.dcd != cprev.dcd)) ||
1362 ((arg & TIOCM_CTS) &&
1363 (cnow.cts != cprev.cts))) {
1296 ret = 0; 1364 ret = 0;
1297 break; 1365 break;
1298 } 1366 }
@@ -1338,21 +1406,18 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1338 put_user(cnow.dsr, &p_cuser->dsr); 1406 put_user(cnow.dsr, &p_cuser->dsr);
1339 put_user(cnow.rng, &p_cuser->rng); 1407 put_user(cnow.rng, &p_cuser->rng);
1340 put_user(cnow.dcd, &p_cuser->dcd); 1408 put_user(cnow.dcd, &p_cuser->dcd);
1341
1342/* */
1343 return 0; 1409 return 0;
1344 case MOXA_HighSpeedOn: 1410 case MOXA_HighSpeedOn:
1345 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *) argp); 1411 return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
1346 1412 case MOXA_SDS_RSTICOUNTER: {
1347 case MOXA_SDS_RSTICOUNTER:{
1348 info->mon_data.rxcnt = 0; 1413 info->mon_data.rxcnt = 0;
1349 info->mon_data.txcnt = 0; 1414 info->mon_data.txcnt = 0;
1350 return 0; 1415 return 0;
1351 } 1416 }
1352// (above) added by James. 1417/* (above) added by James. */
1353 case MOXA_ASPP_SETBAUD:{ 1418 case MOXA_ASPP_SETBAUD:{
1354 long baud; 1419 long baud;
1355 if (get_user(baud, (long __user *) argp)) 1420 if (get_user(baud, (long __user *)argp))
1356 return -EFAULT; 1421 return -EFAULT;
1357 mxser_set_baud(info, baud); 1422 mxser_set_baud(info, baud);
1358 return 0; 1423 return 0;
@@ -1377,9 +1442,10 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1377 1442
1378 return 0; 1443 return 0;
1379 } 1444 }
1380 case MOXA_ASPP_MON:{ 1445 case MOXA_ASPP_MON: {
1381 int mcr, status; 1446 int mcr, status;
1382// info->mon_data.ser_param = tty->termios->c_cflag; 1447
1448 /* info->mon_data.ser_param = tty->termios->c_cflag; */
1383 1449
1384 status = mxser_get_msr(info->base, 1, info->port, info); 1450 status = mxser_get_msr(info->base, 1, info->port, info);
1385 mxser_check_modem_status(info, status); 1451 mxser_check_modem_status(info, status);
@@ -1400,25 +1466,25 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
1400 else 1466 else
1401 info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD; 1467 info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
1402 1468
1403 1469 if (copy_to_user(argp, &info->mon_data,
1404 if (copy_to_user(argp, &info->mon_data, sizeof(struct mxser_mon))) 1470 sizeof(struct mxser_mon)))
1405 return -EFAULT; 1471 return -EFAULT;
1406 1472
1407 return 0; 1473 return 0;
1408
1409 } 1474 }
1410 1475
1411 case MOXA_ASPP_LSTATUS:{ 1476 case MOXA_ASPP_LSTATUS: {
1412 if (copy_to_user(argp, &info->err_shadow, sizeof(unsigned char))) 1477 if (copy_to_user(argp, &info->err_shadow,
1478 sizeof(unsigned char)))
1413 return -EFAULT; 1479 return -EFAULT;
1414 1480
1415 info->err_shadow = 0; 1481 info->err_shadow = 0;
1416 return 0; 1482 return 0;
1417
1418 } 1483 }
1419 case MOXA_SET_BAUD_METHOD:{ 1484 case MOXA_SET_BAUD_METHOD: {
1420 int method; 1485 int method;
1421 if (get_user(method, (int __user *) argp)) 1486
1487 if (get_user(method, (int __user *)argp))
1422 return -EFAULT; 1488 return -EFAULT;
1423 mxser_set_baud_method[info->port] = method; 1489 mxser_set_baud_method[info->port] = method;
1424 if (copy_to_user(argp, &method, sizeof(int))) 1490 if (copy_to_user(argp, &method, sizeof(int)))
@@ -1442,7 +1508,8 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1442 1508
1443 switch (cmd) { 1509 switch (cmd) {
1444 case MOXA_GET_CONF: 1510 case MOXA_GET_CONF:
1445 if (copy_to_user(argp, mxsercfg, sizeof(struct mxser_hwconf) * 4)) 1511 if (copy_to_user(argp, mxsercfg,
1512 sizeof(struct mxser_hwconf) * 4))
1446 return -EFAULT; 1513 return -EFAULT;
1447 return 0; 1514 return 0;
1448 case MOXA_GET_MAJOR: 1515 case MOXA_GET_MAJOR:
@@ -1461,11 +1528,11 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1461 if (mxvar_table[i].base) 1528 if (mxvar_table[i].base)
1462 result |= (1 << i); 1529 result |= (1 << i);
1463 } 1530 }
1464 return put_user(result, (unsigned long __user *) argp); 1531 return put_user(result, (unsigned long __user *)argp);
1465 case MOXA_GETDATACOUNT: 1532 case MOXA_GETDATACOUNT:
1466 if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log))) 1533 if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
1467 return -EFAULT; 1534 return -EFAULT;
1468 return (0); 1535 return 0;
1469 case MOXA_GETMSTATUS: 1536 case MOXA_GETMSTATUS:
1470 for (i = 0; i < MXSER_PORTS; i++) { 1537 for (i = 0; i < MXSER_PORTS; i++) {
1471 GMStatus[i].ri = 0; 1538 GMStatus[i].ri = 0;
@@ -1498,22 +1565,26 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1498 else 1565 else
1499 GMStatus[i].cts = 0; 1566 GMStatus[i].cts = 0;
1500 } 1567 }
1501 if (copy_to_user(argp, GMStatus, sizeof(struct mxser_mstatus) * MXSER_PORTS)) 1568 if (copy_to_user(argp, GMStatus,
1569 sizeof(struct mxser_mstatus) * MXSER_PORTS))
1502 return -EFAULT; 1570 return -EFAULT;
1503 return 0; 1571 return 0;
1504 case MOXA_ASPP_MON_EXT:{ 1572 case MOXA_ASPP_MON_EXT: {
1505 int status; 1573 int status;
1506 int opmode, p; 1574 int opmode, p;
1507 int shiftbit; 1575 int shiftbit;
1508 unsigned cflag, iflag; 1576 unsigned cflag, iflag;
1509 1577
1510 for (i = 0; i < MXSER_PORTS; i++) { 1578 for (i = 0; i < MXSER_PORTS; i++) {
1511
1512 if (!mxvar_table[i].base) 1579 if (!mxvar_table[i].base)
1513 continue; 1580 continue;
1514 1581
1515 status = mxser_get_msr(mxvar_table[i].base, 0, i, &(mxvar_table[i])); 1582 status = mxser_get_msr(mxvar_table[i].base, 0,
1516// mxser_check_modem_status(&mxvar_table[i], status); 1583 i, &(mxvar_table[i]));
1584 /*
1585 mxser_check_modem_status(&mxvar_table[i],
1586 status);
1587 */
1517 if (status & UART_MSR_TERI) 1588 if (status & UART_MSR_TERI)
1518 mxvar_table[i].icount.rng++; 1589 mxvar_table[i].icount.rng++;
1519 if (status & UART_MSR_DDSR) 1590 if (status & UART_MSR_DDSR)
@@ -1578,75 +1649,76 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1578 return 0; 1649 return 0;
1579} 1650}
1580 1651
1581
1582static void mxser_stoprx(struct tty_struct *tty) 1652static void mxser_stoprx(struct tty_struct *tty)
1583{ 1653{
1584 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1654 struct mxser_struct *info = tty->driver_data;
1585 //unsigned long flags; 1655 /* unsigned long flags; */
1586
1587 1656
1588 info->ldisc_stop_rx = 1; 1657 info->ldisc_stop_rx = 1;
1589 if (I_IXOFF(tty)) { 1658 if (I_IXOFF(tty)) {
1590 1659 /* MX_LOCK(&info->slock); */
1591 //MX_LOCK(&info->slock); 1660 /* following add by Victor Yu. 09-02-2002 */
1592 // following add by Victor Yu. 09-02-2002
1593 if (info->IsMoxaMustChipFlag) { 1661 if (info->IsMoxaMustChipFlag) {
1594 info->IER &= ~MOXA_MUST_RECV_ISR; 1662 info->IER &= ~MOXA_MUST_RECV_ISR;
1595 outb(info->IER, info->base + UART_IER); 1663 outb(info->IER, info->base + UART_IER);
1596 } else { 1664 } else {
1597 // above add by Victor Yu. 09-02-2002 1665 /* above add by Victor Yu. 09-02-2002 */
1598
1599 info->x_char = STOP_CHAR(tty); 1666 info->x_char = STOP_CHAR(tty);
1600 // outb(info->IER, 0); // mask by Victor Yu. 09-02-2002 1667 /* mask by Victor Yu. 09-02-2002 */
1668 /* outb(info->IER, 0); */
1601 outb(0, info->base + UART_IER); 1669 outb(0, info->base + UART_IER);
1602 info->IER |= UART_IER_THRI; 1670 info->IER |= UART_IER_THRI;
1603 outb(info->IER, info->base + UART_IER); /* force Tx interrupt */ 1671 /* force Tx interrupt */
1604 } // add by Victor Yu. 09-02-2002 1672 outb(info->IER, info->base + UART_IER);
1605 //MX_UNLOCK(&info->slock); 1673 } /* add by Victor Yu. 09-02-2002 */
1674 /* MX_UNLOCK(&info->slock); */
1606 } 1675 }
1607 1676
1608 if (info->tty->termios->c_cflag & CRTSCTS) { 1677 if (info->tty->termios->c_cflag & CRTSCTS) {
1609 //MX_LOCK(&info->slock); 1678 /* MX_LOCK(&info->slock); */
1610 info->MCR &= ~UART_MCR_RTS; 1679 info->MCR &= ~UART_MCR_RTS;
1611 outb(info->MCR, info->base + UART_MCR); 1680 outb(info->MCR, info->base + UART_MCR);
1612 //MX_UNLOCK(&info->slock); 1681 /* MX_UNLOCK(&info->slock); */
1613 } 1682 }
1614} 1683}
1615 1684
1616static void mxser_startrx(struct tty_struct *tty) 1685static void mxser_startrx(struct tty_struct *tty)
1617{ 1686{
1618 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1687 struct mxser_struct *info = tty->driver_data;
1619 //unsigned long flags; 1688 /* unsigned long flags; */
1620 1689
1621 info->ldisc_stop_rx = 0; 1690 info->ldisc_stop_rx = 0;
1622 if (I_IXOFF(tty)) { 1691 if (I_IXOFF(tty)) {
1623 if (info->x_char) 1692 if (info->x_char)
1624 info->x_char = 0; 1693 info->x_char = 0;
1625 else { 1694 else {
1626 //MX_LOCK(&info->slock); 1695 /* MX_LOCK(&info->slock); */
1627 1696
1628 // following add by Victor Yu. 09-02-2002 1697 /* following add by Victor Yu. 09-02-2002 */
1629 if (info->IsMoxaMustChipFlag) { 1698 if (info->IsMoxaMustChipFlag) {
1630 info->IER |= MOXA_MUST_RECV_ISR; 1699 info->IER |= MOXA_MUST_RECV_ISR;
1631 outb(info->IER, info->base + UART_IER); 1700 outb(info->IER, info->base + UART_IER);
1632 } else { 1701 } else {
1633 // above add by Victor Yu. 09-02-2002 1702 /* above add by Victor Yu. 09-02-2002 */
1634 1703
1635 info->x_char = START_CHAR(tty); 1704 info->x_char = START_CHAR(tty);
1636 // outb(info->IER, 0); // mask by Victor Yu. 09-02-2002 1705 /* mask by Victor Yu. 09-02-2002 */
1637 outb(0, info->base + UART_IER); // add by Victor Yu. 09-02-2002 1706 /* outb(info->IER, 0); */
1638 info->IER |= UART_IER_THRI; /* force Tx interrupt */ 1707 /* add by Victor Yu. 09-02-2002 */
1708 outb(0, info->base + UART_IER);
1709 /* force Tx interrupt */
1710 info->IER |= UART_IER_THRI;
1639 outb(info->IER, info->base + UART_IER); 1711 outb(info->IER, info->base + UART_IER);
1640 } // add by Victor Yu. 09-02-2002 1712 } /* add by Victor Yu. 09-02-2002 */
1641 //MX_UNLOCK(&info->slock); 1713 /* MX_UNLOCK(&info->slock); */
1642 } 1714 }
1643 } 1715 }
1644 1716
1645 if (info->tty->termios->c_cflag & CRTSCTS) { 1717 if (info->tty->termios->c_cflag & CRTSCTS) {
1646 //MX_LOCK(&info->slock); 1718 /* MX_LOCK(&info->slock); */
1647 info->MCR |= UART_MCR_RTS; 1719 info->MCR |= UART_MCR_RTS;
1648 outb(info->MCR, info->base + UART_MCR); 1720 outb(info->MCR, info->base + UART_MCR);
1649 //MX_UNLOCK(&info->slock); 1721 /* MX_UNLOCK(&info->slock); */
1650 } 1722 }
1651} 1723}
1652 1724
@@ -1656,48 +1728,53 @@ static void mxser_startrx(struct tty_struct *tty)
1656 */ 1728 */
1657static void mxser_throttle(struct tty_struct *tty) 1729static void mxser_throttle(struct tty_struct *tty)
1658{ 1730{
1659 //struct mxser_struct *info = (struct mxser_struct *)tty->driver_data; 1731 /* struct mxser_struct *info = tty->driver_data; */
1660 //unsigned long flags; 1732 /* unsigned long flags; */
1661 //MX_LOCK(&info->slock); 1733
1734 /* MX_LOCK(&info->slock); */
1662 mxser_stoprx(tty); 1735 mxser_stoprx(tty);
1663 //MX_UNLOCK(&info->slock); 1736 /* MX_UNLOCK(&info->slock); */
1664} 1737}
1665 1738
1666static void mxser_unthrottle(struct tty_struct *tty) 1739static void mxser_unthrottle(struct tty_struct *tty)
1667{ 1740{
1668 //struct mxser_struct *info = (struct mxser_struct *)tty->driver_data; 1741 /* struct mxser_struct *info = tty->driver_data; */
1669 //unsigned long flags; 1742 /* unsigned long flags; */
1670 //MX_LOCK(&info->slock); 1743
1744 /* MX_LOCK(&info->slock); */
1671 mxser_startrx(tty); 1745 mxser_startrx(tty);
1672 //MX_UNLOCK(&info->slock); 1746 /* MX_UNLOCK(&info->slock); */
1673} 1747}
1674 1748
1675static void mxser_set_termios(struct tty_struct *tty, struct termios *old_termios) 1749static void mxser_set_termios(struct tty_struct *tty, struct termios *old_termios)
1676{ 1750{
1677 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1751 struct mxser_struct *info = tty->driver_data;
1678 unsigned long flags; 1752 unsigned long flags;
1679 1753
1680 if ((tty->termios->c_cflag != old_termios->c_cflag) || (RELEVANT_IFLAG(tty->termios->c_iflag) != RELEVANT_IFLAG(old_termios->c_iflag))) { 1754 if ((tty->termios->c_cflag != old_termios->c_cflag) ||
1755 (RELEVANT_IFLAG(tty->termios->c_iflag) != RELEVANT_IFLAG(old_termios->c_iflag))) {
1681 1756
1682 mxser_change_speed(info, old_termios); 1757 mxser_change_speed(info, old_termios);
1683 1758
1684 if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios->c_cflag & CRTSCTS)) { 1759 if ((old_termios->c_cflag & CRTSCTS) &&
1760 !(tty->termios->c_cflag & CRTSCTS)) {
1685 tty->hw_stopped = 0; 1761 tty->hw_stopped = 0;
1686 mxser_start(tty); 1762 mxser_start(tty);
1687 } 1763 }
1688 } 1764 }
1689 1765
1690/* Handle sw stopped */ 1766/* Handle sw stopped */
1691 if ((old_termios->c_iflag & IXON) && !(tty->termios->c_iflag & IXON)) { 1767 if ((old_termios->c_iflag & IXON) &&
1768 !(tty->termios->c_iflag & IXON)) {
1692 tty->stopped = 0; 1769 tty->stopped = 0;
1693 1770
1694 // following add by Victor Yu. 09-02-2002 1771 /* following add by Victor Yu. 09-02-2002 */
1695 if (info->IsMoxaMustChipFlag) { 1772 if (info->IsMoxaMustChipFlag) {
1696 spin_lock_irqsave(&info->slock, flags); 1773 spin_lock_irqsave(&info->slock, flags);
1697 DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base); 1774 DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base);
1698 spin_unlock_irqrestore(&info->slock, flags); 1775 spin_unlock_irqrestore(&info->slock, flags);
1699 } 1776 }
1700 // above add by Victor Yu. 09-02-2002 1777 /* above add by Victor Yu. 09-02-2002 */
1701 1778
1702 mxser_start(tty); 1779 mxser_start(tty);
1703 } 1780 }
@@ -1711,7 +1788,7 @@ static void mxser_set_termios(struct tty_struct *tty, struct termios *old_termio
1711 */ 1788 */
1712static void mxser_stop(struct tty_struct *tty) 1789static void mxser_stop(struct tty_struct *tty)
1713{ 1790{
1714 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1791 struct mxser_struct *info = tty->driver_data;
1715 unsigned long flags; 1792 unsigned long flags;
1716 1793
1717 spin_lock_irqsave(&info->slock, flags); 1794 spin_lock_irqsave(&info->slock, flags);
@@ -1724,7 +1801,7 @@ static void mxser_stop(struct tty_struct *tty)
1724 1801
1725static void mxser_start(struct tty_struct *tty) 1802static void mxser_start(struct tty_struct *tty)
1726{ 1803{
1727 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1804 struct mxser_struct *info = tty->driver_data;
1728 unsigned long flags; 1805 unsigned long flags;
1729 1806
1730 spin_lock_irqsave(&info->slock, flags); 1807 spin_lock_irqsave(&info->slock, flags);
@@ -1740,7 +1817,7 @@ static void mxser_start(struct tty_struct *tty)
1740 */ 1817 */
1741static void mxser_wait_until_sent(struct tty_struct *tty, int timeout) 1818static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
1742{ 1819{
1743 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1820 struct mxser_struct *info = tty->driver_data;
1744 unsigned long orig_jiffies, char_time; 1821 unsigned long orig_jiffies, char_time;
1745 int lsr; 1822 int lsr;
1746 1823
@@ -1777,7 +1854,8 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
1777 if (!timeout || timeout > 2 * info->timeout) 1854 if (!timeout || timeout > 2 * info->timeout)
1778 timeout = 2 * info->timeout; 1855 timeout = 2 * info->timeout;
1779#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT 1856#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
1780 printk(KERN_DEBUG "In rs_wait_until_sent(%d) check=%lu...", timeout, char_time); 1857 printk(KERN_DEBUG "In rs_wait_until_sent(%d) check=%lu...",
1858 timeout, char_time);
1781 printk("jiff=%lu...", jiffies); 1859 printk("jiff=%lu...", jiffies);
1782#endif 1860#endif
1783 while (!((lsr = inb(info->base + UART_LSR)) & UART_LSR_TEMT)) { 1861 while (!((lsr = inb(info->base + UART_LSR)) & UART_LSR_TEMT)) {
@@ -1803,7 +1881,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
1803 */ 1881 */
1804void mxser_hangup(struct tty_struct *tty) 1882void mxser_hangup(struct tty_struct *tty)
1805{ 1883{
1806 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1884 struct mxser_struct *info = tty->driver_data;
1807 1885
1808 mxser_flush_buffer(tty); 1886 mxser_flush_buffer(tty);
1809 mxser_shutdown(info); 1887 mxser_shutdown(info);
@@ -1815,24 +1893,26 @@ void mxser_hangup(struct tty_struct *tty)
1815} 1893}
1816 1894
1817 1895
1818// added by James 03-12-2004. 1896/* added by James 03-12-2004. */
1819/* 1897/*
1820 * mxser_rs_break() --- routine which turns the break handling on or off 1898 * mxser_rs_break() --- routine which turns the break handling on or off
1821 */ 1899 */
1822static void mxser_rs_break(struct tty_struct *tty, int break_state) 1900static void mxser_rs_break(struct tty_struct *tty, int break_state)
1823{ 1901{
1824 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 1902 struct mxser_struct *info = tty->driver_data;
1825 unsigned long flags; 1903 unsigned long flags;
1826 1904
1827 spin_lock_irqsave(&info->slock, flags); 1905 spin_lock_irqsave(&info->slock, flags);
1828 if (break_state == -1) 1906 if (break_state == -1)
1829 outb(inb(info->base + UART_LCR) | UART_LCR_SBC, info->base + UART_LCR); 1907 outb(inb(info->base + UART_LCR) | UART_LCR_SBC,
1908 info->base + UART_LCR);
1830 else 1909 else
1831 outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC, info->base + UART_LCR); 1910 outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC,
1911 info->base + UART_LCR);
1832 spin_unlock_irqrestore(&info->slock, flags); 1912 spin_unlock_irqrestore(&info->slock, flags);
1833} 1913}
1834 1914
1835// (above) added by James. 1915/* (above) added by James. */
1836 1916
1837 1917
1838/* 1918/*
@@ -1848,7 +1928,7 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1848 int handled = IRQ_NONE; 1928 int handled = IRQ_NONE;
1849 1929
1850 port = NULL; 1930 port = NULL;
1851 //spin_lock(&gm_lock); 1931 /* spin_lock(&gm_lock); */
1852 1932
1853 for (i = 0; i < MXSER_BOARDS; i++) { 1933 for (i = 0; i < MXSER_BOARDS; i++) {
1854 if (dev_id == &(mxvar_table[i * MXSER_PORTS_PER_BOARD])) { 1934 if (dev_id == &(mxvar_table[i * MXSER_PORTS_PER_BOARD])) {
@@ -1857,29 +1937,25 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1857 } 1937 }
1858 } 1938 }
1859 1939
1860 if (i == MXSER_BOARDS) { 1940 if (i == MXSER_BOARDS)
1861 goto irq_stop; 1941 goto irq_stop;
1862 } 1942 if (port == 0)
1863 if (port == 0) {
1864 goto irq_stop; 1943 goto irq_stop;
1865 }
1866 max = mxser_numports[mxsercfg[i].board_type - 1]; 1944 max = mxser_numports[mxsercfg[i].board_type - 1];
1867 while (1) { 1945 while (1) {
1868 irqbits = inb(port->vector) & port->vectormask; 1946 irqbits = inb(port->vector) & port->vectormask;
1869 if (irqbits == port->vectormask) { 1947 if (irqbits == port->vectormask)
1870 break; 1948 break;
1871 }
1872 1949
1873 handled = IRQ_HANDLED; 1950 handled = IRQ_HANDLED;
1874 for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) { 1951 for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
1875 if (irqbits == port->vectormask) { 1952 if (irqbits == port->vectormask)
1876 break; 1953 break;
1877 }
1878 if (bits & irqbits) 1954 if (bits & irqbits)
1879 continue; 1955 continue;
1880 info = port + i; 1956 info = port + i;
1881 1957
1882 // following add by Victor Yu. 09-13-2002 1958 /* following add by Victor Yu. 09-13-2002 */
1883 iir = inb(info->base + UART_IIR); 1959 iir = inb(info->base + UART_IIR);
1884 if (iir & UART_IIR_NO_INT) 1960 if (iir & UART_IIR_NO_INT)
1885 continue; 1961 continue;
@@ -1890,9 +1966,9 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1890 inb(info->base + UART_MSR); 1966 inb(info->base + UART_MSR);
1891 continue; 1967 continue;
1892 } 1968 }
1893 // above add by Victor Yu. 09-13-2002 1969 /* above add by Victor Yu. 09-13-2002 */
1894 /* 1970 /*
1895 if ( info->tty->flip.count < TTY_FLIPBUF_SIZE/4 ){ 1971 if (info->tty->flip.count < TTY_FLIPBUF_SIZE / 4) {
1896 info->IER |= MOXA_MUST_RECV_ISR; 1972 info->IER |= MOXA_MUST_RECV_ISR;
1897 outb(info->IER, info->base + UART_IER); 1973 outb(info->IER, info->base + UART_IER);
1898 } 1974 }
@@ -1908,18 +1984,15 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1908 status = inb(info->base + UART_LSR) & info->read_status_mask; 1984 status = inb(info->base + UART_LSR) & info->read_status_mask;
1909 */ 1985 */
1910 1986
1911 // following add by Victor Yu. 09-02-2002 1987 /* following add by Victor Yu. 09-02-2002 */
1912 status = inb(info->base + UART_LSR); 1988 status = inb(info->base + UART_LSR);
1913 1989
1914 if (status & UART_LSR_PE) { 1990 if (status & UART_LSR_PE)
1915 info->err_shadow |= NPPI_NOTIFY_PARITY; 1991 info->err_shadow |= NPPI_NOTIFY_PARITY;
1916 } 1992 if (status & UART_LSR_FE)
1917 if (status & UART_LSR_FE) {
1918 info->err_shadow |= NPPI_NOTIFY_FRAMING; 1993 info->err_shadow |= NPPI_NOTIFY_FRAMING;
1919 } 1994 if (status & UART_LSR_OE)
1920 if (status & UART_LSR_OE) {
1921 info->err_shadow |= NPPI_NOTIFY_HW_OVERRUN; 1995 info->err_shadow |= NPPI_NOTIFY_HW_OVERRUN;
1922 }
1923 if (status & UART_LSR_BI) 1996 if (status & UART_LSR_BI)
1924 info->err_shadow |= NPPI_NOTIFY_BREAK; 1997 info->err_shadow |= NPPI_NOTIFY_BREAK;
1925 1998
@@ -1930,11 +2003,14 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1930 continue; 2003 continue;
1931 } 2004 }
1932 */ 2005 */
1933 if (iir == MOXA_MUST_IIR_GDA || iir == MOXA_MUST_IIR_RDA || iir == MOXA_MUST_IIR_RTO || iir == MOXA_MUST_IIR_LSR) 2006 if (iir == MOXA_MUST_IIR_GDA ||
2007 iir == MOXA_MUST_IIR_RDA ||
2008 iir == MOXA_MUST_IIR_RTO ||
2009 iir == MOXA_MUST_IIR_LSR)
1934 mxser_receive_chars(info, &status); 2010 mxser_receive_chars(info, &status);
1935 2011
1936 } else { 2012 } else {
1937 // above add by Victor Yu. 09-02-2002 2013 /* above add by Victor Yu. 09-02-2002 */
1938 2014
1939 status &= info->read_status_mask; 2015 status &= info->read_status_mask;
1940 if (status & UART_LSR_DR) 2016 if (status & UART_LSR_DR)
@@ -1944,13 +2020,13 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1944 if (msr & UART_MSR_ANY_DELTA) { 2020 if (msr & UART_MSR_ANY_DELTA) {
1945 mxser_check_modem_status(info, msr); 2021 mxser_check_modem_status(info, msr);
1946 } 2022 }
1947 // following add by Victor Yu. 09-13-2002 2023 /* following add by Victor Yu. 09-13-2002 */
1948 if (info->IsMoxaMustChipFlag) { 2024 if (info->IsMoxaMustChipFlag) {
1949 if ((iir == 0x02) && (status & UART_LSR_THRE)) { 2025 if ((iir == 0x02) && (status & UART_LSR_THRE)) {
1950 mxser_transmit_chars(info); 2026 mxser_transmit_chars(info);
1951 } 2027 }
1952 } else { 2028 } else {
1953 // above add by Victor Yu. 09-13-2002 2029 /* above add by Victor Yu. 09-13-2002 */
1954 2030
1955 if (status & UART_LSR_THRE) { 2031 if (status & UART_LSR_THRE) {
1956/* 8-2-99 by William 2032/* 8-2-99 by William
@@ -1966,7 +2042,7 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1966 } 2042 }
1967 2043
1968 irq_stop: 2044 irq_stop:
1969 //spin_unlock(&gm_lock); 2045 /* spin_unlock(&gm_lock); */
1970 return handled; 2046 return handled;
1971} 2047}
1972 2048
@@ -1984,56 +2060,58 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
1984 2060
1985 recv_room = tty->receive_room; 2061 recv_room = tty->receive_room;
1986 if ((recv_room == 0) && (!info->ldisc_stop_rx)) { 2062 if ((recv_room == 0) && (!info->ldisc_stop_rx)) {
1987 //mxser_throttle(tty); 2063 /* mxser_throttle(tty); */
1988 mxser_stoprx(tty); 2064 mxser_stoprx(tty);
1989 //return; 2065 /* return; */
1990 } 2066 }
1991 2067
1992 // following add by Victor Yu. 09-02-2002 2068 /* following add by Victor Yu. 09-02-2002 */
1993 if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) { 2069 if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
1994 2070
1995 if (*status & UART_LSR_SPECIAL) { 2071 if (*status & UART_LSR_SPECIAL) {
1996 goto intr_old; 2072 goto intr_old;
1997 } 2073 }
1998 // following add by Victor Yu. 02-11-2004 2074 /* following add by Victor Yu. 02-11-2004 */
1999 if (info->IsMoxaMustChipFlag == MOXA_MUST_MU860_HWID && (*status & MOXA_MUST_LSR_RERR)) 2075 if (info->IsMoxaMustChipFlag == MOXA_MUST_MU860_HWID &&
2076 (*status & MOXA_MUST_LSR_RERR))
2000 goto intr_old; 2077 goto intr_old;
2001 // above add by Victor Yu. 02-14-2004 2078 /* above add by Victor Yu. 02-14-2004 */
2002 if (*status & MOXA_MUST_LSR_RERR) 2079 if (*status & MOXA_MUST_LSR_RERR)
2003 goto intr_old; 2080 goto intr_old;
2004 2081
2005 gdl = inb(info->base + MOXA_MUST_GDL_REGISTER); 2082 gdl = inb(info->base + MOXA_MUST_GDL_REGISTER);
2006 2083
2007 if (info->IsMoxaMustChipFlag == MOXA_MUST_MU150_HWID) // add by Victor Yu. 02-11-2004 2084 /* add by Victor Yu. 02-11-2004 */
2085 if (info->IsMoxaMustChipFlag == MOXA_MUST_MU150_HWID)
2008 gdl &= MOXA_MUST_GDL_MASK; 2086 gdl &= MOXA_MUST_GDL_MASK;
2009 if (gdl >= recv_room) { 2087 if (gdl >= recv_room) {
2010 if (!info->ldisc_stop_rx) { 2088 if (!info->ldisc_stop_rx) {
2011 //mxser_throttle(tty); 2089 /* mxser_throttle(tty); */
2012 mxser_stoprx(tty); 2090 mxser_stoprx(tty);
2013 } 2091 }
2014 //return; 2092 /* return; */
2015 } 2093 }
2016 while (gdl--) { 2094 while (gdl--) {
2017 ch = inb(info->base + UART_RX); 2095 ch = inb(info->base + UART_RX);
2018 tty_insert_flip_char(tty, ch, 0); 2096 tty_insert_flip_char(tty, ch, 0);
2019 cnt++; 2097 cnt++;
2020 /* 2098 /*
2021 if((cnt>=HI_WATER) && (info->stop_rx==0)){ 2099 if ((cnt >= HI_WATER) && (info->stop_rx == 0)) {
2022 mxser_stoprx(tty); 2100 mxser_stoprx(tty);
2023 info->stop_rx=1; 2101 info->stop_rx = 1;
2024 break; 2102 break;
2025 } */ 2103 } */
2026 } 2104 }
2027 goto end_intr; 2105 goto end_intr;
2028 } 2106 }
2029intr_old: 2107 intr_old:
2030 // above add by Victor Yu. 09-02-2002 2108 /* above add by Victor Yu. 09-02-2002 */
2031 2109
2032 do { 2110 do {
2033 if (max-- < 0) 2111 if (max-- < 0)
2034 break; 2112 break;
2035 /* 2113 /*
2036 if((cnt>=HI_WATER) && (info->stop_rx==0)){ 2114 if ((cnt >= HI_WATER) && (info->stop_rx == 0)) {
2037 mxser_stoprx(tty); 2115 mxser_stoprx(tty);
2038 info->stop_rx=1; 2116 info->stop_rx=1;
2039 break; 2117 break;
@@ -2041,11 +2119,11 @@ intr_old:
2041 */ 2119 */
2042 2120
2043 ch = inb(info->base + UART_RX); 2121 ch = inb(info->base + UART_RX);
2044 // following add by Victor Yu. 09-02-2002 2122 /* following add by Victor Yu. 09-02-2002 */
2045 if (info->IsMoxaMustChipFlag && (*status & UART_LSR_OE) /*&& !(*status&UART_LSR_DR) */ ) 2123 if (info->IsMoxaMustChipFlag && (*status & UART_LSR_OE) /*&& !(*status&UART_LSR_DR) */ )
2046 outb(0x23, info->base + UART_FCR); 2124 outb(0x23, info->base + UART_FCR);
2047 *status &= info->read_status_mask; 2125 *status &= info->read_status_mask;
2048 // above add by Victor Yu. 09-02-2002 2126 /* above add by Victor Yu. 09-02-2002 */
2049 if (*status & info->ignore_status_mask) { 2127 if (*status & info->ignore_status_mask) {
2050 if (++ignored > 100) 2128 if (++ignored > 100)
2051 break; 2129 break;
@@ -2080,7 +2158,7 @@ intr_old:
2080 cnt++; 2158 cnt++;
2081 if (cnt >= recv_room) { 2159 if (cnt >= recv_room) {
2082 if (!info->ldisc_stop_rx) { 2160 if (!info->ldisc_stop_rx) {
2083 //mxser_throttle(tty); 2161 /* mxser_throttle(tty); */
2084 mxser_stoprx(tty); 2162 mxser_stoprx(tty);
2085 } 2163 }
2086 break; 2164 break;
@@ -2088,21 +2166,20 @@ intr_old:
2088 2166
2089 } 2167 }
2090 2168
2091 // following add by Victor Yu. 09-02-2002 2169 /* following add by Victor Yu. 09-02-2002 */
2092 if (info->IsMoxaMustChipFlag) 2170 if (info->IsMoxaMustChipFlag)
2093 break; 2171 break;
2094 // above add by Victor Yu. 09-02-2002 2172 /* above add by Victor Yu. 09-02-2002 */
2095 2173
2096 /* mask by Victor Yu. 09-02-2002 2174 /* mask by Victor Yu. 09-02-2002
2097 *status = inb(info->base + UART_LSR) & info->read_status_mask; 2175 *status = inb(info->base + UART_LSR) & info->read_status_mask;
2098 */ 2176 */
2099 // following add by Victor Yu. 09-02-2002 2177 /* following add by Victor Yu. 09-02-2002 */
2100 *status = inb(info->base + UART_LSR); 2178 *status = inb(info->base + UART_LSR);
2101 // above add by Victor Yu. 09-02-2002 2179 /* above add by Victor Yu. 09-02-2002 */
2102 } while (*status & UART_LSR_DR); 2180 } while (*status & UART_LSR_DR);
2103 2181
2104end_intr: // add by Victor Yu. 09-02-2002 2182end_intr: /* add by Victor Yu. 09-02-2002 */
2105
2106 mxvar_log.rxcnt[info->port] += cnt; 2183 mxvar_log.rxcnt[info->port] += cnt;
2107 info->mon_data.rxcnt += cnt; 2184 info->mon_data.rxcnt += cnt;
2108 info->mon_data.up_rxcnt += cnt; 2185 info->mon_data.up_rxcnt += cnt;
@@ -2137,7 +2214,10 @@ static void mxser_transmit_chars(struct mxser_struct *info)
2137 return; 2214 return;
2138 } 2215 }
2139 2216
2140 if ((info->xmit_cnt <= 0) || info->tty->stopped || (info->tty->hw_stopped && (info->type != PORT_16550A) && (!info->IsMoxaMustChipFlag))) { 2217 if ((info->xmit_cnt <= 0) || info->tty->stopped ||
2218 (info->tty->hw_stopped &&
2219 (info->type != PORT_16550A) &&
2220 (!info->IsMoxaMustChipFlag))) {
2141 info->IER &= ~UART_IER_THRI; 2221 info->IER &= ~UART_IER_THRI;
2142 outb(info->IER, info->base + UART_IER); 2222 outb(info->IER, info->base + UART_IER);
2143 spin_unlock_irqrestore(&info->slock, flags); 2223 spin_unlock_irqrestore(&info->slock, flags);
@@ -2147,17 +2227,18 @@ static void mxser_transmit_chars(struct mxser_struct *info)
2147 cnt = info->xmit_cnt; 2227 cnt = info->xmit_cnt;
2148 count = info->xmit_fifo_size; 2228 count = info->xmit_fifo_size;
2149 do { 2229 do {
2150 outb(info->xmit_buf[info->xmit_tail++], info->base + UART_TX); 2230 outb(info->xmit_buf[info->xmit_tail++],
2231 info->base + UART_TX);
2151 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE - 1); 2232 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE - 1);
2152 if (--info->xmit_cnt <= 0) 2233 if (--info->xmit_cnt <= 0)
2153 break; 2234 break;
2154 } while (--count > 0); 2235 } while (--count > 0);
2155 mxvar_log.txcnt[info->port] += (cnt - info->xmit_cnt); 2236 mxvar_log.txcnt[info->port] += (cnt - info->xmit_cnt);
2156 2237
2157// added by James 03-12-2004. 2238/* added by James 03-12-2004. */
2158 info->mon_data.txcnt += (cnt - info->xmit_cnt); 2239 info->mon_data.txcnt += (cnt - info->xmit_cnt);
2159 info->mon_data.up_txcnt += (cnt - info->xmit_cnt); 2240 info->mon_data.up_txcnt += (cnt - info->xmit_cnt);
2160// (above) added by James. 2241/* (above) added by James. */
2161 2242
2162/* added by casper 1/11/2000 */ 2243/* added by casper 1/11/2000 */
2163 info->icount.tx += (cnt - info->xmit_cnt); 2244 info->icount.tx += (cnt - info->xmit_cnt);
@@ -2188,7 +2269,6 @@ static void mxser_check_modem_status(struct mxser_struct *info, int status)
2188 info->mon_data.modem_status = status; 2269 info->mon_data.modem_status = status;
2189 wake_up_interruptible(&info->delta_msr_wait); 2270 wake_up_interruptible(&info->delta_msr_wait);
2190 2271
2191
2192 if ((info->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) { 2272 if ((info->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
2193 if (status & UART_MSR_DCD) 2273 if (status & UART_MSR_DCD)
2194 wake_up_interruptible(&info->open_wait); 2274 wake_up_interruptible(&info->open_wait);
@@ -2200,7 +2280,8 @@ static void mxser_check_modem_status(struct mxser_struct *info, int status)
2200 if (status & UART_MSR_CTS) { 2280 if (status & UART_MSR_CTS) {
2201 info->tty->hw_stopped = 0; 2281 info->tty->hw_stopped = 0;
2202 2282
2203 if ((info->type != PORT_16550A) && (!info->IsMoxaMustChipFlag)) { 2283 if ((info->type != PORT_16550A) &&
2284 (!info->IsMoxaMustChipFlag)) {
2204 info->IER |= UART_IER_THRI; 2285 info->IER |= UART_IER_THRI;
2205 outb(info->IER, info->base + UART_IER); 2286 outb(info->IER, info->base + UART_IER);
2206 } 2287 }
@@ -2209,7 +2290,8 @@ static void mxser_check_modem_status(struct mxser_struct *info, int status)
2209 } else { 2290 } else {
2210 if (!(status & UART_MSR_CTS)) { 2291 if (!(status & UART_MSR_CTS)) {
2211 info->tty->hw_stopped = 1; 2292 info->tty->hw_stopped = 1;
2212 if ((info->type != PORT_16550A) && (!info->IsMoxaMustChipFlag)) { 2293 if ((info->type != PORT_16550A) &&
2294 (!info->IsMoxaMustChipFlag)) {
2213 info->IER &= ~UART_IER_THRI; 2295 info->IER &= ~UART_IER_THRI;
2214 outb(info->IER, info->base + UART_IER); 2296 outb(info->IER, info->base + UART_IER);
2215 } 2297 }
@@ -2231,7 +2313,7 @@ static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp, stru
2231 */ 2313 */
2232 if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) { 2314 if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) {
2233 info->flags |= ASYNC_NORMAL_ACTIVE; 2315 info->flags |= ASYNC_NORMAL_ACTIVE;
2234 return (0); 2316 return 0;
2235 } 2317 }
2236 2318
2237 if (tty->termios->c_cflag & CLOCAL) 2319 if (tty->termios->c_cflag & CLOCAL)
@@ -2254,7 +2336,8 @@ static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp, stru
2254 info->blocked_open++; 2336 info->blocked_open++;
2255 while (1) { 2337 while (1) {
2256 spin_lock_irqsave(&info->slock, flags); 2338 spin_lock_irqsave(&info->slock, flags);
2257 outb(inb(info->base + UART_MCR) | UART_MCR_DTR | UART_MCR_RTS, info->base + UART_MCR); 2339 outb(inb(info->base + UART_MCR) |
2340 UART_MCR_DTR | UART_MCR_RTS, info->base + UART_MCR);
2258 spin_unlock_irqrestore(&info->slock, flags); 2341 spin_unlock_irqrestore(&info->slock, flags);
2259 set_current_state(TASK_INTERRUPTIBLE); 2342 set_current_state(TASK_INTERRUPTIBLE);
2260 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)) { 2343 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)) {
@@ -2264,7 +2347,9 @@ static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp, stru
2264 retval = -ERESTARTSYS; 2347 retval = -ERESTARTSYS;
2265 break; 2348 break;
2266 } 2349 }
2267 if (!(info->flags & ASYNC_CLOSING) && (do_clocal || (inb(info->base + UART_MSR) & UART_MSR_DCD))) 2350 if (!(info->flags & ASYNC_CLOSING) &&
2351 (do_clocal ||
2352 (inb(info->base + UART_MSR) & UART_MSR_DCD)))
2268 break; 2353 break;
2269 if (signal_pending(current)) { 2354 if (signal_pending(current)) {
2270 retval = -ERESTARTSYS; 2355 retval = -ERESTARTSYS;
@@ -2278,27 +2363,26 @@ static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp, stru
2278 info->count++; 2363 info->count++;
2279 info->blocked_open--; 2364 info->blocked_open--;
2280 if (retval) 2365 if (retval)
2281 return (retval); 2366 return retval;
2282 info->flags |= ASYNC_NORMAL_ACTIVE; 2367 info->flags |= ASYNC_NORMAL_ACTIVE;
2283 return (0); 2368 return 0;
2284} 2369}
2285 2370
2286static int mxser_startup(struct mxser_struct *info) 2371static int mxser_startup(struct mxser_struct *info)
2287{ 2372{
2288
2289 unsigned long page; 2373 unsigned long page;
2290 unsigned long flags; 2374 unsigned long flags;
2291 2375
2292 page = __get_free_page(GFP_KERNEL); 2376 page = __get_free_page(GFP_KERNEL);
2293 if (!page) 2377 if (!page)
2294 return (-ENOMEM); 2378 return -ENOMEM;
2295 2379
2296 spin_lock_irqsave(&info->slock, flags); 2380 spin_lock_irqsave(&info->slock, flags);
2297 2381
2298 if (info->flags & ASYNC_INITIALIZED) { 2382 if (info->flags & ASYNC_INITIALIZED) {
2299 free_page(page); 2383 free_page(page);
2300 spin_unlock_irqrestore(&info->slock, flags); 2384 spin_unlock_irqrestore(&info->slock, flags);
2301 return (0); 2385 return 0;
2302 } 2386 }
2303 2387
2304 if (!info->base || !info->type) { 2388 if (!info->base || !info->type) {
@@ -2306,7 +2390,7 @@ static int mxser_startup(struct mxser_struct *info)
2306 set_bit(TTY_IO_ERROR, &info->tty->flags); 2390 set_bit(TTY_IO_ERROR, &info->tty->flags);
2307 free_page(page); 2391 free_page(page);
2308 spin_unlock_irqrestore(&info->slock, flags); 2392 spin_unlock_irqrestore(&info->slock, flags);
2309 return (0); 2393 return 0;
2310 } 2394 }
2311 if (info->xmit_buf) 2395 if (info->xmit_buf)
2312 free_page(page); 2396 free_page(page);
@@ -2318,9 +2402,12 @@ static int mxser_startup(struct mxser_struct *info)
2318 * (they will be reenabled in mxser_change_speed()) 2402 * (they will be reenabled in mxser_change_speed())
2319 */ 2403 */
2320 if (info->IsMoxaMustChipFlag) 2404 if (info->IsMoxaMustChipFlag)
2321 outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT | MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR); 2405 outb((UART_FCR_CLEAR_RCVR |
2406 UART_FCR_CLEAR_XMIT |
2407 MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR);
2322 else 2408 else
2323 outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), info->base + UART_FCR); 2409 outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
2410 info->base + UART_FCR);
2324 2411
2325 /* 2412 /*
2326 * At this point there's no way the LSR could still be 0xFF; 2413 * At this point there's no way the LSR could still be 0xFF;
@@ -2332,9 +2419,9 @@ static int mxser_startup(struct mxser_struct *info)
2332 if (capable(CAP_SYS_ADMIN)) { 2419 if (capable(CAP_SYS_ADMIN)) {
2333 if (info->tty) 2420 if (info->tty)
2334 set_bit(TTY_IO_ERROR, &info->tty->flags); 2421 set_bit(TTY_IO_ERROR, &info->tty->flags);
2335 return (0); 2422 return 0;
2336 } else 2423 } else
2337 return (-ENODEV); 2424 return -ENODEV;
2338 } 2425 }
2339 2426
2340 /* 2427 /*
@@ -2356,12 +2443,12 @@ static int mxser_startup(struct mxser_struct *info)
2356 * Finally, enable interrupts 2443 * Finally, enable interrupts
2357 */ 2444 */
2358 info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI; 2445 info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
2359// info->IER = UART_IER_RLSI | UART_IER_RDI; 2446 /* info->IER = UART_IER_RLSI | UART_IER_RDI; */
2360 2447
2361 // following add by Victor Yu. 08-30-2002 2448 /* following add by Victor Yu. 08-30-2002 */
2362 if (info->IsMoxaMustChipFlag) 2449 if (info->IsMoxaMustChipFlag)
2363 info->IER |= MOXA_MUST_IER_EGDAI; 2450 info->IER |= MOXA_MUST_IER_EGDAI;
2364 // above add by Victor Yu. 08-30-2002 2451 /* above add by Victor Yu. 08-30-2002 */
2365 outb(info->IER, info->base + UART_IER); /* enable interrupts */ 2452 outb(info->IER, info->base + UART_IER); /* enable interrupts */
2366 2453
2367 /* 2454 /*
@@ -2383,7 +2470,7 @@ static int mxser_startup(struct mxser_struct *info)
2383 mxser_change_speed(info, NULL); 2470 mxser_change_speed(info, NULL);
2384 2471
2385 info->flags |= ASYNC_INITIALIZED; 2472 info->flags |= ASYNC_INITIALIZED;
2386 return (0); 2473 return 0;
2387} 2474}
2388 2475
2389/* 2476/*
@@ -2421,12 +2508,15 @@ static void mxser_shutdown(struct mxser_struct *info)
2421 outb(info->MCR, info->base + UART_MCR); 2508 outb(info->MCR, info->base + UART_MCR);
2422 2509
2423 /* clear Rx/Tx FIFO's */ 2510 /* clear Rx/Tx FIFO's */
2424 // following add by Victor Yu. 08-30-2002 2511 /* following add by Victor Yu. 08-30-2002 */
2425 if (info->IsMoxaMustChipFlag) 2512 if (info->IsMoxaMustChipFlag)
2426 outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT | MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR); 2513 outb((UART_FCR_CLEAR_RCVR |
2514 UART_FCR_CLEAR_XMIT |
2515 MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR);
2427 else 2516 else
2428 // above add by Victor Yu. 08-30-2002 2517 /* above add by Victor Yu. 08-30-2002 */
2429 outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), info->base + UART_FCR); 2518 outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
2519 info->base + UART_FCR);
2430 2520
2431 /* read data port to reset things */ 2521 /* read data port to reset things */
2432 (void) inb(info->base + UART_RX); 2522 (void) inb(info->base + UART_RX);
@@ -2436,11 +2526,10 @@ static void mxser_shutdown(struct mxser_struct *info)
2436 2526
2437 info->flags &= ~ASYNC_INITIALIZED; 2527 info->flags &= ~ASYNC_INITIALIZED;
2438 2528
2439 // following add by Victor Yu. 09-23-2002 2529 /* following add by Victor Yu. 09-23-2002 */
2440 if (info->IsMoxaMustChipFlag) { 2530 if (info->IsMoxaMustChipFlag)
2441 SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->base); 2531 SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->base);
2442 } 2532 /* above add by Victor Yu. 09-23-2002 */
2443 // above add by Victor Yu. 09-23-2002
2444 2533
2445 spin_unlock_irqrestore(&info->slock, flags); 2534 spin_unlock_irqrestore(&info->slock, flags);
2446} 2535}
@@ -2457,14 +2546,12 @@ static int mxser_change_speed(struct mxser_struct *info, struct termios *old_ter
2457 long baud; 2546 long baud;
2458 unsigned long flags; 2547 unsigned long flags;
2459 2548
2460
2461 if (!info->tty || !info->tty->termios) 2549 if (!info->tty || !info->tty->termios)
2462 return ret; 2550 return ret;
2463 cflag = info->tty->termios->c_cflag; 2551 cflag = info->tty->termios->c_cflag;
2464 if (!(info->base)) 2552 if (!(info->base))
2465 return ret; 2553 return ret;
2466 2554
2467
2468#ifndef B921600 2555#ifndef B921600
2469#define B921600 (B460800 +1) 2556#define B921600 (B460800 +1)
2470#endif 2557#endif
@@ -2559,9 +2646,8 @@ static int mxser_change_speed(struct mxser_struct *info, struct termios *old_ter
2559 cval |= 0x04; 2646 cval |= 0x04;
2560 if (cflag & PARENB) 2647 if (cflag & PARENB)
2561 cval |= UART_LCR_PARITY; 2648 cval |= UART_LCR_PARITY;
2562 if (!(cflag & PARODD)) { 2649 if (!(cflag & PARODD))
2563 cval |= UART_LCR_EPAR; 2650 cval |= UART_LCR_EPAR;
2564 }
2565 if (cflag & CMSPAR) 2651 if (cflag & CMSPAR)
2566 cval |= UART_LCR_SPAR; 2652 cval |= UART_LCR_SPAR;
2567 2653
@@ -2574,13 +2660,12 @@ static int mxser_change_speed(struct mxser_struct *info, struct termios *old_ter
2574 fcr = 0; 2660 fcr = 0;
2575 } else { 2661 } else {
2576 fcr = UART_FCR_ENABLE_FIFO; 2662 fcr = UART_FCR_ENABLE_FIFO;
2577 // following add by Victor Yu. 08-30-2002 2663 /* following add by Victor Yu. 08-30-2002 */
2578 if (info->IsMoxaMustChipFlag) { 2664 if (info->IsMoxaMustChipFlag) {
2579 fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; 2665 fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
2580 SET_MOXA_MUST_FIFO_VALUE(info); 2666 SET_MOXA_MUST_FIFO_VALUE(info);
2581 } else { 2667 } else {
2582 // above add by Victor Yu. 08-30-2002 2668 /* above add by Victor Yu. 08-30-2002 */
2583
2584 switch (info->rx_trigger) { 2669 switch (info->rx_trigger) {
2585 case 1: 2670 case 1:
2586 fcr |= UART_FCR_TRIGGER_1; 2671 fcr |= UART_FCR_TRIGGER_1;
@@ -2606,22 +2691,24 @@ static int mxser_change_speed(struct mxser_struct *info, struct termios *old_ter
2606 info->IER |= UART_IER_MSI; 2691 info->IER |= UART_IER_MSI;
2607 if ((info->type == PORT_16550A) || (info->IsMoxaMustChipFlag)) { 2692 if ((info->type == PORT_16550A) || (info->IsMoxaMustChipFlag)) {
2608 info->MCR |= UART_MCR_AFE; 2693 info->MCR |= UART_MCR_AFE;
2609 //status = mxser_get_msr(info->base, 0, info->port); 2694 /* status = mxser_get_msr(info->base, 0, info->port); */
2610/* save_flags(flags); 2695/*
2696 save_flags(flags);
2611 cli(); 2697 cli();
2612 status = inb(baseaddr + UART_MSR); 2698 status = inb(baseaddr + UART_MSR);
2613 restore_flags(flags);*/ 2699 restore_flags(flags);
2614 //mxser_check_modem_status(info, status); 2700*/
2701 /* mxser_check_modem_status(info, status); */
2615 } else { 2702 } else {
2616 //status = mxser_get_msr(info->base, 0, info->port); 2703 /* status = mxser_get_msr(info->base, 0, info->port); */
2617 2704 /* MX_LOCK(&info->slock); */
2618 //MX_LOCK(&info->slock);
2619 status = inb(info->base + UART_MSR); 2705 status = inb(info->base + UART_MSR);
2620 //MX_UNLOCK(&info->slock); 2706 /* MX_UNLOCK(&info->slock); */
2621 if (info->tty->hw_stopped) { 2707 if (info->tty->hw_stopped) {
2622 if (status & UART_MSR_CTS) { 2708 if (status & UART_MSR_CTS) {
2623 info->tty->hw_stopped = 0; 2709 info->tty->hw_stopped = 0;
2624 if ((info->type != PORT_16550A) && (!info->IsMoxaMustChipFlag)) { 2710 if ((info->type != PORT_16550A) &&
2711 (!info->IsMoxaMustChipFlag)) {
2625 info->IER |= UART_IER_THRI; 2712 info->IER |= UART_IER_THRI;
2626 outb(info->IER, info->base + UART_IER); 2713 outb(info->IER, info->base + UART_IER);
2627 } 2714 }
@@ -2630,7 +2717,8 @@ static int mxser_change_speed(struct mxser_struct *info, struct termios *old_ter
2630 } else { 2717 } else {
2631 if (!(status & UART_MSR_CTS)) { 2718 if (!(status & UART_MSR_CTS)) {
2632 info->tty->hw_stopped = 1; 2719 info->tty->hw_stopped = 1;
2633 if ((info->type != PORT_16550A) && (!info->IsMoxaMustChipFlag)) { 2720 if ((info->type != PORT_16550A) &&
2721 (!info->IsMoxaMustChipFlag)) {
2634 info->IER &= ~UART_IER_THRI; 2722 info->IER &= ~UART_IER_THRI;
2635 outb(info->IER, info->base + UART_IER); 2723 outb(info->IER, info->base + UART_IER);
2636 } 2724 }
@@ -2668,11 +2756,17 @@ static int mxser_change_speed(struct mxser_struct *info, struct termios *old_ter
2668 * overruns too. (For real raw support). 2756 * overruns too. (For real raw support).
2669 */ 2757 */
2670 if (I_IGNPAR(info->tty)) { 2758 if (I_IGNPAR(info->tty)) {
2671 info->ignore_status_mask |= UART_LSR_OE | UART_LSR_PE | UART_LSR_FE; 2759 info->ignore_status_mask |=
2672 info->read_status_mask |= UART_LSR_OE | UART_LSR_PE | UART_LSR_FE; 2760 UART_LSR_OE |
2761 UART_LSR_PE |
2762 UART_LSR_FE;
2763 info->read_status_mask |=
2764 UART_LSR_OE |
2765 UART_LSR_PE |
2766 UART_LSR_FE;
2673 } 2767 }
2674 } 2768 }
2675 // following add by Victor Yu. 09-02-2002 2769 /* following add by Victor Yu. 09-02-2002 */
2676 if (info->IsMoxaMustChipFlag) { 2770 if (info->IsMoxaMustChipFlag) {
2677 spin_lock_irqsave(&info->slock, flags); 2771 spin_lock_irqsave(&info->slock, flags);
2678 SET_MOXA_MUST_XON1_VALUE(info->base, START_CHAR(info->tty)); 2772 SET_MOXA_MUST_XON1_VALUE(info->base, START_CHAR(info->tty));
@@ -2698,7 +2792,7 @@ static int mxser_change_speed(struct mxser_struct *info, struct termios *old_ter
2698 */ 2792 */
2699 spin_unlock_irqrestore(&info->slock, flags); 2793 spin_unlock_irqrestore(&info->slock, flags);
2700 } 2794 }
2701 // above add by Victor Yu. 09-02-2002 2795 /* above add by Victor Yu. 09-02-2002 */
2702 2796
2703 2797
2704 outb(fcr, info->base + UART_FCR); /* set fcr */ 2798 outb(fcr, info->base + UART_FCR); /* set fcr */
@@ -2729,10 +2823,8 @@ static int mxser_set_baud(struct mxser_struct *info, long newspd)
2729 quot = (2 * info->baud_base / 269); 2823 quot = (2 * info->baud_base / 269);
2730 } else if (newspd) { 2824 } else if (newspd) {
2731 quot = info->baud_base / newspd; 2825 quot = info->baud_base / newspd;
2732
2733 if (quot == 0) 2826 if (quot == 0)
2734 quot = 1; 2827 quot = 1;
2735
2736 } else { 2828 } else {
2737 quot = 0; 2829 quot = 0;
2738 } 2830 }
@@ -2765,8 +2857,6 @@ static int mxser_set_baud(struct mxser_struct *info, long newspd)
2765 return ret; 2857 return ret;
2766} 2858}
2767 2859
2768
2769
2770/* 2860/*
2771 * ------------------------------------------------------------ 2861 * ------------------------------------------------------------
2772 * friends of mxser_ioctl() 2862 * friends of mxser_ioctl()
@@ -2777,7 +2867,7 @@ static int mxser_get_serial_info(struct mxser_struct *info, struct serial_struct
2777 struct serial_struct tmp; 2867 struct serial_struct tmp;
2778 2868
2779 if (!retinfo) 2869 if (!retinfo)
2780 return (-EFAULT); 2870 return -EFAULT;
2781 memset(&tmp, 0, sizeof(tmp)); 2871 memset(&tmp, 0, sizeof(tmp));
2782 tmp.type = info->type; 2872 tmp.type = info->type;
2783 tmp.line = info->port; 2873 tmp.line = info->port;
@@ -2791,7 +2881,7 @@ static int mxser_get_serial_info(struct mxser_struct *info, struct serial_struct
2791 tmp.hub6 = 0; 2881 tmp.hub6 = 0;
2792 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) 2882 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
2793 return -EFAULT; 2883 return -EFAULT;
2794 return (0); 2884 return 0;
2795} 2885}
2796 2886
2797static int mxser_set_serial_info(struct mxser_struct *info, struct serial_struct __user *new_info) 2887static int mxser_set_serial_info(struct mxser_struct *info, struct serial_struct __user *new_info)
@@ -2801,29 +2891,37 @@ static int mxser_set_serial_info(struct mxser_struct *info, struct serial_struct
2801 int retval = 0; 2891 int retval = 0;
2802 2892
2803 if (!new_info || !info->base) 2893 if (!new_info || !info->base)
2804 return (-EFAULT); 2894 return -EFAULT;
2805 if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) 2895 if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
2806 return -EFAULT; 2896 return -EFAULT;
2807 2897
2808 if ((new_serial.irq != info->irq) || (new_serial.port != info->base) || (new_serial.custom_divisor != info->custom_divisor) || (new_serial.baud_base != info->baud_base)) 2898 if ((new_serial.irq != info->irq) ||
2809 return (-EPERM); 2899 (new_serial.port != info->base) ||
2900 (new_serial.custom_divisor != info->custom_divisor) ||
2901 (new_serial.baud_base != info->baud_base))
2902 return -EPERM;
2810 2903
2811 flags = info->flags & ASYNC_SPD_MASK; 2904 flags = info->flags & ASYNC_SPD_MASK;
2812 2905
2813 if (!capable(CAP_SYS_ADMIN)) { 2906 if (!capable(CAP_SYS_ADMIN)) {
2814 if ((new_serial.baud_base != info->baud_base) || (new_serial.close_delay != info->close_delay) || ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK))) 2907 if ((new_serial.baud_base != info->baud_base) ||
2815 return (-EPERM); 2908 (new_serial.close_delay != info->close_delay) ||
2816 info->flags = ((info->flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK)); 2909 ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK)))
2910 return -EPERM;
2911 info->flags = ((info->flags & ~ASYNC_USR_MASK) |
2912 (new_serial.flags & ASYNC_USR_MASK));
2817 } else { 2913 } else {
2818 /* 2914 /*
2819 * OK, past this point, all the error checking has been done. 2915 * OK, past this point, all the error checking has been done.
2820 * At this point, we start making changes..... 2916 * At this point, we start making changes.....
2821 */ 2917 */
2822 info->flags = ((info->flags & ~ASYNC_FLAGS) | (new_serial.flags & ASYNC_FLAGS)); 2918 info->flags = ((info->flags & ~ASYNC_FLAGS) |
2919 (new_serial.flags & ASYNC_FLAGS));
2823 info->close_delay = new_serial.close_delay * HZ / 100; 2920 info->close_delay = new_serial.close_delay * HZ / 100;
2824 info->closing_wait = new_serial.closing_wait * HZ / 100; 2921 info->closing_wait = new_serial.closing_wait * HZ / 100;
2825 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; 2922 info->tty->low_latency =
2826 info->tty->low_latency = 0; //(info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; 2923 (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
2924 info->tty->low_latency = 0; /* (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; */
2827 } 2925 }
2828 2926
2829 /* added by casper, 3/17/2000, for mouse */ 2927 /* added by casper, 3/17/2000, for mouse */
@@ -2831,7 +2929,6 @@ static int mxser_set_serial_info(struct mxser_struct *info, struct serial_struct
2831 2929
2832 process_txrx_fifo(info); 2930 process_txrx_fifo(info);
2833 2931
2834 /* */
2835 if (info->flags & ASYNC_INITIALIZED) { 2932 if (info->flags & ASYNC_INITIALIZED) {
2836 if (flags != (info->flags & ASYNC_SPD_MASK)) { 2933 if (flags != (info->flags & ASYNC_SPD_MASK)) {
2837 mxser_change_speed(info, NULL); 2934 mxser_change_speed(info, NULL);
@@ -2839,7 +2936,7 @@ static int mxser_set_serial_info(struct mxser_struct *info, struct serial_struct
2839 } else { 2936 } else {
2840 retval = mxser_startup(info); 2937 retval = mxser_startup(info);
2841 } 2938 }
2842 return (retval); 2939 return retval;
2843} 2940}
2844 2941
2845/* 2942/*
@@ -2876,25 +2973,27 @@ static void mxser_send_break(struct mxser_struct *info, int duration)
2876 return; 2973 return;
2877 set_current_state(TASK_INTERRUPTIBLE); 2974 set_current_state(TASK_INTERRUPTIBLE);
2878 spin_lock_irqsave(&info->slock, flags); 2975 spin_lock_irqsave(&info->slock, flags);
2879 outb(inb(info->base + UART_LCR) | UART_LCR_SBC, info->base + UART_LCR); 2976 outb(inb(info->base + UART_LCR) | UART_LCR_SBC,
2977 info->base + UART_LCR);
2880 spin_unlock_irqrestore(&info->slock, flags); 2978 spin_unlock_irqrestore(&info->slock, flags);
2881 schedule_timeout(duration); 2979 schedule_timeout(duration);
2882 spin_lock_irqsave(&info->slock, flags); 2980 spin_lock_irqsave(&info->slock, flags);
2883 outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC, info->base + UART_LCR); 2981 outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC,
2982 info->base + UART_LCR);
2884 spin_unlock_irqrestore(&info->slock, flags); 2983 spin_unlock_irqrestore(&info->slock, flags);
2885} 2984}
2886 2985
2887static int mxser_tiocmget(struct tty_struct *tty, struct file *file) 2986static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
2888{ 2987{
2889 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 2988 struct mxser_struct *info = tty->driver_data;
2890 unsigned char control, status; 2989 unsigned char control, status;
2891 unsigned long flags; 2990 unsigned long flags;
2892 2991
2893 2992
2894 if (tty->index == MXSER_PORTS) 2993 if (tty->index == MXSER_PORTS)
2895 return (-ENOIOCTLCMD); 2994 return -ENOIOCTLCMD;
2896 if (tty->flags & (1 << TTY_IO_ERROR)) 2995 if (tty->flags & (1 << TTY_IO_ERROR))
2897 return (-EIO); 2996 return -EIO;
2898 2997
2899 control = info->MCR; 2998 control = info->MCR;
2900 2999
@@ -2904,12 +3003,16 @@ static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
2904 mxser_check_modem_status(info, status); 3003 mxser_check_modem_status(info, status);
2905 spin_unlock_irqrestore(&info->slock, flags); 3004 spin_unlock_irqrestore(&info->slock, flags);
2906 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) | 3005 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
2907 ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) | ((status & UART_MSR_RI) ? TIOCM_RNG : 0) | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0); 3006 ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
3007 ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
3008 ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
3009 ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
3010 ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
2908} 3011}
2909 3012
2910static int mxser_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) 3013static int mxser_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear)
2911{ 3014{
2912 struct mxser_struct *info = (struct mxser_struct *) tty->driver_data; 3015 struct mxser_struct *info = tty->driver_data;
2913 unsigned long flags; 3016 unsigned long flags;
2914 3017
2915 3018
@@ -2968,38 +3071,36 @@ static int mxser_get_ISA_conf(int cap, struct mxser_hwconf *hwconf)
2968 hwconf->board_type = MXSER_BOARD_CI104J; 3071 hwconf->board_type = MXSER_BOARD_CI104J;
2969 hwconf->ports = 4; 3072 hwconf->ports = 4;
2970 } else 3073 } else
2971 return (0); 3074 return 0;
2972 3075
2973 irq = 0; 3076 irq = 0;
2974 if (hwconf->ports == 2) { 3077 if (hwconf->ports == 2) {
2975 irq = regs[9] & 0xF000; 3078 irq = regs[9] & 0xF000;
2976 irq = irq | (irq >> 4); 3079 irq = irq | (irq >> 4);
2977 if (irq != (regs[9] & 0xFF00)) 3080 if (irq != (regs[9] & 0xFF00))
2978 return (MXSER_ERR_IRQ_CONFLIT); 3081 return MXSER_ERR_IRQ_CONFLIT;
2979 } else if (hwconf->ports == 4) { 3082 } else if (hwconf->ports == 4) {
2980 irq = regs[9] & 0xF000; 3083 irq = regs[9] & 0xF000;
2981 irq = irq | (irq >> 4); 3084 irq = irq | (irq >> 4);
2982 irq = irq | (irq >> 8); 3085 irq = irq | (irq >> 8);
2983 if (irq != regs[9]) 3086 if (irq != regs[9])
2984 return (MXSER_ERR_IRQ_CONFLIT); 3087 return MXSER_ERR_IRQ_CONFLIT;
2985 } else if (hwconf->ports == 8) { 3088 } else if (hwconf->ports == 8) {
2986 irq = regs[9] & 0xF000; 3089 irq = regs[9] & 0xF000;
2987 irq = irq | (irq >> 4); 3090 irq = irq | (irq >> 4);
2988 irq = irq | (irq >> 8); 3091 irq = irq | (irq >> 8);
2989 if ((irq != regs[9]) || (irq != regs[10])) 3092 if ((irq != regs[9]) || (irq != regs[10]))
2990 return (MXSER_ERR_IRQ_CONFLIT); 3093 return MXSER_ERR_IRQ_CONFLIT;
2991 } 3094 }
2992 3095
2993 if (!irq) { 3096 if (!irq)
2994 return (MXSER_ERR_IRQ); 3097 return MXSER_ERR_IRQ;
2995 } 3098 hwconf->irq = ((int)(irq & 0xF000) >> 12);
2996 hwconf->irq = ((int) (irq & 0xF000) >> 12);
2997 for (i = 0; i < 8; i++) 3099 for (i = 0; i < 8; i++)
2998 hwconf->ioaddr[i] = (int) regs[i + 1] & 0xFFF8; 3100 hwconf->ioaddr[i] = (int) regs[i + 1] & 0xFFF8;
2999 if ((regs[12] & 0x80) == 0) { 3101 if ((regs[12] & 0x80) == 0)
3000 return (MXSER_ERR_VECTOR); 3102 return MXSER_ERR_VECTOR;
3001 } 3103 hwconf->vector = (int)regs[11]; /* interrupt vector */
3002 hwconf->vector = (int) regs[11]; /* interrupt vector */
3003 if (id == 1) 3104 if (id == 1)
3004 hwconf->vector_mask = 0x00FF; 3105 hwconf->vector_mask = 0x00FF;
3005 else 3106 else
@@ -3007,10 +3108,10 @@ static int mxser_get_ISA_conf(int cap, struct mxser_hwconf *hwconf)
3007 for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) { 3108 for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
3008 if (regs[12] & bits) { 3109 if (regs[12] & bits) {
3009 hwconf->baud_base[i] = 921600; 3110 hwconf->baud_base[i] = 921600;
3010 hwconf->MaxCanSetBaudRate[i] = 921600; // add by Victor Yu. 09-04-2002 3111 hwconf->MaxCanSetBaudRate[i] = 921600; /* add by Victor Yu. 09-04-2002 */
3011 } else { 3112 } else {
3012 hwconf->baud_base[i] = 115200; 3113 hwconf->baud_base[i] = 115200;
3013 hwconf->MaxCanSetBaudRate[i] = 115200; // add by Victor Yu. 09-04-2002 3114 hwconf->MaxCanSetBaudRate[i] = 115200; /* add by Victor Yu. 09-04-2002 */
3014 } 3115 }
3015 } 3116 }
3016 scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB); 3117 scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
@@ -3030,7 +3131,7 @@ static int mxser_get_ISA_conf(int cap, struct mxser_hwconf *hwconf)
3030 hwconf->ports = 4; 3131 hwconf->ports = 4;
3031 request_region(hwconf->ioaddr[0], 8 * hwconf->ports, "mxser(IO)"); 3132 request_region(hwconf->ioaddr[0], 8 * hwconf->ports, "mxser(IO)");
3032 request_region(hwconf->vector, 1, "mxser(vector)"); 3133 request_region(hwconf->vector, 1, "mxser(vector)");
3033 return (hwconf->ports); 3134 return hwconf->ports;
3034} 3135}
3035 3136
3036#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */ 3137#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
@@ -3053,7 +3154,7 @@ static int mxser_read_register(int port, unsigned short *regs)
3053 3154
3054 id = mxser_program_mode(port); 3155 id = mxser_program_mode(port);
3055 if (id < 0) 3156 if (id < 0)
3056 return (id); 3157 return id;
3057 for (i = 0; i < 14; i++) { 3158 for (i = 0; i < 14; i++) {
3058 k = (i & 0x3F) | 0x180; 3159 k = (i & 0x3F) | 0x180;
3059 for (j = 0x100; j > 0; j >>= 1) { 3160 for (j = 0x100; j > 0; j >>= 1) {
@@ -3066,7 +3167,7 @@ static int mxser_read_register(int port, unsigned short *regs)
3066 outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */ 3167 outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
3067 } 3168 }
3068 } 3169 }
3069 (void) inb(port); 3170 (void)inb(port);
3070 value = 0; 3171 value = 0;
3071 for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) { 3172 for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
3072 outb(CHIP_CS, port); 3173 outb(CHIP_CS, port);
@@ -3078,28 +3179,33 @@ static int mxser_read_register(int port, unsigned short *regs)
3078 outb(0, port); 3179 outb(0, port);
3079 } 3180 }
3080 mxser_normal_mode(port); 3181 mxser_normal_mode(port);
3081 return (id); 3182 return id;
3082} 3183}
3083 3184
3084static int mxser_program_mode(int port) 3185static int mxser_program_mode(int port)
3085{ 3186{
3086 int id, i, j, n; 3187 int id, i, j, n;
3087 //unsigned long flags; 3188 /* unsigned long flags; */
3088 3189
3089 spin_lock(&gm_lock); 3190 spin_lock(&gm_lock);
3090 outb(0, port); 3191 outb(0, port);
3091 outb(0, port); 3192 outb(0, port);
3092 outb(0, port); 3193 outb(0, port);
3093 (void) inb(port); 3194 (void)inb(port);
3094 (void) inb(port); 3195 (void)inb(port);
3095 outb(0, port); 3196 outb(0, port);
3096 (void) inb(port); 3197 (void)inb(port);
3097 //restore_flags(flags); 3198 /* restore_flags(flags); */
3098 spin_unlock(&gm_lock); 3199 spin_unlock(&gm_lock);
3099 3200
3100 id = inb(port + 1) & 0x1F; 3201 id = inb(port + 1) & 0x1F;
3101 if ((id != C168_ASIC_ID) && (id != C104_ASIC_ID) && (id != C102_ASIC_ID) && (id != CI132_ASIC_ID) && (id != CI134_ASIC_ID) && (id != CI104J_ASIC_ID)) 3202 if ((id != C168_ASIC_ID) &&
3102 return (-1); 3203 (id != C104_ASIC_ID) &&
3204 (id != C102_ASIC_ID) &&
3205 (id != CI132_ASIC_ID) &&
3206 (id != CI134_ASIC_ID) &&
3207 (id != CI104J_ASIC_ID))
3208 return -1;
3103 for (i = 0, j = 0; i < 4; i++) { 3209 for (i = 0, j = 0; i < 4; i++) {
3104 n = inb(port + 2); 3210 n = inb(port + 2);
3105 if (n == 'M') { 3211 if (n == 'M') {
@@ -3112,7 +3218,7 @@ static int mxser_program_mode(int port)
3112 } 3218 }
3113 if (j != 2) 3219 if (j != 2)
3114 id = -2; 3220 id = -2;
3115 return (id); 3221 return id;
3116} 3222}
3117 3223
3118static void mxser_normal_mode(int port) 3224static void mxser_normal_mode(int port)
@@ -3130,7 +3236,7 @@ static void mxser_normal_mode(int port)
3130 if ((n & 0x61) == 0x60) 3236 if ((n & 0x61) == 0x60)
3131 break; 3237 break;
3132 if ((n & 1) == 1) 3238 if ((n & 1) == 1)
3133 (void) inb(port); 3239 (void)inb(port);
3134 } 3240 }
3135 outb(0x00, port + 4); 3241 outb(0x00, port + 4);
3136} 3242}
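
The ISA probe in mxser_get_ISA_conf() above derives the board IRQ by taking one 4-bit IRQ code from the EEPROM word regs[9], replicating it across the nibbles, and requiring every port's nibble to agree before accepting it. The following is a minimal illustrative sketch of that consistency check, not the driver itself; the helper name, the -1 error return and the regs[] contents are made up for the example.

#include <stdio.h>

/* Illustrative only: regs[9]/regs[10] carry one 4-bit IRQ code per port,
 * four ports packed into each 16-bit word, as read by mxser_read_register(). */
static int isa_irq_from_regs(const unsigned short *regs, int ports)
{
	unsigned short irq = regs[9] & 0xF000;	/* IRQ nibble of port 0 */

	irq |= irq >> 4;			/* replicate into ports 0-1 */
	if (ports == 2)
		return (irq != (regs[9] & 0xFF00)) ? -1 : (irq & 0xF000) >> 12;

	irq |= irq >> 8;			/* replicate into ports 0-3 */
	if (ports == 4)
		return (irq != regs[9]) ? -1 : (irq & 0xF000) >> 12;

	/* 8-port boards carry a second word for ports 4-7 */
	if (irq != regs[9] || irq != regs[10])
		return -1;			/* ports disagree: IRQ conflict */
	return (irq & 0xF000) >> 12;
}

int main(void)
{
	unsigned short regs[16] = { 0 };

	regs[9] = 0xAAAA;	/* all four ports report IRQ 10 */
	printf("irq = %d\n", isa_irq_from_regs(regs, 4));	/* -> 10 */
	return 0;
}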
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index c48de09d68f0..203dc2b661d5 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -951,7 +951,8 @@ static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
951 { 951 {
952queue_the_message: 952queue_the_message:
953 953
954 pMsg = kmalloc(sizeof(struct r3964_message), GFP_KERNEL); 954 pMsg = kmalloc(sizeof(struct r3964_message),
955 error_code?GFP_ATOMIC:GFP_KERNEL);
955 TRACE_M("add_msg - kmalloc %p",pMsg); 956 TRACE_M("add_msg - kmalloc %p",pMsg);
956 if(pMsg==NULL) { 957 if(pMsg==NULL) {
957 return; 958 return;
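
The add_msg() change above chooses the allocation flags from context: GFP_KERNEL may sleep, so it is only safe where blocking is allowed, while the error path can run where sleeping is forbidden and GFP_ATOMIC is required. A generic sketch of the same pattern follows; the function and the in_atomic_path flag are hypothetical stand-ins for error_code in the patch.

#include <linux/slab.h>
#include <linux/types.h>

/* Sketch: pick allocation flags based on whether the caller may sleep. */
static void *alloc_msg(size_t size, bool in_atomic_path)
{
	/* GFP_ATOMIC never sleeps but can fail under memory pressure;
	 * GFP_KERNEL may block until memory becomes available. */
	return kmalloc(size, in_atomic_path ? GFP_ATOMIC : GFP_KERNEL);
}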
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 0c141c295fb6..17bc8abd5df5 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1582,7 +1582,7 @@ static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
1582 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) 1582 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
1583 return; 1583 return;
1584 1584
1585 if (!tty || !info->tx_buf) 1585 if (!info->tx_buf)
1586 return; 1586 return;
1587 1587
1588 spin_lock_irqsave(&info->lock,flags); 1588 spin_lock_irqsave(&info->lock,flags);
@@ -1649,7 +1649,7 @@ static int mgslpc_write(struct tty_struct * tty,
1649 __FILE__,__LINE__,info->device_name,count); 1649 __FILE__,__LINE__,info->device_name,count);
1650 1650
1651 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") || 1651 if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
1652 !tty || !info->tx_buf) 1652 !info->tx_buf)
1653 goto cleanup; 1653 goto cleanup;
1654 1654
1655 if (info->params.mode == MGSL_MODE_HDLC) { 1655 if (info->params.mode == MGSL_MODE_HDLC) {
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 7edc6a4dbdc4..0708c5164c83 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -324,35 +324,15 @@ static void rp_do_receive(struct r_port *info,
324 CHANNEL_t * cp, unsigned int ChanStatus) 324 CHANNEL_t * cp, unsigned int ChanStatus)
325{ 325{
326 unsigned int CharNStat; 326 unsigned int CharNStat;
327 int ToRecv, wRecv, space = 0, count; 327 int ToRecv, wRecv, space;
328 unsigned char *cbuf, *chead; 328 unsigned char *cbuf;
329 char *fbuf, *fhead;
330 struct tty_ldisc *ld;
331
332 ld = tty_ldisc_ref(tty);
333 329
334 ToRecv = sGetRxCnt(cp); 330 ToRecv = sGetRxCnt(cp);
335 space = tty->receive_room;
336 if (space > 2 * TTY_FLIPBUF_SIZE)
337 space = 2 * TTY_FLIPBUF_SIZE;
338 count = 0;
339#ifdef ROCKET_DEBUG_INTR 331#ifdef ROCKET_DEBUG_INTR
340 printk(KERN_INFO "rp_do_receive(%d, %d)...", ToRecv, space); 332 printk(KERN_INFO "rp_do_receive(%d)...", ToRecv);
341#endif 333#endif
342 334 if (ToRecv == 0)
343 /* 335 return;
344 * determine how many we can actually read in. If we can't
345 * read any in then we have a software overrun condition.
346 */
347 if (ToRecv > space)
348 ToRecv = space;
349
350 ToRecv = tty_prepare_flip_string_flags(tty, &chead, &fhead, ToRecv);
351 if (ToRecv <= 0)
352 goto done;
353
354 cbuf = chead;
355 fbuf = fhead;
356 336
357 /* 337 /*
358 * if status indicates there are errored characters in the 338 * if status indicates there are errored characters in the
@@ -380,6 +360,8 @@ static void rp_do_receive(struct r_port *info,
380 info->read_status_mask); 360 info->read_status_mask);
381#endif 361#endif
382 while (ToRecv) { 362 while (ToRecv) {
363 char flag;
364
383 CharNStat = sInW(sGetTxRxDataIO(cp)); 365 CharNStat = sInW(sGetTxRxDataIO(cp));
384#ifdef ROCKET_DEBUG_RECEIVE 366#ifdef ROCKET_DEBUG_RECEIVE
385 printk(KERN_INFO "%x...", CharNStat); 367 printk(KERN_INFO "%x...", CharNStat);
@@ -392,17 +374,16 @@ static void rp_do_receive(struct r_port *info,
392 } 374 }
393 CharNStat &= info->read_status_mask; 375 CharNStat &= info->read_status_mask;
394 if (CharNStat & STMBREAKH) 376 if (CharNStat & STMBREAKH)
395 *fbuf++ = TTY_BREAK; 377 flag = TTY_BREAK;
396 else if (CharNStat & STMPARITYH) 378 else if (CharNStat & STMPARITYH)
397 *fbuf++ = TTY_PARITY; 379 flag = TTY_PARITY;
398 else if (CharNStat & STMFRAMEH) 380 else if (CharNStat & STMFRAMEH)
399 *fbuf++ = TTY_FRAME; 381 flag = TTY_FRAME;
400 else if (CharNStat & STMRCVROVRH) 382 else if (CharNStat & STMRCVROVRH)
401 *fbuf++ = TTY_OVERRUN; 383 flag = TTY_OVERRUN;
402 else 384 else
403 *fbuf++ = TTY_NORMAL; 385 flag = TTY_NORMAL;
404 *cbuf++ = CharNStat & 0xff; 386 tty_insert_flip_char(tty, CharNStat & 0xff, flag);
405 count++;
406 ToRecv--; 387 ToRecv--;
407 } 388 }
408 389
@@ -422,20 +403,23 @@ static void rp_do_receive(struct r_port *info,
422 * characters at time by doing repeated word IO 403 * characters at time by doing repeated word IO
423 * transfer. 404 * transfer.
424 */ 405 */
406 space = tty_prepare_flip_string(tty, &cbuf, ToRecv);
407 if (space < ToRecv) {
408#ifdef ROCKET_DEBUG_RECEIVE
409 printk(KERN_INFO "rp_do_receive:insufficient space ToRecv=%d space=%d\n", ToRecv, space);
410#endif
411 if (space <= 0)
412 return;
413 ToRecv = space;
414 }
425 wRecv = ToRecv >> 1; 415 wRecv = ToRecv >> 1;
426 if (wRecv) 416 if (wRecv)
427 sInStrW(sGetTxRxDataIO(cp), (unsigned short *) cbuf, wRecv); 417 sInStrW(sGetTxRxDataIO(cp), (unsigned short *) cbuf, wRecv);
428 if (ToRecv & 1) 418 if (ToRecv & 1)
429 cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp)); 419 cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp));
430 memset(fbuf, TTY_NORMAL, ToRecv);
431 cbuf += ToRecv;
432 fbuf += ToRecv;
433 count += ToRecv;
434 } 420 }
435 /* Push the data up to the tty layer */ 421 /* Push the data up to the tty layer */
436 ld->receive_buf(tty, chead, fhead, count); 422 tty_flip_buffer_push(tty);
437done:
438 tty_ldisc_deref(ld);
439} 423}
440 424
441/* 425/*
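
The rp_do_receive() rewrite above drops the driver-managed char/flag buffers and the direct ld->receive_buf() call in favour of the tty flip-buffer helpers: tty_insert_flip_char() on the per-character error path, tty_prepare_flip_string() for the bulk copy, and tty_flip_buffer_push() to hand the data to the line discipline. Below is a minimal sketch of that receive pattern against the 2.6-era API assumed by this patch; the wrapper function and the read_byte/read_flag callbacks are illustrative, not part of the driver.

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Sketch only: push 'count' received bytes into the tty layer.
 * read_byte()/read_flag() stand in for the device-specific FIFO reads. */
static void rx_push(struct tty_struct *tty, int count,
		    u8 (*read_byte)(void), char (*read_flag)(void))
{
	unsigned char *buf;
	int space, i;

	if (read_flag) {
		/* error path: insert one character plus its flag at a time */
		for (i = 0; i < count; i++)
			tty_insert_flip_char(tty, read_byte(), read_flag());
	} else {
		/* fast path: get a contiguous buffer of TTY_NORMAL bytes */
		space = tty_prepare_flip_string(tty, &buf, count);
		for (i = 0; i < space; i++)
			buf[i] = read_byte();
	}
	tty_flip_buffer_push(tty);	/* hand the data to the line discipline */
}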
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 5343e9fc6ab7..1b5330299e30 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -1683,7 +1683,7 @@ static int sx_write(struct tty_struct * tty,
1683 1683
1684 bp = port_Board(port); 1684 bp = port_Board(port);
1685 1685
1686 if (!tty || !port->xmit_buf || !tmp_buf) { 1686 if (!port->xmit_buf || !tmp_buf) {
1687 func_exit(); 1687 func_exit();
1688 return 0; 1688 return 0;
1689 } 1689 }
@@ -1733,7 +1733,7 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch)
1733 return; 1733 return;
1734 } 1734 }
1735 dprintk (SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf); 1735 dprintk (SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf);
1736 if (!tty || !port->xmit_buf) { 1736 if (!port->xmit_buf) {
1737 func_exit(); 1737 func_exit();
1738 return; 1738 return;
1739 } 1739 }
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index b4d1f4eea435..4e35d4181224 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -101,6 +101,7 @@ MODULE_LICENSE("GPL");
101 101
102static struct pci_device_id pci_table[] = { 102static struct pci_device_id pci_table[] = {
103 {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 103 {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
104 {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
104 {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 105 {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
105 {PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, 106 {PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
106 {0,}, /* terminate list */ 107 {0,}, /* terminate list */
@@ -870,7 +871,7 @@ static int write(struct tty_struct *tty,
870 goto cleanup; 871 goto cleanup;
871 DBGINFO(("%s write count=%d\n", info->device_name, count)); 872 DBGINFO(("%s write count=%d\n", info->device_name, count));
872 873
873 if (!tty || !info->tx_buf) 874 if (!info->tx_buf)
874 goto cleanup; 875 goto cleanup;
875 876
876 if (count > info->max_frame_size) { 877 if (count > info->max_frame_size) {
@@ -924,7 +925,7 @@ static void put_char(struct tty_struct *tty, unsigned char ch)
924 if (sanity_check(info, tty->name, "put_char")) 925 if (sanity_check(info, tty->name, "put_char"))
925 return; 926 return;
926 DBGINFO(("%s put_char(%d)\n", info->device_name, ch)); 927 DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
927 if (!tty || !info->tx_buf) 928 if (!info->tx_buf)
928 return; 929 return;
929 spin_lock_irqsave(&info->lock,flags); 930 spin_lock_irqsave(&info->lock,flags);
930 if (!info->tx_active && (info->tx_count < info->max_frame_size)) 931 if (!info->tx_active && (info->tx_count < info->max_frame_size))
@@ -2515,7 +2516,8 @@ static int set_txidle(struct slgt_info *info, int idle_mode)
2515 DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode)); 2516 DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
2516 spin_lock_irqsave(&info->lock,flags); 2517 spin_lock_irqsave(&info->lock,flags);
2517 info->idle_mode = idle_mode; 2518 info->idle_mode = idle_mode;
2518 tx_set_idle(info); 2519 if (info->params.mode != MGSL_MODE_ASYNC)
2520 tx_set_idle(info);
2519 spin_unlock_irqrestore(&info->lock,flags); 2521 spin_unlock_irqrestore(&info->lock,flags);
2520 return 0; 2522 return 0;
2521} 2523}
@@ -3076,7 +3078,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
3076 3078
3077static int alloc_tmp_rbuf(struct slgt_info *info) 3079static int alloc_tmp_rbuf(struct slgt_info *info)
3078{ 3080{
3079 info->tmp_rbuf = kmalloc(info->max_frame_size, GFP_KERNEL); 3081 info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
3080 if (info->tmp_rbuf == NULL) 3082 if (info->tmp_rbuf == NULL)
3081 return -ENOMEM; 3083 return -ENOMEM;
3082 return 0; 3084 return 0;
@@ -3276,6 +3278,9 @@ static void add_device(struct slgt_info *info)
3276 case SYNCLINK_GT_DEVICE_ID: 3278 case SYNCLINK_GT_DEVICE_ID:
3277 devstr = "GT"; 3279 devstr = "GT";
3278 break; 3280 break;
3281 case SYNCLINK_GT2_DEVICE_ID:
3282 devstr = "GT2";
3283 break;
3279 case SYNCLINK_GT4_DEVICE_ID: 3284 case SYNCLINK_GT4_DEVICE_ID:
3280 devstr = "GT4"; 3285 devstr = "GT4";
3281 break; 3286 break;
@@ -3353,7 +3358,9 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
3353 int i; 3358 int i;
3354 int port_count = 1; 3359 int port_count = 1;
3355 3360
3356 if (pdev->device == SYNCLINK_GT4_DEVICE_ID) 3361 if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
3362 port_count = 2;
3363 else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
3357 port_count = 4; 3364 port_count = 4;
3358 3365
3359 /* allocate device instances for all ports */ 3366 /* allocate device instances for all ports */
@@ -3940,8 +3947,6 @@ static void async_mode(struct slgt_info *info)
3940 3947
3941 msc_set_vcr(info); 3948 msc_set_vcr(info);
3942 3949
3943 tx_set_idle(info);
3944
3945 /* SCR (serial control) 3950 /* SCR (serial control)
3946 * 3951 *
3947 * 15 1=tx req on FIFO half empty 3952 * 15 1=tx req on FIFO half empty
@@ -4012,7 +4017,7 @@ static void hdlc_mode(struct slgt_info *info)
4012 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break; 4017 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4013 } 4018 }
4014 4019
4015 switch (info->params.crc_type) 4020 switch (info->params.crc_type & HDLC_CRC_MASK)
4016 { 4021 {
4017 case HDLC_CRC_16_CCITT: val |= BIT9; break; 4022 case HDLC_CRC_16_CCITT: val |= BIT9; break;
4018 case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break; 4023 case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
@@ -4073,7 +4078,7 @@ static void hdlc_mode(struct slgt_info *info)
4073 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break; 4078 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4074 } 4079 }
4075 4080
4076 switch (info->params.crc_type) 4081 switch (info->params.crc_type & HDLC_CRC_MASK)
4077 { 4082 {
4078 case HDLC_CRC_16_CCITT: val |= BIT9; break; 4083 case HDLC_CRC_16_CCITT: val |= BIT9; break;
4079 case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break; 4084 case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
@@ -4175,17 +4180,38 @@ static void hdlc_mode(struct slgt_info *info)
4175 */ 4180 */
4176static void tx_set_idle(struct slgt_info *info) 4181static void tx_set_idle(struct slgt_info *info)
4177{ 4182{
4178 unsigned char val = 0xff; 4183 unsigned char val;
4184 unsigned short tcr;
4179 4185
4180 switch(info->idle_mode) 4186 /* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
4181 { 4187 * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
4182 case HDLC_TXIDLE_FLAGS: val = 0x7e; break; 4188 */
4183 case HDLC_TXIDLE_ALT_ZEROS_ONES: val = 0xaa; break; 4189 tcr = rd_reg16(info, TCR);
4184 case HDLC_TXIDLE_ZEROS: val = 0x00; break; 4190 if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
4185 case HDLC_TXIDLE_ONES: val = 0xff; break; 4191 /* disable preamble, set idle size to 16 bits */
4186 case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break; 4192 tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
4187 case HDLC_TXIDLE_SPACE: val = 0x00; break; 4193 /* MSB of 16 bit idle specified in tx preamble register (TPR) */
4188 case HDLC_TXIDLE_MARK: val = 0xff; break; 4194 wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
4195 } else if (!(tcr & BIT6)) {
4196 /* preamble is disabled, set idle size to 8 bits */
4197 tcr &= ~(BIT5 + BIT4);
4198 }
4199 wr_reg16(info, TCR, tcr);
4200
4201 if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
4202 /* LSB of custom tx idle specified in tx idle register */
4203 val = (unsigned char)(info->idle_mode & 0xff);
4204 } else {
4205 /* standard 8 bit idle patterns */
4206 switch(info->idle_mode)
4207 {
4208 case HDLC_TXIDLE_FLAGS: val = 0x7e; break;
4209 case HDLC_TXIDLE_ALT_ZEROS_ONES:
4210 case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
4211 case HDLC_TXIDLE_ZEROS:
4212 case HDLC_TXIDLE_SPACE: val = 0x00; break;
4213 default: val = 0xff;
4214 }
4189 } 4215 }
4190 4216
4191 wr_reg8(info, TIR, val); 4217 wr_reg8(info, TIR, val);
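
In the reworked tx_set_idle() above, a custom 16-bit idle pattern is split across two registers: the high byte of idle_mode goes to the preamble register (TPR), the low byte to the idle register (TIR), and TCR bits 6..4 are rewritten so the preamble is disabled and the idle size is 16 bits. A small decomposition follows for illustration only; the flag constant is a placeholder, not the driver's real HDLC_TXIDLE_CUSTOM_16 value.

#include <stdio.h>

#define HDLC_TXIDLE_CUSTOM_16 0x10000	/* placeholder flag bit for this sketch */

int main(void)
{
	unsigned int idle_mode = HDLC_TXIDLE_CUSTOM_16 | 0x7e55;
	unsigned char tpr = (idle_mode >> 8) & 0xff;	/* MSB of the pattern -> TPR */
	unsigned char tir = idle_mode & 0xff;		/* LSB of the pattern -> TIR */

	/* the transmitter then repeats the 16-bit pattern 0x7e55 while idle */
	printf("TPR=0x%02x TIR=0x%02x\n", tpr, tir);	/* TPR=0x7e TIR=0x55 */
	return 0;
}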
@@ -4313,6 +4339,12 @@ static int rx_get_frame(struct slgt_info *info)
4313 unsigned long flags; 4339 unsigned long flags;
4314 struct tty_struct *tty = info->tty; 4340 struct tty_struct *tty = info->tty;
4315 unsigned char addr_field = 0xff; 4341 unsigned char addr_field = 0xff;
4342 unsigned int crc_size = 0;
4343
4344 switch (info->params.crc_type & HDLC_CRC_MASK) {
4345 case HDLC_CRC_16_CCITT: crc_size = 2; break;
4346 case HDLC_CRC_32_CCITT: crc_size = 4; break;
4347 }
4316 4348
4317check_again: 4349check_again:
4318 4350
@@ -4357,7 +4389,7 @@ check_again:
4357 status = desc_status(info->rbufs[end]); 4389 status = desc_status(info->rbufs[end]);
4358 4390
4359 /* ignore CRC bit if not using CRC (bit is undefined) */ 4391 /* ignore CRC bit if not using CRC (bit is undefined) */
4360 if (info->params.crc_type == HDLC_CRC_NONE) 4392 if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
4361 status &= ~BIT1; 4393 status &= ~BIT1;
4362 4394
4363 if (framesize == 0 || 4395 if (framesize == 0 ||
@@ -4366,34 +4398,34 @@ check_again:
4366 goto check_again; 4398 goto check_again;
4367 } 4399 }
4368 4400
4369 if (framesize < 2 || status & (BIT1+BIT0)) { 4401 if (framesize < (2 + crc_size) || status & BIT0) {
4370 if (framesize < 2 || (status & BIT0)) 4402 info->icount.rxshort++;
4371 info->icount.rxshort++;
4372 else
4373 info->icount.rxcrc++;
4374 framesize = 0; 4403 framesize = 0;
4404 } else if (status & BIT1) {
4405 info->icount.rxcrc++;
4406 if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
4407 framesize = 0;
4408 }
4375 4409
4376#ifdef CONFIG_HDLC 4410#ifdef CONFIG_HDLC
4377 { 4411 if (framesize == 0) {
4378 struct net_device_stats *stats = hdlc_stats(info->netdev); 4412 struct net_device_stats *stats = hdlc_stats(info->netdev);
4379 stats->rx_errors++; 4413 stats->rx_errors++;
4380 stats->rx_frame_errors++; 4414 stats->rx_frame_errors++;
4381 }
4382#endif
4383 } else {
4384 /* adjust frame size for CRC, if any */
4385 if (info->params.crc_type == HDLC_CRC_16_CCITT)
4386 framesize -= 2;
4387 else if (info->params.crc_type == HDLC_CRC_32_CCITT)
4388 framesize -= 4;
4389 } 4415 }
4416#endif
4390 4417
4391 DBGBH(("%s rx frame status=%04X size=%d\n", 4418 DBGBH(("%s rx frame status=%04X size=%d\n",
4392 info->device_name, status, framesize)); 4419 info->device_name, status, framesize));
4393 DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, DMABUFSIZE), "rx"); 4420 DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, DMABUFSIZE), "rx");
4394 4421
4395 if (framesize) { 4422 if (framesize) {
4396 if (framesize > info->max_frame_size) 4423 if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
4424 framesize -= crc_size;
4425 crc_size = 0;
4426 }
4427
4428 if (framesize > info->max_frame_size + crc_size)
4397 info->icount.rxlong++; 4429 info->icount.rxlong++;
4398 else { 4430 else {
4399 /* copy dma buffer(s) to contiguous temp buffer */ 4431 /* copy dma buffer(s) to contiguous temp buffer */
@@ -4413,6 +4445,11 @@ check_again:
4413 i = 0; 4445 i = 0;
4414 } 4446 }
4415 4447
4448 if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
4449 *p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
4450 framesize++;
4451 }
4452
4416#ifdef CONFIG_HDLC 4453#ifdef CONFIG_HDLC
4417 if (info->netcount) 4454 if (info->netcount)
4418 hdlcdev_rx(info,info->tmp_rbuf, framesize); 4455 hdlcdev_rx(info,info->tmp_rbuf, framesize);
@@ -4671,13 +4708,13 @@ static int loopback_test(struct slgt_info *info)
4671static int adapter_test(struct slgt_info *info) 4708static int adapter_test(struct slgt_info *info)
4672{ 4709{
4673 DBGINFO(("testing %s\n", info->device_name)); 4710 DBGINFO(("testing %s\n", info->device_name));
4674 if ((info->init_error = register_test(info)) < 0) { 4711 if (register_test(info) < 0) {
4675 printk("register test failure %s addr=%08X\n", 4712 printk("register test failure %s addr=%08X\n",
4676 info->device_name, info->phys_reg_addr); 4713 info->device_name, info->phys_reg_addr);
4677 } else if ((info->init_error = irq_test(info)) < 0) { 4714 } else if (irq_test(info) < 0) {
4678 printk("IRQ test failure %s IRQ=%d\n", 4715 printk("IRQ test failure %s IRQ=%d\n",
4679 info->device_name, info->irq_level); 4716 info->device_name, info->irq_level);
4680 } else if ((info->init_error = loopback_test(info)) < 0) { 4717 } else if (loopback_test(info) < 0) {
4681 printk("loopback test failure %s\n", info->device_name); 4718 printk("loopback test failure %s\n", info->device_name);
4682 } 4719 }
4683 return info->init_error; 4720 return info->init_error;
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 858740131115..21bf15ad9980 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -988,7 +988,7 @@ static int write(struct tty_struct *tty,
988 if (sanity_check(info, tty->name, "write")) 988 if (sanity_check(info, tty->name, "write"))
989 goto cleanup; 989 goto cleanup;
990 990
991 if (!tty || !info->tx_buf) 991 if (!info->tx_buf)
992 goto cleanup; 992 goto cleanup;
993 993
994 if (info->params.mode == MGSL_MODE_HDLC) { 994 if (info->params.mode == MGSL_MODE_HDLC) {
@@ -1067,7 +1067,7 @@ static void put_char(struct tty_struct *tty, unsigned char ch)
1067 if (sanity_check(info, tty->name, "put_char")) 1067 if (sanity_check(info, tty->name, "put_char"))
1068 return; 1068 return;
1069 1069
1070 if (!tty || !info->tx_buf) 1070 if (!info->tx_buf)
1071 return; 1071 return;
1072 1072
1073 spin_lock_irqsave(&info->lock,flags); 1073 spin_lock_irqsave(&info->lock,flags);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 1e371a510dd2..731c3d5da0dc 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -6,8 +6,7 @@
6menu "Firmware Drivers" 6menu "Firmware Drivers"
7 7
8config EDD 8config EDD
9 tristate "BIOS Enhanced Disk Drive calls determine boot disk (EXPERIMENTAL)" 9 tristate "BIOS Enhanced Disk Drive calls determine boot disk"
10 depends on EXPERIMENTAL
11 depends on !IA64 10 depends on !IA64
12 help 11 help
13 Say Y or M here if you want to enable BIOS Enhanced Disk Drive 12 Say Y or M here if you want to enable BIOS Enhanced Disk Drive
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 948bd7e1445a..b9e3886d9e16 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -255,10 +255,15 @@ void __init dmi_scan_machine(void)
255/** 255/**
256 * dmi_check_system - check system DMI data 256 * dmi_check_system - check system DMI data
257 * @list: array of dmi_system_id structures to match against 257 * @list: array of dmi_system_id structures to match against
258 * All non-null elements of the list must match
259 * their slot's (field index's) data (i.e., each
260 * list string must be a substring of the specified
261 * DMI slot's string data) to be considered a
262 * successful match.
258 * 263 *
259 * Walk the blacklist table running matching functions until someone 264 * Walk the blacklist table running matching functions until someone
260 * returns non zero or we hit the end. Callback function is called for 265 * returns non zero or we hit the end. Callback function is called for
261 * each successfull match. Returns the number of matches. 266 * each successful match. Returns the number of matches.
262 */ 267 */
263int dmi_check_system(struct dmi_system_id *list) 268int dmi_check_system(struct dmi_system_id *list)
264{ 269{
@@ -287,7 +292,7 @@ EXPORT_SYMBOL(dmi_check_system);
287 292
288/** 293/**
289 * dmi_get_system_info - return DMI data value 294 * dmi_get_system_info - return DMI data value
290 * @field: data index (see enum dmi_filed) 295 * @field: data index (see enum dmi_field)
291 * 296 *
292 * Returns one DMI data value, can be used to perform 297 * Returns one DMI data value, can be used to perform
293 * complex DMI data checks. 298 * complex DMI data checks.
@@ -301,13 +306,13 @@ EXPORT_SYMBOL(dmi_get_system_info);
301/** 306/**
302 * dmi_find_device - find onboard device by type/name 307 * dmi_find_device - find onboard device by type/name
303 * @type: device type or %DMI_DEV_TYPE_ANY to match all device types 308 * @type: device type or %DMI_DEV_TYPE_ANY to match all device types
304 * @desc: device name string or %NULL to match all 309 * @name: device name string or %NULL to match all
305 * @from: previous device found in search, or %NULL for new search. 310 * @from: previous device found in search, or %NULL for new search.
306 * 311 *
307 * Iterates through the list of known onboard devices. If a device is 312 * Iterates through the list of known onboard devices. If a device is
308 * found with a matching @vendor and @device, a pointer to its device 313 * found with a matching @vendor and @device, a pointer to its device
309 * structure is returned. Otherwise, %NULL is returned. 314 * structure is returned. Otherwise, %NULL is returned.
310 * A new search is initiated by passing %NULL to the @from argument. 315 * A new search is initiated by passing %NULL as the @from argument.
311 * If @from is not %NULL, searches continue from next device. 316 * If @from is not %NULL, searches continue from next device.
312 */ 317 */
313struct dmi_device * dmi_find_device(int type, const char *name, 318struct dmi_device * dmi_find_device(int type, const char *name,
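
The kernel-doc updated above describes the matching rule for dmi_check_system(): every non-empty matches[] slot of a dmi_system_id entry must be a substring of the corresponding DMI field for the entry to hit, and the callback fires once per hit, with the number of hits returned. A minimal, hypothetical table illustrating the intended use (the callback, ident string and match values are made up):

#include <linux/dmi.h>
#include <linux/kernel.h>

/* Hypothetical quirk callback; the ident string is only for logging. */
static int __init apply_example_quirk(struct dmi_system_id *d)
{
	printk(KERN_INFO "DMI quirk matched: %s\n", d->ident);
	return 0;	/* keep walking the table */
}

static struct dmi_system_id __initdata example_dmi_table[] = {
	{
		.callback = apply_example_quirk,
		.ident = "Example Vendor ExampleBook",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBook"),
		},
	},
	{ }	/* terminator */
};

/* e.g. from a driver's init path:
 *	if (dmi_check_system(example_dmi_table))
 *		... at least one entry matched, quirk applied ...
 */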
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 6de3cd3d6e8e..99fa42402e71 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -395,7 +395,8 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
395 * we cannot reliably check if drive can auto-close 395 * we cannot reliably check if drive can auto-close
396 */ 396 */
397 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) 397 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
398 log = 0; 398 break;
399 log = 1;
399 break; 400 break;
400 case UNIT_ATTENTION: 401 case UNIT_ATTENTION:
401 /* 402 /*
@@ -417,6 +418,11 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
417 struct request *failed_command, 418 struct request *failed_command,
418 struct request_sense *sense) 419 struct request_sense *sense)
419{ 420{
421 unsigned long sector;
422 unsigned long bio_sectors;
423 unsigned long valid;
424 struct cdrom_info *info = drive->driver_data;
425
420 if (!cdrom_log_sense(drive, failed_command, sense)) 426 if (!cdrom_log_sense(drive, failed_command, sense))
421 return; 427 return;
422 428
@@ -429,6 +435,37 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
429 if (sense->sense_key == 0x05 && sense->asc == 0x24) 435 if (sense->sense_key == 0x05 && sense->asc == 0x24)
430 return; 436 return;
431 437
438 if (sense->error_code == 0x70) { /* Current Error */
439 switch(sense->sense_key) {
440 case MEDIUM_ERROR:
441 case VOLUME_OVERFLOW:
442 case ILLEGAL_REQUEST:
443 if (!sense->valid)
444 break;
445 if (failed_command == NULL ||
446 !blk_fs_request(failed_command))
447 break;
448 sector = (sense->information[0] << 24) |
449 (sense->information[1] << 16) |
450 (sense->information[2] << 8) |
451 (sense->information[3]);
452
453 bio_sectors = bio_sectors(failed_command->bio);
454 if (bio_sectors < 4)
455 bio_sectors = 4;
456 if (drive->queue->hardsect_size == 2048)
457 sector <<= 2; /* Device sector size is 2K */
458 sector &= ~(bio_sectors -1);
459 valid = (sector - failed_command->sector) << 9;
460
461 if (valid < 0)
462 valid = 0;
463 if (sector < get_capacity(info->disk) &&
464 drive->probed_capacity - sector < 4 * 75) {
465 set_capacity(info->disk, sector);
466 }
467 }
468 }
432#if VERBOSE_IDE_CD_ERRORS 469#if VERBOSE_IDE_CD_ERRORS
433 { 470 {
434 int i; 471 int i;
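
The new error-handling block above reconstructs the failing LBA from the sense data: the four 'information' bytes form a big-endian sector number, which is shifted left by 2 when the device reports 2048-byte sectors (one CD frame spans four 512-byte sectors), then rounded down to the request's bio alignment before the capacity is trimmed. A quick worked example with made-up values:

#include <stdio.h>

int main(void)
{
	/* hypothetical sense->information[] bytes reported by the drive */
	unsigned char info_bytes[4] = { 0x00, 0x00, 0x12, 0x34 };
	unsigned long sector, bio_sectors = 4;	/* minimum enforced by the patch */

	sector = (info_bytes[0] << 24) | (info_bytes[1] << 16) |
		 (info_bytes[2] << 8)  |  info_bytes[3];	/* 0x1234 = 4660 frames */

	sector <<= 2;			/* 2K device sectors -> 512-byte units: 18640 */
	sector &= ~(bio_sectors - 1);	/* align down to the bio: still 18640 */

	printf("failed 512-byte sector: %lu\n", sector);
	return 0;
}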
@@ -609,17 +646,23 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
609 sense = failed->sense; 646 sense = failed->sense;
610 failed->sense_len = rq->sense_len; 647 failed->sense_len = rq->sense_len;
611 } 648 }
612 649 cdrom_analyze_sense_data(drive, failed, sense);
613 /* 650 /*
614 * now end failed request 651 * now end failed request
615 */ 652 */
616 spin_lock_irqsave(&ide_lock, flags); 653 if (blk_fs_request(failed)) {
617 end_that_request_chunk(failed, 0, failed->data_len); 654 if (ide_end_dequeued_request(drive, failed, 0,
618 end_that_request_last(failed, 0); 655 failed->hard_nr_sectors))
619 spin_unlock_irqrestore(&ide_lock, flags); 656 BUG();
620 } 657 } else {
621 658 spin_lock_irqsave(&ide_lock, flags);
622 cdrom_analyze_sense_data(drive, failed, sense); 659 end_that_request_chunk(failed, 0,
660 failed->data_len);
661 end_that_request_last(failed, 0);
662 spin_unlock_irqrestore(&ide_lock, flags);
663 }
664 } else
665 cdrom_analyze_sense_data(drive, NULL, sense);
623 } 666 }
624 667
625 if (!rq->current_nr_sectors && blk_fs_request(rq)) 668 if (!rq->current_nr_sectors && blk_fs_request(rq))
@@ -633,6 +676,13 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
633 ide_end_request(drive, uptodate, nsectors); 676 ide_end_request(drive, uptodate, nsectors);
634} 677}
635 678
679static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 stat)
680{
681 if (stat & 0x80)
682 return;
683 ide_dump_status(drive, msg, stat);
684}
685
636/* Returns 0 if the request should be continued. 686/* Returns 0 if the request should be continued.
637 Returns 1 if the request was ended. */ 687 Returns 1 if the request was ended. */
638static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) 688static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
@@ -761,16 +811,16 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
761 sense_key == DATA_PROTECT) { 811 sense_key == DATA_PROTECT) {
762 /* No point in retrying after an illegal 812 /* No point in retrying after an illegal
763 request or data protect error.*/ 813 request or data protect error.*/
764 ide_dump_status (drive, "command error", stat); 814 ide_dump_status_no_sense (drive, "command error", stat);
765 do_end_request = 1; 815 do_end_request = 1;
766 } else if (sense_key == MEDIUM_ERROR) { 816 } else if (sense_key == MEDIUM_ERROR) {
767 /* No point in re-trying a zillion times on a bad 817 /* No point in re-trying a zillion times on a bad
768 * sector... If we got here the error is not correctable */ 818 * sector... If we got here the error is not correctable */
769 ide_dump_status (drive, "media error (bad sector)", stat); 819 ide_dump_status_no_sense (drive, "media error (bad sector)", stat);
770 do_end_request = 1; 820 do_end_request = 1;
771 } else if (sense_key == BLANK_CHECK) { 821 } else if (sense_key == BLANK_CHECK) {
772 /* Disk appears blank ?? */ 822 /* Disk appears blank ?? */
773 ide_dump_status (drive, "media error (blank)", stat); 823 ide_dump_status_no_sense (drive, "media error (blank)", stat);
774 do_end_request = 1; 824 do_end_request = 1;
775 } else if ((err & ~ABRT_ERR) != 0) { 825 } else if ((err & ~ABRT_ERR) != 0) {
776 /* Go to the default handler 826 /* Go to the default handler
@@ -782,13 +832,27 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
782 do_end_request = 1; 832 do_end_request = 1;
783 } 833 }
784 834
785 if (do_end_request) 835 /* End a request through request sense analysis when we have
786 cdrom_end_request(drive, 0); 836 sense data. We need this in order to perform end of media
787 837 processing */
788 /* If we got a CHECK_CONDITION status, 838
789 queue a request sense command. */ 839 if (do_end_request) {
790 if ((stat & ERR_STAT) != 0) 840 if (stat & ERR_STAT) {
791 cdrom_queue_request_sense(drive, NULL, NULL); 841 unsigned long flags;
842 spin_lock_irqsave(&ide_lock, flags);
843 blkdev_dequeue_request(rq);
844 HWGROUP(drive)->rq = NULL;
845 spin_unlock_irqrestore(&ide_lock, flags);
846
847 cdrom_queue_request_sense(drive, rq->sense, rq);
848 } else
849 cdrom_end_request(drive, 0);
850 } else {
851 /* If we got a CHECK_CONDITION status,
852 queue a request sense command. */
853 if (stat & ERR_STAT)
854 cdrom_queue_request_sense(drive, NULL, NULL);
855 }
792 } else { 856 } else {
793 blk_dump_rq_flags(rq, "ide-cd: bad rq"); 857 blk_dump_rq_flags(rq, "ide-cd: bad rq");
794 cdrom_end_request(drive, 0); 858 cdrom_end_request(drive, 0);
@@ -1491,8 +1555,7 @@ static ide_startstop_t cdrom_do_packet_command (ide_drive_t *drive)
1491} 1555}
1492 1556
1493 1557
1494static 1558static int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
1495int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
1496{ 1559{
1497 struct request_sense sense; 1560 struct request_sense sense;
1498 int retries = 10; 1561 int retries = 10;
@@ -2220,6 +2283,9 @@ static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
2220 toc->capacity = 0x1fffff; 2283 toc->capacity = 0x1fffff;
2221 2284
2222 set_capacity(info->disk, toc->capacity * sectors_per_frame); 2285 set_capacity(info->disk, toc->capacity * sectors_per_frame);
 2286 /* Save a private copy of the TOC capacity for error handling */
2287 drive->probed_capacity = toc->capacity * sectors_per_frame;
2288
2223 blk_queue_hardsect_size(drive->queue, 2289 blk_queue_hardsect_size(drive->queue,
2224 sectors_per_frame << SECTOR_BITS); 2290 sectors_per_frame << SECTOR_BITS);
2225 2291
@@ -2342,6 +2408,7 @@ static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
2342 if (!stat && (last_written > toc->capacity)) { 2408 if (!stat && (last_written > toc->capacity)) {
2343 toc->capacity = last_written; 2409 toc->capacity = last_written;
2344 set_capacity(info->disk, toc->capacity * sectors_per_frame); 2410 set_capacity(info->disk, toc->capacity * sectors_per_frame);
2411 drive->probed_capacity = toc->capacity * sectors_per_frame;
2345 } 2412 }
2346 2413
2347 /* Remember that we've read this stuff. */ 2414 /* Remember that we've read this stuff. */
@@ -2698,14 +2765,11 @@ int ide_cdrom_drive_status (struct cdrom_device_info *cdi, int slot_nr)
2698 * any other way to detect this... 2765 * any other way to detect this...
2699 */ 2766 */
2700 if (sense.sense_key == NOT_READY) { 2767 if (sense.sense_key == NOT_READY) {
2701 if (sense.asc == 0x3a) { 2768 if (sense.asc == 0x3a && sense.ascq == 1)
2702 if (sense.ascq == 1) 2769 return CDS_NO_DISC;
2703 return CDS_NO_DISC; 2770 else
2704 else if (sense.ascq == 0 || sense.ascq == 2) 2771 return CDS_TRAY_OPEN;
2705 return CDS_TRAY_OPEN;
2706 }
2707 } 2772 }
2708
2709 return CDS_DRIVE_NOT_READY; 2773 return CDS_DRIVE_NOT_READY;
2710} 2774}
2711 2775
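The new cdrom_analyze_sense_data() hunk above recovers the failing LBA from the four sense "information" bytes, converts 2 KiB CD frames to 512-byte sectors when the device reports a 2048-byte hard sector size, and aligns the result down to the start of the failed chunk before trimming the disk capacity. A minimal userspace sketch of that arithmetic (function and variable names are illustrative, not the kernel API):

    #include <stdio.h>
    #include <stdint.h>

    /* Rebuild the failing LBA from the four sense "information" bytes,
     * convert CD frames (2 KiB) to 512-byte sectors, and round down to
     * the start of the failed chunk. Illustration only. */
    static uint32_t failed_sector(const uint8_t info[4], unsigned bio_sectors,
                                  unsigned hardsect_size)
    {
        uint32_t sector = ((uint32_t)info[0] << 24) |
                          ((uint32_t)info[1] << 16) |
                          ((uint32_t)info[2] << 8)  |
                           (uint32_t)info[3];

        if (bio_sectors < 4)            /* never trust less than one frame */
            bio_sectors = 4;
        if (hardsect_size == 2048)      /* device addresses 2 KiB frames */
            sector <<= 2;               /* convert to 512-byte sectors */

        return sector & ~(uint32_t)(bio_sectors - 1);
    }

    int main(void)
    {
        const uint8_t info[4] = { 0x00, 0x00, 0x12, 0x34 }; /* example LBA */

        printf("aligned failed sector: %u\n",
               (unsigned)failed_sector(info, 8, 2048));
        return 0;
    }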
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index a1179e924962..4656587aa2f7 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -1284,7 +1284,7 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
1284 1284
1285 debug_log(KERN_INFO "rq_status: %d, dev: %s, flags: %lx, errors: %d\n", 1285 debug_log(KERN_INFO "rq_status: %d, dev: %s, flags: %lx, errors: %d\n",
1286 rq->rq_status, 1286 rq->rq_status,
1287 rq->rq_disk ? rq->rq_disk->disk_name ? "?", 1287 rq->rq_disk ? rq->rq_disk->disk_name : "?",
1288 rq->flags, rq->errors); 1288 rq->flags, rq->errors);
1289 debug_log(KERN_INFO "sector: %ld, nr_sectors: %ld, " 1289 debug_log(KERN_INFO "sector: %ld, nr_sectors: %ld, "
1290 "current_nr_sectors: %d\n", (long)rq->sector, 1290 "current_nr_sectors: %d\n", (long)rq->sector,
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4f2f138de2ca..622a55c72f03 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -223,6 +223,63 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
223} 223}
224 224
225/** 225/**
226 * ide_end_dequeued_request - complete an IDE I/O
227 * @drive: IDE device for the I/O
 228 * @uptodate: non-zero if the request completed successfully, 0 or negative on error
229 * @nr_sectors: number of sectors completed
230 *
231 * Complete an I/O that is no longer on the request queue. This
232 * typically occurs when we pull the request and issue a REQUEST_SENSE.
233 * We must still finish the old request but we must not tamper with the
234 * queue in the meantime.
235 *
 236 * NOTE: This path does not handle barriers, but barriers are not
 237 * supported on ide-cd anyway.
238 */
239
240int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
241 int uptodate, int nr_sectors)
242{
243 unsigned long flags;
244 int ret = 1;
245
246 spin_lock_irqsave(&ide_lock, flags);
247
248 BUG_ON(!(rq->flags & REQ_STARTED));
249
250 /*
251 * if failfast is set on a request, override number of sectors and
252 * complete the whole request right now
253 */
254 if (blk_noretry_request(rq) && end_io_error(uptodate))
255 nr_sectors = rq->hard_nr_sectors;
256
257 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
258 rq->errors = -EIO;
259
260 /*
261 * decide whether to reenable DMA -- 3 is a random magic for now,
262 * if we DMA timeout more than 3 times, just stay in PIO
263 */
264 if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
265 drive->state = 0;
266 HWGROUP(drive)->hwif->ide_dma_on(drive);
267 }
268
269 if (!end_that_request_first(rq, uptodate, nr_sectors)) {
270 add_disk_randomness(rq->rq_disk);
271 if (blk_rq_tagged(rq))
272 blk_queue_end_tag(drive->queue, rq);
273 end_that_request_last(rq, uptodate);
274 ret = 0;
275 }
276 spin_unlock_irqrestore(&ide_lock, flags);
277 return ret;
278}
279EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
280
281
282/**
226 * ide_complete_pm_request - end the current Power Management request 283 * ide_complete_pm_request - end the current Power Management request
227 * @drive: target drive 284 * @drive: target drive
228 * @rq: request 285 * @rq: request
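ide_end_dequeued_request() exists so that ide-cd can pull a failed request off the queue, issue a REQUEST_SENSE for it, and still complete the old request afterwards without touching the queue it no longer sits on. A condensed sketch of the caller sequence, pieced together from the ide-cd hunks earlier in this diff (kernel context assumed; not a standalone example):

    unsigned long flags;

    /* Pull the failed request off the queue before queueing REQUEST_SENSE */
    spin_lock_irqsave(&ide_lock, flags);
    blkdev_dequeue_request(rq);
    HWGROUP(drive)->rq = NULL;
    spin_unlock_irqrestore(&ide_lock, flags);

    cdrom_queue_request_sense(drive, rq->sense, rq);

    /* ...later, once the sense data has been analyzed, finish the
     * dequeued request instead of calling ide_end_request(): */
    if (ide_end_dequeued_request(drive, rq, 0, rq->hard_nr_sectors))
        BUG();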
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index f04791a58df0..09f3a7dab28a 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2646,21 +2646,23 @@ static idetape_stage_t *idetape_kmalloc_stage (idetape_tape_t *tape)
2646 return __idetape_kmalloc_stage(tape, 0, 0); 2646 return __idetape_kmalloc_stage(tape, 0, 0);
2647} 2647}
2648 2648
2649static void idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *stage, const char __user *buf, int n) 2649static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *stage, const char __user *buf, int n)
2650{ 2650{
2651 struct idetape_bh *bh = tape->bh; 2651 struct idetape_bh *bh = tape->bh;
2652 int count; 2652 int count;
2653 int ret = 0;
2653 2654
2654 while (n) { 2655 while (n) {
2655#if IDETAPE_DEBUG_BUGS 2656#if IDETAPE_DEBUG_BUGS
2656 if (bh == NULL) { 2657 if (bh == NULL) {
2657 printk(KERN_ERR "ide-tape: bh == NULL in " 2658 printk(KERN_ERR "ide-tape: bh == NULL in "
2658 "idetape_copy_stage_from_user\n"); 2659 "idetape_copy_stage_from_user\n");
2659 return; 2660 return 1;
2660 } 2661 }
2661#endif /* IDETAPE_DEBUG_BUGS */ 2662#endif /* IDETAPE_DEBUG_BUGS */
2662 count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), (unsigned int)n); 2663 count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), (unsigned int)n);
2663 copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf, count); 2664 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf, count))
2665 ret = 1;
2664 n -= count; 2666 n -= count;
2665 atomic_add(count, &bh->b_count); 2667 atomic_add(count, &bh->b_count);
2666 buf += count; 2668 buf += count;
@@ -2671,23 +2673,26 @@ static void idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t
2671 } 2673 }
2672 } 2674 }
2673 tape->bh = bh; 2675 tape->bh = bh;
2676 return ret;
2674} 2677}
2675 2678
2676static void idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, idetape_stage_t *stage, int n) 2679static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, idetape_stage_t *stage, int n)
2677{ 2680{
2678 struct idetape_bh *bh = tape->bh; 2681 struct idetape_bh *bh = tape->bh;
2679 int count; 2682 int count;
2683 int ret = 0;
2680 2684
2681 while (n) { 2685 while (n) {
2682#if IDETAPE_DEBUG_BUGS 2686#if IDETAPE_DEBUG_BUGS
2683 if (bh == NULL) { 2687 if (bh == NULL) {
2684 printk(KERN_ERR "ide-tape: bh == NULL in " 2688 printk(KERN_ERR "ide-tape: bh == NULL in "
2685 "idetape_copy_stage_to_user\n"); 2689 "idetape_copy_stage_to_user\n");
2686 return; 2690 return 1;
2687 } 2691 }
2688#endif /* IDETAPE_DEBUG_BUGS */ 2692#endif /* IDETAPE_DEBUG_BUGS */
2689 count = min(tape->b_count, n); 2693 count = min(tape->b_count, n);
2690 copy_to_user(buf, tape->b_data, count); 2694 if (copy_to_user(buf, tape->b_data, count))
2695 ret = 1;
2691 n -= count; 2696 n -= count;
2692 tape->b_data += count; 2697 tape->b_data += count;
2693 tape->b_count -= count; 2698 tape->b_count -= count;
@@ -2700,6 +2705,7 @@ static void idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf,
2700 } 2705 }
2701 } 2706 }
2702 } 2707 }
2708 return ret;
2703} 2709}
2704 2710
2705static void idetape_init_merge_stage (idetape_tape_t *tape) 2711static void idetape_init_merge_stage (idetape_tape_t *tape)
@@ -3719,6 +3725,7 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
3719 struct ide_tape_obj *tape = ide_tape_f(file); 3725 struct ide_tape_obj *tape = ide_tape_f(file);
3720 ide_drive_t *drive = tape->drive; 3726 ide_drive_t *drive = tape->drive;
3721 ssize_t bytes_read,temp, actually_read = 0, rc; 3727 ssize_t bytes_read,temp, actually_read = 0, rc;
3728 ssize_t ret = 0;
3722 3729
3723#if IDETAPE_DEBUG_LOG 3730#if IDETAPE_DEBUG_LOG
3724 if (tape->debug_level >= 3) 3731 if (tape->debug_level >= 3)
@@ -3737,7 +3744,8 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
3737 return (0); 3744 return (0);
3738 if (tape->merge_stage_size) { 3745 if (tape->merge_stage_size) {
3739 actually_read = min((unsigned int)(tape->merge_stage_size), (unsigned int)count); 3746 actually_read = min((unsigned int)(tape->merge_stage_size), (unsigned int)count);
3740 idetape_copy_stage_to_user(tape, buf, tape->merge_stage, actually_read); 3747 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, actually_read))
3748 ret = -EFAULT;
3741 buf += actually_read; 3749 buf += actually_read;
3742 tape->merge_stage_size -= actually_read; 3750 tape->merge_stage_size -= actually_read;
3743 count -= actually_read; 3751 count -= actually_read;
@@ -3746,7 +3754,8 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
3746 bytes_read = idetape_add_chrdev_read_request(drive, tape->capabilities.ctl); 3754 bytes_read = idetape_add_chrdev_read_request(drive, tape->capabilities.ctl);
3747 if (bytes_read <= 0) 3755 if (bytes_read <= 0)
3748 goto finish; 3756 goto finish;
3749 idetape_copy_stage_to_user(tape, buf, tape->merge_stage, bytes_read); 3757 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, bytes_read))
3758 ret = -EFAULT;
3750 buf += bytes_read; 3759 buf += bytes_read;
3751 count -= bytes_read; 3760 count -= bytes_read;
3752 actually_read += bytes_read; 3761 actually_read += bytes_read;
@@ -3756,7 +3765,8 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
3756 if (bytes_read <= 0) 3765 if (bytes_read <= 0)
3757 goto finish; 3766 goto finish;
3758 temp = min((unsigned long)count, (unsigned long)bytes_read); 3767 temp = min((unsigned long)count, (unsigned long)bytes_read);
3759 idetape_copy_stage_to_user(tape, buf, tape->merge_stage, temp); 3768 if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, temp))
3769 ret = -EFAULT;
3760 actually_read += temp; 3770 actually_read += temp;
3761 tape->merge_stage_size = bytes_read-temp; 3771 tape->merge_stage_size = bytes_read-temp;
3762 } 3772 }
@@ -3769,7 +3779,8 @@ finish:
3769 idetape_space_over_filemarks(drive, MTFSF, 1); 3779 idetape_space_over_filemarks(drive, MTFSF, 1);
3770 return 0; 3780 return 0;
3771 } 3781 }
3772 return actually_read; 3782
3783 return (ret) ? ret : actually_read;
3773} 3784}
3774 3785
3775static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf, 3786static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
@@ -3777,7 +3788,8 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
3777{ 3788{
3778 struct ide_tape_obj *tape = ide_tape_f(file); 3789 struct ide_tape_obj *tape = ide_tape_f(file);
3779 ide_drive_t *drive = tape->drive; 3790 ide_drive_t *drive = tape->drive;
3780 ssize_t retval, actually_written = 0; 3791 ssize_t actually_written = 0;
3792 ssize_t ret = 0;
3781 3793
3782 /* The drive is write protected. */ 3794 /* The drive is write protected. */
3783 if (tape->write_prot) 3795 if (tape->write_prot)
@@ -3813,7 +3825,7 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
3813 * some drives (Seagate STT3401A) will return an error. 3825 * some drives (Seagate STT3401A) will return an error.
3814 */ 3826 */
3815 if (drive->dsc_overlap) { 3827 if (drive->dsc_overlap) {
3816 retval = idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, 0, tape->merge_stage->bh); 3828 ssize_t retval = idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, 0, tape->merge_stage->bh);
3817 if (retval < 0) { 3829 if (retval < 0) {
3818 __idetape_kfree_stage(tape->merge_stage); 3830 __idetape_kfree_stage(tape->merge_stage);
3819 tape->merge_stage = NULL; 3831 tape->merge_stage = NULL;
@@ -3834,12 +3846,14 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
3834 } 3846 }
3835#endif /* IDETAPE_DEBUG_BUGS */ 3847#endif /* IDETAPE_DEBUG_BUGS */
3836 actually_written = min((unsigned int)(tape->stage_size - tape->merge_stage_size), (unsigned int)count); 3848 actually_written = min((unsigned int)(tape->stage_size - tape->merge_stage_size), (unsigned int)count);
3837 idetape_copy_stage_from_user(tape, tape->merge_stage, buf, actually_written); 3849 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, actually_written))
3850 ret = -EFAULT;
3838 buf += actually_written; 3851 buf += actually_written;
3839 tape->merge_stage_size += actually_written; 3852 tape->merge_stage_size += actually_written;
3840 count -= actually_written; 3853 count -= actually_written;
3841 3854
3842 if (tape->merge_stage_size == tape->stage_size) { 3855 if (tape->merge_stage_size == tape->stage_size) {
3856 ssize_t retval;
3843 tape->merge_stage_size = 0; 3857 tape->merge_stage_size = 0;
3844 retval = idetape_add_chrdev_write_request(drive, tape->capabilities.ctl); 3858 retval = idetape_add_chrdev_write_request(drive, tape->capabilities.ctl);
3845 if (retval <= 0) 3859 if (retval <= 0)
@@ -3847,7 +3861,9 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
3847 } 3861 }
3848 } 3862 }
3849 while (count >= tape->stage_size) { 3863 while (count >= tape->stage_size) {
3850 idetape_copy_stage_from_user(tape, tape->merge_stage, buf, tape->stage_size); 3864 ssize_t retval;
3865 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, tape->stage_size))
3866 ret = -EFAULT;
3851 buf += tape->stage_size; 3867 buf += tape->stage_size;
3852 count -= tape->stage_size; 3868 count -= tape->stage_size;
3853 retval = idetape_add_chrdev_write_request(drive, tape->capabilities.ctl); 3869 retval = idetape_add_chrdev_write_request(drive, tape->capabilities.ctl);
@@ -3857,10 +3873,11 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
3857 } 3873 }
3858 if (count) { 3874 if (count) {
3859 actually_written += count; 3875 actually_written += count;
3860 idetape_copy_stage_from_user(tape, tape->merge_stage, buf, count); 3876 if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, count))
3877 ret = -EFAULT;
3861 tape->merge_stage_size += count; 3878 tape->merge_stage_size += count;
3862 } 3879 }
3863 return (actually_written); 3880 return (ret) ? ret : actually_written;
3864} 3881}
3865 3882
3866static int idetape_write_filemark (ide_drive_t *drive) 3883static int idetape_write_filemark (ide_drive_t *drive)
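The ide-tape hunks above make the stage copy helpers report copy_to_user()/copy_from_user() failures, and the chrdev read/write paths remember the first failure and return -EFAULT only at the end, so the stage bookkeeping still advances. A minimal userspace sketch of that pattern, with a NULL destination standing in for a faulting user buffer:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Stand-in for copy_to_user(): a nonzero return means the copy faulted. */
    static int copy_stage(char *dst, const char *src, size_t n)
    {
        if (dst == NULL)
            return 1;
        memcpy(dst, src, n);
        return 0;
    }

    /* Remember the first fault but keep the byte accounting going, then
     * report either the error or the transfer size at the end. */
    static long read_stages(char *buf, const char *stages[], int nstages,
                            size_t stage_size)
    {
        long actually_read = 0, ret = 0;
        int i;

        for (i = 0; i < nstages; i++) {
            if (copy_stage(buf ? buf + actually_read : NULL,
                           stages[i], stage_size))
                ret = -EFAULT;
            actually_read += stage_size;
        }
        return ret ? ret : actually_read;
    }

    int main(void)
    {
        const char *stages[] = { "abcd", "efgh" };
        char buf[16];

        printf("%ld\n", read_stages(buf, stages, 2, 4));   /* 8 */
        printf("%ld\n", read_stages(NULL, stages, 2, 4));  /* -14 == -EFAULT */
        return 0;
    }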
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 79b81be67975..186737539cf5 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support"
4 4
5config IEEE1394 5config IEEE1394
6 tristate "IEEE 1394 (FireWire) support" 6 tristate "IEEE 1394 (FireWire) support"
7 depends on (PCI || BROKEN) && (BROKEN || !FRV) 7 depends on PCI || BROKEN
8 select NET 8 select NET
9 help 9 help
10 IEEE 1394 describes a high performance serial bus, which is also 10 IEEE 1394 describes a high performance serial bus, which is also
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 948f1b8c4238..50c71e17de73 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -8,6 +8,7 @@
8 * directory of the kernel sources for details. 8 * directory of the kernel sources for details.
9 */ 9 */
10 10
11#include <linux/bitmap.h>
11#include <linux/kernel.h> 12#include <linux/kernel.h>
12#include <linux/config.h> 13#include <linux/config.h>
13#include <linux/list.h> 14#include <linux/list.h>
@@ -334,10 +335,12 @@ static ssize_t fw_show_ne_bus_options(struct device *dev, struct device_attribut
334static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL); 335static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL);
335 336
336 337
338/* tlabels_free, tlabels_allocations, tlabels_mask are read non-atomically
339 * here, therefore displayed values may be occasionally wrong. */
337static ssize_t fw_show_ne_tlabels_free(struct device *dev, struct device_attribute *attr, char *buf) 340static ssize_t fw_show_ne_tlabels_free(struct device *dev, struct device_attribute *attr, char *buf)
338{ 341{
339 struct node_entry *ne = container_of(dev, struct node_entry, device); 342 struct node_entry *ne = container_of(dev, struct node_entry, device);
340 return sprintf(buf, "%d\n", atomic_read(&ne->tpool->count.count) + 1); 343 return sprintf(buf, "%d\n", 64 - bitmap_weight(ne->tpool->pool, 64));
341} 344}
342static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL); 345static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL);
343 346
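With the atomic tpool counter gone, tlabels_free is now derived from the allocation bitmap itself: 64 transaction labels minus the number of bits set in the pool. bitmap_weight() is the kernel helper that does the popcount; the plain C loop below only illustrates the same computation:

    #include <stdio.h>
    #include <stdint.h>

    /* Count free transaction labels: 64 minus the number of allocated
     * (set) bits in the 64-bit pool bitmap. */
    static int tlabels_free(uint64_t pool)
    {
        int used = 0;

        while (pool) {
            used += pool & 1;
            pool >>= 1;
        }
        return 64 - used;
    }

    int main(void)
    {
        printf("%d\n", tlabels_free(0));        /* 64: nothing allocated */
        printf("%d\n", tlabels_free(0xffULL));  /* 56: eight labels in use */
        return 0;
    }

As the new comment notes, the bitmap is read without locking, so the value shown in sysfs is only a snapshot.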
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 161afddd0f44..386023c594d7 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -773,8 +773,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
773 773
774 ts->last_msg = m; 774 ts->last_msg = m;
775 775
776 if (request_irq(spi->irq, ads7846_irq, 776 if (request_irq(spi->irq, ads7846_irq, SA_TRIGGER_FALLING,
777 SA_SAMPLE_RANDOM | SA_TRIGGER_FALLING,
778 spi->dev.driver->name, ts)) { 777 spi->dev.driver->name, ts)) {
779 dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); 778 dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
780 err = -EBUSY; 779 err = -EBUSY;
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index a18d56bdafd9..a595d386312f 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -399,16 +399,14 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
399 set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE); 399 set_GPIO_IRQ_edge(GPIO_BITSY_NPOWER_BUTTON, GPIO_RISING_EDGE);
400 400
401 if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler, 401 if (request_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, action_button_handler,
402 SA_SHIRQ | SA_INTERRUPT | SA_SAMPLE_RANDOM, 402 SA_SHIRQ | SA_INTERRUPT, "h3600_action", &ts->dev)) {
403 "h3600_action", &ts->dev)) {
404 printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n"); 403 printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
405 err = -EBUSY; 404 err = -EBUSY;
406 goto fail2; 405 goto fail2;
407 } 406 }
408 407
409 if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler, 408 if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
410 SA_SHIRQ | SA_INTERRUPT | SA_SAMPLE_RANDOM, 409 SA_SHIRQ | SA_INTERRUPT, "h3600_suspend", &ts->dev)) {
411 "h3600_suspend", &ts->dev)) {
412 printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n"); 410 printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
413 err = -EBUSY; 411 err = -EBUSY;
414 goto fail3; 412 goto fail3;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index f573d5af0b1f..96509989e921 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -93,5 +93,14 @@ config LEDS_TRIGGER_IDE_DISK
93 This allows LEDs to be controlled by IDE disk activity. 93 This allows LEDs to be controlled by IDE disk activity.
94 If unsure, say Y. 94 If unsure, say Y.
95 95
96config LEDS_TRIGGER_HEARTBEAT
97 tristate "LED Heartbeat Trigger"
 98 depends on LEDS_TRIGGERS
99 help
 100 This allows LEDs to be controlled by the CPU load average.
101 The flash frequency is a hyperbolic function of the 1-minute
102 load average.
103 If unsure, say Y.
104
96endmenu 105endmenu
97 106
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index dcea1001faa4..88d3b6eaa6a2 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
16# LED Triggers 16# LED Triggers
17obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o 17obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
18obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o 18obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
19obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
new file mode 100644
index 000000000000..4bf8cec8b8c1
--- /dev/null
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -0,0 +1,118 @@
1/*
2 * LED Heartbeat Trigger
3 *
4 * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
5 *
6 * Based on Richard Purdie's ledtrig-timer.c and some arch's
7 * CONFIG_HEARTBEAT code.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/timer.h>
18#include <linux/sched.h>
19#include <linux/leds.h>
20#include "leds.h"
21
22struct heartbeat_trig_data {
23 unsigned int phase;
24 unsigned int period;
25 struct timer_list timer;
26};
27
28static void led_heartbeat_function(unsigned long data)
29{
30 struct led_classdev *led_cdev = (struct led_classdev *) data;
31 struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
32 unsigned long brightness = LED_OFF;
33 unsigned long delay = 0;
34
 35 /* acts like an actual heartbeat -- i.e. thump-thump-pause... */
36 switch (heartbeat_data->phase) {
37 case 0:
38 /*
39 * The hyperbolic function below modifies the
40 * heartbeat period length in dependency of the
41 * current (1min) load. It goes through the points
42 * f(0)=1260, f(1)=860, f(5)=510, f(inf)->300.
43 */
44 heartbeat_data->period = 300 +
45 (6720 << FSHIFT) / (5 * avenrun[0] + (7 << FSHIFT));
46 heartbeat_data->period =
47 msecs_to_jiffies(heartbeat_data->period);
48 delay = msecs_to_jiffies(70);
49 heartbeat_data->phase++;
50 brightness = LED_FULL;
51 break;
52 case 1:
53 delay = heartbeat_data->period / 4 - msecs_to_jiffies(70);
54 heartbeat_data->phase++;
55 break;
56 case 2:
57 delay = msecs_to_jiffies(70);
58 heartbeat_data->phase++;
59 brightness = LED_FULL;
60 break;
61 default:
62 delay = heartbeat_data->period - heartbeat_data->period / 4 -
63 msecs_to_jiffies(70);
64 heartbeat_data->phase = 0;
65 break;
66 }
67
68 led_set_brightness(led_cdev, brightness);
69 mod_timer(&heartbeat_data->timer, jiffies + delay);
70}
71
72static void heartbeat_trig_activate(struct led_classdev *led_cdev)
73{
74 struct heartbeat_trig_data *heartbeat_data;
75
76 heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL);
77 if (!heartbeat_data)
78 return;
79
80 led_cdev->trigger_data = heartbeat_data;
81 setup_timer(&heartbeat_data->timer,
82 led_heartbeat_function, (unsigned long) led_cdev);
83 heartbeat_data->phase = 0;
84 led_heartbeat_function(heartbeat_data->timer.data);
85}
86
87static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
88{
89 struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
90
91 if (heartbeat_data) {
92 del_timer_sync(&heartbeat_data->timer);
93 kfree(heartbeat_data);
94 }
95}
96
97static struct led_trigger heartbeat_led_trigger = {
98 .name = "heartbeat",
99 .activate = heartbeat_trig_activate,
100 .deactivate = heartbeat_trig_deactivate,
101};
102
103static int __init heartbeat_trig_init(void)
104{
105 return led_trigger_register(&heartbeat_led_trigger);
106}
107
108static void __exit heartbeat_trig_exit(void)
109{
110 led_trigger_unregister(&heartbeat_led_trigger);
111}
112
113module_init(heartbeat_trig_init);
114module_exit(heartbeat_trig_exit);
115
116MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
117MODULE_DESCRIPTION("Heartbeat LED trigger");
118MODULE_LICENSE("GPL");
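The period formula in led_heartbeat_function() can be checked with plain arithmetic: avenrun[] is a fixed-point load average with FSHIFT fractional bits (11 on Linux), so the expression reduces to 300 + 6720 / (5 * load + 7) milliseconds and reproduces the points quoted in the comment. A minimal sketch of the computation:

    #include <stdio.h>

    #define FSHIFT 11   /* fixed-point shift used by avenrun[] on Linux */

    /* Heartbeat period in milliseconds for a given fixed-point 1-minute
     * load average, as computed in led_heartbeat_function() above. */
    static unsigned long period_ms(unsigned long avenrun0)
    {
        return 300 + (6720UL << FSHIFT) / (5 * avenrun0 + (7UL << FSHIFT));
    }

    int main(void)
    {
        printf("load 0.0 -> %lu ms\n", period_ms(0));             /* 1260 */
        printf("load 1.0 -> %lu ms\n", period_ms(1UL << FSHIFT)); /* 860 */
        printf("load 5.0 -> %lu ms\n", period_ms(5UL << FSHIFT)); /* 510 */
        return 0;
    }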
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index ccf5df44cde4..37cd6ee4586b 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -99,17 +99,22 @@ config PMAC_MEDIABAY
99 devices are not fully supported in the bay as I never had one to 99 devices are not fully supported in the bay as I never had one to
100 try with 100 try with
101 101
102# made a separate option since backlight may end up beeing used
103# on non-powerbook machines (but only on PMU based ones AFAIK)
104config PMAC_BACKLIGHT 102config PMAC_BACKLIGHT
105 bool "Backlight control for LCD screens" 103 bool "Backlight control for LCD screens"
106 depends on ADB_PMU && (BROKEN || !PPC64) 104 depends on ADB_PMU && (BROKEN || !PPC64)
107 help 105 help
108 Say Y here to build in code to manage the LCD backlight on a 106 Say Y here to enable Macintosh specific extensions of the generic
109 Macintosh PowerBook. With this code, the backlight will be turned 107 backlight code. With this enabled, the brightness keys on older
110 on and off appropriately on power-management and lid-open/lid-closed 108 PowerBooks will be enabled so you can change the screen brightness.
111 events; also, the PowerBook button device will be enabled so you can 109 Newer models should use an userspace daemon like pbbuttonsd.
112 change the screen brightness. 110
111config PMAC_BACKLIGHT_LEGACY
112 bool "Provide legacy ioctl's on /dev/pmu for the backlight"
113 depends on PMAC_BACKLIGHT && (BROKEN || !PPC64)
114 help
115 Say Y if you want to enable legacy ioctl's on /dev/pmu. This is for
116 programs which use this old interface. New and updated programs
117 should use the backlight classes in sysfs.
113 118
114config ADB_MACIO 119config ADB_MACIO
115 bool "Include MacIO (CHRP) ADB driver" 120 bool "Include MacIO (CHRP) ADB driver"
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 6081acdea404..8972e53d2dcb 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_INPUT_ADBHID) += adbhid.o
12obj-$(CONFIG_ANSLCD) += ans-lcd.o 12obj-$(CONFIG_ANSLCD) += ans-lcd.o
13 13
14obj-$(CONFIG_ADB_PMU) += via-pmu.o 14obj-$(CONFIG_ADB_PMU) += via-pmu.o
15obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o
15obj-$(CONFIG_ADB_CUDA) += via-cuda.o 16obj-$(CONFIG_ADB_CUDA) += via-cuda.o
16obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o 17obj-$(CONFIG_PMAC_APM_EMU) += apm_emu.o
17obj-$(CONFIG_PMAC_SMU) += smu.o 18obj-$(CONFIG_PMAC_SMU) += smu.o
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index 394334ec5765..c26e1236b275 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -503,9 +503,7 @@ adbhid_buttons_input(unsigned char *data, int nb, struct pt_regs *regs, int auto
503 case 0x1f: /* Powerbook button device */ 503 case 0x1f: /* Powerbook button device */
504 { 504 {
505 int down = (data[1] == (data[1] & 0xf)); 505 int down = (data[1] == (data[1] & 0xf));
506#ifdef CONFIG_PMAC_BACKLIGHT 506
507 int backlight = get_backlight_level();
508#endif
509 /* 507 /*
510 * XXX: Where is the contrast control for the passive? 508 * XXX: Where is the contrast control for the passive?
511 * -- Cort 509 * -- Cort
@@ -530,29 +528,17 @@ adbhid_buttons_input(unsigned char *data, int nb, struct pt_regs *regs, int auto
530 528
531 case 0xa: /* brightness decrease */ 529 case 0xa: /* brightness decrease */
532#ifdef CONFIG_PMAC_BACKLIGHT 530#ifdef CONFIG_PMAC_BACKLIGHT
533 if (!disable_kernel_backlight) { 531 if (!disable_kernel_backlight && down)
534 if (down && backlight >= 0) { 532 pmac_backlight_key_down();
535 if (backlight > BACKLIGHT_OFF) 533#endif
536 set_backlight_level(backlight-1);
537 else
538 set_backlight_level(BACKLIGHT_OFF);
539 }
540 }
541#endif /* CONFIG_PMAC_BACKLIGHT */
542 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSDOWN, down); 534 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSDOWN, down);
543 break; 535 break;
544 536
545 case 0x9: /* brightness increase */ 537 case 0x9: /* brightness increase */
546#ifdef CONFIG_PMAC_BACKLIGHT 538#ifdef CONFIG_PMAC_BACKLIGHT
547 if (!disable_kernel_backlight) { 539 if (!disable_kernel_backlight && down)
548 if (down && backlight >= 0) { 540 pmac_backlight_key_up();
549 if (backlight < BACKLIGHT_MAX) 541#endif
550 set_backlight_level(backlight+1);
551 else
552 set_backlight_level(BACKLIGHT_MAX);
553 }
554 }
555#endif /* CONFIG_PMAC_BACKLIGHT */
556 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSUP, down); 542 input_report_key(adbhid[id]->input, KEY_BRIGHTNESSUP, down);
557 break; 543 break;
558 544
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
new file mode 100644
index 000000000000..b42d05f2aaff
--- /dev/null
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -0,0 +1,150 @@
1/*
2 * Backlight code for via-pmu
3 *
4 * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi.
5 * Copyright (C) 2001-2002 Benjamin Herrenschmidt
6 * Copyright (C) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
7 *
8 */
9
10#include <asm/ptrace.h>
11#include <linux/adb.h>
12#include <linux/pmu.h>
13#include <asm/backlight.h>
14#include <asm/prom.h>
15
16#define MAX_PMU_LEVEL 0xFF
17
18static struct device_node *vias;
19static struct backlight_properties pmu_backlight_data;
20
21static int pmu_backlight_get_level_brightness(struct fb_info *info,
22 int level)
23{
24 int pmulevel;
25
26 /* Get and convert the value */
27 mutex_lock(&info->bl_mutex);
28 pmulevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_PMU_LEVEL;
29 mutex_unlock(&info->bl_mutex);
30
31 if (pmulevel < 0)
32 pmulevel = 0;
33 else if (pmulevel > MAX_PMU_LEVEL)
34 pmulevel = MAX_PMU_LEVEL;
35
36 return pmulevel;
37}
38
39static int pmu_backlight_update_status(struct backlight_device *bd)
40{
41 struct fb_info *info = class_get_devdata(&bd->class_dev);
42 struct adb_request req;
43 int pmulevel, level = bd->props->brightness;
44
45 if (vias == NULL)
46 return -ENODEV;
47
48 if (bd->props->power != FB_BLANK_UNBLANK ||
49 bd->props->fb_blank != FB_BLANK_UNBLANK)
50 level = 0;
51
52 pmulevel = pmu_backlight_get_level_brightness(info, level);
53
54 pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel);
55 pmu_wait_complete(&req);
56
57 pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
58 PMU_POW_BACKLIGHT | (level > 0 ? PMU_POW_ON : PMU_POW_OFF));
59 pmu_wait_complete(&req);
60
61 return 0;
62}
63
64static int pmu_backlight_get_brightness(struct backlight_device *bd)
65{
66 return bd->props->brightness;
67}
68
69static struct backlight_properties pmu_backlight_data = {
70 .owner = THIS_MODULE,
71 .get_brightness = pmu_backlight_get_brightness,
72 .update_status = pmu_backlight_update_status,
73 .max_brightness = (FB_BACKLIGHT_LEVELS - 1),
74};
75
76void __init pmu_backlight_init(struct device_node *in_vias)
77{
78 struct backlight_device *bd;
79 struct fb_info *info;
80 char name[10];
81 int level, autosave;
82
83 vias = in_vias;
84
85 /* Special case for the old PowerBook since I can't test on it */
86 autosave =
87 machine_is_compatible("AAPL,3400/2400") ||
88 machine_is_compatible("AAPL,3500");
89
90 if (!autosave &&
91 !pmac_has_backlight_type("pmu") &&
92 !machine_is_compatible("AAPL,PowerBook1998") &&
93 !machine_is_compatible("PowerBook1,1"))
94 return;
95
96 /* Actually, this is a hack, but I don't know of a better way
97 * to get the first framebuffer device.
98 */
99 info = registered_fb[0];
100 if (!info) {
101 printk("pmubl: No framebuffer found\n");
102 goto error;
103 }
104
105 snprintf(name, sizeof(name), "pmubl%d", info->node);
106
107 bd = backlight_device_register(name, info, &pmu_backlight_data);
108 if (IS_ERR(bd)) {
109 printk("pmubl: Backlight registration failed\n");
110 goto error;
111 }
112
113 mutex_lock(&info->bl_mutex);
114 info->bl_dev = bd;
115 fb_bl_default_curve(info, 0x7F, 0x46, 0x0E);
116 mutex_unlock(&info->bl_mutex);
117
118 level = pmu_backlight_data.max_brightness;
119
120 if (autosave) {
121 /* read autosaved value if available */
122 struct adb_request req;
123 pmu_request(&req, NULL, 2, 0xd9, 0);
124 pmu_wait_complete(&req);
125
126 mutex_lock(&info->bl_mutex);
127 level = pmac_backlight_curve_lookup(info,
128 (req.reply[0] >> 4) *
129 pmu_backlight_data.max_brightness / 15);
130 mutex_unlock(&info->bl_mutex);
131 }
132
133 up(&bd->sem);
134 bd->props->brightness = level;
135 bd->props->power = FB_BLANK_UNBLANK;
136 bd->props->update_status(bd);
137 down(&bd->sem);
138
139 mutex_lock(&pmac_backlight_mutex);
140 if (!pmac_backlight)
141 pmac_backlight = bd;
142 mutex_unlock(&pmac_backlight_mutex);
143
144 printk("pmubl: Backlight initialized (%s)\n", name);
145
146 return;
147
148error:
149 return;
150}
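pmu_backlight_update_status() above maps the generic backlight level through the framebuffer's bl_curve and clamps the result to the PMU's 0..MAX_PMU_LEVEL range before sending PMU_BACKLIGHT_BRIGHT. A small userspace sketch of that conversion; the curve values here are made up for illustration:

    #include <stdio.h>

    #define FB_BACKLIGHT_MAX 0xFF
    #define MAX_PMU_LEVEL    0xFF

    /* Map a generic backlight level through a brightness curve and clamp
     * it to the PMU range, mirroring pmu_backlight_get_level_brightness(). */
    static int level_to_pmu(const unsigned char *bl_curve, int level)
    {
        int pmulevel = bl_curve[level] * FB_BACKLIGHT_MAX / MAX_PMU_LEVEL;

        if (pmulevel < 0)
            pmulevel = 0;
        else if (pmulevel > MAX_PMU_LEVEL)
            pmulevel = MAX_PMU_LEVEL;

        return pmulevel;
    }

    int main(void)
    {
        unsigned char curve[4] = { 0x00, 0x46, 0x90, 0xFF }; /* illustrative */

        printf("%d\n", level_to_pmu(curve, 1)); /* 0x46 -> 70 */
        printf("%d\n", level_to_pmu(curve, 3)); /* 0xFF -> 255 */
        return 0;
    }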
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c63d4e7984be..2a355ae59562 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -144,7 +144,6 @@ static int data_index;
144static int data_len; 144static int data_len;
145static volatile int adb_int_pending; 145static volatile int adb_int_pending;
146static volatile int disable_poll; 146static volatile int disable_poll;
147static struct adb_request bright_req_1, bright_req_2;
148static struct device_node *vias; 147static struct device_node *vias;
149static int pmu_kind = PMU_UNKNOWN; 148static int pmu_kind = PMU_UNKNOWN;
150static int pmu_fully_inited = 0; 149static int pmu_fully_inited = 0;
@@ -161,7 +160,7 @@ static int drop_interrupts;
161#if defined(CONFIG_PM) && defined(CONFIG_PPC32) 160#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
162static int option_lid_wakeup = 1; 161static int option_lid_wakeup = 1;
163#endif /* CONFIG_PM && CONFIG_PPC32 */ 162#endif /* CONFIG_PM && CONFIG_PPC32 */
164#if (defined(CONFIG_PM)&&defined(CONFIG_PPC32))||defined(CONFIG_PMAC_BACKLIGHT) 163#if (defined(CONFIG_PM)&&defined(CONFIG_PPC32))||defined(CONFIG_PMAC_BACKLIGHT_LEGACY)
165static int sleep_in_progress; 164static int sleep_in_progress;
166#endif 165#endif
167static unsigned long async_req_locks; 166static unsigned long async_req_locks;
@@ -208,10 +207,6 @@ static int proc_get_info(char *page, char **start, off_t off,
208 int count, int *eof, void *data); 207 int count, int *eof, void *data);
209static int proc_get_irqstats(char *page, char **start, off_t off, 208static int proc_get_irqstats(char *page, char **start, off_t off,
210 int count, int *eof, void *data); 209 int count, int *eof, void *data);
211#ifdef CONFIG_PMAC_BACKLIGHT
212static int pmu_set_backlight_level(int level, void* data);
213static int pmu_set_backlight_enable(int on, int level, void* data);
214#endif /* CONFIG_PMAC_BACKLIGHT */
215static void pmu_pass_intr(unsigned char *data, int len); 210static void pmu_pass_intr(unsigned char *data, int len);
216static int proc_get_batt(char *page, char **start, off_t off, 211static int proc_get_batt(char *page, char **start, off_t off,
217 int count, int *eof, void *data); 212 int count, int *eof, void *data);
@@ -292,13 +287,6 @@ static char *pbook_type[] = {
292 "Core99" 287 "Core99"
293}; 288};
294 289
295#ifdef CONFIG_PMAC_BACKLIGHT
296static struct backlight_controller pmu_backlight_controller = {
297 pmu_set_backlight_enable,
298 pmu_set_backlight_level
299};
300#endif /* CONFIG_PMAC_BACKLIGHT */
301
302int __init find_via_pmu(void) 290int __init find_via_pmu(void)
303{ 291{
304 u64 taddr; 292 u64 taddr;
@@ -417,8 +405,6 @@ static int __init via_pmu_start(void)
417 if (vias == NULL) 405 if (vias == NULL)
418 return -ENODEV; 406 return -ENODEV;
419 407
420 bright_req_1.complete = 1;
421 bright_req_2.complete = 1;
422 batt_req.complete = 1; 408 batt_req.complete = 1;
423 409
424#ifndef CONFIG_PPC_MERGE 410#ifndef CONFIG_PPC_MERGE
@@ -483,9 +469,9 @@ static int __init via_pmu_dev_init(void)
483 return -ENODEV; 469 return -ENODEV;
484 470
485#ifdef CONFIG_PMAC_BACKLIGHT 471#ifdef CONFIG_PMAC_BACKLIGHT
486 /* Enable backlight */ 472 /* Initialize backlight */
487 register_backlight_controller(&pmu_backlight_controller, NULL, "pmu"); 473 pmu_backlight_init(vias);
488#endif /* CONFIG_PMAC_BACKLIGHT */ 474#endif
489 475
490#ifdef CONFIG_PPC32 476#ifdef CONFIG_PPC32
491 if (machine_is_compatible("AAPL,3400/2400") || 477 if (machine_is_compatible("AAPL,3400/2400") ||
@@ -1424,7 +1410,7 @@ next:
1424#ifdef CONFIG_INPUT_ADBHID 1410#ifdef CONFIG_INPUT_ADBHID
1425 if (!disable_kernel_backlight) 1411 if (!disable_kernel_backlight)
1426#endif /* CONFIG_INPUT_ADBHID */ 1412#endif /* CONFIG_INPUT_ADBHID */
1427 set_backlight_level(data[1] >> 4); 1413 pmac_backlight_set_legacy_brightness(data[1] >> 4);
1428#endif /* CONFIG_PMAC_BACKLIGHT */ 1414#endif /* CONFIG_PMAC_BACKLIGHT */
1429 } 1415 }
1430 /* Tick interrupt */ 1416 /* Tick interrupt */
@@ -1674,61 +1660,6 @@ gpio1_interrupt(int irq, void *arg, struct pt_regs *regs)
1674 return IRQ_NONE; 1660 return IRQ_NONE;
1675} 1661}
1676 1662
1677#ifdef CONFIG_PMAC_BACKLIGHT
1678static int backlight_to_bright[] = {
1679 0x7f, 0x46, 0x42, 0x3e, 0x3a, 0x36, 0x32, 0x2e,
1680 0x2a, 0x26, 0x22, 0x1e, 0x1a, 0x16, 0x12, 0x0e
1681};
1682
1683static int
1684pmu_set_backlight_enable(int on, int level, void* data)
1685{
1686 struct adb_request req;
1687
1688 if (vias == NULL)
1689 return -ENODEV;
1690
1691 if (on) {
1692 pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT,
1693 backlight_to_bright[level]);
1694 pmu_wait_complete(&req);
1695 }
1696 pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
1697 PMU_POW_BACKLIGHT | (on ? PMU_POW_ON : PMU_POW_OFF));
1698 pmu_wait_complete(&req);
1699
1700 return 0;
1701}
1702
1703static void
1704pmu_bright_complete(struct adb_request *req)
1705{
1706 if (req == &bright_req_1)
1707 clear_bit(1, &async_req_locks);
1708 if (req == &bright_req_2)
1709 clear_bit(2, &async_req_locks);
1710}
1711
1712static int
1713pmu_set_backlight_level(int level, void* data)
1714{
1715 if (vias == NULL)
1716 return -ENODEV;
1717
1718 if (test_and_set_bit(1, &async_req_locks))
1719 return -EAGAIN;
1720 pmu_request(&bright_req_1, pmu_bright_complete, 2, PMU_BACKLIGHT_BRIGHT,
1721 backlight_to_bright[level]);
1722 if (test_and_set_bit(2, &async_req_locks))
1723 return -EAGAIN;
1724 pmu_request(&bright_req_2, pmu_bright_complete, 2, PMU_POWER_CTRL,
1725 PMU_POW_BACKLIGHT | (level > BACKLIGHT_OFF ?
1726 PMU_POW_ON : PMU_POW_OFF));
1727
1728 return 0;
1729}
1730#endif /* CONFIG_PMAC_BACKLIGHT */
1731
1732void 1663void
1733pmu_enable_irled(int on) 1664pmu_enable_irled(int on)
1734{ 1665{
@@ -2145,9 +2076,8 @@ pmac_suspend_devices(void)
2145 return -EBUSY; 2076 return -EBUSY;
2146 } 2077 }
2147 2078
2148 /* Wait for completion of async backlight requests */ 2079 /* Wait for completion of async requests */
2149 while (!bright_req_1.complete || !bright_req_2.complete || 2080 while (!batt_req.complete)
2150 !batt_req.complete)
2151 pmu_poll(); 2081 pmu_poll();
2152 2082
2153 /* Giveup the lazy FPU & vec so we don't have to back them 2083 /* Giveup the lazy FPU & vec so we don't have to back them
@@ -2678,26 +2608,34 @@ pmu_ioctl(struct inode * inode, struct file *filp,
2678 return put_user(1, argp); 2608 return put_user(1, argp);
2679#endif /* CONFIG_PM && CONFIG_PPC32 */ 2609#endif /* CONFIG_PM && CONFIG_PPC32 */
2680 2610
2681#ifdef CONFIG_PMAC_BACKLIGHT 2611#ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
 2682 /* Backlight should have its own device or go via 2612 /* Compatibility ioctls for backlight */
2683 * the fbdev
2684 */
2685 case PMU_IOC_GET_BACKLIGHT: 2613 case PMU_IOC_GET_BACKLIGHT:
2614 {
2615 int brightness;
2616
2686 if (sleep_in_progress) 2617 if (sleep_in_progress)
2687 return -EBUSY; 2618 return -EBUSY;
2688 error = get_backlight_level(); 2619
2689 if (error < 0) 2620 brightness = pmac_backlight_get_legacy_brightness();
2690 return error; 2621 if (brightness < 0)
2691 return put_user(error, argp); 2622 return brightness;
2623 else
2624 return put_user(brightness, argp);
2625
2626 }
2692 case PMU_IOC_SET_BACKLIGHT: 2627 case PMU_IOC_SET_BACKLIGHT:
2693 { 2628 {
2694 __u32 value; 2629 int brightness;
2630
2695 if (sleep_in_progress) 2631 if (sleep_in_progress)
2696 return -EBUSY; 2632 return -EBUSY;
2697 error = get_user(value, argp); 2633
2698 if (!error) 2634 error = get_user(brightness, argp);
2699 error = set_backlight_level(value); 2635 if (error)
2700 break; 2636 return error;
2637
2638 return pmac_backlight_set_legacy_brightness(brightness);
2701 } 2639 }
2702#ifdef CONFIG_INPUT_ADBHID 2640#ifdef CONFIG_INPUT_ADBHID
2703 case PMU_IOC_GRAB_BACKLIGHT: { 2641 case PMU_IOC_GRAB_BACKLIGHT: {
@@ -2713,7 +2651,7 @@ pmu_ioctl(struct inode * inode, struct file *filp,
2713 return 0; 2651 return 0;
2714 } 2652 }
2715#endif /* CONFIG_INPUT_ADBHID */ 2653#endif /* CONFIG_INPUT_ADBHID */
2716#endif /* CONFIG_PMAC_BACKLIGHT */ 2654#endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */
2717 case PMU_IOC_GET_MODEL: 2655 case PMU_IOC_GET_MODEL:
2718 return put_user(pmu_kind, argp); 2656 return put_user(pmu_kind, argp);
2719 case PMU_IOC_HAS_ADB: 2657 case PMU_IOC_HAS_ADB:
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 844fa74ac9ec..2a0d538b387f 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1105,7 +1105,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1105 return ret; 1105 return ret;
1106 1106
1107 /* We get a patch from userspace */ 1107 /* We get a patch from userspace */
1108 IRDA_MESSAGE("%s(): Received firmware %s (%u bytes)\n", 1108 IRDA_MESSAGE("%s(): Received firmware %s (%zu bytes)\n",
1109 __FUNCTION__, stir421x_fw_name, fw->size); 1109 __FUNCTION__, stir421x_fw_name, fw->size);
1110 1110
1111 ret = -EINVAL; 1111 ret = -EINVAL;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index d4c0002b43db..a2fad50437e6 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -55,7 +55,7 @@ static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.ne
55/* sun3/60 addr/irq for the lance chip. If your sun is different, 55/* sun3/60 addr/irq for the lance chip. If your sun is different,
56 change this. */ 56 change this. */
57#define LANCE_OBIO 0x120000 57#define LANCE_OBIO 0x120000
58#define LANCE_IRQ IRQ3 58#define LANCE_IRQ IRQ_AUTO_3
59 59
60/* Debug level: 60/* Debug level:
61 * 0 = silent, print only serious errors 61 * 0 = silent, print only serious errors
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 22e794071cf4..7628c2d81f45 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -60,9 +60,9 @@
60 60
61static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org"; 61static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org";
62 62
63static unsigned int valid_port[] __initdata = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390}; 63static unsigned int valid_port[] = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390};
64 64
65static unsigned int valid_mem[] __initdata = { 65static unsigned int valid_mem[] = {
66 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000, 66 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
67 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000, 67 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
68 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000, 68 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index b2e8e49c8659..43e521e99126 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -108,10 +108,10 @@ static int module_load_notify(struct notifier_block * self, unsigned long val, v
108 return 0; 108 return 0;
109 109
110 /* FIXME: should we process all CPU buffers ? */ 110 /* FIXME: should we process all CPU buffers ? */
111 down(&buffer_sem); 111 mutex_lock(&buffer_mutex);
112 add_event_entry(ESCAPE_CODE); 112 add_event_entry(ESCAPE_CODE);
113 add_event_entry(MODULE_LOADED_CODE); 113 add_event_entry(MODULE_LOADED_CODE);
114 up(&buffer_sem); 114 mutex_unlock(&buffer_mutex);
115#endif 115#endif
116 return 0; 116 return 0;
117} 117}
@@ -501,7 +501,7 @@ void sync_buffer(int cpu)
501 sync_buffer_state state = sb_buffer_start; 501 sync_buffer_state state = sb_buffer_start;
502 unsigned long available; 502 unsigned long available;
503 503
504 down(&buffer_sem); 504 mutex_lock(&buffer_mutex);
505 505
506 add_cpu_switch(cpu); 506 add_cpu_switch(cpu);
507 507
@@ -550,5 +550,5 @@ void sync_buffer(int cpu)
550 550
551 mark_done(cpu); 551 mark_done(cpu);
552 552
553 up(&buffer_sem); 553 mutex_unlock(&buffer_mutex);
554} 554}
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index b80318f03420..04d641714d34 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -24,7 +24,7 @@
24#include "event_buffer.h" 24#include "event_buffer.h"
25#include "oprofile_stats.h" 25#include "oprofile_stats.h"
26 26
27DECLARE_MUTEX(buffer_sem); 27DEFINE_MUTEX(buffer_mutex);
28 28
29static unsigned long buffer_opened; 29static unsigned long buffer_opened;
30static DECLARE_WAIT_QUEUE_HEAD(buffer_wait); 30static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
@@ -32,7 +32,7 @@ static unsigned long * event_buffer;
32static unsigned long buffer_size; 32static unsigned long buffer_size;
33static unsigned long buffer_watershed; 33static unsigned long buffer_watershed;
34static size_t buffer_pos; 34static size_t buffer_pos;
35/* atomic_t because wait_event checks it outside of buffer_sem */ 35/* atomic_t because wait_event checks it outside of buffer_mutex */
36static atomic_t buffer_ready = ATOMIC_INIT(0); 36static atomic_t buffer_ready = ATOMIC_INIT(0);
37 37
38/* Add an entry to the event buffer. When we 38/* Add an entry to the event buffer. When we
@@ -60,10 +60,10 @@ void add_event_entry(unsigned long value)
60 */ 60 */
61void wake_up_buffer_waiter(void) 61void wake_up_buffer_waiter(void)
62{ 62{
63 down(&buffer_sem); 63 mutex_lock(&buffer_mutex);
64 atomic_set(&buffer_ready, 1); 64 atomic_set(&buffer_ready, 1);
65 wake_up(&buffer_wait); 65 wake_up(&buffer_wait);
66 up(&buffer_sem); 66 mutex_unlock(&buffer_mutex);
67} 67}
68 68
69 69
@@ -162,7 +162,7 @@ static ssize_t event_buffer_read(struct file * file, char __user * buf,
162 if (!atomic_read(&buffer_ready)) 162 if (!atomic_read(&buffer_ready))
163 return -EAGAIN; 163 return -EAGAIN;
164 164
165 down(&buffer_sem); 165 mutex_lock(&buffer_mutex);
166 166
167 atomic_set(&buffer_ready, 0); 167 atomic_set(&buffer_ready, 0);
168 168
@@ -177,7 +177,7 @@ static ssize_t event_buffer_read(struct file * file, char __user * buf,
177 buffer_pos = 0; 177 buffer_pos = 0;
178 178
179out: 179out:
180 up(&buffer_sem); 180 mutex_unlock(&buffer_mutex);
181 return retval; 181 return retval;
182} 182}
183 183
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 018023630599..92416276e577 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -11,7 +11,7 @@
11#define EVENT_BUFFER_H 11#define EVENT_BUFFER_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <asm/semaphore.h> 14#include <asm/mutex.h>
15 15
16int alloc_event_buffer(void); 16int alloc_event_buffer(void);
17 17
@@ -46,6 +46,6 @@ extern struct file_operations event_buffer_fops;
46/* mutex between sync_cpu_buffers() and the 46/* mutex between sync_cpu_buffers() and the
47 * file reading code. 47 * file reading code.
48 */ 48 */
49extern struct semaphore buffer_sem; 49extern struct mutex buffer_mutex;
50 50
51#endif /* EVENT_BUFFER_H */ 51#endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index b3f1cd6a24c1..e5162a64018b 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -12,7 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <asm/semaphore.h> 15#include <asm/mutex.h>
16 16
17#include "oprof.h" 17#include "oprof.h"
18#include "event_buffer.h" 18#include "event_buffer.h"
@@ -25,7 +25,7 @@ struct oprofile_operations oprofile_ops;
25unsigned long oprofile_started; 25unsigned long oprofile_started;
26unsigned long backtrace_depth; 26unsigned long backtrace_depth;
27static unsigned long is_setup; 27static unsigned long is_setup;
28static DECLARE_MUTEX(start_sem); 28static DEFINE_MUTEX(start_mutex);
29 29
30/* timer 30/* timer
31 0 - use performance monitoring hardware if available 31 0 - use performance monitoring hardware if available
@@ -37,7 +37,7 @@ int oprofile_setup(void)
37{ 37{
38 int err; 38 int err;
39 39
40 down(&start_sem); 40 mutex_lock(&start_mutex);
41 41
42 if ((err = alloc_cpu_buffers())) 42 if ((err = alloc_cpu_buffers()))
43 goto out; 43 goto out;
@@ -57,7 +57,7 @@ int oprofile_setup(void)
57 goto out3; 57 goto out3;
58 58
59 is_setup = 1; 59 is_setup = 1;
60 up(&start_sem); 60 mutex_unlock(&start_mutex);
61 return 0; 61 return 0;
62 62
63out3: 63out3:
@@ -68,7 +68,7 @@ out2:
68out1: 68out1:
69 free_cpu_buffers(); 69 free_cpu_buffers();
70out: 70out:
71 up(&start_sem); 71 mutex_unlock(&start_mutex);
72 return err; 72 return err;
73} 73}
74 74
@@ -78,7 +78,7 @@ int oprofile_start(void)
78{ 78{
79 int err = -EINVAL; 79 int err = -EINVAL;
80 80
81 down(&start_sem); 81 mutex_lock(&start_mutex);
82 82
83 if (!is_setup) 83 if (!is_setup)
84 goto out; 84 goto out;
@@ -95,7 +95,7 @@ int oprofile_start(void)
95 95
96 oprofile_started = 1; 96 oprofile_started = 1;
97out: 97out:
98 up(&start_sem); 98 mutex_unlock(&start_mutex);
99 return err; 99 return err;
100} 100}
101 101
@@ -103,7 +103,7 @@ out:
103/* echo 0>/dev/oprofile/enable */ 103/* echo 0>/dev/oprofile/enable */
104void oprofile_stop(void) 104void oprofile_stop(void)
105{ 105{
106 down(&start_sem); 106 mutex_lock(&start_mutex);
107 if (!oprofile_started) 107 if (!oprofile_started)
108 goto out; 108 goto out;
109 oprofile_ops.stop(); 109 oprofile_ops.stop();
@@ -111,20 +111,20 @@ void oprofile_stop(void)
111 /* wake up the daemon to read what remains */ 111 /* wake up the daemon to read what remains */
112 wake_up_buffer_waiter(); 112 wake_up_buffer_waiter();
113out: 113out:
114 up(&start_sem); 114 mutex_unlock(&start_mutex);
115} 115}
116 116
117 117
118void oprofile_shutdown(void) 118void oprofile_shutdown(void)
119{ 119{
120 down(&start_sem); 120 mutex_lock(&start_mutex);
121 sync_stop(); 121 sync_stop();
122 if (oprofile_ops.shutdown) 122 if (oprofile_ops.shutdown)
123 oprofile_ops.shutdown(); 123 oprofile_ops.shutdown();
124 is_setup = 0; 124 is_setup = 0;
125 free_event_buffer(); 125 free_event_buffer();
126 free_cpu_buffers(); 126 free_cpu_buffers();
127 up(&start_sem); 127 mutex_unlock(&start_mutex);
128} 128}
129 129
130 130
@@ -132,7 +132,7 @@ int oprofile_set_backtrace(unsigned long val)
132{ 132{
133 int err = 0; 133 int err = 0;
134 134
135 down(&start_sem); 135 mutex_lock(&start_mutex);
136 136
137 if (oprofile_started) { 137 if (oprofile_started) {
138 err = -EBUSY; 138 err = -EBUSY;
@@ -147,7 +147,7 @@ int oprofile_set_backtrace(unsigned long val)
147 backtrace_depth = val; 147 backtrace_depth = val;
148 148
149out: 149out:
150 up(&start_sem); 150 mutex_unlock(&start_mutex);
151 return err; 151 return err;
152} 152}
153 153
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 4d8dc27ea9d1..c7fa28a28b9f 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -136,6 +136,18 @@ config PARPORT_SUNBPP
136 found on many Sun machines. Note that many of the newer Ultras 136 found on many Sun machines. Note that many of the newer Ultras
137 actually have pc style hardware instead. 137 actually have pc style hardware instead.
138 138
139config PARPORT_AX88796
140 tristate "AX88796 Parallel Port"
141 depends on PARPORT
142 select PARPORT_NOT_PC
143 help
144 Say Y here if you need support for the parallel port hardware on
145 the AX88796 network controller chip. This code is also available
146 as a module (say M), called parport_ax88796.
147
 148 The driver is not dependent on the AX88796 network driver, and
149 should not interfere with the networking functions of the chip.
150
139config PARPORT_1284 151config PARPORT_1284
140 bool "IEEE 1284 transfer modes" 152 bool "IEEE 1284 transfer modes"
141 depends on PARPORT 153 depends on PARPORT
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
index a19de35f8de2..696b8d4ca887 100644
--- a/drivers/parport/Makefile
+++ b/drivers/parport/Makefile
@@ -17,4 +17,5 @@ obj-$(CONFIG_PARPORT_MFC3) += parport_mfc3.o
17obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o 17obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o
18obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o 18obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o
19obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o 19obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o
20obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o 20obj-$(CONFIG_PARPORT_AX88796) += parport_ax88796.o
21obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o \ No newline at end of file
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 9ee67321b630..fd41e28101ea 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -283,7 +283,7 @@ void parport_close (struct pardevice *dev)
283 * 283 *
284 * This tries to locate a device on the given parallel port, 284 * This tries to locate a device on the given parallel port,
285 * multiplexor port and daisy chain address, and returns its 285 * multiplexor port and daisy chain address, and returns its
286 * device number or -NXIO if no device with those coordinates 286 * device number or %-ENXIO if no device with those coordinates
287 * exists. 287 * exists.
288 **/ 288 **/
289 289
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
new file mode 100644
index 000000000000..4baa719439a2
--- /dev/null
+++ b/drivers/parport/parport_ax88796.c
@@ -0,0 +1,443 @@
1/* linux/drivers/parport/parport_ax88796.c
2 *
3 * (c) 2005,2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10*/
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/parport.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/platform_device.h>
18
19#include <asm/io.h>
20#include <asm/irq.h>
21
22#define AX_SPR_BUSY (1<<7)
23#define AX_SPR_ACK (1<<6)
24#define AX_SPR_PE (1<<5)
25#define AX_SPR_SLCT (1<<4)
26#define AX_SPR_ERR (1<<3)
27
28#define AX_CPR_nDOE (1<<5)
29#define AX_CPR_SLCTIN (1<<3)
30#define AX_CPR_nINIT (1<<2)
31#define AX_CPR_ATFD (1<<1)
32#define AX_CPR_STRB (1<<0)
33
34struct ax_drvdata {
35 struct parport *parport;
36 struct parport_state suspend;
37
38 struct device *dev;
39 struct resource *io;
40
41 unsigned char irq_enabled;
42
43 void __iomem *base;
44 void __iomem *spp_data;
45 void __iomem *spp_spr;
46 void __iomem *spp_cpr;
47};
48
49static inline struct ax_drvdata *pp_to_drv(struct parport *p)
50{
51 return p->private_data;
52}
53
54static unsigned char
55parport_ax88796_read_data(struct parport *p)
56{
57 struct ax_drvdata *dd = pp_to_drv(p);
58
59 return readb(dd->spp_data);
60}
61
62static void
63parport_ax88796_write_data(struct parport *p, unsigned char data)
64{
65 struct ax_drvdata *dd = pp_to_drv(p);
66
67 writeb(data, dd->spp_data);
68}
69
70static unsigned char
71parport_ax88796_read_control(struct parport *p)
72{
73 struct ax_drvdata *dd = pp_to_drv(p);
74 unsigned int cpr = readb(dd->spp_cpr);
75 unsigned int ret = 0;
76
77 if (!(cpr & AX_CPR_STRB))
78 ret |= PARPORT_CONTROL_STROBE;
79
80 if (!(cpr & AX_CPR_ATFD))
81 ret |= PARPORT_CONTROL_AUTOFD;
82
83 if (cpr & AX_CPR_nINIT)
84 ret |= PARPORT_CONTROL_INIT;
85
86 if (!(cpr & AX_CPR_SLCTIN))
87 ret |= PARPORT_CONTROL_SELECT;
88
89 return ret;
90}
91
92static void
93parport_ax88796_write_control(struct parport *p, unsigned char control)
94{
95 struct ax_drvdata *dd = pp_to_drv(p);
96 unsigned int cpr = readb(dd->spp_cpr);
97
98 cpr &= AX_CPR_nDOE;
99
100 if (!(control & PARPORT_CONTROL_STROBE))
101 cpr |= AX_CPR_STRB;
102
103 if (!(control & PARPORT_CONTROL_AUTOFD))
104 cpr |= AX_CPR_ATFD;
105
106 if (control & PARPORT_CONTROL_INIT)
107 cpr |= AX_CPR_nINIT;
108
109 if (!(control & PARPORT_CONTROL_SELECT))
110 cpr |= AX_CPR_SLCTIN;
111
112 dev_dbg(dd->dev, "write_control: ctrl=%02x, cpr=%02x\n", control, cpr);
113 writeb(cpr, dd->spp_cpr);
114
115 if (parport_ax88796_read_control(p) != control) {
116 dev_err(dd->dev, "write_control: read != set (%02x, %02x)\n",
117 parport_ax88796_read_control(p), control);
118 }
119}
120
121static unsigned char
122parport_ax88796_read_status(struct parport *p)
123{
124 struct ax_drvdata *dd = pp_to_drv(p);
125 unsigned int status = readb(dd->spp_spr);
126 unsigned int ret = 0;
127
128 if (status & AX_SPR_BUSY)
129 ret |= PARPORT_STATUS_BUSY;
130
131 if (status & AX_SPR_ACK)
132 ret |= PARPORT_STATUS_ACK;
133
134 if (status & AX_SPR_ERR)
135 ret |= PARPORT_STATUS_ERROR;
136
137 if (status & AX_SPR_SLCT)
138 ret |= PARPORT_STATUS_SELECT;
139
140 if (status & AX_SPR_PE)
141 ret |= PARPORT_STATUS_PAPEROUT;
142
143 return ret;
144}
145
146static unsigned char
147parport_ax88796_frob_control(struct parport *p, unsigned char mask,
148 unsigned char val)
149{
150 struct ax_drvdata *dd = pp_to_drv(p);
151 unsigned char old = parport_ax88796_read_control(p);
152
153 dev_dbg(dd->dev, "frob: mask=%02x, val=%02x, old=%02x\n",
154 mask, val, old);
155
156 parport_ax88796_write_control(p, (old & ~mask) | val);
157 return old;
158}
159
160static void
161parport_ax88796_enable_irq(struct parport *p)
162{
163 struct ax_drvdata *dd = pp_to_drv(p);
164 unsigned long flags;
165
166 local_irq_save(flags);
167 if (!dd->irq_enabled) {
168 enable_irq(p->irq);
169 dd->irq_enabled = 1;
170 }
171 local_irq_restore(flags);
172}
173
174static void
175parport_ax88796_disable_irq(struct parport *p)
176{
177 struct ax_drvdata *dd = pp_to_drv(p);
178 unsigned long flags;
179
180 local_irq_save(flags);
181 if (dd->irq_enabled) {
182 disable_irq(p->irq);
183 dd->irq_enabled = 0;
184 }
185 local_irq_restore(flags);
186}
187
188static void
189parport_ax88796_data_forward(struct parport *p)
190{
191 struct ax_drvdata *dd = pp_to_drv(p);
192 void __iomem *cpr = dd->spp_cpr;
193
194 writeb((readb(cpr) & ~AX_CPR_nDOE), cpr);
195}
196
197static void
198parport_ax88796_data_reverse(struct parport *p)
199{
200 struct ax_drvdata *dd = pp_to_drv(p);
201 void __iomem *cpr = dd->spp_cpr;
202
203 writeb(readb(cpr) | AX_CPR_nDOE, cpr);
204}
205
206static void
207parport_ax88796_init_state(struct pardevice *d, struct parport_state *s)
208{
209 struct ax_drvdata *dd = pp_to_drv(d->port);
210
211 memset(s, 0, sizeof(struct parport_state));
212
213 dev_dbg(dd->dev, "init_state: %p: state=%p\n", d, s);
214 s->u.ax88796.cpr = readb(dd->spp_cpr);
215}
216
217static void
218parport_ax88796_save_state(struct parport *p, struct parport_state *s)
219{
220 struct ax_drvdata *dd = pp_to_drv(p);
221
222 dev_dbg(dd->dev, "save_state: %p: state=%p\n", p, s);
223 s->u.ax88796.cpr = readb(dd->spp_cpr);
224}
225
226static void
227parport_ax88796_restore_state(struct parport *p, struct parport_state *s)
228{
229 struct ax_drvdata *dd = pp_to_drv(p);
230
231 dev_dbg(dd->dev, "restore_state: %p: state=%p\n", p, s);
232 writeb(s->u.ax88796.cpr, dd->spp_cpr);
233}
234
235static irqreturn_t
236parport_ax88796_interrupt(int irq, void *dev_id, struct pt_regs *regs)
237{
238 parport_generic_irq(irq, dev_id, regs);
239 return IRQ_HANDLED;
240}
241
242
243static struct parport_operations parport_ax88796_ops = {
244 .write_data = parport_ax88796_write_data,
245 .read_data = parport_ax88796_read_data,
246
247 .write_control = parport_ax88796_write_control,
248 .read_control = parport_ax88796_read_control,
249 .frob_control = parport_ax88796_frob_control,
250
251 .read_status = parport_ax88796_read_status,
252
253 .enable_irq = parport_ax88796_enable_irq,
254 .disable_irq = parport_ax88796_disable_irq,
255
256 .data_forward = parport_ax88796_data_forward,
257 .data_reverse = parport_ax88796_data_reverse,
258
259 .init_state = parport_ax88796_init_state,
260 .save_state = parport_ax88796_save_state,
261 .restore_state = parport_ax88796_restore_state,
262
263 .epp_write_data = parport_ieee1284_epp_write_data,
264 .epp_read_data = parport_ieee1284_epp_read_data,
265 .epp_write_addr = parport_ieee1284_epp_write_addr,
266 .epp_read_addr = parport_ieee1284_epp_read_addr,
267
268 .ecp_write_data = parport_ieee1284_ecp_write_data,
269 .ecp_read_data = parport_ieee1284_ecp_read_data,
270 .ecp_write_addr = parport_ieee1284_ecp_write_addr,
271
272 .compat_write_data = parport_ieee1284_write_compat,
273 .nibble_read_data = parport_ieee1284_read_nibble,
274 .byte_read_data = parport_ieee1284_read_byte,
275
276 .owner = THIS_MODULE,
277};
278
279static int parport_ax88796_probe(struct platform_device *pdev)
280{
281 struct device *_dev = &pdev->dev;
282 struct ax_drvdata *dd;
283 struct parport *pp = NULL;
284 struct resource *res;
285 unsigned long size;
286 int spacing;
287 int irq;
288 int ret;
289
290 dd = kzalloc(sizeof(struct ax_drvdata), GFP_KERNEL);
291 if (dd == NULL) {
292 dev_err(_dev, "no memory for private data\n");
293 return -ENOMEM;
294 }
295
296 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
297 if (res == NULL) {
298 dev_err(_dev, "no MEM specified\n");
299 ret = -ENXIO;
300 goto exit_mem;
301 }
302
303 size = (res->end - res->start) + 1;
304 spacing = size / 3;
305
306 dd->io = request_mem_region(res->start, size, pdev->name);
307 if (dd->io == NULL) {
308 dev_err(_dev, "cannot reserve memory\n");
309 ret = -ENXIO;
310 goto exit_mem;
311 }
312
313 dd->base = ioremap(res->start, size);
314 if (dd->base == NULL) {
315 dev_err(_dev, "cannot ioremap region\n");
316 ret = -ENXIO;
317 goto exit_res;
318 }
319
320 irq = platform_get_irq(pdev, 0);
321 if (irq <= 0)
322 irq = PARPORT_IRQ_NONE;
323
324 pp = parport_register_port((unsigned long)dd->base, irq,
325 PARPORT_DMA_NONE,
326 &parport_ax88796_ops);
327
328 if (pp == NULL) {
329 dev_err(_dev, "failed to register parallel port\n");
330 ret = -ENOMEM;
331 goto exit_unmap;
332 }
333
334 pp->private_data = dd;
335 dd->parport = pp;
336 dd->dev = _dev;
337
338 dd->spp_data = dd->base;
339 dd->spp_spr = dd->base + (spacing * 1);
340 dd->spp_cpr = dd->base + (spacing * 2);
341
342 /* initialise the port controls */
343 writeb(AX_CPR_STRB, dd->spp_cpr);
344
345 if (irq >= 0) {
346 /* request irq */
347 ret = request_irq(irq, parport_ax88796_interrupt,
348 SA_TRIGGER_FALLING, pdev->name, pp);
349
350 if (ret < 0)
351 goto exit_port;
352
353 dd->irq_enabled = 1;
354 }
355
356 platform_set_drvdata(pdev, pp);
357
358 dev_info(_dev, "attached parallel port driver\n");
359 parport_announce_port(pp);
360
361 return 0;
362
363 exit_port:
364 parport_remove_port(pp);
365 exit_unmap:
366 iounmap(dd->base);
367 exit_res:
368 release_resource(dd->io);
369 kfree(dd->io);
370 exit_mem:
371 kfree(dd);
372 return ret;
373}
374
375static int parport_ax88796_remove(struct platform_device *pdev)
376{
377 struct parport *p = platform_get_drvdata(pdev);
378 struct ax_drvdata *dd = pp_to_drv(p);
379
380 free_irq(p->irq, p);
381 parport_remove_port(p);
382 iounmap(dd->base);
383 release_resource(dd->io);
384 kfree(dd->io);
385 kfree(dd);
386
387 return 0;
388}
389
390#ifdef CONFIG_PM
391
392static int parport_ax88796_suspend(struct platform_device *dev,
393 pm_message_t state)
394{
395 struct parport *p = platform_get_drvdata(dev);
396 struct ax_drvdata *dd = pp_to_drv(p);
397
398 parport_ax88796_save_state(p, &dd->suspend);
399 writeb(AX_CPR_nDOE | AX_CPR_STRB, dd->spp_cpr);
400 return 0;
401}
402
403static int parport_ax88796_resume(struct platform_device *dev)
404{
405 struct parport *p = platform_get_drvdata(dev);
406 struct ax_drvdata *dd = pp_to_drv(p);
407
408 parport_ax88796_restore_state(p, &dd->suspend);
409 return 0;
410}
411
412#else
413#define parport_ax88796_suspend NULL
414#define parport_ax88796_resume NULL
415#endif
416
417static struct platform_driver axdrv = {
418 .driver = {
419 .name = "ax88796-pp",
420 .owner = THIS_MODULE,
421 },
422 .probe = parport_ax88796_probe,
423 .remove = parport_ax88796_remove,
424 .suspend = parport_ax88796_suspend,
425 .resume = parport_ax88796_resume,
426};
427
428static int __init parport_ax88796_init(void)
429{
430 return platform_driver_register(&axdrv);
431}
432
433static void __exit parport_ax88796_exit(void)
434{
435 platform_driver_unregister(&axdrv);
436}
437
438module_init(parport_ax88796_init)
439module_exit(parport_ax88796_exit)
440
441MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
442MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
443MODULE_LICENSE("GPL");
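
For context only, and not part of the patch itself: parport_ax88796_probe() binds by platform device name and expects a single memory resource covering the three evenly spaced SPP registers (data, status, control) plus an optional IRQ. A minimal board-support sketch, with a placeholder base address and IRQ number, might look like this:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Placeholder values: the base address 0x08000000 and IRQ 10 are
 * illustrative only and do not come from any real board file. */
static struct resource ax_pp_resources[] = {
	[0] = {
		.start = 0x08000000,
		.end   = 0x08000000 + 0x2,	/* size 3 => spacing of 1 between registers */
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 10,
		.end   = 10,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device ax_pp_device = {
	.name		= "ax88796-pp",		/* must match axdrv.driver.name above */
	.id		= -1,
	.resource	= ax_pp_resources,
	.num_resources	= ARRAY_SIZE(ax_pp_resources),
};

/* Board init code would then call platform_device_register(&ax_pp_device). */
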
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index bbbfd79adbaf..2cb22c8d3357 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -218,7 +218,7 @@ static void free_port (struct parport *port)
218 * parport_get_port - increment a port's reference count 218 * parport_get_port - increment a port's reference count
219 * @port: the port 219 * @port: the port
220 * 220 *
221 * This ensure's that a struct parport pointer remains valid 221 * This ensures that a struct parport pointer remains valid
222 * until the matching parport_put_port() call. 222 * until the matching parport_put_port() call.
223 **/ 223 **/
224 224
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index bb19c64073c6..0b4adcb60df4 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -60,30 +60,34 @@ static void card_remove_first(struct pnp_dev * dev)
60 card_remove(dev); 60 card_remove(dev);
61} 61}
62 62
63static int card_probe(struct pnp_card * card, struct pnp_card_driver * drv) 63static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
64{ 64{
65 const struct pnp_card_device_id *id = match_card(drv,card); 65 const struct pnp_card_device_id *id;
66 if (id) { 66 struct pnp_card_link *clink;
67 struct pnp_card_link * clink = pnp_alloc(sizeof(struct pnp_card_link)); 67 struct pnp_dev *dev;
68 if (!clink) 68
69 return 0; 69 if (!drv->probe)
70 clink->card = card; 70 return 0;
71 clink->driver = drv; 71 id = match_card(drv,card);
72 clink->pm_state = PMSG_ON; 72 if (!id)
73 if (drv->probe) { 73 return 0;
74 if (drv->probe(clink, id)>=0) 74
75 return 1; 75 clink = pnp_alloc(sizeof(*clink));
76 else { 76 if (!clink)
77 struct pnp_dev * dev; 77 return 0;
78 card_for_each_dev(card, dev) { 78 clink->card = card;
79 if (dev->card_link == clink) 79 clink->driver = drv;
80 pnp_release_card_device(dev); 80 clink->pm_state = PMSG_ON;
81 } 81
82 kfree(clink); 82 if (drv->probe(clink, id) >= 0)
83 } 83 return 1;
84 } else 84
85 return 1; 85 /* Recovery */
86 card_for_each_dev(card, dev) {
87 if (dev->card_link == clink)
88 pnp_release_card_device(dev);
86 } 89 }
90 kfree(clink);
87 return 0; 91 return 0;
88} 92}
89 93
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 65d090dbef46..bccff400b198 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -73,6 +73,13 @@ config RTC_INTF_DEV
73 This driver can also be built as a module. If so, the module 73 This driver can also be built as a module. If so, the module
74 will be called rtc-dev. 74 will be called rtc-dev.
75 75
76config RTC_INTF_DEV_UIE_EMUL
77 bool "RTC UIE emulation on dev interface"
78 depends on RTC_INTF_DEV
79 help
80	  Provides an emulation of RTC_UIE if the underlying rtc chip
81	  driver does not provide RTC_UIE ioctls.
82
76comment "RTC drivers" 83comment "RTC drivers"
77 depends on RTC_CLASS 84 depends on RTC_CLASS
78 85
@@ -86,6 +93,34 @@ config RTC_DRV_X1205
86 This driver can also be built as a module. If so, the module 93 This driver can also be built as a module. If so, the module
87 will be called rtc-x1205. 94 will be called rtc-x1205.
88 95
96config RTC_DRV_DS1307
97 tristate "Dallas/Maxim DS1307 and similar I2C RTC chips"
98 depends on RTC_CLASS && I2C
99 help
100 If you say yes here you get support for various compatible RTC
101 chips (often with battery backup) connected with I2C. This driver
102 should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00,
103 and probably other chips. In some cases the RTC must already
104 have been initialized (by manufacturing or a bootloader).
105
106 The first seven registers on these chips hold an RTC, and other
107 registers may add features such as NVRAM, a trickle charger for
108 the RTC/NVRAM backup power, and alarms. This driver may not
109 expose all those available chip features.
110
111 This driver can also be built as a module. If so, the module
112 will be called rtc-ds1307.
113
114config RTC_DRV_DS1553
115 tristate "Dallas DS1553"
116 depends on RTC_CLASS
117 help
118 If you say yes here you get support for the
119 Dallas DS1553 timekeeping chip.
120
121 This driver can also be built as a module. If so, the module
122 will be called rtc-ds1553.
123
89config RTC_DRV_DS1672 124config RTC_DRV_DS1672
90 tristate "Dallas/Maxim DS1672" 125 tristate "Dallas/Maxim DS1672"
91 depends on RTC_CLASS && I2C 126 depends on RTC_CLASS && I2C
@@ -96,6 +131,16 @@ config RTC_DRV_DS1672
96 This driver can also be built as a module. If so, the module 131 This driver can also be built as a module. If so, the module
97 will be called rtc-ds1672. 132 will be called rtc-ds1672.
98 133
134config RTC_DRV_DS1742
135 tristate "Dallas DS1742"
136 depends on RTC_CLASS
137 help
138 If you say yes here you get support for the
139 Dallas DS1742 timekeeping chip.
140
141 This driver can also be built as a module. If so, the module
142 will be called rtc-ds1742.
143
99config RTC_DRV_PCF8563 144config RTC_DRV_PCF8563
100 tristate "Philips PCF8563/Epson RTC8564" 145 tristate "Philips PCF8563/Epson RTC8564"
101 depends on RTC_CLASS && I2C 146 depends on RTC_CLASS && I2C
@@ -107,6 +152,16 @@ config RTC_DRV_PCF8563
107 This driver can also be built as a module. If so, the module 152 This driver can also be built as a module. If so, the module
108 will be called rtc-pcf8563. 153 will be called rtc-pcf8563.
109 154
155config RTC_DRV_PCF8583
156 tristate "Philips PCF8583"
157 depends on RTC_CLASS && I2C
158 help
159 If you say yes here you get support for the
160 Philips PCF8583 RTC chip.
161
162 This driver can also be built as a module. If so, the module
163 will be called rtc-pcf8583.
164
110config RTC_DRV_RS5C372 165config RTC_DRV_RS5C372
111 tristate "Ricoh RS5C372A/B" 166 tristate "Ricoh RS5C372A/B"
112 depends on RTC_CLASS && I2C 167 depends on RTC_CLASS && I2C
@@ -157,6 +212,22 @@ config RTC_DRV_VR41XX
157 To compile this driver as a module, choose M here: the 212 To compile this driver as a module, choose M here: the
158 module will be called rtc-vr41xx. 213 module will be called rtc-vr41xx.
159 214
215config RTC_DRV_PL031
216 tristate "ARM AMBA PL031 RTC"
217 depends on RTC_CLASS && ARM_AMBA
218 help
219 If you say Y here you will get access to ARM AMBA
220	  PrimeCell PL031 RTC found on certain ARM SoCs.
221
222 To compile this driver as a module, choose M here: the
223 module will be called rtc-pl031.
224
225config RTC_DRV_AT91
226 tristate "AT91RM9200"
227 depends on RTC_CLASS && ARCH_AT91RM9200
228 help
229 Driver for the Atmel AT91RM9200's internal RTC (Realtime Clock).
230
160config RTC_DRV_TEST 231config RTC_DRV_TEST
161 tristate "Test driver/device" 232 tristate "Test driver/device"
162 depends on RTC_CLASS 233 depends on RTC_CLASS
@@ -172,4 +243,24 @@ config RTC_DRV_TEST
172 This driver can also be built as a module. If so, the module 243 This driver can also be built as a module. If so, the module
173 will be called rtc-test. 244 will be called rtc-test.
174 245
246config RTC_DRV_MAX6902
247 tristate "Maxim 6902"
248 depends on RTC_CLASS && SPI
249 help
250 If you say yes here you will get support for the
251 Maxim MAX6902 spi RTC chip.
252
253 This driver can also be built as a module. If so, the module
254 will be called rtc-max6902.
255
256config RTC_DRV_V3020
257 tristate "EM Microelectronic V3020"
258 depends on RTC_CLASS
259 help
260 If you say yes here you will get support for the
261 EM Microelectronic v3020 RTC chip.
262
263 This driver can also be built as a module. If so, the module
264 will be called rtc-v3020.
265
175endmenu 266endmenu
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index a9ca0f171686..900d210dd1a2 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -13,10 +13,18 @@ obj-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
13 13
14obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 14obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
15obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 15obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
16obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
16obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o 17obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
18obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
17obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o 19obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
20obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
18obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 21obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
19obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o 22obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
23obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
20obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o 24obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
21obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o 25obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
22obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o 26obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
27obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
28obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
29obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
30obj-$(CONFIG_RTC_DRV_AT91) += rtc-at91.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 413c7d54ea10..5396beec30d0 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -69,6 +69,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
69 rtc->id = id; 69 rtc->id = id;
70 rtc->ops = ops; 70 rtc->ops = ops;
71 rtc->owner = owner; 71 rtc->owner = owner;
72 rtc->max_user_freq = 64;
72 rtc->class_dev.dev = dev; 73 rtc->class_dev.dev = dev;
73 rtc->class_dev.class = rtc_class; 74 rtc->class_dev.class = rtc_class;
74 rtc->class_dev.release = rtc_device_release; 75 rtc->class_dev.release = rtc_device_release;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 56e490709b87..579cd667b16f 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -229,6 +229,9 @@ int rtc_irq_set_state(struct class_device *class_dev, struct rtc_task *task, int
229 unsigned long flags; 229 unsigned long flags;
230 struct rtc_device *rtc = to_rtc_device(class_dev); 230 struct rtc_device *rtc = to_rtc_device(class_dev);
231 231
232 if (rtc->ops->irq_set_state == NULL)
233 return -ENXIO;
234
232 spin_lock_irqsave(&rtc->irq_task_lock, flags); 235 spin_lock_irqsave(&rtc->irq_task_lock, flags);
233 if (rtc->irq_task != task) 236 if (rtc->irq_task != task)
234 err = -ENXIO; 237 err = -ENXIO;
@@ -243,25 +246,12 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_state);
243 246
244int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task, int freq) 247int rtc_irq_set_freq(struct class_device *class_dev, struct rtc_task *task, int freq)
245{ 248{
246 int err = 0, tmp = 0; 249 int err = 0;
247 unsigned long flags; 250 unsigned long flags;
248 struct rtc_device *rtc = to_rtc_device(class_dev); 251 struct rtc_device *rtc = to_rtc_device(class_dev);
249 252
250 /* allowed range is 2-8192 */ 253 if (rtc->ops->irq_set_freq == NULL)
251 if (freq < 2 || freq > 8192) 254 return -ENXIO;
252 return -EINVAL;
253/*
254 FIXME: this does not belong here, will move where appropriate
255 at a later stage. It cannot hurt right now, trust me :)
256 if ((freq > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE)))
257 return -EACCES;
258*/
259 /* check if freq is a power of 2 */
260 while (freq > (1 << tmp))
261 tmp++;
262
263 if (freq != (1 << tmp))
264 return -EINVAL;
265 255
266 spin_lock_irqsave(&rtc->irq_task_lock, flags); 256 spin_lock_irqsave(&rtc->irq_task_lock, flags);
267 if (rtc->irq_task != task) 257 if (rtc->irq_task != task)
diff --git a/drivers/rtc/rtc-at91.c b/drivers/rtc/rtc-at91.c
new file mode 100644
index 000000000000..b676f443c17e
--- /dev/null
+++ b/drivers/rtc/rtc-at91.c
@@ -0,0 +1,407 @@
1/*
2 * Real Time Clock interface for Linux on Atmel AT91RM9200
3 *
4 * Copyright (C) 2002 Rick Bronson
5 *
6 * Converted to RTC class model by Andrew Victor
7 *
8 * Ported to Linux 2.6 by Steven Scholz
9 * Based on s3c2410-rtc.c Simtec Electronics
10 *
11 * Based on sa1100-rtc.c by Nils Faerber
12 * Based on rtc.c by Paul Gortmaker
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/platform_device.h>
24#include <linux/time.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/interrupt.h>
28#include <linux/ioctl.h>
29#include <linux/completion.h>
30
31#include <asm/uaccess.h>
32#include <asm/rtc.h>
33
34#include <asm/mach/time.h>
35
36
37#define AT91_RTC_FREQ 1
38#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
39
40static DECLARE_COMPLETION(at91_rtc_updated);
41static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
42
43/*
44 * Decode time/date into rtc_time structure
45 */
46static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
47 struct rtc_time *tm)
48{
49 unsigned int time, date;
50
51 /* must read twice in case it changes */
52 do {
53 time = at91_sys_read(timereg);
54 date = at91_sys_read(calreg);
55 } while ((time != at91_sys_read(timereg)) ||
56 (date != at91_sys_read(calreg)));
57
58 tm->tm_sec = BCD2BIN((time & AT91_RTC_SEC) >> 0);
59 tm->tm_min = BCD2BIN((time & AT91_RTC_MIN) >> 8);
60 tm->tm_hour = BCD2BIN((time & AT91_RTC_HOUR) >> 16);
61
62 /*
63 * The Calendar Alarm register does not have a field for
64 * the year - so these will return an invalid value. When an
65 * alarm is set, at91_alarm_year will store the current year.
66 */
67 tm->tm_year = BCD2BIN(date & AT91_RTC_CENT) * 100; /* century */
68 tm->tm_year += BCD2BIN((date & AT91_RTC_YEAR) >> 8); /* year */
69
70 tm->tm_wday = BCD2BIN((date & AT91_RTC_DAY) >> 21) - 1; /* day of the week [0-6], Sunday=0 */
71 tm->tm_mon = BCD2BIN((date & AT91_RTC_MONTH) >> 16) - 1;
72 tm->tm_mday = BCD2BIN((date & AT91_RTC_DATE) >> 24);
73}
74
75/*
76 * Read current time and date in RTC
77 */
78static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
79{
80 at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, tm);
81 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
82 tm->tm_year = tm->tm_year - 1900;
83
84 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
85 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
86 tm->tm_hour, tm->tm_min, tm->tm_sec);
87
88 return 0;
89}
90
91/*
92 * Set current time and date in RTC
93 */
94static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
95{
96 unsigned long cr;
97
98 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
99 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
100 tm->tm_hour, tm->tm_min, tm->tm_sec);
101
102 /* Stop Time/Calendar from counting */
103 cr = at91_sys_read(AT91_RTC_CR);
104 at91_sys_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
105
106 at91_sys_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
107 wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
108 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
109
110 at91_sys_write(AT91_RTC_TIMR,
111 BIN2BCD(tm->tm_sec) << 0
112 | BIN2BCD(tm->tm_min) << 8
113 | BIN2BCD(tm->tm_hour) << 16);
114
115 at91_sys_write(AT91_RTC_CALR,
116 BIN2BCD((tm->tm_year + 1900) / 100) /* century */
117 | BIN2BCD(tm->tm_year % 100) << 8 /* year */
118 | BIN2BCD(tm->tm_mon + 1) << 16 /* tm_mon starts at zero */
119 | BIN2BCD(tm->tm_wday + 1) << 21 /* day of the week [0-6], Sunday=0 */
120 | BIN2BCD(tm->tm_mday) << 24);
121
122 /* Restart Time/Calendar */
123 cr = at91_sys_read(AT91_RTC_CR);
124 at91_sys_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
125
126 return 0;
127}
128
129/*
130 * Read alarm time and date in RTC
131 */
132static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
133{
134 struct rtc_time *tm = &alrm->time;
135
136 at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
137 tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
138 tm->tm_year = at91_alarm_year - 1900;
139
140 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
141 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
142 tm->tm_hour, tm->tm_min, tm->tm_sec);
143
144 return 0;
145}
146
147/*
148 * Set alarm time and date in RTC
149 */
150static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
151{
152 struct rtc_time tm;
153
154 at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
155
156 at91_alarm_year = tm.tm_year;
157
158 tm.tm_hour = alrm->time.tm_hour;
159 tm.tm_min = alrm->time.tm_min;
160 tm.tm_sec = alrm->time.tm_sec;
161
162 at91_sys_write(AT91_RTC_TIMALR,
163 BIN2BCD(tm.tm_sec) << 0
164 | BIN2BCD(tm.tm_min) << 8
165 | BIN2BCD(tm.tm_hour) << 16
166 | AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
167 at91_sys_write(AT91_RTC_CALALR,
168 BIN2BCD(tm.tm_mon + 1) << 16 /* tm_mon starts at zero */
169 | BIN2BCD(tm.tm_mday) << 24
170 | AT91_RTC_DATEEN | AT91_RTC_MTHEN);
171
172 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
173 at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
174 tm.tm_min, tm.tm_sec);
175
176 return 0;
177}
178
179/*
180 * Handle commands from user-space
181 */
182static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
183 unsigned long arg)
184{
185 int ret = 0;
186
187 pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __FUNCTION__, cmd, arg);
188
189 switch (cmd) {
190 case RTC_AIE_OFF: /* alarm off */
191 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
192 break;
193 case RTC_AIE_ON: /* alarm on */
194 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
195 break;
196 case RTC_UIE_OFF: /* update off */
197 case RTC_PIE_OFF: /* periodic off */
198 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
199 break;
200 case RTC_UIE_ON: /* update on */
201 case RTC_PIE_ON: /* periodic on */
202 at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
203 break;
204 case RTC_IRQP_READ: /* read periodic alarm frequency */
205 ret = put_user(AT91_RTC_FREQ, (unsigned long *) arg);
206 break;
207 case RTC_IRQP_SET: /* set periodic alarm frequency */
208 if (arg != AT91_RTC_FREQ)
209 ret = -EINVAL;
210 break;
211 default:
212 ret = -ENOIOCTLCMD;
213 break;
214 }
215
216 return ret;
217}
218
219/*
220 * Provide additional RTC information in /proc/driver/rtc
221 */
222static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
223{
224 unsigned long imr = at91_sys_read(AT91_RTC_IMR);
225
226 seq_printf(seq, "alarm_IRQ\t: %s\n",
227 (imr & AT91_RTC_ALARM) ? "yes" : "no");
228 seq_printf(seq, "update_IRQ\t: %s\n",
229 (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
230 seq_printf(seq, "periodic_IRQ\t: %s\n",
231 (imr & AT91_RTC_SECEV) ? "yes" : "no");
232 seq_printf(seq, "periodic_freq\t: %ld\n",
233 (unsigned long) AT91_RTC_FREQ);
234
235 return 0;
236}
237
238/*
239 * IRQ handler for the RTC
240 */
241static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id,
242 struct pt_regs *regs)
243{
244 struct platform_device *pdev = dev_id;
245 struct rtc_device *rtc = platform_get_drvdata(pdev);
246 unsigned int rtsr;
247 unsigned long events = 0;
248
249 rtsr = at91_sys_read(AT91_RTC_SR) & at91_sys_read(AT91_RTC_IMR);
250 if (rtsr) { /* this interrupt is shared! Is it ours? */
251 if (rtsr & AT91_RTC_ALARM)
252 events |= (RTC_AF | RTC_IRQF);
253 if (rtsr & AT91_RTC_SECEV)
254 events |= (RTC_UF | RTC_IRQF);
255 if (rtsr & AT91_RTC_ACKUPD)
256 complete(&at91_rtc_updated);
257
258 at91_sys_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
259
260 rtc_update_irq(&rtc->class_dev, 1, events);
261
262 pr_debug("%s(): num=%ld, events=0x%02lx\n", __FUNCTION__,
263 events >> 8, events & 0x000000FF);
264
265 return IRQ_HANDLED;
266 }
267 return IRQ_NONE; /* not handled */
268}
269
270static struct rtc_class_ops at91_rtc_ops = {
271 .ioctl = at91_rtc_ioctl,
272 .read_time = at91_rtc_readtime,
273 .set_time = at91_rtc_settime,
274 .read_alarm = at91_rtc_readalarm,
275 .set_alarm = at91_rtc_setalarm,
276 .proc = at91_rtc_proc,
277};
278
279/*
280 * Initialize and install RTC driver
281 */
282static int __init at91_rtc_probe(struct platform_device *pdev)
283{
284 struct rtc_device *rtc;
285 int ret;
286
287 at91_sys_write(AT91_RTC_CR, 0);
288 at91_sys_write(AT91_RTC_MR, 0); /* 24 hour mode */
289
290 /* Disable all interrupts */
291 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
292 AT91_RTC_SECEV | AT91_RTC_TIMEV |
293 AT91_RTC_CALEV);
294
295 ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
296 SA_SHIRQ, "at91_rtc", pdev);
297 if (ret) {
298 printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
299 AT91_ID_SYS);
300 return ret;
301 }
302
303 rtc = rtc_device_register(pdev->name, &pdev->dev,
304 &at91_rtc_ops, THIS_MODULE);
305 if (IS_ERR(rtc)) {
306 free_irq(AT91_ID_SYS, pdev);
307 return PTR_ERR(rtc);
308 }
309 platform_set_drvdata(pdev, rtc);
310
311 printk(KERN_INFO "AT91 Real Time Clock driver.\n");
312 return 0;
313}
314
315/*
316 * Disable and remove the RTC driver
317 */
318static int __devexit at91_rtc_remove(struct platform_device *pdev)
319{
320 struct rtc_device *rtc = platform_get_drvdata(pdev);
321
322 /* Disable all interrupts */
323 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
324 AT91_RTC_SECEV | AT91_RTC_TIMEV |
325 AT91_RTC_CALEV);
326 free_irq(AT91_ID_SYS, pdev);
327
328 rtc_device_unregister(rtc);
329 platform_set_drvdata(pdev, NULL);
330
331 return 0;
332}
333
334#ifdef CONFIG_PM
335
336/* AT91RM9200 RTC Power management control */
337
338static struct timespec at91_rtc_delta;
339
340static int at91_rtc_suspend(struct platform_device *pdev, pm_message_t state)
341{
342 struct rtc_time tm;
343 struct timespec time;
344
345 time.tv_nsec = 0;
346
347 /* calculate time delta for suspend */
348 at91_rtc_readtime(&pdev->dev, &tm);
349 rtc_tm_to_time(&tm, &time.tv_sec);
350 save_time_delta(&at91_rtc_delta, &time);
351
352 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
353 1900 + tm.tm_year, tm.tm_mon, tm.tm_mday,
354 tm.tm_hour, tm.tm_min, tm.tm_sec);
355
356 return 0;
357}
358
359static int at91_rtc_resume(struct platform_device *pdev)
360{
361 struct rtc_time tm;
362 struct timespec time;
363
364 time.tv_nsec = 0;
365
366 at91_rtc_readtime(&pdev->dev, &tm);
367 rtc_tm_to_time(&tm, &time.tv_sec);
368 restore_time_delta(&at91_rtc_delta, &time);
369
370 pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
371 1900 + tm.tm_year, tm.tm_mon, tm.tm_mday,
372 tm.tm_hour, tm.tm_min, tm.tm_sec);
373
374 return 0;
375}
376#else
377#define at91_rtc_suspend NULL
378#define at91_rtc_resume NULL
379#endif
380
381static struct platform_driver at91_rtc_driver = {
382 .probe = at91_rtc_probe,
383 .remove = at91_rtc_remove,
384 .suspend = at91_rtc_suspend,
385 .resume = at91_rtc_resume,
386 .driver = {
387 .name = "at91_rtc",
388 .owner = THIS_MODULE,
389 },
390};
391
392static int __init at91_rtc_init(void)
393{
394 return platform_driver_register(&at91_rtc_driver);
395}
396
397static void __exit at91_rtc_exit(void)
398{
399 platform_driver_unregister(&at91_rtc_driver);
400}
401
402module_init(at91_rtc_init);
403module_exit(at91_rtc_exit);
404
405MODULE_AUTHOR("Rick Bronson");
406MODULE_DESCRIPTION("RTC driver for Atmel AT91RM9200");
407MODULE_LICENSE("GPL");
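
For reference only, not part of the patch: AT91_RTC_CALR packs century, year, month, day-of-week and day-of-month into one word using the shifts shown in at91_rtc_settime() above. A standalone sketch of that packing for an assumed example date, Tuesday 15 August 2006:

#include <stdio.h>

/* Same arithmetic as the BIN2BCD() macro from <linux/bcd.h>. */
static unsigned int bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	/* Assumed example date: Tuesday, 15 August 2006 (tm_wday = 2, tm_mon = 7). */
	unsigned int calr = bin2bcd(2006 / 100)			/* century */
			  | bin2bcd(2006 % 100) << 8		/* year within century */
			  | bin2bcd(7 + 1) << 16		/* month, 1-12 */
			  | bin2bcd(2 + 1) << 21		/* day of week, 1-7 */
			  | bin2bcd(15) << 24;			/* day of month */

	printf("AT91_RTC_CALR = 0x%08x\n", calr);		/* prints 0x15680620 */
	return 0;
}
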
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 2011567005f9..61a58259c93f 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -48,6 +48,93 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
48 return err; 48 return err;
49} 49}
50 50
51#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
52/*
53 * Routine to poll RTC seconds field for change as often as possible,
54 * after the first RTC_UIE, use a timer to reduce polling
55 */
56static void rtc_uie_task(void *data)
57{
58 struct rtc_device *rtc = data;
59 struct rtc_time tm;
60 int num = 0;
61 int err;
62
63 err = rtc_read_time(&rtc->class_dev, &tm);
64 spin_lock_irq(&rtc->irq_lock);
65 if (rtc->stop_uie_polling || err) {
66 rtc->uie_task_active = 0;
67 } else if (rtc->oldsecs != tm.tm_sec) {
68 num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
69 rtc->oldsecs = tm.tm_sec;
70 rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
71 rtc->uie_timer_active = 1;
72 rtc->uie_task_active = 0;
73 add_timer(&rtc->uie_timer);
74 } else if (schedule_work(&rtc->uie_task) == 0) {
75 rtc->uie_task_active = 0;
76 }
77 spin_unlock_irq(&rtc->irq_lock);
78 if (num)
79 rtc_update_irq(&rtc->class_dev, num, RTC_UF | RTC_IRQF);
80}
81
82static void rtc_uie_timer(unsigned long data)
83{
84 struct rtc_device *rtc = (struct rtc_device *)data;
85 unsigned long flags;
86
87 spin_lock_irqsave(&rtc->irq_lock, flags);
88 rtc->uie_timer_active = 0;
89 rtc->uie_task_active = 1;
90	if (schedule_work(&rtc->uie_task) == 0)
91 rtc->uie_task_active = 0;
92 spin_unlock_irqrestore(&rtc->irq_lock, flags);
93}
94
95static void clear_uie(struct rtc_device *rtc)
96{
97 spin_lock_irq(&rtc->irq_lock);
98 if (rtc->irq_active) {
99 rtc->stop_uie_polling = 1;
100 if (rtc->uie_timer_active) {
101 spin_unlock_irq(&rtc->irq_lock);
102 del_timer_sync(&rtc->uie_timer);
103 spin_lock_irq(&rtc->irq_lock);
104 rtc->uie_timer_active = 0;
105 }
106 if (rtc->uie_task_active) {
107 spin_unlock_irq(&rtc->irq_lock);
108 flush_scheduled_work();
109 spin_lock_irq(&rtc->irq_lock);
110 }
111 rtc->irq_active = 0;
112 }
113 spin_unlock_irq(&rtc->irq_lock);
114}
115
116static int set_uie(struct rtc_device *rtc)
117{
118 struct rtc_time tm;
119 int err;
120
121 err = rtc_read_time(&rtc->class_dev, &tm);
122 if (err)
123 return err;
124 spin_lock_irq(&rtc->irq_lock);
125 if (!rtc->irq_active) {
126 rtc->irq_active = 1;
127 rtc->stop_uie_polling = 0;
128 rtc->oldsecs = tm.tm_sec;
129 rtc->uie_task_active = 1;
130 if (schedule_work(&rtc->uie_task) == 0)
131 rtc->uie_task_active = 0;
132 }
133 rtc->irq_data = 0;
134 spin_unlock_irq(&rtc->irq_lock);
135 return 0;
136}
137#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
51 138
52static ssize_t 139static ssize_t
53rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 140rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -127,6 +214,28 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
127 struct rtc_wkalrm alarm; 214 struct rtc_wkalrm alarm;
128 void __user *uarg = (void __user *) arg; 215 void __user *uarg = (void __user *) arg;
129 216
217	/* check that the caller has appropriate permissions
218 * for certain ioctls. doing this check here is useful
219 * to avoid duplicate code in each driver.
220 */
221 switch (cmd) {
222 case RTC_EPOCH_SET:
223 case RTC_SET_TIME:
224 if (!capable(CAP_SYS_TIME))
225 return -EACCES;
226 break;
227
228 case RTC_IRQP_SET:
229 if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
230 return -EACCES;
231 break;
232
233 case RTC_PIE_ON:
234 if (!capable(CAP_SYS_RESOURCE))
235 return -EACCES;
236 break;
237 }
238
130 /* avoid conflicting IRQ users */ 239 /* avoid conflicting IRQ users */
131 if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) { 240 if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) {
132 spin_lock(&rtc->irq_task_lock); 241 spin_lock(&rtc->irq_task_lock);
@@ -185,9 +294,6 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
185 break; 294 break;
186 295
187 case RTC_SET_TIME: 296 case RTC_SET_TIME:
188 if (!capable(CAP_SYS_TIME))
189 return -EACCES;
190
191 if (copy_from_user(&tm, uarg, sizeof(tm))) 297 if (copy_from_user(&tm, uarg, sizeof(tm)))
192 return -EFAULT; 298 return -EFAULT;
193 299
@@ -203,10 +309,6 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
203 err = -EINVAL; 309 err = -EINVAL;
204 break; 310 break;
205 } 311 }
206 if (!capable(CAP_SYS_TIME)) {
207 err = -EACCES;
208 break;
209 }
210 rtc_epoch = arg; 312 rtc_epoch = arg;
211 err = 0; 313 err = 0;
212#endif 314#endif
@@ -232,6 +334,14 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
232 return -EFAULT; 334 return -EFAULT;
233 break; 335 break;
234 336
337#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
338 case RTC_UIE_OFF:
339 clear_uie(rtc);
340 return 0;
341
342 case RTC_UIE_ON:
343 return set_uie(rtc);
344#endif
235 default: 345 default:
236 err = -ENOTTY; 346 err = -ENOTTY;
237 break; 347 break;
@@ -244,6 +354,9 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
244{ 354{
245 struct rtc_device *rtc = to_rtc_device(file->private_data); 355 struct rtc_device *rtc = to_rtc_device(file->private_data);
246 356
357#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
358 clear_uie(rtc);
359#endif
247 if (rtc->ops->release) 360 if (rtc->ops->release)
248 rtc->ops->release(rtc->class_dev.dev); 361 rtc->ops->release(rtc->class_dev.dev);
249 362
@@ -284,6 +397,10 @@ static int rtc_dev_add_device(struct class_device *class_dev,
284 mutex_init(&rtc->char_lock); 397 mutex_init(&rtc->char_lock);
285 spin_lock_init(&rtc->irq_lock); 398 spin_lock_init(&rtc->irq_lock);
286 init_waitqueue_head(&rtc->irq_queue); 399 init_waitqueue_head(&rtc->irq_queue);
400#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
401 INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
402 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
403#endif
287 404
288 cdev_init(&rtc->char_dev, &rtc_dev_fops); 405 cdev_init(&rtc->char_dev, &rtc_dev_fops);
289 rtc->char_dev.owner = rtc->owner; 406 rtc->char_dev.owner = rtc->owner;
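
For illustration only, not part of the patch: with CONFIG_RTC_INTF_DEV_UIE_EMUL enabled, user space can use RTC_UIE_ON/RTC_UIE_OFF through the character device exactly as with a native implementation. A minimal sketch, assuming the device node is /dev/rtc0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);		/* device node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, RTC_UIE_ON, 0) < 0)		/* emulated via rtc_uie_task if needed */
		return 1;
	if (read(fd, &data, sizeof(data)) > 0)		/* blocks until the next seconds update */
		printf("updates since last read: %lu\n", data >> 8);
	ioctl(fd, RTC_UIE_OFF, 0);
	close(fd);
	return 0;
}
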
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
new file mode 100644
index 000000000000..e8afb9384786
--- /dev/null
+++ b/drivers/rtc/rtc-ds1307.c
@@ -0,0 +1,388 @@
1/*
2 * rtc-ds1307.c - RTC driver for some mostly-compatible I2C chips.
3 *
4 * Copyright (C) 2005 James Chapman (ds1337 core)
5 * Copyright (C) 2006 David Brownell
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <linux/i2c.h>
16#include <linux/string.h>
17#include <linux/rtc.h>
18#include <linux/bcd.h>
19
20
21
22/* We can't determine type by probing, but if we expect pre-Linux code
23 * to have set the chip up as a clock (turning on the oscillator and
24 * setting the date and time), Linux can ignore the non-clock features.
25 * That's a natural job for a factory or repair bench.
26 *
27 * If the I2C "force" mechanism is used, we assume the chip is a ds1337.
28 * (Much better would be board-specific tables of I2C devices, along with
29 * the platform_data drivers would use to sort such issues out.)
30 */
31enum ds_type {
32 unknown = 0,
33 ds_1307, /* or ds1338, ... */
34 ds_1337, /* or ds1339, ... */
35 ds_1340, /* or st m41t00, ... */
36 // rs5c372 too? different address...
37};
38
39static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
40
41I2C_CLIENT_INSMOD;
42
43
44
45/* RTC registers don't differ much, except for the century flag */
46#define DS1307_REG_SECS 0x00 /* 00-59 */
47# define DS1307_BIT_CH 0x80
48#define DS1307_REG_MIN 0x01 /* 00-59 */
49#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
50# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
51# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
52#define DS1307_REG_WDAY 0x03 /* 01-07 */
53#define DS1307_REG_MDAY 0x04 /* 01-31 */
54#define DS1307_REG_MONTH 0x05 /* 01-12 */
55# define DS1337_BIT_CENTURY 0x80 /* in REG_MONTH */
56#define DS1307_REG_YEAR 0x06 /* 00-99 */
57
58/* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
59 * start at 7, and they differ a lot. Only control and status matter for RTC;
60 * be careful using them.
61 */
62#define DS1307_REG_CONTROL 0x07
63# define DS1307_BIT_OUT 0x80
64# define DS1307_BIT_SQWE 0x10
65# define DS1307_BIT_RS1 0x02
66# define DS1307_BIT_RS0 0x01
67#define DS1337_REG_CONTROL 0x0e
68# define DS1337_BIT_nEOSC 0x80
69# define DS1337_BIT_RS2 0x10
70# define DS1337_BIT_RS1 0x08
71# define DS1337_BIT_INTCN 0x04
72# define DS1337_BIT_A2IE 0x02
73# define DS1337_BIT_A1IE 0x01
74#define DS1337_REG_STATUS 0x0f
75# define DS1337_BIT_OSF 0x80
76# define DS1337_BIT_A2I 0x02
77# define DS1337_BIT_A1I 0x01
78#define DS1339_REG_TRICKLE 0x10
79
80
81
82struct ds1307 {
83 u8 reg_addr;
84 u8 regs[8];
85 enum ds_type type;
86 struct i2c_msg msg[2];
87 struct i2c_client client;
88 struct rtc_device *rtc;
89};
90
91
92static int ds1307_get_time(struct device *dev, struct rtc_time *t)
93{
94 struct ds1307 *ds1307 = dev_get_drvdata(dev);
95 int tmp;
96
97 /* read the RTC registers all at once */
98 ds1307->msg[1].flags = I2C_M_RD;
99 ds1307->msg[1].len = 7;
100
101 tmp = i2c_transfer(ds1307->client.adapter, ds1307->msg, 2);
102 if (tmp != 2) {
103 dev_err(dev, "%s error %d\n", "read", tmp);
104 return -EIO;
105 }
106
107 dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
108 "read",
109 ds1307->regs[0], ds1307->regs[1],
110 ds1307->regs[2], ds1307->regs[3],
111 ds1307->regs[4], ds1307->regs[5],
112 ds1307->regs[6]);
113
114 t->tm_sec = BCD2BIN(ds1307->regs[DS1307_REG_SECS] & 0x7f);
115 t->tm_min = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
116 tmp = ds1307->regs[DS1307_REG_HOUR] & 0x3f;
117 t->tm_hour = BCD2BIN(tmp);
118 t->tm_wday = BCD2BIN(ds1307->regs[DS1307_REG_WDAY] & 0x07) - 1;
119 t->tm_mday = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
120 tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f;
121 t->tm_mon = BCD2BIN(tmp) - 1;
122
123 /* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */
124 t->tm_year = BCD2BIN(ds1307->regs[DS1307_REG_YEAR]) + 100;
125
126 dev_dbg(dev, "%s secs=%d, mins=%d, "
127 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
128 "read", t->tm_sec, t->tm_min,
129 t->tm_hour, t->tm_mday,
130 t->tm_mon, t->tm_year, t->tm_wday);
131
132 return 0;
133}
134
135static int ds1307_set_time(struct device *dev, struct rtc_time *t)
136{
137 struct ds1307 *ds1307 = dev_get_drvdata(dev);
138 int result;
139 int tmp;
140 u8 *buf = ds1307->regs;
141
142 dev_dbg(dev, "%s secs=%d, mins=%d, "
143 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
144 "write", dt->tm_sec, dt->tm_min,
145 dt->tm_hour, dt->tm_mday,
146 dt->tm_mon, dt->tm_year, dt->tm_wday);
147
148 *buf++ = 0; /* first register addr */
149 buf[DS1307_REG_SECS] = BIN2BCD(t->tm_sec);
150 buf[DS1307_REG_MIN] = BIN2BCD(t->tm_min);
151 buf[DS1307_REG_HOUR] = BIN2BCD(t->tm_hour);
152 buf[DS1307_REG_WDAY] = BIN2BCD(t->tm_wday + 1);
153 buf[DS1307_REG_MDAY] = BIN2BCD(t->tm_mday);
154 buf[DS1307_REG_MONTH] = BIN2BCD(t->tm_mon + 1);
155
156 /* assume 20YY not 19YY */
157 tmp = t->tm_year - 100;
158 buf[DS1307_REG_YEAR] = BIN2BCD(tmp);
159
160 if (ds1307->type == ds_1337)
161 buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
162 else if (ds1307->type == ds_1340)
163 buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
164 | DS1340_BIT_CENTURY;
165
166 ds1307->msg[1].flags = 0;
167 ds1307->msg[1].len = 8;
168
169 dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
170 "write", buf[0], buf[1], buf[2], buf[3],
171 buf[4], buf[5], buf[6]);
172
173 result = i2c_transfer(ds1307->client.adapter, &ds1307->msg[1], 1);
174 if (result != 1) {
175		dev_err(dev, "%s error %d\n", "write", result);
176 return -EIO;
177 }
178 return 0;
179}
180
181static struct rtc_class_ops ds13xx_rtc_ops = {
182 .read_time = ds1307_get_time,
183 .set_time = ds1307_set_time,
184};
185
186static struct i2c_driver ds1307_driver;
187
188static int __devinit
189ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
190{
191 struct ds1307 *ds1307;
192 int err = -ENODEV;
193 struct i2c_client *client;
194 int tmp;
195
196 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) {
197 err = -ENOMEM;
198 goto exit;
199 }
200
201 client = &ds1307->client;
202 client->addr = address;
203 client->adapter = adapter;
204 client->driver = &ds1307_driver;
205 client->flags = 0;
206
207 i2c_set_clientdata(client, ds1307);
208
209 ds1307->msg[0].addr = client->addr;
210 ds1307->msg[0].flags = 0;
211 ds1307->msg[0].len = 1;
212 ds1307->msg[0].buf = &ds1307->reg_addr;
213
214 ds1307->msg[1].addr = client->addr;
215 ds1307->msg[1].flags = I2C_M_RD;
216 ds1307->msg[1].len = sizeof(ds1307->regs);
217 ds1307->msg[1].buf = ds1307->regs;
218
219 /* HACK: "force" implies "needs ds1337-style-oscillator setup" */
220 if (kind >= 0) {
221 ds1307->type = ds_1337;
222
223 ds1307->reg_addr = DS1337_REG_CONTROL;
224 ds1307->msg[1].len = 2;
225
226 tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
227 if (tmp != 2) {
228 pr_debug("read error %d\n", tmp);
229 err = -EIO;
230 goto exit_free;
231 }
232
233 ds1307->reg_addr = 0;
234 ds1307->msg[1].len = sizeof(ds1307->regs);
235
236 /* oscillator is off; need to turn it on */
237 if ((ds1307->regs[0] & DS1337_BIT_nEOSC)
238 || (ds1307->regs[1] & DS1337_BIT_OSF)) {
239 printk(KERN_ERR "no ds1337 oscillator code\n");
240 goto exit_free;
241 }
242 } else
243 ds1307->type = ds_1307;
244
245read_rtc:
246 /* read RTC registers */
247
248 tmp = i2c_transfer(client->adapter, ds1307->msg, 2);
249 if (tmp != 2) {
250 pr_debug("read error %d\n", tmp);
251 err = -EIO;
252 goto exit_free;
253 }
254
255 /* minimal sanity checking; some chips (like DS1340) don't
256 * specify the extra bits as must-be-zero, but there are
257 * still a few values that are clearly out-of-range.
258 */
259 tmp = ds1307->regs[DS1307_REG_SECS];
260 if (tmp & DS1307_BIT_CH) {
261 if (ds1307->type && ds1307->type != ds_1307) {
262 pr_debug("not a ds1307?\n");
263 goto exit_free;
264 }
265 ds1307->type = ds_1307;
266
267 /* this partial initialization should work for ds1307,
268 * ds1338, ds1340, st m41t00, and more.
269 */
270 dev_warn(&client->dev, "oscillator started; SET TIME!\n");
271 i2c_smbus_write_byte_data(client, 0, 0);
272 goto read_rtc;
273 }
274 tmp = BCD2BIN(tmp & 0x7f);
275 if (tmp > 60)
276 goto exit_free;
277 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
278 if (tmp > 60)
279 goto exit_free;
280
281 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
282 if (tmp == 0 || tmp > 31)
283 goto exit_free;
284
285 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
286 if (tmp == 0 || tmp > 12)
287 goto exit_free;
288
289	/* force into 24 hour mode (most chips) or
290 * disable century bit (ds1340)
291 */
292 tmp = ds1307->regs[DS1307_REG_HOUR];
293 if (tmp & (1 << 6)) {
294 if (tmp & (1 << 5))
295 tmp = BCD2BIN(tmp & 0x1f) + 12;
296 else
297 tmp = BCD2BIN(tmp);
298 i2c_smbus_write_byte_data(client,
299 DS1307_REG_HOUR,
300 BIN2BCD(tmp));
301 }
302
303 /* FIXME chips like 1337 can generate alarm irqs too; those are
304 * worth exposing through the API (especially when the irq is
305 * wakeup-capable).
306 */
307
308 switch (ds1307->type) {
309 case unknown:
310 strlcpy(client->name, "unknown", I2C_NAME_SIZE);
311 break;
312 case ds_1307:
313 strlcpy(client->name, "ds1307", I2C_NAME_SIZE);
314 break;
315 case ds_1337:
316 strlcpy(client->name, "ds1337", I2C_NAME_SIZE);
317 break;
318 case ds_1340:
319 strlcpy(client->name, "ds1340", I2C_NAME_SIZE);
320 break;
321 }
322
323 /* Tell the I2C layer a new client has arrived */
324 if ((err = i2c_attach_client(client)))
325 goto exit_free;
326
327 ds1307->rtc = rtc_device_register(client->name, &client->dev,
328 &ds13xx_rtc_ops, THIS_MODULE);
329 if (IS_ERR(ds1307->rtc)) {
330 err = PTR_ERR(ds1307->rtc);
331 dev_err(&client->dev,
332 "unable to register the class device\n");
333 goto exit_detach;
334 }
335
336 return 0;
337
338exit_detach:
339 i2c_detach_client(client);
340exit_free:
341 kfree(ds1307);
342exit:
343 return err;
344}
345
346static int __devinit
347ds1307_attach_adapter(struct i2c_adapter *adapter)
348{
349 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
350 return 0;
351 return i2c_probe(adapter, &addr_data, ds1307_detect);
352}
353
354static int __devexit ds1307_detach_client(struct i2c_client *client)
355{
356 int err;
357 struct ds1307 *ds1307 = i2c_get_clientdata(client);
358
359 rtc_device_unregister(ds1307->rtc);
360 if ((err = i2c_detach_client(client)))
361 return err;
362 kfree(ds1307);
363 return 0;
364}
365
366static struct i2c_driver ds1307_driver = {
367 .driver = {
368 .name = "ds1307",
369 .owner = THIS_MODULE,
370 },
371 .attach_adapter = ds1307_attach_adapter,
372 .detach_client = __devexit_p(ds1307_detach_client),
373};
374
375static int __init ds1307_init(void)
376{
377 return i2c_add_driver(&ds1307_driver);
378}
379module_init(ds1307_init);
380
381static void __exit ds1307_exit(void)
382{
383 i2c_del_driver(&ds1307_driver);
384}
385module_exit(ds1307_exit);
386
387MODULE_DESCRIPTION("RTC driver for DS1307 and similar chips");
388MODULE_LICENSE("GPL");
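
For reference only, not part of the patch: all of these RTC drivers decode the chips' BCD registers with BCD2BIN() from <linux/bcd.h> (the reverse direction is shown in the AT91 sketch earlier). A standalone sketch of the same arithmetic, decoding a raw seconds register value of 0x59:

#include <stdio.h>

/* Same arithmetic as the BCD2BIN() macro in <linux/bcd.h>. */
static unsigned int bcd2bin(unsigned int val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
	unsigned int secs = 0x59;	/* raw DS1307_REG_SECS contents, CH bit clear */

	printf("0x%02x -> %u seconds\n", secs, bcd2bin(secs & 0x7f));	/* prints 59 */
	return 0;
}
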
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
new file mode 100644
index 000000000000..ecafbad41a24
--- /dev/null
+++ b/drivers/rtc/rtc-ds1553.c
@@ -0,0 +1,414 @@
1/*
2 * An rtc driver for the Dallas DS1553
3 *
4 * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/bcd.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/delay.h>
15#include <linux/jiffies.h>
16#include <linux/interrupt.h>
17#include <linux/rtc.h>
18#include <linux/platform_device.h>
19#include <linux/io.h>
20
21#define DRV_VERSION "0.1"
22
23#define RTC_REG_SIZE 0x2000
24#define RTC_OFFSET 0x1ff0
25
26#define RTC_FLAGS (RTC_OFFSET + 0)
27#define RTC_SECONDS_ALARM (RTC_OFFSET + 2)
28#define RTC_MINUTES_ALARM (RTC_OFFSET + 3)
29#define RTC_HOURS_ALARM (RTC_OFFSET + 4)
30#define RTC_DATE_ALARM (RTC_OFFSET + 5)
31#define RTC_INTERRUPTS (RTC_OFFSET + 6)
32#define RTC_WATCHDOG (RTC_OFFSET + 7)
33#define RTC_CONTROL (RTC_OFFSET + 8)
34#define RTC_CENTURY (RTC_OFFSET + 8)
35#define RTC_SECONDS (RTC_OFFSET + 9)
36#define RTC_MINUTES (RTC_OFFSET + 10)
37#define RTC_HOURS (RTC_OFFSET + 11)
38#define RTC_DAY (RTC_OFFSET + 12)
39#define RTC_DATE (RTC_OFFSET + 13)
40#define RTC_MONTH (RTC_OFFSET + 14)
41#define RTC_YEAR (RTC_OFFSET + 15)
42
43#define RTC_CENTURY_MASK 0x3f
44#define RTC_SECONDS_MASK 0x7f
45#define RTC_DAY_MASK 0x07
46
47/* Bits in the Control/Century register */
48#define RTC_WRITE 0x80
49#define RTC_READ 0x40
50
51/* Bits in the Seconds register */
52#define RTC_STOP 0x80
53
54/* Bits in the Flags register */
55#define RTC_FLAGS_AF 0x40
56#define RTC_FLAGS_BLF 0x10
57
58/* Bits in the Interrupts register */
59#define RTC_INTS_AE 0x80
60
61struct rtc_plat_data {
62 struct rtc_device *rtc;
63 void __iomem *ioaddr;
64 unsigned long baseaddr;
65 unsigned long last_jiffies;
66 int irq;
67 unsigned int irqen;
68 int alrm_sec;
69 int alrm_min;
70 int alrm_hour;
71 int alrm_mday;
72};
73
74static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
75{
76 struct platform_device *pdev = to_platform_device(dev);
77 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
78 void __iomem *ioaddr = pdata->ioaddr;
79 u8 century;
80
81 century = BIN2BCD((tm->tm_year + 1900) / 100);
82
83 writeb(RTC_WRITE, pdata->ioaddr + RTC_CONTROL);
84
85 writeb(BIN2BCD(tm->tm_year % 100), ioaddr + RTC_YEAR);
86 writeb(BIN2BCD(tm->tm_mon + 1), ioaddr + RTC_MONTH);
87 writeb(BIN2BCD(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
88 writeb(BIN2BCD(tm->tm_mday), ioaddr + RTC_DATE);
89 writeb(BIN2BCD(tm->tm_hour), ioaddr + RTC_HOURS);
90 writeb(BIN2BCD(tm->tm_min), ioaddr + RTC_MINUTES);
91 writeb(BIN2BCD(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
92
93 /* RTC_CENTURY and RTC_CONTROL share same register */
94 writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY);
95 writeb(century & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL);
96 return 0;
97}
98
99static int ds1553_rtc_read_time(struct device *dev, struct rtc_time *tm)
100{
101 struct platform_device *pdev = to_platform_device(dev);
102 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
103 void __iomem *ioaddr = pdata->ioaddr;
104 unsigned int year, month, day, hour, minute, second, week;
105 unsigned int century;
106
107 /* give enough time to update RTC in case of continuous read */
108 if (pdata->last_jiffies == jiffies)
109 msleep(1);
110 pdata->last_jiffies = jiffies;
111 writeb(RTC_READ, ioaddr + RTC_CONTROL);
112 second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK;
113 minute = readb(ioaddr + RTC_MINUTES);
114 hour = readb(ioaddr + RTC_HOURS);
115 day = readb(ioaddr + RTC_DATE);
116 week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK;
117 month = readb(ioaddr + RTC_MONTH);
118 year = readb(ioaddr + RTC_YEAR);
119 century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
120 writeb(0, ioaddr + RTC_CONTROL);
121 tm->tm_sec = BCD2BIN(second);
122 tm->tm_min = BCD2BIN(minute);
123 tm->tm_hour = BCD2BIN(hour);
124 tm->tm_mday = BCD2BIN(day);
125 tm->tm_wday = BCD2BIN(week);
126 tm->tm_mon = BCD2BIN(month) - 1;
127 /* year is 1900 + tm->tm_year */
128 tm->tm_year = BCD2BIN(year) + BCD2BIN(century) * 100 - 1900;
129
130 if (rtc_valid_tm(tm) < 0) {
131 dev_err(dev, "retrieved date/time is not valid.\n");
132 rtc_time_to_tm(0, tm);
133 }
134 return 0;
135}
136
137static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
138{
139 void __iomem *ioaddr = pdata->ioaddr;
140 unsigned long flags;
141
142 spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
143 writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
144 0x80 : BIN2BCD(pdata->alrm_mday),
145 ioaddr + RTC_DATE_ALARM);
146 writeb(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
147 0x80 : BIN2BCD(pdata->alrm_hour),
148 ioaddr + RTC_HOURS_ALARM);
149 writeb(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
150 0x80 : BIN2BCD(pdata->alrm_min),
151 ioaddr + RTC_MINUTES_ALARM);
152 writeb(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
153 0x80 : BIN2BCD(pdata->alrm_sec),
154 ioaddr + RTC_SECONDS_ALARM);
155 writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS);
156 readb(ioaddr + RTC_FLAGS); /* clear interrupts */
157 spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
158}
159
160static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
161{
162 struct platform_device *pdev = to_platform_device(dev);
163 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
164
165 if (pdata->irq < 0)
166 return -EINVAL;
167 pdata->alrm_mday = alrm->time.tm_mday;
168 pdata->alrm_hour = alrm->time.tm_hour;
169 pdata->alrm_min = alrm->time.tm_min;
170 pdata->alrm_sec = alrm->time.tm_sec;
171 if (alrm->enabled)
172 pdata->irqen |= RTC_AF;
173 ds1553_rtc_update_alarm(pdata);
174 return 0;
175}
176
177static int ds1553_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
178{
179 struct platform_device *pdev = to_platform_device(dev);
180 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
181
182 if (pdata->irq < 0)
183 return -EINVAL;
184 alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
185 alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
186 alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
187 alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
188 alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
189 return 0;
190}
191
192static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id,
193 struct pt_regs *regs)
194{
195 struct platform_device *pdev = dev_id;
196 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
197 void __iomem *ioaddr = pdata->ioaddr;
198 unsigned long events = RTC_IRQF;
199
200 /* read and clear interrupt */
201 if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
202 return IRQ_NONE;
203 if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
204 events |= RTC_UF;
205 else
206 events |= RTC_AF;
207 rtc_update_irq(&pdata->rtc->class_dev, 1, events);
208 return IRQ_HANDLED;
209}
210
211static void ds1553_rtc_release(struct device *dev)
212{
213 struct platform_device *pdev = to_platform_device(dev);
214 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
215
216 if (pdata->irq >= 0) {
217 pdata->irqen = 0;
218 ds1553_rtc_update_alarm(pdata);
219 }
220}
221
222static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
223 unsigned long arg)
224{
225 struct platform_device *pdev = to_platform_device(dev);
226 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
227
228 if (pdata->irq < 0)
229 return -ENOIOCTLCMD;
230 switch (cmd) {
231 case RTC_AIE_OFF:
232 pdata->irqen &= ~RTC_AF;
233 ds1553_rtc_update_alarm(pdata);
234 break;
235 case RTC_AIE_ON:
236 pdata->irqen |= RTC_AF;
237 ds1553_rtc_update_alarm(pdata);
238 break;
239 case RTC_UIE_OFF:
240 pdata->irqen &= ~RTC_UF;
241 ds1553_rtc_update_alarm(pdata);
242 break;
243 case RTC_UIE_ON:
244 pdata->irqen |= RTC_UF;
245 ds1553_rtc_update_alarm(pdata);
246 break;
247 default:
248 return -ENOIOCTLCMD;
249 }
250 return 0;
251}
252
253static struct rtc_class_ops ds1553_rtc_ops = {
254 .read_time = ds1553_rtc_read_time,
255 .set_time = ds1553_rtc_set_time,
256 .read_alarm = ds1553_rtc_read_alarm,
257 .set_alarm = ds1553_rtc_set_alarm,
258 .release = ds1553_rtc_release,
259 .ioctl = ds1553_rtc_ioctl,
260};
261
262static ssize_t ds1553_nvram_read(struct kobject *kobj, char *buf,
263 loff_t pos, size_t size)
264{
265 struct platform_device *pdev =
266 to_platform_device(container_of(kobj, struct device, kobj));
267 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
268 void __iomem *ioaddr = pdata->ioaddr;
269 ssize_t count;
270
271 for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
272 *buf++ = readb(ioaddr + pos++);
273 return count;
274}
275
276static ssize_t ds1553_nvram_write(struct kobject *kobj, char *buf,
277 loff_t pos, size_t size)
278{
279 struct platform_device *pdev =
280 to_platform_device(container_of(kobj, struct device, kobj));
281 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
282 void __iomem *ioaddr = pdata->ioaddr;
283 ssize_t count;
284
285 for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
286 writeb(*buf++, ioaddr + pos++);
287 return count;
288}
289
290static struct bin_attribute ds1553_nvram_attr = {
291 .attr = {
292 .name = "nvram",
293 .mode = S_IRUGO | S_IWUGO,
294 .owner = THIS_MODULE,
295 },
296 .size = RTC_OFFSET,
297 .read = ds1553_nvram_read,
298 .write = ds1553_nvram_write,
299};
300
301static int __init ds1553_rtc_probe(struct platform_device *pdev)
302{
303 struct rtc_device *rtc;
304 struct resource *res;
305 unsigned int cen, sec;
306 struct rtc_plat_data *pdata = NULL;
307 void __iomem *ioaddr = NULL;
308 int ret = 0;
309
310 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
311 if (!res)
312 return -ENODEV;
313 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
314 if (!pdata)
315 return -ENOMEM;
316 pdata->irq = -1;
317 if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
318 ret = -EBUSY;
319 goto out;
320 }
321 pdata->baseaddr = res->start;
322 ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
323 if (!ioaddr) {
324 ret = -ENOMEM;
325 goto out;
326 }
327 pdata->ioaddr = ioaddr;
328 pdata->irq = platform_get_irq(pdev, 0);
329
330 /* turn RTC on if it was not on */
331 sec = readb(ioaddr + RTC_SECONDS);
332 if (sec & RTC_STOP) {
333 sec &= RTC_SECONDS_MASK;
334 cen = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
335 writeb(RTC_WRITE, ioaddr + RTC_CONTROL);
336 writeb(sec, ioaddr + RTC_SECONDS);
337 writeb(cen & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL);
338 }
339 if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF)
340 dev_warn(&pdev->dev, "voltage-low detected.\n");
341
342 if (pdata->irq >= 0) {
343 writeb(0, ioaddr + RTC_INTERRUPTS);
344 if (request_irq(pdata->irq, ds1553_rtc_interrupt, SA_SHIRQ,
345 pdev->name, pdev) < 0) {
346 dev_warn(&pdev->dev, "interrupt not available.\n");
347 pdata->irq = -1;
348 }
349 }
350
351 rtc = rtc_device_register(pdev->name, &pdev->dev,
352 &ds1553_rtc_ops, THIS_MODULE);
353 if (IS_ERR(rtc)) {
354 ret = PTR_ERR(rtc);
355 goto out;
356 }
357 pdata->rtc = rtc;
358 pdata->last_jiffies = jiffies;
359 platform_set_drvdata(pdev, pdata);
360 sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
361 return 0;
362 out:
363 if (pdata->irq >= 0)
364 free_irq(pdata->irq, pdev);
365 if (ioaddr)
366 iounmap(ioaddr);
367 if (pdata->baseaddr)
368 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
369 kfree(pdata);
370 return ret;
371}
372
373static int __devexit ds1553_rtc_remove(struct platform_device *pdev)
374{
375 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
376
377 sysfs_remove_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
378 rtc_device_unregister(pdata->rtc);
379 if (pdata->irq >= 0) {
380 writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
381 free_irq(pdata->irq, pdev);
382 }
383 iounmap(pdata->ioaddr);
384 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
385 kfree(pdata);
386 return 0;
387}
388
389static struct platform_driver ds1553_rtc_driver = {
390 .probe = ds1553_rtc_probe,
391 .remove = __devexit_p(ds1553_rtc_remove),
392 .driver = {
393 .name = "ds1553",
394 .owner = THIS_MODULE,
395 },
396};
397
398static __init int ds1553_init(void)
399{
400 return platform_driver_register(&ds1553_rtc_driver);
401}
402
403static __exit void ds1553_exit(void)
404{
405 return platform_driver_unregister(&ds1553_rtc_driver);
406}
407
408module_init(ds1553_init);
409module_exit(ds1553_exit);
410
411MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
412MODULE_DESCRIPTION("Dallas DS1553 RTC driver");
413MODULE_LICENSE("GPL");
414MODULE_VERSION(DRV_VERSION);
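
The alarm registers programmed by ds1553_rtc_update_alarm() above follow a wildcard convention: writing 0x80 (the alarm-mask bit) into a field makes the chip's comparator ignore that field, and the driver also uses it to emulate update interrupts — when RTC_UF is enabled every field is wildcarded, so the alarm matches once per second. The standalone sketch below shows how each alarm byte is derived; alarm_byte(), bin2bcd() and ALARM_WILDCARD are illustrative names, not part of the driver, and the snippet builds with any host C compiler.

#include <stdio.h>

#define ALARM_WILDCARD 0x80	/* alarm-mask bit: chip ignores this field */

/* illustrative helpers, not part of the driver */
static unsigned char bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) | (val % 10);
}

static unsigned char alarm_byte(int field, int update_irq_enabled)
{
	if (field < 0 || update_irq_enabled)
		return ALARM_WILDCARD;
	return bin2bcd(field);
}

int main(void)
{
	/* alarm at 07:30:00 on day 15 of the month, alarm interrupt only */
	printf("mday=%02x hour=%02x min=%02x sec=%02x\n",
	       alarm_byte(15, 0), alarm_byte(7, 0),
	       alarm_byte(30, 0), alarm_byte(0, 0));
	/* update interrupt enabled: every field becomes the wildcard */
	printf("mday=%02x (update-interrupt mode)\n", alarm_byte(15, 1));
	return 0;
}
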
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
new file mode 100644
index 000000000000..8e47e5a06d2a
--- /dev/null
+++ b/drivers/rtc/rtc-ds1742.c
@@ -0,0 +1,259 @@
1/*
2 * An rtc driver for the Dallas DS1742
3 *
4 * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/bcd.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
14#include <linux/delay.h>
15#include <linux/jiffies.h>
16#include <linux/rtc.h>
17#include <linux/platform_device.h>
18#include <linux/io.h>
19
20#define DRV_VERSION "0.1"
21
22#define RTC_REG_SIZE 0x800
23#define RTC_OFFSET 0x7f8
24
25#define RTC_CONTROL (RTC_OFFSET + 0)
26#define RTC_CENTURY (RTC_OFFSET + 0)
27#define RTC_SECONDS (RTC_OFFSET + 1)
28#define RTC_MINUTES (RTC_OFFSET + 2)
29#define RTC_HOURS (RTC_OFFSET + 3)
30#define RTC_DAY (RTC_OFFSET + 4)
31#define RTC_DATE (RTC_OFFSET + 5)
32#define RTC_MONTH (RTC_OFFSET + 6)
33#define RTC_YEAR (RTC_OFFSET + 7)
34
35#define RTC_CENTURY_MASK 0x3f
36#define RTC_SECONDS_MASK 0x7f
37#define RTC_DAY_MASK 0x07
38
39/* Bits in the Control/Century register */
40#define RTC_WRITE 0x80
41#define RTC_READ 0x40
42
43/* Bits in the Seconds register */
44#define RTC_STOP 0x80
45
46/* Bits in the Day register */
47#define RTC_BATT_FLAG 0x80
48
49struct rtc_plat_data {
50 struct rtc_device *rtc;
51 void __iomem *ioaddr;
52 unsigned long baseaddr;
53 unsigned long last_jiffies;
54};
55
56static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm)
57{
58 struct platform_device *pdev = to_platform_device(dev);
59 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
60 void __iomem *ioaddr = pdata->ioaddr;
61 u8 century;
62
63 century = BIN2BCD((tm->tm_year + 1900) / 100);
64
65 writeb(RTC_WRITE, ioaddr + RTC_CONTROL);
66
67 writeb(BIN2BCD(tm->tm_year % 100), ioaddr + RTC_YEAR);
68 writeb(BIN2BCD(tm->tm_mon + 1), ioaddr + RTC_MONTH);
69 writeb(BIN2BCD(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY);
70 writeb(BIN2BCD(tm->tm_mday), ioaddr + RTC_DATE);
71 writeb(BIN2BCD(tm->tm_hour), ioaddr + RTC_HOURS);
72 writeb(BIN2BCD(tm->tm_min), ioaddr + RTC_MINUTES);
73 writeb(BIN2BCD(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS);
74
75 /* RTC_CENTURY and RTC_CONTROL share same register */
76 writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY);
77 writeb(century & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL);
78 return 0;
79}
80
81static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm)
82{
83 struct platform_device *pdev = to_platform_device(dev);
84 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
85 void __iomem *ioaddr = pdata->ioaddr;
86 unsigned int year, month, day, hour, minute, second, week;
87 unsigned int century;
88
89 /* give enough time to update RTC in case of continuous read */
90 if (pdata->last_jiffies == jiffies)
91 msleep(1);
92 pdata->last_jiffies = jiffies;
93 writeb(RTC_READ, ioaddr + RTC_CONTROL);
94 second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK;
95 minute = readb(ioaddr + RTC_MINUTES);
96 hour = readb(ioaddr + RTC_HOURS);
97 day = readb(ioaddr + RTC_DATE);
98 week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK;
99 month = readb(ioaddr + RTC_MONTH);
100 year = readb(ioaddr + RTC_YEAR);
101 century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
102 writeb(0, ioaddr + RTC_CONTROL);
103 tm->tm_sec = BCD2BIN(second);
104 tm->tm_min = BCD2BIN(minute);
105 tm->tm_hour = BCD2BIN(hour);
106 tm->tm_mday = BCD2BIN(day);
107 tm->tm_wday = BCD2BIN(week);
108 tm->tm_mon = BCD2BIN(month) - 1;
109 /* year is 1900 + tm->tm_year */
110 tm->tm_year = BCD2BIN(year) + BCD2BIN(century) * 100 - 1900;
111
112 if (rtc_valid_tm(tm) < 0) {
113 dev_err(dev, "retrieved date/time is not valid.\n");
114 rtc_time_to_tm(0, tm);
115 }
116 return 0;
117}
118
119static struct rtc_class_ops ds1742_rtc_ops = {
120 .read_time = ds1742_rtc_read_time,
121 .set_time = ds1742_rtc_set_time,
122};
123
124static ssize_t ds1742_nvram_read(struct kobject *kobj, char *buf,
125 loff_t pos, size_t size)
126{
127 struct platform_device *pdev =
128 to_platform_device(container_of(kobj, struct device, kobj));
129 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
130 void __iomem *ioaddr = pdata->ioaddr;
131 ssize_t count;
132
133 for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
134 *buf++ = readb(ioaddr + pos++);
135 return count;
136}
137
138static ssize_t ds1742_nvram_write(struct kobject *kobj, char *buf,
139 loff_t pos, size_t size)
140{
141 struct platform_device *pdev =
142 to_platform_device(container_of(kobj, struct device, kobj));
143 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
144 void __iomem *ioaddr = pdata->ioaddr;
145 ssize_t count;
146
147 for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
148 writeb(*buf++, ioaddr + pos++);
149 return count;
150}
151
152static struct bin_attribute ds1742_nvram_attr = {
153 .attr = {
154 .name = "nvram",
155 .mode = S_IRUGO | S_IWUGO,
156 .owner = THIS_MODULE,
157 },
158 .size = RTC_OFFSET,
159 .read = ds1742_nvram_read,
160 .write = ds1742_nvram_write,
161};
162
163static int __init ds1742_rtc_probe(struct platform_device *pdev)
164{
165 struct rtc_device *rtc;
166 struct resource *res;
167 unsigned int cen, sec;
168 struct rtc_plat_data *pdata = NULL;
169 void __iomem *ioaddr = NULL;
170 int ret = 0;
171
172 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
173 if (!res)
174 return -ENODEV;
175 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
176 if (!pdata)
177 return -ENOMEM;
178 if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
179 ret = -EBUSY;
180 goto out;
181 }
182 pdata->baseaddr = res->start;
183 ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
184 if (!ioaddr) {
185 ret = -ENOMEM;
186 goto out;
187 }
188 pdata->ioaddr = ioaddr;
189
190 /* turn RTC on if it was not on */
191 sec = readb(ioaddr + RTC_SECONDS);
192 if (sec & RTC_STOP) {
193 sec &= RTC_SECONDS_MASK;
194 cen = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK;
195 writeb(RTC_WRITE, ioaddr + RTC_CONTROL);
196 writeb(sec, ioaddr + RTC_SECONDS);
197 writeb(cen & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL);
198 }
199 if (readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG)
200 dev_warn(&pdev->dev, "voltage-low detected.\n");
201
202 rtc = rtc_device_register(pdev->name, &pdev->dev,
203 &ds1742_rtc_ops, THIS_MODULE);
204 if (IS_ERR(rtc)) {
205 ret = PTR_ERR(rtc);
206 goto out;
207 }
208 pdata->rtc = rtc;
209 pdata->last_jiffies = jiffies;
210 platform_set_drvdata(pdev, pdata);
211 sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
212 return 0;
213 out:
214 if (ioaddr)
215 iounmap(ioaddr);
216 if (pdata->baseaddr)
217 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
218 kfree(pdata);
219 return ret;
220}
221
222static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
223{
224 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
225
226 sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
227 rtc_device_unregister(pdata->rtc);
228 iounmap(pdata->ioaddr);
229 release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
230 kfree(pdata);
231 return 0;
232}
233
234static struct platform_driver ds1742_rtc_driver = {
235 .probe = ds1742_rtc_probe,
236 .remove = __devexit_p(ds1742_rtc_remove),
237 .driver = {
238 .name = "ds1742",
239 .owner = THIS_MODULE,
240 },
241};
242
243static __init int ds1742_init(void)
244{
245 return platform_driver_register(&ds1742_rtc_driver);
246}
247
248static __exit void ds1742_exit(void)
249{
250 return platform_driver_unregister(&ds1742_rtc_driver);
251}
252
253module_init(ds1742_init);
254module_exit(ds1742_exit);
255
256MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
257MODULE_DESCRIPTION("Dallas DS1742 RTC driver");
258MODULE_LICENSE("GPL");
259MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index cfedc1d28ee1..9812120f3a7c 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -18,9 +18,19 @@ static const unsigned char rtc_days_in_month[] = {
18 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 18 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
19}; 19};
20 20
21static const unsigned short rtc_ydays[2][13] = {
22 /* Normal years */
23 { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
24 /* Leap years */
25 { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
26};
27
21#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400) 28#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400)
22#define LEAP_YEAR(year) ((!(year % 4) && (year % 100)) || !(year % 400)) 29#define LEAP_YEAR(year) ((!(year % 4) && (year % 100)) || !(year % 400))
23 30
31/*
32 * The number of days in the month.
33 */
24int rtc_month_days(unsigned int month, unsigned int year) 34int rtc_month_days(unsigned int month, unsigned int year)
25{ 35{
26 return rtc_days_in_month[month] + (LEAP_YEAR(year) && month == 1); 36 return rtc_days_in_month[month] + (LEAP_YEAR(year) && month == 1);
@@ -28,6 +38,15 @@ int rtc_month_days(unsigned int month, unsigned int year)
28EXPORT_SYMBOL(rtc_month_days); 38EXPORT_SYMBOL(rtc_month_days);
29 39
30/* 40/*
41 * The number of days since January 1. (0 to 365)
42 */
43int rtc_year_days(unsigned int day, unsigned int month, unsigned int year)
44{
45 return rtc_ydays[LEAP_YEAR(year)][month] + day-1;
46}
47EXPORT_SYMBOL(rtc_year_days);
48
49/*
31 * Convert seconds since 01-01-1970 00:00:00 to Gregorian date. 50 * Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
32 */ 51 */
33void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) 52void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
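
The rtc_year_days() helper added above computes a 0-based day of the year from a 1-based day of the month, a 0-based month and the year, using the cumulative-day tables and the same LEAP_YEAR() test as rtc_month_days(). A host-side replica (illustrative only, not the kernel code) makes the arithmetic easy to check:

#include <stdio.h>

#define LEAP_YEAR(year) ((!((year) % 4) && ((year) % 100)) || !((year) % 400))

static const unsigned short ydays[2][13] = {
	{ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
	{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 },
};

/* day is 1-31, month is 0-11; result is the 0-based day of the year */
static int year_days(unsigned int day, unsigned int month, unsigned int year)
{
	return ydays[LEAP_YEAR(year)][month] + day - 1;
}

int main(void)
{
	printf("%d\n", year_days(1, 0, 2006));  /* Jan 1, 2006 -> 0         */
	printf("%d\n", year_days(1, 2, 2005));  /* Mar 1, 2005 -> 59        */
	printf("%d\n", year_days(1, 2, 2004));  /* Mar 1, 2004 -> 60 (leap) */
	return 0;
}
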
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
new file mode 100644
index 000000000000..2c9739562b5c
--- /dev/null
+++ b/drivers/rtc/rtc-max6902.c
@@ -0,0 +1,286 @@
1/* drivers/rtc/rtc-max6902.c
2 *
3 * Copyright (C) 2006 8D Technologies inc.
4 * Copyright (C) 2004 Compulab Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Driver for MAX6902 spi RTC
11 *
12 * Changelog:
13 *
14 * 24-May-2006: Raphael Assenat <raph@8d.com>
15 * - Major rework
16 * Converted to rtc_device and uses the SPI layer.
17 *
18 * ??-???-2005: Someone at Compulab
19 * - Initial driver creation.
20 */
21
22#include <linux/config.h>
23#include <linux/module.h>
24#include <linux/version.h>
25
26#include <linux/kernel.h>
27#include <linux/platform_device.h>
28#include <linux/init.h>
29#include <linux/rtc.h>
30#include <linux/spi/spi.h>
31#include <linux/bcd.h>
32#include <linux/delay.h>
33
34#define MAX6902_REG_SECONDS 0x01
35#define MAX6902_REG_MINUTES 0x03
36#define MAX6902_REG_HOURS 0x05
37#define MAX6902_REG_DATE 0x07
38#define MAX6902_REG_MONTH 0x09
39#define MAX6902_REG_DAY 0x0B
40#define MAX6902_REG_YEAR 0x0D
41#define MAX6902_REG_CONTROL 0x0F
42#define MAX6902_REG_CENTURY 0x13
43
44#undef MAX6902_DEBUG
45
46struct max6902 {
47 struct rtc_device *rtc;
48 u8 buf[9]; /* Burst read cmd + 8 registers */
49 u8 tx_buf[2];
50 u8 rx_buf[2];
51};
52
53static void max6902_set_reg(struct device *dev, unsigned char address,
54 unsigned char data)
55{
56 struct spi_device *spi = to_spi_device(dev);
57 unsigned char buf[2];
58
59 /* MSB must be '0' to write */
60 buf[0] = address & 0x7f;
61 buf[1] = data;
62
63 spi_write(spi, buf, 2);
64}
65
66static int max6902_get_reg(struct device *dev, unsigned char address,
67 unsigned char *data)
68{
69 struct spi_device *spi = to_spi_device(dev);
70 struct max6902 *chip = dev_get_drvdata(dev);
71 struct spi_message message;
72 struct spi_transfer xfer;
73 int status;
74
75 if (!data)
76 return -EINVAL;
77
78 /* Build our spi message */
79 spi_message_init(&message);
80 memset(&xfer, 0, sizeof(xfer));
81 xfer.len = 2;
 82 /* Can tx_buf and rx_buf be equal? The documentation in spi.h does not make this clear... */
83 xfer.tx_buf = chip->tx_buf;
84 xfer.rx_buf = chip->rx_buf;
85
86 /* Set MSB to indicate read */
87 chip->tx_buf[0] = address | 0x80;
88
89 spi_message_add_tail(&xfer, &message);
90
91 /* do the i/o */
92 status = spi_sync(spi, &message);
93 if (status == 0)
94 status = message.status;
95 else
96 return status;
97
98 *data = chip->rx_buf[1];
99
100 return status;
101}
102
103static int max6902_get_datetime(struct device *dev, struct rtc_time *dt)
104{
105 unsigned char tmp;
106 int century;
107 int err;
108 struct spi_device *spi = to_spi_device(dev);
109 struct max6902 *chip = dev_get_drvdata(dev);
110 struct spi_message message;
111 struct spi_transfer xfer;
112 int status;
113
114 err = max6902_get_reg(dev, MAX6902_REG_CENTURY, &tmp);
115 if (err)
116 return err;
117
118 /* build the message */
119 spi_message_init(&message);
120 memset(&xfer, 0, sizeof(xfer));
121 xfer.len = 1 + 7; /* Burst read command + 7 registers */
122 xfer.tx_buf = chip->buf;
123 xfer.rx_buf = chip->buf;
124 chip->buf[0] = 0xbf; /* Burst read */
125 spi_message_add_tail(&xfer, &message);
126
127 /* do the i/o */
128 status = spi_sync(spi, &message);
129 if (status == 0)
130 status = message.status;
131 else
132 return status;
133
134 /* The chip sends data in this order:
135 * Seconds, Minutes, Hours, Date, Month, Day, Year */
136 dt->tm_sec = BCD2BIN(chip->buf[1]);
137 dt->tm_min = BCD2BIN(chip->buf[2]);
138 dt->tm_hour = BCD2BIN(chip->buf[3]);
139 dt->tm_mday = BCD2BIN(chip->buf[4]);
 140 dt->tm_mon = BCD2BIN(chip->buf[5]) - 1;
141 dt->tm_wday = BCD2BIN(chip->buf[6]);
142 dt->tm_year = BCD2BIN(chip->buf[7]);
143
144 century = BCD2BIN(tmp) * 100;
145
146 dt->tm_year += century;
147 dt->tm_year -= 1900;
148
149#ifdef MAX6902_DEBUG
150 printk("\n%s : Read RTC values\n",__FUNCTION__);
151 printk("tm_hour: %i\n",dt->tm_hour);
152 printk("tm_min : %i\n",dt->tm_min);
153 printk("tm_sec : %i\n",dt->tm_sec);
154 printk("tm_year: %i\n",dt->tm_year);
155 printk("tm_mon : %i\n",dt->tm_mon);
156 printk("tm_mday: %i\n",dt->tm_mday);
157 printk("tm_wday: %i\n",dt->tm_wday);
158#endif
159
160 return 0;
161}
162
163static int max6902_set_datetime(struct device *dev, struct rtc_time *dt)
164{
165 dt->tm_year = dt->tm_year+1900;
166
167#ifdef MAX6902_DEBUG
168 printk("\n%s : Setting RTC values\n",__FUNCTION__);
169 printk("tm_sec : %i\n",dt->tm_sec);
170 printk("tm_min : %i\n",dt->tm_min);
171 printk("tm_hour: %i\n",dt->tm_hour);
172 printk("tm_mday: %i\n",dt->tm_mday);
173 printk("tm_wday: %i\n",dt->tm_wday);
174 printk("tm_year: %i\n",dt->tm_year);
175#endif
176
177 /* Remove write protection */
178 max6902_set_reg(dev, 0xF, 0);
179
180 max6902_set_reg(dev, 0x01, BIN2BCD(dt->tm_sec));
181 max6902_set_reg(dev, 0x03, BIN2BCD(dt->tm_min));
182 max6902_set_reg(dev, 0x05, BIN2BCD(dt->tm_hour));
183
184 max6902_set_reg(dev, 0x07, BIN2BCD(dt->tm_mday));
185 max6902_set_reg(dev, 0x09, BIN2BCD(dt->tm_mon+1));
186 max6902_set_reg(dev, 0x0B, BIN2BCD(dt->tm_wday));
187 max6902_set_reg(dev, 0x0D, BIN2BCD(dt->tm_year%100));
188 max6902_set_reg(dev, 0x13, BIN2BCD(dt->tm_year/100));
189
190 /* Compulab used a delay here. However, the datasheet
191 * does not mention a delay being required anywhere... */
192 /* delay(2000); */
193
194 /* Write protect */
195 max6902_set_reg(dev, 0xF, 0x80);
196
197 return 0;
198}
199
200static int max6902_read_time(struct device *dev, struct rtc_time *tm)
201{
202 return max6902_get_datetime(dev, tm);
203}
204
205static int max6902_set_time(struct device *dev, struct rtc_time *tm)
206{
207 return max6902_set_datetime(dev, tm);
208}
209
210static struct rtc_class_ops max6902_rtc_ops = {
211 .read_time = max6902_read_time,
212 .set_time = max6902_set_time,
213};
214
215static int __devinit max6902_probe(struct spi_device *spi)
216{
217 struct rtc_device *rtc;
218 unsigned char tmp;
219 struct max6902 *chip;
220 int res;
221
222 rtc = rtc_device_register("max6902",
223 &spi->dev, &max6902_rtc_ops, THIS_MODULE);
224 if (IS_ERR(rtc))
225 return PTR_ERR(rtc);
226
227 spi->mode = SPI_MODE_3;
228 spi->bits_per_word = 8;
229 spi_setup(spi);
230
231 chip = kzalloc(sizeof *chip, GFP_KERNEL);
232 if (!chip) {
233 rtc_device_unregister(rtc);
234 return -ENOMEM;
235 }
236 chip->rtc = rtc;
237 dev_set_drvdata(&spi->dev, chip);
238
239 res = max6902_get_reg(&spi->dev, MAX6902_REG_SECONDS, &tmp);
240 if (res) {
241 rtc_device_unregister(rtc);
242 return res;
243 }
244
245 return 0;
246}
247
248static int __devexit max6902_remove(struct spi_device *spi)
249{
250 struct max6902 *chip = platform_get_drvdata(spi);
251 struct rtc_device *rtc = chip->rtc;
252
253 if (rtc)
254 rtc_device_unregister(rtc);
255
256 kfree(chip);
257
258 return 0;
259}
260
261static struct spi_driver max6902_driver = {
262 .driver = {
263 .name = "max6902",
264 .bus = &spi_bus_type,
265 .owner = THIS_MODULE,
266 },
267 .probe = max6902_probe,
268 .remove = __devexit_p(max6902_remove),
269};
270
271static __init int max6902_init(void)
272{
273 printk("max6902 spi driver\n");
274 return spi_register_driver(&max6902_driver);
275}
276module_init(max6902_init);
277
278static __exit void max6902_exit(void)
279{
280 spi_unregister_driver(&max6902_driver);
281}
282module_exit(max6902_exit);
283
284MODULE_DESCRIPTION ("max6902 spi RTC driver");
285MODULE_AUTHOR ("Raphael Assenat");
286MODULE_LICENSE ("GPL");
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
new file mode 100644
index 000000000000..b235a30cb661
--- /dev/null
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -0,0 +1,394 @@
1/*
2 * drivers/rtc/rtc-pcf8583.c
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Driver for PCF8583 RTC & RAM chip
11 *
 12 * Converted to the generic RTC subsystem by G. Liakhovetski (2006)
13 */
14#include <linux/module.h>
15#include <linux/i2c.h>
16#include <linux/slab.h>
17#include <linux/string.h>
18#include <linux/mc146818rtc.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/bcd.h>
22
23struct rtc_mem {
24 unsigned int loc;
25 unsigned int nr;
26 unsigned char *data;
27};
28
29struct pcf8583 {
30 struct i2c_client client;
31 struct rtc_device *rtc;
32 unsigned char ctrl;
33};
34
35#define CTRL_STOP 0x80
36#define CTRL_HOLD 0x40
37#define CTRL_32KHZ 0x00
38#define CTRL_MASK 0x08
39#define CTRL_ALARMEN 0x04
40#define CTRL_ALARM 0x02
41#define CTRL_TIMER 0x01
42
43static unsigned short normal_i2c[] = { I2C_CLIENT_END };
44
45/* Module parameters */
46I2C_CLIENT_INSMOD;
47
48static struct i2c_driver pcf8583_driver;
49
50#define get_ctrl(x) ((struct pcf8583 *)i2c_get_clientdata(x))->ctrl
51#define set_ctrl(x, v) get_ctrl(x) = v
52
53#define CMOS_YEAR (64 + 128)
54#define CMOS_CHECKSUM (63)
55
56static int pcf8583_get_datetime(struct i2c_client *client, struct rtc_time *dt)
57{
58 unsigned char buf[8], addr[1] = { 1 };
59 struct i2c_msg msgs[2] = {
60 {
61 .addr = client->addr,
62 .flags = 0,
63 .len = 1,
64 .buf = addr,
65 }, {
66 .addr = client->addr,
67 .flags = I2C_M_RD,
68 .len = 6,
69 .buf = buf,
70 }
71 };
72 int ret;
73
74 memset(buf, 0, sizeof(buf));
75
76 ret = i2c_transfer(client->adapter, msgs, 2);
77 if (ret == 2) {
78 dt->tm_year = buf[4] >> 6;
79 dt->tm_wday = buf[5] >> 5;
80
81 buf[4] &= 0x3f;
82 buf[5] &= 0x1f;
83
84 dt->tm_sec = BCD_TO_BIN(buf[1]);
85 dt->tm_min = BCD_TO_BIN(buf[2]);
86 dt->tm_hour = BCD_TO_BIN(buf[3]);
87 dt->tm_mday = BCD_TO_BIN(buf[4]);
88 dt->tm_mon = BCD_TO_BIN(buf[5]);
89 }
90
91 return ret == 2 ? 0 : -EIO;
92}
93
94static int pcf8583_set_datetime(struct i2c_client *client, struct rtc_time *dt, int datetoo)
95{
96 unsigned char buf[8];
97 int ret, len = 6;
98
99 buf[0] = 0;
100 buf[1] = get_ctrl(client) | 0x80;
101 buf[2] = 0;
102 buf[3] = BIN_TO_BCD(dt->tm_sec);
103 buf[4] = BIN_TO_BCD(dt->tm_min);
104 buf[5] = BIN_TO_BCD(dt->tm_hour);
105
106 if (datetoo) {
107 len = 8;
108 buf[6] = BIN_TO_BCD(dt->tm_mday) | (dt->tm_year << 6);
109 buf[7] = BIN_TO_BCD(dt->tm_mon) | (dt->tm_wday << 5);
110 }
111
112 ret = i2c_master_send(client, (char *)buf, len);
113 if (ret != len)
114 return -EIO;
115
116 buf[1] = get_ctrl(client);
117 ret = i2c_master_send(client, (char *)buf, 2);
118
119 return ret == 2 ? 0 : -EIO;
120}
121
122static int pcf8583_get_ctrl(struct i2c_client *client, unsigned char *ctrl)
123{
124 *ctrl = get_ctrl(client);
125 return 0;
126}
127
128static int pcf8583_set_ctrl(struct i2c_client *client, unsigned char *ctrl)
129{
130 unsigned char buf[2];
131
132 buf[0] = 0;
133 buf[1] = *ctrl;
134 set_ctrl(client, *ctrl);
135
136 return i2c_master_send(client, (char *)buf, 2);
137}
138
139static int pcf8583_read_mem(struct i2c_client *client, struct rtc_mem *mem)
140{
141 unsigned char addr[1];
142 struct i2c_msg msgs[2] = {
143 {
144 .addr = client->addr,
145 .flags = 0,
146 .len = 1,
147 .buf = addr,
148 }, {
149 .addr = client->addr,
150 .flags = I2C_M_RD,
151 .len = mem->nr,
152 .buf = mem->data,
153 }
154 };
155
156 if (mem->loc < 8)
157 return -EINVAL;
158
159 addr[0] = mem->loc;
160
161 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
162}
163
164static int pcf8583_write_mem(struct i2c_client *client, struct rtc_mem *mem)
165{
166 unsigned char addr[1];
167 struct i2c_msg msgs[2] = {
168 {
169 .addr = client->addr,
170 .flags = 0,
171 .len = 1,
172 .buf = addr,
173 }, {
174 .addr = client->addr,
175 .flags = I2C_M_NOSTART,
176 .len = mem->nr,
177 .buf = mem->data,
178 }
179 };
180
181 if (mem->loc < 8)
182 return -EINVAL;
183
184 addr[0] = mem->loc;
185
186 return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
187}
188
189static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm)
190{
191 struct i2c_client *client = to_i2c_client(dev);
192 unsigned char ctrl, year[2];
193 struct rtc_mem mem = { CMOS_YEAR, sizeof(year), year };
194 int real_year, year_offset, err;
195
196 /*
197 * Ensure that the RTC is running.
198 */
199 pcf8583_get_ctrl(client, &ctrl);
200 if (ctrl & (CTRL_STOP | CTRL_HOLD)) {
201 unsigned char new_ctrl = ctrl & ~(CTRL_STOP | CTRL_HOLD);
202
203 printk(KERN_WARNING "RTC: resetting control %02x -> %02x\n",
204 ctrl, new_ctrl);
205
206 if ((err = pcf8583_set_ctrl(client, &new_ctrl)) < 0)
207 return err;
208 }
209
210 if (pcf8583_get_datetime(client, tm) ||
211 pcf8583_read_mem(client, &mem))
212 return -EIO;
213
214 real_year = year[0];
215
216 /*
217 * The RTC year holds the LSB two bits of the current
218 * year, which should reflect the LSB two bits of the
219 * CMOS copy of the year. Any difference indicates
220 * that we have to correct the CMOS version.
221 */
222 year_offset = tm->tm_year - (real_year & 3);
223 if (year_offset < 0)
224 /*
225 * RTC year wrapped. Adjust it appropriately.
226 */
227 year_offset += 4;
228
229 tm->tm_year = real_year + year_offset + year[1] * 100;
230
231 return 0;
232}
233
234static int pcf8583_rtc_set_time(struct device *dev, struct rtc_time *tm)
235{
236 struct i2c_client *client = to_i2c_client(dev);
237 unsigned char year[2], chk;
238 struct rtc_mem cmos_year = { CMOS_YEAR, sizeof(year), year };
239 struct rtc_mem cmos_check = { CMOS_CHECKSUM, 1, &chk };
240 int ret;
241
242 /*
243 * The RTC's own 2-bit year must reflect the least
244 * significant two bits of the CMOS year.
245 */
246
247 ret = pcf8583_set_datetime(client, tm, 1);
248 if (ret)
249 return ret;
250
251 ret = pcf8583_read_mem(client, &cmos_check);
252 if (ret)
253 return ret;
254
255 ret = pcf8583_read_mem(client, &cmos_year);
256 if (ret)
257 return ret;
258
259 chk -= year[1] + year[0];
260
261 year[1] = tm->tm_year / 100;
262 year[0] = tm->tm_year % 100;
263
264 chk += year[1] + year[0];
265
266 ret = pcf8583_write_mem(client, &cmos_year);
267
268 if (ret)
269 return ret;
270
271 ret = pcf8583_write_mem(client, &cmos_check);
272
273 return ret;
274}
275
276static struct rtc_class_ops pcf8583_rtc_ops = {
277 .read_time = pcf8583_rtc_read_time,
278 .set_time = pcf8583_rtc_set_time,
279};
280
281static int pcf8583_probe(struct i2c_adapter *adap, int addr, int kind);
282
283static int pcf8583_attach(struct i2c_adapter *adap)
284{
285 return i2c_probe(adap, &addr_data, pcf8583_probe);
286}
287
288static int pcf8583_detach(struct i2c_client *client)
289{
290 int err;
291 struct pcf8583 *pcf = i2c_get_clientdata(client);
292 struct rtc_device *rtc = pcf->rtc;
293
294 if (rtc)
295 rtc_device_unregister(rtc);
296
297 if ((err = i2c_detach_client(client)))
298 return err;
299
300 kfree(pcf);
301 return 0;
302}
303
304static struct i2c_driver pcf8583_driver = {
305 .driver = {
306 .name = "pcf8583",
307 },
308 .id = I2C_DRIVERID_PCF8583,
309 .attach_adapter = pcf8583_attach,
310 .detach_client = pcf8583_detach,
311};
312
313static int pcf8583_probe(struct i2c_adapter *adap, int addr, int kind)
314{
315 struct pcf8583 *pcf;
316 struct i2c_client *client;
317 struct rtc_device *rtc;
318 unsigned char buf[1], ad[1] = { 0 };
319 int err;
320 struct i2c_msg msgs[2] = {
321 {
322 .addr = addr,
323 .flags = 0,
324 .len = 1,
325 .buf = ad,
326 }, {
327 .addr = addr,
328 .flags = I2C_M_RD,
329 .len = 1,
330 .buf = buf,
331 }
332 };
333
334 pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
335 if (!pcf)
336 return -ENOMEM;
337
338 client = &pcf->client;
339
340 client->addr = addr;
341 client->adapter = adap;
342 client->driver = &pcf8583_driver;
343
344 strlcpy(client->name, pcf8583_driver.driver.name, I2C_NAME_SIZE);
345
346 if (i2c_transfer(client->adapter, msgs, 2) != 2) {
347 err = -EIO;
348 goto exit_kfree;
349 }
350
351 err = i2c_attach_client(client);
352
353 if (err)
354 goto exit_kfree;
355
356 rtc = rtc_device_register(pcf8583_driver.driver.name, &client->dev,
357 &pcf8583_rtc_ops, THIS_MODULE);
358
359 if (IS_ERR(rtc)) {
360 err = PTR_ERR(rtc);
361 goto exit_detach;
362 }
363
364 pcf->rtc = rtc;
365 i2c_set_clientdata(client, pcf);
366 set_ctrl(client, buf[0]);
367
368 return 0;
369
370exit_detach:
371 i2c_detach_client(client);
372
373exit_kfree:
374 kfree(pcf);
375
376 return err;
377}
378
379static __init int pcf8583_init(void)
380{
381 return i2c_add_driver(&pcf8583_driver);
382}
383
384static __exit void pcf8583_exit(void)
385{
386 i2c_del_driver(&pcf8583_driver);
387}
388
389module_init(pcf8583_init);
390module_exit(pcf8583_exit);
391
392MODULE_AUTHOR("Russell King");
393MODULE_DESCRIPTION("PCF8583 I2C RTC driver");
394MODULE_LICENSE("GPL");
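
pcf8583_rtc_read_time() above rebuilds a full year from the chip's 2-bit year counter plus the year copy kept in CMOS RAM: any difference between the counter and the low two bits of the copy means the counter has wrapped, so the copy is advanced by up to three years before the century is added back. The host-side sketch below (rebuild_year() is an illustrative name, not a driver function) reproduces that arithmetic on values stored the way the driver stores them, i.e. years since 1900:

#include <stdio.h>

static int rebuild_year(int rtc_year2, int cmos_year, int cmos_century)
{
	int offset = rtc_year2 - (cmos_year & 3);

	if (offset < 0)		/* the 2-bit counter wrapped past the copy */
		offset += 4;
	return cmos_year + offset + cmos_century * 100;
}

int main(void)
{
	/* values are years since 1900, as the driver keeps them in CMOS RAM */
	printf("%d\n", rebuild_year(0, 99, 0)); /* copy says 1999, RTC wrapped -> 100 (2000) */
	printf("%d\n", rebuild_year(2, 6, 1));  /* copy says 2006, no wrap     -> 106 (2006) */
	return 0;
}
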
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
new file mode 100644
index 000000000000..ee538632660b
--- /dev/null
+++ b/drivers/rtc/rtc-pl031.c
@@ -0,0 +1,233 @@
1/*
2 * drivers/rtc/rtc-pl031.c
3 *
4 * Real Time Clock interface for ARM AMBA PrimeCell 031 RTC
5 *
6 * Author: Deepak Saxena <dsaxena@plexity.net>
7 *
8 * Copyright 2006 (c) MontaVista Software, Inc.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/platform_device.h>
17#include <linux/module.h>
18#include <linux/rtc.h>
19#include <linux/init.h>
20#include <linux/fs.h>
21#include <linux/interrupt.h>
22#include <linux/string.h>
23#include <linux/pm.h>
24
25#include <linux/amba/bus.h>
26
27#include <asm/io.h>
28#include <asm/bitops.h>
29#include <asm/hardware.h>
30#include <asm/irq.h>
31#include <asm/rtc.h>
32
33/*
34 * Register definitions
35 */
36#define RTC_DR 0x00 /* Data read register */
37#define RTC_MR 0x04 /* Match register */
38#define RTC_LR 0x08 /* Data load register */
39#define RTC_CR 0x0c /* Control register */
40#define RTC_IMSC 0x10 /* Interrupt mask and set register */
41#define RTC_RIS 0x14 /* Raw interrupt status register */
42#define RTC_MIS 0x18 /* Masked interrupt status register */
43#define RTC_ICR 0x1c /* Interrupt clear register */
44
45struct pl031_local {
46 struct rtc_device *rtc;
47 void __iomem *base;
48};
49
50static irqreturn_t pl031_interrupt(int irq, void *dev_id, struct pt_regs *regs)
51{
52 struct rtc_device *rtc = dev_id;
53
54 rtc_update_irq(&rtc->class_dev, 1, RTC_AF);
55
56 return IRQ_HANDLED;
57}
58
59static int pl031_open(struct device *dev)
60{
61 /*
62 * We request IRQ in pl031_probe, so nothing to do here...
63 */
64 return 0;
65}
66
67static void pl031_release(struct device *dev)
68{
69}
70
71static int pl031_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
72{
73 struct pl031_local *ldata = dev_get_drvdata(dev);
74
75 switch (cmd) {
76 case RTC_AIE_OFF:
77 __raw_writel(1, ldata->base + RTC_MIS);
78 return 0;
79 case RTC_AIE_ON:
80 __raw_writel(0, ldata->base + RTC_MIS);
81 return 0;
82 }
83
84 return -ENOIOCTLCMD;
85}
86
87static int pl031_read_time(struct device *dev, struct rtc_time *tm)
88{
89 struct pl031_local *ldata = dev_get_drvdata(dev);
90
91 rtc_time_to_tm(__raw_readl(ldata->base + RTC_DR), tm);
92
93 return 0;
94}
95
96static int pl031_set_time(struct device *dev, struct rtc_time *tm)
97{
98 unsigned long time;
99 struct pl031_local *ldata = dev_get_drvdata(dev);
100
101 rtc_tm_to_time(tm, &time);
102 __raw_writel(time, ldata->base + RTC_LR);
103
104 return 0;
105}
106
107static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
108{
109 struct pl031_local *ldata = dev_get_drvdata(dev);
110
111 rtc_time_to_tm(__raw_readl(ldata->base + RTC_MR), &alarm->time);
112 alarm->pending = __raw_readl(ldata->base + RTC_RIS);
113 alarm->enabled = __raw_readl(ldata->base + RTC_IMSC);
114
115 return 0;
116}
117
118static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
119{
120 struct pl031_local *ldata = dev_get_drvdata(dev);
121 unsigned long time;
122
123 rtc_tm_to_time(&alarm->time, &time);
124
125 __raw_writel(time, ldata->base + RTC_MR);
126 __raw_writel(!alarm->enabled, ldata->base + RTC_MIS);
127
128 return 0;
129}
130
131static struct rtc_class_ops pl031_ops = {
132 .open = pl031_open,
133 .release = pl031_release,
134 .ioctl = pl031_ioctl,
135 .read_time = pl031_read_time,
136 .set_time = pl031_set_time,
137 .read_alarm = pl031_read_alarm,
138 .set_alarm = pl031_set_alarm,
139};
140
141static int pl031_remove(struct amba_device *adev)
142{
143 struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
144
145 if (ldata) {
146 dev_set_drvdata(&adev->dev, NULL);
147 free_irq(adev->irq[0], ldata->rtc);
148 rtc_device_unregister(ldata->rtc);
149 iounmap(ldata->base);
150 kfree(ldata);
151 }
152
153 return 0;
154}
155
156static int pl031_probe(struct amba_device *adev, void *id)
157{
158 int ret;
159 struct pl031_local *ldata;
160
161
162 ldata = kmalloc(sizeof(struct pl031_local), GFP_KERNEL);
163 if (!ldata) {
164 ret = -ENOMEM;
165 goto out;
166 }
167 dev_set_drvdata(&adev->dev, ldata);
168
169 ldata->base = ioremap(adev->res.start,
170 adev->res.end - adev->res.start + 1);
171 if (!ldata->base) {
172 ret = -ENOMEM;
173 goto out_no_remap;
174 }
175
176 if (request_irq(adev->irq[0], pl031_interrupt, SA_INTERRUPT,
177 "rtc-pl031", ldata->rtc)) {
178 ret = -EIO;
179 goto out_no_irq;
180 }
181
182 ldata->rtc = rtc_device_register("pl031", &adev->dev, &pl031_ops,
183 THIS_MODULE);
184 if (IS_ERR(ldata->rtc)) {
185 ret = PTR_ERR(ldata->rtc);
186 goto out_no_rtc;
187 }
188
189 return 0;
190
191out_no_rtc:
192 free_irq(adev->irq[0], ldata->rtc);
193out_no_irq:
194 iounmap(ldata->base);
195out_no_remap:
196 dev_set_drvdata(&adev->dev, NULL);
197 kfree(ldata);
198out:
199 return ret;
200}
201
202static struct amba_id pl031_ids[] __initdata = {
203 {
204 .id = 0x00041031,
205 .mask = 0x000fffff, },
206 {0, 0},
207};
208
209static struct amba_driver pl031_driver = {
210 .drv = {
211 .name = "rtc-pl031",
212 },
213 .id_table = pl031_ids,
214 .probe = pl031_probe,
215 .remove = pl031_remove,
216};
217
218static int __init pl031_init(void)
219{
220 return amba_driver_register(&pl031_driver);
221}
222
223static void __exit pl031_exit(void)
224{
225 amba_driver_unregister(&pl031_driver);
226}
227
228module_init(pl031_init);
229module_exit(pl031_exit);
230
 231MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
232MODULE_DESCRIPTION("ARM AMBA PL031 RTC Driver");
233MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index a997529f8926..ab486fbc828d 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -229,8 +229,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
229 spin_unlock_irq(&sa1100_rtc_lock); 229 spin_unlock_irq(&sa1100_rtc_lock);
230 return 0; 230 return 0;
231 case RTC_PIE_ON: 231 case RTC_PIE_ON:
232 if ((rtc_freq > 64) && !capable(CAP_SYS_RESOURCE))
233 return -EACCES;
234 spin_lock_irq(&sa1100_rtc_lock); 232 spin_lock_irq(&sa1100_rtc_lock);
235 OSMR1 = TIMER_FREQ/rtc_freq + OSCR; 233 OSMR1 = TIMER_FREQ/rtc_freq + OSCR;
236 OIER |= OIER_E1; 234 OIER |= OIER_E1;
@@ -242,8 +240,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
242 case RTC_IRQP_SET: 240 case RTC_IRQP_SET:
243 if (arg < 1 || arg > TIMER_FREQ) 241 if (arg < 1 || arg > TIMER_FREQ)
244 return -EINVAL; 242 return -EINVAL;
245 if ((arg > 64) && (!capable(CAP_SYS_RESOURCE)))
246 return -EACCES;
247 rtc_freq = arg; 243 rtc_freq = arg;
248 return 0; 244 return 0;
249 } 245 }
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
new file mode 100644
index 000000000000..a40f400acff6
--- /dev/null
+++ b/drivers/rtc/rtc-v3020.c
@@ -0,0 +1,264 @@
1/* drivers/rtc/rtc-v3020.c
2 *
3 * Copyright (C) 2006 8D Technologies inc.
4 * Copyright (C) 2004 Compulab Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Driver for the V3020 RTC
11 *
12 * Changelog:
13 *
14 * 10-May-2006: Raphael Assenat <raph@8d.com>
15 * - Converted to platform driver
16 * - Use the generic rtc class
17 *
18 * ??-???-2004: Someone at Compulab
19 * - Initial driver creation.
20 *
21 */
22#include <linux/platform_device.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/rtc.h>
26#include <linux/types.h>
27#include <linux/bcd.h>
28#include <linux/rtc-v3020.h>
29
30#include <asm/io.h>
31
32#undef DEBUG
33
34struct v3020 {
35 void __iomem *ioaddress;
36 int leftshift;
37 struct rtc_device *rtc;
38};
39
40static void v3020_set_reg(struct v3020 *chip, unsigned char address,
41 unsigned char data)
42{
43 int i;
44 unsigned char tmp;
45
46 tmp = address;
47 for (i = 0; i < 4; i++) {
48 writel((tmp & 1) << chip->leftshift, chip->ioaddress);
49 tmp >>= 1;
50 }
51
52 /* Commands dont have data */
53 if (!V3020_IS_COMMAND(address)) {
54 for (i = 0; i < 8; i++) {
55 writel((data & 1) << chip->leftshift, chip->ioaddress);
56 data >>= 1;
57 }
58 }
59}
60
61static unsigned char v3020_get_reg(struct v3020 *chip, unsigned char address)
62{
63 unsigned int data=0;
64 int i;
65
66 for (i = 0; i < 4; i++) {
67 writel((address & 1) << chip->leftshift, chip->ioaddress);
68 address >>= 1;
69 }
70
71 for (i = 0; i < 8; i++) {
72 data >>= 1;
73 if (readl(chip->ioaddress) & (1 << chip->leftshift))
74 data |= 0x80;
75 }
76
77 return data;
78}
79
80static int v3020_read_time(struct device *dev, struct rtc_time *dt)
81{
82 struct v3020 *chip = dev_get_drvdata(dev);
83 int tmp;
84
85 /* Copy the current time to ram... */
86 v3020_set_reg(chip, V3020_CMD_CLOCK2RAM, 0);
87
88 /* ...and then read constant values. */
89 tmp = v3020_get_reg(chip, V3020_SECONDS);
90 dt->tm_sec = BCD2BIN(tmp);
91 tmp = v3020_get_reg(chip, V3020_MINUTES);
92 dt->tm_min = BCD2BIN(tmp);
93 tmp = v3020_get_reg(chip, V3020_HOURS);
94 dt->tm_hour = BCD2BIN(tmp);
95 tmp = v3020_get_reg(chip, V3020_MONTH_DAY);
96 dt->tm_mday = BCD2BIN(tmp);
97 tmp = v3020_get_reg(chip, V3020_MONTH);
98 dt->tm_mon = BCD2BIN(tmp);
99 tmp = v3020_get_reg(chip, V3020_WEEK_DAY);
100 dt->tm_wday = BCD2BIN(tmp);
101 tmp = v3020_get_reg(chip, V3020_YEAR);
102 dt->tm_year = BCD2BIN(tmp)+100;
103
104#ifdef DEBUG
105 printk("\n%s : Read RTC values\n",__FUNCTION__);
106 printk("tm_hour: %i\n",dt->tm_hour);
107 printk("tm_min : %i\n",dt->tm_min);
108 printk("tm_sec : %i\n",dt->tm_sec);
109 printk("tm_year: %i\n",dt->tm_year);
110 printk("tm_mon : %i\n",dt->tm_mon);
111 printk("tm_mday: %i\n",dt->tm_mday);
112 printk("tm_wday: %i\n",dt->tm_wday);
113#endif
114
115 return 0;
116}
117
118
119static int v3020_set_time(struct device *dev, struct rtc_time *dt)
120{
121 struct v3020 *chip = dev_get_drvdata(dev);
122
123#ifdef DEBUG
124 printk("\n%s : Setting RTC values\n",__FUNCTION__);
125 printk("tm_sec : %i\n",dt->tm_sec);
126 printk("tm_min : %i\n",dt->tm_min);
127 printk("tm_hour: %i\n",dt->tm_hour);
128 printk("tm_mday: %i\n",dt->tm_mday);
129 printk("tm_wday: %i\n",dt->tm_wday);
130 printk("tm_year: %i\n",dt->tm_year);
131#endif
132
133 /* Write all the values to ram... */
134 v3020_set_reg(chip, V3020_SECONDS, BIN2BCD(dt->tm_sec));
135 v3020_set_reg(chip, V3020_MINUTES, BIN2BCD(dt->tm_min));
136 v3020_set_reg(chip, V3020_HOURS, BIN2BCD(dt->tm_hour));
137 v3020_set_reg(chip, V3020_MONTH_DAY, BIN2BCD(dt->tm_mday));
138 v3020_set_reg(chip, V3020_MONTH, BIN2BCD(dt->tm_mon));
139 v3020_set_reg(chip, V3020_WEEK_DAY, BIN2BCD(dt->tm_wday));
140 v3020_set_reg(chip, V3020_YEAR, BIN2BCD(dt->tm_year % 100));
141
142 /* ...and set the clock. */
143 v3020_set_reg(chip, V3020_CMD_RAM2CLOCK, 0);
144
 145 /* Compulab used this delay here. I don't know why,
146 * the datasheet does not specify a delay. */
147 /*mdelay(5);*/
148
149 return 0;
150}
151
152static struct rtc_class_ops v3020_rtc_ops = {
153 .read_time = v3020_read_time,
154 .set_time = v3020_set_time,
155};
156
157static int rtc_probe(struct platform_device *pdev)
158{
159 struct v3020_platform_data *pdata = pdev->dev.platform_data;
160 struct v3020 *chip;
161 struct rtc_device *rtc;
162 int retval = -EBUSY;
163 int i;
164 int temp;
165
166 if (pdev->num_resources != 1)
167 return -EBUSY;
168
169 if (pdev->resource[0].flags != IORESOURCE_MEM)
170 return -EBUSY;
171
172 if (pdev == NULL)
173 return -EBUSY;
174
175 chip = kzalloc(sizeof *chip, GFP_KERNEL);
176 if (!chip)
177 return -ENOMEM;
178
179 chip->leftshift = pdata->leftshift;
180 chip->ioaddress = ioremap(pdev->resource[0].start, 1);
181 if (chip->ioaddress == NULL)
182 goto err_chip;
183
184 /* Make sure the v3020 expects a communication cycle
185 * by reading 8 times */
186 for (i = 0; i < 8; i++)
187 temp = readl(chip->ioaddress);
188
189 /* Test chip by doing a write/read sequence
190 * to the chip ram */
191 v3020_set_reg(chip, V3020_SECONDS, 0x33);
192 if(v3020_get_reg(chip, V3020_SECONDS) != 0x33) {
193 retval = -ENODEV;
194 goto err_io;
195 }
196
 197 /* Make sure frequency measurement mode, test modes, and lock
198 * are all disabled */
199 v3020_set_reg(chip, V3020_STATUS_0, 0x0);
200
 201 dev_info(&pdev->dev, "Chip available at physical address 0x%p, "
202 "data connected to D%d\n",
203 (void*)pdev->resource[0].start,
204 chip->leftshift);
205
206 platform_set_drvdata(pdev, chip);
207
208 rtc = rtc_device_register("v3020",
209 &pdev->dev, &v3020_rtc_ops, THIS_MODULE);
210 if (IS_ERR(rtc)) {
211 retval = PTR_ERR(rtc);
212 goto err_io;
213 }
214 chip->rtc = rtc;
215
216 return 0;
217
218err_io:
219 iounmap(chip->ioaddress);
220err_chip:
221 kfree(chip);
222
223 return retval;
224}
225
226static int rtc_remove(struct platform_device *dev)
227{
228 struct v3020 *chip = platform_get_drvdata(dev);
229 struct rtc_device *rtc = chip->rtc;
230
231 if (rtc)
232 rtc_device_unregister(rtc);
233
234 iounmap(chip->ioaddress);
235 kfree(chip);
236
237 return 0;
238}
239
240static struct platform_driver rtc_device_driver = {
241 .probe = rtc_probe,
242 .remove = rtc_remove,
243 .driver = {
244 .name = "v3020",
245 .owner = THIS_MODULE,
246 },
247};
248
249static __init int v3020_init(void)
250{
251 return platform_driver_register(&rtc_device_driver);
252}
253
254static __exit void v3020_exit(void)
255{
256 platform_driver_unregister(&rtc_device_driver);
257}
258
259module_init(v3020_init);
260module_exit(v3020_exit);
261
262MODULE_DESCRIPTION("V3020 RTC");
263MODULE_AUTHOR("Raphael Assenat");
264MODULE_LICENSE("GPL");
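
v3020_set_reg() and v3020_get_reg() above talk to the chip over a single memory-mapped data line: a 4-bit register address is clocked out least-significant bit first, followed (for non-command registers) by 8 data bits, each bit placed on the data line selected by the platform's leftshift value; reads clock the reply back in the same way. A host-side sketch of the outgoing bit stream, covering the write direction only (emit_bits() is illustrative, not part of the driver):

#include <stdio.h>

/* print one word per bit, LSB first, with the bit placed on data line D<leftshift> */
static void emit_bits(unsigned int value, int nbits, int leftshift)
{
	int i;

	for (i = 0; i < nbits; i++) {
		unsigned int word = (value & 1) << leftshift;

		printf("write 0x%08x\n", word);	/* one writel() per bit */
		value >>= 1;
	}
}

int main(void)
{
	/* shift out register address 0x1 followed by the value 0x33,
	 * with the chip's data pin wired to host data bit D2 */
	emit_bits(0x1, 4, 2);	/* address bits */
	emit_bits(0x33, 8, 2);	/* data bits    */
	return 0;
}
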
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 277596c302e3..33e029207e26 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -81,7 +81,6 @@ MODULE_LICENSE("GPL");
81 81
82#define RTC_FREQUENCY 32768 82#define RTC_FREQUENCY 32768
83#define MAX_PERIODIC_RATE 6553 83#define MAX_PERIODIC_RATE 6553
84#define MAX_USER_PERIODIC_RATE 64
85 84
86static void __iomem *rtc1_base; 85static void __iomem *rtc1_base;
87static void __iomem *rtc2_base; 86static void __iomem *rtc2_base;
@@ -240,9 +239,6 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
240 if (arg > MAX_PERIODIC_RATE) 239 if (arg > MAX_PERIODIC_RATE)
241 return -EINVAL; 240 return -EINVAL;
242 241
243 if (arg > MAX_USER_PERIODIC_RATE && capable(CAP_SYS_RESOURCE) == 0)
244 return -EACCES;
245
246 periodic_frequency = arg; 242 periodic_frequency = arg;
247 243
248 count = RTC_FREQUENCY; 244 count = RTC_FREQUENCY;
@@ -263,10 +259,6 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
263 /* Doesn't support before 1900 */ 259 /* Doesn't support before 1900 */
264 if (arg < 1900) 260 if (arg < 1900)
265 return -EINVAL; 261 return -EINVAL;
266
267 if (capable(CAP_SYS_TIME) == 0)
268 return -EACCES;
269
270 epoch = arg; 262 epoch = arg;
271 break; 263 break;
272 default: 264 default:
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 56fa69168898..a4c53c172db6 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -13,6 +13,7 @@
13#include <linux/cpumask.h> 13#include <linux/cpumask.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/reboot.h>
16#include <asm/atomic.h> 17#include <asm/atomic.h>
17#include <asm/ptrace.h> 18#include <asm/ptrace.h>
18#include <asm/sigp.h> 19#include <asm/sigp.h>
@@ -66,8 +67,6 @@ do_machine_quiesce(void)
66} 67}
67#endif 68#endif
68 69
69extern void ctrl_alt_del(void);
70
71/* Handler for quiesce event. Start shutdown procedure. */ 70/* Handler for quiesce event. Start shutdown procedure. */
72static void 71static void
73sclp_quiesce_handler(struct evbuf_header *evbuf) 72sclp_quiesce_handler(struct evbuf_header *evbuf)
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 5ae684c011f8..31b8a5f6116f 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -71,7 +71,6 @@ flash_mmap(struct file *file, struct vm_area_struct *vma)
71 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) 71 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size)
72 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); 72 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT));
73 73
74 vma->vm_flags |= (VM_SHM | VM_LOCKED);
75 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 74 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
76 75
77 if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot)) 76 if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot))
diff --git a/drivers/sbus/char/vfc_dev.c b/drivers/sbus/char/vfc_dev.c
index dfdd6be551f3..ddcd330b9e89 100644
--- a/drivers/sbus/char/vfc_dev.c
+++ b/drivers/sbus/char/vfc_dev.c
@@ -623,7 +623,7 @@ static int vfc_mmap(struct file *file, struct vm_area_struct *vma)
623 map_size = sizeof(struct vfc_regs); 623 map_size = sizeof(struct vfc_regs);
624 624
625 vma->vm_flags |= 625 vma->vm_flags |=
626 (VM_SHM | VM_LOCKED | VM_IO | VM_MAYREAD | VM_MAYWRITE | VM_MAYSHARE); 626 (VM_MAYREAD | VM_MAYWRITE | VM_MAYSHARE);
627 map_offset = (unsigned int) (long)dev->phys_regs; 627 map_offset = (unsigned int) (long)dev->phys_regs;
628 ret = io_remap_pfn_range(vma, vma->vm_start, 628 ret = io_remap_pfn_range(vma, vma->vm_start,
629 MK_IOSPACE_PFN(dev->which_io, 629 MK_IOSPACE_PFN(dev->which_io,
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 2a419634b256..5ee47555a8af 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -17316,7 +17316,7 @@ AdvWaitEEPCmd(AdvPortAddr iop_base)
17316/* 17316/*
17317 * Write the EEPROM from 'cfg_buf'. 17317 * Write the EEPROM from 'cfg_buf'.
17318 */ 17318 */
17319void 17319void __init
17320AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf) 17320AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
17321{ 17321{
17322 ushort *wbuf; 17322 ushort *wbuf;
@@ -17383,7 +17383,7 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
17383/* 17383/*
17384 * Write the EEPROM from 'cfg_buf'. 17384 * Write the EEPROM from 'cfg_buf'.
17385 */ 17385 */
17386void 17386void __init
17387AdvSet38C0800EEPConfig(AdvPortAddr iop_base, 17387AdvSet38C0800EEPConfig(AdvPortAddr iop_base,
17388 ADVEEP_38C0800_CONFIG *cfg_buf) 17388 ADVEEP_38C0800_CONFIG *cfg_buf)
17389{ 17389{
@@ -17451,7 +17451,7 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base,
17451/* 17451/*
17452 * Write the EEPROM from 'cfg_buf'. 17452 * Write the EEPROM from 'cfg_buf'.
17453 */ 17453 */
17454void 17454void __init
17455AdvSet38C1600EEPConfig(AdvPortAddr iop_base, 17455AdvSet38C1600EEPConfig(AdvPortAddr iop_base,
17456 ADVEEP_38C1600_CONFIG *cfg_buf) 17456 ADVEEP_38C1600_CONFIG *cfg_buf)
17457{ 17457{
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index e31fadd61904..118206d68c6c 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -43,9 +43,6 @@
43 43
44/* #define DEBUG_MAC_ESP */ 44/* #define DEBUG_MAC_ESP */
45 45
46#define mac_turnon_irq(x) mac_enable_irq(x)
47#define mac_turnoff_irq(x) mac_disable_irq(x)
48
49extern void esp_handle(struct NCR_ESP *esp); 46extern void esp_handle(struct NCR_ESP *esp);
50extern void mac_esp_intr(int irq, void *dev_id, struct pt_regs *pregs); 47extern void mac_esp_intr(int irq, void *dev_id, struct pt_regs *pregs);
51 48
@@ -639,13 +636,13 @@ static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length)
639 636
640static void dma_ints_off(struct NCR_ESP * esp) 637static void dma_ints_off(struct NCR_ESP * esp)
641{ 638{
642 mac_turnoff_irq(esp->irq); 639 disable_irq(esp->irq);
643} 640}
644 641
645 642
646static void dma_ints_on(struct NCR_ESP * esp) 643static void dma_ints_on(struct NCR_ESP * esp)
647{ 644{
648 mac_turnon_irq(esp->irq); 645 enable_irq(esp->irq);
649} 646}
650 647
651/* 648/*
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 777f9bcd1179..a942a21dd87e 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -65,9 +65,6 @@
65#define RESET_BOOT 65#define RESET_BOOT
66#define DRIVER_SETUP 66#define DRIVER_SETUP
67 67
68#define ENABLE_IRQ() mac_enable_irq( IRQ_MAC_SCSI );
69#define DISABLE_IRQ() mac_disable_irq( IRQ_MAC_SCSI );
70
71extern void via_scsi_clear(void); 68extern void via_scsi_clear(void);
72 69
73#ifdef RESET_BOOT 70#ifdef RESET_BOOT
@@ -351,7 +348,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
351 printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." ); 348 printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." );
352 349
353 /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ 350 /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
354 mac_disable_irq(IRQ_MAC_SCSI); 351 disable_irq(IRQ_MAC_SCSI);
355 352
356 /* get in phase */ 353 /* get in phase */
357 NCR5380_write( TARGET_COMMAND_REG, 354 NCR5380_write( TARGET_COMMAND_REG,
@@ -369,7 +366,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
369 barrier(); 366 barrier();
370 367
371 /* switch on SCSI IRQ again */ 368 /* switch on SCSI IRQ again */
372 mac_enable_irq(IRQ_MAC_SCSI); 369 enable_irq(IRQ_MAC_SCSI);
373 370
374 printk(KERN_INFO " done\n" ); 371 printk(KERN_INFO " done\n" );
375} 372}
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index bec1424eda85..b7caf60638e8 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -714,7 +714,7 @@ megaraid_io_detach(adapter_t *adapter)
714 * . Allocate memory required for all the commands 714 * . Allocate memory required for all the commands
715 * . Use internal library of FW routines, build up complete soft state 715 * . Use internal library of FW routines, build up complete soft state
716 */ 716 */
717static int __init 717static int __devinit
718megaraid_init_mbox(adapter_t *adapter) 718megaraid_init_mbox(adapter_t *adapter)
719{ 719{
720 struct pci_dev *pdev; 720 struct pci_dev *pdev;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index cc990bed9683..2e2c1eb15636 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -332,11 +332,11 @@ static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
332 struct scatterlist *sg = sp->SCp.buffer; 332 struct scatterlist *sg = sp->SCp.buffer;
333 333
334 while (sz >= 0) { 334 while (sz >= 0) {
335 sg[sz].dvma_address = dvma_map((unsigned long)page_address(sg[sz].page) + 335 sg[sz].dma_address = dvma_map((unsigned long)page_address(sg[sz].page) +
336 sg[sz].offset, sg[sz].length); 336 sg[sz].offset, sg[sz].length);
337 sz--; 337 sz--;
338 } 338 }
339 sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dvma_address); 339 sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address);
340} 340}
341 341
342static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 342static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
@@ -350,14 +350,14 @@ static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
350 struct scatterlist *sg = (struct scatterlist *)sp->buffer; 350 struct scatterlist *sg = (struct scatterlist *)sp->buffer;
351 351
352 while(sz >= 0) { 352 while(sz >= 0) {
353 dvma_unmap((char *)sg[sz].dvma_address); 353 dvma_unmap((char *)sg[sz].dma_address);
354 sz--; 354 sz--;
355 } 355 }
356} 356}
357 357
358static void dma_advance_sg (Scsi_Cmnd *sp) 358static void dma_advance_sg (Scsi_Cmnd *sp)
359{ 359{
360 sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dvma_address); 360 sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address);
361} 361}
362 362
363static int sun3x_esp_release(struct Scsi_Host *instance) 363static int sun3x_esp_release(struct Scsi_Host *instance)
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index e55f0ee7e7e4..574955b78a24 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1391,7 +1391,7 @@ static int wd7000_proc_info(struct Scsi_Host *host, char *buffer, char **start,
1391 * 1391 *
1392 */ 1392 */
1393 1393
1394static int wd7000_detect(struct scsi_host_template *tpnt) 1394static __init int wd7000_detect(struct scsi_host_template *tpnt)
1395{ 1395{
1396 short present = 0, biosaddr_ptr, sig_ptr, i, pass; 1396 short present = 0, biosaddr_ptr, sig_ptr, i, pass;
1397 short biosptr[NUM_CONFIGS]; 1397 short biosptr[NUM_CONFIGS];
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 5a2840aeb547..168ede7902bd 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -86,6 +86,11 @@ config FB_FIRMWARE_EDID
86 combination with certain motherboards and monitors are known to 86 combination with certain motherboards and monitors are known to
87 suffer from this problem. 87 suffer from this problem.
88 88
89config FB_BACKLIGHT
90 bool
91 depends on FB
92 default n
93
89config FB_MODE_HELPERS 94config FB_MODE_HELPERS
90 bool "Enable Video Mode Handling Helpers" 95 bool "Enable Video Mode Handling Helpers"
91 depends on FB 96 depends on FB
@@ -717,6 +722,16 @@ config FB_NVIDIA_I2C
717 independently validate video mode parameters, you should say Y 722 independently validate video mode parameters, you should say Y
718 here. 723 here.
719 724
725config FB_NVIDIA_BACKLIGHT
726 bool "Support for backlight control"
727 depends on FB_NVIDIA && PPC_PMAC
728 select FB_BACKLIGHT
729 select BACKLIGHT_LCD_SUPPORT
730 select BACKLIGHT_CLASS_DEVICE
731 default y
732 help
733 Say Y here if you want to control the backlight of your display.
734
720config FB_RIVA 735config FB_RIVA
721 tristate "nVidia Riva support" 736 tristate "nVidia Riva support"
722 depends on FB && PCI 737 depends on FB && PCI
@@ -755,6 +770,16 @@ config FB_RIVA_DEBUG
755 of debugging informations to provide to the maintainer when 770 of debugging informations to provide to the maintainer when
756 something goes wrong. 771 something goes wrong.
757 772
773config FB_RIVA_BACKLIGHT
774 bool "Support for backlight control"
775 depends on FB_RIVA && PPC_PMAC
776 select FB_BACKLIGHT
777 select BACKLIGHT_LCD_SUPPORT
778 select BACKLIGHT_CLASS_DEVICE
779 default y
780 help
781 Say Y here if you want to control the backlight of your display.
782
758config FB_I810 783config FB_I810
759 tristate "Intel 810/815 support (EXPERIMENTAL)" 784 tristate "Intel 810/815 support (EXPERIMENTAL)"
760 depends on FB && EXPERIMENTAL && PCI && X86_32 785 depends on FB && EXPERIMENTAL && PCI && X86_32
@@ -993,6 +1018,7 @@ config FB_RADEON
993 1018
994 There is a product page at 1019 There is a product page at
995 http://apps.ati.com/ATIcompare/ 1020 http://apps.ati.com/ATIcompare/
1021
996config FB_RADEON_I2C 1022config FB_RADEON_I2C
997 bool "DDC/I2C for ATI Radeon support" 1023 bool "DDC/I2C for ATI Radeon support"
998 depends on FB_RADEON 1024 depends on FB_RADEON
@@ -1000,6 +1026,16 @@ config FB_RADEON_I2C
1000 help 1026 help
1001 Say Y here if you want DDC/I2C support for your Radeon board. 1027 Say Y here if you want DDC/I2C support for your Radeon board.
1002 1028
1029config FB_RADEON_BACKLIGHT
1030 bool "Support for backlight control"
1031 depends on FB_RADEON && PPC_PMAC
1032 select FB_BACKLIGHT
1033 select BACKLIGHT_LCD_SUPPORT
1034 select BACKLIGHT_CLASS_DEVICE
1035 default y
1036 help
1037 Say Y here if you want to control the backlight of your display.
1038
1003config FB_RADEON_DEBUG 1039config FB_RADEON_DEBUG
1004 bool "Lots of debug output from Radeon driver" 1040 bool "Lots of debug output from Radeon driver"
1005 depends on FB_RADEON 1041 depends on FB_RADEON
@@ -1024,6 +1060,16 @@ config FB_ATY128
1024 To compile this driver as a module, choose M here: the 1060 To compile this driver as a module, choose M here: the
1025 module will be called aty128fb. 1061 module will be called aty128fb.
1026 1062
1063config FB_ATY128_BACKLIGHT
1064 bool "Support for backlight control"
1065 depends on FB_ATY128 && PPC_PMAC
1066 select FB_BACKLIGHT
1067 select BACKLIGHT_LCD_SUPPORT
1068 select BACKLIGHT_CLASS_DEVICE
1069 default y
1070 help
1071 Say Y here if you want to control the backlight of your display.
1072
1027config FB_ATY 1073config FB_ATY
1028 tristate "ATI Mach64 display support" if PCI || ATARI 1074 tristate "ATI Mach64 display support" if PCI || ATARI
1029 depends on FB && !SPARC32 1075 depends on FB && !SPARC32
@@ -1066,6 +1112,16 @@ config FB_ATY_GX
1066 is at 1112 is at
1067 <http://support.ati.com/products/pc/mach64/graphics_xpression.html>. 1113 <http://support.ati.com/products/pc/mach64/graphics_xpression.html>.
1068 1114
1115config FB_ATY_BACKLIGHT
1116 bool "Support for backlight control"
1117 depends on FB_ATY && PPC_PMAC
1118 select FB_BACKLIGHT
1119 select BACKLIGHT_LCD_SUPPORT
1120 select BACKLIGHT_CLASS_DEVICE
1121 default y
1122 help
1123 Say Y here if you want to control the backlight of your display.
1124
1069config FB_S3TRIO 1125config FB_S3TRIO
1070 bool "S3 Trio display support" 1126 bool "S3 Trio display support"
1071 depends on (FB = y) && PPC && BROKEN 1127 depends on (FB = y) && PPC && BROKEN
diff --git a/drivers/video/aty/Makefile b/drivers/video/aty/Makefile
index 18521397a6e3..a6cc0e9ec790 100644
--- a/drivers/video/aty/Makefile
+++ b/drivers/video/aty/Makefile
@@ -10,5 +10,6 @@ atyfb-objs := $(atyfb-y)
10 10
11radeonfb-y := radeon_base.o radeon_pm.o radeon_monitor.o radeon_accel.o 11radeonfb-y := radeon_base.o radeon_pm.o radeon_monitor.o radeon_accel.o
12radeonfb-$(CONFIG_FB_RADEON_I2C) += radeon_i2c.o 12radeonfb-$(CONFIG_FB_RADEON_I2C) += radeon_i2c.o
13radeonfb-$(CONFIG_FB_RADEON_BACKLIGHT) += radeon_backlight.o
13radeonfb-objs := $(radeonfb-y) 14radeonfb-objs := $(radeonfb-y)
14 15
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index f7bbff4ddc6a..db878fd55fb2 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -64,6 +64,7 @@
64#include <linux/pci.h> 64#include <linux/pci.h>
65#include <linux/ioport.h> 65#include <linux/ioport.h>
66#include <linux/console.h> 66#include <linux/console.h>
67#include <linux/backlight.h>
67#include <asm/io.h> 68#include <asm/io.h>
68 69
69#ifdef CONFIG_PPC_PMAC 70#ifdef CONFIG_PPC_PMAC
@@ -480,16 +481,6 @@ static struct fb_ops aty128fb_ops = {
480 .fb_imageblit = cfb_imageblit, 481 .fb_imageblit = cfb_imageblit,
481}; 482};
482 483
483#ifdef CONFIG_PMAC_BACKLIGHT
484static int aty128_set_backlight_enable(int on, int level, void* data);
485static int aty128_set_backlight_level(int level, void* data);
486
487static struct backlight_controller aty128_backlight_controller = {
488 aty128_set_backlight_enable,
489 aty128_set_backlight_level
490};
491#endif /* CONFIG_PMAC_BACKLIGHT */
492
493 /* 484 /*
494 * Functions to read from/write to the mmio registers 485 * Functions to read from/write to the mmio registers
495 * - endian conversions may possibly be avoided by 486 * - endian conversions may possibly be avoided by
@@ -1258,19 +1249,35 @@ static void aty128_set_crt_enable(struct aty128fb_par *par, int on)
1258static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) 1249static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
1259{ 1250{
1260 u32 reg; 1251 u32 reg;
1252#ifdef CONFIG_FB_ATY128_BACKLIGHT
1253 struct fb_info *info = pci_get_drvdata(par->pdev);
1254#endif
1261 1255
1262 if (on) { 1256 if (on) {
1263 reg = aty_ld_le32(LVDS_GEN_CNTL); 1257 reg = aty_ld_le32(LVDS_GEN_CNTL);
1264 reg |= LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION; 1258 reg |= LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION;
1265 reg &= ~LVDS_DISPLAY_DIS; 1259 reg &= ~LVDS_DISPLAY_DIS;
1266 aty_st_le32(LVDS_GEN_CNTL, reg); 1260 aty_st_le32(LVDS_GEN_CNTL, reg);
1267#ifdef CONFIG_PMAC_BACKLIGHT 1261#ifdef CONFIG_FB_ATY128_BACKLIGHT
1268 aty128_set_backlight_enable(get_backlight_enable(), 1262 mutex_lock(&info->bl_mutex);
1269 get_backlight_level(), par); 1263 if (info->bl_dev) {
1264 down(&info->bl_dev->sem);
1265 info->bl_dev->props->update_status(info->bl_dev);
1266 up(&info->bl_dev->sem);
1267 }
1268 mutex_unlock(&info->bl_mutex);
1270#endif 1269#endif
1271 } else { 1270 } else {
1272#ifdef CONFIG_PMAC_BACKLIGHT 1271#ifdef CONFIG_FB_ATY128_BACKLIGHT
1273 aty128_set_backlight_enable(0, 0, par); 1272 mutex_lock(&info->bl_mutex);
1273 if (info->bl_dev) {
1274 down(&info->bl_dev->sem);
1275 info->bl_dev->props->brightness = 0;
1276 info->bl_dev->props->power = FB_BLANK_POWERDOWN;
1277 info->bl_dev->props->update_status(info->bl_dev);
1278 up(&info->bl_dev->sem);
1279 }
1280 mutex_unlock(&info->bl_mutex);
1274#endif 1281#endif
1275 reg = aty_ld_le32(LVDS_GEN_CNTL); 1282 reg = aty_ld_le32(LVDS_GEN_CNTL);
1276 reg |= LVDS_DISPLAY_DIS; 1283 reg |= LVDS_DISPLAY_DIS;
@@ -1691,6 +1698,184 @@ static int __init aty128fb_setup(char *options)
1691} 1698}
1692#endif /* MODULE */ 1699#endif /* MODULE */
1693 1700
1701/* Backlight */
1702#ifdef CONFIG_FB_ATY128_BACKLIGHT
1703#define MAX_LEVEL 0xFF
1704
1705static struct backlight_properties aty128_bl_data;
1706
1707static int aty128_bl_get_level_brightness(struct aty128fb_par *par,
1708 int level)
1709{
1710 struct fb_info *info = pci_get_drvdata(par->pdev);
1711 int atylevel;
1712
1713 /* Get and convert the value */
1714 mutex_lock(&info->bl_mutex);
1715 atylevel = MAX_LEVEL -
1716 (info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL);
1717 mutex_unlock(&info->bl_mutex);
1718
1719 if (atylevel < 0)
1720 atylevel = 0;
1721 else if (atylevel > MAX_LEVEL)
1722 atylevel = MAX_LEVEL;
1723
1724 return atylevel;
1725}
1726
1727/* We turn off the LCD completely instead of just dimming the backlight.
1728 * This provides greater power saving and the display is useless without
1729 * backlight anyway
1730 */
1731#define BACKLIGHT_LVDS_OFF
1732/* That one prevents proper CRT output with LCD off */
1733#undef BACKLIGHT_DAC_OFF
1734
1735static int aty128_bl_update_status(struct backlight_device *bd)
1736{
1737 struct aty128fb_par *par = class_get_devdata(&bd->class_dev);
1738 unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL);
1739 int level;
1740
1741 if (bd->props->power != FB_BLANK_UNBLANK ||
1742 bd->props->fb_blank != FB_BLANK_UNBLANK ||
1743 !par->lcd_on)
1744 level = 0;
1745 else
1746 level = bd->props->brightness;
1747
1748 reg |= LVDS_BL_MOD_EN | LVDS_BLON;
1749 if (level > 0) {
1750 reg |= LVDS_DIGION;
1751 if (!(reg & LVDS_ON)) {
1752 reg &= ~LVDS_BLON;
1753 aty_st_le32(LVDS_GEN_CNTL, reg);
1754 aty_ld_le32(LVDS_GEN_CNTL);
1755 mdelay(10);
1756 reg |= LVDS_BLON;
1757 aty_st_le32(LVDS_GEN_CNTL, reg);
1758 }
1759 reg &= ~LVDS_BL_MOD_LEVEL_MASK;
1760 reg |= (aty128_bl_get_level_brightness(par, level) << LVDS_BL_MOD_LEVEL_SHIFT);
1761#ifdef BACKLIGHT_LVDS_OFF
1762 reg |= LVDS_ON | LVDS_EN;
1763 reg &= ~LVDS_DISPLAY_DIS;
1764#endif
1765 aty_st_le32(LVDS_GEN_CNTL, reg);
1766#ifdef BACKLIGHT_DAC_OFF
1767 aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & (~DAC_PDWN));
1768#endif
1769 } else {
1770 reg &= ~LVDS_BL_MOD_LEVEL_MASK;
1771 reg |= (aty128_bl_get_level_brightness(par, 0) << LVDS_BL_MOD_LEVEL_SHIFT);
1772#ifdef BACKLIGHT_LVDS_OFF
1773 reg |= LVDS_DISPLAY_DIS;
1774 aty_st_le32(LVDS_GEN_CNTL, reg);
1775 aty_ld_le32(LVDS_GEN_CNTL);
1776 udelay(10);
1777 reg &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION);
1778#endif
1779 aty_st_le32(LVDS_GEN_CNTL, reg);
1780#ifdef BACKLIGHT_DAC_OFF
1781 aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PDWN);
1782#endif
1783 }
1784
1785 return 0;
1786}
1787
1788static int aty128_bl_get_brightness(struct backlight_device *bd)
1789{
1790 return bd->props->brightness;
1791}
1792
1793static struct backlight_properties aty128_bl_data = {
1794 .owner = THIS_MODULE,
1795 .get_brightness = aty128_bl_get_brightness,
1796 .update_status = aty128_bl_update_status,
1797 .max_brightness = (FB_BACKLIGHT_LEVELS - 1),
1798};
1799
1800static void aty128_bl_init(struct aty128fb_par *par)
1801{
1802 struct fb_info *info = pci_get_drvdata(par->pdev);
1803 struct backlight_device *bd;
1804 char name[12];
1805
1806 /* Could be extended to Rage128Pro LVDS output too */
1807 if (par->chip_gen != rage_M3)
1808 return;
1809
1810#ifdef CONFIG_PMAC_BACKLIGHT
1811 if (!pmac_has_backlight_type("ati"))
1812 return;
1813#endif
1814
1815 snprintf(name, sizeof(name), "aty128bl%d", info->node);
1816
1817 bd = backlight_device_register(name, par, &aty128_bl_data);
1818 if (IS_ERR(bd)) {
1819 info->bl_dev = NULL;
1820 printk("aty128: Backlight registration failed\n");
1821 goto error;
1822 }
1823
1824 mutex_lock(&info->bl_mutex);
1825 info->bl_dev = bd;
1826 fb_bl_default_curve(info, 0,
1827 63 * FB_BACKLIGHT_MAX / MAX_LEVEL,
1828 219 * FB_BACKLIGHT_MAX / MAX_LEVEL);
1829 mutex_unlock(&info->bl_mutex);
1830
1831 up(&bd->sem);
1832 bd->props->brightness = aty128_bl_data.max_brightness;
1833 bd->props->power = FB_BLANK_UNBLANK;
1834 bd->props->update_status(bd);
1835 down(&bd->sem);
1836
1837#ifdef CONFIG_PMAC_BACKLIGHT
1838 mutex_lock(&pmac_backlight_mutex);
1839 if (!pmac_backlight)
1840 pmac_backlight = bd;
1841 mutex_unlock(&pmac_backlight_mutex);
1842#endif
1843
1844 printk("aty128: Backlight initialized (%s)\n", name);
1845
1846 return;
1847
1848error:
1849 return;
1850}
1851
1852static void aty128_bl_exit(struct aty128fb_par *par)
1853{
1854 struct fb_info *info = pci_get_drvdata(par->pdev);
1855
1856#ifdef CONFIG_PMAC_BACKLIGHT
1857 mutex_lock(&pmac_backlight_mutex);
1858#endif
1859
1860 mutex_lock(&info->bl_mutex);
1861 if (info->bl_dev) {
1862#ifdef CONFIG_PMAC_BACKLIGHT
1863 if (pmac_backlight == info->bl_dev)
1864 pmac_backlight = NULL;
1865#endif
1866
1867 backlight_device_unregister(info->bl_dev);
1868 info->bl_dev = NULL;
1869
1870 printk("aty128: Backlight unloaded\n");
1871 }
1872 mutex_unlock(&info->bl_mutex);
1873
1874#ifdef CONFIG_PMAC_BACKLIGHT
1875 mutex_unlock(&pmac_backlight_mutex);
1876#endif
1877}
1878#endif /* CONFIG_FB_ATY128_BACKLIGHT */
1694 1879
1695/* 1880/*
1696 * Initialisation 1881 * Initialisation
@@ -1835,17 +2020,15 @@ static int __init aty128_init(struct pci_dev *pdev, const struct pci_device_id *
1835 if (register_framebuffer(info) < 0) 2020 if (register_framebuffer(info) < 0)
1836 return 0; 2021 return 0;
1837 2022
1838#ifdef CONFIG_PMAC_BACKLIGHT
1839 /* Could be extended to Rage128Pro LVDS output too */
1840 if (par->chip_gen == rage_M3)
1841 register_backlight_controller(&aty128_backlight_controller, par, "ati");
1842#endif /* CONFIG_PMAC_BACKLIGHT */
1843
1844 par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM); 2023 par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM);
1845 par->pdev = pdev; 2024 par->pdev = pdev;
1846 par->asleep = 0; 2025 par->asleep = 0;
1847 par->lock_blank = 0; 2026 par->lock_blank = 0;
1848 2027
2028#ifdef CONFIG_FB_ATY128_BACKLIGHT
2029 aty128_bl_init(par);
2030#endif
2031
1849 printk(KERN_INFO "fb%d: %s frame buffer device on %s\n", 2032 printk(KERN_INFO "fb%d: %s frame buffer device on %s\n",
1850 info->node, info->fix.id, video_card); 2033 info->node, info->fix.id, video_card);
1851 2034
@@ -1981,6 +2164,10 @@ static void __devexit aty128_remove(struct pci_dev *pdev)
1981 2164
1982 par = info->par; 2165 par = info->par;
1983 2166
2167#ifdef CONFIG_FB_ATY128_BACKLIGHT
2168 aty128_bl_exit(par);
2169#endif
2170
1984 unregister_framebuffer(info); 2171 unregister_framebuffer(info);
1985#ifdef CONFIG_MTRR 2172#ifdef CONFIG_MTRR
1986 if (par->mtrr.vram_valid) 2173 if (par->mtrr.vram_valid)
@@ -2011,10 +2198,14 @@ static int aty128fb_blank(int blank, struct fb_info *fb)
2011 if (par->lock_blank || par->asleep) 2198 if (par->lock_blank || par->asleep)
2012 return 0; 2199 return 0;
2013 2200
2014#ifdef CONFIG_PMAC_BACKLIGHT 2201#ifdef CONFIG_FB_ATY128_BACKLIGHT
2015 if (machine_is(powermac) && blank) 2202 if (machine_is(powermac) && blank) {
2016 set_backlight_enable(0); 2203 down(&fb->bl_dev->sem);
2017#endif /* CONFIG_PMAC_BACKLIGHT */ 2204 fb->bl_dev->props->power = FB_BLANK_POWERDOWN;
2205 fb->bl_dev->props->update_status(fb->bl_dev);
2206 up(&fb->bl_dev->sem);
2207 }
2208#endif
2018 2209
2019 if (blank & FB_BLANK_VSYNC_SUSPEND) 2210 if (blank & FB_BLANK_VSYNC_SUSPEND)
2020 state |= 2; 2211 state |= 2;
@@ -2029,10 +2220,14 @@ static int aty128fb_blank(int blank, struct fb_info *fb)
2029 aty128_set_crt_enable(par, par->crt_on && !blank); 2220 aty128_set_crt_enable(par, par->crt_on && !blank);
2030 aty128_set_lcd_enable(par, par->lcd_on && !blank); 2221 aty128_set_lcd_enable(par, par->lcd_on && !blank);
2031 } 2222 }
2032#ifdef CONFIG_PMAC_BACKLIGHT 2223#ifdef CONFIG_FB_ATY128_BACKLIGHT
2033 if (machine_is(powermac) && !blank) 2224 if (machine_is(powermac) && !blank) {
2034 set_backlight_enable(1); 2225 down(&fb->bl_dev->sem);
2035#endif /* CONFIG_PMAC_BACKLIGHT */ 2226 fb->bl_dev->props->power = FB_BLANK_UNBLANK;
2227 fb->bl_dev->props->update_status(fb->bl_dev);
2228 up(&fb->bl_dev->sem);
2229 }
2230#endif
2036 return 0; 2231 return 0;
2037} 2232}
2038 2233
@@ -2138,73 +2333,6 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
2138 return -EINVAL; 2333 return -EINVAL;
2139} 2334}
2140 2335
2141#ifdef CONFIG_PMAC_BACKLIGHT
2142static int backlight_conv[] = {
2143 0xff, 0xc0, 0xb5, 0xaa, 0x9f, 0x94, 0x89, 0x7e,
2144 0x73, 0x68, 0x5d, 0x52, 0x47, 0x3c, 0x31, 0x24
2145};
2146
2147/* We turn off the LCD completely instead of just dimming the backlight.
2148 * This provides greater power saving and the display is useless without
2149 * backlight anyway
2150 */
2151#define BACKLIGHT_LVDS_OFF
2152/* That one prevents proper CRT output with LCD off */
2153#undef BACKLIGHT_DAC_OFF
2154
2155static int aty128_set_backlight_enable(int on, int level, void *data)
2156{
2157 struct aty128fb_par *par = data;
2158 unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL);
2159
2160 if (!par->lcd_on)
2161 on = 0;
2162 reg |= LVDS_BL_MOD_EN | LVDS_BLON;
2163 if (on && level > BACKLIGHT_OFF) {
2164 reg |= LVDS_DIGION;
2165 if (!(reg & LVDS_ON)) {
2166 reg &= ~LVDS_BLON;
2167 aty_st_le32(LVDS_GEN_CNTL, reg);
2168 (void)aty_ld_le32(LVDS_GEN_CNTL);
2169 mdelay(10);
2170 reg |= LVDS_BLON;
2171 aty_st_le32(LVDS_GEN_CNTL, reg);
2172 }
2173 reg &= ~LVDS_BL_MOD_LEVEL_MASK;
2174 reg |= (backlight_conv[level] << LVDS_BL_MOD_LEVEL_SHIFT);
2175#ifdef BACKLIGHT_LVDS_OFF
2176 reg |= LVDS_ON | LVDS_EN;
2177 reg &= ~LVDS_DISPLAY_DIS;
2178#endif
2179 aty_st_le32(LVDS_GEN_CNTL, reg);
2180#ifdef BACKLIGHT_DAC_OFF
2181 aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & (~DAC_PDWN));
2182#endif
2183 } else {
2184 reg &= ~LVDS_BL_MOD_LEVEL_MASK;
2185 reg |= (backlight_conv[0] << LVDS_BL_MOD_LEVEL_SHIFT);
2186#ifdef BACKLIGHT_LVDS_OFF
2187 reg |= LVDS_DISPLAY_DIS;
2188 aty_st_le32(LVDS_GEN_CNTL, reg);
2189 (void)aty_ld_le32(LVDS_GEN_CNTL);
2190 udelay(10);
2191 reg &= ~(LVDS_ON | LVDS_EN | LVDS_BLON | LVDS_DIGION);
2192#endif
2193 aty_st_le32(LVDS_GEN_CNTL, reg);
2194#ifdef BACKLIGHT_DAC_OFF
2195 aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PDWN);
2196#endif
2197 }
2198
2199 return 0;
2200}
2201
2202static int aty128_set_backlight_level(int level, void* data)
2203{
2204 return aty128_set_backlight_enable(1, level, data);
2205}
2206#endif /* CONFIG_PMAC_BACKLIGHT */
2207
2208#if 0 2336#if 0
2209 /* 2337 /*
2210 * Accelerated functions 2338 * Accelerated functions
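
The new aty128_bl_get_level_brightness() above inverts the curve value before it reaches LVDS_BL_MOD_LEVEL, which is exactly what the removed backlight_conv[] table encoded by hand. A small stand-alone sketch of that arithmetic (the FB_BACKLIGHT_MAX value of 0xFF from <linux/fb.h> is assumed, and the wrapper name is invented for illustration) shows the curve endpoints aty128_bl_init() installs landing on the old table's entries:

#include <stdio.h>

#define FB_BACKLIGHT_MAX 0xFF	/* assumed value from <linux/fb.h> */
#define MAX_LEVEL        0xFF	/* as defined in aty128fb.c above */

/* Same arithmetic as aty128_bl_get_level_brightness(), minus the
 * bl_mutex locking; curve_value stands in for info->bl_curve[level]. */
static int aty128_level_to_reg(int curve_value)
{
	int atylevel = MAX_LEVEL - (curve_value * FB_BACKLIGHT_MAX / MAX_LEVEL);

	if (atylevel < 0)
		atylevel = 0;
	else if (atylevel > MAX_LEVEL)
		atylevel = MAX_LEVEL;

	return atylevel;
}

int main(void)
{
	/* aty128_bl_init() builds the curve with min=63 and max=219;
	 * 63 -> 0xc0 and 219 -> 0x24, matching backlight_conv[1] and
	 * backlight_conv[15] in the table this series removes. */
	printf("dim=0x%02x bright=0x%02x\n",
	       aty128_level_to_reg(63), aty128_level_to_reg(219));
	return 0;
}
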
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index e9b7a64c1ac4..43d2cb58af87 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -151,6 +151,7 @@ struct atyfb_par {
151 int lock_blank; 151 int lock_blank;
152 unsigned long res_start; 152 unsigned long res_start;
153 unsigned long res_size; 153 unsigned long res_size;
154 struct pci_dev *pdev;
154#ifdef __sparc__ 155#ifdef __sparc__
155 struct pci_mmap_map *mmap_map; 156 struct pci_mmap_map *mmap_map;
156 u8 mmaped; 157 u8 mmaped;
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index c054bb28b1c4..c5185f7cf4ba 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -66,6 +66,7 @@
66#include <linux/interrupt.h> 66#include <linux/interrupt.h>
67#include <linux/spinlock.h> 67#include <linux/spinlock.h>
68#include <linux/wait.h> 68#include <linux/wait.h>
69#include <linux/backlight.h>
69 70
70#include <asm/io.h> 71#include <asm/io.h>
71#include <asm/uaccess.h> 72#include <asm/uaccess.h>
@@ -2115,45 +2116,142 @@ static int atyfb_pci_resume(struct pci_dev *pdev)
2115 2116
2116#endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */ 2117#endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */
2117 2118
2118#ifdef CONFIG_PMAC_BACKLIGHT 2119/* Backlight */
2120#ifdef CONFIG_FB_ATY_BACKLIGHT
2121#define MAX_LEVEL 0xFF
2119 2122
2120 /* 2123static struct backlight_properties aty_bl_data;
2121 * LCD backlight control
2122 */
2123 2124
2124static int backlight_conv[] = { 2125static int aty_bl_get_level_brightness(struct atyfb_par *par, int level)
2125 0x00, 0x3f, 0x4c, 0x59, 0x66, 0x73, 0x80, 0x8d, 2126{
2126 0x9a, 0xa7, 0xb4, 0xc1, 0xcf, 0xdc, 0xe9, 0xff 2127 struct fb_info *info = pci_get_drvdata(par->pdev);
2127}; 2128 int atylevel;
2129
2130 /* Get and convert the value */
2131 mutex_lock(&info->bl_mutex);
2132 atylevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL;
2133 mutex_unlock(&info->bl_mutex);
2134
2135 if (atylevel < 0)
2136 atylevel = 0;
2137 else if (atylevel > MAX_LEVEL)
2138 atylevel = MAX_LEVEL;
2128 2139
2129static int aty_set_backlight_enable(int on, int level, void *data) 2140 return atylevel;
2141}
2142
2143static int aty_bl_update_status(struct backlight_device *bd)
2130{ 2144{
2131 struct fb_info *info = (struct fb_info *) data; 2145 struct atyfb_par *par = class_get_devdata(&bd->class_dev);
2132 struct atyfb_par *par = (struct atyfb_par *) info->par;
2133 unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par); 2146 unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
2147 int level;
2148
2149 if (bd->props->power != FB_BLANK_UNBLANK ||
2150 bd->props->fb_blank != FB_BLANK_UNBLANK)
2151 level = 0;
2152 else
2153 level = bd->props->brightness;
2134 2154
2135 reg |= (BLMOD_EN | BIASMOD_EN); 2155 reg |= (BLMOD_EN | BIASMOD_EN);
2136 if (on && level > BACKLIGHT_OFF) { 2156 if (level > 0) {
2137 reg &= ~BIAS_MOD_LEVEL_MASK; 2157 reg &= ~BIAS_MOD_LEVEL_MASK;
2138 reg |= (backlight_conv[level] << BIAS_MOD_LEVEL_SHIFT); 2158 reg |= (aty_bl_get_level_brightness(par, level) << BIAS_MOD_LEVEL_SHIFT);
2139 } else { 2159 } else {
2140 reg &= ~BIAS_MOD_LEVEL_MASK; 2160 reg &= ~BIAS_MOD_LEVEL_MASK;
2141 reg |= (backlight_conv[0] << BIAS_MOD_LEVEL_SHIFT); 2161 reg |= (aty_bl_get_level_brightness(par, 0) << BIAS_MOD_LEVEL_SHIFT);
2142 } 2162 }
2143 aty_st_lcd(LCD_MISC_CNTL, reg, par); 2163 aty_st_lcd(LCD_MISC_CNTL, reg, par);
2164
2144 return 0; 2165 return 0;
2145} 2166}
2146 2167
2147static int aty_set_backlight_level(int level, void *data) 2168static int aty_bl_get_brightness(struct backlight_device *bd)
2148{ 2169{
2149 return aty_set_backlight_enable(1, level, data); 2170 return bd->props->brightness;
2150} 2171}
2151 2172
2152static struct backlight_controller aty_backlight_controller = { 2173static struct backlight_properties aty_bl_data = {
2153 aty_set_backlight_enable, 2174 .owner = THIS_MODULE,
2154 aty_set_backlight_level 2175 .get_brightness = aty_bl_get_brightness,
2176 .update_status = aty_bl_update_status,
2177 .max_brightness = (FB_BACKLIGHT_LEVELS - 1),
2155}; 2178};
2156#endif /* CONFIG_PMAC_BACKLIGHT */ 2179
2180static void aty_bl_init(struct atyfb_par *par)
2181{
2182 struct fb_info *info = pci_get_drvdata(par->pdev);
2183 struct backlight_device *bd;
2184 char name[12];
2185
2186#ifdef CONFIG_PMAC_BACKLIGHT
2187 if (!pmac_has_backlight_type("ati"))
2188 return;
2189#endif
2190
2191 snprintf(name, sizeof(name), "atybl%d", info->node);
2192
2193 bd = backlight_device_register(name, par, &aty_bl_data);
2194 if (IS_ERR(bd)) {
2195 info->bl_dev = NULL;
2196 printk("aty: Backlight registration failed\n");
2197 goto error;
2198 }
2199
2200 mutex_lock(&info->bl_mutex);
2201 info->bl_dev = bd;
2202 fb_bl_default_curve(info, 0,
2203 0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL,
2204 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
2205 mutex_unlock(&info->bl_mutex);
2206
2207 up(&bd->sem);
2208 bd->props->brightness = aty_bl_data.max_brightness;
2209 bd->props->power = FB_BLANK_UNBLANK;
2210 bd->props->update_status(bd);
2211 down(&bd->sem);
2212
2213#ifdef CONFIG_PMAC_BACKLIGHT
2214 mutex_lock(&pmac_backlight_mutex);
2215 if (!pmac_backlight)
2216 pmac_backlight = bd;
2217 mutex_unlock(&pmac_backlight_mutex);
2218#endif
2219
2220 printk("aty: Backlight initialized (%s)\n", name);
2221
2222 return;
2223
2224error:
2225 return;
2226}
2227
2228static void aty_bl_exit(struct atyfb_par *par)
2229{
2230 struct fb_info *info = pci_get_drvdata(par->pdev);
2231
2232#ifdef CONFIG_PMAC_BACKLIGHT
2233 mutex_lock(&pmac_backlight_mutex);
2234#endif
2235
2236 mutex_lock(&info->bl_mutex);
2237 if (info->bl_dev) {
2238#ifdef CONFIG_PMAC_BACKLIGHT
2239 if (pmac_backlight == info->bl_dev)
2240 pmac_backlight = NULL;
2241#endif
2242
2243 backlight_device_unregister(info->bl_dev);
2244
2245 printk("aty: Backlight unloaded\n");
2246 }
2247 mutex_unlock(&info->bl_mutex);
2248
2249#ifdef CONFIG_PMAC_BACKLIGHT
2250 mutex_unlock(&pmac_backlight_mutex);
2251#endif
2252}
2253
2254#endif /* CONFIG_FB_ATY_BACKLIGHT */
2157 2255
2158static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk) 2256static void __init aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
2159{ 2257{
@@ -2513,9 +2611,13 @@ static int __init aty_init(struct fb_info *info, const char *name)
2513 /* these bits let the 101 powerbook wake up from sleep -- paulus */ 2611 /* these bits let the 101 powerbook wake up from sleep -- paulus */
2514 aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par) 2612 aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par)
2515 | (USE_F32KHZ | TRISTATE_MEM_EN), par); 2613 | (USE_F32KHZ | TRISTATE_MEM_EN), par);
2516 } else if (M64_HAS(MOBIL_BUS)) 2614 } else
2517 register_backlight_controller(&aty_backlight_controller, info, "ati"); 2615#endif
2518#endif /* CONFIG_PMAC_BACKLIGHT */ 2616 if (M64_HAS(MOBIL_BUS)) {
2617#ifdef CONFIG_FB_ATY_BACKLIGHT
2618 aty_bl_init (par);
2619#endif
2620 }
2519 2621
2520 memset(&var, 0, sizeof(var)); 2622 memset(&var, 0, sizeof(var));
2521#ifdef CONFIG_PPC 2623#ifdef CONFIG_PPC
@@ -2674,8 +2776,16 @@ static int atyfb_blank(int blank, struct fb_info *info)
2674 return 0; 2776 return 0;
2675 2777
2676#ifdef CONFIG_PMAC_BACKLIGHT 2778#ifdef CONFIG_PMAC_BACKLIGHT
2677 if (machine_is(powermac) && blank > FB_BLANK_NORMAL) 2779 if (machine_is(powermac) && blank > FB_BLANK_NORMAL) {
2678 set_backlight_enable(0); 2780 mutex_lock(&info->bl_mutex);
2781 if (info->bl_dev) {
2782 down(&info->bl_dev->sem);
2783 info->bl_dev->props->power = FB_BLANK_POWERDOWN;
2784 info->bl_dev->props->update_status(info->bl_dev);
2785 up(&info->bl_dev->sem);
2786 }
2787 mutex_unlock(&info->bl_mutex);
2788 }
2679#elif defined(CONFIG_FB_ATY_GENERIC_LCD) 2789#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2680 if (par->lcd_table && blank > FB_BLANK_NORMAL && 2790 if (par->lcd_table && blank > FB_BLANK_NORMAL &&
2681 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2791 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
@@ -2706,8 +2816,16 @@ static int atyfb_blank(int blank, struct fb_info *info)
2706 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par); 2816 aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
2707 2817
2708#ifdef CONFIG_PMAC_BACKLIGHT 2818#ifdef CONFIG_PMAC_BACKLIGHT
2709 if (machine_is(powermac) && blank <= FB_BLANK_NORMAL) 2819 if (machine_is(powermac) && blank <= FB_BLANK_NORMAL) {
2710 set_backlight_enable(1); 2820 mutex_lock(&info->bl_mutex);
2821 if (info->bl_dev) {
2822 down(&info->bl_dev->sem);
2823 info->bl_dev->props->power = FB_BLANK_UNBLANK;
2824 info->bl_dev->props->update_status(info->bl_dev);
2825 up(&info->bl_dev->sem);
2826 }
2827 mutex_unlock(&info->bl_mutex);
2828 }
2711#elif defined(CONFIG_FB_ATY_GENERIC_LCD) 2829#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
2712 if (par->lcd_table && blank <= FB_BLANK_NORMAL && 2830 if (par->lcd_table && blank <= FB_BLANK_NORMAL &&
2713 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) { 2831 (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
@@ -3440,6 +3558,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3440 par->res_start = res_start; 3558 par->res_start = res_start;
3441 par->res_size = res_size; 3559 par->res_size = res_size;
3442 par->irq = pdev->irq; 3560 par->irq = pdev->irq;
3561 par->pdev = pdev;
3443 3562
3444 /* Setup "info" structure */ 3563 /* Setup "info" structure */
3445#ifdef __sparc__ 3564#ifdef __sparc__
@@ -3571,6 +3690,11 @@ static void __devexit atyfb_remove(struct fb_info *info)
3571 aty_set_crtc(par, &saved_crtc); 3690 aty_set_crtc(par, &saved_crtc);
3572 par->pll_ops->set_pll(info, &saved_pll); 3691 par->pll_ops->set_pll(info, &saved_pll);
3573 3692
3693#ifdef CONFIG_FB_ATY_BACKLIGHT
3694 if (M64_HAS(MOBIL_BUS))
3695 aty_bl_exit(par);
3696#endif
3697
3574 unregister_framebuffer(info); 3698 unregister_framebuffer(info);
3575 3699
3576#ifdef CONFIG_MTRR 3700#ifdef CONFIG_MTRR
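
The blank paths in aty128fb_blank() and atyfb_blank() above (and chipsfb_blank() further down) repeat the same sequence for mirroring fbdev blanking into the new backlight device. Purely as an illustration of that pattern — the series itself open-codes it and this helper does not exist in the tree — it amounts to:

#include <linux/fb.h>
#include <linux/backlight.h>

/* Illustrative only: take the per-info backlight lock, propagate the
 * new power state to the registered backlight device, and let its
 * update_status() hook program the hardware. */
static void fb_bl_mirror_blank(struct fb_info *info, int blank)
{
	mutex_lock(&info->bl_mutex);
	if (info->bl_dev) {
		down(&info->bl_dev->sem);
		info->bl_dev->props->power = blank ?
			FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK;
		info->bl_dev->props->update_status(info->bl_dev);
		up(&info->bl_dev->sem);
	}
	mutex_unlock(&info->bl_mutex);
}
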
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
new file mode 100644
index 000000000000..7de66b855d4e
--- /dev/null
+++ b/drivers/video/aty/radeon_backlight.c
@@ -0,0 +1,247 @@
1/*
2 * Backlight code for ATI Radeon based graphic cards
3 *
4 * Copyright (c) 2000 Ani Joshi <ajoshi@kernel.crashing.org>
5 * Copyright (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
6 * Copyright (c) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include "radeonfb.h"
14#include <linux/backlight.h>
15
16#ifdef CONFIG_PMAC_BACKLIGHT
17#include <asm/backlight.h>
18#endif
19
20#define MAX_RADEON_LEVEL 0xFF
21
22static struct backlight_properties radeon_bl_data;
23
24struct radeon_bl_privdata {
25 struct radeonfb_info *rinfo;
26 uint8_t negative;
27};
28
29static int radeon_bl_get_level_brightness(struct radeon_bl_privdata *pdata,
30 int level)
31{
32 struct fb_info *info = pdata->rinfo->info;
33 int rlevel;
34
35 mutex_lock(&info->bl_mutex);
36
37 /* Get and convert the value */
38 rlevel = pdata->rinfo->info->bl_curve[level] *
39 FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL;
40
41 mutex_unlock(&info->bl_mutex);
42
43 if (pdata->negative)
44 rlevel = MAX_RADEON_LEVEL - rlevel;
45
46 if (rlevel < 0)
47 rlevel = 0;
48 else if (rlevel > MAX_RADEON_LEVEL)
49 rlevel = MAX_RADEON_LEVEL;
50
51 return rlevel;
52}
53
54static int radeon_bl_update_status(struct backlight_device *bd)
55{
56 struct radeon_bl_privdata *pdata = class_get_devdata(&bd->class_dev);
57 struct radeonfb_info *rinfo = pdata->rinfo;
58 u32 lvds_gen_cntl, tmpPixclksCntl;
59 int level;
60
61 if (rinfo->mon1_type != MT_LCD)
62 return 0;
63
64 /* We turn off the LCD completely instead of just dimming the
65 * backlight. This provides some greater power saving and the display
66 * is useless without backlight anyway.
67 */
68 if (bd->props->power != FB_BLANK_UNBLANK ||
69 bd->props->fb_blank != FB_BLANK_UNBLANK)
70 level = 0;
71 else
72 level = bd->props->brightness;
73
74 del_timer_sync(&rinfo->lvds_timer);
75 radeon_engine_idle();
76
77 lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
78 if (level > 0) {
79 lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
80 if (!(lvds_gen_cntl & LVDS_BLON) || !(lvds_gen_cntl & LVDS_ON)) {
81 lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_DIGON);
82 lvds_gen_cntl |= LVDS_BLON | LVDS_EN;
83 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
84 lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
85 lvds_gen_cntl |=
86 (radeon_bl_get_level_brightness(pdata, level) <<
87 LVDS_BL_MOD_LEVEL_SHIFT);
88 lvds_gen_cntl |= LVDS_ON;
89 lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_BL_MOD_EN);
90 rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
91 mod_timer(&rinfo->lvds_timer,
92 jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
93 } else {
94 lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
95 lvds_gen_cntl |=
96 (radeon_bl_get_level_brightness(pdata, level) <<
97 LVDS_BL_MOD_LEVEL_SHIFT);
98 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
99 }
100 rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
101 rinfo->init_state.lvds_gen_cntl |= rinfo->pending_lvds_gen_cntl
102 & LVDS_STATE_MASK;
103 } else {
104 /* Asic bug, when turning off LVDS_ON, we have to make sure
105 RADEON_PIXCLK_LVDS_ALWAYS_ON bit is off
106 */
107 tmpPixclksCntl = INPLL(PIXCLKS_CNTL);
108 if (rinfo->is_mobility || rinfo->is_IGP)
109 OUTPLLP(PIXCLKS_CNTL, 0, ~PIXCLK_LVDS_ALWAYS_ONb);
110 lvds_gen_cntl &= ~(LVDS_BL_MOD_LEVEL_MASK | LVDS_BL_MOD_EN);
111 lvds_gen_cntl |= (radeon_bl_get_level_brightness(pdata, 0) <<
112 LVDS_BL_MOD_LEVEL_SHIFT);
113 lvds_gen_cntl |= LVDS_DISPLAY_DIS;
114 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
115 udelay(100);
116 lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN);
117 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
118 lvds_gen_cntl &= ~(LVDS_DIGON);
119 rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
120 mod_timer(&rinfo->lvds_timer,
121 jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
122 if (rinfo->is_mobility || rinfo->is_IGP)
123 OUTPLL(PIXCLKS_CNTL, tmpPixclksCntl);
124 }
125 rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
126 rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);
127
128 return 0;
129}
130
131static int radeon_bl_get_brightness(struct backlight_device *bd)
132{
133 return bd->props->brightness;
134}
135
136static struct backlight_properties radeon_bl_data = {
137 .owner = THIS_MODULE,
138 .get_brightness = radeon_bl_get_brightness,
139 .update_status = radeon_bl_update_status,
140 .max_brightness = (FB_BACKLIGHT_LEVELS - 1),
141};
142
143void radeonfb_bl_init(struct radeonfb_info *rinfo)
144{
145 struct backlight_device *bd;
146 struct radeon_bl_privdata *pdata;
147 char name[12];
148
149 if (rinfo->mon1_type != MT_LCD)
150 return;
151
152#ifdef CONFIG_PMAC_BACKLIGHT
153 if (!pmac_has_backlight_type("ati") &&
154 !pmac_has_backlight_type("mnca"))
155 return;
156#endif
157
158 pdata = kmalloc(sizeof(struct radeon_bl_privdata), GFP_KERNEL);
159 if (!pdata) {
160 printk("radeonfb: Memory allocation failed\n");
161 goto error;
162 }
163
164 snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
165
166 bd = backlight_device_register(name, pdata, &radeon_bl_data);
167 if (IS_ERR(bd)) {
168 rinfo->info->bl_dev = NULL;
169 printk("radeonfb: Backlight registration failed\n");
170 goto error;
171 }
172
173 pdata->rinfo = rinfo;
174
175 /* Pardon me for that hack... maybe some day we can figure out in what
176 * direction backlight should work on a given panel?
177 */
178 pdata->negative =
179 (rinfo->family != CHIP_FAMILY_RV200 &&
180 rinfo->family != CHIP_FAMILY_RV250 &&
181 rinfo->family != CHIP_FAMILY_RV280 &&
182 rinfo->family != CHIP_FAMILY_RV350);
183
184#ifdef CONFIG_PMAC_BACKLIGHT
185 pdata->negative = pdata->negative ||
186 machine_is_compatible("PowerBook4,3") ||
187 machine_is_compatible("PowerBook6,3") ||
188 machine_is_compatible("PowerBook6,5");
189#endif
190
191 mutex_lock(&rinfo->info->bl_mutex);
192 rinfo->info->bl_dev = bd;
193 fb_bl_default_curve(rinfo->info, 0,
194 63 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL,
195 217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL);
196 mutex_unlock(&rinfo->info->bl_mutex);
197
198 up(&bd->sem);
199 bd->props->brightness = radeon_bl_data.max_brightness;
200 bd->props->power = FB_BLANK_UNBLANK;
201 bd->props->update_status(bd);
202 down(&bd->sem);
203
204#ifdef CONFIG_PMAC_BACKLIGHT
205 mutex_lock(&pmac_backlight_mutex);
206 if (!pmac_backlight)
207 pmac_backlight = bd;
208 mutex_unlock(&pmac_backlight_mutex);
209#endif
210
211 printk("radeonfb: Backlight initialized (%s)\n", name);
212
213 return;
214
215error:
216 kfree(pdata);
217 return;
218}
219
220void radeonfb_bl_exit(struct radeonfb_info *rinfo)
221{
222#ifdef CONFIG_PMAC_BACKLIGHT
223 mutex_lock(&pmac_backlight_mutex);
224#endif
225
226 mutex_lock(&rinfo->info->bl_mutex);
227 if (rinfo->info->bl_dev) {
228 struct radeon_bl_privdata *pdata;
229
230#ifdef CONFIG_PMAC_BACKLIGHT
231 if (pmac_backlight == rinfo->info->bl_dev)
232 pmac_backlight = NULL;
233#endif
234
235 pdata = class_get_devdata(&rinfo->info->bl_dev->class_dev);
236 backlight_device_unregister(rinfo->info->bl_dev);
237 kfree(pdata);
238 rinfo->info->bl_dev = NULL;
239
240 printk("radeonfb: Backlight unloaded\n");
241 }
242 mutex_unlock(&rinfo->info->bl_mutex);
243
244#ifdef CONFIG_PMAC_BACKLIGHT
245 mutex_unlock(&pmac_backlight_mutex);
246#endif
247}
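
radeon_bl_get_level_brightness() above folds the two old conversion tables into one curve plus the pdata->negative flag. A stand-alone sketch (FB_BACKLIGHT_MAX = 0xFF is assumed from <linux/fb.h>; the function name here is illustrative) shows the dim end of the curve radeonfb_bl_init() installs reproducing the old backlight_conv_m7 value on normal panels and the backlight_conv_m6 value on inverted ones:

#include <stdio.h>

#define FB_BACKLIGHT_MAX  0xFF	/* assumed value from <linux/fb.h> */
#define MAX_RADEON_LEVEL  0xFF	/* as defined in radeon_backlight.c above */

/* Same arithmetic as radeon_bl_get_level_brightness(), with the
 * bl_curve lookup replaced by a plain curve value. */
static int radeon_level_to_reg(int curve_value, int negative)
{
	int rlevel = curve_value * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL;

	if (negative)
		rlevel = MAX_RADEON_LEVEL - rlevel;

	if (rlevel < 0)
		rlevel = 0;
	else if (rlevel > MAX_RADEON_LEVEL)
		rlevel = MAX_RADEON_LEVEL;

	return rlevel;
}

int main(void)
{
	/* radeonfb_bl_init() builds the curve with min=63: 0x3f matches
	 * backlight_conv_m7[1], and 0xc0 matches backlight_conv_m6[1]. */
	printf("normal=0x%02x negative=0x%02x\n",
	       radeon_level_to_reg(63, 0), radeon_level_to_reg(63, 1));
	return 0;
}
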
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 387a18a47ac2..c5ecbb02e01d 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -78,10 +78,6 @@
78#include <asm/pci-bridge.h> 78#include <asm/pci-bridge.h>
79#include "../macmodes.h" 79#include "../macmodes.h"
80 80
81#ifdef CONFIG_PMAC_BACKLIGHT
82#include <asm/backlight.h>
83#endif
84
85#ifdef CONFIG_BOOTX_TEXT 81#ifdef CONFIG_BOOTX_TEXT
86#include <asm/btext.h> 82#include <asm/btext.h>
87#endif 83#endif
@@ -277,20 +273,6 @@ static int nomtrr = 0;
277 * prototypes 273 * prototypes
278 */ 274 */
279 275
280
281#ifdef CONFIG_PPC_OF
282
283#ifdef CONFIG_PMAC_BACKLIGHT
284static int radeon_set_backlight_enable(int on, int level, void *data);
285static int radeon_set_backlight_level(int level, void *data);
286static struct backlight_controller radeon_backlight_controller = {
287 radeon_set_backlight_enable,
288 radeon_set_backlight_level
289};
290#endif /* CONFIG_PMAC_BACKLIGHT */
291
292#endif /* CONFIG_PPC_OF */
293
294static void radeon_unmap_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev) 276static void radeon_unmap_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
295{ 277{
296 if (!rinfo->bios_seg) 278 if (!rinfo->bios_seg)
@@ -1913,116 +1895,6 @@ static int __devinit radeon_set_fbinfo (struct radeonfb_info *rinfo)
1913 return 0; 1895 return 0;
1914} 1896}
1915 1897
1916
1917#ifdef CONFIG_PMAC_BACKLIGHT
1918
1919/* TODO: Dbl check these tables, we don't go up to full ON backlight
1920 * in these, possibly because we noticed MacOS doesn't, but I'd prefer
1921 * having some more official numbers from ATI
1922 */
1923static int backlight_conv_m6[] = {
1924 0xff, 0xc0, 0xb5, 0xaa, 0x9f, 0x94, 0x89, 0x7e,
1925 0x73, 0x68, 0x5d, 0x52, 0x47, 0x3c, 0x31, 0x24
1926};
1927static int backlight_conv_m7[] = {
1928 0x00, 0x3f, 0x4a, 0x55, 0x60, 0x6b, 0x76, 0x81,
1929 0x8c, 0x97, 0xa2, 0xad, 0xb8, 0xc3, 0xce, 0xd9
1930};
1931
1932#define BACKLIGHT_LVDS_OFF
1933#undef BACKLIGHT_DAC_OFF
1934
1935/* We turn off the LCD completely instead of just dimming the backlight.
1936 * This provides some greater power saving and the display is useless
1937 * without backlight anyway.
1938 */
1939static int radeon_set_backlight_enable(int on, int level, void *data)
1940{
1941 struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
1942 u32 lvds_gen_cntl, tmpPixclksCntl;
1943 int* conv_table;
1944
1945 if (rinfo->mon1_type != MT_LCD)
1946 return 0;
1947
1948 /* Pardon me for that hack... maybe some day we can figure
1949 * out in what direction backlight should work on a given
1950 * panel ?
1951 */
1952 if ((rinfo->family == CHIP_FAMILY_RV200 ||
1953 rinfo->family == CHIP_FAMILY_RV250 ||
1954 rinfo->family == CHIP_FAMILY_RV280 ||
1955 rinfo->family == CHIP_FAMILY_RV350) &&
1956 !machine_is_compatible("PowerBook4,3") &&
1957 !machine_is_compatible("PowerBook6,3") &&
1958 !machine_is_compatible("PowerBook6,5"))
1959 conv_table = backlight_conv_m7;
1960 else
1961 conv_table = backlight_conv_m6;
1962
1963 del_timer_sync(&rinfo->lvds_timer);
1964 radeon_engine_idle();
1965
1966 lvds_gen_cntl = INREG(LVDS_GEN_CNTL);
1967 if (on && (level > BACKLIGHT_OFF)) {
1968 lvds_gen_cntl &= ~LVDS_DISPLAY_DIS;
1969 if (!(lvds_gen_cntl & LVDS_BLON) || !(lvds_gen_cntl & LVDS_ON)) {
1970 lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_DIGON);
1971 lvds_gen_cntl |= LVDS_BLON | LVDS_EN;
1972 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
1973 lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
1974 lvds_gen_cntl |= (conv_table[level] <<
1975 LVDS_BL_MOD_LEVEL_SHIFT);
1976 lvds_gen_cntl |= LVDS_ON;
1977 lvds_gen_cntl |= (rinfo->init_state.lvds_gen_cntl & LVDS_BL_MOD_EN);
1978 rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
1979 mod_timer(&rinfo->lvds_timer,
1980 jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
1981 } else {
1982 lvds_gen_cntl &= ~LVDS_BL_MOD_LEVEL_MASK;
1983 lvds_gen_cntl |= (conv_table[level] <<
1984 LVDS_BL_MOD_LEVEL_SHIFT);
1985 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
1986 }
1987 rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
1988 rinfo->init_state.lvds_gen_cntl |= rinfo->pending_lvds_gen_cntl
1989 & LVDS_STATE_MASK;
1990 } else {
1991 /* Asic bug, when turning off LVDS_ON, we have to make sure
1992 RADEON_PIXCLK_LVDS_ALWAYS_ON bit is off
1993 */
1994 tmpPixclksCntl = INPLL(PIXCLKS_CNTL);
1995 if (rinfo->is_mobility || rinfo->is_IGP)
1996 OUTPLLP(PIXCLKS_CNTL, 0, ~PIXCLK_LVDS_ALWAYS_ONb);
1997 lvds_gen_cntl &= ~(LVDS_BL_MOD_LEVEL_MASK | LVDS_BL_MOD_EN);
1998 lvds_gen_cntl |= (conv_table[0] <<
1999 LVDS_BL_MOD_LEVEL_SHIFT);
2000 lvds_gen_cntl |= LVDS_DISPLAY_DIS;
2001 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
2002 udelay(100);
2003 lvds_gen_cntl &= ~(LVDS_ON | LVDS_EN);
2004 OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
2005 lvds_gen_cntl &= ~(LVDS_DIGON);
2006 rinfo->pending_lvds_gen_cntl = lvds_gen_cntl;
2007 mod_timer(&rinfo->lvds_timer,
2008 jiffies + msecs_to_jiffies(rinfo->panel_info.pwr_delay));
2009 if (rinfo->is_mobility || rinfo->is_IGP)
2010 OUTPLL(PIXCLKS_CNTL, tmpPixclksCntl);
2011 }
2012 rinfo->init_state.lvds_gen_cntl &= ~LVDS_STATE_MASK;
2013 rinfo->init_state.lvds_gen_cntl |= (lvds_gen_cntl & LVDS_STATE_MASK);
2014
2015 return 0;
2016}
2017
2018
2019static int radeon_set_backlight_level(int level, void *data)
2020{
2021 return radeon_set_backlight_enable(1, level, data);
2022}
2023#endif /* CONFIG_PMAC_BACKLIGHT */
2024
2025
2026/* 1898/*
2027 * This reconfigure the card's internal memory map. In theory, we'd like 1899 * This reconfigure the card's internal memory map. In theory, we'd like
2028 * to setup the card's memory at the same address as it's PCI bus address, 1900 * to setup the card's memory at the same address as it's PCI bus address,
@@ -2477,14 +2349,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
2477 MTRR_TYPE_WRCOMB, 1); 2349 MTRR_TYPE_WRCOMB, 1);
2478#endif 2350#endif
2479 2351
2480#ifdef CONFIG_PMAC_BACKLIGHT 2352 radeonfb_bl_init(rinfo);
2481 if (rinfo->mon1_type == MT_LCD) {
2482 register_backlight_controller(&radeon_backlight_controller,
2483 rinfo, "ati");
2484 register_backlight_controller(&radeon_backlight_controller,
2485 rinfo, "mnca");
2486 }
2487#endif
2488 2353
2489 printk ("radeonfb (%s): %s\n", pci_name(rinfo->pdev), rinfo->name); 2354 printk ("radeonfb (%s): %s\n", pci_name(rinfo->pdev), rinfo->name);
2490 2355
@@ -2528,7 +2393,8 @@ static void __devexit radeonfb_pci_unregister (struct pci_dev *pdev)
2528 2393
2529 if (!rinfo) 2394 if (!rinfo)
2530 return; 2395 return;
2531 2396
2397 radeonfb_bl_exit(rinfo);
2532 radeonfb_pm_exit(rinfo); 2398 radeonfb_pm_exit(rinfo);
2533 2399
2534 if (rinfo->mon1_EDID) 2400 if (rinfo->mon1_EDID)
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 217e00ab4a2d..1645943b1123 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -625,4 +625,13 @@ extern int radeon_screen_blank(struct radeonfb_info *rinfo, int blank, int mode_
625extern void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode, 625extern void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
626 int reg_only); 626 int reg_only);
627 627
628/* Backlight functions */
629#ifdef CONFIG_FB_RADEON_BACKLIGHT
630extern void radeonfb_bl_init(struct radeonfb_info *rinfo);
631extern void radeonfb_bl_exit(struct radeonfb_info *rinfo);
632#else
633static inline void radeonfb_bl_init(struct radeonfb_info *rinfo) {}
634static inline void radeonfb_bl_exit(struct radeonfb_info *rinfo) {}
635#endif
636
628#endif /* __RADEONFB_H__ */ 637#endif /* __RADEONFB_H__ */
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index 72ff6bf75e5e..d76bbfac92cc 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -148,9 +148,24 @@ static int chipsfb_set_par(struct fb_info *info)
148static int chipsfb_blank(int blank, struct fb_info *info) 148static int chipsfb_blank(int blank, struct fb_info *info)
149{ 149{
150#ifdef CONFIG_PMAC_BACKLIGHT 150#ifdef CONFIG_PMAC_BACKLIGHT
151 // used to disable backlight only for blank > 1, but it seems 151 mutex_lock(&pmac_backlight_mutex);
152 // useful at blank = 1 too (saves battery, extends backlight life) 152
153 set_backlight_enable(!blank); 153 if (pmac_backlight) {
154 down(&pmac_backlight->sem);
155
156 /* used to disable backlight only for blank > 1, but it seems
157 * useful at blank = 1 too (saves battery, extends backlight
158 * life)
159 */
160 if (blank)
161 pmac_backlight->props->power = FB_BLANK_POWERDOWN;
162 else
163 pmac_backlight->props->power = FB_BLANK_UNBLANK;
164 pmac_backlight->props->update_status(pmac_backlight);
165 up(&pmac_backlight->sem);
166 }
167
168 mutex_unlock(&pmac_backlight_mutex);
154#endif /* CONFIG_PMAC_BACKLIGHT */ 169#endif /* CONFIG_PMAC_BACKLIGHT */
155 170
156 return 1; /* get fb_blank to set the colormap to all black */ 171 return 1; /* get fb_blank to set the colormap to all black */
@@ -401,7 +416,14 @@ chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
401 416
402#ifdef CONFIG_PMAC_BACKLIGHT 417#ifdef CONFIG_PMAC_BACKLIGHT
403 /* turn on the backlight */ 418 /* turn on the backlight */
404 set_backlight_enable(1); 419 mutex_lock(&pmac_backlight_mutex);
420 if (pmac_backlight) {
421 down(&pmac_backlight->sem);
422 pmac_backlight->props->power = FB_BLANK_UNBLANK;
423 pmac_backlight->props->update_status(pmac_backlight);
424 up(&pmac_backlight->sem);
425 }
426 mutex_unlock(&pmac_backlight_mutex);
405#endif /* CONFIG_PMAC_BACKLIGHT */ 427#endif /* CONFIG_PMAC_BACKLIGHT */
406 428
407#ifdef CONFIG_PPC 429#ifdef CONFIG_PPC
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 34e07399756b..3ceb8c1b392e 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -18,6 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/fb.h> 19#include <linux/fb.h>
20#include <linux/console.h> 20#include <linux/console.h>
21#include <linux/module.h>
21 22
22/** 23/**
23 * framebuffer_alloc - creates a new frame buffer info structure 24 * framebuffer_alloc - creates a new frame buffer info structure
@@ -55,6 +56,10 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
55 56
56 info->device = dev; 57 info->device = dev;
57 58
59#ifdef CONFIG_FB_BACKLIGHT
60 mutex_init(&info->bl_mutex);
61#endif
62
58 return info; 63 return info;
59#undef PADDING 64#undef PADDING
60#undef BYTES_PER_LONG 65#undef BYTES_PER_LONG
@@ -414,6 +419,65 @@ static ssize_t show_fbstate(struct class_device *class_device, char *buf)
414 return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state); 419 return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state);
415} 420}
416 421
422#ifdef CONFIG_FB_BACKLIGHT
423static ssize_t store_bl_curve(struct class_device *class_device,
424 const char *buf, size_t count)
425{
426 struct fb_info *fb_info = class_get_devdata(class_device);
427 u8 tmp_curve[FB_BACKLIGHT_LEVELS];
428 unsigned int i;
429
430 if (count != (FB_BACKLIGHT_LEVELS / 8 * 24))
431 return -EINVAL;
432
433 for (i = 0; i < (FB_BACKLIGHT_LEVELS / 8); ++i)
434 if (sscanf(&buf[i * 24],
435 "%2hhx %2hhx %2hhx %2hhx %2hhx %2hhx %2hhx %2hhx\n",
436 &tmp_curve[i * 8 + 0],
437 &tmp_curve[i * 8 + 1],
438 &tmp_curve[i * 8 + 2],
439 &tmp_curve[i * 8 + 3],
440 &tmp_curve[i * 8 + 4],
441 &tmp_curve[i * 8 + 5],
442 &tmp_curve[i * 8 + 6],
443 &tmp_curve[i * 8 + 7]) != 8)
444 return -EINVAL;
445
446 /* If there has been an error in the input data, we won't
447 * reach this loop.
448 */
449 mutex_lock(&fb_info->bl_mutex);
450 for (i = 0; i < FB_BACKLIGHT_LEVELS; ++i)
451 fb_info->bl_curve[i] = tmp_curve[i];
452 mutex_unlock(&fb_info->bl_mutex);
453
454 return count;
455}
456
457static ssize_t show_bl_curve(struct class_device *class_device, char *buf)
458{
459 struct fb_info *fb_info = class_get_devdata(class_device);
460 ssize_t len = 0;
461 unsigned int i;
462
463 mutex_lock(&fb_info->bl_mutex);
464 for (i = 0; i < FB_BACKLIGHT_LEVELS; i += 8)
465 len += snprintf(&buf[len], PAGE_SIZE,
466 "%02x %02x %02x %02x %02x %02x %02x %02x\n",
467 fb_info->bl_curve[i + 0],
468 fb_info->bl_curve[i + 1],
469 fb_info->bl_curve[i + 2],
470 fb_info->bl_curve[i + 3],
471 fb_info->bl_curve[i + 4],
472 fb_info->bl_curve[i + 5],
473 fb_info->bl_curve[i + 6],
474 fb_info->bl_curve[i + 7]);
475 mutex_unlock(&fb_info->bl_mutex);
476
477 return len;
478}
479#endif
480
417/* When cmap is added back in it should be a binary attribute 481/* When cmap is added back in it should be a binary attribute
418 * not a text one. Consideration should also be given to converting 482 * not a text one. Consideration should also be given to converting
419 * fbdev to use configfs instead of sysfs */ 483 * fbdev to use configfs instead of sysfs */
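
store_bl_curve() above only accepts a buffer of exactly FB_BACKLIGHT_LEVELS / 8 lines of eight two-digit hex values — 24 bytes per line including the newline — delivered in a single write. A stand-alone sketch of a writer that produces that layout (FB_BACKLIGHT_LEVELS = 128 is assumed from <linux/fb.h>; redirect the output into the new bl_curve attribute):

#include <stdio.h>

#define FB_BACKLIGHT_LEVELS 128	/* assumed value from <linux/fb.h> */

int main(void)
{
	unsigned char curve[FB_BACKLIGHT_LEVELS];
	char buf[FB_BACKLIGHT_LEVELS / 8 * 24 + 1];
	size_t len = 0;
	int i;

	for (i = 0; i < FB_BACKLIGHT_LEVELS; i++)	/* simple linear ramp */
		curve[i] = i * 2;

	/* Eight values per 24-byte line: the exact format show_bl_curve()
	 * emits and store_bl_curve() parses with sscanf(). */
	for (i = 0; i < FB_BACKLIGHT_LEVELS; i += 8)
		len += sprintf(buf + len,
			       "%02x %02x %02x %02x %02x %02x %02x %02x\n",
			       curve[i + 0], curve[i + 1], curve[i + 2],
			       curve[i + 3], curve[i + 4], curve[i + 5],
			       curve[i + 6], curve[i + 7]);

	fwrite(buf, 1, len, stdout);	/* 384 bytes for 128 levels */
	return 0;
}
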
@@ -432,6 +496,9 @@ static struct class_device_attribute class_device_attrs[] = {
432 __ATTR(con_rotate, S_IRUGO|S_IWUSR, show_con_rotate, store_con_rotate), 496 __ATTR(con_rotate, S_IRUGO|S_IWUSR, show_con_rotate, store_con_rotate),
433 __ATTR(con_rotate_all, S_IWUSR, NULL, store_con_rotate_all), 497 __ATTR(con_rotate_all, S_IWUSR, NULL, store_con_rotate_all),
434 __ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate), 498 __ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate),
499#ifdef CONFIG_FB_BACKLIGHT
500 __ATTR(bl_curve, S_IRUGO|S_IWUSR, show_bl_curve, store_bl_curve),
501#endif
435}; 502};
436 503
437int fb_init_class_device(struct fb_info *fb_info) 504int fb_init_class_device(struct fb_info *fb_info)
@@ -454,4 +521,25 @@ void fb_cleanup_class_device(struct fb_info *fb_info)
454 &class_device_attrs[i]); 521 &class_device_attrs[i]);
455} 522}
456 523
524#ifdef CONFIG_FB_BACKLIGHT
525/* This function generates a linear backlight curve
526 *
527 * 0: off
528 * 1-7: min
529 * 8-127: linear from min to max
530 */
531void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max)
532{
533 unsigned int i, flat, count, range = (max - min);
534
535 fb_info->bl_curve[0] = off;
457 536
537 for (flat = 1; flat < (FB_BACKLIGHT_LEVELS / 16); ++flat)
538 fb_info->bl_curve[flat] = min;
539
540 count = FB_BACKLIGHT_LEVELS * 15 / 16;
541 for (i = 0; i < count; ++i)
542 fb_info->bl_curve[flat + i] = min + (range * (i + 1) / count);
543}
544EXPORT_SYMBOL_GPL(fb_bl_default_curve);
545#endif
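
To make the comment above concrete, the curve computation can be copied into a user-space program and run (FB_BACKLIGHT_LEVELS = 128 is assumed from <linux/fb.h>; the off/min/max arguments are the ones radeonfb_bl_init() ends up passing when FB_BACKLIGHT_MAX and MAX_RADEON_LEVEL are both 0xFF):

#include <stdio.h>

#define FB_BACKLIGHT_LEVELS 128	/* assumed value from <linux/fb.h> */

static unsigned char bl_curve[FB_BACKLIGHT_LEVELS];

/* Body copied from fb_bl_default_curve() above, minus the fb_info. */
static void bl_default_curve(unsigned char off, unsigned char min,
			     unsigned char max)
{
	unsigned int i, flat, count, range = (max - min);

	bl_curve[0] = off;				/* level 0: off */

	for (flat = 1; flat < (FB_BACKLIGHT_LEVELS / 16); ++flat)
		bl_curve[flat] = min;			/* levels 1-7: min */

	count = FB_BACKLIGHT_LEVELS * 15 / 16;		/* 120 linear steps */
	for (i = 0; i < count; ++i)			/* levels 8-127 */
		bl_curve[flat + i] = min + (range * (i + 1) / count);
}

int main(void)
{
	bl_default_curve(0, 63, 217);	/* radeonfb's off/min/max */
	printf("[0]=%u [1]=%u [7]=%u [8]=%u [127]=%u\n",
	       bl_curve[0], bl_curve[1], bl_curve[7],
	       bl_curve[8], bl_curve[127]);
	/* Prints: [0]=0 [1]=63 [7]=63 [8]=64 [127]=217 */
	return 0;
}
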
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 6b88050d21bf..8a0c2d3d3805 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -232,9 +232,6 @@ static int igafb_mmap(struct fb_info *info,
232 232
233 size = vma->vm_end - vma->vm_start; 233 size = vma->vm_end - vma->vm_start;
234 234
235 /* To stop the swapper from even considering these pages. */
236 vma->vm_flags |= (VM_SHM | VM_LOCKED);
237
238 /* Each page, see which map applies */ 235 /* Each page, see which map applies */
239 for (page = 0; page < size; ) { 236 for (page = 0; page < size; ) {
240 map_size = 0; 237 map_size = 0;
diff --git a/drivers/video/nvidia/Makefile b/drivers/video/nvidia/Makefile
index 690d37e8de5b..ca47432113e0 100644
--- a/drivers/video/nvidia/Makefile
+++ b/drivers/video/nvidia/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_FB_NVIDIA) += nvidiafb.o
7nvidiafb-y := nvidia.o nv_hw.o nv_setup.o \ 7nvidiafb-y := nvidia.o nv_hw.o nv_setup.o \
8 nv_accel.o 8 nv_accel.o
9nvidiafb-$(CONFIG_FB_NVIDIA_I2C) += nv_i2c.o 9nvidiafb-$(CONFIG_FB_NVIDIA_I2C) += nv_i2c.o
10nvidiafb-$(CONFIG_FB_NVIDIA_BACKLIGHT) += nv_backlight.o
10nvidiafb-$(CONFIG_PPC_OF) += nv_of.o 11nvidiafb-$(CONFIG_PPC_OF) += nv_of.o
11 12
12nvidiafb-objs := $(nvidiafb-y) \ No newline at end of file 13nvidiafb-objs := $(nvidiafb-y)
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
new file mode 100644
index 000000000000..1c1c10c699c5
--- /dev/null
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -0,0 +1,175 @@
1/*
2 * Backlight code for nVidia based graphic cards
3 *
4 * Copyright 2004 Antonino Daplas <adaplas@pol.net>
5 * Copyright (c) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/backlight.h>
13#include <linux/fb.h>
14#include <linux/pci.h>
15#include "nv_local.h"
16#include "nv_type.h"
17#include "nv_proto.h"
18
19#ifdef CONFIG_PMAC_BACKLIGHT
20#include <asm/backlight.h>
21#include <asm/machdep.h>
22#endif
23
24/* We do not have any information about which values are allowed, thus
25 * we used safe values.
26 */
27#define MIN_LEVEL 0x158
28#define MAX_LEVEL 0x534
29
30static struct backlight_properties nvidia_bl_data;
31
32static int nvidia_bl_get_level_brightness(struct nvidia_par *par,
33 int level)
34{
35 struct fb_info *info = pci_get_drvdata(par->pci_dev);
36 int nlevel;
37
38 /* Get and convert the value */
39 mutex_lock(&info->bl_mutex);
40 nlevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL;
41 mutex_unlock(&info->bl_mutex);
42
43 if (nlevel < 0)
44 nlevel = 0;
45 else if (nlevel < MIN_LEVEL)
46 nlevel = MIN_LEVEL;
47 else if (nlevel > MAX_LEVEL)
48 nlevel = MAX_LEVEL;
49
50 return nlevel;
51}
52
53static int nvidia_bl_update_status(struct backlight_device *bd)
54{
55 struct nvidia_par *par = class_get_devdata(&bd->class_dev);
56 u32 tmp_pcrt, tmp_pmc, fpcontrol;
57 int level;
58
59 if (!par->FlatPanel)
60 return 0;
61
62 if (bd->props->power != FB_BLANK_UNBLANK ||
63 bd->props->fb_blank != FB_BLANK_UNBLANK)
64 level = 0;
65 else
66 level = bd->props->brightness;
67
68 tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF;
69 tmp_pcrt = NV_RD32(par->PCRTC0, 0x081C) & 0xFFFFFFFC;
70 fpcontrol = NV_RD32(par->PRAMDAC, 0x0848) & 0xCFFFFFCC;
71
72 if (level > 0) {
73 tmp_pcrt |= 0x1;
74 tmp_pmc |= (1 << 31); /* backlight bit */
75 tmp_pmc |= nvidia_bl_get_level_brightness(par, level) << 16;
76 fpcontrol |= par->fpSyncs;
77 } else
78 fpcontrol |= 0x20000022;
79
80 NV_WR32(par->PCRTC0, 0x081C, tmp_pcrt);
81 NV_WR32(par->PMC, 0x10F0, tmp_pmc);
82 NV_WR32(par->PRAMDAC, 0x848, fpcontrol);
83
84 return 0;
85}
86
87static int nvidia_bl_get_brightness(struct backlight_device *bd)
88{
89 return bd->props->brightness;
90}
91
92static struct backlight_properties nvidia_bl_data = {
93 .owner = THIS_MODULE,
94 .get_brightness = nvidia_bl_get_brightness,
95 .update_status = nvidia_bl_update_status,
96 .max_brightness = (FB_BACKLIGHT_LEVELS - 1),
97};
98
99void nvidia_bl_init(struct nvidia_par *par)
100{
101 struct fb_info *info = pci_get_drvdata(par->pci_dev);
102 struct backlight_device *bd;
103 char name[12];
104
105 if (!par->FlatPanel)
106 return;
107
108#ifdef CONFIG_PMAC_BACKLIGHT
109 if (!machine_is(powermac) ||
110 !pmac_has_backlight_type("mnca"))
111 return;
112#endif
113
114 snprintf(name, sizeof(name), "nvidiabl%d", info->node);
115
116 bd = backlight_device_register(name, par, &nvidia_bl_data);
117 if (IS_ERR(bd)) {
118 info->bl_dev = NULL;
119 printk("nvidia: Backlight registration failed\n");
120 goto error;
121 }
122
123 mutex_lock(&info->bl_mutex);
124 info->bl_dev = bd;
125 fb_bl_default_curve(info, 0,
126 0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL,
127 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
128 mutex_unlock(&info->bl_mutex);
129
130 up(&bd->sem);
131 bd->props->brightness = nvidia_bl_data.max_brightness;
132 bd->props->power = FB_BLANK_UNBLANK;
133 bd->props->update_status(bd);
134 down(&bd->sem);
135
136#ifdef CONFIG_PMAC_BACKLIGHT
137 mutex_lock(&pmac_backlight_mutex);
138 if (!pmac_backlight)
139 pmac_backlight = bd;
140 mutex_unlock(&pmac_backlight_mutex);
141#endif
142
143 printk("nvidia: Backlight initialized (%s)\n", name);
144
145 return;
146
147error:
148 return;
149}
150
151void nvidia_bl_exit(struct nvidia_par *par)
152{
153 struct fb_info *info = pci_get_drvdata(par->pci_dev);
154
155#ifdef CONFIG_PMAC_BACKLIGHT
156 mutex_lock(&pmac_backlight_mutex);
157#endif
158
159 mutex_lock(&info->bl_mutex);
160 if (info->bl_dev) {
161#ifdef CONFIG_PMAC_BACKLIGHT
162 if (pmac_backlight == info->bl_dev)
163 pmac_backlight = NULL;
164#endif
165
166 backlight_device_unregister(info->bl_dev);
167
168 printk("nvidia: Backlight unloaded\n");
169 }
170 mutex_unlock(&info->bl_mutex);
171
172#ifdef CONFIG_PMAC_BACKLIGHT
173 mutex_unlock(&pmac_backlight_mutex);
174#endif
175}
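
The conversion helper added above, nvidia_bl_get_level_brightness(), is just a scale-and-clamp into the panel range the driver treats as safe. A minimal standalone sketch of that arithmetic follows; it assumes FB_BACKLIGHT_MAX is 0xFF (the fb.h value is not shown in this patch) and uses a small stand-in array in place of info->bl_curve, which the driver reads under bl_mutex.

#include <stdio.h>

#define FB_BACKLIGHT_MAX  0xFF          /* assumed value of the fb.h constant */
#define MIN_LEVEL         0x158
#define MAX_LEVEL         0x534

/* Mirror of the driver's scale-and-clamp, on a plain array. */
static int convert_level(const unsigned char *bl_curve, int level)
{
        int nlevel = bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL;

        if (nlevel < 0)                 /* kept for parity with the driver */
                nlevel = 0;
        else if (nlevel < MIN_LEVEL)
                nlevel = MIN_LEVEL;
        else if (nlevel > MAX_LEVEL)
                nlevel = MAX_LEVEL;
        return nlevel;
}

int main(void)
{
        unsigned char curve[2] = { 0x42, 0xff };   /* stand-in for info->bl_curve */

        printf("%d %d\n", convert_level(curve, 0), convert_level(curve, 1));
        return 0;
}
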
diff --git a/drivers/video/nvidia/nv_proto.h b/drivers/video/nvidia/nv_proto.h
index b149a690ee0f..6fba656cd56b 100644
--- a/drivers/video/nvidia/nv_proto.h
+++ b/drivers/video/nvidia/nv_proto.h
@@ -63,4 +63,14 @@ extern void nvidiafb_imageblit(struct fb_info *info,
63 const struct fb_image *image); 63 const struct fb_image *image);
64extern int nvidiafb_sync(struct fb_info *info); 64extern int nvidiafb_sync(struct fb_info *info);
65extern u8 byte_rev[256]; 65extern u8 byte_rev[256];
66
67/* in nv_backlight.h */
68#ifdef CONFIG_FB_NVIDIA_BACKLIGHT
69extern void nvidia_bl_init(struct nvidia_par *par);
70extern void nvidia_bl_exit(struct nvidia_par *par);
71#else
72static inline void nvidia_bl_init(struct nvidia_par *par) {}
73static inline void nvidia_bl_exit(struct nvidia_par *par) {}
74#endif
75
66#endif /* __NV_PROTO_H__ */ 76#endif /* __NV_PROTO_H__ */
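
The nv_proto.h hunk uses the usual config-stub idiom: real declarations when CONFIG_FB_NVIDIA_BACKLIGHT is set, empty static inlines otherwise, so nvidia.c can call nvidia_bl_init()/nvidia_bl_exit() without any #ifdef. A generic sketch of the idiom, with purely illustrative names (FEATURE_BACKLIGHT, struct device_par):

struct device_par;                       /* illustrative stand-in for nvidia_par */

#ifdef FEATURE_BACKLIGHT                 /* illustrative stand-in for the Kconfig symbol */
extern void feature_bl_init(struct device_par *par);
extern void feature_bl_exit(struct device_par *par);
#else
/* Empty inline stubs: callers compile unchanged and the calls vanish. */
static inline void feature_bl_init(struct device_par *par) { (void)par; }
static inline void feature_bl_exit(struct device_par *par) { (void)par; }
#endif

int main(void)
{
        feature_bl_init(0);
        feature_bl_exit(0);
        return 0;
}
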
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 093ab9977c7c..03a7c1e9ce38 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/console.h> 24#include <linux/console.h>
25#include <linux/backlight.h>
25#ifdef CONFIG_MTRR 26#ifdef CONFIG_MTRR
26#include <asm/mtrr.h> 27#include <asm/mtrr.h>
27#endif 28#endif
@@ -29,10 +30,6 @@
29#include <asm/prom.h> 30#include <asm/prom.h>
30#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
31#endif 32#endif
32#ifdef CONFIG_PMAC_BACKLIGHT
33#include <asm/machdep.h>
34#include <asm/backlight.h>
35#endif
36 33
37#include "nv_local.h" 34#include "nv_local.h"
38#include "nv_type.h" 35#include "nv_type.h"
@@ -470,75 +467,6 @@ static struct fb_var_screeninfo __devinitdata nvidiafb_default_var = {
470 .vmode = FB_VMODE_NONINTERLACED 467 .vmode = FB_VMODE_NONINTERLACED
471}; 468};
472 469
473/*
474 * Backlight control
475 */
476#ifdef CONFIG_PMAC_BACKLIGHT
477
478static int nvidia_backlight_levels[] = {
479 0x158,
480 0x192,
481 0x1c6,
482 0x200,
483 0x234,
484 0x268,
485 0x2a2,
486 0x2d6,
487 0x310,
488 0x344,
489 0x378,
490 0x3b2,
491 0x3e6,
492 0x41a,
493 0x454,
494 0x534,
495};
496
497/* ------------------------------------------------------------------------- *
498 *
499 * Backlight operations
500 *
501 * ------------------------------------------------------------------------- */
502
503static int nvidia_set_backlight_enable(int on, int level, void *data)
504{
505 struct nvidia_par *par = data;
506 u32 tmp_pcrt, tmp_pmc, fpcontrol;
507
508 tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF;
509 tmp_pcrt = NV_RD32(par->PCRTC0, 0x081C) & 0xFFFFFFFC;
510 fpcontrol = NV_RD32(par->PRAMDAC, 0x0848) & 0xCFFFFFCC;
511
512 if (on && (level > BACKLIGHT_OFF)) {
513 tmp_pcrt |= 0x1;
514 tmp_pmc |= (1 << 31); // backlight bit
515 tmp_pmc |= nvidia_backlight_levels[level - 1] << 16;
516 }
517
518 if (on)
519 fpcontrol |= par->fpSyncs;
520 else
521 fpcontrol |= 0x20000022;
522
523 NV_WR32(par->PCRTC0, 0x081C, tmp_pcrt);
524 NV_WR32(par->PMC, 0x10F0, tmp_pmc);
525 NV_WR32(par->PRAMDAC, 0x848, fpcontrol);
526
527 return 0;
528}
529
530static int nvidia_set_backlight_level(int level, void *data)
531{
532 return nvidia_set_backlight_enable(1, level, data);
533}
534
535static struct backlight_controller nvidia_backlight_controller = {
536 nvidia_set_backlight_enable,
537 nvidia_set_backlight_level
538};
539
540#endif /* CONFIG_PMAC_BACKLIGHT */
541
542static void nvidiafb_load_cursor_image(struct nvidia_par *par, u8 * data8, 470static void nvidiafb_load_cursor_image(struct nvidia_par *par, u8 * data8,
543 u16 bg, u16 fg, u32 w, u32 h) 471 u16 bg, u16 fg, u32 w, u32 h)
544{ 472{
@@ -1355,10 +1283,15 @@ static int nvidiafb_blank(int blank, struct fb_info *info)
1355 NVWriteSeq(par, 0x01, tmp); 1283 NVWriteSeq(par, 0x01, tmp);
1356 NVWriteCrtc(par, 0x1a, vesa); 1284 NVWriteCrtc(par, 0x1a, vesa);
1357 1285
1358#ifdef CONFIG_PMAC_BACKLIGHT 1286#ifdef CONFIG_FB_NVIDIA_BACKLIGHT
1359 if (par->FlatPanel && machine_is(powermac)) { 1287 mutex_lock(&info->bl_mutex);
1360 set_backlight_enable(!blank); 1288 if (info->bl_dev) {
1289 down(&info->bl_dev->sem);
1290 info->bl_dev->props->power = blank;
1291 info->bl_dev->props->update_status(info->bl_dev);
1292 up(&info->bl_dev->sem);
1361 } 1293 }
1294 mutex_unlock(&info->bl_mutex);
1362#endif 1295#endif
1363 1296
1364 NVTRACE_LEAVE(); 1297 NVTRACE_LEAVE();
@@ -1741,11 +1674,9 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
1741 "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n", 1674 "PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
1742 info->fix.id, 1675 info->fix.id,
1743 par->FbMapSize / (1024 * 1024), info->fix.smem_start); 1676 par->FbMapSize / (1024 * 1024), info->fix.smem_start);
1744#ifdef CONFIG_PMAC_BACKLIGHT 1677
1745 if (par->FlatPanel && machine_is(powermac)) 1678 nvidia_bl_init(par);
1746 register_backlight_controller(&nvidia_backlight_controller, 1679
1747 par, "mnca");
1748#endif
1749 NVTRACE_LEAVE(); 1680 NVTRACE_LEAVE();
1750 return 0; 1681 return 0;
1751 1682
@@ -1775,6 +1706,8 @@ static void __exit nvidiafb_remove(struct pci_dev *pd)
1775 1706
1776 NVTRACE_ENTER(); 1707 NVTRACE_ENTER();
1777 1708
1709 nvidia_bl_exit(par);
1710
1778 unregister_framebuffer(info); 1711 unregister_framebuffer(info);
1779#ifdef CONFIG_MTRR 1712#ifdef CONFIG_MTRR
1780 if (par->mtrr.vram_valid) 1713 if (par->mtrr.vram_valid)
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 3e9308f0f165..d4384ab1df65 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -41,6 +41,7 @@
41#include <linux/fb.h> 41#include <linux/fb.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/pci.h> 43#include <linux/pci.h>
44#include <linux/backlight.h>
44#ifdef CONFIG_MTRR 45#ifdef CONFIG_MTRR
45#include <asm/mtrr.h> 46#include <asm/mtrr.h>
46#endif 47#endif
@@ -272,34 +273,154 @@ static const struct riva_regs reg_template = {
272/* 273/*
273 * Backlight control 274 * Backlight control
274 */ 275 */
275#ifdef CONFIG_PMAC_BACKLIGHT 276#ifdef CONFIG_FB_RIVA_BACKLIGHT
277/* We do not have any information about which values are allowed, thus
278 * we use safe values.
279 */
280#define MIN_LEVEL 0x158
281#define MAX_LEVEL 0x534
276 282
277static int riva_backlight_levels[] = { 283static struct backlight_properties riva_bl_data;
278 0x158, 284
279 0x192, 285static int riva_bl_get_level_brightness(struct riva_par *par,
280 0x1c6, 286 int level)
281 0x200, 287{
282 0x234, 288 struct fb_info *info = pci_get_drvdata(par->pdev);
283 0x268, 289 int nlevel;
284 0x2a2, 290
285 0x2d6, 291 /* Get and convert the value */
286 0x310, 292 mutex_lock(&info->bl_mutex);
287 0x344, 293 nlevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL;
288 0x378, 294 mutex_unlock(&info->bl_mutex);
289 0x3b2, 295
290 0x3e6, 296 if (nlevel < 0)
291 0x41a, 297 nlevel = 0;
292 0x454, 298 else if (nlevel < MIN_LEVEL)
293 0x534, 299 nlevel = MIN_LEVEL;
294}; 300 else if (nlevel > MAX_LEVEL)
301 nlevel = MAX_LEVEL;
302
303 return nlevel;
304}
305
306static int riva_bl_update_status(struct backlight_device *bd)
307{
308 struct riva_par *par = class_get_devdata(&bd->class_dev);
309 U032 tmp_pcrt, tmp_pmc;
310 int level;
311
312 if (bd->props->power != FB_BLANK_UNBLANK ||
313 bd->props->fb_blank != FB_BLANK_UNBLANK)
314 level = 0;
315 else
316 level = bd->props->brightness;
317
318 tmp_pmc = par->riva.PMC[0x10F0/4] & 0x0000FFFF;
319 tmp_pcrt = par->riva.PCRTC0[0x081C/4] & 0xFFFFFFFC;
320 if(level > 0) {
321 tmp_pcrt |= 0x1;
322 tmp_pmc |= (1 << 31); /* backlight bit */
323 tmp_pmc |= riva_bl_get_level_brightness(par, level) << 16; /* level */
324 }
325 par->riva.PCRTC0[0x081C/4] = tmp_pcrt;
326 par->riva.PMC[0x10F0/4] = tmp_pmc;
327
328 return 0;
329}
330
331static int riva_bl_get_brightness(struct backlight_device *bd)
332{
333 return bd->props->brightness;
334}
295 335
296static int riva_set_backlight_enable(int on, int level, void *data); 336static struct backlight_properties riva_bl_data = {
297static int riva_set_backlight_level(int level, void *data); 337 .owner = THIS_MODULE,
298static struct backlight_controller riva_backlight_controller = { 338 .get_brightness = riva_bl_get_brightness,
299 riva_set_backlight_enable, 339 .update_status = riva_bl_update_status,
300 riva_set_backlight_level 340 .max_brightness = (FB_BACKLIGHT_LEVELS - 1),
301}; 341};
302#endif /* CONFIG_PMAC_BACKLIGHT */ 342
343static void riva_bl_init(struct riva_par *par)
344{
345 struct fb_info *info = pci_get_drvdata(par->pdev);
346 struct backlight_device *bd;
347 char name[12];
348
349 if (!par->FlatPanel)
350 return;
351
352#ifdef CONFIG_PMAC_BACKLIGHT
353 if (!machine_is(powermac) ||
354 !pmac_has_backlight_type("mnca"))
355 return;
356#endif
357
358 snprintf(name, sizeof(name), "rivabl%d", info->node);
359
360 bd = backlight_device_register(name, par, &riva_bl_data);
361 if (IS_ERR(bd)) {
362 info->bl_dev = NULL;
363 printk("riva: Backlight registration failed\n");
364 goto error;
365 }
366
367 mutex_lock(&info->bl_mutex);
368 info->bl_dev = bd;
369 fb_bl_default_curve(info, 0,
370 0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL,
371 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
372 mutex_unlock(&info->bl_mutex);
373
374 up(&bd->sem);
375 bd->props->brightness = riva_bl_data.max_brightness;
376 bd->props->power = FB_BLANK_UNBLANK;
377 bd->props->update_status(bd);
378 down(&bd->sem);
379
380#ifdef CONFIG_PMAC_BACKLIGHT
381 mutex_lock(&pmac_backlight_mutex);
382 if (!pmac_backlight)
383 pmac_backlight = bd;
384 mutex_unlock(&pmac_backlight_mutex);
385#endif
386
387 printk("riva: Backlight initialized (%s)\n", name);
388
389 return;
390
391error:
392 return;
393}
394
395static void riva_bl_exit(struct riva_par *par)
396{
397 struct fb_info *info = pci_get_drvdata(par->pdev);
398
399#ifdef CONFIG_PMAC_BACKLIGHT
400 mutex_lock(&pmac_backlight_mutex);
401#endif
402
403 mutex_lock(&info->bl_mutex);
404 if (info->bl_dev) {
405#ifdef CONFIG_PMAC_BACKLIGHT
406 if (pmac_backlight == info->bl_dev)
407 pmac_backlight = NULL;
408#endif
409
410 backlight_device_unregister(info->bl_dev);
411
412 printk("riva: Backlight unloaded\n");
413 }
414 mutex_unlock(&info->bl_mutex);
415
416#ifdef CONFIG_PMAC_BACKLIGHT
417 mutex_unlock(&pmac_backlight_mutex);
418#endif
419}
420#else
421static inline void riva_bl_init(struct riva_par *par) {}
422static inline void riva_bl_exit(struct riva_par *par) {}
423#endif /* CONFIG_FB_RIVA_BACKLIGHT */
303 424
304/* ------------------------------------------------------------------------- * 425/* ------------------------------------------------------------------------- *
305 * 426 *
@@ -973,36 +1094,6 @@ static int riva_get_cmap_len(const struct fb_var_screeninfo *var)
973 1094
974/* ------------------------------------------------------------------------- * 1095/* ------------------------------------------------------------------------- *
975 * 1096 *
976 * Backlight operations
977 *
978 * ------------------------------------------------------------------------- */
979
980#ifdef CONFIG_PMAC_BACKLIGHT
981static int riva_set_backlight_enable(int on, int level, void *data)
982{
983 struct riva_par *par = data;
984 U032 tmp_pcrt, tmp_pmc;
985
986 tmp_pmc = par->riva.PMC[0x10F0/4] & 0x0000FFFF;
987 tmp_pcrt = par->riva.PCRTC0[0x081C/4] & 0xFFFFFFFC;
988 if(on && (level > BACKLIGHT_OFF)) {
989 tmp_pcrt |= 0x1;
990 tmp_pmc |= (1 << 31); // backlight bit
991 tmp_pmc |= riva_backlight_levels[level-1] << 16; // level
992 }
993 par->riva.PCRTC0[0x081C/4] = tmp_pcrt;
994 par->riva.PMC[0x10F0/4] = tmp_pmc;
995 return 0;
996}
997
998static int riva_set_backlight_level(int level, void *data)
999{
1000 return riva_set_backlight_enable(1, level, data);
1001}
1002#endif /* CONFIG_PMAC_BACKLIGHT */
1003
1004/* ------------------------------------------------------------------------- *
1005 *
1006 * framebuffer operations 1097 * framebuffer operations
1007 * 1098 *
1008 * ------------------------------------------------------------------------- */ 1099 * ------------------------------------------------------------------------- */
@@ -1247,10 +1338,15 @@ static int rivafb_blank(int blank, struct fb_info *info)
1247 SEQout(par, 0x01, tmp); 1338 SEQout(par, 0x01, tmp);
1248 CRTCout(par, 0x1a, vesa); 1339 CRTCout(par, 0x1a, vesa);
1249 1340
1250#ifdef CONFIG_PMAC_BACKLIGHT 1341#ifdef CONFIG_FB_RIVA_BACKLIGHT
1251 if ( par->FlatPanel && machine_is(powermac)) { 1342 mutex_lock(&info->bl_mutex);
1252 set_backlight_enable(!blank); 1343 if (info->bl_dev) {
1344 down(&info->bl_dev->sem);
1345 info->bl_dev->props->power = blank;
1346 info->bl_dev->props->update_status(info->bl_dev);
1347 up(&info->bl_dev->sem);
1253 } 1348 }
1349 mutex_unlock(&info->bl_mutex);
1254#endif 1350#endif
1255 1351
1256 NVTRACE_LEAVE(); 1352 NVTRACE_LEAVE();
@@ -2037,11 +2133,9 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
2037 RIVAFB_VERSION, 2133 RIVAFB_VERSION,
2038 info->fix.smem_len / (1024 * 1024), 2134 info->fix.smem_len / (1024 * 1024),
2039 info->fix.smem_start); 2135 info->fix.smem_start);
2040#ifdef CONFIG_PMAC_BACKLIGHT 2136
2041 if (default_par->FlatPanel && machine_is(powermac)) 2137 riva_bl_init(info->par);
2042 register_backlight_controller(&riva_backlight_controller, 2138
2043 default_par, "mnca");
2044#endif
2045 NVTRACE_LEAVE(); 2139 NVTRACE_LEAVE();
2046 return 0; 2140 return 0;
2047 2141
@@ -2074,6 +2168,8 @@ static void __exit rivafb_remove(struct pci_dev *pd)
2074 2168
2075 NVTRACE_ENTER(); 2169 NVTRACE_ENTER();
2076 2170
2171 riva_bl_exit(par);
2172
2077#ifdef CONFIG_FB_RIVA_I2C 2173#ifdef CONFIG_FB_RIVA_I2C
2078 riva_delete_i2c_busses(par); 2174 riva_delete_i2c_busses(par);
2079 kfree(par->EDID); 2175 kfree(par->EDID);
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 9ac2d3171187..41f8c2d93892 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -551,7 +551,7 @@ static inline void enable_mmio(void)
551#define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) 551#define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F)
552 552
553/* Return flat panel's maximum x resolution */ 553/* Return flat panel's maximum x resolution */
554static int __init get_nativex(void) 554static int __devinit get_nativex(void)
555{ 555{
556 int x,y,tmp; 556 int x,y,tmp;
557 557
@@ -658,7 +658,7 @@ static void set_number_of_lines(int lines)
658 * If we see that FP is active we assume we have one. 658 * If we see that FP is active we assume we have one.
659 * Otherwise we have a CRT display. User can override. 659 * Otherwise we have a CRT display. User can override.
660 */ 660 */
661static unsigned int __init get_displaytype(void) 661static unsigned int __devinit get_displaytype(void)
662{ 662{
663 if (fp) 663 if (fp)
664 return DISPLAY_FP; 664 return DISPLAY_FP;
@@ -668,7 +668,7 @@ static unsigned int __init get_displaytype(void)
668} 668}
669 669
670/* Try detecting the video memory size */ 670/* Try detecting the video memory size */
671static unsigned int __init get_memsize(void) 671static unsigned int __devinit get_memsize(void)
672{ 672{
673 unsigned char tmp, tmp2; 673 unsigned char tmp, tmp2;
674 unsigned int k; 674 unsigned int k;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 2cb87ba4b1c1..5c6bdf82146c 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -530,9 +530,6 @@ error:
530 if (vfid) 530 if (vfid)
531 v9fs_fid_destroy(vfid); 531 v9fs_fid_destroy(vfid);
532 532
533 if (inode)
534 iput(inode);
535
536 return err; 533 return err;
537} 534}
538 535
@@ -1054,6 +1051,9 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
1054 int ret; 1051 int ret;
1055 char *link = __getname(); 1052 char *link = __getname();
1056 1053
1054 if (unlikely(!link))
1055 return -ENOMEM;
1056
1057 if (buflen > PATH_MAX) 1057 if (buflen > PATH_MAX)
1058 buflen = PATH_MAX; 1058 buflen = PATH_MAX;
1059 1059
@@ -1171,9 +1171,6 @@ error:
1171 if (vfid) 1171 if (vfid)
1172 v9fs_fid_destroy(vfid); 1172 v9fs_fid_destroy(vfid);
1173 1173
1174 if (inode)
1175 iput(inode);
1176
1177 return err; 1174 return err;
1178 1175
1179} 1176}
@@ -1227,6 +1224,9 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
1227 } 1224 }
1228 1225
1229 name = __getname(); 1226 name = __getname();
1227 if (unlikely(!name))
1228 return -ENOMEM;
1229
1230 sprintf(name, "%d\n", oldfid->fid); 1230 sprintf(name, "%d\n", oldfid->fid);
1231 retval = v9fs_vfs_mkspecial(dir, dentry, V9FS_DMLINK, name); 1231 retval = v9fs_vfs_mkspecial(dir, dentry, V9FS_DMLINK, name);
1232 __putname(name); 1232 __putname(name);
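
Both fs/9p hunks that add a check do the same thing: __getname() can fail, so its result must be tested before use. A userspace sketch of the pattern, with malloc() standing in for __getname() and an illustrative helper name:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int build_link_name(int fid, char **out)
{
        char *name = malloc(PATH_MAX);          /* stands in for __getname() */

        if (!name)                              /* the check the patch adds */
                return -ENOMEM;
        snprintf(name, PATH_MAX, "%d\n", fid);
        *out = name;
        return 0;
}

int main(void)
{
        char *name;

        if (build_link_name(42, &name) == 0) {
                fputs(name, stdout);
                free(name);                     /* stands in for __putname() */
        }
        return 0;
}
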
diff --git a/fs/Kconfig b/fs/Kconfig
index 2aa4624cc018..1cdc043922d5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -776,7 +776,8 @@ endmenu
776menu "Pseudo filesystems" 776menu "Pseudo filesystems"
777 777
778config PROC_FS 778config PROC_FS
779 bool "/proc file system support" 779 bool "/proc file system support" if EMBEDDED
780 default y
780 help 781 help
781 This is a virtual file system providing information about the status 782 This is a virtual file system providing information about the status
782 of the system. "Virtual" means that it doesn't take up any space on 783 of the system. "Virtual" means that it doesn't take up any space on
@@ -1370,11 +1371,19 @@ config UFS_FS
1370 1371
1371config UFS_FS_WRITE 1372config UFS_FS_WRITE
1372 bool "UFS file system write support (DANGEROUS)" 1373 bool "UFS file system write support (DANGEROUS)"
1373 depends on UFS_FS && EXPERIMENTAL && BROKEN 1374 depends on UFS_FS && EXPERIMENTAL
1374 help 1375 help
1375 Say Y here if you want to try writing to UFS partitions. This is 1376 Say Y here if you want to try writing to UFS partitions. This is
1376 experimental, so you should back up your UFS partitions beforehand. 1377 experimental, so you should back up your UFS partitions beforehand.
1377 1378
1379config UFS_DEBUG
1380 bool "UFS debugging"
1381 depends on UFS_FS
1382 help
1383 If you are experiencing any problems with the UFS filesystem, say
1384 Y here. This will result in _many_ additional debugging messages to be
1385 written to the system log.
1386
1378endmenu 1387endmenu
1379 1388
1380menu "Network File Systems" 1389menu "Network File Systems"
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 8765cba35bb9..5200f4938df0 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -271,6 +271,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
271 int reserved; 271 int reserved;
272 unsigned long mount_flags; 272 unsigned long mount_flags;
273 int tmp_flags; /* fix remount prototype... */ 273 int tmp_flags; /* fix remount prototype... */
274 u8 sig[4];
274 275
275 pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options"); 276 pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options");
276 277
@@ -370,8 +371,9 @@ got_root:
370 printk(KERN_ERR "AFFS: Cannot read boot block\n"); 371 printk(KERN_ERR "AFFS: Cannot read boot block\n");
371 goto out_error; 372 goto out_error;
372 } 373 }
373 chksum = be32_to_cpu(*(__be32 *)boot_bh->b_data); 374 memcpy(sig, boot_bh->b_data, 4);
374 brelse(boot_bh); 375 brelse(boot_bh);
376 chksum = be32_to_cpu(*(__be32 *)sig);
375 377
376 /* Dircache filesystems are compatible with non-dircache ones 378 /* Dircache filesystems are compatible with non-dircache ones
377 * when reading. As long as they aren't supported, writing is 379 * when reading. As long as they aren't supported, writing is
@@ -420,11 +422,11 @@ got_root:
420 } 422 }
421 423
422 if (mount_flags & SF_VERBOSE) { 424 if (mount_flags & SF_VERBOSE) {
423 chksum = cpu_to_be32(chksum); 425 u8 len = AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0];
424 printk(KERN_NOTICE "AFFS: Mounting volume \"%*s\": Type=%.3s\\%c, Blocksize=%d\n", 426 printk(KERN_NOTICE "AFFS: Mounting volume \"%.*s\": Type=%.3s\\%c, Blocksize=%d\n",
425 AFFS_ROOT_TAIL(sb, root_bh)->disk_name[0], 427 len > 31 ? 31 : len,
426 AFFS_ROOT_TAIL(sb, root_bh)->disk_name + 1, 428 AFFS_ROOT_TAIL(sb, root_bh)->disk_name + 1,
427 (char *)&chksum,((char *)&chksum)[3] + '0',blocksize); 429 sig, sig[3] + '0', blocksize);
428 } 430 }
429 431
430 sb->s_flags |= MS_NODEV | MS_NOSUID; 432 sb->s_flags |= MS_NODEV | MS_NOSUID;
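
The AFFS mount-message fix relies on disk_name being a BCPL-style string: the first byte is the length and the text follows, and the on-disk field is short, so the length is clamped and printed with "%.*s". A small illustration (the sample buffer is made up):

#include <stdio.h>

int main(void)
{
        /* BCPL-style name: length byte first, then the characters. */
        unsigned char disk_name[32] = { 6, 'W', 'o', 'r', 'k', 'b', '1' };
        unsigned char len = disk_name[0];

        printf("AFFS: Mounting volume \"%.*s\"\n",
               len > 31 ? 31 : len, disk_name + 1);
        return 0;
}
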
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index b8ce02607d66..4456d1daa40f 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -174,6 +174,12 @@ static int autofs4_tree_busy(struct vfsmount *mnt,
174 struct autofs_info *ino = autofs4_dentry_ino(p); 174 struct autofs_info *ino = autofs4_dentry_ino(p);
175 unsigned int ino_count = atomic_read(&ino->count); 175 unsigned int ino_count = atomic_read(&ino->count);
176 176
177 /*
178 * Clean stale dentries below that have not been
179 * invalidated after a mount failure during lookup
180 */
181 d_invalidate(p);
182
177 /* allow for dget above and top is already dgot */ 183 /* allow for dget above and top is already dgot */
178 if (p == top) 184 if (p == top)
179 ino_count += 2; 185 ino_count += 2;
diff --git a/fs/dcache.c b/fs/dcache.c
index 313b54b2b8f2..b85fda360533 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -406,7 +406,7 @@ static void prune_dcache(int count, struct super_block *sb)
406 cond_resched_lock(&dcache_lock); 406 cond_resched_lock(&dcache_lock);
407 407
408 tmp = dentry_unused.prev; 408 tmp = dentry_unused.prev;
409 if (unlikely(sb)) { 409 if (sb) {
410 /* Try to find a dentry for this sb, but don't try 410 /* Try to find a dentry for this sb, but don't try
411 * too hard, if they aren't near the tail they will 411 * too hard, if they aren't near the tail they will
412 * be moved down again soon 412 * be moved down again soon
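
The dcache.c hunk above drops an unlikely() hint rather than changing behaviour: unlikely() is only a branch-prediction annotation (in the kernel it expands to GCC's __builtin_expect()), and it only pays off when the condition really is rare. A userspace sketch of the hint, using the standard expansion:

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

static int prune(const void *sb)
{
        /* Hinting this as unlikely only helps if sb is rarely non-NULL;
         * if callers normally pass one, the plain test reads better. */
        if (unlikely(sb != NULL))
                return 1;
        return 0;
}

int main(void)
{
        printf("%d %d\n", prune(NULL), prune("sb"));
        return 0;
}
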
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 08e7e6a555ca..9c677bbd0b08 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * fs/eventpoll.c ( Efficent event polling implementation ) 2 * fs/eventpoll.c ( Efficent event polling implementation )
3 * Copyright (C) 2001,...,2003 Davide Libenzi 3 * Copyright (C) 2001,...,2006 Davide Libenzi
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -1004,7 +1004,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1004 1004
1005 /* Notify waiting tasks that events are available */ 1005 /* Notify waiting tasks that events are available */
1006 if (waitqueue_active(&ep->wq)) 1006 if (waitqueue_active(&ep->wq))
1007 wake_up(&ep->wq); 1007 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
1008 if (waitqueue_active(&ep->poll_wait)) 1008 if (waitqueue_active(&ep->poll_wait))
1009 pwake++; 1009 pwake++;
1010 } 1010 }
@@ -1083,7 +1083,8 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
1083 1083
1084 /* Notify waiting tasks that events are available */ 1084 /* Notify waiting tasks that events are available */
1085 if (waitqueue_active(&ep->wq)) 1085 if (waitqueue_active(&ep->wq))
1086 wake_up(&ep->wq); 1086 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
1087 TASK_INTERRUPTIBLE);
1087 if (waitqueue_active(&ep->poll_wait)) 1088 if (waitqueue_active(&ep->poll_wait))
1088 pwake++; 1089 pwake++;
1089 } 1090 }
@@ -1260,7 +1261,8 @@ is_linked:
1260 * wait list. 1261 * wait list.
1261 */ 1262 */
1262 if (waitqueue_active(&ep->wq)) 1263 if (waitqueue_active(&ep->wq))
1263 wake_up(&ep->wq); 1264 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
1265 TASK_INTERRUPTIBLE);
1264 if (waitqueue_active(&ep->poll_wait)) 1266 if (waitqueue_active(&ep->poll_wait))
1265 pwake++; 1267 pwake++;
1266 1268
@@ -1444,7 +1446,8 @@ static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist)
1444 * wait list. 1446 * wait list.
1445 */ 1447 */
1446 if (waitqueue_active(&ep->wq)) 1448 if (waitqueue_active(&ep->wq))
1447 wake_up(&ep->wq); 1449 __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
1450 TASK_INTERRUPTIBLE);
1448 if (waitqueue_active(&ep->poll_wait)) 1451 if (waitqueue_active(&ep->poll_wait))
1449 pwake++; 1452 pwake++;
1450 } 1453 }
@@ -1516,7 +1519,7 @@ retry:
1516 * ep_poll_callback() when events will become available. 1519 * ep_poll_callback() when events will become available.
1517 */ 1520 */
1518 init_waitqueue_entry(&wait, current); 1521 init_waitqueue_entry(&wait, current);
1519 add_wait_queue(&ep->wq, &wait); 1522 __add_wait_queue(&ep->wq, &wait);
1520 1523
1521 for (;;) { 1524 for (;;) {
1522 /* 1525 /*
@@ -1536,7 +1539,7 @@ retry:
1536 jtimeout = schedule_timeout(jtimeout); 1539 jtimeout = schedule_timeout(jtimeout);
1537 write_lock_irqsave(&ep->lock, flags); 1540 write_lock_irqsave(&ep->lock, flags);
1538 } 1541 }
1539 remove_wait_queue(&ep->wq, &wait); 1542 __remove_wait_queue(&ep->wq, &wait);
1540 1543
1541 set_current_state(TASK_RUNNING); 1544 set_current_state(TASK_RUNNING);
1542 } 1545 }
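
The eventpoll changes switch to the double-underscore variants (__add_wait_queue, __remove_wait_queue, __wake_up_locked) because the caller already holds ep->lock; the underscored functions skip taking the queue lock again. A generic userspace sketch of that "_locked variant" convention, with illustrative names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int q_items;

static void enqueue_locked(void)        /* caller must hold q_lock */
{
        q_items++;
}

static void enqueue(void)               /* caller must not hold q_lock */
{
        pthread_mutex_lock(&q_lock);
        enqueue_locked();
        pthread_mutex_unlock(&q_lock);
}

int main(void)
{
        pthread_mutex_lock(&q_lock);
        enqueue_locked();               /* like __add_wait_queue() under ep->lock */
        pthread_mutex_unlock(&q_lock);

        enqueue();                      /* like add_wait_queue() */
        printf("%d\n", q_items);
        return 0;
}
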
diff --git a/fs/ext2/Makefile b/fs/ext2/Makefile
index c5d02da73bc3..e0b2b43c1fdb 100644
--- a/fs/ext2/Makefile
+++ b/fs/ext2/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_EXT2_FS) += ext2.o 5obj-$(CONFIG_EXT2_FS) += ext2.o
6 6
7ext2-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \ 7ext2-y := balloc.o dir.o file.o fsync.o ialloc.o inode.o \
8 ioctl.o namei.o super.o symlink.o 8 ioctl.o namei.o super.o symlink.o
9 9
10ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o 10ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 2c00953d4b0b..433a213a8bd9 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -521,6 +521,26 @@ io_error:
521 goto out_release; 521 goto out_release;
522} 522}
523 523
524#ifdef EXT2FS_DEBUG
525
526static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
527
528unsigned long ext2_count_free (struct buffer_head * map, unsigned int numchars)
529{
530 unsigned int i;
531 unsigned long sum = 0;
532
533 if (!map)
534 return (0);
535 for (i = 0; i < numchars; i++)
536 sum += nibblemap[map->b_data[i] & 0xf] +
537 nibblemap[(map->b_data[i] >> 4) & 0xf];
538 return (sum);
539}
540
541#endif /* EXT2FS_DEBUG */
542
543/* Superblock must be locked */
524unsigned long ext2_count_free_blocks (struct super_block * sb) 544unsigned long ext2_count_free_blocks (struct super_block * sb)
525{ 545{
526 struct ext2_group_desc * desc; 546 struct ext2_group_desc * desc;
@@ -530,7 +550,6 @@ unsigned long ext2_count_free_blocks (struct super_block * sb)
530 unsigned long bitmap_count, x; 550 unsigned long bitmap_count, x;
531 struct ext2_super_block *es; 551 struct ext2_super_block *es;
532 552
533 lock_super (sb);
534 es = EXT2_SB(sb)->s_es; 553 es = EXT2_SB(sb)->s_es;
535 desc_count = 0; 554 desc_count = 0;
536 bitmap_count = 0; 555 bitmap_count = 0;
@@ -554,7 +573,6 @@ unsigned long ext2_count_free_blocks (struct super_block * sb)
554 printk("ext2_count_free_blocks: stored = %lu, computed = %lu, %lu\n", 573 printk("ext2_count_free_blocks: stored = %lu, computed = %lu, %lu\n",
555 (long)le32_to_cpu(es->s_free_blocks_count), 574 (long)le32_to_cpu(es->s_free_blocks_count),
556 desc_count, bitmap_count); 575 desc_count, bitmap_count);
557 unlock_super (sb);
558 return bitmap_count; 576 return bitmap_count;
559#else 577#else
560 for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) { 578 for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
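
The code moved into balloc.c (and deleted from bitmap.c below) counts free bits with a nibble lookup table: each entry holds the number of zero bits in a 4-bit value, so every bitmap byte costs two table lookups. A standalone sketch with a made-up bitmap:

#include <stdio.h>

static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};

static unsigned long count_free(const unsigned char *map, unsigned int numchars)
{
        unsigned long sum = 0;
        unsigned int i;

        for (i = 0; i < numchars; i++)
                sum += nibblemap[map[i] & 0xf] + nibblemap[(map[i] >> 4) & 0xf];
        return sum;
}

int main(void)
{
        unsigned char bitmap[] = { 0x00, 0xff, 0x0f };  /* 8 + 0 + 4 free bits */

        printf("%lu free bits\n", count_free(bitmap, sizeof(bitmap)));
        return 0;
}
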
diff --git a/fs/ext2/bitmap.c b/fs/ext2/bitmap.c
deleted file mode 100644
index e9983a0dd396..000000000000
--- a/fs/ext2/bitmap.c
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * linux/fs/ext2/bitmap.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 */
9
10#ifdef EXT2FS_DEBUG
11
12#include <linux/buffer_head.h>
13
14#include "ext2.h"
15
16static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
17
18unsigned long ext2_count_free (struct buffer_head * map, unsigned int numchars)
19{
20 unsigned int i;
21 unsigned long sum = 0;
22
23 if (!map)
24 return (0);
25 for (i = 0; i < numchars; i++)
26 sum += nibblemap[map->b_data[i] & 0xf] +
27 nibblemap[(map->b_data[i] >> 4) & 0xf];
28 return (sum);
29}
30
31#endif /* EXT2FS_DEBUG */
32
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 3c1c9aaaca6b..92ea8265d7d5 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -399,8 +399,7 @@ ino_t ext2_inode_by_name(struct inode * dir, struct dentry *dentry)
399 de = ext2_find_entry (dir, dentry, &page); 399 de = ext2_find_entry (dir, dentry, &page);
400 if (de) { 400 if (de) {
401 res = le32_to_cpu(de->inode); 401 res = le32_to_cpu(de->inode);
402 kunmap(page); 402 ext2_put_page(page);
403 page_cache_release(page);
404 } 403 }
405 return res; 404 return res;
406} 405}
diff --git a/fs/ext2/fsync.c b/fs/ext2/fsync.c
index c9c2e5ffa48e..7806b9e8155b 100644
--- a/fs/ext2/fsync.c
+++ b/fs/ext2/fsync.c
@@ -24,7 +24,7 @@
24 24
25#include "ext2.h" 25#include "ext2.h"
26#include <linux/smp_lock.h> 26#include <linux/smp_lock.h>
27#include <linux/buffer_head.h> /* for fsync_inode_buffers() */ 27#include <linux/buffer_head.h> /* for sync_mapping_buffers() */
28 28
29 29
30/* 30/*
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index e52765219e16..308c252568c6 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -638,6 +638,7 @@ fail:
638 return ERR_PTR(err); 638 return ERR_PTR(err);
639} 639}
640 640
641/* Superblock must be locked */
641unsigned long ext2_count_free_inodes (struct super_block * sb) 642unsigned long ext2_count_free_inodes (struct super_block * sb)
642{ 643{
643 struct ext2_group_desc *desc; 644 struct ext2_group_desc *desc;
@@ -649,7 +650,6 @@ unsigned long ext2_count_free_inodes (struct super_block * sb)
649 unsigned long bitmap_count = 0; 650 unsigned long bitmap_count = 0;
650 struct buffer_head *bitmap_bh = NULL; 651 struct buffer_head *bitmap_bh = NULL;
651 652
652 lock_super (sb);
653 es = EXT2_SB(sb)->s_es; 653 es = EXT2_SB(sb)->s_es;
654 for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) { 654 for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
655 unsigned x; 655 unsigned x;
@@ -672,7 +672,6 @@ unsigned long ext2_count_free_inodes (struct super_block * sb)
672 printk("ext2_count_free_inodes: stored = %lu, computed = %lu, %lu\n", 672 printk("ext2_count_free_inodes: stored = %lu, computed = %lu, %lu\n",
673 percpu_counter_read(&EXT2_SB(sb)->s_freeinodes_counter), 673 percpu_counter_read(&EXT2_SB(sb)->s_freeinodes_counter),
674 desc_count, bitmap_count); 674 desc_count, bitmap_count);
675 unlock_super(sb);
676 return desc_count; 675 return desc_count;
677#else 676#else
678 for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) { 677 for (i = 0; i < EXT2_SB(sb)->s_groups_count; i++) {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index ee4ba759581e..d4233b2e6436 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -854,7 +854,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
854 } 854 }
855 if (!ext2_check_descriptors (sb)) { 855 if (!ext2_check_descriptors (sb)) {
856 printk ("EXT2-fs: group descriptors corrupted!\n"); 856 printk ("EXT2-fs: group descriptors corrupted!\n");
857 db_count = i;
858 goto failed_mount2; 857 goto failed_mount2;
859 } 858 }
860 sbi->s_gdb_count = db_count; 859 sbi->s_gdb_count = db_count;
@@ -1046,6 +1045,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1046 unsigned long overhead; 1045 unsigned long overhead;
1047 int i; 1046 int i;
1048 1047
1048 lock_super(sb);
1049 if (test_opt (sb, MINIX_DF)) 1049 if (test_opt (sb, MINIX_DF))
1050 overhead = 0; 1050 overhead = 0;
1051 else { 1051 else {
@@ -1086,6 +1086,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1086 buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count); 1086 buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count);
1087 buf->f_ffree = ext2_count_free_inodes (sb); 1087 buf->f_ffree = ext2_count_free_inodes (sb);
1088 buf->f_namelen = EXT2_NAME_LEN; 1088 buf->f_namelen = EXT2_NAME_LEN;
1089 unlock_super(sb);
1089 return 0; 1090 return 0;
1090} 1091}
1091 1092
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 77927d6938f6..96172e89ddc3 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -163,20 +163,19 @@ restart:
163#endif 163#endif
164 164
165static int 165static int
166goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal, 166goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
167 unsigned int group, struct super_block * sb) 167 unsigned int group, struct super_block * sb)
168{ 168{
169 unsigned long group_first_block, group_last_block; 169 ext3_fsblk_t group_first_block, group_last_block;
170 170
171 group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + 171 group_first_block = ext3_group_first_block_no(sb, group);
172 group * EXT3_BLOCKS_PER_GROUP(sb);
173 group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; 172 group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
174 173
175 if ((rsv->_rsv_start > group_last_block) || 174 if ((rsv->_rsv_start > group_last_block) ||
176 (rsv->_rsv_end < group_first_block)) 175 (rsv->_rsv_end < group_first_block))
177 return 0; 176 return 0;
178 if ((goal >= 0) && ((goal + group_first_block < rsv->_rsv_start) 177 if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
179 || (goal + group_first_block > rsv->_rsv_end))) 178 || (grp_goal + group_first_block > rsv->_rsv_end)))
180 return 0; 179 return 0;
181 return 1; 180 return 1;
182} 181}
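
The ext3/balloc.c hunks above and below mostly replace bare ints with two explicit types: ext3_grpblk_t for group-relative block numbers (which may be -1 for "no goal") and ext3_fsblk_t for filesystem-wide block numbers, with ext3_group_first_block_no() converting between the two. A rough userspace sketch of that arithmetic, with illustrative constants:

#include <stdio.h>

typedef long grpblk_t;                  /* group-relative block, may be -1 */
typedef unsigned long fsblk_t;          /* filesystem-wide block number */

#define BLOCKS_PER_GROUP  32768UL       /* illustrative */
#define FIRST_DATA_BLOCK  1UL           /* illustrative */

/* Rough equivalent of ext3_group_first_block_no(sb, group). */
static fsblk_t group_first_block_no(unsigned int group)
{
        return FIRST_DATA_BLOCK + (fsblk_t)group * BLOCKS_PER_GROUP;
}

int main(void)
{
        grpblk_t grp_goal = 100;
        fsblk_t goal = group_first_block_no(3) + grp_goal;

        printf("group 3 + offset %ld -> block %lu\n", grp_goal, goal);
        return 0;
}
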
@@ -187,7 +186,7 @@ goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal,
187 * Returns NULL if there are no windows or if all windows start after the goal. 186 * Returns NULL if there are no windows or if all windows start after the goal.
188 */ 187 */
189static struct ext3_reserve_window_node * 188static struct ext3_reserve_window_node *
190search_reserve_window(struct rb_root *root, unsigned long goal) 189search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
191{ 190{
192 struct rb_node *n = root->rb_node; 191 struct rb_node *n = root->rb_node;
193 struct ext3_reserve_window_node *rsv; 192 struct ext3_reserve_window_node *rsv;
@@ -223,7 +222,7 @@ void ext3_rsv_window_add(struct super_block *sb,
223{ 222{
224 struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root; 223 struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
225 struct rb_node *node = &rsv->rsv_node; 224 struct rb_node *node = &rsv->rsv_node;
226 unsigned int start = rsv->rsv_start; 225 ext3_fsblk_t start = rsv->rsv_start;
227 226
228 struct rb_node ** p = &root->rb_node; 227 struct rb_node ** p = &root->rb_node;
229 struct rb_node * parent = NULL; 228 struct rb_node * parent = NULL;
@@ -310,20 +309,20 @@ void ext3_discard_reservation(struct inode *inode)
310 309
311/* Free given blocks, update quota and i_blocks field */ 310/* Free given blocks, update quota and i_blocks field */
312void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb, 311void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
313 unsigned long block, unsigned long count, 312 ext3_fsblk_t block, unsigned long count,
314 int *pdquot_freed_blocks) 313 unsigned long *pdquot_freed_blocks)
315{ 314{
316 struct buffer_head *bitmap_bh = NULL; 315 struct buffer_head *bitmap_bh = NULL;
317 struct buffer_head *gd_bh; 316 struct buffer_head *gd_bh;
318 unsigned long block_group; 317 unsigned long block_group;
319 unsigned long bit; 318 ext3_grpblk_t bit;
320 unsigned long i; 319 unsigned long i;
321 unsigned long overflow; 320 unsigned long overflow;
322 struct ext3_group_desc * desc; 321 struct ext3_group_desc * desc;
323 struct ext3_super_block * es; 322 struct ext3_super_block * es;
324 struct ext3_sb_info *sbi; 323 struct ext3_sb_info *sbi;
325 int err = 0, ret; 324 int err = 0, ret;
326 unsigned group_freed; 325 ext3_grpblk_t group_freed;
327 326
328 *pdquot_freed_blocks = 0; 327 *pdquot_freed_blocks = 0;
329 sbi = EXT3_SB(sb); 328 sbi = EXT3_SB(sb);
@@ -333,7 +332,7 @@ void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
333 block + count > le32_to_cpu(es->s_blocks_count)) { 332 block + count > le32_to_cpu(es->s_blocks_count)) {
334 ext3_error (sb, "ext3_free_blocks", 333 ext3_error (sb, "ext3_free_blocks",
335 "Freeing blocks not in datazone - " 334 "Freeing blocks not in datazone - "
336 "block = %lu, count = %lu", block, count); 335 "block = "E3FSBLK", count = %lu", block, count);
337 goto error_return; 336 goto error_return;
338 } 337 }
339 338
@@ -369,7 +368,7 @@ do_more:
369 sbi->s_itb_per_group)) 368 sbi->s_itb_per_group))
370 ext3_error (sb, "ext3_free_blocks", 369 ext3_error (sb, "ext3_free_blocks",
371 "Freeing blocks in system zones - " 370 "Freeing blocks in system zones - "
372 "Block = %lu, count = %lu", 371 "Block = "E3FSBLK", count = %lu",
373 block, count); 372 block, count);
374 373
375 /* 374 /*
@@ -453,7 +452,8 @@ do_more:
453 bit + i, bitmap_bh->b_data)) { 452 bit + i, bitmap_bh->b_data)) {
454 jbd_unlock_bh_state(bitmap_bh); 453 jbd_unlock_bh_state(bitmap_bh);
455 ext3_error(sb, __FUNCTION__, 454 ext3_error(sb, __FUNCTION__,
456 "bit already cleared for block %lu", block + i); 455 "bit already cleared for block "E3FSBLK,
456 block + i);
457 jbd_lock_bh_state(bitmap_bh); 457 jbd_lock_bh_state(bitmap_bh);
458 BUFFER_TRACE(bitmap_bh, "bit already cleared"); 458 BUFFER_TRACE(bitmap_bh, "bit already cleared");
459 } else { 459 } else {
@@ -493,10 +493,10 @@ error_return:
493 493
494/* Free given blocks, update quota and i_blocks field */ 494/* Free given blocks, update quota and i_blocks field */
495void ext3_free_blocks(handle_t *handle, struct inode *inode, 495void ext3_free_blocks(handle_t *handle, struct inode *inode,
496 unsigned long block, unsigned long count) 496 ext3_fsblk_t block, unsigned long count)
497{ 497{
498 struct super_block * sb; 498 struct super_block * sb;
499 int dquot_freed_blocks; 499 unsigned long dquot_freed_blocks;
500 500
501 sb = inode->i_sb; 501 sb = inode->i_sb;
502 if (!sb) { 502 if (!sb) {
@@ -525,7 +525,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
525 * data-writes at some point, and disable it for metadata allocations or 525 * data-writes at some point, and disable it for metadata allocations or
526 * sync-data inodes. 526 * sync-data inodes.
527 */ 527 */
528static int ext3_test_allocatable(int nr, struct buffer_head *bh) 528static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
529{ 529{
530 int ret; 530 int ret;
531 struct journal_head *jh = bh2jh(bh); 531 struct journal_head *jh = bh2jh(bh);
@@ -542,11 +542,11 @@ static int ext3_test_allocatable(int nr, struct buffer_head *bh)
542 return ret; 542 return ret;
543} 543}
544 544
545static int 545static ext3_grpblk_t
546bitmap_search_next_usable_block(int start, struct buffer_head *bh, 546bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
547 int maxblocks) 547 ext3_grpblk_t maxblocks)
548{ 548{
549 int next; 549 ext3_grpblk_t next;
550 struct journal_head *jh = bh2jh(bh); 550 struct journal_head *jh = bh2jh(bh);
551 551
552 /* 552 /*
@@ -576,10 +576,11 @@ bitmap_search_next_usable_block(int start, struct buffer_head *bh,
576 * the initial goal; then for a free byte somewhere in the bitmap; then 576 * the initial goal; then for a free byte somewhere in the bitmap; then
577 * for any free bit in the bitmap. 577 * for any free bit in the bitmap.
578 */ 578 */
579static int 579static ext3_grpblk_t
580find_next_usable_block(int start, struct buffer_head *bh, int maxblocks) 580find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
581 ext3_grpblk_t maxblocks)
581{ 582{
582 int here, next; 583 ext3_grpblk_t here, next;
583 char *p, *r; 584 char *p, *r;
584 585
585 if (start > 0) { 586 if (start > 0) {
@@ -591,7 +592,7 @@ find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
591 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the 592 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
592 * next 64-bit boundary is simple.. 593 * next 64-bit boundary is simple..
593 */ 594 */
594 int end_goal = (start + 63) & ~63; 595 ext3_grpblk_t end_goal = (start + 63) & ~63;
595 if (end_goal > maxblocks) 596 if (end_goal > maxblocks)
596 end_goal = maxblocks; 597 end_goal = maxblocks;
597 here = ext3_find_next_zero_bit(bh->b_data, end_goal, start); 598 here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
@@ -628,7 +629,7 @@ find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
628 * zero (failure). 629 * zero (failure).
629 */ 630 */
630static inline int 631static inline int
631claim_block(spinlock_t *lock, int block, struct buffer_head *bh) 632claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
632{ 633{
633 struct journal_head *jh = bh2jh(bh); 634 struct journal_head *jh = bh2jh(bh);
634 int ret; 635 int ret;
@@ -651,19 +652,18 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
651 * new bitmap. In that case we must release write access to the old one via 652 * new bitmap. In that case we must release write access to the old one via
652 * ext3_journal_release_buffer(), else we'll run out of credits. 653 * ext3_journal_release_buffer(), else we'll run out of credits.
653 */ 654 */
654static int 655static ext3_grpblk_t
655ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, 656ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
656 struct buffer_head *bitmap_bh, int goal, 657 struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
657 unsigned long *count, struct ext3_reserve_window *my_rsv) 658 unsigned long *count, struct ext3_reserve_window *my_rsv)
658{ 659{
659 int group_first_block, start, end; 660 ext3_fsblk_t group_first_block;
661 ext3_grpblk_t start, end;
660 unsigned long num = 0; 662 unsigned long num = 0;
661 663
662 /* we do allocation within the reservation window if we have a window */ 664 /* we do allocation within the reservation window if we have a window */
663 if (my_rsv) { 665 if (my_rsv) {
664 group_first_block = 666 group_first_block = ext3_group_first_block_no(sb, group);
665 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
666 group * EXT3_BLOCKS_PER_GROUP(sb);
667 if (my_rsv->_rsv_start >= group_first_block) 667 if (my_rsv->_rsv_start >= group_first_block)
668 start = my_rsv->_rsv_start - group_first_block; 668 start = my_rsv->_rsv_start - group_first_block;
669 else 669 else
@@ -673,13 +673,13 @@ ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
673 if (end > EXT3_BLOCKS_PER_GROUP(sb)) 673 if (end > EXT3_BLOCKS_PER_GROUP(sb))
674 /* reservation window crosses group boundary */ 674 /* reservation window crosses group boundary */
675 end = EXT3_BLOCKS_PER_GROUP(sb); 675 end = EXT3_BLOCKS_PER_GROUP(sb);
676 if ((start <= goal) && (goal < end)) 676 if ((start <= grp_goal) && (grp_goal < end))
677 start = goal; 677 start = grp_goal;
678 else 678 else
679 goal = -1; 679 grp_goal = -1;
680 } else { 680 } else {
681 if (goal > 0) 681 if (grp_goal > 0)
682 start = goal; 682 start = grp_goal;
683 else 683 else
684 start = 0; 684 start = 0;
685 end = EXT3_BLOCKS_PER_GROUP(sb); 685 end = EXT3_BLOCKS_PER_GROUP(sb);
@@ -688,43 +688,43 @@ ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
688 BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb)); 688 BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
689 689
690repeat: 690repeat:
691 if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) { 691 if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
692 goal = find_next_usable_block(start, bitmap_bh, end); 692 grp_goal = find_next_usable_block(start, bitmap_bh, end);
693 if (goal < 0) 693 if (grp_goal < 0)
694 goto fail_access; 694 goto fail_access;
695 if (!my_rsv) { 695 if (!my_rsv) {
696 int i; 696 int i;
697 697
698 for (i = 0; i < 7 && goal > start && 698 for (i = 0; i < 7 && grp_goal > start &&
699 ext3_test_allocatable(goal - 1, 699 ext3_test_allocatable(grp_goal - 1,
700 bitmap_bh); 700 bitmap_bh);
701 i++, goal--) 701 i++, grp_goal--)
702 ; 702 ;
703 } 703 }
704 } 704 }
705 start = goal; 705 start = grp_goal;
706 706
707 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { 707 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) {
708 /* 708 /*
709 * The block was allocated by another thread, or it was 709 * The block was allocated by another thread, or it was
710 * allocated and then freed by another thread 710 * allocated and then freed by another thread
711 */ 711 */
712 start++; 712 start++;
713 goal++; 713 grp_goal++;
714 if (start >= end) 714 if (start >= end)
715 goto fail_access; 715 goto fail_access;
716 goto repeat; 716 goto repeat;
717 } 717 }
718 num++; 718 num++;
719 goal++; 719 grp_goal++;
720 while (num < *count && goal < end 720 while (num < *count && grp_goal < end
721 && ext3_test_allocatable(goal, bitmap_bh) 721 && ext3_test_allocatable(grp_goal, bitmap_bh)
722 && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { 722 && claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) {
723 num++; 723 num++;
724 goal++; 724 grp_goal++;
725 } 725 }
726 *count = num; 726 *count = num;
727 return goal - num; 727 return grp_goal - num;
728fail_access: 728fail_access:
729 *count = num; 729 *count = num;
730 return -1; 730 return -1;
@@ -766,12 +766,13 @@ fail_access:
766static int find_next_reservable_window( 766static int find_next_reservable_window(
767 struct ext3_reserve_window_node *search_head, 767 struct ext3_reserve_window_node *search_head,
768 struct ext3_reserve_window_node *my_rsv, 768 struct ext3_reserve_window_node *my_rsv,
769 struct super_block * sb, int start_block, 769 struct super_block * sb,
770 int last_block) 770 ext3_fsblk_t start_block,
771 ext3_fsblk_t last_block)
771{ 772{
772 struct rb_node *next; 773 struct rb_node *next;
773 struct ext3_reserve_window_node *rsv, *prev; 774 struct ext3_reserve_window_node *rsv, *prev;
774 int cur; 775 ext3_fsblk_t cur;
775 int size = my_rsv->rsv_goal_size; 776 int size = my_rsv->rsv_goal_size;
776 777
777 /* TODO: make the start of the reservation window byte-aligned */ 778 /* TODO: make the start of the reservation window byte-aligned */
@@ -873,10 +874,10 @@ static int find_next_reservable_window(
873 * 874 *
874 * @rsv: the reservation 875 * @rsv: the reservation
875 * 876 *
876 * @goal: The goal (group-relative). It is where the search for a 877 * @grp_goal: The goal (group-relative). It is where the search for a
877 * free reservable space should start from. 878 * free reservable space should start from.
878 * if we have a goal(goal >0 ), then start from there, 879 * if we have a grp_goal(grp_goal >0 ), then start from there,
879 * no goal(goal = -1), we start from the first block 880 * no grp_goal(grp_goal = -1), we start from the first block
880 * of the group. 881 * of the group.
881 * 882 *
882 * @sb: the super block 883 * @sb: the super block
@@ -885,25 +886,24 @@ static int find_next_reservable_window(
885 * 886 *
886 */ 887 */
887static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, 888static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
888 int goal, struct super_block *sb, 889 ext3_grpblk_t grp_goal, struct super_block *sb,
889 unsigned int group, struct buffer_head *bitmap_bh) 890 unsigned int group, struct buffer_head *bitmap_bh)
890{ 891{
891 struct ext3_reserve_window_node *search_head; 892 struct ext3_reserve_window_node *search_head;
892 int group_first_block, group_end_block, start_block; 893 ext3_fsblk_t group_first_block, group_end_block, start_block;
893 int first_free_block; 894 ext3_grpblk_t first_free_block;
894 struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root; 895 struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
895 unsigned long size; 896 unsigned long size;
896 int ret; 897 int ret;
897 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; 898 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
898 899
899 group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + 900 group_first_block = ext3_group_first_block_no(sb, group);
900 group * EXT3_BLOCKS_PER_GROUP(sb);
901 group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1; 901 group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;
902 902
903 if (goal < 0) 903 if (grp_goal < 0)
904 start_block = group_first_block; 904 start_block = group_first_block;
905 else 905 else
906 start_block = goal + group_first_block; 906 start_block = grp_goal + group_first_block;
907 907
908 size = my_rsv->rsv_goal_size; 908 size = my_rsv->rsv_goal_size;
909 909
@@ -1057,14 +1057,15 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1057 * sorted double linked list should be fast. 1057 * sorted double linked list should be fast.
1058 * 1058 *
1059 */ 1059 */
1060static int 1060static ext3_grpblk_t
1061ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, 1061ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1062 unsigned int group, struct buffer_head *bitmap_bh, 1062 unsigned int group, struct buffer_head *bitmap_bh,
1063 int goal, struct ext3_reserve_window_node * my_rsv, 1063 ext3_grpblk_t grp_goal,
1064 struct ext3_reserve_window_node * my_rsv,
1064 unsigned long *count, int *errp) 1065 unsigned long *count, int *errp)
1065{ 1066{
1066 unsigned long group_first_block; 1067 ext3_fsblk_t group_first_block;
1067 int ret = 0; 1068 ext3_grpblk_t ret = 0;
1068 int fatal; 1069 int fatal;
1069 unsigned long num = *count; 1070 unsigned long num = *count;
1070 1071
@@ -1090,17 +1091,16 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1090 */ 1091 */
1091 if (my_rsv == NULL ) { 1092 if (my_rsv == NULL ) {
1092 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, 1093 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1093 goal, count, NULL); 1094 grp_goal, count, NULL);
1094 goto out; 1095 goto out;
1095 } 1096 }
1096 /* 1097 /*
1097 * goal is a group relative block number (if there is a goal) 1098 * grp_goal is a group relative block number (if there is a goal)
1098 * 0 < goal < EXT3_BLOCKS_PER_GROUP(sb) 1099 * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
1099 * first block is a filesystem wide block number 1100 * first block is a filesystem wide block number
1100 * first block is the block number of the first block in this group 1101 * first block is the block number of the first block in this group
1101 */ 1102 */
1102 group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) + 1103 group_first_block = ext3_group_first_block_no(sb, group);
1103 group * EXT3_BLOCKS_PER_GROUP(sb);
1104 1104
1105 /* 1105 /*
1106 * Basically we will allocate a new block from inode's reservation 1106 * Basically we will allocate a new block from inode's reservation
@@ -1119,24 +1119,24 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1119 */ 1119 */
1120 while (1) { 1120 while (1) {
1121 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || 1121 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1122 !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { 1122 !goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb)) {
1123 if (my_rsv->rsv_goal_size < *count) 1123 if (my_rsv->rsv_goal_size < *count)
1124 my_rsv->rsv_goal_size = *count; 1124 my_rsv->rsv_goal_size = *count;
1125 ret = alloc_new_reservation(my_rsv, goal, sb, 1125 ret = alloc_new_reservation(my_rsv, grp_goal, sb,
1126 group, bitmap_bh); 1126 group, bitmap_bh);
1127 if (ret < 0) 1127 if (ret < 0)
1128 break; /* failed */ 1128 break; /* failed */
1129 1129
1130 if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) 1130 if (!goal_in_my_reservation(&my_rsv->rsv_window, grp_goal, group, sb))
1131 goal = -1; 1131 grp_goal = -1;
1132 } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count) 1132 } else if (grp_goal > 0 && (my_rsv->rsv_end-grp_goal+1) < *count)
1133 try_to_extend_reservation(my_rsv, sb, 1133 try_to_extend_reservation(my_rsv, sb,
1134 *count-my_rsv->rsv_end + goal - 1); 1134 *count-my_rsv->rsv_end + grp_goal - 1);
1135 1135
1136 if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) 1136 if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
1137 || (my_rsv->rsv_end < group_first_block)) 1137 || (my_rsv->rsv_end < group_first_block))
1138 BUG(); 1138 BUG();
1139 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, 1139 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, grp_goal,
1140 &num, &my_rsv->rsv_window); 1140 &num, &my_rsv->rsv_window);
1141 if (ret >= 0) { 1141 if (ret >= 0) {
1142 my_rsv->rsv_alloc_hit += num; 1142 my_rsv->rsv_alloc_hit += num;
@@ -1164,7 +1164,7 @@ out:
1164 1164
1165static int ext3_has_free_blocks(struct ext3_sb_info *sbi) 1165static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
1166{ 1166{
1167 int free_blocks, root_blocks; 1167 ext3_fsblk_t free_blocks, root_blocks;
1168 1168
1169 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 1169 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1170 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); 1170 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
@@ -1200,19 +1200,20 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries)
1200 * bitmap, and then for any free bit if that fails. 1200 * bitmap, and then for any free bit if that fails.
1201 * This function also updates quota and i_blocks field. 1201 * This function also updates quota and i_blocks field.
1202 */ 1202 */
1203int ext3_new_blocks(handle_t *handle, struct inode *inode, 1203ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1204 unsigned long goal, unsigned long *count, int *errp) 1204 ext3_fsblk_t goal, unsigned long *count, int *errp)
1205{ 1205{
1206 struct buffer_head *bitmap_bh = NULL; 1206 struct buffer_head *bitmap_bh = NULL;
1207 struct buffer_head *gdp_bh; 1207 struct buffer_head *gdp_bh;
1208 int group_no; 1208 int group_no;
1209 int goal_group; 1209 int goal_group;
1210 int ret_block; 1210 ext3_grpblk_t grp_target_blk; /* blockgroup relative goal block */
1211 ext3_grpblk_t grp_alloc_blk; /* blockgroup-relative allocated block*/
1212 ext3_fsblk_t ret_block; /* filesyetem-wide allocated block */
1211 int bgi; /* blockgroup iteration index */ 1213 int bgi; /* blockgroup iteration index */
1212 int target_block;
1213 int fatal = 0, err; 1214 int fatal = 0, err;
1214 int performed_allocation = 0; 1215 int performed_allocation = 0;
1215 int free_blocks; 1216 ext3_grpblk_t free_blocks; /* number of free blocks in a group */
1216 struct super_block *sb; 1217 struct super_block *sb;
1217 struct ext3_group_desc *gdp; 1218 struct ext3_group_desc *gdp;
1218 struct ext3_super_block *es; 1219 struct ext3_super_block *es;
@@ -1285,16 +1286,17 @@ retry:
1285 my_rsv = NULL; 1286 my_rsv = NULL;
1286 1287
1287 if (free_blocks > 0) { 1288 if (free_blocks > 0) {
1288 ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) % 1289 grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
1289 EXT3_BLOCKS_PER_GROUP(sb)); 1290 EXT3_BLOCKS_PER_GROUP(sb));
1290 bitmap_bh = read_block_bitmap(sb, group_no); 1291 bitmap_bh = read_block_bitmap(sb, group_no);
1291 if (!bitmap_bh) 1292 if (!bitmap_bh)
1292 goto io_error; 1293 goto io_error;
1293 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, 1294 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1294 bitmap_bh, ret_block, my_rsv, &num, &fatal); 1295 group_no, bitmap_bh, grp_target_blk,
1296 my_rsv, &num, &fatal);
1295 if (fatal) 1297 if (fatal)
1296 goto out; 1298 goto out;
1297 if (ret_block >= 0) 1299 if (grp_alloc_blk >= 0)
1298 goto allocated; 1300 goto allocated;
1299 } 1301 }
1300 1302
@@ -1327,11 +1329,15 @@ retry:
1327 bitmap_bh = read_block_bitmap(sb, group_no); 1329 bitmap_bh = read_block_bitmap(sb, group_no);
1328 if (!bitmap_bh) 1330 if (!bitmap_bh)
1329 goto io_error; 1331 goto io_error;
1330 ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, 1332 /*
1331 bitmap_bh, -1, my_rsv, &num, &fatal); 1333 * try to allocate block(s) from this group, without a goal(-1).
1334 */
1335 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1336 group_no, bitmap_bh, -1, my_rsv,
1337 &num, &fatal);
1332 if (fatal) 1338 if (fatal)
1333 goto out; 1339 goto out;
1334 if (ret_block >= 0) 1340 if (grp_alloc_blk >= 0)
1335 goto allocated; 1341 goto allocated;
1336 } 1342 }
1337 /* 1343 /*
@@ -1360,18 +1366,18 @@ allocated:
1360 if (fatal) 1366 if (fatal)
1361 goto out; 1367 goto out;
1362 1368
1363 target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) 1369 ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
1364 + le32_to_cpu(es->s_first_data_block);
1365 1370
1366 if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) || 1371 if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
1367 in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) || 1372 in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
1368 in_range(target_block, le32_to_cpu(gdp->bg_inode_table), 1373 in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
1369 EXT3_SB(sb)->s_itb_per_group) || 1374 EXT3_SB(sb)->s_itb_per_group) ||
1370 in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table), 1375 in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
1371 EXT3_SB(sb)->s_itb_per_group)) 1376 EXT3_SB(sb)->s_itb_per_group))
1372 ext3_error(sb, "ext3_new_block", 1377 ext3_error(sb, "ext3_new_block",
1373 "Allocating block in system zone - " 1378 "Allocating block in system zone - "
1374 "blocks from %u, length %lu", target_block, num); 1379 "blocks from "E3FSBLK", length %lu",
1380 ret_block, num);
1375 1381
1376 performed_allocation = 1; 1382 performed_allocation = 1;
1377 1383
@@ -1380,7 +1386,7 @@ allocated:
1380 struct buffer_head *debug_bh; 1386 struct buffer_head *debug_bh;
1381 1387
1382 /* Record bitmap buffer state in the newly allocated block */ 1388 /* Record bitmap buffer state in the newly allocated block */
1383 debug_bh = sb_find_get_block(sb, target_block); 1389 debug_bh = sb_find_get_block(sb, ret_block);
1384 if (debug_bh) { 1390 if (debug_bh) {
1385 BUFFER_TRACE(debug_bh, "state when allocated"); 1391 BUFFER_TRACE(debug_bh, "state when allocated");
1386 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state"); 1392 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
@@ -1393,24 +1399,21 @@ allocated:
1393 int i; 1399 int i;
1394 1400
1395 for (i = 0; i < num; i++) { 1401 for (i = 0; i < num; i++) {
1396 if (ext3_test_bit(ret_block, 1402 if (ext3_test_bit(grp_alloc_blk+i,
1397 bh2jh(bitmap_bh)->b_committed_data)) { 1403 bh2jh(bitmap_bh)->b_committed_data)) {
1398 printk("%s: block was unexpectedly set in " 1404 printk("%s: block was unexpectedly set in "
1399 "b_committed_data\n", __FUNCTION__); 1405 "b_committed_data\n", __FUNCTION__);
1400 } 1406 }
1401 } 1407 }
1402 } 1408 }
1403 ext3_debug("found bit %d\n", ret_block); 1409 ext3_debug("found bit %d\n", grp_alloc_blk);
1404 spin_unlock(sb_bgl_lock(sbi, group_no)); 1410 spin_unlock(sb_bgl_lock(sbi, group_no));
1405 jbd_unlock_bh_state(bitmap_bh); 1411 jbd_unlock_bh_state(bitmap_bh);
1406#endif 1412#endif
1407 1413
1408 /* ret_block was blockgroup-relative. Now it becomes fs-relative */
1409 ret_block = target_block;
1410
1411 if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { 1414 if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
1412 ext3_error(sb, "ext3_new_block", 1415 ext3_error(sb, "ext3_new_block",
1413 "block(%d) >= blocks count(%d) - " 1416 "block("E3FSBLK") >= blocks count(%d) - "
1414 "block_group = %d, es == %p ", ret_block, 1417 "block_group = %d, es == %p ", ret_block,
1415 le32_to_cpu(es->s_blocks_count), group_no, es); 1418 le32_to_cpu(es->s_blocks_count), group_no, es);
1416 goto out; 1419 goto out;
@@ -1421,7 +1424,7 @@ allocated:
1421 * list of some description. We don't know in advance whether 1424 * list of some description. We don't know in advance whether
1422 * the caller wants to use it as metadata or data. 1425 * the caller wants to use it as metadata or data.
1423 */ 1426 */
1424 ext3_debug("allocating block %d. Goal hits %d of %d.\n", 1427 ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
1425 ret_block, goal_hits, goal_attempts); 1428 ret_block, goal_hits, goal_attempts);
1426 1429
1427 spin_lock(sb_bgl_lock(sbi, group_no)); 1430 spin_lock(sb_bgl_lock(sbi, group_no));
@@ -1461,23 +1464,24 @@ out:
1461 return 0; 1464 return 0;
1462} 1465}
1463 1466
1464int ext3_new_block(handle_t *handle, struct inode *inode, 1467ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
1465 unsigned long goal, int *errp) 1468 ext3_fsblk_t goal, int *errp)
1466{ 1469{
1467 unsigned long count = 1; 1470 unsigned long count = 1;
1468 1471
1469 return ext3_new_blocks(handle, inode, goal, &count, errp); 1472 return ext3_new_blocks(handle, inode, goal, &count, errp);
1470} 1473}
1471 1474
1472unsigned long ext3_count_free_blocks(struct super_block *sb) 1475ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
1473{ 1476{
1474 unsigned long desc_count; 1477 ext3_fsblk_t desc_count;
1475 struct ext3_group_desc *gdp; 1478 struct ext3_group_desc *gdp;
1476 int i; 1479 int i;
1477 unsigned long ngroups = EXT3_SB(sb)->s_groups_count; 1480 unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
1478#ifdef EXT3FS_DEBUG 1481#ifdef EXT3FS_DEBUG
1479 struct ext3_super_block *es; 1482 struct ext3_super_block *es;
1480 unsigned long bitmap_count, x; 1483 ext3_fsblk_t bitmap_count;
1484 unsigned long x;
1481 struct buffer_head *bitmap_bh = NULL; 1485 struct buffer_head *bitmap_bh = NULL;
1482 1486
1483 es = EXT3_SB(sb)->s_es; 1487 es = EXT3_SB(sb)->s_es;
@@ -1502,8 +1506,10 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
1502 bitmap_count += x; 1506 bitmap_count += x;
1503 } 1507 }
1504 brelse(bitmap_bh); 1508 brelse(bitmap_bh);
1505 printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n", 1509 printk("ext3_count_free_blocks: stored = "E3FSBLK
1506 le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count); 1510 ", computed = "E3FSBLK", "E3FSBLK"\n",
1511 le32_to_cpu(es->s_free_blocks_count),
1512 desc_count, bitmap_count);
1507 return bitmap_count; 1513 return bitmap_count;
1508#else 1514#else
1509 desc_count = 0; 1515 desc_count = 0;
@@ -1520,7 +1526,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
1520} 1526}
1521 1527
1522static inline int 1528static inline int
1523block_in_use(unsigned long block, struct super_block *sb, unsigned char *map) 1529block_in_use(ext3_fsblk_t block, struct super_block *sb, unsigned char *map)
1524{ 1530{
1525 return ext3_test_bit ((block - 1531 return ext3_test_bit ((block -
1526 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) % 1532 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
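
The balloc.c changes above replace bare int/unsigned long block numbers with two distinct types: ext3_fsblk_t for filesystem-wide block numbers and ext3_grpblk_t for block numbers relative to one block group, converting between them via ext3_group_first_block_no(). A minimal standalone sketch of that relationship follows; the typedef widths (unsigned long and int) are an assumption inferred from the E3FSBLK/%lu and %d format strings in the hunks above, not a quote of the kernel headers.

    /* Hypothetical model of the two block-number domains (not kernel code). */
    #include <stdio.h>

    typedef unsigned long sketch_fsblk_t;   /* filesystem-wide block number */
    typedef int           sketch_grpblk_t;  /* block number within one group */

    /* First filesystem-wide block of a group, the role played by
     * ext3_group_first_block_no() in the hunks above. */
    static sketch_fsblk_t group_first_block(sketch_fsblk_t first_data_block,
                                            unsigned long blocks_per_group,
                                            unsigned long group)
    {
            return first_data_block + group * blocks_per_group;
    }

    int main(void)
    {
            sketch_grpblk_t grp_alloc_blk = 1234;     /* group-relative result */
            sketch_fsblk_t ret_block =
                    group_first_block(1, 32768, 5) + grp_alloc_blk;
            printf("fs-wide block %lu\n", ret_block); /* prints 165075 */
            return 0;
    }

This identity is what lets ext3_new_blocks() drop the old open-coded target_block computation ("grp_alloc_blk + group_no * EXT3_BLOCKS_PER_GROUP(sb) + s_first_data_block") in favour of the helper.
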
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index dc826464f313..36546ed36a14 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -262,9 +262,11 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
262 int ngroups = sbi->s_groups_count; 262 int ngroups = sbi->s_groups_count;
263 int inodes_per_group = EXT3_INODES_PER_GROUP(sb); 263 int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
264 int freei, avefreei; 264 int freei, avefreei;
265 int freeb, avefreeb; 265 ext3_fsblk_t freeb, avefreeb;
266 int blocks_per_dir, ndirs; 266 ext3_fsblk_t blocks_per_dir;
267 int max_debt, max_dirs, min_blocks, min_inodes; 267 int ndirs;
268 int max_debt, max_dirs, min_inodes;
269 ext3_grpblk_t min_blocks;
268 int group = -1, i; 270 int group = -1, i;
269 struct ext3_group_desc *desc; 271 struct ext3_group_desc *desc;
270 struct buffer_head *bh; 272 struct buffer_head *bh;
@@ -307,7 +309,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
307 min_inodes = avefreei - inodes_per_group / 4; 309 min_inodes = avefreei - inodes_per_group / 4;
308 min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4; 310 min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
309 311
310 max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, BLOCK_COST); 312 max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, (ext3_fsblk_t)BLOCK_COST);
311 if (max_debt * INODE_COST > inodes_per_group) 313 if (max_debt * INODE_COST > inodes_per_group)
312 max_debt = inodes_per_group / INODE_COST; 314 max_debt = inodes_per_group / INODE_COST;
313 if (max_debt > 255) 315 if (max_debt > 255)
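
In find_group_orlov() above, blocks_per_dir becomes ext3_fsblk_t, so BLOCK_COST now has to be cast to the same type before going through max(). The cast is needed because the kernel's min()/max() macros reject mixed-type arguments with a pointer-comparison trick; the sketch below reproduces that idiom under a hypothetical name and placeholder values rather than quoting the real header.

    /* Sketch of a type-checking max() (hypothetical macro, GCC extensions). */
    #include <stdio.h>

    #define sketch_max(x, y) ({                                           \
            typeof(x) _x = (x);                                           \
            typeof(y) _y = (y);                                           \
            (void)(&_x == &_y);   /* warns when x and y differ in type */ \
            _x > _y ? _x : _y; })

    int main(void)
    {
            unsigned long blocks_per_dir = 48;   /* placeholder value */
            /* the cast keeps both operands the same type, as in the hunk above */
            printf("%lu\n", sketch_max(blocks_per_dir, (unsigned long)256));
            return 0;
    }

Without the explicit (ext3_fsblk_t) cast in the hunk above, the comparison inside the macro would mix unsigned long and int and the build would warn.
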
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2edd7eec88fd..0321e1b9034a 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -62,7 +62,7 @@ static int ext3_inode_is_fast_symlink(struct inode *inode)
62 * still needs to be revoked. 62 * still needs to be revoked.
63 */ 63 */
64int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, 64int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
65 struct buffer_head *bh, int blocknr) 65 struct buffer_head *bh, ext3_fsblk_t blocknr)
66{ 66{
67 int err; 67 int err;
68 68
@@ -407,13 +407,13 @@ no_block:
407 * 407 *
408 * Caller must make sure that @ind is valid and will stay that way. 408 * Caller must make sure that @ind is valid and will stay that way.
409 */ 409 */
410static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) 410static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
411{ 411{
412 struct ext3_inode_info *ei = EXT3_I(inode); 412 struct ext3_inode_info *ei = EXT3_I(inode);
413 __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data; 413 __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
414 __le32 *p; 414 __le32 *p;
415 unsigned long bg_start; 415 ext3_fsblk_t bg_start;
416 unsigned long colour; 416 ext3_grpblk_t colour;
417 417
418 /* Try to find previous block */ 418 /* Try to find previous block */
419 for (p = ind->p - 1; p >= start; p--) { 419 for (p = ind->p - 1; p >= start; p--) {
@@ -429,8 +429,7 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
429 * It is going to be referred to from the inode itself? OK, just put it 429 * It is going to be referred to from the inode itself? OK, just put it
430 * into the same cylinder group then. 430 * into the same cylinder group then.
431 */ 431 */
432 bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + 432 bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
433 le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
434 colour = (current->pid % 16) * 433 colour = (current->pid % 16) *
435 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16); 434 (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
436 return bg_start + colour; 435 return bg_start + colour;
@@ -448,7 +447,7 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
448 * stores it in *@goal and returns zero. 447 * stores it in *@goal and returns zero.
449 */ 448 */
450 449
451static unsigned long ext3_find_goal(struct inode *inode, long block, 450static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
452 Indirect chain[4], Indirect *partial) 451 Indirect chain[4], Indirect *partial)
453{ 452{
454 struct ext3_block_alloc_info *block_i; 453 struct ext3_block_alloc_info *block_i;
@@ -516,13 +515,13 @@ static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
516 * direct blocks 515 * direct blocks
517 */ 516 */
518static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, 517static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
519 unsigned long goal, int indirect_blks, int blks, 518 ext3_fsblk_t goal, int indirect_blks, int blks,
520 unsigned long long new_blocks[4], int *err) 519 ext3_fsblk_t new_blocks[4], int *err)
521{ 520{
522 int target, i; 521 int target, i;
523 unsigned long count = 0; 522 unsigned long count = 0;
524 int index = 0; 523 int index = 0;
525 unsigned long current_block = 0; 524 ext3_fsblk_t current_block = 0;
526 int ret = 0; 525 int ret = 0;
527 526
528 /* 527 /*
@@ -592,7 +591,7 @@ failed_out:
592 * as described above and return 0. 591 * as described above and return 0.
593 */ 592 */
594static int ext3_alloc_branch(handle_t *handle, struct inode *inode, 593static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
595 int indirect_blks, int *blks, unsigned long goal, 594 int indirect_blks, int *blks, ext3_fsblk_t goal,
596 int *offsets, Indirect *branch) 595 int *offsets, Indirect *branch)
597{ 596{
598 int blocksize = inode->i_sb->s_blocksize; 597 int blocksize = inode->i_sb->s_blocksize;
@@ -600,8 +599,8 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
600 int err = 0; 599 int err = 0;
601 struct buffer_head *bh; 600 struct buffer_head *bh;
602 int num; 601 int num;
603 unsigned long long new_blocks[4]; 602 ext3_fsblk_t new_blocks[4];
604 unsigned long long current_block; 603 ext3_fsblk_t current_block;
605 604
606 num = ext3_alloc_blocks(handle, inode, goal, indirect_blks, 605 num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
607 *blks, new_blocks, &err); 606 *blks, new_blocks, &err);
@@ -688,7 +687,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
688 int i; 687 int i;
689 int err = 0; 688 int err = 0;
690 struct ext3_block_alloc_info *block_i; 689 struct ext3_block_alloc_info *block_i;
691 unsigned long current_block; 690 ext3_fsblk_t current_block;
692 691
693 block_i = EXT3_I(inode)->i_block_alloc_info; 692 block_i = EXT3_I(inode)->i_block_alloc_info;
694 /* 693 /*
@@ -795,13 +794,13 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
795 int offsets[4]; 794 int offsets[4];
796 Indirect chain[4]; 795 Indirect chain[4];
797 Indirect *partial; 796 Indirect *partial;
798 unsigned long goal; 797 ext3_fsblk_t goal;
799 int indirect_blks; 798 int indirect_blks;
800 int blocks_to_boundary = 0; 799 int blocks_to_boundary = 0;
801 int depth; 800 int depth;
802 struct ext3_inode_info *ei = EXT3_I(inode); 801 struct ext3_inode_info *ei = EXT3_I(inode);
803 int count = 0; 802 int count = 0;
804 unsigned long first_block = 0; 803 ext3_fsblk_t first_block = 0;
805 804
806 805
807 J_ASSERT(handle != NULL || create == 0); 806 J_ASSERT(handle != NULL || create == 0);
@@ -819,7 +818,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
819 count++; 818 count++;
820 /*map more blocks*/ 819 /*map more blocks*/
821 while (count < maxblocks && count <= blocks_to_boundary) { 820 while (count < maxblocks && count <= blocks_to_boundary) {
822 unsigned long blk; 821 ext3_fsblk_t blk;
823 822
824 if (!verify_chain(chain, partial)) { 823 if (!verify_chain(chain, partial)) {
825 /* 824 /*
@@ -1759,7 +1758,7 @@ void ext3_set_aops(struct inode *inode)
1759static int ext3_block_truncate_page(handle_t *handle, struct page *page, 1758static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1760 struct address_space *mapping, loff_t from) 1759 struct address_space *mapping, loff_t from)
1761{ 1760{
1762 unsigned long index = from >> PAGE_CACHE_SHIFT; 1761 ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1763 unsigned offset = from & (PAGE_CACHE_SIZE-1); 1762 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1764 unsigned blocksize, iblock, length, pos; 1763 unsigned blocksize, iblock, length, pos;
1765 struct inode *inode = mapping->host; 1764 struct inode *inode = mapping->host;
@@ -1960,7 +1959,7 @@ no_top:
1960 * than `count' because there can be holes in there. 1959 * than `count' because there can be holes in there.
1961 */ 1960 */
1962static void ext3_clear_blocks(handle_t *handle, struct inode *inode, 1961static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
1963 struct buffer_head *bh, unsigned long block_to_free, 1962 struct buffer_head *bh, ext3_fsblk_t block_to_free,
1964 unsigned long count, __le32 *first, __le32 *last) 1963 unsigned long count, __le32 *first, __le32 *last)
1965{ 1964{
1966 __le32 *p; 1965 __le32 *p;
@@ -2022,12 +2021,12 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
2022 struct buffer_head *this_bh, 2021 struct buffer_head *this_bh,
2023 __le32 *first, __le32 *last) 2022 __le32 *first, __le32 *last)
2024{ 2023{
2025 unsigned long block_to_free = 0; /* Starting block # of a run */ 2024 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
2026 unsigned long count = 0; /* Number of blocks in the run */ 2025 unsigned long count = 0; /* Number of blocks in the run */
2027 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 2026 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
2028 corresponding to 2027 corresponding to
2029 block_to_free */ 2028 block_to_free */
2030 unsigned long nr; /* Current block # */ 2029 ext3_fsblk_t nr; /* Current block # */
2031 __le32 *p; /* Pointer into inode/ind 2030 __le32 *p; /* Pointer into inode/ind
2032 for current block */ 2031 for current block */
2033 int err; 2032 int err;
@@ -2089,7 +2088,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
2089 struct buffer_head *parent_bh, 2088 struct buffer_head *parent_bh,
2090 __le32 *first, __le32 *last, int depth) 2089 __le32 *first, __le32 *last, int depth)
2091{ 2090{
2092 unsigned long nr; 2091 ext3_fsblk_t nr;
2093 __le32 *p; 2092 __le32 *p;
2094 2093
2095 if (is_handle_aborted(handle)) 2094 if (is_handle_aborted(handle))
@@ -2113,7 +2112,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
2113 */ 2112 */
2114 if (!bh) { 2113 if (!bh) {
2115 ext3_error(inode->i_sb, "ext3_free_branches", 2114 ext3_error(inode->i_sb, "ext3_free_branches",
2116 "Read failure, inode=%ld, block=%ld", 2115 "Read failure, inode=%ld, block="E3FSBLK,
2117 inode->i_ino, nr); 2116 inode->i_ino, nr);
2118 continue; 2117 continue;
2119 } 2118 }
@@ -2394,11 +2393,12 @@ out_stop:
2394 ext3_journal_stop(handle); 2393 ext3_journal_stop(handle);
2395} 2394}
2396 2395
2397static unsigned long ext3_get_inode_block(struct super_block *sb, 2396static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2398 unsigned long ino, struct ext3_iloc *iloc) 2397 unsigned long ino, struct ext3_iloc *iloc)
2399{ 2398{
2400 unsigned long desc, group_desc, block_group; 2399 unsigned long desc, group_desc, block_group;
2401 unsigned long offset, block; 2400 unsigned long offset;
2401 ext3_fsblk_t block;
2402 struct buffer_head *bh; 2402 struct buffer_head *bh;
2403 struct ext3_group_desc * gdp; 2403 struct ext3_group_desc * gdp;
2404 2404
@@ -2448,7 +2448,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
2448static int __ext3_get_inode_loc(struct inode *inode, 2448static int __ext3_get_inode_loc(struct inode *inode,
2449 struct ext3_iloc *iloc, int in_mem) 2449 struct ext3_iloc *iloc, int in_mem)
2450{ 2450{
2451 unsigned long block; 2451 ext3_fsblk_t block;
2452 struct buffer_head *bh; 2452 struct buffer_head *bh;
2453 2453
2454 block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc); 2454 block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
@@ -2459,7 +2459,8 @@ static int __ext3_get_inode_loc(struct inode *inode,
2459 if (!bh) { 2459 if (!bh) {
2460 ext3_error (inode->i_sb, "ext3_get_inode_loc", 2460 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2461 "unable to read inode block - " 2461 "unable to read inode block - "
2462 "inode=%lu, block=%lu", inode->i_ino, block); 2462 "inode=%lu, block="E3FSBLK,
2463 inode->i_ino, block);
2463 return -EIO; 2464 return -EIO;
2464 } 2465 }
2465 if (!buffer_uptodate(bh)) { 2466 if (!buffer_uptodate(bh)) {
@@ -2540,7 +2541,7 @@ make_io:
2540 if (!buffer_uptodate(bh)) { 2541 if (!buffer_uptodate(bh)) {
2541 ext3_error(inode->i_sb, "ext3_get_inode_loc", 2542 ext3_error(inode->i_sb, "ext3_get_inode_loc",
2542 "unable to read inode block - " 2543 "unable to read inode block - "
2543 "inode=%lu, block=%lu", 2544 "inode=%lu, block="E3FSBLK,
2544 inode->i_ino, block); 2545 inode->i_ino, block);
2545 brelse(bh); 2546 brelse(bh);
2546 return -EIO; 2547 return -EIO;
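
ext3_find_near() above now builds its allocation goal as ext3_group_first_block_no() plus a per-process "colour" offset, all carried in ext3_fsblk_t. A minimal standalone sketch of that heuristic, with illustrative values rather than the kernel's:

    #include <stdio.h>

    /* goal = first block of the inode's group + (pid % 16) sixteenths of the
     * group, so concurrent writers tend to spread out inside the group. */
    static unsigned long sketch_goal(unsigned long bg_start,
                                     unsigned long blocks_per_group,
                                     unsigned int pid)
    {
            unsigned long colour = (pid % 16) * (blocks_per_group / 16);
            return bg_start + colour;
    }

    int main(void)
    {
            /* group starting at block 32768, 32768 blocks per group, pid 1001 */
            printf("goal %lu\n", sketch_goal(32768, 32768, 1001)); /* 51200 */
            return 0;
    }
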
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 8c22aa9a7fbb..3a6b012d120c 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -204,7 +204,7 @@ flags_err:
204 return 0; 204 return 0;
205 } 205 }
206 case EXT3_IOC_GROUP_EXTEND: { 206 case EXT3_IOC_GROUP_EXTEND: {
207 unsigned long n_blocks_count; 207 ext3_fsblk_t n_blocks_count;
208 struct super_block *sb = inode->i_sb; 208 struct super_block *sb = inode->i_sb;
209 int err; 209 int err;
210 210
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index b8f5cd1e540d..d9176dba3698 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1379,7 +1379,6 @@ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
1379 int dx_fallback=0; 1379 int dx_fallback=0;
1380#endif 1380#endif
1381 unsigned blocksize; 1381 unsigned blocksize;
1382 unsigned nlen, rlen;
1383 u32 block, blocks; 1382 u32 block, blocks;
1384 1383
1385 sb = dir->i_sb; 1384 sb = dir->i_sb;
@@ -1417,8 +1416,7 @@ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
1417 return retval; 1416 return retval;
1418 de = (struct ext3_dir_entry_2 *) bh->b_data; 1417 de = (struct ext3_dir_entry_2 *) bh->b_data;
1419 de->inode = 0; 1418 de->inode = 0;
1420 de->rec_len = cpu_to_le16(rlen = blocksize); 1419 de->rec_len = cpu_to_le16(blocksize);
1421 nlen = 0;
1422 return add_dirent_to_buf(handle, dentry, inode, de, bh); 1420 return add_dirent_to_buf(handle, dentry, inode, de, bh);
1423} 1421}
1424 1422
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 34b39e9a1e5a..dfd811895d8f 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -28,16 +28,16 @@ static int verify_group_input(struct super_block *sb,
28{ 28{
29 struct ext3_sb_info *sbi = EXT3_SB(sb); 29 struct ext3_sb_info *sbi = EXT3_SB(sb);
30 struct ext3_super_block *es = sbi->s_es; 30 struct ext3_super_block *es = sbi->s_es;
31 unsigned start = le32_to_cpu(es->s_blocks_count); 31 ext3_fsblk_t start = le32_to_cpu(es->s_blocks_count);
32 unsigned end = start + input->blocks_count; 32 ext3_fsblk_t end = start + input->blocks_count;
33 unsigned group = input->group; 33 unsigned group = input->group;
34 unsigned itend = input->inode_table + sbi->s_itb_per_group; 34 ext3_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
35 unsigned overhead = ext3_bg_has_super(sb, group) ? 35 unsigned overhead = ext3_bg_has_super(sb, group) ?
36 (1 + ext3_bg_num_gdb(sb, group) + 36 (1 + ext3_bg_num_gdb(sb, group) +
37 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; 37 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
38 unsigned metaend = start + overhead; 38 ext3_fsblk_t metaend = start + overhead;
39 struct buffer_head *bh = NULL; 39 struct buffer_head *bh = NULL;
40 int free_blocks_count; 40 ext3_grpblk_t free_blocks_count;
41 int err = -EINVAL; 41 int err = -EINVAL;
42 42
43 input->free_blocks_count = free_blocks_count = 43 input->free_blocks_count = free_blocks_count =
@@ -64,7 +64,8 @@ static int verify_group_input(struct super_block *sb,
64 ext3_warning(sb, __FUNCTION__, "Bad blocks count %u", 64 ext3_warning(sb, __FUNCTION__, "Bad blocks count %u",
65 input->blocks_count); 65 input->blocks_count);
66 else if (!(bh = sb_bread(sb, end - 1))) 66 else if (!(bh = sb_bread(sb, end - 1)))
67 ext3_warning(sb, __FUNCTION__, "Cannot read last block (%u)", 67 ext3_warning(sb, __FUNCTION__,
68 "Cannot read last block ("E3FSBLK")",
68 end - 1); 69 end - 1);
69 else if (outside(input->block_bitmap, start, end)) 70 else if (outside(input->block_bitmap, start, end))
70 ext3_warning(sb, __FUNCTION__, 71 ext3_warning(sb, __FUNCTION__,
@@ -77,7 +78,7 @@ static int verify_group_input(struct super_block *sb,
77 else if (outside(input->inode_table, start, end) || 78 else if (outside(input->inode_table, start, end) ||
78 outside(itend - 1, start, end)) 79 outside(itend - 1, start, end))
79 ext3_warning(sb, __FUNCTION__, 80 ext3_warning(sb, __FUNCTION__,
80 "Inode table not in group (blocks %u-%u)", 81 "Inode table not in group (blocks %u-"E3FSBLK")",
81 input->inode_table, itend - 1); 82 input->inode_table, itend - 1);
82 else if (input->inode_bitmap == input->block_bitmap) 83 else if (input->inode_bitmap == input->block_bitmap)
83 ext3_warning(sb, __FUNCTION__, 84 ext3_warning(sb, __FUNCTION__,
@@ -85,24 +86,27 @@ static int verify_group_input(struct super_block *sb,
85 input->block_bitmap); 86 input->block_bitmap);
86 else if (inside(input->block_bitmap, input->inode_table, itend)) 87 else if (inside(input->block_bitmap, input->inode_table, itend))
87 ext3_warning(sb, __FUNCTION__, 88 ext3_warning(sb, __FUNCTION__,
88 "Block bitmap (%u) in inode table (%u-%u)", 89 "Block bitmap (%u) in inode table (%u-"E3FSBLK")",
89 input->block_bitmap, input->inode_table, itend-1); 90 input->block_bitmap, input->inode_table, itend-1);
90 else if (inside(input->inode_bitmap, input->inode_table, itend)) 91 else if (inside(input->inode_bitmap, input->inode_table, itend))
91 ext3_warning(sb, __FUNCTION__, 92 ext3_warning(sb, __FUNCTION__,
92 "Inode bitmap (%u) in inode table (%u-%u)", 93 "Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
93 input->inode_bitmap, input->inode_table, itend-1); 94 input->inode_bitmap, input->inode_table, itend-1);
94 else if (inside(input->block_bitmap, start, metaend)) 95 else if (inside(input->block_bitmap, start, metaend))
95 ext3_warning(sb, __FUNCTION__, 96 ext3_warning(sb, __FUNCTION__,
96 "Block bitmap (%u) in GDT table (%u-%u)", 97 "Block bitmap (%u) in GDT table"
98 " ("E3FSBLK"-"E3FSBLK")",
97 input->block_bitmap, start, metaend - 1); 99 input->block_bitmap, start, metaend - 1);
98 else if (inside(input->inode_bitmap, start, metaend)) 100 else if (inside(input->inode_bitmap, start, metaend))
99 ext3_warning(sb, __FUNCTION__, 101 ext3_warning(sb, __FUNCTION__,
100 "Inode bitmap (%u) in GDT table (%u-%u)", 102 "Inode bitmap (%u) in GDT table"
103 " ("E3FSBLK"-"E3FSBLK")",
101 input->inode_bitmap, start, metaend - 1); 104 input->inode_bitmap, start, metaend - 1);
102 else if (inside(input->inode_table, start, metaend) || 105 else if (inside(input->inode_table, start, metaend) ||
103 inside(itend - 1, start, metaend)) 106 inside(itend - 1, start, metaend))
104 ext3_warning(sb, __FUNCTION__, 107 ext3_warning(sb, __FUNCTION__,
 105 			 "Inode table (%u-%u) overlaps GDT table (%u-%u)", 108 			 "Inode table (%u-"E3FSBLK") overlaps "
109 "GDT table ("E3FSBLK"-"E3FSBLK")",
106 input->inode_table, itend - 1, start, metaend - 1); 110 input->inode_table, itend - 1, start, metaend - 1);
107 else 111 else
108 err = 0; 112 err = 0;
@@ -112,7 +116,7 @@ static int verify_group_input(struct super_block *sb,
112} 116}
113 117
114static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, 118static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
115 unsigned long blk) 119 ext3_fsblk_t blk)
116{ 120{
117 struct buffer_head *bh; 121 struct buffer_head *bh;
118 int err; 122 int err;
@@ -163,15 +167,14 @@ static int setup_new_group_blocks(struct super_block *sb,
163 struct ext3_new_group_data *input) 167 struct ext3_new_group_data *input)
164{ 168{
165 struct ext3_sb_info *sbi = EXT3_SB(sb); 169 struct ext3_sb_info *sbi = EXT3_SB(sb);
166 unsigned long start = input->group * sbi->s_blocks_per_group + 170 ext3_fsblk_t start = ext3_group_first_block_no(sb, input->group);
167 le32_to_cpu(sbi->s_es->s_first_data_block);
168 int reserved_gdb = ext3_bg_has_super(sb, input->group) ? 171 int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
169 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0; 172 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
170 unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group); 173 unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group);
171 struct buffer_head *bh; 174 struct buffer_head *bh;
172 handle_t *handle; 175 handle_t *handle;
173 unsigned long block; 176 ext3_fsblk_t block;
174 int bit; 177 ext3_grpblk_t bit;
175 int i; 178 int i;
176 int err = 0, err2; 179 int err = 0, err2;
177 180
@@ -328,7 +331,7 @@ static unsigned ext3_list_backups(struct super_block *sb, unsigned *three,
328static int verify_reserved_gdb(struct super_block *sb, 331static int verify_reserved_gdb(struct super_block *sb,
329 struct buffer_head *primary) 332 struct buffer_head *primary)
330{ 333{
331 const unsigned long blk = primary->b_blocknr; 334 const ext3_fsblk_t blk = primary->b_blocknr;
332 const unsigned long end = EXT3_SB(sb)->s_groups_count; 335 const unsigned long end = EXT3_SB(sb)->s_groups_count;
333 unsigned three = 1; 336 unsigned three = 1;
334 unsigned five = 5; 337 unsigned five = 5;
@@ -340,7 +343,8 @@ static int verify_reserved_gdb(struct super_block *sb,
340 while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) { 343 while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
341 if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){ 344 if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
342 ext3_warning(sb, __FUNCTION__, 345 ext3_warning(sb, __FUNCTION__,
343 "reserved GDT %ld missing grp %d (%ld)", 346 "reserved GDT "E3FSBLK
347 " missing grp %d ("E3FSBLK")",
344 blk, grp, 348 blk, grp,
345 grp * EXT3_BLOCKS_PER_GROUP(sb) + blk); 349 grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
346 return -EINVAL; 350 return -EINVAL;
@@ -372,7 +376,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
372 struct super_block *sb = inode->i_sb; 376 struct super_block *sb = inode->i_sb;
373 struct ext3_super_block *es = EXT3_SB(sb)->s_es; 377 struct ext3_super_block *es = EXT3_SB(sb)->s_es;
374 unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb); 378 unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
375 unsigned long gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; 379 ext3_fsblk_t gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
376 struct buffer_head **o_group_desc, **n_group_desc; 380 struct buffer_head **o_group_desc, **n_group_desc;
377 struct buffer_head *dind; 381 struct buffer_head *dind;
378 int gdbackups; 382 int gdbackups;
@@ -417,7 +421,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
417 data = (__u32 *)dind->b_data; 421 data = (__u32 *)dind->b_data;
418 if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) { 422 if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
419 ext3_warning(sb, __FUNCTION__, 423 ext3_warning(sb, __FUNCTION__,
420 "new group %u GDT block %lu not reserved", 424 "new group %u GDT block "E3FSBLK" not reserved",
421 input->group, gdblock); 425 input->group, gdblock);
422 err = -EINVAL; 426 err = -EINVAL;
423 goto exit_dind; 427 goto exit_dind;
@@ -515,7 +519,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
515 struct buffer_head **primary; 519 struct buffer_head **primary;
516 struct buffer_head *dind; 520 struct buffer_head *dind;
517 struct ext3_iloc iloc; 521 struct ext3_iloc iloc;
518 unsigned long blk; 522 ext3_fsblk_t blk;
519 __u32 *data, *end; 523 __u32 *data, *end;
520 int gdbackups = 0; 524 int gdbackups = 0;
521 int res, i; 525 int res, i;
@@ -540,7 +544,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
540 for (res = 0; res < reserved_gdb; res++, blk++) { 544 for (res = 0; res < reserved_gdb; res++, blk++) {
541 if (le32_to_cpu(*data) != blk) { 545 if (le32_to_cpu(*data) != blk) {
542 ext3_warning(sb, __FUNCTION__, 546 ext3_warning(sb, __FUNCTION__,
543 "reserved block %lu not at offset %ld", 547 "reserved block "E3FSBLK
548 " not at offset %ld",
544 blk, (long)(data - (__u32 *)dind->b_data)); 549 blk, (long)(data - (__u32 *)dind->b_data));
545 err = -EINVAL; 550 err = -EINVAL;
546 goto exit_bh; 551 goto exit_bh;
@@ -902,15 +907,16 @@ exit_put:
902 * GDT blocks are reserved to grow to the desired size. 907 * GDT blocks are reserved to grow to the desired size.
903 */ 908 */
904int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, 909int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
905 unsigned long n_blocks_count) 910 ext3_fsblk_t n_blocks_count)
906{ 911{
907 unsigned long o_blocks_count; 912 ext3_fsblk_t o_blocks_count;
908 unsigned long o_groups_count; 913 unsigned long o_groups_count;
909 unsigned long last; 914 ext3_grpblk_t last;
910 int add; 915 ext3_grpblk_t add;
911 struct buffer_head * bh; 916 struct buffer_head * bh;
912 handle_t *handle; 917 handle_t *handle;
913 int err, freed_blocks; 918 int err;
919 unsigned long freed_blocks;
914 920
915 /* We don't need to worry about locking wrt other resizers just 921 /* We don't need to worry about locking wrt other resizers just
916 * yet: we're going to revalidate es->s_blocks_count after 922 * yet: we're going to revalidate es->s_blocks_count after
@@ -919,12 +925,22 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
919 o_groups_count = EXT3_SB(sb)->s_groups_count; 925 o_groups_count = EXT3_SB(sb)->s_groups_count;
920 926
921 if (test_opt(sb, DEBUG)) 927 if (test_opt(sb, DEBUG))
 922 		printk(KERN_DEBUG "EXT3-fs: extending last group from %lu to %lu blocks\n", 928 		printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK" to "E3FSBLK" blocks\n",
923 o_blocks_count, n_blocks_count); 929 o_blocks_count, n_blocks_count);
924 930
925 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) 931 if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
926 return 0; 932 return 0;
927 933
934 if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
935 printk(KERN_ERR "EXT3-fs: filesystem on %s:"
936 " too large to resize to %lu blocks safely\n",
937 sb->s_id, n_blocks_count);
938 if (sizeof(sector_t) < 8)
939 ext3_warning(sb, __FUNCTION__,
940 "CONFIG_LBD not enabled\n");
941 return -EINVAL;
942 }
943
928 if (n_blocks_count < o_blocks_count) { 944 if (n_blocks_count < o_blocks_count) {
929 ext3_warning(sb, __FUNCTION__, 945 ext3_warning(sb, __FUNCTION__,
930 "can't shrink FS - resize aborted"); 946 "can't shrink FS - resize aborted");
@@ -948,7 +964,8 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
948 964
949 if (o_blocks_count + add < n_blocks_count) 965 if (o_blocks_count + add < n_blocks_count)
950 ext3_warning(sb, __FUNCTION__, 966 ext3_warning(sb, __FUNCTION__,
951 "will only finish group (%lu blocks, %u new)", 967 "will only finish group ("E3FSBLK
968 " blocks, %u new)",
952 o_blocks_count + add, add); 969 o_blocks_count + add, add);
953 970
954 /* See if the device is actually as big as what was requested */ 971 /* See if the device is actually as big as what was requested */
@@ -991,10 +1008,10 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
991 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); 1008 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
992 sb->s_dirt = 1; 1009 sb->s_dirt = 1;
993 unlock_super(sb); 1010 unlock_super(sb);
994 ext3_debug("freeing blocks %ld through %ld\n", o_blocks_count, 1011 ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count,
995 o_blocks_count + add); 1012 o_blocks_count + add);
996 ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); 1013 ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
997 ext3_debug("freed blocks %ld through %ld\n", o_blocks_count, 1014 ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n", o_blocks_count,
998 o_blocks_count + add); 1015 o_blocks_count + add);
999 if ((err = ext3_journal_stop(handle))) 1016 if ((err = ext3_journal_stop(handle)))
1000 goto exit_put; 1017 goto exit_put;
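
The new guard in ext3_group_extend() above refuses to grow the filesystem past what sector_t can address: n_blocks_count may not exceed (sector_t)(~0ULL) >> (s_blocksize_bits - 9). Worked through for 4 KiB blocks and a 32-bit sector_t (CONFIG_LBD disabled), that limit is 0xFFFFFFFF >> 3 = 536,870,911 blocks, roughly 2 TiB. A small userspace sketch of the same arithmetic; the 4 KiB block size is an assumption chosen for the example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t max_sector = ~(uint32_t)0;   /* sector_t without CONFIG_LBD */
            unsigned int blocksize_bits = 12;     /* 4 KiB blocks (assumed) */
            unsigned long max_blocks = max_sector >> (blocksize_bits - 9);

            printf("max blocks: %lu (~%lu GiB)\n",
                   max_blocks, max_blocks >> (30 - blocksize_bits));
            return 0;
    }

With CONFIG_LBD enabled, sector_t is 64 bits wide and the same check only trips for far larger block counts.
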
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index a60cc6ec130f..b2891cc29db1 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -689,14 +689,15 @@ static match_table_t tokens = {
689 {Opt_resize, "resize"}, 689 {Opt_resize, "resize"},
690}; 690};
691 691
692static unsigned long get_sb_block(void **data) 692static ext3_fsblk_t get_sb_block(void **data)
693{ 693{
694 unsigned long sb_block; 694 ext3_fsblk_t sb_block;
695 char *options = (char *) *data; 695 char *options = (char *) *data;
696 696
697 if (!options || strncmp(options, "sb=", 3) != 0) 697 if (!options || strncmp(options, "sb=", 3) != 0)
698 return 1; /* Default location */ 698 return 1; /* Default location */
699 options += 3; 699 options += 3;
700 /*todo: use simple_strtoll with >32bit ext3 */
700 sb_block = simple_strtoul(options, &options, 0); 701 sb_block = simple_strtoul(options, &options, 0);
701 if (*options && *options != ',') { 702 if (*options && *options != ',') {
702 printk("EXT3-fs: Invalid sb specification: %s\n", 703 printk("EXT3-fs: Invalid sb specification: %s\n",
@@ -711,7 +712,7 @@ static unsigned long get_sb_block(void **data)
711 712
712static int parse_options (char *options, struct super_block *sb, 713static int parse_options (char *options, struct super_block *sb,
713 unsigned long *inum, unsigned long *journal_devnum, 714 unsigned long *inum, unsigned long *journal_devnum,
714 unsigned long *n_blocks_count, int is_remount) 715 ext3_fsblk_t *n_blocks_count, int is_remount)
715{ 716{
716 struct ext3_sb_info *sbi = EXT3_SB(sb); 717 struct ext3_sb_info *sbi = EXT3_SB(sb);
717 char * p; 718 char * p;
@@ -1128,7 +1129,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
1128static int ext3_check_descriptors (struct super_block * sb) 1129static int ext3_check_descriptors (struct super_block * sb)
1129{ 1130{
1130 struct ext3_sb_info *sbi = EXT3_SB(sb); 1131 struct ext3_sb_info *sbi = EXT3_SB(sb);
1131 unsigned long block = le32_to_cpu(sbi->s_es->s_first_data_block); 1132 ext3_fsblk_t block = le32_to_cpu(sbi->s_es->s_first_data_block);
1132 struct ext3_group_desc * gdp = NULL; 1133 struct ext3_group_desc * gdp = NULL;
1133 int desc_block = 0; 1134 int desc_block = 0;
1134 int i; 1135 int i;
@@ -1315,15 +1316,14 @@ static loff_t ext3_max_size(int bits)
1315 return res; 1316 return res;
1316} 1317}
1317 1318
1318static unsigned long descriptor_loc(struct super_block *sb, 1319static ext3_fsblk_t descriptor_loc(struct super_block *sb,
1319 unsigned long logic_sb_block, 1320 ext3_fsblk_t logic_sb_block,
1320 int nr) 1321 int nr)
1321{ 1322{
1322 struct ext3_sb_info *sbi = EXT3_SB(sb); 1323 struct ext3_sb_info *sbi = EXT3_SB(sb);
1323 unsigned long bg, first_data_block, first_meta_bg; 1324 unsigned long bg, first_meta_bg;
1324 int has_super = 0; 1325 int has_super = 0;
1325 1326
1326 first_data_block = le32_to_cpu(sbi->s_es->s_first_data_block);
1327 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); 1327 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
1328 1328
1329 if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) || 1329 if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
@@ -1332,7 +1332,7 @@ static unsigned long descriptor_loc(struct super_block *sb,
1332 bg = sbi->s_desc_per_block * nr; 1332 bg = sbi->s_desc_per_block * nr;
1333 if (ext3_bg_has_super(sb, bg)) 1333 if (ext3_bg_has_super(sb, bg))
1334 has_super = 1; 1334 has_super = 1;
1335 return (first_data_block + has_super + (bg * sbi->s_blocks_per_group)); 1335 return (has_super + ext3_group_first_block_no(sb, bg));
1336} 1336}
1337 1337
1338 1338
@@ -1341,9 +1341,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1341 struct buffer_head * bh; 1341 struct buffer_head * bh;
1342 struct ext3_super_block *es = NULL; 1342 struct ext3_super_block *es = NULL;
1343 struct ext3_sb_info *sbi; 1343 struct ext3_sb_info *sbi;
1344 unsigned long block; 1344 ext3_fsblk_t block;
1345 unsigned long sb_block = get_sb_block(&data); 1345 ext3_fsblk_t sb_block = get_sb_block(&data);
1346 unsigned long logic_sb_block; 1346 ext3_fsblk_t logic_sb_block;
1347 unsigned long offset = 0; 1347 unsigned long offset = 0;
1348 unsigned long journal_inum = 0; 1348 unsigned long journal_inum = 0;
1349 unsigned long journal_devnum = 0; 1349 unsigned long journal_devnum = 0;
@@ -1565,6 +1565,16 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1565 goto failed_mount; 1565 goto failed_mount;
1566 } 1566 }
1567 1567
1568 if (le32_to_cpu(es->s_blocks_count) >
1569 (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
1570 printk(KERN_ERR "EXT3-fs: filesystem on %s:"
1571 " too large to mount safely\n", sb->s_id);
1572 if (sizeof(sector_t) < 8)
1573 printk(KERN_WARNING "EXT3-fs: CONFIG_LBD not "
1574 "enabled\n");
1575 goto failed_mount;
1576 }
1577
1568 if (EXT3_BLOCKS_PER_GROUP(sb) == 0) 1578 if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
1569 goto cantfind_ext3; 1579 goto cantfind_ext3;
1570 sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) - 1580 sbi->s_groups_count = (le32_to_cpu(es->s_blocks_count) -
@@ -1593,7 +1603,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1593 } 1603 }
1594 } 1604 }
1595 if (!ext3_check_descriptors (sb)) { 1605 if (!ext3_check_descriptors (sb)) {
1596 printk (KERN_ERR "EXT3-fs: group descriptors corrupted !\n"); 1606 printk(KERN_ERR "EXT3-fs: group descriptors corrupted!\n");
1597 goto failed_mount2; 1607 goto failed_mount2;
1598 } 1608 }
1599 sbi->s_gdb_count = db_count; 1609 sbi->s_gdb_count = db_count;
@@ -1830,10 +1840,10 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
1830{ 1840{
1831 struct buffer_head * bh; 1841 struct buffer_head * bh;
1832 journal_t *journal; 1842 journal_t *journal;
1833 int start; 1843 ext3_fsblk_t start;
1834 int len; 1844 ext3_fsblk_t len;
1835 int hblock, blocksize; 1845 int hblock, blocksize;
1836 unsigned long sb_block; 1846 ext3_fsblk_t sb_block;
1837 unsigned long offset; 1847 unsigned long offset;
1838 struct ext3_super_block * es; 1848 struct ext3_super_block * es;
1839 struct block_device *bdev; 1849 struct block_device *bdev;
@@ -2206,7 +2216,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
2206{ 2216{
2207 struct ext3_super_block * es; 2217 struct ext3_super_block * es;
2208 struct ext3_sb_info *sbi = EXT3_SB(sb); 2218 struct ext3_sb_info *sbi = EXT3_SB(sb);
2209 unsigned long n_blocks_count = 0; 2219 ext3_fsblk_t n_blocks_count = 0;
2210 unsigned long old_sb_flags; 2220 unsigned long old_sb_flags;
2211 struct ext3_mount_options old_opts; 2221 struct ext3_mount_options old_opts;
2212 int err; 2222 int err;
@@ -2326,7 +2336,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
2326 struct super_block *sb = dentry->d_sb; 2336 struct super_block *sb = dentry->d_sb;
2327 struct ext3_sb_info *sbi = EXT3_SB(sb); 2337 struct ext3_sb_info *sbi = EXT3_SB(sb);
2328 struct ext3_super_block *es = sbi->s_es; 2338 struct ext3_super_block *es = sbi->s_es;
2329 unsigned long overhead; 2339 ext3_fsblk_t overhead;
2330 int i; 2340 int i;
2331 2341
2332 if (test_opt (sb, MINIX_DF)) 2342 if (test_opt (sb, MINIX_DF))
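
get_sb_block() above now returns the "sb=" mount option as an ext3_fsblk_t, with a TODO noting that simple_strtoul() still limits it to 32 bits on 32-bit hosts. A hypothetical userspace sketch of the same parsing logic (function name and error handling are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned long sketch_get_sb_block(const char **data)
    {
            unsigned long sb_block;
            const char *options = *data;
            char *end;

            if (!options || strncmp(options, "sb=", 3) != 0)
                    return 1;                    /* default superblock location */
            options += 3;
            sb_block = strtoul(options, &end, 0);
            if (*end && *end != ',') {
                    fprintf(stderr, "invalid sb specification: %s\n", end);
                    return 1;
            }
            if (*end == ',')
                    end++;                       /* skip the separator */
            *data = end;                         /* hand the remaining options back */
            return sb_block;
    }

    int main(void)
    {
            const char *opts = "sb=8193,ro";
            unsigned long blk = sketch_get_sb_block(&opts);
            printf("sb block %lu, remaining \"%s\"\n", blk, opts);
            return 0;
    }
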
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index e8d60bf6b7df..a44a0562203a 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -225,7 +225,7 @@ ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
225 error = -ENODATA; 225 error = -ENODATA;
226 if (!EXT3_I(inode)->i_file_acl) 226 if (!EXT3_I(inode)->i_file_acl)
227 goto cleanup; 227 goto cleanup;
228 ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl); 228 ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
229 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); 229 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
230 if (!bh) 230 if (!bh)
231 goto cleanup; 231 goto cleanup;
@@ -233,7 +233,7 @@ ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
233 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 233 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
234 if (ext3_xattr_check_block(bh)) { 234 if (ext3_xattr_check_block(bh)) {
235bad_block: ext3_error(inode->i_sb, __FUNCTION__, 235bad_block: ext3_error(inode->i_sb, __FUNCTION__,
236 "inode %ld: bad block %d", inode->i_ino, 236 "inode %ld: bad block "E3FSBLK, inode->i_ino,
237 EXT3_I(inode)->i_file_acl); 237 EXT3_I(inode)->i_file_acl);
238 error = -EIO; 238 error = -EIO;
239 goto cleanup; 239 goto cleanup;
@@ -366,7 +366,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
366 error = 0; 366 error = 0;
367 if (!EXT3_I(inode)->i_file_acl) 367 if (!EXT3_I(inode)->i_file_acl)
368 goto cleanup; 368 goto cleanup;
369 ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl); 369 ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
370 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); 370 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
371 error = -EIO; 371 error = -EIO;
372 if (!bh) 372 if (!bh)
@@ -375,7 +375,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
375 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 375 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
376 if (ext3_xattr_check_block(bh)) { 376 if (ext3_xattr_check_block(bh)) {
377 ext3_error(inode->i_sb, __FUNCTION__, 377 ext3_error(inode->i_sb, __FUNCTION__,
378 "inode %ld: bad block %d", inode->i_ino, 378 "inode %ld: bad block "E3FSBLK, inode->i_ino,
379 EXT3_I(inode)->i_file_acl); 379 EXT3_I(inode)->i_file_acl);
380 error = -EIO; 380 error = -EIO;
381 goto cleanup; 381 goto cleanup;
@@ -647,7 +647,7 @@ ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
647 le32_to_cpu(BHDR(bs->bh)->h_refcount)); 647 le32_to_cpu(BHDR(bs->bh)->h_refcount));
648 if (ext3_xattr_check_block(bs->bh)) { 648 if (ext3_xattr_check_block(bs->bh)) {
649 ext3_error(sb, __FUNCTION__, 649 ext3_error(sb, __FUNCTION__,
650 "inode %ld: bad block %d", inode->i_ino, 650 "inode %ld: bad block "E3FSBLK, inode->i_ino,
651 EXT3_I(inode)->i_file_acl); 651 EXT3_I(inode)->i_file_acl);
652 error = -EIO; 652 error = -EIO;
653 goto cleanup; 653 goto cleanup;
@@ -792,11 +792,12 @@ inserted:
792 get_bh(new_bh); 792 get_bh(new_bh);
793 } else { 793 } else {
794 /* We need to allocate a new block */ 794 /* We need to allocate a new block */
795 int goal = le32_to_cpu( 795 ext3_fsblk_t goal = le32_to_cpu(
796 EXT3_SB(sb)->s_es->s_first_data_block) + 796 EXT3_SB(sb)->s_es->s_first_data_block) +
797 EXT3_I(inode)->i_block_group * 797 (ext3_fsblk_t)EXT3_I(inode)->i_block_group *
798 EXT3_BLOCKS_PER_GROUP(sb); 798 EXT3_BLOCKS_PER_GROUP(sb);
799 int block = ext3_new_block(handle, inode, goal, &error); 799 ext3_fsblk_t block = ext3_new_block(handle, inode,
800 goal, &error);
800 if (error) 801 if (error)
801 goto cleanup; 802 goto cleanup;
802 ea_idebug(inode, "creating block %d", block); 803 ea_idebug(inode, "creating block %d", block);
@@ -847,7 +848,7 @@ cleanup_dquot:
847 848
848bad_block: 849bad_block:
849 ext3_error(inode->i_sb, __FUNCTION__, 850 ext3_error(inode->i_sb, __FUNCTION__,
850 "inode %ld: bad block %d", inode->i_ino, 851 "inode %ld: bad block "E3FSBLK, inode->i_ino,
851 EXT3_I(inode)->i_file_acl); 852 EXT3_I(inode)->i_file_acl);
852 goto cleanup; 853 goto cleanup;
853 854
@@ -1076,14 +1077,14 @@ ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
1076 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl); 1077 bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
1077 if (!bh) { 1078 if (!bh) {
1078 ext3_error(inode->i_sb, __FUNCTION__, 1079 ext3_error(inode->i_sb, __FUNCTION__,
1079 "inode %ld: block %d read error", inode->i_ino, 1080 "inode %ld: block "E3FSBLK" read error", inode->i_ino,
1080 EXT3_I(inode)->i_file_acl); 1081 EXT3_I(inode)->i_file_acl);
1081 goto cleanup; 1082 goto cleanup;
1082 } 1083 }
1083 if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) || 1084 if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
1084 BHDR(bh)->h_blocks != cpu_to_le32(1)) { 1085 BHDR(bh)->h_blocks != cpu_to_le32(1)) {
1085 ext3_error(inode->i_sb, __FUNCTION__, 1086 ext3_error(inode->i_sb, __FUNCTION__,
1086 "inode %ld: bad block %d", inode->i_ino, 1087 "inode %ld: bad block "E3FSBLK, inode->i_ino,
1087 EXT3_I(inode)->i_file_acl); 1088 EXT3_I(inode)->i_file_acl);
1088 goto cleanup; 1089 goto cleanup;
1089 } 1090 }
@@ -1210,11 +1211,11 @@ again:
1210 bh = sb_bread(inode->i_sb, ce->e_block); 1211 bh = sb_bread(inode->i_sb, ce->e_block);
1211 if (!bh) { 1212 if (!bh) {
1212 ext3_error(inode->i_sb, __FUNCTION__, 1213 ext3_error(inode->i_sb, __FUNCTION__,
1213 "inode %ld: block %ld read error", 1214 "inode %ld: block %lu read error",
1214 inode->i_ino, (unsigned long) ce->e_block); 1215 inode->i_ino, (unsigned long) ce->e_block);
1215 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= 1216 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
1216 EXT3_XATTR_REFCOUNT_MAX) { 1217 EXT3_XATTR_REFCOUNT_MAX) {
1217 ea_idebug(inode, "block %ld refcount %d>=%d", 1218 ea_idebug(inode, "block %lu refcount %d>=%d",
1218 (unsigned long) ce->e_block, 1219 (unsigned long) ce->e_block,
1219 le32_to_cpu(BHDR(bh)->h_refcount), 1220 le32_to_cpu(BHDR(bh)->h_refcount),
1220 EXT3_XATTR_REFCOUNT_MAX); 1221 EXT3_XATTR_REFCOUNT_MAX);
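
In the xattr hunk above, the goal for a freshly allocated xattr block is widened as well: i_block_group is cast to ext3_fsblk_t before being multiplied by EXT3_BLOCKS_PER_GROUP(sb), because the old int arithmetic overflows once group * blocks-per-group passes 2^31. With 4 KiB blocks (32768 blocks per group) that happens for filesystems beyond roughly 8 TiB. A runnable sketch with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long first_data_block = 0;
            int block_group = 70000;       /* a group on a very large filesystem */
            int blocks_per_group = 32768;  /* 4 KiB blocks (assumed) */

            /* Casting one operand widens the whole multiplication, so the
             * product no longer has to fit in a signed int. */
            unsigned long goal = first_data_block
                    + (unsigned long)block_group * blocks_per_group;

            printf("goal block %lu\n", goal);   /* 2293760000, past 2^31 - 1 */
            return 0;
    }
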
diff --git a/fs/freevxfs/vxfs.h b/fs/freevxfs/vxfs.h
index 583bd78086d8..d35979a58743 100644
--- a/fs/freevxfs/vxfs.h
+++ b/fs/freevxfs/vxfs.h
@@ -159,11 +159,11 @@ struct vxfs_sb {
159 * In core superblock filesystem private data for VxFS. 159 * In core superblock filesystem private data for VxFS.
160 */ 160 */
161struct vxfs_sb_info { 161struct vxfs_sb_info {
162 struct vxfs_sb *vsi_raw; /* raw (on disk) supeblock */ 162 struct vxfs_sb *vsi_raw; /* raw (on disk) superblock */
163 struct buffer_head *vsi_bp; /* buffer for raw superblock*/ 163 struct buffer_head *vsi_bp; /* buffer for raw superblock*/
164 struct inode *vsi_fship; /* fileset header inode */ 164 struct inode *vsi_fship; /* fileset header inode */
165 struct inode *vsi_ilist; /* inode list inode */ 165 struct inode *vsi_ilist; /* inode list inode */
166 struct inode *vsi_stilist; /* structual inode list inode */ 166 struct inode *vsi_stilist; /* structural inode list inode */
167 u_long vsi_iext; /* initial inode list */ 167 u_long vsi_iext; /* initial inode list */
168 ino_t vsi_fshino; /* fileset header inode */ 168 ino_t vsi_fshino; /* fileset header inode */
169 daddr_t vsi_oltext; /* OLT extent */ 169 daddr_t vsi_oltext; /* OLT extent */
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
index 6dee109aeea4..78948b4b1894 100644
--- a/fs/freevxfs/vxfs_fshead.c
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -112,7 +112,7 @@ vxfs_read_fshead(struct super_block *sbp)
112 112
113 vip = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino); 113 vip = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino);
114 if (!vip) { 114 if (!vip) {
115 printk(KERN_ERR "vxfs: unabled to read fsh inode\n"); 115 printk(KERN_ERR "vxfs: unable to read fsh inode\n");
116 return -EINVAL; 116 return -EINVAL;
117 } 117 }
118 if (!VXFS_ISFSH(vip)) { 118 if (!VXFS_ISFSH(vip)) {
@@ -129,13 +129,13 @@ vxfs_read_fshead(struct super_block *sbp)
129 129
130 infp->vsi_fship = vxfs_get_fake_inode(sbp, vip); 130 infp->vsi_fship = vxfs_get_fake_inode(sbp, vip);
131 if (!infp->vsi_fship) { 131 if (!infp->vsi_fship) {
132 printk(KERN_ERR "vxfs: unabled to get fsh inode\n"); 132 printk(KERN_ERR "vxfs: unable to get fsh inode\n");
133 goto out_free_fship; 133 goto out_free_fship;
134 } 134 }
135 135
136 sfp = vxfs_getfsh(infp->vsi_fship, 0); 136 sfp = vxfs_getfsh(infp->vsi_fship, 0);
137 if (!sfp) { 137 if (!sfp) {
138 printk(KERN_ERR "vxfs: unabled to get structural fsh\n"); 138 printk(KERN_ERR "vxfs: unable to get structural fsh\n");
139 goto out_iput_fship; 139 goto out_iput_fship;
140 } 140 }
141 141
@@ -145,7 +145,7 @@ vxfs_read_fshead(struct super_block *sbp)
145 145
146 pfp = vxfs_getfsh(infp->vsi_fship, 1); 146 pfp = vxfs_getfsh(infp->vsi_fship, 1);
147 if (!pfp) { 147 if (!pfp) {
148 printk(KERN_ERR "vxfs: unabled to get primary fsh\n"); 148 printk(KERN_ERR "vxfs: unable to get primary fsh\n");
149 goto out_free_sfp; 149 goto out_free_sfp;
150 } 150 }
151 151
@@ -159,7 +159,7 @@ vxfs_read_fshead(struct super_block *sbp)
159 159
160 infp->vsi_stilist = vxfs_get_fake_inode(sbp, tip); 160 infp->vsi_stilist = vxfs_get_fake_inode(sbp, tip);
161 if (!infp->vsi_stilist) { 161 if (!infp->vsi_stilist) {
162 printk(KERN_ERR "vxfs: unabled to get structual list inode\n"); 162 printk(KERN_ERR "vxfs: unable to get structural list inode\n");
163 kfree(tip); 163 kfree(tip);
164 goto out_free_pfp; 164 goto out_free_pfp;
165 } 165 }
@@ -174,7 +174,7 @@ vxfs_read_fshead(struct super_block *sbp)
174 goto out_iput_stilist; 174 goto out_iput_stilist;
175 infp->vsi_ilist = vxfs_get_fake_inode(sbp, tip); 175 infp->vsi_ilist = vxfs_get_fake_inode(sbp, tip);
176 if (!infp->vsi_ilist) { 176 if (!infp->vsi_ilist) {
177 printk(KERN_ERR "vxfs: unabled to get inode list inode\n"); 177 printk(KERN_ERR "vxfs: unable to get inode list inode\n");
178 kfree(tip); 178 kfree(tip);
179 goto out_iput_stilist; 179 goto out_iput_stilist;
180 } 180 }
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index c3e1f760cac9..72437065f6ad 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_FUSE_FS) += fuse.o 5obj-$(CONFIG_FUSE_FS) += fuse.o
6 6
7fuse-objs := dev.o dir.o file.o inode.o 7fuse-objs := dev.o dir.o file.o inode.o control.o
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
new file mode 100644
index 000000000000..a3bce3a77253
--- /dev/null
+++ b/fs/fuse/control.c
@@ -0,0 +1,218 @@
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13
14#define FUSE_CTL_SUPER_MAGIC 0x65735543
15
16/*
17 * This is non-NULL when the single instance of the control filesystem
18 * exists. Protected by fuse_mutex
19 */
20static struct super_block *fuse_control_sb;
21
22static struct fuse_conn *fuse_ctl_file_conn_get(struct file *file)
23{
24 struct fuse_conn *fc;
25 mutex_lock(&fuse_mutex);
26 fc = file->f_dentry->d_inode->u.generic_ip;
27 if (fc)
28 fc = fuse_conn_get(fc);
29 mutex_unlock(&fuse_mutex);
30 return fc;
31}
32
33static ssize_t fuse_conn_abort_write(struct file *file, const char __user *buf,
34 size_t count, loff_t *ppos)
35{
36 struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
37 if (fc) {
38 fuse_abort_conn(fc);
39 fuse_conn_put(fc);
40 }
41 return count;
42}
43
44static ssize_t fuse_conn_waiting_read(struct file *file, char __user *buf,
45 size_t len, loff_t *ppos)
46{
47 char tmp[32];
48 size_t size;
49
50 if (!*ppos) {
51 struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
52 if (!fc)
53 return 0;
54
55 file->private_data=(void *)(long)atomic_read(&fc->num_waiting);
56 fuse_conn_put(fc);
57 }
58 size = sprintf(tmp, "%ld\n", (long)file->private_data);
59 return simple_read_from_buffer(buf, len, ppos, tmp, size);
60}
61
62static const struct file_operations fuse_ctl_abort_ops = {
63 .open = nonseekable_open,
64 .write = fuse_conn_abort_write,
65};
66
67static const struct file_operations fuse_ctl_waiting_ops = {
68 .open = nonseekable_open,
69 .read = fuse_conn_waiting_read,
70};
71
72static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
73 struct fuse_conn *fc,
74 const char *name,
75 int mode, int nlink,
76 struct inode_operations *iop,
77 const struct file_operations *fop)
78{
79 struct dentry *dentry;
80 struct inode *inode;
81
82 BUG_ON(fc->ctl_ndents >= FUSE_CTL_NUM_DENTRIES);
83 dentry = d_alloc_name(parent, name);
84 if (!dentry)
85 return NULL;
86
87 fc->ctl_dentry[fc->ctl_ndents++] = dentry;
88 inode = new_inode(fuse_control_sb);
89 if (!inode)
90 return NULL;
91
92 inode->i_mode = mode;
93 inode->i_uid = fc->user_id;
94 inode->i_gid = fc->group_id;
95 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
96 /* setting ->i_op to NULL is not allowed */
97 if (iop)
98 inode->i_op = iop;
99 inode->i_fop = fop;
100 inode->i_nlink = nlink;
101 inode->u.generic_ip = fc;
102 d_add(dentry, inode);
103 return dentry;
104}
105
106/*
107 * Add a connection to the control filesystem (if it exists). Caller
 108 * must hold fuse_mutex
109 */
110int fuse_ctl_add_conn(struct fuse_conn *fc)
111{
112 struct dentry *parent;
113 char name[32];
114
115 if (!fuse_control_sb)
116 return 0;
117
118 parent = fuse_control_sb->s_root;
119 parent->d_inode->i_nlink++;
120 sprintf(name, "%llu", (unsigned long long) fc->id);
121 parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2,
122 &simple_dir_inode_operations,
123 &simple_dir_operations);
124 if (!parent)
125 goto err;
126
127 if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400, 1,
128 NULL, &fuse_ctl_waiting_ops) ||
129 !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1,
130 NULL, &fuse_ctl_abort_ops))
131 goto err;
132
133 return 0;
134
135 err:
136 fuse_ctl_remove_conn(fc);
137 return -ENOMEM;
138}
139
140/*
141 * Remove a connection from the control filesystem (if it exists).
 142 * Caller must hold fuse_mutex
143 */
144void fuse_ctl_remove_conn(struct fuse_conn *fc)
145{
146 int i;
147
148 if (!fuse_control_sb)
149 return;
150
151 for (i = fc->ctl_ndents - 1; i >= 0; i--) {
152 struct dentry *dentry = fc->ctl_dentry[i];
153 dentry->d_inode->u.generic_ip = NULL;
154 d_drop(dentry);
155 dput(dentry);
156 }
157 fuse_control_sb->s_root->d_inode->i_nlink--;
158}
159
160static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
161{
162 struct tree_descr empty_descr = {""};
163 struct fuse_conn *fc;
164 int err;
165
166 err = simple_fill_super(sb, FUSE_CTL_SUPER_MAGIC, &empty_descr);
167 if (err)
168 return err;
169
170 mutex_lock(&fuse_mutex);
171 BUG_ON(fuse_control_sb);
172 fuse_control_sb = sb;
173 list_for_each_entry(fc, &fuse_conn_list, entry) {
174 err = fuse_ctl_add_conn(fc);
175 if (err) {
176 fuse_control_sb = NULL;
177 mutex_unlock(&fuse_mutex);
178 return err;
179 }
180 }
181 mutex_unlock(&fuse_mutex);
182
183 return 0;
184}
185
186static int fuse_ctl_get_sb(struct file_system_type *fs_type, int flags,
187 const char *dev_name, void *raw_data,
188 struct vfsmount *mnt)
189{
190 return get_sb_single(fs_type, flags, raw_data,
191 fuse_ctl_fill_super, mnt);
192}
193
194static void fuse_ctl_kill_sb(struct super_block *sb)
195{
196 mutex_lock(&fuse_mutex);
197 fuse_control_sb = NULL;
198 mutex_unlock(&fuse_mutex);
199
200 kill_litter_super(sb);
201}
202
203static struct file_system_type fuse_ctl_fs_type = {
204 .owner = THIS_MODULE,
205 .name = "fusectl",
206 .get_sb = fuse_ctl_get_sb,
207 .kill_sb = fuse_ctl_kill_sb,
208};
209
210int __init fuse_ctl_init(void)
211{
212 return register_filesystem(&fuse_ctl_fs_type);
213}
214
215void fuse_ctl_cleanup(void)
216{
217 unregister_filesystem(&fuse_ctl_fs_type);
218}
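
The control filesystem added above gives each connection a numbered directory (named after fc->id) holding a read-only "waiting" file, which reports how many requests are waiting for a reply, and a write-only "abort" file, whose write handler calls fuse_abort_conn(). A minimal userspace sketch of exercising both files, assuming fusectl is mounted at /sys/fs/fuse/connections and that a connection with id 42 exists (both values are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd;

	/* "waiting" holds a decimal count followed by a newline */
	fd = open("/sys/fs/fuse/connections/42/waiting", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("requests waiting: %s", buf);
		}
		close(fd);
	}

	/* any write to "abort" aborts the connection */
	fd = open("/sys/fs/fuse/connections/42/abort", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "1", 1) < 0)
			perror("abort");
		close(fd);
	}
	return 0;
}

After the abort, requests that were in flight fail with -ECONNABORTED, as set up by fuse_abort_conn() and end_io_requests() in fs/fuse/dev.c below.
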
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 104a62dadb94..1e2006caf158 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -34,6 +34,7 @@ static void fuse_request_init(struct fuse_req *req)
34{ 34{
35 memset(req, 0, sizeof(*req)); 35 memset(req, 0, sizeof(*req));
36 INIT_LIST_HEAD(&req->list); 36 INIT_LIST_HEAD(&req->list);
37 INIT_LIST_HEAD(&req->intr_entry);
37 init_waitqueue_head(&req->waitq); 38 init_waitqueue_head(&req->waitq);
38 atomic_set(&req->count, 1); 39 atomic_set(&req->count, 1);
39} 40}
@@ -64,18 +65,6 @@ static void restore_sigs(sigset_t *oldset)
64 sigprocmask(SIG_SETMASK, oldset, NULL); 65 sigprocmask(SIG_SETMASK, oldset, NULL);
65} 66}
66 67
67/*
68 * Reset request, so that it can be reused
69 *
70 * The caller must be _very_ careful to make sure, that it is holding
71 * the only reference to req
72 */
73void fuse_reset_request(struct fuse_req *req)
74{
75 BUG_ON(atomic_read(&req->count) != 1);
76 fuse_request_init(req);
77}
78
79static void __fuse_get_request(struct fuse_req *req) 68static void __fuse_get_request(struct fuse_req *req)
80{ 69{
81 atomic_inc(&req->count); 70 atomic_inc(&req->count);
@@ -88,6 +77,13 @@ static void __fuse_put_request(struct fuse_req *req)
88 atomic_dec(&req->count); 77 atomic_dec(&req->count);
89} 78}
90 79
80static void fuse_req_init_context(struct fuse_req *req)
81{
82 req->in.h.uid = current->fsuid;
83 req->in.h.gid = current->fsgid;
84 req->in.h.pid = current->pid;
85}
86
91struct fuse_req *fuse_get_req(struct fuse_conn *fc) 87struct fuse_req *fuse_get_req(struct fuse_conn *fc)
92{ 88{
93 struct fuse_req *req; 89 struct fuse_req *req;
@@ -103,14 +99,16 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
103 if (intr) 99 if (intr)
104 goto out; 100 goto out;
105 101
102 err = -ENOTCONN;
103 if (!fc->connected)
104 goto out;
105
106 req = fuse_request_alloc(); 106 req = fuse_request_alloc();
107 err = -ENOMEM; 107 err = -ENOMEM;
108 if (!req) 108 if (!req)
109 goto out; 109 goto out;
110 110
111 req->in.h.uid = current->fsuid; 111 fuse_req_init_context(req);
112 req->in.h.gid = current->fsgid;
113 req->in.h.pid = current->pid;
114 req->waiting = 1; 112 req->waiting = 1;
115 return req; 113 return req;
116 114
@@ -119,142 +117,183 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
119 return ERR_PTR(err); 117 return ERR_PTR(err);
120} 118}
121 119
122void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) 120/*
121 * Return request in fuse_file->reserved_req. However that may
122 * currently be in use. If that is the case, wait for it to become
123 * available.
124 */
125static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
126 struct file *file)
123{ 127{
124 if (atomic_dec_and_test(&req->count)) { 128 struct fuse_req *req = NULL;
125 if (req->waiting) 129 struct fuse_file *ff = file->private_data;
126 atomic_dec(&fc->num_waiting); 130
127 fuse_request_free(req); 131 do {
128 } 132 wait_event(fc->blocked_waitq, ff->reserved_req);
133 spin_lock(&fc->lock);
134 if (ff->reserved_req) {
135 req = ff->reserved_req;
136 ff->reserved_req = NULL;
137 get_file(file);
138 req->stolen_file = file;
139 }
140 spin_unlock(&fc->lock);
141 } while (!req);
142
143 return req;
129} 144}
130 145
131/* 146/*
132 * Called with sbput_sem held for read (request_end) or write 147 * Put stolen request back into fuse_file->reserved_req
133 * (fuse_put_super). By the time fuse_put_super() is finished, all
134 * inodes belonging to background requests must be released, so the
135 * iputs have to be done within the locked region.
136 */ 148 */
137void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req) 149static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
138{ 150{
139 iput(req->inode); 151 struct file *file = req->stolen_file;
140 iput(req->inode2); 152 struct fuse_file *ff = file->private_data;
153
141 spin_lock(&fc->lock); 154 spin_lock(&fc->lock);
142 list_del(&req->bg_entry); 155 fuse_request_init(req);
143 if (fc->num_background == FUSE_MAX_BACKGROUND) { 156 BUG_ON(ff->reserved_req);
144 fc->blocked = 0; 157 ff->reserved_req = req;
145 wake_up_all(&fc->blocked_waitq); 158 wake_up(&fc->blocked_waitq);
146 }
147 fc->num_background--;
148 spin_unlock(&fc->lock); 159 spin_unlock(&fc->lock);
160 fput(file);
149} 161}
150 162
151/* 163/*
 152 * This function is called when a request is finished. Either a reply 164 * Gets a request for a file operation, always succeeds
153 * has arrived or it was interrupted (and not yet sent) or some error
154 * occurred during communication with userspace, or the device file
155 * was closed. In case of a background request the reference to the
156 * stored objects are released. The requester thread is woken up (if
157 * still waiting), the 'end' callback is called if given, else the
158 * reference to the request is released
159 * 165 *
160 * Releasing extra reference for foreground requests must be done 166 * This is used for sending the FLUSH request, which must get to
161 * within the same locked region as setting state to finished. This 167 * userspace, due to POSIX locks which may need to be unlocked.
162 * is because fuse_reset_request() may be called after request is
163 * finished and it must be the sole possessor. If request is
164 * interrupted and put in the background, it will return with an error
165 * and hence never be reset and reused.
166 * 168 *
167 * Called with fc->lock, unlocks it 169 * If allocation fails due to OOM, use the reserved request in
170 * fuse_file.
171 *
172 * This is very unlikely to deadlock accidentally, since the
 173 * filesystem should not have its own file open. If deadlock is
174 * intentional, it can still be broken by "aborting" the filesystem.
168 */ 175 */
169static void request_end(struct fuse_conn *fc, struct fuse_req *req) 176struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
170{ 177{
171 list_del(&req->list); 178 struct fuse_req *req;
172 req->state = FUSE_REQ_FINISHED;
173 if (!req->background) {
174 spin_unlock(&fc->lock);
175 wake_up(&req->waitq);
176 fuse_put_request(fc, req);
177 } else {
178 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
179 req->end = NULL;
180 spin_unlock(&fc->lock);
181 down_read(&fc->sbput_sem);
182 if (fc->mounted)
183 fuse_release_background(fc, req);
184 up_read(&fc->sbput_sem);
185 179
186 /* fput must go outside sbput_sem, otherwise it can deadlock */ 180 atomic_inc(&fc->num_waiting);
187 if (req->file) 181 wait_event(fc->blocked_waitq, !fc->blocked);
188 fput(req->file); 182 req = fuse_request_alloc();
183 if (!req)
184 req = get_reserved_req(fc, file);
189 185
190 if (end) 186 fuse_req_init_context(req);
191 end(fc, req); 187 req->waiting = 1;
188 return req;
189}
190
191void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
192{
193 if (atomic_dec_and_test(&req->count)) {
194 if (req->waiting)
195 atomic_dec(&fc->num_waiting);
196
197 if (req->stolen_file)
198 put_reserved_req(fc, req);
192 else 199 else
193 fuse_put_request(fc, req); 200 fuse_request_free(req);
194 } 201 }
195} 202}
196 203
197/* 204/*
198 * Unfortunately request interruption not just solves the deadlock 205 * This function is called when a request is finished. Either a reply
199 * problem, it causes problems too. These stem from the fact, that an 206 * has arrived or it was aborted (and not yet sent) or some error
200 * interrupted request is continued to be processed in userspace, 207 * occurred during communication with userspace, or the device file
201 * while all the locks and object references (inode and file) held 208 * was closed. The requester thread is woken up (if still waiting),
202 * during the operation are released. 209 * the 'end' callback is called if given, else the reference to the
203 * 210 * request is released
204 * To release the locks is exactly why there's a need to interrupt the
205 * request, so there's not a lot that can be done about this, except
206 * introduce additional locking in userspace.
207 *
208 * More important is to keep inode and file references until userspace
209 * has replied, otherwise FORGET and RELEASE could be sent while the
210 * inode/file is still used by the filesystem.
211 *
212 * For this reason the concept of "background" request is introduced.
213 * An interrupted request is backgrounded if it has been already sent
214 * to userspace. Backgrounding involves getting an extra reference to
215 * inode(s) or file used in the request, and adding the request to
216 * fc->background list. When a reply is received for a background
217 * request, the object references are released, and the request is
218 * removed from the list. If the filesystem is unmounted while there
219 * are still background requests, the list is walked and references
220 * are released as if a reply was received.
221 * 211 *
222 * There's one more use for a background request. The RELEASE message is 212 * Called with fc->lock, unlocks it
223 * always sent as background, since it doesn't return an error or
224 * data.
225 */ 213 */
226static void background_request(struct fuse_conn *fc, struct fuse_req *req) 214static void request_end(struct fuse_conn *fc, struct fuse_req *req)
227{ 215{
228 req->background = 1; 216 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
229 list_add(&req->bg_entry, &fc->background); 217 req->end = NULL;
230 fc->num_background++; 218 list_del(&req->list);
231 if (fc->num_background == FUSE_MAX_BACKGROUND) 219 list_del(&req->intr_entry);
232 fc->blocked = 1; 220 req->state = FUSE_REQ_FINISHED;
233 if (req->inode) 221 if (req->background) {
234 req->inode = igrab(req->inode); 222 if (fc->num_background == FUSE_MAX_BACKGROUND) {
235 if (req->inode2) 223 fc->blocked = 0;
236 req->inode2 = igrab(req->inode2); 224 wake_up_all(&fc->blocked_waitq);
225 }
226 fc->num_background--;
227 }
228 spin_unlock(&fc->lock);
229 dput(req->dentry);
230 mntput(req->vfsmount);
237 if (req->file) 231 if (req->file)
238 get_file(req->file); 232 fput(req->file);
233 wake_up(&req->waitq);
234 if (end)
235 end(fc, req);
236 else
237 fuse_put_request(fc, req);
239} 238}
240 239
241/* Called with fc->lock held. Releases, and then reacquires it. */ 240static void wait_answer_interruptible(struct fuse_conn *fc,
242static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) 241 struct fuse_req *req)
243{ 242{
244 sigset_t oldset; 243 if (signal_pending(current))
244 return;
245 245
246 spin_unlock(&fc->lock); 246 spin_unlock(&fc->lock);
247 block_sigs(&oldset);
248 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); 247 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
249 restore_sigs(&oldset);
250 spin_lock(&fc->lock); 248 spin_lock(&fc->lock);
251 if (req->state == FUSE_REQ_FINISHED && !req->interrupted) 249}
252 return; 250
251static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
252{
253 list_add_tail(&req->intr_entry, &fc->interrupts);
254 wake_up(&fc->waitq);
255 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
256}
257
258/* Called with fc->lock held. Releases, and then reacquires it. */
259static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
260{
261 if (!fc->no_interrupt) {
262 /* Any signal may interrupt this */
263 wait_answer_interruptible(fc, req);
264
265 if (req->aborted)
266 goto aborted;
267 if (req->state == FUSE_REQ_FINISHED)
268 return;
253 269
254 if (!req->interrupted) {
255 req->out.h.error = -EINTR;
256 req->interrupted = 1; 270 req->interrupted = 1;
271 if (req->state == FUSE_REQ_SENT)
272 queue_interrupt(fc, req);
273 }
274
275 if (req->force) {
276 spin_unlock(&fc->lock);
277 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
278 spin_lock(&fc->lock);
279 } else {
280 sigset_t oldset;
281
282 /* Only fatal signals may interrupt this */
283 block_sigs(&oldset);
284 wait_answer_interruptible(fc, req);
285 restore_sigs(&oldset);
257 } 286 }
287
288 if (req->aborted)
289 goto aborted;
290 if (req->state == FUSE_REQ_FINISHED)
291 return;
292
293 req->out.h.error = -EINTR;
294 req->aborted = 1;
295
296 aborted:
258 if (req->locked) { 297 if (req->locked) {
259 /* This is uninterruptible sleep, because data is 298 /* This is uninterruptible sleep, because data is
260 being copied to/from the buffers of req. During 299 being copied to/from the buffers of req. During
@@ -268,8 +307,11 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
268 if (req->state == FUSE_REQ_PENDING) { 307 if (req->state == FUSE_REQ_PENDING) {
269 list_del(&req->list); 308 list_del(&req->list);
270 __fuse_put_request(req); 309 __fuse_put_request(req);
271 } else if (req->state == FUSE_REQ_SENT) 310 } else if (req->state == FUSE_REQ_SENT) {
272 background_request(fc, req); 311 spin_unlock(&fc->lock);
312 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
313 spin_lock(&fc->lock);
314 }
273} 315}
274 316
275static unsigned len_args(unsigned numargs, struct fuse_arg *args) 317static unsigned len_args(unsigned numargs, struct fuse_arg *args)
@@ -283,13 +325,19 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
283 return nbytes; 325 return nbytes;
284} 326}
285 327
328static u64 fuse_get_unique(struct fuse_conn *fc)
329 {
330 fc->reqctr++;
331 /* zero is special */
332 if (fc->reqctr == 0)
333 fc->reqctr = 1;
334
335 return fc->reqctr;
336}
337
286static void queue_request(struct fuse_conn *fc, struct fuse_req *req) 338static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
287{ 339{
288 fc->reqctr++; 340 req->in.h.unique = fuse_get_unique(fc);
289 /* zero is special */
290 if (fc->reqctr == 0)
291 fc->reqctr = 1;
292 req->in.h.unique = fc->reqctr;
293 req->in.h.len = sizeof(struct fuse_in_header) + 341 req->in.h.len = sizeof(struct fuse_in_header) +
294 len_args(req->in.numargs, (struct fuse_arg *) req->in.args); 342 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
295 list_add_tail(&req->list, &fc->pending); 343 list_add_tail(&req->list, &fc->pending);
@@ -302,9 +350,6 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
302 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 350 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
303} 351}
304 352
305/*
306 * This can only be interrupted by a SIGKILL
307 */
308void request_send(struct fuse_conn *fc, struct fuse_req *req) 353void request_send(struct fuse_conn *fc, struct fuse_req *req)
309{ 354{
310 req->isreply = 1; 355 req->isreply = 1;
@@ -327,8 +372,12 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
327static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) 372static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
328{ 373{
329 spin_lock(&fc->lock); 374 spin_lock(&fc->lock);
330 background_request(fc, req);
331 if (fc->connected) { 375 if (fc->connected) {
376 req->background = 1;
377 fc->num_background++;
378 if (fc->num_background == FUSE_MAX_BACKGROUND)
379 fc->blocked = 1;
380
332 queue_request(fc, req); 381 queue_request(fc, req);
333 spin_unlock(&fc->lock); 382 spin_unlock(&fc->lock);
334 } else { 383 } else {
@@ -352,14 +401,14 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
352/* 401/*
353 * Lock the request. Up to the next unlock_request() there mustn't be 402 * Lock the request. Up to the next unlock_request() there mustn't be
354 * anything that could cause a page-fault. If the request was already 403 * anything that could cause a page-fault. If the request was already
355 * interrupted bail out. 404 * aborted bail out.
356 */ 405 */
357static int lock_request(struct fuse_conn *fc, struct fuse_req *req) 406static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
358{ 407{
359 int err = 0; 408 int err = 0;
360 if (req) { 409 if (req) {
361 spin_lock(&fc->lock); 410 spin_lock(&fc->lock);
362 if (req->interrupted) 411 if (req->aborted)
363 err = -ENOENT; 412 err = -ENOENT;
364 else 413 else
365 req->locked = 1; 414 req->locked = 1;
@@ -369,7 +418,7 @@ static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
369} 418}
370 419
371/* 420/*
372 * Unlock request. If it was interrupted during being locked, the 421 * Unlock request. If it was aborted during being locked, the
373 * requester thread is currently waiting for it to be unlocked, so 422 * requester thread is currently waiting for it to be unlocked, so
374 * wake it up. 423 * wake it up.
375 */ 424 */
@@ -378,7 +427,7 @@ static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
378 if (req) { 427 if (req) {
379 spin_lock(&fc->lock); 428 spin_lock(&fc->lock);
380 req->locked = 0; 429 req->locked = 0;
381 if (req->interrupted) 430 if (req->aborted)
382 wake_up(&req->waitq); 431 wake_up(&req->waitq);
383 spin_unlock(&fc->lock); 432 spin_unlock(&fc->lock);
384 } 433 }
@@ -557,13 +606,18 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
557 return err; 606 return err;
558} 607}
559 608
609static int request_pending(struct fuse_conn *fc)
610{
611 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
612}
613
560/* Wait until a request is available on the pending list */ 614/* Wait until a request is available on the pending list */
561static void request_wait(struct fuse_conn *fc) 615static void request_wait(struct fuse_conn *fc)
562{ 616{
563 DECLARE_WAITQUEUE(wait, current); 617 DECLARE_WAITQUEUE(wait, current);
564 618
565 add_wait_queue_exclusive(&fc->waitq, &wait); 619 add_wait_queue_exclusive(&fc->waitq, &wait);
566 while (fc->connected && list_empty(&fc->pending)) { 620 while (fc->connected && !request_pending(fc)) {
567 set_current_state(TASK_INTERRUPTIBLE); 621 set_current_state(TASK_INTERRUPTIBLE);
568 if (signal_pending(current)) 622 if (signal_pending(current))
569 break; 623 break;
@@ -577,11 +631,50 @@ static void request_wait(struct fuse_conn *fc)
577} 631}
578 632
579/* 633/*
634 * Transfer an interrupt request to userspace
635 *
636 * Unlike other requests this is assembled on demand, without a need
637 * to allocate a separate fuse_req structure.
638 *
639 * Called with fc->lock held, releases it
640 */
641static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
642 const struct iovec *iov, unsigned long nr_segs)
643{
644 struct fuse_copy_state cs;
645 struct fuse_in_header ih;
646 struct fuse_interrupt_in arg;
647 unsigned reqsize = sizeof(ih) + sizeof(arg);
648 int err;
649
650 list_del_init(&req->intr_entry);
651 req->intr_unique = fuse_get_unique(fc);
652 memset(&ih, 0, sizeof(ih));
653 memset(&arg, 0, sizeof(arg));
654 ih.len = reqsize;
655 ih.opcode = FUSE_INTERRUPT;
656 ih.unique = req->intr_unique;
657 arg.unique = req->in.h.unique;
658
659 spin_unlock(&fc->lock);
660 if (iov_length(iov, nr_segs) < reqsize)
661 return -EINVAL;
662
663 fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
664 err = fuse_copy_one(&cs, &ih, sizeof(ih));
665 if (!err)
666 err = fuse_copy_one(&cs, &arg, sizeof(arg));
667 fuse_copy_finish(&cs);
668
669 return err ? err : reqsize;
670}
671
672/*
580 * Read a single request into the userspace filesystem's buffer. This 673 * Read a single request into the userspace filesystem's buffer. This
581 * function waits until a request is available, then removes it from 674 * function waits until a request is available, then removes it from
582 * the pending list and copies request data to userspace buffer. If 675 * the pending list and copies request data to userspace buffer. If
583 * no reply is needed (FORGET) or request has been interrupted or 676 * no reply is needed (FORGET) or request has been aborted or there
584 * there was an error during the copying then it's finished by calling 677 * was an error during the copying then it's finished by calling
585 * request_end(). Otherwise add it to the processing list, and set 678 * request_end(). Otherwise add it to the processing list, and set
586 * the 'sent' flag. 679 * the 'sent' flag.
587 */ 680 */
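
On the wire, the interrupt assembled by fuse_read_interrupt() above is an ordinary message: a fuse_in_header carrying the FUSE_INTERRUPT opcode and a freshly allocated unique id, followed by a fuse_interrupt_in whose unique field names the request being interrupted. A daemon-side sketch of recognizing it, assuming a userspace copy of <linux/fuse.h> that already contains the FUSE_INTERRUPT opcode and fuse_interrupt_in structure added elsewhere in this series:

#include <linux/fuse.h>
#include <stdio.h>
#include <string.h>

/* buf holds one message read from /dev/fuse; len is the read() result */
static void handle_interrupt(const char *buf, size_t len)
{
	struct fuse_in_header hdr;
	struct fuse_interrupt_in arg;

	if (len < sizeof(hdr) + sizeof(arg))
		return;
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.opcode != FUSE_INTERRUPT)
		return;
	memcpy(&arg, buf + sizeof(hdr), sizeof(arg));

	/* arg.unique identifies the interrupted request; hdr.unique is the
	   id of this INTERRUPT message itself, which a reply must carry */
	printf("interrupt for request %llu (message id %llu)\n",
	       (unsigned long long) arg.unique,
	       (unsigned long long) hdr.unique);
}

Replying to the interrupt with -ENOSYS disables interrupts for the whole connection, and -EAGAIN asks the kernel to queue it again, exactly as handled in fuse_dev_writev() further down.
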
@@ -601,7 +694,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
601 spin_lock(&fc->lock); 694 spin_lock(&fc->lock);
602 err = -EAGAIN; 695 err = -EAGAIN;
603 if ((file->f_flags & O_NONBLOCK) && fc->connected && 696 if ((file->f_flags & O_NONBLOCK) && fc->connected &&
604 list_empty(&fc->pending)) 697 !request_pending(fc))
605 goto err_unlock; 698 goto err_unlock;
606 699
607 request_wait(fc); 700 request_wait(fc);
@@ -609,9 +702,15 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
609 if (!fc->connected) 702 if (!fc->connected)
610 goto err_unlock; 703 goto err_unlock;
611 err = -ERESTARTSYS; 704 err = -ERESTARTSYS;
612 if (list_empty(&fc->pending)) 705 if (!request_pending(fc))
613 goto err_unlock; 706 goto err_unlock;
614 707
708 if (!list_empty(&fc->interrupts)) {
709 req = list_entry(fc->interrupts.next, struct fuse_req,
710 intr_entry);
711 return fuse_read_interrupt(fc, req, iov, nr_segs);
712 }
713
615 req = list_entry(fc->pending.next, struct fuse_req, list); 714 req = list_entry(fc->pending.next, struct fuse_req, list);
616 req->state = FUSE_REQ_READING; 715 req->state = FUSE_REQ_READING;
617 list_move(&req->list, &fc->io); 716 list_move(&req->list, &fc->io);
@@ -636,10 +735,10 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
636 fuse_copy_finish(&cs); 735 fuse_copy_finish(&cs);
637 spin_lock(&fc->lock); 736 spin_lock(&fc->lock);
638 req->locked = 0; 737 req->locked = 0;
639 if (!err && req->interrupted) 738 if (!err && req->aborted)
640 err = -ENOENT; 739 err = -ENOENT;
641 if (err) { 740 if (err) {
642 if (!req->interrupted) 741 if (!req->aborted)
643 req->out.h.error = -EIO; 742 req->out.h.error = -EIO;
644 request_end(fc, req); 743 request_end(fc, req);
645 return err; 744 return err;
@@ -649,6 +748,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
649 else { 748 else {
650 req->state = FUSE_REQ_SENT; 749 req->state = FUSE_REQ_SENT;
651 list_move_tail(&req->list, &fc->processing); 750 list_move_tail(&req->list, &fc->processing);
751 if (req->interrupted)
752 queue_interrupt(fc, req);
652 spin_unlock(&fc->lock); 753 spin_unlock(&fc->lock);
653 } 754 }
654 return reqsize; 755 return reqsize;
@@ -675,7 +776,7 @@ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
675 list_for_each(entry, &fc->processing) { 776 list_for_each(entry, &fc->processing) {
676 struct fuse_req *req; 777 struct fuse_req *req;
677 req = list_entry(entry, struct fuse_req, list); 778 req = list_entry(entry, struct fuse_req, list);
678 if (req->in.h.unique == unique) 779 if (req->in.h.unique == unique || req->intr_unique == unique)
679 return req; 780 return req;
680 } 781 }
681 return NULL; 782 return NULL;
@@ -741,17 +842,33 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
741 goto err_unlock; 842 goto err_unlock;
742 843
743 req = request_find(fc, oh.unique); 844 req = request_find(fc, oh.unique);
744 err = -EINVAL;
745 if (!req) 845 if (!req)
746 goto err_unlock; 846 goto err_unlock;
747 847
748 if (req->interrupted) { 848 if (req->aborted) {
749 spin_unlock(&fc->lock); 849 spin_unlock(&fc->lock);
750 fuse_copy_finish(&cs); 850 fuse_copy_finish(&cs);
751 spin_lock(&fc->lock); 851 spin_lock(&fc->lock);
752 request_end(fc, req); 852 request_end(fc, req);
753 return -ENOENT; 853 return -ENOENT;
754 } 854 }
855 /* Is it an interrupt reply? */
856 if (req->intr_unique == oh.unique) {
857 err = -EINVAL;
858 if (nbytes != sizeof(struct fuse_out_header))
859 goto err_unlock;
860
861 if (oh.error == -ENOSYS)
862 fc->no_interrupt = 1;
863 else if (oh.error == -EAGAIN)
864 queue_interrupt(fc, req);
865
866 spin_unlock(&fc->lock);
867 fuse_copy_finish(&cs);
868 return nbytes;
869 }
870
871 req->state = FUSE_REQ_WRITING;
755 list_move(&req->list, &fc->io); 872 list_move(&req->list, &fc->io);
756 req->out.h = oh; 873 req->out.h = oh;
757 req->locked = 1; 874 req->locked = 1;
@@ -764,9 +881,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
764 spin_lock(&fc->lock); 881 spin_lock(&fc->lock);
765 req->locked = 0; 882 req->locked = 0;
766 if (!err) { 883 if (!err) {
767 if (req->interrupted) 884 if (req->aborted)
768 err = -ENOENT; 885 err = -ENOENT;
769 } else if (!req->interrupted) 886 } else if (!req->aborted)
770 req->out.h.error = -EIO; 887 req->out.h.error = -EIO;
771 request_end(fc, req); 888 request_end(fc, req);
772 889
@@ -800,7 +917,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
800 spin_lock(&fc->lock); 917 spin_lock(&fc->lock);
801 if (!fc->connected) 918 if (!fc->connected)
802 mask = POLLERR; 919 mask = POLLERR;
803 else if (!list_empty(&fc->pending)) 920 else if (request_pending(fc))
804 mask |= POLLIN | POLLRDNORM; 921 mask |= POLLIN | POLLRDNORM;
805 spin_unlock(&fc->lock); 922 spin_unlock(&fc->lock);
806 923
@@ -826,7 +943,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
826/* 943/*
827 * Abort requests under I/O 944 * Abort requests under I/O
828 * 945 *
829 * The requests are set to interrupted and finished, and the request 946 * The requests are set to aborted and finished, and the request
830 * waiter is woken up. This will make request_wait_answer() wait 947 * waiter is woken up. This will make request_wait_answer() wait
831 * until the request is unlocked and then return. 948 * until the request is unlocked and then return.
832 * 949 *
@@ -841,7 +958,7 @@ static void end_io_requests(struct fuse_conn *fc)
841 list_entry(fc->io.next, struct fuse_req, list); 958 list_entry(fc->io.next, struct fuse_req, list);
842 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; 959 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
843 960
844 req->interrupted = 1; 961 req->aborted = 1;
845 req->out.h.error = -ECONNABORTED; 962 req->out.h.error = -ECONNABORTED;
846 req->state = FUSE_REQ_FINISHED; 963 req->state = FUSE_REQ_FINISHED;
847 list_del_init(&req->list); 964 list_del_init(&req->list);
@@ -874,19 +991,20 @@ static void end_io_requests(struct fuse_conn *fc)
874 * onto the pending list is prevented by req->connected being false. 991 * onto the pending list is prevented by req->connected being false.
875 * 992 *
876 * Progression of requests under I/O to the processing list is 993 * Progression of requests under I/O to the processing list is
877 * prevented by the req->interrupted flag being true for these 994 * prevented by the req->aborted flag being true for these requests.
878 * requests. For this reason requests on the io list must be aborted 995 * For this reason requests on the io list must be aborted first.
879 * first.
880 */ 996 */
881void fuse_abort_conn(struct fuse_conn *fc) 997void fuse_abort_conn(struct fuse_conn *fc)
882{ 998{
883 spin_lock(&fc->lock); 999 spin_lock(&fc->lock);
884 if (fc->connected) { 1000 if (fc->connected) {
885 fc->connected = 0; 1001 fc->connected = 0;
1002 fc->blocked = 0;
886 end_io_requests(fc); 1003 end_io_requests(fc);
887 end_requests(fc, &fc->pending); 1004 end_requests(fc, &fc->pending);
888 end_requests(fc, &fc->processing); 1005 end_requests(fc, &fc->processing);
889 wake_up_all(&fc->waitq); 1006 wake_up_all(&fc->waitq);
1007 wake_up_all(&fc->blocked_waitq);
890 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 1008 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
891 } 1009 }
892 spin_unlock(&fc->lock); 1010 spin_unlock(&fc->lock);
@@ -902,7 +1020,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
902 end_requests(fc, &fc->processing); 1020 end_requests(fc, &fc->processing);
903 spin_unlock(&fc->lock); 1021 spin_unlock(&fc->lock);
904 fasync_helper(-1, file, 0, &fc->fasync); 1022 fasync_helper(-1, file, 0, &fc->fasync);
905 kobject_put(&fc->kobj); 1023 fuse_conn_put(fc);
906 } 1024 }
907 1025
908 return 0; 1026 return 0;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8d7546e832e8..72a74cde6de8 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1,6 +1,6 @@
1/* 1/*
2 FUSE: Filesystem in Userspace 2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu> 3 Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
4 4
5 This program can be distributed under the terms of the GNU GPL. 5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING. 6 See the file COPYING.
@@ -79,7 +79,6 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
79{ 79{
80 req->in.h.opcode = FUSE_LOOKUP; 80 req->in.h.opcode = FUSE_LOOKUP;
81 req->in.h.nodeid = get_node_id(dir); 81 req->in.h.nodeid = get_node_id(dir);
82 req->inode = dir;
83 req->in.numargs = 1; 82 req->in.numargs = 1;
84 req->in.args[0].size = entry->d_name.len + 1; 83 req->in.args[0].size = entry->d_name.len + 1;
85 req->in.args[0].value = entry->d_name.name; 84 req->in.args[0].value = entry->d_name.name;
@@ -225,6 +224,20 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
225} 224}
226 225
227/* 226/*
227 * Synchronous release for the case when something goes wrong in CREATE_OPEN
228 */
229static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff,
230 u64 nodeid, int flags)
231{
232 struct fuse_req *req;
233
234 req = fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE);
235 req->force = 1;
236 request_send(fc, req);
237 fuse_put_request(fc, req);
238}
239
240/*
228 * Atomic create+open operation 241 * Atomic create+open operation
229 * 242 *
230 * If the filesystem doesn't support this, then fall back to separate 243 * If the filesystem doesn't support this, then fall back to separate
@@ -237,6 +250,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
237 struct inode *inode; 250 struct inode *inode;
238 struct fuse_conn *fc = get_fuse_conn(dir); 251 struct fuse_conn *fc = get_fuse_conn(dir);
239 struct fuse_req *req; 252 struct fuse_req *req;
253 struct fuse_req *forget_req;
240 struct fuse_open_in inarg; 254 struct fuse_open_in inarg;
241 struct fuse_open_out outopen; 255 struct fuse_open_out outopen;
242 struct fuse_entry_out outentry; 256 struct fuse_entry_out outentry;
@@ -247,9 +261,14 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
247 if (fc->no_create) 261 if (fc->no_create)
248 return -ENOSYS; 262 return -ENOSYS;
249 263
264 forget_req = fuse_get_req(fc);
265 if (IS_ERR(forget_req))
266 return PTR_ERR(forget_req);
267
250 req = fuse_get_req(fc); 268 req = fuse_get_req(fc);
269 err = PTR_ERR(req);
251 if (IS_ERR(req)) 270 if (IS_ERR(req))
252 return PTR_ERR(req); 271 goto out_put_forget_req;
253 272
254 err = -ENOMEM; 273 err = -ENOMEM;
255 ff = fuse_file_alloc(); 274 ff = fuse_file_alloc();
@@ -262,7 +281,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
262 inarg.mode = mode; 281 inarg.mode = mode;
263 req->in.h.opcode = FUSE_CREATE; 282 req->in.h.opcode = FUSE_CREATE;
264 req->in.h.nodeid = get_node_id(dir); 283 req->in.h.nodeid = get_node_id(dir);
265 req->inode = dir;
266 req->in.numargs = 2; 284 req->in.numargs = 2;
267 req->in.args[0].size = sizeof(inarg); 285 req->in.args[0].size = sizeof(inarg);
268 req->in.args[0].value = &inarg; 286 req->in.args[0].value = &inarg;
@@ -285,25 +303,23 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
285 if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid)) 303 if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
286 goto out_free_ff; 304 goto out_free_ff;
287 305
306 fuse_put_request(fc, req);
288 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, 307 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
289 &outentry.attr); 308 &outentry.attr);
290 err = -ENOMEM;
291 if (!inode) { 309 if (!inode) {
292 flags &= ~(O_CREAT | O_EXCL | O_TRUNC); 310 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
293 ff->fh = outopen.fh; 311 ff->fh = outopen.fh;
294 /* Special release, with inode = NULL, this will 312 fuse_sync_release(fc, ff, outentry.nodeid, flags);
295 trigger a 'forget' request when the release is 313 fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
296 complete */ 314 return -ENOMEM;
297 fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0);
298 goto out_put_request;
299 } 315 }
300 fuse_put_request(fc, req); 316 fuse_put_request(fc, forget_req);
301 d_instantiate(entry, inode); 317 d_instantiate(entry, inode);
302 fuse_change_timeout(entry, &outentry); 318 fuse_change_timeout(entry, &outentry);
303 file = lookup_instantiate_filp(nd, entry, generic_file_open); 319 file = lookup_instantiate_filp(nd, entry, generic_file_open);
304 if (IS_ERR(file)) { 320 if (IS_ERR(file)) {
305 ff->fh = outopen.fh; 321 ff->fh = outopen.fh;
306 fuse_send_release(fc, ff, outentry.nodeid, inode, flags, 0); 322 fuse_sync_release(fc, ff, outentry.nodeid, flags);
307 return PTR_ERR(file); 323 return PTR_ERR(file);
308 } 324 }
309 fuse_finish_open(inode, file, ff, &outopen); 325 fuse_finish_open(inode, file, ff, &outopen);
@@ -313,6 +329,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
313 fuse_file_free(ff); 329 fuse_file_free(ff);
314 out_put_request: 330 out_put_request:
315 fuse_put_request(fc, req); 331 fuse_put_request(fc, req);
332 out_put_forget_req:
333 fuse_put_request(fc, forget_req);
316 return err; 334 return err;
317} 335}
318 336
@@ -328,7 +346,6 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
328 int err; 346 int err;
329 347
330 req->in.h.nodeid = get_node_id(dir); 348 req->in.h.nodeid = get_node_id(dir);
331 req->inode = dir;
332 req->out.numargs = 1; 349 req->out.numargs = 1;
333 req->out.args[0].size = sizeof(outarg); 350 req->out.args[0].size = sizeof(outarg);
334 req->out.args[0].value = &outarg; 351 req->out.args[0].value = &outarg;
@@ -448,7 +465,6 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
448 465
449 req->in.h.opcode = FUSE_UNLINK; 466 req->in.h.opcode = FUSE_UNLINK;
450 req->in.h.nodeid = get_node_id(dir); 467 req->in.h.nodeid = get_node_id(dir);
451 req->inode = dir;
452 req->in.numargs = 1; 468 req->in.numargs = 1;
453 req->in.args[0].size = entry->d_name.len + 1; 469 req->in.args[0].size = entry->d_name.len + 1;
454 req->in.args[0].value = entry->d_name.name; 470 req->in.args[0].value = entry->d_name.name;
@@ -480,7 +496,6 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
480 496
481 req->in.h.opcode = FUSE_RMDIR; 497 req->in.h.opcode = FUSE_RMDIR;
482 req->in.h.nodeid = get_node_id(dir); 498 req->in.h.nodeid = get_node_id(dir);
483 req->inode = dir;
484 req->in.numargs = 1; 499 req->in.numargs = 1;
485 req->in.args[0].size = entry->d_name.len + 1; 500 req->in.args[0].size = entry->d_name.len + 1;
486 req->in.args[0].value = entry->d_name.name; 501 req->in.args[0].value = entry->d_name.name;
@@ -510,8 +525,6 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
510 inarg.newdir = get_node_id(newdir); 525 inarg.newdir = get_node_id(newdir);
511 req->in.h.opcode = FUSE_RENAME; 526 req->in.h.opcode = FUSE_RENAME;
512 req->in.h.nodeid = get_node_id(olddir); 527 req->in.h.nodeid = get_node_id(olddir);
513 req->inode = olddir;
514 req->inode2 = newdir;
515 req->in.numargs = 3; 528 req->in.numargs = 3;
516 req->in.args[0].size = sizeof(inarg); 529 req->in.args[0].size = sizeof(inarg);
517 req->in.args[0].value = &inarg; 530 req->in.args[0].value = &inarg;
@@ -558,7 +571,6 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
558 memset(&inarg, 0, sizeof(inarg)); 571 memset(&inarg, 0, sizeof(inarg));
559 inarg.oldnodeid = get_node_id(inode); 572 inarg.oldnodeid = get_node_id(inode);
560 req->in.h.opcode = FUSE_LINK; 573 req->in.h.opcode = FUSE_LINK;
561 req->inode2 = inode;
562 req->in.numargs = 2; 574 req->in.numargs = 2;
563 req->in.args[0].size = sizeof(inarg); 575 req->in.args[0].size = sizeof(inarg);
564 req->in.args[0].value = &inarg; 576 req->in.args[0].value = &inarg;
@@ -587,7 +599,6 @@ int fuse_do_getattr(struct inode *inode)
587 599
588 req->in.h.opcode = FUSE_GETATTR; 600 req->in.h.opcode = FUSE_GETATTR;
589 req->in.h.nodeid = get_node_id(inode); 601 req->in.h.nodeid = get_node_id(inode);
590 req->inode = inode;
591 req->out.numargs = 1; 602 req->out.numargs = 1;
592 req->out.args[0].size = sizeof(arg); 603 req->out.args[0].size = sizeof(arg);
593 req->out.args[0].value = &arg; 604 req->out.args[0].value = &arg;
@@ -679,7 +690,6 @@ static int fuse_access(struct inode *inode, int mask)
679 inarg.mask = mask; 690 inarg.mask = mask;
680 req->in.h.opcode = FUSE_ACCESS; 691 req->in.h.opcode = FUSE_ACCESS;
681 req->in.h.nodeid = get_node_id(inode); 692 req->in.h.nodeid = get_node_id(inode);
682 req->inode = inode;
683 req->in.numargs = 1; 693 req->in.numargs = 1;
684 req->in.args[0].size = sizeof(inarg); 694 req->in.args[0].size = sizeof(inarg);
685 req->in.args[0].value = &inarg; 695 req->in.args[0].value = &inarg;
@@ -820,7 +830,6 @@ static char *read_link(struct dentry *dentry)
820 } 830 }
821 req->in.h.opcode = FUSE_READLINK; 831 req->in.h.opcode = FUSE_READLINK;
822 req->in.h.nodeid = get_node_id(inode); 832 req->in.h.nodeid = get_node_id(inode);
823 req->inode = inode;
824 req->out.argvar = 1; 833 req->out.argvar = 1;
825 req->out.numargs = 1; 834 req->out.numargs = 1;
826 req->out.args[0].size = PAGE_SIZE - 1; 835 req->out.args[0].size = PAGE_SIZE - 1;
@@ -939,7 +948,6 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
939 iattr_to_fattr(attr, &inarg); 948 iattr_to_fattr(attr, &inarg);
940 req->in.h.opcode = FUSE_SETATTR; 949 req->in.h.opcode = FUSE_SETATTR;
941 req->in.h.nodeid = get_node_id(inode); 950 req->in.h.nodeid = get_node_id(inode);
942 req->inode = inode;
943 req->in.numargs = 1; 951 req->in.numargs = 1;
944 req->in.args[0].size = sizeof(inarg); 952 req->in.args[0].size = sizeof(inarg);
945 req->in.args[0].value = &inarg; 953 req->in.args[0].value = &inarg;
@@ -1002,7 +1010,6 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
1002 inarg.flags = flags; 1010 inarg.flags = flags;
1003 req->in.h.opcode = FUSE_SETXATTR; 1011 req->in.h.opcode = FUSE_SETXATTR;
1004 req->in.h.nodeid = get_node_id(inode); 1012 req->in.h.nodeid = get_node_id(inode);
1005 req->inode = inode;
1006 req->in.numargs = 3; 1013 req->in.numargs = 3;
1007 req->in.args[0].size = sizeof(inarg); 1014 req->in.args[0].size = sizeof(inarg);
1008 req->in.args[0].value = &inarg; 1015 req->in.args[0].value = &inarg;
@@ -1041,7 +1048,6 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
1041 inarg.size = size; 1048 inarg.size = size;
1042 req->in.h.opcode = FUSE_GETXATTR; 1049 req->in.h.opcode = FUSE_GETXATTR;
1043 req->in.h.nodeid = get_node_id(inode); 1050 req->in.h.nodeid = get_node_id(inode);
1044 req->inode = inode;
1045 req->in.numargs = 2; 1051 req->in.numargs = 2;
1046 req->in.args[0].size = sizeof(inarg); 1052 req->in.args[0].size = sizeof(inarg);
1047 req->in.args[0].value = &inarg; 1053 req->in.args[0].value = &inarg;
@@ -1091,7 +1097,6 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
1091 inarg.size = size; 1097 inarg.size = size;
1092 req->in.h.opcode = FUSE_LISTXATTR; 1098 req->in.h.opcode = FUSE_LISTXATTR;
1093 req->in.h.nodeid = get_node_id(inode); 1099 req->in.h.nodeid = get_node_id(inode);
1094 req->inode = inode;
1095 req->in.numargs = 1; 1100 req->in.numargs = 1;
1096 req->in.args[0].size = sizeof(inarg); 1101 req->in.args[0].size = sizeof(inarg);
1097 req->in.args[0].value = &inarg; 1102 req->in.args[0].value = &inarg;
@@ -1135,7 +1140,6 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
1135 1140
1136 req->in.h.opcode = FUSE_REMOVEXATTR; 1141 req->in.h.opcode = FUSE_REMOVEXATTR;
1137 req->in.h.nodeid = get_node_id(inode); 1142 req->in.h.nodeid = get_node_id(inode);
1138 req->inode = inode;
1139 req->in.numargs = 1; 1143 req->in.numargs = 1;
1140 req->in.args[0].size = strlen(name) + 1; 1144 req->in.args[0].size = strlen(name) + 1;
1141 req->in.args[0].value = name; 1145 req->in.args[0].value = name;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 087f3b734f40..28aa81eae2cc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -30,7 +30,6 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
30 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); 30 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
31 req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; 31 req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
32 req->in.h.nodeid = get_node_id(inode); 32 req->in.h.nodeid = get_node_id(inode);
33 req->inode = inode;
34 req->in.numargs = 1; 33 req->in.numargs = 1;
35 req->in.args[0].size = sizeof(inarg); 34 req->in.args[0].size = sizeof(inarg);
36 req->in.args[0].value = &inarg; 35 req->in.args[0].value = &inarg;
@@ -49,8 +48,8 @@ struct fuse_file *fuse_file_alloc(void)
49 struct fuse_file *ff; 48 struct fuse_file *ff;
50 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); 49 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
51 if (ff) { 50 if (ff) {
52 ff->release_req = fuse_request_alloc(); 51 ff->reserved_req = fuse_request_alloc();
53 if (!ff->release_req) { 52 if (!ff->reserved_req) {
54 kfree(ff); 53 kfree(ff);
55 ff = NULL; 54 ff = NULL;
56 } 55 }
@@ -60,7 +59,7 @@ struct fuse_file *fuse_file_alloc(void)
60 59
61void fuse_file_free(struct fuse_file *ff) 60void fuse_file_free(struct fuse_file *ff)
62{ 61{
63 fuse_request_free(ff->release_req); 62 fuse_request_free(ff->reserved_req);
64 kfree(ff); 63 kfree(ff);
65} 64}
66 65
@@ -113,37 +112,22 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
113 return err; 112 return err;
114} 113}
115 114
116/* Special case for failed iget in CREATE */ 115struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
117static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 116 int opcode)
118{ 117{
119 /* If called from end_io_requests(), req has more than one 118 struct fuse_req *req = ff->reserved_req;
120 reference and fuse_reset_request() cannot work */
121 if (fc->connected) {
122 u64 nodeid = req->in.h.nodeid;
123 fuse_reset_request(req);
124 fuse_send_forget(fc, req, nodeid, 1);
125 } else
126 fuse_put_request(fc, req);
127}
128
129void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
130 u64 nodeid, struct inode *inode, int flags, int isdir)
131{
132 struct fuse_req * req = ff->release_req;
133 struct fuse_release_in *inarg = &req->misc.release_in; 119 struct fuse_release_in *inarg = &req->misc.release_in;
134 120
135 inarg->fh = ff->fh; 121 inarg->fh = ff->fh;
136 inarg->flags = flags; 122 inarg->flags = flags;
137 req->in.h.opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; 123 req->in.h.opcode = opcode;
138 req->in.h.nodeid = nodeid; 124 req->in.h.nodeid = nodeid;
139 req->inode = inode;
140 req->in.numargs = 1; 125 req->in.numargs = 1;
141 req->in.args[0].size = sizeof(struct fuse_release_in); 126 req->in.args[0].size = sizeof(struct fuse_release_in);
142 req->in.args[0].value = inarg; 127 req->in.args[0].value = inarg;
143 request_send_background(fc, req);
144 if (!inode)
145 req->end = fuse_release_end;
146 kfree(ff); 128 kfree(ff);
129
130 return req;
147} 131}
148 132
149int fuse_release_common(struct inode *inode, struct file *file, int isdir) 133int fuse_release_common(struct inode *inode, struct file *file, int isdir)
@@ -151,8 +135,15 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir)
151 struct fuse_file *ff = file->private_data; 135 struct fuse_file *ff = file->private_data;
152 if (ff) { 136 if (ff) {
153 struct fuse_conn *fc = get_fuse_conn(inode); 137 struct fuse_conn *fc = get_fuse_conn(inode);
154 u64 nodeid = get_node_id(inode); 138 struct fuse_req *req;
155 fuse_send_release(fc, ff, nodeid, inode, file->f_flags, isdir); 139
140 req = fuse_release_fill(ff, get_node_id(inode), file->f_flags,
141 isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
142
143 /* Hold vfsmount and dentry until release is finished */
144 req->vfsmount = mntget(file->f_vfsmnt);
145 req->dentry = dget(file->f_dentry);
146 request_send_background(fc, req);
156 } 147 }
157 148
158 /* Return value is ignored by VFS */ 149 /* Return value is ignored by VFS */
@@ -169,6 +160,28 @@ static int fuse_release(struct inode *inode, struct file *file)
169 return fuse_release_common(inode, file, 0); 160 return fuse_release_common(inode, file, 0);
170} 161}
171 162
163/*
164 * Scramble the ID space with XTEA, so that the value of the files_struct
165 * pointer is not exposed to userspace.
166 */
167static u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
168{
169 u32 *k = fc->scramble_key;
170 u64 v = (unsigned long) id;
171 u32 v0 = v;
172 u32 v1 = v >> 32;
173 u32 sum = 0;
174 int i;
175
176 for (i = 0; i < 32; i++) {
177 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
178 sum += 0x9E3779B9;
179 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
180 }
181
182 return (u64) v0 + ((u64) v1 << 32);
183}
184
172static int fuse_flush(struct file *file, fl_owner_t id) 185static int fuse_flush(struct file *file, fl_owner_t id)
173{ 186{
174 struct inode *inode = file->f_dentry->d_inode; 187 struct inode *inode = file->f_dentry->d_inode;
@@ -184,19 +197,16 @@ static int fuse_flush(struct file *file, fl_owner_t id)
184 if (fc->no_flush) 197 if (fc->no_flush)
185 return 0; 198 return 0;
186 199
187 req = fuse_get_req(fc); 200 req = fuse_get_req_nofail(fc, file);
188 if (IS_ERR(req))
189 return PTR_ERR(req);
190
191 memset(&inarg, 0, sizeof(inarg)); 201 memset(&inarg, 0, sizeof(inarg));
192 inarg.fh = ff->fh; 202 inarg.fh = ff->fh;
203 inarg.lock_owner = fuse_lock_owner_id(fc, id);
193 req->in.h.opcode = FUSE_FLUSH; 204 req->in.h.opcode = FUSE_FLUSH;
194 req->in.h.nodeid = get_node_id(inode); 205 req->in.h.nodeid = get_node_id(inode);
195 req->inode = inode;
196 req->file = file;
197 req->in.numargs = 1; 206 req->in.numargs = 1;
198 req->in.args[0].size = sizeof(inarg); 207 req->in.args[0].size = sizeof(inarg);
199 req->in.args[0].value = &inarg; 208 req->in.args[0].value = &inarg;
209 req->force = 1;
200 request_send(fc, req); 210 request_send(fc, req);
201 err = req->out.h.error; 211 err = req->out.h.error;
202 fuse_put_request(fc, req); 212 fuse_put_request(fc, req);
@@ -232,8 +242,6 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
232 inarg.fsync_flags = datasync ? 1 : 0; 242 inarg.fsync_flags = datasync ? 1 : 0;
233 req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC; 243 req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
234 req->in.h.nodeid = get_node_id(inode); 244 req->in.h.nodeid = get_node_id(inode);
235 req->inode = inode;
236 req->file = file;
237 req->in.numargs = 1; 245 req->in.numargs = 1;
238 req->in.args[0].size = sizeof(inarg); 246 req->in.args[0].size = sizeof(inarg);
239 req->in.args[0].value = &inarg; 247 req->in.args[0].value = &inarg;
@@ -266,8 +274,6 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
266 inarg->size = count; 274 inarg->size = count;
267 req->in.h.opcode = opcode; 275 req->in.h.opcode = opcode;
268 req->in.h.nodeid = get_node_id(inode); 276 req->in.h.nodeid = get_node_id(inode);
269 req->inode = inode;
270 req->file = file;
271 req->in.numargs = 1; 277 req->in.numargs = 1;
272 req->in.args[0].size = sizeof(struct fuse_read_in); 278 req->in.args[0].size = sizeof(struct fuse_read_in);
273 req->in.args[0].value = inarg; 279 req->in.args[0].value = inarg;
@@ -342,6 +348,8 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file,
342 req->out.page_zeroing = 1; 348 req->out.page_zeroing = 1;
343 fuse_read_fill(req, file, inode, pos, count, FUSE_READ); 349 fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
344 if (fc->async_read) { 350 if (fc->async_read) {
351 get_file(file);
352 req->file = file;
345 req->end = fuse_readpages_end; 353 req->end = fuse_readpages_end;
346 request_send_background(fc, req); 354 request_send_background(fc, req);
347 } else { 355 } else {
@@ -420,8 +428,6 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
420 inarg.size = count; 428 inarg.size = count;
421 req->in.h.opcode = FUSE_WRITE; 429 req->in.h.opcode = FUSE_WRITE;
422 req->in.h.nodeid = get_node_id(inode); 430 req->in.h.nodeid = get_node_id(inode);
423 req->inode = inode;
424 req->file = file;
425 req->in.argpages = 1; 431 req->in.argpages = 1;
426 req->in.numargs = 2; 432 req->in.numargs = 2;
427 req->in.args[0].size = sizeof(struct fuse_write_in); 433 req->in.args[0].size = sizeof(struct fuse_write_in);
@@ -619,6 +625,126 @@ static int fuse_set_page_dirty(struct page *page)
619 return 0; 625 return 0;
620} 626}
621 627
628static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
629 struct file_lock *fl)
630{
631 switch (ffl->type) {
632 case F_UNLCK:
633 break;
634
635 case F_RDLCK:
636 case F_WRLCK:
637 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
638 ffl->end < ffl->start)
639 return -EIO;
640
641 fl->fl_start = ffl->start;
642 fl->fl_end = ffl->end;
643 fl->fl_pid = ffl->pid;
644 break;
645
646 default:
647 return -EIO;
648 }
649 fl->fl_type = ffl->type;
650 return 0;
651}
652
653static void fuse_lk_fill(struct fuse_req *req, struct file *file,
654 const struct file_lock *fl, int opcode, pid_t pid)
655{
656 struct inode *inode = file->f_dentry->d_inode;
657 struct fuse_conn *fc = get_fuse_conn(inode);
658 struct fuse_file *ff = file->private_data;
659 struct fuse_lk_in *arg = &req->misc.lk_in;
660
661 arg->fh = ff->fh;
662 arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
663 arg->lk.start = fl->fl_start;
664 arg->lk.end = fl->fl_end;
665 arg->lk.type = fl->fl_type;
666 arg->lk.pid = pid;
667 req->in.h.opcode = opcode;
668 req->in.h.nodeid = get_node_id(inode);
669 req->in.numargs = 1;
670 req->in.args[0].size = sizeof(*arg);
671 req->in.args[0].value = arg;
672}
673
674static int fuse_getlk(struct file *file, struct file_lock *fl)
675{
676 struct inode *inode = file->f_dentry->d_inode;
677 struct fuse_conn *fc = get_fuse_conn(inode);
678 struct fuse_req *req;
679 struct fuse_lk_out outarg;
680 int err;
681
682 req = fuse_get_req(fc);
683 if (IS_ERR(req))
684 return PTR_ERR(req);
685
686 fuse_lk_fill(req, file, fl, FUSE_GETLK, 0);
687 req->out.numargs = 1;
688 req->out.args[0].size = sizeof(outarg);
689 req->out.args[0].value = &outarg;
690 request_send(fc, req);
691 err = req->out.h.error;
692 fuse_put_request(fc, req);
693 if (!err)
694 err = convert_fuse_file_lock(&outarg.lk, fl);
695
696 return err;
697}
698
699static int fuse_setlk(struct file *file, struct file_lock *fl)
700{
701 struct inode *inode = file->f_dentry->d_inode;
702 struct fuse_conn *fc = get_fuse_conn(inode);
703 struct fuse_req *req;
704 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
705 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
706 int err;
707
708 /* Unlock on close is handled by the flush method */
709 if (fl->fl_flags & FL_CLOSE)
710 return 0;
711
712 req = fuse_get_req(fc);
713 if (IS_ERR(req))
714 return PTR_ERR(req);
715
716 fuse_lk_fill(req, file, fl, opcode, pid);
717 request_send(fc, req);
718 err = req->out.h.error;
719 /* locking is restartable */
720 if (err == -EINTR)
721 err = -ERESTARTSYS;
722 fuse_put_request(fc, req);
723 return err;
724}
725
726static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
727{
728 struct inode *inode = file->f_dentry->d_inode;
729 struct fuse_conn *fc = get_fuse_conn(inode);
730 int err;
731
732 if (cmd == F_GETLK) {
733 if (fc->no_lock) {
734 if (!posix_test_lock(file, fl, fl))
735 fl->fl_type = F_UNLCK;
736 err = 0;
737 } else
738 err = fuse_getlk(file, fl);
739 } else {
740 if (fc->no_lock)
741 err = posix_lock_file_wait(file, fl);
742 else
743 err = fuse_setlk(file, fl);
744 }
745 return err;
746}
747
622static const struct file_operations fuse_file_operations = { 748static const struct file_operations fuse_file_operations = {
623 .llseek = generic_file_llseek, 749 .llseek = generic_file_llseek,
624 .read = generic_file_read, 750 .read = generic_file_read,
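
With ->lock wired into the two file_operations tables just below, ordinary POSIX record locks on a FUSE mount are either forwarded to the userspace filesystem as FUSE_GETLK/FUSE_SETLK/FUSE_SETLKW or, when the filesystem sets no_lock, handled locally by the VFS posix lock code. Applications need no changes; a plain fcntl() lock now exercises this path (a sketch, with /mnt/fuse/data as an illustrative path):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* whole file */
	};
	int fd = open("/mnt/fuse/data", O_RDWR);

	if (fd < 0)
		return 1;
	if (fcntl(fd, F_SETLKW, &fl) == -1)	/* FUSE_SETLKW (or local lock) */
		perror("fcntl");
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);		/* FUSE_SETLK with F_UNLCK */
	close(fd);
	return 0;
}

The F_UNLCK-on-close case is deliberately left to the flush method, as noted in fuse_setlk() above, where the scrambled lock owner id is passed in FUSE_FLUSH.
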
@@ -628,6 +754,7 @@ static const struct file_operations fuse_file_operations = {
628 .flush = fuse_flush, 754 .flush = fuse_flush,
629 .release = fuse_release, 755 .release = fuse_release,
630 .fsync = fuse_fsync, 756 .fsync = fuse_fsync,
757 .lock = fuse_file_lock,
631 .sendfile = generic_file_sendfile, 758 .sendfile = generic_file_sendfile,
632}; 759};
633 760
@@ -639,6 +766,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
639 .flush = fuse_flush, 766 .flush = fuse_flush,
640 .release = fuse_release, 767 .release = fuse_release,
641 .fsync = fuse_fsync, 768 .fsync = fuse_fsync,
769 .lock = fuse_file_lock,
642 /* no mmap and sendfile */ 770 /* no mmap and sendfile */
643}; 771};
644 772
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0474202cb5dc..0dbf96621841 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -8,12 +8,13 @@
8 8
9#include <linux/fuse.h> 9#include <linux/fuse.h>
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/mount.h>
11#include <linux/wait.h> 12#include <linux/wait.h>
12#include <linux/list.h> 13#include <linux/list.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <linux/mm.h> 15#include <linux/mm.h>
15#include <linux/backing-dev.h> 16#include <linux/backing-dev.h>
16#include <asm/semaphore.h> 17#include <linux/mutex.h>
17 18
18/** Max number of pages that can be used in a single read request */ 19/** Max number of pages that can be used in a single read request */
19#define FUSE_MAX_PAGES_PER_REQ 32 20#define FUSE_MAX_PAGES_PER_REQ 32
@@ -24,6 +25,9 @@
24/** It could be as large as PATH_MAX, but would that have any uses? */ 25/** It could be as large as PATH_MAX, but would that have any uses? */
25#define FUSE_NAME_MAX 1024 26#define FUSE_NAME_MAX 1024
26 27
28/** Number of dentries for each connection in the control filesystem */
29#define FUSE_CTL_NUM_DENTRIES 3
30
27/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem 31/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem
28 module will check permissions based on the file mode. Otherwise no 32 module will check permissions based on the file mode. Otherwise no
29 permission checking is done in the kernel */ 33 permission checking is done in the kernel */
@@ -33,6 +37,11 @@
33 doing the mount will be allowed to access the filesystem */ 37 doing the mount will be allowed to access the filesystem */
34#define FUSE_ALLOW_OTHER (1 << 1) 38#define FUSE_ALLOW_OTHER (1 << 1)
35 39
40/** List of active connections */
41extern struct list_head fuse_conn_list;
42
43/** Global mutex protecting fuse_conn_list and the control filesystem */
44extern struct mutex fuse_mutex;
36 45
37/** FUSE inode */ 46/** FUSE inode */
38struct fuse_inode { 47struct fuse_inode {
@@ -56,7 +65,7 @@ struct fuse_inode {
56/** FUSE specific file data */ 65/** FUSE specific file data */
57struct fuse_file { 66struct fuse_file {
58 /** Request reserved for flush and release */ 67 /** Request reserved for flush and release */
59 struct fuse_req *release_req; 68 struct fuse_req *reserved_req;
60 69
61 /** File handle used by userspace */ 70 /** File handle used by userspace */
62 u64 fh; 71 u64 fh;
@@ -122,6 +131,7 @@ enum fuse_req_state {
122 FUSE_REQ_PENDING, 131 FUSE_REQ_PENDING,
123 FUSE_REQ_READING, 132 FUSE_REQ_READING,
124 FUSE_REQ_SENT, 133 FUSE_REQ_SENT,
134 FUSE_REQ_WRITING,
125 FUSE_REQ_FINISHED 135 FUSE_REQ_FINISHED
126}; 136};
127 137
@@ -135,12 +145,15 @@ struct fuse_req {
135 fuse_conn */ 145 fuse_conn */
136 struct list_head list; 146 struct list_head list;
137 147
138 /** Entry on the background list */ 148 /** Entry on the interrupts list */
139 struct list_head bg_entry; 149 struct list_head intr_entry;
140 150
141 /** refcount */ 151 /** refcount */
142 atomic_t count; 152 atomic_t count;
143 153
154 /** Unique ID for the interrupt request */
155 u64 intr_unique;
156
144 /* 157 /*
145 * The following bitfields are either set once before the 158 * The following bitfields are either set once before the
146 * request is queued or setting/clearing them is protected by 159 * request is queued or setting/clearing them is protected by
@@ -150,12 +163,18 @@ struct fuse_req {
150 /** True if the request has reply */ 163 /** True if the request has reply */
151 unsigned isreply:1; 164 unsigned isreply:1;
152 165
153 /** The request was interrupted */ 166 /** Force sending of the request even if interrupted */
154 unsigned interrupted:1; 167 unsigned force:1;
168
169 /** The request was aborted */
170 unsigned aborted:1;
155 171
156 /** Request is sent in the background */ 172 /** Request is sent in the background */
157 unsigned background:1; 173 unsigned background:1;
158 174
175 /** The request has been interrupted */
176 unsigned interrupted:1;
177
159 /** Data is being copied to/from the request */ 178 /** Data is being copied to/from the request */
160 unsigned locked:1; 179 unsigned locked:1;
161 180
@@ -181,6 +200,7 @@ struct fuse_req {
181 struct fuse_init_in init_in; 200 struct fuse_init_in init_in;
182 struct fuse_init_out init_out; 201 struct fuse_init_out init_out;
183 struct fuse_read_in read_in; 202 struct fuse_read_in read_in;
203 struct fuse_lk_in lk_in;
184 } misc; 204 } misc;
185 205
186 /** page vector */ 206 /** page vector */
@@ -192,17 +212,20 @@ struct fuse_req {
192 /** offset of data on first page */ 212 /** offset of data on first page */
193 unsigned page_offset; 213 unsigned page_offset;
194 214
195 /** Inode used in the request */
196 struct inode *inode;
197
198 /** Second inode used in the request (or NULL) */
199 struct inode *inode2;
200
201 /** File used in the request (or NULL) */ 215 /** File used in the request (or NULL) */
202 struct file *file; 216 struct file *file;
203 217
218 /** vfsmount used in release */
219 struct vfsmount *vfsmount;
220
221 /** dentry used in release */
222 struct dentry *dentry;
223
204 /** Request completion callback */ 224 /** Request completion callback */
205 void (*end)(struct fuse_conn *, struct fuse_req *); 225 void (*end)(struct fuse_conn *, struct fuse_req *);
226
227 /** Request is stolen from fuse_file->reserved_req */
228 struct file *stolen_file;
206}; 229};
207 230
208/** 231/**
@@ -216,6 +239,9 @@ struct fuse_conn {
216 /** Lock protecting accesses to members of this structure */ 239 /** Lock protecting accesses to members of this structure */
217 spinlock_t lock; 240 spinlock_t lock;
218 241
242 /** Refcount */
243 atomic_t count;
244
219 /** The user id for this mount */ 245 /** The user id for this mount */
220 uid_t user_id; 246 uid_t user_id;
221 247
@@ -243,13 +269,12 @@ struct fuse_conn {
243 /** The list of requests under I/O */ 269 /** The list of requests under I/O */
244 struct list_head io; 270 struct list_head io;
245 271
246 /** Requests put in the background (RELEASE or any other
247 interrupted request) */
248 struct list_head background;
249
250 /** Number of requests currently in the background */ 272 /** Number of requests currently in the background */
251 unsigned num_background; 273 unsigned num_background;
252 274
275 /** Pending interrupts */
276 struct list_head interrupts;
277
253 /** Flag indicating if connection is blocked. This will be 278 /** Flag indicating if connection is blocked. This will be
254 the case before the INIT reply is received, and if there 279 the case before the INIT reply is received, and if there
255 are too many outstanding background requests */ 280 are too many outstanding background requests */
@@ -258,15 +283,9 @@ struct fuse_conn {
258 /** waitq for blocked connection */ 283 /** waitq for blocked connection */
259 wait_queue_head_t blocked_waitq; 284 wait_queue_head_t blocked_waitq;
260 285
261 /** RW semaphore for exclusion with fuse_put_super() */
262 struct rw_semaphore sbput_sem;
263
264 /** The next unique request id */ 286 /** The next unique request id */
265 u64 reqctr; 287 u64 reqctr;
266 288
267 /** Mount is active */
268 unsigned mounted;
269
270 /** Connection established, cleared on umount, connection 289 /** Connection established, cleared on umount, connection
271 abort and device release */ 290 abort and device release */
272 unsigned connected; 291 unsigned connected;
@@ -305,12 +324,18 @@ struct fuse_conn {
305 /** Is removexattr not implemented by fs? */ 324 /** Is removexattr not implemented by fs? */
306 unsigned no_removexattr : 1; 325 unsigned no_removexattr : 1;
307 326
327 /** Are file locking primitives not implemented by fs? */
328 unsigned no_lock : 1;
329
308 /** Is access not implemented by fs? */ 330 /** Is access not implemented by fs? */
309 unsigned no_access : 1; 331 unsigned no_access : 1;
310 332
311 /** Is create not implemented by fs? */ 333 /** Is create not implemented by fs? */
312 unsigned no_create : 1; 334 unsigned no_create : 1;
313 335
336 /** Is interrupt not implemented by fs? */
337 unsigned no_interrupt : 1;
338
314 /** The number of requests waiting for completion */ 339 /** The number of requests waiting for completion */
315 atomic_t num_waiting; 340 atomic_t num_waiting;
316 341
@@ -320,11 +345,23 @@ struct fuse_conn {
320 /** Backing dev info */ 345 /** Backing dev info */
321 struct backing_dev_info bdi; 346 struct backing_dev_info bdi;
322 347
323 /** kobject */ 348 /** Entry on the fuse_conn_list */
324 struct kobject kobj; 349 struct list_head entry;
350
351 /** Unique ID */
352 u64 id;
353
354 /** Dentries in the control filesystem */
355 struct dentry *ctl_dentry[FUSE_CTL_NUM_DENTRIES];
356
357 /** number of dentries used in the above array */
358 int ctl_ndents;
325 359
326 /** O_ASYNC requests */ 360 /** O_ASYNC requests */
327 struct fasync_struct *fasync; 361 struct fasync_struct *fasync;
362
363 /** Key for lock owner ID scrambling */
364 u32 scramble_key[4];
328}; 365};
329 366
330static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) 367static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -337,11 +374,6 @@ static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
337 return get_fuse_conn_super(inode->i_sb); 374 return get_fuse_conn_super(inode->i_sb);
338} 375}
339 376
340static inline struct fuse_conn *get_fuse_conn_kobj(struct kobject *obj)
341{
342 return container_of(obj, struct fuse_conn, kobj);
343}
344
345static inline struct fuse_inode *get_fuse_inode(struct inode *inode) 377static inline struct fuse_inode *get_fuse_inode(struct inode *inode)
346{ 378{
347 return container_of(inode, struct fuse_inode, inode); 379 return container_of(inode, struct fuse_inode, inode);
@@ -383,12 +415,9 @@ void fuse_file_free(struct fuse_file *ff);
383void fuse_finish_open(struct inode *inode, struct file *file, 415void fuse_finish_open(struct inode *inode, struct file *file,
384 struct fuse_file *ff, struct fuse_open_out *outarg); 416 struct fuse_file *ff, struct fuse_open_out *outarg);
385 417
386/** 418/** */
387 * Send a RELEASE request 419struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
388 */ 420 int opcode);
389void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
390 u64 nodeid, struct inode *inode, int flags, int isdir);
391
392/** 421/**
393 * Send RELEASE or RELEASEDIR request 422 * Send RELEASE or RELEASEDIR request
394 */ 423 */
@@ -435,6 +464,9 @@ int fuse_dev_init(void);
435 */ 464 */
436void fuse_dev_cleanup(void); 465void fuse_dev_cleanup(void);
437 466
467int fuse_ctl_init(void);
468void fuse_ctl_cleanup(void);
469
438/** 470/**
439 * Allocate a request 471 * Allocate a request
440 */ 472 */
@@ -446,14 +478,14 @@ struct fuse_req *fuse_request_alloc(void);
446void fuse_request_free(struct fuse_req *req); 478void fuse_request_free(struct fuse_req *req);
447 479
448/** 480/**
449 * Reinitialize a request, the preallocated flag is left unmodified 481 * Get a request, may fail with -ENOMEM
450 */ 482 */
451void fuse_reset_request(struct fuse_req *req); 483struct fuse_req *fuse_get_req(struct fuse_conn *fc);
452 484
453/** 485/**
454 * Reserve a preallocated request 486 * Get a request for a file operation, always succeeds
455 */ 487 */
456struct fuse_req *fuse_get_req(struct fuse_conn *fc); 488struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file);
457 489
458/** 490/**
459 * Decrement reference count of a request. If count goes to zero free 491 * Decrement reference count of a request. If count goes to zero free
@@ -476,11 +508,6 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
476 */ 508 */
477void request_send_background(struct fuse_conn *fc, struct fuse_req *req); 509void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
478 510
479/**
480 * Release inodes and file associated with background request
481 */
482void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req);
483
484/* Abort all requests */ 511/* Abort all requests */
485void fuse_abort_conn(struct fuse_conn *fc); 512void fuse_abort_conn(struct fuse_conn *fc);
486 513
@@ -493,3 +520,23 @@ int fuse_do_getattr(struct inode *inode);
493 * Invalidate inode attributes 520 * Invalidate inode attributes
494 */ 521 */
495void fuse_invalidate_attr(struct inode *inode); 522void fuse_invalidate_attr(struct inode *inode);
523
524/**
525 * Acquire reference to fuse_conn
526 */
527struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
528
529/**
530 * Release reference to fuse_conn
531 */
532void fuse_conn_put(struct fuse_conn *fc);
533
534/**
535 * Add connection to control filesystem
536 */
537int fuse_ctl_add_conn(struct fuse_conn *fc);
538
539/**
540 * Remove connection from control filesystem
541 */
542void fuse_ctl_remove_conn(struct fuse_conn *fc);
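
Note on the fuse_i.h hunk above: struct fuse_conn now carries a plain atomic refcount (count) with fuse_conn_get()/fuse_conn_put() declared at the end, replacing the kobject-based lifetime that inode.c removes further down. A simplified user-space analogue of that get/put pattern, using C11 atomics instead of the kernel's atomic_t and free() instead of kfree(); all names here are placeholders:

/* Last reference frees the object, mirroring fuse_conn_put(). */
#include <stdatomic.h>
#include <stdlib.h>

struct conn {
	atomic_int count;	/* mirrors fc->count */
	/* ... connection state ... */
};

static struct conn *conn_new(void)
{
	struct conn *c = calloc(1, sizeof(*c));
	if (c)
		atomic_init(&c->count, 1);	/* creator holds one reference */
	return c;
}

static struct conn *conn_get(struct conn *c)
{
	atomic_fetch_add(&c->count, 1);
	return c;
}

static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->count, 1) == 1)
		free(c);
}

int main(void)
{
	struct conn *c = conn_new();
	struct conn *dev_ref = conn_get(c);	/* e.g. file->private_data */

	conn_put(dev_ref);	/* device release drops its reference */
	conn_put(c);		/* put_super drops the last one */
	return 0;
}
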
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a13c0f529058..5ceb8bd7a189 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -11,25 +11,20 @@
11#include <linux/pagemap.h> 11#include <linux/pagemap.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/mount.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
16#include <linux/init.h> 15#include <linux/init.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/parser.h> 17#include <linux/parser.h>
19#include <linux/statfs.h> 18#include <linux/statfs.h>
19#include <linux/random.h>
20 20
21MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); 21MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
22MODULE_DESCRIPTION("Filesystem in Userspace"); 22MODULE_DESCRIPTION("Filesystem in Userspace");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
25static kmem_cache_t *fuse_inode_cachep; 25static kmem_cache_t *fuse_inode_cachep;
26static struct subsystem connections_subsys; 26struct list_head fuse_conn_list;
27 27DEFINE_MUTEX(fuse_mutex);
28struct fuse_conn_attr {
29 struct attribute attr;
30 ssize_t (*show)(struct fuse_conn *, char *);
31 ssize_t (*store)(struct fuse_conn *, const char *, size_t);
32};
33 28
34#define FUSE_SUPER_MAGIC 0x65735546 29#define FUSE_SUPER_MAGIC 0x65735546
35 30
@@ -104,6 +99,14 @@ static void fuse_clear_inode(struct inode *inode)
104 } 99 }
105} 100}
106 101
102static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
103{
104 if (*flags & MS_MANDLOCK)
105 return -EINVAL;
106
107 return 0;
108}
109
107void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr) 110void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
108{ 111{
109 if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size) 112 if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size)
@@ -204,22 +207,19 @@ static void fuse_put_super(struct super_block *sb)
204{ 207{
205 struct fuse_conn *fc = get_fuse_conn_super(sb); 208 struct fuse_conn *fc = get_fuse_conn_super(sb);
206 209
207 down_write(&fc->sbput_sem);
208 while (!list_empty(&fc->background))
209 fuse_release_background(fc,
210 list_entry(fc->background.next,
211 struct fuse_req, bg_entry));
212
213 spin_lock(&fc->lock); 210 spin_lock(&fc->lock);
214 fc->mounted = 0;
215 fc->connected = 0; 211 fc->connected = 0;
212 fc->blocked = 0;
216 spin_unlock(&fc->lock); 213 spin_unlock(&fc->lock);
217 up_write(&fc->sbput_sem);
218 /* Flush all readers on this fs */ 214 /* Flush all readers on this fs */
219 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 215 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
220 wake_up_all(&fc->waitq); 216 wake_up_all(&fc->waitq);
221 kobject_del(&fc->kobj); 217 wake_up_all(&fc->blocked_waitq);
222 kobject_put(&fc->kobj); 218 mutex_lock(&fuse_mutex);
219 list_del(&fc->entry);
220 fuse_ctl_remove_conn(fc);
221 mutex_unlock(&fuse_mutex);
222 fuse_conn_put(fc);
223} 223}
224 224
225static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) 225static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
@@ -369,11 +369,6 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
369 return 0; 369 return 0;
370} 370}
371 371
372static void fuse_conn_release(struct kobject *kobj)
373{
374 kfree(get_fuse_conn_kobj(kobj));
375}
376
377static struct fuse_conn *new_conn(void) 372static struct fuse_conn *new_conn(void)
378{ 373{
379 struct fuse_conn *fc; 374 struct fuse_conn *fc;
@@ -381,24 +376,35 @@ static struct fuse_conn *new_conn(void)
381 fc = kzalloc(sizeof(*fc), GFP_KERNEL); 376 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
382 if (fc) { 377 if (fc) {
383 spin_lock_init(&fc->lock); 378 spin_lock_init(&fc->lock);
379 atomic_set(&fc->count, 1);
384 init_waitqueue_head(&fc->waitq); 380 init_waitqueue_head(&fc->waitq);
385 init_waitqueue_head(&fc->blocked_waitq); 381 init_waitqueue_head(&fc->blocked_waitq);
386 INIT_LIST_HEAD(&fc->pending); 382 INIT_LIST_HEAD(&fc->pending);
387 INIT_LIST_HEAD(&fc->processing); 383 INIT_LIST_HEAD(&fc->processing);
388 INIT_LIST_HEAD(&fc->io); 384 INIT_LIST_HEAD(&fc->io);
389 INIT_LIST_HEAD(&fc->background); 385 INIT_LIST_HEAD(&fc->interrupts);
390 init_rwsem(&fc->sbput_sem);
391 kobj_set_kset_s(fc, connections_subsys);
392 kobject_init(&fc->kobj);
393 atomic_set(&fc->num_waiting, 0); 386 atomic_set(&fc->num_waiting, 0);
394 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 387 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
395 fc->bdi.unplug_io_fn = default_unplug_io_fn; 388 fc->bdi.unplug_io_fn = default_unplug_io_fn;
396 fc->reqctr = 0; 389 fc->reqctr = 0;
397 fc->blocked = 1; 390 fc->blocked = 1;
391 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
398 } 392 }
399 return fc; 393 return fc;
400} 394}
401 395
396void fuse_conn_put(struct fuse_conn *fc)
397{
398 if (atomic_dec_and_test(&fc->count))
399 kfree(fc);
400}
401
402struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
403{
404 atomic_inc(&fc->count);
405 return fc;
406}
407
402static struct inode *get_root_inode(struct super_block *sb, unsigned mode) 408static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
403{ 409{
404 struct fuse_attr attr; 410 struct fuse_attr attr;
@@ -414,6 +420,7 @@ static struct super_operations fuse_super_operations = {
414 .destroy_inode = fuse_destroy_inode, 420 .destroy_inode = fuse_destroy_inode,
415 .read_inode = fuse_read_inode, 421 .read_inode = fuse_read_inode,
416 .clear_inode = fuse_clear_inode, 422 .clear_inode = fuse_clear_inode,
423 .remount_fs = fuse_remount_fs,
417 .put_super = fuse_put_super, 424 .put_super = fuse_put_super,
418 .umount_begin = fuse_umount_begin, 425 .umount_begin = fuse_umount_begin,
419 .statfs = fuse_statfs, 426 .statfs = fuse_statfs,
@@ -433,8 +440,12 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
433 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; 440 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
434 if (arg->flags & FUSE_ASYNC_READ) 441 if (arg->flags & FUSE_ASYNC_READ)
435 fc->async_read = 1; 442 fc->async_read = 1;
436 } else 443 if (!(arg->flags & FUSE_POSIX_LOCKS))
444 fc->no_lock = 1;
445 } else {
437 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 446 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
447 fc->no_lock = 1;
448 }
438 449
439 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); 450 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
440 fc->minor = arg->minor; 451 fc->minor = arg->minor;
@@ -452,7 +463,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
452 arg->major = FUSE_KERNEL_VERSION; 463 arg->major = FUSE_KERNEL_VERSION;
453 arg->minor = FUSE_KERNEL_MINOR_VERSION; 464 arg->minor = FUSE_KERNEL_MINOR_VERSION;
454 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; 465 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
455 arg->flags |= FUSE_ASYNC_READ; 466 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS;
456 req->in.h.opcode = FUSE_INIT; 467 req->in.h.opcode = FUSE_INIT;
457 req->in.numargs = 1; 468 req->in.numargs = 1;
458 req->in.args[0].size = sizeof(*arg); 469 req->in.args[0].size = sizeof(*arg);
@@ -468,10 +479,9 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
468 request_send_background(fc, req); 479 request_send_background(fc, req);
469} 480}
470 481
471static unsigned long long conn_id(void) 482static u64 conn_id(void)
472{ 483{
473 /* BKL is held for ->get_sb() */ 484 static u64 ctr = 1;
474 static unsigned long long ctr = 1;
475 return ctr++; 485 return ctr++;
476} 486}
477 487
@@ -485,6 +495,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
485 struct fuse_req *init_req; 495 struct fuse_req *init_req;
486 int err; 496 int err;
487 497
498 if (sb->s_flags & MS_MANDLOCK)
499 return -EINVAL;
500
488 if (!parse_fuse_opt((char *) data, &d)) 501 if (!parse_fuse_opt((char *) data, &d))
489 return -EINVAL; 502 return -EINVAL;
490 503
@@ -528,25 +541,21 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
528 if (!init_req) 541 if (!init_req)
529 goto err_put_root; 542 goto err_put_root;
530 543
531 err = kobject_set_name(&fc->kobj, "%llu", conn_id()); 544 mutex_lock(&fuse_mutex);
532 if (err)
533 goto err_free_req;
534
535 err = kobject_add(&fc->kobj);
536 if (err)
537 goto err_free_req;
538
539 /* Setting file->private_data can't race with other mount()
540 instances, since BKL is held for ->get_sb() */
541 err = -EINVAL; 545 err = -EINVAL;
542 if (file->private_data) 546 if (file->private_data)
543 goto err_kobject_del; 547 goto err_unlock;
544 548
549 fc->id = conn_id();
550 err = fuse_ctl_add_conn(fc);
551 if (err)
552 goto err_unlock;
553
554 list_add_tail(&fc->entry, &fuse_conn_list);
545 sb->s_root = root_dentry; 555 sb->s_root = root_dentry;
546 fc->mounted = 1;
547 fc->connected = 1; 556 fc->connected = 1;
548 kobject_get(&fc->kobj); 557 file->private_data = fuse_conn_get(fc);
549 file->private_data = fc; 558 mutex_unlock(&fuse_mutex);
550 /* 559 /*
551 * atomic_dec_and_test() in fput() provides the necessary 560 * atomic_dec_and_test() in fput() provides the necessary
552 * memory barrier for file->private_data to be visible on all 561 * memory barrier for file->private_data to be visible on all
@@ -558,15 +567,14 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
558 567
559 return 0; 568 return 0;
560 569
561 err_kobject_del: 570 err_unlock:
562 kobject_del(&fc->kobj); 571 mutex_unlock(&fuse_mutex);
563 err_free_req:
564 fuse_request_free(init_req); 572 fuse_request_free(init_req);
565 err_put_root: 573 err_put_root:
566 dput(root_dentry); 574 dput(root_dentry);
567 err: 575 err:
568 fput(file); 576 fput(file);
569 kobject_put(&fc->kobj); 577 fuse_conn_put(fc);
570 return err; 578 return err;
571} 579}
572 580
@@ -584,68 +592,8 @@ static struct file_system_type fuse_fs_type = {
584 .kill_sb = kill_anon_super, 592 .kill_sb = kill_anon_super,
585}; 593};
586 594
587static ssize_t fuse_conn_waiting_show(struct fuse_conn *fc, char *page)
588{
589 return sprintf(page, "%i\n", atomic_read(&fc->num_waiting));
590}
591
592static ssize_t fuse_conn_abort_store(struct fuse_conn *fc, const char *page,
593 size_t count)
594{
595 fuse_abort_conn(fc);
596 return count;
597}
598
599static struct fuse_conn_attr fuse_conn_waiting =
600 __ATTR(waiting, 0400, fuse_conn_waiting_show, NULL);
601static struct fuse_conn_attr fuse_conn_abort =
602 __ATTR(abort, 0600, NULL, fuse_conn_abort_store);
603
604static struct attribute *fuse_conn_attrs[] = {
605 &fuse_conn_waiting.attr,
606 &fuse_conn_abort.attr,
607 NULL,
608};
609
610static ssize_t fuse_conn_attr_show(struct kobject *kobj,
611 struct attribute *attr,
612 char *page)
613{
614 struct fuse_conn_attr *fca =
615 container_of(attr, struct fuse_conn_attr, attr);
616
617 if (fca->show)
618 return fca->show(get_fuse_conn_kobj(kobj), page);
619 else
620 return -EACCES;
621}
622
623static ssize_t fuse_conn_attr_store(struct kobject *kobj,
624 struct attribute *attr,
625 const char *page, size_t count)
626{
627 struct fuse_conn_attr *fca =
628 container_of(attr, struct fuse_conn_attr, attr);
629
630 if (fca->store)
631 return fca->store(get_fuse_conn_kobj(kobj), page, count);
632 else
633 return -EACCES;
634}
635
636static struct sysfs_ops fuse_conn_sysfs_ops = {
637 .show = &fuse_conn_attr_show,
638 .store = &fuse_conn_attr_store,
639};
640
641static struct kobj_type ktype_fuse_conn = {
642 .release = fuse_conn_release,
643 .sysfs_ops = &fuse_conn_sysfs_ops,
644 .default_attrs = fuse_conn_attrs,
645};
646
647static decl_subsys(fuse, NULL, NULL); 595static decl_subsys(fuse, NULL, NULL);
648static decl_subsys(connections, &ktype_fuse_conn, NULL); 596static decl_subsys(connections, NULL, NULL);
649 597
650static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, 598static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
651 unsigned long flags) 599 unsigned long flags)
@@ -719,6 +667,7 @@ static int __init fuse_init(void)
719 printk("fuse init (API version %i.%i)\n", 667 printk("fuse init (API version %i.%i)\n",
720 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION); 668 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
721 669
670 INIT_LIST_HEAD(&fuse_conn_list);
722 res = fuse_fs_init(); 671 res = fuse_fs_init();
723 if (res) 672 if (res)
724 goto err; 673 goto err;
@@ -731,8 +680,14 @@ static int __init fuse_init(void)
731 if (res) 680 if (res)
732 goto err_dev_cleanup; 681 goto err_dev_cleanup;
733 682
683 res = fuse_ctl_init();
684 if (res)
685 goto err_sysfs_cleanup;
686
734 return 0; 687 return 0;
735 688
689 err_sysfs_cleanup:
690 fuse_sysfs_cleanup();
736 err_dev_cleanup: 691 err_dev_cleanup:
737 fuse_dev_cleanup(); 692 fuse_dev_cleanup();
738 err_fs_cleanup: 693 err_fs_cleanup:
@@ -745,6 +700,7 @@ static void __exit fuse_exit(void)
745{ 700{
746 printk(KERN_DEBUG "fuse exit\n"); 701 printk(KERN_DEBUG "fuse exit\n");
747 702
703 fuse_ctl_cleanup();
748 fuse_sysfs_cleanup(); 704 fuse_sysfs_cleanup();
749 fuse_fs_cleanup(); 705 fuse_fs_cleanup();
750 fuse_dev_cleanup(); 706 fuse_dev_cleanup();
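
Note on the fs/fuse/inode.c hunks above: the per-connection kobject and sysfs attributes are replaced by fuse_conn_list protected by fuse_mutex plus the new control filesystem, MS_MANDLOCK mounts and remounts are rejected, and fuse_send_init() advertises FUSE_POSIX_LOCKS while process_init_reply() sets no_lock when the server does not echo the flag back. A small sketch of that flag-negotiation pattern; the bit values and struct layout below are illustrative, not the real fuse.h definitions:

/* Capability negotiation sketch: fall back to in-kernel locking when the
 * userspace server does not acknowledge POSIX lock support. */
#include <stdio.h>

#define ASYNC_READ   (1 << 0)	/* assumed bit layout, for illustration only */
#define POSIX_LOCKS  (1 << 1)

struct conn_caps {
	unsigned async_read : 1;
	unsigned no_lock    : 1;	/* lock requests handled in-kernel */
};

static void negotiate(struct conn_caps *fc, unsigned reply_flags, int reply_ok)
{
	if (reply_ok) {
		if (reply_flags & ASYNC_READ)
			fc->async_read = 1;
		if (!(reply_flags & POSIX_LOCKS))
			fc->no_lock = 1;
	} else {
		/* old server: no INIT flags at all */
		fc->no_lock = 1;
	}
}

int main(void)
{
	struct conn_caps fc = { 0 };

	negotiate(&fc, ASYNC_READ, 1);	/* server without POSIX lock support */
	printf("no_lock=%u\n", fc.no_lock);	/* prints 1 */
	return 0;
}
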
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 80d7f53fd0a7..de5bafb4e853 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -531,6 +531,7 @@ static int do_one_pass(journal_t *journal,
531 default: 531 default:
532 jbd_debug(3, "Unrecognised magic %d, end of scan.\n", 532 jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
533 blocktype); 533 blocktype);
534 brelse(bh);
534 goto done; 535 goto done;
535 } 536 }
536 } 537 }
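
Note on the one-line jbd fix above: do_one_pass() now releases the buffer head on the "unrecognised magic" exit, where the reference taken when the block was read was previously leaked. A generic user-space sketch of the acquire/release-on-every-path rule the fix restores; get_block()/put_block() are stand-ins, not jbd functions:

#include <stdlib.h>

struct block { int refs; };

static struct block *get_block(void)
{
	struct block *b = malloc(sizeof(*b));
	if (b)
		b->refs = 1;
	return b;
}

static void put_block(struct block *b)
{
	if (--b->refs == 0)
		free(b);
}

static int scan(int magic_ok)
{
	struct block *bh = get_block();

	if (!bh)
		return -1;

	if (!magic_ok) {
		put_block(bh);	/* the added brelse(): drop the ref before bailing out */
		return -1;
	}

	/* ... process the block ... */
	put_block(bh);
	return 0;
}

int main(void)
{
	scan(0);	/* the bad-magic path no longer leaks the reference */
	return 0;
}
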
diff --git a/fs/namei.c b/fs/namei.c
index bb4a3e40e432..c784e8bb57a3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2243,14 +2243,16 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
2243 int error; 2243 int error;
2244 char * to; 2244 char * to;
2245 2245
2246 if (flags != 0) 2246 if ((flags & ~AT_SYMLINK_FOLLOW) != 0)
2247 return -EINVAL; 2247 return -EINVAL;
2248 2248
2249 to = getname(newname); 2249 to = getname(newname);
2250 if (IS_ERR(to)) 2250 if (IS_ERR(to))
2251 return PTR_ERR(to); 2251 return PTR_ERR(to);
2252 2252
2253 error = __user_walk_fd(olddfd, oldname, 0, &old_nd); 2253 error = __user_walk_fd(olddfd, oldname,
2254 flags & AT_SYMLINK_FOLLOW ? LOOKUP_FOLLOW : 0,
2255 &old_nd);
2254 if (error) 2256 if (error)
2255 goto exit; 2257 goto exit;
2256 error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd); 2258 error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
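
Note on the sys_linkat() hunk above: AT_SYMLINK_FOLLOW is now accepted and mapped to LOOKUP_FOLLOW when resolving the old path, while any other flag bit still returns -EINVAL. The user-visible effect, as a small sketch (file names are illustrative):

/* With AT_SYMLINK_FOLLOW the hard link targets whatever the symlink
 * resolves to; with flags == 0 it targets the symlink itself. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hard link to whatever "alias" points at (new behaviour) */
	if (linkat(AT_FDCWD, "alias", AT_FDCWD, "link-to-target",
		   AT_SYMLINK_FOLLOW) == -1)
		perror("linkat(AT_SYMLINK_FOLLOW)");

	/* hard link to the symlink "alias" itself (old default) */
	if (linkat(AT_FDCWD, "alias", AT_FDCWD, "link-to-symlink", 0) == -1)
		perror("linkat(0)");

	return 0;
}
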
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 88292f9e4b9b..2e42c2dcae12 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1358,7 +1358,7 @@ err_out:
1358 goto out; 1358 goto out;
1359} 1359}
1360 1360
1361static size_t __ntfs_copy_from_user_iovec(char *vaddr, 1361static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
1362 const struct iovec *iov, size_t iov_ofs, size_t bytes) 1362 const struct iovec *iov, size_t iov_ofs, size_t bytes)
1363{ 1363{
1364 size_t total = 0; 1364 size_t total = 0;
@@ -1376,10 +1376,6 @@ static size_t __ntfs_copy_from_user_iovec(char *vaddr,
1376 bytes -= len; 1376 bytes -= len;
1377 vaddr += len; 1377 vaddr += len;
1378 if (unlikely(left)) { 1378 if (unlikely(left)) {
1379 /*
1380 * Zero the rest of the target like __copy_from_user().
1381 */
1382 memset(vaddr, 0, bytes);
1383 total -= left; 1379 total -= left;
1384 break; 1380 break;
1385 } 1381 }
@@ -1420,11 +1416,13 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
1420 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s 1416 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
1421 * single-segment behaviour. 1417 * single-segment behaviour.
1422 * 1418 *
1423 * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and 1419 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
1424 * when not atomic. This is ok because __ntfs_copy_from_user_iovec() calls 1420 * when atomic and when not atomic. This is ok because
1425 * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In 1421 * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
1426 * fact, the only difference between __copy_from_user_inatomic() and 1422 * and it is ok to call this when non-atomic.
1427 * __copy_from_user() is that the latter calls might_sleep(). And on many 1423 * In fact, the only difference between __copy_from_user_inatomic() and
1424 * __copy_from_user() is that the latter calls might_sleep() and the former
1425 * should not zero the tail of the buffer on error. And on many
1428 * architectures __copy_from_user_inatomic() is just defined to 1426 * architectures __copy_from_user_inatomic() is just defined to
1429 * __copy_from_user() so it makes no difference at all on those architectures. 1427 * __copy_from_user() so it makes no difference at all on those architectures.
1430 */ 1428 */
@@ -1441,14 +1439,18 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
1441 if (len > bytes) 1439 if (len > bytes)
1442 len = bytes; 1440 len = bytes;
1443 kaddr = kmap_atomic(*pages, KM_USER0); 1441 kaddr = kmap_atomic(*pages, KM_USER0);
1444 copied = __ntfs_copy_from_user_iovec(kaddr + ofs, 1442 copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
1445 *iov, *iov_ofs, len); 1443 *iov, *iov_ofs, len);
1446 kunmap_atomic(kaddr, KM_USER0); 1444 kunmap_atomic(kaddr, KM_USER0);
1447 if (unlikely(copied != len)) { 1445 if (unlikely(copied != len)) {
1448 /* Do it the slow way. */ 1446 /* Do it the slow way. */
1449 kaddr = kmap(*pages); 1447 kaddr = kmap(*pages);
1450 copied = __ntfs_copy_from_user_iovec(kaddr + ofs, 1448 copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
1451 *iov, *iov_ofs, len); 1449 *iov, *iov_ofs, len);
1450 /*
1451 * Zero the rest of the target like __copy_from_user().
1452 */
1453 memset(kaddr + ofs + copied, 0, len - copied);
1452 kunmap(*pages); 1454 kunmap(*pages);
1453 if (unlikely(copied != len)) 1455 if (unlikely(copied != len))
1454 goto err_out; 1456 goto err_out;
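
Note on the ntfs hunks above: the rename to __ntfs_copy_from_user_iovec_inatomic() tracks a semantic change in __copy_from_user_inatomic(), which no longer zeroes the uncopied tail on a fault, so the kmap() slow path now does the memset() itself to keep the __copy_from_user()-style guarantee. A user-space sketch of that contract; copy_inatomic() below is a stand-in that "faults" after a few bytes, not a real kernel interface:

#include <stdio.h>
#include <string.h>

/* Pretend copy that may stop early; returns the number of bytes NOT copied. */
static size_t copy_inatomic(char *dst, const char *src, size_t len,
			    size_t fault_at)
{
	size_t done = len < fault_at ? len : fault_at;

	memcpy(dst, src, done);
	return len - done;	/* no tail zeroing here any more */
}

int main(void)
{
	char dst[16];
	size_t len = 12, copied, left;

	memset(dst, 'X', sizeof(dst));

	left = copy_inatomic(dst, "hello, world", len, 5);	/* short copy */
	copied = len - left;

	/* Slow-path responsibility: zero the rest like __copy_from_user(). */
	memset(dst + copied, 0, len - copied);

	printf("copied=%zu left=%zu\n", copied, left);
	return 0;
}
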
diff --git a/fs/open.c b/fs/open.c
index 5fb16e5267dc..303f06d2a7b9 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -322,7 +322,7 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
322 322
323 error = locks_verify_truncate(inode, file, length); 323 error = locks_verify_truncate(inode, file, length);
324 if (!error) 324 if (!error)
325 error = do_truncate(dentry, length, 0, file); 325 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
326out_putf: 326out_putf:
327 fput(file); 327 fput(file);
328out: 328out:
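
Note on the fs/open.c hunk above: do_sys_ftruncate() now passes ATTR_MTIME|ATTR_CTIME to do_truncate(), so ftruncate() explicitly requests a timestamp update, matching truncate(). A quick user-space check of that behaviour; the file name is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("tmpfile", O_RDWR | O_CREAT, 0600);
	struct stat before, after;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	write(fd, "data", 4);
	fstat(fd, &before);
	sleep(1);			/* make a timestamp change observable */

	if (ftruncate(fd, 0) == -1)	/* should bump mtime and ctime */
		perror("ftruncate");

	fstat(fd, &after);
	printf("mtime changed: %s\n",
	       after.st_mtime != before.st_mtime ? "yes" : "no");
	close(fd);
	return 0;
}
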
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 464e2bce0203..efc7c91128af 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -64,6 +64,11 @@ static int openpromfs_readdir(struct file *, void *, filldir_t);
64static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd); 64static struct dentry *openpromfs_lookup(struct inode *, struct dentry *dentry, struct nameidata *nd);
65static int openpromfs_unlink (struct inode *, struct dentry *dentry); 65static int openpromfs_unlink (struct inode *, struct dentry *dentry);
66 66
67static inline u16 ptr_nod(void *p)
68{
69 return (long)p & 0xFFFF;
70}
71
67static ssize_t nodenum_read(struct file *file, char __user *buf, 72static ssize_t nodenum_read(struct file *file, char __user *buf,
68 size_t count, loff_t *ppos) 73 size_t count, loff_t *ppos)
69{ 74{
@@ -72,7 +77,7 @@ static ssize_t nodenum_read(struct file *file, char __user *buf,
72 77
73 if (count < 0 || !inode->u.generic_ip) 78 if (count < 0 || !inode->u.generic_ip)
74 return -EINVAL; 79 return -EINVAL;
75 sprintf (buffer, "%8.8x\n", (u32)(long)(inode->u.generic_ip)); 80 sprintf (buffer, "%8.8lx\n", (long)inode->u.generic_ip);
76 if (file->f_pos >= 9) 81 if (file->f_pos >= 9)
77 return 0; 82 return 0;
78 if (count > 9 - file->f_pos) 83 if (count > 9 - file->f_pos)
@@ -95,9 +100,9 @@ static ssize_t property_read(struct file *filp, char __user *buf,
95 char buffer[64]; 100 char buffer[64];
96 101
97 if (!filp->private_data) { 102 if (!filp->private_data) {
98 node = nodes[(u16)((long)inode->u.generic_ip)].node; 103 node = nodes[ptr_nod(inode->u.generic_ip)].node;
99 i = ((u32)(long)inode->u.generic_ip) >> 16; 104 i = ((u32)(long)inode->u.generic_ip) >> 16;
100 if ((u16)((long)inode->u.generic_ip) == aliases) { 105 if (ptr_nod(inode->u.generic_ip) == aliases) {
101 if (i >= aliases_nodes) 106 if (i >= aliases_nodes)
102 p = NULL; 107 p = NULL;
103 else 108 else
@@ -111,7 +116,7 @@ static ssize_t property_read(struct file *filp, char __user *buf,
111 return -EIO; 116 return -EIO;
112 i = prom_getproplen (node, p); 117 i = prom_getproplen (node, p);
113 if (i < 0) { 118 if (i < 0) {
114 if ((u16)((long)inode->u.generic_ip) == aliases) 119 if (ptr_nod(inode->u.generic_ip) == aliases)
115 i = 0; 120 i = 0;
116 else 121 else
117 return -EIO; 122 return -EIO;
@@ -123,7 +128,7 @@ static ssize_t property_read(struct file *filp, char __user *buf,
123 GFP_KERNEL); 128 GFP_KERNEL);
124 if (!filp->private_data) 129 if (!filp->private_data)
125 return -ENOMEM; 130 return -ENOMEM;
126 op = (openprom_property *)filp->private_data; 131 op = filp->private_data;
127 op->flag = 0; 132 op->flag = 0;
128 op->alloclen = 2 * i; 133 op->alloclen = 2 * i;
129 strcpy (op->name, p); 134 strcpy (op->name, p);
@@ -163,7 +168,7 @@ static ssize_t property_read(struct file *filp, char __user *buf,
163 op->len--; 168 op->len--;
164 } 169 }
165 } else 170 } else
166 op = (openprom_property *)filp->private_data; 171 op = filp->private_data;
167 if (!count || !(op->len || (op->flag & OPP_ASCIIZ))) 172 if (!count || !(op->len || (op->flag & OPP_ASCIIZ)))
168 return 0; 173 return 0;
169 if (*ppos >= 0xffffff || count >= 0xffffff) 174 if (*ppos >= 0xffffff || count >= 0xffffff)
@@ -335,7 +340,7 @@ static ssize_t property_write(struct file *filp, const char __user *buf,
335 return i; 340 return i;
336 } 341 }
337 k = *ppos; 342 k = *ppos;
338 op = (openprom_property *)filp->private_data; 343 op = filp->private_data;
339 if (!(op->flag & OPP_STRING)) { 344 if (!(op->flag & OPP_STRING)) {
340 u32 *first, *last; 345 u32 *first, *last;
341 int first_off, last_cnt; 346 int first_off, last_cnt;
@@ -388,13 +393,13 @@ static ssize_t property_write(struct file *filp, const char __user *buf,
388 memcpy (b, filp->private_data, 393 memcpy (b, filp->private_data,
389 sizeof (openprom_property) 394 sizeof (openprom_property)
390 + strlen (op->name) + op->alloclen); 395 + strlen (op->name) + op->alloclen);
391 memset (((char *)b) + sizeof (openprom_property) 396 memset (b + sizeof (openprom_property)
392 + strlen (op->name) + op->alloclen, 397 + strlen (op->name) + op->alloclen,
393 0, 2 * i - op->alloclen); 398 0, 2 * i - op->alloclen);
394 op = (openprom_property *)b; 399 op = b;
395 op->alloclen = 2*i; 400 op->alloclen = 2*i;
396 b = filp->private_data; 401 b = filp->private_data;
397 filp->private_data = (void *)op; 402 filp->private_data = op;
398 kfree (b); 403 kfree (b);
399 } 404 }
400 first = ((u32 *)op->value) + (k / 9); 405 first = ((u32 *)op->value) + (k / 9);
@@ -448,10 +453,11 @@ static ssize_t property_write(struct file *filp, const char __user *buf,
448 *q |= simple_strtoul (tmp, NULL, 16); 453 *q |= simple_strtoul (tmp, NULL, 16);
449 buf += last_cnt; 454 buf += last_cnt;
450 } else { 455 } else {
451 char tchars[17]; /* XXX yuck... */ 456 char tchars[2 * sizeof(long) + 1];
452 457
453 if (copy_from_user(tchars, buf, 16)) 458 if (copy_from_user(tchars, buf, sizeof(tchars) - 1))
454 return -EFAULT; 459 return -EFAULT;
460 tchars[sizeof(tchars) - 1] = '\0';
455 *q = simple_strtoul (tchars, NULL, 16); 461 *q = simple_strtoul (tchars, NULL, 16);
456 buf += 9; 462 buf += 9;
457 } 463 }
@@ -497,13 +503,13 @@ write_try_string:
497 memcpy (b, filp->private_data, 503 memcpy (b, filp->private_data,
498 sizeof (openprom_property) 504 sizeof (openprom_property)
499 + strlen (op->name) + op->alloclen); 505 + strlen (op->name) + op->alloclen);
500 memset (((char *)b) + sizeof (openprom_property) 506 memset (b + sizeof (openprom_property)
501 + strlen (op->name) + op->alloclen, 507 + strlen (op->name) + op->alloclen,
502 0, 2*(count - *ppos) - op->alloclen); 508 0, 2*(count - *ppos) - op->alloclen);
503 op = (openprom_property *)b; 509 op = b;
504 op->alloclen = 2*(count + *ppos); 510 op->alloclen = 2*(count + *ppos);
505 b = filp->private_data; 511 b = filp->private_data;
506 filp->private_data = (void *)op; 512 filp->private_data = op;
507 kfree (b); 513 kfree (b);
508 } 514 }
509 p = op->value + *ppos - ((op->flag & OPP_QUOTED) ? 1 : 0); 515 p = op->value + *ppos - ((op->flag & OPP_QUOTED) ? 1 : 0);
@@ -532,15 +538,15 @@ write_try_string:
532 538
533int property_release (struct inode *inode, struct file *filp) 539int property_release (struct inode *inode, struct file *filp)
534{ 540{
535 openprom_property *op = (openprom_property *)filp->private_data; 541 openprom_property *op = filp->private_data;
536 int error; 542 int error;
537 u32 node; 543 u32 node;
538 544
539 if (!op) 545 if (!op)
540 return 0; 546 return 0;
541 lock_kernel(); 547 lock_kernel();
542 node = nodes[(u16)((long)inode->u.generic_ip)].node; 548 node = nodes[ptr_nod(inode->u.generic_ip)].node;
543 if ((u16)((long)inode->u.generic_ip) == aliases) { 549 if (ptr_nod(inode->u.generic_ip) == aliases) {
544 if ((op->flag & OPP_DIRTY) && (op->flag & OPP_STRING)) { 550 if ((op->flag & OPP_DIRTY) && (op->flag & OPP_STRING)) {
545 char *p = op->name; 551 char *p = op->name;
546 int i = (op->value - op->name) - strlen (op->name) - 1; 552 int i = (op->value - op->name) - strlen (op->name) - 1;
@@ -931,7 +937,7 @@ static int __init check_space (u16 n)
931 return -1; 937 return -1;
932 938
933 if (nodes) { 939 if (nodes) {
934 memcpy ((char *)pages, (char *)nodes, 940 memcpy ((char *)pages, nodes,
935 (1 << alloced) * PAGE_SIZE); 941 (1 << alloced) * PAGE_SIZE);
936 free_pages ((unsigned long)nodes, alloced); 942 free_pages ((unsigned long)nodes, alloced);
937 } 943 }
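
Note on the openpromfs hunks above: the cleanup introduces ptr_nod() to name the trick of packing a node index into the low 16 bits of inode->u.generic_ip, drops redundant casts around private_data, fixes the nodenum sprintf format, and sizes the hex scratch buffer from sizeof(long) instead of a hard-coded 17. A user-space sketch of the same packing scheme; ptr_prop() is a hypothetical companion helper, not part of the patch:

#include <stdio.h>

static inline unsigned short ptr_nod(void *p)
{
	return (long)p & 0xFFFF;		/* low 16 bits: node index */
}

static inline unsigned int ptr_prop(void *p)
{
	return (unsigned int)(long)p >> 16;	/* upper bits: property index */
}

int main(void)
{
	void *cookie = (void *)(long)((3 << 16) | 42);	/* prop 3 of node 42 */

	printf("node=%u prop=%u\n", (unsigned)ptr_nod(cookie), ptr_prop(cookie));
	return 0;
}
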
diff --git a/fs/select.c b/fs/select.c
index 9c4f0f2604f1..33b72ba0f86f 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -746,9 +746,9 @@ out_fds:
746asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, 746asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
747 long timeout_msecs) 747 long timeout_msecs)
748{ 748{
749 s64 timeout_jiffies = 0; 749 s64 timeout_jiffies;
750 750
751 if (timeout_msecs) { 751 if (timeout_msecs > 0) {
752#if HZ > 1000 752#if HZ > 1000
753 /* We can only overflow if HZ > 1000 */ 753 /* We can only overflow if HZ > 1000 */
754 if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ) 754 if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ)
@@ -756,6 +756,9 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
756 else 756 else
757#endif 757#endif
758 timeout_jiffies = msecs_to_jiffies(timeout_msecs); 758 timeout_jiffies = msecs_to_jiffies(timeout_msecs);
759 } else {
760 /* Infinite (< 0) or no (0) timeout */
761 timeout_jiffies = timeout_msecs;
759 } 762 }
760 763
761 return do_sys_poll(ufds, nfds, &timeout_jiffies); 764 return do_sys_poll(ufds, nfds, &timeout_jiffies);
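
Note on the sys_poll() hunk above: timeout_jiffies is now taken from timeout_msecs in the non-positive cases, so a negative timeout again means "wait indefinitely" instead of being treated like 0. The user-space contract this restores, as a small self-terminating sketch:

/* poll() timeout semantics: 0 returns immediately, a negative value
 * blocks until an event, a positive value is a bound in milliseconds. */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	struct pollfd pfd;

	if (pipe(pipefd) == -1) {
		perror("pipe");
		return 1;
	}
	pfd.fd = pipefd[0];
	pfd.events = POLLIN;

	/* nothing written yet: timeout 0 returns 0 immediately */
	printf("timeout 0  -> %d\n", poll(&pfd, 1, 0));

	/* make the read end ready, then "wait forever": returns at once */
	write(pipefd[1], "x", 1);
	printf("timeout -1 -> %d\n", poll(&pfd, 1, -1));

	close(pipefd[0]);
	close(pipefd[1]);
	return 0;
}
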
diff --git a/fs/smbfs/smbiod.c b/fs/smbfs/smbiod.c
index 481a97a423fa..3f71384020cb 100644
--- a/fs/smbfs/smbiod.c
+++ b/fs/smbfs/smbiod.c
@@ -20,6 +20,7 @@
20#include <linux/smp_lock.h> 20#include <linux/smp_lock.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/net.h> 22#include <linux/net.h>
23#include <linux/kthread.h>
23#include <net/ip.h> 24#include <net/ip.h>
24 25
25#include <linux/smb_fs.h> 26#include <linux/smb_fs.h>
@@ -40,7 +41,7 @@ enum smbiod_state {
40}; 41};
41 42
42static enum smbiod_state smbiod_state = SMBIOD_DEAD; 43static enum smbiod_state smbiod_state = SMBIOD_DEAD;
43static pid_t smbiod_pid; 44static struct task_struct *smbiod_thread;
44static DECLARE_WAIT_QUEUE_HEAD(smbiod_wait); 45static DECLARE_WAIT_QUEUE_HEAD(smbiod_wait);
45static LIST_HEAD(smb_servers); 46static LIST_HEAD(smb_servers);
46static DEFINE_SPINLOCK(servers_lock); 47static DEFINE_SPINLOCK(servers_lock);
@@ -67,20 +68,29 @@ void smbiod_wake_up(void)
67 */ 68 */
68static int smbiod_start(void) 69static int smbiod_start(void)
69{ 70{
70 pid_t pid; 71 struct task_struct *tsk;
72 int err = 0;
73
71 if (smbiod_state != SMBIOD_DEAD) 74 if (smbiod_state != SMBIOD_DEAD)
72 return 0; 75 return 0;
73 smbiod_state = SMBIOD_STARTING; 76 smbiod_state = SMBIOD_STARTING;
74 __module_get(THIS_MODULE); 77 __module_get(THIS_MODULE);
75 spin_unlock(&servers_lock); 78 spin_unlock(&servers_lock);
76 pid = kernel_thread(smbiod, NULL, 0); 79 tsk = kthread_run(smbiod, NULL, "smbiod");
77 if (pid < 0) 80 if (IS_ERR(tsk)) {
81 err = PTR_ERR(tsk);
78 module_put(THIS_MODULE); 82 module_put(THIS_MODULE);
83 }
79 84
80 spin_lock(&servers_lock); 85 spin_lock(&servers_lock);
81 smbiod_state = pid < 0 ? SMBIOD_DEAD : SMBIOD_RUNNING; 86 if (err < 0) {
82 smbiod_pid = pid; 87 smbiod_state = SMBIOD_DEAD;
83 return pid; 88 smbiod_thread = NULL;
89 } else {
90 smbiod_state = SMBIOD_RUNNING;
91 smbiod_thread = tsk;
92 }
93 return err;
84} 94}
85 95
86/* 96/*
@@ -290,8 +300,6 @@ out:
290 */ 300 */
291static int smbiod(void *unused) 301static int smbiod(void *unused)
292{ 302{
293 daemonize("smbiod");
294
295 allow_signal(SIGKILL); 303 allow_signal(SIGKILL);
296 304
297 VERBOSE("SMB Kernel thread starting (%d) ...\n", current->pid); 305 VERBOSE("SMB Kernel thread starting (%d) ...\n", current->pid);
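
Note on the smbfs hunks above: smbiod_start() switches from kernel_thread() plus pid bookkeeping to kthread_run(), keeping the task_struct pointer, propagating PTR_ERR() on failure, and dropping the daemonize() call since kthreads need no detaching. A rough pthread analogue of the new start path; the names below are placeholders, not smbfs code:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_t iod_thread;	/* like smbiod_thread: handle, not a pid */
static int iod_running;	/* like smbiod_state == SMBIOD_RUNNING */

static void *iod(void *unused)
{
	(void)unused;
	/* ... service loop ... */
	return NULL;
}

static int iod_start(void)
{
	int err;

	if (iod_running)
		return 0;

	err = pthread_create(&iod_thread, NULL, iod, NULL);
	if (err) {			/* like IS_ERR(tsk) */
		fprintf(stderr, "iod_start: %s\n", strerror(err));
		return -err;		/* propagate the error to the caller */
	}
	iod_running = 1;
	return 0;
}

int main(void)
{
	if (iod_start() == 0)
		pthread_join(iod_thread, NULL);
	return 0;
}
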
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 3ada9dcf55b8..95b878e5c7a0 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -21,14 +21,6 @@
21#include "swab.h" 21#include "swab.h"
22#include "util.h" 22#include "util.h"
23 23
24#undef UFS_BALLOC_DEBUG
25
26#ifdef UFS_BALLOC_DEBUG
27#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
28#else
29#define UFSD(x)
30#endif
31
32static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *); 24static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *); 25static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
34static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *); 26static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
@@ -39,7 +31,8 @@ static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *,
39/* 31/*
40 * Free 'count' fragments from fragment number 'fragment' 32 * Free 'count' fragments from fragment number 'fragment'
41 */ 33 */
42void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) { 34void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
35{
43 struct super_block * sb; 36 struct super_block * sb;
44 struct ufs_sb_private_info * uspi; 37 struct ufs_sb_private_info * uspi;
45 struct ufs_super_block_first * usb1; 38 struct ufs_super_block_first * usb1;
@@ -51,7 +44,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
51 uspi = UFS_SB(sb)->s_uspi; 44 uspi = UFS_SB(sb)->s_uspi;
52 usb1 = ubh_get_usb_first(uspi); 45 usb1 = ubh_get_usb_first(uspi);
53 46
54 UFSD(("ENTER, fragment %u, count %u\n", fragment, count)) 47 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
55 48
56 if (ufs_fragnum(fragment) + count > uspi->s_fpg) 49 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
57 ufs_error (sb, "ufs_free_fragments", "internal error"); 50 ufs_error (sb, "ufs_free_fragments", "internal error");
@@ -68,7 +61,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
68 ucpi = ufs_load_cylinder (sb, cgno); 61 ucpi = ufs_load_cylinder (sb, cgno);
69 if (!ucpi) 62 if (!ucpi)
70 goto failed; 63 goto failed;
71 ucg = ubh_get_ucg (UCPI_UBH); 64 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
72 if (!ufs_cg_chkmagic(sb, ucg)) { 65 if (!ufs_cg_chkmagic(sb, ucg)) {
73 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno); 66 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
74 goto failed; 67 goto failed;
@@ -76,11 +69,11 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
76 69
77 end_bit = bit + count; 70 end_bit = bit + count;
78 bbase = ufs_blknum (bit); 71 bbase = ufs_blknum (bit);
79 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase); 72 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
80 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1); 73 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
81 for (i = bit; i < end_bit; i++) { 74 for (i = bit; i < end_bit; i++) {
82 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i)) 75 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
83 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i); 76 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
84 else 77 else
85 ufs_error (sb, "ufs_free_fragments", 78 ufs_error (sb, "ufs_free_fragments",
86 "bit already cleared for fragment %u", i); 79 "bit already cleared for fragment %u", i);
@@ -90,51 +83,52 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
90 83
91 84
92 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 85 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
93 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count); 86 uspi->cs_total.cs_nffree += count;
94 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 87 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
95 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase); 88 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
96 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1); 89 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
97 90
98 /* 91 /*
99 * Trying to reassemble free fragments into block 92 * Trying to reassemble free fragments into block
100 */ 93 */
101 blkno = ufs_fragstoblks (bbase); 94 blkno = ufs_fragstoblks (bbase);
102 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) { 95 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
103 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb); 96 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
104 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb); 97 uspi->cs_total.cs_nffree -= uspi->s_fpb;
105 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb); 98 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
106 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 99 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
107 ufs_clusteracct (sb, ucpi, blkno, 1); 100 ufs_clusteracct (sb, ucpi, blkno, 1);
108 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 101 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
109 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1); 102 uspi->cs_total.cs_nbfree++;
110 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); 103 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
111 cylno = ufs_cbtocylno (bbase); 104 cylno = ufs_cbtocylno (bbase);
112 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1); 105 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
113 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); 106 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
114 } 107 }
115 108
116 ubh_mark_buffer_dirty (USPI_UBH); 109 ubh_mark_buffer_dirty (USPI_UBH(uspi));
117 ubh_mark_buffer_dirty (UCPI_UBH); 110 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
118 if (sb->s_flags & MS_SYNCHRONOUS) { 111 if (sb->s_flags & MS_SYNCHRONOUS) {
119 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi); 112 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
120 ubh_wait_on_buffer (UCPI_UBH); 113 ubh_wait_on_buffer (UCPI_UBH(ucpi));
121 } 114 }
122 sb->s_dirt = 1; 115 sb->s_dirt = 1;
123 116
124 unlock_super (sb); 117 unlock_super (sb);
125 UFSD(("EXIT\n")) 118 UFSD("EXIT\n");
126 return; 119 return;
127 120
128failed: 121failed:
129 unlock_super (sb); 122 unlock_super (sb);
130 UFSD(("EXIT (FAILED)\n")) 123 UFSD("EXIT (FAILED)\n");
131 return; 124 return;
132} 125}
133 126
134/* 127/*
135 * Free 'count' fragments from fragment number 'fragment' (free whole blocks) 128 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
136 */ 129 */
137void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) { 130void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
131{
138 struct super_block * sb; 132 struct super_block * sb;
139 struct ufs_sb_private_info * uspi; 133 struct ufs_sb_private_info * uspi;
140 struct ufs_super_block_first * usb1; 134 struct ufs_super_block_first * usb1;
@@ -146,7 +140,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
146 uspi = UFS_SB(sb)->s_uspi; 140 uspi = UFS_SB(sb)->s_uspi;
147 usb1 = ubh_get_usb_first(uspi); 141 usb1 = ubh_get_usb_first(uspi);
148 142
149 UFSD(("ENTER, fragment %u, count %u\n", fragment, count)) 143 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
150 144
151 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) { 145 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
152 ufs_error (sb, "ufs_free_blocks", "internal error, " 146 ufs_error (sb, "ufs_free_blocks", "internal error, "
@@ -162,7 +156,7 @@ do_more:
162 bit = ufs_dtogd (fragment); 156 bit = ufs_dtogd (fragment);
163 if (cgno >= uspi->s_ncg) { 157 if (cgno >= uspi->s_ncg) {
164 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device"); 158 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
165 goto failed; 159 goto failed_unlock;
166 } 160 }
167 end_bit = bit + count; 161 end_bit = bit + count;
168 if (end_bit > uspi->s_fpg) { 162 if (end_bit > uspi->s_fpg) {
@@ -173,36 +167,36 @@ do_more:
173 167
174 ucpi = ufs_load_cylinder (sb, cgno); 168 ucpi = ufs_load_cylinder (sb, cgno);
175 if (!ucpi) 169 if (!ucpi)
176 goto failed; 170 goto failed_unlock;
177 ucg = ubh_get_ucg (UCPI_UBH); 171 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
178 if (!ufs_cg_chkmagic(sb, ucg)) { 172 if (!ufs_cg_chkmagic(sb, ucg)) {
179 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno); 173 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
180 goto failed; 174 goto failed_unlock;
181 } 175 }
182 176
183 for (i = bit; i < end_bit; i += uspi->s_fpb) { 177 for (i = bit; i < end_bit; i += uspi->s_fpb) {
184 blkno = ufs_fragstoblks(i); 178 blkno = ufs_fragstoblks(i);
185 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) { 179 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
186 ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); 180 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
187 } 181 }
188 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno); 182 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
189 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 183 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
190 ufs_clusteracct (sb, ucpi, blkno, 1); 184 ufs_clusteracct (sb, ucpi, blkno, 1);
191 DQUOT_FREE_BLOCK(inode, uspi->s_fpb); 185 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
192 186
193 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 187 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
194 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1); 188 uspi->cs_total.cs_nbfree++;
195 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1); 189 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
196 cylno = ufs_cbtocylno(i); 190 cylno = ufs_cbtocylno(i);
197 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1); 191 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
198 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1); 192 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
199 } 193 }
200 194
201 ubh_mark_buffer_dirty (USPI_UBH); 195 ubh_mark_buffer_dirty (USPI_UBH(uspi));
202 ubh_mark_buffer_dirty (UCPI_UBH); 196 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
203 if (sb->s_flags & MS_SYNCHRONOUS) { 197 if (sb->s_flags & MS_SYNCHRONOUS) {
204 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi); 198 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
205 ubh_wait_on_buffer (UCPI_UBH); 199 ubh_wait_on_buffer (UCPI_UBH(ucpi));
206 } 200 }
207 201
208 if (overflow) { 202 if (overflow) {
@@ -213,38 +207,127 @@ do_more:
213 207
214 sb->s_dirt = 1; 208 sb->s_dirt = 1;
215 unlock_super (sb); 209 unlock_super (sb);
216 UFSD(("EXIT\n")) 210 UFSD("EXIT\n");
217 return; 211 return;
218 212
219failed: 213failed_unlock:
220 unlock_super (sb); 214 unlock_super (sb);
221 UFSD(("EXIT (FAILED)\n")) 215failed:
216 UFSD("EXIT (FAILED)\n");
222 return; 217 return;
223} 218}
224 219
220static struct page *ufs_get_locked_page(struct address_space *mapping,
221 unsigned long index)
222{
223 struct page *page;
224
225try_again:
226 page = find_lock_page(mapping, index);
227 if (!page) {
228 page = read_cache_page(mapping, index,
229 (filler_t*)mapping->a_ops->readpage,
230 NULL);
231 if (IS_ERR(page)) {
232 printk(KERN_ERR "ufs_change_blocknr: "
233 "read_cache_page error: ino %lu, index: %lu\n",
234 mapping->host->i_ino, index);
235 goto out;
236 }
225 237
238 lock_page(page);
226 239
227#define NULLIFY_FRAGMENTS \ 240 if (!PageUptodate(page) || PageError(page)) {
228 for (i = oldcount; i < newcount; i++) { \ 241 unlock_page(page);
229 bh = sb_getblk(sb, result + i); \ 242 page_cache_release(page);
230 memset (bh->b_data, 0, sb->s_blocksize); \ 243
231 set_buffer_uptodate(bh); \ 244 printk(KERN_ERR "ufs_change_blocknr: "
232 mark_buffer_dirty (bh); \ 245 "can not read page: ino %lu, index: %lu\n",
233 if (IS_SYNC(inode)) \ 246 mapping->host->i_ino, index);
234 sync_dirty_buffer(bh); \ 247
235 brelse (bh); \ 248 page = ERR_PTR(-EIO);
249 goto out;
250 }
236 } 251 }
237 252
238unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment, 253 if (unlikely(!page->mapping || !page_has_buffers(page))) {
239 unsigned goal, unsigned count, int * err ) 254 unlock_page(page);
255 page_cache_release(page);
256 goto try_again; /* we really need these buffers */
257 }
258out:
259 return page;
260}
261
262/*
263 * Modify inode page cache in such way:
264 * have - blocks with b_blocknr equal to oldb...oldb+count-1
265 * get - blocks with b_blocknr equal to newb...newb+count-1
266 * we also assume that the blocks oldb...oldb+count-1 are
267 * situated at the end of the file.
268 *
269 * We can come here from ufs_writepage or ufs_prepare_write,
270 * locked_page is an argument of these functions, so it is already locked.
271 */
272static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
273 unsigned int count, unsigned int oldb,
274 unsigned int newb, struct page *locked_page)
275{
276 unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
277 struct address_space *mapping = inode->i_mapping;
278 pgoff_t index, cur_index = locked_page->index;
279 unsigned int i, j;
280 struct page *page;
281 struct buffer_head *head, *bh;
282
283 UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
284 inode->i_ino, count, oldb, newb);
285
286 BUG_ON(!PageLocked(locked_page));
287
288 for (i = 0; i < count; i += blk_per_page) {
289 index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
290
291 if (likely(cur_index != index)) {
292 page = ufs_get_locked_page(mapping, index);
293 if (IS_ERR(page))
294 continue;
295 } else
296 page = locked_page;
297
298 j = i;
299 head = page_buffers(page);
300 bh = head;
301 do {
302 if (likely(bh->b_blocknr == j + oldb && j < count)) {
303 unmap_underlying_metadata(bh->b_bdev,
304 bh->b_blocknr);
305 bh->b_blocknr = newb + j++;
306 mark_buffer_dirty(bh);
307 }
308
309 bh = bh->b_this_page;
310 } while (bh != head);
311
312 set_page_dirty(page);
313
314 if (likely(cur_index != index)) {
315 unlock_page(page);
316 page_cache_release(page);
317 }
318 }
319 UFSD("EXIT\n");
320}
321
322unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
323 unsigned goal, unsigned count, int * err, struct page *locked_page)
240{ 324{
241 struct super_block * sb; 325 struct super_block * sb;
242 struct ufs_sb_private_info * uspi; 326 struct ufs_sb_private_info * uspi;
243 struct ufs_super_block_first * usb1; 327 struct ufs_super_block_first * usb1;
244 struct buffer_head * bh; 328 unsigned cgno, oldcount, newcount, tmp, request, result;
245 unsigned cgno, oldcount, newcount, tmp, request, i, result;
246 329
247 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count)) 330 UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);
248 331
249 sb = inode->i_sb; 332 sb = inode->i_sb;
250 uspi = UFS_SB(sb)->s_uspi; 333 uspi = UFS_SB(sb)->s_uspi;
@@ -273,14 +356,14 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
273 return (unsigned)-1; 356 return (unsigned)-1;
274 } 357 }
275 if (fragment < UFS_I(inode)->i_lastfrag) { 358 if (fragment < UFS_I(inode)->i_lastfrag) {
276 UFSD(("EXIT (ALREADY ALLOCATED)\n")) 359 UFSD("EXIT (ALREADY ALLOCATED)\n");
277 unlock_super (sb); 360 unlock_super (sb);
278 return 0; 361 return 0;
279 } 362 }
280 } 363 }
281 else { 364 else {
282 if (tmp) { 365 if (tmp) {
283 UFSD(("EXIT (ALREADY ALLOCATED)\n")) 366 UFSD("EXIT (ALREADY ALLOCATED)\n");
284 unlock_super(sb); 367 unlock_super(sb);
285 return 0; 368 return 0;
286 } 369 }
@@ -289,9 +372,9 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
289 /* 372 /*
290 * There is not enough space for user on the device 373 * There is not enough space for user on the device
291 */ 374 */
292 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) { 375 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
293 unlock_super (sb); 376 unlock_super (sb);
294 UFSD(("EXIT (FAILED)\n")) 377 UFSD("EXIT (FAILED)\n");
295 return 0; 378 return 0;
296 } 379 }
297 380
@@ -310,12 +393,10 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
310 if (result) { 393 if (result) {
311 *p = cpu_to_fs32(sb, result); 394 *p = cpu_to_fs32(sb, result);
312 *err = 0; 395 *err = 0;
313 inode->i_blocks += count << uspi->s_nspfshift;
314 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 396 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
315 NULLIFY_FRAGMENTS
316 } 397 }
317 unlock_super(sb); 398 unlock_super(sb);
318 UFSD(("EXIT, result %u\n", result)) 399 UFSD("EXIT, result %u\n", result);
319 return result; 400 return result;
320 } 401 }
321 402
@@ -325,11 +406,9 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
325 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err); 406 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
326 if (result) { 407 if (result) {
327 *err = 0; 408 *err = 0;
328 inode->i_blocks += count << uspi->s_nspfshift;
329 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 409 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
330 NULLIFY_FRAGMENTS
331 unlock_super(sb); 410 unlock_super(sb);
332 UFSD(("EXIT, result %u\n", result)) 411 UFSD("EXIT, result %u\n", result);
333 return result; 412 return result;
334 } 413 }
335 414
@@ -339,8 +418,8 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
339 switch (fs32_to_cpu(sb, usb1->fs_optim)) { 418 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
340 case UFS_OPTSPACE: 419 case UFS_OPTSPACE:
341 request = newcount; 420 request = newcount;
342 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) 421 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
343 > uspi->s_dsize * uspi->s_minfree / (2 * 100) ) 422 > uspi->s_dsize * uspi->s_minfree / (2 * 100))
344 break; 423 break;
345 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 424 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
346 break; 425 break;
@@ -349,7 +428,7 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
349 428
350 case UFS_OPTTIME: 429 case UFS_OPTTIME:
351 request = uspi->s_fpb; 430 request = uspi->s_fpb;
352 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize * 431 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
353 (uspi->s_minfree - 2) / 100) 432 (uspi->s_minfree - 2) / 100)
354 break; 433 break;
355 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 434 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
@@ -357,39 +436,22 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
357 } 436 }
358 result = ufs_alloc_fragments (inode, cgno, goal, request, err); 437 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
359 if (result) { 438 if (result) {
360 for (i = 0; i < oldcount; i++) { 439 ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
361 bh = sb_bread(sb, tmp + i); 440 result, locked_page);
362 if(bh) 441
363 {
364 clear_buffer_dirty(bh);
365 bh->b_blocknr = result + i;
366 mark_buffer_dirty (bh);
367 if (IS_SYNC(inode))
368 sync_dirty_buffer(bh);
369 brelse (bh);
370 }
371 else
372 {
373 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
374 unlock_super(sb);
375 return 0;
376 }
377 }
378 *p = cpu_to_fs32(sb, result); 442 *p = cpu_to_fs32(sb, result);
379 *err = 0; 443 *err = 0;
380 inode->i_blocks += count << uspi->s_nspfshift;
381 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 444 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
382 NULLIFY_FRAGMENTS
383 unlock_super(sb); 445 unlock_super(sb);
384 if (newcount < request) 446 if (newcount < request)
385 ufs_free_fragments (inode, result + newcount, request - newcount); 447 ufs_free_fragments (inode, result + newcount, request - newcount);
386 ufs_free_fragments (inode, tmp, oldcount); 448 ufs_free_fragments (inode, tmp, oldcount);
387 UFSD(("EXIT, result %u\n", result)) 449 UFSD("EXIT, result %u\n", result);
388 return result; 450 return result;
389 } 451 }
390 452
391 unlock_super(sb); 453 unlock_super(sb);
392 UFSD(("EXIT (FAILED)\n")) 454 UFSD("EXIT (FAILED)\n");
393 return 0; 455 return 0;
394} 456}
395 457
@@ -404,7 +466,7 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
404 struct ufs_cylinder_group * ucg; 466 struct ufs_cylinder_group * ucg;
405 unsigned cgno, fragno, fragoff, count, fragsize, i; 467 unsigned cgno, fragno, fragoff, count, fragsize, i;
406 468
407 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount)) 469 UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);
408 470
409 sb = inode->i_sb; 471 sb = inode->i_sb;
410 uspi = UFS_SB(sb)->s_uspi; 472 uspi = UFS_SB(sb)->s_uspi;
@@ -419,7 +481,7 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
419 ucpi = ufs_load_cylinder (sb, cgno); 481 ucpi = ufs_load_cylinder (sb, cgno);
420 if (!ucpi) 482 if (!ucpi)
421 return 0; 483 return 0;
422 ucg = ubh_get_ucg (UCPI_UBH); 484 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
423 if (!ufs_cg_chkmagic(sb, ucg)) { 485 if (!ufs_cg_chkmagic(sb, ucg)) {
424 ufs_panic (sb, "ufs_add_fragments", 486 ufs_panic (sb, "ufs_add_fragments",
425 "internal error, bad magic number on cg %u", cgno); 487 "internal error, bad magic number on cg %u", cgno);
@@ -429,14 +491,14 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
429 fragno = ufs_dtogd (fragment); 491 fragno = ufs_dtogd (fragment);
430 fragoff = ufs_fragnum (fragno); 492 fragoff = ufs_fragnum (fragno);
431 for (i = oldcount; i < newcount; i++) 493 for (i = oldcount; i < newcount; i++)
432 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i)) 494 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
433 return 0; 495 return 0;
434 /* 496 /*
435 * Block can be extended 497 * Block can be extended
436 */ 498 */
437 ucg->cg_time = cpu_to_fs32(sb, get_seconds()); 499 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
438 for (i = newcount; i < (uspi->s_fpb - fragoff); i++) 500 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
439 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i)) 501 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
440 break; 502 break;
441 fragsize = i - oldcount; 503 fragsize = i - oldcount;
442 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize])) 504 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
@@ -446,7 +508,7 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
446 if (fragsize != count) 508 if (fragsize != count)
447 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); 509 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
448 for (i = oldcount; i < newcount; i++) 510 for (i = oldcount; i < newcount; i++)
449 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i); 511 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
450 if(DQUOT_ALLOC_BLOCK(inode, count)) { 512 if(DQUOT_ALLOC_BLOCK(inode, count)) {
451 *err = -EDQUOT; 513 *err = -EDQUOT;
452 return 0; 514 return 0;
@@ -454,17 +516,17 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
454 516
455 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); 517 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
456 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 518 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
457 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count); 519 uspi->cs_total.cs_nffree -= count;
458 520
459 ubh_mark_buffer_dirty (USPI_UBH); 521 ubh_mark_buffer_dirty (USPI_UBH(uspi));
460 ubh_mark_buffer_dirty (UCPI_UBH); 522 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
461 if (sb->s_flags & MS_SYNCHRONOUS) { 523 if (sb->s_flags & MS_SYNCHRONOUS) {
462 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi); 524 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
463 ubh_wait_on_buffer (UCPI_UBH); 525 ubh_wait_on_buffer (UCPI_UBH(ucpi));
464 } 526 }
465 sb->s_dirt = 1; 527 sb->s_dirt = 1;
466 528
467 UFSD(("EXIT, fragment %u\n", fragment)) 529 UFSD("EXIT, fragment %u\n", fragment);
468 530
469 return fragment; 531 return fragment;
470} 532}
@@ -487,7 +549,7 @@ static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
487 struct ufs_cylinder_group * ucg; 549 struct ufs_cylinder_group * ucg;
488 unsigned oldcg, i, j, k, result, allocsize; 550 unsigned oldcg, i, j, k, result, allocsize;
489 551
490 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count)) 552 UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);
491 553
492 sb = inode->i_sb; 554 sb = inode->i_sb;
493 uspi = UFS_SB(sb)->s_uspi; 555 uspi = UFS_SB(sb)->s_uspi;
@@ -521,14 +583,14 @@ static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
521 UFS_TEST_FREE_SPACE_CG 583 UFS_TEST_FREE_SPACE_CG
522 } 584 }
523 585
524 UFSD(("EXIT (FAILED)\n")) 586 UFSD("EXIT (FAILED)\n");
525 return 0; 587 return 0;
526 588
527cg_found: 589cg_found:
528 ucpi = ufs_load_cylinder (sb, cgno); 590 ucpi = ufs_load_cylinder (sb, cgno);
529 if (!ucpi) 591 if (!ucpi)
530 return 0; 592 return 0;
531 ucg = ubh_get_ucg (UCPI_UBH); 593 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
532 if (!ufs_cg_chkmagic(sb, ucg)) 594 if (!ufs_cg_chkmagic(sb, ucg))
533 ufs_panic (sb, "ufs_alloc_fragments", 595 ufs_panic (sb, "ufs_alloc_fragments",
534 "internal error, bad magic number on cg %u", cgno); 596 "internal error, bad magic number on cg %u", cgno);
@@ -551,12 +613,12 @@ cg_found:
551 return 0; 613 return 0;
552 goal = ufs_dtogd (result); 614 goal = ufs_dtogd (result);
553 for (i = count; i < uspi->s_fpb; i++) 615 for (i = count; i < uspi->s_fpb; i++)
554 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i); 616 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
555 i = uspi->s_fpb - count; 617 i = uspi->s_fpb - count;
556 DQUOT_FREE_BLOCK(inode, i); 618 DQUOT_FREE_BLOCK(inode, i);
557 619
558 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 620 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
559 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i); 621 uspi->cs_total.cs_nffree += i;
560 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); 622 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
561 fs32_add(sb, &ucg->cg_frsum[i], 1); 623 fs32_add(sb, &ucg->cg_frsum[i], 1);
562 goto succed; 624 goto succed;
@@ -570,10 +632,10 @@ cg_found:
570 return 0; 632 return 0;
571 } 633 }
572 for (i = 0; i < count; i++) 634 for (i = 0; i < count; i++)
573 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i); 635 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
574 636
575 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); 637 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
576 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count); 638 uspi->cs_total.cs_nffree -= count;
577 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 639 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
578 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1); 640 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
579 641
@@ -581,16 +643,16 @@ cg_found:
581 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1); 643 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
582 644
583succed: 645succed:
584 ubh_mark_buffer_dirty (USPI_UBH); 646 ubh_mark_buffer_dirty (USPI_UBH(uspi));
585 ubh_mark_buffer_dirty (UCPI_UBH); 647 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
586 if (sb->s_flags & MS_SYNCHRONOUS) { 648 if (sb->s_flags & MS_SYNCHRONOUS) {
587 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi); 649 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
588 ubh_wait_on_buffer (UCPI_UBH); 650 ubh_wait_on_buffer (UCPI_UBH(ucpi));
589 } 651 }
590 sb->s_dirt = 1; 652 sb->s_dirt = 1;
591 653
592 result += cgno * uspi->s_fpg; 654 result += cgno * uspi->s_fpg;
593 UFSD(("EXIT3, result %u\n", result)) 655 UFSD("EXIT3, result %u\n", result);
594 return result; 656 return result;
595} 657}
596 658
@@ -603,12 +665,12 @@ static unsigned ufs_alloccg_block (struct inode * inode,
603 struct ufs_cylinder_group * ucg; 665 struct ufs_cylinder_group * ucg;
604 unsigned result, cylno, blkno; 666 unsigned result, cylno, blkno;
605 667
606 UFSD(("ENTER, goal %u\n", goal)) 668 UFSD("ENTER, goal %u\n", goal);
607 669
608 sb = inode->i_sb; 670 sb = inode->i_sb;
609 uspi = UFS_SB(sb)->s_uspi; 671 uspi = UFS_SB(sb)->s_uspi;
610 usb1 = ubh_get_usb_first(uspi); 672 usb1 = ubh_get_usb_first(uspi);
611 ucg = ubh_get_ucg(UCPI_UBH); 673 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
612 674
613 if (goal == 0) { 675 if (goal == 0) {
614 goal = ucpi->c_rotor; 676 goal = ucpi->c_rotor;
@@ -620,7 +682,7 @@ static unsigned ufs_alloccg_block (struct inode * inode,
620 /* 682 /*
621 * If the requested block is available, use it. 683 * If the requested block is available, use it.
622 */ 684 */
623 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) { 685 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
624 result = goal; 686 result = goal;
625 goto gotit; 687 goto gotit;
626 } 688 }
@@ -632,7 +694,7 @@ norot:
632 ucpi->c_rotor = result; 694 ucpi->c_rotor = result;
633gotit: 695gotit:
634 blkno = ufs_fragstoblks(result); 696 blkno = ufs_fragstoblks(result);
635 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno); 697 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
636 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 698 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
637 ufs_clusteracct (sb, ucpi, blkno, -1); 699 ufs_clusteracct (sb, ucpi, blkno, -1);
638 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { 700 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
@@ -641,31 +703,76 @@ gotit:
641 } 703 }
642 704
643 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1); 705 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
644 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1); 706 uspi->cs_total.cs_nbfree--;
645 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1); 707 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
646 cylno = ufs_cbtocylno(result); 708 cylno = ufs_cbtocylno(result);
647 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1); 709 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
648 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1); 710 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
649 711
650 UFSD(("EXIT, result %u\n", result)) 712 UFSD("EXIT, result %u\n", result);
651 713
652 return result; 714 return result;
653} 715}
654 716
655static unsigned ufs_bitmap_search (struct super_block * sb, 717static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
656 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count) 718 struct ufs_buffer_head *ubh,
719 unsigned begin, unsigned size,
720 unsigned char *table, unsigned char mask)
657{ 721{
658 struct ufs_sb_private_info * uspi; 722 unsigned rest, offset;
659 struct ufs_super_block_first * usb1; 723 unsigned char *cp;
660 struct ufs_cylinder_group * ucg; 724
661 unsigned start, length, location, result; 725
662 unsigned possition, fragsize, blockmap, mask; 726 offset = begin & ~uspi->s_fmask;
663 727 begin >>= uspi->s_fshift;
664 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count)) 728 for (;;) {
729 if ((offset + size) < uspi->s_fsize)
730 rest = size;
731 else
732 rest = uspi->s_fsize - offset;
733 size -= rest;
734 cp = ubh->bh[begin]->b_data + offset;
735 while ((table[*cp++] & mask) == 0 && --rest)
736 ;
737 if (rest || !size)
738 break;
739 begin++;
740 offset = 0;
741 }
742 return (size + rest);
743}
744
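ubh_scanc() above walks the free-fragment map through the buffers of a ufs_buffer_head and stops at the first byte whose lookup-table entry has the requested bit set; a nonzero return counts the bytes from the match to the end of the scanned range, and 0 means no match. A rough, self-contained userspace sketch of the table-driven idea; the table construction here is only an assumption for illustration (bit n-1 set when the byte contains a free run of exactly n bits):

#include <stdio.h>

static unsigned char fragtable[256];

static void build_table(void)	/* assumed construction, for illustration only */
{
	int byte, bit, run;

	for (byte = 0; byte < 256; byte++) {
		run = 0;
		for (bit = 0; bit < 8; bit++) {
			if (byte & (1 << bit)) {
				run++;
			} else if (run) {
				fragtable[byte] |= 1 << (run - 1);
				run = 0;
			}
		}
		if (run)
			fragtable[byte] |= 1 << (run - 1);
	}
}

int main(void)
{
	unsigned char freemap[] = { 0x00, 0x81, 0xe0, 0xff };	/* bit set = free */
	unsigned count = 3, i;

	build_table();
	for (i = 0; i < sizeof(freemap); i++)
		if (fragtable[freemap[i]] & (1u << (count - 1)))
			printf("byte %u (0x%02x) holds a free run of %u fragments\n",
			       i, freemap[i], count);
	return 0;
}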
745/*
746 * Find a block of the specified size in the specified cylinder group.
 747 * @sb: pointer to the super block
 748 * @ucpi: pointer to cylinder group info
 749 * @goal: block near which we want to find the new one
 750 * @count: requested number of fragments
751 */
752static unsigned ufs_bitmap_search(struct super_block *sb,
753 struct ufs_cg_private_info *ucpi,
754 unsigned goal, unsigned count)
755{
756 /*
757 * Bit patterns for identifying fragments in the block map
758 * used as ((map & mask_arr) == want_arr)
759 */
760 static const int mask_arr[9] = {
761 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
762 };
763 static const int want_arr[9] = {
764 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
765 };
766 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
767 struct ufs_super_block_first *usb1;
768 struct ufs_cylinder_group *ucg;
769 unsigned start, length, loc, result;
770 unsigned pos, want, blockmap, mask, end;
771
772 UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);
665 773
666 uspi = UFS_SB(sb)->s_uspi;
667 usb1 = ubh_get_usb_first (uspi); 774 usb1 = ubh_get_usb_first (uspi);
668 ucg = ubh_get_ucg(UCPI_UBH); 775 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
669 776
670 if (goal) 777 if (goal)
671 start = ufs_dtogd(goal) >> 3; 778 start = ufs_dtogd(goal) >> 3;
@@ -673,53 +780,50 @@ static unsigned ufs_bitmap_search (struct super_block * sb,
673 start = ucpi->c_frotor >> 3; 780 start = ucpi->c_frotor >> 3;
674 781
675 length = ((uspi->s_fpg + 7) >> 3) - start; 782 length = ((uspi->s_fpg + 7) >> 3) - start;
676 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length, 783 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
677 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other, 784 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
678 1 << (count - 1 + (uspi->s_fpb & 7))); 785 1 << (count - 1 + (uspi->s_fpb & 7)));
679 if (location == 0) { 786 if (loc == 0) {
680 length = start + 1; 787 length = start + 1;
681 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length, 788 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
682 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other, 789 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
683 1 << (count - 1 + (uspi->s_fpb & 7))); 790 ufs_fragtable_other,
684 if (location == 0) { 791 1 << (count - 1 + (uspi->s_fpb & 7)));
685 ufs_error (sb, "ufs_bitmap_search", 792 if (loc == 0) {
686 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n", 793 ufs_error(sb, "ufs_bitmap_search",
687 ucpi->c_cgx, start, length, count, ucpi->c_freeoff); 794 "bitmap corrupted on cg %u, start %u,"
795 " length %u, count %u, freeoff %u\n",
796 ucpi->c_cgx, start, length, count,
797 ucpi->c_freeoff);
688 return (unsigned)-1; 798 return (unsigned)-1;
689 } 799 }
690 start = 0; 800 start = 0;
691 } 801 }
692 result = (start + length - location) << 3; 802 result = (start + length - loc) << 3;
693 ucpi->c_frotor = result; 803 ucpi->c_frotor = result;
694 804
695 /* 805 /*
696 * found the byte in the map 806 * found the byte in the map
697 */ 807 */
698 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result); 808
699 fragsize = 0; 809 for (end = result + 8; result < end; result += uspi->s_fpb) {
700 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) { 810 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
701 if (blockmap & mask) { 811 blockmap <<= 1;
702 if (!(possition & uspi->s_fpbmask)) 812 mask = mask_arr[count];
703 fragsize = 1; 813 want = want_arr[count];
704 else 814 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
705 fragsize++; 815 if ((blockmap & mask) == want) {
706 } 816 UFSD("EXIT, result %u\n", result);
707 else { 817 return result + pos;
708 if (fragsize == count) { 818 }
709 result += possition - count; 819 mask <<= 1;
710 UFSD(("EXIT, result %u\n", result)) 820 want <<= 1;
711 return result; 821 }
712 } 822 }
713 fragsize = 0; 823
714 } 824 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
715 } 825 ucpi->c_cgx);
716 if (fragsize == count) { 826 UFSD("EXIT (FAILED)\n");
717 result += possition - count;
718 UFSD(("EXIT, result %u\n", result))
719 return result;
720 }
721 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
722 UFSD(("EXIT (FAILED)\n"))
723 return (unsigned)-1; 827 return (unsigned)-1;
724} 828}
725 829
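The mask_arr/want_arr pairs above encode "count free bits with a non-free bit on either side": after the map byte is shifted left by one, (blockmap & mask) == want at offset pos exactly when fragments pos..pos+count-1 are free and the neighbouring bits (or the block boundary) are not. A small userspace sketch of the same sliding test, using the tables from the function and a hypothetical free map byte:

#include <stdio.h>

int main(void)
{
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	unsigned fpb = 8, count = 2;
	unsigned blockmap = 0x66;	/* hypothetical free map 0b01100110, bit set = free */
	int mask = mask_arr[count];
	int want = want_arr[count];
	unsigned pos;

	blockmap <<= 1;		/* bit 0 now stands for "below the block", never free */
	for (pos = 0; pos <= fpb - count; pos++) {
		if (((int)blockmap & mask) == want)
			printf("free run of %u fragments at offset %u\n", count, pos);
		mask <<= 1;
		want <<= 1;
	}
	return 0;
}

With the sample byte this reports runs at offsets 1 and 5, the two isolated pairs of free fragments.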
@@ -734,9 +838,9 @@ static void ufs_clusteracct(struct super_block * sb,
734 return; 838 return;
735 839
736 if (cnt > 0) 840 if (cnt > 0)
737 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno); 841 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
738 else 842 else
739 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno); 843 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
740 844
741 /* 845 /*
742 * Find the size of the cluster going forward. 846 * Find the size of the cluster going forward.
@@ -745,7 +849,7 @@ static void ufs_clusteracct(struct super_block * sb,
745 end = start + uspi->s_contigsumsize; 849 end = start + uspi->s_contigsumsize;
746 if ( end >= ucpi->c_nclusterblks) 850 if ( end >= ucpi->c_nclusterblks)
747 end = ucpi->c_nclusterblks; 851 end = ucpi->c_nclusterblks;
748 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start); 852 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
749 if (i > end) 853 if (i > end)
750 i = end; 854 i = end;
751 forw = i - start; 855 forw = i - start;
@@ -757,7 +861,7 @@ static void ufs_clusteracct(struct super_block * sb,
757 end = start - uspi->s_contigsumsize; 861 end = start - uspi->s_contigsumsize;
758 if (end < 0 ) 862 if (end < 0 )
759 end = -1; 863 end = -1;
760 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end); 864 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
761 if ( i < end) 865 if ( i < end)
762 i = end; 866 i = end;
763 back = start - i; 867 back = start - i;
@@ -769,11 +873,11 @@ static void ufs_clusteracct(struct super_block * sb,
769 i = back + forw + 1; 873 i = back + forw + 1;
770 if (i > uspi->s_contigsumsize) 874 if (i > uspi->s_contigsumsize)
771 i = uspi->s_contigsumsize; 875 i = uspi->s_contigsumsize;
772 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt); 876 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
773 if (back > 0) 877 if (back > 0)
774 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt); 878 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
775 if (forw > 0) 879 if (forw > 0)
776 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt); 880 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
777} 881}
778 882
779 883
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c
index 14abb8b835f7..09c39e5e6386 100644
--- a/fs/ufs/cylinder.c
+++ b/fs/ufs/cylinder.c
@@ -20,15 +20,6 @@
20#include "swab.h" 20#include "swab.h"
21#include "util.h" 21#include "util.h"
22 22
23#undef UFS_CYLINDER_DEBUG
24
25#ifdef UFS_CYLINDER_DEBUG
26#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
27#else
28#define UFSD(x)
29#endif
30
31
32/* 23/*
33 * Read cylinder group into cache. The memory space for ufs_cg_private_info 24 * Read cylinder group into cache. The memory space for ufs_cg_private_info
34 * structure is already allocated during ufs_read_super. 25 * structure is already allocated during ufs_read_super.
@@ -42,19 +33,19 @@ static void ufs_read_cylinder (struct super_block * sb,
42 struct ufs_cylinder_group * ucg; 33 struct ufs_cylinder_group * ucg;
43 unsigned i, j; 34 unsigned i, j;
44 35
45 UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr)) 36 UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
46 uspi = sbi->s_uspi; 37 uspi = sbi->s_uspi;
47 ucpi = sbi->s_ucpi[bitmap_nr]; 38 ucpi = sbi->s_ucpi[bitmap_nr];
48 ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data; 39 ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
49 40
50 UCPI_UBH->fragment = ufs_cgcmin(cgno); 41 UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
51 UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits; 42 UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
52 /* 43 /*
53 * We have already the first fragment of cylinder group block in buffer 44 * We have already the first fragment of cylinder group block in buffer
54 */ 45 */
55 UCPI_UBH->bh[0] = sbi->s_ucg[cgno]; 46 UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
56 for (i = 1; i < UCPI_UBH->count; i++) 47 for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
57 if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i))) 48 if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
58 goto failed; 49 goto failed;
59 sbi->s_cgno[bitmap_nr] = cgno; 50 sbi->s_cgno[bitmap_nr] = cgno;
60 51
@@ -73,7 +64,7 @@ static void ufs_read_cylinder (struct super_block * sb,
73 ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff); 64 ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
74 ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff); 65 ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
75 ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks); 66 ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
76 UFSD(("EXIT\n")) 67 UFSD("EXIT\n");
77 return; 68 return;
78 69
79failed: 70failed:
@@ -95,15 +86,15 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
95 struct ufs_cylinder_group * ucg; 86 struct ufs_cylinder_group * ucg;
96 unsigned i; 87 unsigned i;
97 88
98 UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr)) 89 UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);
99 90
100 uspi = sbi->s_uspi; 91 uspi = sbi->s_uspi;
101 if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) { 92 if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
102 UFSD(("EXIT\n")) 93 UFSD("EXIT\n");
103 return; 94 return;
104 } 95 }
105 ucpi = sbi->s_ucpi[bitmap_nr]; 96 ucpi = sbi->s_ucpi[bitmap_nr];
106 ucg = ubh_get_ucg(UCPI_UBH); 97 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
107 98
108 if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) { 99 if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
109 ufs_panic (sb, "ufs_put_cylinder", "internal error"); 100 ufs_panic (sb, "ufs_put_cylinder", "internal error");
@@ -116,13 +107,13 @@ void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
116 ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor); 107 ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
117 ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor); 108 ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
118 ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor); 109 ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
119 ubh_mark_buffer_dirty (UCPI_UBH); 110 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
120 for (i = 1; i < UCPI_UBH->count; i++) { 111 for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
121 brelse (UCPI_UBH->bh[i]); 112 brelse (UCPI_UBH(ucpi)->bh[i]);
122 } 113 }
123 114
124 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY; 115 sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
125 UFSD(("EXIT\n")) 116 UFSD("EXIT\n");
126} 117}
127 118
128/* 119/*
@@ -139,7 +130,7 @@ struct ufs_cg_private_info * ufs_load_cylinder (
139 struct ufs_cg_private_info * ucpi; 130 struct ufs_cg_private_info * ucpi;
140 unsigned cg, i, j; 131 unsigned cg, i, j;
141 132
142 UFSD(("ENTER, cgno %u\n", cgno)) 133 UFSD("ENTER, cgno %u\n", cgno);
143 134
144 uspi = sbi->s_uspi; 135 uspi = sbi->s_uspi;
145 if (cgno >= uspi->s_ncg) { 136 if (cgno >= uspi->s_ncg) {
@@ -150,7 +141,7 @@ struct ufs_cg_private_info * ufs_load_cylinder (
 150 * Cylinder group number cg is in cache and it was last used 141 * Cylinder group number cg is in cache and it was last used
151 */ 142 */
152 if (sbi->s_cgno[0] == cgno) { 143 if (sbi->s_cgno[0] == cgno) {
153 UFSD(("EXIT\n")) 144 UFSD("EXIT\n");
154 return sbi->s_ucpi[0]; 145 return sbi->s_ucpi[0];
155 } 146 }
156 /* 147 /*
@@ -160,16 +151,16 @@ struct ufs_cg_private_info * ufs_load_cylinder (
160 if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) { 151 if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
161 if (sbi->s_cgno[cgno] != cgno) { 152 if (sbi->s_cgno[cgno] != cgno) {
162 ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache"); 153 ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
163 UFSD(("EXIT (FAILED)\n")) 154 UFSD("EXIT (FAILED)\n");
164 return NULL; 155 return NULL;
165 } 156 }
166 else { 157 else {
167 UFSD(("EXIT\n")) 158 UFSD("EXIT\n");
168 return sbi->s_ucpi[cgno]; 159 return sbi->s_ucpi[cgno];
169 } 160 }
170 } else { 161 } else {
171 ufs_read_cylinder (sb, cgno, cgno); 162 ufs_read_cylinder (sb, cgno, cgno);
172 UFSD(("EXIT\n")) 163 UFSD("EXIT\n");
173 return sbi->s_ucpi[cgno]; 164 return sbi->s_ucpi[cgno];
174 } 165 }
175 } 166 }
@@ -204,6 +195,6 @@ struct ufs_cg_private_info * ufs_load_cylinder (
204 sbi->s_ucpi[0] = ucpi; 195 sbi->s_ucpi[0] = ucpi;
205 ufs_read_cylinder (sb, cgno, 0); 196 ufs_read_cylinder (sb, cgno, 0);
206 } 197 }
207 UFSD(("EXIT\n")) 198 UFSD("EXIT\n");
208 return sbi->s_ucpi[0]; 199 return sbi->s_ucpi[0];
209} 200}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 1a561202d3f4..7f0a0aa63584 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -11,31 +11,20 @@
11 * 4.4BSD (FreeBSD) support added on February 1st 1998 by 11 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
12 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based 12 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
13 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>. 13 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
14 *
15 * Migration to usage of "page cache" on May 2006 by
16 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
14 */ 17 */
15 18
16#include <linux/time.h> 19#include <linux/time.h>
17#include <linux/fs.h> 20#include <linux/fs.h>
18#include <linux/ufs_fs.h> 21#include <linux/ufs_fs.h>
19#include <linux/smp_lock.h> 22#include <linux/smp_lock.h>
20#include <linux/buffer_head.h>
21#include <linux/sched.h> 23#include <linux/sched.h>
22 24
23#include "swab.h" 25#include "swab.h"
24#include "util.h" 26#include "util.h"
25 27
26#undef UFS_DIR_DEBUG
27
28#ifdef UFS_DIR_DEBUG
29#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
30#else
31#define UFSD(x)
32#endif
33
34static int
35ufs_check_dir_entry (const char *, struct inode *, struct ufs_dir_entry *,
36 struct buffer_head *, unsigned long);
37
38
39/* 28/*
40 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure. 29 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
41 * 30 *
@@ -51,495 +40,541 @@ static inline int ufs_match(struct super_block *sb, int len,
51 return !memcmp(name, de->d_name, len); 40 return !memcmp(name, de->d_name, len);
52} 41}
53 42
54/* 43static int ufs_commit_chunk(struct page *page, unsigned from, unsigned to)
55 * This is blatantly stolen from ext2fs
56 */
57static int
58ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
59{ 44{
60 struct inode *inode = filp->f_dentry->d_inode; 45 struct inode *dir = page->mapping->host;
61 int error = 0; 46 int err = 0;
62 unsigned long offset, lblk; 47 dir->i_version++;
63 int i, stored; 48 page->mapping->a_ops->commit_write(NULL, page, from, to);
64 struct buffer_head * bh; 49 if (IS_DIRSYNC(dir))
65 struct ufs_dir_entry * de; 50 err = write_one_page(page, 1);
66 struct super_block * sb; 51 else
67 int de_reclen; 52 unlock_page(page);
68 unsigned flags; 53 return err;
69 u64 blk= 0L; 54}
70
71 lock_kernel();
72
73 sb = inode->i_sb;
74 flags = UFS_SB(sb)->s_flags;
75
76 UFSD(("ENTER, ino %lu f_pos %lu\n", inode->i_ino, (unsigned long) filp->f_pos))
77
78 stored = 0;
79 bh = NULL;
80 offset = filp->f_pos & (sb->s_blocksize - 1);
81
82 while (!error && !stored && filp->f_pos < inode->i_size) {
83 lblk = (filp->f_pos) >> sb->s_blocksize_bits;
84 blk = ufs_frag_map(inode, lblk);
85 if (!blk || !(bh = sb_bread(sb, blk))) {
86 /* XXX - error - skip to the next block */
87 printk("ufs_readdir: "
88 "dir inode %lu has a hole at offset %lu\n",
89 inode->i_ino, (unsigned long int)filp->f_pos);
90 filp->f_pos += sb->s_blocksize - offset;
91 continue;
92 }
93
94revalidate:
95 /* If the dir block has changed since the last call to
96 * readdir(2), then we might be pointing to an invalid
97 * dirent right now. Scan from the start of the block
98 * to make sure. */
99 if (filp->f_version != inode->i_version) {
100 for (i = 0; i < sb->s_blocksize && i < offset; ) {
101 de = (struct ufs_dir_entry *)(bh->b_data + i);
102 /* It's too expensive to do a full
103 * dirent test each time round this
104 * loop, but we do have to test at
105 * least that it is non-zero. A
106 * failure will be detected in the
107 * dirent test below. */
108 de_reclen = fs16_to_cpu(sb, de->d_reclen);
109 if (de_reclen < 1)
110 break;
111 i += de_reclen;
112 }
113 offset = i;
114 filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
115 | offset;
116 filp->f_version = inode->i_version;
117 }
118 55
119 while (!error && filp->f_pos < inode->i_size 56static inline void ufs_put_page(struct page *page)
120 && offset < sb->s_blocksize) { 57{
121 de = (struct ufs_dir_entry *) (bh->b_data + offset); 58 kunmap(page);
122 /* XXX - put in a real ufs_check_dir_entry() */ 59 page_cache_release(page);
123 if ((de->d_reclen == 0) || (ufs_get_de_namlen(sb, de) == 0)) { 60}
124 filp->f_pos = (filp->f_pos &
125 (sb->s_blocksize - 1)) +
126 sb->s_blocksize;
127 brelse(bh);
128 unlock_kernel();
129 return stored;
130 }
131 if (!ufs_check_dir_entry ("ufs_readdir", inode, de,
132 bh, offset)) {
133 /* On error, skip the f_pos to the
134 next block. */
135 filp->f_pos = (filp->f_pos |
136 (sb->s_blocksize - 1)) +
137 1;
138 brelse (bh);
139 unlock_kernel();
140 return stored;
141 }
142 offset += fs16_to_cpu(sb, de->d_reclen);
143 if (de->d_ino) {
144 /* We might block in the next section
145 * if the data destination is
146 * currently swapped out. So, use a
147 * version stamp to detect whether or
148 * not the directory has been modified
149 * during the copy operation. */
150 unsigned long version = filp->f_version;
151 unsigned char d_type = DT_UNKNOWN;
152 61
153 UFSD(("filldir(%s,%u)\n", de->d_name, 62static inline unsigned long ufs_dir_pages(struct inode *inode)
154 fs32_to_cpu(sb, de->d_ino))) 63{
155 UFSD(("namlen %u\n", ufs_get_de_namlen(sb, de))) 64 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
65}
156 66
157 if ((flags & UFS_DE_MASK) == UFS_DE_44BSD) 67ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry)
158 d_type = de->d_u.d_44.d_type; 68{
159 error = filldir(dirent, de->d_name, 69 ino_t res = 0;
160 ufs_get_de_namlen(sb, de), filp->f_pos, 70 struct ufs_dir_entry *de;
161 fs32_to_cpu(sb, de->d_ino), d_type); 71 struct page *page;
162 if (error) 72
163 break; 73 de = ufs_find_entry(dir, dentry, &page);
164 if (version != filp->f_version) 74 if (de) {
165 goto revalidate; 75 res = fs32_to_cpu(dir->i_sb, de->d_ino);
166 stored ++; 76 ufs_put_page(page);
167 }
168 filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
169 }
170 offset = 0;
171 brelse (bh);
172 } 77 }
173 unlock_kernel(); 78 return res;
174 return 0;
175} 79}
176 80
177/*
178 * define how far ahead to read directories while searching them.
179 */
180#define NAMEI_RA_CHUNKS 2
181#define NAMEI_RA_BLOCKS 4
182#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
183#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
184 81
185/* 82/* Releases the page */
186 * ufs_find_entry() 83void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
187 * 84 struct page *page, struct inode *inode)
188 * finds an entry in the specified directory with the wanted name. It
189 * returns the cache buffer in which the entry was found, and the entry
190 * itself (as a parameter - res_bh). It does NOT read the inode of the
191 * entry - you'll have to do that yourself if you want to.
192 */
193struct ufs_dir_entry * ufs_find_entry (struct dentry *dentry,
194 struct buffer_head ** res_bh)
195{ 85{
196 struct super_block * sb; 86 unsigned from = (char *) de - (char *) page_address(page);
197 struct buffer_head * bh_use[NAMEI_RA_SIZE]; 87 unsigned to = from + fs16_to_cpu(dir->i_sb, de->d_reclen);
198 struct buffer_head * bh_read[NAMEI_RA_SIZE]; 88 int err;
199 unsigned long offset;
200 int block, toread, i, err;
201 struct inode *dir = dentry->d_parent->d_inode;
202 const char *name = dentry->d_name.name;
203 int namelen = dentry->d_name.len;
204 89
205 UFSD(("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen)) 90 lock_page(page);
206 91 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
207 *res_bh = NULL; 92 BUG_ON(err);
208 93 de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
209 sb = dir->i_sb; 94 ufs_set_de_type(dir->i_sb, de, inode->i_mode);
210 95 err = ufs_commit_chunk(page, from, to);
211 if (namelen > UFS_MAXNAMLEN) 96 ufs_put_page(page);
212 return NULL; 97 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
98 mark_inode_dirty(dir);
99}
213 100
214 memset (bh_use, 0, sizeof (bh_use));
215 toread = 0;
216 for (block = 0; block < NAMEI_RA_SIZE; ++block) {
217 struct buffer_head * bh;
218 101
219 if ((block << sb->s_blocksize_bits) >= dir->i_size) 102static void ufs_check_page(struct page *page)
220 break; 103{
221 bh = ufs_getfrag (dir, block, 0, &err); 104 struct inode *dir = page->mapping->host;
222 bh_use[block] = bh; 105 struct super_block *sb = dir->i_sb;
223 if (bh && !buffer_uptodate(bh)) 106 char *kaddr = page_address(page);
224 bh_read[toread++] = bh; 107 unsigned offs, rec_len;
108 unsigned limit = PAGE_CACHE_SIZE;
109 struct ufs_dir_entry *p;
110 char *error;
111
112 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
113 limit = dir->i_size & ~PAGE_CACHE_MASK;
114 if (limit & (UFS_SECTOR_SIZE - 1))
115 goto Ebadsize;
116 if (!limit)
117 goto out;
225 } 118 }
119 for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
120 p = (struct ufs_dir_entry *)(kaddr + offs);
121 rec_len = fs16_to_cpu(sb, p->d_reclen);
122
123 if (rec_len < UFS_DIR_REC_LEN(1))
124 goto Eshort;
125 if (rec_len & 3)
126 goto Ealign;
127 if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
128 goto Enamelen;
129 if (((offs + rec_len - 1) ^ offs) & ~(UFS_SECTOR_SIZE-1))
130 goto Espan;
131 if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
132 UFS_SB(sb)->s_uspi->s_ncg))
133 goto Einumber;
134 }
135 if (offs != limit)
136 goto Eend;
137out:
138 SetPageChecked(page);
139 return;
140
141 /* Too bad, we had an error */
142
143Ebadsize:
144 ufs_error(sb, "ufs_check_page",
145 "size of directory #%lu is not a multiple of chunk size",
146 dir->i_ino
147 );
148 goto fail;
149Eshort:
150 error = "rec_len is smaller than minimal";
151 goto bad_entry;
152Ealign:
153 error = "unaligned directory entry";
154 goto bad_entry;
155Enamelen:
156 error = "rec_len is too small for name_len";
157 goto bad_entry;
158Espan:
159 error = "directory entry across blocks";
160 goto bad_entry;
161Einumber:
162 error = "inode out of bounds";
163bad_entry:
164 ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
165 "offset=%lu, rec_len=%d, name_len=%d",
166 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
167 rec_len, ufs_get_de_namlen(sb, p));
168 goto fail;
169Eend:
170 p = (struct ufs_dir_entry *)(kaddr + offs);
 171 ufs_error (sb, "ufs_check_page",
 172 "entry in directory #%lu spans the page boundary"
 173 " offset=%lu",
174 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
175fail:
176 SetPageChecked(page);
177 SetPageError(page);
178}
226 179
227 for (block = 0, offset = 0; offset < dir->i_size; block++) { 180static struct page *ufs_get_page(struct inode *dir, unsigned long n)
228 struct buffer_head * bh; 181{
229 struct ufs_dir_entry * de; 182 struct address_space *mapping = dir->i_mapping;
230 char * dlimit; 183 struct page *page = read_cache_page(mapping, n,
231 184 (filler_t*)mapping->a_ops->readpage, NULL);
232 if ((block % NAMEI_RA_BLOCKS) == 0 && toread) { 185 if (!IS_ERR(page)) {
233 ll_rw_block (READ, toread, bh_read); 186 wait_on_page_locked(page);
234 toread = 0; 187 kmap(page);
235 } 188 if (!PageUptodate(page))
236 bh = bh_use[block % NAMEI_RA_SIZE]; 189 goto fail;
237 if (!bh) { 190 if (!PageChecked(page))
238 ufs_error (sb, "ufs_find_entry", 191 ufs_check_page(page);
239 "directory #%lu contains a hole at offset %lu", 192 if (PageError(page))
240 dir->i_ino, offset); 193 goto fail;
241 offset += sb->s_blocksize;
242 continue;
243 }
244 wait_on_buffer (bh);
245 if (!buffer_uptodate(bh)) {
246 /*
247 * read error: all bets are off
248 */
249 break;
250 }
251
252 de = (struct ufs_dir_entry *) bh->b_data;
253 dlimit = bh->b_data + sb->s_blocksize;
254 while ((char *) de < dlimit && offset < dir->i_size) {
255 /* this code is executed quadratically often */
256 /* do minimal checking by hand */
257 int de_len;
258
259 if ((char *) de + namelen <= dlimit &&
260 ufs_match(sb, namelen, name, de)) {
261 /* found a match -
262 just to be sure, do a full check */
263 if (!ufs_check_dir_entry("ufs_find_entry",
264 dir, de, bh, offset))
265 goto failed;
266 for (i = 0; i < NAMEI_RA_SIZE; ++i) {
267 if (bh_use[i] != bh)
268 brelse (bh_use[i]);
269 }
270 *res_bh = bh;
271 return de;
272 }
273 /* prevent looping on a bad block */
274 de_len = fs16_to_cpu(sb, de->d_reclen);
275 if (de_len <= 0)
276 goto failed;
277 offset += de_len;
278 de = (struct ufs_dir_entry *) ((char *) de + de_len);
279 }
280
281 brelse (bh);
282 if (((block + NAMEI_RA_SIZE) << sb->s_blocksize_bits ) >=
283 dir->i_size)
284 bh = NULL;
285 else
286 bh = ufs_getfrag (dir, block + NAMEI_RA_SIZE, 0, &err);
287 bh_use[block % NAMEI_RA_SIZE] = bh;
288 if (bh && !buffer_uptodate(bh))
289 bh_read[toread++] = bh;
290 } 194 }
195 return page;
291 196
292failed: 197fail:
293 for (i = 0; i < NAMEI_RA_SIZE; ++i) brelse (bh_use[i]); 198 ufs_put_page(page);
294 UFSD(("EXIT\n")) 199 return ERR_PTR(-EIO);
295 return NULL;
296} 200}
297 201
298static int 202/*
299ufs_check_dir_entry (const char *function, struct inode *dir, 203 * Return the offset into page `page_nr' of the last valid
300 struct ufs_dir_entry *de, struct buffer_head *bh, 204 * byte in that page, plus one.
301 unsigned long offset) 205 */
206static unsigned
207ufs_last_byte(struct inode *inode, unsigned long page_nr)
302{ 208{
303 struct super_block *sb = dir->i_sb; 209 unsigned last_byte = inode->i_size;
304 const char *error_msg = NULL; 210
305 int rlen = fs16_to_cpu(sb, de->d_reclen); 211 last_byte -= page_nr << PAGE_CACHE_SHIFT;
306 212 if (last_byte > PAGE_CACHE_SIZE)
307 if (rlen < UFS_DIR_REC_LEN(1)) 213 last_byte = PAGE_CACHE_SIZE;
308 error_msg = "reclen is smaller than minimal"; 214 return last_byte;
309 else if (rlen % 4 != 0)
310 error_msg = "reclen % 4 != 0";
311 else if (rlen < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)))
312 error_msg = "reclen is too small for namlen";
313 else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
314 error_msg = "directory entry across blocks";
315 else if (fs32_to_cpu(sb, de->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
316 UFS_SB(sb)->s_uspi->s_ncg))
317 error_msg = "inode out of bounds";
318
319 if (error_msg != NULL)
320 ufs_error (sb, function, "bad entry in directory #%lu, size %Lu: %s - "
321 "offset=%lu, inode=%lu, reclen=%d, namlen=%d",
322 dir->i_ino, dir->i_size, error_msg, offset,
323 (unsigned long)fs32_to_cpu(sb, de->d_ino),
324 rlen, ufs_get_de_namlen(sb, de));
325
326 return (error_msg == NULL ? 1 : 0);
327} 215}
328 216
329struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct buffer_head **p) 217static inline struct ufs_dir_entry *
218ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
330{ 219{
331 int err; 220 return (struct ufs_dir_entry *)((char *)p +
332 struct buffer_head *bh = ufs_bread (dir, 0, 0, &err); 221 fs16_to_cpu(sb, p->d_reclen));
333 struct ufs_dir_entry *res = NULL;
334
335 if (bh) {
336 res = (struct ufs_dir_entry *) bh->b_data;
337 res = (struct ufs_dir_entry *)((char *)res +
338 fs16_to_cpu(dir->i_sb, res->d_reclen));
339 }
340 *p = bh;
341 return res;
342} 222}
343ino_t ufs_inode_by_name(struct inode * dir, struct dentry *dentry) 223
224struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
344{ 225{
345 ino_t res = 0; 226 struct page *page = ufs_get_page(dir, 0);
346 struct ufs_dir_entry * de; 227 struct ufs_dir_entry *de = NULL;
347 struct buffer_head *bh;
348 228
349 de = ufs_find_entry (dentry, &bh); 229 if (!IS_ERR(page)) {
350 if (de) { 230 de = ufs_next_entry(dir->i_sb,
351 res = fs32_to_cpu(dir->i_sb, de->d_ino); 231 (struct ufs_dir_entry *)page_address(page));
352 brelse(bh); 232 *p = page;
353 } 233 }
354 return res; 234 return de;
355} 235}
356 236
357void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, 237/*
358 struct buffer_head *bh, struct inode *inode) 238 * ufs_find_entry()
239 *
240 * finds an entry in the specified directory with the wanted name. It
241 * returns the page in which the entry was found, and the entry itself
 242 * (as a parameter - res_page). Page is returned mapped and unlocked.
243 * Entry is guaranteed to be valid.
244 */
245struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
246 struct page **res_page)
359{ 247{
360 dir->i_version++; 248 struct super_block *sb = dir->i_sb;
361 de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino); 249 const char *name = dentry->d_name.name;
362 mark_buffer_dirty(bh); 250 int namelen = dentry->d_name.len;
363 if (IS_DIRSYNC(dir)) 251 unsigned reclen = UFS_DIR_REC_LEN(namelen);
364 sync_dirty_buffer(bh); 252 unsigned long start, n;
365 brelse (bh); 253 unsigned long npages = ufs_dir_pages(dir);
254 struct page *page = NULL;
255 struct ufs_inode_info *ui = UFS_I(dir);
256 struct ufs_dir_entry *de;
257
258 UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);
259
260 if (npages == 0 || namelen > UFS_MAXNAMLEN)
261 goto out;
262
263 /* OFFSET_CACHE */
264 *res_page = NULL;
265
266 start = ui->i_dir_start_lookup;
267
268 if (start >= npages)
269 start = 0;
270 n = start;
271 do {
272 char *kaddr;
273 page = ufs_get_page(dir, n);
274 if (!IS_ERR(page)) {
275 kaddr = page_address(page);
276 de = (struct ufs_dir_entry *) kaddr;
277 kaddr += ufs_last_byte(dir, n) - reclen;
278 while ((char *) de <= kaddr) {
279 if (de->d_reclen == 0) {
280 ufs_error(dir->i_sb, __FUNCTION__,
281 "zero-length directory entry");
282 ufs_put_page(page);
283 goto out;
284 }
285 if (ufs_match(sb, namelen, name, de))
286 goto found;
287 de = ufs_next_entry(sb, de);
288 }
289 ufs_put_page(page);
290 }
291 if (++n >= npages)
292 n = 0;
293 } while (n != start);
294out:
295 return NULL;
296
297found:
298 *res_page = page;
299 ui->i_dir_start_lookup = n;
300 return de;
366} 301}
367 302
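The lookup above remembers where it last succeeded (i_dir_start_lookup) and scans the directory's pages in a circle starting there, so repeated lookups in the same area of a large directory do not rescan from page 0. A standalone sketch of that wrap-around scan; the names and the "found" test are illustrative only:

#include <stdio.h>

static int scan_pages(unsigned npages, unsigned *start_cache, int target)
{
	unsigned start = *start_cache, n;

	if (npages == 0)
		return -1;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		if ((int)n == target) {		/* stand-in for "entry found in page n" */
			*start_cache = n;	/* remember where we found it */
			return (int)n;
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
	return -1;
}

int main(void)
{
	unsigned cache = 5;

	printf("found in page %d\n", scan_pages(8, &cache, 2));
	printf("next lookup starts at page %u\n", cache);
	return 0;
}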
368/* 303/*
369 * ufs_add_entry() 304 * Parent is locked.
370 *
371 * adds a file entry to the specified directory, using the same
372 * semantics as ufs_find_entry(). It returns NULL if it failed.
373 */ 305 */
374int ufs_add_link(struct dentry *dentry, struct inode *inode) 306int ufs_add_link(struct dentry *dentry, struct inode *inode)
375{ 307{
376 struct super_block * sb;
377 struct ufs_sb_private_info * uspi;
378 unsigned long offset;
379 unsigned fragoff;
380 unsigned short rec_len;
381 struct buffer_head * bh;
382 struct ufs_dir_entry * de, * de1;
383 struct inode *dir = dentry->d_parent->d_inode; 308 struct inode *dir = dentry->d_parent->d_inode;
384 const char *name = dentry->d_name.name; 309 const char *name = dentry->d_name.name;
385 int namelen = dentry->d_name.len; 310 int namelen = dentry->d_name.len;
311 struct super_block *sb = dir->i_sb;
312 unsigned reclen = UFS_DIR_REC_LEN(namelen);
313 unsigned short rec_len, name_len;
314 struct page *page = NULL;
315 struct ufs_dir_entry *de;
316 unsigned long npages = ufs_dir_pages(dir);
317 unsigned long n;
318 char *kaddr;
319 unsigned from, to;
386 int err; 320 int err;
387 321
388 UFSD(("ENTER, name %s, namelen %u\n", name, namelen)) 322 UFSD("ENTER, name %s, namelen %u\n", name, namelen);
389 323
390 sb = dir->i_sb; 324 /*
391 uspi = UFS_SB(sb)->s_uspi; 325 * We take care of directory expansion in the same loop.
392 326 * This code plays outside i_size, so it locks the page
393 if (!namelen) 327 * to protect that region.
394 return -EINVAL; 328 */
395 bh = ufs_bread (dir, 0, 0, &err); 329 for (n = 0; n <= npages; n++) {
396 if (!bh) 330 char *dir_end;
397 return err; 331
398 rec_len = UFS_DIR_REC_LEN(namelen); 332 page = ufs_get_page(dir, n);
399 offset = 0; 333 err = PTR_ERR(page);
400 de = (struct ufs_dir_entry *) bh->b_data; 334 if (IS_ERR(page))
401 while (1) { 335 goto out;
402 if ((char *)de >= UFS_SECTOR_SIZE + bh->b_data) { 336 lock_page(page);
403 fragoff = offset & ~uspi->s_fmask; 337 kaddr = page_address(page);
404 if (fragoff != 0 && fragoff != UFS_SECTOR_SIZE) 338 dir_end = kaddr + ufs_last_byte(dir, n);
405 ufs_error (sb, "ufs_add_entry", "internal error" 339 de = (struct ufs_dir_entry *)kaddr;
406 " fragoff %u", fragoff); 340 kaddr += PAGE_CACHE_SIZE - reclen;
407 if (!fragoff) { 341 while ((char *)de <= kaddr) {
408 brelse (bh); 342 if ((char *)de == dir_end) {
409 bh = ufs_bread (dir, offset >> sb->s_blocksize_bits, 1, &err); 343 /* We hit i_size */
410 if (!bh) 344 name_len = 0;
411 return err; 345 rec_len = UFS_SECTOR_SIZE;
412 }
413 if (dir->i_size <= offset) {
414 if (dir->i_size == 0) {
415 brelse(bh);
416 return -ENOENT;
417 }
418 de = (struct ufs_dir_entry *) (bh->b_data + fragoff);
419 de->d_ino = 0;
420 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE); 346 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE);
421 ufs_set_de_namlen(sb, de, 0); 347 de->d_ino = 0;
422 dir->i_size = offset + UFS_SECTOR_SIZE; 348 goto got_it;
423 mark_inode_dirty(dir);
424 } else {
425 de = (struct ufs_dir_entry *) bh->b_data;
426 } 349 }
350 if (de->d_reclen == 0) {
351 ufs_error(dir->i_sb, __FUNCTION__,
352 "zero-length directory entry");
353 err = -EIO;
354 goto out_unlock;
355 }
356 err = -EEXIST;
357 if (ufs_match(sb, namelen, name, de))
358 goto out_unlock;
359 name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
360 rec_len = fs16_to_cpu(sb, de->d_reclen);
361 if (!de->d_ino && rec_len >= reclen)
362 goto got_it;
363 if (rec_len >= name_len + reclen)
364 goto got_it;
365 de = (struct ufs_dir_entry *) ((char *) de + rec_len);
427 } 366 }
428 if (!ufs_check_dir_entry ("ufs_add_entry", dir, de, bh, offset)) { 367 unlock_page(page);
429 brelse (bh); 368 ufs_put_page(page);
430 return -ENOENT;
431 }
432 if (ufs_match(sb, namelen, name, de)) {
433 brelse (bh);
434 return -EEXIST;
435 }
436 if (de->d_ino == 0 && fs16_to_cpu(sb, de->d_reclen) >= rec_len)
437 break;
438
439 if (fs16_to_cpu(sb, de->d_reclen) >=
440 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)) + rec_len)
441 break;
442 offset += fs16_to_cpu(sb, de->d_reclen);
443 de = (struct ufs_dir_entry *) ((char *) de + fs16_to_cpu(sb, de->d_reclen));
444 } 369 }
445 370 BUG();
371 return -EINVAL;
372
373got_it:
374 from = (char*)de - (char*)page_address(page);
375 to = from + rec_len;
376 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
377 if (err)
378 goto out_unlock;
446 if (de->d_ino) { 379 if (de->d_ino) {
447 de1 = (struct ufs_dir_entry *) ((char *) de + 380 struct ufs_dir_entry *de1 =
448 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de))); 381 (struct ufs_dir_entry *) ((char *) de + name_len);
449 de1->d_reclen = 382 de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
450 cpu_to_fs16(sb, fs16_to_cpu(sb, de->d_reclen) - 383 de->d_reclen = cpu_to_fs16(sb, name_len);
451 UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de))); 384
452 de->d_reclen =
453 cpu_to_fs16(sb, UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de)));
454 de = de1; 385 de = de1;
455 } 386 }
456 de->d_ino = 0; 387
457 ufs_set_de_namlen(sb, de, namelen); 388 ufs_set_de_namlen(sb, de, namelen);
458 memcpy (de->d_name, name, namelen + 1); 389 memcpy(de->d_name, name, namelen + 1);
459 de->d_ino = cpu_to_fs32(sb, inode->i_ino); 390 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
460 ufs_set_de_type(sb, de, inode->i_mode); 391 ufs_set_de_type(sb, de, inode->i_mode);
461 mark_buffer_dirty(bh); 392
462 if (IS_DIRSYNC(dir)) 393 err = ufs_commit_chunk(page, from, to);
463 sync_dirty_buffer(bh);
464 brelse (bh);
465 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; 394 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
466 dir->i_version++; 395
467 mark_inode_dirty(dir); 396 mark_inode_dirty(dir);
397 /* OFFSET_CACHE */
398out_put:
399 ufs_put_page(page);
400out:
401 return err;
402out_unlock:
403 unlock_page(page);
404 goto out_put;
405}
468 406
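Free space inside an existing directory entry is reused by splitting it: an entry whose on-disk record length rec_len exceeds what its own name needs (name_len, as rounded by UFS_DIR_REC_LEN) can donate its tail to a new entry of size reclen whenever rec_len >= name_len + reclen, which is the test used in the loop above. A small userspace sketch of that arithmetic, with an assumed UFS_DIR_REC_LEN-style rounding rule:

#include <stdio.h>

/* Assumed record layout: 8 bytes of fixed fields plus the name,
 * rounded up to a multiple of 4. */
static unsigned rec_len_for(unsigned namelen)
{
	return (8 + namelen + 3) & ~3u;
}

int main(void)
{
	unsigned old_namelen = 3;	/* existing entry's name length */
	unsigned old_rec_len = 40;	/* space it currently spans on disk */
	unsigned new_namelen = 11;	/* name length of the entry to add */
	unsigned name_len = rec_len_for(old_namelen);
	unsigned reclen = rec_len_for(new_namelen);

	if (old_rec_len >= name_len + reclen)
		/* shrink the old entry to what it needs, give the rest away */
		printf("old entry keeps %u bytes, new entry gets %u bytes\n",
		       name_len, old_rec_len - name_len);
	else
		printf("does not fit, keep looking\n");
	return 0;
}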
469 UFSD(("EXIT\n")) 407static inline unsigned
408ufs_validate_entry(struct super_block *sb, char *base,
409 unsigned offset, unsigned mask)
410{
411 struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
412 struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
413 while ((char*)p < (char*)de) {
414 if (p->d_reclen == 0)
415 break;
416 p = ufs_next_entry(sb, p);
417 }
418 return (char *)p - base;
419}
420
421
422/*
423 * This is blatantly stolen from ext2fs
424 */
425static int
426ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
427{
428 loff_t pos = filp->f_pos;
429 struct inode *inode = filp->f_dentry->d_inode;
430 struct super_block *sb = inode->i_sb;
431 unsigned int offset = pos & ~PAGE_CACHE_MASK;
432 unsigned long n = pos >> PAGE_CACHE_SHIFT;
433 unsigned long npages = ufs_dir_pages(inode);
434 unsigned chunk_mask = ~(UFS_SECTOR_SIZE - 1);
435 int need_revalidate = filp->f_version != inode->i_version;
436 unsigned flags = UFS_SB(sb)->s_flags;
437
438 UFSD("BEGIN\n");
439
440 if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
441 return 0;
442
443 for ( ; n < npages; n++, offset = 0) {
444 char *kaddr, *limit;
445 struct ufs_dir_entry *de;
446
447 struct page *page = ufs_get_page(inode, n);
448
449 if (IS_ERR(page)) {
450 ufs_error(sb, __FUNCTION__,
451 "bad page in #%lu",
452 inode->i_ino);
453 filp->f_pos += PAGE_CACHE_SIZE - offset;
454 return -EIO;
455 }
456 kaddr = page_address(page);
457 if (unlikely(need_revalidate)) {
458 if (offset) {
459 offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
460 filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
461 }
462 filp->f_version = inode->i_version;
463 need_revalidate = 0;
464 }
465 de = (struct ufs_dir_entry *)(kaddr+offset);
466 limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
467 for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
468 if (de->d_reclen == 0) {
469 ufs_error(sb, __FUNCTION__,
470 "zero-length directory entry");
471 ufs_put_page(page);
472 return -EIO;
473 }
474 if (de->d_ino) {
475 int over;
476 unsigned char d_type = DT_UNKNOWN;
477
478 offset = (char *)de - kaddr;
479
480 UFSD("filldir(%s,%u)\n", de->d_name,
481 fs32_to_cpu(sb, de->d_ino));
482 UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));
483
484 if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
485 d_type = de->d_u.d_44.d_type;
486
487 over = filldir(dirent, de->d_name,
488 ufs_get_de_namlen(sb, de),
489 (n<<PAGE_CACHE_SHIFT) | offset,
490 fs32_to_cpu(sb, de->d_ino), d_type);
491 if (over) {
492 ufs_put_page(page);
493 return 0;
494 }
495 }
496 filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
497 }
498 ufs_put_page(page);
499 }
470 return 0; 500 return 0;
471} 501}
472 502
503
473/* 504/*
474 * ufs_delete_entry deletes a directory entry by merging it with the 505 * ufs_delete_entry deletes a directory entry by merging it with the
475 * previous entry. 506 * previous entry.
476 */ 507 */
477int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir, 508int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
478 struct buffer_head * bh ) 509 struct page * page)
479
480{ 510{
481 struct super_block * sb; 511 struct super_block *sb = inode->i_sb;
482 struct ufs_dir_entry * de, * pde; 512 struct address_space *mapping = page->mapping;
483 unsigned i; 513 char *kaddr = page_address(page);
484 514 unsigned from = ((char*)dir - kaddr) & ~(UFS_SECTOR_SIZE - 1);
485 UFSD(("ENTER\n")) 515 unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
516 struct ufs_dir_entry *pde = NULL;
517 struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
518 int err;
486 519
487 sb = inode->i_sb; 520 UFSD("ENTER\n");
488 i = 0; 521
489 pde = NULL; 522 UFSD("ino %u, reclen %u, namlen %u, name %s\n",
490 de = (struct ufs_dir_entry *) bh->b_data; 523 fs32_to_cpu(sb, de->d_ino),
491 524 fs16_to_cpu(sb, de->d_reclen),
492 UFSD(("ino %u, reclen %u, namlen %u, name %s\n", 525 ufs_get_de_namlen(sb, de), de->d_name);
493 fs32_to_cpu(sb, de->d_ino), 526
494 fs16_to_cpu(sb, de->d_reclen), 527 while ((char*)de < (char*)dir) {
495 ufs_get_de_namlen(sb, de), de->d_name)) 528 if (de->d_reclen == 0) {
496 529 ufs_error(inode->i_sb, __FUNCTION__,
497 while (i < bh->b_size) { 530 "zero-length directory entry");
498 if (!ufs_check_dir_entry ("ufs_delete_entry", inode, de, bh, i)) { 531 err = -EIO;
499 brelse(bh); 532 goto out;
500 return -EIO;
501 }
502 if (de == dir) {
503 if (pde)
504 fs16_add(sb, &pde->d_reclen,
505 fs16_to_cpu(sb, dir->d_reclen));
506 dir->d_ino = 0;
507 inode->i_version++;
508 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
509 mark_inode_dirty(inode);
510 mark_buffer_dirty(bh);
511 if (IS_DIRSYNC(inode))
512 sync_dirty_buffer(bh);
513 brelse(bh);
514 UFSD(("EXIT\n"))
515 return 0;
516 } 533 }
517 i += fs16_to_cpu(sb, de->d_reclen); 534 pde = de;
518 if (i == UFS_SECTOR_SIZE) pde = NULL; 535 de = ufs_next_entry(sb, de);
519 else pde = de;
520 de = (struct ufs_dir_entry *)
521 ((char *) de + fs16_to_cpu(sb, de->d_reclen));
522 if (i == UFS_SECTOR_SIZE && de->d_reclen == 0)
523 break;
524 } 536 }
525 UFSD(("EXIT\n")) 537 if (pde)
526 brelse(bh); 538 from = (char*)pde - (char*)page_address(page);
527 return -ENOENT; 539 lock_page(page);
540 err = mapping->a_ops->prepare_write(NULL, page, from, to);
541 BUG_ON(err);
542 if (pde)
543 pde->d_reclen = cpu_to_fs16(sb, to-from);
544 dir->d_ino = 0;
545 err = ufs_commit_chunk(page, from, to);
546 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
547 mark_inode_dirty(inode);
548out:
549 ufs_put_page(page);
550 UFSD("EXIT\n");
551 return err;
528} 552}
529 553
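The comment above ufs_delete_entry() describes the ext2-style trick this patch keeps: a directory chunk is a chain of variable-length records linked by d_reclen, so removing an entry only requires extending the previous record's length over it and clearing d_ino. A minimal user-space sketch of that idea, using a simplified hypothetical record layout (struct dent, next_entry and delete_entry are illustrative names, not the real on-disk ufs_dir_entry):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified, hypothetical record -- not the real on-disk ufs_dir_entry. */
struct dent {
	uint32_t ino;      /* 0 means "unused slot" */
	uint16_t reclen;   /* bytes from this record to the next one */
	char     name[26];
};

static struct dent *next_entry(struct dent *d)
{
	return (struct dent *)((char *)d + d->reclen);
}

/* Walk the reclen chain from the start of the chunk, remember the
 * previous record, then grow it over the victim's space. */
static void delete_entry(char *chunk, struct dent *victim)
{
	struct dent *de = (struct dent *)chunk, *prev = NULL;

	while (de != victim && de->reclen) {
		prev = de;
		de = next_entry(de);
	}
	if (de != victim)
		return;                         /* not found / broken chain */
	if (prev)
		prev->reclen += victim->reclen; /* merge with previous entry */
	victim->ino = 0;                        /* mark the slot unused */
}

int main(void)
{
	static uint32_t storage[24];            /* 96 aligned bytes */
	char *chunk = (char *)storage;
	struct dent *a = (struct dent *)chunk, *b, *c;

	a->ino = 2; a->reclen = 32; strcpy(a->name, "first");
	b = next_entry(a); b->ino = 3; b->reclen = 32; strcpy(b->name, "second");
	c = next_entry(b); c->ino = 4; c->reclen = 32; strcpy(c->name, "third");

	delete_entry(chunk, b);
	printf("a->reclen = %u, b->ino = %u\n",
	       (unsigned)a->reclen, (unsigned)b->ino);   /* 64, 0 */
	return 0;
}

Read back later, the grown record simply spans the dead slot, which is why ufs_readdir() above only emits entries whose d_ino is non-zero.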
530int ufs_make_empty(struct inode * inode, struct inode *dir) 554int ufs_make_empty(struct inode * inode, struct inode *dir)
531{ 555{
532 struct super_block * sb = dir->i_sb; 556 struct super_block * sb = dir->i_sb;
533 struct buffer_head * dir_block; 557 struct address_space *mapping = inode->i_mapping;
558 struct page *page = grab_cache_page(mapping, 0);
534 struct ufs_dir_entry * de; 559 struct ufs_dir_entry * de;
560 char *base;
535 int err; 561 int err;
536 562
537 dir_block = ufs_bread (inode, 0, 1, &err); 563 if (!page)
538 if (!dir_block) 564 return -ENOMEM;
539 return err; 565 kmap(page);
566 err = mapping->a_ops->prepare_write(NULL, page, 0, UFS_SECTOR_SIZE);
567 if (err) {
568 unlock_page(page);
569 goto fail;
570 }
571
572
573 base = (char*)page_address(page);
574 memset(base, 0, PAGE_CACHE_SIZE);
575
576 de = (struct ufs_dir_entry *) base;
540 577
541 inode->i_blocks = sb->s_blocksize / UFS_SECTOR_SIZE;
542 de = (struct ufs_dir_entry *) dir_block->b_data;
543 de->d_ino = cpu_to_fs32(sb, inode->i_ino); 578 de->d_ino = cpu_to_fs32(sb, inode->i_ino);
544 ufs_set_de_type(sb, de, inode->i_mode); 579 ufs_set_de_type(sb, de, inode->i_mode);
545 ufs_set_de_namlen(sb, de, 1); 580 ufs_set_de_namlen(sb, de, 1);
@@ -552,72 +587,65 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
552 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1)); 587 de->d_reclen = cpu_to_fs16(sb, UFS_SECTOR_SIZE - UFS_DIR_REC_LEN(1));
553 ufs_set_de_namlen(sb, de, 2); 588 ufs_set_de_namlen(sb, de, 2);
554 strcpy (de->d_name, ".."); 589 strcpy (de->d_name, "..");
555 mark_buffer_dirty(dir_block); 590
556 brelse (dir_block); 591 err = ufs_commit_chunk(page, 0, UFS_SECTOR_SIZE);
557 mark_inode_dirty(inode); 592fail:
558 return 0; 593 kunmap(page);
594 page_cache_release(page);
595 return err;
559} 596}
560 597
561/* 598/*
562 * routine to check that the specified directory is empty (for rmdir) 599 * routine to check that the specified directory is empty (for rmdir)
563 */ 600 */
564int ufs_empty_dir (struct inode * inode) 601int ufs_empty_dir(struct inode * inode)
565{ 602{
566 struct super_block * sb; 603 struct super_block *sb = inode->i_sb;
567 unsigned long offset; 604 struct page *page = NULL;
568 struct buffer_head * bh; 605 unsigned long i, npages = ufs_dir_pages(inode);
569 struct ufs_dir_entry * de, * de1; 606
570 int err; 607 for (i = 0; i < npages; i++) {
571 608 char *kaddr;
572 sb = inode->i_sb; 609 struct ufs_dir_entry *de;
573 610 page = ufs_get_page(inode, i);
574 if (inode->i_size < UFS_DIR_REC_LEN(1) + UFS_DIR_REC_LEN(2) || 611
575 !(bh = ufs_bread (inode, 0, 0, &err))) { 612 if (IS_ERR(page))
576 ufs_warning (inode->i_sb, "empty_dir", 613 continue;
577 "bad directory (dir #%lu) - no data block", 614
578 inode->i_ino); 615 kaddr = page_address(page);
579 return 1; 616 de = (struct ufs_dir_entry *)kaddr;
580 } 617 kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);
581 de = (struct ufs_dir_entry *) bh->b_data; 618
582 de1 = (struct ufs_dir_entry *) 619 while ((char *)de <= kaddr) {
583 ((char *)de + fs16_to_cpu(sb, de->d_reclen)); 620 if (de->d_reclen == 0) {
584 if (fs32_to_cpu(sb, de->d_ino) != inode->i_ino || de1->d_ino == 0 || 621 ufs_error(inode->i_sb, __FUNCTION__,
585 strcmp (".", de->d_name) || strcmp ("..", de1->d_name)) { 622 "zero-length directory entry: "
586 ufs_warning (inode->i_sb, "empty_dir", 623 "kaddr=%p, de=%p\n", kaddr, de);
587 "bad directory (dir #%lu) - no `.' or `..'", 624 goto not_empty;
588 inode->i_ino);
589 return 1;
590 }
591 offset = fs16_to_cpu(sb, de->d_reclen) + fs16_to_cpu(sb, de1->d_reclen);
592 de = (struct ufs_dir_entry *)
593 ((char *)de1 + fs16_to_cpu(sb, de1->d_reclen));
594 while (offset < inode->i_size ) {
595 if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
596 brelse (bh);
597 bh = ufs_bread (inode, offset >> sb->s_blocksize_bits, 1, &err);
598 if (!bh) {
599 ufs_error (sb, "empty_dir",
600 "directory #%lu contains a hole at offset %lu",
601 inode->i_ino, offset);
602 offset += sb->s_blocksize;
603 continue;
604 } 625 }
605 de = (struct ufs_dir_entry *) bh->b_data; 626 if (de->d_ino) {
606 } 627 u16 namelen=ufs_get_de_namlen(sb, de);
607 if (!ufs_check_dir_entry ("empty_dir", inode, de, bh, offset)) { 628 /* check for . and .. */
608 brelse (bh); 629 if (de->d_name[0] != '.')
609 return 1; 630 goto not_empty;
610 } 631 if (namelen > 2)
611 if (de->d_ino) { 632 goto not_empty;
612 brelse (bh); 633 if (namelen < 2) {
613 return 0; 634 if (inode->i_ino !=
635 fs32_to_cpu(sb, de->d_ino))
636 goto not_empty;
637 } else if (de->d_name[1] != '.')
638 goto not_empty;
639 }
640 de = ufs_next_entry(sb, de);
614 } 641 }
615 offset += fs16_to_cpu(sb, de->d_reclen); 642 ufs_put_page(page);
616 de = (struct ufs_dir_entry *)
617 ((char *)de + fs16_to_cpu(sb, de->d_reclen));
618 } 643 }
619 brelse (bh);
620 return 1; 644 return 1;
645
646not_empty:
647 ufs_put_page(page);
648 return 0;
621} 649}
622 650
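The rewritten ufs_empty_dir() walks every page of the directory and treats it as empty only if each live entry is "." or ".." (and "." additionally points back at the directory's own inode). A tiny user-space sketch of just the name test, using the explicit-length names directories store instead of NUL-terminated strings (the helper name is hypothetical):

#include <stdio.h>

/* Directory names carry an explicit length (namlen) rather than a
 * terminating NUL; hypothetical helper mirroring the checks above. */
static int is_dot_or_dotdot(const char *name, unsigned namlen)
{
	if (namlen == 0 || namlen > 2)
		return 0;
	if (name[0] != '.')
		return 0;
	return namlen == 1 || name[1] == '.';
}

int main(void)
{
	printf("%d %d %d\n",
	       is_dot_or_dotdot(".", 1),    /* 1 */
	       is_dot_or_dotdot("..", 2),   /* 1 */
	       is_dot_or_dotdot(".x", 2));  /* 0 */
	return 0;
}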
623const struct file_operations ufs_dir_operations = { 651const struct file_operations ufs_dir_operations = {
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index 312fd3f86313..0e5001512a9d 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -25,6 +25,26 @@
25 25
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/ufs_fs.h> 27#include <linux/ufs_fs.h>
28#include <linux/buffer_head.h> /* for sync_mapping_buffers() */
29
30static int ufs_sync_file(struct file *file, struct dentry *dentry, int datasync)
31{
32 struct inode *inode = dentry->d_inode;
33 int err;
34 int ret;
35
36 ret = sync_mapping_buffers(inode->i_mapping);
37 if (!(inode->i_state & I_DIRTY))
38 return ret;
39 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
40 return ret;
41
42 err = ufs_sync_inode(inode);
43 if (ret == 0)
44 ret = err;
45 return ret;
46}
47
28 48
29/* 49/*
30 * We have mostly NULL's here: the current defaults are ok for 50 * We have mostly NULL's here: the current defaults are ok for
@@ -37,6 +57,7 @@ const struct file_operations ufs_file_operations = {
37 .write = generic_file_write, 57 .write = generic_file_write,
38 .mmap = generic_file_mmap, 58 .mmap = generic_file_mmap,
39 .open = generic_file_open, 59 .open = generic_file_open,
60 .fsync = ufs_sync_file,
40 .sendfile = generic_file_sendfile, 61 .sendfile = generic_file_sendfile,
41}; 62};
42 63
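The new ufs_sync_file() flushes the mapping's buffers first, returns early if the inode itself is clean (or, for datasync, not dirty in a way that matters for data), and otherwise writes the inode while still reporting whichever step failed first. A small user-space sketch of that first-error-wins idiom, with hypothetical step_one()/step_two() standing in for the two sync calls:

#include <stdio.h>

/* Hypothetical stand-ins for sync_mapping_buffers() and ufs_sync_inode(). */
static int step_one(void) { return -5; }   /* pretend -EIO */
static int step_two(void) { return 0; }

static int sync_both(void)
{
	int ret = step_one();
	int err;

	/* a real fsync would return early here if the inode is clean */
	err = step_two();
	if (ret == 0)           /* keep the first error, if any */
		ret = err;
	return ret;
}

int main(void)
{
	printf("%d\n", sync_both());        /* -5: the first failure wins */
	return 0;
}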
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index c7a47ed4f430..9501dcd3b213 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -34,14 +34,6 @@
34#include "swab.h" 34#include "swab.h"
35#include "util.h" 35#include "util.h"
36 36
37#undef UFS_IALLOC_DEBUG
38
39#ifdef UFS_IALLOC_DEBUG
40#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
41#else
42#define UFSD(x)
43#endif
44
45/* 37/*
46 * NOTE! When we get the inode, we're the only people 38 * NOTE! When we get the inode, we're the only people
47 * that have access to it, and as such there are no 39 * that have access to it, and as such there are no
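Throughout this patch the per-file UFSD(x) definitions like the one deleted here disappear, and call sites change from UFSD(("fmt", args)) to UFSD("fmt", args); presumably a single variadic definition now lives in a shared header behind CONFIG_UFS_DEBUG (not visible in these hunks). A small sketch of the two macro styles, with hypothetical UFSD_OLD/UFSD_NEW names and printf standing in for printk:

#include <stdio.h>

/* Old style: a single macro argument, so every call site wraps its
 * arguments in an extra pair of parentheses and the body expands
 * them with a bare `printf x`. */
#define UFSD_OLD(x)  do { printf("%s: ", __func__); printf x; } while (0)

/* New style: a variadic macro -- no double parentheses.
 * (##__VA_ARGS__ is a GNU extension that swallows the trailing
 * comma when no arguments are given.) */
#define UFSD_NEW(fmt, ...) \
	printf("%s: " fmt, __func__, ##__VA_ARGS__)

int main(void)
{
	UFSD_OLD(("ino %lu\n", 42UL));  /* note the ((...)) */
	UFSD_NEW("ino %lu\n", 42UL);
	UFSD_NEW("EXIT\n");
	return 0;
}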
@@ -68,7 +60,7 @@ void ufs_free_inode (struct inode * inode)
68 int is_directory; 60 int is_directory;
69 unsigned ino, cg, bit; 61 unsigned ino, cg, bit;
70 62
71 UFSD(("ENTER, ino %lu\n", inode->i_ino)) 63 UFSD("ENTER, ino %lu\n", inode->i_ino);
72 64
73 sb = inode->i_sb; 65 sb = inode->i_sb;
74 uspi = UFS_SB(sb)->s_uspi; 66 uspi = UFS_SB(sb)->s_uspi;
@@ -91,7 +83,7 @@ void ufs_free_inode (struct inode * inode)
91 unlock_super (sb); 83 unlock_super (sb);
92 return; 84 return;
93 } 85 }
94 ucg = ubh_get_ucg(UCPI_UBH); 86 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
95 if (!ufs_cg_chkmagic(sb, ucg)) 87 if (!ufs_cg_chkmagic(sb, ucg))
96 ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number"); 88 ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");
97 89
@@ -104,33 +96,33 @@ void ufs_free_inode (struct inode * inode)
104 96
105 clear_inode (inode); 97 clear_inode (inode);
106 98
107 if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit)) 99 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
108 ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino); 100 ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
109 else { 101 else {
110 ubh_clrbit (UCPI_UBH, ucpi->c_iusedoff, bit); 102 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
111 if (ino < ucpi->c_irotor) 103 if (ino < ucpi->c_irotor)
112 ucpi->c_irotor = ino; 104 ucpi->c_irotor = ino;
113 fs32_add(sb, &ucg->cg_cs.cs_nifree, 1); 105 fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
114 fs32_add(sb, &usb1->fs_cstotal.cs_nifree, 1); 106 uspi->cs_total.cs_nifree++;
115 fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1); 107 fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);
116 108
117 if (is_directory) { 109 if (is_directory) {
118 fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1); 110 fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
119 fs32_sub(sb, &usb1->fs_cstotal.cs_ndir, 1); 111 uspi->cs_total.cs_ndir--;
120 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1); 112 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
121 } 113 }
122 } 114 }
123 115
124 ubh_mark_buffer_dirty (USPI_UBH); 116 ubh_mark_buffer_dirty (USPI_UBH(uspi));
125 ubh_mark_buffer_dirty (UCPI_UBH); 117 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
126 if (sb->s_flags & MS_SYNCHRONOUS) { 118 if (sb->s_flags & MS_SYNCHRONOUS) {
127 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **) &ucpi); 119 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
128 ubh_wait_on_buffer (UCPI_UBH); 120 ubh_wait_on_buffer (UCPI_UBH(ucpi));
129 } 121 }
130 122
131 sb->s_dirt = 1; 123 sb->s_dirt = 1;
132 unlock_super (sb); 124 unlock_super (sb);
133 UFSD(("EXIT\n")) 125 UFSD("EXIT\n");
134} 126}
135 127
136/* 128/*
@@ -155,7 +147,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
155 unsigned cg, bit, i, j, start; 147 unsigned cg, bit, i, j, start;
156 struct ufs_inode_info *ufsi; 148 struct ufs_inode_info *ufsi;
157 149
158 UFSD(("ENTER\n")) 150 UFSD("ENTER\n");
159 151
160 /* Cannot create files in a deleted directory */ 152 /* Cannot create files in a deleted directory */
161 if (!dir || !dir->i_nlink) 153 if (!dir || !dir->i_nlink)
@@ -213,43 +205,43 @@ cg_found:
213 ucpi = ufs_load_cylinder (sb, cg); 205 ucpi = ufs_load_cylinder (sb, cg);
214 if (!ucpi) 206 if (!ucpi)
215 goto failed; 207 goto failed;
216 ucg = ubh_get_ucg(UCPI_UBH); 208 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
217 if (!ufs_cg_chkmagic(sb, ucg)) 209 if (!ufs_cg_chkmagic(sb, ucg))
218 ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number"); 210 ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");
219 211
220 start = ucpi->c_irotor; 212 start = ucpi->c_irotor;
221 bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start); 213 bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
222 if (!(bit < uspi->s_ipg)) { 214 if (!(bit < uspi->s_ipg)) {
223 bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start); 215 bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
224 if (!(bit < start)) { 216 if (!(bit < start)) {
225 ufs_error (sb, "ufs_new_inode", 217 ufs_error (sb, "ufs_new_inode",
226 "cylinder group %u corrupted - error in inode bitmap\n", cg); 218 "cylinder group %u corrupted - error in inode bitmap\n", cg);
227 goto failed; 219 goto failed;
228 } 220 }
229 } 221 }
230 UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg)) 222 UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
231 if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit)) 223 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
232 ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit); 224 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
233 else { 225 else {
234 ufs_panic (sb, "ufs_new_inode", "internal error"); 226 ufs_panic (sb, "ufs_new_inode", "internal error");
235 goto failed; 227 goto failed;
236 } 228 }
237 229
238 fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1); 230 fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
239 fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1); 231 uspi->cs_total.cs_nifree--;
240 fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1); 232 fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
241 233
242 if (S_ISDIR(mode)) { 234 if (S_ISDIR(mode)) {
243 fs32_add(sb, &ucg->cg_cs.cs_ndir, 1); 235 fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
244 fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1); 236 uspi->cs_total.cs_ndir++;
245 fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1); 237 fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
246 } 238 }
247 239
248 ubh_mark_buffer_dirty (USPI_UBH); 240 ubh_mark_buffer_dirty (USPI_UBH(uspi));
249 ubh_mark_buffer_dirty (UCPI_UBH); 241 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
250 if (sb->s_flags & MS_SYNCHRONOUS) { 242 if (sb->s_flags & MS_SYNCHRONOUS) {
251 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **) &ucpi); 243 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
252 ubh_wait_on_buffer (UCPI_UBH); 244 ubh_wait_on_buffer (UCPI_UBH(ucpi));
253 } 245 }
254 sb->s_dirt = 1; 246 sb->s_dirt = 1;
255 247
@@ -272,6 +264,7 @@ cg_found:
272 ufsi->i_shadow = 0; 264 ufsi->i_shadow = 0;
273 ufsi->i_osync = 0; 265 ufsi->i_osync = 0;
274 ufsi->i_oeftflag = 0; 266 ufsi->i_oeftflag = 0;
267 ufsi->i_dir_start_lookup = 0;
275 memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1)); 268 memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
276 269
277 insert_inode_hash(inode); 270 insert_inode_hash(inode);
@@ -287,14 +280,14 @@ cg_found:
287 return ERR_PTR(-EDQUOT); 280 return ERR_PTR(-EDQUOT);
288 } 281 }
289 282
290 UFSD(("allocating inode %lu\n", inode->i_ino)) 283 UFSD("allocating inode %lu\n", inode->i_ino);
291 UFSD(("EXIT\n")) 284 UFSD("EXIT\n");
292 return inode; 285 return inode;
293 286
294failed: 287failed:
295 unlock_super (sb); 288 unlock_super (sb);
296 make_bad_inode(inode); 289 make_bad_inode(inode);
297 iput (inode); 290 iput (inode);
298 UFSD(("EXIT (FAILED)\n")) 291 UFSD("EXIT (FAILED)\n");
299 return ERR_PTR(-ENOSPC); 292 return ERR_PTR(-ENOSPC);
300} 293}
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 3c3f62ce2ad9..f2dbdf5a8769 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -41,14 +41,7 @@
41#include "swab.h" 41#include "swab.h"
42#include "util.h" 42#include "util.h"
43 43
44#undef UFS_INODE_DEBUG 44static u64 ufs_frag_map(struct inode *inode, sector_t frag);
45#undef UFS_INODE_DEBUG_MORE
46
47#ifdef UFS_INODE_DEBUG
48#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
49#else
50#define UFSD(x)
51#endif
52 45
53static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4]) 46static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
54{ 47{
@@ -61,7 +54,7 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off
61 int n = 0; 54 int n = 0;
62 55
63 56
64 UFSD(("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks)); 57 UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
65 if (i_block < 0) { 58 if (i_block < 0) {
66 ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0"); 59 ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
67 } else if (i_block < direct_blocks) { 60 } else if (i_block < direct_blocks) {
@@ -89,7 +82,7 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off
 89 * the beginning of the filesystem. 82 * the beginning of the filesystem.
90 */ 83 */
91 84
92u64 ufs_frag_map(struct inode *inode, sector_t frag) 85static u64 ufs_frag_map(struct inode *inode, sector_t frag)
93{ 86{
94 struct ufs_inode_info *ufsi = UFS_I(inode); 87 struct ufs_inode_info *ufsi = UFS_I(inode);
95 struct super_block *sb = inode->i_sb; 88 struct super_block *sb = inode->i_sb;
@@ -104,8 +97,8 @@ u64 ufs_frag_map(struct inode *inode, sector_t frag)
104 unsigned flags = UFS_SB(sb)->s_flags; 97 unsigned flags = UFS_SB(sb)->s_flags;
105 u64 temp = 0L; 98 u64 temp = 0L;
106 99
107 UFSD((": frag = %llu depth = %d\n", (unsigned long long)frag, depth)); 100 UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth);
108 UFSD((": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask)); 101 UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask);
109 102
110 if (depth == 0) 103 if (depth == 0)
111 return 0; 104 return 0;
@@ -161,26 +154,64 @@ out:
161 return ret; 154 return ret;
162} 155}
163 156
164static struct buffer_head * ufs_inode_getfrag (struct inode *inode, 157static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh)
165 unsigned int fragment, unsigned int new_fragment, 158{
166 unsigned int required, int *err, int metadata, long *phys, int *new) 159 lock_buffer(bh);
160 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
161 set_buffer_uptodate(bh);
162 mark_buffer_dirty(bh);
163 unlock_buffer(bh);
164 if (IS_SYNC(inode))
165 sync_dirty_buffer(bh);
166}
167
168static struct buffer_head *
169ufs_clear_frags(struct inode *inode, sector_t beg,
170 unsigned int n)
171{
172 struct buffer_head *res, *bh;
173 sector_t end = beg + n;
174
175 res = sb_getblk(inode->i_sb, beg);
176 ufs_clear_frag(inode, res);
177 for (++beg; beg < end; ++beg) {
178 bh = sb_getblk(inode->i_sb, beg);
179 ufs_clear_frag(inode, bh);
180 brelse(bh);
181 }
182 return res;
183}
184
185/**
186 * ufs_inode_getfrag() - allocate new fragment(s)
187 * @inode - pointer to inode
 188 * @fragment - number of `fragment' which holds the pointer
 189 *   to the newly allocated fragment(s)
 190 * @new_fragment - number of the newly allocated fragment(s)
 191 * @required - how many fragment(s) we require
 192 * @err - we set it if something goes wrong
 193 * @phys - pointer to where we save the physical number of the newly allocated fragments;
 194 *   NULL if we are not allocating data (indirect blocks, for example)
 195 * @new - we set it if we allocate a new block
196 * @locked_page - for ufs_new_fragments()
197 */
198static struct buffer_head *
199ufs_inode_getfrag(struct inode *inode, unsigned int fragment,
200 sector_t new_fragment, unsigned int required, int *err,
201 long *phys, int *new, struct page *locked_page)
167{ 202{
168 struct ufs_inode_info *ufsi = UFS_I(inode); 203 struct ufs_inode_info *ufsi = UFS_I(inode);
169 struct super_block * sb; 204 struct super_block *sb = inode->i_sb;
170 struct ufs_sb_private_info * uspi; 205 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
171 struct buffer_head * result; 206 struct buffer_head * result;
172 unsigned block, blockoff, lastfrag, lastblock, lastblockoff; 207 unsigned block, blockoff, lastfrag, lastblock, lastblockoff;
173 unsigned tmp, goal; 208 unsigned tmp, goal;
174 __fs32 * p, * p2; 209 __fs32 * p, * p2;
175 unsigned flags = 0;
176 210
177 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u, required %u\n", 211 UFSD("ENTER, ino %lu, fragment %u, new_fragment %llu, required %u, "
178 inode->i_ino, fragment, new_fragment, required)) 212 "metadata %d\n", inode->i_ino, fragment,
213 (unsigned long long)new_fragment, required, !phys);
179 214
180 sb = inode->i_sb;
181 uspi = UFS_SB(sb)->s_uspi;
182
183 flags = UFS_SB(sb)->s_flags;
184 /* TODO : to be done for write support 215 /* TODO : to be done for write support
185 if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 216 if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
186 goto ufs2; 217 goto ufs2;
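In the reworked allocator helpers the old metadata flag is gone: a NULL phys now means the caller wants the block itself back as a buffer_head (indirect/metadata blocks), while a non-NULL phys means a data allocation whose physical fragment number is stored through the pointer and no buffer_head is returned. A tiny user-space sketch of that NULL-out-parameter convention (getfrag and struct buffer are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins: a "buffer" for the metadata case, a plain
 * number for the data case. */
struct buffer { long blocknr; };

static struct buffer *getfrag(long frag, long *phys)
{
	long allocated = 1000 + frag;       /* pretend allocation result */

	if (!phys) {
		/* metadata: hand the caller the block itself */
		struct buffer *b = malloc(sizeof(*b));

		if (b)
			b->blocknr = allocated;
		return b;
	}
	/* data: report the physical number, return no buffer */
	*phys = allocated;
	return NULL;
}

int main(void)
{
	long phys = 0;
	struct buffer *b = getfrag(7, NULL);    /* "metadata" call */

	getfrag(7, &phys);                      /* "data" call */
	printf("buffer %ld, phys %ld\n", b ? b->blocknr : -1L, phys);
	free(b);
	return 0;
}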
@@ -195,16 +226,16 @@ repeat:
195 tmp = fs32_to_cpu(sb, *p); 226 tmp = fs32_to_cpu(sb, *p);
196 lastfrag = ufsi->i_lastfrag; 227 lastfrag = ufsi->i_lastfrag;
197 if (tmp && fragment < lastfrag) { 228 if (tmp && fragment < lastfrag) {
198 if (metadata) { 229 if (!phys) {
199 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 230 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
200 if (tmp == fs32_to_cpu(sb, *p)) { 231 if (tmp == fs32_to_cpu(sb, *p)) {
201 UFSD(("EXIT, result %u\n", tmp + blockoff)) 232 UFSD("EXIT, result %u\n", tmp + blockoff);
202 return result; 233 return result;
203 } 234 }
204 brelse (result); 235 brelse (result);
205 goto repeat; 236 goto repeat;
206 } else { 237 } else {
207 *phys = tmp; 238 *phys = tmp + blockoff;
208 return NULL; 239 return NULL;
209 } 240 }
210 } 241 }
@@ -221,7 +252,8 @@ repeat:
221 if (lastblockoff) { 252 if (lastblockoff) {
222 p2 = ufsi->i_u1.i_data + lastblock; 253 p2 = ufsi->i_u1.i_data + lastblock;
223 tmp = ufs_new_fragments (inode, p2, lastfrag, 254 tmp = ufs_new_fragments (inode, p2, lastfrag,
224 fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err); 255 fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff,
256 err, locked_page);
225 if (!tmp) { 257 if (!tmp) {
226 if (lastfrag != ufsi->i_lastfrag) 258 if (lastfrag != ufsi->i_lastfrag)
227 goto repeat; 259 goto repeat;
@@ -233,14 +265,16 @@ repeat:
233 } 265 }
234 goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; 266 goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
235 tmp = ufs_new_fragments (inode, p, fragment - blockoff, 267 tmp = ufs_new_fragments (inode, p, fragment - blockoff,
236 goal, required + blockoff, err); 268 goal, required + blockoff,
269 err, locked_page);
237 } 270 }
238 /* 271 /*
239 * We will extend last allocated block 272 * We will extend last allocated block
240 */ 273 */
241 else if (lastblock == block) { 274 else if (lastblock == block) {
242 tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff), 275 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
243 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err); 276 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
277 err, locked_page);
244 } 278 }
245 /* 279 /*
246 * We will allocate new block before last allocated block 280 * We will allocate new block before last allocated block
@@ -248,8 +282,8 @@ repeat:
248 else /* (lastblock > block) */ { 282 else /* (lastblock > block) */ {
249 if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) 283 if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
250 goal = tmp + uspi->s_fpb; 284 goal = tmp + uspi->s_fpb;
251 tmp = ufs_new_fragments (inode, p, fragment - blockoff, 285 tmp = ufs_new_fragments(inode, p, fragment - blockoff,
252 goal, uspi->s_fpb, err); 286 goal, uspi->s_fpb, err, locked_page);
253 } 287 }
254 if (!tmp) { 288 if (!tmp) {
255 if ((!blockoff && *p) || 289 if ((!blockoff && *p) ||
@@ -259,14 +293,10 @@ repeat:
259 return NULL; 293 return NULL;
260 } 294 }
261 295
262 /* The nullification of framgents done in ufs/balloc.c is 296 if (!phys) {
263 * something I don't have the stomache to move into here right 297 result = ufs_clear_frags(inode, tmp + blockoff, required);
264 * now. -DaveM
265 */
266 if (metadata) {
267 result = sb_getblk(inode->i_sb, tmp + blockoff);
268 } else { 298 } else {
269 *phys = tmp; 299 *phys = tmp + blockoff;
270 result = NULL; 300 result = NULL;
271 *err = 0; 301 *err = 0;
272 *new = 1; 302 *new = 1;
@@ -276,7 +306,7 @@ repeat:
276 if (IS_SYNC(inode)) 306 if (IS_SYNC(inode))
277 ufs_sync_inode (inode); 307 ufs_sync_inode (inode);
278 mark_inode_dirty(inode); 308 mark_inode_dirty(inode);
279 UFSD(("EXIT, result %u\n", tmp + blockoff)) 309 UFSD("EXIT, result %u\n", tmp + blockoff);
280 return result; 310 return result;
281 311
282 /* This part : To be implemented .... 312 /* This part : To be implemented ....
@@ -295,22 +325,35 @@ repeat2:
295 */ 325 */
296} 326}
297 327
298static struct buffer_head * ufs_block_getfrag (struct inode *inode, 328/**
299 struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment, 329 * ufs_inode_getblock() - allocate new block
300 unsigned int blocksize, int * err, int metadata, long *phys, int *new) 330 * @inode - pointer to inode
 331 * @bh - pointer to the block which holds the "pointer" to the newly allocated block
 332 * @fragment - number of `fragment' which holds the pointer
 333 *   to the newly allocated block
 334 * @new_fragment - number of the newly allocated fragment
335 * (block will hold this fragment and also uspi->s_fpb-1)
336 * @err - see ufs_inode_getfrag()
337 * @phys - see ufs_inode_getfrag()
338 * @new - see ufs_inode_getfrag()
339 * @locked_page - see ufs_inode_getfrag()
340 */
341static struct buffer_head *
342ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
343 unsigned int fragment, sector_t new_fragment, int *err,
344 long *phys, int *new, struct page *locked_page)
301{ 345{
302 struct super_block * sb; 346 struct super_block *sb = inode->i_sb;
303 struct ufs_sb_private_info * uspi; 347 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
304 struct buffer_head * result; 348 struct buffer_head * result;
305 unsigned tmp, goal, block, blockoff; 349 unsigned tmp, goal, block, blockoff;
306 __fs32 * p; 350 __fs32 * p;
307 351
308 sb = inode->i_sb;
309 uspi = UFS_SB(sb)->s_uspi;
310 block = ufs_fragstoblks (fragment); 352 block = ufs_fragstoblks (fragment);
311 blockoff = ufs_fragnum (fragment); 353 blockoff = ufs_fragnum (fragment);
312 354
313 UFSD(("ENTER, ino %lu, fragment %u, new_fragment %u\n", inode->i_ino, fragment, new_fragment)) 355 UFSD("ENTER, ino %lu, fragment %u, new_fragment %llu, metadata %d\n",
356 inode->i_ino, fragment, (unsigned long long)new_fragment, !phys);
314 357
315 result = NULL; 358 result = NULL;
316 if (!bh) 359 if (!bh)
@@ -326,14 +369,14 @@ static struct buffer_head * ufs_block_getfrag (struct inode *inode,
326repeat: 369repeat:
327 tmp = fs32_to_cpu(sb, *p); 370 tmp = fs32_to_cpu(sb, *p);
328 if (tmp) { 371 if (tmp) {
329 if (metadata) { 372 if (!phys) {
330 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff); 373 result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
331 if (tmp == fs32_to_cpu(sb, *p)) 374 if (tmp == fs32_to_cpu(sb, *p))
332 goto out; 375 goto out;
333 brelse (result); 376 brelse (result);
334 goto repeat; 377 goto repeat;
335 } else { 378 } else {
336 *phys = tmp; 379 *phys = tmp + blockoff;
337 goto out; 380 goto out;
338 } 381 }
339 } 382 }
@@ -342,21 +385,19 @@ repeat:
342 goal = tmp + uspi->s_fpb; 385 goal = tmp + uspi->s_fpb;
343 else 386 else
344 goal = bh->b_blocknr + uspi->s_fpb; 387 goal = bh->b_blocknr + uspi->s_fpb;
345 tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err); 388 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
389 uspi->s_fpb, err, locked_page);
346 if (!tmp) { 390 if (!tmp) {
347 if (fs32_to_cpu(sb, *p)) 391 if (fs32_to_cpu(sb, *p))
348 goto repeat; 392 goto repeat;
349 goto out; 393 goto out;
350 } 394 }
351 395
352 /* The nullification of framgents done in ufs/balloc.c is 396
353 * something I don't have the stomache to move into here right 397 if (!phys) {
354 * now. -DaveM 398 result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb);
355 */
356 if (metadata) {
357 result = sb_getblk(sb, tmp + blockoff);
358 } else { 399 } else {
359 *phys = tmp; 400 *phys = tmp + blockoff;
360 *new = 1; 401 *new = 1;
361 } 402 }
362 403
@@ -365,18 +406,19 @@ repeat:
365 sync_dirty_buffer(bh); 406 sync_dirty_buffer(bh);
366 inode->i_ctime = CURRENT_TIME_SEC; 407 inode->i_ctime = CURRENT_TIME_SEC;
367 mark_inode_dirty(inode); 408 mark_inode_dirty(inode);
368 UFSD(("result %u\n", tmp + blockoff)); 409 UFSD("result %u\n", tmp + blockoff);
369out: 410out:
370 brelse (bh); 411 brelse (bh);
371 UFSD(("EXIT\n")); 412 UFSD("EXIT\n");
372 return result; 413 return result;
373} 414}
374 415
375/* 416/**
 376 * This function gets the block which contains the fragment. 417 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
418 * readpage, writepage and so on
377 */ 419 */
378 420
379int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) 421int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
380{ 422{
381 struct super_block * sb = inode->i_sb; 423 struct super_block * sb = inode->i_sb;
382 struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi; 424 struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
@@ -387,7 +429,7 @@ int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_hea
387 429
388 if (!create) { 430 if (!create) {
389 phys64 = ufs_frag_map(inode, fragment); 431 phys64 = ufs_frag_map(inode, fragment);
390 UFSD(("phys64 = %llu \n",phys64)); 432 UFSD("phys64 = %llu \n",phys64);
391 if (phys64) 433 if (phys64)
392 map_bh(bh_result, sb, phys64); 434 map_bh(bh_result, sb, phys64);
393 return 0; 435 return 0;
@@ -402,7 +444,7 @@ int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_hea
402 444
403 lock_kernel(); 445 lock_kernel();
404 446
405 UFSD(("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment)) 447 UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
406 if (fragment < 0) 448 if (fragment < 0)
407 goto abort_negative; 449 goto abort_negative;
408 if (fragment > 450 if (fragment >
@@ -418,15 +460,15 @@ int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_hea
418 * it much more readable: 460 * it much more readable:
419 */ 461 */
420#define GET_INODE_DATABLOCK(x) \ 462#define GET_INODE_DATABLOCK(x) \
421 ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new) 463 ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new, bh_result->b_page)
422#define GET_INODE_PTR(x) \ 464#define GET_INODE_PTR(x) \
423 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL) 465 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, bh_result->b_page)
424#define GET_INDIRECT_DATABLOCK(x) \ 466#define GET_INDIRECT_DATABLOCK(x) \
425 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ 467 ufs_inode_getblock(inode, bh, x, fragment, \
426 &err, 0, &phys, &new); 468 &err, &phys, &new, bh_result->b_page);
427#define GET_INDIRECT_PTR(x) \ 469#define GET_INDIRECT_PTR(x) \
428 ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ 470 ufs_inode_getblock(inode, bh, x, fragment, \
429 &err, 1, NULL, NULL); 471 &err, NULL, NULL, bh_result->b_page);
430 472
431 if (ptr < UFS_NDIR_FRAGMENT) { 473 if (ptr < UFS_NDIR_FRAGMENT) {
432 bh = GET_INODE_DATABLOCK(ptr); 474 bh = GET_INODE_DATABLOCK(ptr);
@@ -474,8 +516,9 @@ abort_too_big:
474 goto abort; 516 goto abort;
475} 517}
476 518
477struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment, 519static struct buffer_head *ufs_getfrag(struct inode *inode,
478 int create, int *err) 520 unsigned int fragment,
521 int create, int *err)
479{ 522{
480 struct buffer_head dummy; 523 struct buffer_head dummy;
481 int error; 524 int error;
@@ -502,7 +545,7 @@ struct buffer_head * ufs_bread (struct inode * inode, unsigned fragment,
502{ 545{
503 struct buffer_head * bh; 546 struct buffer_head * bh;
504 547
505 UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment)) 548 UFSD("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment);
506 bh = ufs_getfrag (inode, fragment, create, err); 549 bh = ufs_getfrag (inode, fragment, create, err);
507 if (!bh || buffer_uptodate(bh)) 550 if (!bh || buffer_uptodate(bh))
508 return bh; 551 return bh;
@@ -540,6 +583,28 @@ struct address_space_operations ufs_aops = {
540 .bmap = ufs_bmap 583 .bmap = ufs_bmap
541}; 584};
542 585
586static void ufs_set_inode_ops(struct inode *inode)
587{
588 if (S_ISREG(inode->i_mode)) {
589 inode->i_op = &ufs_file_inode_operations;
590 inode->i_fop = &ufs_file_operations;
591 inode->i_mapping->a_ops = &ufs_aops;
592 } else if (S_ISDIR(inode->i_mode)) {
593 inode->i_op = &ufs_dir_inode_operations;
594 inode->i_fop = &ufs_dir_operations;
595 inode->i_mapping->a_ops = &ufs_aops;
596 } else if (S_ISLNK(inode->i_mode)) {
597 if (!inode->i_blocks)
598 inode->i_op = &ufs_fast_symlink_inode_operations;
599 else {
600 inode->i_op = &page_symlink_inode_operations;
601 inode->i_mapping->a_ops = &ufs_aops;
602 }
603 } else
604 init_special_inode(inode, inode->i_mode,
605 ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
606}
607
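ufs_set_inode_ops() factors out the per-type setup that was previously duplicated in the UFS1 and UFS2 branches of ufs_read_inode(); note that directories now also get ufs_aops, since directory handling has moved to the page cache. A user-space sketch of the same mode-bit dispatch, with hypothetical ops names:

#include <stdio.h>
#include <sys/stat.h>

/* Hypothetical ops tables standing in for the inode/file operations. */
static const char *file_ops = "file ops";
static const char *dir_ops  = "dir ops";
static const char *lnk_ops  = "symlink ops";
static const char *spec_ops = "special-device ops";

static const char *pick_ops(mode_t mode)
{
	if (S_ISREG(mode))
		return file_ops;
	if (S_ISDIR(mode))
		return dir_ops;
	if (S_ISLNK(mode))
		return lnk_ops;
	return spec_ops;        /* char/block devices, fifos, sockets */
}

int main(void)
{
	printf("%s\n", pick_ops(S_IFDIR | 0755));   /* "dir ops" */
	return 0;
}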
543void ufs_read_inode (struct inode * inode) 608void ufs_read_inode (struct inode * inode)
544{ 609{
545 struct ufs_inode_info *ufsi = UFS_I(inode); 610 struct ufs_inode_info *ufsi = UFS_I(inode);
@@ -552,7 +617,7 @@ void ufs_read_inode (struct inode * inode)
552 unsigned i; 617 unsigned i;
553 unsigned flags; 618 unsigned flags;
554 619
555 UFSD(("ENTER, ino %lu\n", inode->i_ino)) 620 UFSD("ENTER, ino %lu\n", inode->i_ino);
556 621
557 sb = inode->i_sb; 622 sb = inode->i_sb;
558 uspi = UFS_SB(sb)->s_uspi; 623 uspi = UFS_SB(sb)->s_uspi;
@@ -603,38 +668,22 @@ void ufs_read_inode (struct inode * inode)
603 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 668 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
604 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 669 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
605 ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; 670 ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
671 ufsi->i_dir_start_lookup = 0;
606 672
607 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 673 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
608 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 674 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
609 ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i]; 675 ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
610 } 676 } else {
611 else {
612 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 677 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
613 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; 678 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
614 } 679 }
615 ufsi->i_osync = 0; 680 ufsi->i_osync = 0;
616 681
617 if (S_ISREG(inode->i_mode)) { 682 ufs_set_inode_ops(inode);
618 inode->i_op = &ufs_file_inode_operations;
619 inode->i_fop = &ufs_file_operations;
620 inode->i_mapping->a_ops = &ufs_aops;
621 } else if (S_ISDIR(inode->i_mode)) {
622 inode->i_op = &ufs_dir_inode_operations;
623 inode->i_fop = &ufs_dir_operations;
624 } else if (S_ISLNK(inode->i_mode)) {
625 if (!inode->i_blocks)
626 inode->i_op = &ufs_fast_symlink_inode_operations;
627 else {
628 inode->i_op = &page_symlink_inode_operations;
629 inode->i_mapping->a_ops = &ufs_aops;
630 }
631 } else
632 init_special_inode(inode, inode->i_mode,
633 ufs_get_inode_dev(sb, ufsi));
634 683
635 brelse (bh); 684 brelse (bh);
636 685
637 UFSD(("EXIT\n")) 686 UFSD("EXIT\n");
638 return; 687 return;
639 688
640bad_inode: 689bad_inode:
@@ -642,7 +691,7 @@ bad_inode:
642 return; 691 return;
643 692
644ufs2_inode : 693ufs2_inode :
645 UFSD(("Reading ufs2 inode, ino %lu\n", inode->i_ino)) 694 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
646 695
647 ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino)); 696 ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));
648 697
@@ -690,27 +739,11 @@ ufs2_inode :
690 } 739 }
691 ufsi->i_osync = 0; 740 ufsi->i_osync = 0;
692 741
693 if (S_ISREG(inode->i_mode)) { 742 ufs_set_inode_ops(inode);
694 inode->i_op = &ufs_file_inode_operations;
695 inode->i_fop = &ufs_file_operations;
696 inode->i_mapping->a_ops = &ufs_aops;
697 } else if (S_ISDIR(inode->i_mode)) {
698 inode->i_op = &ufs_dir_inode_operations;
699 inode->i_fop = &ufs_dir_operations;
700 } else if (S_ISLNK(inode->i_mode)) {
701 if (!inode->i_blocks)
702 inode->i_op = &ufs_fast_symlink_inode_operations;
703 else {
704 inode->i_op = &page_symlink_inode_operations;
705 inode->i_mapping->a_ops = &ufs_aops;
706 }
707 } else /* TODO : here ...*/
708 init_special_inode(inode, inode->i_mode,
709 ufs_get_inode_dev(sb, ufsi));
710 743
711 brelse(bh); 744 brelse(bh);
712 745
713 UFSD(("EXIT\n")) 746 UFSD("EXIT\n");
714 return; 747 return;
715} 748}
716 749
@@ -724,7 +757,7 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
724 unsigned i; 757 unsigned i;
725 unsigned flags; 758 unsigned flags;
726 759
727 UFSD(("ENTER, ino %lu\n", inode->i_ino)) 760 UFSD("ENTER, ino %lu\n", inode->i_ino);
728 761
729 sb = inode->i_sb; 762 sb = inode->i_sb;
730 uspi = UFS_SB(sb)->s_uspi; 763 uspi = UFS_SB(sb)->s_uspi;
@@ -785,7 +818,7 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
785 sync_dirty_buffer(bh); 818 sync_dirty_buffer(bh);
786 brelse (bh); 819 brelse (bh);
787 820
788 UFSD(("EXIT\n")) 821 UFSD("EXIT\n");
789 return 0; 822 return 0;
790} 823}
791 824
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 8d5f98a01c74..abd5f23a426d 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -1,6 +1,9 @@
1/* 1/*
2 * linux/fs/ufs/namei.c 2 * linux/fs/ufs/namei.c
3 * 3 *
 4 * Migration to usage of the "page cache" in May 2006 by
 5 * Evgeniy Dushistov <dushistov@mail.ru>, based on the ext2 code base.
6 *
4 * Copyright (C) 1998 7 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz> 8 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics 9 * Charles University, Faculty of Mathematics and Physics
@@ -28,21 +31,9 @@
28#include <linux/fs.h> 31#include <linux/fs.h>
29#include <linux/ufs_fs.h> 32#include <linux/ufs_fs.h>
30#include <linux/smp_lock.h> 33#include <linux/smp_lock.h>
31#include <linux/buffer_head.h>
32#include "swab.h" /* will go away - see comment in mknod() */ 34#include "swab.h" /* will go away - see comment in mknod() */
33#include "util.h" 35#include "util.h"
34 36
35/*
36#undef UFS_NAMEI_DEBUG
37*/
38#define UFS_NAMEI_DEBUG
39
40#ifdef UFS_NAMEI_DEBUG
41#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
42#else
43#define UFSD(x)
44#endif
45
46static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) 37static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
47{ 38{
48 int err = ufs_add_link(dentry, inode); 39 int err = ufs_add_link(dentry, inode);
@@ -88,8 +79,13 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
88static int ufs_create (struct inode * dir, struct dentry * dentry, int mode, 79static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
89 struct nameidata *nd) 80 struct nameidata *nd)
90{ 81{
91 struct inode * inode = ufs_new_inode(dir, mode); 82 struct inode *inode;
92 int err = PTR_ERR(inode); 83 int err;
84
85 UFSD("BEGIN\n");
86 inode = ufs_new_inode(dir, mode);
87 err = PTR_ERR(inode);
88
93 if (!IS_ERR(inode)) { 89 if (!IS_ERR(inode)) {
94 inode->i_op = &ufs_file_inode_operations; 90 inode->i_op = &ufs_file_inode_operations;
95 inode->i_fop = &ufs_file_operations; 91 inode->i_fop = &ufs_file_operations;
@@ -99,6 +95,7 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
99 err = ufs_add_nondir(dentry, inode); 95 err = ufs_add_nondir(dentry, inode);
100 unlock_kernel(); 96 unlock_kernel();
101 } 97 }
98 UFSD("END: err=%d\n", err);
102 return err; 99 return err;
103} 100}
104 101
@@ -205,6 +202,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
205 202
206 inode->i_op = &ufs_dir_inode_operations; 203 inode->i_op = &ufs_dir_inode_operations;
207 inode->i_fop = &ufs_dir_operations; 204 inode->i_fop = &ufs_dir_operations;
205 inode->i_mapping->a_ops = &ufs_aops;
208 206
209 inode_inc_link_count(inode); 207 inode_inc_link_count(inode);
210 208
@@ -231,19 +229,18 @@ out_dir:
231 goto out; 229 goto out;
232} 230}
233 231
234static int ufs_unlink(struct inode * dir, struct dentry *dentry) 232static int ufs_unlink(struct inode *dir, struct dentry *dentry)
235{ 233{
236 struct inode * inode = dentry->d_inode; 234 struct inode * inode = dentry->d_inode;
237 struct buffer_head * bh; 235 struct ufs_dir_entry *de;
238 struct ufs_dir_entry * de; 236 struct page *page;
239 int err = -ENOENT; 237 int err = -ENOENT;
240 238
241 lock_kernel(); 239 de = ufs_find_entry(dir, dentry, &page);
242 de = ufs_find_entry (dentry, &bh);
243 if (!de) 240 if (!de)
244 goto out; 241 goto out;
245 242
246 err = ufs_delete_entry (dir, de, bh); 243 err = ufs_delete_entry(dir, de, page);
247 if (err) 244 if (err)
248 goto out; 245 goto out;
249 246
@@ -251,7 +248,6 @@ static int ufs_unlink(struct inode * dir, struct dentry *dentry)
251 inode_dec_link_count(inode); 248 inode_dec_link_count(inode);
252 err = 0; 249 err = 0;
253out: 250out:
254 unlock_kernel();
255 return err; 251 return err;
256} 252}
257 253
@@ -273,42 +269,42 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
273 return err; 269 return err;
274} 270}
275 271
276static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry, 272static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
277 struct inode * new_dir, struct dentry * new_dentry ) 273 struct inode *new_dir, struct dentry *new_dentry)
278{ 274{
279 struct inode *old_inode = old_dentry->d_inode; 275 struct inode *old_inode = old_dentry->d_inode;
280 struct inode *new_inode = new_dentry->d_inode; 276 struct inode *new_inode = new_dentry->d_inode;
281 struct buffer_head *dir_bh = NULL; 277 struct page *dir_page = NULL;
282 struct ufs_dir_entry *dir_de = NULL; 278 struct ufs_dir_entry * dir_de = NULL;
283 struct buffer_head *old_bh; 279 struct page *old_page;
284 struct ufs_dir_entry *old_de; 280 struct ufs_dir_entry *old_de;
285 int err = -ENOENT; 281 int err = -ENOENT;
286 282
287 lock_kernel(); 283 old_de = ufs_find_entry(old_dir, old_dentry, &old_page);
288 old_de = ufs_find_entry (old_dentry, &old_bh);
289 if (!old_de) 284 if (!old_de)
290 goto out; 285 goto out;
291 286
292 if (S_ISDIR(old_inode->i_mode)) { 287 if (S_ISDIR(old_inode->i_mode)) {
293 err = -EIO; 288 err = -EIO;
294 dir_de = ufs_dotdot(old_inode, &dir_bh); 289 dir_de = ufs_dotdot(old_inode, &dir_page);
295 if (!dir_de) 290 if (!dir_de)
296 goto out_old; 291 goto out_old;
297 } 292 }
298 293
299 if (new_inode) { 294 if (new_inode) {
300 struct buffer_head *new_bh; 295 struct page *new_page;
301 struct ufs_dir_entry *new_de; 296 struct ufs_dir_entry *new_de;
302 297
303 err = -ENOTEMPTY; 298 err = -ENOTEMPTY;
304 if (dir_de && !ufs_empty_dir (new_inode)) 299 if (dir_de && !ufs_empty_dir(new_inode))
305 goto out_dir; 300 goto out_dir;
301
306 err = -ENOENT; 302 err = -ENOENT;
307 new_de = ufs_find_entry (new_dentry, &new_bh); 303 new_de = ufs_find_entry(new_dir, new_dentry, &new_page);
308 if (!new_de) 304 if (!new_de)
309 goto out_dir; 305 goto out_dir;
310 inode_inc_link_count(old_inode); 306 inode_inc_link_count(old_inode);
311 ufs_set_link(new_dir, new_de, new_bh, old_inode); 307 ufs_set_link(new_dir, new_de, new_page, old_inode);
312 new_inode->i_ctime = CURRENT_TIME_SEC; 308 new_inode->i_ctime = CURRENT_TIME_SEC;
313 if (dir_de) 309 if (dir_de)
314 new_inode->i_nlink--; 310 new_inode->i_nlink--;
@@ -329,24 +325,32 @@ static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry,
329 inode_inc_link_count(new_dir); 325 inode_inc_link_count(new_dir);
330 } 326 }
331 327
332 ufs_delete_entry (old_dir, old_de, old_bh); 328 /*
329 * Like most other Unix systems, set the ctime for inodes on a
330 * rename.
331 * inode_dec_link_count() will mark the inode dirty.
332 */
333 old_inode->i_ctime = CURRENT_TIME_SEC;
333 334
335 ufs_delete_entry(old_dir, old_de, old_page);
334 inode_dec_link_count(old_inode); 336 inode_dec_link_count(old_inode);
335 337
336 if (dir_de) { 338 if (dir_de) {
337 ufs_set_link(old_inode, dir_de, dir_bh, new_dir); 339 ufs_set_link(old_inode, dir_de, dir_page, new_dir);
338 inode_dec_link_count(old_dir); 340 inode_dec_link_count(old_dir);
339 } 341 }
340 unlock_kernel();
341 return 0; 342 return 0;
342 343
344
343out_dir: 345out_dir:
344 if (dir_de) 346 if (dir_de) {
345 brelse(dir_bh); 347 kunmap(dir_page);
348 page_cache_release(dir_page);
349 }
346out_old: 350out_old:
347 brelse (old_bh); 351 kunmap(old_page);
352 page_cache_release(old_page);
348out: 353out:
349 unlock_kernel();
350 return err; 354 return err;
351} 355}
352 356
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index fe5ab2aa2899..74ef5e9bedff 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -90,95 +90,84 @@
90#include "swab.h" 90#include "swab.h"
91#include "util.h" 91#include "util.h"
92 92
93#undef UFS_SUPER_DEBUG 93#ifdef CONFIG_UFS_DEBUG
94#undef UFS_SUPER_DEBUG_MORE
95
96
97#undef UFS_SUPER_DEBUG_MORE
98#ifdef UFS_SUPER_DEBUG
99#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
100#else
101#define UFSD(x)
102#endif
103
104#ifdef UFS_SUPER_DEBUG_MORE
105/* 94/*
106 * Print contents of ufs_super_block, useful for debugging 95 * Print contents of ufs_super_block, useful for debugging
107 */ 96 */
108void ufs_print_super_stuff(struct super_block *sb, 97static void ufs_print_super_stuff(struct super_block *sb, unsigned flags,
109 struct ufs_super_block_first * usb1, 98 struct ufs_super_block_first *usb1,
110 struct ufs_super_block_second * usb2, 99 struct ufs_super_block_second *usb2,
111 struct ufs_super_block_third * usb3) 100 struct ufs_super_block_third *usb3)
112{ 101{
113 printk("ufs_print_super_stuff\n"); 102 printk("ufs_print_super_stuff\n");
114 printk("size of usb: %u\n", sizeof(struct ufs_super_block)); 103 printk(" magic: 0x%x\n", fs32_to_cpu(sb, usb3->fs_magic));
115 printk(" magic: 0x%x\n", fs32_to_cpu(sb, usb3->fs_magic)); 104 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
116 printk(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno)); 105 printk(" fs_size: %llu\n", (unsigned long long)
117 printk(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno)); 106 fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size));
118 printk(" iblkno: %u\n", fs32_to_cpu(sb, usb1->fs_iblkno)); 107 printk(" fs_dsize: %llu\n", (unsigned long long)
119 printk(" dblkno: %u\n", fs32_to_cpu(sb, usb1->fs_dblkno)); 108 fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize));
120 printk(" cgoffset: %u\n", fs32_to_cpu(sb, usb1->fs_cgoffset)); 109 printk(" bsize: %u\n",
121 printk(" ~cgmask: 0x%x\n", ~fs32_to_cpu(sb, usb1->fs_cgmask)); 110 fs32_to_cpu(sb, usb1->fs_bsize));
122 printk(" size: %u\n", fs32_to_cpu(sb, usb1->fs_size)); 111 printk(" fsize: %u\n",
123 printk(" dsize: %u\n", fs32_to_cpu(sb, usb1->fs_dsize)); 112 fs32_to_cpu(sb, usb1->fs_fsize));
124 printk(" ncg: %u\n", fs32_to_cpu(sb, usb1->fs_ncg)); 113 printk(" fs_volname: %s\n", usb2->fs_un.fs_u2.fs_volname);
125 printk(" bsize: %u\n", fs32_to_cpu(sb, usb1->fs_bsize)); 114 printk(" fs_sblockloc: %llu\n", (unsigned long long)
126 printk(" fsize: %u\n", fs32_to_cpu(sb, usb1->fs_fsize)); 115 fs64_to_cpu(sb, usb2->fs_un.fs_u2.fs_sblockloc));
127 printk(" frag: %u\n", fs32_to_cpu(sb, usb1->fs_frag)); 116 printk(" cs_ndir(No of dirs): %llu\n", (unsigned long long)
128 printk(" fragshift: %u\n", fs32_to_cpu(sb, usb1->fs_fragshift)); 117 fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir));
129 printk(" ~fmask: %u\n", ~fs32_to_cpu(sb, usb1->fs_fmask)); 118 printk(" cs_nbfree(No of free blocks): %llu\n",
130 printk(" fshift: %u\n", fs32_to_cpu(sb, usb1->fs_fshift)); 119 (unsigned long long)
131 printk(" sbsize: %u\n", fs32_to_cpu(sb, usb1->fs_sbsize)); 120 fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_nbfree));
132 printk(" spc: %u\n", fs32_to_cpu(sb, usb1->fs_spc)); 121 } else {
133 printk(" cpg: %u\n", fs32_to_cpu(sb, usb1->fs_cpg)); 122 printk(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno));
134 printk(" ipg: %u\n", fs32_to_cpu(sb, usb1->fs_ipg)); 123 printk(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno));
135 printk(" fpg: %u\n", fs32_to_cpu(sb, usb1->fs_fpg)); 124 printk(" iblkno: %u\n", fs32_to_cpu(sb, usb1->fs_iblkno));
136 printk(" csaddr: %u\n", fs32_to_cpu(sb, usb1->fs_csaddr)); 125 printk(" dblkno: %u\n", fs32_to_cpu(sb, usb1->fs_dblkno));
137 printk(" cssize: %u\n", fs32_to_cpu(sb, usb1->fs_cssize)); 126 printk(" cgoffset: %u\n",
138 printk(" cgsize: %u\n", fs32_to_cpu(sb, usb1->fs_cgsize)); 127 fs32_to_cpu(sb, usb1->fs_cgoffset));
139 printk(" fstodb: %u\n", fs32_to_cpu(sb, usb1->fs_fsbtodb)); 128 printk(" ~cgmask: 0x%x\n",
140 printk(" contigsumsize: %d\n", fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize)); 129 ~fs32_to_cpu(sb, usb1->fs_cgmask));
141 printk(" postblformat: %u\n", fs32_to_cpu(sb, usb3->fs_postblformat)); 130 printk(" size: %u\n", fs32_to_cpu(sb, usb1->fs_size));
142 printk(" nrpos: %u\n", fs32_to_cpu(sb, usb3->fs_nrpos)); 131 printk(" dsize: %u\n", fs32_to_cpu(sb, usb1->fs_dsize));
143 printk(" ndir %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir)); 132 printk(" ncg: %u\n", fs32_to_cpu(sb, usb1->fs_ncg));
144 printk(" nifree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree)); 133 printk(" bsize: %u\n", fs32_to_cpu(sb, usb1->fs_bsize));
145 printk(" nbfree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree)); 134 printk(" fsize: %u\n", fs32_to_cpu(sb, usb1->fs_fsize));
146 printk(" nffree %u\n", fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)); 135 printk(" frag: %u\n", fs32_to_cpu(sb, usb1->fs_frag));
147 printk("\n"); 136 printk(" fragshift: %u\n",
148} 137 fs32_to_cpu(sb, usb1->fs_fragshift));
149 138 printk(" ~fmask: %u\n", ~fs32_to_cpu(sb, usb1->fs_fmask));
150/* 139 printk(" fshift: %u\n", fs32_to_cpu(sb, usb1->fs_fshift));
151 * Print contents of ufs2 ufs_super_block, useful for debugging 140 printk(" sbsize: %u\n", fs32_to_cpu(sb, usb1->fs_sbsize));
152 */ 141 printk(" spc: %u\n", fs32_to_cpu(sb, usb1->fs_spc));
153void ufs2_print_super_stuff( 142 printk(" cpg: %u\n", fs32_to_cpu(sb, usb1->fs_cpg));
154 struct super_block *sb, 143 printk(" ipg: %u\n", fs32_to_cpu(sb, usb1->fs_ipg));
155 struct ufs_super_block *usb) 144 printk(" fpg: %u\n", fs32_to_cpu(sb, usb1->fs_fpg));
156{ 145 printk(" csaddr: %u\n", fs32_to_cpu(sb, usb1->fs_csaddr));
157 printk("ufs_print_super_stuff\n"); 146 printk(" cssize: %u\n", fs32_to_cpu(sb, usb1->fs_cssize));
158 printk("size of usb: %u\n", sizeof(struct ufs_super_block)); 147 printk(" cgsize: %u\n", fs32_to_cpu(sb, usb1->fs_cgsize));
159 printk(" magic: 0x%x\n", fs32_to_cpu(sb, usb->fs_magic)); 148 printk(" fstodb: %u\n",
160 printk(" fs_size: %u\n",fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size)); 149 fs32_to_cpu(sb, usb1->fs_fsbtodb));
161 printk(" fs_dsize: %u\n",fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize)); 150 printk(" nrpos: %u\n", fs32_to_cpu(sb, usb3->fs_nrpos));
162 printk(" bsize: %u\n", fs32_to_cpu(usb, usb->fs_bsize)); 151 printk(" ndir %u\n",
163 printk(" fsize: %u\n", fs32_to_cpu(usb, usb->fs_fsize)); 152 fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir));
164 printk(" fs_volname: %s\n", usb->fs_u11.fs_u2.fs_volname); 153 printk(" nifree %u\n",
165 printk(" fs_fsmnt: %s\n", usb->fs_u11.fs_u2.fs_fsmnt); 154 fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree));
166 printk(" fs_sblockloc: %u\n",fs64_to_cpu(sb, 155 printk(" nbfree %u\n",
167 usb->fs_u11.fs_u2.fs_sblockloc)); 156 fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree));
168 printk(" cs_ndir(No of dirs): %u\n",fs64_to_cpu(sb, 157 printk(" nffree %u\n",
169 usb->fs_u11.fs_u2.fs_cstotal.cs_ndir)); 158 fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree));
170 printk(" cs_nbfree(No of free blocks): %u\n",fs64_to_cpu(sb, 159 }
171 usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree));
172 printk("\n"); 160 printk("\n");
173} 161}
174 162
175/* 163/*
176 * Print contents of ufs_cylinder_group, useful for debugging 164 * Print contents of ufs_cylinder_group, useful for debugging
177 */ 165 */
178void ufs_print_cylinder_stuff(struct super_block *sb, struct ufs_cylinder_group *cg) 166static void ufs_print_cylinder_stuff(struct super_block *sb,
167 struct ufs_cylinder_group *cg)
179{ 168{
180 printk("\nufs_print_cylinder_stuff\n"); 169 printk("\nufs_print_cylinder_stuff\n");
181 printk("size of ucg: %u\n", sizeof(struct ufs_cylinder_group)); 170 printk("size of ucg: %zu\n", sizeof(struct ufs_cylinder_group));
182 printk(" magic: %x\n", fs32_to_cpu(sb, cg->cg_magic)); 171 printk(" magic: %x\n", fs32_to_cpu(sb, cg->cg_magic));
183 printk(" time: %u\n", fs32_to_cpu(sb, cg->cg_time)); 172 printk(" time: %u\n", fs32_to_cpu(sb, cg->cg_time));
184 printk(" cgx: %u\n", fs32_to_cpu(sb, cg->cg_cgx)); 173 printk(" cgx: %u\n", fs32_to_cpu(sb, cg->cg_cgx));
@@ -202,12 +191,18 @@ void ufs_print_cylinder_stuff(struct super_block *sb, struct ufs_cylinder_group
202 printk(" iuseoff: %u\n", fs32_to_cpu(sb, cg->cg_iusedoff)); 191 printk(" iuseoff: %u\n", fs32_to_cpu(sb, cg->cg_iusedoff));
203 printk(" freeoff: %u\n", fs32_to_cpu(sb, cg->cg_freeoff)); 192 printk(" freeoff: %u\n", fs32_to_cpu(sb, cg->cg_freeoff));
204 printk(" nextfreeoff: %u\n", fs32_to_cpu(sb, cg->cg_nextfreeoff)); 193 printk(" nextfreeoff: %u\n", fs32_to_cpu(sb, cg->cg_nextfreeoff));
205 printk(" clustersumoff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clustersumoff)); 194 printk(" clustersumoff %u\n",
206 printk(" clusteroff %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clusteroff)); 195 fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clustersumoff));
207 printk(" nclusterblks %u\n", fs32_to_cpu(sb, cg->cg_u.cg_44.cg_nclusterblks)); 196 printk(" clusteroff %u\n",
197 fs32_to_cpu(sb, cg->cg_u.cg_44.cg_clusteroff));
198 printk(" nclusterblks %u\n",
199 fs32_to_cpu(sb, cg->cg_u.cg_44.cg_nclusterblks));
208 printk("\n"); 200 printk("\n");
209} 201}
210#endif /* UFS_SUPER_DEBUG_MORE */ 202#else
203# define ufs_print_super_stuff(sb, flags, usb1, usb2, usb3) /**/
204# define ufs_print_cylinder_stuff(sb, cg) /**/
205#endif /* CONFIG_UFS_DEBUG */
211 206
212static struct super_operations ufs_super_ops; 207static struct super_operations ufs_super_ops;
213 208
@@ -225,7 +220,7 @@ void ufs_error (struct super_block * sb, const char * function,
225 220
226 if (!(sb->s_flags & MS_RDONLY)) { 221 if (!(sb->s_flags & MS_RDONLY)) {
227 usb1->fs_clean = UFS_FSBAD; 222 usb1->fs_clean = UFS_FSBAD;
228 ubh_mark_buffer_dirty(USPI_UBH); 223 ubh_mark_buffer_dirty(USPI_UBH(uspi));
229 sb->s_dirt = 1; 224 sb->s_dirt = 1;
230 sb->s_flags |= MS_RDONLY; 225 sb->s_flags |= MS_RDONLY;
231 } 226 }
@@ -257,7 +252,7 @@ void ufs_panic (struct super_block * sb, const char * function,
257 252
258 if (!(sb->s_flags & MS_RDONLY)) { 253 if (!(sb->s_flags & MS_RDONLY)) {
259 usb1->fs_clean = UFS_FSBAD; 254 usb1->fs_clean = UFS_FSBAD;
260 ubh_mark_buffer_dirty(USPI_UBH); 255 ubh_mark_buffer_dirty(USPI_UBH(uspi));
261 sb->s_dirt = 1; 256 sb->s_dirt = 1;
262 } 257 }
263 va_start (args, fmt); 258 va_start (args, fmt);
@@ -309,7 +304,7 @@ static int ufs_parse_options (char * options, unsigned * mount_options)
309{ 304{
310 char * p; 305 char * p;
311 306
312 UFSD(("ENTER\n")) 307 UFSD("ENTER\n");
313 308
314 if (!options) 309 if (!options)
315 return 1; 310 return 1;
@@ -386,27 +381,57 @@ static int ufs_parse_options (char * options, unsigned * mount_options)
386} 381}
387 382
388/* 383/*
 384 * Different types of UFS hold fs_cstotal in different
 385 * places, and use different data structures for it.
 386 * To make things simpler we just copy fs_cstotal to ufs_sb_private_info
387 */
388static void ufs_setup_cstotal(struct super_block *sb)
389{
390 struct ufs_sb_info *sbi = UFS_SB(sb);
391 struct ufs_sb_private_info *uspi = sbi->s_uspi;
392 struct ufs_super_block_first *usb1;
393 struct ufs_super_block_second *usb2;
394 struct ufs_super_block_third *usb3;
395 unsigned mtype = sbi->s_mount_opt & UFS_MOUNT_UFSTYPE;
396
397 UFSD("ENTER, mtype=%u\n", mtype);
398 usb1 = ubh_get_usb_first(uspi);
399 usb2 = ubh_get_usb_second(uspi);
400 usb3 = ubh_get_usb_third(uspi);
401
402 if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
403 (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
404 mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 405 /* we keep these statistics in a different place than usual */
406 uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir);
407 uspi->cs_total.cs_nbfree = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_nbfree);
408 uspi->cs_total.cs_nifree = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nifree);
409 uspi->cs_total.cs_nffree = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nffree);
410 } else {
411 uspi->cs_total.cs_ndir = fs32_to_cpu(sb, usb1->fs_cstotal.cs_ndir);
412 uspi->cs_total.cs_nbfree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree);
413 uspi->cs_total.cs_nifree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree);
414 uspi->cs_total.cs_nffree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree);
415 }
416 UFSD("EXIT\n");
417}
418
419/*
389 * Read on-disk structures associated with cylinder groups 420 * Read on-disk structures associated with cylinder groups
390 */ 421 */
391static int ufs_read_cylinder_structures (struct super_block *sb) 422static int ufs_read_cylinder_structures(struct super_block *sb)
392{ 423{
393 struct ufs_sb_info * sbi = UFS_SB(sb); 424 struct ufs_sb_info *sbi = UFS_SB(sb);
394 struct ufs_sb_private_info * uspi; 425 struct ufs_sb_private_info *uspi = sbi->s_uspi;
395 struct ufs_super_block *usb; 426 unsigned flags = sbi->s_flags;
396 struct ufs_buffer_head * ubh; 427 struct ufs_buffer_head * ubh;
397 unsigned char * base, * space; 428 unsigned char * base, * space;
398 unsigned size, blks, i; 429 unsigned size, blks, i;
399 unsigned flags = 0; 430 struct ufs_super_block_third *usb3;
400
401 UFSD(("ENTER\n"))
402
403 uspi = sbi->s_uspi;
404 431
405 usb = (struct ufs_super_block *) 432 UFSD("ENTER\n");
406 ((struct ufs_buffer_head *)uspi)->bh[0]->b_data;
407 433
408 flags = UFS_SB(sb)->s_flags; 434 usb3 = ubh_get_usb_third(uspi);
409
410 /* 435 /*
411 * Read cs structures from (usually) first data block 436 * Read cs structures from (usually) first data block
412 * on the device. 437 * on the device.
@@ -424,7 +449,7 @@ static int ufs_read_cylinder_structures (struct super_block *sb)
424 449
425 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 450 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
426 ubh = ubh_bread(sb, 451 ubh = ubh_bread(sb,
427 fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_csaddr) + i, size); 452 fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_csaddr) + i, size);
428 else 453 else
429 ubh = ubh_bread(sb, uspi->s_csaddr + i, size); 454 ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
430 455
@@ -451,14 +476,13 @@ static int ufs_read_cylinder_structures (struct super_block *sb)
451 sbi->s_cgno[i] = UFS_CGNO_EMPTY; 476 sbi->s_cgno[i] = UFS_CGNO_EMPTY;
452 } 477 }
453 for (i = 0; i < uspi->s_ncg; i++) { 478 for (i = 0; i < uspi->s_ncg; i++) {
454 UFSD(("read cg %u\n", i)) 479 UFSD("read cg %u\n", i);
455 if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i)))) 480 if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
456 goto failed; 481 goto failed;
457 if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data)) 482 if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data))
458 goto failed; 483 goto failed;
459#ifdef UFS_SUPER_DEBUG_MORE 484
460 ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data); 485 ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data);
461#endif
462 } 486 }
463 for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) { 487 for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
464 if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL))) 488 if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL)))
@@ -466,7 +490,7 @@ static int ufs_read_cylinder_structures (struct super_block *sb)
466 sbi->s_cgno[i] = UFS_CGNO_EMPTY; 490 sbi->s_cgno[i] = UFS_CGNO_EMPTY;
467 } 491 }
468 sbi->s_cg_loaded = 0; 492 sbi->s_cg_loaded = 0;
469 UFSD(("EXIT\n")) 493 UFSD("EXIT\n");
470 return 1; 494 return 1;
471 495
472failed: 496failed:
@@ -479,26 +503,69 @@ failed:
479 for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) 503 for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
480 kfree (sbi->s_ucpi[i]); 504 kfree (sbi->s_ucpi[i]);
481 } 505 }
482 UFSD(("EXIT (FAILED)\n")) 506 UFSD("EXIT (FAILED)\n");
483 return 0; 507 return 0;
484} 508}
485 509
486/* 510/*
487 * Put on-disk structures associated with cylinder groups and 511 * Sync our internal copy of fs_cstotal with disk
488 * write them back to disk
489 */ 512 */
490static void ufs_put_cylinder_structures (struct super_block *sb) 513static void ufs_put_cstotal(struct super_block *sb)
491{ 514{
492 struct ufs_sb_info * sbi = UFS_SB(sb); 515 unsigned mtype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
493 struct ufs_sb_private_info * uspi; 516 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
517 struct ufs_super_block_first *usb1;
518 struct ufs_super_block_second *usb2;
519 struct ufs_super_block_third *usb3;
520
521 UFSD("ENTER\n");
522 usb1 = ubh_get_usb_first(uspi);
523 usb2 = ubh_get_usb_second(uspi);
524 usb3 = ubh_get_usb_third(uspi);
525
526 if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
527 (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
528 mtype == UFS_MOUNT_UFSTYPE_UFS2) {
 529 /* we keep these statistics in a different place than usual */
530 usb2->fs_un.fs_u2.cs_ndir =
531 cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
532 usb2->fs_un.fs_u2.cs_nbfree =
533 cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
534 usb3->fs_un1.fs_u2.cs_nifree =
535 cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
536 usb3->fs_un1.fs_u2.cs_nffree =
537 cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
538 } else {
539 usb1->fs_cstotal.cs_ndir =
540 cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
541 usb1->fs_cstotal.cs_nbfree =
542 cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
543 usb1->fs_cstotal.cs_nifree =
544 cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
545 usb1->fs_cstotal.cs_nffree =
546 cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
547 }
548 ubh_mark_buffer_dirty(USPI_UBH(uspi));
549 UFSD("EXIT\n");
550}
551
552/**
 553 * ufs_put_super_internal() - put on-disk internal structures
554 * @sb: pointer to super_block structure
555 * Put on-disk structures associated with cylinder groups
 556 * and write them back to disk, also updating cs_total on disk
557 */
558static void ufs_put_super_internal(struct super_block *sb)
559{
560 struct ufs_sb_info *sbi = UFS_SB(sb);
561 struct ufs_sb_private_info *uspi = sbi->s_uspi;
494 struct ufs_buffer_head * ubh; 562 struct ufs_buffer_head * ubh;
495 unsigned char * base, * space; 563 unsigned char * base, * space;
496 unsigned blks, size, i; 564 unsigned blks, size, i;
497
498 UFSD(("ENTER\n"))
499
500 uspi = sbi->s_uspi;
501 565
566
567 UFSD("ENTER\n");
568 ufs_put_cstotal(sb);
502 size = uspi->s_cssize; 569 size = uspi->s_cssize;
503 blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift; 570 blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
504 base = space = (char*) sbi->s_csp; 571 base = space = (char*) sbi->s_csp;
@@ -523,7 +590,7 @@ static void ufs_put_cylinder_structures (struct super_block *sb)
523 brelse (sbi->s_ucg[i]); 590 brelse (sbi->s_ucg[i]);
524 kfree (sbi->s_ucg); 591 kfree (sbi->s_ucg);
525 kfree (base); 592 kfree (base);
526 UFSD(("EXIT\n")) 593 UFSD("EXIT\n");
527} 594}
528 595
529static int ufs_fill_super(struct super_block *sb, void *data, int silent) 596static int ufs_fill_super(struct super_block *sb, void *data, int silent)
@@ -533,7 +600,6 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
533 struct ufs_super_block_first * usb1; 600 struct ufs_super_block_first * usb1;
534 struct ufs_super_block_second * usb2; 601 struct ufs_super_block_second * usb2;
535 struct ufs_super_block_third * usb3; 602 struct ufs_super_block_third * usb3;
536 struct ufs_super_block *usb;
537 struct ufs_buffer_head * ubh; 603 struct ufs_buffer_head * ubh;
538 struct inode *inode; 604 struct inode *inode;
539 unsigned block_size, super_block_size; 605 unsigned block_size, super_block_size;
@@ -544,7 +610,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
544 ubh = NULL; 610 ubh = NULL;
545 flags = 0; 611 flags = 0;
546 612
547 UFSD(("ENTER\n")) 613 UFSD("ENTER\n");
548 614
549 sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL); 615 sbi = kmalloc(sizeof(struct ufs_sb_info), GFP_KERNEL);
550 if (!sbi) 616 if (!sbi)
@@ -552,7 +618,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
552 sb->s_fs_info = sbi; 618 sb->s_fs_info = sbi;
553 memset(sbi, 0, sizeof(struct ufs_sb_info)); 619 memset(sbi, 0, sizeof(struct ufs_sb_info));
554 620
555 UFSD(("flag %u\n", (int)(sb->s_flags & MS_RDONLY))) 621 UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
556 622
557#ifndef CONFIG_UFS_FS_WRITE 623#ifndef CONFIG_UFS_FS_WRITE
558 if (!(sb->s_flags & MS_RDONLY)) { 624 if (!(sb->s_flags & MS_RDONLY)) {
@@ -593,7 +659,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
593 the rules */ 659 the rules */
594 switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { 660 switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
595 case UFS_MOUNT_UFSTYPE_44BSD: 661 case UFS_MOUNT_UFSTYPE_44BSD:
596 UFSD(("ufstype=44bsd\n")) 662 UFSD("ufstype=44bsd\n");
597 uspi->s_fsize = block_size = 512; 663 uspi->s_fsize = block_size = 512;
598 uspi->s_fmask = ~(512 - 1); 664 uspi->s_fmask = ~(512 - 1);
599 uspi->s_fshift = 9; 665 uspi->s_fshift = 9;
@@ -602,7 +668,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
602 flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD; 668 flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
603 break; 669 break;
604 case UFS_MOUNT_UFSTYPE_UFS2: 670 case UFS_MOUNT_UFSTYPE_UFS2:
605 UFSD(("ufstype=ufs2\n")); 671 UFSD("ufstype=ufs2\n");
606 super_block_offset=SBLOCK_UFS2; 672 super_block_offset=SBLOCK_UFS2;
607 uspi->s_fsize = block_size = 512; 673 uspi->s_fsize = block_size = 512;
608 uspi->s_fmask = ~(512 - 1); 674 uspi->s_fmask = ~(512 - 1);
@@ -617,7 +683,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
617 break; 683 break;
618 684
619 case UFS_MOUNT_UFSTYPE_SUN: 685 case UFS_MOUNT_UFSTYPE_SUN:
620 UFSD(("ufstype=sun\n")) 686 UFSD("ufstype=sun\n");
621 uspi->s_fsize = block_size = 1024; 687 uspi->s_fsize = block_size = 1024;
622 uspi->s_fmask = ~(1024 - 1); 688 uspi->s_fmask = ~(1024 - 1);
623 uspi->s_fshift = 10; 689 uspi->s_fshift = 10;
@@ -628,7 +694,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
628 break; 694 break;
629 695
630 case UFS_MOUNT_UFSTYPE_SUNx86: 696 case UFS_MOUNT_UFSTYPE_SUNx86:
631 UFSD(("ufstype=sunx86\n")) 697 UFSD("ufstype=sunx86\n");
632 uspi->s_fsize = block_size = 1024; 698 uspi->s_fsize = block_size = 1024;
633 uspi->s_fmask = ~(1024 - 1); 699 uspi->s_fmask = ~(1024 - 1);
634 uspi->s_fshift = 10; 700 uspi->s_fshift = 10;
@@ -639,7 +705,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
639 break; 705 break;
640 706
641 case UFS_MOUNT_UFSTYPE_OLD: 707 case UFS_MOUNT_UFSTYPE_OLD:
642 UFSD(("ufstype=old\n")) 708 UFSD("ufstype=old\n");
643 uspi->s_fsize = block_size = 1024; 709 uspi->s_fsize = block_size = 1024;
644 uspi->s_fmask = ~(1024 - 1); 710 uspi->s_fmask = ~(1024 - 1);
645 uspi->s_fshift = 10; 711 uspi->s_fshift = 10;
@@ -654,7 +720,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
654 break; 720 break;
655 721
656 case UFS_MOUNT_UFSTYPE_NEXTSTEP: 722 case UFS_MOUNT_UFSTYPE_NEXTSTEP:
657 UFSD(("ufstype=nextstep\n")) 723 UFSD("ufstype=nextstep\n");
658 uspi->s_fsize = block_size = 1024; 724 uspi->s_fsize = block_size = 1024;
659 uspi->s_fmask = ~(1024 - 1); 725 uspi->s_fmask = ~(1024 - 1);
660 uspi->s_fshift = 10; 726 uspi->s_fshift = 10;
@@ -669,7 +735,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
669 break; 735 break;
670 736
671 case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD: 737 case UFS_MOUNT_UFSTYPE_NEXTSTEP_CD:
672 UFSD(("ufstype=nextstep-cd\n")) 738 UFSD("ufstype=nextstep-cd\n");
673 uspi->s_fsize = block_size = 2048; 739 uspi->s_fsize = block_size = 2048;
674 uspi->s_fmask = ~(2048 - 1); 740 uspi->s_fmask = ~(2048 - 1);
675 uspi->s_fshift = 11; 741 uspi->s_fshift = 11;
@@ -684,7 +750,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
684 break; 750 break;
685 751
686 case UFS_MOUNT_UFSTYPE_OPENSTEP: 752 case UFS_MOUNT_UFSTYPE_OPENSTEP:
687 UFSD(("ufstype=openstep\n")) 753 UFSD("ufstype=openstep\n");
688 uspi->s_fsize = block_size = 1024; 754 uspi->s_fsize = block_size = 1024;
689 uspi->s_fmask = ~(1024 - 1); 755 uspi->s_fmask = ~(1024 - 1);
690 uspi->s_fshift = 10; 756 uspi->s_fshift = 10;
@@ -699,7 +765,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
699 break; 765 break;
700 766
701 case UFS_MOUNT_UFSTYPE_HP: 767 case UFS_MOUNT_UFSTYPE_HP:
702 UFSD(("ufstype=hp\n")) 768 UFSD("ufstype=hp\n");
703 uspi->s_fsize = block_size = 1024; 769 uspi->s_fsize = block_size = 1024;
704 uspi->s_fmask = ~(1024 - 1); 770 uspi->s_fmask = ~(1024 - 1);
705 uspi->s_fshift = 10; 771 uspi->s_fshift = 10;
@@ -737,8 +803,6 @@ again:
737 usb1 = ubh_get_usb_first(uspi); 803 usb1 = ubh_get_usb_first(uspi);
738 usb2 = ubh_get_usb_second(uspi); 804 usb2 = ubh_get_usb_second(uspi);
739 usb3 = ubh_get_usb_third(uspi); 805 usb3 = ubh_get_usb_third(uspi);
740 usb = (struct ufs_super_block *)
741 ((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
742 806
743 /* 807 /*
744 * Check ufs magic number 808 * Check ufs magic number
@@ -820,16 +884,12 @@ magic_found:
820 ubh = NULL; 884 ubh = NULL;
821 block_size = uspi->s_fsize; 885 block_size = uspi->s_fsize;
822 super_block_size = uspi->s_sbsize; 886 super_block_size = uspi->s_sbsize;
823 UFSD(("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size)) 887 UFSD("another value of block_size or super_block_size %u, %u\n", block_size, super_block_size);
824 goto again; 888 goto again;
825 } 889 }
826 890
827#ifdef UFS_SUPER_DEBUG_MORE 891
828 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 892 ufs_print_super_stuff(sb, flags, usb1, usb2, usb3);
829 ufs2_print_super_stuff(sb,usb);
830 else
831 ufs_print_super_stuff(sb, usb1, usb2, usb3);
832#endif
833 893
834 /* 894 /*
835 * Check, if file system was correctly unmounted. 895 * Check, if file system was correctly unmounted.
@@ -842,13 +902,13 @@ magic_found:
842 (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) { 902 (ufs_get_fs_state(sb, usb1, usb3) == (UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time))))) {
843 switch(usb1->fs_clean) { 903 switch(usb1->fs_clean) {
844 case UFS_FSCLEAN: 904 case UFS_FSCLEAN:
845 UFSD(("fs is clean\n")) 905 UFSD("fs is clean\n");
846 break; 906 break;
847 case UFS_FSSTABLE: 907 case UFS_FSSTABLE:
848 UFSD(("fs is stable\n")) 908 UFSD("fs is stable\n");
849 break; 909 break;
850 case UFS_FSOSF1: 910 case UFS_FSOSF1:
851 UFSD(("fs is DEC OSF/1\n")) 911 UFSD("fs is DEC OSF/1\n");
852 break; 912 break;
853 case UFS_FSACTIVE: 913 case UFS_FSACTIVE:
854 printk("ufs_read_super: fs is active\n"); 914 printk("ufs_read_super: fs is active\n");
@@ -863,8 +923,7 @@ magic_found:
863 sb->s_flags |= MS_RDONLY; 923 sb->s_flags |= MS_RDONLY;
864 break; 924 break;
865 } 925 }
866 } 926 } else {
867 else {
868 printk("ufs_read_super: fs needs fsck\n"); 927 printk("ufs_read_super: fs needs fsck\n");
869 sb->s_flags |= MS_RDONLY; 928 sb->s_flags |= MS_RDONLY;
870 } 929 }
@@ -884,10 +943,9 @@ magic_found:
884 uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask); 943 uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask);
885 944
886 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 945 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
887 uspi->s_u2_size = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_size); 946 uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size);
888 uspi->s_u2_dsize = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize); 947 uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
889 } 948 } else {
890 else {
891 uspi->s_size = fs32_to_cpu(sb, usb1->fs_size); 949 uspi->s_size = fs32_to_cpu(sb, usb1->fs_size);
892 uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize); 950 uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize);
893 } 951 }
@@ -901,8 +959,8 @@ magic_found:
901 uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask); 959 uspi->s_fmask = fs32_to_cpu(sb, usb1->fs_fmask);
902 uspi->s_bshift = fs32_to_cpu(sb, usb1->fs_bshift); 960 uspi->s_bshift = fs32_to_cpu(sb, usb1->fs_bshift);
903 uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift); 961 uspi->s_fshift = fs32_to_cpu(sb, usb1->fs_fshift);
904 UFSD(("uspi->s_bshift = %d,uspi->s_fshift = %d", uspi->s_bshift, 962 UFSD("uspi->s_bshift = %d,uspi->s_fshift = %d", uspi->s_bshift,
905 uspi->s_fshift)); 963 uspi->s_fshift);
906 uspi->s_fpbshift = fs32_to_cpu(sb, usb1->fs_fragshift); 964 uspi->s_fpbshift = fs32_to_cpu(sb, usb1->fs_fragshift);
907 uspi->s_fsbtodb = fs32_to_cpu(sb, usb1->fs_fsbtodb); 965 uspi->s_fsbtodb = fs32_to_cpu(sb, usb1->fs_fsbtodb);
908 /* s_sbsize already set */ 966 /* s_sbsize already set */
@@ -922,8 +980,8 @@ magic_found:
922 uspi->s_spc = fs32_to_cpu(sb, usb1->fs_spc); 980 uspi->s_spc = fs32_to_cpu(sb, usb1->fs_spc);
923 uspi->s_ipg = fs32_to_cpu(sb, usb1->fs_ipg); 981 uspi->s_ipg = fs32_to_cpu(sb, usb1->fs_ipg);
924 uspi->s_fpg = fs32_to_cpu(sb, usb1->fs_fpg); 982 uspi->s_fpg = fs32_to_cpu(sb, usb1->fs_fpg);
925 uspi->s_cpc = fs32_to_cpu(sb, usb2->fs_cpc); 983 uspi->s_cpc = fs32_to_cpu(sb, usb2->fs_un.fs_u1.fs_cpc);
926 uspi->s_contigsumsize = fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_contigsumsize); 984 uspi->s_contigsumsize = fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_contigsumsize);
927 uspi->s_qbmask = ufs_get_fs_qbmask(sb, usb3); 985 uspi->s_qbmask = ufs_get_fs_qbmask(sb, usb3);
928 uspi->s_qfmask = ufs_get_fs_qfmask(sb, usb3); 986 uspi->s_qfmask = ufs_get_fs_qfmask(sb, usb3);
929 uspi->s_postblformat = fs32_to_cpu(sb, usb3->fs_postblformat); 987 uspi->s_postblformat = fs32_to_cpu(sb, usb3->fs_postblformat);
@@ -935,12 +993,11 @@ magic_found:
935 * Compute other frequently used values 993
936 */ 994 */
937 uspi->s_fpbmask = uspi->s_fpb - 1; 995 uspi->s_fpbmask = uspi->s_fpb - 1;
938 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 996 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
939 uspi->s_apbshift = uspi->s_bshift - 3; 997 uspi->s_apbshift = uspi->s_bshift - 3;
940 } 998 else
941 else {
942 uspi->s_apbshift = uspi->s_bshift - 2; 999 uspi->s_apbshift = uspi->s_bshift - 2;
943 } 1000
944 uspi->s_2apbshift = uspi->s_apbshift * 2; 1001 uspi->s_2apbshift = uspi->s_apbshift * 2;
945 uspi->s_3apbshift = uspi->s_apbshift * 3; 1002 uspi->s_3apbshift = uspi->s_apbshift * 3;
946 uspi->s_apb = 1 << uspi->s_apbshift; 1003 uspi->s_apb = 1 << uspi->s_apbshift;
@@ -956,7 +1013,7 @@ magic_found:
956 if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == 1013 if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) ==
957 UFS_MOUNT_UFSTYPE_44BSD) 1014 UFS_MOUNT_UFSTYPE_44BSD)
958 uspi->s_maxsymlinklen = 1015 uspi->s_maxsymlinklen =
959 fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_maxsymlinklen); 1016 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
960 1017
961 sbi->s_flags = flags; 1018 sbi->s_flags = flags;
962 1019
@@ -967,7 +1024,7 @@ magic_found:
967 if (!sb->s_root) 1024 if (!sb->s_root)
968 goto dalloc_failed; 1025 goto dalloc_failed;
969 1026
970 1027 ufs_setup_cstotal(sb);
971 /* 1028 /*
972 * Read cylinder group structures 1029 * Read cylinder group structures
973 */ 1030 */
@@ -975,7 +1032,7 @@ magic_found:
975 if (!ufs_read_cylinder_structures(sb)) 1032 if (!ufs_read_cylinder_structures(sb))
976 goto failed; 1033 goto failed;
977 1034
978 UFSD(("EXIT\n")) 1035 UFSD("EXIT\n");
979 return 0; 1036 return 0;
980 1037
981dalloc_failed: 1038dalloc_failed:
@@ -986,15 +1043,16 @@ failed:
986 kfree (uspi); 1043 kfree (uspi);
987 kfree(sbi); 1044 kfree(sbi);
988 sb->s_fs_info = NULL; 1045 sb->s_fs_info = NULL;
989 UFSD(("EXIT (FAILED)\n")) 1046 UFSD("EXIT (FAILED)\n");
990 return -EINVAL; 1047 return -EINVAL;
991 1048
992failed_nomem: 1049failed_nomem:
993 UFSD(("EXIT (NOMEM)\n")) 1050 UFSD("EXIT (NOMEM)\n");
994 return -ENOMEM; 1051 return -ENOMEM;
995} 1052}
996 1053
997static void ufs_write_super (struct super_block *sb) { 1054static void ufs_write_super(struct super_block *sb)
1055{
998 struct ufs_sb_private_info * uspi; 1056 struct ufs_sb_private_info * uspi;
999 struct ufs_super_block_first * usb1; 1057 struct ufs_super_block_first * usb1;
1000 struct ufs_super_block_third * usb3; 1058 struct ufs_super_block_third * usb3;
@@ -1002,7 +1060,7 @@ static void ufs_write_super (struct super_block *sb) {
1002 1060
1003 lock_kernel(); 1061 lock_kernel();
1004 1062
1005 UFSD(("ENTER\n")) 1063 UFSD("ENTER\n");
1006 flags = UFS_SB(sb)->s_flags; 1064 flags = UFS_SB(sb)->s_flags;
1007 uspi = UFS_SB(sb)->s_uspi; 1065 uspi = UFS_SB(sb)->s_uspi;
1008 usb1 = ubh_get_usb_first(uspi); 1066 usb1 = ubh_get_usb_first(uspi);
@@ -1014,26 +1072,27 @@ static void ufs_write_super (struct super_block *sb) {
1014 || (flags & UFS_ST_MASK) == UFS_ST_SUNx86) 1072 || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
1015 ufs_set_fs_state(sb, usb1, usb3, 1073 ufs_set_fs_state(sb, usb1, usb3,
1016 UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)); 1074 UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
1017 ubh_mark_buffer_dirty (USPI_UBH); 1075 ufs_put_cstotal(sb);
1018 } 1076 }
1019 sb->s_dirt = 0; 1077 sb->s_dirt = 0;
1020 UFSD(("EXIT\n")) 1078 UFSD("EXIT\n");
1021 unlock_kernel(); 1079 unlock_kernel();
1022} 1080}
1023 1081
1024static void ufs_put_super (struct super_block *sb) 1082static void ufs_put_super(struct super_block *sb)
1025{ 1083{
1026 struct ufs_sb_info * sbi = UFS_SB(sb); 1084 struct ufs_sb_info * sbi = UFS_SB(sb);
1027 1085
1028 UFSD(("ENTER\n")) 1086 UFSD("ENTER\n");
1029 1087
1030 if (!(sb->s_flags & MS_RDONLY)) 1088 if (!(sb->s_flags & MS_RDONLY))
1031 ufs_put_cylinder_structures (sb); 1089 ufs_put_super_internal(sb);
1032 1090
1033 ubh_brelse_uspi (sbi->s_uspi); 1091 ubh_brelse_uspi (sbi->s_uspi);
1034 kfree (sbi->s_uspi); 1092 kfree (sbi->s_uspi);
1035 kfree (sbi); 1093 kfree (sbi);
1036 sb->s_fs_info = NULL; 1094 sb->s_fs_info = NULL;
1095 UFSD("EXIT\n");
1037 return; 1096 return;
1038} 1097}
1039 1098
@@ -1062,8 +1121,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1062 return -EINVAL; 1121 return -EINVAL;
1063 if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) { 1122 if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
1064 new_mount_opt |= ufstype; 1123 new_mount_opt |= ufstype;
1065 } 1124 } else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
1066 else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
1067 printk("ufstype can't be changed during remount\n"); 1125 printk("ufstype can't be changed during remount\n");
1068 return -EINVAL; 1126 return -EINVAL;
1069 } 1127 }
@@ -1077,20 +1135,19 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1077 * fs was mounted as rw, remounting ro 1135
1078 */ 1136 */
1079 if (*mount_flags & MS_RDONLY) { 1137 if (*mount_flags & MS_RDONLY) {
1080 ufs_put_cylinder_structures(sb); 1138 ufs_put_super_internal(sb);
1081 usb1->fs_time = cpu_to_fs32(sb, get_seconds()); 1139 usb1->fs_time = cpu_to_fs32(sb, get_seconds());
1082 if ((flags & UFS_ST_MASK) == UFS_ST_SUN 1140 if ((flags & UFS_ST_MASK) == UFS_ST_SUN
1083 || (flags & UFS_ST_MASK) == UFS_ST_SUNx86) 1141 || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
1084 ufs_set_fs_state(sb, usb1, usb3, 1142 ufs_set_fs_state(sb, usb1, usb3,
1085 UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time)); 1143 UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
1086 ubh_mark_buffer_dirty (USPI_UBH); 1144 ubh_mark_buffer_dirty (USPI_UBH(uspi));
1087 sb->s_dirt = 0; 1145 sb->s_dirt = 0;
1088 sb->s_flags |= MS_RDONLY; 1146 sb->s_flags |= MS_RDONLY;
1089 } 1147 } else {
1090 /* 1148 /*
1091 * fs was mounted as ro, remounting rw 1149 * fs was mounted as ro, remounting rw
1092 */ 1150 */
1093 else {
1094#ifndef CONFIG_UFS_FS_WRITE 1151#ifndef CONFIG_UFS_FS_WRITE
1095 printk("ufs was compiled with read-only support, " 1152 printk("ufs was compiled with read-only support, "
1096 "can't be mounted as read-write\n"); 1153 "can't be mounted as read-write\n");
@@ -1102,7 +1159,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1102 printk("this ufstype is read-only supported\n"); 1159 printk("this ufstype is read-only supported\n");
1103 return -EINVAL; 1160 return -EINVAL;
1104 } 1161 }
1105 if (!ufs_read_cylinder_structures (sb)) { 1162 if (!ufs_read_cylinder_structures(sb)) {
1106 printk("failed during remounting\n"); 1163 printk("failed during remounting\n");
1107 return -EPERM; 1164 return -EPERM;
1108 } 1165 }
@@ -1113,37 +1170,31 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
1113 return 0; 1170 return 0;
1114} 1171}
1115 1172
1116static int ufs_statfs (struct dentry *dentry, struct kstatfs *buf) 1173static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
1117{ 1174{
1118 struct super_block *sb = dentry->d_sb; 1175 struct super_block *sb = dentry->d_sb;
1119 struct ufs_sb_private_info * uspi; 1176 struct ufs_sb_private_info *uspi= UFS_SB(sb)->s_uspi;
1120 struct ufs_super_block_first * usb1; 1177 unsigned flags = UFS_SB(sb)->s_flags;
1121 struct ufs_super_block * usb; 1178 struct ufs_super_block_first *usb1;
1122 unsigned flags = 0; 1179 struct ufs_super_block_second *usb2;
1180 struct ufs_super_block_third *usb3;
1123 1181
1124 lock_kernel(); 1182 lock_kernel();
1125 1183
1126 uspi = UFS_SB(sb)->s_uspi; 1184 usb1 = ubh_get_usb_first(uspi);
1127 usb1 = ubh_get_usb_first (uspi); 1185 usb2 = ubh_get_usb_second(uspi);
1128 usb = (struct ufs_super_block *) 1186 usb3 = ubh_get_usb_third(uspi);
1129 ((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
1130 1187
1131 flags = UFS_SB(sb)->s_flags;
1132 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 1188 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
1133 buf->f_type = UFS2_MAGIC; 1189 buf->f_type = UFS2_MAGIC;
1134 buf->f_blocks = fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_dsize); 1190 buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize);
1135 buf->f_bfree = ufs_blkstofrags(fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nbfree)) + 1191 } else {
1136 fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_cstotal.cs_nffree);
1137 buf->f_ffree = fs64_to_cpu(sb,
1138 usb->fs_u11.fs_u2.fs_cstotal.cs_nifree);
1139 }
1140 else {
1141 buf->f_type = UFS_MAGIC; 1192 buf->f_type = UFS_MAGIC;
1142 buf->f_blocks = uspi->s_dsize; 1193 buf->f_blocks = uspi->s_dsize;
1143 buf->f_bfree = ufs_blkstofrags(fs32_to_cpu(sb, usb1->fs_cstotal.cs_nbfree)) +
1144 fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree);
1145 buf->f_ffree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree);
1146 } 1194 }
1195 buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
1196 uspi->cs_total.cs_nffree;
1197 buf->f_ffree = uspi->cs_total.cs_nifree;
1147 buf->f_bsize = sb->s_blocksize; 1198 buf->f_bsize = sb->s_blocksize;
1148 buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree)) 1199 buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree))
1149 ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0; 1200 ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0;
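
The net effect of the super.c changes above is that the cylinder-summary totals (fs_cstotal) are cached once in ufs_sb_private_info at mount time, consulted from that cache in ufs_statfs(), and copied back to whichever on-disk superblock variant is in use when the superblock is synced. The following is only a minimal, self-contained sketch of that caching pattern; the struct names and fields are stand-ins, not the real UFS layouts.

#include <stdio.h>
#include <stdint.h>

struct disk_totals {                    /* simplified stand-in for fs_cstotal   */
        uint64_t cs_ndir, cs_nbfree, cs_nifree, cs_nffree;
};

struct sb_private {                     /* stand-in for ufs_sb_private_info     */
        struct disk_totals cs_total;    /* in-memory copy used by statfs et al. */
};

/* mount time: copy the on-disk totals into the private info once */
static void setup_cstotal(struct sb_private *uspi, const struct disk_totals *d)
{
        uspi->cs_total = *d;
}

/* sync/umount time: push the (possibly modified) cached totals back */
static void put_cstotal(const struct sb_private *uspi, struct disk_totals *d)
{
        *d = uspi->cs_total;
}

int main(void)
{
        struct disk_totals on_disk = { 10, 100, 1000, 40 };
        struct sb_private uspi;

        setup_cstotal(&uspi, &on_disk);
        uspi.cs_total.cs_nbfree--;      /* allocations update only the cache */
        put_cstotal(&uspi, &on_disk);
        printf("free blocks written back: %llu\n",
               (unsigned long long)on_disk.cs_nbfree);
        return 0;
}

The real code additionally has to pick between the 32-bit fs_cstotal in the first superblock and the 64-bit copy split across the second and third superblocks, which is what ufs_setup_cstotal() and ufs_put_cstotal() select on above.
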
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 02e86291ef8a..3c3b301f8701 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -49,14 +49,6 @@
49#include "swab.h" 49#include "swab.h"
50#include "util.h" 50#include "util.h"
51 51
52#undef UFS_TRUNCATE_DEBUG
53
54#ifdef UFS_TRUNCATE_DEBUG
55#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
56#else
57#define UFSD(x)
58#endif
59
60/* 52/*
61 * Secure deletion currently doesn't work. It interacts very badly 53 * Secure deletion currently doesn't work. It interacts very badly
62 * with buffers shared with memory mappings, and for that reason 54 * with buffers shared with memory mappings, and for that reason
@@ -82,7 +74,7 @@ static int ufs_trunc_direct (struct inode * inode)
82 unsigned i, tmp; 74 unsigned i, tmp;
83 int retry; 75 int retry;
84 76
85 UFSD(("ENTER\n")) 77 UFSD("ENTER\n");
86 78
87 sb = inode->i_sb; 79 sb = inode->i_sb;
88 uspi = UFS_SB(sb)->s_uspi; 80 uspi = UFS_SB(sb)->s_uspi;
@@ -105,7 +97,7 @@ static int ufs_trunc_direct (struct inode * inode)
105 block2 = ufs_fragstoblks (frag3); 97 block2 = ufs_fragstoblks (frag3);
106 } 98 }
107 99
108 UFSD(("frag1 %u, frag2 %u, block1 %u, block2 %u, frag3 %u, frag4 %u\n", frag1, frag2, block1, block2, frag3, frag4)) 100 UFSD("frag1 %u, frag2 %u, block1 %u, block2 %u, frag3 %u, frag4 %u\n", frag1, frag2, block1, block2, frag3, frag4);
109 101
110 if (frag1 >= frag2) 102 if (frag1 >= frag2)
111 goto next1; 103 goto next1;
@@ -120,9 +112,8 @@ static int ufs_trunc_direct (struct inode * inode)
120 frag1 = ufs_fragnum (frag1); 112 frag1 = ufs_fragnum (frag1);
121 frag2 = ufs_fragnum (frag2); 113 frag2 = ufs_fragnum (frag2);
122 114
123 inode->i_blocks -= (frag2-frag1) << uspi->s_nspfshift;
124 mark_inode_dirty(inode);
125 ufs_free_fragments (inode, tmp + frag1, frag2 - frag1); 115 ufs_free_fragments (inode, tmp + frag1, frag2 - frag1);
116 mark_inode_dirty(inode);
126 frag_to_free = tmp + frag1; 117 frag_to_free = tmp + frag1;
127 118
128next1: 119next1:
@@ -136,8 +127,7 @@ next1:
136 continue; 127 continue;
137 128
138 *p = 0; 129 *p = 0;
139 inode->i_blocks -= uspi->s_nspb; 130
140 mark_inode_dirty(inode);
141 if (free_count == 0) { 131 if (free_count == 0) {
142 frag_to_free = tmp; 132 frag_to_free = tmp;
143 free_count = uspi->s_fpb; 133 free_count = uspi->s_fpb;
@@ -148,6 +138,7 @@ next1:
148 frag_to_free = tmp; 138 frag_to_free = tmp;
149 free_count = uspi->s_fpb; 139 free_count = uspi->s_fpb;
150 } 140 }
141 mark_inode_dirty(inode);
151 } 142 }
152 143
153 if (free_count > 0) 144 if (free_count > 0)
@@ -166,12 +157,12 @@ next1:
166 frag4 = ufs_fragnum (frag4); 157 frag4 = ufs_fragnum (frag4);
167 158
168 *p = 0; 159 *p = 0;
169 inode->i_blocks -= frag4 << uspi->s_nspfshift; 160
170 mark_inode_dirty(inode);
171 ufs_free_fragments (inode, tmp, frag4); 161 ufs_free_fragments (inode, tmp, frag4);
162 mark_inode_dirty(inode);
172 next3: 163 next3:
173 164
174 UFSD(("EXIT\n")) 165 UFSD("EXIT\n");
175 return retry; 166 return retry;
176} 167}
177 168
@@ -186,7 +177,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
186 unsigned frag_to_free, free_count; 177 unsigned frag_to_free, free_count;
187 int retry; 178 int retry;
188 179
189 UFSD(("ENTER\n")) 180 UFSD("ENTER\n");
190 181
191 sb = inode->i_sb; 182 sb = inode->i_sb;
192 uspi = UFS_SB(sb)->s_uspi; 183 uspi = UFS_SB(sb)->s_uspi;
@@ -227,7 +218,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
227 frag_to_free = tmp; 218 frag_to_free = tmp;
228 free_count = uspi->s_fpb; 219 free_count = uspi->s_fpb;
229 } 220 }
230 inode->i_blocks -= uspi->s_nspb; 221
231 mark_inode_dirty(inode); 222 mark_inode_dirty(inode);
232 } 223 }
233 224
@@ -238,26 +229,21 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
238 if (*ubh_get_addr32(ind_ubh,i)) 229 if (*ubh_get_addr32(ind_ubh,i))
239 break; 230 break;
240 if (i >= uspi->s_apb) { 231 if (i >= uspi->s_apb) {
241 if (ubh_max_bcount(ind_ubh) != 1) { 232 tmp = fs32_to_cpu(sb, *p);
242 retry = 1; 233 *p = 0;
243 } 234
244 else { 235 ufs_free_blocks (inode, tmp, uspi->s_fpb);
245 tmp = fs32_to_cpu(sb, *p); 236 mark_inode_dirty(inode);
246 *p = 0; 237 ubh_bforget(ind_ubh);
247 inode->i_blocks -= uspi->s_nspb; 238 ind_ubh = NULL;
248 mark_inode_dirty(inode);
249 ufs_free_blocks (inode, tmp, uspi->s_fpb);
250 ubh_bforget(ind_ubh);
251 ind_ubh = NULL;
252 }
253 } 239 }
254 if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) { 240 if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
255 ubh_ll_rw_block (SWRITE, 1, &ind_ubh); 241 ubh_ll_rw_block(SWRITE, ind_ubh);
256 ubh_wait_on_buffer (ind_ubh); 242 ubh_wait_on_buffer (ind_ubh);
257 } 243 }
258 ubh_brelse (ind_ubh); 244 ubh_brelse (ind_ubh);
259 245
260 UFSD(("EXIT\n")) 246 UFSD("EXIT\n");
261 247
262 return retry; 248 return retry;
263} 249}
@@ -271,7 +257,7 @@ static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
271 __fs32 * dind; 257 __fs32 * dind;
272 int retry = 0; 258 int retry = 0;
273 259
274 UFSD(("ENTER\n")) 260 UFSD("ENTER\n");
275 261
276 sb = inode->i_sb; 262 sb = inode->i_sb;
277 uspi = UFS_SB(sb)->s_uspi; 263 uspi = UFS_SB(sb)->s_uspi;
@@ -306,25 +292,21 @@ static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
306 if (*ubh_get_addr32 (dind_bh, i)) 292 if (*ubh_get_addr32 (dind_bh, i))
307 break; 293 break;
308 if (i >= uspi->s_apb) { 294 if (i >= uspi->s_apb) {
309 if (ubh_max_bcount(dind_bh) != 1) 295 tmp = fs32_to_cpu(sb, *p);
310 retry = 1; 296 *p = 0;
311 else { 297
312 tmp = fs32_to_cpu(sb, *p); 298 ufs_free_blocks(inode, tmp, uspi->s_fpb);
313 *p = 0; 299 mark_inode_dirty(inode);
314 inode->i_blocks -= uspi->s_nspb; 300 ubh_bforget(dind_bh);
315 mark_inode_dirty(inode); 301 dind_bh = NULL;
316 ufs_free_blocks (inode, tmp, uspi->s_fpb);
317 ubh_bforget(dind_bh);
318 dind_bh = NULL;
319 }
320 } 302 }
321 if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) { 303 if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
322 ubh_ll_rw_block (SWRITE, 1, &dind_bh); 304 ubh_ll_rw_block(SWRITE, dind_bh);
323 ubh_wait_on_buffer (dind_bh); 305 ubh_wait_on_buffer (dind_bh);
324 } 306 }
325 ubh_brelse (dind_bh); 307 ubh_brelse (dind_bh);
326 308
327 UFSD(("EXIT\n")) 309 UFSD("EXIT\n");
328 310
329 return retry; 311 return retry;
330} 312}
@@ -339,7 +321,7 @@ static int ufs_trunc_tindirect (struct inode * inode)
339 __fs32 * tind, * p; 321 __fs32 * tind, * p;
340 int retry; 322 int retry;
341 323
342 UFSD(("ENTER\n")) 324 UFSD("ENTER\n");
343 325
344 sb = inode->i_sb; 326 sb = inode->i_sb;
345 uspi = UFS_SB(sb)->s_uspi; 327 uspi = UFS_SB(sb)->s_uspi;
@@ -370,25 +352,21 @@ static int ufs_trunc_tindirect (struct inode * inode)
370 if (*ubh_get_addr32 (tind_bh, i)) 352 if (*ubh_get_addr32 (tind_bh, i))
371 break; 353 break;
372 if (i >= uspi->s_apb) { 354 if (i >= uspi->s_apb) {
373 if (ubh_max_bcount(tind_bh) != 1) 355 tmp = fs32_to_cpu(sb, *p);
374 retry = 1; 356 *p = 0;
375 else { 357
376 tmp = fs32_to_cpu(sb, *p); 358 ufs_free_blocks(inode, tmp, uspi->s_fpb);
377 *p = 0; 359 mark_inode_dirty(inode);
378 inode->i_blocks -= uspi->s_nspb; 360 ubh_bforget(tind_bh);
379 mark_inode_dirty(inode); 361 tind_bh = NULL;
380 ufs_free_blocks (inode, tmp, uspi->s_fpb);
381 ubh_bforget(tind_bh);
382 tind_bh = NULL;
383 }
384 } 362 }
385 if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) { 363 if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
386 ubh_ll_rw_block (SWRITE, 1, &tind_bh); 364 ubh_ll_rw_block(SWRITE, tind_bh);
387 ubh_wait_on_buffer (tind_bh); 365 ubh_wait_on_buffer (tind_bh);
388 } 366 }
389 ubh_brelse (tind_bh); 367 ubh_brelse (tind_bh);
390 368
391 UFSD(("EXIT\n")) 369 UFSD("EXIT\n");
392 return retry; 370 return retry;
393} 371}
394 372
@@ -399,7 +377,7 @@ void ufs_truncate (struct inode * inode)
399 struct ufs_sb_private_info * uspi; 377 struct ufs_sb_private_info * uspi;
400 int retry; 378 int retry;
401 379
402 UFSD(("ENTER\n")) 380 UFSD("ENTER\n");
403 sb = inode->i_sb; 381 sb = inode->i_sb;
404 uspi = UFS_SB(sb)->s_uspi; 382 uspi = UFS_SB(sb)->s_uspi;
405 383
@@ -430,5 +408,5 @@ void ufs_truncate (struct inode * inode)
430 ufsi->i_lastfrag = DIRECT_FRAGMENT; 408 ufsi->i_lastfrag = DIRECT_FRAGMENT;
431 unlock_kernel(); 409 unlock_kernel();
432 mark_inode_dirty(inode); 410 mark_inode_dirty(inode);
433 UFSD(("EXIT\n")) 411 UFSD("EXIT\n");
434} 412}
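
The repeated hunks in truncate.c above collapse the old ubh_max_bcount() retry dance into one pattern: scan the indirect block, and once every slot is zero, clear the pointer that referenced it, free its fragments, mark the inode dirty, and forget the buffer. A schematic userspace version of that pattern, with placeholder types instead of the real UFS buffer heads, could look like this:

#include <stdlib.h>

#define SLOTS_PER_BLOCK 128

struct indirect {                       /* stand-in for an indirect block */
        unsigned int slot[SLOTS_PER_BLOCK];
};

/* returns 1 if the indirect block was empty and has been released */
static int release_if_empty(struct indirect **p)
{
        struct indirect *ind = *p;
        int i;

        for (i = 0; i < SLOTS_PER_BLOCK; i++)
                if (ind->slot[i])
                        return 0;       /* still referenced, keep it */

        *p = NULL;                      /* clear the referencing pointer first ... */
        free(ind);                      /* ... then release the block itself       */
        return 1;
}

int main(void)
{
        struct indirect *ind = calloc(1, sizeof(*ind));

        if (!ind)
                return 1;
        return release_if_empty(&ind) ? 0 : 1;
}
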
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 59acc8f073ac..a2f13f45708b 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -14,15 +14,6 @@
14#include "swab.h" 14#include "swab.h"
15#include "util.h" 15#include "util.h"
16 16
17#undef UFS_UTILS_DEBUG
18
19#ifdef UFS_UTILS_DEBUG
20#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
21#else
22#define UFSD(x)
23#endif
24
25
26struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi, 17struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
27 struct super_block *sb, u64 fragment, u64 size) 18 struct super_block *sb, u64 fragment, u64 size)
28{ 19{
@@ -63,17 +54,17 @@ struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
63 count = size >> uspi->s_fshift; 54 count = size >> uspi->s_fshift;
64 if (count <= 0 || count > UFS_MAXFRAG) 55 if (count <= 0 || count > UFS_MAXFRAG)
65 return NULL; 56 return NULL;
66 USPI_UBH->fragment = fragment; 57 USPI_UBH(uspi)->fragment = fragment;
67 USPI_UBH->count = count; 58 USPI_UBH(uspi)->count = count;
68 for (i = 0; i < count; i++) 59 for (i = 0; i < count; i++)
69 if (!(USPI_UBH->bh[i] = sb_bread(sb, fragment + i))) 60 if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
70 goto failed; 61 goto failed;
71 for (; i < UFS_MAXFRAG; i++) 62 for (; i < UFS_MAXFRAG; i++)
72 USPI_UBH->bh[i] = NULL; 63 USPI_UBH(uspi)->bh[i] = NULL;
73 return USPI_UBH; 64 return USPI_UBH(uspi);
74failed: 65failed:
75 for (j = 0; j < i; j++) 66 for (j = 0; j < i; j++)
76 brelse (USPI_UBH->bh[j]); 67 brelse (USPI_UBH(uspi)->bh[j]);
77 return NULL; 68 return NULL;
78} 69}
79 70
@@ -90,11 +81,11 @@ void ubh_brelse (struct ufs_buffer_head * ubh)
90void ubh_brelse_uspi (struct ufs_sb_private_info * uspi) 81void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
91{ 82{
92 unsigned i; 83 unsigned i;
93 if (!USPI_UBH) 84 if (!USPI_UBH(uspi))
94 return; 85 return;
95 for ( i = 0; i < USPI_UBH->count; i++ ) { 86 for ( i = 0; i < USPI_UBH(uspi)->count; i++ ) {
96 brelse (USPI_UBH->bh[i]); 87 brelse (USPI_UBH(uspi)->bh[i]);
97 USPI_UBH->bh[i] = NULL; 88 USPI_UBH(uspi)->bh[i] = NULL;
98 } 89 }
99} 90}
100 91
@@ -121,13 +112,12 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
121 } 112 }
122} 113}
123 114
124void ubh_ll_rw_block (int rw, unsigned nr, struct ufs_buffer_head * ubh[]) 115void ubh_ll_rw_block(int rw, struct ufs_buffer_head *ubh)
125{ 116{
126 unsigned i;
127 if (!ubh) 117 if (!ubh)
128 return; 118 return;
129 for ( i = 0; i < nr; i++ ) 119
130 ll_rw_block (rw, ubh[i]->count, ubh[i]->bh); 120 ll_rw_block(rw, ubh->count, ubh->bh);
131} 121}
132 122
133void ubh_wait_on_buffer (struct ufs_buffer_head * ubh) 123void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
@@ -139,18 +129,6 @@ void ubh_wait_on_buffer (struct ufs_buffer_head * ubh)
139 wait_on_buffer (ubh->bh[i]); 129 wait_on_buffer (ubh->bh[i]);
140} 130}
141 131
142unsigned ubh_max_bcount (struct ufs_buffer_head * ubh)
143{
144 unsigned i;
145 unsigned max = 0;
146 if (!ubh)
147 return 0;
148 for ( i = 0; i < ubh->count; i++ )
149 if ( atomic_read(&ubh->bh[i]->b_count) > max )
150 max = atomic_read(&ubh->bh[i]->b_count);
151 return max;
152}
153
154void ubh_bforget (struct ufs_buffer_head * ubh) 132void ubh_bforget (struct ufs_buffer_head * ubh)
155{ 133{
156 unsigned i; 134 unsigned i;
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 48d6d9bcc157..406981fff5e7 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -17,10 +17,16 @@
17#define in_range(b,first,len) ((b)>=(first)&&(b)<(first)+(len)) 17#define in_range(b,first,len) ((b)>=(first)&&(b)<(first)+(len))
18 18
19/* 19/*
20 * macros used for retyping 20 * functions used for retyping
21 */ 21 */
22#define UCPI_UBH ((struct ufs_buffer_head *)ucpi) 22static inline struct ufs_buffer_head *UCPI_UBH(struct ufs_cg_private_info *cpi)
23#define USPI_UBH ((struct ufs_buffer_head *)uspi) 23{
24 return &cpi->c_ubh;
25}
26static inline struct ufs_buffer_head *USPI_UBH(struct ufs_sb_private_info *spi)
27{
28 return &spi->s_ubh;
29}
24 30
25 31
26 32
@@ -33,12 +39,12 @@ ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
33{ 39{
34 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { 40 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
35 case UFS_ST_SUN: 41 case UFS_ST_SUN:
36 return fs32_to_cpu(sb, usb3->fs_u2.fs_sun.fs_state); 42 return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state);
37 case UFS_ST_SUNx86: 43 case UFS_ST_SUNx86:
38 return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state); 44 return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
39 case UFS_ST_44BSD: 45 case UFS_ST_44BSD:
40 default: 46 default:
41 return fs32_to_cpu(sb, usb3->fs_u2.fs_44.fs_state); 47 return fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_state);
42 } 48 }
43} 49}
44 50
@@ -48,13 +54,13 @@ ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
48{ 54{
49 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { 55 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
50 case UFS_ST_SUN: 56 case UFS_ST_SUN:
51 usb3->fs_u2.fs_sun.fs_state = cpu_to_fs32(sb, value); 57 usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value);
52 break; 58 break;
53 case UFS_ST_SUNx86: 59 case UFS_ST_SUNx86:
54 usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value); 60 usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
55 break; 61 break;
56 case UFS_ST_44BSD: 62 case UFS_ST_44BSD:
57 usb3->fs_u2.fs_44.fs_state = cpu_to_fs32(sb, value); 63 usb3->fs_un2.fs_44.fs_state = cpu_to_fs32(sb, value);
58 break; 64 break;
59 } 65 }
60} 66}
@@ -64,7 +70,7 @@ ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
64 struct ufs_super_block_third *usb3) 70 struct ufs_super_block_third *usb3)
65{ 71{
66 if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) 72 if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
67 return fs32_to_cpu(sb, usb3->fs_u2.fs_sunx86.fs_npsect); 73 return fs32_to_cpu(sb, usb3->fs_un2.fs_sunx86.fs_npsect);
68 else 74 else
69 return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect); 75 return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
70} 76}
@@ -76,16 +82,16 @@ ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
76 82
77 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { 83 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
78 case UFS_ST_SUN: 84 case UFS_ST_SUN:
79 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qbmask[0]; 85 ((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qbmask[0];
80 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qbmask[1]; 86 ((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qbmask[1];
81 break; 87 break;
82 case UFS_ST_SUNx86: 88 case UFS_ST_SUNx86:
83 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qbmask[0]; 89 ((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qbmask[0];
84 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qbmask[1]; 90 ((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qbmask[1];
85 break; 91 break;
86 case UFS_ST_44BSD: 92 case UFS_ST_44BSD:
87 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qbmask[0]; 93 ((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qbmask[0];
88 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qbmask[1]; 94 ((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qbmask[1];
89 break; 95 break;
90 } 96 }
91 97
@@ -99,16 +105,16 @@ ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
99 105
100 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { 106 switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
101 case UFS_ST_SUN: 107 case UFS_ST_SUN:
102 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sun.fs_qfmask[0]; 108 ((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qfmask[0];
103 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sun.fs_qfmask[1]; 109 ((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qfmask[1];
104 break; 110 break;
105 case UFS_ST_SUNx86: 111 case UFS_ST_SUNx86:
106 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_sunx86.fs_qfmask[0]; 112 ((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qfmask[0];
107 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_sunx86.fs_qfmask[1]; 113 ((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qfmask[1];
108 break; 114 break;
109 case UFS_ST_44BSD: 115 case UFS_ST_44BSD:
110 ((__fs32 *)&tmp)[0] = usb3->fs_u2.fs_44.fs_qfmask[0]; 116 ((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qfmask[0];
111 ((__fs32 *)&tmp)[1] = usb3->fs_u2.fs_44.fs_qfmask[1]; 117 ((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qfmask[1];
112 break; 118 break;
113 } 119 }
114 120
@@ -236,9 +242,8 @@ extern void ubh_brelse (struct ufs_buffer_head *);
236extern void ubh_brelse_uspi (struct ufs_sb_private_info *); 242extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
237extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *); 243extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
238extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int); 244extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
239extern void ubh_ll_rw_block (int, unsigned, struct ufs_buffer_head **); 245extern void ubh_ll_rw_block(int, struct ufs_buffer_head *);
240extern void ubh_wait_on_buffer (struct ufs_buffer_head *); 246extern void ubh_wait_on_buffer (struct ufs_buffer_head *);
241extern unsigned ubh_max_bcount (struct ufs_buffer_head *);
242extern void ubh_bforget (struct ufs_buffer_head *); 247extern void ubh_bforget (struct ufs_buffer_head *);
243extern int ubh_buffer_dirty (struct ufs_buffer_head *); 248extern int ubh_buffer_dirty (struct ufs_buffer_head *);
244#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size) 249#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
@@ -297,40 +302,26 @@ static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
297#define ubh_blkmap(ubh,begin,bit) \ 302#define ubh_blkmap(ubh,begin,bit) \
298 ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb))) 303 ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))
299 304
300
301/*
302 * Macros for access to superblock array structures
303 */
304#define ubh_postbl(ubh,cylno,i) \
305 ((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
306 ? (*(__s16*)(ubh_get_addr(ubh, \
307 (unsigned)(&((struct ufs_super_block *)0)->fs_opostbl) \
308 + (((cylno) * 16 + (i)) << 1) ) )) \
309 : (*(__s16*)(ubh_get_addr(ubh, \
310 uspi->s_postbloff + (((cylno) * uspi->s_nrpos + (i)) << 1) ))))
311
312#define ubh_rotbl(ubh,i) \
313 ((uspi->s_postblformat != UFS_DYNAMICPOSTBLFMT) \
314 ? (*(__u8*)(ubh_get_addr(ubh, \
315 (unsigned)(&((struct ufs_super_block *)0)->fs_space) + (i)))) \
316 : (*(__u8*)(ubh_get_addr(ubh, uspi->s_rotbloff + (i)))))
317
318/* 305/*
319 * Determine the number of available frags given a 306 * Determine the number of available frags given a
320 * percentage to hold in reserve. 307 * percentage to hold in reserve.
321 */ 308 */
322#define ufs_freespace(usb, percentreserved) \ 309static inline u64
323 (ufs_blkstofrags(fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nbfree)) + \ 310ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved)
324 fs32_to_cpu(sb, (usb)->fs_cstotal.cs_nffree) - (uspi->s_dsize * (percentreserved) / 100)) 311{
312 return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
313 uspi->cs_total.cs_nffree -
314 (uspi->s_dsize * (percentreserved) / 100);
315}
325 316
326/* 317/*
327 * Macros to access cylinder group array structures 318 * Macros to access cylinder group array structures
328 */ 319 */
329#define ubh_cg_blktot(ucpi,cylno) \ 320#define ubh_cg_blktot(ucpi,cylno) \
330 (*((__fs32*)ubh_get_addr(UCPI_UBH, (ucpi)->c_btotoff + ((cylno) << 2)))) 321 (*((__fs32*)ubh_get_addr(UCPI_UBH(ucpi), (ucpi)->c_btotoff + ((cylno) << 2))))
331 322
332#define ubh_cg_blks(ucpi,cylno,rpos) \ 323#define ubh_cg_blks(ucpi,cylno,rpos) \
333 (*((__fs16*)ubh_get_addr(UCPI_UBH, \ 324 (*((__fs16*)ubh_get_addr(UCPI_UBH(ucpi), \
334 (ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 )))) 325 (ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))
335 326
336/* 327/*
@@ -508,29 +499,3 @@ static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
508 if (fragsize > 0 && fragsize < uspi->s_fpb) 499 if (fragsize > 0 && fragsize < uspi->s_fpb)
509 fs32_add(sb, &fraglist[fragsize], cnt); 500 fs32_add(sb, &fraglist[fragsize], cnt);
510} 501}
511
512#define ubh_scanc(ubh,begin,size,table,mask) _ubh_scanc_(uspi,ubh,begin,size,table,mask)
513static inline unsigned _ubh_scanc_(struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
514 unsigned begin, unsigned size, unsigned char * table, unsigned char mask)
515{
516 unsigned rest, offset;
517 unsigned char * cp;
518
519
520 offset = begin & ~uspi->s_fmask;
521 begin >>= uspi->s_fshift;
522 for (;;) {
523 if ((offset + size) < uspi->s_fsize)
524 rest = size;
525 else
526 rest = uspi->s_fsize - offset;
527 size -= rest;
528 cp = ubh->bh[begin]->b_data + offset;
529 while ((table[*cp++] & mask) == 0 && --rest);
530 if (rest || !size)
531 break;
532 begin++;
533 offset = 0;
534 }
535 return (size + rest);
536}
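
The util.h hunk above also replaces the UCPI_UBH/USPI_UBH cast macros with inline functions that take the private-info pointer explicitly. Below is a rough standalone sketch of the difference; the structures and names are invented for illustration and only mimic the shape of the real ufs types.

#include <stdio.h>

struct buffer_head_list { int count; };

struct sb_private {
        struct buffer_head_list s_ubh;  /* must stay first for the macro to work */
        int other_state;
};

/* old style: expands to a cast of whatever variable named "spi" happens to
 * be in scope at the call site, with no type checking at all */
#define SPI_UBH ((struct buffer_head_list *)spi)

/* new style: explicit, compiler-checked argument; no first-member assumption */
static inline struct buffer_head_list *spi_ubh(struct sb_private *spi)
{
        return &spi->s_ubh;
}

int main(void)
{
        struct sb_private priv = { { 3 }, 0 };
        struct sb_private *spi = &priv;  /* SPI_UBH silently depends on this name */

        printf("%d %d\n", SPI_UBH->count, spi_ubh(spi)->count);
        return 0;
}

The same hunk converts ufs_freespace() from a macro over the on-disk superblock into an inline function over the cached uspi->cs_total, in line with the super.c changes earlier in this patch.
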
diff --git a/include/asm-alpha/floppy.h b/include/asm-alpha/floppy.h
index e177d4180f83..21816d35ef89 100644
--- a/include/asm-alpha/floppy.h
+++ b/include/asm-alpha/floppy.h
@@ -25,9 +25,8 @@
25#define fd_enable_irq() enable_irq(FLOPPY_IRQ) 25#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
26#define fd_disable_irq() disable_irq(FLOPPY_IRQ) 26#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
27#define fd_cacheflush(addr,size) /* nothing */ 27#define fd_cacheflush(addr,size) /* nothing */
28#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \ 28#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
29 SA_INTERRUPT|SA_SAMPLE_RANDOM, \ 29 SA_INTERRUPT, "floppy", NULL)
30 "floppy", NULL)
31#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); 30#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
32 31
33#ifdef CONFIG_PCI 32#ifdef CONFIG_PCI
diff --git a/include/asm-arm/floppy.h b/include/asm-arm/floppy.h
index 6ea657c886b9..aa0c8d28d8d9 100644
--- a/include/asm-arm/floppy.h
+++ b/include/asm-arm/floppy.h
@@ -25,7 +25,7 @@
25 25
26#define fd_inb(port) inb((port)) 26#define fd_inb(port) inb((port))
27#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\ 27#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
28 SA_INTERRUPT|SA_SAMPLE_RANDOM,"floppy",NULL) 28 SA_INTERRUPT,"floppy",NULL)
29#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL) 29#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
30#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK) 30#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
31#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK) 31#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
diff --git a/include/asm-arm26/floppy.h b/include/asm-arm26/floppy.h
index 9e090ad7e477..a18af069ca28 100644
--- a/include/asm-arm26/floppy.h
+++ b/include/asm-arm26/floppy.h
@@ -22,7 +22,7 @@
22 22
23#define fd_inb(port) inb((port)) 23#define fd_inb(port) inb((port))
24#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\ 24#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
25 SA_INTERRUPT|SA_SAMPLE_RANDOM,"floppy",NULL) 25 SA_INTERRUPT,"floppy",NULL)
26#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL) 26#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
27#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK) 27#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
28#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK) 28#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 0cd9711895fa..845cb67ad8ea 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -38,4 +38,17 @@
38#endif 38#endif
39#endif 39#endif
40 40
41#define WARN_ON_ONCE(condition) \
42({ \
43 static int __warn_once = 1; \
44 int __ret = 0; \
45 \
46 if (unlikely((condition) && __warn_once)) { \
47 __warn_once = 0; \
48 WARN_ON(1); \
49 __ret = 1; \
50 } \
51 __ret; \
52})
53
41#endif 54#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c0caf433a7d7..c74521157461 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
14/* var is in discarded region: offset to particular copy we want */ 14/* var is in discarded region: offset to particular copy we want */
15#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) 15#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
16#define __get_cpu_var(var) per_cpu(var, smp_processor_id()) 16#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
17#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
17 18
18/* A macro to avoid #include hell... */ 19/* A macro to avoid #include hell... */
19#define percpu_modcopy(pcpudst, src, size) \ 20#define percpu_modcopy(pcpudst, src, size) \
@@ -30,6 +31,7 @@ do { \
30 31
31#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) 32#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
32#define __get_cpu_var(var) per_cpu__##var 33#define __get_cpu_var(var) per_cpu__##var
34#define __raw_get_cpu_var(var) per_cpu__##var
33 35
34#endif /* SMP */ 36#endif /* SMP */
35 37
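
As the percpu.h hunk shows, __raw_get_cpu_var() differs from __get_cpu_var() only in resolving the current CPU with raw_smp_processor_id(), i.e. without the debug check that complains when the CPU number is read from a preemptible context. A rough userspace analogue of that checked-versus-raw accessor split, with invented helpers standing in for the kernel ones:

#include <assert.h>
#include <stdio.h>

#define NR_CPUS 2

static int this_cpu;            /* stand-in for "which CPU am I on"     */
static int preempt_disabled;    /* stand-in for preempt_count() != 0    */
static long hits[NR_CPUS];      /* one copy of the variable per CPU     */

static int raw_cpu_id(void)
{
        return this_cpu;
}

static int checked_cpu_id(void)
{
        /* the checked accessor insists the caller pinned itself first */
        assert(preempt_disabled && "cpu id read from preemptible context");
        return this_cpu;
}

#define get_cpu_var(var)      ((var)[checked_cpu_id()])
#define raw_get_cpu_var(var)  ((var)[raw_cpu_id()])

int main(void)
{
        preempt_disabled = 1;
        get_cpu_var(hits)++;            /* fine: caller pinned the CPU      */
        preempt_disabled = 0;
        raw_get_cpu_var(hits)++;        /* allowed: caller accepts the race */
        printf("cpu0 hits: %ld\n", hits[0]);
        return 0;
}
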
diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h
index 03403045c182..9cb2793eb211 100644
--- a/include/asm-i386/floppy.h
+++ b/include/asm-i386/floppy.h
@@ -147,9 +147,8 @@ static int fd_request_irq(void)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, 147 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
148 "floppy", NULL); 148 "floppy", NULL);
149 else 149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt, 150 return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT,
151 SA_INTERRUPT|SA_SAMPLE_RANDOM, 151 "floppy", NULL);
152 "floppy", NULL);
153 152
154} 153}
155 154
diff --git a/include/asm-i386/mach-default/setup_arch_pre.h b/include/asm-i386/mach-default/setup_arch.h
index fb42099e7bd4..fb42099e7bd4 100644
--- a/include/asm-i386/mach-default/setup_arch_pre.h
+++ b/include/asm-i386/mach-default/setup_arch.h
diff --git a/include/asm-i386/mach-default/setup_arch_post.h b/include/asm-i386/mach-default/setup_arch_post.h
deleted file mode 100644
index 2fc4888721f6..000000000000
--- a/include/asm-i386/mach-default/setup_arch_post.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/**
2 * machine_specific_memory_setup - Hook for machine specific memory setup.
3 *
4 * Description:
5 * This is included late in kernel/setup.c so that it can make
6 * use of all of the static functions.
7 **/
8
9static char * __init machine_specific_memory_setup(void)
10{
11 char *who;
12
13
14 who = "BIOS-e820";
15
16 /*
17 * Try to copy the BIOS-supplied E820-map.
18 *
19 * Otherwise fake a memory map; one section from 0k->640k,
20 * the next section from 1mb->appropriate_mem_k
21 */
22 sanitize_e820_map(E820_MAP, &E820_MAP_NR);
23 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
24 unsigned long mem_size;
25
26 /* compare results from other methods and take the greater */
27 if (ALT_MEM_K < EXT_MEM_K) {
28 mem_size = EXT_MEM_K;
29 who = "BIOS-88";
30 } else {
31 mem_size = ALT_MEM_K;
32 who = "BIOS-e801";
33 }
34
35 e820.nr_map = 0;
36 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
37 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
38 }
39 return who;
40}
diff --git a/include/asm-i386/mach-visws/setup_arch_pre.h b/include/asm-i386/mach-visws/setup_arch.h
index b92d6d9a4d3c..b92d6d9a4d3c 100644
--- a/include/asm-i386/mach-visws/setup_arch_pre.h
+++ b/include/asm-i386/mach-visws/setup_arch.h
diff --git a/include/asm-i386/mach-visws/setup_arch_post.h b/include/asm-i386/mach-visws/setup_arch_post.h
deleted file mode 100644
index cdbd895a54b1..000000000000
--- a/include/asm-i386/mach-visws/setup_arch_post.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/* Hook for machine specific memory setup.
2 *
3 * This is included late in kernel/setup.c so that it can make use of all of
4 * the static functions. */
5
6#define MB (1024 * 1024)
7
8unsigned long sgivwfb_mem_phys;
9unsigned long sgivwfb_mem_size;
10
11long long mem_size __initdata = 0;
12
13static char * __init machine_specific_memory_setup(void)
14{
15 long long gfx_mem_size = 8 * MB;
16
17 mem_size = ALT_MEM_K;
18
19 if (!mem_size) {
20 printk(KERN_WARNING "Bootloader didn't set memory size, upgrade it !\n");
21 mem_size = 128 * MB;
22 }
23
24 /*
25 * this hardcodes the graphics memory to 8 MB
26 * it really should be sized dynamically (or at least
27 * set as a boot param)
28 */
29 if (!sgivwfb_mem_size) {
30 printk(KERN_WARNING "Defaulting to 8 MB framebuffer size\n");
31 sgivwfb_mem_size = 8 * MB;
32 }
33
34 /*
35 * Trim to nearest MB
36 */
37 sgivwfb_mem_size &= ~((1 << 20) - 1);
38 sgivwfb_mem_phys = mem_size - gfx_mem_size;
39
40 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
41 add_memory_region(HIGH_MEMORY, mem_size - sgivwfb_mem_size - HIGH_MEMORY, E820_RAM);
42 add_memory_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED);
43
44 return "PROM";
45
46 /* Remove gcc warnings */
47 (void) sanitize_e820_map(NULL, NULL);
48 (void) copy_e820_map(NULL, 0);
49}
diff --git a/include/asm-i386/mach-voyager/setup_arch_pre.h b/include/asm-i386/mach-voyager/setup_arch.h
index 48f7e6ff49a5..84d01ad33459 100644
--- a/include/asm-i386/mach-voyager/setup_arch_pre.h
+++ b/include/asm-i386/mach-voyager/setup_arch.h
@@ -3,7 +3,7 @@
3 3
4/* Hook to call BIOS initialisation function */ 4/* Hook to call BIOS initialisation function */
5 5
6/* for voyager, pass the voyager BIOS/SUS info area to the detection 6/* for voyager, pass the voyager BIOS/SUS info area to the detection
7 * routines */ 7 * routines */
8 8
9#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO); 9#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO);
diff --git a/include/asm-i386/mach-voyager/setup_arch_post.h b/include/asm-i386/mach-voyager/setup_arch_post.h
deleted file mode 100644
index f6f6c2cbc75c..000000000000
--- a/include/asm-i386/mach-voyager/setup_arch_post.h
+++ /dev/null
@@ -1,73 +0,0 @@
1/* Hook for machine specific memory setup.
2 *
3 * This is included late in kernel/setup.c so that it can make use of all of
4 * the static functions. */
5
6static char * __init machine_specific_memory_setup(void)
7{
8 char *who;
9
10 who = "NOT VOYAGER";
11
12 if(voyager_level == 5) {
13 __u32 addr, length;
14 int i;
15
16 who = "Voyager-SUS";
17
18 e820.nr_map = 0;
19 for(i=0; voyager_memory_detect(i, &addr, &length); i++) {
20 add_memory_region(addr, length, E820_RAM);
21 }
22 return who;
23 } else if(voyager_level == 4) {
24 __u32 tom;
25 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
26 /* select the DINO config space */
27 outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
28 /* Read DINO top of memory register */
29 tom = ((inb(catbase + 0x4) & 0xf0) << 16)
30 + ((inb(catbase + 0x5) & 0x7f) << 24);
31
32 if(inb(catbase) != VOYAGER_DINO) {
33 printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
34 tom = (EXT_MEM_K)<<10;
35 }
36 who = "Voyager-TOM";
37 add_memory_region(0, 0x9f000, E820_RAM);
38 /* map from 1M to top of memory */
39 add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM);
40 /* FIXME: Should check the ASICs to see if I need to
41 * take out the 8M window. Just do it at the moment
42 * */
43 add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED);
44 return who;
45 }
46
47 who = "BIOS-e820";
48
49 /*
50 * Try to copy the BIOS-supplied E820-map.
51 *
52 * Otherwise fake a memory map; one section from 0k->640k,
53 * the next section from 1mb->appropriate_mem_k
54 */
55 sanitize_e820_map(E820_MAP, &E820_MAP_NR);
56 if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
57 unsigned long mem_size;
58
59 /* compare results from other methods and take the greater */
60 if (ALT_MEM_K < EXT_MEM_K) {
61 mem_size = EXT_MEM_K;
62 who = "BIOS-88";
63 } else {
64 mem_size = ALT_MEM_K;
65 who = "BIOS-e801";
66 }
67
68 e820.nr_map = 0;
69 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
70 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
71 }
72 return who;
73}
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index ee941457b55d..f737e423029e 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -59,6 +59,21 @@ extern unsigned char boot_params[PARAM_SIZE];
59#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) 59#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF))
60#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) 60#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF))
61 61
62/*
63 * Do NOT EVER look at the BIOS memory size location.
64 * It does not work on many machines.
65 */
66#define LOWMEMSIZE() (0x9f000)
67
68struct e820entry;
69
70char * __init machine_specific_memory_setup(void);
71
72int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
73int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
74void __init add_memory_region(unsigned long long start,
75 unsigned long long size, int type);
76
62#endif /* __ASSEMBLY__ */ 77#endif /* __ASSEMBLY__ */
63 78
64#endif /* _i386_SETUP_H */ 79#endif /* _i386_SETUP_H */
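With these declarations exported from asm-i386/setup.h, each i386 subarchitecture can provide machine_specific_memory_setup() as ordinary C code instead of a late-included header. A minimal sketch follows, closely modelled on the default BIOS-e820 path removed from setup_arch_post.h above; it assumes the usual setup.c context (E820_MAP, E820_MAP_NR, ALT_MEM_K, EXT_MEM_K, HIGH_MEMORY and the e820 map available) and is not tied to any particular subarchitecture.

    /* Sketch of a subarch memory-setup hook built on the declarations above;
     * it mirrors the removed default implementation, not any real board. */
    static char * __init machine_specific_memory_setup(void)
    {
    	char *who = "BIOS-e820";

    	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
    	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
    		unsigned long mem_size;

    		/* no usable e820 data: compare BIOS-88 and BIOS-e801, take the greater */
    		if (ALT_MEM_K < EXT_MEM_K) {
    			mem_size = EXT_MEM_K;
    			who = "BIOS-88";
    		} else {
    			mem_size = ALT_MEM_K;
    			who = "BIOS-e801";
    		}

    		e820.nr_map = 0;
    		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
    		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
    	}
    	return who;
    }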
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 8462f8e0e658..54d905ebc63d 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -390,8 +390,12 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
390 const void *from, unsigned long n); 390 const void *from, unsigned long n);
391unsigned long __must_check __copy_from_user_ll(void *to, 391unsigned long __must_check __copy_from_user_ll(void *to,
392 const void __user *from, unsigned long n); 392 const void __user *from, unsigned long n);
393unsigned long __must_check __copy_from_user_ll_nozero(void *to,
394 const void __user *from, unsigned long n);
393unsigned long __must_check __copy_from_user_ll_nocache(void *to, 395unsigned long __must_check __copy_from_user_ll_nocache(void *to,
394 const void __user *from, unsigned long n); 396 const void __user *from, unsigned long n);
397unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
398 const void __user *from, unsigned long n);
395 399
396/* 400/*
397 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault 401 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -458,10 +462,41 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
458 * 462 *
459 * If some data could not be copied, this function will pad the copied 463 * If some data could not be copied, this function will pad the copied
460 * data to the requested size using zero bytes. 464 * data to the requested size using zero bytes.
465 *
466 * An alternate version - __copy_from_user_inatomic() - may be called from
467 * atomic context and will fail rather than sleep. In this case the
468 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
469 * for explanation of why this is needed.
461 */ 470 */
462static __always_inline unsigned long 471static __always_inline unsigned long
463__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) 472__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
464{ 473{
474 /* Avoid zeroing the tail if the copy fails..
475 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
476 * but as the zeroing behaviour is only significant when n is not
477 * constant, that shouldn't be a problem.
478 */
479 if (__builtin_constant_p(n)) {
480 unsigned long ret;
481
482 switch (n) {
483 case 1:
484 __get_user_size(*(u8 *)to, from, 1, ret, 1);
485 return ret;
486 case 2:
487 __get_user_size(*(u16 *)to, from, 2, ret, 2);
488 return ret;
489 case 4:
490 __get_user_size(*(u32 *)to, from, 4, ret, 4);
491 return ret;
492 }
493 }
494 return __copy_from_user_ll_nozero(to, from, n);
495}
496static __always_inline unsigned long
497__copy_from_user(void *to, const void __user *from, unsigned long n)
498{
499 might_sleep();
465 if (__builtin_constant_p(n)) { 500 if (__builtin_constant_p(n)) {
466 unsigned long ret; 501 unsigned long ret;
467 502
@@ -482,9 +517,10 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
482 517
483#define ARCH_HAS_NOCACHE_UACCESS 518#define ARCH_HAS_NOCACHE_UACCESS
484 519
485static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, 520static __always_inline unsigned long __copy_from_user_nocache(void *to,
486 const void __user *from, unsigned long n) 521 const void __user *from, unsigned long n)
487{ 522{
523 might_sleep();
488 if (__builtin_constant_p(n)) { 524 if (__builtin_constant_p(n)) {
489 unsigned long ret; 525 unsigned long ret;
490 526
@@ -504,17 +540,9 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
504} 540}
505 541
506static __always_inline unsigned long 542static __always_inline unsigned long
507__copy_from_user(void *to, const void __user *from, unsigned long n) 543__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n)
508{ 544{
509 might_sleep(); 545 return __copy_from_user_ll_nocache_nozero(to, from, n);
510 return __copy_from_user_inatomic(to, from, n);
511}
512
513static __always_inline unsigned long
514__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
515{
516 might_sleep();
517 return __copy_from_user_inatomic_nocache(to, from, n);
518} 546}
519 547
520unsigned long __must_check copy_to_user(void __user *to, 548unsigned long __must_check copy_to_user(void __user *to,
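The new comment above distinguishes the two copy variants: __copy_from_user() may sleep and zero-pads the destination on a fault, while __copy_from_user_inatomic() is safe in atomic context but leaves the uncopied tail untouched. A hedged sketch of the calling pattern follows; copy_cmd_from_user() and struct my_cmd are purely hypothetical, access_ok() on the user pointer is assumed to have been checked already, and the -EAGAIN retry convention is only an illustration.

    /* Hypothetical helper showing the two entry points declared above. */
    static int copy_cmd_from_user(struct my_cmd *cmd,		/* hypothetical type */
    			      const void __user *ubuf,
    			      int atomic_ctx)
    {
    	unsigned long left;

    	if (atomic_ctx)
    		/* no sleeping, and the uncopied tail is NOT zeroed */
    		left = __copy_from_user_inatomic(cmd, ubuf, sizeof(*cmd));
    	else
    		/* may sleep; zero-pads the destination on a fault */
    		left = __copy_from_user(cmd, ubuf, sizeof(*cmd));

    	if (left) {
    		if (atomic_ctx)
    			return -EAGAIN;	/* caller retries from sleeping context */
    		return -EFAULT;
    	}
    	return 0;
    }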
diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h
index ae357d504fba..24d898b650c5 100644
--- a/include/asm-ia64/percpu.h
+++ b/include/asm-ia64/percpu.h
@@ -42,6 +42,7 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
42 42
43#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) 43#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
44#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset))) 44#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
45#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
45 46
46extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size); 47extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
47extern void setup_per_cpu_areas (void); 48extern void setup_per_cpu_areas (void);
@@ -51,6 +52,7 @@ extern void *per_cpu_init(void);
51 52
52#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) 53#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
53#define __get_cpu_var(var) per_cpu__##var 54#define __get_cpu_var(var) per_cpu__##var
55#define __raw_get_cpu_var(var) per_cpu__##var
54#define per_cpu_init() (__phys_per_cpu_start) 56#define per_cpu_init() (__phys_per_cpu_start)
55 57
56#endif /* SMP */ 58#endif /* SMP */
diff --git a/include/asm-m68k/amigaints.h b/include/asm-m68k/amigaints.h
index aa968d014bb6..7c8713468fd2 100644
--- a/include/asm-m68k/amigaints.h
+++ b/include/asm-m68k/amigaints.h
@@ -13,6 +13,8 @@
13#ifndef _ASMm68k_AMIGAINTS_H_ 13#ifndef _ASMm68k_AMIGAINTS_H_
14#define _ASMm68k_AMIGAINTS_H_ 14#define _ASMm68k_AMIGAINTS_H_
15 15
16#include <asm/irq.h>
17
16/* 18/*
17** Amiga Interrupt sources. 19** Amiga Interrupt sources.
18** 20**
@@ -23,72 +25,52 @@
23#define CIA_IRQS (5) 25#define CIA_IRQS (5)
24#define AMI_IRQS (32) /* AUTO_IRQS+AMI_STD_IRQS+2*CIA_IRQS */ 26#define AMI_IRQS (32) /* AUTO_IRQS+AMI_STD_IRQS+2*CIA_IRQS */
25 27
26/* vertical blanking interrupt */ 28/* builtin serial port interrupts */
27#define IRQ_AMIGA_VERTB 0 29#define IRQ_AMIGA_TBE (IRQ_USER+0)
30#define IRQ_AMIGA_RBF (IRQ_USER+11)
28 31
29/* copper interrupt */ 32/* floppy disk interrupts */
30#define IRQ_AMIGA_COPPER 1 33#define IRQ_AMIGA_DSKBLK (IRQ_USER+1)
34#define IRQ_AMIGA_DSKSYN (IRQ_USER+12)
31 35
32/* Audio interrupts */ 36/* software interrupts */
33#define IRQ_AMIGA_AUD0 2 37#define IRQ_AMIGA_SOFT (IRQ_USER+2)
34#define IRQ_AMIGA_AUD1 3
35#define IRQ_AMIGA_AUD2 4
36#define IRQ_AMIGA_AUD3 5
37 38
38/* Blitter done interrupt */ 39/* interrupts from external hardware */
39#define IRQ_AMIGA_BLIT 6 40#define IRQ_AMIGA_PORTS IRQ_AUTO_2
41#define IRQ_AMIGA_EXTER IRQ_AUTO_6
40 42
41/* floppy disk interrupts */ 43/* copper interrupt */
42#define IRQ_AMIGA_DSKSYN 7 44#define IRQ_AMIGA_COPPER (IRQ_USER+4)
43#define IRQ_AMIGA_DSKBLK 8
44 45
45/* builtin serial port interrupts */ 46/* vertical blanking interrupt */
46#define IRQ_AMIGA_RBF 9 47#define IRQ_AMIGA_VERTB (IRQ_USER+5)
47#define IRQ_AMIGA_TBE 10
48 48
49/* software interrupts */ 49/* Blitter done interrupt */
50#define IRQ_AMIGA_SOFT 11 50#define IRQ_AMIGA_BLIT (IRQ_USER+6)
51 51
52/* interrupts from external hardware */ 52/* Audio interrupts */
53#define IRQ_AMIGA_PORTS 12 53#define IRQ_AMIGA_AUD0 (IRQ_USER+7)
54#define IRQ_AMIGA_EXTER 13 54#define IRQ_AMIGA_AUD1 (IRQ_USER+8)
55#define IRQ_AMIGA_AUD2 (IRQ_USER+9)
56#define IRQ_AMIGA_AUD3 (IRQ_USER+10)
55 57
56/* CIA interrupt sources */ 58/* CIA interrupt sources */
57#define IRQ_AMIGA_CIAA 14 59#define IRQ_AMIGA_CIAA (IRQ_USER+14)
58#define IRQ_AMIGA_CIAA_TA 14 60#define IRQ_AMIGA_CIAA_TA (IRQ_USER+14)
59#define IRQ_AMIGA_CIAA_TB 15 61#define IRQ_AMIGA_CIAA_TB (IRQ_USER+15)
60#define IRQ_AMIGA_CIAA_ALRM 16 62#define IRQ_AMIGA_CIAA_ALRM (IRQ_USER+16)
61#define IRQ_AMIGA_CIAA_SP 17 63#define IRQ_AMIGA_CIAA_SP (IRQ_USER+17)
62#define IRQ_AMIGA_CIAA_FLG 18 64#define IRQ_AMIGA_CIAA_FLG (IRQ_USER+18)
63#define IRQ_AMIGA_CIAB 19 65#define IRQ_AMIGA_CIAB (IRQ_USER+19)
64#define IRQ_AMIGA_CIAB_TA 19 66#define IRQ_AMIGA_CIAB_TA (IRQ_USER+19)
65#define IRQ_AMIGA_CIAB_TB 20 67#define IRQ_AMIGA_CIAB_TB (IRQ_USER+20)
66#define IRQ_AMIGA_CIAB_ALRM 21 68#define IRQ_AMIGA_CIAB_ALRM (IRQ_USER+21)
67#define IRQ_AMIGA_CIAB_SP 22 69#define IRQ_AMIGA_CIAB_SP (IRQ_USER+22)
68#define IRQ_AMIGA_CIAB_FLG 23 70#define IRQ_AMIGA_CIAB_FLG (IRQ_USER+23)
69
70/* auto-vector interrupts */
71#define IRQ_AMIGA_AUTO 24
72#define IRQ_AMIGA_AUTO_0 24 /* This is just a dummy */
73#define IRQ_AMIGA_AUTO_1 25
74#define IRQ_AMIGA_AUTO_2 26
75#define IRQ_AMIGA_AUTO_3 27
76#define IRQ_AMIGA_AUTO_4 28
77#define IRQ_AMIGA_AUTO_5 29
78#define IRQ_AMIGA_AUTO_6 30
79#define IRQ_AMIGA_AUTO_7 31
80
81#define IRQ_FLOPPY IRQ_AMIGA_DSKBLK
82 71
83/* INTREQR masks */
84#define IRQ1_MASK 0x0007 /* INTREQR mask for IRQ 1 */
85#define IRQ2_MASK 0x0008 /* INTREQR mask for IRQ 2 */
86#define IRQ3_MASK 0x0070 /* INTREQR mask for IRQ 3 */
87#define IRQ4_MASK 0x0780 /* INTREQR mask for IRQ 4 */
88#define IRQ5_MASK 0x1800 /* INTREQR mask for IRQ 5 */
89#define IRQ6_MASK 0x2000 /* INTREQR mask for IRQ 6 */
90#define IRQ7_MASK 0x4000 /* INTREQR mask for IRQ 7 */
91 72
73/* INTREQR masks */
92#define IF_SETCLR 0x8000 /* set/clr bit */ 74#define IF_SETCLR 0x8000 /* set/clr bit */
93#define IF_INTEN 0x4000 /* master interrupt bit in INT* registers */ 75#define IF_INTEN 0x4000 /* master interrupt bit in INT* registers */
94#define IF_EXTER 0x2000 /* external level 6 and CIA B interrupt */ 76#define IF_EXTER 0x2000 /* external level 6 and CIA B interrupt */
@@ -106,9 +88,6 @@
106#define IF_DSKBLK 0x0002 /* diskblock DMA finished */ 88#define IF_DSKBLK 0x0002 /* diskblock DMA finished */
107#define IF_TBE 0x0001 /* serial transmit buffer empty interrupt */ 89#define IF_TBE 0x0001 /* serial transmit buffer empty interrupt */
108 90
109extern void amiga_do_irq(int irq, struct pt_regs *fp);
110extern void amiga_do_irq_list(int irq, struct pt_regs *fp);
111
112/* CIA interrupt control register bits */ 91/* CIA interrupt control register bits */
113 92
114#define CIA_ICR_TA 0x01 93#define CIA_ICR_TA 0x01
@@ -125,6 +104,7 @@ extern void amiga_do_irq_list(int irq, struct pt_regs *fp);
125 104
126extern struct ciabase ciaa_base, ciab_base; 105extern struct ciabase ciaa_base, ciab_base;
127 106
107extern void cia_init_IRQ(struct ciabase *base);
128extern unsigned char cia_set_irq(struct ciabase *base, unsigned char mask); 108extern unsigned char cia_set_irq(struct ciabase *base, unsigned char mask);
129extern unsigned char cia_able_irq(struct ciabase *base, unsigned char mask); 109extern unsigned char cia_able_irq(struct ciabase *base, unsigned char mask);
130 110
diff --git a/include/asm-m68k/apollohw.h b/include/asm-m68k/apollohw.h
index 4304e1c33e17..a1373b9aa281 100644
--- a/include/asm-m68k/apollohw.h
+++ b/include/asm-m68k/apollohw.h
@@ -3,6 +3,8 @@
3#ifndef _ASMm68k_APOLLOHW_H_ 3#ifndef _ASMm68k_APOLLOHW_H_
4#define _ASMm68k_APOLLOHW_H_ 4#define _ASMm68k_APOLLOHW_H_
5 5
6#include <linux/types.h>
7
6/* 8/*
7 apollo models 9 apollo models
8*/ 10*/
@@ -101,4 +103,6 @@ extern u_long timer_physaddr;
101 103
102#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE) 104#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE)
103 105
106#define IRQ_APOLLO IRQ_USER
107
104#endif 108#endif
diff --git a/include/asm-m68k/atari_stdma.h b/include/asm-m68k/atari_stdma.h
index 64f92880ce43..b4eadf852738 100644
--- a/include/asm-m68k/atari_stdma.h
+++ b/include/asm-m68k/atari_stdma.h
@@ -3,7 +3,7 @@
3#define _atari_stdma_h 3#define _atari_stdma_h
4 4
5 5
6#include <asm/irq.h> 6#include <linux/interrupt.h>
7 7
8 8
9/***************************** Prototypes *****************************/ 9/***************************** Prototypes *****************************/
diff --git a/include/asm-m68k/atariints.h b/include/asm-m68k/atariints.h
index 42952c890593..0ed454fc24bb 100644
--- a/include/asm-m68k/atariints.h
+++ b/include/asm-m68k/atariints.h
@@ -45,17 +45,6 @@
45#define IRQ_TYPE_FAST 1 45#define IRQ_TYPE_FAST 1
46#define IRQ_TYPE_PRIO 2 46#define IRQ_TYPE_PRIO 2
47 47
48#define IRQ_SPURIOUS (0)
49
50/* auto-vector interrupts */
51#define IRQ_AUTO_1 (1)
52#define IRQ_AUTO_2 (2)
53#define IRQ_AUTO_3 (3)
54#define IRQ_AUTO_4 (4)
55#define IRQ_AUTO_5 (5)
56#define IRQ_AUTO_6 (6)
57#define IRQ_AUTO_7 (7)
58
59/* ST-MFP interrupts */ 48/* ST-MFP interrupts */
60#define IRQ_MFP_BUSY (8) 49#define IRQ_MFP_BUSY (8)
61#define IRQ_MFP_DCD (9) 50#define IRQ_MFP_DCD (9)
diff --git a/include/asm-m68k/bvme6000hw.h b/include/asm-m68k/bvme6000hw.h
index 28a859b03959..f40d2f8510ee 100644
--- a/include/asm-m68k/bvme6000hw.h
+++ b/include/asm-m68k/bvme6000hw.h
@@ -109,23 +109,23 @@ typedef struct {
109 109
110#define BVME_IRQ_TYPE_PRIO 0 110#define BVME_IRQ_TYPE_PRIO 0
111 111
112#define BVME_IRQ_PRN 0x54 112#define BVME_IRQ_PRN (IRQ_USER+20)
113#define BVME_IRQ_I596 0x1a 113#define BVME_IRQ_TIMER (IRQ_USER+25)
114#define BVME_IRQ_SCSI 0x1b 114#define BVME_IRQ_I596 IRQ_AUTO_2
115#define BVME_IRQ_TIMER 0x59 115#define BVME_IRQ_SCSI IRQ_AUTO_3
116#define BVME_IRQ_RTC 0x1e 116#define BVME_IRQ_RTC IRQ_AUTO_6
117#define BVME_IRQ_ABORT 0x1f 117#define BVME_IRQ_ABORT IRQ_AUTO_7
118 118
119/* SCC interrupts */ 119/* SCC interrupts */
120#define BVME_IRQ_SCC_BASE 0x40 120#define BVME_IRQ_SCC_BASE IRQ_USER
121#define BVME_IRQ_SCCB_TX 0x40 121#define BVME_IRQ_SCCB_TX IRQ_USER
122#define BVME_IRQ_SCCB_STAT 0x42 122#define BVME_IRQ_SCCB_STAT (IRQ_USER+2)
123#define BVME_IRQ_SCCB_RX 0x44 123#define BVME_IRQ_SCCB_RX (IRQ_USER+4)
124#define BVME_IRQ_SCCB_SPCOND 0x46 124#define BVME_IRQ_SCCB_SPCOND (IRQ_USER+6)
125#define BVME_IRQ_SCCA_TX 0x48 125#define BVME_IRQ_SCCA_TX (IRQ_USER+8)
126#define BVME_IRQ_SCCA_STAT 0x4a 126#define BVME_IRQ_SCCA_STAT (IRQ_USER+10)
127#define BVME_IRQ_SCCA_RX 0x4c 127#define BVME_IRQ_SCCA_RX (IRQ_USER+12)
128#define BVME_IRQ_SCCA_SPCOND 0x4e 128#define BVME_IRQ_SCCA_SPCOND (IRQ_USER+14)
129 129
130/* Address control registers */ 130/* Address control registers */
131 131
diff --git a/include/asm-m68k/cacheflush.h b/include/asm-m68k/cacheflush.h
index 8aba971b1368..24d3ff449135 100644
--- a/include/asm-m68k/cacheflush.h
+++ b/include/asm-m68k/cacheflush.h
@@ -3,26 +3,30 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5 5
6/* cache code */
7#define FLUSH_I_AND_D (0x00000808)
8#define FLUSH_I (0x00000008)
9
6/* 10/*
7 * Cache handling functions 11 * Cache handling functions
8 */ 12 */
9 13
10#define flush_icache() \ 14static inline void flush_icache(void)
11({ \ 15{
12 if (CPU_IS_040_OR_060) \ 16 if (CPU_IS_040_OR_060)
13 __asm__ __volatile__("nop\n\t" \ 17 asm volatile ( "nop\n"
14 ".chip 68040\n\t" \ 18 " .chip 68040\n"
15 "cinva %%ic\n\t" \ 19 " cpusha %bc\n"
16 ".chip 68k" : ); \ 20 " .chip 68k");
17 else { \ 21 else {
18 unsigned long _tmp; \ 22 unsigned long tmp;
19 __asm__ __volatile__("movec %%cacr,%0\n\t" \ 23 asm volatile ( "movec %%cacr,%0\n"
20 "orw %1,%0\n\t" \ 24 " or.w %1,%0\n"
21 "movec %0,%%cacr" \ 25 " movec %0,%%cacr"
22 : "=&d" (_tmp) \ 26 : "=&d" (tmp)
23 : "id" (FLUSH_I)); \ 27 : "id" (FLUSH_I));
24 } \ 28 }
25}) 29}
26 30
27/* 31/*
28 * invalidate the cache for the specified memory range. 32 * invalidate the cache for the specified memory range.
@@ -43,10 +47,6 @@ extern void cache_push(unsigned long paddr, int len);
43 */ 47 */
44extern void cache_push_v(unsigned long vaddr, int len); 48extern void cache_push_v(unsigned long vaddr, int len);
45 49
46/* cache code */
47#define FLUSH_I_AND_D (0x00000808)
48#define FLUSH_I (0x00000008)
49
50/* This is needed whenever the virtual mapping of the current 50/* This is needed whenever the virtual mapping of the current
51 process changes. */ 51 process changes. */
52#define __flush_cache_all() \ 52#define __flush_cache_all() \
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index dffd59cf1364..cebbb03370ec 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -1,11 +1,91 @@
1#ifndef _M68K_DMA_MAPPING_H 1#ifndef _M68K_DMA_MAPPING_H
2#define _M68K_DMA_MAPPING_H 2#define _M68K_DMA_MAPPING_H
3 3
4#include <asm/cache.h>
4 5
5#ifdef CONFIG_PCI 6struct scatterlist;
6#include <asm-generic/dma-mapping.h> 7
7#else 8static inline int dma_supported(struct device *dev, u64 mask)
8#include <asm-generic/dma-mapping-broken.h> 9{
9#endif 10 return 1;
11}
12
13static inline int dma_set_mask(struct device *dev, u64 mask)
14{
15 return 0;
16}
17
18static inline int dma_get_cache_alignment(void)
19{
20 return 1 << L1_CACHE_SHIFT;
21}
22
23static inline int dma_is_consistent(dma_addr_t dma_addr)
24{
25 return 0;
26}
27
28extern void *dma_alloc_coherent(struct device *, size_t,
29 dma_addr_t *, int);
30extern void dma_free_coherent(struct device *, size_t,
31 void *, dma_addr_t);
32
33static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
34 dma_addr_t *handle, int flag)
35{
36 return dma_alloc_coherent(dev, size, handle, flag);
37}
38static inline void dma_free_noncoherent(struct device *dev, size_t size,
39 void *addr, dma_addr_t handle)
40{
41 dma_free_coherent(dev, size, addr, handle);
42}
43static inline void dma_cache_sync(void *vaddr, size_t size,
44 enum dma_data_direction dir)
45{
46 /* we use coherent allocation, so not much to do here. */
47}
48
49extern dma_addr_t dma_map_single(struct device *, void *, size_t,
50 enum dma_data_direction);
51static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
52 size_t size, enum dma_data_direction dir)
53{
54}
55
56extern dma_addr_t dma_map_page(struct device *, struct page *,
57 unsigned long, size_t size,
58 enum dma_data_direction);
59static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
60 size_t size, enum dma_data_direction dir)
61{
62}
63
64extern int dma_map_sg(struct device *, struct scatterlist *, int,
65 enum dma_data_direction);
66static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
67 int nhwentries, enum dma_data_direction dir)
68{
69}
70
71extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
72 enum dma_data_direction);
73extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
74 enum dma_data_direction);
75
76static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
77 size_t size, enum dma_data_direction dir)
78{
79}
80
81static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
82 int nents, enum dma_data_direction dir)
83{
84}
85
86static inline int dma_mapping_error(dma_addr_t handle)
87{
88 return 0;
89}
10 90
11#endif /* _M68K_DMA_MAPPING_H */ 91#endif /* _M68K_DMA_MAPPING_H */
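The rewritten asm-m68k/dma-mapping.h gives m68k the standard coherent and streaming DMA calls instead of the previous generic stubs. The fragment below is a sketch of typical driver usage under the declarations above; the device pointer, buffer sizes and my_driver_dma_setup() name are hypothetical, and the usual headers (linux/dma-mapping.h, linux/slab.h) are assumed to be included.

    /* Hypothetical driver fragment using the m68k DMA API declared above. */
    static int my_driver_dma_setup(struct device *dev)
    {
    	dma_addr_t ring_dma, buf_dma;
    	void *ring;
    	void *buf = kmalloc(1024, GFP_KERNEL);

    	if (!buf)
    		return -ENOMEM;

    	/* coherent allocation for a descriptor ring */
    	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
    	if (!ring) {
    		kfree(buf);
    		return -ENOMEM;
    	}

    	/* streaming mapping for a one-off transfer to the device */
    	buf_dma = dma_map_single(dev, buf, 1024, DMA_TO_DEVICE);

    	/* ... program the hardware with ring_dma and buf_dma ... */

    	dma_unmap_single(dev, buf_dma, 1024, DMA_TO_DEVICE);
    	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
    	kfree(buf);
    	return 0;
    }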
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index 9727ca9d9f26..f4ae7d8feac6 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -1,13 +1,9 @@
1#ifndef _M68K_IRQ_H_ 1#ifndef _M68K_IRQ_H_
2#define _M68K_IRQ_H_ 2#define _M68K_IRQ_H_
3 3
4#include <linux/interrupt.h> 4#include <linux/linkage.h>
5 5#include <linux/hardirq.h>
6/* 6#include <linux/spinlock_types.h>
7 * # of m68k interrupts
8 */
9
10#define SYS_IRQS 8
11 7
12/* 8/*
13 * This should be the same as the max(NUM_X_SOURCES) for all the 9 * This should be the same as the max(NUM_X_SOURCES) for all the
@@ -15,10 +11,20 @@
15 * Currently the Atari has 72 and the Amiga 24, but if both are 11 * Currently the Atari has 72 and the Amiga 24, but if both are
16 * supported in the kernel it is better to make room for 72. 12 * supported in the kernel it is better to make room for 72.
17 */ 13 */
18#if defined(CONFIG_ATARI) || defined(CONFIG_MAC) 14#if defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
19#define NR_IRQS (72+SYS_IRQS) 15#define NR_IRQS 200
16#elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
17#define NR_IRQS 72
18#elif defined(CONFIG_Q40)
19#define NR_IRQS 43
20#elif defined(CONFIG_AMIGA)
21#define NR_IRQS 32
22#elif defined(CONFIG_APOLLO)
23#define NR_IRQS 24
24#elif defined(CONFIG_HP300)
25#define NR_IRQS 8
20#else 26#else
21#define NR_IRQS (24+SYS_IRQS) 27#error unknown nr of irqs
22#endif 28#endif
23 29
24/* 30/*
@@ -40,53 +46,25 @@
40 * that routine requires service. 46 * that routine requires service.
41 */ 47 */
42 48
43#define IRQ1 (1) /* level 1 interrupt */ 49#define IRQ_SPURIOUS 0
44#define IRQ2 (2) /* level 2 interrupt */
45#define IRQ3 (3) /* level 3 interrupt */
46#define IRQ4 (4) /* level 4 interrupt */
47#define IRQ5 (5) /* level 5 interrupt */
48#define IRQ6 (6) /* level 6 interrupt */
49#define IRQ7 (7) /* level 7 interrupt (non-maskable) */
50 50
51/* 51#define IRQ_AUTO_1 1 /* level 1 interrupt */
52 * "Generic" interrupt sources 52#define IRQ_AUTO_2 2 /* level 2 interrupt */
53 */ 53#define IRQ_AUTO_3 3 /* level 3 interrupt */
54 54#define IRQ_AUTO_4 4 /* level 4 interrupt */
55#define IRQ_SCHED_TIMER (8) /* interrupt source for scheduling timer */ 55#define IRQ_AUTO_5 5 /* level 5 interrupt */
56#define IRQ_AUTO_6 6 /* level 6 interrupt */
57#define IRQ_AUTO_7 7 /* level 7 interrupt (non-maskable) */
56 58
57static __inline__ int irq_canonicalize(int irq) 59#define IRQ_USER 8
58{
59 return irq;
60}
61
62/*
63 * Machine specific interrupt sources.
64 *
65 * Adding an interrupt service routine for a source with this bit
66 * set indicates a special machine specific interrupt source.
67 * The machine specific files define these sources.
68 *
69 * The IRQ_MACHSPEC bit is now gone - the only thing it did was to
70 * introduce unnecessary overhead.
71 *
72 * All interrupt handling is actually machine specific so it is better
73 * to use function pointers, as used by the Sparc port, and select the
74 * interrupt handling functions when initializing the kernel. This way
75 * we save some unnecessary overhead at run-time.
76 * 01/11/97 - Jes
77 */
78 60
79extern void (*enable_irq)(unsigned int); 61extern unsigned int irq_canonicalize(unsigned int irq);
80extern void (*disable_irq)(unsigned int); 62extern void enable_irq(unsigned int);
63extern void disable_irq(unsigned int);
81#define disable_irq_nosync disable_irq 64#define disable_irq_nosync disable_irq
82 65
83struct pt_regs; 66struct pt_regs;
84 67
85extern int cpu_request_irq(unsigned int,
86 irqreturn_t (*)(int, void *, struct pt_regs *),
87 unsigned long, const char *, void *);
88extern void cpu_free_irq(unsigned int, void *);
89
90/* 68/*
91 * various flags for request_irq() - the Amiga now uses the standard 69 * various flags for request_irq() - the Amiga now uses the standard
92 * mechanism like all other architectures - SA_INTERRUPT and SA_SHIRQ 70 * mechanism like all other architectures - SA_INTERRUPT and SA_SHIRQ
@@ -105,29 +83,45 @@ extern void cpu_free_irq(unsigned int, void *);
105 * interrupt source (if it supports chaining). 83 * interrupt source (if it supports chaining).
106 */ 84 */
107typedef struct irq_node { 85typedef struct irq_node {
108 irqreturn_t (*handler)(int, void *, struct pt_regs *); 86 int (*handler)(int, void *, struct pt_regs *);
109 unsigned long flags;
110 void *dev_id; 87 void *dev_id;
111 const char *devname;
112 struct irq_node *next; 88 struct irq_node *next;
89 unsigned long flags;
90 const char *devname;
113} irq_node_t; 91} irq_node_t;
114 92
115/* 93/*
116 * This structure has only 4 elements for speed reasons 94 * This structure has only 4 elements for speed reasons
117 */ 95 */
118typedef struct irq_handler { 96typedef struct irq_handler {
119 irqreturn_t (*handler)(int, void *, struct pt_regs *); 97 int (*handler)(int, void *, struct pt_regs *);
120 unsigned long flags; 98 unsigned long flags;
121 void *dev_id; 99 void *dev_id;
122 const char *devname; 100 const char *devname;
123} irq_handler_t; 101} irq_handler_t;
124 102
125/* count of spurious interrupts */ 103struct irq_controller {
126extern volatile unsigned int num_spurious; 104 const char *name;
105 spinlock_t lock;
106 int (*startup)(unsigned int irq);
107 void (*shutdown)(unsigned int irq);
108 void (*enable)(unsigned int irq);
109 void (*disable)(unsigned int irq);
110};
111
112extern int m68k_irq_startup(unsigned int);
113extern void m68k_irq_shutdown(unsigned int);
127 114
128/* 115/*
129 * This function returns a new irq_node_t 116 * This function returns a new irq_node_t
130 */ 117 */
131extern irq_node_t *new_irq_node(void); 118extern irq_node_t *new_irq_node(void);
132 119
120extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *));
121extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
122 void (*handler)(unsigned int, struct pt_regs *));
123extern void m68k_setup_irq_controller(struct irq_controller *, unsigned int, unsigned int);
124
125asmlinkage void m68k_handle_int(unsigned int, struct pt_regs *);
126
133#endif /* _M68K_IRQ_H_ */ 127#endif /* _M68K_IRQ_H_ */
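asm-m68k/irq.h now fixes the auto-vector numbers at IRQ_AUTO_1..7, starts machine-specific sources at IRQ_USER, and lets each platform register an irq_controller through m68k_setup_irq_controller(), with m68k_setup_auto_interrupt()/m68k_setup_user_interrupt() hooking the low-level entry points. Below is a sketch of the registration for a made-up platform; the controller name, the mask-register comments and the count of 32 user interrupts are assumptions, and the (controller, first irq, count) argument order is inferred from the prototype above.

    /* Hypothetical platform glue for the registration calls declared above. */
    static void myplat_irq_enable(unsigned int irq)
    {
    	/* unmask bit (irq - IRQ_USER) in the board's interrupt mask register */
    }

    static void myplat_irq_disable(unsigned int irq)
    {
    	/* mask bit (irq - IRQ_USER) in the board's interrupt mask register */
    }

    static struct irq_controller myplat_irq_controller = {
    	.name		= "myplat",
    	.lock		= SPIN_LOCK_UNLOCKED,
    	.enable		= myplat_irq_enable,
    	.disable	= myplat_irq_disable,
    };

    void __init myplat_init_IRQ(void)
    {
    	/* 32 board interrupts starting at IRQ_USER, handled by this controller;
    	 * the user-vector dispatcher would be wired up separately via
    	 * m68k_setup_user_interrupt(). */
    	m68k_setup_irq_controller(&myplat_irq_controller, IRQ_USER, 32);
    }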
diff --git a/include/asm-m68k/mac_oss.h b/include/asm-m68k/mac_oss.h
index 7644a639cd6c..7221f7251934 100644
--- a/include/asm-m68k/mac_oss.h
+++ b/include/asm-m68k/mac_oss.h
@@ -69,12 +69,12 @@
69 69
70#define OSS_IRQLEV_DISABLED 0 70#define OSS_IRQLEV_DISABLED 0
71#define OSS_IRQLEV_IOPISM 1 /* ADB? */ 71#define OSS_IRQLEV_IOPISM 1 /* ADB? */
72#define OSS_IRQLEV_SCSI 2 72#define OSS_IRQLEV_SCSI IRQ_AUTO_2
73#define OSS_IRQLEV_NUBUS 3 /* keep this on its own level */ 73#define OSS_IRQLEV_NUBUS IRQ_AUTO_3 /* keep this on its own level */
74#define OSS_IRQLEV_IOPSCC 4 /* matches VIA alternate mapping */ 74#define OSS_IRQLEV_IOPSCC IRQ_AUTO_4 /* matches VIA alternate mapping */
75#define OSS_IRQLEV_SOUND 5 /* matches VIA alternate mapping */ 75#define OSS_IRQLEV_SOUND IRQ_AUTO_5 /* matches VIA alternate mapping */
76#define OSS_IRQLEV_60HZ 6 /* matches VIA alternate mapping */ 76#define OSS_IRQLEV_60HZ 6 /* matches VIA alternate mapping */
77#define OSS_IRQLEV_VIA1 6 /* matches VIA alternate mapping */ 77#define OSS_IRQLEV_VIA1 IRQ_AUTO_6 /* matches VIA alternate mapping */
78#define OSS_IRQLEV_PARITY 7 /* matches VIA alternate mapping */ 78#define OSS_IRQLEV_PARITY 7 /* matches VIA alternate mapping */
79 79
80#ifndef __ASSEMBLY__ 80#ifndef __ASSEMBLY__
diff --git a/include/asm-m68k/machdep.h b/include/asm-m68k/machdep.h
index 7d3fee342369..df898f27e434 100644
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -13,14 +13,8 @@ struct buffer_head;
13extern void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *)); 13extern void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *));
14/* machine dependent irq functions */ 14/* machine dependent irq functions */
15extern void (*mach_init_IRQ) (void); 15extern void (*mach_init_IRQ) (void);
16extern irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *);
17extern int (*mach_request_irq) (unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
18 unsigned long flags, const char *devname, void *dev_id);
19extern void (*mach_free_irq) (unsigned int irq, void *dev_id);
20extern void (*mach_get_model) (char *model); 16extern void (*mach_get_model) (char *model);
21extern int (*mach_get_hardware_list) (char *buffer); 17extern int (*mach_get_hardware_list) (char *buffer);
22extern int (*mach_get_irq_list) (struct seq_file *p, void *v);
23extern irqreturn_t (*mach_process_int) (int irq, struct pt_regs *fp);
24/* machine dependent timer functions */ 18/* machine dependent timer functions */
25extern unsigned long (*mach_gettimeoffset)(void); 19extern unsigned long (*mach_gettimeoffset)(void);
26extern int (*mach_hwclk)(int, struct rtc_time*); 20extern int (*mach_hwclk)(int, struct rtc_time*);
diff --git a/include/asm-m68k/macintosh.h b/include/asm-m68k/macintosh.h
index 6fc3d19512d1..27d11da2b479 100644
--- a/include/asm-m68k/macintosh.h
+++ b/include/asm-m68k/macintosh.h
@@ -11,17 +11,7 @@
11extern void mac_reset(void); 11extern void mac_reset(void);
12extern void mac_poweroff(void); 12extern void mac_poweroff(void);
13extern void mac_init_IRQ(void); 13extern void mac_init_IRQ(void);
14extern int mac_request_irq (unsigned int, irqreturn_t (*)(int, void *,
15 struct pt_regs *),
16 unsigned long, const char *, void *);
17extern void mac_free_irq(unsigned int, void *);
18extern void mac_enable_irq(unsigned int);
19extern void mac_disable_irq(unsigned int);
20extern int mac_irq_pending(unsigned int); 14extern int mac_irq_pending(unsigned int);
21extern int show_mac_interrupts(struct seq_file *, void *);
22#if 0
23extern void mac_default_handler(int irq);
24#endif
25extern void mac_identify(void); 15extern void mac_identify(void);
26extern void mac_report_hardware(void); 16extern void mac_report_hardware(void);
27extern void mac_debugging_penguin(int); 17extern void mac_debugging_penguin(int);
diff --git a/include/asm-m68k/macints.h b/include/asm-m68k/macints.h
index fd8c3a9fea4d..679c48ab4407 100644
--- a/include/asm-m68k/macints.h
+++ b/include/asm-m68k/macints.h
@@ -59,17 +59,6 @@
59#define IRQ_SRC(irq) (irq >> 3) 59#define IRQ_SRC(irq) (irq >> 3)
60#define IRQ_IDX(irq) (irq & 7) 60#define IRQ_IDX(irq) (irq & 7)
61 61
62#define IRQ_SPURIOUS (0)
63
64/* auto-vector interrupts */
65#define IRQ_AUTO_1 (1)
66#define IRQ_AUTO_2 (2)
67#define IRQ_AUTO_3 (3)
68#define IRQ_AUTO_4 (4)
69#define IRQ_AUTO_5 (5)
70#define IRQ_AUTO_6 (6)
71#define IRQ_AUTO_7 (7)
72
73/* VIA1 interrupts */ 62/* VIA1 interrupts */
74#define IRQ_VIA1_0 (8) /* one second int. */ 63#define IRQ_VIA1_0 (8) /* one second int. */
75#define IRQ_VIA1_1 (9) /* VBlank int. */ 64#define IRQ_VIA1_1 (9) /* VBlank int. */
@@ -163,7 +152,4 @@
163#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */ 152#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */
164#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */ 153#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */
165 154
166extern irq_node_t *mac_irq_list[NUM_MAC_SOURCES];
167extern void mac_do_irq_list(int irq, struct pt_regs *);
168
169#endif /* asm/macints.h */ 155#endif /* asm/macints.h */
diff --git a/include/asm-m68k/mvme147hw.h b/include/asm-m68k/mvme147hw.h
index f245139f3671..b81043108472 100644
--- a/include/asm-m68k/mvme147hw.h
+++ b/include/asm-m68k/mvme147hw.h
@@ -1,6 +1,8 @@
1#ifndef _MVME147HW_H_ 1#ifndef _MVME147HW_H_
2#define _MVME147HW_H_ 2#define _MVME147HW_H_
3 3
4#include <asm/irq.h>
5
4typedef struct { 6typedef struct {
5 unsigned char 7 unsigned char
6 ctrl, 8 ctrl,
@@ -72,39 +74,39 @@ struct pcc_regs {
72#define PCC_LEVEL_SCSI_PORT 0x04 74#define PCC_LEVEL_SCSI_PORT 0x04
73#define PCC_LEVEL_SCSI_DMA 0x04 75#define PCC_LEVEL_SCSI_DMA 0x04
74 76
75#define PCC_IRQ_AC_FAIL 0x40 77#define PCC_IRQ_AC_FAIL (IRQ_USER+0)
76#define PCC_IRQ_BERR 0x41 78#define PCC_IRQ_BERR (IRQ_USER+1)
77#define PCC_IRQ_ABORT 0x42 79#define PCC_IRQ_ABORT (IRQ_USER+2)
78/* #define PCC_IRQ_SERIAL 0x43 */ 80/* #define PCC_IRQ_SERIAL (IRQ_USER+3) */
79#define PCC_IRQ_PRINTER 0x47 81#define PCC_IRQ_PRINTER (IRQ_USER+7)
80#define PCC_IRQ_TIMER1 0x48 82#define PCC_IRQ_TIMER1 (IRQ_USER+8)
81#define PCC_IRQ_TIMER2 0x49 83#define PCC_IRQ_TIMER2 (IRQ_USER+9)
82#define PCC_IRQ_SOFTWARE1 0x4a 84#define PCC_IRQ_SOFTWARE1 (IRQ_USER+10)
83#define PCC_IRQ_SOFTWARE2 0x4b 85#define PCC_IRQ_SOFTWARE2 (IRQ_USER+11)
84 86
85 87
86#define M147_SCC_A_ADDR 0xfffe3002 88#define M147_SCC_A_ADDR 0xfffe3002
87#define M147_SCC_B_ADDR 0xfffe3000 89#define M147_SCC_B_ADDR 0xfffe3000
88#define M147_SCC_PCLK 5000000 90#define M147_SCC_PCLK 5000000
89 91
90#define MVME147_IRQ_SCSI_PORT 0x45 92#define MVME147_IRQ_SCSI_PORT (IRQ_USER+0x45)
91#define MVME147_IRQ_SCSI_DMA 0x46 93#define MVME147_IRQ_SCSI_DMA (IRQ_USER+0x46)
92 94
93/* SCC interrupts, for MVME147 */ 95/* SCC interrupts, for MVME147 */
94 96
95#define MVME147_IRQ_TYPE_PRIO 0 97#define MVME147_IRQ_TYPE_PRIO 0
96#define MVME147_IRQ_SCC_BASE 0x60 98#define MVME147_IRQ_SCC_BASE (IRQ_USER+32)
97#define MVME147_IRQ_SCCB_TX 0x60 99#define MVME147_IRQ_SCCB_TX (IRQ_USER+32)
98#define MVME147_IRQ_SCCB_STAT 0x62 100#define MVME147_IRQ_SCCB_STAT (IRQ_USER+34)
99#define MVME147_IRQ_SCCB_RX 0x64 101#define MVME147_IRQ_SCCB_RX (IRQ_USER+36)
100#define MVME147_IRQ_SCCB_SPCOND 0x66 102#define MVME147_IRQ_SCCB_SPCOND (IRQ_USER+38)
101#define MVME147_IRQ_SCCA_TX 0x68 103#define MVME147_IRQ_SCCA_TX (IRQ_USER+40)
102#define MVME147_IRQ_SCCA_STAT 0x6a 104#define MVME147_IRQ_SCCA_STAT (IRQ_USER+42)
103#define MVME147_IRQ_SCCA_RX 0x6c 105#define MVME147_IRQ_SCCA_RX (IRQ_USER+44)
104#define MVME147_IRQ_SCCA_SPCOND 0x6e 106#define MVME147_IRQ_SCCA_SPCOND (IRQ_USER+46)
105 107
106#define MVME147_LANCE_BASE 0xfffe1800 108#define MVME147_LANCE_BASE 0xfffe1800
107#define MVME147_LANCE_IRQ 0x44 109#define MVME147_LANCE_IRQ (IRQ_USER+4)
108 110
109#define ETHERNET_ADDRESS 0xfffe0778 111#define ETHERNET_ADDRESS 0xfffe0778
110 112
diff --git a/include/asm-m68k/mvme16xhw.h b/include/asm-m68k/mvme16xhw.h
index 5d07231d2426..6117f56653d2 100644
--- a/include/asm-m68k/mvme16xhw.h
+++ b/include/asm-m68k/mvme16xhw.h
@@ -66,28 +66,28 @@ typedef struct {
66 66
67#define MVME162_IRQ_TYPE_PRIO 0 67#define MVME162_IRQ_TYPE_PRIO 0
68 68
69#define MVME167_IRQ_PRN 0x54 69#define MVME167_IRQ_PRN (IRQ_USER+20)
70#define MVME16x_IRQ_I596 0x57 70#define MVME16x_IRQ_I596 (IRQ_USER+23)
71#define MVME16x_IRQ_SCSI 0x55 71#define MVME16x_IRQ_SCSI (IRQ_USER+21)
72#define MVME16x_IRQ_FLY 0x7f 72#define MVME16x_IRQ_FLY (IRQ_USER+63)
73#define MVME167_IRQ_SER_ERR 0x5c 73#define MVME167_IRQ_SER_ERR (IRQ_USER+28)
74#define MVME167_IRQ_SER_MODEM 0x5d 74#define MVME167_IRQ_SER_MODEM (IRQ_USER+29)
75#define MVME167_IRQ_SER_TX 0x5e 75#define MVME167_IRQ_SER_TX (IRQ_USER+30)
76#define MVME167_IRQ_SER_RX 0x5f 76#define MVME167_IRQ_SER_RX (IRQ_USER+31)
77#define MVME16x_IRQ_TIMER 0x59 77#define MVME16x_IRQ_TIMER (IRQ_USER+25)
78#define MVME167_IRQ_ABORT 0x6e 78#define MVME167_IRQ_ABORT (IRQ_USER+46)
79#define MVME162_IRQ_ABORT 0x5e 79#define MVME162_IRQ_ABORT (IRQ_USER+30)
80 80
81/* SCC interrupts, for MVME162 */ 81/* SCC interrupts, for MVME162 */
82#define MVME162_IRQ_SCC_BASE 0x40 82#define MVME162_IRQ_SCC_BASE (IRQ_USER+0)
83#define MVME162_IRQ_SCCB_TX 0x40 83#define MVME162_IRQ_SCCB_TX (IRQ_USER+0)
84#define MVME162_IRQ_SCCB_STAT 0x42 84#define MVME162_IRQ_SCCB_STAT (IRQ_USER+2)
85#define MVME162_IRQ_SCCB_RX 0x44 85#define MVME162_IRQ_SCCB_RX (IRQ_USER+4)
86#define MVME162_IRQ_SCCB_SPCOND 0x46 86#define MVME162_IRQ_SCCB_SPCOND (IRQ_USER+6)
87#define MVME162_IRQ_SCCA_TX 0x48 87#define MVME162_IRQ_SCCA_TX (IRQ_USER+8)
88#define MVME162_IRQ_SCCA_STAT 0x4a 88#define MVME162_IRQ_SCCA_STAT (IRQ_USER+10)
89#define MVME162_IRQ_SCCA_RX 0x4c 89#define MVME162_IRQ_SCCA_RX (IRQ_USER+12)
90#define MVME162_IRQ_SCCA_SPCOND 0x4e 90#define MVME162_IRQ_SCCA_SPCOND (IRQ_USER+14)
91 91
92/* MVME162 version register */ 92/* MVME162 version register */
93 93
diff --git a/include/asm-m68k/scatterlist.h b/include/asm-m68k/scatterlist.h
index d7c9b5c5e6c7..8e612266da51 100644
--- a/include/asm-m68k/scatterlist.h
+++ b/include/asm-m68k/scatterlist.h
@@ -2,18 +2,17 @@
2#define _M68K_SCATTERLIST_H 2#define _M68K_SCATTERLIST_H
3 3
4struct scatterlist { 4struct scatterlist {
5 /* These two are only valid if ADDRESS member of this
6 * struct is NULL.
7 */
8 struct page *page; 5 struct page *page;
9 unsigned int offset; 6 unsigned int offset;
10
11 unsigned int length; 7 unsigned int length;
12 8
13 __u32 dvma_address; /* A place to hang host-specific addresses at. */ 9 __u32 dma_address; /* A place to hang host-specific addresses at. */
14}; 10};
15 11
16/* This is bogus and should go away. */ 12/* This is bogus and should go away. */
17#define ISA_DMA_THRESHOLD (0x00ffffff) 13#define ISA_DMA_THRESHOLD (0x00ffffff)
18 14
15#define sg_dma_address(sg) ((sg)->dma_address)
16#define sg_dma_len(sg) ((sg)->length)
17
19#endif /* !(_M68K_SCATTERLIST_H) */ 18#endif /* !(_M68K_SCATTERLIST_H) */
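With sg_dma_address()/sg_dma_len() now defined, m68k drivers can walk a mapped scatterlist the same way as on other architectures. A sketch pairing the macros with dma_map_sg() from the dma-mapping changes above; myplat_map_request() and setup_hw_descriptor() are hypothetical.

    /* Hypothetical fragment: map an already-populated scatterlist and hand
     * each segment's bus address and length to the hardware. */
    static int myplat_map_request(struct device *dev, struct scatterlist *sg,
    			      int nents)
    {
    	int i, mapped;

    	mapped = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
    	if (!mapped)
    		return -EIO;

    	for (i = 0; i < mapped; i++, sg++)
    		/* one hardware descriptor per mapped segment */
    		setup_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));

    	return 0;
    }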
diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h
index b7b7ea20caab..85037a3d3e8e 100644
--- a/include/asm-m68k/signal.h
+++ b/include/asm-m68k/signal.h
@@ -156,13 +156,17 @@ typedef struct sigaltstack {
156 156
157static inline void sigaddset(sigset_t *set, int _sig) 157static inline void sigaddset(sigset_t *set, int _sig)
158{ 158{
159 __asm__("bfset %0{%1,#1}" : "=m" (*set) : "id" ((_sig - 1) ^ 31) 159 asm ("bfset %0{%1,#1}"
160 : "+od" (*set)
161 : "id" ((_sig - 1) ^ 31)
160 : "cc"); 162 : "cc");
161} 163}
162 164
163static inline void sigdelset(sigset_t *set, int _sig) 165static inline void sigdelset(sigset_t *set, int _sig)
164{ 166{
165 __asm__("bfclr %0{%1,#1}" : "=m"(*set) : "id"((_sig - 1) ^ 31) 167 asm ("bfclr %0{%1,#1}"
168 : "+od" (*set)
169 : "id" ((_sig - 1) ^ 31)
166 : "cc"); 170 : "cc");
167} 171}
168 172
@@ -175,8 +179,10 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
175static inline int __gen_sigismember(sigset_t *set, int _sig) 179static inline int __gen_sigismember(sigset_t *set, int _sig)
176{ 180{
177 int ret; 181 int ret;
178 __asm__("bfextu %1{%2,#1},%0" 182 asm ("bfextu %1{%2,#1},%0"
179 : "=d"(ret) : "m"(*set), "id"((_sig-1) ^ 31)); 183 : "=d" (ret)
184 : "od" (*set), "id" ((_sig-1) ^ 31)
185 : "cc");
180 return ret; 186 return ret;
181} 187}
182 188
@@ -187,7 +193,10 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
187 193
188static inline int sigfindinword(unsigned long word) 194static inline int sigfindinword(unsigned long word)
189{ 195{
190 __asm__("bfffo %1{#0,#0},%0" : "=d"(word) : "d"(word & -word) : "cc"); 196 asm ("bfffo %1{#0,#0},%0"
197 : "=d" (word)
198 : "d" (word & -word)
199 : "cc");
191 return word ^ 31; 200 return word ^ 31;
192} 201}
193 202
diff --git a/include/asm-m68k/sun3ints.h b/include/asm-m68k/sun3ints.h
index bd038fccb64b..de91fa071b99 100644
--- a/include/asm-m68k/sun3ints.h
+++ b/include/asm-m68k/sun3ints.h
@@ -12,37 +12,25 @@
12#define SUN3INTS_H 12#define SUN3INTS_H
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/sched.h>
17#include <linux/kernel_stat.h>
18#include <linux/interrupt.h> 15#include <linux/interrupt.h>
19#include <linux/seq_file.h>
20#include <asm/segment.h>
21#include <asm/intersil.h> 16#include <asm/intersil.h>
22#include <asm/oplib.h> 17#include <asm/oplib.h>
18#include <asm/traps.h>
23 19
24#define SUN3_INT_VECS 192 20#define SUN3_INT_VECS 192
25 21
26void sun3_enable_irq(unsigned int irq); 22void sun3_enable_irq(unsigned int irq);
27void sun3_disable_irq(unsigned int irq); 23void sun3_disable_irq(unsigned int irq);
28int sun3_request_irq(unsigned int irq,
29 irqreturn_t (*handler)(int, void *, struct pt_regs *),
30 unsigned long flags, const char *devname, void *dev_id
31 );
32extern void sun3_init_IRQ (void); 24extern void sun3_init_IRQ (void);
33extern irqreturn_t (*sun3_default_handler[]) (int, void *, struct pt_regs *);
34extern void sun3_free_irq (unsigned int irq, void *dev_id);
35extern void sun3_enable_interrupts (void); 25extern void sun3_enable_interrupts (void);
36extern void sun3_disable_interrupts (void); 26extern void sun3_disable_interrupts (void);
37extern int show_sun3_interrupts(struct seq_file *, void *);
38extern irqreturn_t sun3_process_int(int, struct pt_regs *);
39extern volatile unsigned char* sun3_intreg; 27extern volatile unsigned char* sun3_intreg;
40 28
41/* master list of VME vectors -- don't fuck with this */ 29/* master list of VME vectors -- don't fuck with this */
42#define SUN3_VEC_FLOPPY 0x40 30#define SUN3_VEC_FLOPPY (IRQ_USER+0)
43#define SUN3_VEC_VMESCSI0 0x40 31#define SUN3_VEC_VMESCSI0 (IRQ_USER+0)
44#define SUN3_VEC_VMESCSI1 0x41 32#define SUN3_VEC_VMESCSI1 (IRQ_USER+1)
45#define SUN3_VEC_CG 0xA8 33#define SUN3_VEC_CG (IRQ_USER+104)
46 34
47 35
48#endif /* SUN3INTS_H */ 36#endif /* SUN3INTS_H */
diff --git a/include/asm-m68k/traps.h b/include/asm-m68k/traps.h
index 475056191252..8caef25624c7 100644
--- a/include/asm-m68k/traps.h
+++ b/include/asm-m68k/traps.h
@@ -13,8 +13,15 @@
13 13
14#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
15 15
16#include <linux/linkage.h>
17#include <asm/ptrace.h>
18
16typedef void (*e_vector)(void); 19typedef void (*e_vector)(void);
17 20
21asmlinkage void auto_inthandler(void);
22asmlinkage void user_inthandler(void);
23asmlinkage void bad_inthandler(void);
24
18extern e_vector vectors[]; 25extern e_vector vectors[];
19 26
20#endif 27#endif
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index b761ef218cea..88b1f47400e1 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -181,144 +181,164 @@ asm volatile ("\n" \
181unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); 181unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
182unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n); 182unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
183 183
184#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
185 asm volatile ("\n" \
186 "1: moves."#s1" (%2)+,%3\n" \
187 " move."#s1" %3,(%1)+\n" \
188 "2: moves."#s2" (%2)+,%3\n" \
189 " move."#s2" %3,(%1)+\n" \
190 " .ifnc \""#s3"\",\"\"\n" \
191 "3: moves."#s3" (%2)+,%3\n" \
192 " move."#s3" %3,(%1)+\n" \
193 " .endif\n" \
194 "4:\n" \
195 " .section __ex_table,\"a\"\n" \
196 " .align 4\n" \
197 " .long 1b,10f\n" \
198 " .long 2b,20f\n" \
199 " .ifnc \""#s3"\",\"\"\n" \
200 " .long 3b,30f\n" \
201 " .endif\n" \
202 " .previous\n" \
203 "\n" \
204 " .section .fixup,\"ax\"\n" \
205 " .even\n" \
206 "10: clr."#s1" (%1)+\n" \
207 "20: clr."#s2" (%1)+\n" \
208 " .ifnc \""#s3"\",\"\"\n" \
209 "30: clr."#s3" (%1)+\n" \
210 " .endif\n" \
211 " moveq.l #"#n",%0\n" \
212 " jra 4b\n" \
213 " .previous\n" \
214 : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
215 : : "memory")
216
184static __always_inline unsigned long 217static __always_inline unsigned long
185__constant_copy_from_user(void *to, const void __user *from, unsigned long n) 218__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
186{ 219{
187 unsigned long res = 0, tmp; 220 unsigned long res = 0, tmp;
188 221
189 /* limit the inlined version to 3 moves */
190 if (n == 11 || n > 12)
191 return __generic_copy_from_user(to, from, n);
192
193 switch (n) { 222 switch (n) {
194 case 1: 223 case 1:
195 __get_user_asm(res, *(u8 *)to, (u8 *)from, u8, b, d, 1); 224 __get_user_asm(res, *(u8 *)to, (u8 *)from, u8, b, d, 1);
196 return res; 225 break;
197 case 2: 226 case 2:
198 __get_user_asm(res, *(u16 *)to, (u16 *)from, u16, w, d, 2); 227 __get_user_asm(res, *(u16 *)to, (u16 *)from, u16, w, d, 2);
199 return res; 228 break;
229 case 3:
230 __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
231 break;
200 case 4: 232 case 4:
201 __get_user_asm(res, *(u32 *)to, (u32 *)from, u32, l, r, 4); 233 __get_user_asm(res, *(u32 *)to, (u32 *)from, u32, l, r, 4);
202 return res; 234 break;
235 case 5:
236 __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
237 break;
238 case 6:
239 __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
240 break;
241 case 7:
242 __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
243 break;
244 case 8:
245 __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
246 break;
247 case 9:
248 __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
249 break;
250 case 10:
251 __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
252 break;
253 case 12:
254 __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
255 break;
256 default:
257 /* we limit the inlined version to 3 moves */
258 return __generic_copy_from_user(to, from, n);
203 } 259 }
204 260
205 asm volatile ("\n"
206 " .ifndef .Lfrom_user\n"
207 " .set .Lfrom_user,1\n"
208 " .macro copy_from_user to,from,tmp\n"
209 " .if .Lcnt >= 4\n"
210 "1: moves.l (\\from)+,\\tmp\n"
211 " move.l \\tmp,(\\to)+\n"
212 " .set .Lcnt,.Lcnt-4\n"
213 " .elseif .Lcnt & 2\n"
214 "1: moves.w (\\from)+,\\tmp\n"
215 " move.w \\tmp,(\\to)+\n"
216 " .set .Lcnt,.Lcnt-2\n"
217 " .elseif .Lcnt & 1\n"
218 "1: moves.b (\\from)+,\\tmp\n"
219 " move.b \\tmp,(\\to)+\n"
220 " .set .Lcnt,.Lcnt-1\n"
221 " .else\n"
222 " .exitm\n"
223 " .endif\n"
224 "\n"
225 " .section __ex_table,\"a\"\n"
226 " .align 4\n"
227 " .long 1b,3f\n"
228 " .previous\n"
229 " .endm\n"
230 " .endif\n"
231 "\n"
232 " .set .Lcnt,%c4\n"
233 " copy_from_user %1,%2,%3\n"
234 " copy_from_user %1,%2,%3\n"
235 " copy_from_user %1,%2,%3\n"
236 "2:\n"
237 " .section .fixup,\"ax\"\n"
238 " .even\n"
239 "3: moveq.l %4,%0\n"
240 " move.l %5,%1\n"
241 " .rept %c4 / 4\n"
242 " clr.l (%1)+\n"
243 " .endr\n"
244 " .if %c4 & 2\n"
245 " clr.w (%1)+\n"
246 " .endif\n"
247 " .if %c4 & 1\n"
248 " clr.b (%1)+\n"
249 " .endif\n"
250 " jra 2b\n"
251 " .previous\n"
252 : "+r" (res), "+a" (to), "+a" (from), "=&d" (tmp)
253 : "i" (n), "g" (to)
254 : "memory");
255
256 return res; 261 return res;
257} 262}
258 263
264#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
265 asm volatile ("\n" \
266 " move."#s1" (%2)+,%3\n" \
267 "11: moves."#s1" %3,(%1)+\n" \
268 "12: move."#s2" (%2)+,%3\n" \
269 "21: moves."#s2" %3,(%1)+\n" \
270 "22:\n" \
271 " .ifnc \""#s3"\",\"\"\n" \
272 " move."#s3" (%2)+,%3\n" \
273 "31: moves."#s3" %3,(%1)+\n" \
274 "32:\n" \
275 " .endif\n" \
276 "4:\n" \
277 "\n" \
278 " .section __ex_table,\"a\"\n" \
279 " .align 4\n" \
280 " .long 11b,5f\n" \
281 " .long 12b,5f\n" \
282 " .long 21b,5f\n" \
283 " .long 22b,5f\n" \
284 " .ifnc \""#s3"\",\"\"\n" \
285 " .long 31b,5f\n" \
286 " .long 32b,5f\n" \
287 " .endif\n" \
288 " .previous\n" \
289 "\n" \
290 " .section .fixup,\"ax\"\n" \
291 " .even\n" \
292 "5: moveq.l #"#n",%0\n" \
293 " jra 4b\n" \
294 " .previous\n" \
295 : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
296 : : "memory")
297
259static __always_inline unsigned long 298static __always_inline unsigned long
260__constant_copy_to_user(void __user *to, const void *from, unsigned long n) 299__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
261{ 300{
262 unsigned long res = 0, tmp; 301 unsigned long res = 0, tmp;
263 302
264 /* limit the inlined version to 3 moves */
265 if (n == 11 || n > 12)
266 return __generic_copy_to_user(to, from, n);
267
268 switch (n) { 303 switch (n) {
269 case 1: 304 case 1:
270 __put_user_asm(res, *(u8 *)from, (u8 *)to, b, d, 1); 305 __put_user_asm(res, *(u8 *)from, (u8 *)to, b, d, 1);
271 return res; 306 break;
272 case 2: 307 case 2:
273 __put_user_asm(res, *(u16 *)from, (u16 *)to, w, d, 2); 308 __put_user_asm(res, *(u16 *)from, (u16 *)to, w, d, 2);
274 return res; 309 break;
310 case 3:
311 __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
312 break;
275 case 4: 313 case 4:
276 __put_user_asm(res, *(u32 *)from, (u32 *)to, l, r, 4); 314 __put_user_asm(res, *(u32 *)from, (u32 *)to, l, r, 4);
277 return res; 315 break;
316 case 5:
317 __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
318 break;
319 case 6:
320 __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
321 break;
322 case 7:
323 __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
324 break;
325 case 8:
326 __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
327 break;
328 case 9:
329 __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
330 break;
331 case 10:
332 __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
333 break;
334 case 12:
335 __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
336 break;
337 default:
338 /* limit the inlined version to 3 moves */
339 return __generic_copy_to_user(to, from, n);
278 } 340 }
279 341
280 asm volatile ("\n"
281 " .ifndef .Lto_user\n"
282 " .set .Lto_user,1\n"
283 " .macro copy_to_user to,from,tmp\n"
284 " .if .Lcnt >= 4\n"
285 " move.l (\\from)+,\\tmp\n"
286 "11: moves.l \\tmp,(\\to)+\n"
287 "12: .set .Lcnt,.Lcnt-4\n"
288 " .elseif .Lcnt & 2\n"
289 " move.w (\\from)+,\\tmp\n"
290 "11: moves.w \\tmp,(\\to)+\n"
291 "12: .set .Lcnt,.Lcnt-2\n"
292 " .elseif .Lcnt & 1\n"
293 " move.b (\\from)+,\\tmp\n"
294 "11: moves.b \\tmp,(\\to)+\n"
295 "12: .set .Lcnt,.Lcnt-1\n"
296 " .else\n"
297 " .exitm\n"
298 " .endif\n"
299 "\n"
300 " .section __ex_table,\"a\"\n"
301 " .align 4\n"
302 " .long 11b,3f\n"
303 " .long 12b,3f\n"
304 " .previous\n"
305 " .endm\n"
306 " .endif\n"
307 "\n"
308 " .set .Lcnt,%c4\n"
309 " copy_to_user %1,%2,%3\n"
310 " copy_to_user %1,%2,%3\n"
311 " copy_to_user %1,%2,%3\n"
312 "2:\n"
313 " .section .fixup,\"ax\"\n"
314 " .even\n"
315 "3: moveq.l %4,%0\n"
316 " jra 2b\n"
317 " .previous\n"
318 : "+r" (res), "+a" (to), "+a" (from), "=&d" (tmp)
319 : "i" (n)
320 : "memory");
321
322 return res; 342 return res;
323} 343}
324 344
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index 986511db54a6..900f472fdd2b 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -145,8 +145,5 @@ static inline void __user *compat_alloc_user_space(long len)
145 145
146 return (void __user *) (regs->regs[29] - len); 146 return (void __user *) (regs->regs[29] - len);
147} 147}
148#if defined (__MIPSEL__)
149#define __COMPAT_ENDIAN_SWAP__ 1
150#endif
151 148
152#endif /* _ASM_COMPAT_H */ 149#endif /* _ASM_COMPAT_H */
diff --git a/include/asm-mips/mach-generic/floppy.h b/include/asm-mips/mach-generic/floppy.h
index 682a5858f8d7..83cd69e30ec3 100644
--- a/include/asm-mips/mach-generic/floppy.h
+++ b/include/asm-mips/mach-generic/floppy.h
@@ -98,7 +98,7 @@ static inline void fd_disable_irq(void)
98static inline int fd_request_irq(void) 98static inline int fd_request_irq(void)
99{ 99{
100 return request_irq(FLOPPY_IRQ, floppy_interrupt, 100 return request_irq(FLOPPY_IRQ, floppy_interrupt,
101 SA_INTERRUPT | SA_SAMPLE_RANDOM, "floppy", NULL); 101 SA_INTERRUPT, "floppy", NULL);
102} 102}
103 103
104static inline void fd_free_irq(void) 104static inline void fd_free_irq(void)
diff --git a/include/asm-mips/mach-jazz/floppy.h b/include/asm-mips/mach-jazz/floppy.h
index c9dad99b1232..9413117915f4 100644
--- a/include/asm-mips/mach-jazz/floppy.h
+++ b/include/asm-mips/mach-jazz/floppy.h
@@ -90,7 +90,7 @@ static inline void fd_disable_irq(void)
90static inline int fd_request_irq(void) 90static inline int fd_request_irq(void)
91{ 91{
92 return request_irq(FLOPPY_IRQ, floppy_interrupt, 92 return request_irq(FLOPPY_IRQ, floppy_interrupt,
93 SA_INTERRUPT | SA_SAMPLE_RANDOM, "floppy", NULL); 93 SA_INTERRUPT, "floppy", NULL);
94} 94}
95 95
96static inline void fd_free_irq(void) 96static inline void fd_free_irq(void)
diff --git a/include/asm-parisc/floppy.h b/include/asm-parisc/floppy.h
index ca3aed768cdc..458cdb2a7530 100644
--- a/include/asm-parisc/floppy.h
+++ b/include/asm-parisc/floppy.h
@@ -159,10 +159,8 @@ static int fd_request_irq(void)
159 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, 159 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
160 "floppy", NULL); 160 "floppy", NULL);
161 else 161 else
162 return request_irq(FLOPPY_IRQ, floppy_interrupt, 162 return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT,
163 SA_INTERRUPT|SA_SAMPLE_RANDOM, 163 "floppy", NULL);
164 "floppy", NULL);
165
166} 164}
167 165
168static unsigned long dma_mem_alloc(unsigned long size) 166static unsigned long dma_mem_alloc(unsigned long size)
diff --git a/include/asm-powerpc/backlight.h b/include/asm-powerpc/backlight.h
index 1ba1f27a0b63..a5e9e656e332 100644
--- a/include/asm-powerpc/backlight.h
+++ b/include/asm-powerpc/backlight.h
@@ -2,30 +2,30 @@
2 * Routines for handling backlight control on PowerBooks 2 * Routines for handling backlight control on PowerBooks
3 * 3 *
4 * For now, implementation resides in 4 * For now, implementation resides in
5 * arch/powerpc/platforms/powermac/pmac_support.c 5 * arch/powerpc/platforms/powermac/backlight.c
6 * 6 *
7 */ 7 */
8#ifndef __ASM_POWERPC_BACKLIGHT_H 8#ifndef __ASM_POWERPC_BACKLIGHT_H
9#define __ASM_POWERPC_BACKLIGHT_H 9#define __ASM_POWERPC_BACKLIGHT_H
10#ifdef __KERNEL__ 10#ifdef __KERNEL__
11 11
12/* Abstract values */ 12#include <linux/fb.h>
13#define BACKLIGHT_OFF 0 13#include <linux/mutex.h>
14#define BACKLIGHT_MIN 1
15#define BACKLIGHT_MAX 0xf
16 14
17struct backlight_controller { 15/* For locking instructions, see the implementation file */
18 int (*set_enable)(int enable, int level, void *data); 16extern struct backlight_device *pmac_backlight;
19 int (*set_level)(int level, void *data); 17extern struct mutex pmac_backlight_mutex;
20};
21 18
22extern void register_backlight_controller(struct backlight_controller *ctrler, void *data, char *type); 19extern void pmac_backlight_calc_curve(struct fb_info*);
23extern void unregister_backlight_controller(struct backlight_controller *ctrler, void *data); 20extern int pmac_backlight_curve_lookup(struct fb_info *info, int value);
24 21
25extern int set_backlight_enable(int enable); 22extern int pmac_has_backlight_type(const char *type);
26extern int get_backlight_enable(void); 23
27extern int set_backlight_level(int level); 24extern void pmac_backlight_key_up(void);
28extern int get_backlight_level(void); 25extern void pmac_backlight_key_down(void);
26
27extern int pmac_backlight_set_legacy_brightness(int brightness);
28extern int pmac_backlight_get_legacy_brightness(void);
29 29
30#endif /* __KERNEL__ */ 30#endif /* __KERNEL__ */
31#endif 31#endif
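The old generic backlight-controller registration is replaced by a pmac-specific interface layered on the backlight class. A rough usage sketch relying only on the declarations added above; the caller, and the "pmu" type string, are assumptions:

#include <asm/backlight.h>

/* Hypothetical: service an old-style 0..0xf brightness request. */
static int hyp_set_old_brightness(int level)
{
        if (!pmac_has_backlight_type("pmu"))   /* "pmu" is an assumed type */
                return -ENODEV;

        /* legacy helper maps the old scale onto the backlight class */
        return pmac_backlight_set_legacy_brightness(level);
}
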
diff --git a/include/asm-powerpc/floppy.h b/include/asm-powerpc/floppy.h
index 7e2d169ee856..9c8d91bf5a0d 100644
--- a/include/asm-powerpc/floppy.h
+++ b/include/asm-powerpc/floppy.h
@@ -27,8 +27,7 @@
27#define fd_disable_irq() disable_irq(FLOPPY_IRQ) 27#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
28#define fd_cacheflush(addr,size) /* nothing */ 28#define fd_cacheflush(addr,size) /* nothing */
29#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \ 29#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \
30 SA_INTERRUPT|SA_SAMPLE_RANDOM, \ 30 SA_INTERRUPT, "floppy", NULL)
31 "floppy", NULL)
32#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); 31#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
33 32
34#ifdef CONFIG_PCI 33#ifdef CONFIG_PCI
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 184a7a4d2fdf..faa1fc703053 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -22,6 +22,7 @@
22/* var is in discarded region: offset to particular copy we want */ 22/* var is in discarded region: offset to particular copy we want */
23#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) 23#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
24#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) 24#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
25#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
25 26
26/* A macro to avoid #include hell... */ 27/* A macro to avoid #include hell... */
27#define percpu_modcopy(pcpudst, src, size) \ 28#define percpu_modcopy(pcpudst, src, size) \
@@ -41,6 +42,7 @@ extern void setup_per_cpu_areas(void);
41 42
42#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) 43#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
43#define __get_cpu_var(var) per_cpu__##var 44#define __get_cpu_var(var) per_cpu__##var
45#define __raw_get_cpu_var(var) per_cpu__##var
44 46
45#endif /* SMP */ 47#endif /* SMP */
46 48
diff --git a/include/asm-ppc/floppy.h b/include/asm-ppc/floppy.h
index 8ccd4a276fe9..2ba191eba448 100644
--- a/include/asm-ppc/floppy.h
+++ b/include/asm-ppc/floppy.h
@@ -99,10 +99,8 @@ static int fd_request_irq(void)
99 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, 99 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
100 "floppy", NULL); 100 "floppy", NULL);
101 else 101 else
102 return request_irq(FLOPPY_IRQ, floppy_interrupt, 102 return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT,
103 SA_INTERRUPT|SA_SAMPLE_RANDOM, 103 "floppy", NULL);
104 "floppy", NULL);
105
106} 104}
107 105
108static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) 106static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 436d216601e5..d9a8cca9b653 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -40,6 +40,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
40 __typeof__(type) per_cpu__##name 40 __typeof__(type) per_cpu__##name
41 41
42#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) 42#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
43#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
43#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) 44#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
44 45
45/* A macro to avoid #include hell... */ 46/* A macro to avoid #include hell... */
@@ -57,6 +58,7 @@ do { \
57 __typeof__(type) per_cpu__##name 58 __typeof__(type) per_cpu__##name
58 59
59#define __get_cpu_var(var) __reloc_hide(var,0) 60#define __get_cpu_var(var) __reloc_hide(var,0)
61#define __raw_get_cpu_var(var) __reloc_hide(var,0)
60#define per_cpu(var,cpu) __reloc_hide(var,0) 62#define per_cpu(var,cpu) __reloc_hide(var,0)
61 63
62#endif /* SMP */ 64#endif /* SMP */
diff --git a/include/asm-sh/floppy.h b/include/asm-sh/floppy.h
index 38d7a2942476..307d9ce9f9ed 100644
--- a/include/asm-sh/floppy.h
+++ b/include/asm-sh/floppy.h
@@ -147,11 +147,10 @@ static int fd_request_irq(void)
147{ 147{
148 if(can_use_virtual_dma) 148 if(can_use_virtual_dma)
149 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, 149 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
150 "floppy", NULL); 150 "floppy", NULL);
151 else 151 else
152 return request_irq(FLOPPY_IRQ, floppy_interrupt, 152 return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT,
153 SA_INTERRUPT|SA_SAMPLE_RANDOM, 153 "floppy", NULL);
154 "floppy", NULL);
155 154
156} 155}
157 156
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index baef13b58952..a6ece06b83db 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -21,6 +21,7 @@ register unsigned long __local_per_cpu_offset asm("g5");
21/* var is in discarded region: offset to particular copy we want */ 21/* var is in discarded region: offset to particular copy we want */
22#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) 22#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
23#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset)) 23#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
24#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
24 25
25/* A macro to avoid #include hell... */ 26/* A macro to avoid #include hell... */
26#define percpu_modcopy(pcpudst, src, size) \ 27#define percpu_modcopy(pcpudst, src, size) \
@@ -37,6 +38,7 @@ do { \
37 38
38#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) 39#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
39#define __get_cpu_var(var) per_cpu__##var 40#define __get_cpu_var(var) per_cpu__##var
41#define __raw_get_cpu_var(var) per_cpu__##var
40 42
41#endif /* SMP */ 43#endif /* SMP */
42 44
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
index 52825ce689f2..006291e89b4a 100644
--- a/include/asm-x86_64/floppy.h
+++ b/include/asm-x86_64/floppy.h
@@ -147,10 +147,8 @@ static int fd_request_irq(void)
147 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT, 147 return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
148 "floppy", NULL); 148 "floppy", NULL);
149 else 149 else
150 return request_irq(FLOPPY_IRQ, floppy_interrupt, 150 return request_irq(FLOPPY_IRQ, floppy_interrupt, SA_INTERRUPT,
151 SA_INTERRUPT|SA_SAMPLE_RANDOM, 151 "floppy", NULL);
152 "floppy", NULL);
153
154} 152}
155 153
156static unsigned long dma_mem_alloc(unsigned long size) 154static unsigned long dma_mem_alloc(unsigned long size)
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 7f33aaf9f7b1..549eb929b2c0 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -21,6 +21,7 @@
21/* var is in discarded region: offset to particular copy we want */ 21/* var is in discarded region: offset to particular copy we want */
22#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu))) 22#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
23#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset())) 23#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
24#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
24 25
25/* A macro to avoid #include hell... */ 26/* A macro to avoid #include hell... */
26#define percpu_modcopy(pcpudst, src, size) \ 27#define percpu_modcopy(pcpudst, src, size) \
@@ -40,6 +41,7 @@ extern void setup_per_cpu_areas(void);
40 41
41#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var)) 42#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
42#define __get_cpu_var(var) per_cpu__##var 43#define __get_cpu_var(var) per_cpu__##var
44#define __raw_get_cpu_var(var) per_cpu__##var
43 45
44#endif /* SMP */ 46#endif /* SMP */
45 47
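The same __raw_get_cpu_var definition is added for powerpc, s390, sparc64 and x86_64 above. On these architectures it expands exactly like __get_cpu_var; presumably the raw spelling exists for callers that deliberately touch per-CPU data without the preemption checks the non-raw accessor may carry elsewhere. A hedged sketch with a hypothetical per-CPU counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, hyp_event_count);

/* Caller either has preemption disabled already, or genuinely does not
 * care which CPU's counter it bumps. */
static void hyp_count_event(void)
{
        __raw_get_cpu_var(hyp_event_count)++;
}
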
diff --git a/include/linux/acct.h b/include/linux/acct.h
index 3d54fbcf969e..e86bae7324d2 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -121,13 +121,17 @@ struct vfsmount;
121struct super_block; 121struct super_block;
122extern void acct_auto_close_mnt(struct vfsmount *m); 122extern void acct_auto_close_mnt(struct vfsmount *m);
123extern void acct_auto_close(struct super_block *sb); 123extern void acct_auto_close(struct super_block *sb);
124extern void acct_process(long exitcode); 124extern void acct_init_pacct(struct pacct_struct *pacct);
125extern void acct_collect(long exitcode, int group_dead);
126extern void acct_process(void);
125extern void acct_update_integrals(struct task_struct *tsk); 127extern void acct_update_integrals(struct task_struct *tsk);
126extern void acct_clear_integrals(struct task_struct *tsk); 128extern void acct_clear_integrals(struct task_struct *tsk);
127#else 129#else
128#define acct_auto_close_mnt(x) do { } while (0) 130#define acct_auto_close_mnt(x) do { } while (0)
129#define acct_auto_close(x) do { } while (0) 131#define acct_auto_close(x) do { } while (0)
130#define acct_process(x) do { } while (0) 132#define acct_init_pacct(x) do { } while (0)
133#define acct_collect(x,y) do { } while (0)
134#define acct_process() do { } while (0)
131#define acct_update_integrals(x) do { } while (0) 135#define acct_update_integrals(x) do { } while (0)
132#define acct_clear_integrals(task) do { } while (0) 136#define acct_clear_integrals(task) do { } while (0)
133#endif 137#endif
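acct_process() loses its exitcode argument and the collection step is split out into acct_init_pacct()/acct_collect(). A hedged sketch of how an exit path might now drive the interface; the surrounding teardown logic is assumed, not taken from this patch:

/* Hypothetical exit-path fragment */
static void hyp_do_exit_accounting(long code, int group_dead)
{
        /* gather per-process statistics while the task is still intact */
        acct_collect(code, group_dead);

        /* ... other teardown ... */

        /* write the record; the exit code now travels via acct_collect() */
        acct_process();
}
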
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b60ffe32cd21..76bdaeab6f62 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -47,7 +47,7 @@
47#define BIO_BUG_ON 47#define BIO_BUG_ON
48#endif 48#endif
49 49
50#define BIO_MAX_PAGES (256) 50#define BIO_MAX_PAGES 256
51#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT) 51#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
52#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) 52#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
53 53
diff --git a/include/linux/console.h b/include/linux/console.h
index 08734e660d41..d0f8a8009490 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -87,6 +87,7 @@ void give_up_console(const struct consw *sw);
87#define CON_CONSDEV (2) /* Last on the command line */ 87#define CON_CONSDEV (2) /* Last on the command line */
88#define CON_ENABLED (4) 88#define CON_ENABLED (4)
89#define CON_BOOT (8) 89#define CON_BOOT (8)
90#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
90 91
91struct console 92struct console
92{ 93{
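CON_ANYTIME marks a console whose write callback is safe to call even when the current CPU is not yet online. A minimal sketch of a driver opting in; the hyp_early_write callback and its behaviour are assumptions, only the flag bits come from this header:

#include <linux/console.h>

static void hyp_early_write(struct console *con, const char *s, unsigned n)
{
        /* pure MMIO/port output; no per-CPU or scheduler state touched */
}

static struct console hyp_early_console = {
        .name   = "hypcon",
        .write  = hyp_early_write,
        .flags  = CON_PRINTBUFFER | CON_ANYTIME,
        .index  = -1,
};
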
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9cbb781d6f80..b268a3c0c376 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -317,7 +317,8 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
317 (cpu) < NR_CPUS; \ 317 (cpu) < NR_CPUS; \
318 (cpu) = next_cpu((cpu), (mask))) 318 (cpu) = next_cpu((cpu), (mask)))
319#else /* NR_CPUS == 1 */ 319#else /* NR_CPUS == 1 */
320#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++) 320#define for_each_cpu_mask(cpu, mask) \
321 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
321#endif /* NR_CPUS */ 322#endif /* NR_CPUS */
322 323
323/* 324/*
@@ -405,7 +406,6 @@ int __any_online_cpu(const cpumask_t *mask);
405#define any_online_cpu(mask) 0 406#define any_online_cpu(mask) 0
406#endif 407#endif
407 408
408#define for_each_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
409#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) 409#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map)
410#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) 410#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)
411#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) 411#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)
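The UP definition of for_each_cpu_mask() now references its mask argument via the (void)mask cast, so a mask that is otherwise unused on NR_CPUS==1 builds no longer triggers an unused-variable warning, and side effects in the argument are still evaluated. A small sketch; hyp_count_set() is a hypothetical helper:

#include <linux/cpumask.h>

/* Count the CPUs set in a mask; compiles cleanly on both SMP and UP. */
static int hyp_count_set(const cpumask_t *maskp)
{
        int cpu, n = 0;

        for_each_cpu_mask(cpu, *maskp)
                n++;
        return n;
}
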
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 1e4bdfcf83a2..84cfa8bbdc36 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * include/linux/eventpoll.h ( Efficent event polling implementation ) 2 * include/linux/eventpoll.h ( Efficent event polling implementation )
3 * Copyright (C) 2001,...,2003 Davide Libenzi 3 * Copyright (C) 2001,...,2006 Davide Libenzi
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 757d54d8f1a5..5607e6457a65 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -710,6 +710,14 @@ struct dir_private_info {
710 __u32 next_hash; 710 __u32 next_hash;
711}; 711};
712 712
713/* calculate the first block number of the group */
714static inline ext3_fsblk_t
715ext3_group_first_block_no(struct super_block *sb, unsigned long group_no)
716{
717 return group_no * (ext3_fsblk_t)EXT3_BLOCKS_PER_GROUP(sb) +
718 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
719}
720
713/* 721/*
714 * Special error return code only used by dx_probe() and its callers. 722 * Special error return code only used by dx_probe() and its callers.
715 */ 723 */
@@ -730,14 +738,16 @@ struct dir_private_info {
730/* balloc.c */ 738/* balloc.c */
731extern int ext3_bg_has_super(struct super_block *sb, int group); 739extern int ext3_bg_has_super(struct super_block *sb, int group);
732extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); 740extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
733extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *); 741extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
734extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long, 742 ext3_fsblk_t goal, int *errp);
735 unsigned long *, int *); 743extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
736extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long, 744 ext3_fsblk_t goal, unsigned long *count, int *errp);
737 unsigned long); 745extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
738extern void ext3_free_blocks_sb (handle_t *, struct super_block *, 746 ext3_fsblk_t block, unsigned long count);
739 unsigned long, unsigned long, int *); 747extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
740extern unsigned long ext3_count_free_blocks (struct super_block *); 748 ext3_fsblk_t block, unsigned long count,
749 unsigned long *pdquot_freed_blocks);
750extern ext3_fsblk_t ext3_count_free_blocks (struct super_block *);
741extern void ext3_check_blocks_bitmap (struct super_block *); 751extern void ext3_check_blocks_bitmap (struct super_block *);
742extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, 752extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
743 unsigned int block_group, 753 unsigned int block_group,
@@ -773,7 +783,8 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
773 783
774 784
775/* inode.c */ 785/* inode.c */
776int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); 786int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
787 struct buffer_head *bh, ext3_fsblk_t blocknr);
777struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); 788struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
778struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); 789struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
779int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, 790int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
@@ -808,7 +819,7 @@ extern int ext3_group_add(struct super_block *sb,
808 struct ext3_new_group_data *input); 819 struct ext3_new_group_data *input);
809extern int ext3_group_extend(struct super_block *sb, 820extern int ext3_group_extend(struct super_block *sb,
810 struct ext3_super_block *es, 821 struct ext3_super_block *es,
811 unsigned long n_blocks_count); 822 ext3_fsblk_t n_blocks_count);
812 823
813/* super.c */ 824/* super.c */
814extern void ext3_error (struct super_block *, const char *, const char *, ...) 825extern void ext3_error (struct super_block *, const char *, const char *, ...)
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 7abf90147180..2f18b9511f21 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -21,9 +21,17 @@
21#include <linux/seqlock.h> 21#include <linux/seqlock.h>
22#include <linux/mutex.h> 22#include <linux/mutex.h>
23 23
24/* data type for block offset of block group */
25typedef int ext3_grpblk_t;
26
27/* data type for filesystem-wide blocks number */
28typedef unsigned long ext3_fsblk_t;
29
30#define E3FSBLK "%lu"
31
24struct ext3_reserve_window { 32struct ext3_reserve_window {
25 __u32 _rsv_start; /* First byte reserved */ 33 ext3_fsblk_t _rsv_start; /* First byte reserved */
26 __u32 _rsv_end; /* Last byte reserved or 0 */ 34 ext3_fsblk_t _rsv_end; /* Last byte reserved or 0 */
27}; 35};
28 36
29struct ext3_reserve_window_node { 37struct ext3_reserve_window_node {
@@ -50,7 +58,7 @@ struct ext3_block_alloc_info {
50 * allocated to this file. This give us the goal (target) for the next 58 * allocated to this file. This give us the goal (target) for the next
51 * allocation when we detect linearly ascending requests. 59 * allocation when we detect linearly ascending requests.
52 */ 60 */
53 __u32 last_alloc_physical_block; 61 ext3_fsblk_t last_alloc_physical_block;
54}; 62};
55 63
56#define rsv_start rsv_window._rsv_start 64#define rsv_start rsv_window._rsv_start
@@ -67,7 +75,7 @@ struct ext3_inode_info {
67 __u8 i_frag_no; 75 __u8 i_frag_no;
68 __u8 i_frag_size; 76 __u8 i_frag_size;
69#endif 77#endif
70 __u32 i_file_acl; 78 ext3_fsblk_t i_file_acl;
71 __u32 i_dir_acl; 79 __u32 i_dir_acl;
72 __u32 i_dtime; 80 __u32 i_dtime;
73 81
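ext3 block numbers move from bare __u32/unsigned long to the ext3_fsblk_t and ext3_grpblk_t types introduced here, with E3FSBLK as the matching printk format. A small sketch combining these with the ext3_group_first_block_no() helper added in the ext3_fs.h hunk above; hyp_show_group() is hypothetical:

#include <linux/ext3_fs.h>

static void hyp_show_group(struct super_block *sb, unsigned long group)
{
        ext3_fsblk_t first = ext3_group_first_block_no(sb, group);

        printk(KERN_DEBUG "ext3: group %lu begins at block " E3FSBLK "\n",
               group, first);
}
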
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 315d89740ddf..f1281687e549 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1,6 +1,7 @@
1#ifndef _LINUX_FB_H 1#ifndef _LINUX_FB_H
2#define _LINUX_FB_H 2#define _LINUX_FB_H
3 3
4#include <linux/backlight.h>
4#include <asm/types.h> 5#include <asm/types.h>
5 6
6/* Definitions of frame buffers */ 7/* Definitions of frame buffers */
@@ -366,6 +367,12 @@ struct fb_cursor {
366 struct fb_image image; /* Cursor image */ 367 struct fb_image image; /* Cursor image */
367}; 368};
368 369
370#ifdef CONFIG_FB_BACKLIGHT
371/* Settings for the generic backlight code */
372#define FB_BACKLIGHT_LEVELS 128
373#define FB_BACKLIGHT_MAX 0xFF
374#endif
375
369#ifdef __KERNEL__ 376#ifdef __KERNEL__
370 377
371#include <linux/fs.h> 378#include <linux/fs.h>
@@ -756,6 +763,21 @@ struct fb_info {
756 struct fb_cmap cmap; /* Current cmap */ 763 struct fb_cmap cmap; /* Current cmap */
757 struct list_head modelist; /* mode list */ 764 struct list_head modelist; /* mode list */
758 struct fb_videomode *mode; /* current mode */ 765 struct fb_videomode *mode; /* current mode */
766
767#ifdef CONFIG_FB_BACKLIGHT
768 /* Lock ordering:
769 * bl_mutex (protects bl_dev and bl_curve)
770 * bl_dev->sem (backlight class)
771 */
772 struct mutex bl_mutex;
773
774 /* assigned backlight device */
775 struct backlight_device *bl_dev;
776
777 /* Backlight level curve */
778 u8 bl_curve[FB_BACKLIGHT_LEVELS];
779#endif
780
759 struct fb_ops *fbops; 781 struct fb_ops *fbops;
760 struct device *device; 782 struct device *device;
761 struct class_device *class_device; /* sysfs per device attrs */ 783 struct class_device *class_device; /* sysfs per device attrs */
@@ -895,6 +917,7 @@ extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
895extern void framebuffer_release(struct fb_info *info); 917extern void framebuffer_release(struct fb_info *info);
896extern int fb_init_class_device(struct fb_info *fb_info); 918extern int fb_init_class_device(struct fb_info *fb_info);
897extern void fb_cleanup_class_device(struct fb_info *head); 919extern void fb_cleanup_class_device(struct fb_info *head);
920extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
898 921
899/* drivers/video/fbmon.c */ 922/* drivers/video/fbmon.c */
900#define FB_MAXTIMINGS 0 923#define FB_MAXTIMINGS 0
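CONFIG_FB_BACKLIGHT adds a per-framebuffer backlight device pointer, a mutex, and a 128-entry brightness curve, plus fb_bl_default_curve() to populate that curve. A rough sketch of how a driver might wire this up during probe; initialising bl_mutex and registering the backlight_device are assumed to have happened already, and the 0x40 minimum is a placeholder:

#ifdef CONFIG_FB_BACKLIGHT
/* Hypothetical probe fragment: info is the driver's fb_info, bd a
 * backlight_device the driver has already registered. */
static void hyp_attach_backlight(struct fb_info *info,
                                 struct backlight_device *bd)
{
        mutex_lock(&info->bl_mutex);
        info->bl_dev = bd;
        /* default curve from fully off up to FB_BACKLIGHT_MAX */
        fb_bl_default_curve(info, 0, 0x40, FB_BACKLIGHT_MAX);
        mutex_unlock(&info->bl_mutex);
}
#endif
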
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index c52a63755fdd..996f5611cd59 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -29,6 +29,7 @@
29#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */ 29#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
30#define AT_REMOVEDIR 0x200 /* Remove directory instead of 30#define AT_REMOVEDIR 0x200 /* Remove directory instead of
31 unlinking file. */ 31 unlinking file. */
32#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
32 33
33#ifdef __KERNEL__ 34#ifdef __KERNEL__
34 35
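AT_SYMLINK_FOLLOW is the inverse of AT_SYMLINK_NOFOLLOW, intended for calls such as linkat() that default to not following symlinks. A hedged user-space sketch; the path names are made up:

#include <fcntl.h>
#include <unistd.h>

/* Hard-link the target of "old-symlink" rather than the symlink itself. */
int hyp_link_target(int olddirfd, int newdirfd)
{
        return linkat(olddirfd, "old-symlink",
                      newdirfd, "new-name", AT_SYMLINK_FOLLOW);
}
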
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 5425b60021e3..9fc48a674b82 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -1,6 +1,6 @@
1/* 1/*
2 FUSE: Filesystem in Userspace 2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu> 3 Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
4 4
5 This program can be distributed under the terms of the GNU GPL. 5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING. 6 See the file COPYING.
@@ -9,18 +9,19 @@
9/* This file defines the kernel interface of FUSE */ 9/* This file defines the kernel interface of FUSE */
10 10
11#include <asm/types.h> 11#include <asm/types.h>
12#include <linux/major.h>
12 13
13/** Version number of this interface */ 14/** Version number of this interface */
14#define FUSE_KERNEL_VERSION 7 15#define FUSE_KERNEL_VERSION 7
15 16
16/** Minor version number of this interface */ 17/** Minor version number of this interface */
17#define FUSE_KERNEL_MINOR_VERSION 6 18#define FUSE_KERNEL_MINOR_VERSION 7
18 19
19/** The node ID of the root inode */ 20/** The node ID of the root inode */
20#define FUSE_ROOT_ID 1 21#define FUSE_ROOT_ID 1
21 22
22/** The major number of the fuse character device */ 23/** The major number of the fuse character device */
23#define FUSE_MAJOR 10 24#define FUSE_MAJOR MISC_MAJOR
24 25
25/** The minor number of the fuse character device */ 26/** The minor number of the fuse character device */
26#define FUSE_MINOR 229 27#define FUSE_MINOR 229
@@ -58,6 +59,13 @@ struct fuse_kstatfs {
58 __u32 spare[6]; 59 __u32 spare[6];
59}; 60};
60 61
62struct fuse_file_lock {
63 __u64 start;
64 __u64 end;
65 __u32 type;
66 __u32 pid; /* tgid */
67};
68
61/** 69/**
62 * Bitmasks for fuse_setattr_in.valid 70 * Bitmasks for fuse_setattr_in.valid
63 */ 71 */
@@ -82,6 +90,7 @@ struct fuse_kstatfs {
82 * INIT request/reply flags 90 * INIT request/reply flags
83 */ 91 */
84#define FUSE_ASYNC_READ (1 << 0) 92#define FUSE_ASYNC_READ (1 << 0)
93#define FUSE_POSIX_LOCKS (1 << 1)
85 94
86enum fuse_opcode { 95enum fuse_opcode {
87 FUSE_LOOKUP = 1, 96 FUSE_LOOKUP = 1,
@@ -112,8 +121,12 @@ enum fuse_opcode {
112 FUSE_READDIR = 28, 121 FUSE_READDIR = 28,
113 FUSE_RELEASEDIR = 29, 122 FUSE_RELEASEDIR = 29,
114 FUSE_FSYNCDIR = 30, 123 FUSE_FSYNCDIR = 30,
124 FUSE_GETLK = 31,
125 FUSE_SETLK = 32,
126 FUSE_SETLKW = 33,
115 FUSE_ACCESS = 34, 127 FUSE_ACCESS = 34,
116 FUSE_CREATE = 35 128 FUSE_CREATE = 35,
129 FUSE_INTERRUPT = 36,
117}; 130};
118 131
119/* The read buffer is required to be at least 8k, but may be much larger */ 132/* The read buffer is required to be at least 8k, but may be much larger */
@@ -199,6 +212,7 @@ struct fuse_flush_in {
199 __u64 fh; 212 __u64 fh;
200 __u32 flush_flags; 213 __u32 flush_flags;
201 __u32 padding; 214 __u32 padding;
215 __u64 lock_owner;
202}; 216};
203 217
204struct fuse_read_in { 218struct fuse_read_in {
@@ -247,6 +261,16 @@ struct fuse_getxattr_out {
247 __u32 padding; 261 __u32 padding;
248}; 262};
249 263
264struct fuse_lk_in {
265 __u64 fh;
266 __u64 owner;
267 struct fuse_file_lock lk;
268};
269
270struct fuse_lk_out {
271 struct fuse_file_lock lk;
272};
273
250struct fuse_access_in { 274struct fuse_access_in {
251 __u32 mask; 275 __u32 mask;
252 __u32 padding; 276 __u32 padding;
@@ -268,6 +292,10 @@ struct fuse_init_out {
268 __u32 max_write; 292 __u32 max_write;
269}; 293};
270 294
295struct fuse_interrupt_in {
296 __u64 unique;
297};
298
271struct fuse_in_header { 299struct fuse_in_header {
272 __u32 len; 300 __u32 len;
273 __u32 opcode; 301 __u32 opcode;
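Protocol minor version 7 adds POSIX lock support: struct fuse_file_lock, the GETLK/SETLK/SETLKW opcodes, the FUSE_POSIX_LOCKS INIT flag, and FUSE_INTERRUPT for cancelling blocked requests. A hedged sketch of how the kernel side might translate a VFS struct file_lock into the new wire format; the helper name and the fh/owner sources are assumptions, only the structure layout comes from this header:

#include <linux/fs.h>
#include <linux/fuse.h>

static void hyp_fill_lk_in(struct fuse_lk_in *arg, const struct file_lock *fl,
                           u64 fh, u64 owner)
{
        arg->fh       = fh;
        arg->owner    = owner;
        arg->lk.start = fl->fl_start;
        arg->lk.end   = fl->fl_end;
        arg->lk.type  = fl->fl_type;
        arg->lk.pid   = fl->fl_pid;     /* tgid of the lock holder */
}
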
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7d2a1b974c5e..07d7305f131e 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -40,7 +40,6 @@ struct hrtimer_base;
40 40
41/** 41/**
42 * struct hrtimer - the basic hrtimer structure 42 * struct hrtimer - the basic hrtimer structure
43 *
44 * @node: red black tree node for time ordered insertion 43 * @node: red black tree node for time ordered insertion
45 * @expires: the absolute expiry time in the hrtimers internal 44 * @expires: the absolute expiry time in the hrtimers internal
46 * representation. The time is related to the clock on 45 * representation. The time is related to the clock on
@@ -59,7 +58,6 @@ struct hrtimer {
59 58
60/** 59/**
61 * struct hrtimer_sleeper - simple sleeper structure 60 * struct hrtimer_sleeper - simple sleeper structure
62 *
63 * @timer: embedded timer structure 61 * @timer: embedded timer structure
64 * @task: task to wake up 62 * @task: task to wake up
65 * 63 *
@@ -72,7 +70,6 @@ struct hrtimer_sleeper {
72 70
73/** 71/**
74 * struct hrtimer_base - the timer base for a specific clock 72 * struct hrtimer_base - the timer base for a specific clock
75 *
76 * @index: clock type index for per_cpu support when moving a timer 73 * @index: clock type index for per_cpu support when moving a timer
77 * to a base on another cpu. 74 * to a base on another cpu.
78 * @lock: lock protecting the base and associated timers 75 * @lock: lock protecting the base and associated timers
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 77e66d055f5b..ef7bef207f48 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -630,6 +630,7 @@ typedef struct ide_drive_s {
630 unsigned int usage; /* current "open()" count for drive */ 630 unsigned int usage; /* current "open()" count for drive */
631 unsigned int failures; /* current failure count */ 631 unsigned int failures; /* current failure count */
632 unsigned int max_failures; /* maximum allowed failure count */ 632 unsigned int max_failures; /* maximum allowed failure count */
633 u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */
633 634
634 u64 capacity64; /* total number of sectors */ 635 u64 capacity64; /* total number of sectors */
635 636
@@ -1005,6 +1006,8 @@ extern ide_hwif_t ide_hwifs[]; /* master data repository */
1005extern int noautodma; 1006extern int noautodma;
1006 1007
1007extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); 1008extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
1009int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
1010 int uptodate, int nr_sectors);
1008 1011
1009/* 1012/*
1010 * This is used on exit from the driver to designate the next irq handler 1013 * This is used on exit from the driver to designate the next irq handler
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 8c21aaa248b4..3c5e4c2e517d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -117,6 +117,8 @@ extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
117 __attribute__ ((format (printf, 3, 4))); 117 __attribute__ ((format (printf, 3, 4)));
118extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) 118extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
119 __attribute__ ((format (printf, 3, 0))); 119 __attribute__ ((format (printf, 3, 0)));
120extern char *kasprintf(gfp_t gfp, const char *fmt, ...)
121 __attribute__ ((format (printf, 2, 3)));
120 122
121extern int sscanf(const char *, const char *, ...) 123extern int sscanf(const char *, const char *, ...)
122 __attribute__ ((format (scanf, 2, 3))); 124 __attribute__ ((format (scanf, 2, 3)));
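kasprintf() is a kmalloc-backed sprintf(): it allocates a buffer of exactly the right size with the given gfp flags and returns it (or NULL on failure), leaving the caller to kfree() it. A minimal sketch with a made-up label:

#include <linux/kernel.h>
#include <linux/slab.h>

static char *hyp_make_label(int id)
{
        char *label = kasprintf(GFP_KERNEL, "hyp-dev-%d", id);

        if (!label)
                return NULL;    /* allocation failed */
        return label;           /* caller must kfree() it */
}
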
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index ebdd41fd1082..7cce5dfa092f 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -4,37 +4,19 @@
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/sched.h> 5#include <linux/sched.h>
6 6
7/**
8 * kthread_create: create a kthread.
9 * @threadfn: the function to run until signal_pending(current).
10 * @data: data ptr for @threadfn.
11 * @namefmt: printf-style name for the thread.
12 *
13 * Description: This helper function creates and names a kernel
14 * thread. The thread will be stopped: use wake_up_process() to start
15 * it. See also kthread_run(), kthread_create_on_cpu().
16 *
17 * When woken, the thread will run @threadfn() with @data as its
18 * argument. @threadfn can either call do_exit() directly if it is a
19 * standalone thread for which noone will call kthread_stop(), or
20 * return when 'kthread_should_stop()' is true (which means
21 * kthread_stop() has been called). The return value should be zero
22 * or a negative error number: it will be passed to kthread_stop().
23 *
24 * Returns a task_struct or ERR_PTR(-ENOMEM).
25 */
26struct task_struct *kthread_create(int (*threadfn)(void *data), 7struct task_struct *kthread_create(int (*threadfn)(void *data),
27 void *data, 8 void *data,
28 const char namefmt[], ...); 9 const char namefmt[], ...);
29 10
30/** 11/**
31 * kthread_run: create and wake a thread. 12 * kthread_run - create and wake a thread.
32 * @threadfn: the function to run until signal_pending(current). 13 * @threadfn: the function to run until signal_pending(current).
33 * @data: data ptr for @threadfn. 14 * @data: data ptr for @threadfn.
34 * @namefmt: printf-style name for the thread. 15 * @namefmt: printf-style name for the thread.
35 * 16 *
36 * Description: Convenient wrapper for kthread_create() followed by 17 * Description: Convenient wrapper for kthread_create() followed by
37 * wake_up_process(). Returns the kthread, or ERR_PTR(-ENOMEM). */ 18 * wake_up_process(). Returns the kthread or ERR_PTR(-ENOMEM).
19 */
38#define kthread_run(threadfn, data, namefmt, ...) \ 20#define kthread_run(threadfn, data, namefmt, ...) \
39({ \ 21({ \
40 struct task_struct *__k \ 22 struct task_struct *__k \
@@ -44,50 +26,9 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
44 __k; \ 26 __k; \
45}) 27})
46 28
47/**
48 * kthread_bind: bind a just-created kthread to a cpu.
49 * @k: thread created by kthread_create().
50 * @cpu: cpu (might not be online, must be possible) for @k to run on.
51 *
52 * Description: This function is equivalent to set_cpus_allowed(),
53 * except that @cpu doesn't need to be online, and the thread must be
54 * stopped (ie. just returned from kthread_create().
55 */
56void kthread_bind(struct task_struct *k, unsigned int cpu); 29void kthread_bind(struct task_struct *k, unsigned int cpu);
57
58/**
59 * kthread_stop: stop a thread created by kthread_create().
60 * @k: thread created by kthread_create().
61 *
62 * Sets kthread_should_stop() for @k to return true, wakes it, and
63 * waits for it to exit. Your threadfn() must not call do_exit()
64 * itself if you use this function! This can also be called after
65 * kthread_create() instead of calling wake_up_process(): the thread
66 * will exit without calling threadfn().
67 *
68 * Returns the result of threadfn(), or -EINTR if wake_up_process()
69 * was never called. */
70int kthread_stop(struct task_struct *k); 30int kthread_stop(struct task_struct *k);
71
72/**
73 * kthread_stop_sem: stop a thread created by kthread_create().
74 * @k: thread created by kthread_create().
75 * @s: semaphore that @k waits on while idle.
76 *
77 * Does essentially the same thing as kthread_stop() above, but wakes
78 * @k by calling up(@s).
79 *
80 * Returns the result of threadfn(), or -EINTR if wake_up_process()
81 * was never called. */
82int kthread_stop_sem(struct task_struct *k, struct semaphore *s); 31int kthread_stop_sem(struct task_struct *k, struct semaphore *s);
83
84/**
85 * kthread_should_stop: should this kthread return now?
86 *
87 * When someone calls kthread_stop on your kthread, it will be woken
88 * and this will return true. You should then return, and your return
89 * value will be passed through to kthread_stop().
90 */
91int kthread_should_stop(void); 32int kthread_should_stop(void);
92 33
93#endif /* _LINUX_KTHREAD_H */ 34#endif /* _LINUX_KTHREAD_H */
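The long kernel-doc blocks move out of the header (into kernel/kthread.c, presumably), leaving bare declarations. For reference, a minimal sketch of the lifecycle those comments describe; the worker below is hypothetical:

#include <linux/kthread.h>
#include <linux/delay.h>

static int hyp_worker(void *data)
{
        while (!kthread_should_stop()) {
                /* do one unit of work, then nap */
                msleep_interruptible(100);
        }
        return 0;               /* value handed back by kthread_stop() */
}

static struct task_struct *hyp_task;

static int hyp_start(void)
{
        hyp_task = kthread_run(hyp_worker, NULL, "hyp-worker");
        return IS_ERR(hyp_task) ? PTR_ERR(hyp_task) : 0;
}

static void hyp_stop(void)
{
        kthread_stop(hyp_task);
}
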
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 62bc57580707..ed3396dcc4f7 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -66,7 +66,6 @@ typedef union {
66 66
67/** 67/**
68 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value 68 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
69 *
70 * @secs: seconds to set 69 * @secs: seconds to set
71 * @nsecs: nanoseconds to set 70 * @nsecs: nanoseconds to set
72 * 71 *
@@ -138,7 +137,6 @@ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
138 137
139/** 138/**
140 * ktime_sub - subtract two ktime_t variables 139 * ktime_sub - subtract two ktime_t variables
141 *
142 * @lhs: minuend 140 * @lhs: minuend
143 * @rhs: subtrahend 141 * @rhs: subtrahend
144 * 142 *
@@ -157,7 +155,6 @@ static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
157 155
158/** 156/**
159 * ktime_add - add two ktime_t variables 157 * ktime_add - add two ktime_t variables
160 *
161 * @add1: addend1 158 * @add1: addend1
162 * @add2: addend2 159 * @add2: addend2
163 * 160 *
@@ -184,7 +181,6 @@ static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
184 181
185/** 182/**
186 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable 183 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
187 *
188 * @kt: addend 184 * @kt: addend
189 * @nsec: the scalar nsec value to add 185 * @nsec: the scalar nsec value to add
190 * 186 *
@@ -194,7 +190,6 @@ extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
194 190
195/** 191/**
196 * timespec_to_ktime - convert a timespec to ktime_t format 192 * timespec_to_ktime - convert a timespec to ktime_t format
197 *
198 * @ts: the timespec variable to convert 193 * @ts: the timespec variable to convert
199 * 194 *
200 * Returns a ktime_t variable with the converted timespec value 195 * Returns a ktime_t variable with the converted timespec value
@@ -207,7 +202,6 @@ static inline ktime_t timespec_to_ktime(const struct timespec ts)
207 202
208/** 203/**
209 * timeval_to_ktime - convert a timeval to ktime_t format 204 * timeval_to_ktime - convert a timeval to ktime_t format
210 *
211 * @tv: the timeval variable to convert 205 * @tv: the timeval variable to convert
212 * 206 *
213 * Returns a ktime_t variable with the converted timeval value 207 * Returns a ktime_t variable with the converted timeval value
@@ -220,7 +214,6 @@ static inline ktime_t timeval_to_ktime(const struct timeval tv)
220 214
221/** 215/**
222 * ktime_to_timespec - convert a ktime_t variable to timespec format 216 * ktime_to_timespec - convert a ktime_t variable to timespec format
223 *
224 * @kt: the ktime_t variable to convert 217 * @kt: the ktime_t variable to convert
225 * 218 *
226 * Returns the timespec representation of the ktime value 219 * Returns the timespec representation of the ktime value
@@ -233,7 +226,6 @@ static inline struct timespec ktime_to_timespec(const ktime_t kt)
233 226
234/** 227/**
235 * ktime_to_timeval - convert a ktime_t variable to timeval format 228 * ktime_to_timeval - convert a ktime_t variable to timeval format
236 *
237 * @kt: the ktime_t variable to convert 229 * @kt: the ktime_t variable to convert
238 * 230 *
239 * Returns the timeval representation of the ktime value 231 * Returns the timeval representation of the ktime value
diff --git a/include/linux/list.h b/include/linux/list.h
index a02642e4710a..37ca31b21bb7 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -281,16 +281,17 @@ static inline int list_empty(const struct list_head *head)
281} 281}
282 282
283/** 283/**
284 * list_empty_careful - tests whether a list is 284 * list_empty_careful - tests whether a list is empty and not being modified
285 * empty _and_ checks that no other CPU might be 285 * @head: the list to test
286 * in the process of still modifying either member 286 *
287 * Description:
288 * tests whether a list is empty _and_ checks that no other CPU might be
289 * in the process of modifying either member (next or prev)
287 * 290 *
288 * NOTE: using list_empty_careful() without synchronization 291 * NOTE: using list_empty_careful() without synchronization
289 * can only be safe if the only activity that can happen 292 * can only be safe if the only activity that can happen
290 * to the list entry is list_del_init(). Eg. it cannot be used 293 * to the list entry is list_del_init(). Eg. it cannot be used
291 * if another CPU could re-list_add() it. 294 * if another CPU could re-list_add() it.
292 *
293 * @head: the list to test.
294 */ 295 */
295static inline int list_empty_careful(const struct list_head *head) 296static inline int list_empty_careful(const struct list_head *head)
296{ 297{
@@ -350,7 +351,7 @@ static inline void list_splice_init(struct list_head *list,
350 351
351/** 352/**
352 * list_for_each - iterate over a list 353 * list_for_each - iterate over a list
353 * @pos: the &struct list_head to use as a loop counter. 354 * @pos: the &struct list_head to use as a loop cursor.
354 * @head: the head for your list. 355 * @head: the head for your list.
355 */ 356 */
356#define list_for_each(pos, head) \ 357#define list_for_each(pos, head) \
@@ -359,7 +360,7 @@ static inline void list_splice_init(struct list_head *list,
359 360
360/** 361/**
361 * __list_for_each - iterate over a list 362 * __list_for_each - iterate over a list
362 * @pos: the &struct list_head to use as a loop counter. 363 * @pos: the &struct list_head to use as a loop cursor.
363 * @head: the head for your list. 364 * @head: the head for your list.
364 * 365 *
365 * This variant differs from list_for_each() in that it's the 366 * This variant differs from list_for_each() in that it's the
@@ -372,7 +373,7 @@ static inline void list_splice_init(struct list_head *list,
372 373
373/** 374/**
374 * list_for_each_prev - iterate over a list backwards 375 * list_for_each_prev - iterate over a list backwards
375 * @pos: the &struct list_head to use as a loop counter. 376 * @pos: the &struct list_head to use as a loop cursor.
376 * @head: the head for your list. 377 * @head: the head for your list.
377 */ 378 */
378#define list_for_each_prev(pos, head) \ 379#define list_for_each_prev(pos, head) \
@@ -380,8 +381,8 @@ static inline void list_splice_init(struct list_head *list,
380 pos = pos->prev) 381 pos = pos->prev)
381 382
382/** 383/**
383 * list_for_each_safe - iterate over a list safe against removal of list entry 384 * list_for_each_safe - iterate over a list safe against removal of list entry
384 * @pos: the &struct list_head to use as a loop counter. 385 * @pos: the &struct list_head to use as a loop cursor.
385 * @n: another &struct list_head to use as temporary storage 386 * @n: another &struct list_head to use as temporary storage
386 * @head: the head for your list. 387 * @head: the head for your list.
387 */ 388 */
@@ -391,7 +392,7 @@ static inline void list_splice_init(struct list_head *list,
391 392
392/** 393/**
393 * list_for_each_entry - iterate over list of given type 394 * list_for_each_entry - iterate over list of given type
394 * @pos: the type * to use as a loop counter. 395 * @pos: the type * to use as a loop cursor.
395 * @head: the head for your list. 396 * @head: the head for your list.
396 * @member: the name of the list_struct within the struct. 397 * @member: the name of the list_struct within the struct.
397 */ 398 */
@@ -402,7 +403,7 @@ static inline void list_splice_init(struct list_head *list,
402 403
403/** 404/**
404 * list_for_each_entry_reverse - iterate backwards over list of given type. 405 * list_for_each_entry_reverse - iterate backwards over list of given type.
405 * @pos: the type * to use as a loop counter. 406 * @pos: the type * to use as a loop cursor.
406 * @head: the head for your list. 407 * @head: the head for your list.
407 * @member: the name of the list_struct within the struct. 408 * @member: the name of the list_struct within the struct.
408 */ 409 */
@@ -412,21 +413,24 @@ static inline void list_splice_init(struct list_head *list,
412 pos = list_entry(pos->member.prev, typeof(*pos), member)) 413 pos = list_entry(pos->member.prev, typeof(*pos), member))
413 414
414/** 415/**
415 * list_prepare_entry - prepare a pos entry for use as a start point in 416 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue
416 * list_for_each_entry_continue
417 * @pos: the type * to use as a start point 417 * @pos: the type * to use as a start point
418 * @head: the head of the list 418 * @head: the head of the list
419 * @member: the name of the list_struct within the struct. 419 * @member: the name of the list_struct within the struct.
420 *
421 * Prepares a pos entry for use as a start point in list_for_each_entry_continue.
420 */ 422 */
421#define list_prepare_entry(pos, head, member) \ 423#define list_prepare_entry(pos, head, member) \
422 ((pos) ? : list_entry(head, typeof(*pos), member)) 424 ((pos) ? : list_entry(head, typeof(*pos), member))
423 425
424/** 426/**
425 * list_for_each_entry_continue - iterate over list of given type 427 * list_for_each_entry_continue - continue iteration over list of given type
426 * continuing after existing point 428 * @pos: the type * to use as a loop cursor.
427 * @pos: the type * to use as a loop counter.
428 * @head: the head for your list. 429 * @head: the head for your list.
429 * @member: the name of the list_struct within the struct. 430 * @member: the name of the list_struct within the struct.
431 *
432 * Continue to iterate over list of given type, continuing after
433 * the current position.
430 */ 434 */
431#define list_for_each_entry_continue(pos, head, member) \ 435#define list_for_each_entry_continue(pos, head, member) \
432 for (pos = list_entry(pos->member.next, typeof(*pos), member); \ 436 for (pos = list_entry(pos->member.next, typeof(*pos), member); \
@@ -434,11 +438,12 @@ static inline void list_splice_init(struct list_head *list,
434 pos = list_entry(pos->member.next, typeof(*pos), member)) 438 pos = list_entry(pos->member.next, typeof(*pos), member))
435 439
436/** 440/**
437 * list_for_each_entry_from - iterate over list of given type 441 * list_for_each_entry_from - iterate over list of given type from the current point
438 * continuing from existing point 442 * @pos: the type * to use as a loop cursor.
439 * @pos: the type * to use as a loop counter.
440 * @head: the head for your list. 443 * @head: the head for your list.
441 * @member: the name of the list_struct within the struct. 444 * @member: the name of the list_struct within the struct.
445 *
446 * Iterate over list of given type, continuing from current position.
442 */ 447 */
443#define list_for_each_entry_from(pos, head, member) \ 448#define list_for_each_entry_from(pos, head, member) \
444 for (; prefetch(pos->member.next), &pos->member != (head); \ 449 for (; prefetch(pos->member.next), &pos->member != (head); \
@@ -446,7 +451,7 @@ static inline void list_splice_init(struct list_head *list,
446 451
447/** 452/**
448 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry 453 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
449 * @pos: the type * to use as a loop counter. 454 * @pos: the type * to use as a loop cursor.
450 * @n: another type * to use as temporary storage 455 * @n: another type * to use as temporary storage
451 * @head: the head for your list. 456 * @head: the head for your list.
452 * @member: the name of the list_struct within the struct. 457 * @member: the name of the list_struct within the struct.
@@ -458,12 +463,14 @@ static inline void list_splice_init(struct list_head *list,
458 pos = n, n = list_entry(n->member.next, typeof(*n), member)) 463 pos = n, n = list_entry(n->member.next, typeof(*n), member))
459 464
460/** 465/**
461 * list_for_each_entry_safe_continue - iterate over list of given type 466 * list_for_each_entry_safe_continue
462 * continuing after existing point safe against removal of list entry 467 * @pos: the type * to use as a loop cursor.
463 * @pos: the type * to use as a loop counter.
464 * @n: another type * to use as temporary storage 468 * @n: another type * to use as temporary storage
465 * @head: the head for your list. 469 * @head: the head for your list.
466 * @member: the name of the list_struct within the struct. 470 * @member: the name of the list_struct within the struct.
471 *
472 * Iterate over list of given type, continuing after current point,
473 * safe against removal of list entry.
467 */ 474 */
468#define list_for_each_entry_safe_continue(pos, n, head, member) \ 475#define list_for_each_entry_safe_continue(pos, n, head, member) \
469 for (pos = list_entry(pos->member.next, typeof(*pos), member), \ 476 for (pos = list_entry(pos->member.next, typeof(*pos), member), \
@@ -472,12 +479,14 @@ static inline void list_splice_init(struct list_head *list,
472 pos = n, n = list_entry(n->member.next, typeof(*n), member)) 479 pos = n, n = list_entry(n->member.next, typeof(*n), member))
473 480
474/** 481/**
475 * list_for_each_entry_safe_from - iterate over list of given type 482 * list_for_each_entry_safe_from
476 * from existing point safe against removal of list entry 483 * @pos: the type * to use as a loop cursor.
477 * @pos: the type * to use as a loop counter.
478 * @n: another type * to use as temporary storage 484 * @n: another type * to use as temporary storage
479 * @head: the head for your list. 485 * @head: the head for your list.
480 * @member: the name of the list_struct within the struct. 486 * @member: the name of the list_struct within the struct.
487 *
488 * Iterate over list of given type from current point, safe against
489 * removal of list entry.
481 */ 490 */
482#define list_for_each_entry_safe_from(pos, n, head, member) \ 491#define list_for_each_entry_safe_from(pos, n, head, member) \
483 for (n = list_entry(pos->member.next, typeof(*pos), member); \ 492 for (n = list_entry(pos->member.next, typeof(*pos), member); \
@@ -485,12 +494,14 @@ static inline void list_splice_init(struct list_head *list,
485 pos = n, n = list_entry(n->member.next, typeof(*n), member)) 494 pos = n, n = list_entry(n->member.next, typeof(*n), member))
486 495
487/** 496/**
488 * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against 497 * list_for_each_entry_safe_reverse
489 * removal of list entry 498 * @pos: the type * to use as a loop cursor.
490 * @pos: the type * to use as a loop counter.
491 * @n: another type * to use as temporary storage 499 * @n: another type * to use as temporary storage
492 * @head: the head for your list. 500 * @head: the head for your list.
493 * @member: the name of the list_struct within the struct. 501 * @member: the name of the list_struct within the struct.
502 *
503 * Iterate backwards over list of given type, safe against removal
504 * of list entry.
494 */ 505 */
495#define list_for_each_entry_safe_reverse(pos, n, head, member) \ 506#define list_for_each_entry_safe_reverse(pos, n, head, member) \
496 for (pos = list_entry((head)->prev, typeof(*pos), member), \ 507 for (pos = list_entry((head)->prev, typeof(*pos), member), \
@@ -500,7 +511,7 @@ static inline void list_splice_init(struct list_head *list,
500 511
501/** 512/**
502 * list_for_each_rcu - iterate over an rcu-protected list 513 * list_for_each_rcu - iterate over an rcu-protected list
503 * @pos: the &struct list_head to use as a loop counter. 514 * @pos: the &struct list_head to use as a loop cursor.
504 * @head: the head for your list. 515 * @head: the head for your list.
505 * 516 *
506 * This list-traversal primitive may safely run concurrently with 517 * This list-traversal primitive may safely run concurrently with
@@ -518,12 +529,13 @@ static inline void list_splice_init(struct list_head *list,
518 pos = pos->next) 529 pos = pos->next)
519 530
520/** 531/**
521 * list_for_each_safe_rcu - iterate over an rcu-protected list safe 532 * list_for_each_safe_rcu
522 * against removal of list entry 533 * @pos: the &struct list_head to use as a loop cursor.
523 * @pos: the &struct list_head to use as a loop counter.
524 * @n: another &struct list_head to use as temporary storage 534 * @n: another &struct list_head to use as temporary storage
525 * @head: the head for your list. 535 * @head: the head for your list.
526 * 536 *
537 * Iterate over an rcu-protected list, safe against removal of list entry.
538 *
527 * This list-traversal primitive may safely run concurrently with 539 * This list-traversal primitive may safely run concurrently with
528 * the _rcu list-mutation primitives such as list_add_rcu() 540 * the _rcu list-mutation primitives such as list_add_rcu()
529 * as long as the traversal is guarded by rcu_read_lock(). 541 * as long as the traversal is guarded by rcu_read_lock().
@@ -535,7 +547,7 @@ static inline void list_splice_init(struct list_head *list,
535 547
536/** 548/**
537 * list_for_each_entry_rcu - iterate over rcu list of given type 549 * list_for_each_entry_rcu - iterate over rcu list of given type
538 * @pos: the type * to use as a loop counter. 550 * @pos: the type * to use as a loop cursor.
539 * @head: the head for your list. 551 * @head: the head for your list.
540 * @member: the name of the list_struct within the struct. 552 * @member: the name of the list_struct within the struct.
541 * 553 *
@@ -551,11 +563,12 @@ static inline void list_splice_init(struct list_head *list,
551 563
552 564
553/** 565/**
554 * list_for_each_continue_rcu - iterate over an rcu-protected list 566 * list_for_each_continue_rcu
555 * continuing after existing point. 567 * @pos: the &struct list_head to use as a loop cursor.
556 * @pos: the &struct list_head to use as a loop counter.
557 * @head: the head for your list. 568 * @head: the head for your list.
558 * 569 *
570 * Iterate over an rcu-protected list, continuing after current point.
571 *
559 * This list-traversal primitive may safely run concurrently with 572 * This list-traversal primitive may safely run concurrently with
560 * the _rcu list-mutation primitives such as list_add_rcu() 573 * the _rcu list-mutation primitives such as list_add_rcu()
561 * as long as the traversal is guarded by rcu_read_lock(). 574 * as long as the traversal is guarded by rcu_read_lock().
@@ -681,11 +694,14 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
681 694
682 695
683/** 696/**
684 * hlist_add_head_rcu - adds the specified element to the specified hlist, 697 * hlist_add_head_rcu
685 * while permitting racing traversals.
686 * @n: the element to add to the hash list. 698 * @n: the element to add to the hash list.
687 * @h: the list to add to. 699 * @h: the list to add to.
688 * 700 *
701 * Description:
702 * Adds the specified element to the specified hlist,
703 * while permitting racing traversals.
704 *
689 * The caller must take whatever precautions are necessary 705 * The caller must take whatever precautions are necessary
690 * (such as holding appropriate locks) to avoid racing 706 * (such as holding appropriate locks) to avoid racing
691 * with another list-mutation primitive, such as hlist_add_head_rcu() 707 * with another list-mutation primitive, such as hlist_add_head_rcu()
@@ -730,11 +746,14 @@ static inline void hlist_add_after(struct hlist_node *n,
730} 746}
731 747
732/** 748/**
733 * hlist_add_before_rcu - adds the specified element to the specified hlist 749 * hlist_add_before_rcu
734 * before the specified node while permitting racing traversals.
735 * @n: the new element to add to the hash list. 750 * @n: the new element to add to the hash list.
736 * @next: the existing element to add the new element before. 751 * @next: the existing element to add the new element before.
737 * 752 *
753 * Description:
754 * Adds the specified element to the specified hlist
755 * before the specified node while permitting racing traversals.
756 *
738 * The caller must take whatever precautions are necessary 757 * The caller must take whatever precautions are necessary
739 * (such as holding appropriate locks) to avoid racing 758 * (such as holding appropriate locks) to avoid racing
740 * with another list-mutation primitive, such as hlist_add_head_rcu() 759 * with another list-mutation primitive, such as hlist_add_head_rcu()
@@ -755,11 +774,14 @@ static inline void hlist_add_before_rcu(struct hlist_node *n,
755} 774}
756 775
757/** 776/**
758 * hlist_add_after_rcu - adds the specified element to the specified hlist 777 * hlist_add_after_rcu
759 * after the specified node while permitting racing traversals.
760 * @prev: the existing element to add the new element after. 778 * @prev: the existing element to add the new element after.
761 * @n: the new element to add to the hash list. 779 * @n: the new element to add to the hash list.
762 * 780 *
781 * Description:
782 * Adds the specified element to the specified hlist
783 * after the specified node while permitting racing traversals.
784 *
763 * The caller must take whatever precautions are necessary 785 * The caller must take whatever precautions are necessary
764 * (such as holding appropriate locks) to avoid racing 786 * (such as holding appropriate locks) to avoid racing
765 * with another list-mutation primitive, such as hlist_add_head_rcu() 787 * with another list-mutation primitive, such as hlist_add_head_rcu()
@@ -792,8 +814,8 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
792 814
793/** 815/**
794 * hlist_for_each_entry - iterate over list of given type 816 * hlist_for_each_entry - iterate over list of given type
795 * @tpos: the type * to use as a loop counter. 817 * @tpos: the type * to use as a loop cursor.
796 * @pos: the &struct hlist_node to use as a loop counter. 818 * @pos: the &struct hlist_node to use as a loop cursor.
797 * @head: the head for your list. 819 * @head: the head for your list.
798 * @member: the name of the hlist_node within the struct. 820 * @member: the name of the hlist_node within the struct.
799 */ 821 */
@@ -804,9 +826,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
804 pos = pos->next) 826 pos = pos->next)
805 827
806/** 828/**
807 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point 829 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
808 * @tpos: the type * to use as a loop counter. 830 * @tpos: the type * to use as a loop cursor.
809 * @pos: the &struct hlist_node to use as a loop counter. 831 * @pos: the &struct hlist_node to use as a loop cursor.
810 * @member: the name of the hlist_node within the struct. 832 * @member: the name of the hlist_node within the struct.
811 */ 833 */
812#define hlist_for_each_entry_continue(tpos, pos, member) \ 834#define hlist_for_each_entry_continue(tpos, pos, member) \
@@ -816,9 +838,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
816 pos = pos->next) 838 pos = pos->next)
817 839
818/** 840/**
819 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point 841 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
820 * @tpos: the type * to use as a loop counter. 842 * @tpos: the type * to use as a loop cursor.
821 * @pos: the &struct hlist_node to use as a loop counter. 843 * @pos: the &struct hlist_node to use as a loop cursor.
822 * @member: the name of the hlist_node within the struct. 844 * @member: the name of the hlist_node within the struct.
823 */ 845 */
824#define hlist_for_each_entry_from(tpos, pos, member) \ 846#define hlist_for_each_entry_from(tpos, pos, member) \
@@ -828,8 +850,8 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
828 850
829/** 851/**
830 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry 852 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
831 * @tpos: the type * to use as a loop counter. 853 * @tpos: the type * to use as a loop cursor.
832 * @pos: the &struct hlist_node to use as a loop counter. 854 * @pos: the &struct hlist_node to use as a loop cursor.
833 * @n: another &struct hlist_node to use as temporary storage 855 * @n: another &struct hlist_node to use as temporary storage
834 * @head: the head for your list. 856 * @head: the head for your list.
835 * @member: the name of the hlist_node within the struct. 857 * @member: the name of the hlist_node within the struct.
@@ -842,8 +864,8 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
842 864
843/** 865/**
844 * hlist_for_each_entry_rcu - iterate over rcu list of given type 866 * hlist_for_each_entry_rcu - iterate over rcu list of given type
845 * @tpos: the type * to use as a loop counter. 867 * @tpos: the type * to use as a loop cursor.
846 * @pos: the &struct hlist_node to use as a loop counter. 868 * @pos: the &struct hlist_node to use as a loop cursor.
847 * @head: the head for your list. 869 * @head: the head for your list.
848 * @member: the name of the hlist_node within the struct. 870 * @member: the name of the hlist_node within the struct.
849 * 871 *
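
The kerneldoc shuffling above only moves the one-line summaries and long descriptions into the form the documentation tools expect; the semantics of the RCU hlist helpers are unchanged. As a reminder of how "permitting racing traversals" cashes out in practice, here is a minimal sketch (not from the patch) pairing a lock-protected writer with a lockless reader; the item type, list head and lock are hypothetical:

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct item {
            int key;
            struct hlist_node link;
    };

    static HLIST_HEAD(items);               /* hypothetical hash bucket */
    static DEFINE_SPINLOCK(items_lock);     /* serializes writers only */

    /* Writer: mutations must be serialized against each other. */
    static void item_insert(struct item *it)
    {
            spin_lock(&items_lock);
            hlist_add_head_rcu(&it->link, &items);
            spin_unlock(&items_lock);
    }

    /* Reader: may run concurrently with item_insert() under rcu_read_lock(). */
    static int item_present(int key)
    {
            struct item *it;
            struct hlist_node *pos;
            int found = 0;

            rcu_read_lock();
            hlist_for_each_entry_rcu(it, pos, &items, link)
                    if (it->key == key)
                            found = 1;
            rcu_read_unlock();
            return found;
    }
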
diff --git a/include/linux/loop.h b/include/linux/loop.h
index e76c7611d6cc..bf3d2345ce99 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -59,7 +59,7 @@ struct loop_device {
59 struct bio *lo_bio; 59 struct bio *lo_bio;
60 struct bio *lo_biotail; 60 struct bio *lo_biotail;
61 int lo_state; 61 int lo_state;
62 struct completion lo_done; 62 struct task_struct *lo_thread;
63 struct completion lo_bh_done; 63 struct completion lo_bh_done;
64 struct mutex lo_ctl_mutex; 64 struct mutex lo_ctl_mutex;
65 int lo_pending; 65 int lo_pending;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 5dba23a1c0d0..48148e0cdbd1 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -16,7 +16,9 @@ extern int fail_migrate_page(struct address_space *,
16 struct page *, struct page *); 16 struct page *, struct page *);
17 17
18extern int migrate_prep(void); 18extern int migrate_prep(void);
19 19extern int migrate_vmas(struct mm_struct *mm,
20 const nodemask_t *from, const nodemask_t *to,
21 unsigned long flags);
20#else 22#else
21 23
22static inline int isolate_lru_page(struct page *p, struct list_head *list) 24static inline int isolate_lru_page(struct page *p, struct list_head *list)
@@ -30,6 +32,13 @@ static inline int migrate_pages_to(struct list_head *pagelist,
30 32
31static inline int migrate_prep(void) { return -ENOSYS; } 33static inline int migrate_prep(void) { return -ENOSYS; }
32 34
35static inline int migrate_vmas(struct mm_struct *mm,
36 const nodemask_t *from, const nodemask_t *to,
37 unsigned long flags)
38{
39 return -ENOSYS;
40}
41
33/* Possible settings for the migrate_page() method in address_space_operations */ 42/* Possible settings for the migrate_page() method in address_space_operations */
34#define migrate_page NULL 43#define migrate_page NULL
35#define fail_migrate_page NULL 44#define fail_migrate_page NULL
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3b09444121d9..a929ea197e48 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -145,7 +145,6 @@ extern unsigned int kobjsize(const void *objp);
145 145
146#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 146#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
147#define VM_GROWSUP 0x00000200 147#define VM_GROWSUP 0x00000200
148#define VM_SHM 0x00000000 /* Means nothing: delete it later */
149#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 148#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
150#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 149#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
151 150
@@ -207,6 +206,8 @@ struct vm_operations_struct {
207 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 206 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
208 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 207 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
209 unsigned long addr); 208 unsigned long addr);
209 int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
210 const nodemask_t *to, unsigned long flags);
210#endif 211#endif
211}; 212};
212 213
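
Together with the migrate_vmas() declaration added to migrate.h above, the new ->migrate method lets the object backing a VMA take over node-to-node page migration itself. A hedged sketch of what a hook might look like; the function name and its do-nothing behaviour are illustrative, not taken from this patch:

    static int example_vma_migrate(struct vm_area_struct *vma,
                                   const nodemask_t *from, const nodemask_t *to,
                                   unsigned long flags)
    {
            /*
             * A real implementation would move the pages backing this VMA from
             * the nodes in 'from' to the nodes in 'to'.  Returning -ENOSYS just
             * tells the caller nothing was done.
             */
            return -ENOSYS;
    }

    static struct vm_operations_struct example_vm_ops = {
            /* .migrate only exists when CONFIG_NUMA is set, like the fields above it */
            .migrate = example_vma_migrate,
    };
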
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 1d7cdd20b553..e712e7d47cc2 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -77,11 +77,11 @@ struct nbd_device {
77 * server. All data are in network byte order. 77 * server. All data are in network byte order.
78 */ 78 */
79struct nbd_request { 79struct nbd_request {
80 __u32 magic; 80 __be32 magic;
81 __u32 type; /* == READ || == WRITE */ 81 __be32 type; /* == READ || == WRITE */
82 char handle[8]; 82 char handle[8];
83 __u64 from; 83 __be64 from;
84 __u32 len; 84 __be32 len;
85} 85}
86#ifdef __GNUC__ 86#ifdef __GNUC__
87 __attribute__ ((packed)) 87 __attribute__ ((packed))
@@ -93,8 +93,8 @@ struct nbd_request {
93 * it has completed an I/O request (or an error occurs). 93 * it has completed an I/O request (or an error occurs).
94 */ 94 */
95struct nbd_reply { 95struct nbd_reply {
96 __u32 magic; 96 __be32 magic;
97 __u32 error; /* 0 = ok, else error */ 97 __be32 error; /* 0 = ok, else error */
98 char handle[8]; /* handle you got from request */ 98 char handle[8]; /* handle you got from request */
99}; 99};
100#endif 100#endif
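
Re-typing the wire fields as __be32/__be64 changes nothing at runtime; it lets sparse flag assignments that forget the byte-order conversion, since everything in nbd_request/nbd_reply is defined to be network (big-endian) order. A small sketch of building a read request on the host side, assuming the NBD_REQUEST_MAGIC constant from this header and using 0 for the type as the "== READ" comment describes:

    #include <linux/nbd.h>
    #include <linux/string.h>
    #include <asm/byteorder.h>

    static void example_fill_read_request(struct nbd_request *req,
                                          u64 offset, u32 length)
    {
            req->magic = cpu_to_be32(NBD_REQUEST_MAGIC);
            req->type  = cpu_to_be32(0);            /* 0 == READ, per the header comment */
            req->from  = cpu_to_be64(offset);
            req->len   = cpu_to_be32(length);
            memset(req->handle, 0, sizeof(req->handle)); /* opaque cookie echoed in the reply */
    }
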
diff --git a/include/linux/parport.h b/include/linux/parport.h
index d42737eeee06..5bf321e82c99 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -127,6 +127,10 @@ struct amiga_parport_state {
127 unsigned char statusdir;/* ciab.ddrb & 7 */ 127 unsigned char statusdir;/* ciab.ddrb & 7 */
128}; 128};
129 129
130struct ax88796_parport_state {
131 unsigned char cpr;
132};
133
130struct ip32_parport_state { 134struct ip32_parport_state {
131 unsigned int dcr; 135 unsigned int dcr;
132 unsigned int ecr; 136 unsigned int ecr;
@@ -138,6 +142,7 @@ struct parport_state {
138 /* ARC has no state. */ 142 /* ARC has no state. */
139 struct ax_parport_state ax; 143 struct ax_parport_state ax;
140 struct amiga_parport_state amiga; 144 struct amiga_parport_state amiga;
145 struct ax88796_parport_state ax88796;
141 /* Atari has no state. */ 146 /* Atari has no state. */
142 struct ip32_parport_state ip32; 147 struct ip32_parport_state ip32;
143 void *misc; 148 void *misc;
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index ecce5912f4d6..2ed807ddc08c 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -230,4 +230,8 @@ extern int pmu_battery_count;
230extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; 230extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
231extern unsigned int pmu_power_flags; 231extern unsigned int pmu_power_flags;
232 232
233/* Backlight */
234extern int disable_kernel_backlight;
235extern void pmu_backlight_init(struct device_node*);
236
233#endif /* __KERNEL__ */ 237#endif /* __KERNEL__ */
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 015297ff73fa..1dd1c707311f 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -59,13 +59,13 @@ extern void machine_crash_shutdown(struct pt_regs *);
59 * Architecture independent implementations of sys_reboot commands. 59 * Architecture independent implementations of sys_reboot commands.
60 */ 60 */
61 61
62extern void kernel_restart_prepare(char *cmd);
63extern void kernel_shutdown_prepare(enum system_states state); 62extern void kernel_shutdown_prepare(enum system_states state);
64 63
65extern void kernel_restart(char *cmd); 64extern void kernel_restart(char *cmd);
66extern void kernel_halt(void); 65extern void kernel_halt(void);
67extern void kernel_power_off(void); 66extern void kernel_power_off(void);
68extern void kernel_kexec(void); 67
68void ctrl_alt_del(void);
69 69
70/* 70/*
71 * Emergency restart, callable from an interrupt handler. 71 * Emergency restart, callable from an interrupt handler.
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 21a86cb6acdb..ae13db714742 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/time.h> 4#include <linux/time.h>
5 5
6struct task_struct;
7
6/* 8/*
7 * Resource control/accounting header file for linux 9 * Resource control/accounting header file for linux
8 */ 10 */
@@ -67,4 +69,6 @@ struct rlimit {
67 */ 69 */
68#include <asm/resource.h> 70#include <asm/resource.h>
69 71
72int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
73
70#endif 74#endif
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h
new file mode 100644
index 000000000000..bf74e63c98fe
--- /dev/null
+++ b/include/linux/rtc-v3020.h
@@ -0,0 +1,35 @@
1/*
2 * v3020.h - Registers definition and platform data structure for the v3020 RTC.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2006, 8D Technologies inc.
9 */
10#ifndef __LINUX_V3020_H
11#define __LINUX_V3020_H
12
13/* The v3020 has only one data pin but which one
14 * is used depends on the board. */
15struct v3020_platform_data {
16 int leftshift; /* (1<<(leftshift)) & readl() */
17};
18
19#define V3020_STATUS_0 0x00
20#define V3020_STATUS_1 0x01
21#define V3020_SECONDS 0x02
22#define V3020_MINUTES 0x03
23#define V3020_HOURS 0x04
24#define V3020_MONTH_DAY 0x05
25#define V3020_MONTH 0x06
26#define V3020_YEAR 0x07
27#define V3020_WEEK_DAY 0x08
28#define V3020_WEEK 0x09
29
30#define V3020_IS_COMMAND(val) ((val)>=0x0E)
31
32#define V3020_CMD_RAM2CLOCK 0x0E
33#define V3020_CMD_CLOCK2RAM 0x0F
34
35#endif /* __LINUX_V3020_H */
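
The new header is consumed by board code and by the rtc-v3020 driver; the only tunable is leftshift, which says which bit of the memory-mapped data bus the chip's single data pin is wired to. A hedged sketch of a board file handing that in via platform data; the device name, id and bit number are made up for the example:

    #include <linux/platform_device.h>
    #include <linux/rtc-v3020.h>

    static struct v3020_platform_data example_v3020_pdata = {
            .leftshift = 5,                 /* data pin on bit 5: (1 << 5) & readl(...) */
    };

    static struct platform_device example_v3020_device = {
            .name   = "v3020",              /* assumed to match the driver's name */
            .id     = -1,
            .dev    = {
                    .platform_data  = &example_v3020_pdata,
            },
    };
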
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index ab61cd1199f2..36e2bf4b4315 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -102,6 +102,7 @@ struct rtc_pll_info {
102#include <linux/interrupt.h> 102#include <linux/interrupt.h>
103 103
104extern int rtc_month_days(unsigned int month, unsigned int year); 104extern int rtc_month_days(unsigned int month, unsigned int year);
105extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year);
105extern int rtc_valid_tm(struct rtc_time *tm); 106extern int rtc_valid_tm(struct rtc_time *tm);
106extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); 107extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
107extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); 108extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
@@ -155,6 +156,17 @@ struct rtc_device
155 struct rtc_task *irq_task; 156 struct rtc_task *irq_task;
156 spinlock_t irq_task_lock; 157 spinlock_t irq_task_lock;
157 int irq_freq; 158 int irq_freq;
159 int max_user_freq;
160#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
161 struct work_struct uie_task;
162 struct timer_list uie_timer;
163 /* Those fields are protected by rtc->irq_lock */
164 unsigned int oldsecs;
165 unsigned int irq_active:1;
166 unsigned int stop_uie_polling:1;
167 unsigned int uie_task_active:1;
168 unsigned int uie_timer_active:1;
169#endif
158}; 170};
159#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev) 171#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev)
160 172
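
rtc_year_days() complements the existing rtc_month_days(): given a day-of-month, a month and a year it returns the day of the year, which is what drivers need to fill in tm_yday. A minimal sketch, assuming (as with the rest of the RTC helpers) that the month is 0-11, the day is 1-based and the year argument is the full Gregorian year:

    static void example_fill_yday(struct rtc_time *tm)
    {
            /* tm_year counts from 1900, so pass the absolute year for leap handling */
            tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year + 1900);
    }
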
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 38b4791e6a5d..8d11d9310db0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -358,6 +358,14 @@ struct sighand_struct {
358 spinlock_t siglock; 358 spinlock_t siglock;
359}; 359};
360 360
361struct pacct_struct {
362 int ac_flag;
363 long ac_exitcode;
364 unsigned long ac_mem;
365 cputime_t ac_utime, ac_stime;
366 unsigned long ac_minflt, ac_majflt;
367};
368
361/* 369/*
362 * NOTE! "signal_struct" does not have it's own 370 * NOTE! "signal_struct" does not have it's own
363 * locking, because a shared signal_struct always 371 * locking, because a shared signal_struct always
@@ -449,6 +457,9 @@ struct signal_struct {
449 struct key *session_keyring; /* keyring inherited over fork */ 457 struct key *session_keyring; /* keyring inherited over fork */
450 struct key *process_keyring; /* keyring private to this process */ 458 struct key *process_keyring; /* keyring private to this process */
451#endif 459#endif
460#ifdef CONFIG_BSD_PROCESS_ACCT
461 struct pacct_struct pacct; /* per-process accounting information */
462#endif
452}; 463};
453 464
454/* Context switch must be unlocked if interrupts are to be enabled */ 465/* Context switch must be unlocked if interrupts are to be enabled */
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 2993302f7923..0577f5284cbc 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * SyncLink Multiprotocol Serial Adapter Driver 2 * SyncLink Multiprotocol Serial Adapter Driver
3 * 3 *
4 * $Id: synclink.h,v 3.11 2006/02/06 21:20:29 paulkf Exp $ 4 * $Id: synclink.h,v 3.13 2006/05/23 18:25:06 paulkf Exp $
5 * 5 *
6 * Copyright (C) 1998-2000 by Microgate Corporation 6 * Copyright (C) 1998-2000 by Microgate Corporation
7 * 7 *
@@ -97,6 +97,8 @@
97#define HDLC_TXIDLE_ALT_MARK_SPACE 4 97#define HDLC_TXIDLE_ALT_MARK_SPACE 4
98#define HDLC_TXIDLE_SPACE 5 98#define HDLC_TXIDLE_SPACE 5
99#define HDLC_TXIDLE_MARK 6 99#define HDLC_TXIDLE_MARK 6
100#define HDLC_TXIDLE_CUSTOM_8 0x10000000
101#define HDLC_TXIDLE_CUSTOM_16 0x20000000
100 102
101#define HDLC_ENCODING_NRZ 0 103#define HDLC_ENCODING_NRZ 0
102#define HDLC_ENCODING_NRZB 1 104#define HDLC_ENCODING_NRZB 1
@@ -170,6 +172,7 @@ typedef struct _MGSL_PARAMS
170#define SYNCLINK_GT_DEVICE_ID 0x0070 172#define SYNCLINK_GT_DEVICE_ID 0x0070
171#define SYNCLINK_GT4_DEVICE_ID 0x0080 173#define SYNCLINK_GT4_DEVICE_ID 0x0080
172#define SYNCLINK_AC_DEVICE_ID 0x0090 174#define SYNCLINK_AC_DEVICE_ID 0x0090
175#define SYNCLINK_GT2_DEVICE_ID 0x00A0
173#define MGSL_MAX_SERIAL_NUMBER 30 176#define MGSL_MAX_SERIAL_NUMBER 30
174 177
175/* 178/*
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index c7132029af0f..6a60770984e9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -55,7 +55,7 @@ enum
55 CTL_KERN=1, /* General kernel info and control */ 55 CTL_KERN=1, /* General kernel info and control */
56 CTL_VM=2, /* VM management */ 56 CTL_VM=2, /* VM management */
57 CTL_NET=3, /* Networking */ 57 CTL_NET=3, /* Networking */
58 CTL_PROC=4, /* Process info */ 58 /* was CTL_PROC */
59 CTL_FS=5, /* Filesystems */ 59 CTL_FS=5, /* Filesystems */
60 CTL_DEBUG=6, /* Debugging */ 60 CTL_DEBUG=6, /* Debugging */
61 CTL_DEV=7, /* Devices */ 61 CTL_DEV=7, /* Devices */
@@ -767,8 +767,6 @@ enum {
767 NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4, 767 NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
768}; 768};
769 769
770/* CTL_PROC names: */
771
772/* CTL_FS names: */ 770/* CTL_FS names: */
773enum 771enum
774{ 772{
diff --git a/include/linux/ufs_fs.h b/include/linux/ufs_fs.h
index 86b5b4271b5a..914f911325be 100644
--- a/include/linux/ufs_fs.h
+++ b/include/linux/ufs_fs.h
@@ -220,6 +220,19 @@ typedef __u16 __bitwise __fs16;
220 */ 220 */
221#define UFS_MINFREE 5 221#define UFS_MINFREE 5
222#define UFS_DEFAULTOPT UFS_OPTTIME 222#define UFS_DEFAULTOPT UFS_OPTTIME
223
224/*
225 * Debug code
226 */
227#ifdef CONFIG_UFS_DEBUG
228# define UFSD(f, a...) { \
229 printk ("UFSD (%s, %d): %s:", \
230 __FILE__, __LINE__, __FUNCTION__); \
231 printk (f, ## a); \
232 }
233#else
234# define UFSD(f, a...) /**/
235#endif
223 236
224/* 237/*
225 * Turn file system block numbers into disk block addresses. 238 * Turn file system block numbers into disk block addresses.
@@ -339,7 +352,22 @@ struct ufs2_csum_total {
339}; 352};
340 353
341/* 354/*
355 * File system flags
356 */
357#define UFS_UNCLEAN 0x01 /* file system not clean at mount (unused) */
358#define UFS_DOSOFTDEP 0x02 /* file system using soft dependencies */
359#define UFS_NEEDSFSCK 0x04 /* needs sync fsck (FreeBSD compat, unused) */
360#define UFS_INDEXDIRS 0x08 /* kernel supports indexed directories */
361#define UFS_ACLS 0x10 /* file system has ACLs enabled */
362#define UFS_MULTILABEL 0x20 /* file system is MAC multi-label */
363#define UFS_FLAGS_UPDATED 0x80 /* flags have been moved to new location */
364
365#if 0
366/*
342 * This is the actual superblock, as it is laid out on the disk. 367 * This is the actual superblock, as it is laid out on the disk.
368 * Do NOT use this structure: sizeof(ufs_super_block) > 512, so it
369 * may occupy several blocks; use
370 * struct ufs_super_block_(first,second,third) instead.
343 */ 371 */
344struct ufs_super_block { 372struct ufs_super_block {
345 __fs32 fs_link; /* UNUSED */ 373 __fs32 fs_link; /* UNUSED */
@@ -416,7 +444,7 @@ struct ufs_super_block {
416 __s8 fs_fmod; /* super block modified flag */ 444 __s8 fs_fmod; /* super block modified flag */
417 __s8 fs_clean; /* file system is clean flag */ 445 __s8 fs_clean; /* file system is clean flag */
418 __s8 fs_ronly; /* mounted read-only flag */ 446 __s8 fs_ronly; /* mounted read-only flag */
419 __s8 fs_flags; /* currently unused flag */ 447 __s8 fs_flags;
420 union { 448 union {
421 struct { 449 struct {
422 __s8 fs_fsmnt[UFS_MAXMNTLEN];/* name mounted on */ 450 __s8 fs_fsmnt[UFS_MAXMNTLEN];/* name mounted on */
@@ -485,6 +513,7 @@ struct ufs_super_block {
485 __fs32 fs_magic; /* magic number */ 513 __fs32 fs_magic; /* magic number */
486 __u8 fs_space[1]; /* list of blocks for each rotation */ 514 __u8 fs_space[1]; /* list of blocks for each rotation */
487}; 515};
516#endif/*struct ufs_super_block*/
488 517
489/* 518/*
490 * Preference for optimization. 519 * Preference for optimization.
@@ -666,7 +695,7 @@ struct ufs_buffer_head {
666}; 695};
667 696
668struct ufs_cg_private_info { 697struct ufs_cg_private_info {
669 struct ufs_cylinder_group ucg; 698 struct ufs_buffer_head c_ubh;
670 __u32 c_cgx; /* number of cylinder group */ 699 __u32 c_cgx; /* number of cylinder group */
671 __u16 c_ncyl; /* number of cyl's this cg */ 700 __u16 c_ncyl; /* number of cyl's this cg */
672 __u16 c_niblk; /* number of inode blocks this cg */ 701 __u16 c_niblk; /* number of inode blocks this cg */
@@ -686,6 +715,7 @@ struct ufs_cg_private_info {
686 715
687struct ufs_sb_private_info { 716struct ufs_sb_private_info {
688 struct ufs_buffer_head s_ubh; /* buffer containing super block */ 717 struct ufs_buffer_head s_ubh; /* buffer containing super block */
718 struct ufs2_csum_total cs_total;
689 __u32 s_sblkno; /* offset of super-blocks in filesys */ 719 __u32 s_sblkno; /* offset of super-blocks in filesys */
690 __u32 s_cblkno; /* offset of cg-block in filesys */ 720 __u32 s_cblkno; /* offset of cg-block in filesys */
691 __u32 s_iblkno; /* offset of inode-blocks in filesys */ 721 __u32 s_iblkno; /* offset of inode-blocks in filesys */
@@ -824,16 +854,54 @@ struct ufs_super_block_first {
824}; 854};
825 855
826struct ufs_super_block_second { 856struct ufs_super_block_second {
827 __s8 fs_fsmnt[212]; 857 union {
828 __fs32 fs_cgrotor; 858 struct {
829 __fs32 fs_csp[UFS_MAXCSBUFS]; 859 __s8 fs_fsmnt[212];
830 __fs32 fs_maxcluster; 860 __fs32 fs_cgrotor;
831 __fs32 fs_cpc; 861 __fs32 fs_csp[UFS_MAXCSBUFS];
832 __fs16 fs_opostbl[82]; 862 __fs32 fs_maxcluster;
833}; 863 __fs32 fs_cpc;
864 __fs16 fs_opostbl[82];
865 } fs_u1;
866 struct {
867 __s8 fs_fsmnt[UFS2_MAXMNTLEN - UFS_MAXMNTLEN + 212];
868 __u8 fs_volname[UFS2_MAXVOLLEN];
869 __fs64 fs_swuid;
870 __fs32 fs_pad;
871 __fs32 fs_cgrotor;
872 __fs32 fs_ocsp[UFS2_NOCSPTRS];
873 __fs32 fs_contigdirs;
874 __fs32 fs_csp;
875 __fs32 fs_maxcluster;
876 __fs32 fs_active;
877 __fs32 fs_old_cpc;
878 __fs32 fs_maxbsize;
879 __fs64 fs_sparecon64[17];
880 __fs64 fs_sblockloc;
881 __fs64 cs_ndir;
882 __fs64 cs_nbfree;
883 } fs_u2;
884 } fs_un;
885};
834 886
835struct ufs_super_block_third { 887struct ufs_super_block_third {
836 __fs16 fs_opostbl[46]; 888 union {
889 struct {
890 __fs16 fs_opostbl[46];
891 } fs_u1;
892 struct {
893 __fs64 cs_nifree; /* number of free inodes */
894 __fs64 cs_nffree; /* number of free frags */
895 __fs64 cs_numclusters; /* number of free clusters */
896 __fs64 cs_spare[3]; /* future expansion */
897 struct ufs_timeval fs_time; /* last time written */
898 __fs64 fs_size; /* number of blocks in fs */
899 __fs64 fs_dsize; /* number of data blocks in fs */
900 __fs64 fs_csaddr; /* blk addr of cyl grp summary area */
901 __fs64 fs_pendingblocks;/* blocks in process of being freed */
902 __fs32 fs_pendinginodes;/*inodes in process of being freed */
903 } fs_u2;
904 } fs_un1;
837 union { 905 union {
838 struct { 906 struct {
839 __fs32 fs_sparecon[53];/* reserved for future constants */ 907 __fs32 fs_sparecon[53];/* reserved for future constants */
@@ -861,7 +929,7 @@ struct ufs_super_block_third {
861 __fs32 fs_qfmask[2]; /* ~usb_fmask */ 929 __fs32 fs_qfmask[2]; /* ~usb_fmask */
862 __fs32 fs_state; /* file system state time stamp */ 930 __fs32 fs_state; /* file system state time stamp */
863 } fs_44; 931 } fs_44;
864 } fs_u2; 932 } fs_un2;
865 __fs32 fs_postblformat; 933 __fs32 fs_postblformat;
866 __fs32 fs_nrpos; 934 __fs32 fs_nrpos;
867 __fs32 fs_postbloff; 935 __fs32 fs_postbloff;
@@ -875,7 +943,8 @@ struct ufs_super_block_third {
875/* balloc.c */ 943/* balloc.c */
876extern void ufs_free_fragments (struct inode *, unsigned, unsigned); 944extern void ufs_free_fragments (struct inode *, unsigned, unsigned);
877extern void ufs_free_blocks (struct inode *, unsigned, unsigned); 945extern void ufs_free_blocks (struct inode *, unsigned, unsigned);
878extern unsigned ufs_new_fragments (struct inode *, __fs32 *, unsigned, unsigned, unsigned, int *); 946extern unsigned ufs_new_fragments(struct inode *, __fs32 *, unsigned, unsigned,
947 unsigned, int *, struct page *);
879 948
880/* cylinder.c */ 949/* cylinder.c */
881extern struct ufs_cg_private_info * ufs_load_cylinder (struct super_block *, unsigned); 950extern struct ufs_cg_private_info * ufs_load_cylinder (struct super_block *, unsigned);
@@ -886,11 +955,12 @@ extern struct inode_operations ufs_dir_inode_operations;
886extern int ufs_add_link (struct dentry *, struct inode *); 955extern int ufs_add_link (struct dentry *, struct inode *);
887extern ino_t ufs_inode_by_name(struct inode *, struct dentry *); 956extern ino_t ufs_inode_by_name(struct inode *, struct dentry *);
888extern int ufs_make_empty(struct inode *, struct inode *); 957extern int ufs_make_empty(struct inode *, struct inode *);
889extern struct ufs_dir_entry * ufs_find_entry (struct dentry *, struct buffer_head **); 958extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct dentry *, struct page **);
890extern int ufs_delete_entry (struct inode *, struct ufs_dir_entry *, struct buffer_head *); 959extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
891extern int ufs_empty_dir (struct inode *); 960extern int ufs_empty_dir (struct inode *);
892extern struct ufs_dir_entry * ufs_dotdot (struct inode *, struct buffer_head **); 961extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
893extern void ufs_set_link(struct inode *, struct ufs_dir_entry *, struct buffer_head *, struct inode *); 962extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
963 struct page *page, struct inode *inode);
894 964
895/* file.c */ 965/* file.c */
896extern struct inode_operations ufs_file_inode_operations; 966extern struct inode_operations ufs_file_inode_operations;
@@ -903,13 +973,11 @@ extern void ufs_free_inode (struct inode *inode);
903extern struct inode * ufs_new_inode (struct inode *, int); 973extern struct inode * ufs_new_inode (struct inode *, int);
904 974
905/* inode.c */ 975/* inode.c */
906extern u64 ufs_frag_map (struct inode *, sector_t);
907extern void ufs_read_inode (struct inode *); 976extern void ufs_read_inode (struct inode *);
908extern void ufs_put_inode (struct inode *); 977extern void ufs_put_inode (struct inode *);
909extern int ufs_write_inode (struct inode *, int); 978extern int ufs_write_inode (struct inode *, int);
910extern int ufs_sync_inode (struct inode *); 979extern int ufs_sync_inode (struct inode *);
911extern void ufs_delete_inode (struct inode *); 980extern void ufs_delete_inode (struct inode *);
912extern struct buffer_head * ufs_getfrag (struct inode *, unsigned, int, int *);
913extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *); 981extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *);
914extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create); 982extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create);
915 983
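
The UFSD() macro added near the top of this file compiles away completely unless CONFIG_UFS_DEBUG is set, and otherwise prefixes every message with file, line and function. A short sketch of a call site; the function and message are illustrative only:

    static int example_load_cg(struct super_block *sb, unsigned cgno)
    {
            UFSD("ENTER, cg %u\n", cgno);   /* printed only with CONFIG_UFS_DEBUG */
            /* ... e.g. ufs_load_cylinder(sb, cgno) ... */
            UFSD("EXIT\n");
            return 0;
    }
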
diff --git a/include/linux/ufs_fs_i.h b/include/linux/ufs_fs_i.h
index 21665a953978..f50ce3b0cd52 100644
--- a/include/linux/ufs_fs_i.h
+++ b/include/linux/ufs_fs_i.h
@@ -27,6 +27,7 @@ struct ufs_inode_info {
27 __u32 i_oeftflag; 27 __u32 i_oeftflag;
28 __u16 i_osync; 28 __u16 i_osync;
29 __u32 i_lastfrag; 29 __u32 i_lastfrag;
30 __u32 i_dir_start_lookup;
30 struct inode vfs_inode; 31 struct inode vfs_inode;
31}; 32};
32 33
diff --git a/init/Kconfig b/init/Kconfig
index df864a358221..e0358f3946a1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -151,7 +151,8 @@ config BSD_PROCESS_ACCT_V3
151 at <http://www.physik3.uni-rostock.de/tim/kernel/utils/acct/>. 151 at <http://www.physik3.uni-rostock.de/tim/kernel/utils/acct/>.
152 152
153config SYSCTL 153config SYSCTL
154 bool "Sysctl support" 154 bool "Sysctl support" if EMBEDDED
155 default y
155 ---help--- 156 ---help---
156 The sysctl interface provides a means of dynamically changing 157 The sysctl interface provides a means of dynamically changing
157 certain kernel parameters and variables on the fly without requiring 158 certain kernel parameters and variables on the fly without requiring
diff --git a/kernel/acct.c b/kernel/acct.c
index 6802020e0ceb..368c4f03fe0e 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -75,7 +75,7 @@ int acct_parm[3] = {4, 2, 30};
75/* 75/*
76 * External references and all of the globals. 76 * External references and all of the globals.
77 */ 77 */
78static void do_acct_process(long, struct file *); 78static void do_acct_process(struct file *);
79 79
80/* 80/*
81 * This structure is used so that all the data protected by lock 81 * This structure is used so that all the data protected by lock
@@ -196,7 +196,7 @@ static void acct_file_reopen(struct file *file)
196 if (old_acct) { 196 if (old_acct) {
197 mnt_unpin(old_acct->f_vfsmnt); 197 mnt_unpin(old_acct->f_vfsmnt);
198 spin_unlock(&acct_globals.lock); 198 spin_unlock(&acct_globals.lock);
199 do_acct_process(0, old_acct); 199 do_acct_process(old_acct);
200 filp_close(old_acct, NULL); 200 filp_close(old_acct, NULL);
201 spin_lock(&acct_globals.lock); 201 spin_lock(&acct_globals.lock);
202 } 202 }
@@ -419,16 +419,15 @@ static u32 encode_float(u64 value)
419/* 419/*
420 * do_acct_process does all actual work. Caller holds the reference to file. 420 * do_acct_process does all actual work. Caller holds the reference to file.
421 */ 421 */
422static void do_acct_process(long exitcode, struct file *file) 422static void do_acct_process(struct file *file)
423{ 423{
424 struct pacct_struct *pacct = &current->signal->pacct;
424 acct_t ac; 425 acct_t ac;
425 mm_segment_t fs; 426 mm_segment_t fs;
426 unsigned long vsize;
427 unsigned long flim; 427 unsigned long flim;
428 u64 elapsed; 428 u64 elapsed;
429 u64 run_time; 429 u64 run_time;
430 struct timespec uptime; 430 struct timespec uptime;
431 unsigned long jiffies;
432 431
433 /* 432 /*
434 * First check to see if there is enough free_space to continue 433 * First check to see if there is enough free_space to continue
@@ -469,12 +468,6 @@ static void do_acct_process(long exitcode, struct file *file)
469#endif 468#endif
470 do_div(elapsed, AHZ); 469 do_div(elapsed, AHZ);
471 ac.ac_btime = xtime.tv_sec - elapsed; 470 ac.ac_btime = xtime.tv_sec - elapsed;
472 jiffies = cputime_to_jiffies(cputime_add(current->utime,
473 current->signal->utime));
474 ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies));
475 jiffies = cputime_to_jiffies(cputime_add(current->stime,
476 current->signal->stime));
477 ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies));
478 /* we really need to bite the bullet and change layout */ 471 /* we really need to bite the bullet and change layout */
479 ac.ac_uid = current->uid; 472 ac.ac_uid = current->uid;
480 ac.ac_gid = current->gid; 473 ac.ac_gid = current->gid;
@@ -496,37 +489,18 @@ static void do_acct_process(long exitcode, struct file *file)
496 old_encode_dev(tty_devnum(current->signal->tty)) : 0; 489 old_encode_dev(tty_devnum(current->signal->tty)) : 0;
497 read_unlock(&tasklist_lock); 490 read_unlock(&tasklist_lock);
498 491
499 ac.ac_flag = 0; 492 spin_lock(&current->sighand->siglock);
500 if (current->flags & PF_FORKNOEXEC) 493 ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
501 ac.ac_flag |= AFORK; 494 ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
502 if (current->flags & PF_SUPERPRIV) 495 ac.ac_flag = pacct->ac_flag;
503 ac.ac_flag |= ASU; 496 ac.ac_mem = encode_comp_t(pacct->ac_mem);
504 if (current->flags & PF_DUMPCORE) 497 ac.ac_minflt = encode_comp_t(pacct->ac_minflt);
505 ac.ac_flag |= ACORE; 498 ac.ac_majflt = encode_comp_t(pacct->ac_majflt);
506 if (current->flags & PF_SIGNALED) 499 ac.ac_exitcode = pacct->ac_exitcode;
507 ac.ac_flag |= AXSIG; 500 spin_unlock(&current->sighand->siglock);
508
509 vsize = 0;
510 if (current->mm) {
511 struct vm_area_struct *vma;
512 down_read(&current->mm->mmap_sem);
513 vma = current->mm->mmap;
514 while (vma) {
515 vsize += vma->vm_end - vma->vm_start;
516 vma = vma->vm_next;
517 }
518 up_read(&current->mm->mmap_sem);
519 }
520 vsize = vsize / 1024;
521 ac.ac_mem = encode_comp_t(vsize);
522 ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */ 501 ac.ac_io = encode_comp_t(0 /* current->io_usage */); /* %% */
523 ac.ac_rw = encode_comp_t(ac.ac_io / 1024); 502 ac.ac_rw = encode_comp_t(ac.ac_io / 1024);
524 ac.ac_minflt = encode_comp_t(current->signal->min_flt +
525 current->min_flt);
526 ac.ac_majflt = encode_comp_t(current->signal->maj_flt +
527 current->maj_flt);
528 ac.ac_swaps = encode_comp_t(0); 503 ac.ac_swaps = encode_comp_t(0);
529 ac.ac_exitcode = exitcode;
530 504
531 /* 505 /*
532 * Kernel segment override to datasegment and write it 506 * Kernel segment override to datasegment and write it
@@ -546,12 +520,63 @@ static void do_acct_process(long exitcode, struct file *file)
546} 520}
547 521
548/** 522/**
523 * acct_init_pacct - initialize a new pacct_struct
524 */
525void acct_init_pacct(struct pacct_struct *pacct)
526{
527 memset(pacct, 0, sizeof(struct pacct_struct));
528 pacct->ac_utime = pacct->ac_stime = cputime_zero;
529}
530
531/**
532 * acct_collect - collect accounting information into pacct_struct
533 * @exitcode: task exit code
534 * @group_dead: non-zero if this thread is the last one in the process.
535 */
536void acct_collect(long exitcode, int group_dead)
537{
538 struct pacct_struct *pacct = &current->signal->pacct;
539 unsigned long vsize = 0;
540
541 if (group_dead && current->mm) {
542 struct vm_area_struct *vma;
543 down_read(&current->mm->mmap_sem);
544 vma = current->mm->mmap;
545 while (vma) {
546 vsize += vma->vm_end - vma->vm_start;
547 vma = vma->vm_next;
548 }
549 up_read(&current->mm->mmap_sem);
550 }
551
552 spin_lock_irq(&current->sighand->siglock);
553 if (group_dead)
554 pacct->ac_mem = vsize / 1024;
555 if (thread_group_leader(current)) {
556 pacct->ac_exitcode = exitcode;
557 if (current->flags & PF_FORKNOEXEC)
558 pacct->ac_flag |= AFORK;
559 }
560 if (current->flags & PF_SUPERPRIV)
561 pacct->ac_flag |= ASU;
562 if (current->flags & PF_DUMPCORE)
563 pacct->ac_flag |= ACORE;
564 if (current->flags & PF_SIGNALED)
565 pacct->ac_flag |= AXSIG;
566 pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime);
567 pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime);
568 pacct->ac_minflt += current->min_flt;
569 pacct->ac_majflt += current->maj_flt;
570 spin_unlock_irq(&current->sighand->siglock);
571}
572
573/**
549 * acct_process - now just a wrapper around do_acct_process 574 * acct_process - now just a wrapper around do_acct_process
550 * @exitcode: task exit code 575 * @exitcode: task exit code
551 * 576 *
552 * handles process accounting for an exiting task 577 * handles process accounting for an exiting task
553 */ 578 */
554void acct_process(long exitcode) 579void acct_process()
555{ 580{
556 struct file *file = NULL; 581 struct file *file = NULL;
557 582
@@ -570,7 +595,7 @@ void acct_process(long exitcode)
570 get_file(file); 595 get_file(file);
571 spin_unlock(&acct_globals.lock); 596 spin_unlock(&acct_globals.lock);
572 597
573 do_acct_process(exitcode, file); 598 do_acct_process(file);
574 fput(file); 599 fput(file);
575} 600}
576 601
@@ -599,9 +624,7 @@ void acct_update_integrals(struct task_struct *tsk)
599 */ 624 */
600void acct_clear_integrals(struct task_struct *tsk) 625void acct_clear_integrals(struct task_struct *tsk)
601{ 626{
602 if (tsk) { 627 tsk->acct_stimexpd = 0;
603 tsk->acct_stimexpd = 0; 628 tsk->acct_rss_mem1 = 0;
604 tsk->acct_rss_mem1 = 0; 629 tsk->acct_vm_mem1 = 0;
605 tsk->acct_vm_mem1 = 0;
606 }
607} 630}
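
The net effect of the acct.c rework is a two-phase scheme: every exiting thread folds its CPU time, fault counts and (for the last thread) the address-space size into signal->pacct under the siglock via acct_collect(), and only the last thread of the group writes the record with acct_process(), which no longer needs an exit code argument. A hedged sketch of the resulting call order, mirroring the kernel/exit.c hunk further down (simplified):

    /* In the exit path, after the group-dead bookkeeping: */
    static void example_exit_accounting(long code, int group_dead)
    {
            acct_collect(code, group_dead); /* every thread contributes its counters */

            /* ... exit_mm() and friends run here ... */

            if (group_dead)
                    acct_process();         /* one record per process, written once */
    }
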
diff --git a/kernel/compat.c b/kernel/compat.c
index 2f672332430f..126dee9530aa 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -730,17 +730,10 @@ void
730sigset_from_compat (sigset_t *set, compat_sigset_t *compat) 730sigset_from_compat (sigset_t *set, compat_sigset_t *compat)
731{ 731{
732 switch (_NSIG_WORDS) { 732 switch (_NSIG_WORDS) {
733#if defined (__COMPAT_ENDIAN_SWAP__)
734 case 4: set->sig[3] = compat->sig[7] | (((long)compat->sig[6]) << 32 );
735 case 3: set->sig[2] = compat->sig[5] | (((long)compat->sig[4]) << 32 );
736 case 2: set->sig[1] = compat->sig[3] | (((long)compat->sig[2]) << 32 );
737 case 1: set->sig[0] = compat->sig[1] | (((long)compat->sig[0]) << 32 );
738#else
739 case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 ); 733 case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32 );
740 case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 ); 734 case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32 );
741 case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 ); 735 case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32 );
742 case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 ); 736 case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32 );
743#endif
744 } 737 }
745} 738}
746 739
diff --git a/kernel/exit.c b/kernel/exit.c
index a3baf92462bd..e76bd02e930e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -36,6 +36,7 @@
36#include <linux/compat.h> 36#include <linux/compat.h>
37#include <linux/pipe_fs_i.h> 37#include <linux/pipe_fs_i.h>
38#include <linux/audit.h> /* for audit_free() */ 38#include <linux/audit.h> /* for audit_free() */
39#include <linux/resource.h>
39 40
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
41#include <asm/unistd.h> 42#include <asm/unistd.h>
@@ -45,8 +46,6 @@
45extern void sem_exit (void); 46extern void sem_exit (void);
46extern struct task_struct *child_reaper; 47extern struct task_struct *child_reaper;
47 48
48int getrusage(struct task_struct *, int, struct rusage __user *);
49
50static void exit_mm(struct task_struct * tsk); 49static void exit_mm(struct task_struct * tsk);
51 50
52static void __unhash_process(struct task_struct *p) 51static void __unhash_process(struct task_struct *p)
@@ -895,11 +894,11 @@ fastcall NORET_TYPE void do_exit(long code)
895 if (group_dead) { 894 if (group_dead) {
896 hrtimer_cancel(&tsk->signal->real_timer); 895 hrtimer_cancel(&tsk->signal->real_timer);
897 exit_itimers(tsk->signal); 896 exit_itimers(tsk->signal);
898 acct_process(code);
899 } 897 }
898 acct_collect(code, group_dead);
900 if (unlikely(tsk->robust_list)) 899 if (unlikely(tsk->robust_list))
901 exit_robust_list(tsk); 900 exit_robust_list(tsk);
902#ifdef CONFIG_COMPAT 901#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
903 if (unlikely(tsk->compat_robust_list)) 902 if (unlikely(tsk->compat_robust_list))
904 compat_exit_robust_list(tsk); 903 compat_exit_robust_list(tsk);
905#endif 904#endif
@@ -907,6 +906,8 @@ fastcall NORET_TYPE void do_exit(long code)
907 audit_free(tsk); 906 audit_free(tsk);
908 exit_mm(tsk); 907 exit_mm(tsk);
909 908
909 if (group_dead)
910 acct_process();
910 exit_sem(tsk); 911 exit_sem(tsk);
911 __exit_files(tsk); 912 __exit_files(tsk);
912 __exit_fs(tsk); 913 __exit_fs(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 49adc0e8d47c..dfd10cb370c3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -874,6 +874,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
874 tsk->it_prof_expires = 874 tsk->it_prof_expires =
875 secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); 875 secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
876 } 876 }
877 acct_init_pacct(&sig->pacct);
877 878
878 return 0; 879 return 0;
879} 880}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 18324305724a..55601b3ce60e 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -98,7 +98,6 @@ static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
98 98
99/** 99/**
100 * ktime_get_ts - get the monotonic clock in timespec format 100 * ktime_get_ts - get the monotonic clock in timespec format
101 *
102 * @ts: pointer to timespec variable 101 * @ts: pointer to timespec variable
103 * 102 *
104 * The function calculates the monotonic clock from the realtime 103 * The function calculates the monotonic clock from the realtime
@@ -238,7 +237,6 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
238# ifndef CONFIG_KTIME_SCALAR 237# ifndef CONFIG_KTIME_SCALAR
239/** 238/**
240 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable 239 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
241 *
242 * @kt: addend 240 * @kt: addend
243 * @nsec: the scalar nsec value to add 241 * @nsec: the scalar nsec value to add
244 * 242 *
@@ -299,7 +297,6 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
299 297
300/** 298/**
301 * hrtimer_forward - forward the timer expiry 299 * hrtimer_forward - forward the timer expiry
302 *
303 * @timer: hrtimer to forward 300 * @timer: hrtimer to forward
304 * @now: forward past this time 301 * @now: forward past this time
305 * @interval: the interval to forward 302 * @interval: the interval to forward
@@ -411,7 +408,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
411 408
412/** 409/**
413 * hrtimer_start - (re)start a relative timer on the current CPU 410 * hrtimer_start - (re)start a relative timer on the current CPU
414 *
415 * @timer: the timer to be added 411 * @timer: the timer to be added
416 * @tim: expiry time 412 * @tim: expiry time
417 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) 413 * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
@@ -460,14 +456,13 @@ EXPORT_SYMBOL_GPL(hrtimer_start);
460 456
461/** 457/**
462 * hrtimer_try_to_cancel - try to deactivate a timer 458 * hrtimer_try_to_cancel - try to deactivate a timer
463 *
464 * @timer: hrtimer to stop 459 * @timer: hrtimer to stop
465 * 460 *
466 * Returns: 461 * Returns:
467 * 0 when the timer was not active 462 * 0 when the timer was not active
468 * 1 when the timer was active 463 * 1 when the timer was active
469 * -1 when the timer is currently executing the callback function and 464 * -1 when the timer is currently executing the callback function and
470 * can not be stopped 465 * cannot be stopped
471 */ 466 */
472int hrtimer_try_to_cancel(struct hrtimer *timer) 467int hrtimer_try_to_cancel(struct hrtimer *timer)
473{ 468{
@@ -489,7 +484,6 @@ EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
489 484
490/** 485/**
491 * hrtimer_cancel - cancel a timer and wait for the handler to finish. 486 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
492 *
493 * @timer: the timer to be cancelled 487 * @timer: the timer to be cancelled
494 * 488 *
495 * Returns: 489 * Returns:
@@ -510,7 +504,6 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
510 504
511/** 505/**
512 * hrtimer_get_remaining - get remaining time for the timer 506 * hrtimer_get_remaining - get remaining time for the timer
513 *
514 * @timer: the timer to read 507 * @timer: the timer to read
515 */ 508 */
516ktime_t hrtimer_get_remaining(const struct hrtimer *timer) 509ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
@@ -564,7 +557,6 @@ ktime_t hrtimer_get_next_event(void)
564 557
565/** 558/**
566 * hrtimer_init - initialize a timer to the given clock 559 * hrtimer_init - initialize a timer to the given clock
567 *
568 * @timer: the timer to be initialized 560 * @timer: the timer to be initialized
569 * @clock_id: the clock to be used 561 * @clock_id: the clock to be used
570 * @mode: timer mode abs/rel 562 * @mode: timer mode abs/rel
@@ -576,7 +568,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
576 568
577 memset(timer, 0, sizeof(struct hrtimer)); 569 memset(timer, 0, sizeof(struct hrtimer));
578 570
579 bases = per_cpu(hrtimer_bases, raw_smp_processor_id()); 571 bases = __raw_get_cpu_var(hrtimer_bases);
580 572
581 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS) 573 if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
582 clock_id = CLOCK_MONOTONIC; 574 clock_id = CLOCK_MONOTONIC;
@@ -588,7 +580,6 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
588 580
589/** 581/**
590 * hrtimer_get_res - get the timer resolution for a clock 582 * hrtimer_get_res - get the timer resolution for a clock
591 *
592 * @which_clock: which clock to query 583 * @which_clock: which clock to query
593 * @tp: pointer to timespec variable to store the resolution 584 * @tp: pointer to timespec variable to store the resolution
594 * 585 *
@@ -599,7 +590,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
599{ 590{
600 struct hrtimer_base *bases; 591 struct hrtimer_base *bases;
601 592
602 bases = per_cpu(hrtimer_bases, raw_smp_processor_id()); 593 bases = __raw_get_cpu_var(hrtimer_bases);
603 *tp = ktime_to_timespec(bases[which_clock].resolution); 594 *tp = ktime_to_timespec(bases[which_clock].resolution);
604 595
605 return 0; 596 return 0;
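
Apart from the kerneldoc trimming, hrtimer_init() and hrtimer_get_res() now fetch the per-cpu timer bases with __raw_get_cpu_var() instead of per_cpu(..., raw_smp_processor_id()); the two forms are equivalent, the new one is just the direct spelling for "this CPU's copy, no preemption bookkeeping". A tiny sketch of the pattern with a hypothetical per-cpu variable:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, example_counter);

    static void example_touch(void)
    {
            /* old spelling: per_cpu(example_counter, raw_smp_processor_id())++; */
            __raw_get_cpu_var(example_counter)++;
    }
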
diff --git a/kernel/kthread.c b/kernel/kthread.c
index c5f3c6613b6d..24be714b04c7 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -45,6 +45,13 @@ struct kthread_stop_info
45static DEFINE_MUTEX(kthread_stop_lock); 45static DEFINE_MUTEX(kthread_stop_lock);
46static struct kthread_stop_info kthread_stop_info; 46static struct kthread_stop_info kthread_stop_info;
47 47
48/**
49 * kthread_should_stop - should this kthread return now?
50 *
51 * When someone calls kthread_stop() on your kthread, it will be woken
52 * and this will return true. You should then return, and your return
53 * value will be passed through to kthread_stop().
54 */
48int kthread_should_stop(void) 55int kthread_should_stop(void)
49{ 56{
50 return (kthread_stop_info.k == current); 57 return (kthread_stop_info.k == current);
@@ -122,6 +129,25 @@ static void keventd_create_kthread(void *_create)
122 complete(&create->done); 129 complete(&create->done);
123} 130}
124 131
132/**
133 * kthread_create - create a kthread.
134 * @threadfn: the function to run until signal_pending(current).
135 * @data: data ptr for @threadfn.
136 * @namefmt: printf-style name for the thread.
137 *
138 * Description: This helper function creates and names a kernel
139 * thread. The thread will be stopped: use wake_up_process() to start
140 * it. See also kthread_run(), kthread_create_on_cpu().
141 *
142 * When woken, the thread will run @threadfn() with @data as its
143 * argument. @threadfn can either call do_exit() directly if it is a
144 * standalone thread for which no one will call kthread_stop(), or
145 * return when 'kthread_should_stop()' is true (which means
146 * kthread_stop() has been called). The return value should be zero
147 * or a negative error number; it will be passed to kthread_stop().
148 *
149 * Returns a task_struct or ERR_PTR(-ENOMEM).
150 */
125struct task_struct *kthread_create(int (*threadfn)(void *data), 151struct task_struct *kthread_create(int (*threadfn)(void *data),
126 void *data, 152 void *data,
127 const char namefmt[], 153 const char namefmt[],
@@ -156,6 +182,15 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
156} 182}
157EXPORT_SYMBOL(kthread_create); 183EXPORT_SYMBOL(kthread_create);
158 184
185/**
186 * kthread_bind - bind a just-created kthread to a cpu.
187 * @k: thread created by kthread_create().
188 * @cpu: cpu (might not be online, must be possible) for @k to run on.
189 *
190 * Description: This function is equivalent to set_cpus_allowed(),
191 * except that @cpu doesn't need to be online, and the thread must be
192 * stopped (i.e., just returned from kthread_create()).
193 */
159void kthread_bind(struct task_struct *k, unsigned int cpu) 194void kthread_bind(struct task_struct *k, unsigned int cpu)
160{ 195{
161 BUG_ON(k->state != TASK_INTERRUPTIBLE); 196 BUG_ON(k->state != TASK_INTERRUPTIBLE);
@@ -166,12 +201,36 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
166} 201}
167EXPORT_SYMBOL(kthread_bind); 202EXPORT_SYMBOL(kthread_bind);
168 203
204/**
205 * kthread_stop - stop a thread created by kthread_create().
206 * @k: thread created by kthread_create().
207 *
208 * Sets kthread_should_stop() for @k to return true, wakes it, and
209 * waits for it to exit. Your threadfn() must not call do_exit()
210 * itself if you use this function! This can also be called after
211 * kthread_create() instead of calling wake_up_process(): the thread
212 * will exit without calling threadfn().
213 *
214 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
215 * was never called.
216 */
169int kthread_stop(struct task_struct *k) 217int kthread_stop(struct task_struct *k)
170{ 218{
171 return kthread_stop_sem(k, NULL); 219 return kthread_stop_sem(k, NULL);
172} 220}
173EXPORT_SYMBOL(kthread_stop); 221EXPORT_SYMBOL(kthread_stop);
174 222
223/**
224 * kthread_stop_sem - stop a thread created by kthread_create().
225 * @k: thread created by kthread_create().
226 * @s: semaphore that @k waits on while idle.
227 *
228 * Does essentially the same thing as kthread_stop() above, but wakes
229 * @k by calling up(@s).
230 *
231 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
232 * was never called.
233 */
175int kthread_stop_sem(struct task_struct *k, struct semaphore *s) 234int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
176{ 235{
177 int ret; 236 int ret;
@@ -210,5 +269,5 @@ static __init int helper_init(void)
210 269
211 return 0; 270 return 0;
212} 271}
213core_initcall(helper_init);
214 272
273core_initcall(helper_init);
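
The kerneldoc added above describes the whole lifecycle; a hedged sketch of a thread that follows it, with made-up names and a trivial work loop:

    #include <linux/kthread.h>
    #include <linux/err.h>
    #include <linux/delay.h>

    static struct task_struct *example_task;

    static int example_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    /* do one unit of work, then nap */
                    msleep(100);
            }
            return 0;                       /* handed back by kthread_stop() */
    }

    static int example_start(void)
    {
            example_task = kthread_create(example_thread_fn, NULL, "example/%d", 0);
            if (IS_ERR(example_task))
                    return PTR_ERR(example_task);
            kthread_bind(example_task, 0);  /* optional: pin before first wakeup */
            wake_up_process(example_task);
            return 0;
    }

    static void example_stop(void)
    {
            kthread_stop(example_task);     /* wakes the thread and waits for it to exit */
    }
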
diff --git a/kernel/module.c b/kernel/module.c
index bbe04862e1b0..d75275de1c28 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1326,7 +1326,7 @@ int is_exported(const char *name, const struct module *mod)
1326 if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) 1326 if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
1327 return 1; 1327 return 1;
1328 else 1328 else
1329 if (lookup_symbol(name, mod->syms, mod->syms + mod->num_syms)) 1329 if (mod && lookup_symbol(name, mod->syms, mod->syms + mod->num_syms))
1330 return 1; 1330 return 1;
1331 else 1331 else
1332 return 0; 1332 return 0;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index cdf315e794ff..fc311a4673a2 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -38,7 +38,7 @@ config PM_DEBUG
38 38
39config PM_TRACE 39config PM_TRACE
40 bool "Suspend/resume event tracing" 40 bool "Suspend/resume event tracing"
41 depends on PM && PM_DEBUG && X86 41 depends on PM && PM_DEBUG && X86_32
42 default y 42 default y
43 ---help--- 43 ---help---
44 This enables some cheesy code to save the last PM event point in the 44 This enables some cheesy code to save the last PM event point in the
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 81d4d982f3f0..e13e74067845 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -231,7 +231,7 @@ static int software_resume(void)
231late_initcall(software_resume); 231late_initcall(software_resume);
232 232
233 233
234static char * pm_disk_modes[] = { 234static const char * const pm_disk_modes[] = {
235 [PM_DISK_FIRMWARE] = "firmware", 235 [PM_DISK_FIRMWARE] = "firmware",
236 [PM_DISK_PLATFORM] = "platform", 236 [PM_DISK_PLATFORM] = "platform",
237 [PM_DISK_SHUTDOWN] = "shutdown", 237 [PM_DISK_SHUTDOWN] = "shutdown",
diff --git a/kernel/power/main.c b/kernel/power/main.c
index cdf0f07af92f..6d295c776794 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -145,7 +145,7 @@ static void suspend_finish(suspend_state_t state)
145 145
146 146
147 147
148static char *pm_states[PM_SUSPEND_MAX] = { 148static const char * const pm_states[PM_SUSPEND_MAX] = {
149 [PM_SUSPEND_STANDBY] = "standby", 149 [PM_SUSPEND_STANDBY] = "standby",
150 [PM_SUSPEND_MEM] = "mem", 150 [PM_SUSPEND_MEM] = "mem",
151#ifdef CONFIG_SOFTWARE_SUSPEND 151#ifdef CONFIG_SOFTWARE_SUSPEND
@@ -262,7 +262,7 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
262static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n) 262static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n)
263{ 263{
264 suspend_state_t state = PM_SUSPEND_STANDBY; 264 suspend_state_t state = PM_SUSPEND_STANDBY;
265 char ** s; 265 const char * const *s;
266 char *p; 266 char *p;
267 int error; 267 int error;
268 int len; 268 int len;
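
Making pm_disk_modes[] and pm_states[] "const char * const" moves both the pointer table and the strings into read-only data, and the iterator in state_store() has to be declared the same way, hence the "const char * const *s" change. A self-contained sketch of the pattern (names invented):

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static const char * const example_modes[] = {
            "standby",
            "mem",
    };

    static int example_mode_index(const char *buf, size_t n)
    {
            const char * const *s;
            int i;

            for (s = example_modes, i = 0; i < ARRAY_SIZE(example_modes); s++, i++)
                    if (*s && strlen(*s) == n && !strncmp(buf, *s, n))
                            return i;
            return -EINVAL;
    }
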
diff --git a/kernel/printk.c b/kernel/printk.c
index 19a955619294..95b7fe17f124 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -24,6 +24,7 @@
24#include <linux/console.h> 24#include <linux/console.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/moduleparam.h>
27#include <linux/interrupt.h> /* For in_interrupt() */ 28#include <linux/interrupt.h> /* For in_interrupt() */
28#include <linux/config.h> 29#include <linux/config.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
@@ -327,7 +328,9 @@ static void __call_console_drivers(unsigned long start, unsigned long end)
327 struct console *con; 328 struct console *con;
328 329
329 for (con = console_drivers; con; con = con->next) { 330 for (con = console_drivers; con; con = con->next) {
330 if ((con->flags & CON_ENABLED) && con->write) 331 if ((con->flags & CON_ENABLED) && con->write &&
332 (cpu_online(smp_processor_id()) ||
333 (con->flags & CON_ANYTIME)))
331 con->write(con, &LOG_BUF(start), end - start); 334 con->write(con, &LOG_BUF(start), end - start);
332 } 335 }
333} 336}
@@ -437,6 +440,7 @@ static int printk_time = 1;
437#else 440#else
438static int printk_time = 0; 441static int printk_time = 0;
439#endif 442#endif
443module_param(printk_time, int, S_IRUGO | S_IWUSR);
440 444
441static int __init printk_time_setup(char *str) 445static int __init printk_time_setup(char *str)
442{ 446{
@@ -453,6 +457,18 @@ __attribute__((weak)) unsigned long long printk_clock(void)
453 return sched_clock(); 457 return sched_clock();
454} 458}
455 459
460/* Check if we have any console registered that can be called early in boot. */
461static int have_callable_console(void)
462{
463 struct console *con;
464
465 for (con = console_drivers; con; con = con->next)
466 if (con->flags & CON_ANYTIME)
467 return 1;
468
469 return 0;
470}
471
456/** 472/**
457 * printk - print a kernel message 473 * printk - print a kernel message
458 * @fmt: format string 474 * @fmt: format string
@@ -566,27 +582,29 @@ asmlinkage int vprintk(const char *fmt, va_list args)
566 log_level_unknown = 1; 582 log_level_unknown = 1;
567 } 583 }
568 584
569 if (!cpu_online(smp_processor_id())) { 585 if (!down_trylock(&console_sem)) {
570 /* 586 /*
571 * Some console drivers may assume that per-cpu resources have 587 * We own the drivers. We can drop the spinlock and
572 * been allocated. So don't allow them to be called by this 588 * let release_console_sem() print the text, maybe ...
573 * CPU until it is officially up. We shouldn't be calling into
574 * random console drivers on a CPU which doesn't exist yet..
575 */ 589 */
590 console_locked = 1;
576 printk_cpu = UINT_MAX; 591 printk_cpu = UINT_MAX;
577 spin_unlock_irqrestore(&logbuf_lock, flags); 592 spin_unlock_irqrestore(&logbuf_lock, flags);
578 goto out; 593
579 }
580 if (!down_trylock(&console_sem)) {
581 console_locked = 1;
582 /* 594 /*
583 * We own the drivers. We can drop the spinlock and let 595 * Console drivers may assume that per-cpu resources have
584 * release_console_sem() print the text 596 * been allocated. So unless they're explicitly marked as
597 * being able to cope (CON_ANYTIME) don't call them until
598 * this CPU is officially up.
585 */ 599 */
586 printk_cpu = UINT_MAX; 600 if (cpu_online(smp_processor_id()) || have_callable_console()) {
587 spin_unlock_irqrestore(&logbuf_lock, flags); 601 console_may_schedule = 0;
588 console_may_schedule = 0; 602 release_console_sem();
589 release_console_sem(); 603 } else {
604 /* Release by hand to avoid flushing the buffer. */
605 console_locked = 0;
606 up(&console_sem);
607 }
590 } else { 608 } else {
591 /* 609 /*
592 * Someone else owns the drivers. We drop the spinlock, which 610 * Someone else owns the drivers. We drop the spinlock, which
@@ -596,7 +614,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
596 printk_cpu = UINT_MAX; 614 printk_cpu = UINT_MAX;
597 spin_unlock_irqrestore(&logbuf_lock, flags); 615 spin_unlock_irqrestore(&logbuf_lock, flags);
598 } 616 }
599out: 617
600 preempt_enable(); 618 preempt_enable();
601 return printed_len; 619 return printed_len;
602} 620}
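
The rule after this change: before a CPU is marked online, vprintk() still queues the message, but it only calls into console drivers that advertise they can cope, via the CON_ANYTIME flag checked by __call_console_drivers() and have_callable_console(); otherwise console_sem is released by hand without flushing. A hedged sketch of a driver opting in; the name and the empty write routine are placeholders, and CON_ANYTIME is assumed to be defined alongside this change in console.h:

    #include <linux/console.h>

    static void example_con_write(struct console *con, const char *s, unsigned n)
    {
            /* must not rely on per-cpu state that is only set up once the CPU is online */
    }

    static struct console example_console = {
            .name   = "example",
            .write  = example_con_write,
            .flags  = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
            .index  = -1,
    };

    /* registered as usual: register_console(&example_console); */
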
diff --git a/kernel/sched.c b/kernel/sched.c
index 5dbc42694477..f06d059edef5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4152,7 +4152,7 @@ EXPORT_SYMBOL(yield);
4152 */ 4152 */
4153void __sched io_schedule(void) 4153void __sched io_schedule(void)
4154{ 4154{
4155 struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id()); 4155 struct runqueue *rq = &__raw_get_cpu_var(runqueues);
4156 4156
4157 atomic_inc(&rq->nr_iowait); 4157 atomic_inc(&rq->nr_iowait);
4158 schedule(); 4158 schedule();
@@ -4163,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
4163 4163
4164long __sched io_schedule_timeout(long timeout) 4164long __sched io_schedule_timeout(long timeout)
4165{ 4165{
4166 struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id()); 4166 struct runqueue *rq = &__raw_get_cpu_var(runqueues);
4167 long ret; 4167 long ret;
4168 4168
4169 atomic_inc(&rq->nr_iowait); 4169 atomic_inc(&rq->nr_iowait);
@@ -4756,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
4756 break; 4756 break;
4757#ifdef CONFIG_HOTPLUG_CPU 4757#ifdef CONFIG_HOTPLUG_CPU
4758 case CPU_UP_CANCELED: 4758 case CPU_UP_CANCELED:
4759 if (!cpu_rq(cpu)->migration_thread)
4760 break;
4759 /* Unbind it from offline cpu so it can run. Fall thru. */ 4761 /* Unbind it from offline cpu so it can run. Fall thru. */
4760 kthread_bind(cpu_rq(cpu)->migration_thread, 4762 kthread_bind(cpu_rq(cpu)->migration_thread,
4761 any_online_cpu(cpu_online_map)); 4763 any_online_cpu(cpu_online_map));
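
The sched.c hunks above swap per_cpu(..., raw_smp_processor_id()) for __raw_get_cpu_var(), which reaches the same per-cpu slot without the separate processor-id lookup. A minimal sketch of the two spellings, using an invented demo_counter variable; neither form verifies that preemption is disabled, so it only suits places where an occasional hit on the "wrong" CPU is harmless or the caller is already pinned.

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_bump(void)
{
        per_cpu(demo_counter, raw_smp_processor_id()) += 1;    /* old spelling */
        __raw_get_cpu_var(demo_counter) += 1;                  /* new spelling */
}
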
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 336f92d64e2e..9e2f1c6e73d7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,6 +470,8 @@ static int cpu_callback(struct notifier_block *nfb,
470 break; 470 break;
471#ifdef CONFIG_HOTPLUG_CPU 471#ifdef CONFIG_HOTPLUG_CPU
472 case CPU_UP_CANCELED: 472 case CPU_UP_CANCELED:
473 if (!per_cpu(ksoftirqd, hotcpu))
474 break;
473 /* Unbind so it can run. Fall thru. */ 475 /* Unbind so it can run. Fall thru. */
474 kthread_bind(per_cpu(ksoftirqd, hotcpu), 476 kthread_bind(per_cpu(ksoftirqd, hotcpu),
475 any_online_cpu(cpu_online_map)); 477 any_online_cpu(cpu_online_map));
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 14c7faf02909..b5c3b94e01ce 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -36,7 +36,7 @@ static struct notifier_block panic_block = {
36 36
37void touch_softlockup_watchdog(void) 37void touch_softlockup_watchdog(void)
38{ 38{
39 per_cpu(touch_timestamp, raw_smp_processor_id()) = jiffies; 39 __raw_get_cpu_var(touch_timestamp) = jiffies;
40} 40}
41EXPORT_SYMBOL(touch_softlockup_watchdog); 41EXPORT_SYMBOL(touch_softlockup_watchdog);
42 42
@@ -127,6 +127,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
127 break; 127 break;
128#ifdef CONFIG_HOTPLUG_CPU 128#ifdef CONFIG_HOTPLUG_CPU
129 case CPU_UP_CANCELED: 129 case CPU_UP_CANCELED:
130 if (!per_cpu(watchdog_task, hotcpu))
131 break;
130 /* Unbind so it can run. Fall thru. */ 132 /* Unbind so it can run. Fall thru. */
131 kthread_bind(per_cpu(watchdog_task, hotcpu), 133 kthread_bind(per_cpu(watchdog_task, hotcpu),
132 any_online_cpu(cpu_online_map)); 134 any_online_cpu(cpu_online_map));
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index dcfb5d731466..2c0aacc37c55 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -4,6 +4,7 @@
4#include <linux/cpu.h> 4#include <linux/cpu.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/syscalls.h> 6#include <linux/syscalls.h>
7#include <linux/kthread.h>
7#include <asm/atomic.h> 8#include <asm/atomic.h>
8#include <asm/semaphore.h> 9#include <asm/semaphore.h>
9#include <asm/uaccess.h> 10#include <asm/uaccess.h>
@@ -25,13 +26,11 @@ static unsigned int stopmachine_num_threads;
25static atomic_t stopmachine_thread_ack; 26static atomic_t stopmachine_thread_ack;
26static DECLARE_MUTEX(stopmachine_mutex); 27static DECLARE_MUTEX(stopmachine_mutex);
27 28
28static int stopmachine(void *cpu) 29static int stopmachine(void *unused)
29{ 30{
30 int irqs_disabled = 0; 31 int irqs_disabled = 0;
31 int prepared = 0; 32 int prepared = 0;
32 33
33 set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
34
35 /* Ack: we are alive */ 34 /* Ack: we are alive */
36 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ 35 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
37 atomic_inc(&stopmachine_thread_ack); 36 atomic_inc(&stopmachine_thread_ack);
@@ -85,7 +84,8 @@ static void stopmachine_set_state(enum stopmachine_state state)
85 84
86static int stop_machine(void) 85static int stop_machine(void)
87{ 86{
88 int i, ret = 0; 87 int ret = 0;
88 unsigned int i;
89 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 89 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
90 90
91 /* One high-prio thread per cpu. We'll do this one. */ 91 /* One high-prio thread per cpu. We'll do this one. */
@@ -96,11 +96,16 @@ static int stop_machine(void)
96 stopmachine_state = STOPMACHINE_WAIT; 96 stopmachine_state = STOPMACHINE_WAIT;
97 97
98 for_each_online_cpu(i) { 98 for_each_online_cpu(i) {
99 struct task_struct *tsk;
99 if (i == raw_smp_processor_id()) 100 if (i == raw_smp_processor_id())
100 continue; 101 continue;
101 ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL); 102 tsk = kthread_create(stopmachine, NULL, "stopmachine");
102 if (ret < 0) 103 if (IS_ERR(tsk)) {
104 ret = PTR_ERR(tsk);
103 break; 105 break;
106 }
107 kthread_bind(tsk, i);
108 wake_up_process(tsk);
104 stopmachine_num_threads++; 109 stopmachine_num_threads++;
105 } 110 }
106 111
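
The stop_machine() rework above replaces kernel_thread() plus set_cpus_allowed() in the child with kthread_create()/kthread_bind()/wake_up_process() in the parent, so each thread is pinned to its CPU before it ever runs. A condensed sketch of that call sequence; demo_fn and demo_spawn_bound are illustrative names, not part of the patch.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_fn(void *unused)
{
        /* Starts life already bound to its target CPU. */
        return 0;
}

static int demo_spawn_bound(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = kthread_create(demo_fn, NULL, "demo/%u", cpu);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        kthread_bind(tsk, cpu);         /* pinned while still asleep */
        wake_up_process(tsk);
        return 0;
}
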
diff --git a/kernel/sys.c b/kernel/sys.c
index 90930b28d2ca..2d5179c67cec 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -137,14 +137,15 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
137 unsigned long val, void *v) 137 unsigned long val, void *v)
138{ 138{
139 int ret = NOTIFY_DONE; 139 int ret = NOTIFY_DONE;
140 struct notifier_block *nb; 140 struct notifier_block *nb, *next_nb;
141 141
142 nb = rcu_dereference(*nl); 142 nb = rcu_dereference(*nl);
143 while (nb) { 143 while (nb) {
144 next_nb = rcu_dereference(nb->next);
144 ret = nb->notifier_call(nb, val, v); 145 ret = nb->notifier_call(nb, val, v);
145 if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) 146 if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
146 break; 147 break;
147 nb = rcu_dereference(nb->next); 148 nb = next_nb;
148 } 149 }
149 return ret; 150 return ret;
150} 151}
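
The reason for caching next_nb above is that a callback may unlink itself while the chain is being walked. The fragment below sketches that case on a raw notifier chain, where locking is left entirely to the caller; demo_chain, demo_once_call, demo_once_nb and demo_fire are made-up names, and the caller is assumed to serialise register/unregister/call externally.

#include <linux/notifier.h>

static RAW_NOTIFIER_HEAD(demo_chain);

/* Unlinks itself the first time it fires.  Because the walker now reads
 * nb->next before invoking us, this self-removal does not make it follow
 * the pointer of an already-unlinked block. */
static int demo_once_call(struct notifier_block *nb, unsigned long event,
                          void *data)
{
        raw_notifier_chain_unregister(&demo_chain, nb);
        return NOTIFY_OK;
}

static struct notifier_block demo_once_nb = {
        .notifier_call = demo_once_call,
};

static void demo_fire(void)
{
        raw_notifier_chain_register(&demo_chain, &demo_once_nb);
        raw_notifier_call_chain(&demo_chain, 0, NULL);  /* runs, self-removes */
        raw_notifier_call_chain(&demo_chain, 0, NULL);  /* chain is now empty */
}
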
@@ -588,7 +589,7 @@ void emergency_restart(void)
588} 589}
589EXPORT_SYMBOL_GPL(emergency_restart); 590EXPORT_SYMBOL_GPL(emergency_restart);
590 591
591void kernel_restart_prepare(char *cmd) 592static void kernel_restart_prepare(char *cmd)
592{ 593{
593 blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); 594 blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
594 system_state = SYSTEM_RESTART; 595 system_state = SYSTEM_RESTART;
@@ -622,7 +623,7 @@ EXPORT_SYMBOL_GPL(kernel_restart);
622 * Move into place and start executing a preloaded standalone 623 * Move into place and start executing a preloaded standalone
623 * executable. If nothing was preloaded return an error. 624 * executable. If nothing was preloaded return an error.
624 */ 625 */
625void kernel_kexec(void) 626static void kernel_kexec(void)
626{ 627{
627#ifdef CONFIG_KEXEC 628#ifdef CONFIG_KEXEC
628 struct kimage *image; 629 struct kimage *image;
@@ -636,7 +637,6 @@ void kernel_kexec(void)
636 machine_kexec(image); 637 machine_kexec(image);
637#endif 638#endif
638} 639}
639EXPORT_SYMBOL_GPL(kernel_kexec);
640 640
641void kernel_shutdown_prepare(enum system_states state) 641void kernel_shutdown_prepare(enum system_states state)
642{ 642{
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index eb8bd214e7d7..2c0e65819448 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -143,7 +143,6 @@ static struct ctl_table_header root_table_header =
143 143
144static ctl_table kern_table[]; 144static ctl_table kern_table[];
145static ctl_table vm_table[]; 145static ctl_table vm_table[];
146static ctl_table proc_table[];
147static ctl_table fs_table[]; 146static ctl_table fs_table[];
148static ctl_table debug_table[]; 147static ctl_table debug_table[];
149static ctl_table dev_table[]; 148static ctl_table dev_table[];
@@ -203,12 +202,6 @@ static ctl_table root_table[] = {
203 }, 202 },
204#endif 203#endif
205 { 204 {
206 .ctl_name = CTL_PROC,
207 .procname = "proc",
208 .mode = 0555,
209 .child = proc_table,
210 },
211 {
212 .ctl_name = CTL_FS, 205 .ctl_name = CTL_FS,
213 .procname = "fs", 206 .procname = "fs",
214 .mode = 0555, 207 .mode = 0555,
@@ -927,10 +920,6 @@ static ctl_table vm_table[] = {
927 { .ctl_name = 0 } 920 { .ctl_name = 0 }
928}; 921};
929 922
930static ctl_table proc_table[] = {
931 { .ctl_name = 0 }
932};
933
934static ctl_table fs_table[] = { 923static ctl_table fs_table[] = {
935 { 924 {
936 .ctl_name = FS_NRINODE, 925 .ctl_name = FS_NRINODE,
diff --git a/kernel/timer.c b/kernel/timer.c
index f35b3939e937..eb97371b87d8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -146,7 +146,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
146void fastcall init_timer(struct timer_list *timer) 146void fastcall init_timer(struct timer_list *timer)
147{ 147{
148 timer->entry.next = NULL; 148 timer->entry.next = NULL;
149 timer->base = per_cpu(tvec_bases, raw_smp_processor_id()); 149 timer->base = __raw_get_cpu_var(tvec_bases);
150} 150}
151EXPORT_SYMBOL(init_timer); 151EXPORT_SYMBOL(init_timer);
152 152
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 740c5abceb07..565cf7a1febd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -428,22 +428,34 @@ int schedule_delayed_work_on(int cpu,
428 return ret; 428 return ret;
429} 429}
430 430
431int schedule_on_each_cpu(void (*func) (void *info), void *info) 431/**
432 * schedule_on_each_cpu - call a function on each online CPU from keventd
433 * @func: the function to call
434 * @info: a pointer to pass to func()
435 *
436 * Returns zero on success.
437 * Returns -ve errno on failure.
438 *
439 * Appears to be racy against CPU hotplug.
440 *
441 * schedule_on_each_cpu() is very slow.
442 */
443int schedule_on_each_cpu(void (*func)(void *info), void *info)
432{ 444{
433 int cpu; 445 int cpu;
434 struct work_struct *work; 446 struct work_struct *works;
435 447
436 work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL); 448 works = alloc_percpu(struct work_struct);
437 449 if (!works)
438 if (!work)
439 return -ENOMEM; 450 return -ENOMEM;
451
440 for_each_online_cpu(cpu) { 452 for_each_online_cpu(cpu) {
441 INIT_WORK(work + cpu, func, info); 453 INIT_WORK(per_cpu_ptr(works, cpu), func, info);
442 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), 454 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
443 work + cpu); 455 per_cpu_ptr(works, cpu));
444 } 456 }
445 flush_workqueue(keventd_wq); 457 flush_workqueue(keventd_wq);
446 kfree(work); 458 free_percpu(works);
447 return 0; 459 return 0;
448} 460}
449 461
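
A minimal caller of the interface documented in the new kernel-doc above might look like the sketch below; demo_note_cpu and demo_touch_all_cpus are invented. Because schedule_on_each_cpu() flushes keventd it may sleep, so it has to be called from process context.

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void demo_note_cpu(void *info)
{
        printk(KERN_INFO "%s: running on cpu %d\n",
               (char *)info, smp_processor_id());
}

static int demo_touch_all_cpus(void)
{
        return schedule_on_each_cpu(demo_note_cpu, "demo");
}
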
@@ -578,6 +590,8 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
578 590
579 case CPU_UP_CANCELED: 591 case CPU_UP_CANCELED:
580 list_for_each_entry(wq, &workqueues, list) { 592 list_for_each_entry(wq, &workqueues, list) {
593 if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
594 continue;
581 /* Unbind so it can run. */ 595 /* Unbind so it can run. */
582 kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread, 596 kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
583 any_online_cpu(cpu_online_map)); 597 any_online_cpu(cpu_online_map));
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ed2ae3b0cd06..d71e38c54ea5 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -317,16 +317,16 @@ EXPORT_SYMBOL(bitmap_scnprintf);
317 317
318/** 318/**
319 * bitmap_parse - convert an ASCII hex string into a bitmap. 319 * bitmap_parse - convert an ASCII hex string into a bitmap.
320 * @buf: pointer to buffer in user space containing string. 320 * @ubuf: pointer to buffer in user space containing string.
321 * @buflen: buffer size in bytes. If string is smaller than this 321 * @ubuflen: buffer size in bytes. If string is smaller than this
322 * then it must be terminated with a \0. 322 * then it must be terminated with a \0.
323 * @maskp: pointer to bitmap array that will contain result. 323 * @maskp: pointer to bitmap array that will contain result.
324 * @nmaskbits: size of bitmap, in bits. 324 * @nmaskbits: size of bitmap, in bits.
325 * 325 *
326 * Commas group hex digits into chunks. Each chunk defines exactly 32 326 * Commas group hex digits into chunks. Each chunk defines exactly 32
327 * bits of the resultant bitmask. No chunk may specify a value larger 327 * bits of the resultant bitmask. No chunk may specify a value larger
328 * than 32 bits (-EOVERFLOW), and if a chunk specifies a smaller value 328 * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
329 * then leading 0-bits are prepended. -EINVAL is returned for illegal 329 * then leading 0-bits are prepended. %-EINVAL is returned for illegal
330 * characters and for grouping errors such as "1,,5", ",44", "," and "". 330 * characters and for grouping errors such as "1,,5", ",44", "," and "".
331 * Leading and trailing whitespace accepted, but not embedded whitespace. 331 * Leading and trailing whitespace accepted, but not embedded whitespace.
332 */ 332 */
@@ -452,8 +452,8 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
452 452
453/** 453/**
454 * bitmap_parselist - convert list format ASCII string to bitmap 454 * bitmap_parselist - convert list format ASCII string to bitmap
455 * @buf: read nul-terminated user string from this buffer 455 * @bp: read nul-terminated user string from this buffer
456 * @mask: write resulting mask here 456 * @maskp: write resulting mask here
457 * @nmaskbits: number of bits in mask to be written 457 * @nmaskbits: number of bits in mask to be written
458 * 458 *
459 * Input format is a comma-separated list of decimal numbers and 459 * Input format is a comma-separated list of decimal numbers and
@@ -461,10 +461,11 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
461 * decimal numbers, the smallest and largest bit numbers set in 461 * decimal numbers, the smallest and largest bit numbers set in
462 * the range. 462 * the range.
463 * 463 *
464 * Returns 0 on success, -errno on invalid input strings: 464 * Returns 0 on success, -errno on invalid input strings.
465 * -EINVAL: second number in range smaller than first 465 * Error values:
466 * -EINVAL: invalid character in string 466 * %-EINVAL: second number in range smaller than first
467 * -ERANGE: bit number specified too large for mask 467 * %-EINVAL: invalid character in string
468 * %-ERANGE: bit number specified too large for mask
468 */ 469 */
469int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) 470int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
470{ 471{
@@ -625,10 +626,10 @@ EXPORT_SYMBOL(bitmap_remap);
625 626
626/** 627/**
627 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit 628 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
628 * @oldbit - bit position to be mapped 629 * @oldbit: bit position to be mapped
629 * @old: defines domain of map 630 * @old: defines domain of map
630 * @new: defines range of map 631 * @new: defines range of map
631 * @bits: number of bits in each of these bitmaps 632 * @bits: number of bits in each of these bitmaps
632 * 633 *
633 * Let @old and @new define a mapping of bit positions, such that 634 * Let @old and @new define a mapping of bit positions, such that
634 * whatever position is held by the n-th set bit in @old is mapped 635 * whatever position is held by the n-th set bit in @old is mapped
@@ -790,7 +791,7 @@ EXPORT_SYMBOL(bitmap_release_region);
790 * 791 *
791 * Allocate (set bits in) a specified region of a bitmap. 792 * Allocate (set bits in) a specified region of a bitmap.
792 * 793 *
793 * Return 0 on success, or -EBUSY if specified region wasn't 794 * Return 0 on success, or %-EBUSY if specified region wasn't
794 * free (not all bits were zero). 795 * free (not all bits were zero).
795 */ 796 */
796int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) 797int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
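
For readers of the reworked comments, a hypothetical caller of bitmap_parselist() could look like the sketch below; demo_parse_cpu_list is invented, and the error returns are exactly the %-EINVAL/%-ERANGE cases listed above.

#include <linux/bitmap.h>
#include <linux/threads.h>

/* Parse a "0,3-5,8" style list into an NR_CPUS-sized bitmap. */
static int demo_parse_cpu_list(const char *str, unsigned long *mask)
{
        bitmap_zero(mask, NR_CPUS);
        return bitmap_parselist(str, mask, NR_CPUS);
}
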
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index 115d149af407..7f6dd68d2d09 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -53,9 +53,9 @@ EXPORT_SYMBOL(crc_ccitt_table);
53 53
54/** 54/**
55 * crc_ccitt - recompute the CRC for the data buffer 55 * crc_ccitt - recompute the CRC for the data buffer
56 * @crc - previous CRC value 56 * @crc: previous CRC value
57 * @buffer - data pointer 57 * @buffer: data pointer
58 * @len - number of bytes in the buffer 58 * @len: number of bytes in the buffer
59 */ 59 */
60u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len) 60u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
61{ 61{
diff --git a/lib/crc16.c b/lib/crc16.c
index 011fe573c666..8737b084d1f9 100644
--- a/lib/crc16.c
+++ b/lib/crc16.c
@@ -47,12 +47,12 @@ u16 const crc16_table[256] = {
47EXPORT_SYMBOL(crc16_table); 47EXPORT_SYMBOL(crc16_table);
48 48
49/** 49/**
50 * Compute the CRC-16 for the data buffer 50 * crc16 - compute the CRC-16 for the data buffer
51 * @crc: previous CRC value
52 * @buffer: data pointer
53 * @len: number of bytes in the buffer
51 * 54 *
52 * @param crc previous CRC value 55 * Returns the updated CRC value.
53 * @param buffer data pointer
54 * @param len number of bytes in the buffer
55 * @return the updated CRC value
56 */ 56 */
57u16 crc16(u16 crc, u8 const *buffer, size_t len) 57u16 crc16(u16 crc, u8 const *buffer, size_t len)
58{ 58{
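
The "previous CRC value" wording in the fixed comment refers to incremental use: chain the return value across buffer chunks, as in the illustrative demo_crc16() below (seed and split point are arbitrary for the example).

#include <linux/crc16.h>
#include <linux/types.h>

static u16 demo_crc16(const u8 *buf, size_t len)
{
        u16 crc = crc16(0, buf, len / 2);

        return crc16(crc, buf + len / 2, len - len / 2);
}
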
diff --git a/lib/crc32.c b/lib/crc32.c
index 065198f98b3f..285fd9bc61be 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -42,20 +42,21 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
42MODULE_DESCRIPTION("Ethernet CRC32 calculations"); 42MODULE_DESCRIPTION("Ethernet CRC32 calculations");
43MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
44 44
45/**
46 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
47 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
48 * other uses, or the previous crc32 value if computing incrementally.
49 * @p: pointer to buffer over which CRC is run
50 * @len: length of buffer @p
51 */
52u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len);
53
45#if CRC_LE_BITS == 1 54#if CRC_LE_BITS == 1
46/* 55/*
47 * In fact, the table-based code will work in this case, but it can be 56 * In fact, the table-based code will work in this case, but it can be
48 * simplified by inlining the table in ?: form. 57 * simplified by inlining the table in ?: form.
49 */ 58 */
50 59
51/**
52 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
53 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
54 * other uses, or the previous crc32 value if computing incrementally.
55 * @p - pointer to buffer over which CRC is run
56 * @len - length of buffer @p
57 *
58 */
59u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len) 60u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
60{ 61{
61 int i; 62 int i;
@@ -68,14 +69,6 @@ u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
68} 69}
69#else /* Table-based approach */ 70#else /* Table-based approach */
70 71
71/**
72 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
73 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
74 * other uses, or the previous crc32 value if computing incrementally.
75 * @p - pointer to buffer over which CRC is run
76 * @len - length of buffer @p
77 *
78 */
79u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len) 72u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
80{ 73{
81# if CRC_LE_BITS == 8 74# if CRC_LE_BITS == 8
@@ -145,20 +138,21 @@ u32 __attribute_pure__ crc32_le(u32 crc, unsigned char const *p, size_t len)
145} 138}
146#endif 139#endif
147 140
141/**
142 * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
143 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
144 * other uses, or the previous crc32 value if computing incrementally.
145 * @p: pointer to buffer over which CRC is run
146 * @len: length of buffer @p
147 */
148u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len);
149
148#if CRC_BE_BITS == 1 150#if CRC_BE_BITS == 1
149/* 151/*
150 * In fact, the table-based code will work in this case, but it can be 152 * In fact, the table-based code will work in this case, but it can be
151 * simplified by inlining the table in ?: form. 153 * simplified by inlining the table in ?: form.
152 */ 154 */
153 155
154/**
155 * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
156 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
157 * other uses, or the previous crc32 value if computing incrementally.
158 * @p - pointer to buffer over which CRC is run
159 * @len - length of buffer @p
160 *
161 */
162u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len) 156u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
163{ 157{
164 int i; 158 int i;
@@ -173,14 +167,6 @@ u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
173} 167}
174 168
175#else /* Table-based approach */ 169#else /* Table-based approach */
176/**
177 * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
178 * @crc - seed value for computation. ~0 for Ethernet, sometimes 0 for
179 * other uses, or the previous crc32 value if computing incrementally.
180 * @p - pointer to buffer over which CRC is run
181 * @len - length of buffer @p
182 *
183 */
184u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len) 170u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
185{ 171{
186# if CRC_BE_BITS == 8 172# if CRC_BE_BITS == 8
@@ -249,6 +235,10 @@ u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
249} 235}
250#endif 236#endif
251 237
238/**
239 * bitreverse - reverse the order of bits in a u32 value
240 * @x: value to be bit-reversed
241 */
252u32 bitreverse(u32 x) 242u32 bitreverse(u32 x)
253{ 243{
254 x = (x >> 16) | (x << 16); 244 x = (x >> 16) | (x << 16);
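
The relocated kerneldoc says to seed crc32_le() with ~0 for Ethernet-style use. A hypothetical caller following that convention, with the final complement that IEEE 802.3 additionally applies to the FCS, might read:

#include <linux/crc32.h>
#include <linux/types.h>

static u32 demo_ether_crc32(const u8 *buf, size_t len)
{
        /* ~0 seed per the comment above; the trailing ^ ~0 is the usual
         * 802.3 final inversion, not something crc32_le() does itself. */
        return crc32_le(~0, buf, len) ^ ~0;
}
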
diff --git a/lib/idr.c b/lib/idr.c
index d226259c3c28..de19030a999b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -48,15 +48,21 @@ static struct idr_layer *alloc_layer(struct idr *idp)
48 return(p); 48 return(p);
49} 49}
50 50
51/* only called when idp->lock is held */
52static void __free_layer(struct idr *idp, struct idr_layer *p)
53{
54 p->ary[0] = idp->id_free;
55 idp->id_free = p;
56 idp->id_free_cnt++;
57}
58
51static void free_layer(struct idr *idp, struct idr_layer *p) 59static void free_layer(struct idr *idp, struct idr_layer *p)
52{ 60{
53 /* 61 /*
54 * Depends on the return element being zeroed. 62 * Depends on the return element being zeroed.
55 */ 63 */
56 spin_lock(&idp->lock); 64 spin_lock(&idp->lock);
57 p->ary[0] = idp->id_free; 65 __free_layer(idp, p);
58 idp->id_free = p;
59 idp->id_free_cnt++;
60 spin_unlock(&idp->lock); 66 spin_unlock(&idp->lock);
61} 67}
62 68
@@ -184,12 +190,14 @@ build_up:
184 * The allocation failed. If we built part of 190 * The allocation failed. If we built part of
185 * the structure tear it down. 191 * the structure tear it down.
186 */ 192 */
193 spin_lock(&idp->lock);
187 for (new = p; p && p != idp->top; new = p) { 194 for (new = p; p && p != idp->top; new = p) {
188 p = p->ary[0]; 195 p = p->ary[0];
189 new->ary[0] = NULL; 196 new->ary[0] = NULL;
190 new->bitmap = new->count = 0; 197 new->bitmap = new->count = 0;
191 free_layer(idp, new); 198 __free_layer(idp, new);
192 } 199 }
200 spin_unlock(&idp->lock);
193 return -1; 201 return -1;
194 } 202 }
195 new->ary[0] = p; 203 new->ary[0] = p;
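
The idr change is an instance of a common refactoring: split out a lock-held __helper so a batch caller can take the lock once around a loop instead of once per element. A generic sketch of the pattern with invented demo_* names rather than idr internals:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_free_list);

/* Lock-held variant: leading underscores signal, as in the idr change
 * above, that demo_lock is already held by the caller. */
static void __demo_release(struct list_head *item)
{
        list_add(item, &demo_free_list);
}

/* Single-item wrapper keeps the old calling convention. */
static void demo_release(struct list_head *item)
{
        spin_lock(&demo_lock);
        __demo_release(item);
        spin_unlock(&demo_lock);
}

/* Batch path: one lock round-trip for the whole loop, as in the
 * reworked idr error path above. */
static void demo_release_many(struct list_head *items, int n)
{
        int i;

        spin_lock(&demo_lock);
        for (i = 0; i < n; i++)
                __demo_release(&items[i]);
        spin_unlock(&demo_lock);
}
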
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 52b6dc144ce3..60f46803af3f 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -88,7 +88,7 @@ crc32c_le(u32 crc, unsigned char const *p, size_t len)
88 * reflect output bytes = true 88 * reflect output bytes = true
89 */ 89 */
90 90
91static u32 crc32c_table[256] = { 91static const u32 crc32c_table[256] = {
92 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, 92 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L,
93 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, 93 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL,
94 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL, 94 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL,
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index b32efae7688e..637d55608de5 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -530,7 +530,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
530 int ret = tag_get(slot, tag, offset); 530 int ret = tag_get(slot, tag, offset);
531 531
532 BUG_ON(ret && saw_unset_tag); 532 BUG_ON(ret && saw_unset_tag);
533 return ret; 533 return !!ret;
534 } 534 }
535 slot = slot->slots[offset]; 535 slot = slot->slots[offset];
536 shift -= RADIX_TREE_MAP_SHIFT; 536 shift -= RADIX_TREE_MAP_SHIFT;
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index f8ac9fa95de1..2cc11faa4ff1 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -54,7 +54,6 @@ static DEFINE_MUTEX(rslistlock);
54 54
55/** 55/**
56 * rs_init - Initialize a Reed-Solomon codec 56 * rs_init - Initialize a Reed-Solomon codec
57 *
58 * @symsize: symbol size, bits (1-8) 57 * @symsize: symbol size, bits (1-8)
59 * @gfpoly: Field generator polynomial coefficients 58 * @gfpoly: Field generator polynomial coefficients
60 * @fcr: first root of RS code generator polynomial, index form 59 * @fcr: first root of RS code generator polynomial, index form
@@ -62,7 +61,7 @@ static DEFINE_MUTEX(rslistlock);
62 * @nroots: RS code generator polynomial degree (number of roots) 61 * @nroots: RS code generator polynomial degree (number of roots)
63 * 62 *
64 * Allocate a control structure and the polynom arrays for faster 63 * Allocate a control structure and the polynom arrays for faster
65 * en/decoding. Fill the arrays according to the given parameters 64 * en/decoding. Fill the arrays according to the given parameters.
66 */ 65 */
67static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, 66static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
68 int prim, int nroots) 67 int prim, int nroots)
@@ -155,8 +154,7 @@ errrs:
155 154
156 155
157/** 156/**
158 * free_rs - Free the rs control structure, if its not longer used 157 * free_rs - Free the rs control structure, if it is no longer used
159 *
160 * @rs: the control structure which is not longer used by the 158 * @rs: the control structure which is not longer used by the
161 * caller 159 * caller
162 */ 160 */
@@ -176,7 +174,6 @@ void free_rs(struct rs_control *rs)
176 174
177/** 175/**
178 * init_rs - Find a matching or allocate a new rs control structure 176 * init_rs - Find a matching or allocate a new rs control structure
179 *
180 * @symsize: the symbol size (number of bits) 177 * @symsize: the symbol size (number of bits)
181 * @gfpoly: the extended Galois field generator polynomial coefficients, 178 * @gfpoly: the extended Galois field generator polynomial coefficients,
182 * with the 0th coefficient in the low order bit. The polynomial 179 * with the 0th coefficient in the low order bit. The polynomial
@@ -236,7 +233,6 @@ out:
236#ifdef CONFIG_REED_SOLOMON_ENC8 233#ifdef CONFIG_REED_SOLOMON_ENC8
237/** 234/**
238 * encode_rs8 - Calculate the parity for data values (8bit data width) 235 * encode_rs8 - Calculate the parity for data values (8bit data width)
239 *
240 * @rs: the rs control structure 236 * @rs: the rs control structure
241 * @data: data field of a given type 237 * @data: data field of a given type
242 * @len: data length 238 * @len: data length
@@ -258,7 +254,6 @@ EXPORT_SYMBOL_GPL(encode_rs8);
258#ifdef CONFIG_REED_SOLOMON_DEC8 254#ifdef CONFIG_REED_SOLOMON_DEC8
259/** 255/**
260 * decode_rs8 - Decode codeword (8bit data width) 256 * decode_rs8 - Decode codeword (8bit data width)
261 *
262 * @rs: the rs control structure 257 * @rs: the rs control structure
263 * @data: data field of a given type 258 * @data: data field of a given type
264 * @par: received parity data field 259 * @par: received parity data field
@@ -285,7 +280,6 @@ EXPORT_SYMBOL_GPL(decode_rs8);
285#ifdef CONFIG_REED_SOLOMON_ENC16 280#ifdef CONFIG_REED_SOLOMON_ENC16
286/** 281/**
287 * encode_rs16 - Calculate the parity for data values (16bit data width) 282 * encode_rs16 - Calculate the parity for data values (16bit data width)
288 *
289 * @rs: the rs control structure 283 * @rs: the rs control structure
290 * @data: data field of a given type 284 * @data: data field of a given type
291 * @len: data length 285 * @len: data length
@@ -305,7 +299,6 @@ EXPORT_SYMBOL_GPL(encode_rs16);
305#ifdef CONFIG_REED_SOLOMON_DEC16 299#ifdef CONFIG_REED_SOLOMON_DEC16
306/** 300/**
307 * decode_rs16 - Decode codeword (16bit data width) 301 * decode_rs16 - Decode codeword (16bit data width)
308 *
309 * @rs: the rs control structure 302 * @rs: the rs control structure
310 * @data: data field of a given type 303 * @data: data field of a given type
311 * @par: received parity data field 304 * @par: received parity data field
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b07db5ca3f66..797428afd111 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -187,49 +187,49 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
187 size -= precision; 187 size -= precision;
188 if (!(type&(ZEROPAD+LEFT))) { 188 if (!(type&(ZEROPAD+LEFT))) {
189 while(size-->0) { 189 while(size-->0) {
190 if (buf <= end) 190 if (buf < end)
191 *buf = ' '; 191 *buf = ' ';
192 ++buf; 192 ++buf;
193 } 193 }
194 } 194 }
195 if (sign) { 195 if (sign) {
196 if (buf <= end) 196 if (buf < end)
197 *buf = sign; 197 *buf = sign;
198 ++buf; 198 ++buf;
199 } 199 }
200 if (type & SPECIAL) { 200 if (type & SPECIAL) {
201 if (base==8) { 201 if (base==8) {
202 if (buf <= end) 202 if (buf < end)
203 *buf = '0'; 203 *buf = '0';
204 ++buf; 204 ++buf;
205 } else if (base==16) { 205 } else if (base==16) {
206 if (buf <= end) 206 if (buf < end)
207 *buf = '0'; 207 *buf = '0';
208 ++buf; 208 ++buf;
209 if (buf <= end) 209 if (buf < end)
210 *buf = digits[33]; 210 *buf = digits[33];
211 ++buf; 211 ++buf;
212 } 212 }
213 } 213 }
214 if (!(type & LEFT)) { 214 if (!(type & LEFT)) {
215 while (size-- > 0) { 215 while (size-- > 0) {
216 if (buf <= end) 216 if (buf < end)
217 *buf = c; 217 *buf = c;
218 ++buf; 218 ++buf;
219 } 219 }
220 } 220 }
221 while (i < precision--) { 221 while (i < precision--) {
222 if (buf <= end) 222 if (buf < end)
223 *buf = '0'; 223 *buf = '0';
224 ++buf; 224 ++buf;
225 } 225 }
226 while (i-- > 0) { 226 while (i-- > 0) {
227 if (buf <= end) 227 if (buf < end)
228 *buf = tmp[i]; 228 *buf = tmp[i];
229 ++buf; 229 ++buf;
230 } 230 }
231 while (size-- > 0) { 231 while (size-- > 0) {
232 if (buf <= end) 232 if (buf < end)
233 *buf = ' '; 233 *buf = ' ';
234 ++buf; 234 ++buf;
235 } 235 }
@@ -272,7 +272,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
272 /* 'z' changed to 'Z' --davidm 1/25/99 */ 272 /* 'z' changed to 'Z' --davidm 1/25/99 */
273 /* 't' added for ptrdiff_t */ 273 /* 't' added for ptrdiff_t */
274 274
275 /* Reject out-of-range values early */ 275 /* Reject out-of-range values early. Large positive sizes are
276 used for unknown buffer sizes. */
276 if (unlikely((int) size < 0)) { 277 if (unlikely((int) size < 0)) {
277 /* There can be only one.. */ 278 /* There can be only one.. */
278 static int warn = 1; 279 static int warn = 1;
@@ -282,16 +283,17 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
282 } 283 }
283 284
284 str = buf; 285 str = buf;
285 end = buf + size - 1; 286 end = buf + size;
286 287
287 if (end < buf - 1) { 288 /* Make sure end is always >= buf */
288 end = ((void *) -1); 289 if (end < buf) {
289 size = end - buf + 1; 290 end = ((void *)-1);
291 size = end - buf;
290 } 292 }
291 293
292 for (; *fmt ; ++fmt) { 294 for (; *fmt ; ++fmt) {
293 if (*fmt != '%') { 295 if (*fmt != '%') {
294 if (str <= end) 296 if (str < end)
295 *str = *fmt; 297 *str = *fmt;
296 ++str; 298 ++str;
297 continue; 299 continue;
@@ -357,17 +359,17 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
357 case 'c': 359 case 'c':
358 if (!(flags & LEFT)) { 360 if (!(flags & LEFT)) {
359 while (--field_width > 0) { 361 while (--field_width > 0) {
360 if (str <= end) 362 if (str < end)
361 *str = ' '; 363 *str = ' ';
362 ++str; 364 ++str;
363 } 365 }
364 } 366 }
365 c = (unsigned char) va_arg(args, int); 367 c = (unsigned char) va_arg(args, int);
366 if (str <= end) 368 if (str < end)
367 *str = c; 369 *str = c;
368 ++str; 370 ++str;
369 while (--field_width > 0) { 371 while (--field_width > 0) {
370 if (str <= end) 372 if (str < end)
371 *str = ' '; 373 *str = ' ';
372 ++str; 374 ++str;
373 } 375 }
@@ -382,18 +384,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
382 384
383 if (!(flags & LEFT)) { 385 if (!(flags & LEFT)) {
384 while (len < field_width--) { 386 while (len < field_width--) {
385 if (str <= end) 387 if (str < end)
386 *str = ' '; 388 *str = ' ';
387 ++str; 389 ++str;
388 } 390 }
389 } 391 }
390 for (i = 0; i < len; ++i) { 392 for (i = 0; i < len; ++i) {
391 if (str <= end) 393 if (str < end)
392 *str = *s; 394 *str = *s;
393 ++str; ++s; 395 ++str; ++s;
394 } 396 }
395 while (len < field_width--) { 397 while (len < field_width--) {
396 if (str <= end) 398 if (str < end)
397 *str = ' '; 399 *str = ' ';
398 ++str; 400 ++str;
399 } 401 }
@@ -426,7 +428,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
426 continue; 428 continue;
427 429
428 case '%': 430 case '%':
429 if (str <= end) 431 if (str < end)
430 *str = '%'; 432 *str = '%';
431 ++str; 433 ++str;
432 continue; 434 continue;
@@ -449,11 +451,11 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
449 break; 451 break;
450 452
451 default: 453 default:
452 if (str <= end) 454 if (str < end)
453 *str = '%'; 455 *str = '%';
454 ++str; 456 ++str;
455 if (*fmt) { 457 if (*fmt) {
456 if (str <= end) 458 if (str < end)
457 *str = *fmt; 459 *str = *fmt;
458 ++str; 460 ++str;
459 } else { 461 } else {
@@ -483,14 +485,13 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
483 str = number(str, end, num, base, 485 str = number(str, end, num, base,
484 field_width, precision, flags); 486 field_width, precision, flags);
485 } 487 }
486 if (str <= end) 488 if (size > 0) {
487 *str = '\0'; 489 if (str < end)
488 else if (size > 0) 490 *str = '\0';
489 /* don't write out a null byte if the buf size is zero */ 491 else
490 *end = '\0'; 492 *end = '\0';
491 /* the trailing null byte doesn't count towards the total 493 }
492 * ++str; 494 /* the trailing null byte doesn't count towards the total */
493 */
494 return str-buf; 495 return str-buf;
495} 496}
496 497
@@ -848,3 +849,26 @@ int sscanf(const char * buf, const char * fmt, ...)
848} 849}
849 850
850EXPORT_SYMBOL(sscanf); 851EXPORT_SYMBOL(sscanf);
852
853
854/* Simplified asprintf. */
855char *kasprintf(gfp_t gfp, const char *fmt, ...)
856{
857 va_list ap;
858 unsigned int len;
859 char *p;
860
861 va_start(ap, fmt);
862 len = vsnprintf(NULL, 0, fmt, ap);
863 va_end(ap);
864
865 p = kmalloc(len+1, gfp);
866 if (!p)
867 return NULL;
868 va_start(ap, fmt);
869 vsnprintf(p, len+1, fmt, ap);
870 va_end(ap);
871 return p;
872}
873
874EXPORT_SYMBOL(kasprintf);
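
The new kasprintf() sizes its buffer with a measuring vsnprintf(NULL, 0, ...) pass, which the reworked "buf < end" bookkeeping above makes well-defined. A typical (hypothetical) caller checks for NULL and owns the returned buffer:

#include <linux/kernel.h>
#include <linux/slab.h>

static void demo_use_kasprintf(int index)
{
        char *label = kasprintf(GFP_KERNEL, "demo-device-%d", index);

        if (!label)
                return;                 /* allocation failed */
        printk(KERN_INFO "created %s\n", label);
        kfree(label);                   /* caller owns the string */
}
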
diff --git a/mm/filemap.c b/mm/filemap.c
index 807a463fd5ed..9c7334bafda8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -828,6 +828,32 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
828} 828}
829EXPORT_SYMBOL(grab_cache_page_nowait); 829EXPORT_SYMBOL(grab_cache_page_nowait);
830 830
831/*
832 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
833 * a _large_ part of the i/o request. Imagine the worst scenario:
834 *
835 * ---R__________________________________________B__________
836 * ^ reading here ^ bad block(assume 4k)
837 *
838 * read(R) => miss => readahead(R...B) => media error => frustrating retries
839 * => failing the whole request => read(R) => read(R+1) =>
840 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
841 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
842 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
843 *
844 * It is going insane. Fix it by quickly scaling down the readahead size.
845 */
846static void shrink_readahead_size_eio(struct file *filp,
847 struct file_ra_state *ra)
848{
849 if (!ra->ra_pages)
850 return;
851
852 ra->ra_pages /= 4;
853 printk(KERN_WARNING "Reducing readahead size to %luK\n",
854 ra->ra_pages << (PAGE_CACHE_SHIFT - 10));
855}
856
831/** 857/**
832 * do_generic_mapping_read - generic file read routine 858 * do_generic_mapping_read - generic file read routine
833 * @mapping: address_space to be read 859 * @mapping: address_space to be read
@@ -985,6 +1011,7 @@ readpage:
985 } 1011 }
986 unlock_page(page); 1012 unlock_page(page);
987 error = -EIO; 1013 error = -EIO;
1014 shrink_readahead_size_eio(filp, &ra);
988 goto readpage_error; 1015 goto readpage_error;
989 } 1016 }
990 unlock_page(page); 1017 unlock_page(page);
@@ -1522,6 +1549,7 @@ page_not_uptodate:
1522 * Things didn't work out. Return zero to tell the 1549 * Things didn't work out. Return zero to tell the
1523 * mm layer so, possibly freeing the page cache page first. 1550 * mm layer so, possibly freeing the page cache page first.
1524 */ 1551 */
1552 shrink_readahead_size_eio(file, ra);
1525 page_cache_release(page); 1553 page_cache_release(page);
1526 return NULL; 1554 return NULL;
1527} 1555}
@@ -1892,7 +1920,7 @@ int remove_suid(struct dentry *dentry)
1892EXPORT_SYMBOL(remove_suid); 1920EXPORT_SYMBOL(remove_suid);
1893 1921
1894size_t 1922size_t
1895__filemap_copy_from_user_iovec(char *vaddr, 1923__filemap_copy_from_user_iovec_inatomic(char *vaddr,
1896 const struct iovec *iov, size_t base, size_t bytes) 1924 const struct iovec *iov, size_t base, size_t bytes)
1897{ 1925{
1898 size_t copied = 0, left = 0; 1926 size_t copied = 0, left = 0;
@@ -1908,12 +1936,8 @@ __filemap_copy_from_user_iovec(char *vaddr,
1908 vaddr += copy; 1936 vaddr += copy;
1909 iov++; 1937 iov++;
1910 1938
1911 if (unlikely(left)) { 1939 if (unlikely(left))
1912 /* zero the rest of the target like __copy_from_user */
1913 if (bytes)
1914 memset(vaddr, 0, bytes);
1915 break; 1940 break;
1916 }
1917 } 1941 }
1918 return copied - left; 1942 return copied - left;
1919} 1943}
diff --git a/mm/filemap.h b/mm/filemap.h
index 5683cde22055..536979fb4ba7 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -16,15 +16,23 @@
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17 17
18size_t 18size_t
19__filemap_copy_from_user_iovec(char *vaddr, 19__filemap_copy_from_user_iovec_inatomic(char *vaddr,
20 const struct iovec *iov, 20 const struct iovec *iov,
21 size_t base, 21 size_t base,
22 size_t bytes); 22 size_t bytes);
23 23
24/* 24/*
25 * Copy as much as we can into the page and return the number of bytes which 25 * Copy as much as we can into the page and return the number of bytes which
26 * were sucessfully copied. If a fault is encountered then clear the page 26 * were sucessfully copied. If a fault is encountered then clear the page
27 * out to (offset+bytes) and return the number of bytes which were copied. 27 * out to (offset+bytes) and return the number of bytes which were copied.
28 *
29 * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
30 * to *NOT* zero any tail of the buffer that it failed to copy. If it does,
31 * and if the following non-atomic copy succeeds, then there is a small window
32 * where the target page contains neither the data before the write, nor the
33 * data after the write (it contains zero). A read at this time will see
34 * data that is inconsistent with any ordering of the read and the write.
35 * (This has been detected in practice).
28 */ 36 */
29static inline size_t 37static inline size_t
30filemap_copy_from_user(struct page *page, unsigned long offset, 38filemap_copy_from_user(struct page *page, unsigned long offset,
@@ -60,13 +68,15 @@ filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
60 size_t copied; 68 size_t copied;
61 69
62 kaddr = kmap_atomic(page, KM_USER0); 70 kaddr = kmap_atomic(page, KM_USER0);
63 copied = __filemap_copy_from_user_iovec(kaddr + offset, iov, 71 copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
64 base, bytes); 72 base, bytes);
65 kunmap_atomic(kaddr, KM_USER0); 73 kunmap_atomic(kaddr, KM_USER0);
66 if (copied != bytes) { 74 if (copied != bytes) {
67 kaddr = kmap(page); 75 kaddr = kmap(page);
68 copied = __filemap_copy_from_user_iovec(kaddr + offset, iov, 76 copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
69 base, bytes); 77 base, bytes);
78 if (bytes - copied)
79 memset(kaddr + offset + copied, 0, bytes - copied);
70 kunmap(page); 80 kunmap(page);
71 } 81 }
72 return copied; 82 return copied;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ec4a1a950df9..73e0f23b7f51 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -632,6 +632,10 @@ int do_migrate_pages(struct mm_struct *mm,
632 632
633 down_read(&mm->mmap_sem); 633 down_read(&mm->mmap_sem);
634 634
635 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
636 if (err)
637 goto out;
638
635/* 639/*
636 * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 640 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
637 * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 641 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
@@ -691,7 +695,7 @@ int do_migrate_pages(struct mm_struct *mm,
691 if (err < 0) 695 if (err < 0)
692 break; 696 break;
693 } 697 }
694 698out:
695 up_read(&mm->mmap_sem); 699 up_read(&mm->mmap_sem);
696 if (err < 0) 700 if (err < 0)
697 return err; 701 return err;
diff --git a/mm/migrate.c b/mm/migrate.c
index 1c2a71aa05cd..3f1e0c2c942c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -616,15 +616,13 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
616 /* 616 /*
617 * Establish migration ptes or remove ptes 617 * Establish migration ptes or remove ptes
618 */ 618 */
619 if (try_to_unmap(page, 1) != SWAP_FAIL) { 619 try_to_unmap(page, 1);
620 if (!page_mapped(page)) 620 if (!page_mapped(page))
621 rc = move_to_new_page(newpage, page); 621 rc = move_to_new_page(newpage, page);
622 } else
623 /* A vma has VM_LOCKED set -> permanent failure */
624 rc = -EPERM;
625 622
626 if (rc) 623 if (rc)
627 remove_migration_ptes(page, page); 624 remove_migration_ptes(page, page);
625
628unlock: 626unlock:
629 unlock_page(page); 627 unlock_page(page);
630 628
@@ -976,3 +974,23 @@ out2:
976} 974}
977#endif 975#endif
978 976
977/*
978 * Call migration functions in the vma_ops that may prepare
979 * memory in a vm for migration. migration functions may perform
980 * the migration for vmas that do not have an underlying page struct.
981 */
982int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
983 const nodemask_t *from, unsigned long flags)
984{
985 struct vm_area_struct *vma;
986 int err = 0;
987
988 for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
989 if (vma->vm_ops && vma->vm_ops->migrate) {
990 err = vma->vm_ops->migrate(vma, to, from, flags);
991 if (err)
992 break;
993 }
994 }
995 return err;
996}
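
migrate_vmas() above gives drivers a hook for mappings that have no underlying page structs. The stub below is only a guess at what such a hook could look like: the parameter order is inferred from the vma->vm_ops->migrate(vma, to, from, flags) call in the hunk, and demo_vma_migrate/demo_vm_ops are invented names.

#include <linux/mm.h>
#include <linux/nodemask.h>

static int demo_vma_migrate(struct vm_area_struct *vma,
                            const nodemask_t *to, const nodemask_t *from,
                            unsigned long flags)
{
        /* A real implementation would relocate or drop its own backing
         * memory here; returning 0 lets the migration proceed. */
        return 0;
}

static struct vm_operations_struct demo_vm_ops = {
        .migrate = demo_vma_migrate,
};
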
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 423db0db7c02..6c1174fcf52c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -957,8 +957,7 @@ restart:
957 goto got_pg; 957 goto got_pg;
958 958
959 do { 959 do {
960 if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL)) 960 wakeup_kswapd(*z, order);
961 wakeup_kswapd(*z, order);
962 } while (*(++z)); 961 } while (*(++z));
963 962
964 /* 963 /*
diff --git a/mm/pdflush.c b/mm/pdflush.c
index df7e50b8f70c..b02102feeb4b 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -104,21 +104,20 @@ static int __pdflush(struct pdflush_work *my_work)
104 list_move(&my_work->list, &pdflush_list); 104 list_move(&my_work->list, &pdflush_list);
105 my_work->when_i_went_to_sleep = jiffies; 105 my_work->when_i_went_to_sleep = jiffies;
106 spin_unlock_irq(&pdflush_lock); 106 spin_unlock_irq(&pdflush_lock);
107
108 schedule(); 107 schedule();
109 if (try_to_freeze()) { 108 try_to_freeze();
110 spin_lock_irq(&pdflush_lock);
111 continue;
112 }
113
114 spin_lock_irq(&pdflush_lock); 109 spin_lock_irq(&pdflush_lock);
115 if (!list_empty(&my_work->list)) { 110 if (!list_empty(&my_work->list)) {
116 printk("pdflush: bogus wakeup!\n"); 111 /*
112 * Someone woke us up, but without removing our control
113 * structure from the global list. swsusp will do this
114 * in try_to_freeze()->refrigerator(). Handle it.
115 */
117 my_work->fn = NULL; 116 my_work->fn = NULL;
118 continue; 117 continue;
119 } 118 }
120 if (my_work->fn == NULL) { 119 if (my_work->fn == NULL) {
121 printk("pdflush: NULL work function\n"); 120 printk("pdflush: bogus wakeup\n");
122 continue; 121 continue;
123 } 122 }
124 spin_unlock_irq(&pdflush_lock); 123 spin_unlock_irq(&pdflush_lock);
diff --git a/mm/readahead.c b/mm/readahead.c
index 0f142a40984b..e39e416860d7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -118,8 +118,7 @@ static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
118#define list_to_page(head) (list_entry((head)->prev, struct page, lru)) 118#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
119 119
120/** 120/**
121 * read_cache_pages - populate an address space with some pages, and 121 * read_cache_pages - populate an address space with some pages & start reads against them
122 * start reads against them.
123 * @mapping: the address_space 122 * @mapping: the address_space
124 * @pages: The address of a list_head which contains the target pages. These 123 * @pages: The address of a list_head which contains the target pages. These
125 * pages have their ->index populated and are otherwise uninitialised. 124 * pages have their ->index populated and are otherwise uninitialised.
@@ -182,14 +181,11 @@ static int read_pages(struct address_space *mapping, struct file *filp,
182 list_del(&page->lru); 181 list_del(&page->lru);
183 if (!add_to_page_cache(page, mapping, 182 if (!add_to_page_cache(page, mapping,
184 page->index, GFP_KERNEL)) { 183 page->index, GFP_KERNEL)) {
185 ret = mapping->a_ops->readpage(filp, page); 184 mapping->a_ops->readpage(filp, page);
186 if (ret != AOP_TRUNCATED_PAGE) { 185 if (!pagevec_add(&lru_pvec, page))
187 if (!pagevec_add(&lru_pvec, page)) 186 __pagevec_lru_add(&lru_pvec);
188 __pagevec_lru_add(&lru_pvec); 187 } else
189 continue; 188 page_cache_release(page);
190 } /* else fall through to release */
191 }
192 page_cache_release(page);
193 } 189 }
194 pagevec_lru_add(&lru_pvec); 190 pagevec_lru_add(&lru_pvec);
195 ret = 0; 191 ret = 0;
diff --git a/mm/rmap.c b/mm/rmap.c
index 882a85826bb2..e76909e880ca 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,9 +562,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
562 * If it's recently referenced (perhaps page_referenced 562 * If it's recently referenced (perhaps page_referenced
563 * skipped over this mm) then we should reactivate it. 563 * skipped over this mm) then we should reactivate it.
564 */ 564 */
565 if ((vma->vm_flags & VM_LOCKED) || 565 if (!migration && ((vma->vm_flags & VM_LOCKED) ||
566 (ptep_clear_flush_young(vma, address, pte) 566 (ptep_clear_flush_young(vma, address, pte)))) {
567 && !migration)) {
568 ret = SWAP_FAIL; 567 ret = SWAP_FAIL;
569 goto out_unmap; 568 goto out_unmap;
570 } 569 }
@@ -771,7 +770,7 @@ static int try_to_unmap_file(struct page *page, int migration)
771 770
772 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 771 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
773 shared.vm_set.list) { 772 shared.vm_set.list) {
774 if (vma->vm_flags & VM_LOCKED) 773 if ((vma->vm_flags & VM_LOCKED) && !migration)
775 continue; 774 continue;
776 cursor = (unsigned long) vma->vm_private_data; 775 cursor = (unsigned long) vma->vm_private_data;
777 if (cursor > max_nl_cursor) 776 if (cursor > max_nl_cursor)
@@ -805,7 +804,7 @@ static int try_to_unmap_file(struct page *page, int migration)
805 do { 804 do {
806 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, 805 list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
807 shared.vm_set.list) { 806 shared.vm_set.list) {
808 if (vma->vm_flags & VM_LOCKED) 807 if ((vma->vm_flags & VM_LOCKED) && !migration)
809 continue; 808 continue;
810 cursor = (unsigned long) vma->vm_private_data; 809 cursor = (unsigned long) vma->vm_private_data;
811 while ( cursor < max_nl_cursor && 810 while ( cursor < max_nl_cursor &&
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index cee3397ec277..706c0025ec5e 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1761,7 +1761,7 @@ translate_compat_table(const char *name,
1761 goto free_newinfo; 1761 goto free_newinfo;
1762 1762
1763 /* And one copy for every other CPU */ 1763 /* And one copy for every other CPU */
1764 for_each_cpu(i) 1764 for_each_possible_cpu(i)
1765 if (newinfo->entries[i] && newinfo->entries[i] != entry1) 1765 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1766 memcpy(newinfo->entries[i], entry1, newinfo->size); 1766 memcpy(newinfo->entries[i], entry1, newinfo->size);
1767 1767
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cc9423de7311..60b11aece5c3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -244,7 +244,7 @@ static unsigned int rt_hash_rnd;
244 244
245static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 245static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
246#define RT_CACHE_STAT_INC(field) \ 246#define RT_CACHE_STAT_INC(field) \
247 (per_cpu(rt_cache_stat, raw_smp_processor_id()).field++) 247 (__raw_get_cpu_var(rt_cache_stat).field++)
248 248
249static int rt_intern_hash(unsigned hash, struct rtable *rth, 249static int rt_intern_hash(unsigned hash, struct rtable *rth,
250 struct rtable **res); 250 struct rtable **res);
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 75f21d843c1d..ce59fc2d8de4 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -18,7 +18,8 @@ def getsizes(file):
18 for l in os.popen("nm --size-sort " + file).readlines(): 18 for l in os.popen("nm --size-sort " + file).readlines():
19 size, type, name = l[:-1].split() 19 size, type, name = l[:-1].split()
20 if type in "tTdDbB": 20 if type in "tTdDbB":
21 sym[name] = int(size, 16) 21 if "." in name: name = "static." + name.split(".")[0]
22 sym[name] = sym.get(name, 0) + int(size, 16)
22 return sym 23 return sym
23 24
24old = getsizes(sys.argv[1]) 25old = getsizes(sys.argv[1])
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index dadfa20ffec0..b34924663ac1 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -89,11 +89,21 @@ sub bysize($) {
89# 89#
90my $funcre = qr/^$x* <(.*)>:$/; 90my $funcre = qr/^$x* <(.*)>:$/;
91my $func; 91my $func;
92my $file, $lastslash;
93
92while (my $line = <STDIN>) { 94while (my $line = <STDIN>) {
93 if ($line =~ m/$funcre/) { 95 if ($line =~ m/$funcre/) {
94 $func = $1; 96 $func = $1;
95 } 97 }
96 if ($line =~ m/$re/) { 98 elsif ($line =~ m/(.*):\s*file format/) {
99 $file = $1;
100 $file =~ s/\.ko//;
101 $lastslash = rindex($file, "/");
102 if ($lastslash != -1) {
103 $file = substr($file, $lastslash + 1);
104 }
105 }
106 elsif ($line =~ m/$re/) {
97 my $size = $1; 107 my $size = $1;
98 $size = hex($size) if ($size =~ /^0x/); 108 $size = hex($size) if ($size =~ /^0x/);
99 109
@@ -109,7 +119,7 @@ while (my $line = <STDIN>) {
109 $addr =~ s/ /0/g; 119 $addr =~ s/ /0/g;
110 $addr = "0x$addr"; 120 $addr = "0x$addr";
111 121
112 my $intro = "$addr $func:"; 122 my $intro = "$addr $func [$file]:";
113 my $padlen = 56 - length($intro); 123 my $padlen = 56 - length($intro);
114 while ($padlen > 0) { 124 while ($padlen > 0) {
115 $intro .= ' '; 125 $intro .= ' ';
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 99fe4b7fb2f1..00e21297aefe 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -253,6 +253,7 @@ my $lineprefix="";
253# 3 - scanning prototype. 253# 3 - scanning prototype.
254# 4 - documentation block 254# 4 - documentation block
255my $state; 255my $state;
256my $in_doc_sect;
256 257
257#declaration types: can be 258#declaration types: can be
258# 'function', 'struct', 'union', 'enum', 'typedef' 259# 'function', 'struct', 'union', 'enum', 'typedef'
@@ -1064,7 +1065,7 @@ sub output_struct_man(%) {
1064 } 1065 }
1065 print "};\n.br\n"; 1066 print "};\n.br\n";
1066 1067
1067 print ".SH Arguments\n"; 1068 print ".SH Members\n";
1068 foreach $parameter (@{$args{'parameterlist'}}) { 1069 foreach $parameter (@{$args{'parameterlist'}}) {
1069 ($parameter =~ /^#/) && next; 1070 ($parameter =~ /^#/) && next;
1070 1071
@@ -1673,6 +1674,9 @@ sub process_state3_type($$) {
1673# replace <, >, and & 1674# replace <, >, and &
1674sub xml_escape($) { 1675sub xml_escape($) {
1675 my $text = shift; 1676 my $text = shift;
1677 if (($output_mode eq "text") || ($output_mode eq "man")) {
1678 return $text;
1679 }
1676 $text =~ s/\&/\\\\\\amp;/g; 1680 $text =~ s/\&/\\\\\\amp;/g;
1677 $text =~ s/\</\\\\\\lt;/g; 1681 $text =~ s/\</\\\\\\lt;/g;
1678 $text =~ s/\>/\\\\\\gt;/g; 1682 $text =~ s/\>/\\\\\\gt;/g;
@@ -1706,6 +1710,7 @@ sub process_file($) {
1706 if ($state == 0) { 1710 if ($state == 0) {
1707 if (/$doc_start/o) { 1711 if (/$doc_start/o) {
1708 $state = 1; # next line is always the function name 1712 $state = 1; # next line is always the function name
1713 $in_doc_sect = 0;
1709 } 1714 }
1710 } elsif ($state == 1) { # this line is the function name (always) 1715 } elsif ($state == 1) { # this line is the function name (always)
1711 if (/$doc_block/o) { 1716 if (/$doc_block/o) {
@@ -1756,12 +1761,20 @@ sub process_file($) {
1756 $newcontents = $2; 1761 $newcontents = $2;
1757 1762
1758 if ($contents ne "") { 1763 if ($contents ne "") {
1764 if (!$in_doc_sect && $verbose) {
1765 print STDERR "Warning(${file}:$.): contents before sections\n";
1766 ++$warnings;
1767 }
1759 dump_section($section, xml_escape($contents)); 1768 dump_section($section, xml_escape($contents));
1760 $section = $section_default; 1769 $section = $section_default;
1761 } 1770 }
1762 1771
1772 $in_doc_sect = 1;
1763 $contents = $newcontents; 1773 $contents = $newcontents;
1764 if ($contents ne "") { 1774 if ($contents ne "") {
1775 if (substr($contents, 0, 1) eq " ") {
1776 $contents = substr($contents, 1);
1777 }
1765 $contents .= "\n"; 1778 $contents .= "\n";
1766 } 1779 }
1767 $section = $newsection; 1780 $section = $newsection;
@@ -1776,7 +1789,7 @@ sub process_file($) {
1776 $prototype = ""; 1789 $prototype = "";
1777 $state = 3; 1790 $state = 3;
1778 $brcount = 0; 1791 $brcount = 0;
1779# print STDERR "end of doc comment, looking for prototype\n"; 1792# print STDERR "end of doc comment, looking for prototype\n";
1780 } elsif (/$doc_content/) { 1793 } elsif (/$doc_content/) {
1781 # miguel-style comment kludge, look for blank lines after 1794 # miguel-style comment kludge, look for blank lines after
1782 # @parameter line to signify start of description 1795 # @parameter line to signify start of description
@@ -1793,7 +1806,7 @@ sub process_file($) {
1793 print STDERR "Warning(${file}:$.): bad line: $_"; 1806 print STDERR "Warning(${file}:$.): bad line: $_";
1794 ++$warnings; 1807 ++$warnings;
1795 } 1808 }
1796 } elsif ($state == 3) { # scanning for function { (end of prototype) 1809 } elsif ($state == 3) { # scanning for function '{' (end of prototype)
1797 if ($decl_type eq 'function') { 1810 if ($decl_type eq 'function') {
1798 process_state3_function($_, $file); 1811 process_state3_function($_, $file);
1799 } else { 1812 } else {
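
The new "contents before sections" warning fires when free-form text precedes the first explicit section heading in a kernel-doc block. A comment laid out to keep the script quiet, wrapped around an invented demo_clamp() helper:

/**
 * demo_clamp - clamp a value to an inclusive range
 * @val: value to clamp
 * @lo: lower bound of the range
 * @hi: upper bound of the range
 *
 * Description:
 * Free-form text placed before an explicit heading such as this one is
 * what the warning reports; keeping the one-line summary, then the
 * @parameter list, then the named sections in this order keeps
 * kernel-doc quiet.
 */
static int demo_clamp(int val, int lo, int hi)
{
        if (val < lo)
                return lo;
        if (val > hi)
                return hi;
        return val;
}
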
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 080ab036b67a..95754e2f71b8 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -114,8 +114,9 @@ config SOUND_VRC5477
114 with the AC97 codec. 114 with the AC97 codec.
115 115
116config SOUND_AU1550_AC97 116config SOUND_AU1550_AC97
117 tristate "Au1550 AC97 Sound" 117 tristate "Au1550/Au1200 AC97 Sound"
118 depends on SOUND_PRIME && SOC_AU1550 118 select SND_AC97_CODEC
119 depends on SOUND_PRIME && (SOC_AU1550 || SOC_AU1200)
119 120
120config SOUND_TRIDENT 121config SOUND_TRIDENT
121 tristate "Trident 4DWave DX/NX, SiS 7018 or ALi 5451 PCI Audio Core" 122 tristate "Trident 4DWave DX/NX, SiS 7018 or ALi 5451 PCI Audio Core"
diff --git a/sound/oss/au1550_ac97.c b/sound/oss/au1550_ac97.c
index 9011abe241ab..4cdb86252d67 100644
--- a/sound/oss/au1550_ac97.c
+++ b/sound/oss/au1550_ac97.c
@@ -213,7 +213,8 @@ rdcodec(struct ac97_codec *codec, u8 addr)
213 } 213 }
214 if (i == POLL_COUNT) { 214 if (i == POLL_COUNT) {
215 err("rdcodec: read poll expired!"); 215 err("rdcodec: read poll expired!");
216 return 0; 216 data = 0;
217 goto out;
217 } 218 }
218 219
219 /* wait for command done? 220 /* wait for command done?
@@ -226,7 +227,8 @@ rdcodec(struct ac97_codec *codec, u8 addr)
226 } 227 }
227 if (i == POLL_COUNT) { 228 if (i == POLL_COUNT) {
228 err("rdcodec: read cmdwait expired!"); 229 err("rdcodec: read cmdwait expired!");
229 return 0; 230 data = 0;
231 goto out;
230 } 232 }
231 233
232 data = au_readl(PSC_AC97CDC) & 0xffff; 234 data = au_readl(PSC_AC97CDC) & 0xffff;
@@ -237,6 +239,7 @@ rdcodec(struct ac97_codec *codec, u8 addr)
237 au_writel(PSC_AC97EVNT_CD, PSC_AC97EVNT); 239 au_writel(PSC_AC97EVNT_CD, PSC_AC97EVNT);
238 au_sync(); 240 au_sync();
239 241
242 out:
240 spin_unlock_irqrestore(&s->lock, flags); 243 spin_unlock_irqrestore(&s->lock, flags);
241 244
242 return data; 245 return data;
@@ -1892,6 +1895,8 @@ static /*const */ struct file_operations au1550_audio_fops = {
1892 1895
1893MODULE_AUTHOR("Advanced Micro Devices (AMD), dan@embeddededge.com"); 1896MODULE_AUTHOR("Advanced Micro Devices (AMD), dan@embeddededge.com");
1894MODULE_DESCRIPTION("Au1550 AC97 Audio Driver"); 1897MODULE_DESCRIPTION("Au1550 AC97 Audio Driver");
1898MODULE_LICENSE("GPL");
1899
1895 1900
1896static int __devinit 1901static int __devinit
1897au1550_probe(void) 1902au1550_probe(void)
diff --git a/sound/oss/emu10k1/midi.c b/sound/oss/emu10k1/midi.c
index 25ae8e4a488d..8ac77df86397 100644
--- a/sound/oss/emu10k1/midi.c
+++ b/sound/oss/emu10k1/midi.c
@@ -45,7 +45,7 @@
45#include "../sound_config.h" 45#include "../sound_config.h"
46#endif 46#endif
47 47
48static DEFINE_SPINLOCK(midi_spinlock __attribute((unused))); 48static DEFINE_SPINLOCK(midi_spinlock);
49 49
50static void init_midi_hdr(struct midi_hdr *midihdr) 50static void init_midi_hdr(struct midi_hdr *midihdr)
51{ 51{
diff --git a/sound/oss/msnd.c b/sound/oss/msnd.c
index 5dbfc0f9c3c7..ba38d6200099 100644
--- a/sound/oss/msnd.c
+++ b/sound/oss/msnd.c
@@ -47,7 +47,7 @@
47static multisound_dev_t *devs[MSND_MAX_DEVS]; 47static multisound_dev_t *devs[MSND_MAX_DEVS];
48static int num_devs; 48static int num_devs;
49 49
50int __init msnd_register(multisound_dev_t *dev) 50int msnd_register(multisound_dev_t *dev)
51{ 51{
52 int i; 52 int i;
53 53