-rw-r--r--  Documentation/kernel-parameters.txt | 43
-rw-r--r--  arch/alpha/kernel/process.c | 29
-rw-r--r--  arch/alpha/kernel/ptrace.c | 47
-rw-r--r--  arch/alpha/kernel/smp.c | 2
-rw-r--r--  arch/arm/kernel/process.c | 13
-rw-r--r--  arch/arm/kernel/ptrace.c | 37
-rw-r--r--  arch/arm/kernel/smp.c | 4
-rw-r--r--  arch/arm/kernel/traps.c | 4
-rw-r--r--  arch/arm26/kernel/process.c | 5
-rw-r--r--  arch/arm26/kernel/ptrace.c | 29
-rw-r--r--  arch/arm26/kernel/traps.c | 8
-rw-r--r--  arch/cris/arch-v10/kernel/process.c | 4
-rw-r--r--  arch/cris/arch-v10/kernel/ptrace.c | 4
-rw-r--r--  arch/cris/arch-v32/kernel/process.c | 6
-rw-r--r--  arch/cris/arch-v32/kernel/ptrace.c | 6
-rw-r--r--  arch/cris/arch-v32/kernel/smp.c | 4
-rw-r--r--  arch/cris/arch-v32/mm/tlb.c | 4
-rw-r--r--  arch/frv/kernel/process.c | 4
-rw-r--r--  arch/h8300/kernel/process.c | 2
-rw-r--r--  arch/i386/kernel/process.c | 26
-rw-r--r--  arch/i386/kernel/smpboot.c | 4
-rw-r--r--  arch/i386/kernel/vm86.c | 2
-rw-r--r--  arch/ia64/ia32/elfcore32.h | 3
-rw-r--r--  arch/ia64/ia32/ia32_signal.c | 4
-rw-r--r--  arch/ia64/ia32/ia32_support.c | 4
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 12
-rw-r--r--  arch/ia64/kernel/mca.c | 4
-rw-r--r--  arch/ia64/kernel/perfmon.c | 32
-rw-r--r--  arch/ia64/kernel/process.c | 12
-rw-r--r--  arch/ia64/kernel/ptrace.c | 24
-rw-r--r--  arch/ia64/kernel/setup.c | 18
-rw-r--r--  arch/ia64/kernel/signal.c | 10
-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 2
-rw-r--r--  arch/m32r/kernel/process.c | 5
-rw-r--r--  arch/m32r/kernel/ptrace.c | 25
-rw-r--r--  arch/m32r/kernel/smpboot.c | 2
-rw-r--r--  arch/m68k/amiga/amiints.c | 46
-rw-r--r--  arch/m68k/amiga/amisound.c | 2
-rw-r--r--  arch/m68k/amiga/cia.c | 8
-rw-r--r--  arch/m68k/amiga/config.c | 27
-rw-r--r--  arch/m68k/apollo/config.c | 3
-rw-r--r--  arch/m68k/atari/config.c | 9
-rw-r--r--  arch/m68k/bvme6000/rtc.c | 6
-rw-r--r--  arch/m68k/hp300/config.c | 3
-rw-r--r--  arch/m68k/kernel/asm-offsets.c | 2
-rw-r--r--  arch/m68k/kernel/head.S | 2
-rw-r--r--  arch/m68k/kernel/process.c | 7
-rw-r--r--  arch/m68k/kernel/setup.c | 19
-rw-r--r--  arch/m68k/kernel/signal.c | 62
-rw-r--r--  arch/m68k/kernel/sys_m68k.c | 39
-rw-r--r--  arch/m68k/kernel/traps.c | 38
-rw-r--r--  arch/m68k/kernel/vmlinux-std.lds | 1
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds | 3
-rw-r--r--  arch/m68k/lib/checksum.c | 2
-rw-r--r--  arch/m68k/mac/config.c | 3
-rw-r--r--  arch/m68k/mac/iop.c | 4
-rw-r--r--  arch/m68k/mac/misc.c | 326
-rw-r--r--  arch/m68k/math-emu/multi_arith.h | 2
-rw-r--r--  arch/m68k/mm/kmap.c | 12
-rw-r--r--  arch/m68k/mvme16x/rtc.c | 6
-rw-r--r--  arch/m68k/q40/config.c | 5
-rw-r--r--  arch/m68k/sun3/config.c | 3
-rw-r--r--  arch/m68k/sun3x/config.c | 4
-rw-r--r--  arch/m68knommu/kernel/process.c | 5
-rw-r--r--  arch/mips/kernel/process.c | 10
-rw-r--r--  arch/mips/kernel/ptrace.c | 14
-rw-r--r--  arch/mips/kernel/ptrace32.c | 10
-rw-r--r--  arch/mips/kernel/smp_mt.c | 7
-rw-r--r--  arch/mips/kernel/syscall.c | 2
-rw-r--r--  arch/mips/kernel/traps.c | 2
-rw-r--r--  arch/mips/pmc-sierra/yosemite/smp.c | 4
-rw-r--r--  arch/mips/sgi-ip27/ip27-smp.c | 4
-rw-r--r--  arch/mips/sibyte/cfe/smp.c | 2
-rw-r--r--  arch/parisc/kernel/process.c | 6
-rw-r--r--  arch/parisc/kernel/smp.c | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 14
-rw-r--r--  arch/powerpc/kernel/ptrace-common.h | 4
-rw-r--r--  arch/powerpc/kernel/smp.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 2
-rw-r--r--  arch/ppc/amiga/amiints.c | 40
-rw-r--r--  arch/ppc/amiga/cia.c | 8
-rw-r--r--  arch/ppc/amiga/config.c | 24
-rw-r--r--  arch/ppc/kernel/smp.c | 4
-rw-r--r--  arch/ppc/platforms/apus_setup.c | 30
-rw-r--r--  arch/ppc/xmon/xmon.c | 2
-rw-r--r--  arch/s390/kernel/binfmt_elf32.c | 2
-rw-r--r--  arch/s390/kernel/process.c | 12
-rw-r--r--  arch/s390/kernel/ptrace.c | 26
-rw-r--r--  arch/s390/kernel/smp.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 2
-rw-r--r--  arch/s390/kernel/traps.c | 6
-rw-r--r--  arch/sh/kernel/process.c | 46
-rw-r--r--  arch/sh/kernel/ptrace.c | 14
-rw-r--r--  arch/sh/kernel/smp.c | 2
-rw-r--r--  arch/sh64/kernel/process.c | 4
-rw-r--r--  arch/sh64/lib/dbg.c | 2
-rw-r--r--  arch/sparc/kernel/process.c | 12
-rw-r--r--  arch/sparc/kernel/ptrace.c | 4
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 2
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 2
-rw-r--r--  arch/sparc/kernel/traps.c | 4
-rw-r--r--  arch/sparc64/kernel/process.c | 10
-rw-r--r--  arch/sparc64/kernel/ptrace.c | 46
-rw-r--r--  arch/sparc64/kernel/setup.c | 2
-rw-r--r--  arch/sparc64/kernel/smp.c | 2
-rw-r--r--  arch/sparc64/kernel/traps.c | 4
-rw-r--r--  arch/um/kernel/process_kern.c | 2
-rw-r--r--  arch/um/kernel/skas/process_kern.c | 4
-rw-r--r--  arch/um/kernel/tt/exec_kern.c | 2
-rw-r--r--  arch/um/kernel/tt/process_kern.c | 8
-rw-r--r--  arch/v850/kernel/process.c | 2
-rw-r--r--  arch/v850/kernel/ptrace.c | 2
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c | 7
-rw-r--r--  arch/x86_64/ia32/ptrace32.c | 6
-rw-r--r--  arch/x86_64/kernel/i387.c | 2
-rw-r--r--  arch/x86_64/kernel/i8259.c | 2
-rw-r--r--  arch/x86_64/kernel/process.c | 11
-rw-r--r--  arch/x86_64/kernel/ptrace.c | 10
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 4
-rw-r--r--  arch/x86_64/kernel/traps.c | 6
-rw-r--r--  arch/xtensa/kernel/process.c | 4
-rw-r--r--  arch/xtensa/kernel/ptrace.c | 12
-rw-r--r--  block/elevator.c | 20
-rw-r--r--  drivers/block/amiflop.c | 30
-rw-r--r--  drivers/block/ataflop.c | 27
-rw-r--r--  drivers/char/amiserial.c | 20
-rw-r--r--  drivers/char/dsp56k.c | 29
-rw-r--r--  drivers/char/scc.h | 2
-rw-r--r--  drivers/input/evdev.c | 2
-rw-r--r--  drivers/input/joystick/amijoy.c | 4
-rw-r--r--  drivers/input/mouse/amimouse.c | 6
-rw-r--r--  drivers/macintosh/adb-iop.c | 2
-rw-r--r--  drivers/macintosh/via-macii.c | 4
-rw-r--r--  drivers/macintosh/via-maciisi.c | 22
-rw-r--r--  drivers/macintosh/via-pmu68k.c | 4
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/net/hplance.c | 2
-rw-r--r--  drivers/net/mac8390.c | 31
-rw-r--r--  drivers/net/sun3lance.c | 2
-rw-r--r--  drivers/scsi/Makefile | 3
-rw-r--r--  drivers/scsi/NCR53C9x.c | 5
-rw-r--r--  drivers/scsi/blz1230.c | 4
-rw-r--r--  drivers/scsi/blz2060.c | 4
-rw-r--r--  drivers/scsi/cyberstorm.c | 4
-rw-r--r--  drivers/scsi/cyberstormII.c | 4
-rw-r--r--  drivers/scsi/fastlane.c | 4
-rw-r--r--  drivers/scsi/oktagon_esp.c | 2
-rw-r--r--  drivers/scsi/wd33c93.c | 4
-rw-r--r--  drivers/video/amifb.c | 36
-rw-r--r--  drivers/video/aty/atyfb_base.c | 2
-rw-r--r--  drivers/video/macfb.c | 15
-rw-r--r--  drivers/zorro/proc.c | 2
-rw-r--r--  fs/compat_ioctl.c | 29
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 1088
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 1373
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 696
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 10
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 121
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.h | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 56
-rw-r--r--  fs/xfs/linux-2.6/xfs_stats.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_stats.h | 18
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 19
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h | 19
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 4
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 18
-rw-r--r--  fs/xfs/support/debug.c | 60
-rw-r--r--  fs/xfs/support/debug.h | 25
-rw-r--r--  fs/xfs/support/uuid.c | 23
-rw-r--r--  fs/xfs/xfs_arch.h | 22
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 12
-rw-r--r--  fs/xfs/xfs_attr_leaf.h | 79
-rw-r--r--  fs/xfs/xfs_bmap.c | 412
-rw-r--r--  fs/xfs/xfs_bmap.h | 7
-rw-r--r--  fs/xfs/xfs_clnt.h | 2
-rw-r--r--  fs/xfs/xfs_dfrag.c | 16
-rw-r--r--  fs/xfs/xfs_dinode.h | 22
-rw-r--r--  fs/xfs/xfs_dir.c | 2
-rw-r--r--  fs/xfs/xfs_dir.h | 2
-rw-r--r--  fs/xfs/xfs_dir2.h | 3
-rw-r--r--  fs/xfs/xfs_dir_leaf.h | 64
-rw-r--r--  fs/xfs/xfs_error.c | 1
-rw-r--r--  fs/xfs/xfs_error.h | 8
-rw-r--r--  fs/xfs/xfs_fs.h | 10
-rw-r--r--  fs/xfs/xfs_fsops.c | 26
-rw-r--r--  fs/xfs/xfs_fsops.h | 1
-rw-r--r--  fs/xfs/xfs_iget.c | 5
-rw-r--r--  fs/xfs/xfs_inode.c | 61
-rw-r--r--  fs/xfs/xfs_inode.h | 4
-rw-r--r--  fs/xfs/xfs_inode_item.c | 9
-rw-r--r--  fs/xfs/xfs_iomap.c | 425
-rw-r--r--  fs/xfs/xfs_itable.c | 5
-rw-r--r--  fs/xfs/xfs_log.c | 123
-rw-r--r--  fs/xfs/xfs_log.h | 11
-rw-r--r--  fs/xfs/xfs_log_priv.h | 77
-rw-r--r--  fs/xfs/xfs_log_recover.c | 12
-rw-r--r--  fs/xfs/xfs_mount.c | 5
-rw-r--r--  fs/xfs/xfs_mount.h | 3
-rw-r--r--  fs/xfs/xfs_rename.c | 7
-rw-r--r--  fs/xfs/xfs_rw.c | 9
-rw-r--r--  fs/xfs/xfs_sb.h | 17
-rw-r--r--  fs/xfs/xfs_trans.c | 14
-rw-r--r--  fs/xfs/xfs_trans.h | 1
-rw-r--r--  fs/xfs/xfs_utils.c | 9
-rw-r--r--  fs/xfs/xfs_vfsops.c | 50
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 193
-rw-r--r--  include/asm-alpha/mmu_context.h | 6
-rw-r--r--  include/asm-alpha/processor.h | 13
-rw-r--r--  include/asm-alpha/ptrace.h | 6
-rw-r--r--  include/asm-alpha/system.h | 18
-rw-r--r--  include/asm-alpha/thread_info.h | 2
-rw-r--r--  include/asm-arm/processor.h | 8
-rw-r--r--  include/asm-arm/system.h | 12
-rw-r--r--  include/asm-arm/thread_info.h | 7
-rw-r--r--  include/asm-arm26/system.h | 12
-rw-r--r--  include/asm-arm26/thread_info.h | 9
-rw-r--r--  include/asm-cris/arch-v10/processor.h | 2
-rw-r--r--  include/asm-cris/arch-v32/processor.h | 2
-rw-r--r--  include/asm-cris/processor.h | 3
-rw-r--r--  include/asm-cris/thread_info.h | 2
-rw-r--r--  include/asm-frv/thread_info.h | 2
-rw-r--r--  include/asm-h8300/thread_info.h | 2
-rw-r--r--  include/asm-i386/i387.h | 8
-rw-r--r--  include/asm-i386/processor.h | 12
-rw-r--r--  include/asm-i386/system.h | 9
-rw-r--r--  include/asm-i386/thread_info.h | 2
-rw-r--r--  include/asm-i386/topology.h | 1
-rw-r--r--  include/asm-ia64/compat.h | 2
-rw-r--r--  include/asm-ia64/processor.h | 2
-rw-r--r--  include/asm-ia64/ptrace.h | 4
-rw-r--r--  include/asm-ia64/system.h | 9
-rw-r--r--  include/asm-ia64/thread_info.h | 9
-rw-r--r--  include/asm-ia64/topology.h | 2
-rw-r--r--  include/asm-m32r/ptrace.h | 3
-rw-r--r--  include/asm-m32r/system.h | 10
-rw-r--r--  include/asm-m32r/thread_info.h | 2
-rw-r--r--  include/asm-m68k/amigahw.h | 12
-rw-r--r--  include/asm-m68k/amigaints.h | 2
-rw-r--r--  include/asm-m68k/checksum.h | 2
-rw-r--r--  include/asm-m68k/dsp56k.h | 2
-rw-r--r--  include/asm-m68k/floppy.h | 2
-rw-r--r--  include/asm-m68k/hardirq.h | 9
-rw-r--r--  include/asm-m68k/io.h | 49
-rw-r--r--  include/asm-m68k/irq.h | 9
-rw-r--r--  include/asm-m68k/machdep.h | 1
-rw-r--r--  include/asm-m68k/raw_io.h | 40
-rw-r--r--  include/asm-m68k/signal.h | 2
-rw-r--r--  include/asm-m68k/sun3_pgtable.h | 2
-rw-r--r--  include/asm-m68k/sun3ints.h | 1
-rw-r--r--  include/asm-m68k/sun3xflop.h | 4
-rw-r--r--  include/asm-m68k/thread_info.h | 1
-rw-r--r--  include/asm-m68k/uaccess.h | 20
-rw-r--r--  include/asm-m68k/zorro.h | 8
-rw-r--r--  include/asm-m68knommu/machdep.h | 1
-rw-r--r--  include/asm-m68knommu/thread_info.h | 2
-rw-r--r--  include/asm-mips/mach-ip27/topology.h | 1
-rw-r--r--  include/asm-mips/processor.h | 10
-rw-r--r--  include/asm-mips/system.h | 12
-rw-r--r--  include/asm-mips/thread_info.h | 2
-rw-r--r--  include/asm-parisc/system.h | 9
-rw-r--r--  include/asm-parisc/thread_info.h | 3
-rw-r--r--  include/asm-powerpc/system.h | 10
-rw-r--r--  include/asm-powerpc/thread_info.h | 3
-rw-r--r--  include/asm-powerpc/topology.h | 1
-rw-r--r--  include/asm-ppc/system.h | 10
-rw-r--r--  include/asm-s390/elf.h | 2
-rw-r--r--  include/asm-s390/processor.h | 8
-rw-r--r--  include/asm-s390/system.h | 10
-rw-r--r--  include/asm-s390/thread_info.h | 2
-rw-r--r--  include/asm-sh/ptrace.h | 10
-rw-r--r--  include/asm-sh/system.h | 10
-rw-r--r--  include/asm-sh/thread_info.h | 2
-rw-r--r--  include/asm-sh64/thread_info.h | 2
-rw-r--r--  include/asm-sparc/system.h | 12
-rw-r--r--  include/asm-sparc/thread_info.h | 3
-rw-r--r--  include/asm-sparc64/elf.h | 2
-rw-r--r--  include/asm-sparc64/mmu_context.h | 2
-rw-r--r--  include/asm-sparc64/processor.h | 5
-rw-r--r--  include/asm-sparc64/system.h | 14
-rw-r--r--  include/asm-um/thread_info.h | 3
-rw-r--r--  include/asm-v850/processor.h | 8
-rw-r--r--  include/asm-v850/thread_info.h | 2
-rw-r--r--  include/asm-x86_64/compat.h | 2
-rw-r--r--  include/asm-x86_64/i387.h | 10
-rw-r--r--  include/asm-x86_64/processor.h | 4
-rw-r--r--  include/asm-x86_64/system.h | 9
-rw-r--r--  include/asm-x86_64/thread_info.h | 2
-rw-r--r--  include/asm-x86_64/topology.h | 1
-rw-r--r--  include/asm-xtensa/processor.h | 6
-rw-r--r--  include/asm-xtensa/ptrace.h | 4
-rw-r--r--  include/asm-xtensa/thread_info.h | 2
-rw-r--r--  include/linux/sched.h | 15
-rw-r--r--  include/linux/topology.h | 2
-rw-r--r--  kernel/sched.c | 478
-rw-r--r--  mm/mempolicy.c | 4
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/swap.c | 2
-rw-r--r--  mm/tiny-shmem.c | 2
-rw-r--r--  sound/oss/dmasound/dmasound.h | 1
-rw-r--r--  sound/oss/dmasound/dmasound_atari.c | 112
-rw-r--r--  sound/oss/dmasound/dmasound_paula.c | 14
-rw-r--r--  sound/oss/dmasound/dmasound_q40.c | 18
-rw-r--r--  sound/oss/dmasound/trans_16.c | 1
308 files changed, 4600 insertions, 3798 deletions
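
Most of the churn in the diffstat above is a tree-wide conversion from open-coded
task->thread_info pointer arithmetic to the task_thread_info(), task_stack_page(),
task_pt_regs() and end_of_stack() accessors. The toy program below is a minimal
sketch of what those accessors hide; it assumes the 2.6.16-era layout (thread_info
at the base of the kernel stack page, saved registers at the top) and is
illustrative user-space C, not kernel source:

    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE 8192        /* typical 8 KiB kernel stack */

    struct pt_regs { unsigned long eax, esp; };     /* stand-in layout */
    struct thread_info { int cpu; };                /* stand-in layout */
    struct task_struct { struct thread_info *thread_info; };

    #define task_thread_info(task)  ((task)->thread_info)
    #define task_stack_page(task)   ((void *)(task)->thread_info)
    /* the -8 models the ring-0 pad the old i386 callers open-coded */
    #define task_pt_regs(task) \
            ((struct pt_regs *)((unsigned long)task_stack_page(task) + \
                                THREAD_SIZE - 8) - 1)

    int main(void)
    {
            struct task_struct t;

            t.thread_info = calloc(1, THREAD_SIZE);

            /* the old spelling, repeated at every call site ... */
            struct pt_regs *old = (struct pt_regs *)
                    ((unsigned long)t.thread_info + THREAD_SIZE - 8) - 1;
            /* ... and the new accessor; both name the same slot */
            struct pt_regs *new = task_pt_regs(&t);

            printf("same regs: %d\n", old == new);  /* prints 1 */
            free(t.thread_info);
            return 0;
    }

The pay-off shows throughout the diff below: the stack-layout arithmetic no longer
has to be repeated, and kept in sync, at every call site.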
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index dd0bfc291a68..fe11fccf7e41 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -856,6 +856,49 @@ running once the system is up.
856 856
857 mga= [HW,DRM] 857 mga= [HW,DRM]
858 858
859 migration_cost=
860 [KNL,SMP] debug: override scheduler migration costs
861 Format: <level-1-usecs>,<level-2-usecs>,...
862 This debugging option can be used to override the
863 default scheduler migration cost matrix. The numbers
864 are indexed by 'CPU domain distance'.
865 E.g. migration_cost=1000,2000,3000 on an SMT NUMA
866 box will set up an intra-core migration cost of
867 1 msec, an inter-core migration cost of 2 msecs,
868 and an inter-node migration cost of 3 msecs.
869
870 WARNING: using the wrong values here can break
871 scheduler performance, so it's only for scheduler
872 development purposes, not production environments.
873
874 migration_debug=
875 [KNL,SMP] migration cost auto-detect verbosity
 876 Format: <0|1|2>
 877 If the migration cost matrix reported at bootup
 878 seems erroneous, this option can be used to
 879 increase the verbosity of the detection process.
 880 The default is 0 (no extra messages); 1 will print
 881 some more information, and 2 will be really
 882 verbose (probably only useful if you also have a
 883 serial console attached to the system).
884
885 migration_factor=
886 [KNL,SMP] multiply/divide migration costs by a factor
 887 Format: <percent>
888 This debug option can be used to proportionally
889 increase or decrease the auto-detected migration
890 costs for all entries of the migration matrix.
 891 E.g. migration_factor=150 will increase migration
 892 costs by 50% (and thus make the scheduler less
 893 eager to migrate cache-hot tasks), while
 894 migration_factor=80 will decrease migration costs
 895 by 20% (and thus make the scheduler more eager
 896 to migrate tasks).
897
898 WARNING: using the wrong values here can break
899 scheduler performance, so it's only for scheduler
900 development purposes, not production environments.
901
859 mousedev.tap_time= 902 mousedev.tap_time=
860 [MOUSE] Maximum time between finger touching and 903 [MOUSE] Maximum time between finger touching and
861 leaving touchpad surface for touch to be considered 904 leaving touchpad surface for touch to be considered
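
As with other kernel parameters, the knobs documented above can be combined on a
single boot line. A hypothetical example (values chosen purely for illustration):

    linux ... migration_cost=1000,2000,3000 migration_debug=1

which, per the text above, seeds the cost matrix with 1, 2 and 3 msecs for domain
levels 1-3 and enables the moderately verbose detection messages.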
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index abb739b88ed1..9924fd07743a 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -276,7 +276,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
276{ 276{
277 extern void ret_from_fork(void); 277 extern void ret_from_fork(void);
278 278
279 struct thread_info *childti = p->thread_info; 279 struct thread_info *childti = task_thread_info(p);
280 struct pt_regs * childregs; 280 struct pt_regs * childregs;
281 struct switch_stack * childstack, *stack; 281 struct switch_stack * childstack, *stack;
282 unsigned long stack_offset, settls; 282 unsigned long stack_offset, settls;
@@ -285,7 +285,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
285 if (!(regs->ps & 8)) 285 if (!(regs->ps & 8))
286 stack_offset = (PAGE_SIZE-1) & (unsigned long) regs; 286 stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
287 childregs = (struct pt_regs *) 287 childregs = (struct pt_regs *)
288 (stack_offset + PAGE_SIZE + (long) childti); 288 (stack_offset + PAGE_SIZE + task_stack_page(p));
289 289
290 *childregs = *regs; 290 *childregs = *regs;
291 settls = regs->r20; 291 settls = regs->r20;
@@ -428,30 +428,15 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
428int 428int
429dump_elf_task(elf_greg_t *dest, struct task_struct *task) 429dump_elf_task(elf_greg_t *dest, struct task_struct *task)
430{ 430{
431 struct thread_info *ti; 431 dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
432 struct pt_regs *pt;
433
434 ti = task->thread_info;
435 pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
436
437 dump_elf_thread(dest, pt, ti);
438
439 return 1; 432 return 1;
440} 433}
441 434
442int 435int
443dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task) 436dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
444{ 437{
445 struct thread_info *ti; 438 struct switch_stack *sw = (struct switch_stack *)task_pt_regs(task) - 1;
446 struct pt_regs *pt;
447 struct switch_stack *sw;
448
449 ti = task->thread_info;
450 pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
451 sw = (struct switch_stack *)pt - 1;
452
453 memcpy(dest, sw->fp, 32 * 8); 439 memcpy(dest, sw->fp, 32 * 8);
454
455 return 1; 440 return 1;
456} 441}
457 442
@@ -492,8 +477,8 @@ out:
492unsigned long 477unsigned long
493thread_saved_pc(task_t *t) 478thread_saved_pc(task_t *t)
494{ 479{
495 unsigned long base = (unsigned long)t->thread_info; 480 unsigned long base = (unsigned long)task_stack_page(t);
496 unsigned long fp, sp = t->thread_info->pcb.ksp; 481 unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
497 482
498 if (sp > base && sp+6*8 < base + 16*1024) { 483 if (sp > base && sp+6*8 < base + 16*1024) {
499 fp = ((unsigned long*)sp)[6]; 484 fp = ((unsigned long*)sp)[6];
@@ -523,7 +508,7 @@ get_wchan(struct task_struct *p)
523 508
524 pc = thread_saved_pc(p); 509 pc = thread_saved_pc(p);
525 if (in_sched_functions(pc)) { 510 if (in_sched_functions(pc)) {
526 schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6]; 511 schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
527 return ((unsigned long *)schedule_frame)[12]; 512 return ((unsigned long *)schedule_frame)[12];
528 } 513 }
529 return pc; 514 return pc;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index 9969d212e94d..0cd060598f9a 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -72,6 +72,13 @@ enum {
72 REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64 72 REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
73}; 73};
74 74
75#define PT_REG(reg) \
76 (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
77
78#define SW_REG(reg) \
79 (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
80 + offsetof(struct switch_stack, reg))
81
75static int regoff[] = { 82static int regoff[] = {
76 PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3), 83 PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3),
77 PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7), 84 PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7),
@@ -103,14 +110,14 @@ get_reg_addr(struct task_struct * task, unsigned long regno)
103 unsigned long *addr; 110 unsigned long *addr;
104 111
105 if (regno == 30) { 112 if (regno == 30) {
106 addr = &task->thread_info->pcb.usp; 113 addr = &task_thread_info(task)->pcb.usp;
107 } else if (regno == 65) { 114 } else if (regno == 65) {
108 addr = &task->thread_info->pcb.unique; 115 addr = &task_thread_info(task)->pcb.unique;
109 } else if (regno == 31 || regno > 65) { 116 } else if (regno == 31 || regno > 65) {
110 zero = 0; 117 zero = 0;
111 addr = &zero; 118 addr = &zero;
112 } else { 119 } else {
113 addr = (void *)task->thread_info + regoff[regno]; 120 addr = task_stack_page(task) + regoff[regno];
114 } 121 }
115 return addr; 122 return addr;
116} 123}
@@ -125,7 +132,7 @@ get_reg(struct task_struct * task, unsigned long regno)
125 if (regno == 63) { 132 if (regno == 63) {
126 unsigned long fpcr = *get_reg_addr(task, regno); 133 unsigned long fpcr = *get_reg_addr(task, regno);
127 unsigned long swcr 134 unsigned long swcr
128 = task->thread_info->ieee_state & IEEE_SW_MASK; 135 = task_thread_info(task)->ieee_state & IEEE_SW_MASK;
129 swcr = swcr_update_status(swcr, fpcr); 136 swcr = swcr_update_status(swcr, fpcr);
130 return fpcr | swcr; 137 return fpcr | swcr;
131 } 138 }
@@ -139,8 +146,8 @@ static int
139put_reg(struct task_struct *task, unsigned long regno, unsigned long data) 146put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
140{ 147{
141 if (regno == 63) { 148 if (regno == 63) {
142 task->thread_info->ieee_state 149 task_thread_info(task)->ieee_state
143 = ((task->thread_info->ieee_state & ~IEEE_SW_MASK) 150 = ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK)
144 | (data & IEEE_SW_MASK)); 151 | (data & IEEE_SW_MASK));
145 data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data); 152 data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
146 } 153 }
@@ -188,35 +195,35 @@ ptrace_set_bpt(struct task_struct * child)
188 * branch (emulation can be tricky for fp branches). 195 * branch (emulation can be tricky for fp branches).
189 */ 196 */
190 displ = ((s32)(insn << 11)) >> 9; 197 displ = ((s32)(insn << 11)) >> 9;
191 child->thread_info->bpt_addr[nsaved++] = pc + 4; 198 task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
192 if (displ) /* guard against unoptimized code */ 199 if (displ) /* guard against unoptimized code */
193 child->thread_info->bpt_addr[nsaved++] 200 task_thread_info(child)->bpt_addr[nsaved++]
194 = pc + 4 + displ; 201 = pc + 4 + displ;
195 DBG(DBG_BPT, ("execing branch\n")); 202 DBG(DBG_BPT, ("execing branch\n"));
196 } else if (op_code == 0x1a) { 203 } else if (op_code == 0x1a) {
197 reg_b = (insn >> 16) & 0x1f; 204 reg_b = (insn >> 16) & 0x1f;
198 child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b); 205 task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b);
199 DBG(DBG_BPT, ("execing jump\n")); 206 DBG(DBG_BPT, ("execing jump\n"));
200 } else { 207 } else {
201 child->thread_info->bpt_addr[nsaved++] = pc + 4; 208 task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
202 DBG(DBG_BPT, ("execing normal insn\n")); 209 DBG(DBG_BPT, ("execing normal insn\n"));
203 } 210 }
204 211
205 /* install breakpoints: */ 212 /* install breakpoints: */
206 for (i = 0; i < nsaved; ++i) { 213 for (i = 0; i < nsaved; ++i) {
207 res = read_int(child, child->thread_info->bpt_addr[i], 214 res = read_int(child, task_thread_info(child)->bpt_addr[i],
208 (int *) &insn); 215 (int *) &insn);
209 if (res < 0) 216 if (res < 0)
210 return res; 217 return res;
211 child->thread_info->bpt_insn[i] = insn; 218 task_thread_info(child)->bpt_insn[i] = insn;
212 DBG(DBG_BPT, (" -> next_pc=%lx\n", 219 DBG(DBG_BPT, (" -> next_pc=%lx\n",
213 child->thread_info->bpt_addr[i])); 220 task_thread_info(child)->bpt_addr[i]));
214 res = write_int(child, child->thread_info->bpt_addr[i], 221 res = write_int(child, task_thread_info(child)->bpt_addr[i],
215 BREAKINST); 222 BREAKINST);
216 if (res < 0) 223 if (res < 0)
217 return res; 224 return res;
218 } 225 }
219 child->thread_info->bpt_nsaved = nsaved; 226 task_thread_info(child)->bpt_nsaved = nsaved;
220 return 0; 227 return 0;
221} 228}
222 229
@@ -227,9 +234,9 @@ ptrace_set_bpt(struct task_struct * child)
227int 234int
228ptrace_cancel_bpt(struct task_struct * child) 235ptrace_cancel_bpt(struct task_struct * child)
229{ 236{
230 int i, nsaved = child->thread_info->bpt_nsaved; 237 int i, nsaved = task_thread_info(child)->bpt_nsaved;
231 238
232 child->thread_info->bpt_nsaved = 0; 239 task_thread_info(child)->bpt_nsaved = 0;
233 240
234 if (nsaved > 2) { 241 if (nsaved > 2) {
235 printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); 242 printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
@@ -237,8 +244,8 @@ ptrace_cancel_bpt(struct task_struct * child)
237 } 244 }
238 245
239 for (i = 0; i < nsaved; ++i) { 246 for (i = 0; i < nsaved; ++i) {
240 write_int(child, child->thread_info->bpt_addr[i], 247 write_int(child, task_thread_info(child)->bpt_addr[i],
241 child->thread_info->bpt_insn[i]); 248 task_thread_info(child)->bpt_insn[i]);
242 } 249 }
243 return (nsaved != 0); 250 return (nsaved != 0);
244} 251}
@@ -355,7 +362,7 @@ do_sys_ptrace(long request, long pid, long addr, long data,
355 if (!valid_signal(data)) 362 if (!valid_signal(data))
356 break; 363 break;
357 /* Mark single stepping. */ 364 /* Mark single stepping. */
358 child->thread_info->bpt_nsaved = -1; 365 task_thread_info(child)->bpt_nsaved = -1;
359 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 366 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
360 child->exit_code = data; 367 child->exit_code = data;
361 wake_up_process(child); 368 wake_up_process(child);
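
A note on the PT_REG()/SW_REG() helpers added to ptrace.c above: they encode the
same stack layout that the deleted code in process.c used to open-code, with
pt_regs at the very top of the two-page kernel stack and switch_stack directly
below it. Assuming Alpha's 8 KiB PAGE_SIZE, the picture is:

    task_stack_page(task)                          base of the 16 KiB stack
    base + 2*PAGE_SIZE - sizeof(struct pt_regs)    saved pt_regs (PT_REG offsets)
         ... - sizeof(struct switch_stack)         saved switch_stack (SW_REG offsets)

The 16 KiB figure matches the base + 16*1024 bound that thread_saved_pc() checks
in process.c, and it is why get_reg_addr() can now reduce to
task_stack_page(task) + regoff[regno].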
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index da0be3465791..4b873527ce1c 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -302,7 +302,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
302 + hwrpb->processor_offset 302 + hwrpb->processor_offset
303 + cpuid * hwrpb->processor_size); 303 + cpuid * hwrpb->processor_size);
304 hwpcb = (struct pcb_struct *) cpu->hwpcb; 304 hwpcb = (struct pcb_struct *) cpu->hwpcb;
305 ipcb = &idle->thread_info->pcb; 305 ipcb = &task_thread_info(idle)->pcb;
306 306
307 /* Initialize the CPU's HWPCB to something just good enough for 307 /* Initialize the CPU's HWPCB to something just good enough for
308 us to get started. Immediately after starting, we'll swpctx 308 us to get started. Immediately after starting, we'll swpctx
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 54a21bdcba5c..4b4e4cf79c80 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -342,10 +342,10 @@ void flush_thread(void)
342void release_thread(struct task_struct *dead_task) 342void release_thread(struct task_struct *dead_task)
343{ 343{
344#if defined(CONFIG_VFP) 344#if defined(CONFIG_VFP)
345 vfp_release_thread(&dead_task->thread_info->vfpstate); 345 vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
346#endif 346#endif
347#if defined(CONFIG_IWMMXT) 347#if defined(CONFIG_IWMMXT)
348 iwmmxt_task_release(dead_task->thread_info); 348 iwmmxt_task_release(task_thread_info(dead_task));
349#endif 349#endif
350} 350}
351 351
@@ -355,10 +355,9 @@ int
355copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start, 355copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
356 unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs) 356 unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
357{ 357{
358 struct thread_info *thread = p->thread_info; 358 struct thread_info *thread = task_thread_info(p);
359 struct pt_regs *childregs; 359 struct pt_regs *childregs = task_pt_regs(p);
360 360
361 childregs = (void *)thread + THREAD_START_SP - sizeof(*regs);
362 *childregs = *regs; 361 *childregs = *regs;
363 childregs->ARM_r0 = 0; 362 childregs->ARM_r0 = 0;
364 childregs->ARM_sp = stack_start; 363 childregs->ARM_sp = stack_start;
@@ -460,8 +459,8 @@ unsigned long get_wchan(struct task_struct *p)
460 if (!p || p == current || p->state == TASK_RUNNING) 459 if (!p || p == current || p->state == TASK_RUNNING)
461 return 0; 460 return 0;
462 461
463 stack_start = (unsigned long)(p->thread_info + 1); 462 stack_start = (unsigned long)end_of_stack(p);
464 stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE; 463 stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
465 464
466 fp = thread_saved_fp(p); 465 fp = thread_saved_fp(p);
467 do { 466 do {
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 2b84f78d7b0f..e591f72bcdeb 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -55,23 +55,6 @@
55#endif 55#endif
56 56
57/* 57/*
58 * Get the address of the live pt_regs for the specified task.
59 * These are saved onto the top kernel stack when the process
60 * is not running.
61 *
62 * Note: if a user thread is execve'd from kernel space, the
63 * kernel stack will not be empty on entry to the kernel, so
64 * ptracing these tasks will fail.
65 */
66static inline struct pt_regs *
67get_user_regs(struct task_struct *task)
68{
69 return (struct pt_regs *)
70 ((unsigned long)task->thread_info + THREAD_SIZE -
71 8 - sizeof(struct pt_regs));
72}
73
74/*
75 * this routine will get a word off of the processes privileged stack. 58 * this routine will get a word off of the processes privileged stack.
76 * the offset is how far from the base addr as stored in the THREAD. 59 * the offset is how far from the base addr as stored in the THREAD.
77 * this routine assumes that all the privileged stacks are in our 60 * this routine assumes that all the privileged stacks are in our
@@ -79,7 +62,7 @@ get_user_regs(struct task_struct *task)
79 */ 62 */
80static inline long get_user_reg(struct task_struct *task, int offset) 63static inline long get_user_reg(struct task_struct *task, int offset)
81{ 64{
82 return get_user_regs(task)->uregs[offset]; 65 return task_pt_regs(task)->uregs[offset];
83} 66}
84 67
85/* 68/*
@@ -91,7 +74,7 @@ static inline long get_user_reg(struct task_struct *task, int offset)
91static inline int 74static inline int
92put_user_reg(struct task_struct *task, int offset, long data) 75put_user_reg(struct task_struct *task, int offset, long data)
93{ 76{
94 struct pt_regs newregs, *regs = get_user_regs(task); 77 struct pt_regs newregs, *regs = task_pt_regs(task);
95 int ret = -EINVAL; 78 int ret = -EINVAL;
96 79
97 newregs = *regs; 80 newregs = *regs;
@@ -421,7 +404,7 @@ void ptrace_set_bpt(struct task_struct *child)
421 u32 insn; 404 u32 insn;
422 int res; 405 int res;
423 406
424 regs = get_user_regs(child); 407 regs = task_pt_regs(child);
425 pc = instruction_pointer(regs); 408 pc = instruction_pointer(regs);
426 409
427 if (thumb_mode(regs)) { 410 if (thumb_mode(regs)) {
@@ -572,7 +555,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
572 */ 555 */
573static int ptrace_getregs(struct task_struct *tsk, void __user *uregs) 556static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
574{ 557{
575 struct pt_regs *regs = get_user_regs(tsk); 558 struct pt_regs *regs = task_pt_regs(tsk);
576 559
577 return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; 560 return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
578} 561}
@@ -587,7 +570,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
587 570
588 ret = -EFAULT; 571 ret = -EFAULT;
589 if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { 572 if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
590 struct pt_regs *regs = get_user_regs(tsk); 573 struct pt_regs *regs = task_pt_regs(tsk);
591 574
592 ret = -EINVAL; 575 ret = -EINVAL;
593 if (valid_user_regs(&newregs)) { 576 if (valid_user_regs(&newregs)) {
@@ -604,7 +587,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
604 */ 587 */
605static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp) 588static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
606{ 589{
607 return copy_to_user(ufp, &tsk->thread_info->fpstate, 590 return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
608 sizeof(struct user_fp)) ? -EFAULT : 0; 591 sizeof(struct user_fp)) ? -EFAULT : 0;
609} 592}
610 593
@@ -613,7 +596,7 @@ static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
613 */ 596 */
614static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp) 597static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
615{ 598{
616 struct thread_info *thread = tsk->thread_info; 599 struct thread_info *thread = task_thread_info(tsk);
617 thread->used_cp[1] = thread->used_cp[2] = 1; 600 thread->used_cp[1] = thread->used_cp[2] = 1;
618 return copy_from_user(&thread->fpstate, ufp, 601 return copy_from_user(&thread->fpstate, ufp,
619 sizeof(struct user_fp)) ? -EFAULT : 0; 602 sizeof(struct user_fp)) ? -EFAULT : 0;
@@ -626,7 +609,7 @@ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
626 */ 609 */
627static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) 610static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
628{ 611{
629 struct thread_info *thread = tsk->thread_info; 612 struct thread_info *thread = task_thread_info(tsk);
630 void *ptr = &thread->fpstate; 613 void *ptr = &thread->fpstate;
631 614
632 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) 615 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
@@ -643,7 +626,7 @@ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
643 */ 626 */
644static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) 627static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
645{ 628{
646 struct thread_info *thread = tsk->thread_info; 629 struct thread_info *thread = task_thread_info(tsk);
647 void *ptr = &thread->fpstate; 630 void *ptr = &thread->fpstate;
648 631
649 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) 632 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
@@ -779,7 +762,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
779#endif 762#endif
780 763
781 case PTRACE_GET_THREAD_AREA: 764 case PTRACE_GET_THREAD_AREA:
782 ret = put_user(child->thread_info->tp_value, 765 ret = put_user(task_thread_info(child)->tp_value,
783 (unsigned long __user *) data); 766 (unsigned long __user *) data);
784 break; 767 break;
785 768
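
The get_user_regs() helper deleted above does not lose its body; it moves behind
the generic name. A sketch of what ARM's task_pt_regs() expands to after this
series (an assumption based on the asm-arm headers in the diffstat, not quoted
verbatim):

    #define task_pt_regs(p) \
            ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

with THREAD_START_SP being THREAD_SIZE - 8, i.e. the same "top of stack, minus an
8-byte pad, minus one pt_regs" that the removed comment described. The caveat from
that comment still applies: if a user thread is execve'd from kernel space, the
kernel stack is not empty on entry to the kernel, so ptracing such tasks will fail.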
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 373c0959bc2f..7338948bd7d3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -114,7 +114,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
114 * We need to tell the secondary core where to find 114 * We need to tell the secondary core where to find
115 * its stack and the page tables. 115 * its stack and the page tables.
116 */ 116 */
117 secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP; 117 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
118 secondary_data.pgdir = virt_to_phys(pgd); 118 secondary_data.pgdir = virt_to_phys(pgd);
119 wmb(); 119 wmb();
120 120
@@ -245,7 +245,7 @@ void __cpuexit cpu_die(void)
245 __asm__("mov sp, %0\n" 245 __asm__("mov sp, %0\n"
246 " b secondary_start_kernel" 246 " b secondary_start_kernel"
247 : 247 :
248 : "r" ((void *)current->thread_info + THREAD_SIZE - 8)); 248 : "r" (task_stack_page(current) + THREAD_SIZE - 8));
249} 249}
250#endif /* CONFIG_HOTPLUG_CPU */ 250#endif /* CONFIG_HOTPLUG_CPU */
251 251
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c9fe6f5f7ee3..93cfd3ffcc72 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -164,7 +164,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
164 } else if (verify_stack(fp)) { 164 } else if (verify_stack(fp)) {
165 printk("invalid frame pointer 0x%08x", fp); 165 printk("invalid frame pointer 0x%08x", fp);
166 ok = 0; 166 ok = 0;
167 } else if (fp < (unsigned long)(tsk->thread_info + 1)) 167 } else if (fp < (unsigned long)end_of_stack(tsk))
168 printk("frame pointer underflow"); 168 printk("frame pointer underflow");
169 printk("\n"); 169 printk("\n");
170 170
@@ -210,7 +210,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
210 210
211 if (!user_mode(regs) || in_interrupt()) { 211 if (!user_mode(regs) || in_interrupt()) {
212 dump_mem("Stack: ", regs->ARM_sp, 212 dump_mem("Stack: ", regs->ARM_sp,
213 THREAD_SIZE + (unsigned long)tsk->thread_info); 213 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
214 dump_backtrace(regs, tsk); 214 dump_backtrace(regs, tsk);
215 dump_instr(regs); 215 dump_instr(regs);
216 } 216 }
diff --git a/arch/arm26/kernel/process.c b/arch/arm26/kernel/process.c
index 15833a0057dd..386305659171 100644
--- a/arch/arm26/kernel/process.c
+++ b/arch/arm26/kernel/process.c
@@ -277,10 +277,9 @@ int
277copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start, 277copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
278 unsigned long unused, struct task_struct *p, struct pt_regs *regs) 278 unsigned long unused, struct task_struct *p, struct pt_regs *regs)
279{ 279{
280 struct thread_info *thread = p->thread_info; 280 struct thread_info *thread = task_thread_info(p);
281 struct pt_regs *childregs; 281 struct pt_regs *childregs = task_pt_regs(p);
282 282
283 childregs = __get_user_regs(thread);
284 *childregs = *regs; 283 *childregs = *regs;
285 childregs->ARM_r0 = 0; 284 childregs->ARM_r0 = 0;
286 childregs->ARM_sp = stack_start; 285 childregs->ARM_sp = stack_start;
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index 4e6b7356a722..3c3371d4683e 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -40,21 +40,6 @@
40#define BREAKINST_ARM 0xef9f0001 40#define BREAKINST_ARM 0xef9f0001
41 41
42/* 42/*
43 * Get the address of the live pt_regs for the specified task.
44 * These are saved onto the top kernel stack when the process
45 * is not running.
46 *
47 * Note: if a user thread is execve'd from kernel space, the
48 * kernel stack will not be empty on entry to the kernel, so
49 * ptracing these tasks will fail.
50 */
51static inline struct pt_regs *
52get_user_regs(struct task_struct *task)
53{
54 return __get_user_regs(task->thread_info);
55}
56
57/*
58 * this routine will get a word off of the processes privileged stack. 43 * this routine will get a word off of the processes privileged stack.
59 * the offset is how far from the base addr as stored in the THREAD. 44 * the offset is how far from the base addr as stored in the THREAD.
60 * this routine assumes that all the privileged stacks are in our 45 * this routine assumes that all the privileged stacks are in our
@@ -62,7 +47,7 @@ get_user_regs(struct task_struct *task)
62 */ 47 */
63static inline long get_user_reg(struct task_struct *task, int offset) 48static inline long get_user_reg(struct task_struct *task, int offset)
64{ 49{
65 return get_user_regs(task)->uregs[offset]; 50 return task_pt_regs(task)->uregs[offset];
66} 51}
67 52
68/* 53/*
@@ -74,7 +59,7 @@ static inline long get_user_reg(struct task_struct *task, int offset)
74static inline int 59static inline int
75put_user_reg(struct task_struct *task, int offset, long data) 60put_user_reg(struct task_struct *task, int offset, long data)
76{ 61{
77 struct pt_regs newregs, *regs = get_user_regs(task); 62 struct pt_regs newregs, *regs = task_pt_regs(task);
78 int ret = -EINVAL; 63 int ret = -EINVAL;
79 64
80 newregs = *regs; 65 newregs = *regs;
@@ -377,7 +362,7 @@ void ptrace_set_bpt(struct task_struct *child)
377 u32 insn; 362 u32 insn;
378 int res; 363 int res;
379 364
380 regs = get_user_regs(child); 365 regs = task_pt_regs(child);
381 pc = instruction_pointer(regs); 366 pc = instruction_pointer(regs);
382 367
383 res = read_instr(child, pc, &insn); 368 res = read_instr(child, pc, &insn);
@@ -500,7 +485,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
500 */ 485 */
501static int ptrace_getregs(struct task_struct *tsk, void *uregs) 486static int ptrace_getregs(struct task_struct *tsk, void *uregs)
502{ 487{
503 struct pt_regs *regs = get_user_regs(tsk); 488 struct pt_regs *regs = task_pt_regs(tsk);
504 489
505 return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0; 490 return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
506} 491}
@@ -515,7 +500,7 @@ static int ptrace_setregs(struct task_struct *tsk, void *uregs)
515 500
516 ret = -EFAULT; 501 ret = -EFAULT;
517 if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) { 502 if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
518 struct pt_regs *regs = get_user_regs(tsk); 503 struct pt_regs *regs = task_pt_regs(tsk);
519 504
520 ret = -EINVAL; 505 ret = -EINVAL;
521 if (valid_user_regs(&newregs)) { 506 if (valid_user_regs(&newregs)) {
@@ -532,7 +517,7 @@ static int ptrace_setregs(struct task_struct *tsk, void *uregs)
532 */ 517 */
533static int ptrace_getfpregs(struct task_struct *tsk, void *ufp) 518static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
534{ 519{
535 return copy_to_user(ufp, &tsk->thread_info->fpstate, 520 return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
536 sizeof(struct user_fp)) ? -EFAULT : 0; 521 sizeof(struct user_fp)) ? -EFAULT : 0;
537} 522}
538 523
@@ -542,7 +527,7 @@ static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
542static int ptrace_setfpregs(struct task_struct *tsk, void *ufp) 527static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
543{ 528{
544 set_stopped_child_used_math(tsk); 529 set_stopped_child_used_math(tsk);
 545 return copy_from_user(&tsk->thread_info->fpstate, ufp, 530 return copy_from_user(&task_thread_info(tsk)->fpstate, ufp,
546 sizeof(struct user_fp)) ? -EFAULT : 0; 531 sizeof(struct user_fp)) ? -EFAULT : 0;
547} 532}
548 533
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index f64f59022392..5847ea5d7747 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -132,7 +132,7 @@ static void dump_instr(struct pt_regs *regs)
132 132
133/*static*/ void __dump_stack(struct task_struct *tsk, unsigned long sp) 133/*static*/ void __dump_stack(struct task_struct *tsk, unsigned long sp)
134{ 134{
135 dump_mem("Stack: ", sp, 8192+(unsigned long)tsk->thread_info); 135 dump_mem("Stack: ", sp, 8192+(unsigned long)task_stack_page(tsk));
136} 136}
137 137
138void dump_stack(void) 138void dump_stack(void)
@@ -158,7 +158,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
158 } else if (verify_stack(fp)) { 158 } else if (verify_stack(fp)) {
159 printk("invalid frame pointer 0x%08x", fp); 159 printk("invalid frame pointer 0x%08x", fp);
160 ok = 0; 160 ok = 0;
161 } else if (fp < (unsigned long)(tsk->thread_info + 1)) 161 } else if (fp < (unsigned long)end_of_stack(tsk))
162 printk("frame pointer underflow"); 162 printk("frame pointer underflow");
163 printk("\n"); 163 printk("\n");
164 164
@@ -168,7 +168,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
168 168
169/* FIXME - this is probably wrong.. */ 169/* FIXME - this is probably wrong.. */
170void show_stack(struct task_struct *task, unsigned long *sp) { 170void show_stack(struct task_struct *task, unsigned long *sp) {
171 dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task->thread_info); 171 dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task_stack_page(task));
172} 172}
173 173
174DEFINE_SPINLOCK(die_lock); 174DEFINE_SPINLOCK(die_lock);
@@ -187,7 +187,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
187 printk("CPU: %d\n", smp_processor_id()); 187 printk("CPU: %d\n", smp_processor_id());
188 show_regs(regs); 188 show_regs(regs);
189 printk("Process %s (pid: %d, stack limit = 0x%p)\n", 189 printk("Process %s (pid: %d, stack limit = 0x%p)\n",
190 current->comm, current->pid, tsk->thread_info + 1); 190 current->comm, current->pid, end_of_stack(tsk));
191 191
192 if (!user_mode(regs) || in_interrupt()) { 192 if (!user_mode(regs) || in_interrupt()) {
193 __dump_stack(tsk, (unsigned long)(regs + 1)); 193 __dump_stack(tsk, (unsigned long)(regs + 1));
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index 69e28b4057e8..0a675ce9e099 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -79,7 +79,7 @@ void hard_reset_now (void)
79 */ 79 */
80unsigned long thread_saved_pc(struct task_struct *t) 80unsigned long thread_saved_pc(struct task_struct *t)
81{ 81{
82 return (unsigned long)user_regs(t->thread_info)->irp; 82 return task_pt_regs(t)->irp;
83} 83}
84 84
85static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg) 85static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg)
@@ -128,7 +128,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
128 * remember that the task_struct doubles as the kernel stack for the task 128 * remember that the task_struct doubles as the kernel stack for the task
129 */ 129 */
130 130
131 childregs = user_regs(p->thread_info); 131 childregs = task_pt_regs(p);
132 132
133 *childregs = *regs; /* struct copy of pt_regs */ 133 *childregs = *regs; /* struct copy of pt_regs */
134 134
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index 6cbd34a27b90..f214f74f264e 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -37,7 +37,7 @@ inline long get_reg(struct task_struct *task, unsigned int regno)
37 if (regno == PT_USP) 37 if (regno == PT_USP)
38 return task->thread.usp; 38 return task->thread.usp;
39 else if (regno < PT_MAX) 39 else if (regno < PT_MAX)
40 return ((unsigned long *)user_regs(task->thread_info))[regno]; 40 return ((unsigned long *)task_pt_regs(task))[regno];
41 else 41 else
42 return 0; 42 return 0;
43} 43}
@@ -51,7 +51,7 @@ inline int put_reg(struct task_struct *task, unsigned int regno,
51 if (regno == PT_USP) 51 if (regno == PT_USP)
52 task->thread.usp = data; 52 task->thread.usp = data;
53 else if (regno < PT_MAX) 53 else if (regno < PT_MAX)
54 ((unsigned long *)user_regs(task->thread_info))[regno] = data; 54 ((unsigned long *)task_pt_regs(task))[regno] = data;
55 else 55 else
56 return -1; 56 return -1;
57 return 0; 57 return 0;
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 882be42114f7..843513102d3c 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -96,7 +96,7 @@ hard_reset_now(void)
96 */ 96 */
97unsigned long thread_saved_pc(struct task_struct *t) 97unsigned long thread_saved_pc(struct task_struct *t)
98{ 98{
99 return (unsigned long)user_regs(t->thread_info)->erp; 99 return task_pt_regs(t)->erp;
100} 100}
101 101
102static void 102static void
@@ -148,7 +148,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
148 * fix it up. Note: the task_struct doubles as the kernel stack for the 148 * fix it up. Note: the task_struct doubles as the kernel stack for the
149 * task. 149 * task.
150 */ 150 */
151 childregs = user_regs(p->thread_info); 151 childregs = task_pt_regs(p);
152 *childregs = *regs; /* Struct copy of pt_regs. */ 152 *childregs = *regs; /* Struct copy of pt_regs. */
153 p->set_child_tid = p->clear_child_tid = NULL; 153 p->set_child_tid = p->clear_child_tid = NULL;
154 childregs->r10 = 0; /* Child returns 0 after a fork/clone. */ 154 childregs->r10 = 0; /* Child returns 0 after a fork/clone. */
@@ -157,7 +157,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
157 * The TLS is in $mof beacuse it is the 5th argument to sys_clone. 157 * The TLS is in $mof beacuse it is the 5th argument to sys_clone.
158 */ 158 */
159 if (p->mm && (clone_flags & CLONE_SETTLS)) { 159 if (p->mm && (clone_flags & CLONE_SETTLS)) {
160 p->thread_info->tls = regs->mof; 160 task_thread_info(p)->tls = regs->mof;
161 } 161 }
162 162
163 /* Put the switch stack right below the pt_regs. */ 163 /* Put the switch stack right below the pt_regs. */
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index 5528b83a622b..82cf2e3624a4 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -46,7 +46,7 @@ long get_reg(struct task_struct *task, unsigned int regno)
46 unsigned long ret; 46 unsigned long ret;
47 47
48 if (regno <= PT_EDA) 48 if (regno <= PT_EDA)
49 ret = ((unsigned long *)user_regs(task->thread_info))[regno]; 49 ret = ((unsigned long *)task_pt_regs(task))[regno];
50 else if (regno == PT_USP) 50 else if (regno == PT_USP)
51 ret = task->thread.usp; 51 ret = task->thread.usp;
52 else if (regno == PT_PPC) 52 else if (regno == PT_PPC)
@@ -65,13 +65,13 @@ long get_reg(struct task_struct *task, unsigned int regno)
65int put_reg(struct task_struct *task, unsigned int regno, unsigned long data) 65int put_reg(struct task_struct *task, unsigned int regno, unsigned long data)
66{ 66{
67 if (regno <= PT_EDA) 67 if (regno <= PT_EDA)
68 ((unsigned long *)user_regs(task->thread_info))[regno] = data; 68 ((unsigned long *)task_pt_regs(task))[regno] = data;
69 else if (regno == PT_USP) 69 else if (regno == PT_USP)
70 task->thread.usp = data; 70 task->thread.usp = data;
71 else if (regno == PT_PPC) { 71 else if (regno == PT_PPC) {
72 /* Write pseudo-PC to ERP only if changed. */ 72 /* Write pseudo-PC to ERP only if changed. */
73 if (data != get_pseudo_pc(task)) 73 if (data != get_pseudo_pc(task))
74 ((unsigned long *)user_regs(task->thread_info))[PT_ERP] = data; 74 task_pt_regs(task)->erp = data;
75 } else if (regno <= PT_MAX) 75 } else if (regno <= PT_MAX)
76 return put_debugreg(task->pid, regno, data); 76 return put_debugreg(task->pid, regno, data);
77 else 77 else
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 13867f4fad16..da40d19a151e 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -113,10 +113,10 @@ smp_boot_one_cpu(int cpuid)
113 if (IS_ERR(idle)) 113 if (IS_ERR(idle))
114 panic("SMP: fork failed for CPU:%d", cpuid); 114 panic("SMP: fork failed for CPU:%d", cpuid);
115 115
116 idle->thread_info->cpu = cpuid; 116 task_thread_info(idle)->cpu = cpuid;
117 117
118 /* Information to the CPU that is about to boot */ 118 /* Information to the CPU that is about to boot */
119 smp_init_current_idle_thread = idle->thread_info; 119 smp_init_current_idle_thread = task_thread_info(idle);
120 cpu_now_booting = cpuid; 120 cpu_now_booting = cpuid;
121 121
122 /* Wait for CPU to come online */ 122 /* Wait for CPU to come online */
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
index b08a28bb58ab..9d75d7692303 100644
--- a/arch/cris/arch-v32/mm/tlb.c
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -198,9 +198,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
198 per_cpu(current_pgd, cpu) = next->pgd; 198 per_cpu(current_pgd, cpu) = next->pgd;
199 199
200 /* Switch context in the MMU. */ 200 /* Switch context in the MMU. */
201 if (tsk && tsk->thread_info) 201 if (tsk && task_thread_info(tsk))
202 { 202 {
203 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | tsk->thread_info->tls); 203 SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | task_thread_info(tsk)->tls);
204 } 204 }
205 else 205 else
206 { 206 {
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index c4488379ac3b..0fff8a61ef2a 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -204,7 +204,7 @@ int copy_thread(int nr, unsigned long clone_flags,
204 204
205 regs0 = __kernel_frame0_ptr; 205 regs0 = __kernel_frame0_ptr;
206 childregs0 = (struct pt_regs *) 206 childregs0 = (struct pt_regs *)
207 ((unsigned long) p->thread_info + THREAD_SIZE - USER_CONTEXT_SIZE); 207 (task_stack_page(p) + THREAD_SIZE - USER_CONTEXT_SIZE);
208 childregs = childregs0; 208 childregs = childregs0;
209 209
210 /* set up the userspace frame (the only place that the USP is stored) */ 210 /* set up the userspace frame (the only place that the USP is stored) */
@@ -220,7 +220,7 @@ int copy_thread(int nr, unsigned long clone_flags,
220 *childregs = *regs; 220 *childregs = *regs;
221 childregs->sp = (unsigned long) childregs0; 221 childregs->sp = (unsigned long) childregs0;
222 childregs->next_frame = childregs0; 222 childregs->next_frame = childregs0;
223 childregs->gr15 = (unsigned long) p->thread_info; 223 childregs->gr15 = (unsigned long) task_thread_info(p);
224 childregs->gr29 = (unsigned long) p; 224 childregs->gr29 = (unsigned long) p;
225 } 225 }
226 226
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 585ed5efd0f7..ed79ae20e88d 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -195,7 +195,7 @@ int copy_thread(int nr, unsigned long clone_flags,
195{ 195{
196 struct pt_regs * childregs; 196 struct pt_regs * childregs;
197 197
198 childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; 198 childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
199 199
200 *childregs = *regs; 200 *childregs = *regs;
201 childregs->retpc = (unsigned long) ret_from_fork; 201 childregs->retpc = (unsigned long) ret_from_fork;
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 035928f3f6c1..2185377fdde1 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -424,18 +424,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
424 struct task_struct *tsk; 424 struct task_struct *tsk;
425 int err; 425 int err;
426 426
427 childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; 427 childregs = task_pt_regs(p);
428 /*
429 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
430 * This is necessary to guarantee that the entire "struct pt_regs"
431 * is accessable even if the CPU haven't stored the SS/ESP registers
432 * on the stack (interrupt gate does not save these registers
433 * when switching to the same priv ring).
434 * Therefore beware: accessing the xss/esp fields of the
435 * "struct pt_regs" is possible, but they may contain the
436 * completely wrong values.
437 */
438 childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
439 *childregs = *regs; 428 *childregs = *regs;
440 childregs->eax = 0; 429 childregs->eax = 0;
441 childregs->esp = esp; 430 childregs->esp = esp;
@@ -540,12 +529,7 @@ EXPORT_SYMBOL(dump_thread);
540 */ 529 */
541int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) 530int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
542{ 531{
543 struct pt_regs ptregs; 532 struct pt_regs ptregs = *task_pt_regs(tsk);
544
545 ptregs = *(struct pt_regs *)
546 ((unsigned long)tsk->thread_info +
547 /* see comments in copy_thread() about -8 */
548 THREAD_SIZE - sizeof(ptregs) - 8);
549 ptregs.xcs &= 0xffff; 533 ptregs.xcs &= 0xffff;
550 ptregs.xds &= 0xffff; 534 ptregs.xds &= 0xffff;
551 ptregs.xes &= 0xffff; 535 ptregs.xes &= 0xffff;
@@ -601,8 +585,8 @@ static inline void disable_tsc(struct task_struct *prev_p,
601 * gcc should eliminate the ->thread_info dereference if 585 * gcc should eliminate the ->thread_info dereference if
602 * has_secure_computing returns 0 at compile time (SECCOMP=n). 586 * has_secure_computing returns 0 at compile time (SECCOMP=n).
603 */ 587 */
604 prev = prev_p->thread_info; 588 prev = task_thread_info(prev_p);
605 next = next_p->thread_info; 589 next = task_thread_info(next_p);
606 590
607 if (has_secure_computing(prev) || has_secure_computing(next)) { 591 if (has_secure_computing(prev) || has_secure_computing(next)) {
608 /* slow path here */ 592 /* slow path here */
@@ -787,7 +771,7 @@ unsigned long get_wchan(struct task_struct *p)
787 int count = 0; 771 int count = 0;
788 if (!p || p == current || p->state == TASK_RUNNING) 772 if (!p || p == current || p->state == TASK_RUNNING)
789 return 0; 773 return 0;
790 stack_page = (unsigned long)p->thread_info; 774 stack_page = (unsigned long)task_stack_page(p);
791 esp = p->thread.esp; 775 esp = p->thread.esp;
792 if (!stack_page || esp < stack_page || esp > top_esp+stack_page) 776 if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
793 return 0; 777 return 0;
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b3c2e2c26743..255adb498268 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -875,8 +875,7 @@ static inline struct task_struct * alloc_idle_task(int cpu)
875 /* initialize thread_struct. we really want to avoid destroy 875 /* initialize thread_struct. we really want to avoid destroy
876 * idle tread 876 * idle tread
877 */ 877 */
878 idle->thread.esp = (unsigned long)(((struct pt_regs *) 878 idle->thread.esp = (unsigned long)task_pt_regs(idle);
879 (THREAD_SIZE + (unsigned long) idle->thread_info)) - 1);
880 init_idle(idle, cpu); 879 init_idle(idle, cpu);
881 return idle; 880 return idle;
882 } 881 }
@@ -1096,6 +1095,7 @@ static void smp_tune_scheduling (void)
1096 cachesize = 16; /* Pentiums, 2x8kB cache */ 1095 cachesize = 16; /* Pentiums, 2x8kB cache */
1097 bandwidth = 100; 1096 bandwidth = 100;
1098 } 1097 }
1098 max_cache_size = cachesize * 1024;
1099 } 1099 }
1100} 1100}
1101 1101
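
One functional change hides in the smpboot.c hunk above: smp_tune_scheduling() now
exports its estimate. Since cachesize is counted in kilobytes,

    max_cache_size = cachesize * 1024;  /* bytes */

feeds the migration-cost auto-detection added to kernel/sched.c (the 478-line
entry in the diffstat), which uses it to bound the buffer sizes over which it
measures task migration cost; that is the same matrix which the migration_cost=
and migration_factor= parameters documented earlier override or scale.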
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index cbdb0afed76a..0c90ae54ddfa 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -311,7 +311,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
311 "movl %1,%%ebp\n\t" 311 "movl %1,%%ebp\n\t"
312 "jmp resume_userspace" 312 "jmp resume_userspace"
313 : /* no outputs */ 313 : /* no outputs */
314 :"r" (&info->regs), "r" (tsk->thread_info) : "ax"); 314 :"r" (&info->regs), "r" (task_thread_info(tsk)) : "ax");
315 /* we never return here */ 315 /* we never return here */
316} 316}
317 317
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index b73b8b6b10c1..a47f63b204fb 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -95,8 +95,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
 static inline int elf_core_copy_task_regs(struct task_struct *t,
 					  elf_gregset_t* elfregs)
 {
-	struct pt_regs *pp = ia64_task_regs(t);
-	ELF_CORE_COPY_REGS((*elfregs), pp);
+	ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
 	return 1;
 }
 
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index aa891c9bc9b6..5856510210fa 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -255,7 +255,7 @@ save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
 	 */
 	fp_tos = (fsr>>11)&0x7;
 	fr8_st_map = (8-fp_tos)&0x7;
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 	ia64f2ia32f(fpregp, &ptp->f8);
 	copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
@@ -389,7 +389,7 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
 	fr8_st_map = (8-fp_tos)&0x7;
 	fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
 	ia32f2ia64f(&ptp->f8, fpregp);
 	copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index 4f630043b3ae..c187743965a0 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -58,7 +58,7 @@ load_desc (u16 selector)
 void
 ia32_load_segment_descriptors (struct task_struct *task)
 {
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 
 	/* Setup the segment descriptors */
 	regs->r24 = load_desc(regs->r16 >> 16);	/* ESD */
@@ -113,7 +113,7 @@ void
 ia32_load_state (struct task_struct *t)
 {
 	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
-	struct pt_regs *regs = ia64_task_regs(t);
+	struct pt_regs *regs = task_pt_regs(t);
 
 	eflag = t->thread.eflag;
 	fsr = t->thread.fsr;
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 0668b2b7714d..3945d378bd7e 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1482,7 +1482,7 @@ getreg (struct task_struct *child, int regno)
 {
 	struct pt_regs *child_regs;
 
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	switch (regno / sizeof(int)) {
 	case PT_EBX: return child_regs->r11;
 	case PT_ECX: return child_regs->r9;
@@ -1510,7 +1510,7 @@ putreg (struct task_struct *child, int regno, unsigned int value)
 {
 	struct pt_regs *child_regs;
 
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	switch (regno / sizeof(int)) {
 	case PT_EBX: child_regs->r11 = value; break;
 	case PT_ECX: child_regs->r9 = value; break;
@@ -1626,7 +1626,7 @@ save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		put_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1659,7 +1659,7 @@ restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __us
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		get_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1690,7 +1690,7 @@ save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1734,7 +1734,7 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
 	 * Stack frames start with 16-bytes of temp space
 	 */
 	swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-	ptp = ia64_task_regs(tsk);
+	ptp = task_pt_regs(tsk);
 	tos = (tsk->thread.fsr >> 11) & 7;
 	for (i = 0; i < 8; i++)
 		get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
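
All four of these ia32 FP helpers repeat one piece of arithmetic: the x87 top-of-stack index sits in bits 11..13 of the saved FSR (tos = (fsr >> 11) & 7), and a stack-relative ST(i) maps to a physical register by rotating through it; the fr8_st_map = (8-fp_tos)&0x7 in ia32_signal.c earlier is the inverse rotation for f8..f15. A standalone demo of that mapping follows; the helper names are mine, only the bit positions and masks come from the code:

#include <stdio.h>

/* TOS field of the x87 status word: bits 11..13. */
static unsigned tos_from_fsr(unsigned long fsr)
{
	return (fsr >> 11) & 7;
}

/* Stack-relative ST(i) -> physical register index, rotated by TOS. */
static unsigned st_to_phys(unsigned st, unsigned tos)
{
	return (st + tos) & 7;
}

int main(void)
{
	unsigned long fsr = 5UL << 11;		/* example: TOS = 5 */
	unsigned tos = tos_from_fsr(fsr);

	printf("fr8_st_map = %u\n", (8 - tos) & 7);	/* inverse rotation */
	for (unsigned st = 0; st < 8; st++)
		printf("ST(%u) -> physical reg %u\n", st, st_to_phys(st, tos));
	return 0;
}
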
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 355af15287c7..ee7eec9ee576 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -766,7 +766,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 		l = strlen(previous_current->comm);
 		snprintf(comm, sizeof(comm), "%s %*s %d",
 			current->comm, l, previous_current->comm,
-			previous_current->thread_info->cpu);
+			task_thread_info(previous_current)->cpu);
 	}
 	memcpy(current->comm, comm, sizeof(current->comm));
 
@@ -1423,7 +1423,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
 	struct thread_info *ti;
 	memset(p, 0, KERNEL_STACK_SIZE);
-	ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
+	ti = task_thread_info(p);
 	ti->flags = _TIF_MCA_INIT;
 	ti->preempt_count = 1;
 	ti->task = p;
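
The format_mca_init_stack() change is slightly more than a rename: the removed line hard-coded what task_thread_info() encapsulates on ia64, where thread_info is presumed to live immediately behind the task_struct inside the one KERNEL_STACK_SIZE allocation (on most other architectures it sits at the base of a separately allocated stack). A mock of that co-located layout, with stand-in sizes and field names:

#include <assert.h>

struct mock_thread_info {
	unsigned long flags;
	int preempt_count;
	void *task;
};

struct mock_task { char pad[512]; };		/* stand-in for task_struct */

#define IA64_TASK_SIZE	sizeof(struct mock_task)	/* assumed constant */

/* ia64 flavour of task_thread_info(): thread_info directly follows
 * the task struct in the same allocation. */
static struct mock_thread_info *task_thread_info(struct mock_task *p)
{
	return (struct mock_thread_info *)((char *)p + IA64_TASK_SIZE);
}

int main(void)
{
	static char stack[4096];		/* stand-in for KERNEL_STACK_SIZE */
	struct mock_task *p = (struct mock_task *)stack;

	/* identical to the removed open-coded expression */
	assert((char *)task_thread_info(p) == (char *)p + IA64_TASK_SIZE);
	return 0;
}
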
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c026ac1142a6..bd87cb6b7a81 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1710,7 +1710,7 @@ static void
 pfm_syswide_force_stop(void *info)
 {
 	pfm_context_t *ctx = (pfm_context_t *)info;
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	struct task_struct *owner;
 	unsigned long flags;
 	int ret;
@@ -1815,7 +1815,7 @@ pfm_flush(struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1945,7 +1945,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -4052,7 +4052,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		ia64_psr(regs)->up = 0;
 	} else {
-		tregs = ia64_task_regs(task);
+		tregs = task_pt_regs(task);
 
 		/*
 		 * stop monitoring at the user level
@@ -4134,7 +4134,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		ia64_psr(regs)->up = 1;
 
 	} else {
-		tregs = ia64_task_regs(ctx->ctx_task);
+		tregs = task_pt_regs(ctx->ctx_task);
 
 		/*
 		 * start monitoring at the kernel level the next
@@ -4404,7 +4404,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	/*
 	 * when not current, task MUST be stopped, so this is safe
 	 */
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	/* force a full reload */
 	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4530,7 +4530,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * per-task mode
 	 */
-	tregs = task == current ? regs : ia64_task_regs(task);
+	tregs = task == current ? regs : task_pt_regs(task);
 
 	if (task == current) {
 		/*
@@ -4593,7 +4593,7 @@ pfm_exit_thread(struct task_struct *task)
 {
 	pfm_context_t *ctx;
 	unsigned long flags;
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 	int ret, state;
 	int free_ok = 0;
 
@@ -4926,7 +4926,7 @@ restart_args:
 	if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+	ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
 	call_made = 1;
 
@@ -5050,7 +5050,7 @@ pfm_handle_work(void)
 
 	pfm_clear_task_notify();
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	/*
 	 * extract reason for being here and clear
@@ -5794,7 +5794,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
 	 * on every CPU, so we can rely on the pid to identify the idle task.
 	 */
 	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
 		return;
 	}
@@ -5877,7 +5877,7 @@ pfm_save_regs(struct task_struct *task)
 	flags = pfm_protect_ctx_ctxsw(ctx);
 
 	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		pfm_clear_psr_up();
 
@@ -6077,7 +6077,7 @@ pfm_load_regs (struct task_struct *task)
 	BUG_ON(psr & IA64_PSR_I);
 
 	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6446,7 +6446,7 @@ pfm_alt_save_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6472,7 +6472,7 @@ pfm_alt_restore_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6754,7 +6754,7 @@ dump_pmu_state(const char *from)
 	local_irq_save(flags);
 
 	this_cpu = smp_processor_id();
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 	info = PFM_CPUINFO_GET();
 	dcr = ia64_getreg(_IA64_REG_CR_DCR);
 
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e9904c74d2ba..309d59658e5f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -328,7 +328,7 @@ ia64_save_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(task)))
+	if (IS_IA32_PROCESS(task_pt_regs(task)))
 		ia32_save_state(task);
 #endif
 }
@@ -353,7 +353,7 @@ ia64_load_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(task)))
+	if (IS_IA32_PROCESS(task_pt_regs(task)))
 		ia32_load_state(task);
 #endif
 }
@@ -488,7 +488,7 @@ copy_thread (int nr, unsigned long clone_flags,
 	 * If we're cloning an IA32 task then save the IA32 extra
 	 * state from the current task to the new task
 	 */
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		ia32_save_state(p);
 		if (clone_flags & CLONE_SETTLS)
 			retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@ int
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		/* A kernel thread is always a 64-bit process. */
 		current->thread.map_base = DEFAULT_MAP_BASE;
 		current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@ flush_thread (void)
 	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
-	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+	if (IS_IA32_PROCESS(task_pt_regs(current))) {
 		ia32_drop_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
@@ -755,7 +755,7 @@ exit_thread (void)
 	if (current->thread.flags & IA64_THREAD_DBG_VALID)
 		pfm_release_debug_registers(current);
 #endif
-	if (IS_IA32_PROCESS(ia64_task_regs(current)))
+	if (IS_IA32_PROCESS(task_pt_regs(current)))
 		ia32_drop_partial_page_list(current);
 }
 
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 8d88eeea02d1..eaed14aac6aa 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -254,7 +254,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
 	long num_regs, nbits;
 	struct pt_regs *pt;
 
-	pt = ia64_task_regs(task);
+	pt = task_pt_regs(task);
 	kbsp = (unsigned long *) sw->ar_bspstore;
 	ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -314,7 +314,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
 	struct pt_regs *pt;
 	unsigned long cfm, *urbs_kargs;
 
-	pt = ia64_task_regs(task);
+	pt = task_pt_regs(task);
 	kbsp = (unsigned long *) sw->ar_bspstore;
 	ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -407,7 +407,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 
 	urbs_end = (long *) user_rbs_end;
 	laddr = (unsigned long *) addr;
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 	struct pt_regs *child_regs;
 
 	laddr = (unsigned long *) addr;
-	child_regs = ia64_task_regs(child);
+	child_regs = task_pt_regs(child);
 	bspstore = (unsigned long *) child_regs->ar_bspstore;
 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@ thread_matches (struct task_struct *thread, unsigned long addr)
 		 */
 		return 0;
 
-	thread_regs = ia64_task_regs(thread);
+	thread_regs = task_pt_regs(thread);
 	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
 	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
 		return 0;
@@ -627,7 +627,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 inline void
 ia64_flush_fph (struct task_struct *task)
 {
-	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
 	/*
 	 * Prevent migrating this task while
@@ -653,7 +653,7 @@ ia64_flush_fph (struct task_struct *task)
 void
 ia64_sync_fph (struct task_struct *task)
 {
-	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
 	ia64_flush_fph(task);
 	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@ access_uarea (struct task_struct *child, unsigned long addr,
 	 + offsetof(struct pt_regs, reg)))
 
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 
 	if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
 		return -EIO;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 	unw_init_from_blocked_task(&info, child);
 	if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
 		return -EIO;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 	unw_init_from_blocked_task(&info, child);
 	if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 void
 ptrace_disable (struct task_struct *child)
 {
-	struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
 	/* make sure the single step/taken-branch trap bits are not set: */
 	child_psr->ss = 0;
@@ -1456,7 +1456,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 	if (ret < 0)
 		goto out_tsk;
 
-	pt = ia64_task_regs(child);
+	pt = task_pt_regs(child);
 	sw = (struct switch_stack *) (child->thread.ksp + 16);
 
 	switch (request) {
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c33305d8e5eb..c0766575a3a2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -60,6 +60,7 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
+#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
@@ -695,6 +696,7 @@ static void
 get_max_cacheline_size (void)
 {
 	unsigned long line_size, max = 1;
+	unsigned int cache_size = 0;
 	u64 l, levels, unique_caches;
 	pal_cache_config_info_t cci;
 	s64 status;
@@ -724,6 +726,8 @@ get_max_cacheline_size (void)
 		line_size = 1 << cci.pcci_line_size;
 		if (line_size > max)
 			max = line_size;
+		if (cache_size < cci.pcci_cache_size)
+			cache_size = cci.pcci_cache_size;
 		if (!cci.pcci_unified) {
 			status = ia64_pal_cache_config_info(l,
 						/* cache_type (instruction)= */ 1,
@@ -740,6 +744,9 @@ get_max_cacheline_size (void)
 			ia64_i_cache_stride_shift = cci.pcci_stride;
 	}
   out:
+#ifdef CONFIG_SMP
+	max_cache_size = max(max_cache_size, cache_size);
+#endif
 	if (max > ia64_max_cacheline_size)
 		ia64_max_cacheline_size = max;
 }
@@ -794,7 +801,7 @@ cpu_init (void)
 #endif
 
 	/* Clear the stack memory reserved for pt_regs: */
-	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
 
 	ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 
@@ -870,6 +877,15 @@ cpu_init (void)
 		pm_idle = default_idle;
 }
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+	ia64_sal_cache_flush(3);
+}
+
 void
 check_bugs (void)
 {
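
These setup.c hunks are the ia64 half of the scheduler's migration-cost autodetection seen earlier in i386's smp_tune_scheduling(): each architecture reports its largest cache into max_cache_size, and sched_cacheflush() gives the scheduler a way to start each probe cache-cold (the SAL call's argument 3 presumably selects flushing both instruction and data caches). A hypothetical userspace mock of how such a probe might look; none of these names beyond sched_cacheflush() appear in the patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

static size_t max_cache_size = 2 * 1024 * 1024;	/* assumed: 2MB cache */

/* Stand-in for the kernel's sched_cacheflush(); the real ia64 version
 * is a firmware call, ia64_sal_cache_flush(3). Here we just sweep a
 * buffer larger than the cache to evict it. */
static void sched_cacheflush(void)
{
	static volatile char scratch[8 * 1024 * 1024];
	for (size_t i = 0; i < sizeof(scratch); i += 64)
		scratch[i]++;
}

int main(void)
{
	char *buf = malloc(max_cache_size);
	if (!buf)
		return 1;

	sched_cacheflush();			/* measure from a cold cache */
	clock_t t0 = clock();
	memset(buf, 1, max_cache_size);
	printf("cold sweep of %zu bytes: %ld ticks\n",
	       max_cache_size, (long)(clock() - t0));
	free(buf);
	return 0;
}
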
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 58ce07efc56e..463f6bb44d07 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -655,11 +655,11 @@ set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
 
 		if (!t)
 			return;
-		t->thread_info->sigdelayed.signo = signo;
-		t->thread_info->sigdelayed.code = code;
-		t->thread_info->sigdelayed.addr = addr;
-		t->thread_info->sigdelayed.start_time = start_time;
-		t->thread_info->sigdelayed.pid = pid;
+		task_thread_info(t)->sigdelayed.signo = signo;
+		task_thread_info(t)->sigdelayed.code = code;
+		task_thread_info(t)->sigdelayed.addr = addr;
+		task_thread_info(t)->sigdelayed.start_time = start_time;
+		task_thread_info(t)->sigdelayed.pid = pid;
 		wmb();
 		set_tsk_thread_flag(t, TIF_SIGDELAYED);
 	}
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f2dbcd1db0d4..c7b943f10199 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -151,7 +151,7 @@ out:
 asmlinkage long
 sys_pipe (void)
 {
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	int fd[2];
 	int retval;
 
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 2a1f250349b7..5dfc7ea45cf7 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -242,13 +242,10 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
 	unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
 {
-	struct pt_regs *childregs;
-	unsigned long sp = (unsigned long)tsk->thread_info + THREAD_SIZE;
+	struct pt_regs *childregs = task_pt_regs(tsk);
 	extern void ret_from_fork(void);
 
 	/* Copy registers */
-	sp -= sizeof (struct pt_regs);
-	childregs = (struct pt_regs *)sp;
 	*childregs = *regs;
 
 	childregs->spu = spu;
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 9b75caaf5cec..340a3bf59b88 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -35,23 +35,6 @@
 #include <asm/mmu_context.h>
 
 /*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *
-get_user_regs(struct task_struct *task)
-{
-	return (struct pt_regs *)
-		((unsigned long)task->thread_info + THREAD_SIZE
-		 - sizeof(struct pt_regs));
-}
-
-/*
  * This routine will get a word off of the process kernel stack.
  */
 static inline unsigned long int
@@ -59,7 +42,7 @@ get_stack_long(struct task_struct *task, int offset)
 {
 	unsigned long *stack;
 
-	stack = (unsigned long *)get_user_regs(task);
+	stack = (unsigned long *)task_pt_regs(task);
 
 	return stack[offset];
 }
@@ -72,7 +55,7 @@ put_stack_long(struct task_struct *task, int offset, unsigned long data)
 {
 	unsigned long *stack;
 
-	stack = (unsigned long *)get_user_regs(task);
+	stack = (unsigned long *)task_pt_regs(task);
 	stack[offset] = data;
 
 	return 0;
@@ -208,7 +191,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
  */
 static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
 {
-	struct pt_regs *regs = get_user_regs(tsk);
+	struct pt_regs *regs = task_pt_regs(tsk);
 
 	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
 }
@@ -223,7 +206,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
 
 	ret = -EFAULT;
 	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
-		struct pt_regs *regs = get_user_regs(tsk);
+		struct pt_regs *regs = task_pt_regs(tsk);
 		*regs = newregs;
 		ret = 0;
 	}
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index b90c54169fa5..d7ec16e7fb25 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -286,7 +286,7 @@ static void __init do_boot_cpu(int phys_id)
 	/* So we see what's up   */
 	printk("Booting processor %d/%d\n", phys_id, cpu_id);
 	stack_start.spi = (void *)idle->thread.sp;
-	idle->thread_info->cpu = cpu_id;
+	task_thread_info(idle)->cpu = cpu_id;
 
 	/*
 	 * Send Startup IPI
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index d9edf2d1a492..b0aa61bf8700 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -126,9 +126,9 @@ void __init amiga_init_IRQ(void)
 	gayle.inten = GAYLE_IRQ_IDE;
 
 	/* turn off all interrupts and enable the master interrupt bit */
-	custom.intena = 0x7fff;
-	custom.intreq = 0x7fff;
-	custom.intena = IF_SETCLR | IF_INTEN;
+	amiga_custom.intena = 0x7fff;
+	amiga_custom.intreq = 0x7fff;
+	amiga_custom.intena = IF_SETCLR | IF_INTEN;
 
 	cia_init_IRQ(&ciaa_base);
 	cia_init_IRQ(&ciab_base);
@@ -245,7 +245,7 @@ int amiga_request_irq(unsigned int irq,
 
 	/* enable the interrupt */
 	if (irq < IRQ_AMIGA_PORTS && !ami_ablecount[irq])
-		custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+		amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
 
 	return error;
 }
@@ -274,7 +274,7 @@ void amiga_free_irq(unsigned int irq, void *dev_id)
 		amiga_delete_irq(&ami_irq_list[irq], dev_id);
 		/* if server list empty, disable the interrupt */
 		if (!ami_irq_list[irq] && irq < IRQ_AMIGA_PORTS)
-			custom.intena = amiga_intena_vals[irq];
+			amiga_custom.intena = amiga_intena_vals[irq];
 	} else {
 		if (ami_irq_list[irq]->dev_id != dev_id)
 			printk("%s: removing probably wrong IRQ %d from %s\n",
@@ -283,7 +283,7 @@ void amiga_free_irq(unsigned int irq, void *dev_id)
 		ami_irq_list[irq]->flags = 0;
 		ami_irq_list[irq]->dev_id = NULL;
 		ami_irq_list[irq]->devname = NULL;
-		custom.intena = amiga_intena_vals[irq];
+		amiga_custom.intena = amiga_intena_vals[irq];
 	}
 }
 
@@ -327,7 +327,7 @@ void amiga_enable_irq(unsigned int irq)
 	}
 
 	/* enable the interrupt */
-	custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+	amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
 }
 
 void amiga_disable_irq(unsigned int irq)
@@ -358,7 +358,7 @@ void amiga_disable_irq(unsigned int irq)
 	}
 
 	/* disable the interrupt */
-	custom.intena = amiga_intena_vals[irq];
+	amiga_custom.intena = amiga_intena_vals[irq];
 }
 
 inline void amiga_do_irq(int irq, struct pt_regs *fp)
@@ -373,7 +373,7 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
 
 	kstat_cpu(0).irqs[SYS_IRQS + irq]++;
 
-	custom.intreq = amiga_intena_vals[irq];
+	amiga_custom.intreq = amiga_intena_vals[irq];
 
 	for (node = ami_irq_list[irq]; node; node = node->next)
 		node->handler(irq, node->dev_id, fp);
@@ -385,23 +385,23 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
 
 static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
 {
-	unsigned short ints = custom.intreqr & custom.intenar;
+	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
 	/* if serial transmit buffer empty, interrupt */
 	if (ints & IF_TBE) {
-		custom.intreq = IF_TBE;
+		amiga_custom.intreq = IF_TBE;
 		amiga_do_irq(IRQ_AMIGA_TBE, fp);
 	}
 
 	/* if floppy disk transfer complete, interrupt */
 	if (ints & IF_DSKBLK) {
-		custom.intreq = IF_DSKBLK;
+		amiga_custom.intreq = IF_DSKBLK;
 		amiga_do_irq(IRQ_AMIGA_DSKBLK, fp);
 	}
 
 	/* if software interrupt set, interrupt */
 	if (ints & IF_SOFT) {
-		custom.intreq = IF_SOFT;
+		amiga_custom.intreq = IF_SOFT;
 		amiga_do_irq(IRQ_AMIGA_SOFT, fp);
 	}
 	return IRQ_HANDLED;
@@ -409,17 +409,17 @@ static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
 
 static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
 {
-	unsigned short ints = custom.intreqr & custom.intenar;
+	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
 	/* if a blitter interrupt */
 	if (ints & IF_BLIT) {
-		custom.intreq = IF_BLIT;
+		amiga_custom.intreq = IF_BLIT;
 		amiga_do_irq(IRQ_AMIGA_BLIT, fp);
 	}
 
 	/* if a copper interrupt */
 	if (ints & IF_COPER) {
-		custom.intreq = IF_COPER;
+		amiga_custom.intreq = IF_COPER;
 		amiga_do_irq(IRQ_AMIGA_COPPER, fp);
 	}
 
@@ -431,29 +431,29 @@ static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
 
 static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
 {
-	unsigned short ints = custom.intreqr & custom.intenar;
+	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
 	/* if audio 0 interrupt */
 	if (ints & IF_AUD0) {
-		custom.intreq = IF_AUD0;
+		amiga_custom.intreq = IF_AUD0;
 		amiga_do_irq(IRQ_AMIGA_AUD0, fp);
 	}
 
 	/* if audio 1 interrupt */
 	if (ints & IF_AUD1) {
-		custom.intreq = IF_AUD1;
+		amiga_custom.intreq = IF_AUD1;
 		amiga_do_irq(IRQ_AMIGA_AUD1, fp);
 	}
 
 	/* if audio 2 interrupt */
 	if (ints & IF_AUD2) {
-		custom.intreq = IF_AUD2;
+		amiga_custom.intreq = IF_AUD2;
 		amiga_do_irq(IRQ_AMIGA_AUD2, fp);
 	}
 
 	/* if audio 3 interrupt */
 	if (ints & IF_AUD3) {
-		custom.intreq = IF_AUD3;
+		amiga_custom.intreq = IF_AUD3;
 		amiga_do_irq(IRQ_AMIGA_AUD3, fp);
 	}
 	return IRQ_HANDLED;
@@ -461,7 +461,7 @@ static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
 
 static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
 {
-	unsigned short ints = custom.intreqr & custom.intenar;
+	unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
 	/* if serial receive buffer full interrupt */
 	if (ints & IF_RBF) {
@@ -471,7 +471,7 @@ static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
 
 	/* if a disk sync interrupt */
 	if (ints & IF_DSKSYN) {
-		custom.intreq = IF_DSKSYN;
+		amiga_custom.intreq = IF_DSKSYN;
 		amiga_do_irq(IRQ_AMIGA_DSKSYN, fp);
 	}
 	return IRQ_HANDLED;
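
The custom -> amiga_custom rename in this file is mechanical, but every touched line relies on the same hardware convention: INTENA and INTREQ are set/clear strobe registers, so writing IF_SETCLR|mask sets the selected bits and writing mask alone clears them, which is why amiga_init_IRQ() above first writes 0x7fff (clear everything) and then IF_SETCLR|IF_INTEN (set the master enable). A small mock of that register rule; IF_SETCLR is bit 15 per the hardware reference, the rest is scaffolding:

#include <assert.h>

#define IF_SETCLR	0x8000		/* set/clear select bit */
#define IF_INTEN	0x4000		/* master interrupt enable */

static unsigned short intena;		/* mock INTENA latch */

static void custom_write(unsigned short *reg, unsigned short val)
{
	if (val & IF_SETCLR)
		*reg |= val & 0x7fff;	/* SETCLR=1: set selected bits */
	else
		*reg &= ~val;		/* SETCLR=0: clear selected bits */
}

int main(void)
{
	custom_write(&intena, 0x7fff);			/* disable all */
	assert(intena == 0);
	custom_write(&intena, IF_SETCLR | IF_INTEN);	/* master enable */
	assert(intena == IF_INTEN);
	custom_write(&intena, IF_INTEN);		/* disable again */
	assert(intena == 0);
	return 0;
}
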
diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c
index bd5d134e9f12..ae94db5d93b2 100644
--- a/arch/m68k/amiga/amisound.c
+++ b/arch/m68k/amiga/amisound.c
@@ -24,6 +24,8 @@ static const signed char sine_data[] = {
 };
 #define DATA_SIZE	(sizeof(sine_data)/sizeof(sine_data[0]))
 
+#define custom amiga_custom
+
 	/*
 	 * The minimum period for audio may be modified by the frame buffer
 	 * device since it depends on htotal (for OCS/ECS/AGA)
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 7d55682615e3..9476eb9440f5 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -60,7 +60,7 @@ unsigned char cia_set_irq(struct ciabase *base, unsigned char mask)
 	else
 		base->icr_data &= ~mask;
 	if (base->icr_data & base->icr_mask)
-		custom.intreq = IF_SETCLR | base->int_mask;
+		amiga_custom.intreq = IF_SETCLR | base->int_mask;
 	return old & base->icr_mask;
 }
 
@@ -89,7 +89,7 @@ unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
 		}
 	}
 	if (base->icr_data & base->icr_mask)
-		custom.intreq = IF_SETCLR | base->int_mask;
+		amiga_custom.intreq = IF_SETCLR | base->int_mask;
 	return old;
 }
 
@@ -133,7 +133,7 @@ static irqreturn_t cia_handler(int irq, void *dev_id, struct pt_regs *fp)
 	mach_irq = base->cia_irq;
 	irq = SYS_IRQS + mach_irq;
 	ints = cia_set_irq(base, CIA_ICR_ALL);
-	custom.intreq = base->int_mask;
+	amiga_custom.intreq = base->int_mask;
 	for (i = 0; i < CIA_IRQS; i++, irq++, mach_irq++) {
 		if (ints & 1) {
 			kstat_cpu(0).irqs[irq]++;
@@ -162,7 +162,7 @@ void __init cia_init_IRQ(struct ciabase *base)
 	/* install CIA handler */
 	request_irq(base->handler_irq, cia_handler, 0, base->name, base);
 
-	custom.intena = IF_SETCLR | base->int_mask;
+	amiga_custom.intena = IF_SETCLR | base->int_mask;
 }
 
 int cia_get_irq_list(struct ciabase *base, struct seq_file *p)
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 4775e18a78f0..12e3706fe02c 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -105,9 +105,6 @@ static int a2000_hwclk (int, struct rtc_time *);
 static int a2000_hwclk (int, struct rtc_time *);
 static int amiga_set_clock_mmss (unsigned long);
 static unsigned int amiga_get_ss (void);
 extern void amiga_mksound( unsigned int count, unsigned int ticks );
-#ifdef CONFIG_AMIGA_FLOPPY
-extern void amiga_floppy_setup(char *, int *);
-#endif
 static void amiga_reset (void);
 extern void amiga_init_sound(void);
 static void amiga_savekmsg_init(void);
@@ -290,7 +287,7 @@ static void __init amiga_identify(void)
 	case CS_OCS:
 	case CS_ECS:
 	case CS_AGA:
-		switch (custom.deniseid & 0xf) {
+		switch (amiga_custom.deniseid & 0xf) {
 		case 0x0c:
 			AMIGAHW_SET(DENISE_HR);
 			break;
@@ -303,7 +300,7 @@ static void __init amiga_identify(void)
 			AMIGAHW_SET(DENISE);
 			break;
 		}
-		switch ((custom.vposr>>8) & 0x7f) {
+		switch ((amiga_custom.vposr>>8) & 0x7f) {
 		case 0x00:
 			AMIGAHW_SET(AGNUS_PAL);
 			break;
@@ -427,13 +424,7 @@ void __init config_amiga(void)
 
 	mach_set_clock_mmss = amiga_set_clock_mmss;
 	mach_get_ss = amiga_get_ss;
-#ifdef CONFIG_AMIGA_FLOPPY
-	mach_floppy_setup = amiga_floppy_setup;
-#endif
 	mach_reset = amiga_reset;
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
 #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
 	mach_beep = amiga_mksound;
 #endif
@@ -447,9 +438,9 @@ void __init config_amiga(void)
 	amiga_colorclock = 5*amiga_eclock;	/* 3.5 MHz */
 
 	/* clear all DMA bits */
-	custom.dmacon = DMAF_ALL;
+	amiga_custom.dmacon = DMAF_ALL;
 	/* ensure that the DMA master bit is set */
-	custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
+	amiga_custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
 
 	/* don't use Z2 RAM as system memory on Z3 capable machines */
 	if (AMIGAHW_PRESENT(ZORRO3)) {
@@ -830,8 +821,8 @@ static void amiga_savekmsg_init(void)
 
 static void amiga_serial_putc(char c)
 {
-	custom.serdat = (unsigned char)c | 0x100;
-	while (!(custom.serdatr & 0x2000))
+	amiga_custom.serdat = (unsigned char)c | 0x100;
+	while (!(amiga_custom.serdatr & 0x2000))
 		;
 }
 
@@ -855,11 +846,11 @@ int amiga_serial_console_wait_key(struct console *co)
 {
 	int ch;
 
-	while (!(custom.intreqr & IF_RBF))
+	while (!(amiga_custom.intreqr & IF_RBF))
 		barrier();
-	ch = custom.serdatr & 0xff;
+	ch = amiga_custom.serdatr & 0xff;
 	/* clear the interrupt, so that another character can be read */
-	custom.intreq = IF_RBF;
+	amiga_custom.intreq = IF_RBF;
 	return ch;
 }
 
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 264929471253..d401962d9b25 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -176,9 +176,6 @@ void config_apollo(void) {
 	mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */
 	mach_process_int = dn_process_int;
 	mach_reset = dn_dummy_reset;  /* */
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
 #ifdef CONFIG_HEARTBEAT
 	mach_heartbeat = dn_heartbeat;
 #endif
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 9261d2deeaf5..1012b08e5522 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -52,9 +52,6 @@ int atari_rtc_year_offset;
 
 /* local function prototypes */
 static void atari_reset( void );
-#ifdef CONFIG_ATARI_FLOPPY
-extern void atari_floppy_setup(char *, int *);
-#endif
 static void atari_get_model(char *model);
 static int atari_get_hardware_list(char *buffer);
 
@@ -244,12 +241,6 @@ void __init config_atari(void)
 	mach_get_irq_list = show_atari_interrupts;
 	mach_gettimeoffset = atari_gettimeoffset;
 	mach_reset = atari_reset;
-#ifdef CONFIG_ATARI_FLOPPY
-	mach_floppy_setup = atari_floppy_setup;
-#endif
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
 	mach_max_dma_address = 0xffffff;
 #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
 	mach_beep = atari_mksound;
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index f7573f2bcb9c..703cbc6dc9cc 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -47,6 +47,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 	unsigned char msr;
 	unsigned long flags;
 	struct rtc_time wtime;
+	void __user *argp = (void __user *)arg;
 
 	switch (cmd) {
 	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
@@ -69,7 +70,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 		} while (wtime.tm_sec != BCD2BIN(rtc->bcd_sec));
 		rtc->msr = msr;
 		local_irq_restore(flags);
-		return copy_to_user((void *)arg, &wtime, sizeof wtime) ?
+		return copy_to_user(argp, &wtime, sizeof wtime) ?
 								-EFAULT : 0;
 	}
 	case RTC_SET_TIME:	/* Set the RTC */
@@ -81,8 +82,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
 
-		if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
-				   sizeof(struct rtc_time)))
+		if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
 			return -EFAULT;
 
 		yrs = rtc_tm.tm_year;
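
The rtc.c change is the standard ioctl cleanup from this era: cast arg to a void __user pointer once, then pass the same argp to copy_to_user()/copy_from_user(), both of which return the number of bytes left uncopied (nonzero meaning -EFAULT). A compilable userspace sketch of the resulting shape; the copy helpers and command numbers are mocks, the control flow mirrors the patched driver:

#include <stdio.h>
#include <string.h>

#define __user			/* a sparse annotation in the kernel */
#define EFAULT	14
#define EINVAL	22

struct rtc_time { int tm_sec, tm_min, tm_hour; };
enum { RTC_RD_TIME = 1, RTC_SET_TIME = 2 };	/* mock command numbers */

/* Userspace stand-ins: the kernel versions return bytes NOT copied. */
static unsigned long copy_to_user(void __user *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

static unsigned long copy_from_user(void *to, const void __user *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

static int example_ioctl(unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;	/* cast once, reuse */
	struct rtc_time wtime = { 30, 15, 12 };

	switch (cmd) {
	case RTC_RD_TIME:
		return copy_to_user(argp, &wtime, sizeof wtime) ? -EFAULT : 0;
	case RTC_SET_TIME:
		if (copy_from_user(&wtime, argp, sizeof wtime))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	struct rtc_time t;
	int ret = example_ioctl(RTC_RD_TIME, (unsigned long)&t);
	printf("ret=%d hour=%d\n", ret, t.tm_hour);
	return 0;
}
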
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index a0b854f3f94a..6d129eef370f 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -261,9 +261,6 @@ void __init config_hp300(void)
 #ifdef CONFIG_HEARTBEAT
 	mach_heartbeat = hp300_pulse;
 #endif
-#ifdef CONFIG_DUMMY_CONSOLE
-	conswitchp = &dummy_con;
-#endif
 	mach_max_dma_address = 0xffffffff;
 
 	if (hp300_model >= HP_330 && hp300_model <= HP_433S && hp300_model != HP_350) {
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index c787c5ba9513..246a8820c223 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -92,7 +92,7 @@ int main(void)
 	DEFINE(TRAP_TRACE, TRAP_TRACE);
 
 	/* offsets into the custom struct */
-	DEFINE(CUSTOMBASE, &custom);
+	DEFINE(CUSTOMBASE, &amiga_custom);
 	DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
 	DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
 	DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index d4336d846df1..70002c146eed 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -273,8 +273,10 @@
  * Macintosh console support
  */
 
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
 #define CONSOLE
 #define CONSOLE_PENGUIN
+#endif
 
 /*
  * Macintosh serial debug support; outputs boot info to the printer
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 13d109328a42..3f9cb55d0356 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -238,10 +238,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 {
 	struct pt_regs * childregs;
 	struct switch_stack * childstack, *stack;
-	unsigned long stack_offset, *retp;
+	unsigned long *retp;
 
-	stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
-	childregs = (struct pt_regs *) ((unsigned long) (p->thread_info) + stack_offset);
+	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
 
 	*childregs = *regs;
 	childregs->d0 = 0;
@@ -386,7 +385,7 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
-	stack_page = (unsigned long)(p->thread_info);
+	stack_page = (unsigned long)task_stack_page(p);
 	fp = ((struct switch_stack *)p->thread.ksp)->a6;
 	do {
 		if (fp < stack_page+sizeof(struct thread_info) ||
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index d6ca99242e5a..750d5b3c971f 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -84,9 +84,6 @@ void (*mach_reset)( void );
 void (*mach_halt)( void );
 void (*mach_power_off)( void );
 long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
-#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
-void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-#endif
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int);
 EXPORT_SYMBOL(mach_heartbeat);
@@ -100,6 +97,8 @@ void (*mach_beep)(unsigned int, unsigned int);
 #if defined(CONFIG_ISA) && defined(MULTI_ISA)
 int isa_type;
 int isa_sex;
+EXPORT_SYMBOL(isa_type);
+EXPORT_SYMBOL(isa_sex);
 #endif
 
 extern int amiga_parse_bootinfo(const struct bi_record *);
@@ -280,6 +279,10 @@ void __init setup_arch(char **cmdline_p)
 		}
 	}
 
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#endif
+
 	switch (m68k_machtype) {
 #ifdef CONFIG_AMIGA
 	case MACH_AMIGA:
@@ -521,16 +524,6 @@ int get_hardware_list(char *buffer)
 	return(len);
 }
 
-
-#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
-void __init floppy_setup(char *str, int *ints)
-{
-	if (mach_floppy_setup)
-		mach_floppy_setup (str, ints);
-}
-
-#endif
-
 void check_bugs(void)
 {
 #ifndef CONFIG_M68KFPU_EMU
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 9c636a4c238d..866917bfa028 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -96,7 +96,7 @@ asmlinkage int do_sigsuspend(struct pt_regs *regs)
96asmlinkage int 96asmlinkage int
97do_rt_sigsuspend(struct pt_regs *regs) 97do_rt_sigsuspend(struct pt_regs *regs)
98{ 98{
99 sigset_t *unewset = (sigset_t *)regs->d1; 99 sigset_t __user *unewset = (sigset_t __user *)regs->d1;
100 size_t sigsetsize = (size_t)regs->d2; 100 size_t sigsetsize = (size_t)regs->d2;
101 sigset_t saveset, newset; 101 sigset_t saveset, newset;
102 102
@@ -122,8 +122,8 @@ do_rt_sigsuspend(struct pt_regs *regs)
122} 122}
123 123
124asmlinkage int 124asmlinkage int
125sys_sigaction(int sig, const struct old_sigaction *act, 125sys_sigaction(int sig, const struct old_sigaction __user *act,
126 struct old_sigaction *oact) 126 struct old_sigaction __user *oact)
127{ 127{
128 struct k_sigaction new_ka, old_ka; 128 struct k_sigaction new_ka, old_ka;
129 int ret; 129 int ret;
@@ -154,7 +154,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
154} 154}
155 155
156asmlinkage int 156asmlinkage int
157sys_sigaltstack(const stack_t *uss, stack_t *uoss) 157sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
158{ 158{
159 return do_sigaltstack(uss, uoss, rdusp()); 159 return do_sigaltstack(uss, uoss, rdusp());
160} 160}
@@ -169,10 +169,10 @@ sys_sigaltstack(const stack_t *uss, stack_t *uoss)
169 169
170struct sigframe 170struct sigframe
171{ 171{
172 char *pretcode; 172 char __user *pretcode;
173 int sig; 173 int sig;
174 int code; 174 int code;
175 struct sigcontext *psc; 175 struct sigcontext __user *psc;
176 char retcode[8]; 176 char retcode[8];
177 unsigned long extramask[_NSIG_WORDS-1]; 177 unsigned long extramask[_NSIG_WORDS-1];
178 struct sigcontext sc; 178 struct sigcontext sc;
@@ -180,10 +180,10 @@ struct sigframe
180 180
181struct rt_sigframe 181struct rt_sigframe
182{ 182{
183 char *pretcode; 183 char __user *pretcode;
184 int sig; 184 int sig;
185 struct siginfo *pinfo; 185 struct siginfo __user *pinfo;
186 void *puc; 186 void __user *puc;
187 char retcode[8]; 187 char retcode[8];
188 struct siginfo info; 188 struct siginfo info;
189 struct ucontext uc; 189 struct ucontext uc;
@@ -248,7 +248,7 @@ out:
248#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4] 248#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
249#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1] 249#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
250 250
251static inline int rt_restore_fpu_state(struct ucontext *uc) 251static inline int rt_restore_fpu_state(struct ucontext __user *uc)
252{ 252{
253 unsigned char fpstate[FPCONTEXT_SIZE]; 253 unsigned char fpstate[FPCONTEXT_SIZE];
254 int context_size = CPU_IS_060 ? 8 : 0; 254 int context_size = CPU_IS_060 ? 8 : 0;
@@ -267,7 +267,7 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
267 return 0; 267 return 0;
268 } 268 }
269 269
270 if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate)) 270 if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
271 goto out; 271 goto out;
272 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { 272 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
273 if (!CPU_IS_060) 273 if (!CPU_IS_060)
@@ -306,7 +306,7 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
306 "m" (*fpregs.f_fpcntl)); 306 "m" (*fpregs.f_fpcntl));
307 } 307 }
308 if (context_size && 308 if (context_size &&
309 __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1, 309 __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
310 context_size)) 310 context_size))
311 goto out; 311 goto out;
312 __asm__ volatile (".chip 68k/68881\n\t" 312 __asm__ volatile (".chip 68k/68881\n\t"
@@ -319,7 +319,7 @@ out:
319} 319}
320 320
321static inline int 321static inline int
322restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp, 322restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
323 int *pd0) 323 int *pd0)
324{ 324{
325 int fsize, formatvec; 325 int fsize, formatvec;
@@ -404,10 +404,10 @@ badframe:
404 404
405static inline int 405static inline int
406rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, 406rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
407 struct ucontext *uc, int *pd0) 407 struct ucontext __user *uc, int *pd0)
408{ 408{
409 int fsize, temp; 409 int fsize, temp;
410 greg_t *gregs = uc->uc_mcontext.gregs; 410 greg_t __user *gregs = uc->uc_mcontext.gregs;
411 unsigned long usp; 411 unsigned long usp;
412 int err; 412 int err;
413 413
@@ -506,7 +506,7 @@ asmlinkage int do_sigreturn(unsigned long __unused)
506 struct switch_stack *sw = (struct switch_stack *) &__unused; 506 struct switch_stack *sw = (struct switch_stack *) &__unused;
507 struct pt_regs *regs = (struct pt_regs *) (sw + 1); 507 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
508 unsigned long usp = rdusp(); 508 unsigned long usp = rdusp();
509 struct sigframe *frame = (struct sigframe *)(usp - 4); 509 struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
510 sigset_t set; 510 sigset_t set;
511 int d0; 511 int d0;
512 512
@@ -536,7 +536,7 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
536 struct switch_stack *sw = (struct switch_stack *) &__unused; 536 struct switch_stack *sw = (struct switch_stack *) &__unused;
537 struct pt_regs *regs = (struct pt_regs *) (sw + 1); 537 struct pt_regs *regs = (struct pt_regs *) (sw + 1);
538 unsigned long usp = rdusp(); 538 unsigned long usp = rdusp();
539 struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4); 539 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
540 sigset_t set; 540 sigset_t set;
541 int d0; 541 int d0;
542 542
@@ -596,7 +596,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
596 } 596 }
597} 597}
598 598
599static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs) 599static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
600{ 600{
601 unsigned char fpstate[FPCONTEXT_SIZE]; 601 unsigned char fpstate[FPCONTEXT_SIZE];
602 int context_size = CPU_IS_060 ? 8 : 0; 602 int context_size = CPU_IS_060 ? 8 : 0;
@@ -617,7 +617,7 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
617 ".chip 68k" 617 ".chip 68k"
618 : : "m" (*fpstate) : "memory"); 618 : : "m" (*fpstate) : "memory");
619 619
620 err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate); 620 err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
621 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) { 621 if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
622 fpregset_t fpregs; 622 fpregset_t fpregs;
623 if (!CPU_IS_060) 623 if (!CPU_IS_060)
@@ -642,7 +642,7 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
642 sizeof(fpregs)); 642 sizeof(fpregs));
643 } 643 }
644 if (context_size) 644 if (context_size)
645 err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4, 645 err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
646 context_size); 646 context_size);
647 return err; 647 return err;
648} 648}
@@ -662,10 +662,10 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
662 save_fpu_state(sc, regs); 662 save_fpu_state(sc, regs);
663} 663}
664 664
665static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) 665static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
666{ 666{
667 struct switch_stack *sw = (struct switch_stack *)regs - 1; 667 struct switch_stack *sw = (struct switch_stack *)regs - 1;
668 greg_t *gregs = uc->uc_mcontext.gregs; 668 greg_t __user *gregs = uc->uc_mcontext.gregs;
669 int err = 0; 669 int err = 0;
670 670
671 err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); 671 err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
@@ -753,7 +753,7 @@ static inline void push_cache (unsigned long vaddr)
753 } 753 }
754} 754}
755 755
756static inline void * 756static inline void __user *
757get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) 757get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
758{ 758{
759 unsigned long usp; 759 unsigned long usp;
@@ -766,13 +766,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
766 if (!on_sig_stack(usp)) 766 if (!on_sig_stack(usp))
767 usp = current->sas_ss_sp + current->sas_ss_size; 767 usp = current->sas_ss_sp + current->sas_ss_size;
768 } 768 }
769 return (void *)((usp - frame_size) & -8UL); 769 return (void __user *)((usp - frame_size) & -8UL);
770} 770}
771 771
772static void setup_frame (int sig, struct k_sigaction *ka, 772static void setup_frame (int sig, struct k_sigaction *ka,
773 sigset_t *set, struct pt_regs *regs) 773 sigset_t *set, struct pt_regs *regs)
774{ 774{
775 struct sigframe *frame; 775 struct sigframe __user *frame;
776 int fsize = frame_extra_sizes[regs->format]; 776 int fsize = frame_extra_sizes[regs->format];
777 struct sigcontext context; 777 struct sigcontext context;
778 int err = 0; 778 int err = 0;
@@ -813,7 +813,7 @@ static void setup_frame (int sig, struct k_sigaction *ka,
813 err |= __put_user(frame->retcode, &frame->pretcode); 813 err |= __put_user(frame->retcode, &frame->pretcode);
814 /* moveq #,d0; trap #0 */ 814 /* moveq #,d0; trap #0 */
815 err |= __put_user(0x70004e40 + (__NR_sigreturn << 16), 815 err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
816 (long *)(frame->retcode)); 816 (long __user *)(frame->retcode));
817 817
818 if (err) 818 if (err)
819 goto give_sigsegv; 819 goto give_sigsegv;
@@ -849,7 +849,7 @@ give_sigsegv:
849static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, 849static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
850 sigset_t *set, struct pt_regs *regs) 850 sigset_t *set, struct pt_regs *regs)
851{ 851{
852 struct rt_sigframe *frame; 852 struct rt_sigframe __user *frame;
853 int fsize = frame_extra_sizes[regs->format]; 853 int fsize = frame_extra_sizes[regs->format];
854 int err = 0; 854 int err = 0;
855 855
@@ -880,8 +880,8 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
880 880
881 /* Create the ucontext. */ 881 /* Create the ucontext. */
882 err |= __put_user(0, &frame->uc.uc_flags); 882 err |= __put_user(0, &frame->uc.uc_flags);
883 err |= __put_user(0, &frame->uc.uc_link); 883 err |= __put_user(NULL, &frame->uc.uc_link);
884 err |= __put_user((void *)current->sas_ss_sp, 884 err |= __put_user((void __user *)current->sas_ss_sp,
885 &frame->uc.uc_stack.ss_sp); 885 &frame->uc.uc_stack.ss_sp);
886 err |= __put_user(sas_ss_flags(rdusp()), 886 err |= __put_user(sas_ss_flags(rdusp()),
887 &frame->uc.uc_stack.ss_flags); 887 &frame->uc.uc_stack.ss_flags);
@@ -893,8 +893,8 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
893 err |= __put_user(frame->retcode, &frame->pretcode); 893 err |= __put_user(frame->retcode, &frame->pretcode);
894 /* moveq #,d0; notb d0; trap #0 */ 894 /* moveq #,d0; notb d0; trap #0 */
895 err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16), 895 err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
896 (long *)(frame->retcode + 0)); 896 (long __user *)(frame->retcode + 0));
897 err |= __put_user(0x4e40, (short *)(frame->retcode + 4)); 897 err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
898 898
899 if (err) 899 if (err)
900 goto give_sigsegv; 900 goto give_sigsegv;
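
Most of the churn in signal.c is annotation, not behavior: __user marks a pointer into the user address space so that sparse (run via make C=1) rejects plain dereferences and insists on the checked accessors. A minimal sketch of what the annotation enforces (read_flag() is a made-up example):

        static int read_flag(int __user *uflag)
        {
                int val;

                /* val = *uflag;           <-- sparse warns: user pointer */
                if (get_user(val, uflag))  /* checked access, may fault */
                        return -EFAULT;
                return val;
        }

The one functional-looking hunk, __put_user(NULL, &frame->uc.uc_link), stores the same bit pattern as the old 0 but keeps the operand pointer-shaped for the same checker.
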
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 640895b2c51a..143c552d38f3 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -32,7 +32,7 @@
32 * sys_pipe() is the normal C calling standard for creating 32 * sys_pipe() is the normal C calling standard for creating
33 * a pipe. It's not the way unix traditionally does this, though. 33 * a pipe. It's not the way unix traditionally does this, though.
34 */ 34 */
35asmlinkage int sys_pipe(unsigned long * fildes) 35asmlinkage int sys_pipe(unsigned long __user * fildes)
36{ 36{
37 int fd[2]; 37 int fd[2];
38 int error; 38 int error;
@@ -94,7 +94,7 @@ struct mmap_arg_struct {
94 unsigned long offset; 94 unsigned long offset;
95}; 95};
96 96
97asmlinkage int old_mmap(struct mmap_arg_struct *arg) 97asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
98{ 98{
99 struct mmap_arg_struct a; 99 struct mmap_arg_struct a;
100 int error = -EFAULT; 100 int error = -EFAULT;
@@ -160,11 +160,11 @@ out:
160 160
161struct sel_arg_struct { 161struct sel_arg_struct {
162 unsigned long n; 162 unsigned long n;
163 fd_set *inp, *outp, *exp; 163 fd_set __user *inp, *outp, *exp;
164 struct timeval *tvp; 164 struct timeval __user *tvp;
165}; 165};
166 166
167asmlinkage int old_select(struct sel_arg_struct *arg) 167asmlinkage int old_select(struct sel_arg_struct __user *arg)
168{ 168{
169 struct sel_arg_struct a; 169 struct sel_arg_struct a;
170 170
@@ -180,7 +180,7 @@ asmlinkage int old_select(struct sel_arg_struct *arg)
180 * This is really horribly ugly. 180 * This is really horribly ugly.
181 */ 181 */
182asmlinkage int sys_ipc (uint call, int first, int second, 182asmlinkage int sys_ipc (uint call, int first, int second,
183 int third, void *ptr, long fifth) 183 int third, void __user *ptr, long fifth)
184{ 184{
185 int version, ret; 185 int version, ret;
186 186
@@ -190,14 +190,14 @@ asmlinkage int sys_ipc (uint call, int first, int second,
190 if (call <= SEMCTL) 190 if (call <= SEMCTL)
191 switch (call) { 191 switch (call) {
192 case SEMOP: 192 case SEMOP:
193 return sys_semop (first, (struct sembuf *)ptr, second); 193 return sys_semop (first, ptr, second);
194 case SEMGET: 194 case SEMGET:
195 return sys_semget (first, second, third); 195 return sys_semget (first, second, third);
196 case SEMCTL: { 196 case SEMCTL: {
197 union semun fourth; 197 union semun fourth;
198 if (!ptr) 198 if (!ptr)
199 return -EINVAL; 199 return -EINVAL;
200 if (get_user(fourth.__pad, (void **) ptr)) 200 if (get_user(fourth.__pad, (void __user *__user *) ptr))
201 return -EFAULT; 201 return -EFAULT;
202 return sys_semctl (first, second, third, fourth); 202 return sys_semctl (first, second, third, fourth);
203 } 203 }
@@ -207,31 +207,26 @@ asmlinkage int sys_ipc (uint call, int first, int second,
207 if (call <= MSGCTL) 207 if (call <= MSGCTL)
208 switch (call) { 208 switch (call) {
209 case MSGSND: 209 case MSGSND:
210 return sys_msgsnd (first, (struct msgbuf *) ptr, 210 return sys_msgsnd (first, ptr, second, third);
211 second, third);
212 case MSGRCV: 211 case MSGRCV:
213 switch (version) { 212 switch (version) {
214 case 0: { 213 case 0: {
215 struct ipc_kludge tmp; 214 struct ipc_kludge tmp;
216 if (!ptr) 215 if (!ptr)
217 return -EINVAL; 216 return -EINVAL;
218 if (copy_from_user (&tmp, 217 if (copy_from_user (&tmp, ptr, sizeof (tmp)))
219 (struct ipc_kludge *)ptr,
220 sizeof (tmp)))
221 return -EFAULT; 218 return -EFAULT;
222 return sys_msgrcv (first, tmp.msgp, second, 219 return sys_msgrcv (first, tmp.msgp, second,
223 tmp.msgtyp, third); 220 tmp.msgtyp, third);
224 } 221 }
225 default: 222 default:
226 return sys_msgrcv (first, 223 return sys_msgrcv (first, ptr,
227 (struct msgbuf *) ptr,
228 second, fifth, third); 224 second, fifth, third);
229 } 225 }
230 case MSGGET: 226 case MSGGET:
231 return sys_msgget ((key_t) first, second); 227 return sys_msgget ((key_t) first, second);
232 case MSGCTL: 228 case MSGCTL:
233 return sys_msgctl (first, second, 229 return sys_msgctl (first, second, ptr);
234 (struct msqid_ds *) ptr);
235 default: 230 default:
236 return -ENOSYS; 231 return -ENOSYS;
237 } 232 }
@@ -241,20 +236,18 @@ asmlinkage int sys_ipc (uint call, int first, int second,
241 switch (version) { 236 switch (version) {
242 default: { 237 default: {
243 ulong raddr; 238 ulong raddr;
244 ret = do_shmat (first, (char *) ptr, 239 ret = do_shmat (first, ptr, second, &raddr);
245 second, &raddr);
246 if (ret) 240 if (ret)
247 return ret; 241 return ret;
248 return put_user (raddr, (ulong *) third); 242 return put_user (raddr, (ulong __user *) third);
249 } 243 }
250 } 244 }
251 case SHMDT: 245 case SHMDT:
252 return sys_shmdt ((char *)ptr); 246 return sys_shmdt (ptr);
253 case SHMGET: 247 case SHMGET:
254 return sys_shmget (first, second, third); 248 return sys_shmget (first, second, third);
255 case SHMCTL: 249 case SHMCTL:
256 return sys_shmctl (first, second, 250 return sys_shmctl (first, second, ptr);
257 (struct shmid_ds *) ptr);
258 default: 251 default:
259 return -ENOSYS; 252 return -ENOSYS;
260 } 253 }
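
Typing ptr as void __user * once at the prototype lets every branch of the sys_ipc() multiplexer pass it straight through; the only cast that survives is the genuinely double-indirect SEMCTL case, where userspace hands in a pointer to a union whose first member is itself a user pointer. Sketched in isolation (same shape as the hunk above, assuming the historical semun calling convention):

        union semun fourth;

        /* ptr -> user copy of the union; its __pad member is another
         * user pointer, hence the two-level __user annotation. */
        if (get_user(fourth.__pad, (void __user * __user *) ptr))
                return -EFAULT;
        return sys_semctl(first, second, third, fourth);
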
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index deb36e8b04a2..cdf58fbb3e73 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -169,25 +169,25 @@ void __init trap_init (void)
169 169
170 if (CPU_IS_060 && !FPU_IS_EMU) { 170 if (CPU_IS_060 && !FPU_IS_EMU) {
171 /* set up IFPSP entry points */ 171 /* set up IFPSP entry points */
172 asmlinkage void snan_vec(void) asm ("_060_fpsp_snan"); 172 asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
173 asmlinkage void operr_vec(void) asm ("_060_fpsp_operr"); 173 asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
174 asmlinkage void ovfl_vec(void) asm ("_060_fpsp_ovfl"); 174 asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
175 asmlinkage void unfl_vec(void) asm ("_060_fpsp_unfl"); 175 asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
176 asmlinkage void dz_vec(void) asm ("_060_fpsp_dz"); 176 asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
177 asmlinkage void inex_vec(void) asm ("_060_fpsp_inex"); 177 asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
178 asmlinkage void fline_vec(void) asm ("_060_fpsp_fline"); 178 asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
179 asmlinkage void unsupp_vec(void) asm ("_060_fpsp_unsupp"); 179 asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
180 asmlinkage void effadd_vec(void) asm ("_060_fpsp_effadd"); 180 asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
181 181
182 vectors[VEC_FPNAN] = snan_vec; 182 vectors[VEC_FPNAN] = snan_vec6;
183 vectors[VEC_FPOE] = operr_vec; 183 vectors[VEC_FPOE] = operr_vec6;
184 vectors[VEC_FPOVER] = ovfl_vec; 184 vectors[VEC_FPOVER] = ovfl_vec6;
185 vectors[VEC_FPUNDER] = unfl_vec; 185 vectors[VEC_FPUNDER] = unfl_vec6;
186 vectors[VEC_FPDIVZ] = dz_vec; 186 vectors[VEC_FPDIVZ] = dz_vec6;
187 vectors[VEC_FPIR] = inex_vec; 187 vectors[VEC_FPIR] = inex_vec6;
188 vectors[VEC_LINE11] = fline_vec; 188 vectors[VEC_LINE11] = fline_vec6;
189 vectors[VEC_FPUNSUP] = unsupp_vec; 189 vectors[VEC_FPUNSUP] = unsupp_vec6;
190 vectors[VEC_UNIMPEA] = effadd_vec; 190 vectors[VEC_UNIMPEA] = effadd_vec6;
191 } 191 }
192 192
193 /* if running on an amiga, make the NMI interrupt do nothing */ 193 /* if running on an amiga, make the NMI interrupt do nothing */
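
The _vec6 renames rely on GCC's asm-label extension: the declaration binds a C identifier to a fixed assembler symbol, here the entry points exported by the 68060 FPSP support code, so the C-side name can change without touching the assembly. The idiom in isolation:

        /* Bind the C name to an existing assembler symbol... */
        asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");

        /* ...then install it like any other handler. */
        vectors[VEC_FPNAN] = snan_vec6;
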
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index e58654f3f8dd..69d1d3d30c78 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -13,6 +13,7 @@ SECTIONS
13 .text : { 13 .text : {
14 *(.text) 14 *(.text)
15 SCHED_TEXT 15 SCHED_TEXT
16 LOCK_TEXT
16 *(.fixup) 17 *(.fixup)
17 *(.gnu.warning) 18 *(.gnu.warning)
18 } :text = 0x4e75 19 } :text = 0x4e75
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index cc37e8d3c1e2..65cc39c24185 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -14,6 +14,7 @@ SECTIONS
14 *(.head) 14 *(.head)
15 *(.text) 15 *(.text)
16 SCHED_TEXT 16 SCHED_TEXT
17 LOCK_TEXT
17 *(.fixup) 18 *(.fixup)
18 *(.gnu.warning) 19 *(.gnu.warning)
19 } :text = 0x4e75 20 } :text = 0x4e75
@@ -66,7 +67,7 @@ __init_begin = .;
66 __initramfs_end = .; 67 __initramfs_end = .;
67 . = ALIGN(8192); 68 . = ALIGN(8192);
68 __init_end = .; 69 __init_end = .;
69 .init.task : { *(init_task) } 70 .data.init.task : { *(.data.init_task) }
70 71
71 72
72 .bss : { *(.bss) } /* BSS */ 73 .bss : { *(.bss) } /* BSS */
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index 4a5c5445c610..cb13c6e3ccae 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -134,7 +134,7 @@ EXPORT_SYMBOL(csum_partial);
134 */ 134 */
135 135
136unsigned int 136unsigned int
137csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, 137csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
138 int len, int sum, int *csum_err) 138 int len, int sum, int *csum_err)
139{ 139{
140 /* 140 /*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index cd19cbb213e8..14f8d3f4e195 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -212,9 +212,6 @@ void __init config_mac(void)
212 mach_reset = mac_reset; 212 mach_reset = mac_reset;
213 mach_halt = mac_poweroff; 213 mach_halt = mac_poweroff;
214 mach_power_off = mac_poweroff; 214 mach_power_off = mac_poweroff;
215#ifdef CONFIG_DUMMY_CONSOLE
216 conswitchp = &dummy_con;
217#endif
218 mach_max_dma_address = 0xffffffff; 215 mach_max_dma_address = 0xffffffff;
219#if 0 216#if 0
220 mach_debug_init = mac_debug_init; 217 mach_debug_init = mac_debug_init;
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index d889ba80ccdc..9179a3798407 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -293,8 +293,8 @@ void __init iop_init(void)
293 } 293 }
294 294
295 for (i = 0 ; i < NUM_IOP_CHAN ; i++) { 295 for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
296 iop_send_queue[IOP_NUM_SCC][i] = 0; 296 iop_send_queue[IOP_NUM_SCC][i] = NULL;
297 iop_send_queue[IOP_NUM_ISM][i] = 0; 297 iop_send_queue[IOP_NUM_ISM][i] = NULL;
298 iop_listeners[IOP_NUM_SCC][i].devname = NULL; 298 iop_listeners[IOP_NUM_SCC][i].devname = NULL;
299 iop_listeners[IOP_NUM_SCC][i].handler = NULL; 299 iop_listeners[IOP_NUM_SCC][i].handler = NULL;
300 iop_listeners[IOP_NUM_ISM][i].devname = NULL; 300 iop_listeners[IOP_NUM_ISM][i].devname = NULL;
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 5b80d7cd954a..bbb0c3b95e9c 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -39,72 +39,163 @@
39extern struct mac_booter_data mac_bi_data; 39extern struct mac_booter_data mac_bi_data;
40static void (*rom_reset)(void); 40static void (*rom_reset)(void);
41 41
42#ifdef CONFIG_ADB 42#ifdef CONFIG_ADB_CUDA
43/* 43static long cuda_read_time(void)
44 * Return the current time as the number of seconds since January 1, 1904.
45 */
46
47static long adb_read_time(void)
48{ 44{
49 volatile struct adb_request req; 45 struct adb_request req;
50 long time; 46 long time;
51 47
52 adb_request((struct adb_request *) &req, NULL, 48 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
53 ADBREQ_RAW|ADBREQ_SYNC, 49 return 0;
54 2, CUDA_PACKET, CUDA_GET_TIME); 50 while (!req.complete)
51 cuda_poll();
55 52
56 time = (req.reply[3] << 24) | (req.reply[4] << 16) 53 time = (req.reply[3] << 24) | (req.reply[4] << 16)
57 | (req.reply[5] << 8) | req.reply[6]; 54 | (req.reply[5] << 8) | req.reply[6];
58 return time - RTC_OFFSET; 55 return time - RTC_OFFSET;
59} 56}
60 57
61/* 58static void cuda_write_time(long data)
62 * Set the current system time 59{
63 */ 60 struct adb_request req;
61 data += RTC_OFFSET;
62 if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
63 (data >> 24) & 0xFF, (data >> 16) & 0xFF,
64 (data >> 8) & 0xFF, data & 0xFF) < 0)
65 return;
66 while (!req.complete)
67 cuda_poll();
68}
64 69
65static void adb_write_time(long data) 70static __u8 cuda_read_pram(int offset)
66{ 71{
67 volatile struct adb_request req; 72 struct adb_request req;
73 if (cuda_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
74 (offset >> 8) & 0xFF, offset & 0xFF) < 0)
75 return 0;
76 while (!req.complete)
77 cuda_poll();
78 return req.reply[3];
79}
68 80
69 data += RTC_OFFSET; 81static void cuda_write_pram(int offset, __u8 data)
82{
83 struct adb_request req;
84 if (cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
85 (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
86 return;
87 while (!req.complete)
88 cuda_poll();
89}
90#else
91#define cuda_read_time() 0
92#define cuda_write_time(n)
93#define cuda_read_pram NULL
94#define cuda_write_pram NULL
95#endif
96
97#ifdef CONFIG_ADB_PMU68K
98static long pmu_read_time(void)
99{
100 struct adb_request req;
101 long time;
102
103 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
104 return 0;
105 while (!req.complete)
106 pmu_poll();
70 107
71 adb_request((struct adb_request *) &req, NULL, 108 time = (req.reply[0] << 24) | (req.reply[1] << 16)
72 ADBREQ_RAW|ADBREQ_SYNC, 109 | (req.reply[2] << 8) | req.reply[3];
73 6, CUDA_PACKET, CUDA_SET_TIME, 110 return time - RTC_OFFSET;
111}
112
113static void pmu_write_time(long data)
114{
115 struct adb_request req;
116 data += RTC_OFFSET;
117 if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
74 (data >> 24) & 0xFF, (data >> 16) & 0xFF, 118 (data >> 24) & 0xFF, (data >> 16) & 0xFF,
75 (data >> 8) & 0xFF, data & 0xFF); 119 (data >> 8) & 0xFF, data & 0xFF) < 0)
120 return;
121 while (!req.complete)
122 pmu_poll();
76} 123}
77 124
78/* 125static __u8 pmu_read_pram(int offset)
79 * Get a byte from the NVRAM 126{
80 */ 127 struct adb_request req;
128 if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM,
129 (offset >> 8) & 0xFF, offset & 0xFF) < 0)
130 return 0;
131 while (!req.complete)
132 pmu_poll();
133 return req.reply[3];
134}
81 135
82static __u8 adb_read_pram(int offset) 136static void pmu_write_pram(int offset, __u8 data)
83{ 137{
84 volatile struct adb_request req; 138 struct adb_request req;
139 if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM,
140 (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
141 return;
142 while (!req.complete)
143 pmu_poll();
144}
145#else
146#define pmu_read_time() 0
147#define pmu_write_time(n)
148#define pmu_read_pram NULL
149#define pmu_write_pram NULL
150#endif
85 151
86 adb_request((struct adb_request *) &req, NULL, 152#ifdef CONFIG_ADB_MACIISI
87 ADBREQ_RAW|ADBREQ_SYNC, 153extern int maciisi_request(struct adb_request *req,
88 4, CUDA_PACKET, CUDA_GET_PRAM, 154 void (*done)(struct adb_request *), int nbytes, ...);
89 (offset >> 8) & 0xFF, offset & 0xFF); 155
90 return req.reply[3]; 156static long maciisi_read_time(void)
157{
158 struct adb_request req;
159 long time;
160
161 if (maciisi_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME))
162 return 0;
163
164 time = (req.reply[3] << 24) | (req.reply[4] << 16)
165 | (req.reply[5] << 8) | req.reply[6];
166 return time - RTC_OFFSET;
91} 167}
92 168
93/* 169static void maciisi_write_time(long data)
94 * Write a byte to the NVRAM 170{
95 */ 171 struct adb_request req;
172 data += RTC_OFFSET;
173 maciisi_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
174 (data >> 24) & 0xFF, (data >> 16) & 0xFF,
175 (data >> 8) & 0xFF, data & 0xFF);
176}
96 177
97static void adb_write_pram(int offset, __u8 data) 178static __u8 maciisi_read_pram(int offset)
98{ 179{
99 volatile struct adb_request req; 180 struct adb_request req;
181 if (maciisi_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
182 (offset >> 8) & 0xFF, offset & 0xFF))
183 return 0;
184 return req.reply[3];
185}
100 186
101 adb_request((struct adb_request *) &req, NULL, 187static void maciisi_write_pram(int offset, __u8 data)
102 ADBREQ_RAW|ADBREQ_SYNC, 188{
103 5, CUDA_PACKET, CUDA_SET_PRAM, 189 struct adb_request req;
104 (offset >> 8) & 0xFF, offset & 0xFF, 190 maciisi_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
105 data); 191 (offset >> 8) & 0xFF, offset & 0xFF, data);
106} 192}
107#endif /* CONFIG_ADB */ 193#else
194#define maciisi_read_time() 0
195#define maciisi_write_time(n)
196#define maciisi_read_pram NULL
197#define maciisi_write_pram NULL
198#endif
108 199
109/* 200/*
110 * VIA PRAM/RTC access routines 201 * VIA PRAM/RTC access routines
@@ -305,42 +396,55 @@ static void oss_shutdown(void)
305 396
306static void cuda_restart(void) 397static void cuda_restart(void)
307{ 398{
308 adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC, 399 struct adb_request req;
309 2, CUDA_PACKET, CUDA_RESET_SYSTEM); 400 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM) < 0)
401 return;
402 while (!req.complete)
403 cuda_poll();
310} 404}
311 405
312static void cuda_shutdown(void) 406static void cuda_shutdown(void)
313{ 407{
314 adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC, 408 struct adb_request req;
315 2, CUDA_PACKET, CUDA_POWERDOWN); 409 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN) < 0)
410 return;
411 while (!req.complete)
412 cuda_poll();
316} 413}
317 414
318#endif /* CONFIG_ADB_CUDA */ 415#endif /* CONFIG_ADB_CUDA */
319 416
320#ifdef CONFIG_ADB_PMU 417#ifdef CONFIG_ADB_PMU68K
321 418
322void pmu_restart(void) 419void pmu_restart(void)
323{ 420{
324 adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC, 421 struct adb_request req;
325 3, PMU_PACKET, PMU_SET_INTR_MASK, 422 if (pmu_request(&req, NULL,
326 PMU_INT_ADB|PMU_INT_TICK); 423 2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
327 424 return;
328 adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC, 425 while (!req.complete)
329 2, PMU_PACKET, PMU_RESET); 426 pmu_poll();
427 if (pmu_request(&req, NULL, 1, PMU_RESET) < 0)
428 return;
429 while (!req.complete)
430 pmu_poll();
330} 431}
331 432
332void pmu_shutdown(void) 433void pmu_shutdown(void)
333{ 434{
334 adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC, 435 struct adb_request req;
335 3, PMU_PACKET, PMU_SET_INTR_MASK, 436 if (pmu_request(&req, NULL,
336 PMU_INT_ADB|PMU_INT_TICK); 437 2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
337 438 return;
338 adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC, 439 while (!req.complete)
339 6, PMU_PACKET, PMU_SHUTDOWN, 440 pmu_poll();
340 'M', 'A', 'T', 'T'); 441 if (pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 'M', 'A', 'T', 'T') < 0)
442 return;
443 while (!req.complete)
444 pmu_poll();
341} 445}
342 446
343#endif /* CONFIG_ADB_PMU */ 447#endif
344 448
345/* 449/*
346 *------------------------------------------------------------------- 450 *-------------------------------------------------------------------
@@ -351,21 +455,22 @@ void pmu_shutdown(void)
351 455
352void mac_pram_read(int offset, __u8 *buffer, int len) 456void mac_pram_read(int offset, __u8 *buffer, int len)
353{ 457{
354 __u8 (*func)(int) = NULL; 458 __u8 (*func)(int);
355 int i; 459 int i;
356 460
357 if (macintosh_config->adb_type == MAC_ADB_IISI || 461 switch(macintosh_config->adb_type) {
358 macintosh_config->adb_type == MAC_ADB_PB1 || 462 case MAC_ADB_IISI:
359 macintosh_config->adb_type == MAC_ADB_PB2 || 463 func = maciisi_read_pram; break;
360 macintosh_config->adb_type == MAC_ADB_CUDA) { 464 case MAC_ADB_PB1:
361#ifdef CONFIG_ADB 465 case MAC_ADB_PB2:
362 func = adb_read_pram; 466 func = pmu_read_pram; break;
363#else 467 case MAC_ADB_CUDA:
364 return; 468 func = cuda_read_pram; break;
365#endif 469 default:
366 } else {
367 func = via_read_pram; 470 func = via_read_pram;
368 } 471 }
472 if (!func)
473 return;
369 for (i = 0 ; i < len ; i++) { 474 for (i = 0 ; i < len ; i++) {
370 buffer[i] = (*func)(offset++); 475 buffer[i] = (*func)(offset++);
371 } 476 }
@@ -373,21 +478,22 @@ void mac_pram_read(int offset, __u8 *buffer, int len)
373 478
374void mac_pram_write(int offset, __u8 *buffer, int len) 479void mac_pram_write(int offset, __u8 *buffer, int len)
375{ 480{
376 void (*func)(int, __u8) = NULL; 481 void (*func)(int, __u8);
377 int i; 482 int i;
378 483
379 if (macintosh_config->adb_type == MAC_ADB_IISI || 484 switch(macintosh_config->adb_type) {
380 macintosh_config->adb_type == MAC_ADB_PB1 || 485 case MAC_ADB_IISI:
381 macintosh_config->adb_type == MAC_ADB_PB2 || 486 func = maciisi_write_pram; break;
382 macintosh_config->adb_type == MAC_ADB_CUDA) { 487 case MAC_ADB_PB1:
383#ifdef CONFIG_ADB 488 case MAC_ADB_PB2:
384 func = adb_write_pram; 489 func = pmu_write_pram; break;
385#else 490 case MAC_ADB_CUDA:
386 return; 491 func = cuda_write_pram; break;
387#endif 492 default:
388 } else {
389 func = via_write_pram; 493 func = via_write_pram;
390 } 494 }
495 if (!func)
496 return;
391 for (i = 0 ; i < len ; i++) { 497 for (i = 0 ; i < len ; i++) {
392 (*func)(offset++, buffer[i]); 498 (*func)(offset++, buffer[i]);
393 } 499 }
@@ -408,7 +514,7 @@ void mac_poweroff(void)
408 } else if (macintosh_config->adb_type == MAC_ADB_CUDA) { 514 } else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
409 cuda_shutdown(); 515 cuda_shutdown();
410#endif 516#endif
411#ifdef CONFIG_ADB_PMU 517#ifdef CONFIG_ADB_PMU68K
412 } else if (macintosh_config->adb_type == MAC_ADB_PB1 518 } else if (macintosh_config->adb_type == MAC_ADB_PB1
413 || macintosh_config->adb_type == MAC_ADB_PB2) { 519 || macintosh_config->adb_type == MAC_ADB_PB2) {
414 pmu_shutdown(); 520 pmu_shutdown();
@@ -448,7 +554,7 @@ void mac_reset(void)
448 } else if (macintosh_config->adb_type == MAC_ADB_CUDA) { 554 } else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
449 cuda_restart(); 555 cuda_restart();
450#endif 556#endif
451#ifdef CONFIG_ADB_PMU 557#ifdef CONFIG_ADB_PMU68K
452 } else if (macintosh_config->adb_type == MAC_ADB_PB1 558 } else if (macintosh_config->adb_type == MAC_ADB_PB1
453 || macintosh_config->adb_type == MAC_ADB_PB2) { 559 || macintosh_config->adb_type == MAC_ADB_PB2) {
454 pmu_restart(); 560 pmu_restart();
@@ -466,12 +572,13 @@ void mac_reset(void)
466 /* make a 1-to-1 mapping, using the transparent tran. reg. */ 572 /* make a 1-to-1 mapping, using the transparent tran. reg. */
467 unsigned long virt = (unsigned long) mac_reset; 573 unsigned long virt = (unsigned long) mac_reset;
468 unsigned long phys = virt_to_phys(mac_reset); 574 unsigned long phys = virt_to_phys(mac_reset);
575 unsigned long addr = (phys&0xFF000000)|0x8777;
469 unsigned long offset = phys-virt; 576 unsigned long offset = phys-virt;
470 local_irq_disable(); /* lets not screw this up, ok? */ 577 local_irq_disable(); /* lets not screw this up, ok? */
471 __asm__ __volatile__(".chip 68030\n\t" 578 __asm__ __volatile__(".chip 68030\n\t"
472 "pmove %0,%/tt0\n\t" 579 "pmove %0,%/tt0\n\t"
473 ".chip 68k" 580 ".chip 68k"
474 : : "m" ((phys&0xFF000000)|0x8777)); 581 : : "m" (addr));
475 /* Now jump to physical address so we can disable MMU */ 582 /* Now jump to physical address so we can disable MMU */
476 __asm__ __volatile__( 583 __asm__ __volatile__(
477 ".chip 68030\n\t" 584 ".chip 68030\n\t"
@@ -588,20 +695,22 @@ int mac_hwclk(int op, struct rtc_time *t)
588 unsigned long now; 695 unsigned long now;
589 696
590 if (!op) { /* read */ 697 if (!op) { /* read */
591 if (macintosh_config->adb_type == MAC_ADB_II) { 698 switch (macintosh_config->adb_type) {
699 case MAC_ADB_II:
700 case MAC_ADB_IOP:
592 now = via_read_time(); 701 now = via_read_time();
593 } else 702 break;
594#ifdef CONFIG_ADB 703 case MAC_ADB_IISI:
595 if ((macintosh_config->adb_type == MAC_ADB_IISI) || 704 now = maciisi_read_time();
596 (macintosh_config->adb_type == MAC_ADB_PB1) || 705 break;
597 (macintosh_config->adb_type == MAC_ADB_PB2) || 706 case MAC_ADB_PB1:
598 (macintosh_config->adb_type == MAC_ADB_CUDA)) { 707 case MAC_ADB_PB2:
599 now = adb_read_time(); 708 now = pmu_read_time();
600 } else 709 break;
601#endif 710 case MAC_ADB_CUDA:
602 if (macintosh_config->adb_type == MAC_ADB_IOP) { 711 now = cuda_read_time();
603 now = via_read_time(); 712 break;
604 } else { 713 default:
605 now = 0; 714 now = 0;
606 } 715 }
607 716
@@ -619,15 +728,20 @@ int mac_hwclk(int op, struct rtc_time *t)
619 now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, 728 now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
620 t->tm_hour, t->tm_min, t->tm_sec); 729 t->tm_hour, t->tm_min, t->tm_sec);
621 730
622 if (macintosh_config->adb_type == MAC_ADB_II) { 731 switch (macintosh_config->adb_type) {
623 via_write_time(now); 732 case MAC_ADB_II:
624 } else if ((macintosh_config->adb_type == MAC_ADB_IISI) || 733 case MAC_ADB_IOP:
625 (macintosh_config->adb_type == MAC_ADB_PB1) ||
626 (macintosh_config->adb_type == MAC_ADB_PB2) ||
627 (macintosh_config->adb_type == MAC_ADB_CUDA)) {
628 adb_write_time(now);
629 } else if (macintosh_config->adb_type == MAC_ADB_IOP) {
630 via_write_time(now); 734 via_write_time(now);
735 break;
736 case MAC_ADB_CUDA:
737 cuda_write_time(now);
738 break;
739 case MAC_ADB_PB1:
740 case MAC_ADB_PB2:
741 pmu_write_time(now);
742 break;
743 case MAC_ADB_IISI:
744 maciisi_write_time(now);
631 } 745 }
632#endif 746#endif
633 } 747 }
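
The rewritten misc.c drops the old adb_request(..., ADBREQ_RAW|ADBREQ_SYNC, ...) calls in favor of the per-transport entry points plus an explicit completion poll. The recurring shape, reduced to its core (cuda_request() and cuda_poll() are the real via-cuda driver calls; error handling beyond the queueing check is trimmed):

        struct adb_request req;

        /* A negative return means the packet was never queued. */
        if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
                return 0;

        /* Spin until the reply lands; these paths (clock read,
         * shutdown, restart) can afford the busy-wait. */
        while (!req.complete)
                cuda_poll();

The pmu_* variants follow the same pattern with pmu_poll(); the maciisi_request() calls appear to complete synchronously, since the reply is read with no poll in between. The #else stubs keep the switch statements in mac_pram_read/write and mac_hwclk buildable when a transport is configured out.
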
diff --git a/arch/m68k/math-emu/multi_arith.h b/arch/m68k/math-emu/multi_arith.h
index 02251e5afd89..4ad0ca918e2e 100644
--- a/arch/m68k/math-emu/multi_arith.h
+++ b/arch/m68k/math-emu/multi_arith.h
@@ -366,7 +366,7 @@ static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
366 366
367#define fp_mul64(desth, destl, src1, src2) ({ \ 367#define fp_mul64(desth, destl, src1, src2) ({ \
368 asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth) \ 368 asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth) \
369 : "g" (src1), "0" (src2)); \ 369 : "dm" (src1), "0" (src2)); \
370}) 370})
371#define fp_div64(quot, rem, srch, srcl, div) \ 371#define fp_div64(quot, rem, srch, srcl, div) \
372 asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem) \ 372 asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem) \
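
The "g" -> "dm" change in fp_mul64 is a correctness fix rather than a cleanup: "g" admits operands such as address registers that mulu.l's source effective address cannot encode, so GCC was free to pick something the instruction cannot take. "dm" restricts the source to a data register or memory. Usage in isolation (values are arbitrary):

        unsigned int hi, lo;
        unsigned int a = 0x12345678, b = 0x9abcdef0;

        /* 32x32->64 multiply: hi:lo = a * b. "0" ties b to the
         * low-word result register; "dm" constrains where a may live. */
        asm ("mulu.l %2,%1:%0"
             : "=d" (lo), "=d" (hi)
             : "dm" (a), "0" (b));
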
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index fe2383e36b06..85ad19a0ac79 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -102,7 +102,7 @@ static inline void free_io_area(void *addr)
102 */ 102 */
103/* Rewritten by Andreas Schwab to remove all races. */ 103/* Rewritten by Andreas Schwab to remove all races. */
104 104
105void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) 105void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
106{ 106{
107 struct vm_struct *area; 107 struct vm_struct *area;
108 unsigned long virtaddr, retaddr; 108 unsigned long virtaddr, retaddr;
@@ -121,7 +121,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
121 if (MACH_IS_AMIGA) { 121 if (MACH_IS_AMIGA) {
122 if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000) 122 if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
123 && (cacheflag == IOMAP_NOCACHE_SER)) 123 && (cacheflag == IOMAP_NOCACHE_SER))
124 return (void *)physaddr; 124 return (void __iomem *)physaddr;
125 } 125 }
126#endif 126#endif
127 127
@@ -218,21 +218,21 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
218#endif 218#endif
219 flush_tlb_all(); 219 flush_tlb_all();
220 220
221 return (void *)retaddr; 221 return (void __iomem *)retaddr;
222} 222}
223 223
224/* 224/*
225 * Unmap a ioremap()ed region again 225 * Unmap a ioremap()ed region again
226 */ 226 */
227void iounmap(void *addr) 227void iounmap(void __iomem *addr)
228{ 228{
229#ifdef CONFIG_AMIGA 229#ifdef CONFIG_AMIGA
230 if ((!MACH_IS_AMIGA) || 230 if ((!MACH_IS_AMIGA) ||
231 (((unsigned long)addr < 0x40000000) || 231 (((unsigned long)addr < 0x40000000) ||
232 ((unsigned long)addr > 0x60000000))) 232 ((unsigned long)addr > 0x60000000)))
233 free_io_area(addr); 233 free_io_area((__force void *)addr);
234#else 234#else
235 free_io_area(addr); 235 free_io_area((__force void *)addr);
236#endif 236#endif
237} 237}
238 238
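
__ioremap() now returns void __iomem *, the sparse-checked cousin of __user for MMIO mappings: the returned cookie must flow through the I/O accessor functions, and __force marks the one deliberate cast back to a plain pointer at the free_io_area() boundary. Generic usage (probe_mmio() and the physical address are made up):

        static int probe_mmio(void)
        {
                void __iomem *regs;

                regs = ioremap(0x40000000, 0x100);
                if (!regs)
                        return -ENOMEM;

                writeb(0x01, regs + 0x10);      /* accessor, never *regs */
                iounmap(regs);
                return 0;
        }
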
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 30f5921ece9b..a69fe3048edc 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -45,6 +45,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
45 volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE; 45 volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE;
46 unsigned long flags; 46 unsigned long flags;
47 struct rtc_time wtime; 47 struct rtc_time wtime;
48 void __user *argp = (void __user *)arg;
48 49
49 switch (cmd) { 50 switch (cmd) {
50 case RTC_RD_TIME: /* Read the time/date from RTC */ 51 case RTC_RD_TIME: /* Read the time/date from RTC */
@@ -64,7 +65,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
64 wtime.tm_wday = BCD2BIN(rtc->bcd_dow)-1; 65 wtime.tm_wday = BCD2BIN(rtc->bcd_dow)-1;
65 rtc->ctrl = 0; 66 rtc->ctrl = 0;
66 local_irq_restore(flags); 67 local_irq_restore(flags);
67 return copy_to_user((void *)arg, &wtime, sizeof wtime) ? 68 return copy_to_user(argp, &wtime, sizeof wtime) ?
68 -EFAULT : 0; 69 -EFAULT : 0;
69 } 70 }
70 case RTC_SET_TIME: /* Set the RTC */ 71 case RTC_SET_TIME: /* Set the RTC */
@@ -76,8 +77,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
76 if (!capable(CAP_SYS_ADMIN)) 77 if (!capable(CAP_SYS_ADMIN))
77 return -EACCES; 78 return -EACCES;
78 79
79 if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, 80 if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
80 sizeof(struct rtc_time)))
81 return -EFAULT; 81 return -EFAULT;
82 82
83 yrs = rtc_tm.tm_year; 83 yrs = rtc_tm.tm_year;
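
The argp local is the standard ioctl cleanup: cast the unsigned long argument to void __user * once, and every copy_from_user()/copy_to_user() in the switch type-checks without per-case casts. The shape, reduced (foo_ioctl() is hypothetical):

        static int foo_ioctl(unsigned int cmd, unsigned long arg)
        {
                void __user *argp = (void __user *)arg;
                struct rtc_time tm;

                switch (cmd) {
                case RTC_RD_TIME:
                        /* ... fill tm from the hardware ... */
                        return copy_to_user(argp, &tm, sizeof(tm)) ? -EFAULT : 0;
                case RTC_SET_TIME:
                        if (copy_from_user(&tm, argp, sizeof(tm)))
                                return -EFAULT;
                        /* ... program the hardware ... */
                        return 0;
                }
                return -ENOTTY;
        }
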
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 02b626bae4ae..5e0f9b04d45e 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -36,8 +36,6 @@
36#include <asm/machdep.h> 36#include <asm/machdep.h>
37#include <asm/q40_master.h> 37#include <asm/q40_master.h>
38 38
39extern void floppy_setup(char *str, int *ints);
40
41extern irqreturn_t q40_process_int (int level, struct pt_regs *regs); 39extern irqreturn_t q40_process_int (int level, struct pt_regs *regs);
42extern irqreturn_t (*q40_default_handler[]) (int, void *, struct pt_regs *); /* added just for debugging */ 40extern irqreturn_t (*q40_default_handler[]) (int, void *, struct pt_regs *); /* added just for debugging */
43extern void q40_init_IRQ (void); 41extern void q40_init_IRQ (void);
@@ -194,9 +192,6 @@ void __init config_q40(void)
194 mach_heartbeat = q40_heartbeat; 192 mach_heartbeat = q40_heartbeat;
195#endif 193#endif
196 mach_halt = q40_halt; 194 mach_halt = q40_halt;
197#ifdef CONFIG_DUMMY_CONSOLE
198 conswitchp = &dummy_con;
199#endif
200 195
201 /* disable a few things that SMSQ might have left enabled */ 196 /* disable a few things that SMSQ might have left enabled */
202 q40_disable_irqs(); 197 q40_disable_irqs();
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 77d05bcc3221..f1ca0dfbaa67 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -160,9 +160,6 @@ void __init config_sun3(void)
160 mach_hwclk = sun3_hwclk; 160 mach_hwclk = sun3_hwclk;
161 mach_halt = sun3_halt; 161 mach_halt = sun3_halt;
162 mach_get_hardware_list = sun3_get_hardware_list; 162 mach_get_hardware_list = sun3_get_hardware_list;
163#if defined(CONFIG_DUMMY_CONSOLE)
164 conswitchp = &dummy_con;
165#endif
166 163
167 memory_start = ((((int)&_end) + 0x2000) & ~0x1fff); 164 memory_start = ((((int)&_end) + 0x2000) & ~0x1fff);
168// PROM seems to want the last couple of physical pages. --m 165// PROM seems to want the last couple of physical pages. --m
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 0ef547f5494d..0920f5d33606 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -71,10 +71,6 @@ void __init config_sun3x(void)
71 mach_get_model = sun3_get_model; 71 mach_get_model = sun3_get_model;
72 mach_get_hardware_list = sun3x_get_hardware_list; 72 mach_get_hardware_list = sun3x_get_hardware_list;
73 73
74#ifdef CONFIG_DUMMY_CONSOLE
75 conswitchp = &dummy_con;
76#endif
77
78 sun3_intreg = (unsigned char *)SUN3X_INTREG; 74 sun3_intreg = (unsigned char *)SUN3X_INTREG;
79 75
80 /* only the serial console is known to work anyway... */ 76 /* only the serial console is known to work anyway... */
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 8b3cf57ba706..99bf43824795 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -198,10 +198,9 @@ int copy_thread(int nr, unsigned long clone_flags,
198{ 198{
199 struct pt_regs * childregs; 199 struct pt_regs * childregs;
200 struct switch_stack * childstack, *stack; 200 struct switch_stack * childstack, *stack;
201 unsigned long stack_offset, *retp; 201 unsigned long *retp;
202 202
203 stack_offset = THREAD_SIZE - sizeof(struct pt_regs); 203 childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
204 childregs = (struct pt_regs *) ((unsigned long) p->thread_info + stack_offset);
205 204
206 *childregs = *regs; 205 *childregs = *regs;
207 childregs->d0 = 0; 206 childregs->d0 = 0;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 0476a4dce14e..fa98f10d0132 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -140,12 +140,12 @@ void flush_thread(void)
140int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 140int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
141 unsigned long unused, struct task_struct *p, struct pt_regs *regs) 141 unsigned long unused, struct task_struct *p, struct pt_regs *regs)
142{ 142{
143 struct thread_info *ti = p->thread_info; 143 struct thread_info *ti = task_thread_info(p);
144 struct pt_regs *childregs; 144 struct pt_regs *childregs;
145 long childksp; 145 long childksp;
146 p->set_child_tid = p->clear_child_tid = NULL; 146 p->set_child_tid = p->clear_child_tid = NULL;
147 147
148 childksp = (unsigned long)ti + THREAD_SIZE - 32; 148 childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
149 149
150 preempt_disable(); 150 preempt_disable();
151 151
@@ -229,9 +229,7 @@ void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
229 229
230int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs) 230int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs)
231{ 231{
232 struct thread_info *ti = tsk->thread_info; 232 elf_dump_regs(*regs, task_pt_regs(tsk));
233 long ksp = (unsigned long)ti + THREAD_SIZE - 32;
234 elf_dump_regs(&(*regs)[0], (struct pt_regs *) ksp - 1);
235 return 1; 233 return 1;
236} 234}
237 235
@@ -409,7 +407,7 @@ unsigned long get_wchan(struct task_struct *p)
409 if (!p || p == current || p->state == TASK_RUNNING) 407 if (!p || p == current || p->state == TASK_RUNNING)
410 return 0; 408 return 0;
411 409
412 stack_page = (unsigned long)p->thread_info; 410 stack_page = (unsigned long)task_stack_page(p);
413 if (!stack_page || !mips_frame_info_initialized) 411 if (!stack_page || !mips_frame_info_initialized)
414 return 0; 412 return 0;
415 413
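
task_pt_regs() folds the repeated "thread_info + THREAD_SIZE - 32 - sizeof(struct pt_regs)" arithmetic into one accessor. On MIPS it amounts to the following (a sketch mirroring the expression the hunks below delete; the real definition lives in the architecture headers):

        /* Top of kernel stack, minus the 32-byte argument save area,
         * minus one pt_regs: where the saved user registers live. */
        #define task_pt_regs(tsk) \
                ((struct pt_regs *)((unsigned long)task_stack_page(tsk) + \
                                    THREAD_SIZE - 32) - 1)

dump_task_regs() above gains the same helper in place of its own copy of the arithmetic, which is the whole point of introducing it.
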
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 8d2549335304..f838b36cc765 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -64,8 +64,7 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
64 if (!access_ok(VERIFY_WRITE, data, 38 * 8)) 64 if (!access_ok(VERIFY_WRITE, data, 38 * 8))
65 return -EIO; 65 return -EIO;
66 66
67 regs = (struct pt_regs *) ((unsigned long) child->thread_info + 67 regs = task_pt_regs(child);
68 THREAD_SIZE - 32 - sizeof(struct pt_regs));
69 68
70 for (i = 0; i < 32; i++) 69 for (i = 0; i < 32; i++)
71 __put_user (regs->regs[i], data + i); 70 __put_user (regs->regs[i], data + i);
@@ -92,8 +91,7 @@ int ptrace_setregs (struct task_struct *child, __s64 __user *data)
92 if (!access_ok(VERIFY_READ, data, 38 * 8)) 91 if (!access_ok(VERIFY_READ, data, 38 * 8))
93 return -EIO; 92 return -EIO;
94 93
95 regs = (struct pt_regs *) ((unsigned long) child->thread_info + 94 regs = task_pt_regs(child);
96 THREAD_SIZE - 32 - sizeof(struct pt_regs));
97 95
98 for (i = 0; i < 32; i++) 96 for (i = 0; i < 32; i++)
99 __get_user (regs->regs[i], data + i); 97 __get_user (regs->regs[i], data + i);
@@ -198,8 +196,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
198 struct pt_regs *regs; 196 struct pt_regs *regs;
199 unsigned long tmp = 0; 197 unsigned long tmp = 0;
200 198
201 regs = (struct pt_regs *) ((unsigned long) child->thread_info + 199 regs = task_pt_regs(child);
202 THREAD_SIZE - 32 - sizeof(struct pt_regs));
203 ret = 0; /* Default return value. */ 200 ret = 0; /* Default return value. */
204 201
205 switch (addr) { 202 switch (addr) {
@@ -314,8 +311,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
314 case PTRACE_POKEUSR: { 311 case PTRACE_POKEUSR: {
315 struct pt_regs *regs; 312 struct pt_regs *regs;
316 ret = 0; 313 ret = 0;
317 regs = (struct pt_regs *) ((unsigned long) child->thread_info + 314 regs = task_pt_regs(child);
318 THREAD_SIZE - 32 - sizeof(struct pt_regs));
319 315
320 switch (addr) { 316 switch (addr) {
321 case 0 ... 31: 317 case 0 ... 31:
@@ -442,7 +438,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
442 break; 438 break;
443 439
444 case PTRACE_GET_THREAD_AREA: 440 case PTRACE_GET_THREAD_AREA:
445 ret = put_user(child->thread_info->tp_value, 441 ret = put_user(task_thread_info(child)->tp_value,
446 (unsigned long __user *) data); 442 (unsigned long __user *) data);
447 break; 443 break;
448 444
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 1f998bfde165..0c82b25d8c6d 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -126,8 +126,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
126 struct pt_regs *regs; 126 struct pt_regs *regs;
127 unsigned int tmp; 127 unsigned int tmp;
128 128
129 regs = (struct pt_regs *) ((unsigned long) child->thread_info + 129 regs = task_pt_regs(child);
130 THREAD_SIZE - 32 - sizeof(struct pt_regs));
131 ret = 0; /* Default return value. */ 130 ret = 0; /* Default return value. */
132 131
133 switch (addr) { 132 switch (addr) {
@@ -259,8 +258,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
259 case PTRACE_POKEUSR: { 258 case PTRACE_POKEUSR: {
260 struct pt_regs *regs; 259 struct pt_regs *regs;
261 ret = 0; 260 ret = 0;
262 regs = (struct pt_regs *) ((unsigned long) child->thread_info + 261 regs = task_pt_regs(child);
263 THREAD_SIZE - 32 - sizeof(struct pt_regs));
264 262
265 switch (addr) { 263 switch (addr) {
266 case 0 ... 31: 264 case 0 ... 31:
@@ -377,7 +375,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
377 break; 375 break;
378 376
379 case PTRACE_GET_THREAD_AREA: 377 case PTRACE_GET_THREAD_AREA:
380 ret = put_user(child->thread_info->tp_value, 378 ret = put_user(task_thread_info(child)->tp_value,
381 (unsigned int __user *) (unsigned long) data); 379 (unsigned int __user *) (unsigned long) data);
382 break; 380 break;
383 381
@@ -391,7 +389,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
391 break; 389 break;
392 390
393 case PTRACE_GET_THREAD_AREA_3264: 391 case PTRACE_GET_THREAD_AREA_3264:
394 ret = put_user(child->thread_info->tp_value, 392 ret = put_user(task_thread_info(child)->tp_value,
395 (unsigned long __user *) (unsigned long) data); 393 (unsigned long __user *) (unsigned long) data);
396 break; 394 break;
397 395
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c
index d429544ba4bc..794a1c3de2a4 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp_mt.c
@@ -287,6 +287,7 @@ void prom_prepare_cpus(unsigned int max_cpus)
287 */ 287 */
288void prom_boot_secondary(int cpu, struct task_struct *idle) 288void prom_boot_secondary(int cpu, struct task_struct *idle)
289{ 289{
290 struct thread_info *gp = task_thread_info(idle);
290 dvpe(); 291 dvpe();
291 set_c0_mvpcontrol(MVPCONTROL_VPC); 292 set_c0_mvpcontrol(MVPCONTROL_VPC);
292 293
@@ -307,11 +308,9 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
307 write_tc_gpr_sp( __KSTK_TOS(idle)); 308 write_tc_gpr_sp( __KSTK_TOS(idle));
308 309
309 /* global pointer */ 310 /* global pointer */
310 write_tc_gpr_gp((unsigned long)idle->thread_info); 311 write_tc_gpr_gp((unsigned long)gp);
311 312
312 flush_icache_range((unsigned long)idle->thread_info, 313 flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
313 (unsigned long)idle->thread_info +
314 sizeof(struct thread_info));
315 314
316 /* finally out of configuration and into chaos */ 315 /* finally out of configuration and into chaos */
317 clear_c0_mvpcontrol(MVPCONTROL_VPC); 316 clear_c0_mvpcontrol(MVPCONTROL_VPC);
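
Caching task_thread_info(idle) in a typed local does double duty here: it names the global-pointer value once, and it turns the icache-flush bounds into plain pointer arithmetic, since gp + 1 steps exactly sizeof(struct thread_info) bytes past gp. The idiom in isolation:

        struct thread_info *gp = task_thread_info(idle);

        /* Covers [gp, gp + sizeof(*gp)): one whole thread_info. */
        flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
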
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 006881942aa2..332358430ff5 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -263,7 +263,7 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
263 263
264void sys_set_thread_area(unsigned long addr) 264void sys_set_thread_area(unsigned long addr)
265{ 265{
266 struct thread_info *ti = current->thread_info; 266 struct thread_info *ti = task_thread_info(current);
267 267
268 ti->tp_value = addr; 268 ti->tp_value = addr;
269 269
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7058893d5ad2..59a187956de0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -519,7 +519,7 @@ static inline int simulate_llsc(struct pt_regs *regs)
519 */ 519 */
520static inline int simulate_rdhwr(struct pt_regs *regs) 520static inline int simulate_rdhwr(struct pt_regs *regs)
521{ 521{
522 struct thread_info *ti = current->thread_info; 522 struct thread_info *ti = task_thread_info(current);
523 unsigned int opcode; 523 unsigned int opcode;
524 524
525 if (unlikely(get_insn_opcode(regs, &opcode))) 525 if (unlikely(get_insn_opcode(regs, &opcode)))
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 0527170d6adb..f17f575f58f0 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -93,8 +93,8 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
93 */ 93 */
94void prom_boot_secondary(int cpu, struct task_struct *idle) 94void prom_boot_secondary(int cpu, struct task_struct *idle)
95{ 95{
96 unsigned long gp = (unsigned long) idle->thread_info; 96 unsigned long gp = (unsigned long) task_thread_info(idle);
97 unsigned long sp = gp + THREAD_SIZE - 32; 97 unsigned long sp = __KSTK_TOP(idle);
98 98
99 secondary_sp = sp; 99 secondary_sp = sp;
100 secondary_gp = gp; 100 secondary_gp = gp;
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 3a8291b7d26d..dbef3f6b5650 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -168,8 +168,8 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
168 */ 168 */
169void __init prom_boot_secondary(int cpu, struct task_struct *idle) 169void __init prom_boot_secondary(int cpu, struct task_struct *idle)
170{ 170{
171 unsigned long gp = (unsigned long) idle->thread_info; 171 unsigned long gp = (unsigned long)task_thread_info(idle);
172 unsigned long sp = gp + THREAD_SIZE - 32; 172 unsigned long sp = __KSTK_TOS(idle);
173 173
174 LAUNCH_SLAVE(cputonasid(cpu),cputoslice(cpu), 174 LAUNCH_SLAVE(cputonasid(cpu),cputoslice(cpu),
175 (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap), 175 (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
diff --git a/arch/mips/sibyte/cfe/smp.c b/arch/mips/sibyte/cfe/smp.c
index e8485124b8fc..4477af3d8074 100644
--- a/arch/mips/sibyte/cfe/smp.c
+++ b/arch/mips/sibyte/cfe/smp.c
@@ -60,7 +60,7 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
60 60
61 retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, 61 retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
62 __KSTK_TOS(idle), 62 __KSTK_TOS(idle),
63 (unsigned long)idle->thread_info, 0); 63 (unsigned long)task_thread_info(idle), 0);
64 if (retval != 0) 64 if (retval != 0)
65 printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); 65 printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
66} 66}
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 4eb70a40ec7e..5da41677e70b 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -295,7 +295,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
295 struct task_struct * p, struct pt_regs * pregs) 295 struct task_struct * p, struct pt_regs * pregs)
296{ 296{
297 struct pt_regs * cregs = &(p->thread.regs); 297 struct pt_regs * cregs = &(p->thread.regs);
298 struct thread_info *ti = p->thread_info; 298 void *stack = task_stack_page(p);
299 299
300 /* We have to use void * instead of a function pointer, because 300 /* We have to use void * instead of a function pointer, because
301 * function pointers aren't a pointer to the function on 64-bit. 301 * function pointers aren't a pointer to the function on 64-bit.
@@ -322,7 +322,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
322 */ 322 */
323 if (usp == 1) { 323 if (usp == 1) {
324 /* kernel thread */ 324 /* kernel thread */
325 cregs->ksp = (((unsigned long)(ti)) + THREAD_SZ_ALGN); 325 cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN;
326 /* Must exit via ret_from_kernel_thread in order 326 /* Must exit via ret_from_kernel_thread in order
327 * to call schedule_tail() 327 * to call schedule_tail()
328 */ 328 */
@@ -344,7 +344,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
344 */ 344 */
345 345
346 /* Use same stack depth as parent */ 346 /* Use same stack depth as parent */
347 cregs->ksp = ((unsigned long)(ti)) 347 cregs->ksp = (unsigned long)stack
348 + (pregs->gr[21] & (THREAD_SIZE - 1)); 348 + (pregs->gr[21] & (THREAD_SIZE - 1));
349 cregs->gr[30] = usp; 349 cregs->gr[30] = usp;
350 if (p->personality == PER_HPUX) { 350 if (p->personality == PER_HPUX) {
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 17f23c26f1ca..25564b7ca6bb 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -517,7 +517,7 @@ int __init smp_boot_one_cpu(int cpuid)
517 if (IS_ERR(idle)) 517 if (IS_ERR(idle))
518 panic("SMP: fork failed for CPU:%d", cpuid); 518 panic("SMP: fork failed for CPU:%d", cpuid);
519 519
520 idle->thread_info->cpu = cpuid; 520 task_thread_info(idle)->cpu = cpuid;
521 521
522 /* Let _start know what logical CPU we're booting 522 /* Let _start know what logical CPU we're booting
523 ** (offset into init_tasks[],cpu_data[]) 523 ** (offset into init_tasks[],cpu_data[])
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9101358cc6b3..57703994a063 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -426,7 +426,7 @@ void show_regs(struct pt_regs * regs)
426 if (trap == 0x300 || trap == 0x600) 426 if (trap == 0x300 || trap == 0x600)
427 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); 427 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
428 printk("TASK = %p[%d] '%s' THREAD: %p", 428 printk("TASK = %p[%d] '%s' THREAD: %p",
429 current, current->pid, current->comm, current->thread_info); 429 current, current->pid, current->comm, task_thread_info(current));
430 430
431#ifdef CONFIG_SMP 431#ifdef CONFIG_SMP
432 printk(" CPU: %d", smp_processor_id()); 432 printk(" CPU: %d", smp_processor_id());
@@ -505,7 +505,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
505{ 505{
506 struct pt_regs *childregs, *kregs; 506 struct pt_regs *childregs, *kregs;
507 extern void ret_from_fork(void); 507 extern void ret_from_fork(void);
508 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE; 508 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
509 509
510 CHECK_FULL_REGS(regs); 510 CHECK_FULL_REGS(regs);
511 /* Copy registers */ 511 /* Copy registers */
@@ -518,7 +518,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
518#ifdef CONFIG_PPC32 518#ifdef CONFIG_PPC32
519 childregs->gpr[2] = (unsigned long) p; 519 childregs->gpr[2] = (unsigned long) p;
520#else 520#else
521 clear_ti_thread_flag(p->thread_info, TIF_32BIT); 521 clear_tsk_thread_flag(p, TIF_32BIT);
522#endif 522#endif
523 p->thread.regs = NULL; /* no user register state */ 523 p->thread.regs = NULL; /* no user register state */
524 } else { 524 } else {
@@ -590,10 +590,8 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
590 * set. Do it now. 590 * set. Do it now.
591 */ 591 */
592 if (!current->thread.regs) { 592 if (!current->thread.regs) {
593 unsigned long childregs = (unsigned long)current->thread_info + 593 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
594 THREAD_SIZE; 594 current->thread.regs = regs - 1;
595 childregs -= sizeof(struct pt_regs);
596 current->thread.regs = (struct pt_regs *)childregs;
597 } 595 }
598 596
599 memset(regs->gpr, 0, sizeof(regs->gpr)); 597 memset(regs->gpr, 0, sizeof(regs->gpr));
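The start_thread() rewrite above trades byte arithmetic for typed pointer arithmetic: once a pointer addresses one past the top of the stack, subtracting 1 steps back by exactly sizeof(struct pt_regs). A sketch of the equivalence, not the patched function itself:

/* Sketch: the two forms compute the same address. */
void *top = task_stack_page(current) + THREAD_SIZE;   /* stack top */
struct pt_regs *frame = (struct pt_regs *)top - 1;    /* typed step back */
/* identical to: (struct pt_regs *)((unsigned long)top
 *                                  - sizeof(struct pt_regs)) */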
@@ -769,7 +767,7 @@ out:
769static int validate_sp(unsigned long sp, struct task_struct *p, 767static int validate_sp(unsigned long sp, struct task_struct *p,
770 unsigned long nbytes) 768 unsigned long nbytes)
771{ 769{
772 unsigned long stack_page = (unsigned long)p->thread_info; 770 unsigned long stack_page = (unsigned long)task_stack_page(p);
773 771
774 if (sp >= stack_page + sizeof(struct thread_struct) 772 if (sp >= stack_page + sizeof(struct thread_struct)
775 && sp <= stack_page + THREAD_SIZE - nbytes) 773 && sp <= stack_page + THREAD_SIZE - nbytes)
diff --git a/arch/powerpc/kernel/ptrace-common.h b/arch/powerpc/kernel/ptrace-common.h
index b1babb729673..5ccbdbe0d5c9 100644
--- a/arch/powerpc/kernel/ptrace-common.h
+++ b/arch/powerpc/kernel/ptrace-common.h
@@ -62,7 +62,7 @@ static inline void set_single_step(struct task_struct *task)
62 struct pt_regs *regs = task->thread.regs; 62 struct pt_regs *regs = task->thread.regs;
63 if (regs != NULL) 63 if (regs != NULL)
64 regs->msr |= MSR_SE; 64 regs->msr |= MSR_SE;
65 set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP); 65 set_tsk_thread_flag(task, TIF_SINGLESTEP);
66} 66}
67 67
68static inline void clear_single_step(struct task_struct *task) 68static inline void clear_single_step(struct task_struct *task)
@@ -70,7 +70,7 @@ static inline void clear_single_step(struct task_struct *task)
70 struct pt_regs *regs = task->thread.regs; 70 struct pt_regs *regs = task->thread.regs;
71 if (regs != NULL) 71 if (regs != NULL)
72 regs->msr &= ~MSR_SE; 72 regs->msr &= ~MSR_SE;
73 clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP); 73 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
74} 74}
75 75
76#ifdef CONFIG_ALTIVEC 76#ifdef CONFIG_ALTIVEC
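set_tsk_thread_flag() and clear_tsk_thread_flag() let the ptrace code talk about the task directly instead of its thread_info. Modulo exact sched.h wording, they are one-line wrappers over the *_ti_thread_flag helpers:

static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}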
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d381ec90b759..c8458c531b25 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -338,8 +338,8 @@ static void __init smp_create_idle(unsigned int cpu)
338#ifdef CONFIG_PPC64 338#ifdef CONFIG_PPC64
339 paca[cpu].__current = p; 339 paca[cpu].__current = p;
340#endif 340#endif
341 current_set[cpu] = p->thread_info; 341 current_set[cpu] = task_thread_info(p);
342 p->thread_info->cpu = cpu; 342 task_thread_info(p)->cpu = cpu;
343} 343}
344 344
345void __init smp_prepare_cpus(unsigned int max_cpus) 345void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -375,7 +375,7 @@ void __devinit smp_prepare_boot_cpu(void)
375#ifdef CONFIG_PPC64 375#ifdef CONFIG_PPC64
376 paca[boot_cpuid].__current = current; 376 paca[boot_cpuid].__current = current;
377#endif 377#endif
378 current_set[boot_cpuid] = current->thread_info; 378 current_set[boot_cpuid] = task_thread_info(current);
379} 379}
380 380
381#ifdef CONFIG_HOTPLUG_CPU 381#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index de96eadf419d..bdf6c5fe58c0 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -86,7 +86,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
86 pcpu = get_hard_smp_processor_id(lcpu); 86 pcpu = get_hard_smp_processor_id(lcpu);
87 87
88 /* Fixup atomic count: it exited inside IRQ handler. */ 88 /* Fixup atomic count: it exited inside IRQ handler. */
89 paca[lcpu].__current->thread_info->preempt_count = 0; 89 task_thread_info(paca[lcpu].__current)->preempt_count = 0;
90 90
91 /* 91 /*
92 * If the RTAS start-cpu token does not exist then presume the 92 * If the RTAS start-cpu token does not exist then presume the
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index e4d017dd5ef3..8e6b1ed1396e 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -282,7 +282,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
282 pcpu = get_hard_smp_processor_id(lcpu); 282 pcpu = get_hard_smp_processor_id(lcpu);
283 283
284 /* Fixup atomic count: it exited inside IRQ handler. */ 284 /* Fixup atomic count: it exited inside IRQ handler. */
285 paca[lcpu].__current->thread_info->preempt_count = 0; 285 task_thread_info(paca[lcpu].__current)->preempt_count = 0;
286 286
287 /* 287 /*
288 * If the RTAS start-cpu token does not exist then presume the 288 * If the RTAS start-cpu token does not exist then presume the
diff --git a/arch/ppc/amiga/amiints.c b/arch/ppc/amiga/amiints.c
index 91195e2ce38d..5f35cf3986f7 100644
--- a/arch/ppc/amiga/amiints.c
+++ b/arch/ppc/amiga/amiints.c
@@ -96,8 +96,8 @@ void amiga_init_IRQ(void)
96 gayle.inten = GAYLE_IRQ_IDE; 96 gayle.inten = GAYLE_IRQ_IDE;
97 97
98 /* turn off all interrupts... */ 98 /* turn off all interrupts... */
99 custom.intena = 0x7fff; 99 amiga_custom.intena = 0x7fff;
100 custom.intreq = 0x7fff; 100 amiga_custom.intreq = 0x7fff;
101 101
102#ifdef CONFIG_APUS 102#ifdef CONFIG_APUS
103 /* Clear any inter-CPU interrupt requests. Circumvents bug in 103 /* Clear any inter-CPU interrupt requests. Circumvents bug in
@@ -110,7 +110,7 @@ void amiga_init_IRQ(void)
110 APUS_WRITE(APUS_IPL_EMU, IPLEMU_SETRESET | IPLEMU_IPLMASK); 110 APUS_WRITE(APUS_IPL_EMU, IPLEMU_SETRESET | IPLEMU_IPLMASK);
111#endif 111#endif
112 /* ... and enable the master interrupt bit */ 112 /* ... and enable the master interrupt bit */
113 custom.intena = IF_SETCLR | IF_INTEN; 113 amiga_custom.intena = IF_SETCLR | IF_INTEN;
114 114
115 cia_init_IRQ(&ciaa_base); 115 cia_init_IRQ(&ciaa_base);
116 cia_init_IRQ(&ciab_base); 116 cia_init_IRQ(&ciab_base);
@@ -151,7 +151,7 @@ void amiga_enable_irq(unsigned int irq)
151 } 151 }
152 152
153 /* enable the interrupt */ 153 /* enable the interrupt */
154 custom.intena = IF_SETCLR | ami_intena_vals[irq]; 154 amiga_custom.intena = IF_SETCLR | ami_intena_vals[irq];
155} 155}
156 156
157void amiga_disable_irq(unsigned int irq) 157void amiga_disable_irq(unsigned int irq)
@@ -177,7 +177,7 @@ void amiga_disable_irq(unsigned int irq)
177 } 177 }
178 178
179 /* disable the interrupt */ 179 /* disable the interrupt */
180 custom.intena = ami_intena_vals[irq]; 180 amiga_custom.intena = ami_intena_vals[irq];
181} 181}
182 182
183inline void amiga_do_irq(int irq, struct pt_regs *fp) 183inline void amiga_do_irq(int irq, struct pt_regs *fp)
@@ -196,7 +196,7 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
196 196
197 kstat_cpu(0).irqs[irq]++; 197 kstat_cpu(0).irqs[irq]++;
198 198
199 custom.intreq = ami_intena_vals[irq]; 199 amiga_custom.intreq = ami_intena_vals[irq];
200 200
201 for (action = desc->action; action; action = action->next) 201 for (action = desc->action; action; action = action->next)
202 action->handler(irq, action->dev_id, fp); 202 action->handler(irq, action->dev_id, fp);
@@ -208,40 +208,40 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
208 208
209static void ami_int1(int irq, void *dev_id, struct pt_regs *fp) 209static void ami_int1(int irq, void *dev_id, struct pt_regs *fp)
210{ 210{
211 unsigned short ints = custom.intreqr & custom.intenar; 211 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
212 212
213 /* if serial transmit buffer empty, interrupt */ 213 /* if serial transmit buffer empty, interrupt */
214 if (ints & IF_TBE) { 214 if (ints & IF_TBE) {
215 custom.intreq = IF_TBE; 215 amiga_custom.intreq = IF_TBE;
216 amiga_do_irq(IRQ_AMIGA_TBE, fp); 216 amiga_do_irq(IRQ_AMIGA_TBE, fp);
217 } 217 }
218 218
219 /* if floppy disk transfer complete, interrupt */ 219 /* if floppy disk transfer complete, interrupt */
220 if (ints & IF_DSKBLK) { 220 if (ints & IF_DSKBLK) {
221 custom.intreq = IF_DSKBLK; 221 amiga_custom.intreq = IF_DSKBLK;
222 amiga_do_irq(IRQ_AMIGA_DSKBLK, fp); 222 amiga_do_irq(IRQ_AMIGA_DSKBLK, fp);
223 } 223 }
224 224
225 /* if software interrupt set, interrupt */ 225 /* if software interrupt set, interrupt */
226 if (ints & IF_SOFT) { 226 if (ints & IF_SOFT) {
227 custom.intreq = IF_SOFT; 227 amiga_custom.intreq = IF_SOFT;
228 amiga_do_irq(IRQ_AMIGA_SOFT, fp); 228 amiga_do_irq(IRQ_AMIGA_SOFT, fp);
229 } 229 }
230} 230}
231 231
232static void ami_int3(int irq, void *dev_id, struct pt_regs *fp) 232static void ami_int3(int irq, void *dev_id, struct pt_regs *fp)
233{ 233{
234 unsigned short ints = custom.intreqr & custom.intenar; 234 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
235 235
236 /* if a blitter interrupt */ 236 /* if a blitter interrupt */
237 if (ints & IF_BLIT) { 237 if (ints & IF_BLIT) {
238 custom.intreq = IF_BLIT; 238 amiga_custom.intreq = IF_BLIT;
239 amiga_do_irq(IRQ_AMIGA_BLIT, fp); 239 amiga_do_irq(IRQ_AMIGA_BLIT, fp);
240 } 240 }
241 241
242 /* if a copper interrupt */ 242 /* if a copper interrupt */
243 if (ints & IF_COPER) { 243 if (ints & IF_COPER) {
244 custom.intreq = IF_COPER; 244 amiga_custom.intreq = IF_COPER;
245 amiga_do_irq(IRQ_AMIGA_COPPER, fp); 245 amiga_do_irq(IRQ_AMIGA_COPPER, fp);
246 } 246 }
247 247
@@ -252,36 +252,36 @@ static void ami_int3(int irq, void *dev_id, struct pt_regs *fp)
252 252
253static void ami_int4(int irq, void *dev_id, struct pt_regs *fp) 253static void ami_int4(int irq, void *dev_id, struct pt_regs *fp)
254{ 254{
255 unsigned short ints = custom.intreqr & custom.intenar; 255 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
256 256
257 /* if audio 0 interrupt */ 257 /* if audio 0 interrupt */
258 if (ints & IF_AUD0) { 258 if (ints & IF_AUD0) {
259 custom.intreq = IF_AUD0; 259 amiga_custom.intreq = IF_AUD0;
260 amiga_do_irq(IRQ_AMIGA_AUD0, fp); 260 amiga_do_irq(IRQ_AMIGA_AUD0, fp);
261 } 261 }
262 262
263 /* if audio 1 interrupt */ 263 /* if audio 1 interrupt */
264 if (ints & IF_AUD1) { 264 if (ints & IF_AUD1) {
265 custom.intreq = IF_AUD1; 265 amiga_custom.intreq = IF_AUD1;
266 amiga_do_irq(IRQ_AMIGA_AUD1, fp); 266 amiga_do_irq(IRQ_AMIGA_AUD1, fp);
267 } 267 }
268 268
269 /* if audio 2 interrupt */ 269 /* if audio 2 interrupt */
270 if (ints & IF_AUD2) { 270 if (ints & IF_AUD2) {
271 custom.intreq = IF_AUD2; 271 amiga_custom.intreq = IF_AUD2;
272 amiga_do_irq(IRQ_AMIGA_AUD2, fp); 272 amiga_do_irq(IRQ_AMIGA_AUD2, fp);
273 } 273 }
274 274
275 /* if audio 3 interrupt */ 275 /* if audio 3 interrupt */
276 if (ints & IF_AUD3) { 276 if (ints & IF_AUD3) {
277 custom.intreq = IF_AUD3; 277 amiga_custom.intreq = IF_AUD3;
278 amiga_do_irq(IRQ_AMIGA_AUD3, fp); 278 amiga_do_irq(IRQ_AMIGA_AUD3, fp);
279 } 279 }
280} 280}
281 281
282static void ami_int5(int irq, void *dev_id, struct pt_regs *fp) 282static void ami_int5(int irq, void *dev_id, struct pt_regs *fp)
283{ 283{
284 unsigned short ints = custom.intreqr & custom.intenar; 284 unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
285 285
286 /* if serial receive buffer full interrupt */ 286 /* if serial receive buffer full interrupt */
287 if (ints & IF_RBF) { 287 if (ints & IF_RBF) {
@@ -291,7 +291,7 @@ static void ami_int5(int irq, void *dev_id, struct pt_regs *fp)
291 291
292 /* if a disk sync interrupt */ 292 /* if a disk sync interrupt */
293 if (ints & IF_DSKSYN) { 293 if (ints & IF_DSKSYN) {
294 custom.intreq = IF_DSKSYN; 294 amiga_custom.intreq = IF_DSKSYN;
295 amiga_do_irq(IRQ_AMIGA_DSKSYN, fp); 295 amiga_do_irq(IRQ_AMIGA_DSKSYN, fp);
296 } 296 }
297} 297}
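The custom -> amiga_custom rename in the Amiga files changes nothing at the machine level; it only takes a dangerously generic identifier out of the global namespace. A hedged sketch of the kind of declaration behind it; the struct fields and base macro below are illustrative, not the actual amigahw.h text:

/* Illustrative only: the chipset registers are a memory-mapped
 * struct, so amiga_custom.intena = ... is still one MMIO store. */
struct CUSTOM {
        /* ... */
        unsigned short intenar;         /* interrupt enable, read side  */
        unsigned short intreqr;         /* interrupt request, read side */
        /* ... */
        unsigned short intena;          /* interrupt enable, write side */
        unsigned short intreq;          /* interrupt request, write side */
        /* ... */
};
#define amiga_custom \
        (*(volatile struct CUSTOM *)(zTwoBase + CUSTOM_PHYSADDR))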
diff --git a/arch/ppc/amiga/cia.c b/arch/ppc/amiga/cia.c
index ad961465b6cb..4431c58f611a 100644
--- a/arch/ppc/amiga/cia.c
+++ b/arch/ppc/amiga/cia.c
@@ -66,7 +66,7 @@ static unsigned char cia_set_irq_private(struct ciabase *base,
66 else 66 else
67 base->icr_data &= ~mask; 67 base->icr_data &= ~mask;
68 if (base->icr_data & base->icr_mask) 68 if (base->icr_data & base->icr_mask)
69 custom.intreq = IF_SETCLR | base->int_mask; 69 amiga_custom.intreq = IF_SETCLR | base->int_mask;
70 return old & base->icr_mask; 70 return old & base->icr_mask;
71} 71}
72 72
@@ -114,7 +114,7 @@ static unsigned char cia_able_irq_private(struct ciabase *base,
114 base->icr_mask &= CIA_ICR_ALL; 114 base->icr_mask &= CIA_ICR_ALL;
115 115
116 if (base->icr_data & base->icr_mask) 116 if (base->icr_data & base->icr_mask)
117 custom.intreq = IF_SETCLR | base->int_mask; 117 amiga_custom.intreq = IF_SETCLR | base->int_mask;
118 return old; 118 return old;
119} 119}
120 120
@@ -145,7 +145,7 @@ static void cia_handler(int irq, void *dev_id, struct pt_regs *fp)
145 irq = base->cia_irq; 145 irq = base->cia_irq;
146 desc = irq_desc + irq; 146 desc = irq_desc + irq;
147 ints = cia_set_irq_private(base, CIA_ICR_ALL); 147 ints = cia_set_irq_private(base, CIA_ICR_ALL);
148 custom.intreq = base->int_mask; 148 amiga_custom.intreq = base->int_mask;
149 for (i = 0; i < CIA_IRQS; i++, irq++) { 149 for (i = 0; i < CIA_IRQS; i++, irq++) {
150 if (ints & 1) { 150 if (ints & 1) {
151 kstat_cpu(0).irqs[irq]++; 151 kstat_cpu(0).irqs[irq]++;
@@ -174,5 +174,5 @@ void __init cia_init_IRQ(struct ciabase *base)
174 action->name = base->name; 174 action->name = base->name;
175 setup_irq(base->handler_irq, &amiga_sys_irqaction[base->handler_irq-IRQ_AMIGA_AUTO]); 175 setup_irq(base->handler_irq, &amiga_sys_irqaction[base->handler_irq-IRQ_AMIGA_AUTO]);
176 176
177 custom.intena = IF_SETCLR | base->int_mask; 177 amiga_custom.intena = IF_SETCLR | base->int_mask;
178} 178}
diff --git a/arch/ppc/amiga/config.c b/arch/ppc/amiga/config.c
index af881d7454dd..60e2da1c92c0 100644
--- a/arch/ppc/amiga/config.c
+++ b/arch/ppc/amiga/config.c
@@ -90,9 +90,6 @@ static void a3000_gettod (int *, int *, int *, int *, int *, int *);
90static void a2000_gettod (int *, int *, int *, int *, int *, int *); 90static void a2000_gettod (int *, int *, int *, int *, int *, int *);
91static int amiga_hwclk (int, struct hwclk_time *); 91static int amiga_hwclk (int, struct hwclk_time *);
92static int amiga_set_clock_mmss (unsigned long); 92static int amiga_set_clock_mmss (unsigned long);
93#ifdef CONFIG_AMIGA_FLOPPY
94extern void amiga_floppy_setup(char *, int *);
95#endif
96static void amiga_reset (void); 93static void amiga_reset (void);
97extern void amiga_init_sound(void); 94extern void amiga_init_sound(void);
98static void amiga_savekmsg_init(void); 95static void amiga_savekmsg_init(void);
@@ -281,7 +278,7 @@ static void __init amiga_identify(void)
281 case CS_OCS: 278 case CS_OCS:
282 case CS_ECS: 279 case CS_ECS:
283 case CS_AGA: 280 case CS_AGA:
284 switch (custom.deniseid & 0xf) { 281 switch (amiga_custom.deniseid & 0xf) {
285 case 0x0c: 282 case 0x0c:
286 AMIGAHW_SET(DENISE_HR); 283 AMIGAHW_SET(DENISE_HR);
287 break; 284 break;
@@ -294,7 +291,7 @@ static void __init amiga_identify(void)
294 AMIGAHW_SET(DENISE); 291 AMIGAHW_SET(DENISE);
295 break; 292 break;
296 } 293 }
297 switch ((custom.vposr>>8) & 0x7f) { 294 switch ((amiga_custom.vposr>>8) & 0x7f) {
298 case 0x00: 295 case 0x00:
299 AMIGAHW_SET(AGNUS_PAL); 296 AMIGAHW_SET(AGNUS_PAL);
300 break; 297 break;
@@ -419,9 +416,6 @@ void __init config_amiga(void)
419 416
420 mach_hwclk = amiga_hwclk; 417 mach_hwclk = amiga_hwclk;
421 mach_set_clock_mmss = amiga_set_clock_mmss; 418 mach_set_clock_mmss = amiga_set_clock_mmss;
422#ifdef CONFIG_AMIGA_FLOPPY
423 mach_floppy_setup = amiga_floppy_setup;
424#endif
425 mach_reset = amiga_reset; 419 mach_reset = amiga_reset;
426#ifdef CONFIG_HEARTBEAT 420#ifdef CONFIG_HEARTBEAT
427 mach_heartbeat = amiga_heartbeat; 421 mach_heartbeat = amiga_heartbeat;
@@ -432,9 +426,9 @@ void __init config_amiga(void)
432 amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */ 426 amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */
433 427
434 /* clear all DMA bits */ 428 /* clear all DMA bits */
435 custom.dmacon = DMAF_ALL; 429 amiga_custom.dmacon = DMAF_ALL;
436 /* ensure that the DMA master bit is set */ 430 /* ensure that the DMA master bit is set */
437 custom.dmacon = DMAF_SETCLR | DMAF_MASTER; 431 amiga_custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
438 432
439 /* request all RAM */ 433 /* request all RAM */
440 for (i = 0; i < m68k_num_memory; i++) { 434 for (i = 0; i < m68k_num_memory; i++) {
@@ -753,9 +747,9 @@ static void amiga_savekmsg_init(void)
753 747
754static void amiga_serial_putc(char c) 748static void amiga_serial_putc(char c)
755{ 749{
756 custom.serdat = (unsigned char)c | 0x100; 750 amiga_custom.serdat = (unsigned char)c | 0x100;
757 mb(); 751 mb();
758 while (!(custom.serdatr & 0x2000)) 752 while (!(amiga_custom.serdatr & 0x2000))
759 ; 753 ;
760} 754}
761 755
@@ -785,11 +779,11 @@ int amiga_serial_console_wait_key(struct console *co)
785{ 779{
786 int ch; 780 int ch;
787 781
788 while (!(custom.intreqr & IF_RBF)) 782 while (!(amiga_custom.intreqr & IF_RBF))
789 barrier(); 783 barrier();
790 ch = custom.serdatr & 0xff; 784 ch = amiga_custom.serdatr & 0xff;
791 /* clear the interrupt, so that another character can be read */ 785 /* clear the interrupt, so that another character can be read */
792 custom.intreq = IF_RBF; 786 amiga_custom.intreq = IF_RBF;
793 return ch; 787 return ch;
794} 788}
795 789
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index becbfa397556..e55cdda6149a 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -318,7 +318,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
318 p = fork_idle(cpu); 318 p = fork_idle(cpu);
319 if (IS_ERR(p)) 319 if (IS_ERR(p))
320 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 320 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
321 p->thread_info->cpu = cpu; 321 task_thread_info(p)->cpu = cpu;
322 idle_tasks[cpu] = p; 322 idle_tasks[cpu] = p;
323 } 323 }
324} 324}
@@ -369,7 +369,7 @@ int __cpu_up(unsigned int cpu)
369 char buf[32]; 369 char buf[32];
370 int c; 370 int c;
371 371
372 secondary_ti = idle_tasks[cpu]->thread_info; 372 secondary_ti = task_thread_info(idle_tasks[cpu]);
373 mb(); 373 mb();
374 374
375 /* 375 /*
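The two ppc/kernel/smp.c hunks show the whole idle bring-up idiom through the new accessors. A condensed sketch of the pattern the SMP files in this patch share (error handling abbreviated, not a literal excerpt):

/* Sketch of the common bring-up sequence. */
struct task_struct *p = fork_idle(cpu);
if (IS_ERR(p))
        panic("failed fork for CPU %u", cpu);
task_thread_info(p)->cpu = cpu;          /* pin the idle task's CPU   */
secondary_ti = task_thread_info(p);      /* hand it to the secondary  */
mb();                                    /* publish before kicking it */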
diff --git a/arch/ppc/platforms/apus_setup.c b/arch/ppc/platforms/apus_setup.c
index 2f74fde98ebc..c42c50073da5 100644
--- a/arch/ppc/platforms/apus_setup.c
+++ b/arch/ppc/platforms/apus_setup.c
@@ -55,9 +55,6 @@ int (*mach_hwclk) (int, struct hwclk_time*) = NULL;
55int (*mach_set_clock_mmss) (unsigned long) = NULL; 55int (*mach_set_clock_mmss) (unsigned long) = NULL;
56void (*mach_reset)( void ); 56void (*mach_reset)( void );
57long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */ 57long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
58#if defined(CONFIG_AMIGA_FLOPPY)
59void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
60#endif
61#ifdef CONFIG_HEARTBEAT 58#ifdef CONFIG_HEARTBEAT
62void (*mach_heartbeat) (int) = NULL; 59void (*mach_heartbeat) (int) = NULL;
63extern void apus_heartbeat (void); 60extern void apus_heartbeat (void);
@@ -76,7 +73,6 @@ struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
76 73
77struct mem_info ramdisk; 74struct mem_info ramdisk;
78 75
79extern void amiga_floppy_setup(char *, int *);
80extern void config_amiga(void); 76extern void config_amiga(void);
81 77
82static int __60nsram = 0; 78static int __60nsram = 0;
@@ -305,16 +301,6 @@ void kbd_reset_setup(char *str, int *ints)
305{ 301{
306} 302}
307 303
308/*********************************************************** FLOPPY */
309#if defined(CONFIG_AMIGA_FLOPPY)
310__init
311void floppy_setup(char *str, int *ints)
312{
313 if (mach_floppy_setup)
314 mach_floppy_setup (str, ints);
315}
316#endif
317
318/*********************************************************** MEMORY */ 304/*********************************************************** MEMORY */
319#define KMAP_MAX 32 305#define KMAP_MAX 32
320unsigned long kmap_chunks[KMAP_MAX*3]; 306unsigned long kmap_chunks[KMAP_MAX*3];
@@ -574,9 +560,9 @@ static __inline__ void ser_RTSon(void)
574 560
575int __debug_ser_out( unsigned char c ) 561int __debug_ser_out( unsigned char c )
576{ 562{
577 custom.serdat = c | 0x100; 563 amiga_custom.serdat = c | 0x100;
578 mb(); 564 mb();
579 while (!(custom.serdatr & 0x2000)) 565 while (!(amiga_custom.serdatr & 0x2000))
580 barrier(); 566 barrier();
581 return 1; 567 return 1;
582} 568}
@@ -586,11 +572,11 @@ unsigned char __debug_ser_in( void )
586 unsigned char c; 572 unsigned char c;
587 573
588 /* XXX: is that ok?? derived from amiga_ser.c... */ 574 /* XXX: is that ok?? derived from amiga_ser.c... */
589 while( !(custom.intreqr & IF_RBF) ) 575 while( !(amiga_custom.intreqr & IF_RBF) )
590 barrier(); 576 barrier();
591 c = custom.serdatr; 577 c = amiga_custom.serdatr;
592 /* clear the interrupt, so that another character can be read */ 578 /* clear the interrupt, so that another character can be read */
593 custom.intreq = IF_RBF; 579 amiga_custom.intreq = IF_RBF;
594 return c; 580 return c;
595} 581}
596 582
@@ -601,10 +587,10 @@ int __debug_serinit( void )
601 local_irq_save(flags); 587 local_irq_save(flags);
602 588
603 /* turn off Rx and Tx interrupts */ 589 /* turn off Rx and Tx interrupts */
604 custom.intena = IF_RBF | IF_TBE; 590 amiga_custom.intena = IF_RBF | IF_TBE;
605 591
606 /* clear any pending interrupt */ 592 /* clear any pending interrupt */
607 custom.intreq = IF_RBF | IF_TBE; 593 amiga_custom.intreq = IF_RBF | IF_TBE;
608 594
609 local_irq_restore(flags); 595 local_irq_restore(flags);
610 596
@@ -617,7 +603,7 @@ int __debug_serinit( void )
617 603
618#ifdef CONFIG_KGDB 604#ifdef CONFIG_KGDB
619 /* turn Rx interrupts on for GDB */ 605 /* turn Rx interrupts on for GDB */
620 custom.intena = IF_SETCLR | IF_RBF; 606 amiga_custom.intena = IF_SETCLR | IF_RBF;
621 ser_RTSon(); 607 ser_RTSon();
622#endif 608#endif
623 609
diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c
index 2b483b4f1602..9075a7538e26 100644
--- a/arch/ppc/xmon/xmon.c
+++ b/arch/ppc/xmon/xmon.c
@@ -99,7 +99,7 @@ static void remove_bpts(void);
99static void insert_bpts(void); 99static void insert_bpts(void);
100static struct bpt *at_breakpoint(unsigned pc); 100static struct bpt *at_breakpoint(unsigned pc);
101static void bpt_cmds(void); 101static void bpt_cmds(void);
102static void cacheflush(void); 102void cacheflush(void);
103#ifdef CONFIG_SMP 103#ifdef CONFIG_SMP
104static void cpu_cmd(void); 104static void cpu_cmd(void);
105#endif /* CONFIG_SMP */ 105#endif /* CONFIG_SMP */
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 03ba5893f17b..1f451c2cb071 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -112,7 +112,7 @@ static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
112 112
113static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs) 113static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
114{ 114{
115 struct pt_regs *ptregs = __KSTK_PTREGS(tsk); 115 struct pt_regs *ptregs = task_pt_regs(tsk);
116 int i; 116 int i;
117 117
118 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4); 118 memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
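__KSTK_PTREGS(tsk) is replaced by task_pt_regs(tsk) throughout the s390 files. On s390 the user-visible pt_regs sits at the very top of the kernel stack, so the macro plausibly reduces to the sketch below (the exact processor.h wording is an assumption):

/* Illustrative: the pt_regs frame immediately below the stack top. */
#define task_pt_regs(tsk) \
        ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE) - 1)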
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7dd58f8ac6b5..2ff90a1a1056 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -153,7 +153,7 @@ void show_regs(struct pt_regs *regs)
153{ 153{
154 struct task_struct *tsk = current; 154 struct task_struct *tsk = current;
155 155
156 printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted()); 156 printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted());
157 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 157 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
158 current->comm, current->pid, (void *) tsk, 158 current->comm, current->pid, (void *) tsk,
159 (void *) tsk->thread.ksp); 159 (void *) tsk->thread.ksp);
@@ -217,8 +217,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
217 struct pt_regs childregs; 217 struct pt_regs childregs;
218 } *frame; 218 } *frame;
219 219
220 frame = ((struct fake_frame *) 220 frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
221 (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
222 p->thread.ksp = (unsigned long) frame; 221 p->thread.ksp = (unsigned long) frame;
223 /* Store access registers to kernel stack of new process. */ 222 /* Store access registers to kernel stack of new process. */
224 frame->childregs = *regs; 223 frame->childregs = *regs;
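Using container_of() here replaces the manual "stack top minus one fake_frame" arithmetic: given the task's pt_regs (the childregs member), it steps back to the enclosing struct fake_frame. For reference, container_of() is the standard kernel.h construct:

#define container_of(ptr, type, member) ({                      \
        const typeof(((type *)0)->member) *__mptr = (ptr);      \
        (type *)((char *)__mptr - offsetof(type, member)); })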
@@ -358,11 +357,10 @@ unsigned long get_wchan(struct task_struct *p)
358 unsigned long return_address; 357 unsigned long return_address;
359 int count; 358 int count;
360 359
361 if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info) 360 if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
362 return 0; 361 return 0;
363 low = (struct stack_frame *) p->thread_info; 362 low = task_stack_page(p);
364 high = (struct stack_frame *) 363 high = (struct stack_frame *) task_pt_regs(p);
365 ((unsigned long) p->thread_info + THREAD_SIZE) - 1;
366 sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN); 364 sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
367 if (sf <= low || sf > high) 365 if (sf <= low || sf > high)
368 return 0; 366 return 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index cc02232aa96e..37dfe33dab73 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -52,7 +52,7 @@ FixPerRegisters(struct task_struct *task)
52 struct pt_regs *regs; 52 struct pt_regs *regs;
53 per_struct *per_info; 53 per_struct *per_info;
54 54
55 regs = __KSTK_PTREGS(task); 55 regs = task_pt_regs(task);
56 per_info = (per_struct *) &task->thread.per_info; 56 per_info = (per_struct *) &task->thread.per_info;
57 per_info->control_regs.bits.em_instruction_fetch = 57 per_info->control_regs.bits.em_instruction_fetch =
58 per_info->single_step | per_info->instruction_fetch; 58 per_info->single_step | per_info->instruction_fetch;
@@ -150,7 +150,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
150 /* 150 /*
151 * psw and gprs are stored on the stack 151 * psw and gprs are stored on the stack
152 */ 152 */
153 tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr); 153 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
154 if (addr == (addr_t) &dummy->regs.psw.mask) 154 if (addr == (addr_t) &dummy->regs.psw.mask)
155 /* Remove per bit from user psw. */ 155 /* Remove per bit from user psw. */
156 tmp &= ~PSW_MASK_PER; 156 tmp &= ~PSW_MASK_PER;
@@ -176,7 +176,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
176 /* 176 /*
177 * orig_gpr2 is stored on the kernel stack 177 * orig_gpr2 is stored on the kernel stack
178 */ 178 */
179 tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2; 179 tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
180 180
181 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 181 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
182 /* 182 /*
@@ -243,7 +243,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
243 high order bit but older gdb's rely on it */ 243 high order bit but older gdb's rely on it */
244 data |= PSW_ADDR_AMODE; 244 data |= PSW_ADDR_AMODE;
245#endif 245#endif
246 *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data; 246 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
247 247
248 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 248 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
249 /* 249 /*
@@ -267,7 +267,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
267 /* 267 /*
268 * orig_gpr2 is stored on the kernel stack 268 * orig_gpr2 is stored on the kernel stack
269 */ 269 */
270 __KSTK_PTREGS(child)->orig_gpr2 = data; 270 task_pt_regs(child)->orig_gpr2 = data;
271 271
272 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 272 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
273 /* 273 /*
@@ -393,15 +393,15 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
393 */ 393 */
394 if (addr == (addr_t) &dummy32->regs.psw.mask) { 394 if (addr == (addr_t) &dummy32->regs.psw.mask) {
395 /* Fake a 31 bit psw mask. */ 395 /* Fake a 31 bit psw mask. */
396 tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32); 396 tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
397 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); 397 tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
398 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 398 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
399 /* Fake a 31 bit psw address. */ 399 /* Fake a 31 bit psw address. */
400 tmp = (__u32) __KSTK_PTREGS(child)->psw.addr | 400 tmp = (__u32) task_pt_regs(child)->psw.addr |
401 PSW32_ADDR_AMODE31; 401 PSW32_ADDR_AMODE31;
402 } else { 402 } else {
403 /* gpr 0-15 */ 403 /* gpr 0-15 */
404 tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw + 404 tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
405 addr*2 + 4); 405 addr*2 + 4);
406 } 406 }
407 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 407 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -415,7 +415,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
415 /* 415 /*
416 * orig_gpr2 is stored on the kernel stack 416 * orig_gpr2 is stored on the kernel stack
417 */ 417 */
418 tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4); 418 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
419 419
420 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 420 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
421 /* 421 /*
@@ -472,15 +472,15 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
472 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) 472 if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
473 /* Invalid psw mask. */ 473 /* Invalid psw mask. */
474 return -EINVAL; 474 return -EINVAL;
475 __KSTK_PTREGS(child)->psw.mask = 475 task_pt_regs(child)->psw.mask =
476 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); 476 PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
477 } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 477 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
478 /* Build a 64 bit psw address from 31 bit address. */ 478 /* Build a 64 bit psw address from 31 bit address. */
479 __KSTK_PTREGS(child)->psw.addr = 479 task_pt_regs(child)->psw.addr =
480 (__u64) tmp & PSW32_ADDR_INSN; 480 (__u64) tmp & PSW32_ADDR_INSN;
481 } else { 481 } else {
482 /* gpr 0-15 */ 482 /* gpr 0-15 */
483 *(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw 483 *(__u32*)((addr_t) &task_pt_regs(child)->psw
484 + addr*2 + 4) = tmp; 484 + addr*2 + 4) = tmp;
485 } 485 }
486 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 486 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -494,7 +494,7 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
494 /* 494 /*
495 * orig_gpr2 is stored on the kernel stack 495 * orig_gpr2 is stored on the kernel stack
496 */ 496 */
497 *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp; 497 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
498 498
499 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 499 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
500 /* 500 /*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e10f4ca00499..cbfcfd02a43a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -657,7 +657,7 @@ __cpu_up(unsigned int cpu)
657 idle = current_set[cpu]; 657 idle = current_set[cpu];
658 cpu_lowcore = lowcore_ptr[cpu]; 658 cpu_lowcore = lowcore_ptr[cpu];
659 cpu_lowcore->kernel_stack = (unsigned long) 659 cpu_lowcore->kernel_stack = (unsigned long)
660 idle->thread_info + (THREAD_SIZE); 660 task_stack_page(idle) + (THREAD_SIZE);
661 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 661 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
662 - sizeof(struct pt_regs) 662 - sizeof(struct pt_regs)
663 - sizeof(struct stack_frame)); 663 - sizeof(struct stack_frame));
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c36353e8c140..b0d8ca8e5eeb 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -282,7 +282,7 @@ static inline void start_hz_timer(void)
282{ 282{
283 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) 283 if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
284 return; 284 return;
285 account_ticks(__KSTK_PTREGS(current)); 285 account_ticks(task_pt_regs(current));
286 cpu_clear(smp_processor_id(), nohz_cpu_mask); 286 cpu_clear(smp_processor_id(), nohz_cpu_mask);
287} 287}
288 288
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 95d109968619..5d21e9e6e7b4 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -136,8 +136,8 @@ void show_trace(struct task_struct *task, unsigned long * stack)
136 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, 136 sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
137 S390_lowcore.async_stack); 137 S390_lowcore.async_stack);
138 if (task) 138 if (task)
139 __show_trace(sp, (unsigned long) task->thread_info, 139 __show_trace(sp, (unsigned long) task_stack_page(task),
140 (unsigned long) task->thread_info + THREAD_SIZE); 140 (unsigned long) task_stack_page(task) + THREAD_SIZE);
141 else 141 else
142 __show_trace(sp, S390_lowcore.thread_info, 142 __show_trace(sp, S390_lowcore.thread_info,
143 S390_lowcore.thread_info + THREAD_SIZE); 143 S390_lowcore.thread_info + THREAD_SIZE);
@@ -240,7 +240,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
240{ 240{
241 struct pt_regs *regs; 241 struct pt_regs *regs;
242 242
243 regs = __KSTK_PTREGS(task); 243 regs = task_pt_regs(task);
244 buffer += sprintf(buffer, "task: %p, ksp: %p\n", 244 buffer += sprintf(buffer, "task: %p, ksp: %p\n",
245 task, (void *)task->thread.ksp); 245 task, (void *)task->thread.ksp);
246 buffer += sprintf(buffer, "User PSW : %p %p\n", 246 buffer += sprintf(buffer, "User PSW : %p %p\n",
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 8a2bea34ddd2..aac15e42d03b 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -191,13 +191,8 @@ void flush_thread(void)
191{ 191{
192#if defined(CONFIG_SH_FPU) 192#if defined(CONFIG_SH_FPU)
193 struct task_struct *tsk = current; 193 struct task_struct *tsk = current;
194 struct pt_regs *regs = (struct pt_regs *)
195 ((unsigned long)tsk->thread_info
196 + THREAD_SIZE - sizeof(struct pt_regs)
197 - sizeof(unsigned long));
198
199 /* Forget lazy FPU state */ 194 /* Forget lazy FPU state */
200 clear_fpu(tsk, regs); 195 clear_fpu(tsk, task_pt_regs(tsk));
201 clear_used_math(); 196 clear_used_math();
202#endif 197#endif
203} 198}
@@ -232,13 +227,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
232{ 227{
233 struct pt_regs ptregs; 228 struct pt_regs ptregs;
234 229
235 ptregs = *(struct pt_regs *) 230 ptregs = *task_pt_regs(tsk);
236 ((unsigned long)tsk->thread_info + THREAD_SIZE
237 - sizeof(struct pt_regs)
238#ifdef CONFIG_SH_DSP
239 - sizeof(struct pt_dspregs)
240#endif
241 - sizeof(unsigned long));
242 elf_core_copy_regs(regs, &ptregs); 231 elf_core_copy_regs(regs, &ptregs);
243 232
244 return 1; 233 return 1;
@@ -252,11 +241,7 @@ dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
252#if defined(CONFIG_SH_FPU) 241#if defined(CONFIG_SH_FPU)
253 fpvalid = !!tsk_used_math(tsk); 242 fpvalid = !!tsk_used_math(tsk);
254 if (fpvalid) { 243 if (fpvalid) {
255 struct pt_regs *regs = (struct pt_regs *) 244 unlazy_fpu(tsk, task_pt_regs(tsk));
256 ((unsigned long)tsk->thread_info
257 + THREAD_SIZE - sizeof(struct pt_regs)
258 - sizeof(unsigned long));
259 unlazy_fpu(tsk, regs);
260 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); 245 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
261 } 246 }
262#endif 247#endif
@@ -279,18 +264,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
279 copy_to_stopped_child_used_math(p); 264 copy_to_stopped_child_used_math(p);
280#endif 265#endif
281 266
282 childregs = ((struct pt_regs *) 267 childregs = task_pt_regs(p);
283 (THREAD_SIZE + (unsigned long) p->thread_info)
284#ifdef CONFIG_SH_DSP
285 - sizeof(struct pt_dspregs)
286#endif
287 - sizeof(unsigned long)) - 1;
288 *childregs = *regs; 268 *childregs = *regs;
289 269
290 if (user_mode(regs)) { 270 if (user_mode(regs)) {
291 childregs->regs[15] = usp; 271 childregs->regs[15] = usp;
292 } else { 272 } else {
293 childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE; 273 childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
294 } 274 }
295 if (clone_flags & CLONE_SETTLS) { 275 if (clone_flags & CLONE_SETTLS) {
296 childregs->gbr = childregs->regs[0]; 276 childregs->gbr = childregs->regs[0];
@@ -333,11 +313,7 @@ ubc_set_tracing(int asid, unsigned long pc)
333struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next) 313struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
334{ 314{
335#if defined(CONFIG_SH_FPU) 315#if defined(CONFIG_SH_FPU)
336 struct pt_regs *regs = (struct pt_regs *) 316 unlazy_fpu(prev, task_pt_regs(prev));
337 ((unsigned long)prev->thread_info
338 + THREAD_SIZE - sizeof(struct pt_regs)
339 - sizeof(unsigned long));
340 unlazy_fpu(prev, regs);
341#endif 317#endif
342 318
343#ifdef CONFIG_PREEMPT 319#ifdef CONFIG_PREEMPT
@@ -346,13 +322,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
346 struct pt_regs *regs; 322 struct pt_regs *regs;
347 323
348 local_irq_save(flags); 324 local_irq_save(flags);
349 regs = (struct pt_regs *) 325 regs = task_pt_regs(prev);
350 ((unsigned long)prev->thread_info
351 + THREAD_SIZE - sizeof(struct pt_regs)
352#ifdef CONFIG_SH_DSP
353 - sizeof(struct pt_dspregs)
354#endif
355 - sizeof(unsigned long));
356 if (user_mode(regs) && regs->regs[15] >= 0xc0000000) { 326 if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
357 int offset = (int)regs->regs[15]; 327 int offset = (int)regs->regs[15];
358 328
@@ -372,7 +342,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
372 */ 342 */
373 asm volatile("ldc %0, r7_bank" 343 asm volatile("ldc %0, r7_bank"
374 : /* no output */ 344 : /* no output */
375 : "r" (next->thread_info)); 345 : "r" (task_thread_info(next)));
376 346
377#ifdef CONFIG_MMU 347#ifdef CONFIG_MMU
378 /* If no tasks are using the UBC, we're done */ 348 /* If no tasks are using the UBC, we're done */
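All of the sh changes above collapse the same four-line pt_regs computation, with its CONFIG_SH_DSP special case, into task_pt_regs(). A sketch consistent with the removed open-coded expressions (the exact header text is an assumption):

#ifdef CONFIG_SH_DSP
#define task_pt_regs(task) \
        ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE \
                - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
#else
#define task_pt_regs(task) \
        ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE \
                - sizeof(unsigned long)) - 1)
#endif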
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index 1a8be06519ec..3887b4f6feb2 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -41,12 +41,7 @@ static inline int get_stack_long(struct task_struct *task, int offset)
41{ 41{
42 unsigned char *stack; 42 unsigned char *stack;
43 43
44 stack = (unsigned char *) 44 stack = (unsigned char *)task_pt_regs(task);
45 task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
46#ifdef CONFIG_SH_DSP
47 - sizeof(struct pt_dspregs)
48#endif
49 - sizeof(unsigned long);
50 stack += offset; 45 stack += offset;
51 return (*((int *)stack)); 46 return (*((int *)stack));
52} 47}
@@ -59,12 +54,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
59{ 54{
60 unsigned char *stack; 55 unsigned char *stack;
61 56
62 stack = (unsigned char *) 57 stack = (unsigned char *)task_pt_regs(task);
63 task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
64#ifdef CONFIG_SH_DSP
65 - sizeof(struct pt_dspregs)
66#endif
67 - sizeof(unsigned long);
68 stack += offset; 58 stack += offset;
69 *(unsigned long *) stack = data; 59 *(unsigned long *) stack = data;
70 return 0; 60 return 0;
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 59e49b18252c..62c7d1c0ad7b 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -103,7 +103,7 @@ int __cpu_up(unsigned int cpu)
103 if (IS_ERR(tsk)) 103 if (IS_ERR(tsk))
104 panic("Failed forking idle task for cpu %d\n", cpu); 104 panic("Failed forking idle task for cpu %d\n", cpu);
105 105
106 tsk->thread_info->cpu = cpu; 106 task_thread_info(tsk)->cpu = cpu;
107 107
108 cpu_set(cpu, cpu_online_map); 108 cpu_set(cpu, cpu_online_map);
109 109
diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c
index 419b5a710441..1da9c61d6823 100644
--- a/arch/sh64/kernel/process.c
+++ b/arch/sh64/kernel/process.c
@@ -744,7 +744,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
744 } 744 }
745#endif 745#endif
746 /* Copy from sh version */ 746 /* Copy from sh version */
747 childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p->thread_info )) - 1; 747 childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
748 748
749 *childregs = *regs; 749 *childregs = *regs;
750 750
@@ -752,7 +752,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
752 childregs->regs[15] = usp; 752 childregs->regs[15] = usp;
753 p->thread.uregs = childregs; 753 p->thread.uregs = childregs;
754 } else { 754 } else {
755 childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE; 755 childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
756 } 756 }
757 757
758 childregs->regs[9] = 0; /* Set return value for child */ 758 childregs->regs[9] = 0; /* Set return value for child */
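One detail worth noting in the sh64 hunk: task_stack_page() returns void *, so THREAD_SIZE + task_stack_page(p) leans on GCC's void-pointer arithmetic extension, which kernel code uses freely. A strictly typed equivalent, as a sketch:

/* Same address, without void * arithmetic. */
struct pt_regs *childregs =
        (struct pt_regs *)((char *)task_stack_page(p) + THREAD_SIZE) - 1;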
diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c
index 526fedae6db8..58087331b8a6 100644
--- a/arch/sh64/lib/dbg.c
+++ b/arch/sh64/lib/dbg.c
@@ -174,7 +174,7 @@ void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
174 struct ring_node *rr; 174 struct ring_node *rr;
175 175
176 pid = current->pid; 176 pid = current->pid;
177 stack_bottom = (unsigned long) current->thread_info; 177 stack_bottom = (unsigned long) task_stack_page(current);
178 asm volatile("ori r15, 0, %0" : "=r" (sp)); 178 asm volatile("ori r15, 0, %0" : "=r" (sp));
179 rr = event_ring + event_ptr; 179 rr = event_ring + event_ptr;
180 rr->evt = evt; 180 rr->evt = evt;
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index ea8647411462..fbb05a452e51 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -302,7 +302,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
302 int count = 0; 302 int count = 0;
303 303
304 if (tsk != NULL) 304 if (tsk != NULL)
305 task_base = (unsigned long) tsk->thread_info; 305 task_base = (unsigned long) task_stack_page(tsk);
306 else 306 else
307 task_base = (unsigned long) current_thread_info(); 307 task_base = (unsigned long) current_thread_info();
308 308
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(dump_stack);
337 */ 337 */
338unsigned long thread_saved_pc(struct task_struct *tsk) 338unsigned long thread_saved_pc(struct task_struct *tsk)
339{ 339{
340 return tsk->thread_info->kpc; 340 return task_thread_info(tsk)->kpc;
341} 341}
342 342
343/* 343/*
@@ -392,7 +392,7 @@ void flush_thread(void)
392 /* We must fixup kregs as well. */ 392 /* We must fixup kregs as well. */
393 /* XXX This was not fixed for ti for a while, worked. Unused? */ 393 /* XXX This was not fixed for ti for a while, worked. Unused? */
394 current->thread.kregs = (struct pt_regs *) 394 current->thread.kregs = (struct pt_regs *)
395 ((char *)current->thread_info + (THREAD_SIZE - TRACEREG_SZ)); 395 (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
396 } 396 }
397} 397}
398 398
@@ -459,7 +459,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
459 unsigned long unused, 459 unsigned long unused,
460 struct task_struct *p, struct pt_regs *regs) 460 struct task_struct *p, struct pt_regs *regs)
461{ 461{
462 struct thread_info *ti = p->thread_info; 462 struct thread_info *ti = task_thread_info(p);
463 struct pt_regs *childregs; 463 struct pt_regs *childregs;
464 char *new_stack; 464 char *new_stack;
465 465
@@ -482,7 +482,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
482 * V V (stk.fr.) V (pt_regs) { (stk.fr.) } 482 * V V (stk.fr.) V (pt_regs) { (stk.fr.) }
483 * +----- - - - - - ------+===========+============={+==========}+ 483 * +----- - - - - - ------+===========+============={+==========}+
484 */ 484 */
485 new_stack = (char*)ti + THREAD_SIZE; 485 new_stack = task_stack_page(p) + THREAD_SIZE;
486 if (regs->psr & PSR_PS) 486 if (regs->psr & PSR_PS)
487 new_stack -= STACKFRAME_SZ; 487 new_stack -= STACKFRAME_SZ;
488 new_stack -= STACKFRAME_SZ + TRACEREG_SZ; 488 new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
@@ -724,7 +724,7 @@ unsigned long get_wchan(struct task_struct *task)
724 task->state == TASK_RUNNING) 724 task->state == TASK_RUNNING)
725 goto out; 725 goto out;
726 726
727 fp = task->thread_info->ksp + bias; 727 fp = task_thread_info(task)->ksp + bias;
728 do { 728 do {
729 /* Bogus frame pointer? */ 729 /* Bogus frame pointer? */
730 if (fp < (task_base + sizeof(struct thread_info)) || 730 if (fp < (task_base + sizeof(struct thread_info)) ||
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
index fc470c0e9dc6..1baf13ed5c3a 100644
--- a/arch/sparc/kernel/ptrace.c
+++ b/arch/sparc/kernel/ptrace.c
@@ -75,7 +75,7 @@ static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
75 struct task_struct *tsk, long __user *addr) 75 struct task_struct *tsk, long __user *addr)
76{ 76{
77 struct pt_regs *cregs = tsk->thread.kregs; 77 struct pt_regs *cregs = tsk->thread.kregs;
78 struct thread_info *t = tsk->thread_info; 78 struct thread_info *t = task_thread_info(tsk);
79 int v; 79 int v;
80 80
81 if(offset >= 1024) 81 if(offset >= 1024)
@@ -170,7 +170,7 @@ static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
170 struct task_struct *tsk) 170 struct task_struct *tsk)
171{ 171{
172 struct pt_regs *cregs = tsk->thread.kregs; 172 struct pt_regs *cregs = tsk->thread.kregs;
173 struct thread_info *t = tsk->thread_info; 173 struct thread_info *t = task_thread_info(tsk);
174 unsigned long value = regs->u_regs[UREG_I3]; 174 unsigned long value = regs->u_regs[UREG_I3];
175 175
176 if(offset >= 1024) 176 if(offset >= 1024)
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index cc1fc898495c..40d426cce824 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -200,7 +200,7 @@ void __init smp4d_boot_cpus(void)
200 /* Cook up an idler for this guy. */ 200 /* Cook up an idler for this guy. */
201 p = fork_idle(i); 201 p = fork_idle(i);
202 cpucount++; 202 cpucount++;
203 current_set[i] = p->thread_info; 203 current_set[i] = task_thread_info(p);
204 for (no = 0; !cpu_find_by_instance(no, NULL, &mid) 204 for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
205 && mid != i; no++) ; 205 && mid != i; no++) ;
206 206
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index f113422a3727..a21f27d10e55 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -173,7 +173,7 @@ void __init smp4m_boot_cpus(void)
173 /* Cook up an idler for this guy. */ 173 /* Cook up an idler for this guy. */
174 p = fork_idle(i); 174 p = fork_idle(i);
175 cpucount++; 175 cpucount++;
176 current_set[i] = p->thread_info; 176 current_set[i] = task_thread_info(p);
177 /* See trampoline.S for details... */ 177 /* See trampoline.S for details... */
178 entry += ((i-1) * 3); 178 entry += ((i-1) * 3);
179 179
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index 3f451ae66482..41d45c298fb2 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -291,7 +291,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
291#ifndef CONFIG_SMP 291#ifndef CONFIG_SMP
292 if(!fpt) { 292 if(!fpt) {
293#else 293#else
294 if(!(fpt->thread_info->flags & _TIF_USEDFPU)) { 294 if(!(task_thread_info(fpt)->flags & _TIF_USEDFPU)) {
295#endif 295#endif
296 fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth); 296 fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
297 regs->psr &= ~PSR_EF; 297 regs->psr &= ~PSR_EF;
@@ -334,7 +334,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
334 /* nope, better SIGFPE the offending process... */ 334 /* nope, better SIGFPE the offending process... */
335 335
336#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
337 fpt->thread_info->flags &= ~_TIF_USEDFPU; 337 task_thread_info(fpt)->flags &= ~_TIF_USEDFPU;
338#endif 338#endif
339 if(psr & PSR_PS) { 339 if(psr & PSR_PS) {
340 /* The first fsr store/load we tried trapped, 340 /* The first fsr store/load we tried trapped,
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 02f9dec1d459..1dc3650c5cae 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -390,7 +390,7 @@ void show_regs32(struct pt_regs32 *regs)
390 390
391unsigned long thread_saved_pc(struct task_struct *tsk) 391unsigned long thread_saved_pc(struct task_struct *tsk)
392{ 392{
393 struct thread_info *ti = tsk->thread_info; 393 struct thread_info *ti = task_thread_info(tsk);
394 unsigned long ret = 0xdeadbeefUL; 394 unsigned long ret = 0xdeadbeefUL;
395 395
396 if (ti && ti->ksp) { 396 if (ti && ti->ksp) {
@@ -616,11 +616,11 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
616 unsigned long unused, 616 unsigned long unused,
617 struct task_struct *p, struct pt_regs *regs) 617 struct task_struct *p, struct pt_regs *regs)
618{ 618{
619 struct thread_info *t = p->thread_info; 619 struct thread_info *t = task_thread_info(p);
620 char *child_trap_frame; 620 char *child_trap_frame;
621 621
622 /* Calculate offset to stack_frame & pt_regs */ 622 /* Calculate offset to stack_frame & pt_regs */
623 child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ)); 623 child_trap_frame = task_stack_page(p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
624 memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ)); 624 memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
625 625
626 t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) | 626 t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
@@ -845,9 +845,9 @@ unsigned long get_wchan(struct task_struct *task)
845 task->state == TASK_RUNNING) 845 task->state == TASK_RUNNING)
846 goto out; 846 goto out;
847 847
848 thread_info_base = (unsigned long) task->thread_info; 848 thread_info_base = (unsigned long) task_stack_page(task);
849 bias = STACK_BIAS; 849 bias = STACK_BIAS;
850 fp = task->thread_info->ksp + bias; 850 fp = task_thread_info(task)->ksp + bias;
851 851
852 do { 852 do {
853 /* Bogus frame pointer? */ 853 /* Bogus frame pointer? */
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 84d3df2264cb..3f9746f856d2 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -296,7 +296,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
296 case PTRACE_GETREGS: { 296 case PTRACE_GETREGS: {
297 struct pt_regs32 __user *pregs = 297 struct pt_regs32 __user *pregs =
298 (struct pt_regs32 __user *) addr; 298 (struct pt_regs32 __user *) addr;
299 struct pt_regs *cregs = child->thread_info->kregs; 299 struct pt_regs *cregs = task_pt_regs(child);
300 int rval; 300 int rval;
301 301
302 if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) || 302 if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
@@ -320,11 +320,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
320 320
321 case PTRACE_GETREGS64: { 321 case PTRACE_GETREGS64: {
322 struct pt_regs __user *pregs = (struct pt_regs __user *) addr; 322 struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
323 struct pt_regs *cregs = child->thread_info->kregs; 323 struct pt_regs *cregs = task_pt_regs(child);
324 unsigned long tpc = cregs->tpc; 324 unsigned long tpc = cregs->tpc;
325 int rval; 325 int rval;
326 326
327 if ((child->thread_info->flags & _TIF_32BIT) != 0) 327 if ((task_thread_info(child)->flags & _TIF_32BIT) != 0)
328 tpc &= 0xffffffff; 328 tpc &= 0xffffffff;
329 if (__put_user(cregs->tstate, (&pregs->tstate)) || 329 if (__put_user(cregs->tstate, (&pregs->tstate)) ||
330 __put_user(tpc, (&pregs->tpc)) || 330 __put_user(tpc, (&pregs->tpc)) ||
@@ -348,7 +348,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
348 case PTRACE_SETREGS: { 348 case PTRACE_SETREGS: {
349 struct pt_regs32 __user *pregs = 349 struct pt_regs32 __user *pregs =
350 (struct pt_regs32 __user *) addr; 350 (struct pt_regs32 __user *) addr;
351 struct pt_regs *cregs = child->thread_info->kregs; 351 struct pt_regs *cregs = task_pt_regs(child);
352 unsigned int psr, pc, npc, y; 352 unsigned int psr, pc, npc, y;
353 int i; 353 int i;
354 354
@@ -381,7 +381,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
381 381
382 case PTRACE_SETREGS64: { 382 case PTRACE_SETREGS64: {
383 struct pt_regs __user *pregs = (struct pt_regs __user *) addr; 383 struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
384 struct pt_regs *cregs = child->thread_info->kregs; 384 struct pt_regs *cregs = task_pt_regs(child);
385 unsigned long tstate, tpc, tnpc, y; 385 unsigned long tstate, tpc, tnpc, y;
386 int i; 386 int i;
387 387
@@ -395,7 +395,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
395 pt_error_return(regs, EFAULT); 395 pt_error_return(regs, EFAULT);
396 goto out_tsk; 396 goto out_tsk;
397 } 397 }
398 if ((child->thread_info->flags & _TIF_32BIT) != 0) { 398 if ((task_thread_info(child)->flags & _TIF_32BIT) != 0) {
399 tpc &= 0xffffffff; 399 tpc &= 0xffffffff;
400 tnpc &= 0xffffffff; 400 tnpc &= 0xffffffff;
401 } 401 }
@@ -430,11 +430,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
430 } fpq[16]; 430 } fpq[16];
431 }; 431 };
432 struct fps __user *fps = (struct fps __user *) addr; 432 struct fps __user *fps = (struct fps __user *) addr;
433 unsigned long *fpregs = child->thread_info->fpregs; 433 unsigned long *fpregs = task_thread_info(child)->fpregs;
434 434
435 if (copy_to_user(&fps->regs[0], fpregs, 435 if (copy_to_user(&fps->regs[0], fpregs,
436 (32 * sizeof(unsigned int))) || 436 (32 * sizeof(unsigned int))) ||
437 __put_user(child->thread_info->xfsr[0], (&fps->fsr)) || 437 __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr)) ||
438 __put_user(0, (&fps->fpqd)) || 438 __put_user(0, (&fps->fpqd)) ||
439 __put_user(0, (&fps->flags)) || 439 __put_user(0, (&fps->flags)) ||
440 __put_user(0, (&fps->extra)) || 440 __put_user(0, (&fps->extra)) ||
@@ -452,11 +452,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
452 unsigned long fsr; 452 unsigned long fsr;
453 }; 453 };
454 struct fps __user *fps = (struct fps __user *) addr; 454 struct fps __user *fps = (struct fps __user *) addr;
455 unsigned long *fpregs = child->thread_info->fpregs; 455 unsigned long *fpregs = task_thread_info(child)->fpregs;
456 456
457 if (copy_to_user(&fps->regs[0], fpregs, 457 if (copy_to_user(&fps->regs[0], fpregs,
458 (64 * sizeof(unsigned int))) || 458 (64 * sizeof(unsigned int))) ||
459 __put_user(child->thread_info->xfsr[0], (&fps->fsr))) { 459 __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
460 pt_error_return(regs, EFAULT); 460 pt_error_return(regs, EFAULT);
461 goto out_tsk; 461 goto out_tsk;
462 } 462 }
@@ -477,7 +477,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
477 } fpq[16]; 477 } fpq[16];
478 }; 478 };
479 struct fps __user *fps = (struct fps __user *) addr; 479 struct fps __user *fps = (struct fps __user *) addr;
480 unsigned long *fpregs = child->thread_info->fpregs; 480 unsigned long *fpregs = task_thread_info(child)->fpregs;
481 unsigned fsr; 481 unsigned fsr;
482 482
483 if (copy_from_user(fpregs, &fps->regs[0], 483 if (copy_from_user(fpregs, &fps->regs[0],
@@ -486,11 +486,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
486 pt_error_return(regs, EFAULT); 486 pt_error_return(regs, EFAULT);
487 goto out_tsk; 487 goto out_tsk;
488 } 488 }
489 child->thread_info->xfsr[0] &= 0xffffffff00000000UL; 489 task_thread_info(child)->xfsr[0] &= 0xffffffff00000000UL;
490 child->thread_info->xfsr[0] |= fsr; 490 task_thread_info(child)->xfsr[0] |= fsr;
491 if (!(child->thread_info->fpsaved[0] & FPRS_FEF)) 491 if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
492 child->thread_info->gsr[0] = 0; 492 task_thread_info(child)->gsr[0] = 0;
493 child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL); 493 task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
494 pt_succ_return(regs, 0); 494 pt_succ_return(regs, 0);
495 goto out_tsk; 495 goto out_tsk;
496 } 496 }
@@ -501,17 +501,17 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
501 unsigned long fsr; 501 unsigned long fsr;
502 }; 502 };
503 struct fps __user *fps = (struct fps __user *) addr; 503 struct fps __user *fps = (struct fps __user *) addr;
504 unsigned long *fpregs = child->thread_info->fpregs; 504 unsigned long *fpregs = task_thread_info(child)->fpregs;
505 505
506 if (copy_from_user(fpregs, &fps->regs[0], 506 if (copy_from_user(fpregs, &fps->regs[0],
507 (64 * sizeof(unsigned int))) || 507 (64 * sizeof(unsigned int))) ||
508 __get_user(child->thread_info->xfsr[0], (&fps->fsr))) { 508 __get_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
509 pt_error_return(regs, EFAULT); 509 pt_error_return(regs, EFAULT);
510 goto out_tsk; 510 goto out_tsk;
511 } 511 }
512 if (!(child->thread_info->fpsaved[0] & FPRS_FEF)) 512 if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
513 child->thread_info->gsr[0] = 0; 513 task_thread_info(child)->gsr[0] = 0;
514 child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU); 514 task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
515 pt_succ_return(regs, 0); 515 pt_succ_return(regs, 0);
516 goto out_tsk; 516 goto out_tsk;
517 } 517 }
@@ -562,8 +562,8 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
562#ifdef DEBUG_PTRACE 562#ifdef DEBUG_PTRACE
563 printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm, 563 printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
564 child->pid, child->exit_code, 564 child->pid, child->exit_code,
565 child->thread_info->kregs->tpc, 565 task_pt_regs(child)->tpc,
566 child->thread_info->kregs->tnpc); 566 task_pt_regs(child)->tnpc);
567 567
568#endif 568#endif
569 wake_up_process(child); 569 wake_up_process(child);
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 48180531562f..250745896aee 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -520,7 +520,7 @@ void __init setup_arch(char **cmdline_p)
520 rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); 520 rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
521#endif 521#endif
522 522
523 init_task.thread_info->kregs = &fake_swapper_regs; 523 task_thread_info(&init_task)->kregs = &fake_swapper_regs;
524 524
525#ifdef CONFIG_IP_PNP 525#ifdef CONFIG_IP_PNP
526 if (!ic_set_manually) { 526 if (!ic_set_manually) {
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 6efc03df51c3..1fb6323e65a4 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -335,7 +335,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
335 335
336 p = fork_idle(cpu); 336 p = fork_idle(cpu);
337 callin_flag = 0; 337 callin_flag = 0;
338 cpu_new_thread = p->thread_info; 338 cpu_new_thread = task_thread_info(p);
339 cpu_set(cpu, cpu_callout_map); 339 cpu_set(cpu, cpu_callout_map);
340 340
341 cpu_find_by_mid(cpu, &cpu_node); 341 cpu_find_by_mid(cpu, &cpu_node);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 5570e7bb22bb..8d44ae5a15e3 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1808,7 +1808,7 @@ static void user_instruction_dump (unsigned int __user *pc)
1808void show_stack(struct task_struct *tsk, unsigned long *_ksp) 1808void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1809{ 1809{
1810 unsigned long pc, fp, thread_base, ksp; 1810 unsigned long pc, fp, thread_base, ksp;
1811 struct thread_info *tp = tsk->thread_info; 1811 void *tp = task_stack_page(tsk);
1812 struct reg_window *rw; 1812 struct reg_window *rw;
1813 int count = 0; 1813 int count = 0;
1814 1814
@@ -1862,7 +1862,7 @@ static inline int is_kernel_stack(struct task_struct *task,
1862 return 0; 1862 return 0;
1863 } 1863 }
1864 1864
1865 thread_base = (unsigned long) task->thread_info; 1865 thread_base = (unsigned long) task_stack_page(task);
1866 thread_end = thread_base + sizeof(union thread_union); 1866 thread_end = thread_base + sizeof(union thread_union);
1867 if (rw_addr >= thread_base && 1867 if (rw_addr >= thread_base &&
1868 rw_addr < thread_end && 1868 rw_addr < thread_end &&
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index d2d3f256778c..7f13b85d2656 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -107,7 +107,7 @@ void set_current(void *t)
107{ 107{
108 struct task_struct *task = t; 108 struct task_struct *task = t;
109 109
110 cpu_tasks[task->thread_info->cpu] = ((struct cpu_task) 110 cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
111 { external_pid(task), task }); 111 { external_pid(task), task });
112} 112}
113 113
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 09790ccb161c..dc41c6dc2f34 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -118,7 +118,7 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
118 handler = new_thread_handler; 118 handler = new_thread_handler;
119 } 119 }
120 120
121 new_thread(p->thread_info, &p->thread.mode.skas.switch_buf, 121 new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
122 &p->thread.mode.skas.fork_buf, handler); 122 &p->thread.mode.skas.fork_buf, handler);
123 return(0); 123 return(0);
124} 124}
@@ -185,7 +185,7 @@ int start_uml_skas(void)
185 185
186 init_task.thread.request.u.thread.proc = start_kernel_proc; 186 init_task.thread.request.u.thread.proc = start_kernel_proc;
187 init_task.thread.request.u.thread.arg = NULL; 187 init_task.thread.request.u.thread.arg = NULL;
188 return(start_idle_thread(init_task.thread_info, 188 return(start_idle_thread(task_stack_page(&init_task),
189 &init_task.thread.mode.skas.switch_buf, 189 &init_task.thread.mode.skas.switch_buf,
190 &init_task.thread.mode.skas.fork_buf)); 190 &init_task.thread.mode.skas.fork_buf));
191} 191}
diff --git a/arch/um/kernel/tt/exec_kern.c b/arch/um/kernel/tt/exec_kern.c
index 136e54c47d37..8f40e4838736 100644
--- a/arch/um/kernel/tt/exec_kern.c
+++ b/arch/um/kernel/tt/exec_kern.c
@@ -39,7 +39,7 @@ void flush_thread_tt(void)
39 do_exit(SIGKILL); 39 do_exit(SIGKILL);
40 } 40 }
41 41
42 new_pid = start_fork_tramp(current->thread_info, stack, 0, exec_tramp); 42 new_pid = start_fork_tramp(task_stack_page(current), stack, 0, exec_tramp);
43 if(new_pid < 0){ 43 if(new_pid < 0){
44 printk(KERN_ERR 44 printk(KERN_ERR
45 "flush_thread : new thread failed, errno = %d\n", 45 "flush_thread : new thread failed, errno = %d\n",
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index 14d4622a5fb8..62535303aa27 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -36,7 +36,7 @@ void switch_to_tt(void *prev, void *next)
36 from = prev; 36 from = prev;
37 to = next; 37 to = next;
38 38
39 cpu = from->thread_info->cpu; 39 cpu = task_thread_info(from)->cpu;
40 if(cpu == 0) 40 if(cpu == 0)
41 forward_interrupts(to->thread.mode.tt.extern_pid); 41 forward_interrupts(to->thread.mode.tt.extern_pid);
42#ifdef CONFIG_SMP 42#ifdef CONFIG_SMP
@@ -253,7 +253,7 @@ int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
253 253
254 clone_flags &= CLONE_VM; 254 clone_flags &= CLONE_VM;
255 p->thread.temp_stack = stack; 255 p->thread.temp_stack = stack;
256 new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp); 256 new_pid = start_fork_tramp(task_stack_page(p), stack, clone_flags, tramp);
257 if(new_pid < 0){ 257 if(new_pid < 0){
258 printk(KERN_ERR "copy_thread : clone failed - errno = %d\n", 258 printk(KERN_ERR "copy_thread : clone failed - errno = %d\n",
259 -new_pid); 259 -new_pid);
@@ -343,7 +343,7 @@ int do_proc_op(void *t, int proc_id)
343 pid = thread->request.u.exec.pid; 343 pid = thread->request.u.exec.pid;
344 do_exec(thread->mode.tt.extern_pid, pid); 344 do_exec(thread->mode.tt.extern_pid, pid);
345 thread->mode.tt.extern_pid = pid; 345 thread->mode.tt.extern_pid = pid;
346 cpu_tasks[task->thread_info->cpu].pid = pid; 346 cpu_tasks[task_thread_info(task)->cpu].pid = pid;
347 break; 347 break;
348 case OP_FORK: 348 case OP_FORK:
349 attach_process(thread->request.u.fork.pid); 349 attach_process(thread->request.u.fork.pid);
@@ -425,7 +425,7 @@ int start_uml_tt(void)
425 int pages; 425 int pages;
426 426
427 pages = (1 << CONFIG_KERNEL_STACK_ORDER); 427 pages = (1 << CONFIG_KERNEL_STACK_ORDER);
428 sp = (void *) ((unsigned long) init_task.thread_info) + 428 sp = task_stack_page(&init_task) +
429 pages * PAGE_SIZE - sizeof(unsigned long); 429 pages * PAGE_SIZE - sizeof(unsigned long);
430 return(tracer(start_kernel_proc, sp)); 430 return(tracer(start_kernel_proc, sp));
431} 431}
diff --git a/arch/v850/kernel/process.c b/arch/v850/kernel/process.c
index 062ffa0a9998..eb909937958b 100644
--- a/arch/v850/kernel/process.c
+++ b/arch/v850/kernel/process.c
@@ -114,7 +114,7 @@ int copy_thread (int nr, unsigned long clone_flags,
114 struct task_struct *p, struct pt_regs *regs) 114 struct task_struct *p, struct pt_regs *regs)
115{ 115{
116 /* Start pushing stuff from the top of the child's kernel stack. */ 116 /* Start pushing stuff from the top of the child's kernel stack. */
117 unsigned long orig_ksp = (unsigned long)p->thread_info + THREAD_SIZE; 117 unsigned long orig_ksp = task_tos(p);
118 unsigned long ksp = orig_ksp; 118 unsigned long ksp = orig_ksp;
119 /* We push two `state save' stack frames (see entry.S) on the new 119 /* We push two `state save' stack frames (see entry.S) on the new
120 kernel stack: 120 kernel stack:
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index 18492d02aaf6..67e057509664 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -58,7 +58,7 @@ static v850_reg_t *reg_save_addr (unsigned reg_offs, struct task_struct *t)
58 regs = thread_saved_regs (t); 58 regs = thread_saved_regs (t);
59 else 59 else
60 /* Register saved during kernel entry (or not available). */ 60 /* Register saved during kernel entry (or not available). */
61 regs = task_regs (t); 61 regs = task_pt_regs (t);
62 62
63 return (v850_reg_t *)((char *)regs + reg_offs); 63 return (v850_reg_t *)((char *)regs + reg_offs);
64} 64}
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 2b760d0d9ce2..029bddab0459 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -197,8 +197,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
197 197
198static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs) 198static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
199{ 199{
200 struct pt_regs *pp = (struct pt_regs *)(t->thread.rsp0); 200 struct pt_regs *pp = task_pt_regs(t);
201 --pp;
202 ELF_CORE_COPY_REGS((*elfregs), pp); 201 ELF_CORE_COPY_REGS((*elfregs), pp);
203 /* fix wrong segments */ 202 /* fix wrong segments */
204 (*elfregs)[7] = t->thread.ds; 203 (*elfregs)[7] = t->thread.ds;
@@ -217,7 +216,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
217 if (!tsk_used_math(tsk)) 216 if (!tsk_used_math(tsk))
218 return 0; 217 return 0;
219 if (!regs) 218 if (!regs)
220 regs = ((struct pt_regs *)tsk->thread.rsp0) - 1; 219 regs = task_pt_regs(tsk);
221 if (tsk == current) 220 if (tsk == current)
222 unlazy_fpu(tsk); 221 unlazy_fpu(tsk);
223 set_fs(KERNEL_DS); 222 set_fs(KERNEL_DS);
@@ -233,7 +232,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
233static inline int 232static inline int
234elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) 233elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
235{ 234{
236 struct pt_regs *regs = ((struct pt_regs *)(t->thread.rsp0))-1; 235 struct pt_regs *regs = task_pt_regs(t);
237 if (!tsk_used_math(t)) 236 if (!tsk_used_math(t))
238 return 0; 237 return 0;
239 if (t == current) 238 if (t == current)
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index ea4394e021d6..23a4515a73b4 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -41,7 +41,7 @@
41static int putreg32(struct task_struct *child, unsigned regno, u32 val) 41static int putreg32(struct task_struct *child, unsigned regno, u32 val)
42{ 42{
43 int i; 43 int i;
44 __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs)); 44 __u64 *stack = (__u64 *)task_pt_regs(child);
45 45
46 switch (regno) { 46 switch (regno) {
47 case offsetof(struct user32, regs.fs): 47 case offsetof(struct user32, regs.fs):
@@ -137,7 +137,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 val)
137 137
138static int getreg32(struct task_struct *child, unsigned regno, u32 *val) 138static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
139{ 139{
140 __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs)); 140 __u64 *stack = (__u64 *)task_pt_regs(child);
141 141
142 switch (regno) { 142 switch (regno) {
143 case offsetof(struct user32, regs.fs): 143 case offsetof(struct user32, regs.fs):
@@ -238,7 +238,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
238 if (ret < 0) 238 if (ret < 0)
239 goto out; 239 goto out;
240 240
241 childregs = (struct pt_regs *)(child->thread.rsp0 - sizeof(struct pt_regs)); 241 childregs = task_pt_regs(child);
242 242
243 switch (request) { 243 switch (request) {
244 case PTRACE_PEEKDATA: 244 case PTRACE_PEEKDATA:
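
On x86_64 the user-mode register frame sits at the top of the kernel stack, immediately below thread.rsp0, which is why every open-coded `((struct pt_regs *)tsk->thread.rsp0) - 1` (and the local get_child_regs() helper deleted in kernel/ptrace.c below) collapses into a single task_pt_regs() call. A sketch of the per-arch definition this relies on:

	/* Sketch; the real macro lives in the x86_64 processor.h: */
	#define task_pt_regs(tsk) \
		(((struct pt_regs *)(tsk)->thread.rsp0) - 1)
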
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index d9b22b633e39..a5d7e16b928e 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -95,7 +95,7 @@ int save_i387(struct _fpstate __user *buf)
95 if (!used_math()) 95 if (!used_math())
96 return 0; 96 return 0;
97 clear_used_math(); /* trigger finit */ 97 clear_used_math(); /* trigger finit */
98 if (tsk->thread_info->status & TS_USEDFPU) { 98 if (task_thread_info(tsk)->status & TS_USEDFPU) {
99 err = save_i387_checking((struct i387_fxsave_struct __user *)buf); 99 err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
100 if (err) return err; 100 if (err) return err;
101 stts(); 101 stts();
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index b61965f0fb34..5ecd34ab8c2b 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -133,7 +133,7 @@ static void end_8259A_irq (unsigned int irq)
133{ 133{
134 if (irq > 256) { 134 if (irq > 256) {
135 char var; 135 char var;
136 printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, current->thread_info); 136 printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, task_thread_info(current));
137 137
138 BUG(); 138 BUG();
139 } 139 }
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 669cf0ed3266..8ded407e4a94 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -451,7 +451,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
451 struct task_struct *me = current; 451 struct task_struct *me = current;
452 452
453 childregs = ((struct pt_regs *) 453 childregs = ((struct pt_regs *)
454 (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; 454 (THREAD_SIZE + task_stack_page(p))) - 1;
455 *childregs = *regs; 455 *childregs = *regs;
456 456
457 childregs->rax = 0; 457 childregs->rax = 0;
@@ -463,7 +463,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
463 p->thread.rsp0 = (unsigned long) (childregs+1); 463 p->thread.rsp0 = (unsigned long) (childregs+1);
464 p->thread.userrsp = me->thread.userrsp; 464 p->thread.userrsp = me->thread.userrsp;
465 465
466 set_ti_thread_flag(p->thread_info, TIF_FORK); 466 set_tsk_thread_flag(p, TIF_FORK);
467 467
468 p->thread.fs = me->thread.fs; 468 p->thread.fs = me->thread.fs;
469 p->thread.gs = me->thread.gs; 469 p->thread.gs = me->thread.gs;
@@ -590,7 +590,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
590 write_pda(oldrsp, next->userrsp); 590 write_pda(oldrsp, next->userrsp);
591 write_pda(pcurrent, next_p); 591 write_pda(pcurrent, next_p);
592 write_pda(kernelstack, 592 write_pda(kernelstack,
593 (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET); 593 task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
594 594
595 /* 595 /*
596 * Now maybe reload the debug registers 596 * Now maybe reload the debug registers
@@ -704,7 +704,7 @@ unsigned long get_wchan(struct task_struct *p)
704 704
705 if (!p || p == current || p->state==TASK_RUNNING) 705 if (!p || p == current || p->state==TASK_RUNNING)
706 return 0; 706 return 0;
707 stack = (unsigned long)p->thread_info; 707 stack = (unsigned long)task_stack_page(p);
708 if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE) 708 if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
709 return 0; 709 return 0;
710 fp = *(u64 *)(p->thread.rsp); 710 fp = *(u64 *)(p->thread.rsp);
@@ -822,8 +822,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
822{ 822{
823 struct pt_regs *pp, ptregs; 823 struct pt_regs *pp, ptregs;
824 824
825 pp = (struct pt_regs *)(tsk->thread.rsp0); 825 pp = task_pt_regs(tsk);
826 --pp;
827 826
828 ptregs = *pp; 827 ptregs = *pp;
829 ptregs.cs &= 0xffff; 828 ptregs.cs &= 0xffff;
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 86248bc9303e..53205622351c 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -67,12 +67,6 @@ static inline unsigned long get_stack_long(struct task_struct *task, int offset)
67 return (*((unsigned long *)stack)); 67 return (*((unsigned long *)stack));
68} 68}
69 69
70static inline struct pt_regs *get_child_regs(struct task_struct *task)
71{
72 struct pt_regs *regs = (void *)task->thread.rsp0;
73 return regs - 1;
74}
75
76/* 70/*
77 * this routine will put a word on the process's privileged stack. 71 * this routine will put a word on the process's privileged stack.
78 * the offset is how far from the base addr as stored in the TSS. 72 * the offset is how far from the base addr as stored in the TSS.
@@ -170,7 +164,7 @@ static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
170 164
171static void set_singlestep(struct task_struct *child) 165static void set_singlestep(struct task_struct *child)
172{ 166{
173 struct pt_regs *regs = get_child_regs(child); 167 struct pt_regs *regs = task_pt_regs(child);
174 168
175 /* 169 /*
176 * Always set TIF_SINGLESTEP - this guarantees that 170 * Always set TIF_SINGLESTEP - this guarantees that
@@ -208,7 +202,7 @@ static void clear_singlestep(struct task_struct *child)
208 202
209 /* But touch TF only if it was set by us.. */ 203 /* But touch TF only if it was set by us.. */
210 if (child->ptrace & PT_DTRACE) { 204 if (child->ptrace & PT_DTRACE) {
211 struct pt_regs *regs = get_child_regs(child); 205 struct pt_regs *regs = task_pt_regs(child);
212 regs->eflags &= ~TRAP_FLAG; 206 regs->eflags &= ~TRAP_FLAG;
213 child->ptrace &= ~PT_DTRACE; 207 child->ptrace &= ~PT_DTRACE;
214 } 208 }
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index c9e941ae5019..a28756ef7cef 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -776,7 +776,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
776 776
777 if (c_idle.idle) { 777 if (c_idle.idle) {
778 c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *) 778 c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
779 (THREAD_SIZE + (unsigned long) c_idle.idle->thread_info)) - 1); 779 (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
780 init_idle(c_idle.idle, cpu); 780 init_idle(c_idle.idle, cpu);
781 goto do_rest; 781 goto do_rest;
782 } 782 }
@@ -814,7 +814,7 @@ do_rest:
814 init_rsp = c_idle.idle->thread.rsp; 814 init_rsp = c_idle.idle->thread.rsp;
815 per_cpu(init_tss,cpu).rsp0 = init_rsp; 815 per_cpu(init_tss,cpu).rsp0 = init_rsp;
816 initial_code = start_secondary; 816 initial_code = start_secondary;
817 clear_ti_thread_flag(c_idle.idle->thread_info, TIF_FORK); 817 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
818 818
819 printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu, 819 printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
820 cpus_weight(cpu_present_map), 820 cpus_weight(cpu_present_map),
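
The flag-helper swap in copy_thread() and do_boot_cpu() is equally behaviour-neutral: the task-based variants are thin wrappers around the thread_info-based ones, along these lines:

	/* Sketch of the wrappers (cf. include/linux/sched.h): */
	static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
	{
		set_ti_thread_flag(task_thread_info(tsk), flag);
	}

	static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
	{
		clear_ti_thread_flag(task_thread_info(tsk), flag);
	}
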
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 2671fd46ea85..8bb0aeda78b9 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -308,7 +308,7 @@ void show_registers(struct pt_regs *regs)
308 printk("CPU %d ", cpu); 308 printk("CPU %d ", cpu);
309 __show_regs(regs); 309 __show_regs(regs);
310 printk("Process %s (pid: %d, threadinfo %p, task %p)\n", 310 printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
311 cur->comm, cur->pid, cur->thread_info, cur); 311 cur->comm, cur->pid, task_thread_info(cur), cur);
312 312
313 /* 313 /*
314 * When in-kernel, we also print out the stack and code at the 314 * When in-kernel, we also print out the stack and code at the
@@ -666,7 +666,7 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
666 ; 666 ;
667 /* Exception from user space */ 667 /* Exception from user space */
668 else if (user_mode(eregs)) 668 else if (user_mode(eregs))
669 regs = ((struct pt_regs *)current->thread.rsp0) - 1; 669 regs = task_pt_regs(current);
670 /* Exception from kernel and interrupts are enabled. Move to 670 /* Exception from kernel and interrupts are enabled. Move to
671 kernel process stack. */ 671 kernel process stack. */
672 else if (eregs->eflags & X86_EFLAGS_IF) 672 else if (eregs->eflags & X86_EFLAGS_IF)
@@ -912,7 +912,7 @@ asmlinkage void math_state_restore(void)
912 if (!used_math()) 912 if (!used_math())
913 init_fpu(me); 913 init_fpu(me);
914 restore_fpu_checking(&me->thread.i387.fxsave); 914 restore_fpu_checking(&me->thread.i387.fxsave);
915 me->thread_info->status |= TS_USEDFPU; 915 task_thread_info(me)->status |= TS_USEDFPU;
916} 916}
917 917
918void __init trap_init(void) 918void __init trap_init(void)
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 6a44b54ae817..f1f596644bfc 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -145,7 +145,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
145 int user_mode = user_mode(regs); 145 int user_mode = user_mode(regs);
146 146
147 /* Set up new TSS. */ 147 /* Set up new TSS. */
148 tos = (unsigned long)p->thread_info + THREAD_SIZE; 148 tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
149 if (user_mode) 149 if (user_mode)
150 childregs = (struct pt_regs*)(tos - PT_USER_SIZE); 150 childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
151 else 151 else
@@ -217,7 +217,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
217unsigned long get_wchan(struct task_struct *p) 217unsigned long get_wchan(struct task_struct *p)
218{ 218{
219 unsigned long sp, pc; 219 unsigned long sp, pc;
220 unsigned long stack_page = (unsigned long) p->thread_info; 220 unsigned long stack_page = (unsigned long) task_stack_page(p);
221 int count = 0; 221 int count = 0;
222 222
223 if (!p || p == current || p->state == TASK_RUNNING) 223 if (!p || p == current || p->state == TASK_RUNNING)
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index ab5c4c65b5c4..4cc85285a70a 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -72,7 +72,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
72 struct pt_regs *regs; 72 struct pt_regs *regs;
73 unsigned long tmp; 73 unsigned long tmp;
74 74
75 regs = xtensa_pt_regs(child); 75 regs = task_pt_regs(child);
76 tmp = 0; /* Default return value. */ 76 tmp = 0; /* Default return value. */
77 77
78 switch(addr) { 78 switch(addr) {
@@ -149,7 +149,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
149 case PTRACE_POKEUSR: 149 case PTRACE_POKEUSR:
150 { 150 {
151 struct pt_regs *regs; 151 struct pt_regs *regs;
152 regs = xtensa_pt_regs(child); 152 regs = task_pt_regs(child);
153 153
154 switch (addr) { 154 switch (addr) {
155 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1: 155 case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
@@ -240,7 +240,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
240 * elf_gregset_t format. */ 240 * elf_gregset_t format. */
241 241
242 xtensa_gregset_t format; 242 xtensa_gregset_t format;
243 struct pt_regs *regs = xtensa_pt_regs(child); 243 struct pt_regs *regs = task_pt_regs(child);
244 244
245 do_copy_regs (&format, regs, child); 245 do_copy_regs (&format, regs, child);
246 246
@@ -257,7 +257,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
257 * values in the elf_gregset_t format. */ 257 * values in the elf_gregset_t format. */
258 258
259 xtensa_gregset_t format; 259 xtensa_gregset_t format;
260 struct pt_regs *regs = xtensa_pt_regs(child); 260 struct pt_regs *regs = task_pt_regs(child);
261 261
262 if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){ 262 if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
263 ret = -EFAULT; 263 ret = -EFAULT;
@@ -281,7 +281,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
281 * elf_fpregset_t format. */ 281 * elf_fpregset_t format. */
282 282
283 elf_fpregset_t fpregs; 283 elf_fpregset_t fpregs;
284 struct pt_regs *regs = xtensa_pt_regs(child); 284 struct pt_regs *regs = task_pt_regs(child);
285 285
286 do_save_fpregs (&fpregs, regs, child); 286 do_save_fpregs (&fpregs, regs, child);
287 287
@@ -299,7 +299,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
299 * values in the elf_fpregset_t format. 299 * values in the elf_fpregset_t format.
300 */ 300 */
301 elf_fpregset_t fpregs; 301 elf_fpregset_t fpregs;
302 struct pt_regs *regs = xtensa_pt_regs(child); 302 struct pt_regs *regs = task_pt_regs(child);
303 303
304 ret = 0; 304 ret = 0;
305 if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) { 305 if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
diff --git a/block/elevator.c b/block/elevator.c
index 99a4d7b2f8ad..1d0759178e4b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -610,23 +610,23 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
610 * request is released from the driver, io must be done 610 * request is released from the driver, io must be done
611 */ 611 */
612 if (blk_account_rq(rq)) { 612 if (blk_account_rq(rq)) {
613 struct request *first_rq = list_entry_rq(q->queue_head.next);
614
615 q->in_flight--; 613 q->in_flight--;
614 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
615 e->ops->elevator_completed_req_fn(q, rq);
616 }
616 617
617 /* 618 /*
618 * Check if the queue is waiting for fs requests to be 619 * Check if the queue is waiting for fs requests to be
619 * drained for flush sequence. 620 * drained for flush sequence.
620 */ 621 */
621 if (q->ordseq && q->in_flight == 0 && 622 if (unlikely(q->ordseq)) {
623 struct request *first_rq = list_entry_rq(q->queue_head.next);
624 if (q->in_flight == 0 &&
622 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && 625 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
623 blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) { 626 blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
624 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); 627 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
625 q->request_fn(q); 628 q->request_fn(q);
626 } 629 }
627
628 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
629 e->ops->elevator_completed_req_fn(q, rq);
630 } 630 }
631} 631}
632 632
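
The elevator.c hunk is easier to follow as the resulting function body: the elevator completion hook now runs inside the same blk_account_rq() branch as the in-flight accounting, and first_rq is only fetched once q->ordseq says an ordered sequence is actually in progress (otherwise the queue head need not hold a request at all). Reassembled from the right-hand column:

	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);

		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
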
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 3c679d30b698..b6e290956214 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -194,6 +194,8 @@ static DECLARE_WAIT_QUEUE_HEAD(ms_wait);
194 */ 194 */
195#define MAX_ERRORS 12 195#define MAX_ERRORS 12
196 196
197#define custom amiga_custom
198
197/* Prevent "aliased" accesses. */ 199/* Prevent "aliased" accesses. */
198static int fd_ref[4] = { 0,0,0,0 }; 200static int fd_ref[4] = { 0,0,0,0 };
199static int fd_device[4] = { 0, 0, 0, 0 }; 201static int fd_device[4] = { 0, 0, 0, 0 };
@@ -1439,6 +1441,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1439{ 1441{
1440 int drive = iminor(inode) & 3; 1442 int drive = iminor(inode) & 3;
1441 static struct floppy_struct getprm; 1443 static struct floppy_struct getprm;
1444 void __user *argp = (void __user *)param;
1442 1445
1443 switch(cmd){ 1446 switch(cmd){
1444 case FDFMTBEG: 1447 case FDFMTBEG:
@@ -1484,9 +1487,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1484 getprm.head=unit[drive].type->heads; 1487 getprm.head=unit[drive].type->heads;
1485 getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult; 1488 getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult;
1486 getprm.size=unit[drive].blocks; 1489 getprm.size=unit[drive].blocks;
1487 if (copy_to_user((void *)param, 1490 if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct)))
1488 (void *)&getprm,
1489 sizeof(struct floppy_struct)))
1490 return -EFAULT; 1491 return -EFAULT;
1491 break; 1492 break;
1492 case FDSETPRM: 1493 case FDSETPRM:
@@ -1498,8 +1499,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1498 break; 1499 break;
1499#ifdef RAW_IOCTL 1500#ifdef RAW_IOCTL
1500 case IOCTL_RAW_TRACK: 1501 case IOCTL_RAW_TRACK:
1501 if (copy_to_user((void *)param, raw_buf, 1502 if (copy_to_user(argp, raw_buf, unit[drive].type->read_size))
1502 unit[drive].type->read_size))
1503 return -EFAULT; 1503 return -EFAULT;
1504 else 1504 else
1505 return unit[drive].type->read_size; 1505 return unit[drive].type->read_size;
@@ -1654,12 +1654,6 @@ static struct block_device_operations floppy_fops = {
1654 .media_changed = amiga_floppy_change, 1654 .media_changed = amiga_floppy_change,
1655}; 1655};
1656 1656
1657void __init amiga_floppy_setup (char *str, int *ints)
1658{
1659 printk (KERN_INFO "amiflop: Setting default df0 to %x\n", ints[1]);
1660 fd_def_df0 = ints[1];
1661}
1662
1663static int __init fd_probe_drives(void) 1657static int __init fd_probe_drives(void)
1664{ 1658{
1665 int drive,drives,nomem; 1659 int drive,drives,nomem;
@@ -1845,4 +1839,18 @@ void cleanup_module(void)
1845 unregister_blkdev(FLOPPY_MAJOR, "fd"); 1839 unregister_blkdev(FLOPPY_MAJOR, "fd");
1846} 1840}
1847#endif 1841#endif
1842
1843#else
1844static int __init amiga_floppy_setup (char *str)
1845{
1846 int n;
1847 if (!MACH_IS_AMIGA)
1848 return 0;
1849 if (!get_option(&str, &n))
1850 return 0;
1851 printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n);
1852 fd_def_df0 = n;
1853 return 1;
1854}
1855
1856__setup("floppy=", amiga_floppy_setup);
1848#endif 1857#endif
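
Both floppy drivers here move from the old machine-specific setup hooks (which received a pre-parsed ints[] array) to the generic __setup() mechanism, doing their own parsing with get_option()/get_options() and bailing out early on the wrong machine. Booting with e.g. `floppy=3` on an Amiga now reaches amiga_floppy_setup() with the raw string. The general shape of such a handler, for illustration only:

	/* Illustrative __setup handler, not taken from this patch: */
	static int __init example_setup(char *str)
	{
		int n;

		if (!get_option(&str, &n))	/* parse one int out of str */
			return 0;		/* option not consumed */
		/* ... apply n ... */
		return 1;			/* option consumed */
	}
	__setup("example=", example_setup);
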
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 3aa68a5447d6..f8ce235ccfc3 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1361,7 +1361,7 @@ static int floppy_revalidate(struct gendisk *disk)
1361 formats, for 'permanent user-defined' parameter: 1361 formats, for 'permanent user-defined' parameter:
1362 restore default_params[] here if flagged valid! */ 1362 restore default_params[] here if flagged valid! */
1363 if (default_params[drive].blocks == 0) 1363 if (default_params[drive].blocks == 0)
1364 UDT = 0; 1364 UDT = NULL;
1365 else 1365 else
1366 UDT = &default_params[drive]; 1366 UDT = &default_params[drive];
1367 } 1367 }
@@ -1495,6 +1495,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1495 struct floppy_struct getprm; 1495 struct floppy_struct getprm;
1496 int settype; 1496 int settype;
1497 struct floppy_struct setprm; 1497 struct floppy_struct setprm;
1498 void __user *argp = (void __user *)param;
1498 1499
1499 switch (cmd) { 1500 switch (cmd) {
1500 case FDGETPRM: 1501 case FDGETPRM:
@@ -1521,7 +1522,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1521 getprm.head = 2; 1522 getprm.head = 2;
1522 getprm.track = dtp->blocks/dtp->spt/2; 1523 getprm.track = dtp->blocks/dtp->spt/2;
1523 getprm.stretch = dtp->stretch; 1524 getprm.stretch = dtp->stretch;
1524 if (copy_to_user((void *)param, &getprm, sizeof(getprm))) 1525 if (copy_to_user(argp, &getprm, sizeof(getprm)))
1525 return -EFAULT; 1526 return -EFAULT;
1526 return 0; 1527 return 0;
1527 } 1528 }
@@ -1540,7 +1541,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1540 /* get the parameters from user space */ 1541 /* get the parameters from user space */
1541 if (floppy->ref != 1 && floppy->ref != -1) 1542 if (floppy->ref != 1 && floppy->ref != -1)
1542 return -EBUSY; 1543 return -EBUSY;
1543 if (copy_from_user(&setprm, (void *) param, sizeof(setprm))) 1544 if (copy_from_user(&setprm, argp, sizeof(setprm)))
1544 return -EFAULT; 1545 return -EFAULT;
1545 /* 1546 /*
1546 * first of all: check for floppy change and revalidate, 1547 * first of all: check for floppy change and revalidate,
@@ -1647,7 +1648,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
1647 case FDFMTTRK: 1648 case FDFMTTRK:
1648 if (floppy->ref != 1 && floppy->ref != -1) 1649 if (floppy->ref != 1 && floppy->ref != -1)
1649 return -EBUSY; 1650 return -EBUSY;
1650 if (copy_from_user(&fmt_desc, (void *) param, sizeof(fmt_desc))) 1651 if (copy_from_user(&fmt_desc, argp, sizeof(fmt_desc)))
1651 return -EFAULT; 1652 return -EFAULT;
1652 return do_format(drive, type, &fmt_desc); 1653 return do_format(drive, type, &fmt_desc);
1653 case FDCLRPRM: 1654 case FDCLRPRM:
@@ -1950,14 +1951,20 @@ Enomem:
1950 return -ENOMEM; 1951 return -ENOMEM;
1951} 1952}
1952 1953
1953 1954#ifndef MODULE
1954void __init atari_floppy_setup( char *str, int *ints ) 1955static int __init atari_floppy_setup(char *str)
1955{ 1956{
1957 int ints[3 + FD_MAX_UNITS];
1956 int i; 1958 int i;
1959
1960 if (!MACH_IS_ATARI)
1961 return 0;
1962
1963 str = get_options(str, 3 + FD_MAX_UNITS, ints);
1957 1964
1958 if (ints[0] < 1) { 1965 if (ints[0] < 1) {
1959 printk(KERN_ERR "ataflop_setup: no arguments!\n" ); 1966 printk(KERN_ERR "ataflop_setup: no arguments!\n" );
1960 return; 1967 return 0;
1961 } 1968 }
1962 else if (ints[0] > 2+FD_MAX_UNITS) { 1969 else if (ints[0] > 2+FD_MAX_UNITS) {
1963 printk(KERN_ERR "ataflop_setup: too many arguments\n" ); 1970 printk(KERN_ERR "ataflop_setup: too many arguments\n" );
@@ -1977,9 +1984,13 @@ void __init atari_floppy_setup( char *str, int *ints )
1977 else 1984 else
1978 UserSteprate[i-3] = ints[i]; 1985 UserSteprate[i-3] = ints[i];
1979 } 1986 }
1987 return 1;
1980} 1988}
1981 1989
1982static void atari_floppy_exit(void) 1990__setup("floppy=", atari_floppy_setup);
1991#endif
1992
1993static void __exit atari_floppy_exit(void)
1983{ 1994{
1984 int i; 1995 int i;
1985 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 1996 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 869518e4035f..667a21c72edb 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -99,6 +99,7 @@ static char *serial_version = "4.30";
99#define _INLINE_ inline 99#define _INLINE_ inline
100#endif 100#endif
101 101
102#define custom amiga_custom
102static char *serial_name = "Amiga-builtin serial driver"; 103static char *serial_name = "Amiga-builtin serial driver";
103 104
104static struct tty_driver *serial_driver; 105static struct tty_driver *serial_driver;
@@ -1088,7 +1089,7 @@ static void rs_unthrottle(struct tty_struct * tty)
1088 */ 1089 */
1089 1090
1090static int get_serial_info(struct async_struct * info, 1091static int get_serial_info(struct async_struct * info,
1091 struct serial_struct * retinfo) 1092 struct serial_struct __user * retinfo)
1092{ 1093{
1093 struct serial_struct tmp; 1094 struct serial_struct tmp;
1094 struct serial_state *state = info->state; 1095 struct serial_state *state = info->state;
@@ -1112,7 +1113,7 @@ static int get_serial_info(struct async_struct * info,
1112} 1113}
1113 1114
1114static int set_serial_info(struct async_struct * info, 1115static int set_serial_info(struct async_struct * info,
1115 struct serial_struct * new_info) 1116 struct serial_struct __user * new_info)
1116{ 1117{
1117 struct serial_struct new_serial; 1118 struct serial_struct new_serial;
1118 struct serial_state old_state, *state; 1119 struct serial_state old_state, *state;
@@ -1193,7 +1194,7 @@ check_and_exit:
1193 * transmit holding register is empty. This functionality 1194 * transmit holding register is empty. This functionality
1194 * allows an RS485 driver to be written in user space. 1195 * allows an RS485 driver to be written in user space.
1195 */ 1196 */
1196static int get_lsr_info(struct async_struct * info, unsigned int *value) 1197static int get_lsr_info(struct async_struct * info, unsigned int __user *value)
1197{ 1198{
1198 unsigned char status; 1199 unsigned char status;
1199 unsigned int result; 1200 unsigned int result;
@@ -1284,6 +1285,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1284 struct async_struct * info = (struct async_struct *)tty->driver_data; 1285 struct async_struct * info = (struct async_struct *)tty->driver_data;
1285 struct async_icount cprev, cnow; /* kernel counter temps */ 1286 struct async_icount cprev, cnow; /* kernel counter temps */
1286 struct serial_icounter_struct icount; 1287 struct serial_icounter_struct icount;
1288 void __user *argp = (void __user *)arg;
1287 unsigned long flags; 1289 unsigned long flags;
1288 1290
1289 if (serial_paranoia_check(info, tty->name, "rs_ioctl")) 1291 if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
@@ -1298,19 +1300,17 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1298 1300
1299 switch (cmd) { 1301 switch (cmd) {
1300 case TIOCGSERIAL: 1302 case TIOCGSERIAL:
1301 return get_serial_info(info, 1303 return get_serial_info(info, argp);
1302 (struct serial_struct *) arg);
1303 case TIOCSSERIAL: 1304 case TIOCSSERIAL:
1304 return set_serial_info(info, 1305 return set_serial_info(info, argp);
1305 (struct serial_struct *) arg);
1306 case TIOCSERCONFIG: 1306 case TIOCSERCONFIG:
1307 return 0; 1307 return 0;
1308 1308
1309 case TIOCSERGETLSR: /* Get line status register */ 1309 case TIOCSERGETLSR: /* Get line status register */
1310 return get_lsr_info(info, (unsigned int *) arg); 1310 return get_lsr_info(info, argp);
1311 1311
1312 case TIOCSERGSTRUCT: 1312 case TIOCSERGSTRUCT:
1313 if (copy_to_user((struct async_struct *) arg, 1313 if (copy_to_user(argp,
1314 info, sizeof(struct async_struct))) 1314 info, sizeof(struct async_struct)))
1315 return -EFAULT; 1315 return -EFAULT;
1316 return 0; 1316 return 0;
@@ -1369,7 +1369,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1369 icount.brk = cnow.brk; 1369 icount.brk = cnow.brk;
1370 icount.buf_overrun = cnow.buf_overrun; 1370 icount.buf_overrun = cnow.buf_overrun;
1371 1371
1372 if (copy_to_user((void *)arg, &icount, sizeof(icount))) 1372 if (copy_to_user(argp, &icount, sizeof(icount)))
1373 return -EFAULT; 1373 return -EFAULT;
1374 return 0; 1374 return 0;
1375 case TIOCSERGWILD: 1375 case TIOCSERGWILD:
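
The amiserial changes (like the dsp56k ones that follow) are sparse annotations rather than behaviour changes: every pointer referring to user memory gains __user, and the ioctl entry point casts the raw `arg` exactly once into `void __user *argp`. The annotation costs nothing at run time; it lets sparse flag any direct dereference of a user pointer. The resulting pattern, sketched:

	/* Sketch: user memory is reached only via copy_to/from_user(). */
	static int example_get_info(struct async_struct *info,
				    struct serial_struct __user *retinfo)
	{
		struct serial_struct tmp;

		memset(&tmp, 0, sizeof(tmp));
		/* ... fill tmp from *info ... */
		if (copy_to_user(retinfo, &tmp, sizeof(tmp)))
			return -EFAULT;
		return 0;
	}
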
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 8693835cb2d5..e233cf280bc0 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -165,7 +165,7 @@ static int dsp56k_reset(void)
165 return 0; 165 return 0;
166} 166}
167 167
168static int dsp56k_upload(u_char *bin, int len) 168static int dsp56k_upload(u_char __user *bin, int len)
169{ 169{
170 int i; 170 int i;
171 u_char *p; 171 u_char *p;
@@ -199,7 +199,7 @@ static int dsp56k_upload(u_char *bin, int len)
199 return 0; 199 return 0;
200} 200}
201 201
202static ssize_t dsp56k_read(struct file *file, char *buf, size_t count, 202static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
203 loff_t *ppos) 203 loff_t *ppos)
204{ 204{
205 struct inode *inode = file->f_dentry->d_inode; 205 struct inode *inode = file->f_dentry->d_inode;
@@ -225,10 +225,10 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
225 } 225 }
226 case 2: /* 16 bit */ 226 case 2: /* 16 bit */
227 { 227 {
228 short *data; 228 short __user *data;
229 229
230 count /= 2; 230 count /= 2;
231 data = (short*) buf; 231 data = (short __user *) buf;
232 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, 232 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
233 put_user(dsp56k_host_interface.data.w[1], data+n++)); 233 put_user(dsp56k_host_interface.data.w[1], data+n++));
234 return 2*n; 234 return 2*n;
@@ -244,10 +244,10 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
244 } 244 }
245 case 4: /* 32 bit */ 245 case 4: /* 32 bit */
246 { 246 {
247 long *data; 247 long __user *data;
248 248
249 count /= 4; 249 count /= 4;
250 data = (long*) buf; 250 data = (long __user *) buf;
251 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE, 251 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
252 put_user(dsp56k_host_interface.data.l, data+n++)); 252 put_user(dsp56k_host_interface.data.l, data+n++));
253 return 4*n; 253 return 4*n;
@@ -262,7 +262,7 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
262 } 262 }
263} 263}
264 264
265static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count, 265static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
266 loff_t *ppos) 266 loff_t *ppos)
267{ 267{
268 struct inode *inode = file->f_dentry->d_inode; 268 struct inode *inode = file->f_dentry->d_inode;
@@ -287,10 +287,10 @@ static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
287 } 287 }
288 case 2: /* 16 bit */ 288 case 2: /* 16 bit */
289 { 289 {
290 const short *data; 290 const short __user *data;
291 291
292 count /= 2; 292 count /= 2;
293 data = (const short *)buf; 293 data = (const short __user *)buf;
294 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, 294 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
295 get_user(dsp56k_host_interface.data.w[1], data+n++)); 295 get_user(dsp56k_host_interface.data.w[1], data+n++));
296 return 2*n; 296 return 2*n;
@@ -306,10 +306,10 @@ static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
306 } 306 }
307 case 4: /* 32 bit */ 307 case 4: /* 32 bit */
308 { 308 {
309 const long *data; 309 const long __user *data;
310 310
311 count /= 4; 311 count /= 4;
312 data = (const long *)buf; 312 data = (const long __user *)buf;
313 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT, 313 handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
314 get_user(dsp56k_host_interface.data.l, data+n++)); 314 get_user(dsp56k_host_interface.data.l, data+n++));
315 return 4*n; 315 return 4*n;
@@ -328,6 +328,7 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
328 unsigned int cmd, unsigned long arg) 328 unsigned int cmd, unsigned long arg)
329{ 329{
330 int dev = iminor(inode) & 0x0f; 330 int dev = iminor(inode) & 0x0f;
331 void __user *argp = (void __user *)arg;
331 332
332 switch(dev) 333 switch(dev)
333 { 334 {
@@ -336,9 +337,9 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
336 switch(cmd) { 337 switch(cmd) {
337 case DSP56K_UPLOAD: 338 case DSP56K_UPLOAD:
338 { 339 {
339 char *bin; 340 char __user *bin;
340 int r, len; 341 int r, len;
341 struct dsp56k_upload *binary = (struct dsp56k_upload *) arg; 342 struct dsp56k_upload __user *binary = argp;
342 343
343 if(get_user(len, &binary->len) < 0) 344 if(get_user(len, &binary->len) < 0)
344 return -EFAULT; 345 return -EFAULT;
@@ -372,7 +373,7 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
372 case DSP56K_HOST_FLAGS: 373 case DSP56K_HOST_FLAGS:
373 { 374 {
374 int dir, out, status; 375 int dir, out, status;
375 struct dsp56k_host_flags *hf = (struct dsp56k_host_flags*) arg; 376 struct dsp56k_host_flags __user *hf = argp;
376 377
377 if(get_user(dir, &hf->dir) < 0) 378 if(get_user(dir, &hf->dir) < 0)
378 return -EFAULT; 379 return -EFAULT;
diff --git a/drivers/char/scc.h b/drivers/char/scc.h
index 51810f72f1a9..93998f5baff5 100644
--- a/drivers/char/scc.h
+++ b/drivers/char/scc.h
@@ -399,7 +399,7 @@ struct scc_port {
399 __asm__ __volatile__ ( "tstb %0" : : "g" (*_scc_del) : "cc" );\ 399 __asm__ __volatile__ ( "tstb %0" : : "g" (*_scc_del) : "cc" );\
400 } while (0) 400 } while (0)
401 401
402extern unsigned char scc_shadow[2][16]; 402static unsigned char scc_shadow[2][16];
403 403
404/* The following functions should relax the somewhat complicated 404/* The following functions should relax the somewhat complicated
405 * register access of the SCC. _SCCwrite() stores all written values 405 * register access of the SCC. _SCCwrite() stores all written values
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 362b33556b1a..745979f33dc2 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -159,7 +159,7 @@ struct input_event_compat {
159#ifdef CONFIG_X86_64 159#ifdef CONFIG_X86_64
160# define COMPAT_TEST is_compat_task() 160# define COMPAT_TEST is_compat_task()
161#elif defined(CONFIG_IA64) 161#elif defined(CONFIG_IA64)
162# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current)) 162# define COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
163#elif defined(CONFIG_S390) 163#elif defined(CONFIG_S390)
164# define COMPAT_TEST test_thread_flag(TIF_31BIT) 164# define COMPAT_TEST test_thread_flag(TIF_31BIT)
165#elif defined(CONFIG_MIPS) 165#elif defined(CONFIG_MIPS)
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 8558a99f6635..ec55a29fc861 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -64,8 +64,8 @@ static irqreturn_t amijoy_interrupt(int irq, void *dummy, struct pt_regs *fp)
64 if (amijoy[i]) { 64 if (amijoy[i]) {
65 65
66 switch (i) { 66 switch (i) {
67 case 0: data = ~custom.joy0dat; button = (~ciaa.pra >> 6) & 1; break; 67 case 0: data = ~amiga_custom.joy0dat; button = (~ciaa.pra >> 6) & 1; break;
68 case 1: data = ~custom.joy1dat; button = (~ciaa.pra >> 7) & 1; break; 68 case 1: data = ~amiga_custom.joy1dat; button = (~ciaa.pra >> 7) & 1; break;
69 } 69 }
70 70
71 input_regs(amijoy_dev[i], fp); 71 input_regs(amijoy_dev[i], fp);
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index d13d4c8fe3c5..c8b2cc9f184c 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -41,7 +41,7 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy, struct pt_regs *fp)
41 unsigned short joy0dat, potgor; 41 unsigned short joy0dat, potgor;
42 int nx, ny, dx, dy; 42 int nx, ny, dx, dy;
43 43
44 joy0dat = custom.joy0dat; 44 joy0dat = amiga_custom.joy0dat;
45 45
46 nx = joy0dat & 0xff; 46 nx = joy0dat & 0xff;
47 ny = joy0dat >> 8; 47 ny = joy0dat >> 8;
@@ -57,7 +57,7 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy, struct pt_regs *fp)
57 amimouse_lastx = nx; 57 amimouse_lastx = nx;
58 amimouse_lasty = ny; 58 amimouse_lasty = ny;
59 59
60 potgor = custom.potgor; 60 potgor = amiga_custom.potgor;
61 61
62 input_regs(amimouse_dev, fp); 62 input_regs(amimouse_dev, fp);
63 63
@@ -77,7 +77,7 @@ static int amimouse_open(struct input_dev *dev)
77{ 77{
78 unsigned short joy0dat; 78 unsigned short joy0dat;
79 79
80 joy0dat = custom.joy0dat; 80 joy0dat = amiga_custom.joy0dat;
81 81
82 amimouse_lastx = joy0dat & 0xff; 82 amimouse_lastx = joy0dat & 0xff;
83 amimouse_lasty = joy0dat >> 8; 83 amimouse_lasty = joy0dat >> 8;
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 71aeb912ec61..d56d400b6aaa 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -239,7 +239,7 @@ static int adb_iop_write(struct adb_request *req)
239 239
240 local_irq_save(flags); 240 local_irq_save(flags);
241 241
242 req->next = 0; 242 req->next = NULL;
243 req->sent = 0; 243 req->sent = 0;
244 req->complete = 0; 244 req->complete = 0;
245 req->reply_len = 0; 245 req->reply_len = 0;
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index e9a159ad3022..2a2ffe060169 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -260,7 +260,7 @@ static int macii_write(struct adb_request *req)
260 return -EINVAL; 260 return -EINVAL;
261 } 261 }
262 262
263 req->next = 0; 263 req->next = NULL;
264 req->sent = 0; 264 req->sent = 0;
265 req->complete = 0; 265 req->complete = 0;
266 req->reply_len = 0; 266 req->reply_len = 0;
@@ -295,7 +295,7 @@ static void macii_poll(void)
295 unsigned long flags; 295 unsigned long flags;
296 296
297 local_irq_save(flags); 297 local_irq_save(flags);
298 if (via[IFR] & SR_INT) macii_interrupt(0, 0, 0); 298 if (via[IFR] & SR_INT) macii_interrupt(0, NULL, NULL);
299 local_irq_restore(flags); 299 local_irq_restore(flags);
300} 300}
301 301
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index a1966975d58f..0129fcc3b183 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -294,6 +294,24 @@ static void maciisi_sync(struct adb_request *req)
294 printk(KERN_ERR "maciisi_send_request: poll timed out!\n"); 294 printk(KERN_ERR "maciisi_send_request: poll timed out!\n");
295} 295}
296 296
297int
298maciisi_request(struct adb_request *req, void (*done)(struct adb_request *),
299 int nbytes, ...)
300{
301 va_list list;
302 int i;
303
304 req->nbytes = nbytes;
305 req->done = done;
306 req->reply_expected = 0;
307 va_start(list, nbytes);
308 for (i = 0; i < nbytes; i++)
309 req->data[i] = va_arg(list, int);
310 va_end(list);
311
312 return maciisi_send_request(req, 1);
313}
314
297/* Enqueue a request, and run the queue if possible */ 315/* Enqueue a request, and run the queue if possible */
298static int 316static int
299maciisi_write(struct adb_request* req) 317maciisi_write(struct adb_request* req)
@@ -308,7 +326,7 @@ maciisi_write(struct adb_request* req)
308 req->complete = 1; 326 req->complete = 1;
309 return -EINVAL; 327 return -EINVAL;
310 } 328 }
311 req->next = 0; 329 req->next = NULL;
312 req->sent = 0; 330 req->sent = 0;
313 req->complete = 0; 331 req->complete = 0;
314 req->reply_len = 0; 332 req->reply_len = 0;
@@ -403,7 +421,7 @@ maciisi_poll(void)
403 421
404 local_irq_save(flags); 422 local_irq_save(flags);
405 if (via[IFR] & SR_INT) { 423 if (via[IFR] & SR_INT) {
406 maciisi_interrupt(0, 0, 0); 424 maciisi_interrupt(0, NULL, NULL);
407 } 425 }
408 else /* avoid calling this function too quickly in a loop */ 426 else /* avoid calling this function too quickly in a loop */
409 udelay(ADB_DELAY); 427 udelay(ADB_DELAY);
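
The new maciisi_request() gives this back end the same varargs front end the other ADB drivers already export (compare pmu_queue_request() in via-pmu68k.c below): callers pass the packet bytes inline and the helper packs them into req->data before queueing. A hypothetical caller:

	struct adb_request req;

	/* hypothetical use -- queue a two-byte ADB packet: */
	maciisi_request(&req, NULL, 2, ADB_PACKET, ADB_BUSRESET);
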
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index 6f80d76ac17c..f08e52f2107b 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -493,7 +493,7 @@ pmu_queue_request(struct adb_request *req)
493 return -EINVAL; 493 return -EINVAL;
494 } 494 }
495 495
496 req->next = 0; 496 req->next = NULL;
497 req->sent = 0; 497 req->sent = 0;
498 req->complete = 0; 498 req->complete = 0;
499 local_irq_save(flags); 499 local_irq_save(flags);
@@ -717,7 +717,7 @@ pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
717 printk(KERN_ERR "PMU: extra ADB reply\n"); 717 printk(KERN_ERR "PMU: extra ADB reply\n");
718 return; 718 return;
719 } 719 }
720 req_awaiting_reply = 0; 720 req_awaiting_reply = NULL;
721 if (len <= 2) 721 if (len <= 2)
722 req->reply_len = 0; 722 req->reply_len = 0;
723 else { 723 else {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0302723fa21f..1778104e106c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1238,6 +1238,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1238 mdk_rdev_t *same_pdev; 1238 mdk_rdev_t *same_pdev;
1239 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1239 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1240 struct kobject *ko; 1240 struct kobject *ko;
1241 char *s;
1241 1242
1242 if (rdev->mddev) { 1243 if (rdev->mddev) {
1243 MD_BUG(); 1244 MD_BUG();
@@ -1277,6 +1278,8 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1277 bdevname(rdev->bdev,b); 1278 bdevname(rdev->bdev,b);
1278 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0) 1279 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1279 return -ENOMEM; 1280 return -ENOMEM;
1281 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1282 *s = '!';
1280 1283
1281 list_add(&rdev->same_set, &mddev->disks); 1284 list_add(&rdev->same_set, &mddev->disks);
1282 rdev->mddev = mddev; 1285 rdev->mddev = mddev;
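
The md change sanitises the kobject name before registration: sysfs entry names cannot contain '/', so slashes in device names are rewritten to '!'. A member whose bdevname() is, say, "cciss/c0d0" therefore shows up in sysfs as "dev-cciss!c0d0". The idiom in isolation:

	/* Sketch of the substitution loop, where name is the
	 * kobject's name buffer: */
	char *s;

	while ((s = strchr(name, '/')) != NULL)
		*s = '!';
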
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index 08703d6f934c..d8410634bcaf 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -150,7 +150,7 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
150 lp->lance.name = (char*)d->name; /* discards const, shut up gcc */ 150 lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
151 lp->lance.base = va; 151 lp->lance.base = va;
152 lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */ 152 lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
153 lp->lance.lance_init_block = 0; /* LANCE addr of same RAM */ 153 lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
154 lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */ 154 lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
155 lp->lance.irq = d->ipl; 155 lp->lance.irq = d->ipl;
156 lp->lance.writerap = hplance_writerap; 156 lp->lance.writerap = hplance_writerap;
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index d8c99f038fa0..06cb460361a8 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -559,55 +559,52 @@ static void mac8390_no_reset(struct net_device *dev)
 /* directly from daynaport.c by Alan Cox */
 static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
 {
-	volatile unsigned short *ptr;
-	unsigned short *target=to;
+	volatile unsigned char *ptr;
+	unsigned char *target=to;
 	from<<=1;	/* word, skip overhead */
-	ptr=(unsigned short *)(dev->mem_start+from);
+	ptr=(unsigned char *)(dev->mem_start+from);
 	/* Leading byte? */
 	if (from&2) {
-		*((char *)target)++ = *(((char *)ptr++)-1);
+		*target++ = ptr[-1];
+		ptr += 2;
 		count--;
 	}
 	while(count>=2)
 	{
-		*target++=*ptr++;	/* Copy and */
-		ptr++;			/* skip cruft */
+		*(unsigned short *)target = *(unsigned short volatile *)ptr;
+		ptr += 4;			/* skip cruft */
+		target += 2;
 		count-=2;
 	}
 	/* Trailing byte? */
 	if(count)
-	{
-		/* Big endian */
-		unsigned short v=*ptr;
-		*((char *)target)=v>>8;
-	}
+		*target = *ptr;
 }

 static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
 {
 	volatile unsigned short *ptr;
-	const unsigned short *src=from;
+	const unsigned char *src=from;
 	to<<=1;	/* word, skip overhead */
 	ptr=(unsigned short *)(dev->mem_start+to);
 	/* Leading byte? */
 	if (to&2) { /* avoid a byte write (stomps on other data) */
-		ptr[-1] = (ptr[-1]&0xFF00)|*((unsigned char *)src)++;
+		ptr[-1] = (ptr[-1]&0xFF00)|*src++;
 		ptr++;
 		count--;
 	}
 	while(count>=2)
 	{
-		*ptr++=*src++;		/* Copy and */
+		*ptr++=*(unsigned short *)src;	/* Copy and */
 		ptr++;			/* skip cruft */
+		src += 2;
 		count-=2;
 	}
 	/* Trailing byte? */
 	if(count)
 	{
-		/* Big endian */
-		unsigned short v=*src;
 		/* card doesn't like byte writes */
-		*ptr=(*ptr&0x00FF)|(v&0xFF00);
+		*ptr=(*ptr&0x00FF)|(*src << 8);
 	}
 }

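The rewrite above fixes the use of the GCC "cast as lvalue" extension (removed in gcc 4) while preserving the card's access rules: Dayna card RAM presents each 16-bit data word at every other word address, so the card-side pointer advances four bytes per two payload bytes, and reads/writes stay 16 bits wide. A userspace model of the fixed read path, with an illustrative buffer layout:

/* Userspace model of the fixed dayna_memcpy_fromcard() above. */
#include <stdio.h>
#include <string.h>

static void model_fromcard(void *to, const unsigned char *card_base,
			   int from, int count)
{
	const unsigned char *ptr;
	unsigned char *target = to;

	from <<= 1;			/* word, skip overhead */
	ptr = card_base + from;
	if (from & 2) {			/* leading byte */
		*target++ = ptr[-1];
		ptr += 2;
		count--;
	}
	while (count >= 2) {
		memcpy(target, ptr, 2);	/* stands in for the 16-bit access */
		ptr += 4;		/* skip cruft */
		target += 2;
		count -= 2;
	}
	if (count)			/* trailing byte */
		*target = *ptr;
}

int main(void)
{
	/* 4 payload bytes spread across alternate 16-bit words */
	unsigned char card[16] = { 'A', 'B', 0, 0, 'C', 'D', 0, 0 };
	unsigned char out[4];

	model_fromcard(out, card, 0, 4);
	printf("%.4s\n", out);	/* prints ABCD */
	return 0;
}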
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 5c8fcd40ef4d..01bdb2334058 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -389,7 +389,7 @@ static int __init lance_probe( struct net_device *dev)
 	dev->stop = &lance_close;
 	dev->get_stats = &lance_get_stats;
 	dev->set_multicast_list = &set_multicast_list;
-	dev->set_mac_address = 0;
+	dev->set_mac_address = NULL;
 // KLUDGE -- REMOVE ME
 	set_bit(__LINK_STATE_PRESENT, &dev->state);

diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f062ea0f813a..6e0c059df6a5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o
 obj-$(CONFIG_BLZ2060_SCSI)	+= NCR53C9x.o	blz2060.o
 obj-$(CONFIG_BLZ1230_SCSI)	+= NCR53C9x.o	blz1230.o
 obj-$(CONFIG_FASTLANE_SCSI)	+= NCR53C9x.o	fastlane.o
-obj-$(CONFIG_OKTAGON_SCSI)	+= NCR53C9x.o	oktagon_esp.o oktagon_io.o
+obj-$(CONFIG_OKTAGON_SCSI)	+= NCR53C9x.o	oktagon_esp_mod.o
 obj-$(CONFIG_ATARI_SCSI)	+= atari_scsi.o
 obj-$(CONFIG_MAC_SCSI)		+= mac_scsi.o
 obj-$(CONFIG_SCSI_MAC_ESP)	+= mac_esp.o NCR53C9x.o
@@ -164,6 +164,7 @@ CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs		:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
 libata-objs		:= libata-core.o libata-scsi.o
+oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o

 # Files generated that shall be removed upon make clean
 clean-files :=	53c7xx_d.h 53c700_d.h	\
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 640590bd014a..c7dd0154d012 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -1799,6 +1799,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
 	 */
 	int oldphase, i = 0; /* or where we left off last time ?? esp->current_data ?? */
 	int fifocnt = 0;
+	unsigned char *p = phys_to_virt((unsigned long)SCptr->SCp.ptr);

 	oldphase = esp_read(eregs->esp_status) & ESP_STAT_PMASK;

@@ -1860,7 +1861,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)

 			/* read fifo */
 			for(j=0;j<fifocnt;j++)
-				SCptr->SCp.ptr[i++] = esp_read(eregs->esp_fdata);
+				p[i++] = esp_read(eregs->esp_fdata);

 			ESPDATA(("(%d) ", i));

@@ -1882,7 +1883,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)

 			/* fill fifo */
 			for(j=0;j<this_count;j++)
-				esp_write(eregs->esp_fdata, SCptr->SCp.ptr[i++]);
+				esp_write(eregs->esp_fdata, p[i++]);

 			/* how many left if this goes out ?? */
 			hmuch -= this_count;
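On these m68k platforms SCp.ptr apparently carries a physical address (it is handed to DMA engines elsewhere in the driver), so byte-wise PIO through it would dereference an unmapped address. The fix translates once up front and then indexes the virtual alias; sketched with hypothetical names:

/* Sketch only: translate a stored physical address once, then do PIO
 * through the kernel-virtual alias.  esp_fifo_reg is hypothetical. */
#include <asm/io.h>

static void pio_fill_from_fifo(volatile unsigned char *esp_fifo_reg,
			       void *phys_cookie, int fifocnt)
{
	unsigned char *p = phys_to_virt((unsigned long)phys_cookie);
	int i;

	for (i = 0; i < fifocnt; i++)
		p[i] = *esp_fifo_reg;
}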
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
index 763e409a1ff3..3867ac2de4c2 100644
--- a/drivers/scsi/blz1230.c
+++ b/drivers/scsi/blz1230.c
@@ -224,7 +224,7 @@ static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 static void dma_dump_state(struct NCR_ESP *esp)
 {
 	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-		custom.intreqr, custom.intenar));
+		amiga_custom.intreqr, amiga_custom.intenar));
 }

 void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -298,7 +298,7 @@ static int dma_irq_p(struct NCR_ESP *esp)

 static int dma_ports_p(struct NCR_ESP *esp)
 {
-	return ((custom.intenar) & IF_PORTS);
+	return ((amiga_custom.intenar) & IF_PORTS);
 }

 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
index d72d05fffdfa..4ebe69e32756 100644
--- a/drivers/scsi/blz2060.c
+++ b/drivers/scsi/blz2060.c
@@ -190,7 +190,7 @@ static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 static void dma_dump_state(struct NCR_ESP *esp)
 {
 	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-		custom.intreqr, custom.intenar));
+		amiga_custom.intreqr, amiga_custom.intenar));
 }

 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -251,7 +251,7 @@ static void dma_led_on(struct NCR_ESP *esp)

 static int dma_ports_p(struct NCR_ESP *esp)
 {
-	return ((custom.intenar) & IF_PORTS);
+	return ((amiga_custom.intenar) & IF_PORTS);
 }

 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
index f9b940e56430..a4a4fac5c0a1 100644
--- a/drivers/scsi/cyberstorm.c
+++ b/drivers/scsi/cyberstorm.c
@@ -223,7 +223,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
 		esp->esp_id, ((struct cyber_dma_registers *)
 		(esp->dregs))->cond_reg));
 	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-		custom.intreqr, custom.intenar));
+		amiga_custom.intreqr, amiga_custom.intenar));
 }

 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -322,7 +322,7 @@ static void dma_led_on(struct NCR_ESP *esp)

 static int dma_ports_p(struct NCR_ESP *esp)
 {
-	return ((custom.intenar) & IF_PORTS);
+	return ((amiga_custom.intenar) & IF_PORTS);
 }

 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
index a3caabfd7557..3a803d73bc5f 100644
--- a/drivers/scsi/cyberstormII.c
+++ b/drivers/scsi/cyberstormII.c
@@ -200,7 +200,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
 		esp->esp_id, ((struct cyberII_dma_registers *)
 		(esp->dregs))->cond_reg));
 	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-		custom.intreqr, custom.intenar));
+		amiga_custom.intreqr, amiga_custom.intenar));
 }

 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -259,7 +259,7 @@ static void dma_led_on(struct NCR_ESP *esp)

 static int dma_ports_p(struct NCR_ESP *esp)
 {
-	return ((custom.intenar) & IF_PORTS);
+	return ((amiga_custom.intenar) & IF_PORTS);
 }

 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
index ccee68b52f7e..8ae9c406a83b 100644
--- a/drivers/scsi/fastlane.c
+++ b/drivers/scsi/fastlane.c
@@ -268,7 +268,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
 		esp->esp_id, ((struct fastlane_dma_registers *)
 		(esp->dregs))->cond_reg));
 	ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-		custom.intreqr, custom.intenar));
+		amiga_custom.intreqr, amiga_custom.intenar));
 }

 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -368,7 +368,7 @@ static void dma_led_on(struct NCR_ESP *esp)

 static int dma_ports_p(struct NCR_ESP *esp)
 {
-	return ((custom.intenar) & IF_PORTS);
+	return ((amiga_custom.intenar) & IF_PORTS);
 }

 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index 5d9c9ada814f..dee426f8c07b 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -490,7 +490,7 @@ static void dma_led_on(struct NCR_ESP *esp)

 static int dma_ports_p(struct NCR_ESP *esp)
 {
-	return ((custom.intenar) & IF_PORTS);
+	return ((amiga_custom.intenar) & IF_PORTS);
 }

 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fd63add6a577..fb53eeaee617 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -465,7 +465,7 @@ wd33c93_execute(struct Scsi_Host *instance)
 	 */

 	cmd = (struct scsi_cmnd *) hostdata->input_Q;
-	prev = 0;
+	prev = NULL;
 	while (cmd) {
 		if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
 			break;
@@ -1569,7 +1569,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
 	 */

 	tmp = (struct scsi_cmnd *) hostdata->input_Q;
-	prev = 0;
+	prev = NULL;
 	while (tmp) {
 		if (tmp == cmd) {
 			if (prev)
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index d549e215f3c5..2c42a812655a 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -590,6 +590,8 @@ static u_short maxfmode, chipset;
 #define highw(x)	((u_long)(x)>>16 & 0xffff)
 #define loww(x)		((u_long)(x) & 0xffff)

+#define custom amiga_custom
+
 #define VBlankOn()	custom.intena = IF_SETCLR|IF_COPER
 #define VBlankOff()	custom.intena = IF_COPER

@@ -1164,8 +1166,8 @@ static void ami_update_display(void);
 static void ami_init_display(void);
 static void ami_do_blank(void);
 static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix);
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data);
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data);
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
 static int ami_get_cursorstate(struct fb_cursorstate *state);
 static int ami_set_cursorstate(struct fb_cursorstate *state);
 static void ami_set_sprite(void);
@@ -2179,6 +2181,7 @@ static int amifb_ioctl(struct inode *inode, struct file *file,
 		struct fb_var_cursorinfo var;
 		struct fb_cursorstate state;
 	} crsr;
+	void __user *argp = (void __user *)arg;
 	int i;

 	switch (cmd) {
@@ -2186,33 +2189,32 @@ static int amifb_ioctl(struct inode *inode, struct file *file,
 			i = ami_get_fix_cursorinfo(&crsr.fix);
 			if (i)
 				return i;
-			return copy_to_user((void *)arg, &crsr.fix,
+			return copy_to_user(argp, &crsr.fix,
 					    sizeof(crsr.fix)) ? -EFAULT : 0;

 		case FBIOGET_VCURSORINFO:
 			i = ami_get_var_cursorinfo(&crsr.var,
-				((struct fb_var_cursorinfo *)arg)->data);
+				((struct fb_var_cursorinfo __user *)arg)->data);
 			if (i)
 				return i;
-			return copy_to_user((void *)arg, &crsr.var,
+			return copy_to_user(argp, &crsr.var,
 					    sizeof(crsr.var)) ? -EFAULT : 0;

 		case FBIOPUT_VCURSORINFO:
-			if (copy_from_user(&crsr.var, (void *)arg,
-					   sizeof(crsr.var)))
+			if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
 				return -EFAULT;
 			return ami_set_var_cursorinfo(&crsr.var,
-				((struct fb_var_cursorinfo *)arg)->data);
+				((struct fb_var_cursorinfo __user *)arg)->data);

 		case FBIOGET_CURSORSTATE:
 			i = ami_get_cursorstate(&crsr.state);
 			if (i)
 				return i;
-			return copy_to_user((void *)arg, &crsr.state,
+			return copy_to_user(argp, &crsr.state,
 					    sizeof(crsr.state)) ? -EFAULT : 0;

 		case FBIOPUT_CURSORSTATE:
-			if (copy_from_user(&crsr.state, (void *)arg,
+			if (copy_from_user(&crsr.state, argp,
 					   sizeof(crsr.state)))
 				return -EFAULT;
 			return ami_set_cursorstate(&crsr.state);
@@ -3325,7 +3327,7 @@ static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix)
 	return 0;
 }

-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
 {
 	struct amifb_par *par = &currentpar;
 	register u_short *lspr, *sspr;
@@ -3347,14 +3349,14 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
 	var->yspot = par->crsr.spot_y;
 	if (size > var->height*var->width)
 		return -ENAMETOOLONG;
-	if (!access_ok(VERIFY_WRITE, (void *)data, size))
+	if (!access_ok(VERIFY_WRITE, data, size))
 		return -EFAULT;
 	delta = 1<<par->crsr.fmode;
 	lspr = lofsprite + (delta<<1);
 	if (par->bplcon0 & BPC0_LACE)
 		sspr = shfsprite + (delta<<1);
 	else
-		sspr = 0;
+		sspr = NULL;
 	for (height = (short)var->height-1; height >= 0; height--) {
 		bits = 0; words = delta; datawords = 0;
 		for (width = (short)var->width-1; width >= 0; width--) {
@@ -3400,7 +3402,7 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
 	return 0;
 }

-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
 {
 	struct amifb_par *par = &currentpar;
 	register u_short *lspr, *sspr;
@@ -3427,7 +3429,7 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
 		return -EINVAL;
 	if (!var->height)
 		return -EINVAL;
-	if (!access_ok(VERIFY_READ, (void *)data, var->width*var->height))
+	if (!access_ok(VERIFY_READ, data, var->width*var->height))
 		return -EFAULT;
 	delta = 1<<fmode;
 	lofsprite = shfsprite = (u_short *)spritememory;
@@ -3442,13 +3444,13 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
 		if (((var->height+2)<<fmode<<2) > SPRITEMEMSIZE)
 			return -EINVAL;
 		memset(lspr, 0, (var->height+2)<<fmode<<2);
-		sspr = 0;
+		sspr = NULL;
 	}
 	for (height = (short)var->height-1; height >= 0; height--) {
 		bits = 16; words = delta; datawords = 0;
 		for (width = (short)var->width-1; width >= 0; width--) {
 			unsigned long tdata = 0;
-			get_user(tdata, (char *)data);
+			get_user(tdata, data);
 			data++;
 #ifdef __mc68000__
 			asm volatile (
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index e370125e4fbc..ed81005cbdba 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -3501,7 +3501,7 @@ err_release_mem:

 static int __devinit atyfb_atari_probe(void)
 {
-	struct aty_par *par;
+	struct atyfb_par *par;
 	struct fb_info *info;
 	int m64_num;
 	u32 clock_r;
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index cfc748e94272..e6cbd9de944a 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -609,18 +609,19 @@ void __init macfb_setup(char *options)
 	}
 }

-void __init macfb_init(void)
+static int __init macfb_init(void)
 {
 	int video_cmap_len, video_is_nubus = 0;
 	struct nubus_dev* ndev = NULL;
 	char *option = NULL;
+	int err;

 	if (fb_get_options("macfb", &option))
 		return -ENODEV;
 	macfb_setup(option);

 	if (!MACH_IS_MAC)
-		return;
+		return -ENODEV;

 	/* There can only be one internal video controller anyway so
 	   we're not too worried about this */
@@ -958,11 +959,11 @@ void __init macfb_init(void)

 	fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0);

-	if (register_framebuffer(&fb_info) < 0)
-		return;
-
-	printk("fb%d: %s frame buffer device\n",
-	       fb_info.node, fb_info.fix.id);
+	err = register_framebuffer(&fb_info);
+	if (!err)
+		printk("fb%d: %s frame buffer device\n",
+		       fb_info.node, fb_info.fix.id);
+	return err;
 }

 module_init(macfb_init);
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 1a409c2c320c..7aa2d3de6d37 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -45,7 +45,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
 }

 static ssize_t
-proc_bus_zorro_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
+proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	struct inode *ino = file->f_dentry->d_inode;
 	struct proc_dir_entry *dp = PDE(ino);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index f0b7256b2f87..5dd0207ffd46 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -122,6 +122,7 @@
 #include <linux/dvb/dmx.h>
 #include <linux/dvb/frontend.h>
 #include <linux/dvb/video.h>
+#include <linux/lp.h>

 /* Aiee. Someone does not find a difference between int and long */
 #define EXT2_IOC32_GETFLAGS _IOR('f', 1, int)
@@ -2735,6 +2736,20 @@ static int do_ncp_setprivatedata(unsigned int fd, unsigned int cmd, unsigned lon
 }
 #endif

+static int
+lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+	struct compat_timeval *tc = (struct compat_timeval *)arg;
+	struct timeval *tn = compat_alloc_user_space(sizeof(struct timeval));
+	struct timeval ts;
+	if (get_user(ts.tv_sec, &tc->tv_sec) ||
+	    get_user(ts.tv_usec, &tc->tv_usec) ||
+	    put_user(ts.tv_sec, &tn->tv_sec) ||
+	    put_user(ts.tv_usec, &tn->tv_usec))
+		return -EFAULT;
+	return sys_ioctl(fd, cmd, (unsigned long)tn);
+}
+
 #define HANDLE_IOCTL(cmd,handler) \
 	{ (cmd), (ioctl_trans_handler_t)(handler) },

@@ -2962,6 +2977,20 @@ HANDLE_IOCTL(DMX_GET_EVENT, do_dmx_get_event)
 HANDLE_IOCTL(VIDEO_GET_EVENT, do_video_get_event)
 HANDLE_IOCTL(VIDEO_STILLPICTURE, do_video_stillpicture)
 HANDLE_IOCTL(VIDEO_SET_SPU_PALETTE, do_video_set_spu_palette)
+
+/* parport */
+COMPATIBLE_IOCTL(LPTIME)
+COMPATIBLE_IOCTL(LPCHAR)
+COMPATIBLE_IOCTL(LPABORTOPEN)
+COMPATIBLE_IOCTL(LPCAREFUL)
+COMPATIBLE_IOCTL(LPWAIT)
+COMPATIBLE_IOCTL(LPSETIRQ)
+COMPATIBLE_IOCTL(LPGETSTATUS)
+COMPATIBLE_IOCTL(LPGETSTATUS)
+COMPATIBLE_IOCTL(LPRESET)
+/*LPGETSTATS not implemented, but no kernels seem to compile it in anyways*/
+COMPATIBLE_IOCTL(LPGETFLAGS)
+HANDLE_IOCTL(LPSETTIMEOUT, lp_timeout_trans)
 };

 int ioctl_table_size = ARRAY_SIZE(ioctl_start);
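With these entries, 32-bit processes on 64-bit kernels can use the lp ioctls; LPSETTIMEOUT is the only one needing a translation handler because struct timeval changes layout between ABIs, so the handler above copies the 32-bit fields into a native struct in userspace scratch memory before re-issuing the ioctl. Typical (hypothetical) userspace usage of the ioctl itself:

/* Hypothetical userspace caller of LPSETTIMEOUT on /dev/lp0. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <unistd.h>
#include <linux/lp.h>

int set_lp_timeout(int seconds)
{
	struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };
	int fd = open("/dev/lp0", O_WRONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, LPSETTIMEOUT, &tv) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}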
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 94d3cdfbf9b8..d1db8c17a74e 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -40,11 +40,10 @@
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include <linux/mpage.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>

 STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
-STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
-		struct writeback_control *wbc, void *, int, int);

 #if defined(XFS_RW_TRACE)
 void
@@ -55,17 +54,15 @@ xfs_page_trace(
 	int		mask)
 {
 	xfs_inode_t	*ip;
-	bhv_desc_t	*bdp;
 	vnode_t		*vp = LINVFS_GET_VP(inode);
 	loff_t		isize = i_size_read(inode);
-	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+	loff_t		offset = page_offset(page);
 	int		delalloc = -1, unmapped = -1, unwritten = -1;

 	if (page_has_buffers(page))
 		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

-	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-	ip = XFS_BHVTOI(bdp);
+	ip = xfs_vtoi(vp);
 	if (!ip->i_rwtrace)
 		return;

@@ -103,15 +100,56 @@ xfs_finish_ioend(
 	queue_work(xfsdatad_workqueue, &ioend->io_work);
 }

+/*
+ * We're now finished for good with this ioend structure.
+ * Update the page state via the associated buffer_heads,
+ * release holds on the inode and bio, and finally free
+ * up memory.  Do not use the ioend after this.
+ */
 STATIC void
 xfs_destroy_ioend(
 	xfs_ioend_t		*ioend)
 {
+	struct buffer_head	*bh, *next;
+
+	for (bh = ioend->io_buffer_head; bh; bh = next) {
+		next = bh->b_private;
+		bh->b_end_io(bh, ioend->io_uptodate);
+	}
+
 	vn_iowake(ioend->io_vnode);
 	mempool_free(ioend, xfs_ioend_pool);
 }

 /*
+ * Buffered IO write completion for delayed allocate extents.
+ * TODO: Update ondisk isize now that we know the file data
+ * has been flushed (i.e. the notorious "NULL file" problem).
+ */
+STATIC void
+xfs_end_bio_delalloc(
+	void			*data)
+{
+	xfs_ioend_t		*ioend = data;
+
+	xfs_destroy_ioend(ioend);
+}
+
+/*
+ * Buffered IO write completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_written(
+	void			*data)
+{
+	xfs_ioend_t		*ioend = data;
+
+	xfs_destroy_ioend(ioend);
+}
+
+/*
+ * IO write completion for unwritten extents.
+ *
  * Issue transactions to convert a buffer range from unwritten
  * to written extents.
  */
@@ -123,21 +161,10 @@ xfs_end_bio_unwritten(
 	vnode_t			*vp = ioend->io_vnode;
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
-	struct buffer_head	*bh, *next;
 	int			error;

 	if (ioend->io_uptodate)
 		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
-
-	/* ioend->io_buffer_head is only non-NULL for buffered I/O */
-	for (bh = ioend->io_buffer_head; bh; bh = next) {
-		next = bh->b_private;
-
-		bh->b_end_io = NULL;
-		clear_buffer_unwritten(bh);
-		end_buffer_async_write(bh, ioend->io_uptodate);
-	}
-
 	xfs_destroy_ioend(ioend);
 }

@@ -149,7 +176,8 @@ xfs_end_bio_unwritten(
  */
 STATIC xfs_ioend_t *
 xfs_alloc_ioend(
-	struct inode		*inode)
+	struct inode		*inode,
+	unsigned int		type)
 {
 	xfs_ioend_t		*ioend;

@@ -162,45 +190,25 @@ xfs_alloc_ioend(
 	 */
 	atomic_set(&ioend->io_remaining, 1);
 	ioend->io_uptodate = 1; /* cleared if any I/O fails */
+	ioend->io_list = NULL;
+	ioend->io_type = type;
 	ioend->io_vnode = LINVFS_GET_VP(inode);
 	ioend->io_buffer_head = NULL;
+	ioend->io_buffer_tail = NULL;
 	atomic_inc(&ioend->io_vnode->v_iocount);
 	ioend->io_offset = 0;
 	ioend->io_size = 0;

-	INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+	if (type == IOMAP_UNWRITTEN)
+		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+	else if (type == IOMAP_DELAY)
+		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+	else
+		INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);

 	return ioend;
 }

-void
-linvfs_unwritten_done(
-	struct buffer_head	*bh,
-	int			uptodate)
-{
-	xfs_ioend_t		*ioend = bh->b_private;
-	static spinlock_t	unwritten_done_lock = SPIN_LOCK_UNLOCKED;
-	unsigned long		flags;
-
-	ASSERT(buffer_unwritten(bh));
-	bh->b_end_io = NULL;
-
-	if (!uptodate)
-		ioend->io_uptodate = 0;
-
-	/*
-	 * Deep magic here.  We reuse b_private in the buffer_heads to build
-	 * a chain for completing the I/O from user context after we've issued
-	 * a transaction to convert the unwritten extent.
-	 */
-	spin_lock_irqsave(&unwritten_done_lock, flags);
-	bh->b_private = ioend->io_buffer_head;
-	ioend->io_buffer_head = bh;
-	spin_unlock_irqrestore(&unwritten_done_lock, flags);
-
-	xfs_finish_ioend(ioend);
-}
-
 STATIC int
 xfs_map_blocks(
 	struct inode		*inode,
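Each ioend now records its extent type and picks a matching completion handler at allocation time, so post-I/O work (extent conversion for unwritten extents, and eventually on-disk size updates for delalloc) is dispatched without re-inspecting buffer state at completion. A minimal sketch of the 2.6-era deferred-completion pattern used here, with hypothetical names:

/* Sketch only: type-dispatched completion via the old 3-argument
 * INIT_WORK(work, func, data) API that this kernel generation uses. */
#include <linux/workqueue.h>

struct my_ioend {
	struct work_struct	io_work;
	unsigned int		io_type;
};

static void my_end_written(void *data)	 { /* plain write: nothing extra */ }
static void my_end_unwritten(void *data) { /* convert the extent here */ }

static void my_ioend_init(struct my_ioend *ioend, unsigned int type)
{
	ioend->io_type = type;
	if (type == 1 /* e.g. IOMAP_UNWRITTEN */)
		INIT_WORK(&ioend->io_work, my_end_unwritten, ioend);
	else
		INIT_WORK(&ioend->io_work, my_end_written, ioend);
}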
@@ -218,138 +226,260 @@ xfs_map_blocks(
 	return -error;
 }

+STATIC inline int
+xfs_iomap_valid(
+	xfs_iomap_t		*iomapp,
+	loff_t			offset)
+{
+	return offset >= iomapp->iomap_offset &&
+		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+}
+
 /*
- * Finds the corresponding mapping in block @map array of the
- * given @offset within a @page.
+ * BIO completion handler for buffered IO.
  */
-STATIC xfs_iomap_t *
-xfs_offset_to_map(
+STATIC int
+xfs_end_bio(
+	struct bio		*bio,
+	unsigned int		bytes_done,
+	int			error)
+{
+	xfs_ioend_t		*ioend = bio->bi_private;
+
+	if (bio->bi_size)
+		return 1;
+
+	ASSERT(ioend);
+	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+
+	/* Toss bio and pass work off to an xfsdatad thread */
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+		ioend->io_uptodate = 0;
+	bio->bi_private = NULL;
+	bio->bi_end_io = NULL;
+
+	bio_put(bio);
+	xfs_finish_ioend(ioend);
+	return 0;
+}
+
+STATIC void
+xfs_submit_ioend_bio(
+	xfs_ioend_t		*ioend,
+	struct bio		*bio)
+{
+	atomic_inc(&ioend->io_remaining);
+
+	bio->bi_private = ioend;
+	bio->bi_end_io = xfs_end_bio;
+
+	submit_bio(WRITE, bio);
+	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
+	bio_put(bio);
+}
+
+STATIC struct bio *
+xfs_alloc_ioend_bio(
+	struct buffer_head	*bh)
+{
+	struct bio		*bio;
+	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
+
+	do {
+		bio = bio_alloc(GFP_NOIO, nvecs);
+		nvecs >>= 1;
+	} while (!bio);
+
+	ASSERT(bio->bi_private == NULL);
+	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_bdev = bh->b_bdev;
+	bio_get(bio);
+	return bio;
+}
+
+STATIC void
+xfs_start_buffer_writeback(
+	struct buffer_head	*bh)
+{
+	ASSERT(buffer_mapped(bh));
+	ASSERT(buffer_locked(bh));
+	ASSERT(!buffer_delay(bh));
+	ASSERT(!buffer_unwritten(bh));
+
+	mark_buffer_async_write(bh);
+	set_buffer_uptodate(bh);
+	clear_buffer_dirty(bh);
+}
+
+STATIC void
+xfs_start_page_writeback(
 	struct page		*page,
-	xfs_iomap_t		*iomapp,
-	unsigned long		offset)
+	struct writeback_control *wbc,
+	int			clear_dirty,
+	int			buffers)
+{
+	ASSERT(PageLocked(page));
+	ASSERT(!PageWriteback(page));
+	set_page_writeback(page);
+	if (clear_dirty)
+		clear_page_dirty(page);
+	unlock_page(page);
+	if (!buffers) {
+		end_page_writeback(page);
+		wbc->pages_skipped++;	/* We didn't write this page */
+	}
+}
+
+static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+{
+	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/*
+ * Submit all of the bios for all of the ioends we have saved up,
+ * covering the initial writepage page and also any probed pages.
+ */
+STATIC void
+xfs_submit_ioend(
+	xfs_ioend_t		*ioend)
+{
+	xfs_ioend_t		*next;
+	struct buffer_head	*bh;
+	struct bio		*bio;
+	sector_t		lastblock = 0;
+
+	do {
+		next = ioend->io_list;
+		bio = NULL;
+
+		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+			xfs_start_buffer_writeback(bh);
+
+			if (!bio) {
+ retry:
+				bio = xfs_alloc_ioend_bio(bh);
+			} else if (bh->b_blocknr != lastblock + 1) {
+				xfs_submit_ioend_bio(ioend, bio);
+				goto retry;
+			}
+
+			if (bio_add_buffer(bio, bh) != bh->b_size) {
+				xfs_submit_ioend_bio(ioend, bio);
+				goto retry;
+			}
+
+			lastblock = bh->b_blocknr;
+		}
+		if (bio)
+			xfs_submit_ioend_bio(ioend, bio);
+		xfs_finish_ioend(ioend);
+	} while ((ioend = next) != NULL);
+}
+
+/*
+ * Cancel submission of all buffer_heads so far in this endio.
+ * Toss the endio too.  Only ever called for the initial page
+ * in a writepage request, so only ever one page.
+ */
+STATIC void
+xfs_cancel_ioend(
+	xfs_ioend_t		*ioend)
+{
+	xfs_ioend_t		*next;
+	struct buffer_head	*bh, *next_bh;
+
+	do {
+		next = ioend->io_list;
+		bh = ioend->io_buffer_head;
+		do {
+			next_bh = bh->b_private;
+			clear_buffer_async_write(bh);
+			unlock_buffer(bh);
+		} while ((bh = next_bh) != NULL);
+
+		vn_iowake(ioend->io_vnode);
+		mempool_free(ioend, xfs_ioend_pool);
+	} while ((ioend = next) != NULL);
+}
+
+/*
+ * Test to see if we've been building up a completion structure for
+ * earlier buffers -- if so, we try to append to this ioend if we
+ * can, otherwise we finish off any current ioend and start another.
+ * Return true if we've finished the given ioend.
+ */
+STATIC void
+xfs_add_to_ioend(
+	struct inode		*inode,
+	struct buffer_head	*bh,
+	xfs_off_t		offset,
+	unsigned int		type,
+	xfs_ioend_t		**result,
+	int			need_ioend)
 {
-	loff_t			full_offset;	/* offset from start of file */
+	xfs_ioend_t		*ioend = *result;

-	ASSERT(offset < PAGE_CACHE_SIZE);
+	if (!ioend || need_ioend || type != ioend->io_type) {
+		xfs_ioend_t	*previous = *result;

-	full_offset = page->index;		/* NB: using 64bit number */
-	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
-	full_offset += offset;			/* offset from page start */
+		ioend = xfs_alloc_ioend(inode, type);
+		ioend->io_offset = offset;
+		ioend->io_buffer_head = bh;
+		ioend->io_buffer_tail = bh;
+		if (previous)
+			previous->io_list = ioend;
+		*result = ioend;
+	} else {
+		ioend->io_buffer_tail->b_private = bh;
+		ioend->io_buffer_tail = bh;
+	}

-	if (full_offset < iomapp->iomap_offset)
-		return NULL;
-	if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
-		return iomapp;
-	return NULL;
+	bh->b_private = NULL;
+	ioend->io_size += bh->b_size;
 }

 STATIC void
 xfs_map_at_offset(
-	struct page		*page,
 	struct buffer_head	*bh,
-	unsigned long		offset,
+	loff_t			offset,
 	int			block_bits,
 	xfs_iomap_t		*iomapp)
 {
 	xfs_daddr_t		bn;
-	loff_t			delta;
 	int			sector_shift;

 	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
 	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
 	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

-	delta = page->index;
-	delta <<= PAGE_CACHE_SHIFT;
-	delta += offset;
-	delta -= iomapp->iomap_offset;
-	delta >>= block_bits;
-
 	sector_shift = block_bits - BBSHIFT;
-	bn = iomapp->iomap_bn >> sector_shift;
-	bn += delta;
-	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
+	bn = (iomapp->iomap_bn >> sector_shift) +
+	      ((offset - iomapp->iomap_offset) >> block_bits);
+
+	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
 	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

 	lock_buffer(bh);
 	bh->b_blocknr = bn;
-	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+	bh->b_bdev = iomapp->iomap_target->bt_bdev;
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
+	clear_buffer_unwritten(bh);
 }

 /*
- * Look for a page at index which is unlocked and contains our
- * unwritten extent flagged buffers at its head.  Returns page
- * locked and with an extra reference count, and length of the
- * unwritten extent component on this page that we can write,
- * in units of filesystem blocks.
- */
-STATIC struct page *
-xfs_probe_unwritten_page(
-	struct address_space	*mapping,
-	pgoff_t			index,
-	xfs_iomap_t		*iomapp,
-	xfs_ioend_t		*ioend,
-	unsigned long		max_offset,
-	unsigned long		*fsbs,
-	unsigned int		bbits)
-{
-	struct page		*page;
-
-	page = find_trylock_page(mapping, index);
-	if (!page)
-		return NULL;
-	if (PageWriteback(page))
-		goto out;
-
-	if (page->mapping && page_has_buffers(page)) {
-		struct buffer_head	*bh, *head;
-		unsigned long		p_offset = 0;
-
-		*fsbs = 0;
-		bh = head = page_buffers(page);
-		do {
-			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
-				break;
-			if (!xfs_offset_to_map(page, iomapp, p_offset))
-				break;
-			if (p_offset >= max_offset)
-				break;
-			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
-			set_buffer_unwritten_io(bh);
-			bh->b_private = ioend;
-			p_offset += bh->b_size;
-			(*fsbs)++;
-		} while ((bh = bh->b_this_page) != head);
-
-		if (p_offset)
-			return page;
-	}
-
-out:
-	unlock_page(page);
-	return NULL;
-}
-
-/*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
-	struct address_space	*mapping,
-	pgoff_t			index,
-	unsigned int		pg_offset)
+xfs_probe_page(
+	struct page		*page,
+	unsigned int		pg_offset,
+	int			mapped)
 {
-	struct page		*page;
 	int			ret = 0;

-	page = find_trylock_page(mapping, index);
-	if (!page)
-		return 0;
 	if (PageWriteback(page))
-		goto out;
+		return 0;

 	if (page->mapping && PageDirty(page)) {
 		if (page_has_buffers(page)) {
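The submission path above packs physically contiguous buffers into as few bios as possible: a bio is flushed and restarted whenever block contiguity breaks or bio_add_page() cannot accept the whole buffer (device vector limits). The control flow, reduced to a self-contained skeleton with hypothetical helpers standing in for the xfs_*_ioend_bio() routines:

/* Skeleton of the bio-packing loop in xfs_submit_ioend() above. */
#include <linux/bio.h>
#include <linux/buffer_head.h>

struct bio *alloc_bio_for(struct buffer_head *bh);	/* hypothetical */
void submit_and_put(struct bio *bio);			/* hypothetical */

static void pack_and_submit(struct buffer_head *head)
{
	sector_t last = 0;
	struct bio *bio = NULL;
	struct buffer_head *bh;

	for (bh = head; bh; bh = bh->b_private) {	/* b_private chains the ioend */
		if (!bio) {
retry:
			bio = alloc_bio_for(bh);
		} else if (bh->b_blocknr != last + 1) {
			submit_and_put(bio);		/* contiguity broke */
			goto retry;
		}
		if (bio_add_page(bio, bh->b_page, bh->b_size,
				 bh_offset(bh)) != bh->b_size) {
			submit_and_put(bio);		/* bio is full */
			goto retry;
		}
		last = bh->b_blocknr;
	}
	if (bio)
		submit_and_put(bio);
}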
@@ -357,79 +487,101 @@ xfs_probe_unmapped_page(

 			bh = head = page_buffers(page);
 			do {
-				if (buffer_mapped(bh) || !buffer_uptodate(bh))
+				if (!buffer_uptodate(bh))
+					break;
+				if (mapped != buffer_mapped(bh))
 					break;
 				ret += bh->b_size;
 				if (ret >= pg_offset)
 					break;
 			} while ((bh = bh->b_this_page) != head);
 		} else
-			ret = PAGE_CACHE_SIZE;
+			ret = mapped ? 0 : PAGE_CACHE_SIZE;
 	}

-out:
-	unlock_page(page);
 	return ret;
 }

-STATIC unsigned int
-xfs_probe_unmapped_cluster(
+STATIC size_t
+xfs_probe_cluster(
 	struct inode		*inode,
 	struct page		*startpage,
 	struct buffer_head	*bh,
-	struct buffer_head	*head)
+	struct buffer_head	*head,
+	int			mapped)
 {
+	struct pagevec		pvec;
 	pgoff_t			tindex, tlast, tloff;
-	unsigned int		pg_offset, len, total = 0;
-	struct address_space	*mapping = inode->i_mapping;
+	size_t			total = 0;
+	int			done = 0, i;

 	/* First sum forwards in this page */
 	do {
-		if (buffer_mapped(bh))
-			break;
+		if (mapped != buffer_mapped(bh))
+			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);

-	/* If we reached the end of the page, sum forwards in
-	 * following pages.
-	 */
-	if (bh == head) {
-		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-		/* Prune this back to avoid pathological behavior */
-		tloff = min(tlast, startpage->index + 64);
-		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
-			len = xfs_probe_unmapped_page(mapping, tindex,
-							PAGE_CACHE_SIZE);
-			if (!len)
-				return total;
+	/* if we reached the end of the page, sum forwards in following pages */
+	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+	tindex = startpage->index + 1;
+
+	/* Prune this back to avoid pathological behavior */
+	tloff = min(tlast, startpage->index + 64);
+
+	pagevec_init(&pvec, 0);
+	while (!done && tindex <= tloff) {
+		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+			break;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			size_t pg_offset, len = 0;
+
+			if (tindex == tlast) {
+				pg_offset =
+				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+				if (!pg_offset) {
+					done = 1;
+					break;
+				}
+			} else
+				pg_offset = PAGE_CACHE_SIZE;
+
+			if (page->index == tindex && !TestSetPageLocked(page)) {
+				len = xfs_probe_page(page, pg_offset, mapped);
+				unlock_page(page);
+			}
+
+			if (!len) {
+				done = 1;
+				break;
+			}
+
 			total += len;
+			tindex++;
 		}
-		if (tindex == tlast &&
-		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-			total += xfs_probe_unmapped_page(mapping,
-							tindex, pg_offset);
-		}
+
+		pagevec_release(&pvec);
+		cond_resched();
 	}
+
 	return total;
 }

 /*
- * Probe for a given page (index) in the inode and test if it is delayed
- * and without unwritten buffers.  Returns page locked and with an extra
- * reference count.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
  */
-STATIC struct page *
-xfs_probe_delalloc_page(
-	struct inode		*inode,
-	pgoff_t			index)
+STATIC int
+xfs_is_delayed_page(
+	struct page		*page,
+	unsigned int		type)
 {
-	struct page		*page;
-
-	page = find_trylock_page(inode->i_mapping, index);
-	if (!page)
-		return NULL;
 	if (PageWriteback(page))
-		goto out;
+		return 0;

 	if (page->mapping && page_has_buffers(page)) {
 		struct buffer_head	*bh, *head;
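Clustering now walks the page cache in PAGEVEC_SIZE batches instead of one find_trylock_page() per index, only trylocking pages it actually intends to probe, with cond_resched() between batches to bound latency. The batching idiom in isolation (2.6-era API; probe_one() is a hypothetical per-page predicate):

/* The pagevec batching idiom used above. */
#include <linux/kernel.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

int probe_one(struct page *page);	/* hypothetical */

static void walk_range(struct address_space *mapping,
		       pgoff_t index, pgoff_t last)
{
	struct pagevec pvec;
	int done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && index <= last) {
		unsigned nr = min_t(pgoff_t, PAGEVEC_SIZE, last - index + 1);

		if (!pagevec_lookup(&pvec, mapping, index, nr))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* lookup may return pages past a hole */
			if (page->index != index || !probe_one(page)) {
				done = 1;
				break;
			}
			index++;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}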
@@ -437,243 +589,156 @@ xfs_probe_delalloc_page(
437 589
438 bh = head = page_buffers(page); 590 bh = head = page_buffers(page);
439 do { 591 do {
440 if (buffer_unwritten(bh)) { 592 if (buffer_unwritten(bh))
441 acceptable = 0; 593 acceptable = (type == IOMAP_UNWRITTEN);
594 else if (buffer_delay(bh))
595 acceptable = (type == IOMAP_DELAY);
596 else if (buffer_mapped(bh))
597 acceptable = (type == 0);
598 else
442 break; 599 break;
443 } else if (buffer_delay(bh)) {
444 acceptable = 1;
445 }
446 } while ((bh = bh->b_this_page) != head); 600 } while ((bh = bh->b_this_page) != head);
447 601
448 if (acceptable) 602 if (acceptable)
449 return page; 603 return 1;
450 }
451
452out:
453 unlock_page(page);
454 return NULL;
455}
456
457STATIC int
458xfs_map_unwritten(
459 struct inode *inode,
460 struct page *start_page,
461 struct buffer_head *head,
462 struct buffer_head *curr,
463 unsigned long p_offset,
464 int block_bits,
465 xfs_iomap_t *iomapp,
466 struct writeback_control *wbc,
467 int startio,
468 int all_bh)
469{
470 struct buffer_head *bh = curr;
471 xfs_iomap_t *tmp;
472 xfs_ioend_t *ioend;
473 loff_t offset;
474 unsigned long nblocks = 0;
475
476 offset = start_page->index;
477 offset <<= PAGE_CACHE_SHIFT;
478 offset += p_offset;
479
480 ioend = xfs_alloc_ioend(inode);
481
482 /* First map forwards in the page consecutive buffers
483 * covering this unwritten extent
484 */
485 do {
486 if (!buffer_unwritten(bh))
487 break;
488 tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
489 if (!tmp)
490 break;
491 xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
492 set_buffer_unwritten_io(bh);
493 bh->b_private = ioend;
494 p_offset += bh->b_size;
495 nblocks++;
496 } while ((bh = bh->b_this_page) != head);
497
498 atomic_add(nblocks, &ioend->io_remaining);
499
500 /* If we reached the end of the page, map forwards in any
501 * following pages which are also covered by this extent.
502 */
503 if (bh == head) {
504 struct address_space *mapping = inode->i_mapping;
505 pgoff_t tindex, tloff, tlast;
506 unsigned long bs;
507 unsigned int pg_offset, bbits = inode->i_blkbits;
508 struct page *page;
509
510 tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
511 tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
512 tloff = min(tlast, tloff);
513 for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
514 page = xfs_probe_unwritten_page(mapping,
515 tindex, iomapp, ioend,
516 PAGE_CACHE_SIZE, &bs, bbits);
517 if (!page)
518 break;
519 nblocks += bs;
520 atomic_add(bs, &ioend->io_remaining);
521 xfs_convert_page(inode, page, iomapp, wbc, ioend,
522 startio, all_bh);
523 /* stop if converting the next page might add
524 * enough blocks that the corresponding byte
525 * count won't fit in our ulong page buf length */
526 if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
527 goto enough;
528 }
529
530 if (tindex == tlast &&
531 (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
532 page = xfs_probe_unwritten_page(mapping,
533 tindex, iomapp, ioend,
534 pg_offset, &bs, bbits);
535 if (page) {
536 nblocks += bs;
537 atomic_add(bs, &ioend->io_remaining);
538 xfs_convert_page(inode, page, iomapp, wbc, ioend,
539 startio, all_bh);
540 if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
541 goto enough;
542 }
543 }
544 } 604 }
545 605
546enough:
547 ioend->io_size = (xfs_off_t)nblocks << block_bits;
548 ioend->io_offset = offset;
549 xfs_finish_ioend(ioend);
550 return 0; 606 return 0;
551} 607}
552 608
553STATIC void
554xfs_submit_page(
555 struct page *page,
556 struct writeback_control *wbc,
557 struct buffer_head *bh_arr[],
558 int bh_count,
559 int probed_page,
560 int clear_dirty)
561{
562 struct buffer_head *bh;
563 int i;
564
565 BUG_ON(PageWriteback(page));
566 if (bh_count)
567 set_page_writeback(page);
568 if (clear_dirty)
569 clear_page_dirty(page);
570 unlock_page(page);
571
572 if (bh_count) {
573 for (i = 0; i < bh_count; i++) {
574 bh = bh_arr[i];
575 mark_buffer_async_write(bh);
576 if (buffer_unwritten(bh))
577 set_buffer_unwritten_io(bh);
578 set_buffer_uptodate(bh);
579 clear_buffer_dirty(bh);
580 }
581
582 for (i = 0; i < bh_count; i++)
583 submit_bh(WRITE, bh_arr[i]);
584
585 if (probed_page && clear_dirty)
586 wbc->nr_to_write--; /* Wrote an "extra" page */
587 }
588}
589
590/* 609/*
591 * Allocate & map buffers for page given the extent map. Write it out. 610 * Allocate & map buffers for page given the extent map. Write it out.
592 * except for the original page of a writepage, this is called on 611 * except for the original page of a writepage, this is called on
593 * delalloc/unwritten pages only, for the original page it is possible 612 * delalloc/unwritten pages only, for the original page it is possible
594 * that the page has no mapping at all. 613 * that the page has no mapping at all.
595 */ 614 */
596STATIC void 615STATIC int
597xfs_convert_page( 616xfs_convert_page(
598 struct inode *inode, 617 struct inode *inode,
599 struct page *page, 618 struct page *page,
600 xfs_iomap_t *iomapp, 619 loff_t tindex,
620 xfs_iomap_t *mp,
621 xfs_ioend_t **ioendp,
601 struct writeback_control *wbc, 622 struct writeback_control *wbc,
602 void *private,
603 int startio, 623 int startio,
604 int all_bh) 624 int all_bh)
605{ 625{
606 struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; 626 struct buffer_head *bh, *head;
607 xfs_iomap_t *mp = iomapp, *tmp; 627 xfs_off_t end_offset;
608 unsigned long offset, end_offset; 628 unsigned long p_offset;
609 int index = 0; 629 unsigned int type;
610 int bbits = inode->i_blkbits; 630 int bbits = inode->i_blkbits;
611 int len, page_dirty; 631 int len, page_dirty;
632 int count = 0, done = 0, uptodate = 1;
633 xfs_off_t offset = page_offset(page);
612 634
613 end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)); 635 if (page->index != tindex)
636 goto fail;
637 if (TestSetPageLocked(page))
638 goto fail;
639 if (PageWriteback(page))
640 goto fail_unlock_page;
641 if (page->mapping != inode->i_mapping)
642 goto fail_unlock_page;
643 if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
644 goto fail_unlock_page;
614 645
615 /* 646 /*
616 * page_dirty is initially a count of buffers on the page before 647 * page_dirty is initially a count of buffers on the page before
617 * EOF and is decrememted as we move each into a cleanable state. 648 * EOF and is decrememted as we move each into a cleanable state.
649 *
650 * Derivation:
651 *
652 * End offset is the highest offset that this page should represent.
653 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
654 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
655 * hence give us the correct page_dirty count. On any other page,
656 * it will be zero and in that case we need page_dirty to be the
657 * count of buffers on the page.
618 */ 658 */
659 end_offset = min_t(unsigned long long,
660 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
661 i_size_read(inode));
662
619 len = 1 << inode->i_blkbits; 663 len = 1 << inode->i_blkbits;
620 end_offset = max(end_offset, PAGE_CACHE_SIZE); 664 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
621 end_offset = roundup(end_offset, len); 665 PAGE_CACHE_SIZE);
622 page_dirty = end_offset / len; 666 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
667 page_dirty = p_offset / len;
623 668
624 offset = 0;
625 bh = head = page_buffers(page); 669 bh = head = page_buffers(page);
626 do { 670 do {
627 if (offset >= end_offset) 671 if (offset >= end_offset)
628 break; 672 break;
629 if (!(PageUptodate(page) || buffer_uptodate(bh))) 673 if (!buffer_uptodate(bh))
674 uptodate = 0;
675 if (!(PageUptodate(page) || buffer_uptodate(bh))) {
676 done = 1;
630 continue; 677 continue;
631 if (buffer_mapped(bh) && all_bh && 678 }
632 !(buffer_unwritten(bh) || buffer_delay(bh))) { 679
680 if (buffer_unwritten(bh) || buffer_delay(bh)) {
681 if (buffer_unwritten(bh))
682 type = IOMAP_UNWRITTEN;
683 else
684 type = IOMAP_DELAY;
685
686 if (!xfs_iomap_valid(mp, offset)) {
687 done = 1;
688 continue;
689 }
690
691 ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
692 ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
693
694 xfs_map_at_offset(bh, offset, bbits, mp);
633 if (startio) { 695 if (startio) {
696 xfs_add_to_ioend(inode, bh, offset,
697 type, ioendp, done);
698 } else {
699 set_buffer_dirty(bh);
700 unlock_buffer(bh);
701 mark_buffer_dirty(bh);
702 }
703 page_dirty--;
704 count++;
705 } else {
706 type = 0;
707 if (buffer_mapped(bh) && all_bh && startio) {
634 lock_buffer(bh); 708 lock_buffer(bh);
635 bh_arr[index++] = bh; 709 xfs_add_to_ioend(inode, bh, offset,
710 type, ioendp, done);
711 count++;
636 page_dirty--; 712 page_dirty--;
713 } else {
714 done = 1;
637 } 715 }
638 continue;
639 } 716 }
640 tmp = xfs_offset_to_map(page, mp, offset); 717 } while (offset += len, (bh = bh->b_this_page) != head);
641 if (!tmp)
642 continue;
643 ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
644 ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
645 718
646 /* If this is a new unwritten extent buffer (i.e. one 719 if (uptodate && bh == head)
647 * that we haven't passed in private data for, we must 720 SetPageUptodate(page);
648 * now map this buffer too. 721
649 */ 722 if (startio) {
650 if (buffer_unwritten(bh) && !bh->b_end_io) { 723 if (count) {
651 ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN); 724 struct backing_dev_info *bdi;
652 xfs_map_unwritten(inode, page, head, bh, offset, 725
653 bbits, tmp, wbc, startio, all_bh); 726 bdi = inode->i_mapping->backing_dev_info;
654 } else if (! (buffer_unwritten(bh) && buffer_locked(bh))) { 727 if (bdi_write_congested(bdi)) {
655 xfs_map_at_offset(page, bh, offset, bbits, tmp); 728 wbc->encountered_congestion = 1;
656 if (buffer_unwritten(bh)) { 729 done = 1;
657 set_buffer_unwritten_io(bh); 730 } else if (--wbc->nr_to_write <= 0) {
658 bh->b_private = private; 731 done = 1;
659 ASSERT(private);
660 } 732 }
661 } 733 }
662 if (startio) { 734 xfs_start_page_writeback(page, wbc, !page_dirty, count);
663 bh_arr[index++] = bh;
664 } else {
665 set_buffer_dirty(bh);
666 unlock_buffer(bh);
667 mark_buffer_dirty(bh);
668 }
669 page_dirty--;
670 } while (offset += len, (bh = bh->b_this_page) != head);
671
672 if (startio && index) {
673 xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
674 } else {
675 unlock_page(page);
676 } 735 }
736
737 return done;
738 fail_unlock_page:
739 unlock_page(page);
740 fail:
741 return 1;
677} 742}
678 743
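The page_dirty derivation spelled out in the comment above is easy to sanity-check in isolation. Below is a minimal userspace sketch, assuming 4k pages and 512-byte blocks; ROUNDUP and page_dirty_count() are illustrative stand-ins, not kernel code.

#include <assert.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

static unsigned long page_dirty_count(unsigned long long end_offset,
                                      unsigned long blocksize)
{
        /* On the last page, end_offset & (PAGE_CACHE_SIZE - 1) is the
         * in-page byte count; on any interior page it is zero and we
         * fall back to a full page's worth of buffers. */
        unsigned long p_offset = end_offset & (PAGE_CACHE_SIZE - 1);

        p_offset = p_offset ? ROUNDUP(p_offset, blocksize) : PAGE_CACHE_SIZE;
        return p_offset / blocksize;
}

int main(void)
{
        /* Interior page: all 8 of the 512-byte buffers are before EOF. */
        assert(page_dirty_count(2 * PAGE_CACHE_SIZE, 512) == 8);
        /* Last page, EOF 1000 bytes in: only 2 buffers are in play. */
        assert(page_dirty_count(PAGE_CACHE_SIZE + 1000, 512) == 2);
        printf("page_dirty derivation holds\n");
        return 0;
}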
679/* 744/*
@@ -685,19 +750,31 @@ xfs_cluster_write(
685 struct inode *inode, 750 struct inode *inode,
686 pgoff_t tindex, 751 pgoff_t tindex,
687 xfs_iomap_t *iomapp, 752 xfs_iomap_t *iomapp,
753 xfs_ioend_t **ioendp,
688 struct writeback_control *wbc, 754 struct writeback_control *wbc,
689 int startio, 755 int startio,
690 int all_bh, 756 int all_bh,
691 pgoff_t tlast) 757 pgoff_t tlast)
692{ 758{
693 struct page *page; 759 struct pagevec pvec;
760 int done = 0, i;
694 761
695 for (; tindex <= tlast; tindex++) { 762 pagevec_init(&pvec, 0);
696 page = xfs_probe_delalloc_page(inode, tindex); 763 while (!done && tindex <= tlast) {
697 if (!page) 764 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
765
766 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
698 break; 767 break;
699 xfs_convert_page(inode, page, iomapp, wbc, NULL, 768
700 startio, all_bh); 769 for (i = 0; i < pagevec_count(&pvec); i++) {
770 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
771 iomapp, ioendp, wbc, startio, all_bh);
772 if (done)
773 break;
774 }
775
776 pagevec_release(&pvec);
777 cond_resched();
701 } 778 }
702} 779}
703 780
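xfs_cluster_write now walks the cluster in batches instead of probing one page at a time. A sketch of that batching shape, with hypothetical stand-ins for pagevec_lookup() and xfs_convert_page():

#define PAGEVEC_SIZE 14 /* illustrative batch size */

/* lookup_pages() stands in for pagevec_lookup(): it fills idx[] with
 * up to want page indices at or after start and returns how many it
 * found. process_page() stands in for xfs_convert_page(): a nonzero
 * return means stop clustering. */
static unsigned lookup_pages(unsigned long start, unsigned want,
                             unsigned long idx[]) { return 0; }
static int process_page(unsigned long index) { return 1; }

static void cluster_write(unsigned long tindex, unsigned long tlast)
{
        int done = 0;

        while (!done && tindex <= tlast) {
                unsigned long idx[PAGEVEC_SIZE];
                unsigned want = PAGEVEC_SIZE, found, i;

                if (tlast - tindex + 1 < want)
                        want = (unsigned)(tlast - tindex + 1);
                found = lookup_pages(tindex, want, idx);
                if (!found)
                        break;
                for (i = 0; i < found; i++) {
                        done = process_page(idx[i]);
                        tindex = idx[i] + 1;
                        if (done)
                                break;
                }
        }
}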
@@ -728,18 +805,22 @@ xfs_page_state_convert(
728 int startio, 805 int startio,
729 int unmapped) /* also implies page uptodate */ 806 int unmapped) /* also implies page uptodate */
730{ 807{
731 struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head; 808 struct buffer_head *bh, *head;
732 xfs_iomap_t *iomp, iomap; 809 xfs_iomap_t iomap;
810 xfs_ioend_t *ioend = NULL, *iohead = NULL;
733 loff_t offset; 811 loff_t offset;
734 unsigned long p_offset = 0; 812 unsigned long p_offset = 0;
813 unsigned int type;
735 __uint64_t end_offset; 814 __uint64_t end_offset;
736 pgoff_t end_index, last_index, tlast; 815 pgoff_t end_index, last_index, tlast;
737 int len, err, i, cnt = 0, uptodate = 1; 816 ssize_t size, len;
738 int flags; 817 int flags, err, iomap_valid = 0, uptodate = 1;
739 int page_dirty; 818 int page_dirty, count = 0, trylock_flag = 0;
819 int all_bh = unmapped;
740 820
741 /* wait for other IO threads? */ 821 /* wait for other IO threads? */
742 flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK; 822 if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
823 trylock_flag |= BMAPI_TRYLOCK;
743 824
744 /* Is this page beyond the end of the file? */ 825 /* Is this page beyond the end of the file? */
745 offset = i_size_read(inode); 826 offset = i_size_read(inode);
@@ -754,161 +835,173 @@ xfs_page_state_convert(
754 } 835 }
755 } 836 }
756 837
757 end_offset = min_t(unsigned long long,
758 (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
759 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
760
761 /* 838 /*
762 * page_dirty is initially a count of buffers on the page before 839 * page_dirty is initially a count of buffers on the page before
763 * EOF and is decremented as we move each into a cleanable state. 840 * EOF and is decremented as we move each into a cleanable state.
764 */ 841 *
842 * Derivation:
843 *
844 * End offset is the highest offset that this page should represent.
845 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
846 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
847 * hence give us the correct page_dirty count. On any other page,
848 * it will be zero and in that case we need page_dirty to be the
849 * count of buffers on the page.
850 */
851 end_offset = min_t(unsigned long long,
852 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
765 len = 1 << inode->i_blkbits; 853 len = 1 << inode->i_blkbits;
766 p_offset = max(p_offset, PAGE_CACHE_SIZE); 854 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
767 p_offset = roundup(p_offset, len); 855 PAGE_CACHE_SIZE);
856 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
768 page_dirty = p_offset / len; 857 page_dirty = p_offset / len;
769 858
770 iomp = NULL;
771 p_offset = 0;
772 bh = head = page_buffers(page); 859 bh = head = page_buffers(page);
860 offset = page_offset(page);
861 flags = -1;
862 type = 0;
863
864 /* TODO: cleanup count and page_dirty */
773 865
774 do { 866 do {
775 if (offset >= end_offset) 867 if (offset >= end_offset)
776 break; 868 break;
777 if (!buffer_uptodate(bh)) 869 if (!buffer_uptodate(bh))
778 uptodate = 0; 870 uptodate = 0;
779 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) 871 if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
872 /*
873 * the iomap is actually still valid, but the ioend
874 * isn't. Shouldn't happen too often.
875 */
876 iomap_valid = 0;
780 continue; 877 continue;
781
782 if (iomp) {
783 iomp = xfs_offset_to_map(page, &iomap, p_offset);
784 } 878 }
785 879
880 if (iomap_valid)
881 iomap_valid = xfs_iomap_valid(&iomap, offset);
882
786 /* 883 /*
787 * First case, map an unwritten extent and prepare for 884 * First case, map an unwritten extent and prepare for
788 * extent state conversion transaction on completion. 885 * extent state conversion transaction on completion.
789 */ 886 *
790 if (buffer_unwritten(bh)) { 887 * Second case, allocate space for a delalloc buffer.
791 if (!startio) 888 * We can return EAGAIN here in the release page case.
792 continue; 889 *
793 if (!iomp) { 890 * Third case, an unmapped buffer was found, and we are
794 err = xfs_map_blocks(inode, offset, len, &iomap, 891 * in a path where we need to write the whole page out.
795 BMAPI_WRITE|BMAPI_IGNSTATE); 892 */
796 if (err) { 893 if (buffer_unwritten(bh) || buffer_delay(bh) ||
797 goto error; 894 ((buffer_uptodate(bh) || PageUptodate(page)) &&
798 } 895 !buffer_mapped(bh) && (unmapped || startio))) {
799 iomp = xfs_offset_to_map(page, &iomap, 896 /*
800 p_offset); 897 * Make sure we don't use a read-only iomap
898 */
899 if (flags == BMAPI_READ)
900 iomap_valid = 0;
901
902 if (buffer_unwritten(bh)) {
903 type = IOMAP_UNWRITTEN;
904 flags = BMAPI_WRITE|BMAPI_IGNSTATE;
905 } else if (buffer_delay(bh)) {
906 type = IOMAP_DELAY;
907 flags = BMAPI_ALLOCATE;
908 if (!startio)
909 flags |= trylock_flag;
910 } else {
911 type = IOMAP_NEW;
912 flags = BMAPI_WRITE|BMAPI_MMAP;
801 } 913 }
802 if (iomp) { 914
803 if (!bh->b_end_io) { 915 if (!iomap_valid) {
804 err = xfs_map_unwritten(inode, page, 916 if (type == IOMAP_NEW) {
805 head, bh, p_offset, 917 size = xfs_probe_cluster(inode,
806 inode->i_blkbits, iomp, 918 page, bh, head, 0);
807 wbc, startio, unmapped);
808 if (err) {
809 goto error;
810 }
811 } else { 919 } else {
812 set_bit(BH_Lock, &bh->b_state); 920 size = len;
813 } 921 }
814 BUG_ON(!buffer_locked(bh)); 922
815 bh_arr[cnt++] = bh; 923 err = xfs_map_blocks(inode, offset, size,
816 page_dirty--; 924 &iomap, flags);
817 } 925 if (err)
818 /*
819 * Second case, allocate space for a delalloc buffer.
820 * We can return EAGAIN here in the release page case.
821 */
822 } else if (buffer_delay(bh)) {
823 if (!iomp) {
824 err = xfs_map_blocks(inode, offset, len, &iomap,
825 BMAPI_ALLOCATE | flags);
826 if (err) {
827 goto error; 926 goto error;
828 } 927 iomap_valid = xfs_iomap_valid(&iomap, offset);
829 iomp = xfs_offset_to_map(page, &iomap,
830 p_offset);
831 } 928 }
832 if (iomp) { 929 if (iomap_valid) {
833 xfs_map_at_offset(page, bh, p_offset, 930 xfs_map_at_offset(bh, offset,
834 inode->i_blkbits, iomp); 931 inode->i_blkbits, &iomap);
835 if (startio) { 932 if (startio) {
836 bh_arr[cnt++] = bh; 933 xfs_add_to_ioend(inode, bh, offset,
934 type, &ioend,
935 !iomap_valid);
837 } else { 936 } else {
838 set_buffer_dirty(bh); 937 set_buffer_dirty(bh);
839 unlock_buffer(bh); 938 unlock_buffer(bh);
840 mark_buffer_dirty(bh); 939 mark_buffer_dirty(bh);
841 } 940 }
842 page_dirty--; 941 page_dirty--;
942 count++;
943 }
944 } else if (buffer_uptodate(bh) && startio) {
945 /*
946 * we got here because the buffer is already mapped.
947 * That means it must already have extents allocated
948 * underneath it. Map the extent by reading it.
949 */
950 if (!iomap_valid || type != 0) {
951 flags = BMAPI_READ;
952 size = xfs_probe_cluster(inode, page, bh,
953 head, 1);
954 err = xfs_map_blocks(inode, offset, size,
955 &iomap, flags);
956 if (err)
957 goto error;
958 iomap_valid = xfs_iomap_valid(&iomap, offset);
843 } 959 }
844 } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
845 (unmapped || startio)) {
846 960
847 if (!buffer_mapped(bh)) { 961 type = 0;
848 int size; 962 if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
849 963 ASSERT(buffer_mapped(bh));
850 /* 964 if (iomap_valid)
851 * Getting here implies an unmapped buffer 965 all_bh = 1;
852 * was found, and we are in a path where we 966 xfs_add_to_ioend(inode, bh, offset, type,
853 * need to write the whole page out. 967 &ioend, !iomap_valid);
854 */ 968 page_dirty--;
855 if (!iomp) { 969 count++;
856 size = xfs_probe_unmapped_cluster( 970 } else {
857 inode, page, bh, head); 971 iomap_valid = 0;
858 err = xfs_map_blocks(inode, offset,
859 size, &iomap,
860 BMAPI_WRITE|BMAPI_MMAP);
861 if (err) {
862 goto error;
863 }
864 iomp = xfs_offset_to_map(page, &iomap,
865 p_offset);
866 }
867 if (iomp) {
868 xfs_map_at_offset(page,
869 bh, p_offset,
870 inode->i_blkbits, iomp);
871 if (startio) {
872 bh_arr[cnt++] = bh;
873 } else {
874 set_buffer_dirty(bh);
875 unlock_buffer(bh);
876 mark_buffer_dirty(bh);
877 }
878 page_dirty--;
879 }
880 } else if (startio) {
881 if (buffer_uptodate(bh) &&
882 !test_and_set_bit(BH_Lock, &bh->b_state)) {
883 bh_arr[cnt++] = bh;
884 page_dirty--;
885 }
886 } 972 }
973 } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
974 (unmapped || startio)) {
975 iomap_valid = 0;
887 } 976 }
888 } while (offset += len, p_offset += len, 977
889 ((bh = bh->b_this_page) != head)); 978 if (!iohead)
979 iohead = ioend;
980
981 } while (offset += len, ((bh = bh->b_this_page) != head));
890 982
891 if (uptodate && bh == head) 983 if (uptodate && bh == head)
892 SetPageUptodate(page); 984 SetPageUptodate(page);
893 985
894 if (startio) { 986 if (startio)
895 xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty); 987 xfs_start_page_writeback(page, wbc, 1, count);
896 }
897 988
898 if (iomp) { 989 if (ioend && iomap_valid) {
899 offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >> 990 offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
900 PAGE_CACHE_SHIFT; 991 PAGE_CACHE_SHIFT;
901 tlast = min_t(pgoff_t, offset, last_index); 992 tlast = min_t(pgoff_t, offset, last_index);
902 xfs_cluster_write(inode, page->index + 1, iomp, wbc, 993 xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
903 startio, unmapped, tlast); 994 wbc, startio, all_bh, tlast);
904 } 995 }
905 996
997 if (iohead)
998 xfs_submit_ioend(iohead);
999
906 return page_dirty; 1000 return page_dirty;
907 1001
908error: 1002error:
909 for (i = 0; i < cnt; i++) { 1003 if (iohead)
910 unlock_buffer(bh_arr[i]); 1004 xfs_cancel_ioend(iohead);
911 }
912 1005
913 /* 1006 /*
914 * If it's delalloc and we have nowhere to put it, 1007 * If it's delalloc and we have nowhere to put it,
@@ -916,9 +1009,8 @@ error:
916 * us to try again. 1009 * us to try again.
917 */ 1010 */
918 if (err != -EAGAIN) { 1011 if (err != -EAGAIN) {
919 if (!unmapped) { 1012 if (!unmapped)
920 block_invalidatepage(page, 0); 1013 block_invalidatepage(page, 0);
921 }
922 ClearPageUptodate(page); 1014 ClearPageUptodate(page);
923 } 1015 }
924 return err; 1016 return err;
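The rewritten loop above folds what used to be three separate branches into one classification: unwritten extents, delalloc reservations, and holes under a whole-page writeout. A hypothetical condensation of that decision (names invented for illustration):

enum map_type { MAP_NONE, MAP_UNWRITTEN, MAP_DELAY, MAP_NEW };

struct bh_state {
        int unwritten, delay, mapped, uptodate;
};

static enum map_type classify(const struct bh_state *bh,
                              int page_uptodate, int unmapped, int startio)
{
        if (bh->unwritten)
                return MAP_UNWRITTEN;   /* convert extent at I/O completion */
        if (bh->delay)
                return MAP_DELAY;       /* allocate the delalloc reservation */
        if ((bh->uptodate || page_uptodate) && !bh->mapped &&
            (unmapped || startio))
                return MAP_NEW;         /* whole-page writeout over a hole */
        return MAP_NONE;
}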
@@ -982,7 +1074,7 @@ __linvfs_get_block(
982 } 1074 }
983 1075
984 /* If this is a realtime file, data might be on a new device */ 1076 /* If this is a realtime file, data might be on a new device */
985 bh_result->b_bdev = iomap.iomap_target->pbr_bdev; 1077 bh_result->b_bdev = iomap.iomap_target->bt_bdev;
986 1078
987 /* If we previously allocated a block out beyond eof and 1079 /* If we previously allocated a block out beyond eof and
988 * we are now coming back to use it then we will need to 1080 * we are now coming back to use it then we will need to
@@ -1094,10 +1186,10 @@ linvfs_direct_IO(
1094 if (error) 1186 if (error)
1095 return -error; 1187 return -error;
1096 1188
1097 iocb->private = xfs_alloc_ioend(inode); 1189 iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
1098 1190
1099 ret = blockdev_direct_IO_own_locking(rw, iocb, inode, 1191 ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
1100 iomap.iomap_target->pbr_bdev, 1192 iomap.iomap_target->bt_bdev,
1101 iov, offset, nr_segs, 1193 iov, offset, nr_segs,
1102 linvfs_get_blocks_direct, 1194 linvfs_get_blocks_direct,
1103 linvfs_end_io_direct); 1195 linvfs_end_io_direct);
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 4720758a9ade..55339dd5a30d 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,14 +23,24 @@ extern mempool_t *xfs_ioend_pool;
23 23
24typedef void (*xfs_ioend_func_t)(void *); 24typedef void (*xfs_ioend_func_t)(void *);
25 25
26/*
27 * xfs_ioend struct manages large extent writes for XFS.
28 * It can manage several multi-page bio's at once.
29 */
26typedef struct xfs_ioend { 30typedef struct xfs_ioend {
31 struct xfs_ioend *io_list; /* next ioend in chain */
32 unsigned int io_type; /* delalloc / unwritten */
27 unsigned int io_uptodate; /* I/O status register */ 33 unsigned int io_uptodate; /* I/O status register */
28 atomic_t io_remaining; /* hold count */ 34 atomic_t io_remaining; /* hold count */
29 struct vnode *io_vnode; /* file being written to */ 35 struct vnode *io_vnode; /* file being written to */
30 struct buffer_head *io_buffer_head;/* buffer linked list head */ 36 struct buffer_head *io_buffer_head;/* buffer linked list head */
37 struct buffer_head *io_buffer_tail;/* buffer linked list tail */
31 size_t io_size; /* size of the extent */ 38 size_t io_size; /* size of the extent */
32 xfs_off_t io_offset; /* offset in the file */ 39 xfs_off_t io_offset; /* offset in the file */
33 struct work_struct io_work; /* xfsdatad work queue */ 40 struct work_struct io_work; /* xfsdatad work queue */
34} xfs_ioend_t; 41} xfs_ioend_t;
35 42
43extern struct address_space_operations linvfs_aops;
44extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
45
36#endif /* __XFS_IOPS_H__ */ 46#endif /* __XFS_IOPS_H__ */
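The new io_list and io_buffer_tail fields let completed ioends be chained together and submitted head to tail. A minimal userspace sketch of walking such a chain, assuming the submit callback may free each ioend as it goes (types here are illustrative, not the kernel API):

#include <stddef.h>

struct ioend {
        struct ioend *io_list;  /* next ioend in chain */
        unsigned int io_type;
};

static void submit_ioend_chain(struct ioend *head,
                               void (*submit)(struct ioend *))
{
        struct ioend *next;

        do {
                next = head->io_list;   /* fetch before submit may free it */
                submit(head);
        } while ((head = next) != NULL);
}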
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 6fe21d2b8847..e44b7c1a3a36 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -31,76 +31,77 @@
31#include <linux/kthread.h> 31#include <linux/kthread.h>
32#include "xfs_linux.h" 32#include "xfs_linux.h"
33 33
34STATIC kmem_cache_t *pagebuf_zone; 34STATIC kmem_zone_t *xfs_buf_zone;
35STATIC kmem_shaker_t pagebuf_shake; 35STATIC kmem_shaker_t xfs_buf_shake;
36STATIC int xfsbufd(void *);
36STATIC int xfsbufd_wakeup(int, gfp_t); 37STATIC int xfsbufd_wakeup(int, gfp_t);
37STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); 38STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
38 39
39STATIC struct workqueue_struct *xfslogd_workqueue; 40STATIC struct workqueue_struct *xfslogd_workqueue;
40struct workqueue_struct *xfsdatad_workqueue; 41struct workqueue_struct *xfsdatad_workqueue;
41 42
42#ifdef PAGEBUF_TRACE 43#ifdef XFS_BUF_TRACE
43void 44void
44pagebuf_trace( 45xfs_buf_trace(
45 xfs_buf_t *pb, 46 xfs_buf_t *bp,
46 char *id, 47 char *id,
47 void *data, 48 void *data,
48 void *ra) 49 void *ra)
49{ 50{
50 ktrace_enter(pagebuf_trace_buf, 51 ktrace_enter(xfs_buf_trace_buf,
51 pb, id, 52 bp, id,
52 (void *)(unsigned long)pb->pb_flags, 53 (void *)(unsigned long)bp->b_flags,
53 (void *)(unsigned long)pb->pb_hold.counter, 54 (void *)(unsigned long)bp->b_hold.counter,
54 (void *)(unsigned long)pb->pb_sema.count.counter, 55 (void *)(unsigned long)bp->b_sema.count.counter,
55 (void *)current, 56 (void *)current,
56 data, ra, 57 data, ra,
57 (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff), 58 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
58 (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff), 59 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
59 (void *)(unsigned long)pb->pb_buffer_length, 60 (void *)(unsigned long)bp->b_buffer_length,
60 NULL, NULL, NULL, NULL, NULL); 61 NULL, NULL, NULL, NULL, NULL);
61} 62}
62ktrace_t *pagebuf_trace_buf; 63ktrace_t *xfs_buf_trace_buf;
63#define PAGEBUF_TRACE_SIZE 4096 64#define XFS_BUF_TRACE_SIZE 4096
64#define PB_TRACE(pb, id, data) \ 65#define XB_TRACE(bp, id, data) \
65 pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0)) 66 xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
66#else 67#else
67#define PB_TRACE(pb, id, data) do { } while (0) 68#define XB_TRACE(bp, id, data) do { } while (0)
68#endif 69#endif
69 70
70#ifdef PAGEBUF_LOCK_TRACKING 71#ifdef XFS_BUF_LOCK_TRACKING
71# define PB_SET_OWNER(pb) ((pb)->pb_last_holder = current->pid) 72# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
72# define PB_CLEAR_OWNER(pb) ((pb)->pb_last_holder = -1) 73# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
73# define PB_GET_OWNER(pb) ((pb)->pb_last_holder) 74# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
74#else 75#else
75# define PB_SET_OWNER(pb) do { } while (0) 76# define XB_SET_OWNER(bp) do { } while (0)
76# define PB_CLEAR_OWNER(pb) do { } while (0) 77# define XB_CLEAR_OWNER(bp) do { } while (0)
77# define PB_GET_OWNER(pb) do { } while (0) 78# define XB_GET_OWNER(bp) do { } while (0)
78#endif 79#endif
79 80
80#define pb_to_gfp(flags) \ 81#define xb_to_gfp(flags) \
81 ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \ 82 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
82 ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN) 83 ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
83 84
84#define pb_to_km(flags) \ 85#define xb_to_km(flags) \
85 (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP) 86 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
86 87
87#define pagebuf_allocate(flags) \ 88#define xfs_buf_allocate(flags) \
88 kmem_zone_alloc(pagebuf_zone, pb_to_km(flags)) 89 kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
89#define pagebuf_deallocate(pb) \ 90#define xfs_buf_deallocate(bp) \
90 kmem_zone_free(pagebuf_zone, (pb)); 91 kmem_zone_free(xfs_buf_zone, (bp));
91 92
92/* 93/*
93 * Page Region interfaces. 94 * Page Region interfaces.
94 * 95 *
95 * For pages in filesystems where the blocksize is smaller than the 96 * For pages in filesystems where the blocksize is smaller than the
96 * pagesize, we use the page->private field (long) to hold a bitmap 97 * pagesize, we use the page->private field (long) to hold a bitmap
97 * of uptodate regions within the page. 98 * of uptodate regions within the page.
98 * 99 *
99 * Each such region is "bytes per page / bits per long" bytes long. 100 * Each such region is "bytes per page / bits per long" bytes long.
100 * 101 *
101 * NBPPR == number-of-bytes-per-page-region 102 * NBPPR == number-of-bytes-per-page-region
102 * BTOPR == bytes-to-page-region (rounded up) 103 * BTOPR == bytes-to-page-region (rounded up)
103 * BTOPRT == bytes-to-page-region-truncated (rounded down) 104 * BTOPRT == bytes-to-page-region-truncated (rounded down)
104 */ 105 */
105#if (BITS_PER_LONG == 32) 106#if (BITS_PER_LONG == 32)
106#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */ 107#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
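For the sub-page blocksize case described above, each bit of page->private covers PAGE_CACHE_SIZE / BITS_PER_LONG bytes. A small sketch of just the indexing arithmetic (the kernel's real set/test masks round partial regions differently; this shows only the idea):

#include <limits.h>
#include <stddef.h>

#define PAGE_CACHE_SIZE 4096UL
#define BITS_PER_LONG   (sizeof(long) * CHAR_BIT)
#define NBPPR           (PAGE_CACHE_SIZE / BITS_PER_LONG) /* bytes/region */

/* Which bit of the page's bitmap covers byte `offset` of the page. */
static unsigned region_bit(size_t offset)
{
        return (unsigned)(offset / NBPPR);
}

/* Mark every region that [offset, offset+length) touches. */
static unsigned long mark_regions(unsigned long map,
                                  size_t offset, size_t length)
{
        unsigned i, last = region_bit(offset + length - 1);

        for (i = region_bit(offset); i <= last; i++)
                map |= 1UL << i;
        return map;
}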
@@ -159,7 +160,7 @@ test_page_region(
159} 160}
160 161
161/* 162/*
162 * Mapping of multi-page buffers into contiguous virtual space 163 * Mapping of multi-page buffers into contiguous virtual space
163 */ 164 */
164 165
165typedef struct a_list { 166typedef struct a_list {
@@ -172,7 +173,7 @@ STATIC int as_list_len;
172STATIC DEFINE_SPINLOCK(as_lock); 173STATIC DEFINE_SPINLOCK(as_lock);
173 174
174/* 175/*
175 * Try to batch vunmaps because they are costly. 176 * Try to batch vunmaps because they are costly.
176 */ 177 */
177STATIC void 178STATIC void
178free_address( 179free_address(
@@ -215,83 +216,83 @@ purge_addresses(void)
215} 216}
216 217
217/* 218/*
218 * Internal pagebuf object manipulation 219 * Internal xfs_buf_t object manipulation
219 */ 220 */
220 221
221STATIC void 222STATIC void
222_pagebuf_initialize( 223_xfs_buf_initialize(
223 xfs_buf_t *pb, 224 xfs_buf_t *bp,
224 xfs_buftarg_t *target, 225 xfs_buftarg_t *target,
225 loff_t range_base, 226 xfs_off_t range_base,
226 size_t range_length, 227 size_t range_length,
227 page_buf_flags_t flags) 228 xfs_buf_flags_t flags)
228{ 229{
229 /* 230 /*
230 * We don't want certain flags to appear in pb->pb_flags. 231 * We don't want certain flags to appear in b_flags.
231 */ 232 */
232 flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD); 233 flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
233 234
234 memset(pb, 0, sizeof(xfs_buf_t)); 235 memset(bp, 0, sizeof(xfs_buf_t));
235 atomic_set(&pb->pb_hold, 1); 236 atomic_set(&bp->b_hold, 1);
236 init_MUTEX_LOCKED(&pb->pb_iodonesema); 237 init_MUTEX_LOCKED(&bp->b_iodonesema);
237 INIT_LIST_HEAD(&pb->pb_list); 238 INIT_LIST_HEAD(&bp->b_list);
238 INIT_LIST_HEAD(&pb->pb_hash_list); 239 INIT_LIST_HEAD(&bp->b_hash_list);
239 init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */ 240 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
240 PB_SET_OWNER(pb); 241 XB_SET_OWNER(bp);
241 pb->pb_target = target; 242 bp->b_target = target;
242 pb->pb_file_offset = range_base; 243 bp->b_file_offset = range_base;
243 /* 244 /*
244 * Set buffer_length and count_desired to the same value initially. 245 * Set buffer_length and count_desired to the same value initially.
245 * I/O routines should use count_desired, which will be the same in 246 * I/O routines should use count_desired, which will be the same in
246 * most cases but may be reset (e.g. XFS recovery). 247 * most cases but may be reset (e.g. XFS recovery).
247 */ 248 */
248 pb->pb_buffer_length = pb->pb_count_desired = range_length; 249 bp->b_buffer_length = bp->b_count_desired = range_length;
249 pb->pb_flags = flags; 250 bp->b_flags = flags;
250 pb->pb_bn = XFS_BUF_DADDR_NULL; 251 bp->b_bn = XFS_BUF_DADDR_NULL;
251 atomic_set(&pb->pb_pin_count, 0); 252 atomic_set(&bp->b_pin_count, 0);
252 init_waitqueue_head(&pb->pb_waiters); 253 init_waitqueue_head(&bp->b_waiters);
253 254
254 XFS_STATS_INC(pb_create); 255 XFS_STATS_INC(xb_create);
255 PB_TRACE(pb, "initialize", target); 256 XB_TRACE(bp, "initialize", target);
256} 257}
257 258
258/* 259/*
259 * Allocate a page array capable of holding a specified number 260 * Allocate a page array capable of holding a specified number
260 * of pages, and point the page buf at it. 261 * of pages, and point the page buf at it.
261 */ 262 */
262STATIC int 263STATIC int
263_pagebuf_get_pages( 264_xfs_buf_get_pages(
264 xfs_buf_t *pb, 265 xfs_buf_t *bp,
265 int page_count, 266 int page_count,
266 page_buf_flags_t flags) 267 xfs_buf_flags_t flags)
267{ 268{
268 /* Make sure that we have a page list */ 269 /* Make sure that we have a page list */
269 if (pb->pb_pages == NULL) { 270 if (bp->b_pages == NULL) {
270 pb->pb_offset = page_buf_poff(pb->pb_file_offset); 271 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
271 pb->pb_page_count = page_count; 272 bp->b_page_count = page_count;
272 if (page_count <= PB_PAGES) { 273 if (page_count <= XB_PAGES) {
273 pb->pb_pages = pb->pb_page_array; 274 bp->b_pages = bp->b_page_array;
274 } else { 275 } else {
275 pb->pb_pages = kmem_alloc(sizeof(struct page *) * 276 bp->b_pages = kmem_alloc(sizeof(struct page *) *
276 page_count, pb_to_km(flags)); 277 page_count, xb_to_km(flags));
277 if (pb->pb_pages == NULL) 278 if (bp->b_pages == NULL)
278 return -ENOMEM; 279 return -ENOMEM;
279 } 280 }
280 memset(pb->pb_pages, 0, sizeof(struct page *) * page_count); 281 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
281 } 282 }
282 return 0; 283 return 0;
283} 284}
284 285
285/* 286/*
286 * Frees pb_pages if it was malloced. 287 * Frees b_pages if it was allocated.
287 */ 288 */
288STATIC void 289STATIC void
289_pagebuf_free_pages( 290_xfs_buf_free_pages(
290 xfs_buf_t *bp) 291 xfs_buf_t *bp)
291{ 292{
292 if (bp->pb_pages != bp->pb_page_array) { 293 if (bp->b_pages != bp->b_page_array) {
293 kmem_free(bp->pb_pages, 294 kmem_free(bp->b_pages,
294 bp->pb_page_count * sizeof(struct page *)); 295 bp->b_page_count * sizeof(struct page *));
295 } 296 }
296} 297}
297 298
@@ -299,79 +300,79 @@ _pagebuf_free_pages(
299 * Releases the specified buffer. 300 * Releases the specified buffer.
300 * 301 *
301 * The modification state of any associated pages is left unchanged. 302 * The modification state of any associated pages is left unchanged.
302 * The buffer must not be on any hash - use pagebuf_rele instead for 303 * The buffer must not be on any hash - use xfs_buf_rele instead for
303 * hashed and refcounted buffers 304 * hashed and refcounted buffers
304 */ 305 */
305void 306void
306pagebuf_free( 307xfs_buf_free(
307 xfs_buf_t *bp) 308 xfs_buf_t *bp)
308{ 309{
309 PB_TRACE(bp, "free", 0); 310 XB_TRACE(bp, "free", 0);
310 311
311 ASSERT(list_empty(&bp->pb_hash_list)); 312 ASSERT(list_empty(&bp->b_hash_list));
312 313
313 if (bp->pb_flags & _PBF_PAGE_CACHE) { 314 if (bp->b_flags & _XBF_PAGE_CACHE) {
314 uint i; 315 uint i;
315 316
316 if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1)) 317 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
317 free_address(bp->pb_addr - bp->pb_offset); 318 free_address(bp->b_addr - bp->b_offset);
318 319
319 for (i = 0; i < bp->pb_page_count; i++) 320 for (i = 0; i < bp->b_page_count; i++)
320 page_cache_release(bp->pb_pages[i]); 321 page_cache_release(bp->b_pages[i]);
321 _pagebuf_free_pages(bp); 322 _xfs_buf_free_pages(bp);
322 } else if (bp->pb_flags & _PBF_KMEM_ALLOC) { 323 } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
323 /* 324 /*
324 * XXX(hch): bp->pb_count_desired might be incorrect (see 325 * XXX(hch): bp->b_count_desired might be incorrect (see
325 * pagebuf_associate_memory for details), but fortunately 326 * xfs_buf_associate_memory for details), but fortunately
326 * the Linux version of kmem_free ignores the len argument.. 327 * the Linux version of kmem_free ignores the len argument..
327 */ 328 */
328 kmem_free(bp->pb_addr, bp->pb_count_desired); 329 kmem_free(bp->b_addr, bp->b_count_desired);
329 _pagebuf_free_pages(bp); 330 _xfs_buf_free_pages(bp);
330 } 331 }
331 332
332 pagebuf_deallocate(bp); 333 xfs_buf_deallocate(bp);
333} 334}
334 335
335/* 336/*
336 * Finds all pages for the buffer in question and builds its page list. 337 * Finds all pages for the buffer in question and builds its page list.
337 */ 338 */
338STATIC int 339STATIC int
339_pagebuf_lookup_pages( 340_xfs_buf_lookup_pages(
340 xfs_buf_t *bp, 341 xfs_buf_t *bp,
341 uint flags) 342 uint flags)
342{ 343{
343 struct address_space *mapping = bp->pb_target->pbr_mapping; 344 struct address_space *mapping = bp->b_target->bt_mapping;
344 size_t blocksize = bp->pb_target->pbr_bsize; 345 size_t blocksize = bp->b_target->bt_bsize;
345 size_t size = bp->pb_count_desired; 346 size_t size = bp->b_count_desired;
346 size_t nbytes, offset; 347 size_t nbytes, offset;
347 gfp_t gfp_mask = pb_to_gfp(flags); 348 gfp_t gfp_mask = xb_to_gfp(flags);
348 unsigned short page_count, i; 349 unsigned short page_count, i;
349 pgoff_t first; 350 pgoff_t first;
350 loff_t end; 351 xfs_off_t end;
351 int error; 352 int error;
352 353
353 end = bp->pb_file_offset + bp->pb_buffer_length; 354 end = bp->b_file_offset + bp->b_buffer_length;
354 page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset); 355 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
355 356
356 error = _pagebuf_get_pages(bp, page_count, flags); 357 error = _xfs_buf_get_pages(bp, page_count, flags);
357 if (unlikely(error)) 358 if (unlikely(error))
358 return error; 359 return error;
359 bp->pb_flags |= _PBF_PAGE_CACHE; 360 bp->b_flags |= _XBF_PAGE_CACHE;
360 361
361 offset = bp->pb_offset; 362 offset = bp->b_offset;
362 first = bp->pb_file_offset >> PAGE_CACHE_SHIFT; 363 first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
363 364
364 for (i = 0; i < bp->pb_page_count; i++) { 365 for (i = 0; i < bp->b_page_count; i++) {
365 struct page *page; 366 struct page *page;
366 uint retries = 0; 367 uint retries = 0;
367 368
368 retry: 369 retry:
369 page = find_or_create_page(mapping, first + i, gfp_mask); 370 page = find_or_create_page(mapping, first + i, gfp_mask);
370 if (unlikely(page == NULL)) { 371 if (unlikely(page == NULL)) {
371 if (flags & PBF_READ_AHEAD) { 372 if (flags & XBF_READ_AHEAD) {
372 bp->pb_page_count = i; 373 bp->b_page_count = i;
373 for (i = 0; i < bp->pb_page_count; i++) 374 for (i = 0; i < bp->b_page_count; i++)
374 unlock_page(bp->pb_pages[i]); 375 unlock_page(bp->b_pages[i]);
375 return -ENOMEM; 376 return -ENOMEM;
376 } 377 }
377 378
@@ -387,13 +388,13 @@ _pagebuf_lookup_pages(
387 "deadlock in %s (mode:0x%x)\n", 388 "deadlock in %s (mode:0x%x)\n",
388 __FUNCTION__, gfp_mask); 389 __FUNCTION__, gfp_mask);
389 390
390 XFS_STATS_INC(pb_page_retries); 391 XFS_STATS_INC(xb_page_retries);
391 xfsbufd_wakeup(0, gfp_mask); 392 xfsbufd_wakeup(0, gfp_mask);
392 blk_congestion_wait(WRITE, HZ/50); 393 blk_congestion_wait(WRITE, HZ/50);
393 goto retry; 394 goto retry;
394 } 395 }
395 396
396 XFS_STATS_INC(pb_page_found); 397 XFS_STATS_INC(xb_page_found);
397 398
398 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); 399 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
399 size -= nbytes; 400 size -= nbytes;
@@ -401,27 +402,27 @@ _pagebuf_lookup_pages(
401 if (!PageUptodate(page)) { 402 if (!PageUptodate(page)) {
402 page_count--; 403 page_count--;
403 if (blocksize >= PAGE_CACHE_SIZE) { 404 if (blocksize >= PAGE_CACHE_SIZE) {
404 if (flags & PBF_READ) 405 if (flags & XBF_READ)
405 bp->pb_locked = 1; 406 bp->b_locked = 1;
406 } else if (!PagePrivate(page)) { 407 } else if (!PagePrivate(page)) {
407 if (test_page_region(page, offset, nbytes)) 408 if (test_page_region(page, offset, nbytes))
408 page_count++; 409 page_count++;
409 } 410 }
410 } 411 }
411 412
412 bp->pb_pages[i] = page; 413 bp->b_pages[i] = page;
413 offset = 0; 414 offset = 0;
414 } 415 }
415 416
416 if (!bp->pb_locked) { 417 if (!bp->b_locked) {
417 for (i = 0; i < bp->pb_page_count; i++) 418 for (i = 0; i < bp->b_page_count; i++)
418 unlock_page(bp->pb_pages[i]); 419 unlock_page(bp->b_pages[i]);
419 } 420 }
420 421
421 if (page_count == bp->pb_page_count) 422 if (page_count == bp->b_page_count)
422 bp->pb_flags |= PBF_DONE; 423 bp->b_flags |= XBF_DONE;
423 424
424 PB_TRACE(bp, "lookup_pages", (long)page_count); 425 XB_TRACE(bp, "lookup_pages", (long)page_count);
425 return error; 426 return error;
426} 427}
427 428
@@ -429,23 +430,23 @@ _pagebuf_lookup_pages(
429 * Map buffer into kernel address-space if necessary. 430 * Map buffer into kernel address-space if necessary.
430 */ 431 */
431STATIC int 432STATIC int
432_pagebuf_map_pages( 433_xfs_buf_map_pages(
433 xfs_buf_t *bp, 434 xfs_buf_t *bp,
434 uint flags) 435 uint flags)
435{ 436{
436 /* A single page buffer is always mappable */ 437 /* A single page buffer is always mappable */
437 if (bp->pb_page_count == 1) { 438 if (bp->b_page_count == 1) {
438 bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset; 439 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
439 bp->pb_flags |= PBF_MAPPED; 440 bp->b_flags |= XBF_MAPPED;
440 } else if (flags & PBF_MAPPED) { 441 } else if (flags & XBF_MAPPED) {
441 if (as_list_len > 64) 442 if (as_list_len > 64)
442 purge_addresses(); 443 purge_addresses();
443 bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count, 444 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
444 VM_MAP, PAGE_KERNEL); 445 VM_MAP, PAGE_KERNEL);
445 if (unlikely(bp->pb_addr == NULL)) 446 if (unlikely(bp->b_addr == NULL))
446 return -ENOMEM; 447 return -ENOMEM;
447 bp->pb_addr += bp->pb_offset; 448 bp->b_addr += bp->b_offset;
448 bp->pb_flags |= PBF_MAPPED; 449 bp->b_flags |= XBF_MAPPED;
449 } 450 }
450 451
451 return 0; 452 return 0;
@@ -456,9 +457,7 @@ _pagebuf_map_pages(
456 */ 457 */
457 458
458/* 459/*
459 * _pagebuf_find 460 * Looks up, and creates if absent, a lockable buffer for
460 *
461 * Looks up, and creates if absent, a lockable buffer for
462 * a given range of an inode. The buffer is returned 461 * a given range of an inode. The buffer is returned
463 * locked. If other overlapping buffers exist, they are 462 * locked. If other overlapping buffers exist, they are
464 * released before the new buffer is created and locked, 463 * released before the new buffer is created and locked,
@@ -466,55 +465,55 @@ _pagebuf_map_pages(
466 * are unlocked. No I/O is implied by this call. 465 * are unlocked. No I/O is implied by this call.
467 */ 466 */
468xfs_buf_t * 467xfs_buf_t *
469_pagebuf_find( 468_xfs_buf_find(
470 xfs_buftarg_t *btp, /* block device target */ 469 xfs_buftarg_t *btp, /* block device target */
471 loff_t ioff, /* starting offset of range */ 470 xfs_off_t ioff, /* starting offset of range */
472 size_t isize, /* length of range */ 471 size_t isize, /* length of range */
473 page_buf_flags_t flags, /* PBF_TRYLOCK */ 472 xfs_buf_flags_t flags,
474 xfs_buf_t *new_pb)/* newly allocated buffer */ 473 xfs_buf_t *new_bp)
475{ 474{
476 loff_t range_base; 475 xfs_off_t range_base;
477 size_t range_length; 476 size_t range_length;
478 xfs_bufhash_t *hash; 477 xfs_bufhash_t *hash;
479 xfs_buf_t *pb, *n; 478 xfs_buf_t *bp, *n;
480 479
481 range_base = (ioff << BBSHIFT); 480 range_base = (ioff << BBSHIFT);
482 range_length = (isize << BBSHIFT); 481 range_length = (isize << BBSHIFT);
483 482
484 /* Check for IOs smaller than the sector size / not sector aligned */ 483 /* Check for IOs smaller than the sector size / not sector aligned */
485 ASSERT(!(range_length < (1 << btp->pbr_sshift))); 484 ASSERT(!(range_length < (1 << btp->bt_sshift)));
486 ASSERT(!(range_base & (loff_t)btp->pbr_smask)); 485 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
487 486
488 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; 487 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
489 488
490 spin_lock(&hash->bh_lock); 489 spin_lock(&hash->bh_lock);
491 490
492 list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) { 491 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
493 ASSERT(btp == pb->pb_target); 492 ASSERT(btp == bp->b_target);
494 if (pb->pb_file_offset == range_base && 493 if (bp->b_file_offset == range_base &&
495 pb->pb_buffer_length == range_length) { 494 bp->b_buffer_length == range_length) {
496 /* 495 /*
497 * If we look at something bring it to the 496 * If we look at something, bring it to the
498 * front of the list for next time. 497 * front of the list for next time.
499 */ 498 */
500 atomic_inc(&pb->pb_hold); 499 atomic_inc(&bp->b_hold);
501 list_move(&pb->pb_hash_list, &hash->bh_list); 500 list_move(&bp->b_hash_list, &hash->bh_list);
502 goto found; 501 goto found;
503 } 502 }
504 } 503 }
505 504
506 /* No match found */ 505 /* No match found */
507 if (new_pb) { 506 if (new_bp) {
508 _pagebuf_initialize(new_pb, btp, range_base, 507 _xfs_buf_initialize(new_bp, btp, range_base,
509 range_length, flags); 508 range_length, flags);
510 new_pb->pb_hash = hash; 509 new_bp->b_hash = hash;
511 list_add(&new_pb->pb_hash_list, &hash->bh_list); 510 list_add(&new_bp->b_hash_list, &hash->bh_list);
512 } else { 511 } else {
513 XFS_STATS_INC(pb_miss_locked); 512 XFS_STATS_INC(xb_miss_locked);
514 } 513 }
515 514
516 spin_unlock(&hash->bh_lock); 515 spin_unlock(&hash->bh_lock);
517 return new_pb; 516 return new_bp;
518 517
519found: 518found:
520 spin_unlock(&hash->bh_lock); 519 spin_unlock(&hash->bh_lock);
@@ -523,74 +522,72 @@ found:
523 * if this does not work then we need to drop the 522 * if this does not work then we need to drop the
524 * spinlock and do a hard attempt on the semaphore. 523 * spinlock and do a hard attempt on the semaphore.
525 */ 524 */
526 if (down_trylock(&pb->pb_sema)) { 525 if (down_trylock(&bp->b_sema)) {
527 if (!(flags & PBF_TRYLOCK)) { 526 if (!(flags & XBF_TRYLOCK)) {
528 /* wait for buffer ownership */ 527 /* wait for buffer ownership */
529 PB_TRACE(pb, "get_lock", 0); 528 XB_TRACE(bp, "get_lock", 0);
530 pagebuf_lock(pb); 529 xfs_buf_lock(bp);
531 XFS_STATS_INC(pb_get_locked_waited); 530 XFS_STATS_INC(xb_get_locked_waited);
532 } else { 531 } else {
533 /* We asked for a trylock and failed, no need 532 /* We asked for a trylock and failed, no need
534 * to look at file offset and length here, we 533 * to look at file offset and length here, we
535 * know that this pagebuf at least overlaps our 534 * know that this buffer at least overlaps our
536 * pagebuf and is locked, therefore our buffer 535 * buffer and is locked, therefore our buffer
537 * either does not exist, or is this buffer 536 * either does not exist, or is this buffer.
538 */ 537 */
539 538 xfs_buf_rele(bp);
540 pagebuf_rele(pb); 539 XFS_STATS_INC(xb_busy_locked);
541 XFS_STATS_INC(pb_busy_locked); 540 return NULL;
542 return (NULL);
543 } 541 }
544 } else { 542 } else {
545 /* trylock worked */ 543 /* trylock worked */
546 PB_SET_OWNER(pb); 544 XB_SET_OWNER(bp);
547 } 545 }
548 546
549 if (pb->pb_flags & PBF_STALE) { 547 if (bp->b_flags & XBF_STALE) {
550 ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0); 548 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
551 pb->pb_flags &= PBF_MAPPED; 549 bp->b_flags &= XBF_MAPPED;
552 } 550 }
553 PB_TRACE(pb, "got_lock", 0); 551 XB_TRACE(bp, "got_lock", 0);
554 XFS_STATS_INC(pb_get_locked); 552 XFS_STATS_INC(xb_get_locked);
555 return (pb); 553 return bp;
556} 554}
557 555
558/* 556/*
559 * xfs_buf_get_flags assembles a buffer covering the specified range. 557 * Assembles a buffer covering the specified range.
560 *
561 * Storage in memory for all portions of the buffer will be allocated, 558 * Storage in memory for all portions of the buffer will be allocated,
562 * although backing storage may not be. 559 * although backing storage may not be.
563 */ 560 */
564xfs_buf_t * 561xfs_buf_t *
565xfs_buf_get_flags( /* allocate a buffer */ 562xfs_buf_get_flags(
566 xfs_buftarg_t *target,/* target for buffer */ 563 xfs_buftarg_t *target,/* target for buffer */
567 loff_t ioff, /* starting offset of range */ 564 xfs_off_t ioff, /* starting offset of range */
568 size_t isize, /* length of range */ 565 size_t isize, /* length of range */
569 page_buf_flags_t flags) /* PBF_TRYLOCK */ 566 xfs_buf_flags_t flags)
570{ 567{
571 xfs_buf_t *pb, *new_pb; 568 xfs_buf_t *bp, *new_bp;
572 int error = 0, i; 569 int error = 0, i;
573 570
574 new_pb = pagebuf_allocate(flags); 571 new_bp = xfs_buf_allocate(flags);
575 if (unlikely(!new_pb)) 572 if (unlikely(!new_bp))
576 return NULL; 573 return NULL;
577 574
578 pb = _pagebuf_find(target, ioff, isize, flags, new_pb); 575 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
579 if (pb == new_pb) { 576 if (bp == new_bp) {
580 error = _pagebuf_lookup_pages(pb, flags); 577 error = _xfs_buf_lookup_pages(bp, flags);
581 if (error) 578 if (error)
582 goto no_buffer; 579 goto no_buffer;
583 } else { 580 } else {
584 pagebuf_deallocate(new_pb); 581 xfs_buf_deallocate(new_bp);
585 if (unlikely(pb == NULL)) 582 if (unlikely(bp == NULL))
586 return NULL; 583 return NULL;
587 } 584 }
588 585
589 for (i = 0; i < pb->pb_page_count; i++) 586 for (i = 0; i < bp->b_page_count; i++)
590 mark_page_accessed(pb->pb_pages[i]); 587 mark_page_accessed(bp->b_pages[i]);
591 588
592 if (!(pb->pb_flags & PBF_MAPPED)) { 589 if (!(bp->b_flags & XBF_MAPPED)) {
593 error = _pagebuf_map_pages(pb, flags); 590 error = _xfs_buf_map_pages(bp, flags);
594 if (unlikely(error)) { 591 if (unlikely(error)) {
595 printk(KERN_WARNING "%s: failed to map pages\n", 592 printk(KERN_WARNING "%s: failed to map pages\n",
596 __FUNCTION__); 593 __FUNCTION__);
@@ -598,97 +595,97 @@ xfs_buf_get_flags( /* allocate a buffer */
598 } 595 }
599 } 596 }
600 597
601 XFS_STATS_INC(pb_get); 598 XFS_STATS_INC(xb_get);
602 599
603 /* 600 /*
604 * Always fill in the block number now, the mapped cases can do 601 * Always fill in the block number now, the mapped cases can do
605 * their own overlay of this later. 602 * their own overlay of this later.
606 */ 603 */
607 pb->pb_bn = ioff; 604 bp->b_bn = ioff;
608 pb->pb_count_desired = pb->pb_buffer_length; 605 bp->b_count_desired = bp->b_buffer_length;
609 606
610 PB_TRACE(pb, "get", (unsigned long)flags); 607 XB_TRACE(bp, "get", (unsigned long)flags);
611 return pb; 608 return bp;
612 609
613 no_buffer: 610 no_buffer:
614 if (flags & (PBF_LOCK | PBF_TRYLOCK)) 611 if (flags & (XBF_LOCK | XBF_TRYLOCK))
615 pagebuf_unlock(pb); 612 xfs_buf_unlock(bp);
616 pagebuf_rele(pb); 613 xfs_buf_rele(bp);
617 return NULL; 614 return NULL;
618} 615}
619 616
620xfs_buf_t * 617xfs_buf_t *
621xfs_buf_read_flags( 618xfs_buf_read_flags(
622 xfs_buftarg_t *target, 619 xfs_buftarg_t *target,
623 loff_t ioff, 620 xfs_off_t ioff,
624 size_t isize, 621 size_t isize,
625 page_buf_flags_t flags) 622 xfs_buf_flags_t flags)
626{ 623{
627 xfs_buf_t *pb; 624 xfs_buf_t *bp;
628 625
629 flags |= PBF_READ; 626 flags |= XBF_READ;
630 627
631 pb = xfs_buf_get_flags(target, ioff, isize, flags); 628 bp = xfs_buf_get_flags(target, ioff, isize, flags);
632 if (pb) { 629 if (bp) {
633 if (!XFS_BUF_ISDONE(pb)) { 630 if (!XFS_BUF_ISDONE(bp)) {
634 PB_TRACE(pb, "read", (unsigned long)flags); 631 XB_TRACE(bp, "read", (unsigned long)flags);
635 XFS_STATS_INC(pb_get_read); 632 XFS_STATS_INC(xb_get_read);
636 pagebuf_iostart(pb, flags); 633 xfs_buf_iostart(bp, flags);
637 } else if (flags & PBF_ASYNC) { 634 } else if (flags & XBF_ASYNC) {
638 PB_TRACE(pb, "read_async", (unsigned long)flags); 635 XB_TRACE(bp, "read_async", (unsigned long)flags);
639 /* 636 /*
640 * Read ahead call which is already satisfied, 637 * Read ahead call which is already satisfied,
641 * drop the buffer 638 * drop the buffer
642 */ 639 */
643 goto no_buffer; 640 goto no_buffer;
644 } else { 641 } else {
645 PB_TRACE(pb, "read_done", (unsigned long)flags); 642 XB_TRACE(bp, "read_done", (unsigned long)flags);
646 /* We do not want read in the flags */ 643 /* We do not want read in the flags */
647 pb->pb_flags &= ~PBF_READ; 644 bp->b_flags &= ~XBF_READ;
648 } 645 }
649 } 646 }
650 647
651 return pb; 648 return bp;
652 649
653 no_buffer: 650 no_buffer:
654 if (flags & (PBF_LOCK | PBF_TRYLOCK)) 651 if (flags & (XBF_LOCK | XBF_TRYLOCK))
655 pagebuf_unlock(pb); 652 xfs_buf_unlock(bp);
656 pagebuf_rele(pb); 653 xfs_buf_rele(bp);
657 return NULL; 654 return NULL;
658} 655}
659 656
660/* 657/*
661 * If we are not low on memory then do the readahead in a deadlock 658 * If we are not low on memory then do the readahead in a deadlock
662 * safe manner. 659 * safe manner.
663 */ 660 */
664void 661void
665pagebuf_readahead( 662xfs_buf_readahead(
666 xfs_buftarg_t *target, 663 xfs_buftarg_t *target,
667 loff_t ioff, 664 xfs_off_t ioff,
668 size_t isize, 665 size_t isize,
669 page_buf_flags_t flags) 666 xfs_buf_flags_t flags)
670{ 667{
671 struct backing_dev_info *bdi; 668 struct backing_dev_info *bdi;
672 669
673 bdi = target->pbr_mapping->backing_dev_info; 670 bdi = target->bt_mapping->backing_dev_info;
674 if (bdi_read_congested(bdi)) 671 if (bdi_read_congested(bdi))
675 return; 672 return;
676 673
677 flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD); 674 flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
678 xfs_buf_read_flags(target, ioff, isize, flags); 675 xfs_buf_read_flags(target, ioff, isize, flags);
679} 676}
680 677
681xfs_buf_t * 678xfs_buf_t *
682pagebuf_get_empty( 679xfs_buf_get_empty(
683 size_t len, 680 size_t len,
684 xfs_buftarg_t *target) 681 xfs_buftarg_t *target)
685{ 682{
686 xfs_buf_t *pb; 683 xfs_buf_t *bp;
687 684
688 pb = pagebuf_allocate(0); 685 bp = xfs_buf_allocate(0);
689 if (pb) 686 if (bp)
690 _pagebuf_initialize(pb, target, 0, len, 0); 687 _xfs_buf_initialize(bp, target, 0, len, 0);
691 return pb; 688 return bp;
692} 689}
693 690
694static inline struct page * 691static inline struct page *
@@ -704,8 +701,8 @@ mem_to_page(
704} 701}
705 702
706int 703int
707pagebuf_associate_memory( 704xfs_buf_associate_memory(
708 xfs_buf_t *pb, 705 xfs_buf_t *bp,
709 void *mem, 706 void *mem,
710 size_t len) 707 size_t len)
711{ 708{
@@ -722,40 +719,40 @@ pagebuf_associate_memory(
722 page_count++; 719 page_count++;
723 720
724 /* Free any previous set of page pointers */ 721 /* Free any previous set of page pointers */
725 if (pb->pb_pages) 722 if (bp->b_pages)
726 _pagebuf_free_pages(pb); 723 _xfs_buf_free_pages(bp);
727 724
728 pb->pb_pages = NULL; 725 bp->b_pages = NULL;
729 pb->pb_addr = mem; 726 bp->b_addr = mem;
730 727
731 rval = _pagebuf_get_pages(pb, page_count, 0); 728 rval = _xfs_buf_get_pages(bp, page_count, 0);
732 if (rval) 729 if (rval)
733 return rval; 730 return rval;
734 731
735 pb->pb_offset = offset; 732 bp->b_offset = offset;
736 ptr = (size_t) mem & PAGE_CACHE_MASK; 733 ptr = (size_t) mem & PAGE_CACHE_MASK;
737 end = PAGE_CACHE_ALIGN((size_t) mem + len); 734 end = PAGE_CACHE_ALIGN((size_t) mem + len);
738 end_cur = end; 735 end_cur = end;
739 /* set up first page */ 736 /* set up first page */
740 pb->pb_pages[0] = mem_to_page(mem); 737 bp->b_pages[0] = mem_to_page(mem);
741 738
742 ptr += PAGE_CACHE_SIZE; 739 ptr += PAGE_CACHE_SIZE;
743 pb->pb_page_count = ++i; 740 bp->b_page_count = ++i;
744 while (ptr < end) { 741 while (ptr < end) {
745 pb->pb_pages[i] = mem_to_page((void *)ptr); 742 bp->b_pages[i] = mem_to_page((void *)ptr);
746 pb->pb_page_count = ++i; 743 bp->b_page_count = ++i;
747 ptr += PAGE_CACHE_SIZE; 744 ptr += PAGE_CACHE_SIZE;
748 } 745 }
749 pb->pb_locked = 0; 746 bp->b_locked = 0;
750 747
751 pb->pb_count_desired = pb->pb_buffer_length = len; 748 bp->b_count_desired = bp->b_buffer_length = len;
752 pb->pb_flags |= PBF_MAPPED; 749 bp->b_flags |= XBF_MAPPED;
753 750
754 return 0; 751 return 0;
755} 752}
756 753
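xfs_buf_associate_memory first has to work out how many pages an arbitrary (mem, len) span touches before it can build the page list. The arithmetic, checked in a userspace sketch assuming 4k pages:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_CACHE_SIZE 4096UL
#define PAGE_CACHE_MASK (~(PAGE_CACHE_SIZE - 1))

static size_t span_page_count(uintptr_t mem, size_t len)
{
        uintptr_t first = mem & PAGE_CACHE_MASK;
        uintptr_t end = (mem + len + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK;

        return (end - first) / PAGE_CACHE_SIZE;
}

int main(void)
{
        /* 100 bytes straddling a page boundary touch two pages. */
        assert(span_page_count(4096 - 50, 100) == 2);
        /* A page-aligned, page-sized span touches exactly one. */
        assert(span_page_count(8192, 4096) == 1);
        return 0;
}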
757xfs_buf_t * 754xfs_buf_t *
758pagebuf_get_no_daddr( 755xfs_buf_get_noaddr(
759 size_t len, 756 size_t len,
760 xfs_buftarg_t *target) 757 xfs_buftarg_t *target)
761{ 758{
@@ -764,10 +761,10 @@ pagebuf_get_no_daddr(
764 void *data; 761 void *data;
765 int error; 762 int error;
766 763
767 bp = pagebuf_allocate(0); 764 bp = xfs_buf_allocate(0);
768 if (unlikely(bp == NULL)) 765 if (unlikely(bp == NULL))
769 goto fail; 766 goto fail;
770 _pagebuf_initialize(bp, target, 0, len, 0); 767 _xfs_buf_initialize(bp, target, 0, len, 0);
771 768
772 try_again: 769 try_again:
773 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL); 770 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
@@ -776,78 +773,73 @@ pagebuf_get_no_daddr(
776 773
777 /* check whether alignment matches.. */ 774 /* check whether alignment matches.. */
778 if ((__psunsigned_t)data != 775 if ((__psunsigned_t)data !=
779 ((__psunsigned_t)data & ~target->pbr_smask)) { 776 ((__psunsigned_t)data & ~target->bt_smask)) {
780 /* .. else double the size and try again */ 777 /* .. else double the size and try again */
781 kmem_free(data, malloc_len); 778 kmem_free(data, malloc_len);
782 malloc_len <<= 1; 779 malloc_len <<= 1;
783 goto try_again; 780 goto try_again;
784 } 781 }
785 782
786 error = pagebuf_associate_memory(bp, data, len); 783 error = xfs_buf_associate_memory(bp, data, len);
787 if (error) 784 if (error)
788 goto fail_free_mem; 785 goto fail_free_mem;
789 bp->pb_flags |= _PBF_KMEM_ALLOC; 786 bp->b_flags |= _XBF_KMEM_ALLOC;
790 787
791 pagebuf_unlock(bp); 788 xfs_buf_unlock(bp);
792 789
793 PB_TRACE(bp, "no_daddr", data); 790 XB_TRACE(bp, "no_daddr", data);
794 return bp; 791 return bp;
795 fail_free_mem: 792 fail_free_mem:
796 kmem_free(data, malloc_len); 793 kmem_free(data, malloc_len);
797 fail_free_buf: 794 fail_free_buf:
798 pagebuf_free(bp); 795 xfs_buf_free(bp);
799 fail: 796 fail:
800 return NULL; 797 return NULL;
801} 798}
802 799
803/* 800/*
804 * pagebuf_hold
805 *
806 * Increment reference count on buffer, to hold the buffer concurrently 801 * Increment reference count on buffer, to hold the buffer concurrently
807 * with another thread which may release (free) the buffer asynchronously. 802 * with another thread which may release (free) the buffer asynchronously.
808 *
809 * Must hold the buffer already to call this function. 803 * Must hold the buffer already to call this function.
810 */ 804 */
811void 805void
812pagebuf_hold( 806xfs_buf_hold(
813 xfs_buf_t *pb) 807 xfs_buf_t *bp)
814{ 808{
815 atomic_inc(&pb->pb_hold); 809 atomic_inc(&bp->b_hold);
816 PB_TRACE(pb, "hold", 0); 810 XB_TRACE(bp, "hold", 0);
817} 811}
818 812
819/* 813/*
820 * pagebuf_rele 814 * Releases a hold on the specified buffer. If the
821 * 815 * the hold count is 1, calls xfs_buf_free.
822 * pagebuf_rele releases a hold on the specified buffer. If the
823 * the hold count is 1, pagebuf_rele calls pagebuf_free.
824 */ 816 */
825void 817void
826pagebuf_rele( 818xfs_buf_rele(
827 xfs_buf_t *pb) 819 xfs_buf_t *bp)
828{ 820{
829 xfs_bufhash_t *hash = pb->pb_hash; 821 xfs_bufhash_t *hash = bp->b_hash;
830 822
831 PB_TRACE(pb, "rele", pb->pb_relse); 823 XB_TRACE(bp, "rele", bp->b_relse);
832 824
833 if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) { 825 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
834 if (pb->pb_relse) { 826 if (bp->b_relse) {
835 atomic_inc(&pb->pb_hold); 827 atomic_inc(&bp->b_hold);
836 spin_unlock(&hash->bh_lock); 828 spin_unlock(&hash->bh_lock);
837 (*(pb->pb_relse)) (pb); 829 (*(bp->b_relse)) (bp);
838 } else if (pb->pb_flags & PBF_FS_MANAGED) { 830 } else if (bp->b_flags & XBF_FS_MANAGED) {
839 spin_unlock(&hash->bh_lock); 831 spin_unlock(&hash->bh_lock);
840 } else { 832 } else {
841 ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q))); 833 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
842 list_del_init(&pb->pb_hash_list); 834 list_del_init(&bp->b_hash_list);
843 spin_unlock(&hash->bh_lock); 835 spin_unlock(&hash->bh_lock);
844 pagebuf_free(pb); 836 xfs_buf_free(bp);
845 } 837 }
846 } else { 838 } else {
847 /* 839 /*
848 * Catch reference count leaks 840 * Catch reference count leaks
849 */ 841 */
850 ASSERT(atomic_read(&pb->pb_hold) >= 0); 842 ASSERT(atomic_read(&bp->b_hold) >= 0);
851 } 843 }
852} 844}
853 845
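The release path above hinges on atomic_dec_and_lock(): drop the reference and, only when it hits zero, take the hash lock so the buffer can be unhashed and freed. A C11 userspace analogue of that primitive, illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

/* Decrement *cnt; if it reaches zero, return true with *lock held. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        /* Fast path: while the count stays above one, skip the lock. */
        int old = atomic_load(cnt);
        while (old > 1)
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;

        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return true;    /* caller tears down, then unlocks */
        pthread_mutex_unlock(lock);
        return false;
}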
@@ -863,168 +855,122 @@ pagebuf_rele(
863 */ 855 */
864 856
865/* 857/*
866 * pagebuf_cond_lock 858 * Locks a buffer object, if it is not already locked.
867 * 859 * Note that this in no way locks the underlying pages, so it is only
868 * pagebuf_cond_lock locks a buffer object, if it is not already locked. 860 * useful for synchronizing concurrent use of buffer objects, not for
869 * Note that this in no way 861 * synchronizing independent access to the underlying pages.
870 * locks the underlying pages, so it is only useful for synchronizing
871 * concurrent use of page buffer objects, not for synchronizing independent
872 * access to the underlying pages.
873 */ 862 */
874int 863int
875pagebuf_cond_lock( /* lock buffer, if not locked */ 864xfs_buf_cond_lock(
876 /* returns -EBUSY if locked */ 865 xfs_buf_t *bp)
877 xfs_buf_t *pb)
878{ 866{
879 int locked; 867 int locked;
880 868
881 locked = down_trylock(&pb->pb_sema) == 0; 869 locked = down_trylock(&bp->b_sema) == 0;
882 if (locked) { 870 if (locked) {
883 PB_SET_OWNER(pb); 871 XB_SET_OWNER(bp);
884 } 872 }
885 PB_TRACE(pb, "cond_lock", (long)locked); 873 XB_TRACE(bp, "cond_lock", (long)locked);
886 return(locked ? 0 : -EBUSY); 874 return locked ? 0 : -EBUSY;
887} 875}
888 876
889#if defined(DEBUG) || defined(XFS_BLI_TRACE) 877#if defined(DEBUG) || defined(XFS_BLI_TRACE)
890/*
891 * pagebuf_lock_value
892 *
893 * Return lock value for a pagebuf
894 */
895int 878int
896pagebuf_lock_value( 879xfs_buf_lock_value(
897 xfs_buf_t *pb) 880 xfs_buf_t *bp)
898{ 881{
899 return(atomic_read(&pb->pb_sema.count)); 882 return atomic_read(&bp->b_sema.count);
900} 883}
901#endif 884#endif
902 885
903/* 886/*
904 * pagebuf_lock 887 * Locks a buffer object.
905 * 888 * Note that this in no way locks the underlying pages, so it is only
906 * pagebuf_lock locks a buffer object. Note that this in no way 889 * useful for synchronizing concurrent use of buffer objects, not for
907 * locks the underlying pages, so it is only useful for synchronizing 890 * synchronizing independent access to the underlying pages.
908 * concurrent use of page buffer objects, not for synchronizing independent
909 * access to the underlying pages.
910 */ 891 */
911int 892void
912pagebuf_lock( 893xfs_buf_lock(
913 xfs_buf_t *pb) 894 xfs_buf_t *bp)
914{ 895{
915 PB_TRACE(pb, "lock", 0); 896 XB_TRACE(bp, "lock", 0);
916 if (atomic_read(&pb->pb_io_remaining)) 897 if (atomic_read(&bp->b_io_remaining))
917 blk_run_address_space(pb->pb_target->pbr_mapping); 898 blk_run_address_space(bp->b_target->bt_mapping);
918 down(&pb->pb_sema); 899 down(&bp->b_sema);
919 PB_SET_OWNER(pb); 900 XB_SET_OWNER(bp);
920 PB_TRACE(pb, "locked", 0); 901 XB_TRACE(bp, "locked", 0);
921 return 0;
922} 902}
923 903
924/* 904/*
925 * pagebuf_unlock 905 * Releases the lock on the buffer object.
926 *
927 * pagebuf_unlock releases the lock on the buffer object created by
928 * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
929 * created by pagebuf_pin).
930 *
931 * If the buffer is marked delwri but is not queued, queue it before we 906 * If the buffer is marked delwri but is not queued, queue it before we
932 * unlock the buffer as we need to set flags correctly. We also need to 907 * unlock the buffer as we need to set flags correctly. We also need to
933 * take a reference for the delwri queue because the unlocker is going to 908 * take a reference for the delwri queue because the unlocker is going to
934 * drop theirs and they don't know we just queued it. 909 * drop theirs and they don't know we just queued it.
935 */ 910 */
936void 911void
937pagebuf_unlock( /* unlock buffer */ 912xfs_buf_unlock(
938 xfs_buf_t *pb) /* buffer to unlock */ 913 xfs_buf_t *bp)
939{ 914{
940 if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) { 915 if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
941 atomic_inc(&pb->pb_hold); 916 atomic_inc(&bp->b_hold);
942 pb->pb_flags |= PBF_ASYNC; 917 bp->b_flags |= XBF_ASYNC;
943 pagebuf_delwri_queue(pb, 0); 918 xfs_buf_delwri_queue(bp, 0);
944 } 919 }
945 920
946 PB_CLEAR_OWNER(pb); 921 XB_CLEAR_OWNER(bp);
947 up(&pb->pb_sema); 922 up(&bp->b_sema);
948 PB_TRACE(pb, "unlock", 0); 923 XB_TRACE(bp, "unlock", 0);
949} 924}
950 925
951 926
952/* 927/*
953 * Pinning Buffer Storage in Memory 928 * Pinning Buffer Storage in Memory
954 */ 929 * Ensure that no attempt to force a buffer to disk will succeed.
955
956/*
957 * pagebuf_pin
958 *
959 * pagebuf_pin locks all of the memory represented by a buffer in
960 * memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for
961 * the same or different buffers affecting a given page, will
962 * properly count the number of outstanding "pin" requests. The
963 * buffer may be released after the pagebuf_pin and a different
964 * buffer used when calling pagebuf_unpin, if desired.
965 * pagebuf_pin should be used by the file system when it wants to be
966 * assured that no attempt will be made to force the affected
967 * memory to disk. It does not assure that a given logical page
968 * will not be moved to a different physical page.
969 */ 930 */
970void 931void
971pagebuf_pin( 932xfs_buf_pin(
972 xfs_buf_t *pb) 933 xfs_buf_t *bp)
973{ 934{
974 atomic_inc(&pb->pb_pin_count); 935 atomic_inc(&bp->b_pin_count);
975 PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter); 936 XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
976} 937}
977 938
978/*
979 * pagebuf_unpin
980 *
981 * pagebuf_unpin reverses the locking of memory performed by
982 * pagebuf_pin. Note that both functions affected the logical
983 * pages associated with the buffer, not the buffer itself.
984 */
985void 939void
986pagebuf_unpin( 940xfs_buf_unpin(
987 xfs_buf_t *pb) 941 xfs_buf_t *bp)
988{ 942{
989 if (atomic_dec_and_test(&pb->pb_pin_count)) { 943 if (atomic_dec_and_test(&bp->b_pin_count))
990 wake_up_all(&pb->pb_waiters); 944 wake_up_all(&bp->b_waiters);
991 } 945 XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
992 PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
993} 946}
994 947
995int 948int
996pagebuf_ispin( 949xfs_buf_ispin(
997 xfs_buf_t *pb) 950 xfs_buf_t *bp)
998{ 951{
999 return atomic_read(&pb->pb_pin_count); 952 return atomic_read(&bp->b_pin_count);
1000} 953}
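
The pin count is a bare nesting counter; only the final unpin (the 1 -> 0 transition tested by atomic_dec_and_test()) wakes b_waiters. A compilable userspace model of that rule (names hypothetical):

#include <assert.h>
#include <stdatomic.h>

/* Userspace model of b_pin_count semantics: pins nest, and only the
 * last unpin would wake waiters -- modeled here by the return value.
 */
static atomic_int pin_count;

static void pin(void)            { atomic_fetch_add(&pin_count, 1); }
static int  unpin_was_last(void) { return atomic_fetch_sub(&pin_count, 1) == 1; }

int main(void)
{
        pin(); pin();                   /* two independent pinners */
        assert(!unpin_was_last());      /* still pinned: no wakeup */
        assert(unpin_was_last());       /* last unpin: waiters run */
        return 0;
}
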
1001 954
1002/* 955STATIC void
1003 * pagebuf_wait_unpin 956xfs_buf_wait_unpin(
1004 * 957 xfs_buf_t *bp)
1005 * pagebuf_wait_unpin waits until all of the memory associated
1006 * with the buffer is no longer locked in memory. It returns
1007 * immediately if none of the affected pages are locked.
1008 */
1009static inline void
1010_pagebuf_wait_unpin(
1011 xfs_buf_t *pb)
1012{ 958{
1013 DECLARE_WAITQUEUE (wait, current); 959 DECLARE_WAITQUEUE (wait, current);
1014 960
1015 if (atomic_read(&pb->pb_pin_count) == 0) 961 if (atomic_read(&bp->b_pin_count) == 0)
1016 return; 962 return;
1017 963
1018 add_wait_queue(&pb->pb_waiters, &wait); 964 add_wait_queue(&bp->b_waiters, &wait);
1019 for (;;) { 965 for (;;) {
1020 set_current_state(TASK_UNINTERRUPTIBLE); 966 set_current_state(TASK_UNINTERRUPTIBLE);
1021 if (atomic_read(&pb->pb_pin_count) == 0) 967 if (atomic_read(&bp->b_pin_count) == 0)
1022 break; 968 break;
1023 if (atomic_read(&pb->pb_io_remaining)) 969 if (atomic_read(&bp->b_io_remaining))
1024 blk_run_address_space(pb->pb_target->pbr_mapping); 970 blk_run_address_space(bp->b_target->bt_mapping);
1025 schedule(); 971 schedule();
1026 } 972 }
1027 remove_wait_queue(&pb->pb_waiters, &wait); 973 remove_wait_queue(&bp->b_waiters, &wait);
1028 set_current_state(TASK_RUNNING); 974 set_current_state(TASK_RUNNING);
1029} 975}
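
The loop above is the standard open-coded sleep: publish the waiter, set TASK_UNINTERRUPTIBLE before re-testing the condition, then schedule(). Modulo the blk_run_address_space() unplug between checks, it is a hedged paraphrase of the generic helper:

/* Equivalent sleep, minus the block-device unplug the real function
 * performs between checks to push outstanding I/O along.
 */
wait_event(bp->b_waiters, atomic_read(&bp->b_pin_count) == 0);
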
1030 976
@@ -1032,241 +978,216 @@ _pagebuf_wait_unpin(
1032 * Buffer Utility Routines 978 * Buffer Utility Routines
1033 */ 979 */
1034 980
1035/*
1036 * pagebuf_iodone
1037 *
1038 * pagebuf_iodone marks a buffer for which I/O is in progress
1039 * done with respect to that I/O. The pb_iodone routine, if
1040 * present, will be called as a side-effect.
1041 */
1042STATIC void 981STATIC void
1043pagebuf_iodone_work( 982xfs_buf_iodone_work(
1044 void *v) 983 void *v)
1045{ 984{
1046 xfs_buf_t *bp = (xfs_buf_t *)v; 985 xfs_buf_t *bp = (xfs_buf_t *)v;
1047 986
1048 if (bp->pb_iodone) 987 if (bp->b_iodone)
1049 (*(bp->pb_iodone))(bp); 988 (*(bp->b_iodone))(bp);
1050 else if (bp->pb_flags & PBF_ASYNC) 989 else if (bp->b_flags & XBF_ASYNC)
1051 xfs_buf_relse(bp); 990 xfs_buf_relse(bp);
1052} 991}
1053 992
1054void 993void
1055pagebuf_iodone( 994xfs_buf_ioend(
1056 xfs_buf_t *pb, 995 xfs_buf_t *bp,
1057 int schedule) 996 int schedule)
1058{ 997{
1059 pb->pb_flags &= ~(PBF_READ | PBF_WRITE); 998 bp->b_flags &= ~(XBF_READ | XBF_WRITE);
1060 if (pb->pb_error == 0) 999 if (bp->b_error == 0)
1061 pb->pb_flags |= PBF_DONE; 1000 bp->b_flags |= XBF_DONE;
1062 1001
1063 PB_TRACE(pb, "iodone", pb->pb_iodone); 1002 XB_TRACE(bp, "iodone", bp->b_iodone);
1064 1003
1065 if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) { 1004 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1066 if (schedule) { 1005 if (schedule) {
1067 INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb); 1006 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
1068 queue_work(xfslogd_workqueue, &pb->pb_iodone_work); 1007 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1069 } else { 1008 } else {
1070 pagebuf_iodone_work(pb); 1009 xfs_buf_iodone_work(bp);
1071 } 1010 }
1072 } else { 1011 } else {
1073 up(&pb->pb_iodonesema); 1012 up(&bp->b_iodonesema);
1074 } 1013 }
1075} 1014}
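
Completion is routed one of three ways: a b_iodone callback (run directly, or via xfslogd_workqueue when schedule is set), an implicit xfs_buf_relse() for plain XBF_ASYNC buffers, or an up() on b_iodonesema for synchronous waiters. A hedged sketch of the callback route (my_buf_done is hypothetical):

/* Hypothetical completion hook: set b_iodone before issuing the I/O;
 * once it is set, the callback owns the final release.
 */
static void my_buf_done(xfs_buf_t *bp)
{
        if (bp->b_error)
                printk(KERN_ERR "buf I/O failed: %d\n", bp->b_error);
        xfs_buf_relse(bp);
}

static void issue_async_write(xfs_buf_t *bp)
{
        bp->b_iodone = my_buf_done;
        xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC);
}
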
1076 1015
1077/*
1078 * pagebuf_ioerror
1079 *
1080 * pagebuf_ioerror sets the error code for a buffer.
1081 */
1082void 1016void
1083pagebuf_ioerror( /* mark/clear buffer error flag */ 1017xfs_buf_ioerror(
1084 xfs_buf_t *pb, /* buffer to mark */ 1018 xfs_buf_t *bp,
1085 int error) /* error to store (0 if none) */ 1019 int error)
1086{ 1020{
1087 ASSERT(error >= 0 && error <= 0xffff); 1021 ASSERT(error >= 0 && error <= 0xffff);
1088 pb->pb_error = (unsigned short)error; 1022 bp->b_error = (unsigned short)error;
1089 PB_TRACE(pb, "ioerror", (unsigned long)error); 1023 XB_TRACE(bp, "ioerror", (unsigned long)error);
1090} 1024}
1091 1025
1092/* 1026/*
1093 * pagebuf_iostart 1027 * Initiate I/O on a buffer, based on the flags supplied.
1094 * 1028 * The b_iodone routine in the buffer supplied will only be called
1095 * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
1096 * If necessary, it will arrange for any disk space allocation required,
1097 * and it will break up the request if the block mappings require it.
1098 * The pb_iodone routine in the buffer supplied will only be called
1099 * when all of the subsidiary I/O requests, if any, have been completed. 1029 * when all of the subsidiary I/O requests, if any, have been completed.
1100 * pagebuf_iostart calls the pagebuf_ioinitiate routine or
1101 * pagebuf_iorequest, if the former routine is not defined, to start
1102 * the I/O on a given low-level request.
1103 */ 1030 */
1104int 1031int
1105pagebuf_iostart( /* start I/O on a buffer */ 1032xfs_buf_iostart(
1106 xfs_buf_t *pb, /* buffer to start */ 1033 xfs_buf_t *bp,
1107 page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */ 1034 xfs_buf_flags_t flags)
1108 /* PBF_WRITE, PBF_DELWRI, */
1109 /* PBF_DONT_BLOCK */
1110{ 1035{
1111 int status = 0; 1036 int status = 0;
1112 1037
1113 PB_TRACE(pb, "iostart", (unsigned long)flags); 1038 XB_TRACE(bp, "iostart", (unsigned long)flags);
1114 1039
1115 if (flags & PBF_DELWRI) { 1040 if (flags & XBF_DELWRI) {
1116 pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC); 1041 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
1117 pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC); 1042 bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
1118 pagebuf_delwri_queue(pb, 1); 1043 xfs_buf_delwri_queue(bp, 1);
1119 return status; 1044 return status;
1120 } 1045 }
1121 1046
1122 pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \ 1047 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
1123 PBF_READ_AHEAD | _PBF_RUN_QUEUES); 1048 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1124 pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \ 1049 bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
1125 PBF_READ_AHEAD | _PBF_RUN_QUEUES); 1050 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1126 1051
1127 BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL); 1052 BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
1128 1053
1129 /* For writes allow an alternate strategy routine to precede 1054 /* For writes allow an alternate strategy routine to precede
1130 * the actual I/O request (which may not be issued at all in 1055 * the actual I/O request (which may not be issued at all in
1131 * a shutdown situation, for example). 1056 * a shutdown situation, for example).
1132 */ 1057 */
1133 status = (flags & PBF_WRITE) ? 1058 status = (flags & XBF_WRITE) ?
1134 pagebuf_iostrategy(pb) : pagebuf_iorequest(pb); 1059 xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
1135 1060
1136 /* Wait for I/O if we are not an async request. 1061 /* Wait for I/O if we are not an async request.
1137 * Note: async I/O request completion will release the buffer, 1062 * Note: async I/O request completion will release the buffer,
1138 * and that can already be done by this point. So using the 1063 * and that can already be done by this point. So using the
1139 * buffer pointer from here on, after async I/O, is invalid. 1064 * buffer pointer from here on, after async I/O, is invalid.
1140 */ 1065 */
1141 if (!status && !(flags & PBF_ASYNC)) 1066 if (!status && !(flags & XBF_ASYNC))
1142 status = pagebuf_iowait(pb); 1067 status = xfs_buf_iowait(bp);
1143 1068
1144 return status; 1069 return status;
1145} 1070}
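
Given the flag handling above, callers fall into three patterns. A hedged sketch (alternatives, not a sequence; bp assumed locked and initialized):

int error;

/* 1. Delayed write: only queued; xfsbufd issues the I/O later. */
error = xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);

/* 2. Async write: completion releases bp -- invalid afterwards. */
error = xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC);

/* 3. Synchronous read: xfs_buf_iostart() calls xfs_buf_iowait()
 *    itself, so error is the final I/O result.
 */
error = xfs_buf_iostart(bp, XBF_READ);
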
1146 1071
1147/*
1148 * Helper routine for pagebuf_iorequest
1149 */
1150
1151STATIC __inline__ int 1072STATIC __inline__ int
1152_pagebuf_iolocked( 1073_xfs_buf_iolocked(
1153 xfs_buf_t *pb) 1074 xfs_buf_t *bp)
1154{ 1075{
1155 ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); 1076 ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
1156 if (pb->pb_flags & PBF_READ) 1077 if (bp->b_flags & XBF_READ)
1157 return pb->pb_locked; 1078 return bp->b_locked;
1158 return 0; 1079 return 0;
1159} 1080}
1160 1081
1161STATIC __inline__ void 1082STATIC __inline__ void
1162_pagebuf_iodone( 1083_xfs_buf_ioend(
1163 xfs_buf_t *pb, 1084 xfs_buf_t *bp,
1164 int schedule) 1085 int schedule)
1165{ 1086{
1166 if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { 1087 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1167 pb->pb_locked = 0; 1088 bp->b_locked = 0;
1168 pagebuf_iodone(pb, schedule); 1089 xfs_buf_ioend(bp, schedule);
1169 } 1090 }
1170} 1091}
1171 1092
1172STATIC int 1093STATIC int
1173bio_end_io_pagebuf( 1094xfs_buf_bio_end_io(
1174 struct bio *bio, 1095 struct bio *bio,
1175 unsigned int bytes_done, 1096 unsigned int bytes_done,
1176 int error) 1097 int error)
1177{ 1098{
1178 xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private; 1099 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1179 unsigned int blocksize = pb->pb_target->pbr_bsize; 1100 unsigned int blocksize = bp->b_target->bt_bsize;
1180 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1101 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1181 1102
1182 if (bio->bi_size) 1103 if (bio->bi_size)
1183 return 1; 1104 return 1;
1184 1105
1185 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 1106 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1186 pb->pb_error = EIO; 1107 bp->b_error = EIO;
1187 1108
1188 do { 1109 do {
1189 struct page *page = bvec->bv_page; 1110 struct page *page = bvec->bv_page;
1190 1111
1191 if (unlikely(pb->pb_error)) { 1112 if (unlikely(bp->b_error)) {
1192 if (pb->pb_flags & PBF_READ) 1113 if (bp->b_flags & XBF_READ)
1193 ClearPageUptodate(page); 1114 ClearPageUptodate(page);
1194 SetPageError(page); 1115 SetPageError(page);
1195 } else if (blocksize == PAGE_CACHE_SIZE) { 1116 } else if (blocksize >= PAGE_CACHE_SIZE) {
1196 SetPageUptodate(page); 1117 SetPageUptodate(page);
1197 } else if (!PagePrivate(page) && 1118 } else if (!PagePrivate(page) &&
1198 (pb->pb_flags & _PBF_PAGE_CACHE)) { 1119 (bp->b_flags & _XBF_PAGE_CACHE)) {
1199 set_page_region(page, bvec->bv_offset, bvec->bv_len); 1120 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1200 } 1121 }
1201 1122
1202 if (--bvec >= bio->bi_io_vec) 1123 if (--bvec >= bio->bi_io_vec)
1203 prefetchw(&bvec->bv_page->flags); 1124 prefetchw(&bvec->bv_page->flags);
1204 1125
1205 if (_pagebuf_iolocked(pb)) { 1126 if (_xfs_buf_iolocked(bp)) {
1206 unlock_page(page); 1127 unlock_page(page);
1207 } 1128 }
1208 } while (bvec >= bio->bi_io_vec); 1129 } while (bvec >= bio->bi_io_vec);
1209 1130
1210 _pagebuf_iodone(pb, 1); 1131 _xfs_buf_ioend(bp, 1);
1211 bio_put(bio); 1132 bio_put(bio);
1212 return 0; 1133 return 0;
1213} 1134}
1214 1135
1215STATIC void 1136STATIC void
1216_pagebuf_ioapply( 1137_xfs_buf_ioapply(
1217 xfs_buf_t *pb) 1138 xfs_buf_t *bp)
1218{ 1139{
1219 int i, rw, map_i, total_nr_pages, nr_pages; 1140 int i, rw, map_i, total_nr_pages, nr_pages;
1220 struct bio *bio; 1141 struct bio *bio;
1221 int offset = pb->pb_offset; 1142 int offset = bp->b_offset;
1222 int size = pb->pb_count_desired; 1143 int size = bp->b_count_desired;
1223 sector_t sector = pb->pb_bn; 1144 sector_t sector = bp->b_bn;
1224 unsigned int blocksize = pb->pb_target->pbr_bsize; 1145 unsigned int blocksize = bp->b_target->bt_bsize;
1225 int locking = _pagebuf_iolocked(pb); 1146 int locking = _xfs_buf_iolocked(bp);
1226 1147
1227 total_nr_pages = pb->pb_page_count; 1148 total_nr_pages = bp->b_page_count;
1228 map_i = 0; 1149 map_i = 0;
1229 1150
1230 if (pb->pb_flags & _PBF_RUN_QUEUES) { 1151 if (bp->b_flags & _XBF_RUN_QUEUES) {
1231 pb->pb_flags &= ~_PBF_RUN_QUEUES; 1152 bp->b_flags &= ~_XBF_RUN_QUEUES;
1232 rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC; 1153 rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
1233 } else { 1154 } else {
1234 rw = (pb->pb_flags & PBF_READ) ? READ : WRITE; 1155 rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
1235 } 1156 }
1236 1157
1237 if (pb->pb_flags & PBF_ORDERED) { 1158 if (bp->b_flags & XBF_ORDERED) {
1238 ASSERT(!(pb->pb_flags & PBF_READ)); 1159 ASSERT(!(bp->b_flags & XBF_READ));
1239 rw = WRITE_BARRIER; 1160 rw = WRITE_BARRIER;
1240 } 1161 }
1241 1162
1242 /* Special code path for reading a sub page size pagebuf in -- 1163 /* Special code path for reading a sub page size buffer in --
1243 * we populate up the whole page, and hence the other metadata 1164 * we populate up the whole page, and hence the other metadata
1244 * in the same page. This optimization is only valid when the 1165 * in the same page. This optimization is only valid when the
1245 * filesystem block size and the page size are equal. 1166 * filesystem block size is not smaller than the page size.
1246 */ 1167 */
1247 if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) && 1168 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1248 (pb->pb_flags & PBF_READ) && locking && 1169 (bp->b_flags & XBF_READ) && locking &&
1249 (blocksize == PAGE_CACHE_SIZE)) { 1170 (blocksize >= PAGE_CACHE_SIZE)) {
1250 bio = bio_alloc(GFP_NOIO, 1); 1171 bio = bio_alloc(GFP_NOIO, 1);
1251 1172
1252 bio->bi_bdev = pb->pb_target->pbr_bdev; 1173 bio->bi_bdev = bp->b_target->bt_bdev;
1253 bio->bi_sector = sector - (offset >> BBSHIFT); 1174 bio->bi_sector = sector - (offset >> BBSHIFT);
1254 bio->bi_end_io = bio_end_io_pagebuf; 1175 bio->bi_end_io = xfs_buf_bio_end_io;
1255 bio->bi_private = pb; 1176 bio->bi_private = bp;
1256 1177
1257 bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0); 1178 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1258 size = 0; 1179 size = 0;
1259 1180
1260 atomic_inc(&pb->pb_io_remaining); 1181 atomic_inc(&bp->b_io_remaining);
1261 1182
1262 goto submit_io; 1183 goto submit_io;
1263 } 1184 }
1264 1185
1265 /* Lock down the pages which we need to for the request */ 1186 /* Lock down the pages which we need to for the request */
1266 if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) { 1187 if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
1267 for (i = 0; size; i++) { 1188 for (i = 0; size; i++) {
1268 int nbytes = PAGE_CACHE_SIZE - offset; 1189 int nbytes = PAGE_CACHE_SIZE - offset;
1269 struct page *page = pb->pb_pages[i]; 1190 struct page *page = bp->b_pages[i];
1270 1191
1271 if (nbytes > size) 1192 if (nbytes > size)
1272 nbytes = size; 1193 nbytes = size;
@@ -1276,30 +1197,30 @@ _pagebuf_ioapply(
1276 size -= nbytes; 1197 size -= nbytes;
1277 offset = 0; 1198 offset = 0;
1278 } 1199 }
1279 offset = pb->pb_offset; 1200 offset = bp->b_offset;
1280 size = pb->pb_count_desired; 1201 size = bp->b_count_desired;
1281 } 1202 }
1282 1203
1283next_chunk: 1204next_chunk:
1284 atomic_inc(&pb->pb_io_remaining); 1205 atomic_inc(&bp->b_io_remaining);
1285 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); 1206 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1286 if (nr_pages > total_nr_pages) 1207 if (nr_pages > total_nr_pages)
1287 nr_pages = total_nr_pages; 1208 nr_pages = total_nr_pages;
1288 1209
1289 bio = bio_alloc(GFP_NOIO, nr_pages); 1210 bio = bio_alloc(GFP_NOIO, nr_pages);
1290 bio->bi_bdev = pb->pb_target->pbr_bdev; 1211 bio->bi_bdev = bp->b_target->bt_bdev;
1291 bio->bi_sector = sector; 1212 bio->bi_sector = sector;
1292 bio->bi_end_io = bio_end_io_pagebuf; 1213 bio->bi_end_io = xfs_buf_bio_end_io;
1293 bio->bi_private = pb; 1214 bio->bi_private = bp;
1294 1215
1295 for (; size && nr_pages; nr_pages--, map_i++) { 1216 for (; size && nr_pages; nr_pages--, map_i++) {
1296 int nbytes = PAGE_CACHE_SIZE - offset; 1217 int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1297 1218
1298 if (nbytes > size) 1219 if (nbytes > size)
1299 nbytes = size; 1220 nbytes = size;
1300 1221
1301 if (bio_add_page(bio, pb->pb_pages[map_i], 1222 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1302 nbytes, offset) < nbytes) 1223 if (rbytes < nbytes)
1303 break; 1224 break;
1304 1225
1305 offset = 0; 1226 offset = 0;
@@ -1315,107 +1236,102 @@ submit_io:
1315 goto next_chunk; 1236 goto next_chunk;
1316 } else { 1237 } else {
1317 bio_put(bio); 1238 bio_put(bio);
1318 pagebuf_ioerror(pb, EIO); 1239 xfs_buf_ioerror(bp, EIO);
1319 } 1240 }
1320} 1241}
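
The per-bio page budget computed at next_chunk converts the sector cap into pages. A compilable check with assumed constants (4 KiB pages, 512-byte basic blocks, a 2048-sector bio cap):

#include <assert.h>

#define PAGE_SHIFT      12      /* assumed: 4 KiB pages */
#define BBSHIFT         9       /* assumed: 512-byte basic blocks */
#define BIO_MAX_SECTORS 2048    /* assumed cap for illustration */

int main(void)
{
        /* sectors per bio divided by sectors per page */
        int nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
        assert(nr_pages == 256);        /* 2048 / 8 */
        return 0;
}
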
1321 1242
1322/*
1323 * pagebuf_iorequest -- the core I/O request routine.
1324 */
1325int 1243int
1326pagebuf_iorequest( /* start real I/O */ 1244xfs_buf_iorequest(
1327 xfs_buf_t *pb) /* buffer to convey to device */ 1245 xfs_buf_t *bp)
1328{ 1246{
1329 PB_TRACE(pb, "iorequest", 0); 1247 XB_TRACE(bp, "iorequest", 0);
1330 1248
1331 if (pb->pb_flags & PBF_DELWRI) { 1249 if (bp->b_flags & XBF_DELWRI) {
1332 pagebuf_delwri_queue(pb, 1); 1250 xfs_buf_delwri_queue(bp, 1);
1333 return 0; 1251 return 0;
1334 } 1252 }
1335 1253
1336 if (pb->pb_flags & PBF_WRITE) { 1254 if (bp->b_flags & XBF_WRITE) {
1337 _pagebuf_wait_unpin(pb); 1255 xfs_buf_wait_unpin(bp);
1338 } 1256 }
1339 1257
1340 pagebuf_hold(pb); 1258 xfs_buf_hold(bp);
1341 1259
1342 /* Set the count to 1 initially, this will stop an I/O 1260 /* Set the count to 1 initially, this will stop an I/O
1343 * completion callout which happens before we have started 1261 * completion callout which happens before we have started
1344 * all the I/O from calling pagebuf_iodone too early. 1262 * all the I/O from calling xfs_buf_ioend too early.
1345 */ 1263 */
1346 atomic_set(&pb->pb_io_remaining, 1); 1264 atomic_set(&bp->b_io_remaining, 1);
1347 _pagebuf_ioapply(pb); 1265 _xfs_buf_ioapply(bp);
1348 _pagebuf_iodone(pb, 0); 1266 _xfs_buf_ioend(bp, 0);
1349 1267
1350 pagebuf_rele(pb); 1268 xfs_buf_rele(bp);
1351 return 0; 1269 return 0;
1352} 1270}
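
The initial count of one is a bias: a bio that completes while _xfs_buf_ioapply() is still submitting cannot drive b_io_remaining to zero, so the completion work in xfs_buf_ioend() fires exactly once, from whoever decrements last. A compilable userspace model:

#include <assert.h>
#include <stdatomic.h>

/* Userspace model of the b_io_remaining bias in xfs_buf_iorequest(). */
static atomic_int io_remaining;
static int done;

static void ioend(void)
{
        if (atomic_fetch_sub(&io_remaining, 1) == 1)
                done = 1;       /* only the last decrement finishes */
}

int main(void)
{
        atomic_store(&io_remaining, 1);         /* the submitter's bias */
        atomic_fetch_add(&io_remaining, 1);     /* bio 1 issued */
        ioend();                                /* bio 1 completes early */
        assert(!done);                          /* bias still held */
        ioend();                                /* submitter drops the bias */
        assert(done);
        return 0;
}
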
1353 1271
1354/* 1272/*
1355 * pagebuf_iowait 1273 * Waits for I/O to complete on the buffer supplied.
1356 * 1274 * It returns immediately if no I/O is pending.
1357 * pagebuf_iowait waits for I/O to complete on the buffer supplied. 1275 * It returns the I/O error code, if any, or 0 if there was no error.
1358 * It returns immediately if no I/O is pending. In any case, it returns
1359 * the error code, if any, or 0 if there is no error.
1360 */ 1276 */
1361int 1277int
1362pagebuf_iowait( 1278xfs_buf_iowait(
1363 xfs_buf_t *pb) 1279 xfs_buf_t *bp)
1364{ 1280{
1365 PB_TRACE(pb, "iowait", 0); 1281 XB_TRACE(bp, "iowait", 0);
1366 if (atomic_read(&pb->pb_io_remaining)) 1282 if (atomic_read(&bp->b_io_remaining))
1367 blk_run_address_space(pb->pb_target->pbr_mapping); 1283 blk_run_address_space(bp->b_target->bt_mapping);
1368 down(&pb->pb_iodonesema); 1284 down(&bp->b_iodonesema);
1369 PB_TRACE(pb, "iowaited", (long)pb->pb_error); 1285 XB_TRACE(bp, "iowaited", (long)bp->b_error);
1370 return pb->pb_error; 1286 return bp->b_error;
1371} 1287}
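
Submission and waiting can also be split to overlap other work, assuming neither XBF_ASYNC nor a b_iodone callback is set (otherwise b_iodonesema is never signalled). A hedged sketch:

xfs_buf_iorequest(bp);          /* returns once all bios are issued */
/* ... overlap other work while the I/O is in flight ... */
error = xfs_buf_iowait(bp);     /* sleeps on b_iodonesema, returns b_error */
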
1372 1288
1373caddr_t 1289xfs_caddr_t
1374pagebuf_offset( 1290xfs_buf_offset(
1375 xfs_buf_t *pb, 1291 xfs_buf_t *bp,
1376 size_t offset) 1292 size_t offset)
1377{ 1293{
1378 struct page *page; 1294 struct page *page;
1379 1295
1380 offset += pb->pb_offset; 1296 if (bp->b_flags & XBF_MAPPED)
1297 return XFS_BUF_PTR(bp) + offset;
1381 1298
1382 page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT]; 1299 offset += bp->b_offset;
1383 return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1)); 1300 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1301 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1384} 1302}
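
For unmapped buffers the math above splits a byte offset into a page-array index and an intra-page offset. A compilable illustration, assuming 4 KiB pages:

#include <assert.h>
#include <stddef.h>

#define PAGE_CACHE_SHIFT 12                     /* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
        size_t b_offset = 512;              /* buffer starts 512 B into page 0 */
        size_t offset   = 5000 + b_offset;  /* caller asks for byte 5000 */

        assert((offset >> PAGE_CACHE_SHIFT) == 1);          /* b_pages[1] */
        assert((offset & (PAGE_CACHE_SIZE - 1)) == 1416);   /* 5512 - 4096 */
        return 0;
}
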
1385 1303
1386/* 1304/*
1387 * pagebuf_iomove
1388 *
1389 * Move data into or out of a buffer. 1305 * Move data into or out of a buffer.
1390 */ 1306 */
1391void 1307void
1392pagebuf_iomove( 1308xfs_buf_iomove(
1393 xfs_buf_t *pb, /* buffer to process */ 1309 xfs_buf_t *bp, /* buffer to process */
1394 size_t boff, /* starting buffer offset */ 1310 size_t boff, /* starting buffer offset */
1395 size_t bsize, /* length to copy */ 1311 size_t bsize, /* length to copy */
1396 caddr_t data, /* data address */ 1312 caddr_t data, /* data address */
1397 page_buf_rw_t mode) /* read/write flag */ 1313 xfs_buf_rw_t mode) /* read/write/zero flag */
1398{ 1314{
1399 size_t bend, cpoff, csize; 1315 size_t bend, cpoff, csize;
1400 struct page *page; 1316 struct page *page;
1401 1317
1402 bend = boff + bsize; 1318 bend = boff + bsize;
1403 while (boff < bend) { 1319 while (boff < bend) {
1404 page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)]; 1320 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1405 cpoff = page_buf_poff(boff + pb->pb_offset); 1321 cpoff = xfs_buf_poff(boff + bp->b_offset);
1406 csize = min_t(size_t, 1322 csize = min_t(size_t,
1407 PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff); 1323 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1408 1324
1409 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); 1325 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1410 1326
1411 switch (mode) { 1327 switch (mode) {
1412 case PBRW_ZERO: 1328 case XBRW_ZERO:
1413 memset(page_address(page) + cpoff, 0, csize); 1329 memset(page_address(page) + cpoff, 0, csize);
1414 break; 1330 break;
1415 case PBRW_READ: 1331 case XBRW_READ:
1416 memcpy(data, page_address(page) + cpoff, csize); 1332 memcpy(data, page_address(page) + cpoff, csize);
1417 break; 1333 break;
1418 case PBRW_WRITE: 1334 case XBRW_WRITE:
1419 memcpy(page_address(page) + cpoff, data, csize); 1335 memcpy(page_address(page) + cpoff, data, csize);
1420 } 1336 }
1421 1337
@@ -1425,12 +1341,12 @@ pagebuf_iomove(
1425} 1341}
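
A hedged usage sketch: XBRW_ZERO never dereferences the data pointer, and all three modes use the same per-page min_t() clamping, so a range may span pages (hdr is hypothetical):

/* Zero a sub-range of a possibly unmapped buffer. */
xfs_buf_iomove(bp, boff, len, NULL, XBRW_ZERO);

/* Copy a header out of the buffer, page by page. */
xfs_buf_iomove(bp, 0, sizeof(hdr), (xfs_caddr_t)&hdr, XBRW_READ);
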
1426 1342
1427/* 1343/*
1428 * Handling of buftargs. 1344 * Handling of buffer targets (buftargs).
1429 */ 1345 */
1430 1346
1431/* 1347/*
1432 * Wait for any bufs with callbacks that have been submitted but 1348 * Wait for any bufs with callbacks that have been submitted but
1433 * have not yet returned... walk the hash list for the target. 1349 * have not yet returned... walk the hash list for the target.
1434 */ 1350 */
1435void 1351void
1436xfs_wait_buftarg( 1352xfs_wait_buftarg(
@@ -1444,15 +1360,15 @@ xfs_wait_buftarg(
1444 hash = &btp->bt_hash[i]; 1360 hash = &btp->bt_hash[i];
1445again: 1361again:
1446 spin_lock(&hash->bh_lock); 1362 spin_lock(&hash->bh_lock);
1447 list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) { 1363 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1448 ASSERT(btp == bp->pb_target); 1364 ASSERT(btp == bp->b_target);
1449 if (!(bp->pb_flags & PBF_FS_MANAGED)) { 1365 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1450 spin_unlock(&hash->bh_lock); 1366 spin_unlock(&hash->bh_lock);
1451 /* 1367 /*
1452 * Catch superblock reference count leaks 1368 * Catch superblock reference count leaks
1453 * immediately 1369 * immediately
1454 */ 1370 */
1455 BUG_ON(bp->pb_bn == 0); 1371 BUG_ON(bp->b_bn == 0);
1456 delay(100); 1372 delay(100);
1457 goto again; 1373 goto again;
1458 } 1374 }
@@ -1462,9 +1378,9 @@ again:
1462} 1378}
1463 1379
1464/* 1380/*
1465 * Allocate buffer hash table for a given target. 1381 * Allocate buffer hash table for a given target.
1466 * For devices containing metadata (i.e. not the log/realtime devices) 1382 * For devices containing metadata (i.e. not the log/realtime devices)
1467 * we need to allocate a much larger hash table. 1383 * we need to allocate a much larger hash table.
1468 */ 1384 */
1469STATIC void 1385STATIC void
1470xfs_alloc_bufhash( 1386xfs_alloc_bufhash(
@@ -1487,11 +1403,34 @@ STATIC void
1487xfs_free_bufhash( 1403xfs_free_bufhash(
1488 xfs_buftarg_t *btp) 1404 xfs_buftarg_t *btp)
1489{ 1405{
1490 kmem_free(btp->bt_hash, 1406 kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1491 (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1492 btp->bt_hash = NULL; 1407 btp->bt_hash = NULL;
1493} 1408}
1494 1409
1410/*
1411 * buftarg list for delwrite queue processing
1412 */
1413STATIC LIST_HEAD(xfs_buftarg_list);
1414STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1415
1416STATIC void
1417xfs_register_buftarg(
1418 xfs_buftarg_t *btp)
1419{
1420 spin_lock(&xfs_buftarg_lock);
1421 list_add(&btp->bt_list, &xfs_buftarg_list);
1422 spin_unlock(&xfs_buftarg_lock);
1423}
1424
1425STATIC void
1426xfs_unregister_buftarg(
1427 xfs_buftarg_t *btp)
1428{
1429 spin_lock(&xfs_buftarg_lock);
1430 list_del(&btp->bt_list);
1431 spin_unlock(&xfs_buftarg_lock);
1432}
1433
1495void 1434void
1496xfs_free_buftarg( 1435xfs_free_buftarg(
1497 xfs_buftarg_t *btp, 1436 xfs_buftarg_t *btp,
@@ -1499,9 +1438,16 @@ xfs_free_buftarg(
1499{ 1438{
1500 xfs_flush_buftarg(btp, 1); 1439 xfs_flush_buftarg(btp, 1);
1501 if (external) 1440 if (external)
1502 xfs_blkdev_put(btp->pbr_bdev); 1441 xfs_blkdev_put(btp->bt_bdev);
1503 xfs_free_bufhash(btp); 1442 xfs_free_bufhash(btp);
1504 iput(btp->pbr_mapping->host); 1443 iput(btp->bt_mapping->host);
1444
1445 /* Unregister the buftarg first so that we don't get a
1446 * wakeup finding a non-existent task
1447 */
1448 xfs_unregister_buftarg(btp);
1449 kthread_stop(btp->bt_task);
1450
1505 kmem_free(btp, sizeof(*btp)); 1451 kmem_free(btp, sizeof(*btp));
1506} 1452}
1507 1453
@@ -1512,11 +1458,11 @@ xfs_setsize_buftarg_flags(
1512 unsigned int sectorsize, 1458 unsigned int sectorsize,
1513 int verbose) 1459 int verbose)
1514{ 1460{
1515 btp->pbr_bsize = blocksize; 1461 btp->bt_bsize = blocksize;
1516 btp->pbr_sshift = ffs(sectorsize) - 1; 1462 btp->bt_sshift = ffs(sectorsize) - 1;
1517 btp->pbr_smask = sectorsize - 1; 1463 btp->bt_smask = sectorsize - 1;
1518 1464
1519 if (set_blocksize(btp->pbr_bdev, sectorsize)) { 1465 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1520 printk(KERN_WARNING 1466 printk(KERN_WARNING
1521 "XFS: Cannot set_blocksize to %u on device %s\n", 1467 "XFS: Cannot set_blocksize to %u on device %s\n",
1522 sectorsize, XFS_BUFTARG_NAME(btp)); 1468 sectorsize, XFS_BUFTARG_NAME(btp));
@@ -1536,10 +1482,10 @@ xfs_setsize_buftarg_flags(
1536} 1482}
1537 1483
1538/* 1484/*
1539* When allocating the initial buffer target we have not yet 1485 * When allocating the initial buffer target we have not yet
1540* read in the superblock, so don't know what sized sectors 1486 * read in the superblock, so don't know what sized sectors
1541* are being used is at this early stage. Play safe. 1487 * are being used is at this early stage. Play safe.
1542*/ 1488 */
1543STATIC int 1489STATIC int
1544xfs_setsize_buftarg_early( 1490xfs_setsize_buftarg_early(
1545 xfs_buftarg_t *btp, 1491 xfs_buftarg_t *btp,
@@ -1587,10 +1533,30 @@ xfs_mapping_buftarg(
1587 mapping->a_ops = &mapping_aops; 1533 mapping->a_ops = &mapping_aops;
1588 mapping->backing_dev_info = bdi; 1534 mapping->backing_dev_info = bdi;
1589 mapping_set_gfp_mask(mapping, GFP_NOFS); 1535 mapping_set_gfp_mask(mapping, GFP_NOFS);
1590 btp->pbr_mapping = mapping; 1536 btp->bt_mapping = mapping;
1591 return 0; 1537 return 0;
1592} 1538}
1593 1539
1540STATIC int
1541xfs_alloc_delwrite_queue(
1542 xfs_buftarg_t *btp)
1543{
1544 int error = 0;
1545
1546 INIT_LIST_HEAD(&btp->bt_list);
1547 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1548 spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1549 btp->bt_flags = 0;
1550 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1551 if (IS_ERR(btp->bt_task)) {
1552 error = PTR_ERR(btp->bt_task);
1553 goto out_error;
1554 }
1555 xfs_register_buftarg(btp);
1556out_error:
1557 return error;
1558}
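
kthread_run() reports failure as an errno encoded in the returned pointer rather than as NULL, hence the IS_ERR()/PTR_ERR() pair above. The idiom in isolation (arguments hypothetical):

struct task_struct *task;

task = kthread_run(thread_fn, data, "name");
if (IS_ERR(task))
        return PTR_ERR(task);   /* negative errno decoded from the pointer */
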
1559
1594xfs_buftarg_t * 1560xfs_buftarg_t *
1595xfs_alloc_buftarg( 1561xfs_alloc_buftarg(
1596 struct block_device *bdev, 1562 struct block_device *bdev,
@@ -1600,12 +1566,14 @@ xfs_alloc_buftarg(
1600 1566
1601 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); 1567 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1602 1568
1603 btp->pbr_dev = bdev->bd_dev; 1569 btp->bt_dev = bdev->bd_dev;
1604 btp->pbr_bdev = bdev; 1570 btp->bt_bdev = bdev;
1605 if (xfs_setsize_buftarg_early(btp, bdev)) 1571 if (xfs_setsize_buftarg_early(btp, bdev))
1606 goto error; 1572 goto error;
1607 if (xfs_mapping_buftarg(btp, bdev)) 1573 if (xfs_mapping_buftarg(btp, bdev))
1608 goto error; 1574 goto error;
1575 if (xfs_alloc_delwrite_queue(btp))
1576 goto error;
1609 xfs_alloc_bufhash(btp, external); 1577 xfs_alloc_bufhash(btp, external);
1610 return btp; 1578 return btp;
1611 1579
@@ -1616,83 +1584,81 @@ error:
1616 1584
1617 1585
1618/* 1586/*
1619 * Pagebuf delayed write buffer handling 1587 * Delayed write buffer handling
1620 */ 1588 */
1621
1622STATIC LIST_HEAD(pbd_delwrite_queue);
1623STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);
1624
1625STATIC void 1589STATIC void
1626pagebuf_delwri_queue( 1590xfs_buf_delwri_queue(
1627 xfs_buf_t *pb, 1591 xfs_buf_t *bp,
1628 int unlock) 1592 int unlock)
1629{ 1593{
1630 PB_TRACE(pb, "delwri_q", (long)unlock); 1594 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1631 ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) == 1595 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1632 (PBF_DELWRI|PBF_ASYNC)); 1596
1597 XB_TRACE(bp, "delwri_q", (long)unlock);
1598 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1633 1599
1634 spin_lock(&pbd_delwrite_lock); 1600 spin_lock(dwlk);
1635 /* If already in the queue, dequeue and place at tail */ 1601 /* If already in the queue, dequeue and place at tail */
1636 if (!list_empty(&pb->pb_list)) { 1602 if (!list_empty(&bp->b_list)) {
1637 ASSERT(pb->pb_flags & _PBF_DELWRI_Q); 1603 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1638 if (unlock) { 1604 if (unlock)
1639 atomic_dec(&pb->pb_hold); 1605 atomic_dec(&bp->b_hold);
1640 } 1606 list_del(&bp->b_list);
1641 list_del(&pb->pb_list);
1642 } 1607 }
1643 1608
1644 pb->pb_flags |= _PBF_DELWRI_Q; 1609 bp->b_flags |= _XBF_DELWRI_Q;
1645 list_add_tail(&pb->pb_list, &pbd_delwrite_queue); 1610 list_add_tail(&bp->b_list, dwq);
1646 pb->pb_queuetime = jiffies; 1611 bp->b_queuetime = jiffies;
1647 spin_unlock(&pbd_delwrite_lock); 1612 spin_unlock(dwlk);
1648 1613
1649 if (unlock) 1614 if (unlock)
1650 pagebuf_unlock(pb); 1615 xfs_buf_unlock(bp);
1651} 1616}
1652 1617
1653void 1618void
1654pagebuf_delwri_dequeue( 1619xfs_buf_delwri_dequeue(
1655 xfs_buf_t *pb) 1620 xfs_buf_t *bp)
1656{ 1621{
1622 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1657 int dequeued = 0; 1623 int dequeued = 0;
1658 1624
1659 spin_lock(&pbd_delwrite_lock); 1625 spin_lock(dwlk);
1660 if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) { 1626 if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1661 ASSERT(pb->pb_flags & _PBF_DELWRI_Q); 1627 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1662 list_del_init(&pb->pb_list); 1628 list_del_init(&bp->b_list);
1663 dequeued = 1; 1629 dequeued = 1;
1664 } 1630 }
1665 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1631 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1666 spin_unlock(&pbd_delwrite_lock); 1632 spin_unlock(dwlk);
1667 1633
1668 if (dequeued) 1634 if (dequeued)
1669 pagebuf_rele(pb); 1635 xfs_buf_rele(bp);
1670 1636
1671 PB_TRACE(pb, "delwri_dq", (long)dequeued); 1637 XB_TRACE(bp, "delwri_dq", (long)dequeued);
1672} 1638}
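
Dequeue is the mirror of queue: xfs_buf_rele() drops the reference the queue took, but only if the buffer was actually listed, so a double dequeue is harmless. The XFS_BUF_SUPER_STALE() macro further below shows the canonical pairing:

/* From the header below: a staled buffer must also leave the delwri
 * queue so it is never written back after being invalidated.
 */
XFS_BUF_STALE(bp);
xfs_buf_delwri_dequeue(bp);
XFS_BUF_DONE(bp);
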
1673 1639
1674STATIC void 1640STATIC void
1675pagebuf_runall_queues( 1641xfs_buf_runall_queues(
1676 struct workqueue_struct *queue) 1642 struct workqueue_struct *queue)
1677{ 1643{
1678 flush_workqueue(queue); 1644 flush_workqueue(queue);
1679} 1645}
1680 1646
1681/* Defines for pagebuf daemon */
1682STATIC struct task_struct *xfsbufd_task;
1683STATIC int xfsbufd_force_flush;
1684STATIC int xfsbufd_force_sleep;
1685
1686STATIC int 1647STATIC int
1687xfsbufd_wakeup( 1648xfsbufd_wakeup(
1688 int priority, 1649 int priority,
1689 gfp_t mask) 1650 gfp_t mask)
1690{ 1651{
1691 if (xfsbufd_force_sleep) 1652 xfs_buftarg_t *btp;
1692 return 0; 1653
1693 xfsbufd_force_flush = 1; 1654 spin_lock(&xfs_buftarg_lock);
1694 barrier(); 1655 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1695 wake_up_process(xfsbufd_task); 1656 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1657 continue;
1658 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1659 wake_up_process(btp->bt_task);
1660 }
1661 spin_unlock(&xfs_buftarg_lock);
1696 return 0; 1662 return 0;
1697} 1663}
1698 1664
@@ -1702,67 +1668,70 @@ xfsbufd(
1702{ 1668{
1703 struct list_head tmp; 1669 struct list_head tmp;
1704 unsigned long age; 1670 unsigned long age;
1705 xfs_buftarg_t *target; 1671 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
1706 xfs_buf_t *pb, *n; 1672 xfs_buf_t *bp, *n;
1673 struct list_head *dwq = &target->bt_delwrite_queue;
1674 spinlock_t *dwlk = &target->bt_delwrite_lock;
1707 1675
1708 current->flags |= PF_MEMALLOC; 1676 current->flags |= PF_MEMALLOC;
1709 1677
1710 INIT_LIST_HEAD(&tmp); 1678 INIT_LIST_HEAD(&tmp);
1711 do { 1679 do {
1712 if (unlikely(freezing(current))) { 1680 if (unlikely(freezing(current))) {
1713 xfsbufd_force_sleep = 1; 1681 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1714 refrigerator(); 1682 refrigerator();
1715 } else { 1683 } else {
1716 xfsbufd_force_sleep = 0; 1684 clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1717 } 1685 }
1718 1686
1719 schedule_timeout_interruptible( 1687 schedule_timeout_interruptible(
1720 xfs_buf_timer_centisecs * msecs_to_jiffies(10)); 1688 xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1721 1689
1722 age = xfs_buf_age_centisecs * msecs_to_jiffies(10); 1690 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1723 spin_lock(&pbd_delwrite_lock); 1691 spin_lock(dwlk);
1724 list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) { 1692 list_for_each_entry_safe(bp, n, dwq, b_list) {
1725 PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb)); 1693 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1726 ASSERT(pb->pb_flags & PBF_DELWRI); 1694 ASSERT(bp->b_flags & XBF_DELWRI);
1727 1695
1728 if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) { 1696 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1729 if (!xfsbufd_force_flush && 1697 if (!test_bit(XBT_FORCE_FLUSH,
1698 &target->bt_flags) &&
1730 time_before(jiffies, 1699 time_before(jiffies,
1731 pb->pb_queuetime + age)) { 1700 bp->b_queuetime + age)) {
1732 pagebuf_unlock(pb); 1701 xfs_buf_unlock(bp);
1733 break; 1702 break;
1734 } 1703 }
1735 1704
1736 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1705 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1737 pb->pb_flags |= PBF_WRITE; 1706 bp->b_flags |= XBF_WRITE;
1738 list_move(&pb->pb_list, &tmp); 1707 list_move(&bp->b_list, &tmp);
1739 } 1708 }
1740 } 1709 }
1741 spin_unlock(&pbd_delwrite_lock); 1710 spin_unlock(dwlk);
1742 1711
1743 while (!list_empty(&tmp)) { 1712 while (!list_empty(&tmp)) {
1744 pb = list_entry(tmp.next, xfs_buf_t, pb_list); 1713 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1745 target = pb->pb_target; 1714 ASSERT(target == bp->b_target);
1746 1715
1747 list_del_init(&pb->pb_list); 1716 list_del_init(&bp->b_list);
1748 pagebuf_iostrategy(pb); 1717 xfs_buf_iostrategy(bp);
1749 1718
1750 blk_run_address_space(target->pbr_mapping); 1719 blk_run_address_space(target->bt_mapping);
1751 } 1720 }
1752 1721
1753 if (as_list_len > 0) 1722 if (as_list_len > 0)
1754 purge_addresses(); 1723 purge_addresses();
1755 1724
1756 xfsbufd_force_flush = 0; 1725 clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1757 } while (!kthread_should_stop()); 1726 } while (!kthread_should_stop());
1758 1727
1759 return 0; 1728 return 0;
1760} 1729}
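
A buffer is written back only once it has sat on the queue for xfs_buf_age_centisecs, unless XBT_FORCE_FLUSH overrides the age check. The jiffies comparison must use time_before() to survive counter wraparound; a compilable model (values assumed):

#include <assert.h>

/* Userspace model of the delwri aging test; time_before() is the
 * wrap-safe comparison from <linux/jiffies.h>.
 */
#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
        unsigned long jiffies = 1000, queuetime = 900, age = 250;

        assert(time_before(jiffies, queuetime + age));   /* too young: skip */
        jiffies = 1200;
        assert(!time_before(jiffies, queuetime + age));  /* old enough: flush */
        return 0;
}
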
1761 1730
1762/* 1731/*
1763 * Go through all incore buffers, and release buffers if they belong to 1732 * Go through all incore buffers, and release buffers if they belong to
1764 * the given device. This is used in filesystem error handling to 1733 * the given device. This is used in filesystem error handling to
1765 * preserve the consistency of its metadata. 1734 * preserve the consistency of its metadata.
1766 */ 1735 */
1767int 1736int
1768xfs_flush_buftarg( 1737xfs_flush_buftarg(
@@ -1770,73 +1739,72 @@ xfs_flush_buftarg(
1770 int wait) 1739 int wait)
1771{ 1740{
1772 struct list_head tmp; 1741 struct list_head tmp;
1773 xfs_buf_t *pb, *n; 1742 xfs_buf_t *bp, *n;
1774 int pincount = 0; 1743 int pincount = 0;
1744 struct list_head *dwq = &target->bt_delwrite_queue;
1745 spinlock_t *dwlk = &target->bt_delwrite_lock;
1775 1746
1776 pagebuf_runall_queues(xfsdatad_workqueue); 1747 xfs_buf_runall_queues(xfsdatad_workqueue);
1777 pagebuf_runall_queues(xfslogd_workqueue); 1748 xfs_buf_runall_queues(xfslogd_workqueue);
1778 1749
1779 INIT_LIST_HEAD(&tmp); 1750 INIT_LIST_HEAD(&tmp);
1780 spin_lock(&pbd_delwrite_lock); 1751 spin_lock(dwlk);
1781 list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) { 1752 list_for_each_entry_safe(bp, n, dwq, b_list) {
1782 1753 ASSERT(bp->b_target == target);
1783 if (pb->pb_target != target) 1754 ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
1784 continue; 1755 XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
1785 1756 if (xfs_buf_ispin(bp)) {
1786 ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
1787 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
1788 if (pagebuf_ispin(pb)) {
1789 pincount++; 1757 pincount++;
1790 continue; 1758 continue;
1791 } 1759 }
1792 1760
1793 list_move(&pb->pb_list, &tmp); 1761 list_move(&bp->b_list, &tmp);
1794 } 1762 }
1795 spin_unlock(&pbd_delwrite_lock); 1763 spin_unlock(dwlk);
1796 1764
1797 /* 1765 /*
1798 * Dropped the delayed write list lock, now walk the temporary list 1766 * Dropped the delayed write list lock, now walk the temporary list
1799 */ 1767 */
1800 list_for_each_entry_safe(pb, n, &tmp, pb_list) { 1768 list_for_each_entry_safe(bp, n, &tmp, b_list) {
1801 pagebuf_lock(pb); 1769 xfs_buf_lock(bp);
1802 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); 1770 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1803 pb->pb_flags |= PBF_WRITE; 1771 bp->b_flags |= XBF_WRITE;
1804 if (wait) 1772 if (wait)
1805 pb->pb_flags &= ~PBF_ASYNC; 1773 bp->b_flags &= ~XBF_ASYNC;
1806 else 1774 else
1807 list_del_init(&pb->pb_list); 1775 list_del_init(&bp->b_list);
1808 1776
1809 pagebuf_iostrategy(pb); 1777 xfs_buf_iostrategy(bp);
1810 } 1778 }
1811 1779
1812 /* 1780 /*
1813 * Remaining list items must be flushed before returning 1781 * Remaining list items must be flushed before returning
1814 */ 1782 */
1815 while (!list_empty(&tmp)) { 1783 while (!list_empty(&tmp)) {
1816 pb = list_entry(tmp.next, xfs_buf_t, pb_list); 1784 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1817 1785
1818 list_del_init(&pb->pb_list); 1786 list_del_init(&bp->b_list);
1819 xfs_iowait(pb); 1787 xfs_iowait(bp);
1820 xfs_buf_relse(pb); 1788 xfs_buf_relse(bp);
1821 } 1789 }
1822 1790
1823 if (wait) 1791 if (wait)
1824 blk_run_address_space(target->pbr_mapping); 1792 blk_run_address_space(target->bt_mapping);
1825 1793
1826 return pincount; 1794 return pincount;
1827} 1795}
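
The pincount return value lets callers loop until the target is clean; pinned buffers cannot be forced out until whatever pinned them (typically the log) lets go. A hedged caller sketch:

/* Hypothetical quiesce loop; between rounds the log would be forced
 * so the pins drop (not shown).
 */
while (xfs_flush_buftarg(target, 1) != 0)
        delay(100);     /* same backoff xfs_wait_buftarg() uses */
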
1828 1796
1829int __init 1797int __init
1830pagebuf_init(void) 1798xfs_buf_init(void)
1831{ 1799{
1832 int error = -ENOMEM; 1800 int error = -ENOMEM;
1833 1801
1834#ifdef PAGEBUF_TRACE 1802#ifdef XFS_BUF_TRACE
1835 pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP); 1803 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
1836#endif 1804#endif
1837 1805
1838 pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); 1806 xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
1839 if (!pagebuf_zone) 1807 if (!xfs_buf_zone)
1840 goto out_free_trace_buf; 1808 goto out_free_trace_buf;
1841 1809
1842 xfslogd_workqueue = create_workqueue("xfslogd"); 1810 xfslogd_workqueue = create_workqueue("xfslogd");
@@ -1847,42 +1815,33 @@ pagebuf_init(void)
1847 if (!xfsdatad_workqueue) 1815 if (!xfsdatad_workqueue)
1848 goto out_destroy_xfslogd_workqueue; 1816 goto out_destroy_xfslogd_workqueue;
1849 1817
1850 xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd"); 1818 xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1851 if (IS_ERR(xfsbufd_task)) { 1819 if (!xfs_buf_shake)
1852 error = PTR_ERR(xfsbufd_task);
1853 goto out_destroy_xfsdatad_workqueue; 1820 goto out_destroy_xfsdatad_workqueue;
1854 }
1855
1856 pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
1857 if (!pagebuf_shake)
1858 goto out_stop_xfsbufd;
1859 1821
1860 return 0; 1822 return 0;
1861 1823
1862 out_stop_xfsbufd:
1863 kthread_stop(xfsbufd_task);
1864 out_destroy_xfsdatad_workqueue: 1824 out_destroy_xfsdatad_workqueue:
1865 destroy_workqueue(xfsdatad_workqueue); 1825 destroy_workqueue(xfsdatad_workqueue);
1866 out_destroy_xfslogd_workqueue: 1826 out_destroy_xfslogd_workqueue:
1867 destroy_workqueue(xfslogd_workqueue); 1827 destroy_workqueue(xfslogd_workqueue);
1868 out_free_buf_zone: 1828 out_free_buf_zone:
1869 kmem_zone_destroy(pagebuf_zone); 1829 kmem_zone_destroy(xfs_buf_zone);
1870 out_free_trace_buf: 1830 out_free_trace_buf:
1871#ifdef PAGEBUF_TRACE 1831#ifdef XFS_BUF_TRACE
1872 ktrace_free(pagebuf_trace_buf); 1832 ktrace_free(xfs_buf_trace_buf);
1873#endif 1833#endif
1874 return error; 1834 return error;
1875} 1835}
1876 1836
1877void 1837void
1878pagebuf_terminate(void) 1838xfs_buf_terminate(void)
1879{ 1839{
1880 kmem_shake_deregister(pagebuf_shake); 1840 kmem_shake_deregister(xfs_buf_shake);
1881 kthread_stop(xfsbufd_task);
1882 destroy_workqueue(xfsdatad_workqueue); 1841 destroy_workqueue(xfsdatad_workqueue);
1883 destroy_workqueue(xfslogd_workqueue); 1842 destroy_workqueue(xfslogd_workqueue);
1884 kmem_zone_destroy(pagebuf_zone); 1843 kmem_zone_destroy(xfs_buf_zone);
1885#ifdef PAGEBUF_TRACE 1844#ifdef XFS_BUF_TRACE
1886 ktrace_free(pagebuf_trace_buf); 1845 ktrace_free(xfs_buf_trace_buf);
1887#endif 1846#endif
1888} 1847}
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 237a35b915d1..4dd6592d5a4c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -32,44 +32,47 @@
32 * Base types 32 * Base types
33 */ 33 */
34 34
35#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) 35#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
36 36
37#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) 37#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
38#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) 38#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
39#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) 39#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
40#define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) 40#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
41 41
42typedef enum page_buf_rw_e { 42typedef enum {
43 PBRW_READ = 1, /* transfer into target memory */ 43 XBRW_READ = 1, /* transfer into target memory */
44 PBRW_WRITE = 2, /* transfer from target memory */ 44 XBRW_WRITE = 2, /* transfer from target memory */
45 PBRW_ZERO = 3 /* Zero target memory */ 45 XBRW_ZERO = 3, /* Zero target memory */
46} page_buf_rw_t; 46} xfs_buf_rw_t;
47 47
48 48typedef enum {
49typedef enum page_buf_flags_e { /* pb_flags values */ 49 XBF_READ = (1 << 0), /* buffer intended for reading from device */
50 PBF_READ = (1 << 0), /* buffer intended for reading from device */ 50 XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
51 PBF_WRITE = (1 << 1), /* buffer intended for writing to device */ 51 XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
52 PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */ 52 XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
53 PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ 53 XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
54 PBF_DONE = (1 << 5), /* all pages in the buffer uptodate */ 54 XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
55 PBF_DELWRI = (1 << 6), /* buffer has dirty pages */ 55 XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
56 PBF_STALE = (1 << 7), /* buffer has been staled, do not find it */ 56 XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
57 PBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */ 57 XBF_ORDERED = (1 << 11), /* use ordered writes */
58 PBF_ORDERED = (1 << 11), /* use ordered writes */ 58 XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
59 PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
60 59
61 /* flags used only as arguments to access routines */ 60 /* flags used only as arguments to access routines */
62 PBF_LOCK = (1 << 14), /* lock requested */ 61 XBF_LOCK = (1 << 14), /* lock requested */
63 PBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */ 62 XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
64 PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */ 63 XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
65 64
66 /* flags used only internally */ 65 /* flags used only internally */
67 _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ 66 _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
68 _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */ 67 _XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
69 _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ 68 _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
70 _PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ 69 _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
71} page_buf_flags_t; 70} xfs_buf_flags_t;
72 71
72typedef enum {
73 XBT_FORCE_SLEEP = (0 << 1),
74 XBT_FORCE_FLUSH = (1 << 1),
75} xfs_buftarg_flags_t;
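
Note these values are bit numbers for set_bit()/test_bit() on bt_flags, not masks: (0 << 1) and (1 << 1) evaluate to bits 0 and 2. That works because the two are distinct, but the shift notation is misleading. A compilable check:

#include <assert.h>

int main(void)
{
        assert((0 << 1) == 0);  /* XBT_FORCE_SLEEP uses bit 0 */
        assert((1 << 1) == 2);  /* XBT_FORCE_FLUSH uses bit 2 */
        return 0;
}
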
73 76
74typedef struct xfs_bufhash { 77typedef struct xfs_bufhash {
75 struct list_head bh_list; 78 struct list_head bh_list;
@@ -77,477 +80,350 @@ typedef struct xfs_bufhash {
77} xfs_bufhash_t; 80} xfs_bufhash_t;
78 81
79typedef struct xfs_buftarg { 82typedef struct xfs_buftarg {
80 dev_t pbr_dev; 83 dev_t bt_dev;
81 struct block_device *pbr_bdev; 84 struct block_device *bt_bdev;
82 struct address_space *pbr_mapping; 85 struct address_space *bt_mapping;
83 unsigned int pbr_bsize; 86 unsigned int bt_bsize;
84 unsigned int pbr_sshift; 87 unsigned int bt_sshift;
85 size_t pbr_smask; 88 size_t bt_smask;
86 89
87 /* per-device buffer hash table */ 90 /* per device buffer hash table */
88 uint bt_hashmask; 91 uint bt_hashmask;
89 uint bt_hashshift; 92 uint bt_hashshift;
90 xfs_bufhash_t *bt_hash; 93 xfs_bufhash_t *bt_hash;
94
95 /* per device delwri queue */
96 struct task_struct *bt_task;
97 struct list_head bt_list;
98 struct list_head bt_delwrite_queue;
99 spinlock_t bt_delwrite_lock;
100 unsigned long bt_flags;
91} xfs_buftarg_t; 101} xfs_buftarg_t;
92 102
93/* 103/*
94 * xfs_buf_t: Buffer structure for page cache-based buffers 104 * xfs_buf_t: Buffer structure for pagecache-based buffers
105 *
106 * This buffer structure is used by the pagecache buffer management routines
107 * to refer to an assembly of pages forming a logical buffer.
95 * 108 *
96 * This buffer structure is used by the page cache buffer management routines 109 * The buffer structure is used on a temporary basis only, and discarded when
97 * to refer to an assembly of pages forming a logical buffer. The actual I/O 110 * released. The real data storage is recorded in the pagecache. Buffers are
98 * is performed with buffer_head structures, as required by drivers.
99 *
100 * The buffer structure is used on temporary basis only, and discarded when
101 * released. The real data storage is recorded in the page cache. Metadata is
102 * hashed to the block device on which the file system resides. 111 * hashed to the block device on which the file system resides.
103 */ 112 */
104 113
105struct xfs_buf; 114struct xfs_buf;
115typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
116typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
117typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
106 118
107/* call-back function on I/O completion */ 119#define XB_PAGES 2
108typedef void (*page_buf_iodone_t)(struct xfs_buf *);
109/* call-back function on I/O completion */
110typedef void (*page_buf_relse_t)(struct xfs_buf *);
111/* pre-write function */
112typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
113
114#define PB_PAGES 2
115 120
116typedef struct xfs_buf { 121typedef struct xfs_buf {
117 struct semaphore pb_sema; /* semaphore for lockables */ 122 struct semaphore b_sema; /* semaphore for lockables */
118 unsigned long pb_queuetime; /* time buffer was queued */ 123 unsigned long b_queuetime; /* time buffer was queued */
119 atomic_t pb_pin_count; /* pin count */ 124 atomic_t b_pin_count; /* pin count */
120 wait_queue_head_t pb_waiters; /* unpin waiters */ 125 wait_queue_head_t b_waiters; /* unpin waiters */
121 struct list_head pb_list; 126 struct list_head b_list;
122 page_buf_flags_t pb_flags; /* status flags */ 127 xfs_buf_flags_t b_flags; /* status flags */
123 struct list_head pb_hash_list; /* hash table list */ 128 struct list_head b_hash_list; /* hash table list */
124 xfs_bufhash_t *pb_hash; /* hash table list start */ 129 xfs_bufhash_t *b_hash; /* hash table list start */
125 xfs_buftarg_t *pb_target; /* buffer target (device) */ 130 xfs_buftarg_t *b_target; /* buffer target (device) */
126 atomic_t pb_hold; /* reference count */ 131 atomic_t b_hold; /* reference count */
127 xfs_daddr_t pb_bn; /* block number for I/O */ 132 xfs_daddr_t b_bn; /* block number for I/O */
128 loff_t pb_file_offset; /* offset in file */ 133 xfs_off_t b_file_offset; /* offset in file */
129 size_t pb_buffer_length; /* size of buffer in bytes */ 134 size_t b_buffer_length;/* size of buffer in bytes */
130 size_t pb_count_desired; /* desired transfer size */ 135 size_t b_count_desired;/* desired transfer size */
131 void *pb_addr; /* virtual address of buffer */ 136 void *b_addr; /* virtual address of buffer */
132 struct work_struct pb_iodone_work; 137 struct work_struct b_iodone_work;
133 atomic_t pb_io_remaining;/* #outstanding I/O requests */ 138 atomic_t b_io_remaining; /* #outstanding I/O requests */
134 page_buf_iodone_t pb_iodone; /* I/O completion function */ 139 xfs_buf_iodone_t b_iodone; /* I/O completion function */
135 page_buf_relse_t pb_relse; /* releasing function */ 140 xfs_buf_relse_t b_relse; /* releasing function */
136 page_buf_bdstrat_t pb_strat; /* pre-write function */ 141 xfs_buf_bdstrat_t b_strat; /* pre-write function */
137 struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */ 142 struct semaphore b_iodonesema; /* Semaphore for I/O waiters */
138 void *pb_fspriv; 143 void *b_fspriv;
139 void *pb_fspriv2; 144 void *b_fspriv2;
140 void *pb_fspriv3; 145 void *b_fspriv3;
141 unsigned short pb_error; /* error code on I/O */ 146 unsigned short b_error; /* error code on I/O */
142 unsigned short pb_locked; /* page array is locked */ 147 unsigned short b_locked; /* page array is locked */
143 unsigned int pb_page_count; /* size of page array */ 148 unsigned int b_page_count; /* size of page array */
144 unsigned int pb_offset; /* page offset in first page */ 149 unsigned int b_offset; /* page offset in first page */
145 struct page **pb_pages; /* array of page pointers */ 150 struct page **b_pages; /* array of page pointers */
146 struct page *pb_page_array[PB_PAGES]; /* inline pages */ 151 struct page *b_page_array[XB_PAGES]; /* inline pages */
147#ifdef PAGEBUF_LOCK_TRACKING 152#ifdef XFS_BUF_LOCK_TRACKING
148 int pb_last_holder; 153 int b_last_holder;
149#endif 154#endif
150} xfs_buf_t; 155} xfs_buf_t;
151 156
152 157
153/* Finding and Reading Buffers */ 158/* Finding and Reading Buffers */
154 159extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
155extern xfs_buf_t *_pagebuf_find( /* find buffer for block if */ 160 xfs_buf_flags_t, xfs_buf_t *);
156 /* the block is in memory */
157 xfs_buftarg_t *, /* inode for block */
158 loff_t, /* starting offset of range */
159 size_t, /* length of range */
160 page_buf_flags_t, /* PBF_LOCK */
161 xfs_buf_t *); /* newly allocated buffer */
162
163#define xfs_incore(buftarg,blkno,len,lockit) \ 161#define xfs_incore(buftarg,blkno,len,lockit) \
164 _pagebuf_find(buftarg, blkno ,len, lockit, NULL) 162 _xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
165
166extern xfs_buf_t *xfs_buf_get_flags( /* allocate a buffer */
167 xfs_buftarg_t *, /* inode for buffer */
168 loff_t, /* starting offset of range */
169 size_t, /* length of range */
170 page_buf_flags_t); /* PBF_LOCK, PBF_READ, */
171 /* PBF_ASYNC */
172 163
164extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
165 xfs_buf_flags_t);
173#define xfs_buf_get(target, blkno, len, flags) \ 166#define xfs_buf_get(target, blkno, len, flags) \
174 xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 167 xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
175
176extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
177 xfs_buftarg_t *, /* inode for buffer */
178 loff_t, /* starting offset of range */
179 size_t, /* length of range */
180 page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC */
181 168
169extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
170 xfs_buf_flags_t);
182#define xfs_buf_read(target, blkno, len, flags) \ 171#define xfs_buf_read(target, blkno, len, flags) \
183 xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 172 xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
184
185extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */
186 /* no memory or disk address */
187 size_t len,
188 xfs_buftarg_t *); /* mount point "fake" inode */
189
190extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */
191 /* without disk address */
192 size_t len,
193 xfs_buftarg_t *); /* mount point "fake" inode */
194
195extern int pagebuf_associate_memory(
196 xfs_buf_t *,
197 void *,
198 size_t);
199
200extern void pagebuf_hold( /* increment reference count */
201 xfs_buf_t *); /* buffer to hold */
202 173
203extern void pagebuf_readahead( /* read ahead into cache */ 174extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
204 xfs_buftarg_t *, /* target for buffer (or NULL) */ 175extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
205 loff_t, /* starting offset of range */ 176extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
-	size_t,			/* length of range		*/
-	page_buf_flags_t);	/* additional read flags	*/
+extern void xfs_buf_hold(xfs_buf_t *);
+extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
+				xfs_buf_flags_t);
 
 /* Releasing Buffers */
-
-extern void pagebuf_free(		/* deallocate a buffer	*/
-	xfs_buf_t *);			/* buffer to deallocate	*/
-
-extern void pagebuf_rele(		/* release hold on a buffer */
-	xfs_buf_t *);			/* buffer to release	*/
+extern void xfs_buf_free(xfs_buf_t *);
+extern void xfs_buf_rele(xfs_buf_t *);
 
 /* Locking and Unlocking Buffers */
-
-extern int pagebuf_cond_lock(		/* lock buffer, if not locked */
-					/* (returns -EBUSY if locked) */
-	xfs_buf_t *);			/* buffer to lock	*/
-
-extern int pagebuf_lock_value(		/* return count on lock	*/
-	xfs_buf_t *);			/* buffer to check	*/
-
-extern int pagebuf_lock(		/* lock buffer		*/
-	xfs_buf_t *);			/* buffer to lock	*/
-
-extern void pagebuf_unlock(		/* unlock buffer	*/
-	xfs_buf_t *);			/* buffer to unlock	*/
+extern int xfs_buf_cond_lock(xfs_buf_t *);
+extern int xfs_buf_lock_value(xfs_buf_t *);
+extern void xfs_buf_lock(xfs_buf_t *);
+extern void xfs_buf_unlock(xfs_buf_t *);
 
 /* Buffer Read and Write Routines */
-
-extern void pagebuf_iodone(		/* mark buffer I/O complete */
-	xfs_buf_t *,			/* buffer to mark	*/
-	int);				/* run completion locally, or in
-					 * a helper thread. */
-
-extern void pagebuf_ioerror(		/* mark buffer in error (or not) */
-	xfs_buf_t *,			/* buffer to mark	*/
-	int);				/* error to store (0 if none) */
-
-extern int pagebuf_iostart(		/* start I/O on a buffer */
-	xfs_buf_t *,			/* buffer to start	*/
-	page_buf_flags_t);		/* PBF_LOCK, PBF_ASYNC,	*/
-					/* PBF_READ, PBF_WRITE,	*/
-					/* PBF_DELWRI		*/
-
-extern int pagebuf_iorequest(		/* start real I/O	*/
-	xfs_buf_t *);			/* buffer to convey to device */
-
-extern int pagebuf_iowait(		/* wait for buffer I/O done */
-	xfs_buf_t *);			/* buffer to wait on	*/
-
-extern void pagebuf_iomove(		/* move data in/out of pagebuf */
-	xfs_buf_t *,			/* buffer to manipulate	*/
-	size_t,				/* starting buffer offset */
-	size_t,				/* length in buffer	*/
-	caddr_t,			/* data pointer		*/
-	page_buf_rw_t);			/* direction		*/
-
-static inline int pagebuf_iostrategy(xfs_buf_t *pb)
+extern void xfs_buf_ioend(xfs_buf_t *, int);
+extern void xfs_buf_ioerror(xfs_buf_t *, int);
+extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
+extern int xfs_buf_iorequest(xfs_buf_t *);
+extern int xfs_buf_iowait(xfs_buf_t *);
+extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
+				xfs_buf_rw_t);
+
+static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
 {
-	return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
+	return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
 }
 
-static inline int pagebuf_geterror(xfs_buf_t *pb)
+static inline int xfs_buf_geterror(xfs_buf_t *bp)
 {
-	return pb ? pb->pb_error : ENOMEM;
+	return bp ? bp->b_error : ENOMEM;
 }
 
 /* Buffer Utility Routines */
-
-extern caddr_t pagebuf_offset(		/* pointer at offset in buffer */
-	xfs_buf_t *,			/* buffer to offset into */
-	size_t);			/* offset		*/
+extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
 
 /* Pinning Buffer Storage in Memory */
-
-extern void pagebuf_pin(		/* pin buffer in memory	*/
-	xfs_buf_t *);			/* buffer to pin	*/
-
-extern void pagebuf_unpin(		/* unpin buffered data	*/
-	xfs_buf_t *);			/* buffer to unpin	*/
-
-extern int pagebuf_ispin(		/* check if buffer is pinned */
-	xfs_buf_t *);			/* buffer to check	*/
+extern void xfs_buf_pin(xfs_buf_t *);
+extern void xfs_buf_unpin(xfs_buf_t *);
+extern int xfs_buf_ispin(xfs_buf_t *);
 
 /* Delayed Write Buffer Routines */
-
-extern void pagebuf_delwri_dequeue(xfs_buf_t *);
+extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
 
 /* Buffer Daemon Setup Routines */
+extern int xfs_buf_init(void);
+extern void xfs_buf_terminate(void);
 
-extern int pagebuf_init(void);
-extern void pagebuf_terminate(void);
-
-
-#ifdef PAGEBUF_TRACE
-extern ktrace_t *pagebuf_trace_buf;
-extern void pagebuf_trace(
-	xfs_buf_t *,		/* buffer being traced	*/
-	char *,			/* description of operation */
-	void *,			/* arbitrary diagnostic value */
-	void *);		/* return address	*/
+#ifdef XFS_BUF_TRACE
+extern ktrace_t *xfs_buf_trace_buf;
+extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
 #else
-# define pagebuf_trace(pb, id, ptr, ra)	do { } while (0)
+#define xfs_buf_trace(bp,id,ptr,ra)	do { } while (0)
 #endif
 
-#define pagebuf_target_name(target)	\
-	({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
+#define xfs_buf_target_name(target)	\
+	({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
 
 
+#define XFS_B_ASYNC		XBF_ASYNC
+#define XFS_B_DELWRI		XBF_DELWRI
+#define XFS_B_READ		XBF_READ
+#define XFS_B_WRITE		XBF_WRITE
+#define XFS_B_STALE		XBF_STALE
 
-/* These are just for xfs_syncsub... it sets an internal variable
- * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
- */
-#define XFS_B_ASYNC		PBF_ASYNC
-#define XFS_B_DELWRI		PBF_DELWRI
-#define XFS_B_READ		PBF_READ
-#define XFS_B_WRITE		PBF_WRITE
-#define XFS_B_STALE		PBF_STALE
-
-#define XFS_BUF_TRYLOCK		PBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK	PBF_TRYLOCK
-#define XFS_BUF_LOCK		PBF_LOCK
-#define XFS_BUF_MAPPED		PBF_MAPPED
-
-#define BUF_BUSY		PBF_DONT_BLOCK
-
-#define XFS_BUF_BFLAGS(x)	((x)->pb_flags)
-#define XFS_BUF_ZEROFLAGS(x)	\
-	((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
-
-#define XFS_BUF_STALE(x)	((x)->pb_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(x)	((x)->pb_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(x)	((x)->pb_flags & XFS_B_STALE)
-#define XFS_BUF_SUPER_STALE(x)	do {				\
-					XFS_BUF_STALE(x);	\
-					pagebuf_delwri_dequeue(x);	\
-					XFS_BUF_DONE(x);	\
-				} while (0)
+#define XFS_BUF_TRYLOCK		XBF_TRYLOCK
+#define XFS_INCORE_TRYLOCK	XBF_TRYLOCK
+#define XFS_BUF_LOCK		XBF_LOCK
+#define XFS_BUF_MAPPED		XBF_MAPPED
 
-#define XFS_BUF_MANAGE		PBF_FS_MANAGED
-#define XFS_BUF_UNMANAGE(x)	((x)->pb_flags &= ~PBF_FS_MANAGED)
-
-#define XFS_BUF_DELAYWRITE(x)	((x)->pb_flags |= PBF_DELWRI)
-#define XFS_BUF_UNDELAYWRITE(x)	pagebuf_delwri_dequeue(x)
-#define XFS_BUF_ISDELAYWRITE(x)	((x)->pb_flags & PBF_DELWRI)
-
-#define XFS_BUF_ERROR(x,no)	pagebuf_ioerror(x,no)
-#define XFS_BUF_GETERROR(x)	pagebuf_geterror(x)
-#define XFS_BUF_ISERROR(x)	(pagebuf_geterror(x)?1:0)
-
-#define XFS_BUF_DONE(x)		((x)->pb_flags |= PBF_DONE)
-#define XFS_BUF_UNDONE(x)	((x)->pb_flags &= ~PBF_DONE)
-#define XFS_BUF_ISDONE(x)	((x)->pb_flags & PBF_DONE)
-
-#define XFS_BUF_BUSY(x)		do { } while (0)
-#define XFS_BUF_UNBUSY(x)	do { } while (0)
-#define XFS_BUF_ISBUSY(x)	(1)
-
-#define XFS_BUF_ASYNC(x)	((x)->pb_flags |= PBF_ASYNC)
-#define XFS_BUF_UNASYNC(x)	((x)->pb_flags &= ~PBF_ASYNC)
-#define XFS_BUF_ISASYNC(x)	((x)->pb_flags & PBF_ASYNC)
-
-#define XFS_BUF_ORDERED(x)	((x)->pb_flags |= PBF_ORDERED)
-#define XFS_BUF_UNORDERED(x)	((x)->pb_flags &= ~PBF_ORDERED)
-#define XFS_BUF_ISORDERED(x)	((x)->pb_flags & PBF_ORDERED)
-
-#define XFS_BUF_SHUT(x)		printk("XFS_BUF_SHUT not implemented yet\n")
-#define XFS_BUF_UNSHUT(x)	printk("XFS_BUF_UNSHUT not implemented yet\n")
-#define XFS_BUF_ISSHUT(x)	(0)
-
-#define XFS_BUF_HOLD(x)		pagebuf_hold(x)
-#define XFS_BUF_READ(x)		((x)->pb_flags |= PBF_READ)
-#define XFS_BUF_UNREAD(x)	((x)->pb_flags &= ~PBF_READ)
-#define XFS_BUF_ISREAD(x)	((x)->pb_flags & PBF_READ)
-
-#define XFS_BUF_WRITE(x)	((x)->pb_flags |= PBF_WRITE)
-#define XFS_BUF_UNWRITE(x)	((x)->pb_flags &= ~PBF_WRITE)
-#define XFS_BUF_ISWRITE(x)	((x)->pb_flags & PBF_WRITE)
-
-#define XFS_BUF_ISUNINITIAL(x)	(0)
-#define XFS_BUF_UNUNINITIAL(x)	(0)
-
-#define XFS_BUF_BP_ISMAPPED(bp)	1
-
-#define XFS_BUF_IODONE_FUNC(buf)	(buf)->pb_iodone
-#define XFS_BUF_SET_IODONE_FUNC(buf, func)	\
-			(buf)->pb_iodone = (func)
-#define XFS_BUF_CLR_IODONE_FUNC(buf)		\
-			(buf)->pb_iodone = NULL
-#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func)	\
-			(buf)->pb_strat = (func)
-#define XFS_BUF_CLR_BDSTRAT_FUNC(buf)		\
-			(buf)->pb_strat = NULL
-
-#define XFS_BUF_FSPRIVATE(buf, type)		\
-			((type)(buf)->pb_fspriv)
-#define XFS_BUF_SET_FSPRIVATE(buf, value)	\
-			(buf)->pb_fspriv = (void *)(value)
-#define XFS_BUF_FSPRIVATE2(buf, type)		\
-			((type)(buf)->pb_fspriv2)
-#define XFS_BUF_SET_FSPRIVATE2(buf, value)	\
-			(buf)->pb_fspriv2 = (void *)(value)
-#define XFS_BUF_FSPRIVATE3(buf, type)		\
-			((type)(buf)->pb_fspriv3)
-#define XFS_BUF_SET_FSPRIVATE3(buf, value)	\
-			(buf)->pb_fspriv3 = (void *)(value)
-#define XFS_BUF_SET_START(buf)
-
-#define XFS_BUF_SET_BRELSE_FUNC(buf, value)	\
-			(buf)->pb_relse = (value)
-
-#define XFS_BUF_PTR(bp)		(xfs_caddr_t)((bp)->pb_addr)
-
-static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
-{
-	if (bp->pb_flags & PBF_MAPPED)
-		return XFS_BUF_PTR(bp) + offset;
-	return (xfs_caddr_t) pagebuf_offset(bp, offset);
-}
-
-#define XFS_BUF_SET_PTR(bp, val, count)		\
-			pagebuf_associate_memory(bp, val, count)
-#define XFS_BUF_ADDR(bp)	((bp)->pb_bn)
-#define XFS_BUF_SET_ADDR(bp, blk)		\
-			((bp)->pb_bn = (xfs_daddr_t)(blk))
-#define XFS_BUF_OFFSET(bp)	((bp)->pb_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off)		\
-			((bp)->pb_file_offset = (off))
-#define XFS_BUF_COUNT(bp)	((bp)->pb_count_desired)
-#define XFS_BUF_SET_COUNT(bp, cnt)		\
-			((bp)->pb_count_desired = (cnt))
-#define XFS_BUF_SIZE(bp)	((bp)->pb_buffer_length)
-#define XFS_BUF_SET_SIZE(bp, cnt)		\
-			((bp)->pb_buffer_length = (cnt))
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
-#define XFS_BUF_SET_VTYPE(bp, type)
-#define XFS_BUF_SET_REF(bp, ref)
-
-#define XFS_BUF_ISPINNED(bp)	pagebuf_ispin(bp)
-
-#define XFS_BUF_VALUSEMA(bp)	pagebuf_lock_value(bp)
-#define XFS_BUF_CPSEMA(bp)	(pagebuf_cond_lock(bp) == 0)
-#define XFS_BUF_VSEMA(bp)	pagebuf_unlock(bp)
-#define XFS_BUF_PSEMA(bp,x)	pagebuf_lock(bp)
-#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
-
-/* setup the buffer target from a buftarg structure */
-#define XFS_BUF_SET_TARGET(bp, target)	\
-		(bp)->pb_target = (target)
-#define XFS_BUF_TARGET(bp)	((bp)->pb_target)
-#define XFS_BUFTARG_NAME(target) \
-		pagebuf_target_name(target)
-
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
-#define XFS_BUF_SET_VTYPE(bp, type)
-#define XFS_BUF_SET_REF(bp, ref)
-
-static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
+#define BUF_BUSY		XBF_DONT_BLOCK
+
+#define XFS_BUF_BFLAGS(bp)	((bp)->b_flags)
+#define XFS_BUF_ZEROFLAGS(bp)	\
+	((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI))
+
+#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XFS_B_STALE)
+#define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XFS_B_STALE)
+#define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_SUPER_STALE(bp)	do {				\
+					XFS_BUF_STALE(bp);	\
+					xfs_buf_delwri_dequeue(bp);	\
+					XFS_BUF_DONE(bp);	\
+				} while (0)
+
+#define XFS_BUF_MANAGE		XBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(bp)	((bp)->b_flags &= ~XBF_FS_MANAGED)
+
+#define XFS_BUF_DELAYWRITE(bp)		((bp)->b_flags |= XBF_DELWRI)
+#define XFS_BUF_UNDELAYWRITE(bp)	xfs_buf_delwri_dequeue(bp)
+#define XFS_BUF_ISDELAYWRITE(bp)	((bp)->b_flags & XBF_DELWRI)
+
+#define XFS_BUF_ERROR(bp,no)	xfs_buf_ioerror(bp,no)
+#define XFS_BUF_GETERROR(bp)	xfs_buf_geterror(bp)
+#define XFS_BUF_ISERROR(bp)	(xfs_buf_geterror(bp) ? 1 : 0)
+
+#define XFS_BUF_DONE(bp)	((bp)->b_flags |= XBF_DONE)
+#define XFS_BUF_UNDONE(bp)	((bp)->b_flags &= ~XBF_DONE)
+#define XFS_BUF_ISDONE(bp)	((bp)->b_flags & XBF_DONE)
+
+#define XFS_BUF_BUSY(bp)	do { } while (0)
+#define XFS_BUF_UNBUSY(bp)	do { } while (0)
+#define XFS_BUF_ISBUSY(bp)	(1)
+
+#define XFS_BUF_ASYNC(bp)	((bp)->b_flags |= XBF_ASYNC)
+#define XFS_BUF_UNASYNC(bp)	((bp)->b_flags &= ~XBF_ASYNC)
+#define XFS_BUF_ISASYNC(bp)	((bp)->b_flags & XBF_ASYNC)
+
+#define XFS_BUF_ORDERED(bp)	((bp)->b_flags |= XBF_ORDERED)
+#define XFS_BUF_UNORDERED(bp)	((bp)->b_flags &= ~XBF_ORDERED)
+#define XFS_BUF_ISORDERED(bp)	((bp)->b_flags & XBF_ORDERED)
+
+#define XFS_BUF_SHUT(bp)	do { } while (0)
+#define XFS_BUF_UNSHUT(bp)	do { } while (0)
+#define XFS_BUF_ISSHUT(bp)	(0)
+
+#define XFS_BUF_HOLD(bp)	xfs_buf_hold(bp)
+#define XFS_BUF_READ(bp)	((bp)->b_flags |= XBF_READ)
+#define XFS_BUF_UNREAD(bp)	((bp)->b_flags &= ~XBF_READ)
+#define XFS_BUF_ISREAD(bp)	((bp)->b_flags & XBF_READ)
+
+#define XFS_BUF_WRITE(bp)	((bp)->b_flags |= XBF_WRITE)
+#define XFS_BUF_UNWRITE(bp)	((bp)->b_flags &= ~XBF_WRITE)
+#define XFS_BUF_ISWRITE(bp)	((bp)->b_flags & XBF_WRITE)
+
+#define XFS_BUF_ISUNINITIAL(bp)	(0)
+#define XFS_BUF_UNUNINITIAL(bp)	(0)
+
+#define XFS_BUF_BP_ISMAPPED(bp)	(1)
+
+#define XFS_BUF_IODONE_FUNC(bp)			((bp)->b_iodone)
+#define XFS_BUF_SET_IODONE_FUNC(bp, func)	((bp)->b_iodone = (func))
+#define XFS_BUF_CLR_IODONE_FUNC(bp)		((bp)->b_iodone = NULL)
+#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func)	((bp)->b_strat = (func))
+#define XFS_BUF_CLR_BDSTRAT_FUNC(bp)		((bp)->b_strat = NULL)
+
+#define XFS_BUF_FSPRIVATE(bp, type)		((type)(bp)->b_fspriv)
+#define XFS_BUF_SET_FSPRIVATE(bp, val)		((bp)->b_fspriv = (void*)(val))
+#define XFS_BUF_FSPRIVATE2(bp, type)		((type)(bp)->b_fspriv2)
+#define XFS_BUF_SET_FSPRIVATE2(bp, val)		((bp)->b_fspriv2 = (void*)(val))
+#define XFS_BUF_FSPRIVATE3(bp, type)		((type)(bp)->b_fspriv3)
+#define XFS_BUF_SET_FSPRIVATE3(bp, val)		((bp)->b_fspriv3 = (void*)(val))
+#define XFS_BUF_SET_START(bp)			do { } while (0)
+#define XFS_BUF_SET_BRELSE_FUNC(bp, func)	((bp)->b_relse = (func))
+
+#define XFS_BUF_PTR(bp)			(xfs_caddr_t)((bp)->b_addr)
+#define XFS_BUF_SET_PTR(bp, val, cnt)	xfs_buf_associate_memory(bp, val, cnt)
+#define XFS_BUF_ADDR(bp)		((bp)->b_bn)
+#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_bn = (xfs_daddr_t)(bno))
+#define XFS_BUF_OFFSET(bp)		((bp)->b_file_offset)
+#define XFS_BUF_SET_OFFSET(bp, off)	((bp)->b_file_offset = (off))
+#define XFS_BUF_COUNT(bp)		((bp)->b_count_desired)
+#define XFS_BUF_SET_COUNT(bp, cnt)	((bp)->b_count_desired = (cnt))
+#define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)
+#define XFS_BUF_SET_SIZE(bp, cnt)	((bp)->b_buffer_length = (cnt))
+
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	do { } while (0)
+#define XFS_BUF_SET_VTYPE(bp, type)		do { } while (0)
+#define XFS_BUF_SET_REF(bp, ref)		do { } while (0)
+
+#define XFS_BUF_ISPINNED(bp)	xfs_buf_ispin(bp)
+
+#define XFS_BUF_VALUSEMA(bp)	xfs_buf_lock_value(bp)
+#define XFS_BUF_CPSEMA(bp)	(xfs_buf_cond_lock(bp) == 0)
+#define XFS_BUF_VSEMA(bp)	xfs_buf_unlock(bp)
+#define XFS_BUF_PSEMA(bp,x)	xfs_buf_lock(bp)
+#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
+
+#define XFS_BUF_SET_TARGET(bp, target)	((bp)->b_target = (target))
+#define XFS_BUF_TARGET(bp)		((bp)->b_target)
+#define XFS_BUFTARG_NAME(target)	xfs_buf_target_name(target)
+
+static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
 {
-	bp->pb_fspriv3 = mp;
-	bp->pb_strat = xfs_bdstrat_cb;
-	pagebuf_delwri_dequeue(bp);
-	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
+	bp->b_fspriv3 = mp;
+	bp->b_strat = xfs_bdstrat_cb;
+	xfs_buf_delwri_dequeue(bp);
+	return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
 }
 
 static inline void xfs_buf_relse(xfs_buf_t *bp)
 {
-	if (!bp->pb_relse)
-		pagebuf_unlock(bp);
-	pagebuf_rele(bp);
+	if (!bp->b_relse)
+		xfs_buf_unlock(bp);
+	xfs_buf_rele(bp);
 }
 
-#define xfs_bpin(bp)		pagebuf_pin(bp)
-#define xfs_bunpin(bp)		pagebuf_unpin(bp)
+#define xfs_bpin(bp)		xfs_buf_pin(bp)
+#define xfs_bunpin(bp)		xfs_buf_unpin(bp)
 
 #define xfs_buftrace(id, bp)	\
-	    pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
+	    xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
 
-#define xfs_biodone(pb)		\
-	    pagebuf_iodone(pb, 0)
+#define xfs_biodone(bp)		xfs_buf_ioend(bp, 0)
 
-#define xfs_biomove(pb, off, len, data, rw) \
-	    pagebuf_iomove((pb), (off), (len), (data), \
-		((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
+#define xfs_biomove(bp, off, len, data, rw) \
+	    xfs_buf_iomove((bp), (off), (len), (data), \
+		((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
 
-#define xfs_biozero(pb, off, len) \
-	    pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)
+#define xfs_biozero(bp, off, len) \
+	    xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
 
 
-static inline int XFS_bwrite(xfs_buf_t *pb)
+static inline int XFS_bwrite(xfs_buf_t *bp)
 {
-	int iowait = (pb->pb_flags & PBF_ASYNC) == 0;
+	int iowait = (bp->b_flags & XBF_ASYNC) == 0;
 	int error = 0;
 
 	if (!iowait)
-		pb->pb_flags |= _PBF_RUN_QUEUES;
+		bp->b_flags |= _XBF_RUN_QUEUES;
 
-	pagebuf_delwri_dequeue(pb);
-	pagebuf_iostrategy(pb);
+	xfs_buf_delwri_dequeue(bp);
+	xfs_buf_iostrategy(bp);
 	if (iowait) {
-		error = pagebuf_iowait(pb);
-		xfs_buf_relse(pb);
+		error = xfs_buf_iowait(bp);
+		xfs_buf_relse(bp);
 	}
 	return error;
 }
 
-#define XFS_bdwrite(pb) \
-	    pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
+#define XFS_bdwrite(bp)		xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
 
 static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
 {
-	bp->pb_strat = xfs_bdstrat_cb;
-	bp->pb_fspriv3 = mp;
-
-	return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
+	bp->b_strat = xfs_bdstrat_cb;
+	bp->b_fspriv3 = mp;
+	return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
 }
 
-#define XFS_bdstrat(bp) pagebuf_iorequest(bp)
+#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
 
-#define xfs_iowait(pb)	pagebuf_iowait(pb)
+#define xfs_iowait(bp)	xfs_buf_iowait(bp)
 
 #define xfs_baread(target, rablkno, ralen)  \
-	pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)
-
-#define xfs_buf_get_empty(len, target)	pagebuf_get_empty((len), (target))
-#define xfs_buf_get_noaddr(len, target)	pagebuf_get_no_daddr((len), (target))
-#define xfs_buf_free(bp)		pagebuf_free(bp)
+	xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
 
 
 /*
  * Handling of buftargs.
  */
-
 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
 extern void xfs_free_buftarg(xfs_buftarg_t *, int);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
 extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
 
-#define xfs_getsize_buftarg(buftarg) \
-	block_size((buftarg)->pbr_bdev)
-#define xfs_readonly_buftarg(buftarg) \
-	bdev_read_only((buftarg)->pbr_bdev)
-#define xfs_binval(buftarg) \
-	xfs_flush_buftarg(buftarg, 1)
-#define XFS_bflush(buftarg) \
-	xfs_flush_buftarg(buftarg, 1)
+#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
+#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)
+
+#define xfs_binval(buftarg)		xfs_flush_buftarg(buftarg, 1)
+#define XFS_bflush(buftarg)		xfs_flush_buftarg(buftarg, 1)
 
 #endif	/* __XFS_BUF_H__ */
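[Editor's note: the XFS_BUF_* families above are thin bit-flag accessors over the buffer's b_flags word, one macro each to set, clear, and test a bit, which is also why XFS_BUF_ZEROFLAGS can clear several states with a single mask. A minimal userspace model of that pattern; the names below are illustrative stand-ins, not the kernel's:]

	#include <stdio.h>

	#define MBF_READ	(1 << 0)	/* stand-ins for XBF_*-style bits */
	#define MBF_WRITE	(1 << 1)

	struct mbuf { unsigned int flags; };

	#define MBUF_READ(bp)	((bp)->flags |= MBF_READ)	/* set */
	#define MBUF_UNREAD(bp)	((bp)->flags &= ~MBF_READ)	/* clear */
	#define MBUF_ISREAD(bp)	((bp)->flags & MBF_READ)	/* test */

	int main(void)
	{
		struct mbuf b = { 0 };

		MBUF_READ(&b);
		printf("read? %s\n", MBUF_ISREAD(&b) ? "yes" : "no");
		MBUF_UNREAD(&b);
		printf("read? %s\n", MBUF_ISREAD(&b) ? "yes" : "no");
		return 0;
	}
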
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 06111d0bbae4..ced4404339c7 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -509,16 +509,14 @@ linvfs_open_exec(
 	vnode_t		*vp = LINVFS_GET_VP(inode);
 	xfs_mount_t	*mp = XFS_VFSTOM(vp->v_vfsp);
 	int		error = 0;
-	bhv_desc_t	*bdp;
 	xfs_inode_t	*ip;
 
 	if (vp->v_vfsp->vfs_flag & VFS_DMI) {
-		bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-		if (!bdp) {
+		ip = xfs_vtoi(vp);
+		if (!ip) {
 			error = -EINVAL;
 			goto open_exec_out;
 		}
-		ip = XFS_BHVTOI(bdp);
 		if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
 			error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
 					       0, 0, 0, NULL);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 21667ba6dcd5..4db47790415c 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -146,13 +146,10 @@ xfs_find_handle(
 
 	if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
 		xfs_inode_t	*ip;
-		bhv_desc_t	*bhv;
 		int		lock_mode;
 
 		/* need to get access to the xfs_inode to read the generation */
-		bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
-		ASSERT(bhv);
-		ip = XFS_BHVTOI(bhv);
+		ip = xfs_vtoi(vp);
 		ASSERT(ip);
 		lock_mode = xfs_ilock_map_shared(ip);
 
@@ -751,9 +748,8 @@ xfs_ioctl(
 			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
 				mp->m_rtdev_targp : mp->m_ddev_targp;
 
-		da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
-		/* The size dio will do in one go */
-		da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
+		da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
+		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
 
 		if (copy_to_user(arg, &da, sizeof(da)))
 			return -XFS_ERROR(EFAULT);
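[Editor's note: the new d_maxiosz line rounds INT_MAX down to a multiple of d_miniosz, which is a power of two (1 << bt_sshift). A standalone check of that mask arithmetic, with an assumed 512-byte sector shift:]

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int sshift = 9;			/* illustrative; the real value comes from bt_sshift */
		int miniosz = 1 << sshift;	/* smallest allowed direct-I/O unit: 512 */
		int maxiosz = INT_MAX & ~(miniosz - 1);	/* largest int that is a multiple */

		/* prints: miniosz=512 maxiosz=2147483136 remainder=0 */
		printf("miniosz=%d maxiosz=%d remainder=%d\n",
				miniosz, maxiosz, maxiosz % miniosz);
		return 0;
	}
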
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 9b8ee3470ecc..4bd3d03b23ed 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -54,11 +54,46 @@
 #include <linux/capability.h>
 #include <linux/xattr.h>
 #include <linux/namei.h>
+#include <linux/security.h>
 
 #define IS_NOATIME(inode) ((inode->i_sb->s_flags & MS_NOATIME) || \
 	(S_ISDIR(inode->i_mode) && inode->i_sb->s_flags & MS_NODIRATIME))
 
 /*
+ * Get a XFS inode from a given vnode.
+ */
+xfs_inode_t *
+xfs_vtoi(
+	struct vnode	*vp)
+{
+	bhv_desc_t	*bdp;
+
+	bdp = bhv_lookup_range(VN_BHV_HEAD(vp),
+			VNODE_POSITION_XFS, VNODE_POSITION_XFS);
+	if (unlikely(bdp == NULL))
+		return NULL;
+	return XFS_BHVTOI(bdp);
+}
+
+/*
+ * Bring the atime in the XFS inode uptodate.
+ * Used before logging the inode to disk or when the Linux inode goes away.
+ */
+void
+xfs_synchronize_atime(
+	xfs_inode_t	*ip)
+{
+	vnode_t		*vp;
+
+	vp = XFS_ITOV_NULL(ip);
+	if (vp) {
+		struct inode *inode = &vp->v_inode;
+		ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
+		ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
+	}
+}
+
+/*
  * Change the requested timestamp in the given inode.
  * We don't lock across timestamp updates, and we don't log them but
  * we do record the fact that there is dirty information in core.
@@ -77,23 +112,6 @@ xfs_ichgtime(
 	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
 	timespec_t	tv;
 
-	/*
-	 * We're not supposed to change timestamps in readonly-mounted
-	 * filesystems.  Throw it away if anyone asks us.
-	 */
-	if (unlikely(IS_RDONLY(inode)))
-		return;
-
-	/*
-	 * Don't update access timestamps on reads if mounted "noatime".
-	 * Throw it away if anyone asks us.
-	 */
-	if (unlikely(
-	    (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
-	    (flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
-			XFS_ICHGTIME_ACC))
-		return;
-
 	nanotime(&tv);
 	if (flags & XFS_ICHGTIME_MOD) {
 		inode->i_mtime = tv;
@@ -130,8 +148,6 @@ xfs_ichgtime(
  * Variant on the above which avoids querying the system clock
  * in situations where we know the Linux inode timestamps have
  * just been updated (and so we can update our inode cheaply).
- * We also skip the readonly and noatime checks here, they are
- * also catered for already.
  */
 void
 xfs_ichgtime_fast(
@@ -142,20 +158,16 @@ xfs_ichgtime_fast(
 	timespec_t	*tvp;
 
 	/*
-	 * We're not supposed to change timestamps in readonly-mounted
-	 * filesystems.  Throw it away if anyone asks us.
+	 * Atime updates for read() & friends are handled lazily now, and
+	 * explicit updates must go through xfs_ichgtime()
 	 */
-	if (unlikely(IS_RDONLY(inode)))
-		return;
+	ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
 
 	/*
-	 * Don't update access timestamps on reads if mounted "noatime".
-	 * Throw it away if anyone asks us.
+	 * We're not supposed to change timestamps in readonly-mounted
+	 * filesystems.  Throw it away if anyone asks us.
 	 */
-	if (unlikely(
-	    (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
-	    ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
-			XFS_ICHGTIME_ACC)))
+	if (unlikely(IS_RDONLY(inode)))
 		return;
 
 	if (flags & XFS_ICHGTIME_MOD) {
@@ -163,11 +175,6 @@ xfs_ichgtime_fast(
 		ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
 		ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
 	}
-	if (flags & XFS_ICHGTIME_ACC) {
-		tvp = &inode->i_atime;
-		ip->i_d.di_atime.t_sec = (__int32_t)tvp->tv_sec;
-		ip->i_d.di_atime.t_nsec = (__int32_t)tvp->tv_nsec;
-	}
 	if (flags & XFS_ICHGTIME_CHG) {
 		tvp = &inode->i_ctime;
 		ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
@@ -214,6 +221,39 @@ validate_fields(
 }
 
 /*
+ * Hook in SELinux.  This is not quite correct yet, what we really need
+ * here (as we do for default ACLs) is a mechanism by which creation of
+ * these attrs can be journalled at inode creation time (along with the
+ * inode, of course, such that log replay can't cause these to be lost).
+ */
+STATIC int
+linvfs_init_security(
+	struct vnode	*vp,
+	struct inode	*dir)
+{
+	struct inode	*ip = LINVFS_GET_IP(vp);
+	size_t		length;
+	void		*value;
+	char		*name;
+	int		error;
+
+	error = security_inode_init_security(ip, dir, &name, &value, &length);
+	if (error) {
+		if (error == -EOPNOTSUPP)
+			return 0;
+		return -error;
+	}
+
+	VOP_ATTR_SET(vp, name, value, length, ATTR_SECURE, NULL, error);
+	if (!error)
+		VMODIFY(vp);
+
+	kfree(name);
+	kfree(value);
+	return error;
+}
+
+/*
  * Determine whether a process has a valid fs_struct (kernel daemons
  * like knfsd don't have an fs_struct).
  *
@@ -278,6 +318,9 @@ linvfs_mknod(
 		break;
 	}
 
+	if (!error)
+		error = linvfs_init_security(vp, dir);
+
 	if (default_acl) {
 		if (!error) {
 			error = _ACL_INHERIT(vp, &va, default_acl);
@@ -294,8 +337,6 @@ linvfs_mknod(
 		teardown.d_inode = ip = LINVFS_GET_IP(vp);
 		teardown.d_name = dentry->d_name;
 
-		vn_mark_bad(vp);
-
 		if (S_ISDIR(mode))
 			VOP_RMDIR(dvp, &teardown, NULL, err2);
 		else
@@ -506,7 +547,7 @@ linvfs_follow_link(
 	ASSERT(dentry);
 	ASSERT(nd);
 
-	link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL);
+	link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL);
 	if (!link) {
 		nd_set_link(nd, ERR_PTR(-ENOMEM));
 		return NULL;
@@ -522,12 +563,12 @@ linvfs_follow_link(
 	vp = LINVFS_GET_VP(dentry->d_inode);
 
 	iov.iov_base = link;
-	iov.iov_len = MAXNAMELEN;
+	iov.iov_len = MAXPATHLEN;
 
 	uio->uio_iov = &iov;
 	uio->uio_offset = 0;
 	uio->uio_segflg = UIO_SYSSPACE;
-	uio->uio_resid = MAXNAMELEN;
+	uio->uio_resid = MAXPATHLEN;
 	uio->uio_iovcnt = 1;
 
 	VOP_READLINK(vp, uio, 0, NULL, error);
@@ -535,7 +576,7 @@ linvfs_follow_link(
 		kfree(link);
 		link = ERR_PTR(-error);
 	} else {
-		link[MAXNAMELEN - uio->uio_resid] = '\0';
+		link[MAXPATHLEN - uio->uio_resid] = '\0';
 	}
 	kfree(uio);
 
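[Editor's note: xfs_synchronize_atime above copies the Linux in-core atime into the XFS inode's 32-bit on-disk fields; doing this lazily is what lets the read paths in xfs_lrw.c below drop their per-read XFS_ICHGTIME_ACC updates. A self-contained sketch of just that copy, with the on-disk type modeled locally:]

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	/* models the two __int32_t fields of the on-disk timestamp */
	struct ondisk_time { int32_t t_sec; int32_t t_nsec; };

	static void synchronize_atime(const struct timespec *in_core,
				      struct ondisk_time *on_disk)
	{
		on_disk->t_sec = (int32_t)in_core->tv_sec;	/* truncating cast, as in the patch */
		on_disk->t_nsec = (int32_t)in_core->tv_nsec;
	}

	int main(void)
	{
		struct timespec now = { 1136073600, 500 };	/* illustrative value */
		struct ondisk_time dt;

		synchronize_atime(&now, &dt);
		printf("sec=%d nsec=%d\n", dt.t_sec, dt.t_nsec);
		return 0;
	}
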
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index ee784b63acbf..6899a6b4a50a 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -26,11 +26,6 @@ extern struct file_operations linvfs_file_operations;
 extern struct file_operations linvfs_invis_file_operations;
 extern struct file_operations linvfs_dir_operations;
 
-extern struct address_space_operations linvfs_aops;
-
-extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern void linvfs_unwritten_done(struct buffer_head *, int);
-
 extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
 			int, unsigned int, void __user *);
 
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index d8e21ba0cccc..67389b745526 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -110,10 +110,6 @@
  * delalloc and these ondisk-uninitialised buffers.
  */
 BUFFER_FNS(PrivateStart, unwritten);
-static inline void set_buffer_unwritten_io(struct buffer_head *bh)
-{
-	bh->b_end_io = linvfs_unwritten_done;
-}
 
 #define restricted_chown	xfs_params.restrict_chown.val
 #define irix_sgid_inherit	xfs_params.sgid_inherit.val
@@ -232,7 +228,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
 #define xfs_itruncate_data(ip, off)	\
 	(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
 #define xfs_statvfs_fsid(statp, mp)	\
-	({ u64 id = huge_encode_dev((mp)->m_dev);	\
+	({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
 	   __kernel_fsid_t *fsid = &(statp)->f_fsid;	\
 	  (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
 
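[Editor's note: xfs_statvfs_fsid packs one 64-bit encoded device number into the two 32-bit halves of f_fsid. The split in isolation, with an arbitrary stand-in value for huge_encode_dev():]

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t id = 0x0123456789abcdefULL;	/* stand-in for huge_encode_dev() */
		uint32_t val[2];

		val[0] = (uint32_t)id;		/* low 32 bits */
		val[1] = (uint32_t)(id >> 32);	/* high 32 bits */

		/* prints: low=0x89abcdef high=0x01234567 */
		printf("low=0x%08x high=0x%08x\n", val[0], val[1]);
		return 0;
	}
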
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 885dfafeabee..e0ab45fbfebd 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -233,8 +233,8 @@ xfs_read(
 		xfs_buftarg_t	*target =
 			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
 				mp->m_rtdev_targp : mp->m_ddev_targp;
-		if ((*offset & target->pbr_smask) ||
-		    (size & target->pbr_smask)) {
+		if ((*offset & target->bt_smask) ||
+		    (size & target->bt_smask)) {
 			if (*offset == ip->i_d.di_size) {
 				return (0);
 			}
@@ -281,9 +281,6 @@ xfs_read(
 
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
-	if (likely(!(ioflags & IO_INVIS)))
-		xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC);
-
 unlock_isem:
 	if (unlikely(ioflags & IO_ISDIRECT))
 		mutex_unlock(&inode->i_mutex);
@@ -346,9 +343,6 @@ xfs_sendfile(
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
-	if (likely(!(ioflags & IO_INVIS)))
-		xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC);
-
 	return ret;
 }
 
@@ -362,7 +356,6 @@ STATIC int				/* error (positive) */
 xfs_zero_last_block(
 	struct inode	*ip,
 	xfs_iocore_t	*io,
-	xfs_off_t	offset,
 	xfs_fsize_t	isize,
 	xfs_fsize_t	end_size)
 {
@@ -371,19 +364,16 @@ xfs_zero_last_block(
 	int		nimaps;
 	int		zero_offset;
 	int		zero_len;
-	int		isize_fsb_offset;
 	int		error = 0;
 	xfs_bmbt_irec_t	imap;
 	loff_t		loff;
-	size_t		lsize;
 
 	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
-	ASSERT(offset > isize);
 
 	mp = io->io_mount;
 
-	isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
-	if (isize_fsb_offset == 0) {
+	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
+	if (zero_offset == 0) {
 		/*
 		 * There are no extra bytes in the last block on disk to
 		 * zero, so return.
@@ -413,10 +403,8 @@ xfs_zero_last_block(
 	 */
 	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
 	loff = XFS_FSB_TO_B(mp, last_fsb);
-	lsize = XFS_FSB_TO_B(mp, 1);
 
-	zero_offset = isize_fsb_offset;
-	zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;
+	zero_len = mp->m_sb.sb_blocksize - zero_offset;
 
 	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
 
@@ -447,20 +435,17 @@ xfs_zero_eof(
 	struct inode	*ip = LINVFS_GET_IP(vp);
 	xfs_fileoff_t	start_zero_fsb;
 	xfs_fileoff_t	end_zero_fsb;
-	xfs_fileoff_t	prev_zero_fsb;
 	xfs_fileoff_t	zero_count_fsb;
 	xfs_fileoff_t	last_fsb;
 	xfs_extlen_t	buf_len_fsb;
-	xfs_extlen_t	prev_zero_count;
 	xfs_mount_t	*mp;
 	int		nimaps;
 	int		error = 0;
 	xfs_bmbt_irec_t	imap;
-	loff_t		loff;
-	size_t		lsize;
 
 	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
 	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+	ASSERT(offset > isize);
 
 	mp = io->io_mount;
 
@@ -468,7 +453,7 @@ xfs_zero_eof(
 	 * First handle zeroing the block on which isize resides.
 	 * We only zero a part of that block so it is handled specially.
 	 */
-	error = xfs_zero_last_block(ip, io, offset, isize, end_size);
+	error = xfs_zero_last_block(ip, io, isize, end_size);
 	if (error) {
 		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
 		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
@@ -496,8 +481,6 @@ xfs_zero_eof(
 	}
 
 	ASSERT(start_zero_fsb <= end_zero_fsb);
-	prev_zero_fsb = NULLFILEOFF;
-	prev_zero_count = 0;
 	while (start_zero_fsb <= end_zero_fsb) {
 		nimaps = 1;
 		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
@@ -519,10 +502,7 @@ xfs_zero_eof(
 			 * that sits on a hole and sets the page as P_HOLE
 			 * and calls remapf if it is a mapped file.
 			 */
-			prev_zero_fsb = NULLFILEOFF;
-			prev_zero_count = 0;
-			start_zero_fsb = imap.br_startoff +
-					 imap.br_blockcount;
+			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
 			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 			continue;
 		}
@@ -543,17 +523,15 @@ xfs_zero_eof(
 		 */
 		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 
-		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
-		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
-
-		error = xfs_iozero(ip, loff, lsize, end_size);
+		error = xfs_iozero(ip,
+				   XFS_FSB_TO_B(mp, start_zero_fsb),
+				   XFS_FSB_TO_B(mp, buf_len_fsb),
+				   end_size);
 
 		if (error) {
 			goto out_lock;
 		}
 
-		prev_zero_fsb = start_zero_fsb;
-		prev_zero_count = buf_len_fsb;
 		start_zero_fsb = imap.br_startoff + buf_len_fsb;
 		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 
@@ -640,7 +618,7 @@ xfs_write(
 			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
 				mp->m_rtdev_targp : mp->m_ddev_targp;
 
-		if ((pos & target->pbr_smask) || (count & target->pbr_smask))
+		if ((pos & target->bt_smask) || (count & target->bt_smask))
 			return XFS_ERROR(-EINVAL);
 
 		if (!VN_CACHED(vp) && pos < i_size_read(inode))
@@ -831,6 +809,10 @@ retry:
 		goto retry;
 	}
 
+	isize = i_size_read(inode);
+	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
+		*offset = isize;
+
 	if (*offset > xip->i_d.di_size) {
 		xfs_ilock(xip, XFS_ILOCK_EXCL);
 		if (*offset > xip->i_d.di_size) {
@@ -956,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
 
 	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
 	if (!XFS_FORCED_SHUTDOWN(mp)) {
-		pagebuf_iorequest(bp);
+		xfs_buf_iorequest(bp);
 		return 0;
 	} else {
 		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
@@ -1009,7 +991,7 @@ xfsbdstrat(
 	 * if (XFS_BUF_IS_GRIO(bp)) {
 	 */
 
-	pagebuf_iorequest(bp);
+	xfs_buf_iorequest(bp);
 	return 0;
 }
 
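[Editor's note: the reworked xfs_zero_last_block computes the stretch of the last block past EOF directly: zero_offset is EOF's offset within its block, zero_len the remainder of the block. The arithmetic standalone, with assumed sizes:]

	#include <stdio.h>

	int main(void)
	{
		long blocksize = 4096;		/* illustrative sb_blocksize */
		long long isize = 10000;	/* illustrative EOF */
		long zero_offset = isize % blocksize;	/* XFS_B_FSB_OFFSET analogue */

		if (zero_offset == 0) {
			printf("EOF is block aligned, nothing to zero\n");
		} else {
			long zero_len = blocksize - zero_offset;
			/* prints: zero 2288 bytes starting at offset 10000 */
			printf("zero %ld bytes starting at offset %lld\n",
					zero_len, isize);
		}
		return 0;
	}
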
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 6c40a74be7c8..8955720a2c6b 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -34,7 +34,7 @@ xfs_read_xfsstats(
 	__uint64_t	xs_write_bytes = 0;
 	__uint64_t	xs_read_bytes = 0;
 
-	static struct xstats_entry {
+	static const struct xstats_entry {
 		char	*desc;
 		int	endpoint;
 	} xstats[] = {
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h
index 50027c4a5618..8ba7a2fa6c1d 100644
--- a/fs/xfs/linux-2.6/xfs_stats.h
+++ b/fs/xfs/linux-2.6/xfs_stats.h
@@ -109,15 +109,15 @@ struct xfsstats {
 	__uint32_t		vn_remove;	/* # times vn_remove called */
 	__uint32_t		vn_free;	/* # times vn_free called */
 #define XFSSTAT_END_BUF			(XFSSTAT_END_VNODE_OPS+9)
-	__uint32_t		pb_get;
-	__uint32_t		pb_create;
-	__uint32_t		pb_get_locked;
-	__uint32_t		pb_get_locked_waited;
-	__uint32_t		pb_busy_locked;
-	__uint32_t		pb_miss_locked;
-	__uint32_t		pb_page_retries;
-	__uint32_t		pb_page_found;
-	__uint32_t		pb_get_read;
+	__uint32_t		xb_get;
+	__uint32_t		xb_create;
+	__uint32_t		xb_get_locked;
+	__uint32_t		xb_get_locked_waited;
+	__uint32_t		xb_busy_locked;
+	__uint32_t		xb_miss_locked;
+	__uint32_t		xb_page_retries;
+	__uint32_t		xb_page_found;
+	__uint32_t		xb_get_read;
 /* Extra precision counters */
 	__uint64_t		xs_xstrat_bytes;
 	__uint64_t		xs_write_bytes;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 6116b5bf433e..f22e426d9e42 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -306,13 +306,15 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
 		xfs_fs_cmn_err(CE_NOTE, mp,
 		  "Disabling barriers, not supported with external log device");
 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
+		return;
 	}
 
-	if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered ==
+	if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
 					QUEUE_ORDERED_NONE) {
 		xfs_fs_cmn_err(CE_NOTE, mp,
 		  "Disabling barriers, not supported by the underlying device");
 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
+		return;
 	}
 
 	error = xfs_barrier_test(mp);
@@ -320,6 +322,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
 		xfs_fs_cmn_err(CE_NOTE, mp,
 		  "Disabling barriers, trial barrier write failed");
 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
+		return;
 	}
 }
 
@@ -327,7 +330,7 @@ void
 xfs_blkdev_issue_flush(
 	xfs_buftarg_t		*buftarg)
 {
-	blkdev_issue_flush(buftarg->pbr_bdev, NULL);
+	blkdev_issue_flush(buftarg->bt_bdev, NULL);
 }
 
 STATIC struct inode *
@@ -576,7 +579,7 @@ xfssyncd(
 		timeleft = schedule_timeout_interruptible(timeleft);
 		/* swsusp */
 		try_to_freeze();
-		if (kthread_should_stop())
+		if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
 			break;
 
 		spin_lock(&vfsp->vfs_sync_lock);
@@ -966,9 +969,9 @@ init_xfs_fs( void )
 	if (error < 0)
 		goto undo_zones;
 
-	error = pagebuf_init();
+	error = xfs_buf_init();
 	if (error < 0)
-		goto undo_pagebuf;
+		goto undo_buffers;
 
 	vn_init();
 	xfs_init();
@@ -982,9 +985,9 @@ init_xfs_fs( void )
 	return 0;
 
 undo_register:
-	pagebuf_terminate();
+	xfs_buf_terminate();
 
-undo_pagebuf:
+undo_buffers:
 	linvfs_destroy_zones();
 
 undo_zones:
@@ -998,7 +1001,7 @@ exit_xfs_fs( void )
 	XFS_DM_EXIT(&xfs_fs_type);
 	unregister_filesystem(&xfs_fs_type);
 	xfs_cleanup();
-	pagebuf_terminate();
+	xfs_buf_terminate();
 	linvfs_destroy_zones();
 	ktrace_uninit();
 }
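[Editor's note: the three added returns make xfs_mountfs_check_barriers stop probing as soon as barriers are disabled, instead of falling through to test a device already ruled out. The guard-clause shape in miniature, with hypothetical condition flags:]

	#include <stdio.h>

	static int barriers_enabled = 1;

	static void check_barriers(int external_log, int no_ordered_queue)
	{
		if (external_log) {
			puts("Disabling barriers, not supported with external log device");
			barriers_enabled = 0;
			return;		/* the fix: bail out once disabled */
		}
		if (no_ordered_queue) {
			puts("Disabling barriers, not supported by the underlying device");
			barriers_enabled = 0;
			return;
		}
		puts("barriers stay enabled");
	}

	int main(void)
	{
		check_barriers(1, 1);	/* only the first message fires */
		printf("barriers_enabled=%d\n", barriers_enabled);
		return 0;
	}
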
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index e9bbcb4d6243..260dd8415dd7 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -106,7 +106,6 @@ vn_revalidate_core(
 	inode->i_blocks	    = vap->va_nblocks;
 	inode->i_mtime	    = vap->va_mtime;
 	inode->i_ctime	    = vap->va_ctime;
-	inode->i_atime	    = vap->va_atime;
 	inode->i_blksize    = vap->va_blocksize;
 	if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
 		inode->i_flags |= S_IMMUTABLE;
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index f2bbb327c081..0fe2419461d6 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -566,6 +566,25 @@ static inline int VN_BAD(struct vnode *vp)
 }
 
 /*
+ * Extracting atime values in various formats
+ */
+static inline void vn_atime_to_bstime(struct vnode *vp, xfs_bstime_t *bs_atime)
+{
+	bs_atime->tv_sec = vp->v_inode.i_atime.tv_sec;
+	bs_atime->tv_nsec = vp->v_inode.i_atime.tv_nsec;
+}
+
+static inline void vn_atime_to_timespec(struct vnode *vp, struct timespec *ts)
+{
+	*ts = vp->v_inode.i_atime;
+}
+
+static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
+{
+	*tt = vp->v_inode.i_atime.tv_sec;
+}
+
+/*
  * Some useful predicates.
  */
 #define VN_MAPPED(vp)	mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
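[Editor's note: the three vn_atime_to_* inlines added above all read the same i_atime timespec and merely re-express it in the caller's preferred type. The time_t variant, modeled standalone:]

	#include <stdio.h>
	#include <time.h>

	static void atime_to_time_t(const struct timespec *atime, time_t *tt)
	{
		*tt = atime->tv_sec;	/* drops sub-second precision, like vn_atime_to_time_t */
	}

	int main(void)
	{
		struct timespec atime = { 1136073600, 999999 };	/* illustrative */
		time_t tt;

		atime_to_time_t(&atime, &tt);
		printf("atime as time_t: %ld\n", (long)tt);
		return 0;
	}
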
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 2f69822344e5..2ec6b441849c 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -239,7 +239,7 @@ xfs_qm_dquot_logitem_pushbuf(
 	 * trying to duplicate our effort.
 	 */
 	ASSERT(qip->qli_pushbuf_flag != 0);
-	ASSERT(qip->qli_push_owner == get_thread_id());
+	ASSERT(qip->qli_push_owner == current_pid());
 
 	/*
 	 * If flushlock isn't locked anymore, chances are that the
@@ -333,7 +333,7 @@ xfs_qm_dquot_logitem_trylock(
 			qip->qli_pushbuf_flag = 1;
 			ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
 #ifdef DEBUG
-			qip->qli_push_owner = get_thread_id();
+			qip->qli_push_owner = current_pid();
 #endif
 			/*
 			 * The dquot is left locked.
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index bb6991a7a617..7dcdd0640c32 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1392,11 +1392,12 @@ xfs_qm_qino_alloc(
 {
 	xfs_trans_t	*tp;
 	int		error;
 	unsigned long	s;
 	cred_t		zerocr;
+	xfs_inode_t	zeroino;
 	int		committed;
 
-	tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE);
+	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
 	if ((error = xfs_trans_reserve(tp,
 				      XFS_QM_QINOCREATE_SPACE_RES(mp),
 				      XFS_CREATE_LOG_RES(mp), 0,
@@ -1406,8 +1407,9 @@ xfs_qm_qino_alloc(
 		return (error);
 	}
 	memset(&zerocr, 0, sizeof(zerocr));
+	memset(&zeroino, 0, sizeof(zeroino));
 
-	if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0,
+	if ((error = xfs_dir_ialloc(&tp, &zeroino, S_IFREG, 1, 0,
 				&zerocr, 0, 1, ip, &committed))) {
 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
 				 XFS_TRANS_ABORT);
@@ -1918,9 +1920,7 @@ xfs_qm_quotacheck(
 	 * at this point (because we intentionally didn't in dqget_noattach).
 	 */
 	if (error) {
-		xfs_qm_dqpurge_all(mp,
-				   XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA|
-				   XFS_QMOPT_PQUOTA|XFS_QMOPT_QUOTAOFF);
+		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
 		goto error_return;
 	}
 	/*
@@ -2743,6 +2743,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
 		xfs_dqunlock(udqp);
 		ASSERT(ip->i_udquot == NULL);
 		ip->i_udquot = udqp;
+		ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
 		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
@@ -2752,7 +2753,10 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
 		xfs_dqunlock(gdqp);
 		ASSERT(ip->i_gdquot == NULL);
 		ip->i_gdquot = gdqp;
-		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
+		ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
+		ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
+			ip->i_d.di_gid : ip->i_d.di_projid) ==
+			be32_to_cpu(gdqp->q_core.d_id));
 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
 	}
 }
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index bb6dc91ea261..b08b3d9345b7 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -27,45 +27,12 @@ static DEFINE_SPINLOCK(xfs_err_lock);
 /* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
 #define XFS_MAX_ERR_LEVEL	7
 #define XFS_ERR_MASK		((1 << 3) - 1)
-static char		*err_level[XFS_MAX_ERR_LEVEL+1] =
+static const char * const	err_level[XFS_MAX_ERR_LEVEL+1] =
 					{KERN_EMERG, KERN_ALERT, KERN_CRIT,
 					 KERN_ERR, KERN_WARNING, KERN_NOTICE,
 					 KERN_INFO, KERN_DEBUG};
 
 void
-assfail(char *a, char *f, int l)
-{
-	printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l);
-	BUG();
-}
-
-#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
-
-unsigned long
-random(void)
-{
-	static unsigned long	RandomValue = 1;
-	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
-	register long	rv = RandomValue;
-	register long	lo;
-	register long	hi;
-
-	hi = rv / 127773;
-	lo = rv % 127773;
-	rv = 16807 * lo - 2836 * hi;
-	if( rv <= 0 ) rv += 2147483647;
-	return( RandomValue = rv );
-}
-
-int
-get_thread_id(void)
-{
-	return current->pid;
-}
-
-#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
-
-void
 cmn_err(register int level, char *fmt, ...)
 {
 	char	*fp = fmt;
@@ -90,7 +57,6 @@ cmn_err(register int level, char *fmt, ...)
 	BUG();
 }
 
-
 void
 icmn_err(register int level, char *fmt, va_list ap)
 {
@@ -109,3 +75,27 @@ icmn_err(register int level, char *fmt, va_list ap)
 	if (level == CE_PANIC)
 		BUG();
 }
+
+void
+assfail(char *expr, char *file, int line)
+{
+	printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
+	BUG();
+}
+
+#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
+unsigned long random(void)
+{
+	static unsigned long	RandomValue = 1;
+	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+	register long	rv = RandomValue;
+	register long	lo;
+	register long	hi;
+
+	hi = rv / 127773;
+	lo = rv % 127773;
+	rv = 16807 * lo - 2836 * hi;
+	if (rv <= 0) rv += 2147483647;
+	return RandomValue = rv;
+}
+#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
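[Editor's note: the relocated random() is the Park-Miller "minimal standard" generator, value' = (16807 * value) mod (2^31 - 1), evaluated with Schrage's decomposition so the product never overflows a 32-bit long: 127773 = 2147483647 / 16807 and 2836 = 2147483647 % 16807. The same routine compiles and runs standalone:]

	#include <stdio.h>

	static unsigned long xfs_style_random(void)
	{
		static unsigned long value = 1;
		long rv = value;
		long hi = rv / 127773;
		long lo = rv % 127773;

		/* Schrage: (16807 * rv) % 2147483647 without overflow */
		rv = 16807 * lo - 2836 * hi;
		if (rv <= 0)
			rv += 2147483647;
		return value = rv;
	}

	int main(void)
	{
		int i;

		/* first values: 16807, 282475249, 1622650073 */
		for (i = 0; i < 3; i++)
			printf("%lu\n", xfs_style_random());
		return 0;
	}
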
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index aff558664c32..e3bf58112e7e 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -31,24 +31,23 @@ extern void icmn_err(int, char *, va_list)
 	__attribute__ ((format (printf, 2, 0)));
 extern void cmn_err(int, char *, ...)
 	__attribute__ ((format (printf, 2, 3)));
+extern void assfail(char *expr, char *f, int l);
 
-#ifndef STATIC
-# define STATIC static
-#endif
+#define prdev(fmt,targ,args...) \
+	printk("Device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
 
-#ifdef DEBUG
-# define ASSERT(EX) ((EX) ? ((void)0) : assfail(#EX, __FILE__, __LINE__))
-#else
-# define ASSERT(x)	((void)0)
-#endif
+#define ASSERT_ALWAYS(expr)	\
+	(unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
-extern void assfail(char *, char *, int);
-#ifdef DEBUG
+#ifndef DEBUG
+# define ASSERT(expr)	((void)0)
+#else
+# define ASSERT(expr)	ASSERT_ALWAYS(expr)
 extern unsigned long random(void);
-extern int get_thread_id(void);
 #endif
 
-#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
-#define debug_stop_all_cpus(param)	/* param is "cpumask_t *" */
+#ifndef STATIC
+# define STATIC static
+#endif
 
 #endif	/* __XFS_SUPPORT_DEBUG_H__ */
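[Editor's note: after this reshuffle ASSERT_ALWAYS is defined unconditionally and ASSERT either expands to it (DEBUG builds) or compiles to nothing. The expansion pattern, modeled with a plain abort() standing in for BUG():]

	#include <stdio.h>
	#include <stdlib.h>

	static void my_assfail(const char *expr, const char *file, int line)
	{
		printf("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
		abort();
	}

	#define MY_ASSERT_ALWAYS(expr) \
		((expr) != 0 ? (void)0 : my_assfail(#expr, __FILE__, __LINE__))

	#ifdef DEBUG
	# define MY_ASSERT(expr)	MY_ASSERT_ALWAYS(expr)
	#else
	# define MY_ASSERT(expr)	((void)0)	/* free in non-DEBUG builds */
	#endif

	int main(void)
	{
		MY_ASSERT(1 + 1 == 3);		/* no-op unless compiled with -DDEBUG */
		MY_ASSERT_ALWAYS(2 + 2 == 4);	/* always checked */
		puts("done");
		return 0;
	}
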
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c
index 69ec4f540c3a..a3d565a67734 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/support/uuid.c
@@ -27,6 +27,16 @@ uuid_init(void)
 	mutex_init(&uuid_monitor);
 }
 
+
+/* IRIX interpretation of an uuid_t */
+typedef struct {
+	__be32	uu_timelow;
+	__be16	uu_timemid;
+	__be16	uu_timehi;
+	__be16	uu_clockseq;
+	__be16	uu_node[3];
+} xfs_uu_t;
+
 /*
  * uuid_getnodeuniq - obtain the node unique fields of a UUID.
  *
@@ -36,16 +46,11 @@ uuid_init(void)
 void
 uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
 {
-	char	*uu = (char *)uuid;
-
-	/* on IRIX, this function assumes big-endian fields within
-	 * the uuid, so we use INT_GET to get the same result on
-	 * little-endian systems
-	 */
+	xfs_uu_t *uup = (xfs_uu_t *)uuid;
 
-	fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) +
-		   INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT);
-	fsid[1] = INT_GET(*(u_int32_t*)(uu  ), ARCH_CONVERT);
+	fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
+		   be16_to_cpu(uup->uu_timemid);
+	fsid[1] = be16_to_cpu(uup->uu_timelow);
 }
 
 void
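[Editor's note: the rewritten uuid_getnodeuniq reads the UUID through explicit big-endian struct fields instead of raw pointer casts. The byte-order handling in isolation, with userspace helpers standing in for the be*_to_cpu conversions; the fsid[1] line here reads the full 32-bit timelow field the way the old INT_GET code did:]

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t read_be16(const unsigned char *p)
	{
		return (uint16_t)((p[0] << 8) | p[1]);
	}

	static uint32_t read_be32(const unsigned char *p)
	{
		return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		       ((uint32_t)p[2] << 8) | p[3];
	}

	int main(void)
	{
		/* UUID layout: timelow(4) timemid(2) timehi(2) clockseq(2) node(6) */
		unsigned char uu[16] = { 0x00, 0x00, 0x12, 0x34, 0xab, 0xcd,
					 0x11, 0x22, 0x56, 0x78 };
		int fsid[2];

		fsid[0] = (read_be16(&uu[8]) << 16) | read_be16(&uu[4]);
		fsid[1] = (int)read_be32(&uu[0]);

		/* prints: fsid[0]=0x5678abcd fsid[1]=0x00001234 */
		printf("fsid[0]=0x%08x fsid[1]=0x%08x\n", fsid[0], fsid[1]);
		return 0;
	}
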
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index 68e5051d8e24..c4836890b726 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -40,6 +40,22 @@
 #undef XFS_NATIVE_HOST
 #endif
 
+#ifdef XFS_NATIVE_HOST
+#define cpu_to_be16(val)	((__be16)(val))
+#define cpu_to_be32(val)	((__be32)(val))
+#define cpu_to_be64(val)	((__be64)(val))
+#define be16_to_cpu(val)	((__uint16_t)(val))
+#define be32_to_cpu(val)	((__uint32_t)(val))
+#define be64_to_cpu(val)	((__uint64_t)(val))
+#else
+#define cpu_to_be16(val)	(__swab16((__uint16_t)(val)))
+#define cpu_to_be32(val)	(__swab32((__uint32_t)(val)))
+#define cpu_to_be64(val)	(__swab64((__uint64_t)(val)))
+#define be16_to_cpu(val)	(__swab16((__be16)(val)))
+#define be32_to_cpu(val)	(__swab32((__be32)(val)))
+#define be64_to_cpu(val)	(__swab64((__be64)(val)))
+#endif
+
 #endif	/* __KERNEL__ */
 
 /* do we need conversion? */
@@ -186,7 +202,7 @@ static inline void be64_add(__be64 *a, __s64 b)
  */
 
 #define XFS_GET_DIR_INO4(di) \
-	(((u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+	(((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
 
 #define XFS_PUT_DIR_INO4(from, di) \
 do { \
@@ -197,9 +213,9 @@ do { \
 } while (0)
 
 #define XFS_DI_HI(di) \
-	(((u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+	(((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
 #define XFS_DI_LO(di) \
-	(((u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
+	(((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
 
 #define XFS_GET_DIR_INO8(di)        \
 	(((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
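[Editor's note: on big-endian (XFS_NATIVE_HOST) builds the new be*_to_cpu macros are plain casts; everywhere else they byte-swap. A userspace stand-in for the 32-bit swap shows what the __swab32 expansion does to an on-disk big-endian value:]

	#include <stdint.h>
	#include <stdio.h>

	/* what be32_to_cpu() expands to on little-endian hosts */
	static uint32_t swab32(uint32_t v)
	{
		return (v >> 24) | ((v >> 8) & 0x0000ff00UL) |
		       ((v << 8) & 0x00ff0000UL) | (v << 24);
	}

	int main(void)
	{
		/* prints: 0x12345678 -> 0x78563412 */
		printf("0x%08x -> 0x%08x\n", 0x12345678u, swab32(0x12345678u));
		return 0;
	}
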
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 1c7421840c18..fe91eac4e2a7 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -128,7 +128,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
128 return (offset >= minforkoff) ? minforkoff : 0; 128 return (offset >= minforkoff) ? minforkoff : 0;
129 } 129 }
130 130
131 if (unlikely(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) { 131 if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
132 if (bytes <= XFS_IFORK_ASIZE(dp)) 132 if (bytes <= XFS_IFORK_ASIZE(dp))
133 return mp->m_attroffset >> 3; 133 return mp->m_attroffset >> 3;
134 return 0; 134 return 0;
@@ -157,7 +157,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
157{ 157{
158 unsigned long s; 158 unsigned long s;
159 159
160 if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR) && 160 if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
161 !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) { 161 !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) {
162 s = XFS_SB_LOCK(mp); 162 s = XFS_SB_LOCK(mp);
163 if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) { 163 if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
@@ -311,7 +311,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
311 */ 311 */
312 totsize -= size; 312 totsize -= size;
313 if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname && 313 if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
314 !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) { 314 (mp->m_flags & XFS_MOUNT_ATTR2)) {
315 /* 315 /*
316 * Last attribute now removed, revert to original 316 * Last attribute now removed, revert to original
317 * inode format making all literal area available 317 * inode format making all literal area available
@@ -330,7 +330,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
330 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); 330 dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
331 ASSERT(dp->i_d.di_forkoff); 331 ASSERT(dp->i_d.di_forkoff);
332 ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname || 332 ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
333 (mp->m_flags & XFS_MOUNT_COMPAT_ATTR)); 333 !(mp->m_flags & XFS_MOUNT_ATTR2));
334 dp->i_afp->if_ext_max = 334 dp->i_afp->if_ext_max =
335 XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t); 335 XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
336 dp->i_df.if_ext_max = 336 dp->i_df.if_ext_max =
@@ -739,7 +739,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
739 + name_loc->namelen 739 + name_loc->namelen
740 + INT_GET(name_loc->valuelen, ARCH_CONVERT); 740 + INT_GET(name_loc->valuelen, ARCH_CONVERT);
741 } 741 }
742 if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) && 742 if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
743 (bytes == sizeof(struct xfs_attr_sf_hdr))) 743 (bytes == sizeof(struct xfs_attr_sf_hdr)))
744 return(-1); 744 return(-1);
745 return(xfs_attr_shortform_bytesfit(dp, bytes)); 745 return(xfs_attr_shortform_bytesfit(dp, bytes));
@@ -778,7 +778,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
778 goto out; 778 goto out;
779 779
780 if (forkoff == -1) { 780 if (forkoff == -1) {
781 ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR)); 781 ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
782 782
783 /* 783 /*
784 * Last attribute was removed, revert to original 784 * Last attribute was removed, revert to original
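Editorial note: throughout this file the opt-out XFS_MOUNT_COMPAT_ATTR tests become opt-in XFS_MOUNT_ATTR2 tests with the polarity flipped: !(m_flags & XFS_MOUNT_COMPAT_ATTR) becomes (m_flags & XFS_MOUNT_ATTR2), and vice versa. A self-contained sketch of the equivalence (bit values illustrative, not the kernel's):

	#include <assert.h>

	#define XFS_MOUNT_COMPAT_ATTR	0x1	/* old flag: opt OUT of attr2 */
	#define XFS_MOUNT_ATTR2		0x2	/* new flag: opt IN to attr2 */

	static int attr2_old(int m_flags) { return !(m_flags & XFS_MOUNT_COMPAT_ATTR); }
	static int attr2_new(int m_flags) { return (m_flags & XFS_MOUNT_ATTR2) != 0; }

	int main(void)
	{
		/* a mount that allowed attr2 under the old scheme corresponds
		 * to one with XFS_MOUNT_ATTR2 set under the new scheme */
		assert(attr2_old(0) == attr2_new(XFS_MOUNT_ATTR2));
		assert(attr2_old(XFS_MOUNT_COMPAT_ATTR) == attr2_new(0));
		return 0;
	}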
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index f6143ff251a0..541e34109bb9 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -63,7 +63,7 @@ struct xfs_trans;
63 * the leaf_entry. The namespaces are independent only because we also look 63 * the leaf_entry. The namespaces are independent only because we also look
64 * at the namespace bit when we are looking for a matching attribute name. 64 * at the namespace bit when we are looking for a matching attribute name.
65 * 65 *
66 * We also store a "incomplete" bit in the leaf_entry. It shows that an 66 * We also store an "incomplete" bit in the leaf_entry. It shows that an
67 * attribute is in the middle of being created and should not be shown to 67 * attribute is in the middle of being created and should not be shown to
68 * the user if we crash during the time that the bit is set. We clear the 68 * the user if we crash during the time that the bit is set. We clear the
69 * bit when we have finished setting up the attribute. We do this because 69 * bit when we have finished setting up the attribute. We do this because
@@ -72,42 +72,48 @@ struct xfs_trans;
72 */ 72 */
73#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */ 73#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */
74 74
75typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */
76 __uint16_t base; /* base of free region */
77 __uint16_t size; /* length of free region */
78} xfs_attr_leaf_map_t;
79
80typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */
81 xfs_da_blkinfo_t info; /* block type, links, etc. */
82 __uint16_t count; /* count of active leaf_entry's */
83 __uint16_t usedbytes; /* num bytes of names/values stored */
84 __uint16_t firstused; /* first used byte in name area */
85 __uint8_t holes; /* != 0 if blk needs compaction */
86 __uint8_t pad1;
87 xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
88 /* N largest free regions */
89} xfs_attr_leaf_hdr_t;
90
91typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */
92 xfs_dahash_t hashval; /* hash value of name */
93 __uint16_t nameidx; /* index into buffer of name/value */
94 __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
95 __uint8_t pad2; /* unused pad byte */
96} xfs_attr_leaf_entry_t;
97
98typedef struct xfs_attr_leaf_name_local {
99 __uint16_t valuelen; /* number of bytes in value */
100 __uint8_t namelen; /* length of name bytes */
101 __uint8_t nameval[1]; /* name/value bytes */
102} xfs_attr_leaf_name_local_t;
103
104typedef struct xfs_attr_leaf_name_remote {
105 xfs_dablk_t valueblk; /* block number of value bytes */
106 __uint32_t valuelen; /* number of bytes in value */
107 __uint8_t namelen; /* length of name bytes */
108 __uint8_t name[1]; /* name bytes */
109} xfs_attr_leaf_name_remote_t;
110
75typedef struct xfs_attr_leafblock { 111typedef struct xfs_attr_leafblock {
76 struct xfs_attr_leaf_hdr { /* constant-structure header block */ 112 xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */
77 xfs_da_blkinfo_t info; /* block type, links, etc. */ 113 xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */
78 __uint16_t count; /* count of active leaf_entry's */ 114 xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */
79 __uint16_t usedbytes; /* num bytes of names/values stored */ 115 xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */
80 __uint16_t firstused; /* first used byte in name area */
81 __uint8_t holes; /* != 0 if blk needs compaction */
82 __uint8_t pad1;
83 struct xfs_attr_leaf_map { /* RLE map of free bytes */
84 __uint16_t base; /* base of free region */
85 __uint16_t size; /* length of free region */
86 } freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */
87 } hdr;
88 struct xfs_attr_leaf_entry { /* sorted on key, not name */
89 xfs_dahash_t hashval; /* hash value of name */
90 __uint16_t nameidx; /* index into buffer of name/value */
91 __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
92 __uint8_t pad2; /* unused pad byte */
93 } entries[1]; /* variable sized array */
94 struct xfs_attr_leaf_name_local {
95 __uint16_t valuelen; /* number of bytes in value */
96 __uint8_t namelen; /* length of name bytes */
97 __uint8_t nameval[1]; /* name/value bytes */
98 } namelist; /* grows from bottom of buf */
99 struct xfs_attr_leaf_name_remote {
100 xfs_dablk_t valueblk; /* block number of value bytes */
101 __uint32_t valuelen; /* number of bytes in value */
102 __uint8_t namelen; /* length of name bytes */
103 __uint8_t name[1]; /* name bytes */
104 } valuelist; /* grows from bottom of buf */
105} xfs_attr_leafblock_t; 116} xfs_attr_leafblock_t;
106typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t;
107typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t;
108typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t;
109typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t;
110typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
111 117
112/* 118/*
113 * Flags used in the leaf_entry[i].flags field. 119 * Flags used in the leaf_entry[i].flags field.
@@ -150,7 +156,8 @@ xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
150 (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; 156 (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)];
151} 157}
152 158
153#define XFS_ATTR_LEAF_NAME(leafp,idx) xfs_attr_leaf_name(leafp,idx) 159#define XFS_ATTR_LEAF_NAME(leafp,idx) \
160 xfs_attr_leaf_name(leafp,idx)
154static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) 161static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
155{ 162{
156 return (&((char *) 163 return (&((char *)
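Editorial note: hoisting the nested definitions is a pure restructuring. In C the nested struct tags were already file-scope, but their typedefs could only be declared after the container, which is why the old code carried the trailing typedef block. Hoisted, each typedef is available up front:

	/* Sketch of the pattern (not kernel code): */
	typedef struct entry { int hashval; } entry_t;		/* hoisted first */
	typedef struct block { entry_t entries[1]; } block_t;	/* can use entry_t */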
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index e415a4698e9c..70625e577c70 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2146,13 +2146,176 @@ xfs_bmap_add_extent_hole_real(
2146 return 0; /* keep gcc quite */ 2146 return 0; /* keep gcc quite */
2147} 2147}
2148 2148
2149/*
2150 * Adjust the size of the new extent based on di_extsize and rt extsize.
2151 */
2152STATIC int
2153xfs_bmap_extsize_align(
2154 xfs_mount_t *mp,
2155 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2156 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2157 xfs_extlen_t extsz, /* align to this extent size */
2158 int rt, /* is this a realtime inode? */
2159 int eof, /* is extent at end-of-file? */
2160 int delay, /* creating delalloc extent? */
2161 int convert, /* overwriting unwritten extent? */
2162 xfs_fileoff_t *offp, /* in/out: aligned offset */
2163 xfs_extlen_t *lenp) /* in/out: aligned length */
2164{
2165 xfs_fileoff_t orig_off; /* original offset */
2166 xfs_extlen_t orig_alen; /* original length */
2167 xfs_fileoff_t orig_end; /* original off+len */
2168 xfs_fileoff_t nexto; /* next file offset */
2169 xfs_fileoff_t prevo; /* previous file offset */
2170 xfs_fileoff_t align_off; /* temp for offset */
2171 xfs_extlen_t align_alen; /* temp for length */
2172 xfs_extlen_t temp; /* temp for calculations */
2173
2174 if (convert)
2175 return 0;
2176
2177 orig_off = align_off = *offp;
2178 orig_alen = align_alen = *lenp;
2179 orig_end = orig_off + orig_alen;
2180
2181 /*
2182 * If this request overlaps an existing extent, then don't
2183 * attempt to perform any additional alignment.
2184 */
2185 if (!delay && !eof &&
2186 (orig_off >= gotp->br_startoff) &&
2187 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2188 return 0;
2189 }
2190
2191 /*
2192 * If the file offset is unaligned vs. the extent size
2193 * we need to align it. This will be possible unless
2194 * the file was previously written with a kernel that didn't
2195 * perform this alignment, or if a truncate shot us in the
2196 * foot.
2197 */
2198 temp = do_mod(orig_off, extsz);
2199 if (temp) {
2200 align_alen += temp;
2201 align_off -= temp;
2202 }
2203 /*
2204 * Same adjustment for the end of the requested area.
2205 */
2206 if ((temp = (align_alen % extsz))) {
2207 align_alen += extsz - temp;
2208 }
2209 /*
2210 * If the previous block overlaps with this proposed allocation
2211 * then move the start forward without adjusting the length.
2212 */
2213 if (prevp->br_startoff != NULLFILEOFF) {
2214 if (prevp->br_startblock == HOLESTARTBLOCK)
2215 prevo = prevp->br_startoff;
2216 else
2217 prevo = prevp->br_startoff + prevp->br_blockcount;
2218 } else
2219 prevo = 0;
2220 if (align_off != orig_off && align_off < prevo)
2221 align_off = prevo;
2222 /*
2223 * If the next block overlaps with this proposed allocation
2224 * then move the start back without adjusting the length,
2225 * but not before offset 0.
2226 * This may of course make the start overlap previous block,
2227 * and if we hit the offset 0 limit then the next block
2228 * can still overlap too.
2229 */
2230 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2231 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2232 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2233 nexto = gotp->br_startoff + gotp->br_blockcount;
2234 else
2235 nexto = gotp->br_startoff;
2236 } else
2237 nexto = NULLFILEOFF;
2238 if (!eof &&
2239 align_off + align_alen != orig_end &&
2240 align_off + align_alen > nexto)
2241 align_off = nexto > align_alen ? nexto - align_alen : 0;
2242 /*
2243 * If we're now overlapping the next or previous extent that
2244 * means we can't fit an extsz piece in this hole. Just move
2245 * the start forward to the first valid spot and set
2246 * the length so we hit the end.
2247 */
2248 if (align_off != orig_off && align_off < prevo)
2249 align_off = prevo;
2250 if (align_off + align_alen != orig_end &&
2251 align_off + align_alen > nexto &&
2252 nexto != NULLFILEOFF) {
2253 ASSERT(nexto > prevo);
2254 align_alen = nexto - align_off;
2255 }
2256
2257 /*
2258 * If realtime, and the result isn't a multiple of the realtime
2259 * extent size we need to remove blocks until it is.
2260 */
2261 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2262 /*
2263 * We're not covering the original request, or
2264 * we won't be able to once we fix the length.
2265 */
2266 if (orig_off < align_off ||
2267 orig_end > align_off + align_alen ||
2268 align_alen - temp < orig_alen)
2269 return XFS_ERROR(EINVAL);
2270 /*
2271 * Try to fix it by moving the start up.
2272 */
2273 if (align_off + temp <= orig_off) {
2274 align_alen -= temp;
2275 align_off += temp;
2276 }
2277 /*
2278 * Try to fix it by moving the end in.
2279 */
2280 else if (align_off + align_alen - temp >= orig_end)
2281 align_alen -= temp;
2282 /*
2283 * Set the start to the minimum then trim the length.
2284 */
2285 else {
2286 align_alen -= orig_off - align_off;
2287 align_off = orig_off;
2288 align_alen -= align_alen % mp->m_sb.sb_rextsize;
2289 }
2290 /*
2291 * Result doesn't cover the request, fail it.
2292 */
2293 if (orig_off < align_off || orig_end > align_off + align_alen)
2294 return XFS_ERROR(EINVAL);
2295 } else {
2296 ASSERT(orig_off >= align_off);
2297 ASSERT(orig_end <= align_off + align_alen);
2298 }
2299
2300#ifdef DEBUG
2301 if (!eof && gotp->br_startoff != NULLFILEOFF)
2302 ASSERT(align_off + align_alen <= gotp->br_startoff);
2303 if (prevp->br_startoff != NULLFILEOFF)
2304 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2305#endif
2306
2307 *lenp = align_alen;
2308 *offp = align_off;
2309 return 0;
2310}
2311
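Editorial note: a worked example of the start/end rounding at the top of xfs_bmap_extsize_align(), assuming extsz = 16 and no neighbouring extents: a request for 10 blocks at offset 5 first grows to offset 0, length 15 (start rounding), then to length 16 (end rounding). Standalone sketch:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long extsz = 16, off = 5, alen = 10, temp;

		temp = off % extsz;		/* do_mod(orig_off, extsz) == 5 */
		if (temp) {
			alen += temp;		/* 15 */
			off -= temp;		/* 0  */
		}
		temp = alen % extsz;		/* 15 % 16 == 15 */
		if (temp)
			alen += extsz - temp;	/* 16 */
		printf("off=%llu alen=%llu\n", off, alen);	/* off=0 alen=16 */
		return 0;
	}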
2149#define XFS_ALLOC_GAP_UNITS 4 2312#define XFS_ALLOC_GAP_UNITS 4
2150 2313
2151/* 2314/*
2152 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. 2315 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2153 * It figures out where to ask the underlying allocator to put the new extent. 2316 * It figures out where to ask the underlying allocator to put the new extent.
2154 */ 2317 */
2155STATIC int /* error */ 2318STATIC int
2156xfs_bmap_alloc( 2319xfs_bmap_alloc(
2157 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2320 xfs_bmalloca_t *ap) /* bmap alloc argument struct */
2158{ 2321{
@@ -2163,10 +2326,10 @@ xfs_bmap_alloc(
2163 xfs_mount_t *mp; /* mount point structure */ 2326 xfs_mount_t *mp; /* mount point structure */
2164 int nullfb; /* true if ap->firstblock isn't set */ 2327 int nullfb; /* true if ap->firstblock isn't set */
2165 int rt; /* true if inode is realtime */ 2328 int rt; /* true if inode is realtime */
2166#ifdef __KERNEL__ 2329 xfs_extlen_t prod = 0; /* product factor for allocators */
2167 xfs_extlen_t prod=0; /* product factor for allocators */ 2330 xfs_extlen_t ralen = 0; /* realtime allocation length */
2168 xfs_extlen_t ralen=0; /* realtime allocation length */ 2331 xfs_extlen_t align; /* minimum allocation alignment */
2169#endif 2332 xfs_rtblock_t rtx;
2170 2333
2171#define ISVALID(x,y) \ 2334#define ISVALID(x,y) \
2172 (rt ? \ 2335 (rt ? \
@@ -2182,125 +2345,25 @@ xfs_bmap_alloc(
2182 nullfb = ap->firstblock == NULLFSBLOCK; 2345 nullfb = ap->firstblock == NULLFSBLOCK;
2183 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; 2346 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2184 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); 2347 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2185#ifdef __KERNEL__
2186 if (rt) { 2348 if (rt) {
2187 xfs_extlen_t extsz; /* file extent size for rt */ 2349 align = ap->ip->i_d.di_extsize ?
2188 xfs_fileoff_t nexto; /* next file offset */ 2350 ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
2189 xfs_extlen_t orig_alen; /* original ap->alen */ 2351 /* Set prod to match the extent size */
2190 xfs_fileoff_t orig_end; /* original off+len */ 2352 prod = align / mp->m_sb.sb_rextsize;
2191 xfs_fileoff_t orig_off; /* original ap->off */ 2353
2192 xfs_extlen_t mod_off; /* modulus calculations */ 2354 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2193 xfs_fileoff_t prevo; /* previous file offset */ 2355 align, rt, ap->eof, 0,
2194 xfs_rtblock_t rtx; /* realtime extent number */ 2356 ap->conv, &ap->off, &ap->alen);
2195 xfs_extlen_t temp; /* temp for rt calculations */ 2357 if (error)
2196 2358 return error;
2197 /* 2359 ASSERT(ap->alen);
2198 * Set prod to match the realtime extent size.
2199 */
2200 if (!(extsz = ap->ip->i_d.di_extsize))
2201 extsz = mp->m_sb.sb_rextsize;
2202 prod = extsz / mp->m_sb.sb_rextsize;
2203 orig_off = ap->off;
2204 orig_alen = ap->alen;
2205 orig_end = orig_off + orig_alen;
2206 /*
2207 * If the file offset is unaligned vs. the extent size
2208 * we need to align it. This will be possible unless
2209 * the file was previously written with a kernel that didn't
2210 * perform this alignment.
2211 */
2212 mod_off = do_mod(orig_off, extsz);
2213 if (mod_off) {
2214 ap->alen += mod_off;
2215 ap->off -= mod_off;
2216 }
2217 /*
2218 * Same adjustment for the end of the requested area.
2219 */
2220 if ((temp = (ap->alen % extsz)))
2221 ap->alen += extsz - temp;
2222 /*
2223 * If the previous block overlaps with this proposed allocation
2224 * then move the start forward without adjusting the length.
2225 */
2226 prevo =
2227 ap->prevp->br_startoff == NULLFILEOFF ?
2228 0 :
2229 (ap->prevp->br_startoff +
2230 ap->prevp->br_blockcount);
2231 if (ap->off != orig_off && ap->off < prevo)
2232 ap->off = prevo;
2233 /*
2234 * If the next block overlaps with this proposed allocation
2235 * then move the start back without adjusting the length,
2236 * but not before offset 0.
2237 * This may of course make the start overlap previous block,
2238 * and if we hit the offset 0 limit then the next block
2239 * can still overlap too.
2240 */
2241 nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
2242 NULLFILEOFF : ap->gotp->br_startoff;
2243 if (!ap->eof &&
2244 ap->off + ap->alen != orig_end &&
2245 ap->off + ap->alen > nexto)
2246 ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
2247 /*
2248 * If we're now overlapping the next or previous extent that
2249 * means we can't fit an extsz piece in this hole. Just move
2250 * the start forward to the first valid spot and set
2251 * the length so we hit the end.
2252 */
2253 if ((ap->off != orig_off && ap->off < prevo) ||
2254 (ap->off + ap->alen != orig_end &&
2255 ap->off + ap->alen > nexto)) {
2256 ap->off = prevo;
2257 ap->alen = nexto - prevo;
2258 }
2259 /*
2260 * If the result isn't a multiple of rtextents we need to
2261 * remove blocks until it is.
2262 */
2263 if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
2264 /*
2265 * We're not covering the original request, or
2266 * we won't be able to once we fix the length.
2267 */
2268 if (orig_off < ap->off ||
2269 orig_end > ap->off + ap->alen ||
2270 ap->alen - temp < orig_alen)
2271 return XFS_ERROR(EINVAL);
2272 /*
2273 * Try to fix it by moving the start up.
2274 */
2275 if (ap->off + temp <= orig_off) {
2276 ap->alen -= temp;
2277 ap->off += temp;
2278 }
2279 /*
2280 * Try to fix it by moving the end in.
2281 */
2282 else if (ap->off + ap->alen - temp >= orig_end)
2283 ap->alen -= temp;
2284 /*
2285 * Set the start to the minimum then trim the length.
2286 */
2287 else {
2288 ap->alen -= orig_off - ap->off;
2289 ap->off = orig_off;
2290 ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
2291 }
2292 /*
2293 * Result doesn't cover the request, fail it.
2294 */
2295 if (orig_off < ap->off || orig_end > ap->off + ap->alen)
2296 return XFS_ERROR(EINVAL);
2297 }
2298 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); 2360 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2361
2299 /* 2362 /*
2300 * If the offset & length are not perfectly aligned 2363 * If the offset & length are not perfectly aligned
2301 * then kill prod, it will just get us in trouble. 2364 * then kill prod, it will just get us in trouble.
2302 */ 2365 */
2303 if (do_mod(ap->off, extsz) || ap->alen % extsz) 2366 if (do_mod(ap->off, align) || ap->alen % align)
2304 prod = 1; 2367 prod = 1;
2305 /* 2368 /*
2306 * Set ralen to be the actual requested length in rtextents. 2369 * Set ralen to be the actual requested length in rtextents.
@@ -2326,15 +2389,24 @@ xfs_bmap_alloc(
2326 ap->rval = rtx * mp->m_sb.sb_rextsize; 2389 ap->rval = rtx * mp->m_sb.sb_rextsize;
2327 } else 2390 } else
2328 ap->rval = 0; 2391 ap->rval = 0;
2392 } else {
2393 align = (ap->userdata && ap->ip->i_d.di_extsize &&
2394 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
2395 ap->ip->i_d.di_extsize : 0;
2396 if (unlikely(align)) {
2397 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2398 align, rt,
2399 ap->eof, 0, ap->conv,
2400 &ap->off, &ap->alen);
2401 ASSERT(!error);
2402 ASSERT(ap->alen);
2403 }
2404 if (nullfb)
2405 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2406 else
2407 ap->rval = ap->firstblock;
2329 } 2408 }
2330#else 2409
2331 if (rt)
2332 ap->rval = 0;
2333#endif /* __KERNEL__ */
2334 else if (nullfb)
2335 ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2336 else
2337 ap->rval = ap->firstblock;
2338 /* 2410 /*
2339 * If allocating at eof, and there's a previous real block, 2411 * If allocating at eof, and there's a previous real block,
2340 * try to use it's last block as our starting point. 2412 * try to use it's last block as our starting point.
@@ -2598,11 +2670,12 @@ xfs_bmap_alloc(
2598 args.total = ap->total; 2670 args.total = ap->total;
2599 args.minlen = ap->minlen; 2671 args.minlen = ap->minlen;
2600 } 2672 }
2601 if (ap->ip->i_d.di_extsize) { 2673 if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
2674 (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
2602 args.prod = ap->ip->i_d.di_extsize; 2675 args.prod = ap->ip->i_d.di_extsize;
2603 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) 2676 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2604 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2677 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2605 } else if (mp->m_sb.sb_blocksize >= NBPP) { 2678 } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
2606 args.prod = 1; 2679 args.prod = 1;
2607 args.mod = 0; 2680 args.mod = 0;
2608 } else { 2681 } else {
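Editorial note: the prod/mod pair asks the block allocator for a length congruent to mod modulo prod; with mod = prod - (off % prod), a length satisfying len % prod == mod makes (off + len) % prod == 0, i.e. the extent ends on an extent-size boundary. Illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int prod = 8, off = 5;	/* illustrative values */
		unsigned int mod = off % prod ? prod - off % prod : 0;

		printf("mod=%u, (off+mod)%%prod=%u\n", mod, (off + mod) % prod);
		return 0;			/* prints: mod=3, (off+mod)%prod=0 */
	}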
@@ -3580,14 +3653,16 @@ xfs_bmap_search_extents(
3580 3653
3581 ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, 3654 ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp,
3582 lastxp, gotp, prevp); 3655 lastxp, gotp, prevp);
3583 rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME; 3656 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
3584 if(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM)) { 3657 if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) {
3585 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld " 3658 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld "
3586 "start_block : %llx start_off : %llx blkcnt : %llx " 3659 "start_block : %llx start_off : %llx blkcnt : %llx "
3587 "extent-state : %x \n", 3660 "extent-state : %x \n",
3588 (ip->i_mount)->m_fsname,(long long)ip->i_ino, 3661 (ip->i_mount)->m_fsname, (long long)ip->i_ino,
3589 gotp->br_startblock, gotp->br_startoff, 3662 (unsigned long long)gotp->br_startblock,
3590 gotp->br_blockcount,gotp->br_state); 3663 (unsigned long long)gotp->br_startoff,
3664 (unsigned long long)gotp->br_blockcount,
3665 gotp->br_state);
3591 } 3666 }
3592 return ep; 3667 return ep;
3593} 3668}
@@ -3875,7 +3950,7 @@ xfs_bmap_add_attrfork(
3875 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size); 3950 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3876 if (!ip->i_d.di_forkoff) 3951 if (!ip->i_d.di_forkoff)
3877 ip->i_d.di_forkoff = mp->m_attroffset >> 3; 3952 ip->i_d.di_forkoff = mp->m_attroffset >> 3;
3878 else if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) 3953 else if (mp->m_flags & XFS_MOUNT_ATTR2)
3879 version = 2; 3954 version = 2;
3880 break; 3955 break;
3881 default: 3956 default:
@@ -4023,13 +4098,13 @@ xfs_bmap_compute_maxlevels(
4023 */ 4098 */
4024 if (whichfork == XFS_DATA_FORK) { 4099 if (whichfork == XFS_DATA_FORK) {
4025 maxleafents = MAXEXTNUM; 4100 maxleafents = MAXEXTNUM;
4026 sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ? 4101 sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
4027 mp->m_attroffset : XFS_BMDR_SPACE_CALC(MINDBTPTRS); 4102 XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset;
4028 } else { 4103 } else {
4029 maxleafents = MAXAEXTNUM; 4104 maxleafents = MAXAEXTNUM;
4030 sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ? 4105 sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
4031 mp->m_sb.sb_inodesize - mp->m_attroffset : 4106 XFS_BMDR_SPACE_CALC(MINABTPTRS) :
4032 XFS_BMDR_SPACE_CALC(MINABTPTRS); 4107 mp->m_sb.sb_inodesize - mp->m_attroffset;
4033 } 4108 }
4034 maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0); 4109 maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
4035 minleafrecs = mp->m_bmap_dmnr[0]; 4110 minleafrecs = mp->m_bmap_dmnr[0];
@@ -4418,8 +4493,8 @@ xfs_bmap_read_extents(
4418 num_recs = be16_to_cpu(block->bb_numrecs); 4493 num_recs = be16_to_cpu(block->bb_numrecs);
4419 if (unlikely(i + num_recs > room)) { 4494 if (unlikely(i + num_recs > room)) {
4420 ASSERT(i + num_recs <= room); 4495 ASSERT(i + num_recs <= room);
4421 xfs_fs_cmn_err(CE_WARN, ip->i_mount, 4496 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
4422 "corrupt dinode %Lu, (btree extents). Unmount and run xfs_repair.", 4497 "corrupt dinode %Lu, (btree extents).",
4423 (unsigned long long) ip->i_ino); 4498 (unsigned long long) ip->i_ino);
4424 XFS_ERROR_REPORT("xfs_bmap_read_extents(1)", 4499 XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
4425 XFS_ERRLEVEL_LOW, 4500 XFS_ERRLEVEL_LOW,
@@ -4590,6 +4665,7 @@ xfs_bmapi(
4590 char contig; /* allocation must be one extent */ 4665 char contig; /* allocation must be one extent */
4591 char delay; /* this request is for delayed alloc */ 4666 char delay; /* this request is for delayed alloc */
4592 char exact; /* don't do all of wasdelayed extent */ 4667 char exact; /* don't do all of wasdelayed extent */
4668 char convert; /* unwritten extent I/O completion */
4593 xfs_bmbt_rec_t *ep; /* extent list entry pointer */ 4669 xfs_bmbt_rec_t *ep; /* extent list entry pointer */
4594 int error; /* error return */ 4670 int error; /* error return */
4595 xfs_bmbt_irec_t got; /* current extent list record */ 4671 xfs_bmbt_irec_t got; /* current extent list record */
@@ -4643,7 +4719,7 @@ xfs_bmapi(
4643 } 4719 }
4644 if (XFS_FORCED_SHUTDOWN(mp)) 4720 if (XFS_FORCED_SHUTDOWN(mp))
4645 return XFS_ERROR(EIO); 4721 return XFS_ERROR(EIO);
4646 rt = XFS_IS_REALTIME_INODE(ip); 4722 rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4647 ifp = XFS_IFORK_PTR(ip, whichfork); 4723 ifp = XFS_IFORK_PTR(ip, whichfork);
4648 ASSERT(ifp->if_ext_max == 4724 ASSERT(ifp->if_ext_max ==
4649 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); 4725 XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
@@ -4654,6 +4730,7 @@ xfs_bmapi(
4654 delay = (flags & XFS_BMAPI_DELAY) != 0; 4730 delay = (flags & XFS_BMAPI_DELAY) != 0;
4655 trim = (flags & XFS_BMAPI_ENTIRE) == 0; 4731 trim = (flags & XFS_BMAPI_ENTIRE) == 0;
4656 userdata = (flags & XFS_BMAPI_METADATA) == 0; 4732 userdata = (flags & XFS_BMAPI_METADATA) == 0;
4733 convert = (flags & XFS_BMAPI_CONVERT) != 0;
4657 exact = (flags & XFS_BMAPI_EXACT) != 0; 4734 exact = (flags & XFS_BMAPI_EXACT) != 0;
4658 rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; 4735 rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
4659 contig = (flags & XFS_BMAPI_CONTIG) != 0; 4736 contig = (flags & XFS_BMAPI_CONTIG) != 0;
@@ -4748,15 +4825,25 @@ xfs_bmapi(
4748 } 4825 }
4749 minlen = contig ? alen : 1; 4826 minlen = contig ? alen : 1;
4750 if (delay) { 4827 if (delay) {
4751 xfs_extlen_t extsz = 0; 4828 xfs_extlen_t extsz;
4752 4829
4753 /* Figure out the extent size, adjust alen */ 4830 /* Figure out the extent size, adjust alen */
4754 if (rt) { 4831 if (rt) {
4755 if (!(extsz = ip->i_d.di_extsize)) 4832 if (!(extsz = ip->i_d.di_extsize))
4756 extsz = mp->m_sb.sb_rextsize; 4833 extsz = mp->m_sb.sb_rextsize;
4757 alen = roundup(alen, extsz); 4834 } else {
4758 extsz = alen / mp->m_sb.sb_rextsize; 4835 extsz = ip->i_d.di_extsize;
4759 } 4836 }
4837 if (extsz) {
4838 error = xfs_bmap_extsize_align(mp,
4839 &got, &prev, extsz,
4840 rt, eof, delay, convert,
4841 &aoff, &alen);
4842 ASSERT(!error);
4843 }
4844
4845 if (rt)
4846 extsz = alen / mp->m_sb.sb_rextsize;
4760 4847
4761 /* 4848 /*
4762 * Make a transaction-less quota reservation for 4849 * Make a transaction-less quota reservation for
@@ -4785,32 +4872,33 @@ xfs_bmapi(
4785 xfs_bmap_worst_indlen(ip, alen); 4872 xfs_bmap_worst_indlen(ip, alen);
4786 ASSERT(indlen > 0); 4873 ASSERT(indlen > 0);
4787 4874
4788 if (rt) 4875 if (rt) {
4789 error = xfs_mod_incore_sb(mp, 4876 error = xfs_mod_incore_sb(mp,
4790 XFS_SBS_FREXTENTS, 4877 XFS_SBS_FREXTENTS,
4791 -(extsz), rsvd); 4878 -(extsz), rsvd);
4792 else 4879 } else {
4793 error = xfs_mod_incore_sb(mp, 4880 error = xfs_mod_incore_sb(mp,
4794 XFS_SBS_FDBLOCKS, 4881 XFS_SBS_FDBLOCKS,
4795 -(alen), rsvd); 4882 -(alen), rsvd);
4883 }
4796 if (!error) { 4884 if (!error) {
4797 error = xfs_mod_incore_sb(mp, 4885 error = xfs_mod_incore_sb(mp,
4798 XFS_SBS_FDBLOCKS, 4886 XFS_SBS_FDBLOCKS,
4799 -(indlen), rsvd); 4887 -(indlen), rsvd);
4800 if (error && rt) { 4888 if (error && rt)
4801 xfs_mod_incore_sb(ip->i_mount, 4889 xfs_mod_incore_sb(mp,
4802 XFS_SBS_FREXTENTS, 4890 XFS_SBS_FREXTENTS,
4803 extsz, rsvd); 4891 extsz, rsvd);
4804 } else if (error) { 4892 else if (error)
4805 xfs_mod_incore_sb(ip->i_mount, 4893 xfs_mod_incore_sb(mp,
4806 XFS_SBS_FDBLOCKS, 4894 XFS_SBS_FDBLOCKS,
4807 alen, rsvd); 4895 alen, rsvd);
4808 }
4809 } 4896 }
4810 4897
4811 if (error) { 4898 if (error) {
4812 if (XFS_IS_QUOTA_ON(ip->i_mount)) 4899 if (XFS_IS_QUOTA_ON(mp))
4813 /* unreserve the blocks now */ 4900 /* unreserve the blocks now */
4901 (void)
4814 XFS_TRANS_UNRESERVE_QUOTA_NBLKS( 4902 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
4815 mp, NULL, ip, 4903 mp, NULL, ip,
4816 (long)alen, 0, rt ? 4904 (long)alen, 0, rt ?
@@ -4849,6 +4937,7 @@ xfs_bmapi(
4849 bma.firstblock = *firstblock; 4937 bma.firstblock = *firstblock;
4850 bma.alen = alen; 4938 bma.alen = alen;
4851 bma.off = aoff; 4939 bma.off = aoff;
4940 bma.conv = convert;
4852 bma.wasdel = wasdelay; 4941 bma.wasdel = wasdelay;
4853 bma.minlen = minlen; 4942 bma.minlen = minlen;
4854 bma.low = flist->xbf_low; 4943 bma.low = flist->xbf_low;
@@ -5270,8 +5359,7 @@ xfs_bunmapi(
5270 return 0; 5359 return 0;
5271 } 5360 }
5272 XFS_STATS_INC(xs_blk_unmap); 5361 XFS_STATS_INC(xs_blk_unmap);
5273 isrt = (whichfork == XFS_DATA_FORK) && 5362 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5274 (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
5275 start = bno; 5363 start = bno;
5276 bno = start + len - 1; 5364 bno = start + len - 1;
5277 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, 5365 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
@@ -5443,7 +5531,7 @@ xfs_bunmapi(
5443 } 5531 }
5444 if (wasdel) { 5532 if (wasdel) {
5445 ASSERT(STARTBLOCKVAL(del.br_startblock) > 0); 5533 ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
5446 /* Update realtim/data freespace, unreserve quota */ 5534 /* Update realtime/data freespace, unreserve quota */
5447 if (isrt) { 5535 if (isrt) {
5448 xfs_filblks_t rtexts; 5536 xfs_filblks_t rtexts;
5449 5537
@@ -5451,14 +5539,14 @@ xfs_bunmapi(
5451 do_div(rtexts, mp->m_sb.sb_rextsize); 5539 do_div(rtexts, mp->m_sb.sb_rextsize);
5452 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, 5540 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5453 (int)rtexts, rsvd); 5541 (int)rtexts, rsvd);
5454 XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip, 5542 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5455 -((long)del.br_blockcount), 0, 5543 NULL, ip, -((long)del.br_blockcount), 0,
5456 XFS_QMOPT_RES_RTBLKS); 5544 XFS_QMOPT_RES_RTBLKS);
5457 } else { 5545 } else {
5458 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, 5546 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5459 (int)del.br_blockcount, rsvd); 5547 (int)del.br_blockcount, rsvd);
5460 XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip, 5548 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5461 -((long)del.br_blockcount), 0, 5549 NULL, ip, -((long)del.br_blockcount), 0,
5462 XFS_QMOPT_RES_REGBLKS); 5550 XFS_QMOPT_RES_REGBLKS);
5463 } 5551 }
5464 ip->i_delayed_blks -= del.br_blockcount; 5552 ip->i_delayed_blks -= del.br_blockcount;
@@ -5652,7 +5740,9 @@ xfs_getbmap(
5652 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5740 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5653 return XFS_ERROR(EINVAL); 5741 return XFS_ERROR(EINVAL);
5654 if (whichfork == XFS_DATA_FORK) { 5742 if (whichfork == XFS_DATA_FORK) {
5655 if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) { 5743 if ((ip->i_d.di_extsize && (ip->i_d.di_flags &
5744 (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) ||
5745 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5656 prealloced = 1; 5746 prealloced = 1;
5657 fixlen = XFS_MAXIOFFSET(mp); 5747 fixlen = XFS_MAXIOFFSET(mp);
5658 } else { 5748 } else {
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 2e0717a01309..12cc63dfc2c4 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -62,6 +62,10 @@ typedef struct xfs_bmap_free
62#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */ 62#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */
63 /* combine contig. space */ 63 /* combine contig. space */
64#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */ 64#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */
65/* XFS_BMAPI_DIRECT_IO 0x800 */
66#define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */
67 /* need write cache flushing and no */
68 /* additional allocation alignments */
65 69
66#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w) 70#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w)
67static inline int xfs_bmapi_aflag(int w) 71static inline int xfs_bmapi_aflag(int w)
@@ -101,7 +105,8 @@ typedef struct xfs_bmalloca {
101 char wasdel; /* replacing a delayed allocation */ 105 char wasdel; /* replacing a delayed allocation */
102 char userdata;/* set if is user data */ 106 char userdata;/* set if is user data */
103 char low; /* low on space, using seq'l ags */ 107 char low; /* low on space, using seq'l ags */
104 char aeof; /* allocated space at eof */ 108 char aeof; /* allocated space at eof */
109 char conv; /* overwriting unwritten extents */
105} xfs_bmalloca_t; 110} xfs_bmalloca_t;
106 111
107#ifdef __KERNEL__ 112#ifdef __KERNEL__
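Editorial note: the 0x800 slot is left reserved for the retired DIRECT_IO bit, so the new flag takes 0x1000. It is decoded in xfs_bmapi() like its siblings and threaded through as bma.conv and the convert argument of xfs_bmap_extsize_align(), which returns immediately when set (see the hunks above). Minimal decoding sketch:

	#include <stdio.h>

	#define XFS_BMAPI_CONVERT	0x1000	/* value from the hunk above */

	int main(void)
	{
		int flags = XFS_BMAPI_CONVERT;
		char convert = (flags & XFS_BMAPI_CONVERT) != 0; /* as in xfs_bmapi() */

		printf("convert=%d\n", convert);	/* 1 */
		return 0;
	}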
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
index 328a528b926d..f57cc9ac875e 100644
--- a/fs/xfs/xfs_clnt.h
+++ b/fs/xfs/xfs_clnt.h
@@ -57,7 +57,7 @@ struct xfs_mount_args {
57/* 57/*
58 * XFS mount option flags -- args->flags1 58 * XFS mount option flags -- args->flags1
59 */ 59 */
60#define XFSMNT_COMPAT_ATTR 0x00000001 /* do not use ATTR2 format */ 60#define XFSMNT_ATTR2 0x00000001 /* allow ATTR2 EA format */
61#define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount 61#define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount
62 * compatible */ 62 * compatible */
63#define XFSMNT_INO64 0x00000004 /* move inode numbers up 63#define XFSMNT_INO64 0x00000004 /* move inode numbers up
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 070259a4254c..c6191d00ad27 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -60,8 +60,6 @@ xfs_swapext(
60 xfs_bstat_t *sbp; 60 xfs_bstat_t *sbp;
61 struct file *fp = NULL, *tfp = NULL; 61 struct file *fp = NULL, *tfp = NULL;
62 vnode_t *vp, *tvp; 62 vnode_t *vp, *tvp;
63 bhv_desc_t *bdp, *tbdp;
64 vn_bhv_head_t *bhp, *tbhp;
65 static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL; 63 static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
66 int ilf_fields, tilf_fields; 64 int ilf_fields, tilf_fields;
67 int error = 0; 65 int error = 0;
@@ -90,13 +88,10 @@ xfs_swapext(
90 goto error0; 88 goto error0;
91 } 89 }
92 90
93 bhp = VN_BHV_HEAD(vp); 91 ip = xfs_vtoi(vp);
94 bdp = vn_bhv_lookup(bhp, &xfs_vnodeops); 92 if (ip == NULL) {
95 if (bdp == NULL) {
96 error = XFS_ERROR(EBADF); 93 error = XFS_ERROR(EBADF);
97 goto error0; 94 goto error0;
98 } else {
99 ip = XFS_BHVTOI(bdp);
100 } 95 }
101 96
102 if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || 97 if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) ||
@@ -105,13 +100,10 @@ xfs_swapext(
105 goto error0; 100 goto error0;
106 } 101 }
107 102
108 tbhp = VN_BHV_HEAD(tvp); 103 tip = xfs_vtoi(tvp);
109 tbdp = vn_bhv_lookup(tbhp, &xfs_vnodeops); 104 if (tip == NULL) {
110 if (tbdp == NULL) {
111 error = XFS_ERROR(EBADF); 105 error = XFS_ERROR(EBADF);
112 goto error0; 106 goto error0;
113 } else {
114 tip = XFS_BHVTOI(tbdp);
115 } 107 }
116 108
117 if (ip->i_mount != tip->i_mount) { 109 if (ip->i_mount != tip->i_mount) {
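Editorial note: both call sites collapse the open-coded behavior-chain lookup into xfs_vtoi(), which returns NULL when the vnode carries no XFS behavior. The helper itself is added elsewhere in this series; its likely shape, reconstructed as a sketch from the symbols removed above (not the verbatim implementation):

	static inline xfs_inode_t *
	xfs_vtoi(vnode_t *vp)
	{
		bhv_desc_t	*bdp;

		bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
		if (bdp == NULL)
			return NULL;
		return XFS_BHVTOI(bdp);
	}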
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index c5a0e537ff1a..79d0d9e1fbab 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -199,10 +199,16 @@ typedef enum xfs_dinode_fmt
199 199
200#define XFS_DFORK_DSIZE(dip,mp) \ 200#define XFS_DFORK_DSIZE(dip,mp) \
201 XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp) 201 XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp)
202#define XFS_DFORK_DSIZE_HOST(dip,mp) \
203 XFS_CFORK_DSIZE(&(dip)->di_core, mp)
202#define XFS_DFORK_ASIZE(dip,mp) \ 204#define XFS_DFORK_ASIZE(dip,mp) \
203 XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp) 205 XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp)
206#define XFS_DFORK_ASIZE_HOST(dip,mp) \
207 XFS_CFORK_ASIZE(&(dip)->di_core, mp)
204#define XFS_DFORK_SIZE(dip,mp,w) \ 208#define XFS_DFORK_SIZE(dip,mp,w) \
205 XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w) 209 XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w)
210#define XFS_DFORK_SIZE_HOST(dip,mp,w) \
211 XFS_CFORK_SIZE(&(dip)->di_core, mp, w)
206 212
207#define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core) 213#define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core)
208#define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core) 214#define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core)
@@ -216,6 +222,7 @@ typedef enum xfs_dinode_fmt
216#define XFS_CFORK_FMT_SET(dcp,w,n) \ 222#define XFS_CFORK_FMT_SET(dcp,w,n) \
217 ((w) == XFS_DATA_FORK ? \ 223 ((w) == XFS_DATA_FORK ? \
218 ((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n))) 224 ((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n)))
225#define XFS_DFORK_FORMAT(dip,w) XFS_CFORK_FORMAT(&(dip)->di_core, w)
219 226
220#define XFS_CFORK_NEXTENTS_DISK(dcp,w) \ 227#define XFS_CFORK_NEXTENTS_DISK(dcp,w) \
221 ((w) == XFS_DATA_FORK ? \ 228 ((w) == XFS_DATA_FORK ? \
@@ -223,13 +230,13 @@ typedef enum xfs_dinode_fmt
223 INT_GET((dcp)->di_anextents, ARCH_CONVERT)) 230 INT_GET((dcp)->di_anextents, ARCH_CONVERT))
224#define XFS_CFORK_NEXTENTS(dcp,w) \ 231#define XFS_CFORK_NEXTENTS(dcp,w) \
225 ((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents) 232 ((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents)
233#define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
234#define XFS_DFORK_NEXTENTS_HOST(dip,w) XFS_CFORK_NEXTENTS(&(dip)->di_core, w)
226 235
227#define XFS_CFORK_NEXT_SET(dcp,w,n) \ 236#define XFS_CFORK_NEXT_SET(dcp,w,n) \
228 ((w) == XFS_DATA_FORK ? \ 237 ((w) == XFS_DATA_FORK ? \
229 ((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n))) 238 ((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n)))
230 239
231#define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
232
233#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) 240#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp))
234 241
235/* 242/*
@@ -246,8 +253,10 @@ typedef enum xfs_dinode_fmt
246#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */ 253#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */
247#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */ 254#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */
248#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */ 255#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */
249#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */ 256#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */
250#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */ 257#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
258#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
259#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
251#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) 260#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
252#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) 261#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
253#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) 262#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -259,11 +268,14 @@ typedef enum xfs_dinode_fmt
259#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT) 268#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
260#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT) 269#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
261#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT) 270#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
271#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
272#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
262 273
263#define XFS_DIFLAG_ANY \ 274#define XFS_DIFLAG_ANY \
264 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ 275 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
265 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ 276 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
266 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ 277 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
267 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS) 278 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
279 XFS_DIFLAG_EXTSZINHERIT)
268 280
269#endif /* __XFS_DINODE_H__ */ 281#endif /* __XFS_DINODE_H__ */
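Editorial note: with the two new inode flags, the allocator picks its alignment roughly as follows (restating the xfs_bmap_alloc() hunk earlier in this patch; simplified -- the non-realtime case additionally requires ap->userdata):

	/* Fragment, not standalone code: */
	if (rt)						/* realtime data fork */
		align = di_extsize ? di_extsize : sb_rextsize;
	else if (di_extsize && (di_flags & XFS_DIFLAG_EXTSIZE))
		align = di_extsize;			/* new EXTSIZE hint */
	else
		align = 0;				/* no extra alignment */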
diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c
index 3dd30391f551..bb87d2a700a9 100644
--- a/fs/xfs/xfs_dir.c
+++ b/fs/xfs/xfs_dir.c
@@ -176,7 +176,7 @@ xfs_dir_mount(xfs_mount_t *mp)
176 uint shortcount, leafcount, count; 176 uint shortcount, leafcount, count;
177 177
178 mp->m_dirversion = 1; 178 mp->m_dirversion = 1;
179 if (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) { 179 if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
180 shortcount = (mp->m_attroffset - 180 shortcount = (mp->m_attroffset -
181 (uint)sizeof(xfs_dir_sf_hdr_t)) / 181 (uint)sizeof(xfs_dir_sf_hdr_t)) /
182 (uint)sizeof(xfs_dir_sf_entry_t); 182 (uint)sizeof(xfs_dir_sf_entry_t);
diff --git a/fs/xfs/xfs_dir.h b/fs/xfs/xfs_dir.h
index 488defe86ba6..8cc8afb9f6c0 100644
--- a/fs/xfs/xfs_dir.h
+++ b/fs/xfs/xfs_dir.h
@@ -135,6 +135,8 @@ void xfs_dir_startup(void); /* called exactly once */
135 ((mp)->m_dirops.xd_shortform_to_single(args)) 135 ((mp)->m_dirops.xd_shortform_to_single(args))
136 136
137#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1) 137#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1)
138#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2)
138extern xfs_dirops_t xfsv1_dirops; 139extern xfs_dirops_t xfsv1_dirops;
140extern xfs_dirops_t xfsv2_dirops;
139 141
140#endif /* __XFS_DIR_H__ */ 142#endif /* __XFS_DIR_H__ */
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 7e24ffeda9e1..3158f5dc431f 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -72,9 +72,6 @@ typedef struct xfs_dir2_put_args {
72 struct uio *uio; /* uio control structure */ 72 struct uio *uio; /* uio control structure */
73} xfs_dir2_put_args_t; 73} xfs_dir2_put_args_t;
74 74
75#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2)
76extern xfs_dirops_t xfsv2_dirops;
77
78/* 75/*
79 * Other interfaces used by the rest of the dir v2 code. 76 * Other interfaces used by the rest of the dir v2 code.
80 */ 77 */
diff --git a/fs/xfs/xfs_dir_leaf.h b/fs/xfs/xfs_dir_leaf.h
index ab6b09eef9ab..eb8cd9a4667f 100644
--- a/fs/xfs/xfs_dir_leaf.h
+++ b/fs/xfs/xfs_dir_leaf.h
@@ -67,34 +67,38 @@ struct xfs_trans;
67 */ 67 */
68#define XFS_DIR_LEAF_MAPSIZE 3 /* how many freespace slots */ 68#define XFS_DIR_LEAF_MAPSIZE 3 /* how many freespace slots */
69 69
70typedef struct xfs_dir_leaf_map { /* RLE map of free bytes */
71 __uint16_t base; /* base of free region */
72 __uint16_t size; /* run length of free region */
73} xfs_dir_leaf_map_t;
74
75typedef struct xfs_dir_leaf_hdr { /* constant-structure header block */
76 xfs_da_blkinfo_t info; /* block type, links, etc. */
77 __uint16_t count; /* count of active leaf_entry's */
78 __uint16_t namebytes; /* num bytes of name strings stored */
79 __uint16_t firstused; /* first used byte in name area */
80 __uint8_t holes; /* != 0 if blk needs compaction */
81 __uint8_t pad1;
82 xfs_dir_leaf_map_t freemap[XFS_DIR_LEAF_MAPSIZE];
83} xfs_dir_leaf_hdr_t;
84
85typedef struct xfs_dir_leaf_entry { /* sorted on key, not name */
86 xfs_dahash_t hashval; /* hash value of name */
87 __uint16_t nameidx; /* index into buffer of name */
88 __uint8_t namelen; /* length of name string */
89 __uint8_t pad2;
90} xfs_dir_leaf_entry_t;
91
92typedef struct xfs_dir_leaf_name {
93 xfs_dir_ino_t inumber; /* inode number for this key */
94 __uint8_t name[1]; /* name string itself */
95} xfs_dir_leaf_name_t;
96
70typedef struct xfs_dir_leafblock { 97typedef struct xfs_dir_leafblock {
71 struct xfs_dir_leaf_hdr { /* constant-structure header block */ 98 xfs_dir_leaf_hdr_t hdr; /* constant-structure header block */
72 xfs_da_blkinfo_t info; /* block type, links, etc. */ 99 xfs_dir_leaf_entry_t entries[1]; /* var sized array */
73 __uint16_t count; /* count of active leaf_entry's */ 100 xfs_dir_leaf_name_t namelist[1]; /* grows from bottom of buf */
74 __uint16_t namebytes; /* num bytes of name strings stored */
75 __uint16_t firstused; /* first used byte in name area */
76 __uint8_t holes; /* != 0 if blk needs compaction */
77 __uint8_t pad1;
78 struct xfs_dir_leaf_map {/* RLE map of free bytes */
79 __uint16_t base; /* base of free region */
80 __uint16_t size; /* run length of free region */
81 } freemap[XFS_DIR_LEAF_MAPSIZE]; /* N largest free regions */
82 } hdr;
83 struct xfs_dir_leaf_entry { /* sorted on key, not name */
84 xfs_dahash_t hashval; /* hash value of name */
85 __uint16_t nameidx; /* index into buffer of name */
86 __uint8_t namelen; /* length of name string */
87 __uint8_t pad2;
88 } entries[1]; /* var sized array */
89 struct xfs_dir_leaf_name {
90 xfs_dir_ino_t inumber; /* inode number for this key */
91 __uint8_t name[1]; /* name string itself */
92 } namelist[1]; /* grows from bottom of buf */
93} xfs_dir_leafblock_t; 101} xfs_dir_leafblock_t;
94typedef struct xfs_dir_leaf_hdr xfs_dir_leaf_hdr_t;
95typedef struct xfs_dir_leaf_map xfs_dir_leaf_map_t;
96typedef struct xfs_dir_leaf_entry xfs_dir_leaf_entry_t;
97typedef struct xfs_dir_leaf_name xfs_dir_leaf_name_t;
98 102
99/* 103/*
100 * Length of name for which a 512-byte block filesystem 104 * Length of name for which a 512-byte block filesystem
@@ -126,11 +130,10 @@ typedef union {
126#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \ 130#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \
127 ((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash)) 131 ((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash))
128 132
129typedef struct xfs_dir_put_args 133typedef struct xfs_dir_put_args {
130{
131 xfs_dircook_t cook; /* cookie of (next) entry */ 134 xfs_dircook_t cook; /* cookie of (next) entry */
132 xfs_intino_t ino; /* inode number */ 135 xfs_intino_t ino; /* inode number */
133 struct xfs_dirent *dbp; /* buffer pointer */ 136 struct xfs_dirent *dbp; /* buffer pointer */
134 char *name; /* directory entry name */ 137 char *name; /* directory entry name */
135 int namelen; /* length of name */ 138 int namelen; /* length of name */
136 int done; /* output: set if value was stored */ 139 int done; /* output: set if value was stored */
@@ -138,7 +141,8 @@ typedef struct xfs_dir_put_args
138 struct uio *uio; /* uio control structure */ 141 struct uio *uio; /* uio control structure */
139} xfs_dir_put_args_t; 142} xfs_dir_put_args_t;
140 143
141#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) xfs_dir_leaf_entsize_byname(len) 144#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) \
145 xfs_dir_leaf_entsize_byname(len)
142static inline int xfs_dir_leaf_entsize_byname(int len) 146static inline int xfs_dir_leaf_entsize_byname(int len)
143{ 147{
144 return (uint)sizeof(xfs_dir_leaf_name_t)-1 + len; 148 return (uint)sizeof(xfs_dir_leaf_name_t)-1 + len;
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index d7b6b5d16704..2a21c5024017 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -54,7 +54,6 @@ xfs_error_trap(int e)
54 if (e != xfs_etrap[i]) 54 if (e != xfs_etrap[i])
55 continue; 55 continue;
56 cmn_err(CE_NOTE, "xfs_error_trap: error %d", e); 56 cmn_err(CE_NOTE, "xfs_error_trap: error %d", e);
57 debug_stop_all_cpus((void *)-1LL);
58 BUG(); 57 BUG();
59 break; 58 break;
60 } 59 }
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 06d8a8426c16..26b8e709a569 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -18,9 +18,6 @@
18#ifndef __XFS_ERROR_H__ 18#ifndef __XFS_ERROR_H__
19#define __XFS_ERROR_H__ 19#define __XFS_ERROR_H__
20 20
21#define prdev(fmt,targ,args...) \
22 printk("XFS: device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
23
24#define XFS_ERECOVER 1 /* Failure to recover log */ 21#define XFS_ERECOVER 1 /* Failure to recover log */
25#define XFS_ELOGSTAT 2 /* Failure to stat log in user space */ 22#define XFS_ELOGSTAT 2 /* Failure to stat log in user space */
26#define XFS_ENOLOGSPACE 3 /* Reservation too large */ 23#define XFS_ENOLOGSPACE 3 /* Reservation too large */
@@ -182,8 +179,11 @@ extern int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud);
182struct xfs_mount; 179struct xfs_mount;
183/* PRINTFLIKE4 */ 180/* PRINTFLIKE4 */
184extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp, 181extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp,
185 char *fmt, ...); 182 char *fmt, ...);
186/* PRINTFLIKE3 */ 183/* PRINTFLIKE3 */
187extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...); 184extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
188 185
186#define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
187 xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args)
188
189#endif /* __XFS_ERROR_H__ */ 189#endif /* __XFS_ERROR_H__ */
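Editorial note: the wrapper appends the standard suffix so callers state only the specific complaint. Usage, as in the xfs_inode.c hunks below:

	xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
		"corrupt dinode %Lu, forkoff = 0x%x.",
		(unsigned long long)ip->i_ino, forkoff);
	/* logs: "corrupt dinode ..., forkoff = ... Unmount and run xfs_repair." */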
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index ba096f80f48d..14010f1fa82f 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -3,15 +3,15 @@
3 * All Rights Reserved. 3 * All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 6 * modify it under the terms of the GNU Lesser General Public License
7 * published by the Free Software Foundation. 7 * as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it would be useful, 9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU Lesser General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU Lesser General Public License
15 * along with this program; if not, write the Free Software Foundation, 15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */ 17 */
@@ -65,6 +65,8 @@ struct fsxattr {
65#define XFS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */ 65#define XFS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */
66#define XFS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */ 66#define XFS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */
67#define XFS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */ 67#define XFS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */
68#define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */
69#define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
68#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ 70#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
69 71
70/* 72/*
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index d1236d6f4045..163031c1e394 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -540,6 +540,32 @@ xfs_reserve_blocks(
540 return(0); 540 return(0);
541} 541}
542 542
543void
544xfs_fs_log_dummy(xfs_mount_t *mp)
545{
546 xfs_trans_t *tp;
547 xfs_inode_t *ip;
548
549
550 tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
551 atomic_inc(&mp->m_active_trans);
552 if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
553 xfs_trans_cancel(tp, 0);
554 return;
555 }
556
557 ip = mp->m_rootip;
558 xfs_ilock(ip, XFS_ILOCK_EXCL);
559
560 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
561 xfs_trans_ihold(tp, ip);
562 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
563 xfs_trans_set_sync(tp);
564 xfs_trans_commit(tp, 0, NULL);
565
566 xfs_iunlock(ip, XFS_ILOCK_EXCL);
567}
568
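Editorial note: xfs_fs_log_dummy() commits a synchronous no-op change to the root inode, forcing a log write. Its caller is outside this hunk; presumably it is the periodic sync path covering the log on an otherwise idle filesystem, along these lines:

	/* Hypothetical caller, not part of this patch; the predicate is
	 * assumed from the XFS log code of this era. */
	if (xfs_log_need_covered(mp))
		xfs_fs_log_dummy(mp);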
543int 569int
544xfs_fs_goingdown( 570xfs_fs_goingdown(
545 xfs_mount_t *mp, 571 xfs_mount_t *mp,
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index f32713f14f9a..300d0c9d61ad 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,5 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
25extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval, 25extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
26 xfs_fsop_resblks_t *outval); 26 xfs_fsop_resblks_t *outval);
27extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags); 27extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
28extern void xfs_fs_log_dummy(xfs_mount_t *mp);
28 29
29#endif /* __XFS_FSOPS_H__ */ 30#endif /* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index fc19eedbd11b..8e380a1fb79b 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -493,7 +493,6 @@ xfs_iget(
493 493
494retry: 494retry:
495 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { 495 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
496 bhv_desc_t *bdp;
497 xfs_inode_t *ip; 496 xfs_inode_t *ip;
498 497
499 vp = LINVFS_GET_VP(inode); 498 vp = LINVFS_GET_VP(inode);
@@ -517,14 +516,12 @@ retry:
517 * to wait for the inode to go away. 516 * to wait for the inode to go away.
518 */ 517 */
519 if (is_bad_inode(inode) || 518 if (is_bad_inode(inode) ||
520 ((bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), 519 ((ip = xfs_vtoi(vp)) == NULL)) {
521 &xfs_vnodeops)) == NULL)) {
522 iput(inode); 520 iput(inode);
523 delay(1); 521 delay(1);
524 goto retry; 522 goto retry;
525 } 523 }
526 524
527 ip = XFS_BHVTOI(bdp);
528 if (lock_flags != 0) 525 if (lock_flags != 0)
529 xfs_ilock(ip, lock_flags); 526 xfs_ilock(ip, lock_flags);
530 XFS_STATS_INC(xs_ig_found); 527 XFS_STATS_INC(xs_ig_found);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index df0d4572d70a..1d7f5a7e063e 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -404,9 +404,8 @@ xfs_iformat(
404 INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) + 404 INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
405 INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) > 405 INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
406 INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) { 406 INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
407 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
408 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu."
409 " Unmount and run xfs_repair.",
407 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
408 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
410 (unsigned long long)ip->i_ino, 409 (unsigned long long)ip->i_ino,
411 (int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) 410 (int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
412 + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)), 411 + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
@@ -418,9 +417,8 @@ xfs_iformat(
418 } 417 }
419 418
420 if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) { 419 if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
421 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
422 "corrupt dinode %Lu, forkoff = 0x%x."
423 " Unmount and run xfs_repair.",
420 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
421 "corrupt dinode %Lu, forkoff = 0x%x.",
424 (unsigned long long)ip->i_ino, 422 (unsigned long long)ip->i_ino,
425 (int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT))); 423 (int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
426 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, 424 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
@@ -451,8 +449,9 @@ xfs_iformat(
451 * no local regular files yet 449 * no local regular files yet
452 */ 450 */
453 if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) { 451 if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
454 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
455 "corrupt inode (local format for regular file) %Lu. Unmount and run xfs_repair.",
452 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
453 "corrupt inode %Lu "
454 "(local format for regular file).",
456 (unsigned long long) ip->i_ino); 455 (unsigned long long) ip->i_ino);
457 XFS_CORRUPTION_ERROR("xfs_iformat(4)", 456 XFS_CORRUPTION_ERROR("xfs_iformat(4)",
458 XFS_ERRLEVEL_LOW, 457 XFS_ERRLEVEL_LOW,
@@ -462,8 +461,9 @@ xfs_iformat(
462 461
463 di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT); 462 di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
464 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { 463 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
465 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
466 "corrupt inode %Lu (bad size %Ld for local inode). Unmount and run xfs_repair.",
464 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
465 "corrupt inode %Lu "
466 "(bad size %Ld for local inode).",
467 (unsigned long long) ip->i_ino, 467 (unsigned long long) ip->i_ino,
468 (long long) di_size); 468 (long long) di_size);
469 XFS_CORRUPTION_ERROR("xfs_iformat(5)", 469 XFS_CORRUPTION_ERROR("xfs_iformat(5)",
@@ -551,8 +551,9 @@ xfs_iformat_local(
551 * kmem_alloc() or memcpy() below. 551 * kmem_alloc() or memcpy() below.
552 */ 552 */
553 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 553 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
554 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
555 "corrupt inode %Lu (bad size %d for local fork, size = %d). Unmount and run xfs_repair.",
554 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
555 "corrupt inode %Lu "
556 "(bad size %d for local fork, size = %d).",
556 (unsigned long long) ip->i_ino, size, 557 (unsigned long long) ip->i_ino, size,
557 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); 558 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
558 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, 559 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
@@ -610,8 +611,8 @@ xfs_iformat_extents(
610 * kmem_alloc() or memcpy() below. 611 * kmem_alloc() or memcpy() below.
611 */ 612 */
612 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 613 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
613 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
614 "corrupt inode %Lu ((a)extents = %d). Unmount and run xfs_repair.",
614 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
615 "corrupt inode %Lu ((a)extents = %d).",
615 (unsigned long long) ip->i_ino, nex); 616 (unsigned long long) ip->i_ino, nex);
616 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, 617 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
617 ip->i_mount, dip); 618 ip->i_mount, dip);
@@ -692,8 +693,8 @@ xfs_iformat_btree(
692 || XFS_BMDR_SPACE_CALC(nrecs) > 693 || XFS_BMDR_SPACE_CALC(nrecs) >
693 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) 694 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
694 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { 695 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
695 xfs_fs_cmn_err(CE_WARN, ip->i_mount,
696 "corrupt inode %Lu (btree). Unmount and run xfs_repair.",
696 xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
697 "corrupt inode %Lu (btree).",
697 (unsigned long long) ip->i_ino); 698 (unsigned long long) ip->i_ino);
698 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW, 699 XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
699 ip->i_mount); 700 ip->i_mount);
@@ -809,6 +810,10 @@ _xfs_dic2xflags(
809 flags |= XFS_XFLAG_PROJINHERIT; 810 flags |= XFS_XFLAG_PROJINHERIT;
810 if (di_flags & XFS_DIFLAG_NOSYMLINKS) 811 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
811 flags |= XFS_XFLAG_NOSYMLINKS; 812 flags |= XFS_XFLAG_NOSYMLINKS;
813 if (di_flags & XFS_DIFLAG_EXTSIZE)
814 flags |= XFS_XFLAG_EXTSIZE;
815 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
816 flags |= XFS_XFLAG_EXTSZINHERIT;
812 } 817 }
813 818
814 return flags; 819 return flags;
@@ -1192,11 +1197,19 @@ xfs_ialloc(
1192 if ((mode & S_IFMT) == S_IFDIR) { 1197 if ((mode & S_IFMT) == S_IFDIR) {
1193 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1198 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1194 di_flags |= XFS_DIFLAG_RTINHERIT; 1199 di_flags |= XFS_DIFLAG_RTINHERIT;
1195 } else {
1200 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1201 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1202 ip->i_d.di_extsize = pip->i_d.di_extsize;
1203 }
1204 } else if ((mode & S_IFMT) == S_IFREG) {
1196 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) { 1205 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
1197 di_flags |= XFS_DIFLAG_REALTIME; 1206 di_flags |= XFS_DIFLAG_REALTIME;
1198 ip->i_iocore.io_flags |= XFS_IOCORE_RT; 1207 ip->i_iocore.io_flags |= XFS_IOCORE_RT;
1199 } 1208 }
1209 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1210 di_flags |= XFS_DIFLAG_EXTSIZE;
1211 ip->i_d.di_extsize = pip->i_d.di_extsize;
1212 }
1200 } 1213 }
1201 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) && 1214 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1202 xfs_inherit_noatime) 1215 xfs_inherit_noatime)
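
The xfs_ialloc() hunk above encodes the extent-size inheritance rule: a directory carrying XFS_DIFLAG_EXTSZINHERIT passes the flag itself (plus di_extsize) to child directories, while child regular files receive a concrete XFS_DIFLAG_EXTSIZE hint instead. Condensed into a sketch, with S_ISDIR/S_ISREG standing in for the (mode & S_IFMT) tests:

	/* pip = parent inode, ip = freshly allocated inode */
	if (S_ISDIR(mode) && (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT)) {
		di_flags |= XFS_DIFLAG_EXTSZINHERIT;	/* keep propagating */
		ip->i_d.di_extsize = pip->i_d.di_extsize;
	} else if (S_ISREG(mode) && (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT)) {
		di_flags |= XFS_DIFLAG_EXTSIZE;		/* concrete hint */
		ip->i_d.di_extsize = pip->i_d.di_extsize;
	}
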
@@ -1262,7 +1275,7 @@ xfs_isize_check(
1262 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) 1275 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1263 return; 1276 return;
1264 1277
1265 if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME )
1278 if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
1266 return; 1279 return;
1267 1280
1268 nimaps = 2; 1281 nimaps = 2;
@@ -1765,22 +1778,19 @@ xfs_igrow_start(
1765 xfs_fsize_t new_size, 1778 xfs_fsize_t new_size,
1766 cred_t *credp) 1779 cred_t *credp)
1767{ 1780{
1768 xfs_fsize_t isize;
1769 int error; 1781 int error;
1770 1782
1771 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1783 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1772 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1784 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1773 ASSERT(new_size > ip->i_d.di_size); 1785 ASSERT(new_size > ip->i_d.di_size);
1774 1786
1775 error = 0;
1776 isize = ip->i_d.di_size;
1777 /* 1787 /*
1778 * Zero any pages that may have been created by 1788 * Zero any pages that may have been created by
1779 * xfs_write_file() beyond the end of the file 1789 * xfs_write_file() beyond the end of the file
1780 * and any blocks between the old and new file sizes. 1790 * and any blocks between the old and new file sizes.
1781 */ 1791 */
1782 error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize,
1783 new_size);
1792 error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
1793 ip->i_d.di_size, new_size);
1784 return error; 1794 return error;
1785} 1795}
1786 1796
@@ -3355,6 +3365,11 @@ xfs_iflush_int(
3355 ip->i_update_core = 0; 3365 ip->i_update_core = 0;
3356 SYNCHRONIZE(); 3366 SYNCHRONIZE();
3357 3367
3368 /*
3369 * Make sure to get the latest atime from the Linux inode.
3370 */
3371 xfs_synchronize_atime(ip);
3372
3358 if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC, 3373 if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
3359 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 3374 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3360 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 3375 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
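
Both flush paths now call xfs_synchronize_atime() so the dinode that reaches the log or the disk carries the access time Linux has been updating lazily in the generic inode. The helper's body is outside the quoted hunks; presumably it is a copy along these lines:

void
xfs_synchronize_atime(
	xfs_inode_t	*ip)
{
	vnode_t		*vp;

	vp = XFS_ITOV_NULL(ip);
	if (vp) {
		struct inode	*inode = LINVFS_GET_IP(vp);

		ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
		ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
	}
}
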
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 124d30e6143b..1cfbcf18ce86 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -436,6 +436,10 @@ void xfs_ichgtime(xfs_inode_t *, int);
436xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); 436xfs_fsize_t xfs_file_last_byte(xfs_inode_t *);
437void xfs_lock_inodes(xfs_inode_t **, int, int, uint); 437void xfs_lock_inodes(xfs_inode_t **, int, int, uint);
438 438
439xfs_inode_t *xfs_vtoi(struct vnode *vp);
440
441void xfs_synchronize_atime(xfs_inode_t *);
442
439#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) 443#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
440 444
441#ifdef DEBUG 445#ifdef DEBUG
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7f3363c621e1..36aa1fcb90a5 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -271,6 +271,11 @@ xfs_inode_item_format(
271 if (ip->i_update_size) 271 if (ip->i_update_size)
272 ip->i_update_size = 0; 272 ip->i_update_size = 0;
273 273
274 /*
275 * Make sure to get the latest atime from the Linux inode.
276 */
277 xfs_synchronize_atime(ip);
278
274 vecp->i_addr = (xfs_caddr_t)&ip->i_d; 279 vecp->i_addr = (xfs_caddr_t)&ip->i_d;
275 vecp->i_len = sizeof(xfs_dinode_core_t); 280 vecp->i_len = sizeof(xfs_dinode_core_t);
276 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE); 281 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
@@ -603,7 +608,7 @@ xfs_inode_item_trylock(
603 if (iip->ili_pushbuf_flag == 0) { 608 if (iip->ili_pushbuf_flag == 0) {
604 iip->ili_pushbuf_flag = 1; 609 iip->ili_pushbuf_flag = 1;
605#ifdef DEBUG 610#ifdef DEBUG
606 iip->ili_push_owner = get_thread_id();
611 iip->ili_push_owner = current_pid();
607#endif 612#endif
608 /* 613 /*
609 * Inode is left locked in shared mode. 614 * Inode is left locked in shared mode.
@@ -782,7 +787,7 @@ xfs_inode_item_pushbuf(
782 * trying to duplicate our effort. 787 * trying to duplicate our effort.
783 */ 788 */
784 ASSERT(iip->ili_pushbuf_flag != 0); 789 ASSERT(iip->ili_pushbuf_flag != 0);
785 ASSERT(iip->ili_push_owner == get_thread_id());
790 ASSERT(iip->ili_push_owner == current_pid());
786 791
787 /* 792 /*
788 * If flushlock isn't locked anymore, chances are that the 793 * If flushlock isn't locked anymore, chances are that the
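
get_thread_id() gives way to current_pid() in this DEBUG-only push-owner bookkeeping; presumably the latter is a thin wrapper in the Linux support headers, something like:

#define current_pid()	(current->pid)	/* sketch; actual definition not shown */
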
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index ca7afc83a893..788917f355c4 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -262,7 +262,7 @@ phase2:
262 case BMAPI_WRITE: 262 case BMAPI_WRITE:
263 /* If we found an extent, return it */ 263 /* If we found an extent, return it */
264 if (nimaps && 264 if (nimaps &&
265 (imap.br_startblock != HOLESTARTBLOCK) && 265 (imap.br_startblock != HOLESTARTBLOCK) &&
266 (imap.br_startblock != DELAYSTARTBLOCK)) { 266 (imap.br_startblock != DELAYSTARTBLOCK)) {
267 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io, 267 xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
268 offset, count, iomapp, &imap, flags); 268 offset, count, iomapp, &imap, flags);
@@ -317,6 +317,58 @@ out:
317} 317}
318 318
319STATIC int 319STATIC int
320xfs_iomap_eof_align_last_fsb(
321 xfs_mount_t *mp,
322 xfs_iocore_t *io,
323 xfs_fsize_t isize,
324 xfs_extlen_t extsize,
325 xfs_fileoff_t *last_fsb)
326{
327 xfs_fileoff_t new_last_fsb = 0;
328 xfs_extlen_t align;
329 int eof, error;
330
331 if (io->io_flags & XFS_IOCORE_RT)
332 ;
333 /*
334 * If mounted with the "-o swalloc" option, roundup the allocation
335 * request to a stripe width boundary if the file size is >=
336 * stripe width and we are allocating past the allocation eof.
337 */
338 else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
339 (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)))
340 new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
341 /*
342 * Roundup the allocation request to a stripe unit (m_dalign) boundary
343 * if the file size is >= stripe unit size, and we are allocating past
344 * the allocation eof.
345 */
346 else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)))
347 new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);
348
349 /*
350 * Always round up the allocation request to an extent boundary
351 * (when file on a real-time subvolume or has di_extsize hint).
352 */
353 if (extsize) {
354 if (new_last_fsb)
355 align = roundup_64(new_last_fsb, extsize);
356 else
357 align = extsize;
358 new_last_fsb = roundup_64(*last_fsb, align);
359 }
360
361 if (new_last_fsb) {
362 error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
363 if (error)
364 return error;
365 if (eof)
366 *last_fsb = new_last_fsb;
367 }
368 return 0;
369}
370
371STATIC int
320xfs_flush_space( 372xfs_flush_space(
321 xfs_inode_t *ip, 373 xfs_inode_t *ip,
322 int *fsynced, 374 int *fsynced,
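
xfs_iomap_eof_align_last_fsb() centralizes three rounding policies that used to be open-coded at each call site: stripe-width rounding under "-o swalloc", stripe-unit (m_dalign) rounding, and extent-size rounding, with the extent size applied on top of any stripe alignment, and the result taken only if it still lies beyond the allocation EOF. A worked example, assuming roundup_64(x, y) rounds x up to the next multiple of y:

	/*
	 * Example: m_dalign = 16 fsb, extsize = 24 fsb, *last_fsb = 70.
	 * stripe-unit pass:   new_last_fsb = roundup_64(70, 16) = 80
	 * extent-size pass:   align        = roundup_64(80, 24) = 96
	 *                     new_last_fsb = roundup_64(70, 96) = 96
	 * If block 96 is still past the allocation EOF, *last_fsb = 96.
	 */
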
@@ -362,19 +414,20 @@ xfs_iomap_write_direct(
362 xfs_iocore_t *io = &ip->i_iocore; 414 xfs_iocore_t *io = &ip->i_iocore;
363 xfs_fileoff_t offset_fsb; 415 xfs_fileoff_t offset_fsb;
364 xfs_fileoff_t last_fsb; 416 xfs_fileoff_t last_fsb;
365 xfs_filblks_t count_fsb;
417 xfs_filblks_t count_fsb, resaligned;
366 xfs_fsblock_t firstfsb; 418 xfs_fsblock_t firstfsb;
419 xfs_extlen_t extsz, temp;
420 xfs_fsize_t isize;
367 int nimaps; 421 int nimaps;
368 int error;
369 int bmapi_flag; 422 int bmapi_flag;
370 int quota_flag; 423 int quota_flag;
371 int rt; 424 int rt;
372 xfs_trans_t *tp; 425 xfs_trans_t *tp;
373 xfs_bmbt_irec_t imap; 426 xfs_bmbt_irec_t imap;
374 xfs_bmap_free_t free_list; 427 xfs_bmap_free_t free_list;
375 xfs_filblks_t qblocks, resblks;
428 uint qblocks, resblks, resrtextents;
376 int committed; 429 int committed;
377 int resrtextents;
430 int error;
378 431
379 /* 432 /*
380 * Make sure that the dquots are there. This doesn't hold 433 * Make sure that the dquots are there. This doesn't hold
@@ -384,37 +437,52 @@ xfs_iomap_write_direct(
384 if (error) 437 if (error)
385 return XFS_ERROR(error); 438 return XFS_ERROR(error);
386 439
387 offset_fsb = XFS_B_TO_FSBT(mp, offset);
388 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
389 count_fsb = last_fsb - offset_fsb;
390 if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
391 xfs_fileoff_t map_last_fsb;
392
440 rt = XFS_IS_REALTIME_INODE(ip);
441 if (unlikely(rt)) {
442 if (!(extsz = ip->i_d.di_extsize))
443 extsz = mp->m_sb.sb_rextsize;
444 } else {
445 extsz = ip->i_d.di_extsize;
393 map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
394 if (map_last_fsb < last_fsb) {
395 last_fsb = map_last_fsb;
396 count_fsb = last_fsb - offset_fsb;
397 }
398 ASSERT(count_fsb > 0);
399 } 446 }
400 447
401 /*
402 * Determine if reserving space on the data or realtime partition.
403 */
448 isize = ip->i_d.di_size;
449 if (io->io_new_size > isize)
450 isize = io->io_new_size;
404 if ((rt = XFS_IS_REALTIME_INODE(ip))) {
405 xfs_extlen_t extsz;
406 451
407 if (!(extsz = ip->i_d.di_extsize))
408 extsz = mp->m_sb.sb_rextsize;
409 resrtextents = qblocks = (count_fsb + extsz - 1);
410 do_div(resrtextents, mp->m_sb.sb_rextsize);
411 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
412 quota_flag = XFS_QMOPT_RES_RTBLKS;
452 offset_fsb = XFS_B_TO_FSBT(mp, offset);
453 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
454 if ((offset + count) > isize) {
455 error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
456 &last_fsb);
457 if (error)
458 goto error_out;
413 } else { 459 } else {
414 resrtextents = 0;
415 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb);
416 quota_flag = XFS_QMOPT_RES_REGBLKS;
460 if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
461 last_fsb = MIN(last_fsb, (xfs_fileoff_t)
462 ret_imap->br_blockcount +
463 ret_imap->br_startoff);
463 ret_imap->br_startoff);
417 } 464 }
465 count_fsb = last_fsb - offset_fsb;
466 ASSERT(count_fsb > 0);
467
468 resaligned = count_fsb;
469 if (unlikely(extsz)) {
470 if ((temp = do_mod(offset_fsb, extsz)))
471 resaligned += temp;
472 if ((temp = do_mod(resaligned, extsz)))
473 resaligned += extsz - temp;
474 }
475
476 if (unlikely(rt)) {
477 resrtextents = qblocks = resaligned;
478 resrtextents /= mp->m_sb.sb_rextsize;
479 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
480 quota_flag = XFS_QMOPT_RES_RTBLKS;
481 } else {
482 resrtextents = 0;
483 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
484 quota_flag = XFS_QMOPT_RES_REGBLKS;
485 }
418 486
419 /* 487 /*
420 * Allocate and setup the transaction 488 * Allocate and setup the transaction
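
The resaligned computation above widens the reservation to cover the request after both ends are padded out to extent-size boundaries, so the quota and space reservations match what xfs_bmapi() may actually allocate. A worked example, assuming do_mod(a, b) returns a modulo b:

	/*
	 * Example: extsz = 8, offset_fsb = 13, count_fsb = 10 (blocks 13..22).
	 * resaligned = 10
	 * do_mod(13, 8) = 5  ->  resaligned = 15   (pad down to block 8)
	 * do_mod(15, 8) = 7  ->  resaligned = 16   (pad up to block 24)
	 * i.e. reserve for blocks 8..23 = 16 fsbs, not the raw 10.
	 */
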
@@ -425,7 +493,6 @@ xfs_iomap_write_direct(
425 XFS_WRITE_LOG_RES(mp), resrtextents, 493 XFS_WRITE_LOG_RES(mp), resrtextents,
426 XFS_TRANS_PERM_LOG_RES, 494 XFS_TRANS_PERM_LOG_RES,
427 XFS_WRITE_LOG_COUNT); 495 XFS_WRITE_LOG_COUNT);
428
429 /* 496 /*
430 * Check for running out of space, note: need lock to return 497 * Check for running out of space, note: need lock to return
431 */ 498 */
@@ -435,20 +502,20 @@ xfs_iomap_write_direct(
435 if (error) 502 if (error)
436 goto error_out; 503 goto error_out;
437 504
438 if (XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag)) {
439 error = (EDQUOT);
505 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
506 qblocks, 0, quota_flag);
507 if (error)
440 goto error1; 508 goto error1;
441 }
442 509
443 bmapi_flag = XFS_BMAPI_WRITE;
444 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 510 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
445 xfs_trans_ihold(tp, ip); 511 xfs_trans_ihold(tp, ip);
446 512
447 if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt))
513 bmapi_flag = XFS_BMAPI_WRITE;
514 if ((flags & BMAPI_DIRECT) && (offset < ip->i_d.di_size || extsz))
448 bmapi_flag |= XFS_BMAPI_PREALLOC; 515 bmapi_flag |= XFS_BMAPI_PREALLOC;
449 516
450 /* 517 /*
451 * Issue the bmapi() call to allocate the blocks
518 * Issue the xfs_bmapi() call to allocate the blocks
452 */ 519 */
453 XFS_BMAP_INIT(&free_list, &firstfsb); 520 XFS_BMAP_INIT(&free_list, &firstfsb);
454 nimaps = 1; 521 nimaps = 1;
@@ -483,8 +550,10 @@ xfs_iomap_write_direct(
483 "extent-state : %x \n", 550 "extent-state : %x \n",
484 (ip->i_mount)->m_fsname, 551 (ip->i_mount)->m_fsname,
485 (long long)ip->i_ino, 552 (long long)ip->i_ino,
486 ret_imap->br_startblock, ret_imap->br_startoff,
487 ret_imap->br_blockcount,ret_imap->br_state);
553 (unsigned long long)ret_imap->br_startblock,
554 (unsigned long long)ret_imap->br_startoff,
555 (unsigned long long)ret_imap->br_blockcount,
556 ret_imap->br_state);
488 } 557 }
489 return 0; 558 return 0;
490 559
@@ -500,6 +569,63 @@ error_out:
500 return XFS_ERROR(error); 569 return XFS_ERROR(error);
501} 570}
502 571
572/*
573 * If the caller is doing a write at the end of the file,
574 * then extend the allocation out to the file system's write
575 * iosize. We clean up any extra space left over when the
576 * file is closed in xfs_inactive().
577 *
578 * For sync writes, we are flushing delayed allocate space to
579 * try to make additional space available for allocation near
580 * the filesystem full boundary - preallocation hurts in that
581 * situation, of course.
582 */
583STATIC int
584xfs_iomap_eof_want_preallocate(
585 xfs_mount_t *mp,
586 xfs_iocore_t *io,
587 xfs_fsize_t isize,
588 xfs_off_t offset,
589 size_t count,
590 int ioflag,
591 xfs_bmbt_irec_t *imap,
592 int nimaps,
593 int *prealloc)
594{
595 xfs_fileoff_t start_fsb;
596 xfs_filblks_t count_fsb;
597 xfs_fsblock_t firstblock;
598 int n, error, imaps;
599
600 *prealloc = 0;
601 if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize)
602 return 0;
603
604 /*
605 * If there are any real blocks past eof, then don't
606 * do any speculative allocation.
607 */
608 start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
609 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
610 while (count_fsb > 0) {
611 imaps = nimaps;
612 firstblock = NULLFSBLOCK;
613 error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
614 0, &firstblock, 0, imap, &imaps, NULL);
615 if (error)
616 return error;
617 for (n = 0; n < imaps; n++) {
618 if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
619 (imap[n].br_startblock != DELAYSTARTBLOCK))
620 return 0;
621 start_fsb += imap[n].br_blockcount;
622 count_fsb -= imap[n].br_blockcount;
623 }
624 }
625 *prealloc = 1;
626 return 0;
627}
628
503int 629int
504xfs_iomap_write_delay( 630xfs_iomap_write_delay(
505 xfs_inode_t *ip, 631 xfs_inode_t *ip,
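
xfs_iomap_eof_want_preallocate() now owns the whole "should this write speculate past EOF?" decision: never for BMAPI_SYNC writes or writes that end inside the file, and never once a real (non-hole, non-delalloc) block is found between the write and XFS_MAXIOFFSET. The caller contract, sketched from the rewritten xfs_iomap_write_delay() below:

	error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count,
					ioflag, imap, XFS_WRITE_IMAPS,
					&prealloc);
	if (error)
		return error;
	if (prealloc)	/* extend the request out to the write iosize */
		last_fsb = ioalign + mp->m_writeio_blocks;
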
@@ -513,13 +639,15 @@ xfs_iomap_write_delay(
513 xfs_iocore_t *io = &ip->i_iocore; 639 xfs_iocore_t *io = &ip->i_iocore;
514 xfs_fileoff_t offset_fsb; 640 xfs_fileoff_t offset_fsb;
515 xfs_fileoff_t last_fsb; 641 xfs_fileoff_t last_fsb;
516 xfs_fsize_t isize;
642 xfs_off_t aligned_offset;
643 xfs_fileoff_t ioalign;
517 xfs_fsblock_t firstblock; 644 xfs_fsblock_t firstblock;
645 xfs_extlen_t extsz;
646 xfs_fsize_t isize;
518 int nimaps; 647 int nimaps;
519 int error;
520 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; 648 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
521 int aeof;
522 int fsynced = 0;
649 int prealloc, fsynced = 0;
650 int error;
523 651
524 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); 652 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
525 653
@@ -527,152 +655,57 @@ xfs_iomap_write_delay(
527 * Make sure that the dquots are there. This doesn't hold 655 * Make sure that the dquots are there. This doesn't hold
528 * the ilock across a disk read. 656 * the ilock across a disk read.
529 */ 657 */
530
531 error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); 658 error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
532 if (error) 659 if (error)
533 return XFS_ERROR(error); 660 return XFS_ERROR(error);
534 661
662 if (XFS_IS_REALTIME_INODE(ip)) {
663 if (!(extsz = ip->i_d.di_extsize))
664 extsz = mp->m_sb.sb_rextsize;
665 } else {
666 extsz = ip->i_d.di_extsize;
667 }
668
669 offset_fsb = XFS_B_TO_FSBT(mp, offset);
670
535retry: 671retry:
536 isize = ip->i_d.di_size; 672 isize = ip->i_d.di_size;
537 if (io->io_new_size > isize) {
673 if (io->io_new_size > isize)
538 isize = io->io_new_size; 674 isize = io->io_new_size;
539 }
540 675
541 aeof = 0;
542 offset_fsb = XFS_B_TO_FSBT(mp, offset);
543 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
544 /*
676 error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count,
677 ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
678 if (error)
679 return error;
545 * If the caller is doing a write at the end of the file,
546 * then extend the allocation (and the buffer used for the write)
547 * out to the file system's write iosize. We clean up any extra
548 * space left over when the file is closed in xfs_inactive().
549 *
550 * For sync writes, we are flushing delayed allocate space to
551 * try to make additional space available for allocation near
552 * the filesystem full boundary - preallocation hurts in that
553 * situation, of course.
554 */
555 if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
556 xfs_off_t aligned_offset;
557 xfs_filblks_t count_fsb;
558 unsigned int iosize;
559 xfs_fileoff_t ioalign;
560 int n;
561 xfs_fileoff_t start_fsb;
562 680
563 /*
681 if (prealloc) {
564 * If there are any real blocks past eof, then don't
565 * do any speculative allocation.
566 */
567 start_fsb = XFS_B_TO_FSBT(mp,
568 ((xfs_ufsize_t)(offset + count - 1)));
569 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
570 while (count_fsb > 0) {
571 nimaps = XFS_WRITE_IMAPS;
572 error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
573 0, &firstblock, 0, imap, &nimaps, NULL);
574 if (error) {
575 return error;
576 }
577 for (n = 0; n < nimaps; n++) {
578 if ( !(io->io_flags & XFS_IOCORE_RT) &&
579 !imap[n].br_startblock) {
580 cmn_err(CE_PANIC,"Access to block "
581 "zero: fs <%s> inode: %lld "
582 "start_block : %llx start_off "
583 ": %llx blkcnt : %llx "
584 "extent-state : %x \n",
585 (ip->i_mount)->m_fsname,
586 (long long)ip->i_ino,
587 imap[n].br_startblock,
588 imap[n].br_startoff,
589 imap[n].br_blockcount,
590 imap[n].br_state);
591 }
592 if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
593 (imap[n].br_startblock != DELAYSTARTBLOCK)) {
594 goto write_map;
595 }
596 start_fsb += imap[n].br_blockcount;
597 count_fsb -= imap[n].br_blockcount;
598 }
599 }
600 iosize = mp->m_writeio_blocks;
601 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); 682 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
602 ioalign = XFS_B_TO_FSBT(mp, aligned_offset); 683 ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
603 last_fsb = ioalign + iosize; 684 last_fsb = ioalign + mp->m_writeio_blocks;
604 aeof = 1; 685 } else {
686 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
605 } 687 }
606write_map:
607 nimaps = XFS_WRITE_IMAPS;
608 firstblock = NULLFSBLOCK;
609 688
610 /*
611 * If mounted with the "-o swalloc" option, roundup the allocation
612 * request to a stripe width boundary if the file size is >=
613 * stripe width and we are allocating past the allocation eof.
614 */
689 if (prealloc || extsz) {
690 error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
691 &last_fsb);
692 if (error)
615 if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth
616 && (mp->m_flags & XFS_MOUNT_SWALLOC)
617 && (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) {
618 int eof;
619 xfs_fileoff_t new_last_fsb;
620
621 new_last_fsb = roundup_64(last_fsb, mp->m_swidth);
622 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
623 if (error) {
624 return error;
625 }
626 if (eof) {
627 last_fsb = new_last_fsb;
628 }
629 /*
630 * Roundup the allocation request to a stripe unit (m_dalign) boundary
631 * if the file size is >= stripe unit size, and we are allocating past
632 * the allocation eof.
633 */
634 } else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign &&
635 (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
636 int eof;
637 xfs_fileoff_t new_last_fsb;
638 new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
639 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
640 if (error) {
641 return error;
642 }
643 if (eof) {
644 last_fsb = new_last_fsb;
645 }
646 /*
647 * Round up the allocation request to a real-time extent boundary
648 * if the file is on the real-time subvolume.
649 */
650 } else if (io->io_flags & XFS_IOCORE_RT && aeof) {
651 int eof;
652 xfs_fileoff_t new_last_fsb;
653
654 new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize);
655 error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
656 if (error) {
657 return error; 693 return error;
658 }
659 if (eof)
660 last_fsb = new_last_fsb;
661 } 694 }
695
696 nimaps = XFS_WRITE_IMAPS;
697 firstblock = NULLFSBLOCK;
662 error = xfs_bmapi(NULL, ip, offset_fsb, 698 error = xfs_bmapi(NULL, ip, offset_fsb,
663 (xfs_filblks_t)(last_fsb - offset_fsb), 699 (xfs_filblks_t)(last_fsb - offset_fsb),
664 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | 700 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
665 XFS_BMAPI_ENTIRE, &firstblock, 1, imap, 701 XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
666 &nimaps, NULL); 702 &nimaps, NULL);
667 /*
703 if (error && (error != ENOSPC))
668 * This can be EDQUOT, if nimaps == 0
669 */
670 if (error && (error != ENOSPC)) {
671 return XFS_ERROR(error); 704 return XFS_ERROR(error);
672 } 705
673 /* 706 /*
674 * If bmapi returned us nothing, and if we didn't get back EDQUOT, 707 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
675 * then we must have run out of space.
708 * then we must have run out of space - flush delalloc, and retry..
676 */ 709 */
677 if (nimaps == 0) { 710 if (nimaps == 0) {
678 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, 711 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
@@ -684,17 +717,21 @@ write_map:
684 goto retry; 717 goto retry;
685 } 718 }
686 719
687 *ret_imap = imap[0];
720 if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
688 *nmaps = 1;
689 if ( !(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
690 cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld " 721 cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld "
691 "start_block : %llx start_off : %llx blkcnt : %llx " 722 "start_block : %llx start_off : %llx blkcnt : %llx "
692 "extent-state : %x \n", 723 "extent-state : %x \n",
693 (ip->i_mount)->m_fsname, 724 (ip->i_mount)->m_fsname,
694 (long long)ip->i_ino, 725 (long long)ip->i_ino,
695 ret_imap->br_startblock, ret_imap->br_startoff,
696 ret_imap->br_blockcount,ret_imap->br_state);
726 (unsigned long long)ret_imap->br_startblock,
727 (unsigned long long)ret_imap->br_startoff,
728 (unsigned long long)ret_imap->br_blockcount,
729 ret_imap->br_state);
697 } 730 }
731
732 *ret_imap = imap[0];
733 *nmaps = 1;
734
698 return 0; 735 return 0;
699} 736}
700 737
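
After the refactor the delayed-allocation path reduces to a short pipeline; roughly (a sketch of the control flow, not the verbatim function):

	/*
	 * 1. attach dquots; pick extsz (rt inodes fall back to sb_rextsize)
	 * 2. retry: isize = max(di_size, io_new_size)
	 * 3. xfs_iomap_eof_want_preallocate() -> prealloc
	 * 4. last_fsb = prealloc ? ioalign + m_writeio_blocks
	 *                        : XFS_B_TO_FSB(mp, offset + count)
	 * 5. if (prealloc || extsz) round via xfs_iomap_eof_align_last_fsb()
	 * 6. xfs_bmapi(DELAY|WRITE|ENTIRE); nimaps == 0 -> flush and retry
	 */
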
@@ -820,17 +857,21 @@ xfs_iomap_write_allocate(
820 */ 857 */
821 858
822 for (i = 0; i < nimaps; i++) { 859 for (i = 0; i < nimaps; i++) {
823 if ( !(io->io_flags & XFS_IOCORE_RT) && 860 if (!(io->io_flags & XFS_IOCORE_RT) &&
824 !imap[i].br_startblock) { 861 !imap[i].br_startblock) {
825 cmn_err(CE_PANIC,"Access to block zero: " 862 cmn_err(CE_PANIC,"Access to block zero: "
826 "fs <%s> inode: %lld " 863 "fs <%s> inode: %lld "
827 "start_block : %llx start_off : %llx " 864 "start_block : %llx start_off : %llx "
828 "blkcnt : %llx extent-state : %x \n", 865 "blkcnt : %llx extent-state : %x \n",
829 (ip->i_mount)->m_fsname, 866 (ip->i_mount)->m_fsname,
830 (long long)ip->i_ino, 867 (long long)ip->i_ino,
831 imap[i].br_startblock,
832 imap[i].br_startoff,
833 imap[i].br_blockcount,imap[i].br_state);
868 (unsigned long long)
869 imap[i].br_startblock,
870 (unsigned long long)
871 imap[i].br_startoff,
872 (unsigned long long)
873 imap[i].br_blockcount,
874 imap[i].br_state);
834 } 875 }
835 if ((offset_fsb >= imap[i].br_startoff) && 876 if ((offset_fsb >= imap[i].br_startoff) &&
836 (offset_fsb < (imap[i].br_startoff + 877 (offset_fsb < (imap[i].br_startoff +
@@ -867,17 +908,17 @@ xfs_iomap_write_unwritten(
867{ 908{
868 xfs_mount_t *mp = ip->i_mount; 909 xfs_mount_t *mp = ip->i_mount;
869 xfs_iocore_t *io = &ip->i_iocore; 910 xfs_iocore_t *io = &ip->i_iocore;
870 xfs_trans_t *tp;
871 xfs_fileoff_t offset_fsb; 911 xfs_fileoff_t offset_fsb;
872 xfs_filblks_t count_fsb; 912 xfs_filblks_t count_fsb;
873 xfs_filblks_t numblks_fsb; 913 xfs_filblks_t numblks_fsb;
874 xfs_bmbt_irec_t imap;
914 xfs_fsblock_t firstfsb;
915 int nimaps;
916 xfs_trans_t *tp;
917 xfs_bmbt_irec_t imap;
918 xfs_bmap_free_t free_list;
919 uint resblks;
875 int committed; 920 int committed;
876 int error; 921 int error;
877 int nres;
878 int nimaps;
879 xfs_fsblock_t firstfsb;
880 xfs_bmap_free_t free_list;
881 922
882 xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, 923 xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
883 &ip->i_iocore, offset, count); 924 &ip->i_iocore, offset, count);
@@ -886,9 +927,9 @@ xfs_iomap_write_unwritten(
886 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 927 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
887 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); 928 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
888 929
889 do {
890 nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
930 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
891 931
932 do {
892 /* 933 /*
893 * set up a transaction to convert the range of extents 934 * set up a transaction to convert the range of extents
894 * from unwritten to real. Do allocations in a loop until 935 * from unwritten to real. Do allocations in a loop until
@@ -896,7 +937,7 @@ xfs_iomap_write_unwritten(
896 */ 937 */
897 938
898 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 939 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
899 error = xfs_trans_reserve(tp, nres,
940 error = xfs_trans_reserve(tp, resblks,
900 XFS_WRITE_LOG_RES(mp), 0, 941 XFS_WRITE_LOG_RES(mp), 0,
901 XFS_TRANS_PERM_LOG_RES, 942 XFS_TRANS_PERM_LOG_RES,
902 XFS_WRITE_LOG_COUNT); 943 XFS_WRITE_LOG_COUNT);
@@ -915,7 +956,7 @@ xfs_iomap_write_unwritten(
915 XFS_BMAP_INIT(&free_list, &firstfsb); 956 XFS_BMAP_INIT(&free_list, &firstfsb);
916 nimaps = 1; 957 nimaps = 1;
917 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, 958 error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
918 XFS_BMAPI_WRITE, &firstfsb,
959 XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
919 1, &imap, &nimaps, &free_list); 960 1, &imap, &nimaps, &free_list);
920 if (error) 961 if (error)
921 goto error_on_bmapi_transaction; 962 goto error_on_bmapi_transaction;
@@ -929,15 +970,17 @@ xfs_iomap_write_unwritten(
929 xfs_iunlock(ip, XFS_ILOCK_EXCL); 970 xfs_iunlock(ip, XFS_ILOCK_EXCL);
930 if (error) 971 if (error)
931 goto error0; 972 goto error0;
932 973
933 if ( !(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) { 974 if ( !(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) {
934 cmn_err(CE_PANIC,"Access to block zero: fs <%s> " 975 cmn_err(CE_PANIC,"Access to block zero: fs <%s> "
935 "inode: %lld start_block : %llx start_off : " 976 "inode: %lld start_block : %llx start_off : "
936 "%llx blkcnt : %llx extent-state : %x \n", 977 "%llx blkcnt : %llx extent-state : %x \n",
937 (ip->i_mount)->m_fsname, 978 (ip->i_mount)->m_fsname,
938 (long long)ip->i_ino, 979 (long long)ip->i_ino,
939 imap.br_startblock,imap.br_startoff,
940 imap.br_blockcount,imap.br_state);
980 (unsigned long long)imap.br_startblock,
981 (unsigned long long)imap.br_startoff,
982 (unsigned long long)imap.br_blockcount,
983 imap.br_state);
941 } 984 }
942 985
943 if ((numblks_fsb = imap.br_blockcount) == 0) { 986 if ((numblks_fsb = imap.br_blockcount) == 0) {
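
Two quieter changes in xfs_iomap_write_unwritten() above: the block reservation is hoisted out of the loop and doubled (resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1), and xfs_bmapi() is now passed XFS_BMAPI_CONVERT so it converts the existing unwritten extent in place instead of treating the range as a fresh allocation. The loop shape, sketched:

	/*
	 * do {
	 *         reserve resblks; ilock; join inode;
	 *         xfs_bmapi(tp, ip, offset_fsb, count_fsb,
	 *                   XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, ...);
	 *         commit; offset_fsb += imap.br_blockcount;
	 *         count_fsb -= imap.br_blockcount;
	 * } while (count_fsb > 0);
	 */
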
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f63646ead816..c59450e1be40 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -56,6 +56,7 @@ xfs_bulkstat_one_iget(
56{ 56{
57 xfs_dinode_core_t *dic; /* dinode core info pointer */ 57 xfs_dinode_core_t *dic; /* dinode core info pointer */
58 xfs_inode_t *ip; /* incore inode pointer */ 58 xfs_inode_t *ip; /* incore inode pointer */
59 vnode_t *vp;
59 int error; 60 int error;
60 61
61 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno); 62 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
@@ -72,6 +73,7 @@ xfs_bulkstat_one_iget(
72 goto out_iput; 73 goto out_iput;
73 } 74 }
74 75
76 vp = XFS_ITOV(ip);
75 dic = &ip->i_d; 77 dic = &ip->i_d;
76 78
77 /* xfs_iget returns the following without needing 79 /* xfs_iget returns the following without needing
@@ -84,8 +86,7 @@ xfs_bulkstat_one_iget(
84 buf->bs_uid = dic->di_uid; 86 buf->bs_uid = dic->di_uid;
85 buf->bs_gid = dic->di_gid; 87 buf->bs_gid = dic->di_gid;
86 buf->bs_size = dic->di_size; 88 buf->bs_size = dic->di_size;
87 buf->bs_atime.tv_sec = dic->di_atime.t_sec;
88 buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
89 vn_atime_to_bstime(vp, &buf->bs_atime);
89 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; 90 buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
90 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; 91 buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
91 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; 92 buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
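
bulkstat now reports the vnode's Linux-maintained atime rather than the possibly stale on-disk di_atime. vn_atime_to_bstime() is not shown in this patch; presumably it is a small copy helper along these lines (assuming the bstat time fields mirror a timespec):

	/* sketch */
	bstime->tv_sec = LINVFS_GET_IP(vp)->i_atime.tv_sec;
	bstime->tv_nsec = LINVFS_GET_IP(vp)->i_atime.tv_nsec;
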
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 29af51275ca9..3d9a36e77363 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -178,6 +178,83 @@ xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
178#define xlog_trace_iclog(iclog,state) 178#define xlog_trace_iclog(iclog,state)
179#endif /* XFS_LOG_TRACE */ 179#endif /* XFS_LOG_TRACE */
180 180
181
182static void
183xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
184{
185 if (*qp) {
186 tic->t_next = (*qp);
187 tic->t_prev = (*qp)->t_prev;
188 (*qp)->t_prev->t_next = tic;
189 (*qp)->t_prev = tic;
190 } else {
191 tic->t_prev = tic->t_next = tic;
192 *qp = tic;
193 }
194
195 tic->t_flags |= XLOG_TIC_IN_Q;
196}
197
198static void
199xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
200{
201 if (tic == tic->t_next) {
202 *qp = NULL;
203 } else {
204 *qp = tic->t_next;
205 tic->t_next->t_prev = tic->t_prev;
206 tic->t_prev->t_next = tic->t_next;
207 }
208
209 tic->t_next = tic->t_prev = NULL;
210 tic->t_flags &= ~XLOG_TIC_IN_Q;
211}
212
213static void
214xlog_grant_sub_space(struct log *log, int bytes)
215{
216 log->l_grant_write_bytes -= bytes;
217 if (log->l_grant_write_bytes < 0) {
218 log->l_grant_write_bytes += log->l_logsize;
219 log->l_grant_write_cycle--;
220 }
221
222 log->l_grant_reserve_bytes -= bytes;
223 if ((log)->l_grant_reserve_bytes < 0) {
224 log->l_grant_reserve_bytes += log->l_logsize;
225 log->l_grant_reserve_cycle--;
226 }
227
228}
229
230static void
231xlog_grant_add_space_write(struct log *log, int bytes)
232{
233 log->l_grant_write_bytes += bytes;
234 if (log->l_grant_write_bytes > log->l_logsize) {
235 log->l_grant_write_bytes -= log->l_logsize;
236 log->l_grant_write_cycle++;
237 }
238}
239
240static void
241xlog_grant_add_space_reserve(struct log *log, int bytes)
242{
243 log->l_grant_reserve_bytes += bytes;
244 if (log->l_grant_reserve_bytes > log->l_logsize) {
245 log->l_grant_reserve_bytes -= log->l_logsize;
246 log->l_grant_reserve_cycle++;
247 }
248}
249
250static inline void
251xlog_grant_add_space(struct log *log, int bytes)
252{
253 xlog_grant_add_space_write(log, bytes);
254 xlog_grant_add_space_reserve(log, bytes);
255}
256
257
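
The grant heads are (cycle, bytes) pairs into a circular log, so every add or subtract must wrap the byte count into [0, l_logsize) and carry into the cycle number; the new static functions make that arithmetic explicit and type-checked where the old macros selected a head with a 'w'/'r' character. A worked example:

	/*
	 * Example: l_logsize = 1000, write head = (cycle 7, bytes 900).
	 * xlog_grant_add_space_write(log, 250):
	 *         900 + 250 = 1150 > 1000  ->  bytes 150, cycle 8
	 * xlog_grant_sub_space(log, 300) afterwards:
	 *         150 - 300 = -150 < 0     ->  bytes 850, cycle 7
	 */
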
181/* 258/*
182 * NOTES: 259 * NOTES:
183 * 260 *
@@ -428,7 +505,7 @@ xfs_log_mount(xfs_mount_t *mp,
428 if (readonly) 505 if (readonly)
429 vfsp->vfs_flag &= ~VFS_RDONLY; 506 vfsp->vfs_flag &= ~VFS_RDONLY;
430 507
431 error = xlog_recover(mp->m_log, readonly);
508 error = xlog_recover(mp->m_log);
432 509
433 if (readonly) 510 if (readonly)
434 vfsp->vfs_flag |= VFS_RDONLY; 511 vfsp->vfs_flag |= VFS_RDONLY;
@@ -1320,8 +1397,7 @@ xlog_sync(xlog_t *log,
1320 1397
1321 /* move grant heads by roundoff in sync */ 1398 /* move grant heads by roundoff in sync */
1322 s = GRANT_LOCK(log); 1399 s = GRANT_LOCK(log);
1323 XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
1324 XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
1400 xlog_grant_add_space(log, roundoff);
1325 GRANT_UNLOCK(log, s); 1401 GRANT_UNLOCK(log, s);
1326 1402
1327 /* put cycle number in every block */ 1403 /* put cycle number in every block */
@@ -1515,7 +1591,6 @@ xlog_state_finish_copy(xlog_t *log,
1515 * print out info relating to regions written which consume 1591 * print out info relating to regions written which consume
1516 * the reservation 1592 * the reservation
1517 */ 1593 */
1518#if defined(XFS_LOG_RES_DEBUG)
1519STATIC void 1594STATIC void
1520xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket) 1595xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
1521{ 1596{
@@ -1605,11 +1680,11 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
1605 ticket->t_res_arr_sum, ticket->t_res_o_flow, 1680 ticket->t_res_arr_sum, ticket->t_res_o_flow,
1606 ticket->t_res_num_ophdrs, ophdr_spc, 1681 ticket->t_res_num_ophdrs, ophdr_spc,
1607 ticket->t_res_arr_sum + 1682 ticket->t_res_arr_sum +
1608 ticket->t_res_o_flow + ophdr_spc, 1683 ticket->t_res_o_flow + ophdr_spc,
1609 ticket->t_res_num); 1684 ticket->t_res_num);
1610 1685
1611 for (i = 0; i < ticket->t_res_num; i++) { 1686 for (i = 0; i < ticket->t_res_num; i++) {
1612 uint r_type = ticket->t_res_arr[i].r_type; 1687 uint r_type = ticket->t_res_arr[i].r_type;
1613 cmn_err(CE_WARN, 1688 cmn_err(CE_WARN,
1614 "region[%u]: %s - %u bytes\n", 1689 "region[%u]: %s - %u bytes\n",
1615 i, 1690 i,
@@ -1618,9 +1693,6 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
1618 ticket->t_res_arr[i].r_len); 1693 ticket->t_res_arr[i].r_len);
1619 } 1694 }
1620} 1695}
1621#else
1622#define xlog_print_tic_res(mp, ticket)
1623#endif
1624 1696
1625/* 1697/*
1626 * Write some region out to in-core log 1698 * Write some region out to in-core log
@@ -2389,7 +2461,7 @@ xlog_grant_log_space(xlog_t *log,
2389 2461
2390 /* something is already sleeping; insert new transaction at end */ 2462 /* something is already sleeping; insert new transaction at end */
2391 if (log->l_reserve_headq) { 2463 if (log->l_reserve_headq) {
2392 XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
2464 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2393 xlog_trace_loggrant(log, tic, 2465 xlog_trace_loggrant(log, tic,
2394 "xlog_grant_log_space: sleep 1"); 2466 "xlog_grant_log_space: sleep 1");
2395 /* 2467 /*
@@ -2422,7 +2494,7 @@ redo:
2422 log->l_grant_reserve_bytes); 2494 log->l_grant_reserve_bytes);
2423 if (free_bytes < need_bytes) { 2495 if (free_bytes < need_bytes) {
2424 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2496 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2425 XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
2497 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2426 xlog_trace_loggrant(log, tic, 2498 xlog_trace_loggrant(log, tic,
2427 "xlog_grant_log_space: sleep 2"); 2499 "xlog_grant_log_space: sleep 2");
2428 XFS_STATS_INC(xs_sleep_logspace); 2500 XFS_STATS_INC(xs_sleep_logspace);
@@ -2439,11 +2511,10 @@ redo:
2439 s = GRANT_LOCK(log); 2511 s = GRANT_LOCK(log);
2440 goto redo; 2512 goto redo;
2441 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2513 } else if (tic->t_flags & XLOG_TIC_IN_Q)
2442 XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
2514 xlog_del_ticketq(&log->l_reserve_headq, tic);
2443 2515
2444 /* we've got enough space */ 2516 /* we've got enough space */
2445 XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
2446 XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
2517 xlog_grant_add_space(log, need_bytes);
2447#ifdef DEBUG 2518#ifdef DEBUG
2448 tail_lsn = log->l_tail_lsn; 2519 tail_lsn = log->l_tail_lsn;
2449 /* 2520 /*
@@ -2464,7 +2535,7 @@ redo:
2464 2535
2465 error_return: 2536 error_return:
2466 if (tic->t_flags & XLOG_TIC_IN_Q) 2537 if (tic->t_flags & XLOG_TIC_IN_Q)
2467 XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
2538 xlog_del_ticketq(&log->l_reserve_headq, tic);
2468 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret"); 2539 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
2469 /* 2540 /*
2470 * If we are failing, make sure the ticket doesn't have any 2541 * If we are failing, make sure the ticket doesn't have any
@@ -2533,7 +2604,7 @@ xlog_regrant_write_log_space(xlog_t *log,
2533 2604
2534 if (ntic != log->l_write_headq) { 2605 if (ntic != log->l_write_headq) {
2535 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2606 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2536 XLOG_INS_TICKETQ(log->l_write_headq, tic);
2607 xlog_ins_ticketq(&log->l_write_headq, tic);
2537 2608
2538 xlog_trace_loggrant(log, tic, 2609 xlog_trace_loggrant(log, tic,
2539 "xlog_regrant_write_log_space: sleep 1"); 2610 "xlog_regrant_write_log_space: sleep 1");
@@ -2565,7 +2636,7 @@ redo:
2565 log->l_grant_write_bytes); 2636 log->l_grant_write_bytes);
2566 if (free_bytes < need_bytes) { 2637 if (free_bytes < need_bytes) {
2567 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) 2638 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2568 XLOG_INS_TICKETQ(log->l_write_headq, tic);
2639 xlog_ins_ticketq(&log->l_write_headq, tic);
2569 XFS_STATS_INC(xs_sleep_logspace); 2640 XFS_STATS_INC(xs_sleep_logspace);
2570 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); 2641 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
2571 2642
@@ -2581,9 +2652,10 @@ redo:
2581 s = GRANT_LOCK(log); 2652 s = GRANT_LOCK(log);
2582 goto redo; 2653 goto redo;
2583 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2654 } else if (tic->t_flags & XLOG_TIC_IN_Q)
2584 XLOG_DEL_TICKETQ(log->l_write_headq, tic);
2655 xlog_del_ticketq(&log->l_write_headq, tic);
2585 2656
2586 XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */
2657 /* we've got enough space */
2658 xlog_grant_add_space_write(log, need_bytes);
2587#ifdef DEBUG 2659#ifdef DEBUG
2588 tail_lsn = log->l_tail_lsn; 2660 tail_lsn = log->l_tail_lsn;
2589 if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) { 2661 if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
@@ -2600,7 +2672,7 @@ redo:
2600 2672
2601 error_return: 2673 error_return:
2602 if (tic->t_flags & XLOG_TIC_IN_Q) 2674 if (tic->t_flags & XLOG_TIC_IN_Q)
2603 XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
2675 xlog_del_ticketq(&log->l_reserve_headq, tic);
2604 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret"); 2676 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
2605 /* 2677 /*
2606 * If we are failing, make sure the ticket doesn't have any 2678 * If we are failing, make sure the ticket doesn't have any
@@ -2633,8 +2705,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2633 ticket->t_cnt--; 2705 ticket->t_cnt--;
2634 2706
2635 s = GRANT_LOCK(log); 2707 s = GRANT_LOCK(log);
2636 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
2637 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
2708 xlog_grant_sub_space(log, ticket->t_curr_res);
2638 ticket->t_curr_res = ticket->t_unit_res; 2709 ticket->t_curr_res = ticket->t_unit_res;
2639 XLOG_TIC_RESET_RES(ticket); 2710 XLOG_TIC_RESET_RES(ticket);
2640 xlog_trace_loggrant(log, ticket, 2711 xlog_trace_loggrant(log, ticket,
@@ -2647,7 +2718,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2647 return; 2718 return;
2648 } 2719 }
2649 2720
2650 XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r');
2721 xlog_grant_add_space_reserve(log, ticket->t_unit_res);
2651 xlog_trace_loggrant(log, ticket, 2722 xlog_trace_loggrant(log, ticket,
2652 "xlog_regrant_reserve_log_space: exit"); 2723 "xlog_regrant_reserve_log_space: exit");
2653 xlog_verify_grant_head(log, 0); 2724 xlog_verify_grant_head(log, 0);
@@ -2683,8 +2754,7 @@ xlog_ungrant_log_space(xlog_t *log,
2683 s = GRANT_LOCK(log); 2754 s = GRANT_LOCK(log);
2684 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); 2755 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
2685 2756
2686 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
2687 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
2757 xlog_grant_sub_space(log, ticket->t_curr_res);
2688 2758
2689 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current"); 2759 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
2690 2760
@@ -2693,8 +2763,7 @@ xlog_ungrant_log_space(xlog_t *log,
2693 */ 2763 */
2694 if (ticket->t_cnt > 0) { 2764 if (ticket->t_cnt > 0) {
2695 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); 2765 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2696 XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w');
2697 XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r');
2766 xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
2698 } 2767 }
2699 2768
2700 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); 2769 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
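
xlog_ins_ticketq()/xlog_del_ticketq() keep each wait queue as a circular doubly-linked list whose head pointer doubles as the empty test, which is why the old macros took the queue lvalue by name and the new functions take a struct xlog_ticket ** instead. Typical call shape, sketched from the sites above:

	if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
		xlog_ins_ticketq(&log->l_reserve_headq, tic);
	sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
	s = GRANT_LOCK(log);
	if (tic->t_flags & XLOG_TIC_IN_Q)
		xlog_del_ticketq(&log->l_reserve_headq, tic);
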
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index f40d4391fcfc..4b2ac88dbb83 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -96,7 +96,6 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
96 96
97 97
98/* Region types for iovec's i_type */ 98/* Region types for iovec's i_type */
99#if defined(XFS_LOG_RES_DEBUG)
100#define XLOG_REG_TYPE_BFORMAT 1 99#define XLOG_REG_TYPE_BFORMAT 1
101#define XLOG_REG_TYPE_BCHUNK 2 100#define XLOG_REG_TYPE_BCHUNK 2
102#define XLOG_REG_TYPE_EFI_FORMAT 3 101#define XLOG_REG_TYPE_EFI_FORMAT 3
@@ -117,21 +116,13 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
117#define XLOG_REG_TYPE_COMMIT 18 116#define XLOG_REG_TYPE_COMMIT 18
118#define XLOG_REG_TYPE_TRANSHDR 19 117#define XLOG_REG_TYPE_TRANSHDR 19
119#define XLOG_REG_TYPE_MAX 19 118#define XLOG_REG_TYPE_MAX 19
120#endif
121 119
122#if defined(XFS_LOG_RES_DEBUG)
123#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t)) 120#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
124#else
125#define XLOG_VEC_SET_TYPE(vecp, t)
126#endif
127
128 121
129typedef struct xfs_log_iovec { 122typedef struct xfs_log_iovec {
130 xfs_caddr_t i_addr; /* beginning address of region */ 123 xfs_caddr_t i_addr; /* beginning address of region */
131 int i_len; /* length in bytes of region */ 124 int i_len; /* length in bytes of region */
132#if defined(XFS_LOG_RES_DEBUG)
133 uint i_type; /* type of region */
134#endif
125 uint i_type; /* type of region */
135} xfs_log_iovec_t; 126} xfs_log_iovec_t;
136 127
137typedef void* xfs_log_ticket_t; 128typedef void* xfs_log_ticket_t;
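
With the XFS_LOG_RES_DEBUG conditionals gone, every log iovec carries an i_type and XLOG_VEC_SET_TYPE() always compiles to a real store, so per-region reservation accounting now works on production kernels too. Filling a vector therefore always looks like the inode-core case shown earlier:

	vecp->i_addr = (xfs_caddr_t)&ip->i_d;
	vecp->i_len = sizeof(xfs_dinode_core_t);
	XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);	/* tag the region */
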
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 4518b188ade6..34bcbf50789c 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -253,7 +253,6 @@ typedef __uint32_t xlog_tid_t;
253 253
254 254
255/* Ticket reservation region accounting */ 255/* Ticket reservation region accounting */
256#if defined(XFS_LOG_RES_DEBUG)
257#define XLOG_TIC_LEN_MAX 15 256#define XLOG_TIC_LEN_MAX 15
258#define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \ 257#define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \
259 (t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0) 258 (t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0)
@@ -278,15 +277,9 @@ typedef __uint32_t xlog_tid_t;
278 * we don't care about. 277 * we don't care about.
279 */ 278 */
280typedef struct xlog_res { 279typedef struct xlog_res {
281 uint r_len;
282 uint r_type;
280 uint r_len; /* region length :4 */
281 uint r_type; /* region's transaction type :4 */
283} xlog_res_t; 282} xlog_res_t;
284#else
285#define XLOG_TIC_RESET_RES(t)
286#define XLOG_TIC_ADD_OPHDR(t)
287#define XLOG_TIC_ADD_REGION(t, len, type)
288#endif
289
290 283
291typedef struct xlog_ticket { 284typedef struct xlog_ticket {
292 sv_t t_sema; /* sleep on this semaphore : 20 */ 285 sv_t t_sema; /* sleep on this semaphore : 20 */
@@ -301,14 +294,12 @@ typedef struct xlog_ticket {
301 char t_flags; /* properties of reservation : 1 */ 294 char t_flags; /* properties of reservation : 1 */
302 uint t_trans_type; /* transaction type : 4 */ 295 uint t_trans_type; /* transaction type : 4 */
303 296
304#if defined (XFS_LOG_RES_DEBUG)
305 /* reservation array fields */ 297 /* reservation array fields */
306 uint t_res_num; /* num in array : 4 */ 298 uint t_res_num; /* num in array : 4 */
307 xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : X */
308 uint t_res_num_ophdrs; /* num op hdrs : 4 */ 299 uint t_res_num_ophdrs; /* num op hdrs : 4 */
309 uint t_res_arr_sum; /* array sum : 4 */ 300 uint t_res_arr_sum; /* array sum : 4 */
310 uint t_res_o_flow; /* sum overflow : 4 */ 301 uint t_res_o_flow; /* sum overflow : 4 */
311#endif
302 xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : 8 * 15 */
312} xlog_ticket_t; 303} xlog_ticket_t;
313 304
314#endif 305#endif
@@ -494,71 +485,13 @@ typedef struct log {
494 485
495#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) 486#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
496 487
497#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \
498 { \
499 if (type == 'w') { \
500 (log)->l_grant_write_bytes -= (bytes); \
501 if ((log)->l_grant_write_bytes < 0) { \
502 (log)->l_grant_write_bytes += (log)->l_logsize; \
503 (log)->l_grant_write_cycle--; \
504 } \
505 } else { \
506 (log)->l_grant_reserve_bytes -= (bytes); \
507 if ((log)->l_grant_reserve_bytes < 0) { \
508 (log)->l_grant_reserve_bytes += (log)->l_logsize;\
509 (log)->l_grant_reserve_cycle--; \
510 } \
511 } \
512 }
513#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \
514 { \
515 if (type == 'w') { \
516 (log)->l_grant_write_bytes += (bytes); \
517 if ((log)->l_grant_write_bytes > (log)->l_logsize) { \
518 (log)->l_grant_write_bytes -= (log)->l_logsize; \
519 (log)->l_grant_write_cycle++; \
520 } \
521 } else { \
522 (log)->l_grant_reserve_bytes += (bytes); \
523 if ((log)->l_grant_reserve_bytes > (log)->l_logsize) { \
524 (log)->l_grant_reserve_bytes -= (log)->l_logsize;\
525 (log)->l_grant_reserve_cycle++; \
526 } \
527 } \
528 }
529#define XLOG_INS_TICKETQ(q, tic) \
530 { \
531 if (q) { \
532 (tic)->t_next = (q); \
533 (tic)->t_prev = (q)->t_prev; \
534 (q)->t_prev->t_next = (tic); \
535 (q)->t_prev = (tic); \
536 } else { \
537 (tic)->t_prev = (tic)->t_next = (tic); \
538 (q) = (tic); \
539 } \
540 (tic)->t_flags |= XLOG_TIC_IN_Q; \
541 }
542#define XLOG_DEL_TICKETQ(q, tic) \
543 { \
544 if ((tic) == (tic)->t_next) { \
545 (q) = NULL; \
546 } else { \
547 (q) = (tic)->t_next; \
548 (tic)->t_next->t_prev = (tic)->t_prev; \
549 (tic)->t_prev->t_next = (tic)->t_next; \
550 } \
551 (tic)->t_next = (tic)->t_prev = NULL; \
552 (tic)->t_flags &= ~XLOG_TIC_IN_Q; \
553 }
554 488
555/* common routines */ 489/* common routines */
556extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); 490extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
557extern int xlog_find_tail(xlog_t *log, 491extern int xlog_find_tail(xlog_t *log,
558 xfs_daddr_t *head_blk, 492 xfs_daddr_t *head_blk,
559 xfs_daddr_t *tail_blk,
560 int readonly);
561extern int xlog_recover(xlog_t *log, int readonly);
493 xfs_daddr_t *tail_blk);
494extern int xlog_recover(xlog_t *log);
562extern int xlog_recover_finish(xlog_t *log, int mfsi_flags); 495extern int xlog_recover_finish(xlog_t *log, int mfsi_flags);
563extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); 496extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
564extern void xlog_recover_process_iunlinks(xlog_t *log); 497extern void xlog_recover_process_iunlinks(xlog_t *log);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 8ab7df768063..7d46cbd6a07a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -783,8 +783,7 @@ int
783xlog_find_tail( 783xlog_find_tail(
784 xlog_t *log, 784 xlog_t *log,
785 xfs_daddr_t *head_blk, 785 xfs_daddr_t *head_blk,
786 xfs_daddr_t *tail_blk,
787 int readonly)
786 xfs_daddr_t *tail_blk)
788{ 787{
789 xlog_rec_header_t *rhead; 788 xlog_rec_header_t *rhead;
790 xlog_op_header_t *op_head; 789 xlog_op_header_t *op_head;
@@ -2563,10 +2562,12 @@ xlog_recover_do_quotaoff_trans(
2563 2562
2564 /* 2563 /*
2565 * The logitem format's flag tells us if this was user quotaoff, 2564 * The logitem format's flag tells us if this was user quotaoff,
2566 * group quotaoff or both.
2565 * group/project quotaoff or both.
2567 */ 2566 */
2568 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) 2567 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2569 log->l_quotaoffs_flag |= XFS_DQ_USER; 2568 log->l_quotaoffs_flag |= XFS_DQ_USER;
2569 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2570 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2570 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) 2571 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2571 log->l_quotaoffs_flag |= XFS_DQ_GROUP; 2572 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2572 2573
@@ -3890,14 +3891,13 @@ xlog_do_recover(
3890 */ 3891 */
3891int 3892int
3892xlog_recover( 3893xlog_recover(
3893 xlog_t *log,
3894 int readonly)
3894 xlog_t *log)
3895{ 3895{
3896 xfs_daddr_t head_blk, tail_blk; 3896 xfs_daddr_t head_blk, tail_blk;
3897 int error; 3897 int error;
3898 3898
3899 /* find the tail of the log */ 3899 /* find the tail of the log */
3900 if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
3900 if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3901 return error; 3901 return error;
3902 3902
3903 if (tail_blk != head_blk) { 3903 if (tail_blk != head_blk) {
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 303af86739bf..6088e14f84e3 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -51,7 +51,7 @@ STATIC int xfs_uuid_mount(xfs_mount_t *);
51STATIC void xfs_uuid_unmount(xfs_mount_t *mp); 51STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
52STATIC void xfs_unmountfs_wait(xfs_mount_t *); 52STATIC void xfs_unmountfs_wait(xfs_mount_t *);
53 53
54static struct {
54static const struct {
55 short offset; 55 short offset;
56 short type; /* 0 = integer 56 short type; /* 0 = integer
57 * 1 = binary / string (no translation) 57 * 1 = binary / string (no translation)
@@ -1077,8 +1077,7 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
1077 1077
1078 xfs_iflush_all(mp); 1078 xfs_iflush_all(mp);
1079 1079
1080 XFS_QM_DQPURGEALL(mp,
1081 XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
1080 XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
1082 1081
1083 /* 1082 /*
1084 * Flush out the log synchronously so that we know for sure 1083 * Flush out the log synchronously so that we know for sure
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 3432fd5a3986..cd3cf9613a00 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -308,7 +308,6 @@ typedef struct xfs_mount {
308 xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ 308 xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
309 xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ 309 xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
310 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ 310 xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
311#define m_dev m_ddev_targp->pbr_dev
312 __uint8_t m_dircook_elog; /* log d-cookie entry bits */ 311 __uint8_t m_dircook_elog; /* log d-cookie entry bits */
313 __uint8_t m_blkbit_log; /* blocklog + NBBY */ 312 __uint8_t m_blkbit_log; /* blocklog + NBBY */
314 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ 313 __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
@@ -393,7 +392,7 @@ typedef struct xfs_mount {
393 user */ 392 user */
394#define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment 393#define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment
395 allocations */ 394 allocations */
396#define XFS_MOUNT_COMPAT_ATTR (1ULL << 8) /* do not use attr2 format */ 395#define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */
397 /* (1ULL << 9) -- currently unused */ 396 /* (1ULL << 9) -- currently unused */
398#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */ 397#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */
399#define XFS_MOUNT_SHARED (1ULL << 11) /* shared mount */ 398#define XFS_MOUNT_SHARED (1ULL << 11) /* shared mount */
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 4d4e8f4e768e..81a05cfd77d2 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -243,7 +243,6 @@ xfs_rename(
243 xfs_inode_t *inodes[4]; 243 xfs_inode_t *inodes[4];
244 int target_ip_dropped = 0; /* dropped target_ip link? */ 244 int target_ip_dropped = 0; /* dropped target_ip link? */
245 vnode_t *src_dir_vp; 245 vnode_t *src_dir_vp;
246 bhv_desc_t *target_dir_bdp;
247 int spaceres; 246 int spaceres;
248 int target_link_zero = 0; 247 int target_link_zero = 0;
249 int num_inodes; 248 int num_inodes;
@@ -260,14 +259,12 @@ xfs_rename(
260 * Find the XFS behavior descriptor for the target directory 259 * Find the XFS behavior descriptor for the target directory
261 * vnode since it was not handed to us. 260 * vnode since it was not handed to us.
262 */ 261 */
263 target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp), 262 target_dp = xfs_vtoi(target_dir_vp);
264 &xfs_vnodeops); 263 if (target_dp == NULL) {
265 if (target_dir_bdp == NULL) {
266 return XFS_ERROR(EXDEV); 264 return XFS_ERROR(EXDEV);
267 } 265 }
268 266
269 src_dp = XFS_BHVTOI(src_dir_bdp); 267 src_dp = XFS_BHVTOI(src_dir_bdp);
270 target_dp = XFS_BHVTOI(target_dir_bdp);
271 mp = src_dp->i_mount; 268 mp = src_dp->i_mount;
272 269
273 if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) || 270 if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index c4b20872f07d..a59c102cf214 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -238,6 +238,7 @@ xfs_bioerror_relse(
238 } 238 }
239 return (EIO); 239 return (EIO);
240} 240}
241
241/* 242/*
 242 * Prints out an ALERT message about an I/O error. 243 * Prints out an ALERT message about an I/O error.

243 */ 244 */
@@ -252,11 +253,9 @@ xfs_ioerror_alert(
252 "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx" 253 "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
253 " (\"%s\") error %d buf count %zd", 254 " (\"%s\") error %d buf count %zd",
254 (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname, 255 (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
255 XFS_BUFTARG_NAME(bp->pb_target), 256 XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
256 (__uint64_t)blkno, 257 (__uint64_t)blkno, func,
257 func, 258 XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
258 XFS_BUF_GETERROR(bp),
259 XFS_BUF_COUNT(bp));
260} 259}
261 260
262/* 261/*
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 4a17d335f897..bf168a91ddb8 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -68,18 +68,6 @@ struct xfs_mount;
68 (XFS_SB_VERSION_NUMBITS | \ 68 (XFS_SB_VERSION_NUMBITS | \
69 XFS_SB_VERSION_OKREALFBITS | \ 69 XFS_SB_VERSION_OKREALFBITS | \
70 XFS_SB_VERSION_OKSASHFBITS) 70 XFS_SB_VERSION_OKSASHFBITS)
71#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag,morebits) \
72 (((ia) || (dia) || (extflag) || (dirv2) || (na) || (sflag) || \
73 (morebits)) ? \
74 (XFS_SB_VERSION_4 | \
75 ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \
76 ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \
77 ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \
78 ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \
79 ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \
80 ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \
81 ((morebits) ? XFS_SB_VERSION_MOREBITSBIT : 0)) : \
82 XFS_SB_VERSION_1)
83 71
84/* 72/*
85 * There are two words to hold XFS "feature" bits: the original 73 * There are two words to hold XFS "feature" bits: the original
@@ -105,11 +93,6 @@ struct xfs_mount;
105 (XFS_SB_VERSION2_OKREALFBITS | \ 93 (XFS_SB_VERSION2_OKREALFBITS | \
106 XFS_SB_VERSION2_OKSASHFBITS ) 94 XFS_SB_VERSION2_OKSASHFBITS )
107 95
108/*
109 * mkfs macro to set up sb_features2 word
110 */
111#define XFS_SB_VERSION2_MKFS(resvd1, sbcntr) 0
112
113typedef struct xfs_sb 96typedef struct xfs_sb
114{ 97{
115 __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ 98 __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 279e043d7323..d3d714e6b32a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1014,6 +1014,7 @@ xfs_trans_cancel(
1014 xfs_log_item_t *lip; 1014 xfs_log_item_t *lip;
1015 int i; 1015 int i;
1016#endif 1016#endif
1017 xfs_mount_t *mp = tp->t_mountp;
1017 1018
1018 /* 1019 /*
1019 * See if the caller is being too lazy to figure out if 1020 * See if the caller is being too lazy to figure out if
@@ -1026,9 +1027,10 @@ xfs_trans_cancel(
1026 * filesystem. This happens in paths where we detect 1027 * filesystem. This happens in paths where we detect
1027 * corruption and decide to give up. 1028 * corruption and decide to give up.
1028 */ 1029 */
1029 if ((tp->t_flags & XFS_TRANS_DIRTY) && 1030 if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
1030 !XFS_FORCED_SHUTDOWN(tp->t_mountp)) 1031 XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1031 xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE); 1032 xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
1033 }
1032#ifdef DEBUG 1034#ifdef DEBUG
1033 if (!(flags & XFS_TRANS_ABORT)) { 1035 if (!(flags & XFS_TRANS_ABORT)) {
1034 licp = &(tp->t_items); 1036 licp = &(tp->t_items);
@@ -1040,7 +1042,7 @@ xfs_trans_cancel(
1040 } 1042 }
1041 1043
1042 lip = lidp->lid_item; 1044 lip = lidp->lid_item;
1043 if (!XFS_FORCED_SHUTDOWN(tp->t_mountp)) 1045 if (!XFS_FORCED_SHUTDOWN(mp))
1044 ASSERT(!(lip->li_type == XFS_LI_EFD)); 1046 ASSERT(!(lip->li_type == XFS_LI_EFD));
1045 } 1047 }
1046 licp = licp->lic_next; 1048 licp = licp->lic_next;
@@ -1048,7 +1050,7 @@ xfs_trans_cancel(
1048 } 1050 }
1049#endif 1051#endif
1050 xfs_trans_unreserve_and_mod_sb(tp); 1052 xfs_trans_unreserve_and_mod_sb(tp);
1051 XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp); 1053 XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
1052 1054
1053 if (tp->t_ticket) { 1055 if (tp->t_ticket) {
1054 if (flags & XFS_TRANS_RELEASE_LOG_RES) { 1056 if (flags & XFS_TRANS_RELEASE_LOG_RES) {
@@ -1057,7 +1059,7 @@ xfs_trans_cancel(
1057 } else { 1059 } else {
1058 log_flags = 0; 1060 log_flags = 0;
1059 } 1061 }
1060 xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags); 1062 xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
1061 } 1063 }
1062 1064
1063 /* mark this thread as no longer being in a transaction */ 1065 /* mark this thread as no longer being in a transaction */
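The xfs_trans_cancel() changes above hoist tp->t_mountp into a local mp and log an error report before the forced shutdown. A hedged sketch of the decision being made, with an illustrative flag value:

#include <stdbool.h>
#include <stdio.h>

#define XFS_TRANS_DIRTY	0x1	/* illustrative value */

/* A dirty transaction cannot be rolled back, so cancelling one on a
 * still-healthy filesystem forces a shutdown -- now preceded by an
 * XFS_ERROR_REPORT in the patched code. */
static void cancel_decision(unsigned int t_flags, bool forced_shutdown)
{
	if ((t_flags & XFS_TRANS_DIRTY) && !forced_shutdown) {
		printf("report error, force shutdown\n");
		return;
	}
	printf("plain cancel\n");
}

int main(void)
{
	cancel_decision(XFS_TRANS_DIRTY, false);	/* shutdown path */
	cancel_decision(0, false);			/* normal cancel */
	return 0;
}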
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index a889963fdd14..d77901c07f63 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -973,7 +973,6 @@ void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
973void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *); 973void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
974void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); 974void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
975void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); 975void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
976void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
977void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *); 976void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
978void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); 977void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
979void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); 978void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index fefe1d60377f..34654ec6ae10 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -55,16 +55,13 @@ xfs_get_dir_entry(
55 xfs_inode_t **ipp) 55 xfs_inode_t **ipp)
56{ 56{
57 vnode_t *vp; 57 vnode_t *vp;
58 bhv_desc_t *bdp;
59 58
60 vp = VNAME_TO_VNODE(dentry); 59 vp = VNAME_TO_VNODE(dentry);
61 bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops); 60
62 if (!bdp) { 61 *ipp = xfs_vtoi(vp);
63 *ipp = NULL; 62 if (!*ipp)
64 return XFS_ERROR(ENOENT); 63 return XFS_ERROR(ENOENT);
65 }
66 VN_HOLD(vp); 64 VN_HOLD(vp);
67 *ipp = XFS_BHVTOI(bdp);
68 return 0; 65 return 0;
69} 66}
70 67
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 7bdbd991ab1c..b6ad370fab3d 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -53,6 +53,7 @@
53#include "xfs_acl.h" 53#include "xfs_acl.h"
54#include "xfs_attr.h" 54#include "xfs_attr.h"
55#include "xfs_clnt.h" 55#include "xfs_clnt.h"
56#include "xfs_fsops.h"
56 57
57STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); 58STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
58 59
@@ -290,8 +291,8 @@ xfs_start_flags(
290 mp->m_flags |= XFS_MOUNT_IDELETE; 291 mp->m_flags |= XFS_MOUNT_IDELETE;
291 if (ap->flags & XFSMNT_DIRSYNC) 292 if (ap->flags & XFSMNT_DIRSYNC)
292 mp->m_flags |= XFS_MOUNT_DIRSYNC; 293 mp->m_flags |= XFS_MOUNT_DIRSYNC;
293 if (ap->flags & XFSMNT_COMPAT_ATTR) 294 if (ap->flags & XFSMNT_ATTR2)
294 mp->m_flags |= XFS_MOUNT_COMPAT_ATTR; 295 mp->m_flags |= XFS_MOUNT_ATTR2;
295 296
296 if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE) 297 if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
297 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 298 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
@@ -312,6 +313,8 @@ xfs_start_flags(
312 mp->m_flags |= XFS_MOUNT_NOUUID; 313 mp->m_flags |= XFS_MOUNT_NOUUID;
313 if (ap->flags & XFSMNT_BARRIER) 314 if (ap->flags & XFSMNT_BARRIER)
314 mp->m_flags |= XFS_MOUNT_BARRIER; 315 mp->m_flags |= XFS_MOUNT_BARRIER;
316 else
317 mp->m_flags &= ~XFS_MOUNT_BARRIER;
315 318
316 return 0; 319 return 0;
317} 320}
@@ -330,10 +333,11 @@ xfs_finish_flags(
330 333
 331 /* Fail a mount where the logbuf is smaller than the log stripe */ 334 /* Fail a mount where the logbuf is smaller than the log stripe */
332 if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) { 335 if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
333 if ((ap->logbufsize == -1) && 336 if ((ap->logbufsize <= 0) &&
334 (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) { 337 (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
335 mp->m_logbsize = mp->m_sb.sb_logsunit; 338 mp->m_logbsize = mp->m_sb.sb_logsunit;
336 } else if (ap->logbufsize < mp->m_sb.sb_logsunit) { 339 } else if (ap->logbufsize > 0 &&
340 ap->logbufsize < mp->m_sb.sb_logsunit) {
337 cmn_err(CE_WARN, 341 cmn_err(CE_WARN,
338 "XFS: logbuf size must be greater than or equal to log stripe size"); 342 "XFS: logbuf size must be greater than or equal to log stripe size");
339 return XFS_ERROR(EINVAL); 343 return XFS_ERROR(EINVAL);
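The old test only treated logbufsize == -1 as "unspecified", so a value of 0 fell through to the size comparison. A standalone sketch of the fixed check, assuming any non-positive logbufsize means "pick a default"; the constant and return value are illustrative stand-ins:

#include <stdio.h>

#define XLOG_BIG_RECORD_BSIZE	(32 * 1024)	/* illustrative value */

/* Returns 0 on success; -1 stands in for XFS_ERROR(EINVAL). */
static int check_logbufsize(int logbufsize, int logsunit, int *logbsize)
{
	if (logbufsize <= 0 && logsunit > XLOG_BIG_RECORD_BSIZE) {
		*logbsize = logsunit;	/* auto-size to the log stripe */
	} else if (logbufsize > 0 && logbufsize < logsunit) {
		fprintf(stderr, "logbuf size must be >= log stripe size\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	int logbsize = 0;

	/* unspecified (0) against a 64k stripe: auto-sized, no error */
	printf("%d, logbsize=%d\n",
	       check_logbufsize(0, 64 * 1024, &logbsize), logbsize);
	/* explicit 16k buffer against a 64k stripe: rejected */
	printf("%d\n", check_logbufsize(16 * 1024, 64 * 1024, &logbsize));
	return 0;
}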
@@ -347,6 +351,10 @@ xfs_finish_flags(
347 } 351 }
348 } 352 }
349 353
354 if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
355 mp->m_flags |= XFS_MOUNT_ATTR2;
356 }
357
350 /* 358 /*
351 * prohibit r/w mounts of read-only filesystems 359 * prohibit r/w mounts of read-only filesystems
352 */ 360 */
@@ -382,10 +390,6 @@ xfs_finish_flags(
382 return XFS_ERROR(EINVAL); 390 return XFS_ERROR(EINVAL);
383 } 391 }
384 392
385 if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
386 mp->m_flags &= ~XFS_MOUNT_COMPAT_ATTR;
387 }
388
389 return 0; 393 return 0;
390} 394}
391 395
@@ -504,13 +508,13 @@ xfs_mount(
504 if (error) 508 if (error)
505 goto error2; 509 goto error2;
506 510
511 if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY))
512 xfs_mountfs_check_barriers(mp);
513
507 error = XFS_IOINIT(vfsp, args, flags); 514 error = XFS_IOINIT(vfsp, args, flags);
508 if (error) 515 if (error)
509 goto error2; 516 goto error2;
510 517
511 if ((args->flags & XFSMNT_BARRIER) &&
512 !(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY))
513 xfs_mountfs_check_barriers(mp);
514 return 0; 518 return 0;
515 519
516error2: 520error2:
@@ -655,6 +659,11 @@ xfs_mntupdate(
655 else 659 else
656 mp->m_flags &= ~XFS_MOUNT_NOATIME; 660 mp->m_flags &= ~XFS_MOUNT_NOATIME;
657 661
662 if (args->flags & XFSMNT_BARRIER)
663 mp->m_flags |= XFS_MOUNT_BARRIER;
664 else
665 mp->m_flags &= ~XFS_MOUNT_BARRIER;
666
658 if ((vfsp->vfs_flag & VFS_RDONLY) && 667 if ((vfsp->vfs_flag & VFS_RDONLY) &&
659 !(*flags & MS_RDONLY)) { 668 !(*flags & MS_RDONLY)) {
660 vfsp->vfs_flag &= ~VFS_RDONLY; 669 vfsp->vfs_flag &= ~VFS_RDONLY;
@@ -1634,6 +1643,7 @@ xfs_vget(
1634#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ 1643#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
1635#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and 1644#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
1636 * unwritten extent conversion */ 1645 * unwritten extent conversion */
1646#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
1637#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ 1647#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
1638#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ 1648#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
1639#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ 1649#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
@@ -1680,7 +1690,6 @@ xfs_parseargs(
1680 int iosize; 1690 int iosize;
1681 1691
1682 args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 1692 args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
1683 args->flags |= XFSMNT_COMPAT_ATTR;
1684 1693
1685#if 0 /* XXX: off by default, until some remaining issues ironed out */ 1694#if 0 /* XXX: off by default, until some remaining issues ironed out */
1686 args->flags |= XFSMNT_IDELETE; /* default to on */ 1695 args->flags |= XFSMNT_IDELETE; /* default to on */
@@ -1806,6 +1815,8 @@ xfs_parseargs(
1806 args->flags |= XFSMNT_NOUUID; 1815 args->flags |= XFSMNT_NOUUID;
1807 } else if (!strcmp(this_char, MNTOPT_BARRIER)) { 1816 } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
1808 args->flags |= XFSMNT_BARRIER; 1817 args->flags |= XFSMNT_BARRIER;
1818 } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
1819 args->flags &= ~XFSMNT_BARRIER;
1809 } else if (!strcmp(this_char, MNTOPT_IKEEP)) { 1820 } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
1810 args->flags &= ~XFSMNT_IDELETE; 1821 args->flags &= ~XFSMNT_IDELETE;
1811 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { 1822 } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
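With the new nobarrier keyword, whichever of the pair appears last in the option string wins, and xfs_mntupdate() (earlier in this file) now re-evaluates the flag on remount instead of only ever setting it. A minimal sketch of that parsing pattern, with an illustrative flag value:

#include <stdio.h>
#include <string.h>

#define XFSMNT_BARRIER	0x1	/* illustrative value */

static void parse_option(const char *opt, unsigned int *flags)
{
	if (!strcmp(opt, "barrier"))
		*flags |= XFSMNT_BARRIER;
	else if (!strcmp(opt, "nobarrier"))
		*flags &= ~XFSMNT_BARRIER;
}

int main(void)
{
	unsigned int flags = 0;

	parse_option("barrier", &flags);
	parse_option("nobarrier", &flags);	/* the later option wins */
	printf("barrier %s\n", (flags & XFSMNT_BARRIER) ? "on" : "off");
	return 0;
}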
@@ -1815,9 +1826,9 @@ xfs_parseargs(
1815 } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { 1826 } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
1816 args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 1827 args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
1817 } else if (!strcmp(this_char, MNTOPT_ATTR2)) { 1828 } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
1818 args->flags &= ~XFSMNT_COMPAT_ATTR; 1829 args->flags |= XFSMNT_ATTR2;
1819 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 1830 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
1820 args->flags |= XFSMNT_COMPAT_ATTR; 1831 args->flags &= ~XFSMNT_ATTR2;
1821 } else if (!strcmp(this_char, "osyncisdsync")) { 1832 } else if (!strcmp(this_char, "osyncisdsync")) {
1822 /* no-op, this is now the default */ 1833 /* no-op, this is now the default */
1823printk("XFS: osyncisdsync is now the default, option is deprecated.\n"); 1834printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
@@ -1892,7 +1903,6 @@ xfs_showargs(
1892 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, 1903 { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
1893 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, 1904 { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
1894 { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, 1905 { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
1895 { XFS_MOUNT_BARRIER, "," MNTOPT_BARRIER },
1896 { XFS_MOUNT_IDELETE, "," MNTOPT_NOIKEEP }, 1906 { XFS_MOUNT_IDELETE, "," MNTOPT_NOIKEEP },
1897 { 0, NULL } 1907 { 0, NULL }
1898 }; 1908 };
@@ -1914,33 +1924,28 @@ xfs_showargs(
1914 1924
1915 if (mp->m_logbufs > 0) 1925 if (mp->m_logbufs > 0)
1916 seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); 1926 seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
1917
1918 if (mp->m_logbsize > 0) 1927 if (mp->m_logbsize > 0)
1919 seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); 1928 seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
1920 1929
1921 if (mp->m_logname) 1930 if (mp->m_logname)
1922 seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); 1931 seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
1923
1924 if (mp->m_rtname) 1932 if (mp->m_rtname)
1925 seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); 1933 seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
1926 1934
1927 if (mp->m_dalign > 0) 1935 if (mp->m_dalign > 0)
1928 seq_printf(m, "," MNTOPT_SUNIT "=%d", 1936 seq_printf(m, "," MNTOPT_SUNIT "=%d",
1929 (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); 1937 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
1930
1931 if (mp->m_swidth > 0) 1938 if (mp->m_swidth > 0)
1932 seq_printf(m, "," MNTOPT_SWIDTH "=%d", 1939 seq_printf(m, "," MNTOPT_SWIDTH "=%d",
1933 (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 1940 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
1934 1941
1935 if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
1936 seq_printf(m, "," MNTOPT_ATTR2);
1937
1938 if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)) 1942 if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
1939 seq_printf(m, "," MNTOPT_LARGEIO); 1943 seq_printf(m, "," MNTOPT_LARGEIO);
1944 if (mp->m_flags & XFS_MOUNT_BARRIER)
1945 seq_printf(m, "," MNTOPT_BARRIER);
1940 1946
1941 if (!(vfsp->vfs_flag & VFS_32BITINODES)) 1947 if (!(vfsp->vfs_flag & VFS_32BITINODES))
1942 seq_printf(m, "," MNTOPT_64BITINODE); 1948 seq_printf(m, "," MNTOPT_64BITINODE);
1943
1944 if (vfsp->vfs_flag & VFS_GRPID) 1949 if (vfsp->vfs_flag & VFS_GRPID)
1945 seq_printf(m, "," MNTOPT_GRPID); 1950 seq_printf(m, "," MNTOPT_GRPID);
1946 1951
@@ -1959,6 +1964,7 @@ xfs_freeze(
1959 /* Push the superblock and write an unmount record */ 1964 /* Push the superblock and write an unmount record */
1960 xfs_log_unmount_write(mp); 1965 xfs_log_unmount_write(mp);
1961 xfs_unmountfs_writesb(mp); 1966 xfs_unmountfs_writesb(mp);
1967 xfs_fs_log_dummy(mp);
1962} 1968}
1963 1969
1964 1970
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index e92cacde02f5..8076cc981e11 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -185,8 +185,7 @@ xfs_getattr(
185 break; 185 break;
186 } 186 }
187 187
188 vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec; 188 vn_atime_to_timespec(vp, &vap->va_atime);
189 vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
190 vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec; 189 vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
191 vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; 190 vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
192 vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec; 191 vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
@@ -544,24 +543,6 @@ xfs_setattr(
544 } 543 }
545 544
546 /* 545 /*
547 * Can't set extent size unless the file is marked, or
548 * about to be marked as a realtime file.
549 *
550 * This check will be removed when fixed size extents
551 * with buffered data writes is implemented.
552 *
553 */
554 if ((mask & XFS_AT_EXTSIZE) &&
555 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
556 vap->va_extsize) &&
557 (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
558 ((mask & XFS_AT_XFLAGS) &&
559 (vap->va_xflags & XFS_XFLAG_REALTIME))))) {
560 code = XFS_ERROR(EINVAL);
561 goto error_return;
562 }
563
564 /*
565 * Can't change realtime flag if any extents are allocated. 546 * Can't change realtime flag if any extents are allocated.
566 */ 547 */
567 if ((ip->i_d.di_nextents || ip->i_delayed_blks) && 548 if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
@@ -823,13 +804,17 @@ xfs_setattr(
823 di_flags |= XFS_DIFLAG_RTINHERIT; 804 di_flags |= XFS_DIFLAG_RTINHERIT;
824 if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS) 805 if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
825 di_flags |= XFS_DIFLAG_NOSYMLINKS; 806 di_flags |= XFS_DIFLAG_NOSYMLINKS;
826 } else { 807 if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT)
808 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
809 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
827 if (vap->va_xflags & XFS_XFLAG_REALTIME) { 810 if (vap->va_xflags & XFS_XFLAG_REALTIME) {
828 di_flags |= XFS_DIFLAG_REALTIME; 811 di_flags |= XFS_DIFLAG_REALTIME;
829 ip->i_iocore.io_flags |= XFS_IOCORE_RT; 812 ip->i_iocore.io_flags |= XFS_IOCORE_RT;
830 } else { 813 } else {
831 ip->i_iocore.io_flags &= ~XFS_IOCORE_RT; 814 ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
832 } 815 }
816 if (vap->va_xflags & XFS_XFLAG_EXTSIZE)
817 di_flags |= XFS_DIFLAG_EXTSIZE;
833 } 818 }
834 ip->i_d.di_flags = di_flags; 819 ip->i_d.di_flags = di_flags;
835 } 820 }
@@ -999,10 +984,6 @@ xfs_readlink(
999 goto error_return; 984 goto error_return;
1000 } 985 }
1001 986
1002 if (!(ioflags & IO_INVIS)) {
1003 xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
1004 }
1005
1006 /* 987 /*
1007 * See if the symlink is stored inline. 988 * See if the symlink is stored inline.
1008 */ 989 */
@@ -1234,7 +1215,8 @@ xfs_inactive_free_eofblocks(
1234 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1215 xfs_iunlock(ip, XFS_ILOCK_SHARED);
1235 1216
1236 if (!error && (nimaps != 0) && 1217 if (!error && (nimaps != 0) &&
1237 (imap.br_startblock != HOLESTARTBLOCK)) { 1218 (imap.br_startblock != HOLESTARTBLOCK ||
1219 ip->i_delayed_blks)) {
1238 /* 1220 /*
1239 * Attach the dquots to the inode up front. 1221 * Attach the dquots to the inode up front.
1240 */ 1222 */
@@ -1569,9 +1551,11 @@ xfs_release(
1569 1551
1570 if (ip->i_d.di_nlink != 0) { 1552 if (ip->i_d.di_nlink != 0) {
1571 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1553 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1572 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && 1554 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
1555 ip->i_delayed_blks > 0)) &&
1573 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1556 (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
1574 (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)))) { 1557 (!(ip->i_d.di_flags &
1558 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
1575 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1559 if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1576 return (error); 1560 return (error);
1577 /* Update linux inode block count after free above */ 1561 /* Update linux inode block count after free above */
@@ -1628,7 +1612,8 @@ xfs_inactive(
1628 * only one with a reference to the inode. 1612 * only one with a reference to the inode.
1629 */ 1613 */
1630 truncate = ((ip->i_d.di_nlink == 0) && 1614 truncate = ((ip->i_d.di_nlink == 0) &&
1631 ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) && 1615 ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0) ||
1616 (ip->i_delayed_blks > 0)) &&
1632 ((ip->i_d.di_mode & S_IFMT) == S_IFREG)); 1617 ((ip->i_d.di_mode & S_IFMT) == S_IFREG));
1633 1618
1634 mp = ip->i_mount; 1619 mp = ip->i_mount;
@@ -1646,10 +1631,12 @@ xfs_inactive(
1646 1631
1647 if (ip->i_d.di_nlink != 0) { 1632 if (ip->i_d.di_nlink != 0) {
1648 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1633 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1649 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) && 1634 ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
1650 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1635 ip->i_delayed_blks > 0)) &&
1651 (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) || 1636 (ip->i_df.if_flags & XFS_IFEXTENTS) &&
1652 (ip->i_delayed_blks != 0))) { 1637 (!(ip->i_d.di_flags &
1638 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
1639 (ip->i_delayed_blks != 0)))) {
1653 if ((error = xfs_inactive_free_eofblocks(mp, ip))) 1640 if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1654 return (VN_INACTIVE_CACHE); 1641 return (VN_INACTIVE_CACHE);
1655 /* Update linux inode block count after free above */ 1642 /* Update linux inode block count after free above */
@@ -2593,7 +2580,6 @@ xfs_link(
2593 int cancel_flags; 2580 int cancel_flags;
2594 int committed; 2581 int committed;
2595 vnode_t *target_dir_vp; 2582 vnode_t *target_dir_vp;
2596 bhv_desc_t *src_bdp;
2597 int resblks; 2583 int resblks;
2598 char *target_name = VNAME(dentry); 2584 char *target_name = VNAME(dentry);
2599 int target_namelen; 2585 int target_namelen;
@@ -2606,8 +2592,7 @@ xfs_link(
2606 if (VN_ISDIR(src_vp)) 2592 if (VN_ISDIR(src_vp))
2607 return XFS_ERROR(EPERM); 2593 return XFS_ERROR(EPERM);
2608 2594
2609 src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops); 2595 sip = xfs_vtoi(src_vp);
2610 sip = XFS_BHVTOI(src_bdp);
2611 tdp = XFS_BHVTOI(target_dir_bdp); 2596 tdp = XFS_BHVTOI(target_dir_bdp);
2612 mp = tdp->i_mount; 2597 mp = tdp->i_mount;
2613 if (XFS_FORCED_SHUTDOWN(mp)) 2598 if (XFS_FORCED_SHUTDOWN(mp))
@@ -3240,7 +3225,6 @@ xfs_readdir(
3240 xfs_trans_t *tp = NULL; 3225 xfs_trans_t *tp = NULL;
3241 int error = 0; 3226 int error = 0;
3242 uint lock_mode; 3227 uint lock_mode;
3243 xfs_off_t start_offset;
3244 3228
3245 vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__, 3229 vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__,
3246 (inst_t *)__return_address); 3230 (inst_t *)__return_address);
@@ -3251,11 +3235,7 @@ xfs_readdir(
3251 } 3235 }
3252 3236
3253 lock_mode = xfs_ilock_map_shared(dp); 3237 lock_mode = xfs_ilock_map_shared(dp);
3254 start_offset = uiop->uio_offset;
3255 error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp); 3238 error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
3256 if (start_offset != uiop->uio_offset) {
3257 xfs_ichgtime(dp, XFS_ICHGTIME_ACC);
3258 }
3259 xfs_iunlock_map_shared(dp, lock_mode); 3239 xfs_iunlock_map_shared(dp, lock_mode);
3260 return error; 3240 return error;
3261} 3241}
@@ -3832,7 +3812,12 @@ xfs_reclaim(
3832 vn_iowait(vp); 3812 vn_iowait(vp);
3833 3813
3834 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); 3814 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
3835 ASSERT(VN_CACHED(vp) == 0); 3815
3816 /*
3817 * Make sure the atime in the XFS inode is correct before freeing the
3818 * Linux inode.
3819 */
3820 xfs_synchronize_atime(ip);
3836 3821
3837 /* If we have nothing to flush with this inode then complete the 3822 /* If we have nothing to flush with this inode then complete the
3838 * teardown now, otherwise break the link between the xfs inode 3823 * teardown now, otherwise break the link between the xfs inode
@@ -4002,42 +3987,36 @@ xfs_alloc_file_space(
4002 int alloc_type, 3987 int alloc_type,
4003 int attr_flags) 3988 int attr_flags)
4004{ 3989{
3990 xfs_mount_t *mp = ip->i_mount;
3991 xfs_off_t count;
4005 xfs_filblks_t allocated_fsb; 3992 xfs_filblks_t allocated_fsb;
4006 xfs_filblks_t allocatesize_fsb; 3993 xfs_filblks_t allocatesize_fsb;
4007 int committed; 3994 xfs_extlen_t extsz, temp;
4008 xfs_off_t count; 3995 xfs_fileoff_t startoffset_fsb;
4009 xfs_filblks_t datablocks;
4010 int error;
4011 xfs_fsblock_t firstfsb; 3996 xfs_fsblock_t firstfsb;
4012 xfs_bmap_free_t free_list; 3997 int nimaps;
4013 xfs_bmbt_irec_t *imapp; 3998 int bmapi_flag;
4014 xfs_bmbt_irec_t imaps[1]; 3999 int quota_flag;
4015 xfs_mount_t *mp;
4016 int numrtextents;
4017 int reccount;
4018 uint resblks;
4019 int rt; 4000 int rt;
4020 int rtextsize;
4021 xfs_fileoff_t startoffset_fsb;
4022 xfs_trans_t *tp; 4001 xfs_trans_t *tp;
4023 int xfs_bmapi_flags; 4002 xfs_bmbt_irec_t imaps[1], *imapp;
4003 xfs_bmap_free_t free_list;
4004 uint qblocks, resblks, resrtextents;
4005 int committed;
4006 int error;
4024 4007
4025 vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address); 4008 vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
4026 mp = ip->i_mount;
4027 4009
4028 if (XFS_FORCED_SHUTDOWN(mp)) 4010 if (XFS_FORCED_SHUTDOWN(mp))
4029 return XFS_ERROR(EIO); 4011 return XFS_ERROR(EIO);
4030 4012
4031 /* 4013 rt = XFS_IS_REALTIME_INODE(ip);
4032 * determine if this is a realtime file 4014 if (unlikely(rt)) {
4033 */ 4015 if (!(extsz = ip->i_d.di_extsize))
4034 if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) { 4016 extsz = mp->m_sb.sb_rextsize;
4035 if (ip->i_d.di_extsize) 4017 } else {
4036 rtextsize = ip->i_d.di_extsize; 4018 extsz = ip->i_d.di_extsize;
4037 else 4019 }
4038 rtextsize = mp->m_sb.sb_rextsize;
4039 } else
4040 rtextsize = 0;
4041 4020
4042 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 4021 if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
4043 return error; 4022 return error;
@@ -4048,8 +4027,8 @@ xfs_alloc_file_space(
4048 count = len; 4027 count = len;
4049 error = 0; 4028 error = 0;
4050 imapp = &imaps[0]; 4029 imapp = &imaps[0];
4051 reccount = 1; 4030 nimaps = 1;
4052 xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0); 4031 bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
4053 startoffset_fsb = XFS_B_TO_FSBT(mp, offset); 4032 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
4054 allocatesize_fsb = XFS_B_TO_FSB(mp, count); 4033 allocatesize_fsb = XFS_B_TO_FSB(mp, count);
4055 4034
@@ -4070,43 +4049,51 @@ xfs_alloc_file_space(
4070 } 4049 }
4071 4050
4072 /* 4051 /*
4073 * allocate file space until done or until there is an error 4052 * Allocate file space until done or until there is an error
4074 */ 4053 */
4075retry: 4054retry:
4076 while (allocatesize_fsb && !error) { 4055 while (allocatesize_fsb && !error) {
4056 xfs_fileoff_t s, e;
4057
4077 /* 4058 /*
4078 * determine if reserving space on 4059 * Determine space reservations for data/realtime.
4079 * the data or realtime partition.
4080 */ 4060 */
4081 if (rt) { 4061 if (unlikely(extsz)) {
4082 xfs_fileoff_t s, e;
4083
4084 s = startoffset_fsb; 4062 s = startoffset_fsb;
4085 do_div(s, rtextsize); 4063 do_div(s, extsz);
4086 s *= rtextsize; 4064 s *= extsz;
4087 e = roundup_64(startoffset_fsb + allocatesize_fsb, 4065 e = startoffset_fsb + allocatesize_fsb;
4088 rtextsize); 4066 if ((temp = do_mod(startoffset_fsb, extsz)))
4089 numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize; 4067 e += temp;
4090 datablocks = 0; 4068 if ((temp = do_mod(e, extsz)))
4069 e += extsz - temp;
4070 } else {
4071 s = 0;
4072 e = allocatesize_fsb;
4073 }
4074
4075 if (unlikely(rt)) {
4076 resrtextents = qblocks = (uint)(e - s);
4077 resrtextents /= mp->m_sb.sb_rextsize;
4078 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
4079 quota_flag = XFS_QMOPT_RES_RTBLKS;
4091 } else { 4080 } else {
4092 datablocks = allocatesize_fsb; 4081 resrtextents = 0;
4093 numrtextents = 0; 4082 resblks = qblocks = \
4083 XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
4084 quota_flag = XFS_QMOPT_RES_REGBLKS;
4094 } 4085 }
4095 4086
4096 /* 4087 /*
4097 * allocate and setup the transaction 4088 * Allocate and setup the transaction.
4098 */ 4089 */
4099 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); 4090 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
4100 resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks); 4091 error = xfs_trans_reserve(tp, resblks,
4101 error = xfs_trans_reserve(tp, 4092 XFS_WRITE_LOG_RES(mp), resrtextents,
4102 resblks,
4103 XFS_WRITE_LOG_RES(mp),
4104 numrtextents,
4105 XFS_TRANS_PERM_LOG_RES, 4093 XFS_TRANS_PERM_LOG_RES,
4106 XFS_WRITE_LOG_COUNT); 4094 XFS_WRITE_LOG_COUNT);
4107
4108 /* 4095 /*
4109 * check for running out of space 4096 * Check for running out of space
4110 */ 4097 */
4111 if (error) { 4098 if (error) {
4112 /* 4099 /*
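The rounding introduced above widens the requested range so both ends fall on extent-size boundaries before space is reserved; the reservation is then sized from (e - s). A standalone transcription of that arithmetic, with plain 64-bit division in place of do_div/do_mod:

#include <stdio.h>

typedef unsigned long long u64;

/* Widen [start, start + len) to extsz boundaries, as in the hunk above. */
static void round_to_extsz(u64 start, u64 len, u64 extsz, u64 *s, u64 *e)
{
	u64 temp;

	*s = (start / extsz) * extsz;		/* round the start down */
	*e = start + len;
	if ((temp = start % extsz))		/* carry the start misalignment */
		*e += temp;
	if ((temp = *e % extsz))		/* round the end up */
		*e += extsz - temp;
}

int main(void)
{
	u64 s, e;

	round_to_extsz(5, 10, 4, &s, &e);	/* blocks 5..14, 4-block extents */
	printf("s=%llu e=%llu (%llu blocks reserved)\n", s, e, e - s);
	return 0;
}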
@@ -4117,8 +4104,8 @@ retry:
4117 break; 4104 break;
4118 } 4105 }
4119 xfs_ilock(ip, XFS_ILOCK_EXCL); 4106 xfs_ilock(ip, XFS_ILOCK_EXCL);
4120 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, 4107 error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
4121 ip->i_udquot, ip->i_gdquot, resblks, 0, 0); 4108 qblocks, 0, quota_flag);
4122 if (error) 4109 if (error)
4123 goto error1; 4110 goto error1;
4124 4111
@@ -4126,19 +4113,19 @@ retry:
4126 xfs_trans_ihold(tp, ip); 4113 xfs_trans_ihold(tp, ip);
4127 4114
4128 /* 4115 /*
4129 * issue the bmapi() call to allocate the blocks 4116 * Issue the xfs_bmapi() call to allocate the blocks
4130 */ 4117 */
4131 XFS_BMAP_INIT(&free_list, &firstfsb); 4118 XFS_BMAP_INIT(&free_list, &firstfsb);
4132 error = xfs_bmapi(tp, ip, startoffset_fsb, 4119 error = xfs_bmapi(tp, ip, startoffset_fsb,
4133 allocatesize_fsb, xfs_bmapi_flags, 4120 allocatesize_fsb, bmapi_flag,
4134 &firstfsb, 0, imapp, &reccount, 4121 &firstfsb, 0, imapp, &nimaps,
4135 &free_list); 4122 &free_list);
4136 if (error) { 4123 if (error) {
4137 goto error0; 4124 goto error0;
4138 } 4125 }
4139 4126
4140 /* 4127 /*
4141 * complete the transaction 4128 * Complete the transaction
4142 */ 4129 */
4143 error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed); 4130 error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
4144 if (error) { 4131 if (error) {
@@ -4153,7 +4140,7 @@ retry:
4153 4140
4154 allocated_fsb = imapp->br_blockcount; 4141 allocated_fsb = imapp->br_blockcount;
4155 4142
4156 if (reccount == 0) { 4143 if (nimaps == 0) {
4157 error = XFS_ERROR(ENOSPC); 4144 error = XFS_ERROR(ENOSPC);
4158 break; 4145 break;
4159 } 4146 }
@@ -4176,9 +4163,11 @@ dmapi_enospc_check:
4176 4163
4177 return error; 4164 return error;
4178 4165
4179 error0: 4166error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
4180 xfs_bmap_cancel(&free_list); 4167 xfs_bmap_cancel(&free_list);
4181 error1: 4168 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
4169
4170error1: /* Just cancel transaction */
4182 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 4171 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
4183 xfs_iunlock(ip, XFS_ILOCK_EXCL); 4172 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4184 goto dmapi_enospc_check; 4173 goto dmapi_enospc_check;
@@ -4423,8 +4412,8 @@ xfs_free_file_space(
4423 } 4412 }
4424 xfs_ilock(ip, XFS_ILOCK_EXCL); 4413 xfs_ilock(ip, XFS_ILOCK_EXCL);
4425 error = XFS_TRANS_RESERVE_QUOTA(mp, tp, 4414 error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
4426 ip->i_udquot, ip->i_gdquot, resblks, 0, rt ? 4415 ip->i_udquot, ip->i_gdquot, resblks, 0,
4427 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS); 4416 XFS_QMOPT_RES_REGBLKS);
4428 if (error) 4417 if (error)
4429 goto error1; 4418 goto error1;
4430 4419
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index a714d0cdc204..6f92482cc96c 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -156,7 +156,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
156 /* Always update the PCB ASN. Another thread may have allocated 156 /* Always update the PCB ASN. Another thread may have allocated
157 a new mm->context (via flush_tlb_mm) without the ASN serial 157 a new mm->context (via flush_tlb_mm) without the ASN serial
158 number wrapping. We have no way to detect when this is needed. */ 158 number wrapping. We have no way to detect when this is needed. */
159 next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK; 159 task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
160} 160}
161 161
162__EXTERN_INLINE void 162__EXTERN_INLINE void
@@ -235,7 +235,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
235 if (cpu_online(i)) 235 if (cpu_online(i))
236 mm->context[i] = 0; 236 mm->context[i] = 0;
237 if (tsk != current) 237 if (tsk != current)
238 tsk->thread_info->pcb.ptbr 238 task_thread_info(tsk)->pcb.ptbr
239 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; 239 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
240 return 0; 240 return 0;
241} 241}
@@ -249,7 +249,7 @@ destroy_context(struct mm_struct *mm)
249static inline void 249static inline void
250enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 250enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
251{ 251{
252 tsk->thread_info->pcb.ptbr 252 task_thread_info(tsk)->pcb.ptbr
253 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; 253 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
254} 254}
255 255
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index bb1a7a3abb8b..425b7b6d28cb 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -52,19 +52,10 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
52 52
53unsigned long get_wchan(struct task_struct *p); 53unsigned long get_wchan(struct task_struct *p);
54 54
55/* See arch/alpha/kernel/ptrace.c for details. */ 55#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
56#define PT_REG(reg) \
57 (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
58
59#define SW_REG(reg) \
60 (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
61 + offsetof(struct switch_stack, reg))
62
63#define KSTK_EIP(tsk) \
64 (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))
65 56
66#define KSTK_ESP(tsk) \ 57#define KSTK_ESP(tsk) \
67 ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp) 58 ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
68 59
69#define cpu_relax() barrier() 60#define cpu_relax() barrier()
70 61
diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h
index 072375c135b4..9933b8b3612e 100644
--- a/include/asm-alpha/ptrace.h
+++ b/include/asm-alpha/ptrace.h
@@ -75,10 +75,10 @@ struct switch_stack {
75#define profile_pc(regs) instruction_pointer(regs) 75#define profile_pc(regs) instruction_pointer(regs)
76extern void show_regs(struct pt_regs *); 76extern void show_regs(struct pt_regs *);
77 77
78#define alpha_task_regs(task) \ 78#define task_pt_regs(task) \
79 ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1) 79 ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
80 80
81#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0) 81#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
82 82
83#endif 83#endif
84 84
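The renamed task_pt_regs() is pure pointer arithmetic: the saved user registers sit at the very top of the task's two-page kernel stack. A hedged standalone illustration; the struct layout and stack allocation below are stand-ins, not the kernel's:

#include <stdio.h>

#define PAGE_SIZE	8192UL	/* alpha pages are 8 KB */

struct pt_regs { unsigned long r0, pc; /* ... abridged ... */ };

/* pt_regs sits at the very top of the two-page kernel stack. */
static struct pt_regs *task_pt_regs(void *stack_page)
{
	return (struct pt_regs *)((char *)stack_page + 2 * PAGE_SIZE) - 1;
}

int main(void)
{
	static char stack[2 * 8192];
	struct pt_regs *regs = task_pt_regs(stack);

	printf("stack %p..%p, pt_regs at %p\n",
	       (void *)stack, (void *)(stack + sizeof(stack)), (void *)regs);
	return 0;
}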
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 050e86d12891..cc9c7e8cced5 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -131,15 +131,25 @@ struct el_common_EV6_mcheck {
131extern void halt(void) __attribute__((noreturn)); 131extern void halt(void) __attribute__((noreturn));
132#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) 132#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
133 133
134#define switch_to(P,N,L) \ 134#define switch_to(P,N,L) \
135 do { \ 135 do { \
136 (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \ 136 (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
137 check_mmu_context(); \ 137 check_mmu_context(); \
138 } while (0) 138 } while (0)
139 139
140struct task_struct; 140struct task_struct;
141extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); 141extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
142 142
143/*
144 * On SMP systems, when the scheduler does migration-cost autodetection,
145 * it needs a way to flush as much of the CPU's caches as possible.
146 *
147 * TODO: fill this in!
148 */
149static inline void sched_cacheflush(void)
150{
151}
152
143#define imb() \ 153#define imb() \
144__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") 154__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
145 155
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index d51491ed00b8..69ffd93f8e22 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -54,8 +54,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
54#define alloc_thread_info(tsk) \ 54#define alloc_thread_info(tsk) \
55 ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 55 ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
56#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 56#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
57#define get_thread_info(ti) get_task_struct((ti)->task)
58#define put_thread_info(ti) put_task_struct((ti)->task)
59 57
60#endif /* __ASSEMBLY__ */ 58#endif /* __ASSEMBLY__ */
61 59
diff --git a/include/asm-arm/processor.h b/include/asm-arm/processor.h
index 7d4118e09054..31290694648b 100644
--- a/include/asm-arm/processor.h
+++ b/include/asm-arm/processor.h
@@ -85,9 +85,11 @@ unsigned long get_wchan(struct task_struct *p);
85 */ 85 */
86extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 86extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
87 87
88#define KSTK_REGS(tsk) (((struct pt_regs *)(THREAD_START_SP + (unsigned long)(tsk)->thread_info)) - 1) 88#define task_pt_regs(p) \
89#define KSTK_EIP(tsk) KSTK_REGS(tsk)->ARM_pc 89 ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
90#define KSTK_ESP(tsk) KSTK_REGS(tsk)->ARM_sp 90
91#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
92#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
91 93
92/* 94/*
93 * Prefetching support - only ARMv5. 95 * Prefetching support - only ARMv5.
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 5621d61ebc07..eb2de8c10515 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -168,10 +168,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
168 168
169#define switch_to(prev,next,last) \ 169#define switch_to(prev,next,last) \
170do { \ 170do { \
171 last = __switch_to(prev,prev->thread_info,next->thread_info); \ 171 last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
172} while (0) 172} while (0)
173 173
174/* 174/*
175 * On SMP systems, when the scheduler does migration-cost autodetection,
176 * it needs a way to flush as much of the CPU's caches as possible.
177 *
178 * TODO: fill this in!
179 */
180static inline void sched_cacheflush(void)
181{
182}
183
184/*
175 * CPU interrupt mask handling. 185 * CPU interrupt mask handling.
176 */ 186 */
177#if __LINUX_ARM_ARCH__ >= 6 187#if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index 7c98557b717f..33a33cbb6329 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -96,13 +96,10 @@ static inline struct thread_info *current_thread_info(void)
96extern struct thread_info *alloc_thread_info(struct task_struct *task); 96extern struct thread_info *alloc_thread_info(struct task_struct *task);
97extern void free_thread_info(struct thread_info *); 97extern void free_thread_info(struct thread_info *);
98 98
99#define get_thread_info(ti) get_task_struct((ti)->task)
100#define put_thread_info(ti) put_task_struct((ti)->task)
101
102#define thread_saved_pc(tsk) \ 99#define thread_saved_pc(tsk) \
103 ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc))) 100 ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
104#define thread_saved_fp(tsk) \ 101#define thread_saved_fp(tsk) \
105 ((unsigned long)((tsk)->thread_info->cpu_context.fp)) 102 ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
106 103
107extern void iwmmxt_task_disable(struct thread_info *); 104extern void iwmmxt_task_disable(struct thread_info *);
108extern void iwmmxt_task_copy(struct thread_info *, void *); 105extern void iwmmxt_task_copy(struct thread_info *, void *);
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index f23fac1938f3..ca4ccfc4b578 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -111,10 +111,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
111 111
112#define switch_to(prev,next,last) \ 112#define switch_to(prev,next,last) \
113do { \ 113do { \
114 last = __switch_to(prev,prev->thread_info,next->thread_info); \ 114 last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
115} while (0) 115} while (0)
116 116
117/* 117/*
118 * On SMP systems, when the scheduler does migration-cost autodetection,
119 * it needs a way to flush as much of the CPU's caches as possible.
120 *
121 * TODO: fill this in!
122 */
123static inline void sched_cacheflush(void)
124{
125}
126
127/*
118 * Save the current interrupt enable state & disable IRQs 128 * Save the current interrupt enable state & disable IRQs
119 */ 129 */
120#define local_irq_save(x) \ 130#define local_irq_save(x) \
diff --git a/include/asm-arm26/thread_info.h b/include/asm-arm26/thread_info.h
index aff3e5699c64..a65e58a0a767 100644
--- a/include/asm-arm26/thread_info.h
+++ b/include/asm-arm26/thread_info.h
@@ -82,18 +82,15 @@ static inline struct thread_info *current_thread_info(void)
82 82
83/* FIXME - PAGE_SIZE < 32K */ 83/* FIXME - PAGE_SIZE < 32K */
 84#define THREAD_SIZE (8*32768) // FIXME - this needs attention (see kernel/fork.c which gets a nice div by zero if this is lower than 8*32768) 84#define THREAD_SIZE (8*32768) // FIXME - this needs attention (see kernel/fork.c which gets a nice div by zero if this is lower than 8*32768)
85#define __get_user_regs(x) (((struct pt_regs *)((unsigned long)(x) + THREAD_SIZE - 8)) - 1) 85#define task_pt_regs(task) ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE - 8) - 1)
86 86
87extern struct thread_info *alloc_thread_info(struct task_struct *task); 87extern struct thread_info *alloc_thread_info(struct task_struct *task);
88extern void free_thread_info(struct thread_info *); 88extern void free_thread_info(struct thread_info *);
89 89
90#define get_thread_info(ti) get_task_struct((ti)->task)
91#define put_thread_info(ti) put_task_struct((ti)->task)
92
93#define thread_saved_pc(tsk) \ 90#define thread_saved_pc(tsk) \
94 ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc))) 91 ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
95#define thread_saved_fp(tsk) \ 92#define thread_saved_fp(tsk) \
96 ((unsigned long)((tsk)->thread_info->cpu_context.fp)) 93 ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
97 94
98#else /* !__ASSEMBLY__ */ 95#else /* !__ASSEMBLY__ */
99 96
diff --git a/include/asm-cris/arch-v10/processor.h b/include/asm-cris/arch-v10/processor.h
index e23df8dc96e8..cc692c7a0660 100644
--- a/include/asm-cris/arch-v10/processor.h
+++ b/include/asm-cris/arch-v10/processor.h
@@ -40,7 +40,7 @@ struct thread_struct {
40#define KSTK_EIP(tsk) \ 40#define KSTK_EIP(tsk) \
41({ \ 41({ \
42 unsigned long eip = 0; \ 42 unsigned long eip = 0; \
43 unsigned long regs = (unsigned long)user_regs(tsk); \ 43 unsigned long regs = (unsigned long)task_pt_regs(tsk); \
44 if (regs > PAGE_SIZE && \ 44 if (regs > PAGE_SIZE && \
45 virt_addr_valid(regs)) \ 45 virt_addr_valid(regs)) \
46 eip = ((struct pt_regs *)regs)->irp; \ 46 eip = ((struct pt_regs *)regs)->irp; \
diff --git a/include/asm-cris/arch-v32/processor.h b/include/asm-cris/arch-v32/processor.h
index 8c939bf27987..32bf2e538ced 100644
--- a/include/asm-cris/arch-v32/processor.h
+++ b/include/asm-cris/arch-v32/processor.h
@@ -36,7 +36,7 @@ struct thread_struct {
36#define KSTK_EIP(tsk) \ 36#define KSTK_EIP(tsk) \
37({ \ 37({ \
38 unsigned long eip = 0; \ 38 unsigned long eip = 0; \
39 unsigned long regs = (unsigned long)user_regs(tsk); \ 39 unsigned long regs = (unsigned long)task_pt_regs(tsk); \
40 if (regs > PAGE_SIZE && virt_addr_valid(regs)) \ 40 if (regs > PAGE_SIZE && virt_addr_valid(regs)) \
41 eip = ((struct pt_regs *)regs)->erp; \ 41 eip = ((struct pt_regs *)regs)->erp; \
42 eip; \ 42 eip; \
diff --git a/include/asm-cris/processor.h b/include/asm-cris/processor.h
index dce41009eeb0..961e2bceadbc 100644
--- a/include/asm-cris/processor.h
+++ b/include/asm-cris/processor.h
@@ -45,7 +45,8 @@ struct task_struct;
 45 * Ditto, but for the currently running task 45 * Ditto, but for the currently running task
46 */ 46 */
47 47
48#define current_regs() user_regs(current->thread_info) 48#define task_pt_regs(task) user_regs(task_thread_info(task))
49#define current_regs() task_pt_regs(current)
49 50
50static inline void prepare_to_copy(struct task_struct *tsk) 51static inline void prepare_to_copy(struct task_struct *tsk)
51{ 52{
diff --git a/include/asm-cris/thread_info.h b/include/asm-cris/thread_info.h
index cef0140fc104..7ad853c3f74e 100644
--- a/include/asm-cris/thread_info.h
+++ b/include/asm-cris/thread_info.h
@@ -69,8 +69,6 @@ struct thread_info {
69/* thread information allocation */ 69/* thread information allocation */
70#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 70#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
71#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 71#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
72#define get_thread_info(ti) get_task_struct((ti)->task)
73#define put_thread_info(ti) put_task_struct((ti)->task)
74 72
75#endif /* !__ASSEMBLY__ */ 73#endif /* !__ASSEMBLY__ */
76 74
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index 60f6b2aee76d..a5576e02dd1d 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -110,8 +110,6 @@ register struct thread_info *__current_thread_info asm("gr15");
110#endif 110#endif
111 111
112#define free_thread_info(info) kfree(info) 112#define free_thread_info(info) kfree(info)
113#define get_thread_info(ti) get_task_struct((ti)->task)
114#define put_thread_info(ti) put_task_struct((ti)->task)
115 113
116#else /* !__ASSEMBLY__ */ 114#else /* !__ASSEMBLY__ */
117 115
diff --git a/include/asm-h8300/thread_info.h b/include/asm-h8300/thread_info.h
index bfcc755c3bb1..45f09dc9caff 100644
--- a/include/asm-h8300/thread_info.h
+++ b/include/asm-h8300/thread_info.h
@@ -69,8 +69,6 @@ static inline struct thread_info *current_thread_info(void)
69#define alloc_thread_info(tsk) ((struct thread_info *) \ 69#define alloc_thread_info(tsk) ((struct thread_info *) \
70 __get_free_pages(GFP_KERNEL, 1)) 70 __get_free_pages(GFP_KERNEL, 1))
71#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 71#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
72#define get_thread_info(ti) get_task_struct((ti)->task)
73#define put_thread_info(ti) put_task_struct((ti)->task)
74#endif /* __ASSEMBLY__ */ 72#endif /* __ASSEMBLY__ */
75 73
76/* 74/*
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 6747006743f9..152d0baa576a 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -49,19 +49,19 @@ static inline void __save_init_fpu( struct task_struct *tsk )
49 X86_FEATURE_FXSR, 49 X86_FEATURE_FXSR,
50 "m" (tsk->thread.i387.fxsave) 50 "m" (tsk->thread.i387.fxsave)
51 :"memory"); 51 :"memory");
52 tsk->thread_info->status &= ~TS_USEDFPU; 52 task_thread_info(tsk)->status &= ~TS_USEDFPU;
53} 53}
54 54
55#define __unlazy_fpu( tsk ) do { \ 55#define __unlazy_fpu( tsk ) do { \
56 if ((tsk)->thread_info->status & TS_USEDFPU) \ 56 if (task_thread_info(tsk)->status & TS_USEDFPU) \
57 save_init_fpu( tsk ); \ 57 save_init_fpu( tsk ); \
58} while (0) 58} while (0)
59 59
60#define __clear_fpu( tsk ) \ 60#define __clear_fpu( tsk ) \
61do { \ 61do { \
62 if ((tsk)->thread_info->status & TS_USEDFPU) { \ 62 if (task_thread_info(tsk)->status & TS_USEDFPU) { \
63 asm volatile("fnclex ; fwait"); \ 63 asm volatile("fnclex ; fwait"); \
64 (tsk)->thread_info->status &= ~TS_USEDFPU; \ 64 task_thread_info(tsk)->status &= ~TS_USEDFPU; \
65 stts(); \ 65 stts(); \
66 } \ 66 } \
67} while (0) 67} while (0)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 13ecf66b098c..feca5d961e2b 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -561,10 +561,20 @@ unsigned long get_wchan(struct task_struct *p);
561 (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ 561 (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
562}) 562})
563 563
564/*
565 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
566 * This is necessary to guarantee that the entire "struct pt_regs"
 567 * is accessible even if the CPU hasn't stored the SS/ESP registers
568 * on the stack (interrupt gate does not save these registers
569 * when switching to the same priv ring).
570 * Therefore beware: accessing the xss/esp fields of the
571 * "struct pt_regs" is possible, but they may contain the
572 * completely wrong values.
573 */
564#define task_pt_regs(task) \ 574#define task_pt_regs(task) \
565({ \ 575({ \
566 struct pt_regs *__regs__; \ 576 struct pt_regs *__regs__; \
567 __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \ 577 __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
568 __regs__ - 1; \ 578 __regs__ - 1; \
569}) 579})
570 580
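Combining the comment and the new definition: the ring-0 stack top is pulled down by 8 bytes so the esp/xss slots of struct pt_regs stay inside the stack even when a same-ring interrupt gate did not push them. An illustrative standalone version; the field list and THREAD_SIZE are stand-ins, not the kernel's definitions:

#include <stdio.h>

#define THREAD_SIZE	8192UL	/* illustrative stack size */

struct pt_regs {
	long ebx, ecx, edx, esi, edi, ebp, eax;
	long xds, xes, orig_eax, eip, xcs, eflags, esp, xss;
};

/* Reserve 8 bytes at the stack top so the esp/xss slots are always
 * addressable, even when the CPU did not push them. */
static struct pt_regs *task_pt_regs(void *stack_page)
{
	char *top = (char *)stack_page + THREAD_SIZE - 8;

	return (struct pt_regs *)top - 1;
}

int main(void)
{
	static char stack[8192];
	struct pt_regs *regs = task_pt_regs(stack);

	printf("pt_regs at %p, stack top %p\n",
	       (void *)regs, (void *)(stack + sizeof(stack)));
	return 0;
}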
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 9c0593b7a94e..36a92ed6a9d0 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -548,6 +548,15 @@ void enable_hlt(void);
548extern int es7000_plat; 548extern int es7000_plat;
549void cpu_idle_wait(void); 549void cpu_idle_wait(void);
550 550
551/*
552 * On SMP systems, when the scheduler does migration-cost autodetection,
553 * it needs a way to flush as much of the CPU's caches as possible:
554 */
555static inline void sched_cacheflush(void)
556{
557 wbinvd();
558}
559
551extern unsigned long arch_align_stack(unsigned long sp); 560extern unsigned long arch_align_stack(unsigned long sp);
552 561
553#endif 562#endif
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8fbf791651bf..2493e77e8c30 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -111,8 +111,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
111#endif 111#endif
112 112
113#define free_thread_info(info) kfree(info) 113#define free_thread_info(info) kfree(info)
114#define get_thread_info(ti) get_task_struct((ti)->task)
115#define put_thread_info(ti) put_task_struct((ti)->task)
116 114
117#else /* !__ASSEMBLY__ */ 115#else /* !__ASSEMBLY__ */
118 116
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 0ec27c9e8e45..d7e19eb344b7 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -72,7 +72,6 @@ static inline int node_to_first_cpu(int node)
72 .max_interval = 32, \ 72 .max_interval = 32, \
73 .busy_factor = 32, \ 73 .busy_factor = 32, \
74 .imbalance_pct = 125, \ 74 .imbalance_pct = 125, \
75 .cache_hot_time = (10*1000000), \
76 .cache_nice_tries = 1, \ 75 .cache_nice_tries = 1, \
77 .busy_idx = 3, \ 76 .busy_idx = 3, \
78 .idle_idx = 1, \ 77 .idle_idx = 1, \
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index aaf11f4e9169..c0b19106665c 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -192,7 +192,7 @@ compat_ptr (compat_uptr_t uptr)
192static __inline__ void __user * 192static __inline__ void __user *
193compat_alloc_user_space (long len) 193compat_alloc_user_space (long len)
194{ 194{
195 struct pt_regs *regs = ia64_task_regs(current); 195 struct pt_regs *regs = task_pt_regs(current);
196 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); 196 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
197} 197}
198 198
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 94e07e727395..8c648bf72bbd 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -352,7 +352,7 @@ extern unsigned long get_wchan (struct task_struct *p);
352/* Return instruction pointer of blocked task TSK. */ 352/* Return instruction pointer of blocked task TSK. */
353#define KSTK_EIP(tsk) \ 353#define KSTK_EIP(tsk) \
354 ({ \ 354 ({ \
355 struct pt_regs *_regs = ia64_task_regs(tsk); \ 355 struct pt_regs *_regs = task_pt_regs(tsk); \
356 _regs->cr_iip + ia64_psr(_regs)->ri; \ 356 _regs->cr_iip + ia64_psr(_regs)->ri; \
357 }) 357 })
358 358
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 2c703d6e0c86..9471cdc3f4c0 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -248,7 +248,7 @@ struct switch_stack {
248}) 248})
249 249
250 /* given a pointer to a task_struct, return the user's pt_regs */ 250 /* given a pointer to a task_struct, return the user's pt_regs */
251# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) 251# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
252# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr) 252# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
253# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0) 253# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
254# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs)) 254# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
@@ -271,7 +271,7 @@ struct switch_stack {
271 * 271 *
272 * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall. 272 * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
273 */ 273 */
274# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0) 274# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
275 275
276 struct task_struct; /* forward decl */ 276 struct task_struct; /* forward decl */
277 struct unw_frame_info; /* forward decl */ 277 struct unw_frame_info; /* forward decl */
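With the rename, the ia64 idiom for syscalls whose return value must not be mistaken for an error stays the same, only spelled through task_pt_regs(). A hedged sketch; sys_example_map() and its return value are hypothetical:

#include <linux/linkage.h>

asmlinkage unsigned long sys_example_map(void)
{
    force_successful_syscall_return(); /* clears task_pt_regs(current)->r8 */
    return 0xa000000000000000UL;       /* looks negative, but is a success */
}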
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 635235fa1e32..80c5a234e259 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -219,14 +219,14 @@ extern void ia64_load_extra (struct task_struct *task);
219 219
220#define IA64_HAS_EXTRA_STATE(t) \ 220#define IA64_HAS_EXTRA_STATE(t) \
221 ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ 221 ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
222 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE()) 222 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
223 223
224#define __switch_to(prev,next,last) do { \ 224#define __switch_to(prev,next,last) do { \
225 if (IA64_HAS_EXTRA_STATE(prev)) \ 225 if (IA64_HAS_EXTRA_STATE(prev)) \
226 ia64_save_extra(prev); \ 226 ia64_save_extra(prev); \
227 if (IA64_HAS_EXTRA_STATE(next)) \ 227 if (IA64_HAS_EXTRA_STATE(next)) \
228 ia64_load_extra(next); \ 228 ia64_load_extra(next); \
229 ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ 229 ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
230 (last) = ia64_switch_to((next)); \ 230 (last) = ia64_switch_to((next)); \
231} while (0) 231} while (0)
232 232
@@ -238,8 +238,8 @@ extern void ia64_load_extra (struct task_struct *task);
238 * the latest fph state from another CPU. In other words: eager save, lazy restore. 238 * the latest fph state from another CPU. In other words: eager save, lazy restore.
239 */ 239 */
240# define switch_to(prev,next,last) do { \ 240# define switch_to(prev,next,last) do { \
241 if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \ 241 if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
242 ia64_psr(ia64_task_regs(prev))->mfh = 0; \ 242 ia64_psr(task_pt_regs(prev))->mfh = 0; \
243 (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \ 243 (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
244 __ia64_save_fpu((prev)->thread.fph); \ 244 __ia64_save_fpu((prev)->thread.fph); \
245 } \ 245 } \
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
279#define ia64_platform_is(x) (strcmp(x, platform_name) == 0) 279#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
280 280
281void cpu_idle_wait(void); 281void cpu_idle_wait(void);
282void sched_cacheflush(void);
282 283
283#define arch_align_stack(x) (x) 284#define arch_align_stack(x) (x)
284 285
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 171b2207bde4..653bb7f9a753 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -57,11 +57,20 @@ struct thread_info {
57/* how to get the thread information struct from C */ 57/* how to get the thread information struct from C */
58#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) 58#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
59#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 59#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
60#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
60#else 61#else
61#define current_thread_info() ((struct thread_info *) 0) 62#define current_thread_info() ((struct thread_info *) 0)
62#define alloc_thread_info(tsk) ((struct thread_info *) 0) 63#define alloc_thread_info(tsk) ((struct thread_info *) 0)
64#define task_thread_info(tsk) ((struct thread_info *) 0)
63#endif 65#endif
64#define free_thread_info(ti) /* nothing */ 66#define free_thread_info(ti) /* nothing */
67#define task_stack_page(tsk) ((void *)(tsk))
68
69#define __HAVE_THREAD_FUNCTIONS
70#define setup_thread_stack(p, org) \
71 *task_thread_info(p) = *task_thread_info(org); \
72 task_thread_info(p)->task = (p);
73#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
65 74
66#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 75#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
67#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER)) 76#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
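Because ia64 co-locates thread_info with the task_struct allocation, task_stack_page() is the task pointer itself and setup_thread_stack() must repoint ->task after copying. A hedged sketch of what the generic fork path does through these hooks; dup_task_stack() is an illustrative name, not the real copy_process():

static void dup_task_stack(struct task_struct *p, struct task_struct *org)
{
    /* copy the parent's thread_info, then fix the back-pointer */
    setup_thread_stack(p, org);
    /* note: end_of_stack(p) is IA64_RBS_OFFSET, where the register
       backing store starts, not the base of a separate stack page */
}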
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index f7c330467e7e..d8aae4da3978 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -55,7 +55,6 @@ void build_cpu_to_node_map(void);
55 .max_interval = 4, \ 55 .max_interval = 4, \
56 .busy_factor = 64, \ 56 .busy_factor = 64, \
57 .imbalance_pct = 125, \ 57 .imbalance_pct = 125, \
58 .cache_hot_time = (10*1000000), \
59 .per_cpu_gain = 100, \ 58 .per_cpu_gain = 100, \
60 .cache_nice_tries = 2, \ 59 .cache_nice_tries = 2, \
61 .busy_idx = 2, \ 60 .busy_idx = 2, \
@@ -81,7 +80,6 @@ void build_cpu_to_node_map(void);
81 .max_interval = 8*(min(num_online_cpus(), 32)), \ 80 .max_interval = 8*(min(num_online_cpus(), 32)), \
82 .busy_factor = 64, \ 81 .busy_factor = 64, \
83 .imbalance_pct = 125, \ 82 .imbalance_pct = 125, \
84 .cache_hot_time = (10*1000000), \
85 .cache_nice_tries = 2, \ 83 .cache_nice_tries = 2, \
86 .busy_idx = 3, \ 84 .busy_idx = 3, \
87 .idle_idx = 2, \ 85 .idle_idx = 2, \
diff --git a/include/asm-m32r/ptrace.h b/include/asm-m32r/ptrace.h
index 55cd7ecfde43..0d058b2d844e 100644
--- a/include/asm-m32r/ptrace.h
+++ b/include/asm-m32r/ptrace.h
@@ -163,6 +163,9 @@ extern void show_regs(struct pt_regs *);
163 163
164extern void withdraw_debug_trap(struct pt_regs *regs); 164extern void withdraw_debug_trap(struct pt_regs *regs);
165 165
166#define task_pt_regs(task) \
167 ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
168
166#endif /* __KERNEL */ 169#endif /* __KERNEL */
167 170
168#endif /* _ASM_M32R_PTRACE_H */ 171#endif /* _ASM_M32R_PTRACE_H */
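m32r adopts the common formula: pt_regs sits at the very top of the stack page. A hedged sketch of a ptrace-style reader; get_user_pc() is an illustrative name, and bpc is assumed to be the saved program-counter field per the m32r headers:

static unsigned long get_user_pc(struct task_struct *child)
{
    struct pt_regs *regs = task_pt_regs(child);
    return regs->bpc;   /* saved user PC on m32r (assumed field name) */
}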
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index dcf619a0a0b0..06c12a037cba 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -68,6 +68,16 @@
68 last = __last; \ 68 last = __last; \
69} while(0) 69} while(0)
70 70
71/*
72 * On SMP systems, when the scheduler does migration-cost autodetection,
73 * it needs a way to flush as much of the CPU's caches as possible.
74 *
75 * TODO: fill this in!
76 */
77static inline void sched_cacheflush(void)
78{
79}
80
71/* Interrupt Control */ 81/* Interrupt Control */
72#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) 82#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
73#define local_irq_enable() \ 83#define local_irq_enable() \
diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h
index 0f589363f619..22aff3222d22 100644
--- a/include/asm-m32r/thread_info.h
+++ b/include/asm-m32r/thread_info.h
@@ -110,8 +110,6 @@ static inline struct thread_info *current_thread_info(void)
110#endif 110#endif
111 111
112#define free_thread_info(info) kfree(info) 112#define free_thread_info(info) kfree(info)
113#define get_thread_info(ti) get_task_struct((ti)->task)
114#define put_thread_info(ti) put_task_struct((ti)->task)
115 113
116#define TI_FLAG_FAULT_CODE_SHIFT 28 114#define TI_FLAG_FAULT_CODE_SHIFT 28
117 115
diff --git a/include/asm-m68k/amigahw.h b/include/asm-m68k/amigahw.h
index 3ae5d8d55ba9..a16fe4e5a28a 100644
--- a/include/asm-m68k/amigahw.h
+++ b/include/asm-m68k/amigahw.h
@@ -274,7 +274,7 @@ struct CIA {
274#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase) 274#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase)
275 275
276#define CUSTOM_PHYSADDR (0xdff000) 276#define CUSTOM_PHYSADDR (0xdff000)
277#define custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR))) 277#define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
278 278
279#define CIAA_PHYSADDR (0xbfe001) 279#define CIAA_PHYSADDR (0xbfe001)
280#define CIAB_PHYSADDR (0xbfd000) 280#define CIAB_PHYSADDR (0xbfd000)
@@ -294,12 +294,12 @@ static inline void amifb_video_off(void)
294{ 294{
295 if (amiga_chipset == CS_ECS || amiga_chipset == CS_AGA) { 295 if (amiga_chipset == CS_ECS || amiga_chipset == CS_AGA) {
296 /* program Denise/Lisa for a higher maximum play rate */ 296 /* program Denise/Lisa for a higher maximum play rate */
297 custom.htotal = 113; /* 31 kHz */ 297 amiga_custom.htotal = 113; /* 31 kHz */
298 custom.vtotal = 223; /* 70 Hz */ 298 amiga_custom.vtotal = 223; /* 70 Hz */
299 custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */ 299 amiga_custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
300 /* suspend the monitor */ 300 /* suspend the monitor */
301 custom.hsstrt = custom.hsstop = 116; 301 amiga_custom.hsstrt = amiga_custom.hsstop = 116;
302 custom.vsstrt = custom.vsstop = 226; 302 amiga_custom.vsstrt = amiga_custom.vsstop = 226;
303 amiga_audio_min_period = 57; 303 amiga_audio_min_period = 57;
304 } 304 }
305} 305}
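The rename gives the chipset register block a prefixed name so it cannot collide with unrelated uses of the bare identifier custom. A before/after sketch; the interrupt bit value is illustrative, not verified against the hardware manual:

static void ack_vertb(void)
{
    /* was: custom.intreq = 0x0020; */
    amiga_custom.intreq = 0x0020;   /* clear the (assumed) VERTB bit */
}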
diff --git a/include/asm-m68k/amigaints.h b/include/asm-m68k/amigaints.h
index 2aff4cfbf7b3..aa968d014bb6 100644
--- a/include/asm-m68k/amigaints.h
+++ b/include/asm-m68k/amigaints.h
@@ -109,8 +109,6 @@
109extern void amiga_do_irq(int irq, struct pt_regs *fp); 109extern void amiga_do_irq(int irq, struct pt_regs *fp);
110extern void amiga_do_irq_list(int irq, struct pt_regs *fp); 110extern void amiga_do_irq_list(int irq, struct pt_regs *fp);
111 111
112extern unsigned short amiga_intena_vals[];
113
114/* CIA interrupt control register bits */ 112/* CIA interrupt control register bits */
115 113
116#define CIA_ICR_TA 0x01 114#define CIA_ICR_TA 0x01
diff --git a/include/asm-m68k/checksum.h b/include/asm-m68k/checksum.h
index 78860c20db01..17280ef719f5 100644
--- a/include/asm-m68k/checksum.h
+++ b/include/asm-m68k/checksum.h
@@ -25,7 +25,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
25 * better 64-bit) boundary 25 * better 64-bit) boundary
26 */ 26 */
27 27
28extern unsigned int csum_partial_copy_from_user(const unsigned char *src, 28extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
29 unsigned char *dst, 29 unsigned char *dst,
30 int len, int sum, 30 int len, int sum,
31 int *csum_err); 31 int *csum_err);
diff --git a/include/asm-m68k/dsp56k.h b/include/asm-m68k/dsp56k.h
index ab3dd33e23a1..2d8c0c9f794b 100644
--- a/include/asm-m68k/dsp56k.h
+++ b/include/asm-m68k/dsp56k.h
@@ -13,7 +13,7 @@
13/* Used for uploading DSP binary code */ 13/* Used for uploading DSP binary code */
14struct dsp56k_upload { 14struct dsp56k_upload {
15 int len; 15 int len;
16 char *bin; 16 char __user *bin;
17}; 17};
18 18
19/* For the DSP host flags */ 19/* For the DSP host flags */
diff --git a/include/asm-m68k/floppy.h b/include/asm-m68k/floppy.h
index c6e708dd9f62..63a05ed95c17 100644
--- a/include/asm-m68k/floppy.h
+++ b/include/asm-m68k/floppy.h
@@ -46,7 +46,7 @@ asmlinkage irqreturn_t floppy_hardint(int irq, void *dev_id,
46 46
47static int virtual_dma_count=0; 47static int virtual_dma_count=0;
48static int virtual_dma_residue=0; 48static int virtual_dma_residue=0;
49static char *virtual_dma_addr=0; 49static char *virtual_dma_addr=NULL;
50static int virtual_dma_mode=0; 50static int virtual_dma_mode=0;
51static int doing_pdma=0; 51static int doing_pdma=0;
52 52
diff --git a/include/asm-m68k/hardirq.h b/include/asm-m68k/hardirq.h
index 728318bf7f0e..5e1c5826c83d 100644
--- a/include/asm-m68k/hardirq.h
+++ b/include/asm-m68k/hardirq.h
@@ -14,13 +14,4 @@ typedef struct {
14 14
15#define HARDIRQ_BITS 8 15#define HARDIRQ_BITS 8
16 16
17/*
18 * The hardirq mask has to be large enough to have
19 * space for potentially all IRQ sources in the system
20 * nesting on a single CPU:
21 */
22#if (1 << HARDIRQ_BITS) < NR_IRQS
23# error HARDIRQ_BITS is too low!
24#endif
25
26#endif 17#endif
diff --git a/include/asm-m68k/io.h b/include/asm-m68k/io.h
index 6bb8b0d8f99d..dcfaa352d34c 100644
--- a/include/asm-m68k/io.h
+++ b/include/asm-m68k/io.h
@@ -24,6 +24,7 @@
24#ifdef __KERNEL__ 24#ifdef __KERNEL__
25 25
26#include <linux/config.h> 26#include <linux/config.h>
27#include <linux/compiler.h>
27#include <asm/raw_io.h> 28#include <asm/raw_io.h>
28#include <asm/virtconvert.h> 29#include <asm/virtconvert.h>
29 30
@@ -120,68 +121,68 @@ extern int isa_sex;
120 * be compiled in so the case statement will be optimised away 121 * be compiled in so the case statement will be optimised away
121 */ 122 */
122 123
123static inline u8 *isa_itb(unsigned long addr) 124static inline u8 __iomem *isa_itb(unsigned long addr)
124{ 125{
125 switch(ISA_TYPE) 126 switch(ISA_TYPE)
126 { 127 {
127#ifdef CONFIG_Q40 128#ifdef CONFIG_Q40
128 case Q40_ISA: return (u8 *)Q40_ISA_IO_B(addr); 129 case Q40_ISA: return (u8 __iomem *)Q40_ISA_IO_B(addr);
129#endif 130#endif
130#ifdef CONFIG_GG2 131#ifdef CONFIG_GG2
131 case GG2_ISA: return (u8 *)GG2_ISA_IO_B(addr); 132 case GG2_ISA: return (u8 __iomem *)GG2_ISA_IO_B(addr);
132#endif 133#endif
133#ifdef CONFIG_AMIGA_PCMCIA 134#ifdef CONFIG_AMIGA_PCMCIA
134 case AG_ISA: return (u8 *)AG_ISA_IO_B(addr); 135 case AG_ISA: return (u8 __iomem *)AG_ISA_IO_B(addr);
135#endif 136#endif
136 default: return 0; /* avoid warnings, just in case */ 137 default: return NULL; /* avoid warnings, just in case */
137 } 138 }
138} 139}
139static inline u16 *isa_itw(unsigned long addr) 140static inline u16 __iomem *isa_itw(unsigned long addr)
140{ 141{
141 switch(ISA_TYPE) 142 switch(ISA_TYPE)
142 { 143 {
143#ifdef CONFIG_Q40 144#ifdef CONFIG_Q40
144 case Q40_ISA: return (u16 *)Q40_ISA_IO_W(addr); 145 case Q40_ISA: return (u16 __iomem *)Q40_ISA_IO_W(addr);
145#endif 146#endif
146#ifdef CONFIG_GG2 147#ifdef CONFIG_GG2
147 case GG2_ISA: return (u16 *)GG2_ISA_IO_W(addr); 148 case GG2_ISA: return (u16 __iomem *)GG2_ISA_IO_W(addr);
148#endif 149#endif
149#ifdef CONFIG_AMIGA_PCMCIA 150#ifdef CONFIG_AMIGA_PCMCIA
150 case AG_ISA: return (u16 *)AG_ISA_IO_W(addr); 151 case AG_ISA: return (u16 __iomem *)AG_ISA_IO_W(addr);
151#endif 152#endif
152 default: return 0; /* avoid warnings, just in case */ 153 default: return NULL; /* avoid warnings, just in case */
153 } 154 }
154} 155}
155static inline u8 *isa_mtb(unsigned long addr) 156static inline u8 __iomem *isa_mtb(unsigned long addr)
156{ 157{
157 switch(ISA_TYPE) 158 switch(ISA_TYPE)
158 { 159 {
159#ifdef CONFIG_Q40 160#ifdef CONFIG_Q40
160 case Q40_ISA: return (u8 *)Q40_ISA_MEM_B(addr); 161 case Q40_ISA: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
161#endif 162#endif
162#ifdef CONFIG_GG2 163#ifdef CONFIG_GG2
163 case GG2_ISA: return (u8 *)GG2_ISA_MEM_B(addr); 164 case GG2_ISA: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
164#endif 165#endif
165#ifdef CONFIG_AMIGA_PCMCIA 166#ifdef CONFIG_AMIGA_PCMCIA
166 case AG_ISA: return (u8 *)addr; 167 case AG_ISA: return (u8 __iomem *)addr;
167#endif 168#endif
168 default: return 0; /* avoid warnings, just in case */ 169 default: return NULL; /* avoid warnings, just in case */
169 } 170 }
170} 171}
171static inline u16 *isa_mtw(unsigned long addr) 172static inline u16 __iomem *isa_mtw(unsigned long addr)
172{ 173{
173 switch(ISA_TYPE) 174 switch(ISA_TYPE)
174 { 175 {
175#ifdef CONFIG_Q40 176#ifdef CONFIG_Q40
176 case Q40_ISA: return (u16 *)Q40_ISA_MEM_W(addr); 177 case Q40_ISA: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
177#endif 178#endif
178#ifdef CONFIG_GG2 179#ifdef CONFIG_GG2
179 case GG2_ISA: return (u16 *)GG2_ISA_MEM_W(addr); 180 case GG2_ISA: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
180#endif 181#endif
181#ifdef CONFIG_AMIGA_PCMCIA 182#ifdef CONFIG_AMIGA_PCMCIA
182 case AG_ISA: return (u16 *)addr; 183 case AG_ISA: return (u16 __iomem *)addr;
183#endif 184#endif
184 default: return 0; /* avoid warnings, just in case */ 185 default: return NULL; /* avoid warnings, just in case */
185 } 186 }
186} 187}
187 188
@@ -326,20 +327,20 @@ static inline void isa_delay(void)
326 327
327#define mmiowb() 328#define mmiowb()
328 329
329static inline void *ioremap(unsigned long physaddr, unsigned long size) 330static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
330{ 331{
331 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); 332 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
332} 333}
333static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size) 334static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
334{ 335{
335 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); 336 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
336} 337}
337static inline void *ioremap_writethrough(unsigned long physaddr, 338static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
338 unsigned long size) 339 unsigned long size)
339{ 340{
340 return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); 341 return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
341} 342}
342static inline void *ioremap_fullcache(unsigned long physaddr, 343static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
343 unsigned long size) 344 unsigned long size)
344{ 345{
345 return __ioremap(physaddr, size, IOMAP_FULL_CACHING); 346 return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
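With __iomem on the ioremap() family, sparse can flag code that dereferences an I/O cookie directly instead of going through accessors. A hedged sketch of __iomem-clean usage; probe_board(), the mapping size, and the register layout are assumptions:

static int probe_board(unsigned long phys)
{
    void __iomem *base = ioremap(phys, 0x100);
    u8 id;

    if (!base)
        return -ENOMEM;
    id = in_8(base);    /* accessor from <asm/raw_io.h>, never *base */
    iounmap(base);
    return id;
}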
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index 127ad190cf2d..325c86f8512d 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -23,6 +23,15 @@
23#endif 23#endif
24 24
25/* 25/*
26 * The hardirq mask has to be large enough to have
27 * space for potentially all IRQ sources in the system
28 * nesting on a single CPU:
29 */
30#if (1 << HARDIRQ_BITS) < NR_IRQS
31# error HARDIRQ_BITS is too low!
32#endif
33
34/*
26 * Interrupt source definitions 35 * Interrupt source definitions
27 * General interrupt sources are the level 1-7. 36 * General interrupt sources are the level 1-7.
28 * Adding an interrupt service routine for one of these sources 37 * Adding an interrupt service routine for one of these sources
diff --git a/include/asm-m68k/machdep.h b/include/asm-m68k/machdep.h
index a0dd5c47002c..7d3fee342369 100644
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -34,7 +34,6 @@ extern void (*mach_power_off)( void );
34extern unsigned long (*mach_hd_init) (unsigned long, unsigned long); 34extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
35extern void (*mach_hd_setup)(char *, int *); 35extern void (*mach_hd_setup)(char *, int *);
36extern long mach_max_dma_address; 36extern long mach_max_dma_address;
37extern void (*mach_floppy_setup)(char *, int *);
38extern void (*mach_heartbeat) (int); 37extern void (*mach_heartbeat) (int);
39extern void (*mach_l2_flush) (int); 38extern void (*mach_l2_flush) (int);
40extern void (*mach_beep) (unsigned int, unsigned int); 39extern void (*mach_beep) (unsigned int, unsigned int);
diff --git a/include/asm-m68k/raw_io.h b/include/asm-m68k/raw_io.h
index 041f0a87b25d..5439bcaa57c6 100644
--- a/include/asm-m68k/raw_io.h
+++ b/include/asm-m68k/raw_io.h
@@ -19,9 +19,9 @@
19#define IOMAP_NOCACHE_NONSER 2 19#define IOMAP_NOCACHE_NONSER 2
20#define IOMAP_WRITETHROUGH 3 20#define IOMAP_WRITETHROUGH 3
21 21
22extern void iounmap(void *addr); 22extern void iounmap(void __iomem *addr);
23 23
24extern void *__ioremap(unsigned long physaddr, unsigned long size, 24extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
25 int cacheflag); 25 int cacheflag);
26extern void __iounmap(void *addr, unsigned long size); 26extern void __iounmap(void *addr, unsigned long size);
27 27
@@ -30,21 +30,21 @@ extern void __iounmap(void *addr, unsigned long size);
30 * two accesses to memory, which may be undesirable for some devices. 30 * two accesses to memory, which may be undesirable for some devices.
31 */ 31 */
32#define in_8(addr) \ 32#define in_8(addr) \
33 ({ u8 __v = (*(volatile u8 *) (addr)); __v; }) 33 ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
34#define in_be16(addr) \ 34#define in_be16(addr) \
35 ({ u16 __v = (*(volatile u16 *) (addr)); __v; }) 35 ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
36#define in_be32(addr) \ 36#define in_be32(addr) \
37 ({ u32 __v = (*(volatile u32 *) (addr)); __v; }) 37 ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
38#define in_le16(addr) \ 38#define in_le16(addr) \
39 ({ u16 __v = le16_to_cpu(*(volatile u16 *) (addr)); __v; }) 39 ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
40#define in_le32(addr) \ 40#define in_le32(addr) \
41 ({ u32 __v = le32_to_cpu(*(volatile u32 *) (addr)); __v; }) 41 ({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })
42 42
43#define out_8(addr,b) (void)((*(volatile u8 *) (addr)) = (b)) 43#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
44#define out_be16(addr,w) (void)((*(volatile u16 *) (addr)) = (w)) 44#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
45#define out_be32(addr,l) (void)((*(volatile u32 *) (addr)) = (l)) 45#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
46#define out_le16(addr,w) (void)((*(volatile u16 *) (addr)) = cpu_to_le16(w)) 46#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
47#define out_le32(addr,l) (void)((*(volatile u32 *) (addr)) = cpu_to_le32(l)) 47#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))
48 48
49#define raw_inb in_8 49#define raw_inb in_8
50#define raw_inw in_be16 50#define raw_inw in_be16
@@ -54,7 +54,7 @@ extern void __iounmap(void *addr, unsigned long size);
54#define raw_outw(val,port) out_be16((port),(val)) 54#define raw_outw(val,port) out_be16((port),(val))
55#define raw_outl(val,port) out_be32((port),(val)) 55#define raw_outl(val,port) out_be32((port),(val))
56 56
57static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len) 57static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
58{ 58{
59 unsigned int i; 59 unsigned int i;
60 60
@@ -62,7 +62,7 @@ static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
62 *buf++ = in_8(port); 62 *buf++ = in_8(port);
63} 63}
64 64
65static inline void raw_outsb(volatile u8 *port, const u8 *buf, 65static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
66 unsigned int len) 66 unsigned int len)
67{ 67{
68 unsigned int i; 68 unsigned int i;
@@ -71,7 +71,7 @@ static inline void raw_outsb(volatile u8 *port, const u8 *buf,
71 out_8(port, *buf++); 71 out_8(port, *buf++);
72} 72}
73 73
74static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr) 74static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
75{ 75{
76 unsigned int tmp; 76 unsigned int tmp;
77 77
@@ -110,7 +110,7 @@ static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
110 } 110 }
111} 111}
112 112
113static inline void raw_outsw(volatile u16 *port, const u16 *buf, 113static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
114 unsigned int nr) 114 unsigned int nr)
115{ 115{
116 unsigned int tmp; 116 unsigned int tmp;
@@ -150,7 +150,7 @@ static inline void raw_outsw(volatile u16 *port, const u16 *buf,
150 } 150 }
151} 151}
152 152
153static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr) 153static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
154{ 154{
155 unsigned int tmp; 155 unsigned int tmp;
156 156
@@ -189,7 +189,7 @@ static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
189 } 189 }
190} 190}
191 191
192static inline void raw_outsl(volatile u32 *port, const u32 *buf, 192static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
193 unsigned int nr) 193 unsigned int nr)
194{ 194{
195 unsigned int tmp; 195 unsigned int tmp;
@@ -230,7 +230,7 @@ static inline void raw_outsl(volatile u32 *port, const u32 *buf,
230} 230}
231 231
232 232
233static inline void raw_insw_swapw(volatile u16 *port, u16 *buf, 233static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
234 unsigned int nr) 234 unsigned int nr)
235{ 235{
236 if ((nr) % 8) 236 if ((nr) % 8)
@@ -283,7 +283,7 @@ static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
283 : "d0", "a0", "a1", "d6"); 283 : "d0", "a0", "a1", "d6");
284} 284}
285 285
286static inline void raw_outsw_swapw(volatile u16 *port, const u16 *buf, 286static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
287 unsigned int nr) 287 unsigned int nr)
288{ 288{
289 if ((nr) % 8) 289 if ((nr) % 8)
diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h
index a0cdf9082372..b7b7ea20caab 100644
--- a/include/asm-m68k/signal.h
+++ b/include/asm-m68k/signal.h
@@ -144,7 +144,7 @@ struct sigaction {
144#endif /* __KERNEL__ */ 144#endif /* __KERNEL__ */
145 145
146typedef struct sigaltstack { 146typedef struct sigaltstack {
147 void *ss_sp; 147 void __user *ss_sp;
148 int ss_flags; 148 int ss_flags;
149 size_t ss_size; 149 size_t ss_size;
150} stack_t; 150} stack_t;
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index e974bb072047..5156a28a18d8 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -211,7 +211,7 @@ static inline unsigned long pte_to_pgoff(pte_t pte)
211 return pte.pte & SUN3_PAGE_PGNUM_MASK; 211 return pte.pte & SUN3_PAGE_PGNUM_MASK;
212} 212}
213 213
214static inline pte_t pgoff_to_pte(inline unsigned off) 214static inline pte_t pgoff_to_pte(unsigned off)
215{ 215{
216 pte_t pte = { off + SUN3_PAGE_ACCESSED }; 216 pte_t pte = { off + SUN3_PAGE_ACCESSED };
217 return pte; 217 return pte;
diff --git a/include/asm-m68k/sun3ints.h b/include/asm-m68k/sun3ints.h
index fd838eb14213..bd038fccb64b 100644
--- a/include/asm-m68k/sun3ints.h
+++ b/include/asm-m68k/sun3ints.h
@@ -31,7 +31,6 @@ int sun3_request_irq(unsigned int irq,
31 ); 31 );
32extern void sun3_init_IRQ (void); 32extern void sun3_init_IRQ (void);
33extern irqreturn_t (*sun3_default_handler[]) (int, void *, struct pt_regs *); 33extern irqreturn_t (*sun3_default_handler[]) (int, void *, struct pt_regs *);
34extern irqreturn_t (*sun3_inthandler[]) (int, void *, struct pt_regs *);
35extern void sun3_free_irq (unsigned int irq, void *dev_id); 34extern void sun3_free_irq (unsigned int irq, void *dev_id);
36extern void sun3_enable_interrupts (void); 35extern void sun3_enable_interrupts (void);
37extern void sun3_disable_interrupts (void); 36extern void sun3_disable_interrupts (void);
diff --git a/include/asm-m68k/sun3xflop.h b/include/asm-m68k/sun3xflop.h
index fda1eccf10aa..98a9f79dab29 100644
--- a/include/asm-m68k/sun3xflop.h
+++ b/include/asm-m68k/sun3xflop.h
@@ -208,7 +208,7 @@ static int sun3xflop_request_irq(void)
208 208
209 if(!once) { 209 if(!once) {
210 once = 1; 210 once = 1;
211 error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", 0); 211 error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", NULL);
212 return ((error == 0) ? 0 : -1); 212 return ((error == 0) ? 0 : -1);
213 } else return 0; 213 } else return 0;
214} 214}
@@ -238,7 +238,7 @@ static int sun3xflop_init(void)
238 *sun3x_fdc.fcr_r = 0; 238 *sun3x_fdc.fcr_r = 0;
239 239
240 /* Success... */ 240 /* Success... */
241 floppy_set_flags(0, 1, FD_BROKEN_DCL); // I don't know how to detect this. 241 floppy_set_flags(NULL, 1, FD_BROKEN_DCL); // I don't know how to detect this.
242 allowed_drive_mask = 0x01; 242 allowed_drive_mask = 0x01;
243 return (int) SUN3X_FDC; 243 return (int) SUN3X_FDC;
244} 244}
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index 9532ca3c45cb..c4d622a57dfb 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
37#define init_stack (init_thread_union.stack) 37#define init_stack (init_thread_union.stack)
38 38
39#define task_thread_info(tsk) (&(tsk)->thread.info) 39#define task_thread_info(tsk) (&(tsk)->thread.info)
40#define task_stack_page(tsk) ((void *)(tsk)->thread_info)
40#define current_thread_info() task_thread_info(current) 41#define current_thread_info() task_thread_info(current)
41 42
42#define __HAVE_THREAD_FUNCTIONS 43#define __HAVE_THREAD_FUNCTIONS
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index f5cedf19cf68..2ffd87b0a769 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -42,6 +42,7 @@ struct exception_table_entry
42({ \ 42({ \
43 int __pu_err; \ 43 int __pu_err; \
44 typeof(*(ptr)) __pu_val = (x); \ 44 typeof(*(ptr)) __pu_val = (x); \
45 __chk_user_ptr(ptr); \
45 switch (sizeof (*(ptr))) { \ 46 switch (sizeof (*(ptr))) { \
46 case 1: \ 47 case 1: \
47 __put_user_asm(__pu_err, __pu_val, ptr, b); \ 48 __put_user_asm(__pu_err, __pu_val, ptr, b); \
@@ -91,6 +92,7 @@ __asm__ __volatile__ \
91({ \ 92({ \
92 int __gu_err; \ 93 int __gu_err; \
93 typeof(*(ptr)) __gu_val; \ 94 typeof(*(ptr)) __gu_val; \
95 __chk_user_ptr(ptr); \
94 switch (sizeof(*(ptr))) { \ 96 switch (sizeof(*(ptr))) { \
95 case 1: \ 97 case 1: \
96 __get_user_asm(__gu_err, __gu_val, ptr, b, "=d"); \ 98 __get_user_asm(__gu_err, __gu_val, ptr, b, "=d"); \
@@ -105,7 +107,7 @@ __asm__ __volatile__ \
105 __gu_err = __constant_copy_from_user(&__gu_val, ptr, 8); \ 107 __gu_err = __constant_copy_from_user(&__gu_val, ptr, 8); \
106 break; \ 108 break; \
107 default: \ 109 default: \
108 __gu_val = 0; \ 110 __gu_val = (typeof(*(ptr)))0; \
109 __gu_err = __get_user_bad(); \ 111 __gu_err = __get_user_bad(); \
110 break; \ 112 break; \
111 } \ 113 } \
@@ -134,7 +136,7 @@ __asm__ __volatile__ \
134 : "m"(*(ptr)), "i" (-EFAULT), "0"(0)) 136 : "m"(*(ptr)), "i" (-EFAULT), "0"(0))
135 137
136static inline unsigned long 138static inline unsigned long
137__generic_copy_from_user(void *to, const void *from, unsigned long n) 139__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
138{ 140{
139 unsigned long tmp; 141 unsigned long tmp;
140 __asm__ __volatile__ 142 __asm__ __volatile__
@@ -189,7 +191,7 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
189} 191}
190 192
191static inline unsigned long 193static inline unsigned long
192__generic_copy_to_user(void *to, const void *from, unsigned long n) 194__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
193{ 195{
194 unsigned long tmp; 196 unsigned long tmp;
195 __asm__ __volatile__ 197 __asm__ __volatile__
@@ -264,7 +266,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
264 : "d0", "memory") 266 : "d0", "memory")
265 267
266static inline unsigned long 268static inline unsigned long
267__constant_copy_from_user(void *to, const void *from, unsigned long n) 269__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
268{ 270{
269 switch (n) { 271 switch (n) {
270 case 0: 272 case 0:
@@ -520,7 +522,7 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
520#define __copy_from_user_inatomic __copy_from_user 522#define __copy_from_user_inatomic __copy_from_user
521 523
522static inline unsigned long 524static inline unsigned long
523__constant_copy_to_user(void *to, const void *from, unsigned long n) 525__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
524{ 526{
525 switch (n) { 527 switch (n) {
526 case 0: 528 case 0:
@@ -766,7 +768,7 @@ __constant_copy_to_user(void *to, const void *from, unsigned long n)
766 */ 768 */
767 769
768static inline long 770static inline long
769strncpy_from_user(char *dst, const char *src, long count) 771strncpy_from_user(char *dst, const char __user *src, long count)
770{ 772{
771 long res; 773 long res;
772 if (count == 0) return count; 774 if (count == 0) return count;
@@ -799,11 +801,11 @@ strncpy_from_user(char *dst, const char *src, long count)
799 * 801 *
800 * Return 0 on exception, a value greater than N if too long 802 * Return 0 on exception, a value greater than N if too long
801 */ 803 */
802static inline long strnlen_user(const char *src, long n) 804static inline long strnlen_user(const char __user *src, long n)
803{ 805{
804 long res; 806 long res;
805 807
806 res = -(long)src; 808 res = -(unsigned long)src;
807 __asm__ __volatile__ 809 __asm__ __volatile__
808 ("1:\n" 810 ("1:\n"
809 " tstl %2\n" 811 " tstl %2\n"
@@ -842,7 +844,7 @@ static inline long strnlen_user(const char *src, long n)
842 */ 844 */
843 845
844static inline unsigned long 846static inline unsigned long
845clear_user(void *to, unsigned long n) 847clear_user(void __user *to, unsigned long n)
846{ 848{
847 __asm__ __volatile__ 849 __asm__ __volatile__
848 (" tstl %1\n" 850 (" tstl %1\n"
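The __user annotations plus the new __chk_user_ptr() checks let sparse catch kernel/user pointer mixups at compile time. A hedged sketch of a handler built on the dsp56k_upload struct annotated earlier in this diff; get_upload_len() is an illustrative name:

#include <asm/uaccess.h>

static int get_upload_len(unsigned long arg)
{
    struct dsp56k_upload req;

    if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
        return -EFAULT;
    return req.len;   /* req.bin keeps its __user tag for later copies */
}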
diff --git a/include/asm-m68k/zorro.h b/include/asm-m68k/zorro.h
index cf816588bedb..5ce97c22b582 100644
--- a/include/asm-m68k/zorro.h
+++ b/include/asm-m68k/zorro.h
@@ -15,24 +15,24 @@
15#define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) 15#define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
16#define z_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) 16#define z_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
17 17
18static inline void *z_remap_nocache_ser(unsigned long physaddr, 18static inline void __iomem *z_remap_nocache_ser(unsigned long physaddr,
19 unsigned long size) 19 unsigned long size)
20{ 20{
21 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); 21 return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
22} 22}
23 23
24static inline void *z_remap_nocache_nonser(unsigned long physaddr, 24static inline void __iomem *z_remap_nocache_nonser(unsigned long physaddr,
25 unsigned long size) 25 unsigned long size)
26{ 26{
27 return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER); 27 return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER);
28} 28}
29 29
30static inline void *z_remap_writethrough(unsigned long physaddr, 30static inline void __iomem *z_remap_writethrough(unsigned long physaddr,
31 unsigned long size) 31 unsigned long size)
32{ 32{
33 return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); 33 return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
34} 34}
35static inline void *z_remap_fullcache(unsigned long physaddr, 35static inline void __iomem *z_remap_fullcache(unsigned long physaddr,
36 unsigned long size) 36 unsigned long size)
37{ 37{
38 return __ioremap(physaddr, size, IOMAP_FULL_CACHING); 38 return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
diff --git a/include/asm-m68knommu/machdep.h b/include/asm-m68knommu/machdep.h
index 5a9f9c297f79..27c90afd3339 100644
--- a/include/asm-m68knommu/machdep.h
+++ b/include/asm-m68knommu/machdep.h
@@ -38,7 +38,6 @@ extern void (*mach_power_off)( void );
38extern unsigned long (*mach_hd_init) (unsigned long, unsigned long); 38extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
39extern void (*mach_hd_setup)(char *, int *); 39extern void (*mach_hd_setup)(char *, int *);
40extern long mach_max_dma_address; 40extern long mach_max_dma_address;
41extern void (*mach_floppy_setup)(char *, int *);
42extern void (*mach_floppy_eject)(void); 41extern void (*mach_floppy_eject)(void);
43extern void (*mach_heartbeat) (int); 42extern void (*mach_heartbeat) (int);
44extern void (*mach_l2_flush) (int); 43extern void (*mach_l2_flush) (int);
diff --git a/include/asm-m68knommu/thread_info.h b/include/asm-m68knommu/thread_info.h
index 7b9a3fa3af5d..b8f009edf2b2 100644
--- a/include/asm-m68knommu/thread_info.h
+++ b/include/asm-m68knommu/thread_info.h
@@ -75,8 +75,6 @@ static inline struct thread_info *current_thread_info(void)
75#define alloc_thread_info(tsk) ((struct thread_info *) \ 75#define alloc_thread_info(tsk) ((struct thread_info *) \
76 __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER)) 76 __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
77#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_SIZE_ORDER) 77#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_SIZE_ORDER)
78#define get_thread_info(ti) get_task_struct((ti)->task)
79#define put_thread_info(ti) put_task_struct((ti)->task)
80#endif /* __ASSEMBLY__ */ 78#endif /* __ASSEMBLY__ */
81 79
82#define PREEMPT_ACTIVE 0x4000000 80#define PREEMPT_ACTIVE 0x4000000
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index 82141c711c33..59d26b52ba32 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -27,7 +27,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
27 .max_interval = 32, \ 27 .max_interval = 32, \
28 .busy_factor = 32, \ 28 .busy_factor = 32, \
29 .imbalance_pct = 125, \ 29 .imbalance_pct = 125, \
30 .cache_hot_time = (10*1000), \
31 .cache_nice_tries = 1, \ 30 .cache_nice_tries = 1, \
32 .per_cpu_gain = 100, \ 31 .per_cpu_gain = 100, \
33 .flags = SD_LOAD_BALANCE \ 32 .flags = SD_LOAD_BALANCE \
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index de53055a62ae..39d2bd50fece 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -200,11 +200,11 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long
200 200
201unsigned long get_wchan(struct task_struct *p); 201unsigned long get_wchan(struct task_struct *p);
202 202
203#define __PT_REG(reg) ((long)&((struct pt_regs *)0)->reg - sizeof(struct pt_regs)) 203#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
204#define __KSTK_TOS(tsk) ((unsigned long)(tsk->thread_info) + THREAD_SIZE - 32) 204#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1)
205#define KSTK_EIP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_epc))) 205#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
206#define KSTK_ESP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(regs[29]))) 206#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
207#define KSTK_STATUS(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_status))) 207#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
208 208
209#define cpu_relax() barrier() 209#define cpu_relax() barrier()
210 210
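The MIPS rewrite replaces the null-pointer offset trick of __PT_REG() with plain struct indexing; both forms address the same word below the top of stack. A runnable userspace check, assuming a stand-in pt_regs layout and stack size:

#include <stdio.h>

struct pt_regs { unsigned long regs[32]; unsigned long cp0_epc; };
#define THREAD_SIZE 8192    /* assumed, not the real MIPS value */

int main(void)
{
    static char stack[THREAD_SIZE];
    unsigned long tos = (unsigned long)stack + THREAD_SIZE - 32;
    struct pt_regs *new_way = (struct pt_regs *)tos - 1;
    unsigned long old_way = tos +
        ((long)&((struct pt_regs *)0)->cp0_epc - sizeof(struct pt_regs));

    printf("%d\n", (unsigned long)&new_way->cp0_epc == old_way); /* 1 */
    return 0;
}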
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 330c4e497af3..e8e5d4143377 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -159,11 +159,21 @@ struct task_struct;
159do { \ 159do { \
160 if (cpu_has_dsp) \ 160 if (cpu_has_dsp) \
161 __save_dsp(prev); \ 161 __save_dsp(prev); \
162 (last) = resume(prev, next, next->thread_info); \ 162 (last) = resume(prev, next, task_thread_info(next)); \
163 if (cpu_has_dsp) \ 163 if (cpu_has_dsp) \
164 __restore_dsp(current); \ 164 __restore_dsp(current); \
165} while(0) 165} while(0)
166 166
167/*
168 * On SMP systems, when the scheduler does migration-cost autodetection,
169 * it needs a way to flush as much of the CPU's caches as possible.
170 *
171 * TODO: fill this in!
172 */
173static inline void sched_cacheflush(void)
174{
175}
176
167static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) 177static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
168{ 178{
169 __u32 retval; 179 __u32 retval;
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index e6c24472e03f..1612b3fe1080 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -97,8 +97,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
97#endif 97#endif
98 98
99#define free_thread_info(info) kfree(info) 99#define free_thread_info(info) kfree(info)
100#define get_thread_info(ti) get_task_struct((ti)->task)
101#define put_thread_info(ti) put_task_struct((ti)->task)
102 100
103#endif /* !__ASSEMBLY__ */ 101#endif /* !__ASSEMBLY__ */
104 102
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index f3928d3a80cb..a5a973c0c07f 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
49 (last) = _switch_to(prev, next); \ 49 (last) = _switch_to(prev, next); \
50} while(0) 50} while(0)
51 51
52/*
53 * On SMP systems, when the scheduler does migration-cost autodetection,
54 * it needs a way to flush as much of the CPU's caches as possible.
55 *
56 * TODO: fill this in!
57 */
58static inline void sched_cacheflush(void)
59{
60}
52 61
53 62
54/* interrupt control */ 63/* interrupt control */
diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h
index 57bbb76cb6c1..ac32f140b83a 100644
--- a/include/asm-parisc/thread_info.h
+++ b/include/asm-parisc/thread_info.h
@@ -43,9 +43,6 @@ struct thread_info {
43#define alloc_thread_info(tsk) ((struct thread_info *) \ 43#define alloc_thread_info(tsk) ((struct thread_info *) \
44 __get_free_pages(GFP_KERNEL, THREAD_ORDER)) 44 __get_free_pages(GFP_KERNEL, THREAD_ORDER))
45#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) 45#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
46#define get_thread_info(ti) get_task_struct((ti)->task)
47#define put_thread_info(ti) put_task_struct((ti)->task)
48
49 46
50/* how to get the thread information struct from C */ 47/* how to get the thread information struct from C */
51#define current_thread_info() ((struct thread_info *)mfctl(30)) 48#define current_thread_info() ((struct thread_info *)mfctl(30))
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 4c888303e85b..9b822afa7d0e 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -183,6 +183,16 @@ struct thread_struct;
183extern struct task_struct *_switch(struct thread_struct *prev, 183extern struct task_struct *_switch(struct thread_struct *prev,
184 struct thread_struct *next); 184 struct thread_struct *next);
185 185
186/*
187 * On SMP systems, when the scheduler does migration-cost autodetection,
188 * it needs a way to flush as much of the CPU's caches as possible.
189 *
190 * TODO: fill this in!
191 */
192static inline void sched_cacheflush(void)
193{
194}
195
186extern unsigned int rtas_data; 196extern unsigned int rtas_data;
187extern int mem_init_done; /* set on boot once kmalloc can be called */ 197extern int mem_init_done; /* set on boot once kmalloc can be called */
188extern unsigned long memory_limit; 198extern unsigned long memory_limit;
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index ac1e80e6033e..7e09d7cda933 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -89,9 +89,6 @@ struct thread_info {
89 89
90#endif /* THREAD_SHIFT < PAGE_SHIFT */ 90#endif /* THREAD_SHIFT < PAGE_SHIFT */
91 91
92#define get_thread_info(ti) get_task_struct((ti)->task)
93#define put_thread_info(ti) put_task_struct((ti)->task)
94
95/* how to get the thread information struct from C */ 92/* how to get the thread information struct from C */
96static inline struct thread_info *current_thread_info(void) 93static inline struct thread_info *current_thread_info(void)
97{ 94{
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 9f3d4da261c4..1e19cd00af25 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -39,7 +39,6 @@ static inline int node_to_first_cpu(int node)
39 .max_interval = 32, \ 39 .max_interval = 32, \
40 .busy_factor = 32, \ 40 .busy_factor = 32, \
41 .imbalance_pct = 125, \ 41 .imbalance_pct = 125, \
42 .cache_hot_time = (10*1000000), \
43 .cache_nice_tries = 1, \ 42 .cache_nice_tries = 1, \
44 .per_cpu_gain = 100, \ 43 .per_cpu_gain = 100, \
45 .busy_idx = 3, \ 44 .busy_idx = 3, \
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index b97037348277..fb49c0c49ea1 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -131,6 +131,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
131 struct task_struct *); 131 struct task_struct *);
132#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next))) 132#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
133 133
134/*
135 * On SMP systems, when the scheduler does migration-cost autodetection,
136 * it needs a way to flush as much of the CPU's caches as possible.
137 *
138 * TODO: fill this in!
139 */
140static inline void sched_cacheflush(void)
141{
142}
143
134struct thread_struct; 144struct thread_struct;
135extern struct task_struct *_switch(struct thread_struct *prev, 145extern struct task_struct *_switch(struct thread_struct *prev,
136 struct thread_struct *next); 146 struct thread_struct *next);
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index 372d51cccd53..710646e64f7d 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -163,7 +163,7 @@ static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs)
163 163
164static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) 164static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
165{ 165{
166 struct pt_regs *ptregs = __KSTK_PTREGS(tsk); 166 struct pt_regs *ptregs = task_pt_regs(tsk);
167 memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs)); 167 memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
168 memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs)); 168 memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
169 regs->orig_gpr2 = ptregs->orig_gpr2; 169 regs->orig_gpr2 = ptregs->orig_gpr2;
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 4ec652ebb3b1..c5cbc4bd8414 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -191,10 +191,10 @@ extern void show_registers(struct pt_regs *regs);
191extern void show_trace(struct task_struct *task, unsigned long *sp); 191extern void show_trace(struct task_struct *task, unsigned long *sp);
192 192
193unsigned long get_wchan(struct task_struct *p); 193unsigned long get_wchan(struct task_struct *p);
194#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \ 194#define task_pt_regs(tsk) ((struct pt_regs *) \
195 ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs))) 195 (task_stack_page(tsk) + THREAD_SIZE) - 1)
196#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr) 196#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
197#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15]) 197#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
198 198
199/* 199/*
200 * Give up the time slice of the virtual PU. 200 * Give up the time slice of the virtual PU.
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 864cae7e1fd6..c7c3a9ad593f 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
104 prev = __switch_to(prev,next); \ 104 prev = __switch_to(prev,next); \
105} while (0) 105} while (0)
106 106
107/*
108 * On SMP systems, when the scheduler does migration-cost autodetection,
109 * it needs a way to flush as much of the CPU's caches as possible.
110 *
111 * TODO: fill this in!
112 */
113static inline void sched_cacheflush(void)
114{
115}
116
107#ifdef CONFIG_VIRT_CPU_ACCOUNTING 117#ifdef CONFIG_VIRT_CPU_ACCOUNTING
108extern void account_user_vtime(struct task_struct *); 118extern void account_user_vtime(struct task_struct *);
109extern void account_system_vtime(struct task_struct *); 119extern void account_system_vtime(struct task_struct *);
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 6c18a3f24316..f3797a52c4ea 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -81,8 +81,6 @@ static inline struct thread_info *current_thread_info(void)
81#define alloc_thread_info(tsk) ((struct thread_info *) \ 81#define alloc_thread_info(tsk) ((struct thread_info *) \
82 __get_free_pages(GFP_KERNEL,THREAD_ORDER)) 82 __get_free_pages(GFP_KERNEL,THREAD_ORDER))
83#define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER) 83#define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
84#define get_thread_info(ti) get_task_struct((ti)->task)
85#define put_thread_info(ti) put_task_struct((ti)->task)
86 84
87#endif 85#endif
88 86
diff --git a/include/asm-sh/ptrace.h b/include/asm-sh/ptrace.h
index 0f75e16a7415..792fc35bd624 100644
--- a/include/asm-sh/ptrace.h
+++ b/include/asm-sh/ptrace.h
@@ -91,6 +91,16 @@ struct pt_dspregs {
91#define instruction_pointer(regs) ((regs)->pc) 91#define instruction_pointer(regs) ((regs)->pc)
92extern void show_regs(struct pt_regs *); 92extern void show_regs(struct pt_regs *);
93 93
94#ifdef CONFIG_SH_DSP
95#define task_pt_regs(task) \
96 ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
97 - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
98#else
99#define task_pt_regs(task) \
100 ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
101 - sizeof(unsigned long)) - 1)
102#endif
103
94static inline unsigned long profile_pc(struct pt_regs *regs) 104static inline unsigned long profile_pc(struct pt_regs *regs)
95{ 105{
96 unsigned long pc = instruction_pointer(regs); 106 unsigned long pc = instruction_pointer(regs);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 28a3c2d8bcd7..bb0330499bdf 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -57,6 +57,16 @@
57 last = __last; \ 57 last = __last; \
58} while (0) 58} while (0)
59 59
60/*
61 * On SMP systems, when the scheduler does migration-cost autodetection,
62 * it needs a way to flush as much of the CPU's caches as possible.
63 *
64 * TODO: fill this in!
65 */
66static inline void sched_cacheflush(void)
67{
68}
69
60#define nop() __asm__ __volatile__ ("nop") 70#define nop() __asm__ __volatile__ ("nop")
61 71
62 72
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 46080cefaff8..85f0c11b4319 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void)
60#define THREAD_SIZE (2*PAGE_SIZE) 60#define THREAD_SIZE (2*PAGE_SIZE)
61#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 61#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
62#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 62#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
63#define get_thread_info(ti) get_task_struct((ti)->task)
64#define put_thread_info(ti) put_task_struct((ti)->task)
65 63
66#else /* !__ASSEMBLY__ */ 64#else /* !__ASSEMBLY__ */
67 65
diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h
index 10f024c6a2e3..1f825cb163c3 100644
--- a/include/asm-sh64/thread_info.h
+++ b/include/asm-sh64/thread_info.h
@@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void)
66 66
67#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 67#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
68#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 68#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
69#define get_thread_info(ti) get_task_struct((ti)->task)
70#define put_thread_info(ti) put_task_struct((ti)->task)
71 69
72#endif /* __ASSEMBLY__ */ 70#endif /* __ASSEMBLY__ */
73 71
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 1f6b71f9e1b6..58dd162927bb 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -155,7 +155,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
155 "here:\n" \ 155 "here:\n" \
156 : "=&r" (last) \ 156 : "=&r" (last) \
157 : "r" (&(current_set[hard_smp_processor_id()])), \ 157 : "r" (&(current_set[hard_smp_processor_id()])), \
158 "r" ((next)->thread_info), \ 158 "r" (task_thread_info(next)), \
159 "i" (TI_KPSR), \ 159 "i" (TI_KPSR), \
160 "i" (TI_KSP), \ 160 "i" (TI_KSP), \
161 "i" (TI_TASK) \ 161 "i" (TI_TASK) \
@@ -166,6 +166,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
166 } while(0) 166 } while(0)
167 167
168/* 168/*
169 * On SMP systems, when the scheduler does migration-cost autodetection,
170 * it needs a way to flush as much of the CPU's caches as possible.
171 *
172 * TODO: fill this in!
173 */
174static inline void sched_cacheflush(void)
175{
176}
177
178/*
169 * Changing the IRQ level on the Sparc. 179 * Changing the IRQ level on the Sparc.
170 */ 180 */
171extern void local_irq_restore(unsigned long); 181extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index ff6ccb3d24c6..65f060b040ab 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -92,9 +92,6 @@ BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
92BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *) 92BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
93#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti) 93#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
94 94
95#define get_thread_info(ti) get_task_struct((ti)->task)
96#define put_thread_info(ti) put_task_struct((ti)->task)
97
98#endif /* __ASSEMBLY__ */ 95#endif /* __ASSEMBLY__ */
99 96
100/* 97/*
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277e..69539a8ab833 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
119#endif 119#endif
120 120
121#define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \ 121#define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
122 ({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; }) 122 ({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
123 123
124/* 124/*
125 * This is used to ensure we don't load something for the wrong architecture. 125 * This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722c..57ee7b306189 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
60 register unsigned long pgd_cache asm("o4"); \ 60 register unsigned long pgd_cache asm("o4"); \
61 paddr = __pa((__mm)->pgd); \ 61 paddr = __pa((__mm)->pgd); \
62 pgd_cache = 0UL; \ 62 pgd_cache = 0UL; \
63 if ((__tsk)->thread_info->flags & _TIF_32BIT) \ 63 if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
64 pgd_cache = get_pgd_cache((__mm)->pgd); \ 64 pgd_cache = get_pgd_cache((__mm)->pgd); \
65 __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \ 65 __asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
66 "mov %3, %%g4\n\t" \ 66 "mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237e..cd8d9b4c8658 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
186 186
187extern unsigned long get_wchan(struct task_struct *task); 187extern unsigned long get_wchan(struct task_struct *task);
188 188
189#define KSTK_EIP(tsk) ((tsk)->thread_info->kregs->tpc) 189#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
190#define KSTK_ESP(tsk) ((tsk)->thread_info->kregs->u_regs[UREG_FP]) 190#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
191#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
191 192
192#define cpu_relax() barrier() 193#define cpu_relax() barrier()
193 194
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 309f1466b6fa..af254e581834 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -208,7 +208,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
208 /* If you are tempted to conditionalize the following */ \ 208 /* If you are tempted to conditionalize the following */ \
209 /* so that ASI is only written if it changes, think again. */ \ 209 /* so that ASI is only written if it changes, think again. */ \
210 __asm__ __volatile__("wr %%g0, %0, %%asi" \ 210 __asm__ __volatile__("wr %%g0, %0, %%asi" \
211 : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\ 211 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
212 __asm__ __volatile__( \ 212 __asm__ __volatile__( \
213 "mov %%g4, %%g7\n\t" \ 213 "mov %%g4, %%g7\n\t" \
214 "wrpr %%g0, 0x95, %%pstate\n\t" \ 214 "wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -238,7 +238,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
238 "b,a ret_from_syscall\n\t" \ 238 "b,a ret_from_syscall\n\t" \
239 "1:\n\t" \ 239 "1:\n\t" \
240 : "=&r" (last) \ 240 : "=&r" (last) \
241 : "0" (next->thread_info), \ 241 : "0" (task_thread_info(next)), \
242 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \ 242 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
243 "i" (TI_CWP), "i" (TI_TASK) \ 243 "i" (TI_CWP), "i" (TI_TASK) \
244 : "cc", \ 244 : "cc", \
@@ -253,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
253 } \ 253 } \
254} while(0) 254} while(0)
255 255
256/*
257 * On SMP systems, when the scheduler does migration-cost autodetection,
258 * it needs a way to flush as much of the CPU's caches as possible.
259 *
260 * TODO: fill this in!
261 */
262static inline void sched_cacheflush(void)
263{
264}
265
256static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) 266static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
257{ 267{
258 unsigned long tmp1, tmp2; 268 unsigned long tmp1, tmp2;
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 97267f059ef5..705c71972c32 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -56,9 +56,6 @@ static inline struct thread_info *current_thread_info(void)
56 ((struct thread_info *) kmalloc(THREAD_SIZE, GFP_KERNEL)) 56 ((struct thread_info *) kmalloc(THREAD_SIZE, GFP_KERNEL))
57#define free_thread_info(ti) kfree(ti) 57#define free_thread_info(ti) kfree(ti)
58 58
59#define get_thread_info(ti) get_task_struct((ti)->task)
60#define put_thread_info(ti) put_task_struct((ti)->task)
61
62#endif 59#endif
63 60
64#define PREEMPT_ACTIVE 0x10000000 61#define PREEMPT_ACTIVE 0x10000000
diff --git a/include/asm-v850/processor.h b/include/asm-v850/processor.h
index 98f929427d3d..2d31308935a0 100644
--- a/include/asm-v850/processor.h
+++ b/include/asm-v850/processor.h
@@ -98,10 +98,10 @@ unsigned long get_wchan (struct task_struct *p);
98 98
99 99
100/* Return some info about the user process TASK. */ 100/* Return some info about the user process TASK. */
101#define task_tos(task) ((unsigned long)(task)->thread_info + THREAD_SIZE) 101#define task_tos(task) ((unsigned long)task_stack_page(task) + THREAD_SIZE)
102#define task_regs(task) ((struct pt_regs *)task_tos (task) - 1) 102#define task_pt_regs(task) ((struct pt_regs *)task_tos (task) - 1)
103#define task_sp(task) (task_regs (task)->gpr[GPR_SP]) 103#define task_sp(task) (task_pt_regs (task)->gpr[GPR_SP])
104#define task_pc(task) (task_regs (task)->pc) 104#define task_pc(task) (task_pt_regs (task)->pc)
105/* Grotty old names for some. */ 105/* Grotty old names for some. */
106#define KSTK_EIP(task) task_pc (task) 106#define KSTK_EIP(task) task_pc (task)
107#define KSTK_ESP(task) task_sp (task) 107#define KSTK_ESP(task) task_sp (task)
diff --git a/include/asm-v850/thread_info.h b/include/asm-v850/thread_info.h
index e4cfad94a553..82b8f2846207 100644
--- a/include/asm-v850/thread_info.h
+++ b/include/asm-v850/thread_info.h
@@ -58,8 +58,6 @@ struct thread_info {
58#define alloc_thread_info(tsk) ((struct thread_info *) \ 58#define alloc_thread_info(tsk) ((struct thread_info *) \
59 __get_free_pages(GFP_KERNEL, 1)) 59 __get_free_pages(GFP_KERNEL, 1))
60#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 60#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
61#define get_thread_info(ti) get_task_struct((ti)->task)
62#define put_thread_info(ti) put_task_struct((ti)->task)
63 61
64#endif /* __ASSEMBLY__ */ 62#endif /* __ASSEMBLY__ */
65 63
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
index 3863a7da372b..b37ab8218ef0 100644
--- a/include/asm-x86_64/compat.h
+++ b/include/asm-x86_64/compat.h
@@ -198,7 +198,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
198 198
199static __inline__ void __user *compat_alloc_user_space(long len) 199static __inline__ void __user *compat_alloc_user_space(long len)
200{ 200{
201 struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs); 201 struct pt_regs *regs = task_pt_regs(current);
202 return (void __user *)regs->rsp - len; 202 return (void __user *)regs->rsp - len;
203} 203}
204 204
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index 57f7e1433849..876eb9a2fe78 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -30,7 +30,7 @@ extern int save_i387(struct _fpstate __user *buf);
30 */ 30 */
31 31
32#define unlazy_fpu(tsk) do { \ 32#define unlazy_fpu(tsk) do { \
33 if ((tsk)->thread_info->status & TS_USEDFPU) \ 33 if (task_thread_info(tsk)->status & TS_USEDFPU) \
34 save_init_fpu(tsk); \ 34 save_init_fpu(tsk); \
35} while (0) 35} while (0)
36 36
@@ -46,9 +46,9 @@ static inline void tolerant_fwait(void)
46} 46}
47 47
48#define clear_fpu(tsk) do { \ 48#define clear_fpu(tsk) do { \
49 if ((tsk)->thread_info->status & TS_USEDFPU) { \ 49 if (task_thread_info(tsk)->status & TS_USEDFPU) { \
50 tolerant_fwait(); \ 50 tolerant_fwait(); \
51 (tsk)->thread_info->status &= ~TS_USEDFPU; \ 51 task_thread_info(tsk)->status &= ~TS_USEDFPU; \
52 stts(); \ 52 stts(); \
53 } \ 53 } \
54} while (0) 54} while (0)
@@ -170,10 +170,10 @@ static inline void kernel_fpu_end(void)
170 preempt_enable(); 170 preempt_enable();
171} 171}
172 172
173static inline void save_init_fpu( struct task_struct *tsk ) 173static inline void save_init_fpu(struct task_struct *tsk)
174{ 174{
175 __fxsave_clear(tsk); 175 __fxsave_clear(tsk);
176 tsk->thread_info->status &= ~TS_USEDFPU; 176 task_thread_info(tsk)->status &= ~TS_USEDFPU;
177 stts(); 177 stts();
178} 178}
179 179
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 394dd729752d..87a282b1043a 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -321,8 +321,8 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
321#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8)) 321#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
322 322
323extern unsigned long get_wchan(struct task_struct *p); 323extern unsigned long get_wchan(struct task_struct *p);
324#define KSTK_EIP(tsk) \ 324#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
325 (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip) 325#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
326#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */ 326#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
327 327
328 328
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 38c1e8a69c9c..0eacbefb7dd0 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -193,6 +193,15 @@ static inline void write_cr4(unsigned long val)
193#define wbinvd() \ 193#define wbinvd() \
194 __asm__ __volatile__ ("wbinvd": : :"memory"); 194 __asm__ __volatile__ ("wbinvd": : :"memory");
195 195
196/*
197 * On SMP systems, when the scheduler does migration-cost autodetection,
198 * it needs a way to flush as much of the CPU's caches as possible.
199 */
200static inline void sched_cacheflush(void)
201{
202 wbinvd();
203}
204
196#endif /* __KERNEL__ */ 205#endif /* __KERNEL__ */
197 206
198#define nop() __asm__ __volatile__ ("nop") 207#define nop() __asm__ __volatile__ ("nop")
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index eb7c5fda1870..4ac0e0a36934 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -76,8 +76,6 @@ static inline struct thread_info *stack_thread_info(void)
76#define alloc_thread_info(tsk) \ 76#define alloc_thread_info(tsk) \
77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER)) 77 ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER) 78#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
79#define get_thread_info(ti) get_task_struct((ti)->task)
80#define put_thread_info(ti) put_task_struct((ti)->task)
81 79
82#else /* !__ASSEMBLY__ */ 80#else /* !__ASSEMBLY__ */
83 81
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 7d82bc56b9fa..2fa7f27381b4 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -39,7 +39,6 @@ extern int __node_distance(int, int);
39 .max_interval = 32, \ 39 .max_interval = 32, \
40 .busy_factor = 32, \ 40 .busy_factor = 32, \
41 .imbalance_pct = 125, \ 41 .imbalance_pct = 125, \
42 .cache_hot_time = (10*1000000), \
43 .cache_nice_tries = 2, \ 42 .cache_nice_tries = 2, \
44 .busy_idx = 3, \ 43 .busy_idx = 3, \
45 .idle_idx = 2, \ 44 .idle_idx = 2, \
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 9cab5e4298b9..d1d72ad36f08 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -184,12 +184,12 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
184#define release_segments(mm) do { } while(0) 184#define release_segments(mm) do { } while(0)
185#define forget_segments() do { } while (0) 185#define forget_segments() do { } while (0)
186 186
187#define thread_saved_pc(tsk) (xtensa_pt_regs(tsk)->pc) 187#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
188 188
189extern unsigned long get_wchan(struct task_struct *p); 189extern unsigned long get_wchan(struct task_struct *p);
190 190
191#define KSTK_EIP(tsk) (xtensa_pt_regs(tsk)->pc) 191#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
192#define KSTK_ESP(tsk) (xtensa_pt_regs(tsk)->areg[1]) 192#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
193 193
194#define cpu_relax() do { } while (0) 194#define cpu_relax() do { } while (0)
195 195
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
index aa4fd7fb3ce7..a5ac71a5205c 100644
--- a/include/asm-xtensa/ptrace.h
+++ b/include/asm-xtensa/ptrace.h
@@ -113,8 +113,8 @@ struct pt_regs {
113}; 113};
114 114
115#ifdef __KERNEL__ 115#ifdef __KERNEL__
116# define xtensa_pt_regs(tsk) ((struct pt_regs*) \ 116# define task_pt_regs(tsk) ((struct pt_regs*) \
117 (((long)(tsk)->thread_info + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4)) - 1) 117 (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
118# define user_mode(regs) (((regs)->ps & 0x00000020)!=0) 118# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
119# define instruction_pointer(regs) ((regs)->pc) 119# define instruction_pointer(regs) ((regs)->pc)
120extern void show_regs(struct pt_regs *); 120extern void show_regs(struct pt_regs *);
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
index af208d41fd82..5ae34ab71597 100644
--- a/include/asm-xtensa/thread_info.h
+++ b/include/asm-xtensa/thread_info.h
@@ -93,8 +93,6 @@ static inline struct thread_info *current_thread_info(void)
93/* thread information allocation */ 93/* thread information allocation */
94#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 94#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
95#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 95#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
96#define get_thread_info(ti) get_task_struct((ti)->task)
97#define put_thread_info(ti) put_task_struct((ti)->task)
98 96
99#else /* !__ASSEMBLY__ */ 97#else /* !__ASSEMBLY__ */
100 98
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3b74c4bf2934..a72e17135421 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -631,7 +631,14 @@ struct sched_domain {
631 631
632extern void partition_sched_domains(cpumask_t *partition1, 632extern void partition_sched_domains(cpumask_t *partition1,
633 cpumask_t *partition2); 633 cpumask_t *partition2);
634#endif /* CONFIG_SMP */ 634
635/*
636 * Maximum cache size the migration-costs auto-tuning code will
637 * search from:
638 */
639extern unsigned int max_cache_size;
640
641#endif /* CONFIG_SMP */
635 642
636 643
637struct io_context; /* See blkdev.h */ 644struct io_context; /* See blkdev.h */
@@ -689,9 +696,12 @@ struct task_struct {
689 696
690 int lock_depth; /* BKL lock depth */ 697 int lock_depth; /* BKL lock depth */
691 698
692#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 699#if defined(CONFIG_SMP)
700 int last_waker_cpu; /* CPU that last woke this task up */
701#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
693 int oncpu; 702 int oncpu;
694#endif 703#endif
704#endif
695 int prio, static_prio; 705 int prio, static_prio;
696 struct list_head run_list; 706 struct list_head run_list;
697 prio_array_t *array; 707 prio_array_t *array;
@@ -1230,6 +1240,7 @@ static inline void task_unlock(struct task_struct *p)
1230#ifndef __HAVE_THREAD_FUNCTIONS 1240#ifndef __HAVE_THREAD_FUNCTIONS
1231 1241
1232#define task_thread_info(task) (task)->thread_info 1242#define task_thread_info(task) (task)->thread_info
1243#define task_stack_page(task) ((void*)((task)->thread_info))
1233 1244
1234static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) 1245static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1235{ 1246{
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 3df1d474e5c5..315a5163d6a0 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -86,7 +86,6 @@
86 .max_interval = 2, \ 86 .max_interval = 2, \
87 .busy_factor = 8, \ 87 .busy_factor = 8, \
88 .imbalance_pct = 110, \ 88 .imbalance_pct = 110, \
89 .cache_hot_time = 0, \
90 .cache_nice_tries = 0, \ 89 .cache_nice_tries = 0, \
91 .per_cpu_gain = 25, \ 90 .per_cpu_gain = 25, \
92 .busy_idx = 0, \ 91 .busy_idx = 0, \
@@ -117,7 +116,6 @@
117 .max_interval = 4, \ 116 .max_interval = 4, \
118 .busy_factor = 64, \ 117 .busy_factor = 64, \
119 .imbalance_pct = 125, \ 118 .imbalance_pct = 125, \
120 .cache_hot_time = (5*1000000/2), \
121 .cache_nice_tries = 1, \ 119 .cache_nice_tries = 1, \
122 .per_cpu_gain = 100, \ 120 .per_cpu_gain = 100, \
123 .busy_idx = 2, \ 121 .busy_idx = 2, \
diff --git a/kernel/sched.c b/kernel/sched.c
index c0c60c926d5e..c9dec2aa1976 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -34,6 +34,7 @@
34#include <linux/notifier.h> 34#include <linux/notifier.h>
35#include <linux/profile.h> 35#include <linux/profile.h>
36#include <linux/suspend.h> 36#include <linux/suspend.h>
37#include <linux/vmalloc.h>
37#include <linux/blkdev.h> 38#include <linux/blkdev.h>
38#include <linux/delay.h> 39#include <linux/delay.h>
39#include <linux/smp.h> 40#include <linux/smp.h>
@@ -1289,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
1289 } 1290 }
1290 } 1291 }
1291 1292
1293 if (p->last_waker_cpu != this_cpu)
1294 goto out_set_cpu;
1295
1292 if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) 1296 if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
1293 goto out_set_cpu; 1297 goto out_set_cpu;
1294 1298
@@ -1359,6 +1363,8 @@ out_set_cpu:
1359 cpu = task_cpu(p); 1363 cpu = task_cpu(p);
1360 } 1364 }
1361 1365
1366 p->last_waker_cpu = this_cpu;
1367
1362out_activate: 1368out_activate:
1363#endif /* CONFIG_SMP */ 1369#endif /* CONFIG_SMP */
1364 if (old_state == TASK_UNINTERRUPTIBLE) { 1370 if (old_state == TASK_UNINTERRUPTIBLE) {
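The last_waker_cpu checks added above damp affine wakeups: try_to_wake_up() only considers pulling the task over to the waking CPU when that same CPU also performed the previous wakeup. A loose user-space model of the decision (illustrative only; struct task and choose_wake_cpu() are invented here, and the real code jumps back into the full balancing path rather than returning a CPU):

#include <stdio.h>

struct task { int cpu; int last_waker_cpu; };

static int choose_wake_cpu(struct task *p, int this_cpu)
{
	if (p->last_waker_cpu != this_cpu) {
		/* a different CPU woke us last time: stay put */
		p->last_waker_cpu = this_cpu;
		return p->cpu;
	}
	/* repeated wakeups from the same CPU: pull the task over */
	return this_cpu;
}

int main(void)
{
	struct task t = { .cpu = 3, .last_waker_cpu = -1 };

	printf("%d\n", choose_wake_cpu(&t, 0));	/* 3: first wake from CPU 0 */
	printf("%d\n", choose_wake_cpu(&t, 0));	/* 0: CPU 0 woke it twice in a row */
	return 0;
}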
@@ -1440,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
1440#ifdef CONFIG_SCHEDSTATS 1446#ifdef CONFIG_SCHEDSTATS
1441 memset(&p->sched_info, 0, sizeof(p->sched_info)); 1447 memset(&p->sched_info, 0, sizeof(p->sched_info));
1442#endif 1448#endif
1443#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 1449#if defined(CONFIG_SMP)
1450 p->last_waker_cpu = cpu;
1451#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
1444 p->oncpu = 0; 1452 p->oncpu = 0;
1445#endif 1453#endif
1454#endif
1446#ifdef CONFIG_PREEMPT 1455#ifdef CONFIG_PREEMPT
1447 /* Want to start with kernel preemption disabled. */ 1456 /* Want to start with kernel preemption disabled. */
1448 task_thread_info(p)->preempt_count = 1; 1457 task_thread_info(p)->preempt_count = 1;
@@ -5082,7 +5091,470 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
5082 5091
5083#define SD_NODES_PER_DOMAIN 16 5092#define SD_NODES_PER_DOMAIN 16
5084 5093
5094/*
5095 * Self-tuning task migration cost measurement between source and target CPUs.
5096 *
5097 * This is done by measuring the cost of manipulating buffers of varying
5098 * sizes. For a given buffer-size here are the steps that are taken:
5099 *
5100 * 1) the source CPU reads+dirties a shared buffer
5101 * 2) the target CPU reads+dirties the same shared buffer
5102 *
5103 * We measure how long they take, in the following 4 scenarios:
5104 *
5105 * - source: CPU1, target: CPU2 | cost1
5106 * - source: CPU2, target: CPU1 | cost2
5107 * - source: CPU1, target: CPU1 | cost3
5108 * - source: CPU2, target: CPU2 | cost4
5109 *
 5110 * We then calculate the (cost1+cost2)-(cost3+cost4) difference - this is
 5111 * the cost of migration.
5112 *
5113 * We then start off from a small buffer-size and iterate up to larger
5114 * buffer sizes, in 5% steps - measuring each buffer-size separately, and
5115 * doing a maximum search for the cost. (The maximum cost for a migration
5116 * normally occurs when the working set size is around the effective cache
5117 * size.)
5118 */
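A quick stand-alone check of the arithmetic above, with purely made-up timings: the two cross-CPU passes are slower because the target starts cache-cold, and their surplus over the two cache-hot same-CPU passes is what gets attributed to migration.

#include <stdio.h>

int main(void)
{
	/* hypothetical measurements, in nanoseconds */
	long long cost1 = 8000000;	/* source CPU1, target CPU2 */
	long long cost2 = 8200000;	/* source CPU2, target CPU1 */
	long long cost3 = 3000000;	/* source CPU1, target CPU1 (cache-hot) */
	long long cost4 = 3100000;	/* source CPU2, target CPU2 (cache-hot) */

	printf("migration cost: %lld nsec\n",
	       (cost1 + cost2) - (cost3 + cost4));	/* 10000000 */
	return 0;
}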
5119#define SEARCH_SCOPE 2
5120#define MIN_CACHE_SIZE (64*1024U)
5121#define DEFAULT_CACHE_SIZE (5*1024*1024U)
5122#define ITERATIONS 2
5123#define SIZE_THRESH 130
5124#define COST_THRESH 130
5125
5126/*
5127 * The migration cost is a function of 'domain distance'. Domain
5128 * distance is the number of steps a CPU has to iterate down its
5129 * domain tree to share a domain with the other CPU. The farther
5130 * two CPUs are from each other, the larger the distance gets.
5131 *
5132 * Note that we use the distance only to cache measurement results,
5133 * the distance value is not used numerically otherwise. When two
5134 * CPUs have the same distance it is assumed that the migration
5135 * cost is the same. (this is a simplification but quite practical)
5136 */
5137#define MAX_DOMAIN_DISTANCE 32
5138
5139static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
5140 { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL };
5141
5142/*
5143 * Allow override of migration cost - in units of microseconds.
5144 * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
 5145 * of 1 msec, level-2 cost of 2 msecs and level-3 cost of 3 msecs:
5146 */
5147static int __init migration_cost_setup(char *str)
5148{
5149 int ints[MAX_DOMAIN_DISTANCE+1], i;
5150
5151 str = get_options(str, ARRAY_SIZE(ints), ints);
5152
5153 printk("#ints: %d\n", ints[0]);
5154 for (i = 1; i <= ints[0]; i++) {
5155 migration_cost[i-1] = (unsigned long long)ints[i]*1000;
5156 printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
5157 }
5158 return 1;
5159}
5160
5161__setup ("migration_cost=", migration_cost_setup);
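Note that get_options() stores the parsed count in element 0 of the array, with the values following from element 1; that is why the loop above runs from 1 to ints[0] inclusive. A user-space mimic of the parsing (illustrative; mimic_get_options() is invented here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void mimic_get_options(const char *str, int nints, int *ints)
{
	char *dup = strdup(str), *tok;
	int n = 0;

	for (tok = strtok(dup, ","); tok && n < nints - 1; tok = strtok(NULL, ","))
		ints[++n] = atoi(tok);
	ints[0] = n;	/* element 0 holds the count, as with get_options() */
	free(dup);
}

int main(void)
{
	int ints[33], i;	/* MAX_DOMAIN_DISTANCE+1, as above */

	mimic_get_options("1000,2000,3000", 33, ints);
	for (i = 1; i <= ints[0]; i++)
		printf("migration_cost[%d] = %d usecs\n", i - 1, ints[i]);
	return 0;
}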
5162
5163/*
5164 * Global multiplier (divisor) for migration-cutoff values,
 5165 * in percent. E.g. use a value of 150 to get 1.5 times
5166 * longer cache-hot cutoff times.
5167 *
 5168 * (We scale it from 100 to 128 to make long long handling easier.)
5169 */
5170
5171#define MIGRATION_FACTOR_SCALE 128
5172
5173static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
5174
5175static int __init setup_migration_factor(char *str)
5176{
5177 get_option(&str, &migration_factor);
5178 migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
5179 return 1;
5180}
5181
5182__setup("migration_factor=", setup_migration_factor);
5183
5184/*
5185 * Estimated distance of two CPUs, measured via the number of domains
5186 * we have to pass for the two CPUs to be in the same span:
5187 */
5188static unsigned long domain_distance(int cpu1, int cpu2)
5189{
5190 unsigned long distance = 0;
5191 struct sched_domain *sd;
5192
5193 for_each_domain(cpu1, sd) {
5194 WARN_ON(!cpu_isset(cpu1, sd->span));
5195 if (cpu_isset(cpu2, sd->span))
5196 return distance;
5197 distance++;
5198 }
5199 if (distance >= MAX_DOMAIN_DISTANCE) {
5200 WARN_ON(1);
5201 distance = MAX_DOMAIN_DISTANCE-1;
5202 }
5203
5204 return distance;
5205}
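For example, on a hypothetical 8-CPU box (two sockets, each with two SMT pairs) the walk above yields distance 0 for an SMT sibling, 1 within the socket and 2 across sockets. A stand-alone model with invented span bitmasks:

#include <stdio.h>

/* cpu 0's domain spans, innermost (SMT) outwards, as invented bitmasks */
static const unsigned long spans[] = {
	0x03,	/* level 0: SMT siblings {0,1} */
	0x0f,	/* level 1: socket {0..3} */
	0xff,	/* level 2: whole system {0..7} */
};

static unsigned long model_distance(int cpu2)
{
	unsigned long distance;

	for (distance = 0; distance < 3; distance++)
		if (spans[distance] & (1UL << cpu2))
			return distance;
	return 2;	/* clamp, like the MAX_DOMAIN_DISTANCE fallback */
}

int main(void)
{
	printf("d(0,1)=%lu d(0,2)=%lu d(0,4)=%lu\n",
	       model_distance(1), model_distance(2), model_distance(4));
	return 0;	/* prints: d(0,1)=0 d(0,2)=1 d(0,4)=2 */
}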
5206
5207static unsigned int migration_debug;
5208
5209static int __init setup_migration_debug(char *str)
5210{
5211 get_option(&str, &migration_debug);
5212 return 1;
5213}
5214
5215__setup("migration_debug=", setup_migration_debug);
5216
5217/*
5218 * Maximum cache-size that the scheduler should try to measure.
5219 * Architectures with larger caches should tune this up during
5220 * bootup. Gets used in the domain-setup code (i.e. during SMP
5221 * bootup).
5222 */
5223unsigned int max_cache_size;
5224
5225static int __init setup_max_cache_size(char *str)
5226{
5227 get_option(&str, &max_cache_size);
5228 return 1;
5229}
5230
5231__setup("max_cache_size=", setup_max_cache_size);
5232
5233/*
5234 * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
5235 * is the operation that is timed, so we try to generate unpredictable
 5236 * cache misses that still end up filling the L2 cache:
5237 */
5238static void touch_cache(void *__cache, unsigned long __size)
5239{
5240 unsigned long size = __size/sizeof(long), chunk1 = size/3,
5241 chunk2 = 2*size/3;
5242 unsigned long *cache = __cache;
5243 int i;
5244
5245 for (i = 0; i < size/6; i += 8) {
5246 switch (i % 6) {
5247 case 0: cache[i]++;
5248 case 1: cache[size-1-i]++;
5249 case 2: cache[chunk1-i]++;
5250 case 3: cache[chunk1+i]++;
5251 case 4: cache[chunk2-i]++;
5252 case 5: cache[chunk2+i]++;
5253 }
5254 }
5255}
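Note that the switch above contains no break statements, so control falls through from the matching label to the bottom, and with the i += 8 stride the value of i % 6 only ever hits 0, 2 or 4; each iteration therefore bumps between two and six locations at hard-to-predict offsets. A quick user-space check of the stride/modulo interplay (illustrative):

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 48; i += 8)
		printf("i=%2d  i%%6=%d\n", i, i % 6);	/* cycles 0, 2, 4 */
	return 0;
}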
5256
5257/*
5258 * Measure the cache-cost of one task migration. Returns in units of nsec.
5259 */
5260static unsigned long long measure_one(void *cache, unsigned long size,
5261 int source, int target)
5262{
5263 cpumask_t mask, saved_mask;
5264 unsigned long long t0, t1, t2, t3, cost;
5265
5266 saved_mask = current->cpus_allowed;
5267
5268 /*
5269 * Flush source caches to RAM and invalidate them:
5270 */
5271 sched_cacheflush();
5272
5273 /*
5274 * Migrate to the source CPU:
5275 */
5276 mask = cpumask_of_cpu(source);
5277 set_cpus_allowed(current, mask);
5278 WARN_ON(smp_processor_id() != source);
5279
5280 /*
5281 * Dirty the working set:
5282 */
5283 t0 = sched_clock();
5284 touch_cache(cache, size);
5285 t1 = sched_clock();
5286
5287 /*
5288 * Migrate to the target CPU, dirty the L2 cache and access
 5289 * the shared buffer, which represents the working set
 5290 * of a migrated task.
5291 */
5292 mask = cpumask_of_cpu(target);
5293 set_cpus_allowed(current, mask);
5294 WARN_ON(smp_processor_id() != target);
5295
5296 t2 = sched_clock();
5297 touch_cache(cache, size);
5298 t3 = sched_clock();
5299
5300 cost = t1-t0 + t3-t2;
5301
5302 if (migration_debug >= 2)
5303 printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
 5304 source, target, t1-t0, t2-t1, t3-t2, cost);
5305 /*
5306 * Flush target caches to RAM and invalidate them:
5307 */
5308 sched_cacheflush();
5309
5310 set_cpus_allowed(current, saved_mask);
5311
5312 return cost;
5313}
5314
5315/*
5316 * Measure a series of task migrations and return the average
5317 * result. Since this code runs early during bootup the system
5318 * is 'undisturbed' and the average latency makes sense.
5319 *
5320 * The algorithm in essence auto-detects the relevant cache-size,
 5321 * so it will properly detect different cache sizes for different
5322 * cache-hierarchies, depending on how the CPUs are connected.
5323 *
5324 * Architectures can prime the upper limit of the search range via
 5325 * max_cache_size, otherwise the search range defaults to 10MB...64K.
5326 */
5327static unsigned long long
5328measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
5329{
5330 unsigned long long cost1, cost2;
5331 int i;
5332
5333 /*
 5334 * Measure the migration cost of 'size' bytes, averaged over
 5335 * ITERATIONS runs in each direction:
 5336 *
 5337 * (We perturb the buffer size by a small (0..4k)
 5338 * value to compensate for size/alignment-related artifacts.
5339 * We also subtract the cost of the operation done on
5340 * the same CPU.)
5341 */
5342 cost1 = 0;
5343
5344 /*
5345 * dry run, to make sure we start off cache-cold on cpu1,
5346 * and to get any vmalloc pagefaults in advance:
5347 */
5348 measure_one(cache, size, cpu1, cpu2);
5349 for (i = 0; i < ITERATIONS; i++)
5350 cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
5351
5352 measure_one(cache, size, cpu2, cpu1);
5353 for (i = 0; i < ITERATIONS; i++)
5354 cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
5355
5356 /*
5357 * (We measure the non-migrating [cached] cost on both
5358 * cpu1 and cpu2, to handle CPUs with different speeds)
5359 */
5360 cost2 = 0;
5361
5362 measure_one(cache, size, cpu1, cpu1);
5363 for (i = 0; i < ITERATIONS; i++)
5364 cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
5365
5366 measure_one(cache, size, cpu2, cpu2);
5367 for (i = 0; i < ITERATIONS; i++)
5368 cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
5369
5370 /*
5371 * Get the per-iteration migration cost:
5372 */
5373 do_div(cost1, 2*ITERATIONS);
5374 do_div(cost2, 2*ITERATIONS);
5375
5376 return cost1 - cost2;
5377}
5378
5379static unsigned long long measure_migration_cost(int cpu1, int cpu2)
5380{
5381 unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
5382 unsigned int max_size, size, size_found = 0;
5383 long long cost = 0, prev_cost;
5384 void *cache;
5385
5386 /*
 5387 * Search from max_cache_size*SEARCH_SCOPE down to 64K - the real
 5388 * relevant cache size has to lie somewhere in between.
5389 */
5390 if (max_cache_size) {
5391 max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
5392 size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
5393 } else {
5394 /*
 5395 * Since we have no estimate of the relevant search
 5396 * range, fall back to the built-in defaults:
5397 */
5398 max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
5399 size = MIN_CACHE_SIZE;
5400 }
5401
5402 if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
5403 printk("cpu %d and %d not both online!\n", cpu1, cpu2);
5404 return 0;
5405 }
5406
5407 /*
5408 * Allocate the working set:
5409 */
5410 cache = vmalloc(max_size);
5411 if (!cache) {
5412 printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
5413 return 1000000; // return 1 msec on very small boxen
5414 }
5415
5416 while (size <= max_size) {
5417 prev_cost = cost;
5418 cost = measure_cost(cpu1, cpu2, cache, size);
5419
5420 /*
5421 * Update the max:
5422 */
5423 if (cost > 0) {
5424 if (max_cost < cost) {
5425 max_cost = cost;
5426 size_found = size;
5427 }
5428 }
5429 /*
 5430 * Calculate the average fluctuation; we use this to prevent
5431 * noise from triggering an early break out of the loop:
5432 */
5433 fluct = abs(cost - prev_cost);
5434 avg_fluct = (avg_fluct + fluct)/2;
5435
5436 if (migration_debug)
5437 printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
5438 cpu1, cpu2, size,
5439 (long)cost / 1000000,
5440 ((long)cost / 100000) % 10,
5441 (long)max_cost / 1000000,
5442 ((long)max_cost / 100000) % 10,
5443 domain_distance(cpu1, cpu2),
5444 cost, avg_fluct);
5445
5446 /*
5447 * If we iterated at least 20% past the previous maximum,
5448 * and the cost has dropped by more than 20% already,
 5449 * (taking fluctuations into account) then we assume we
 5450 * have found the maximum and break out of the loop early:
5451 */
5452 if (size_found && (size*100 > size_found*SIZE_THRESH))
5453 if (cost+avg_fluct <= 0 ||
5454 max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
5455
5456 if (migration_debug)
5457 printk("-> found max.\n");
5458 break;
5459 }
5460 /*
5461 * Increase the cachesize in 5% steps:
5462 */
5463 size = size * 20 / 19;
5464 }
5465
5466 if (migration_debug)
5467 printk("[%d][%d] working set size found: %d, cost: %Ld\n",
5468 cpu1, cpu2, size_found, max_cost);
5469
5470 vfree(cache);
5471
5472 /*
5473 * A task is considered 'cache cold' if at least 2 times
5474 * the worst-case cost of migration has passed.
5475 *
 5476 * (this limit is only honored if the load-balancing
5477 * situation is 'nice' - if there is a large imbalance we
5478 * ignore it for the sake of CPU utilization and
5479 * processing fairness.)
5480 */
5481 return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
5482}
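The size = size*20/19 step grows the buffer by roughly 5% per iteration, so even a generous search range stays bounded. A stand-alone sketch counting the iterations for a 64K..10MB range (illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int size = 64 * 1024, max_size = 10 * 1024 * 1024;
	unsigned int steps = 0;

	while (size <= max_size) {
		size = size * 20 / 19;	/* the same growth step as above */
		steps++;
	}
	printf("%u steps\n", steps);	/* roughly 100 for these bounds */
	return 0;
}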
5483
5484static void calibrate_migration_costs(const cpumask_t *cpu_map)
5485{
5486 int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
5487 unsigned long j0, j1, distance, max_distance = 0;
5488 struct sched_domain *sd;
5489
5490 j0 = jiffies;
5491
5492 /*
5493 * First pass - calculate the cacheflush times:
5494 */
5495 for_each_cpu_mask(cpu1, *cpu_map) {
5496 for_each_cpu_mask(cpu2, *cpu_map) {
5497 if (cpu1 == cpu2)
5498 continue;
5499 distance = domain_distance(cpu1, cpu2);
5500 max_distance = max(max_distance, distance);
5501 /*
5502 * No result cached yet?
5503 */
5504 if (migration_cost[distance] == -1LL)
5505 migration_cost[distance] =
5506 measure_migration_cost(cpu1, cpu2);
5507 }
5508 }
5509 /*
5510 * Second pass - update the sched domain hierarchy with
5511 * the new cache-hot-time estimations:
5512 */
5513 for_each_cpu_mask(cpu, *cpu_map) {
5514 distance = 0;
5515 for_each_domain(cpu, sd) {
5516 sd->cache_hot_time = migration_cost[distance];
5517 distance++;
5518 }
5519 }
5520 /*
5521 * Print the matrix:
5522 */
5523 if (migration_debug)
5524 printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
5525 max_cache_size,
5526#ifdef CONFIG_X86
5527 cpu_khz/1000
5528#else
5529 -1
5530#endif
5531 );
5532 printk("migration_cost=");
5533 for (distance = 0; distance <= max_distance; distance++) {
5534 if (distance)
5535 printk(",");
5536 printk("%ld", (long)migration_cost[distance] / 1000);
5537 }
5538 printk("\n");
5539 j1 = jiffies;
5540 if (migration_debug)
5541 printk("migration: %ld seconds\n", (j1-j0)/HZ);
5542
5543 /*
5544 * Move back to the original CPU. NUMA-Q gets confused
5545 * if we migrate to another quad during bootup.
5546 */
5547 if (raw_smp_processor_id() != orig_cpu) {
5548 cpumask_t mask = cpumask_of_cpu(orig_cpu),
5549 saved_mask = current->cpus_allowed;
5550
5551 set_cpus_allowed(current, mask);
5552 set_cpus_allowed(current, saved_mask);
5553 }
5554}
5555
5085#ifdef CONFIG_NUMA 5556#ifdef CONFIG_NUMA
5557
5086/** 5558/**
5087 * find_next_best_node - find the next node to include in a sched_domain 5559 * find_next_best_node - find the next node to include in a sched_domain
5088 * @node: node whose sched_domain we're building 5560 * @node: node whose sched_domain we're building
@@ -5448,6 +5920,10 @@ next_sg:
5448#endif 5920#endif
5449 cpu_attach_domain(sd, i); 5921 cpu_attach_domain(sd, i);
5450 } 5922 }
5923 /*
5924 * Tune cache-hot values:
5925 */
5926 calibrate_migration_costs(cpu_map);
5451} 5927}
5452/* 5928/*
5453 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 5929 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1850d0aef4ac..b62cab575a84 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -208,6 +208,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
208 page = vm_normal_page(vma, addr, *pte); 208 page = vm_normal_page(vma, addr, *pte);
209 if (!page) 209 if (!page)
210 continue; 210 continue;
211 if (PageReserved(page))
212 continue;
211 nid = page_to_nid(page); 213 nid = page_to_nid(page);
212 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT)) 214 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
213 continue; 215 continue;
@@ -290,7 +292,7 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
290static inline int vma_migratable(struct vm_area_struct *vma) 292static inline int vma_migratable(struct vm_area_struct *vma)
291{ 293{
292 if (vma->vm_flags & ( 294 if (vma->vm_flags & (
293 VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP)) 295 VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
294 return 0; 296 return 0;
295 return 1; 297 return 1;
296} 298}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d41a0662d4da..8c960b469593 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1742,7 +1742,7 @@ void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1742 unsigned long end_pfn = start_pfn + size; 1742 unsigned long end_pfn = start_pfn + size;
1743 unsigned long pfn; 1743 unsigned long pfn;
1744 1744
1745 for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) { 1745 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1746 if (!early_pfn_valid(pfn)) 1746 if (!early_pfn_valid(pfn))
1747 continue; 1747 continue;
1748 page = pfn_to_page(pfn); 1748 page = pfn_to_page(pfn);
diff --git a/mm/swap.c b/mm/swap.c
index ee6d71ccfa56..cbb48e721ab9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -384,6 +384,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
384 return pagevec_count(pvec); 384 return pagevec_count(pvec);
385} 385}
386 386
387EXPORT_SYMBOL(pagevec_lookup);
388
387unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, 389unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
388 pgoff_t *index, int tag, unsigned nr_pages) 390 pgoff_t *index, int tag, unsigned nr_pages)
389{ 391{
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index cdc6d431972b..f9d6a9cc91c4 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -90,7 +90,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
90 file->f_mode = FMODE_WRITE | FMODE_READ; 90 file->f_mode = FMODE_WRITE | FMODE_READ;
91 91
92 /* notify everyone as to the change of file size */ 92 /* notify everyone as to the change of file size */
93 error = do_truncate(dentry, size, file); 93 error = do_truncate(dentry, size, 0, file);
94 if (error < 0) 94 if (error < 0)
95 goto close_file; 95 goto close_file;
96 96
diff --git a/sound/oss/dmasound/dmasound.h b/sound/oss/dmasound/dmasound.h
index 222014cafc1a..a1b0b92af4b5 100644
--- a/sound/oss/dmasound/dmasound.h
+++ b/sound/oss/dmasound/dmasound.h
@@ -270,7 +270,6 @@ extern int dmasound_catchRadius;
270#define SW_INPUT_VOLUME_SCALE 4 270#define SW_INPUT_VOLUME_SCALE 4
271#define SW_INPUT_VOLUME_DEFAULT (128 / SW_INPUT_VOLUME_SCALE) 271#define SW_INPUT_VOLUME_DEFAULT (128 / SW_INPUT_VOLUME_SCALE)
272 272
273extern int expand_bal; /* Balance factor for expanding (not volume!) */
274extern int expand_read_bal; /* Balance factor for reading */ 273extern int expand_read_bal; /* Balance factor for reading */
275extern uint software_input_volume; /* software implemented recording volume! */ 274extern uint software_input_volume; /* software implemented recording volume! */
276 275
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 59eb53f89318..dc31373069a5 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -67,46 +67,46 @@ static int expand_data; /* Data for expanding */
67 * ++geert: split in even more functions (one per format) 67 * ++geert: split in even more functions (one per format)
68 */ 68 */
69 69
70static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount, 70static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
71 u_char frame[], ssize_t *frameUsed, 71 u_char frame[], ssize_t *frameUsed,
72 ssize_t frameLeft); 72 ssize_t frameLeft);
73static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount, 73static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
74 u_char frame[], ssize_t *frameUsed, 74 u_char frame[], ssize_t *frameUsed,
75 ssize_t frameLeft); 75 ssize_t frameLeft);
76static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount, 76static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
77 u_char frame[], ssize_t *frameUsed, 77 u_char frame[], ssize_t *frameUsed,
78 ssize_t frameLeft); 78 ssize_t frameLeft);
79static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount, 79static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
80 u_char frame[], ssize_t *frameUsed, 80 u_char frame[], ssize_t *frameUsed,
81 ssize_t frameLeft); 81 ssize_t frameLeft);
82static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount, 82static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
83 u_char frame[], ssize_t *frameUsed, 83 u_char frame[], ssize_t *frameUsed,
84 ssize_t frameLeft); 84 ssize_t frameLeft);
85static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount, 85static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
86 u_char frame[], ssize_t *frameUsed, 86 u_char frame[], ssize_t *frameUsed,
87 ssize_t frameLeft); 87 ssize_t frameLeft);
88static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount, 88static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
89 u_char frame[], ssize_t *frameUsed, 89 u_char frame[], ssize_t *frameUsed,
90 ssize_t frameLeft); 90 ssize_t frameLeft);
91static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount, 91static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
92 u_char frame[], ssize_t *frameUsed, 92 u_char frame[], ssize_t *frameUsed,
93 ssize_t frameLeft); 93 ssize_t frameLeft);
94static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount, 94static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
95 u_char frame[], ssize_t *frameUsed, 95 u_char frame[], ssize_t *frameUsed,
96 ssize_t frameLeft); 96 ssize_t frameLeft);
97static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount, 97static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
98 u_char frame[], ssize_t *frameUsed, 98 u_char frame[], ssize_t *frameUsed,
99 ssize_t frameLeft); 99 ssize_t frameLeft);
100static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount, 100static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
101 u_char frame[], ssize_t *frameUsed, 101 u_char frame[], ssize_t *frameUsed,
102 ssize_t frameLeft); 102 ssize_t frameLeft);
103static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount, 103static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
104 u_char frame[], ssize_t *frameUsed, 104 u_char frame[], ssize_t *frameUsed,
105 ssize_t frameLeft); 105 ssize_t frameLeft);
106static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount, 106static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
107 u_char frame[], ssize_t *frameUsed, 107 u_char frame[], ssize_t *frameUsed,
108 ssize_t frameLeft); 108 ssize_t frameLeft);
109static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount, 109static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
110 u_char frame[], ssize_t *frameUsed, 110 u_char frame[], ssize_t *frameUsed,
111 ssize_t frameLeft); 111 ssize_t frameLeft);
112 112
@@ -151,7 +151,7 @@ static int FalconStateInfo(char *buffer, size_t space);
151/*** Translations ************************************************************/ 151/*** Translations ************************************************************/
152 152
153 153
154static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount, 154static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
155 u_char frame[], ssize_t *frameUsed, 155 u_char frame[], ssize_t *frameUsed,
156 ssize_t frameLeft) 156 ssize_t frameLeft)
157{ 157{
@@ -176,7 +176,7 @@ static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount,
176} 176}
177 177
178 178
179static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount, 179static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
180 u_char frame[], ssize_t *frameUsed, 180 u_char frame[], ssize_t *frameUsed,
181 ssize_t frameLeft) 181 ssize_t frameLeft)
182{ 182{
@@ -194,7 +194,7 @@ static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount,
194} 194}
195 195
196 196
197static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount, 197static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
198 u_char frame[], ssize_t *frameUsed, 198 u_char frame[], ssize_t *frameUsed,
199 ssize_t frameLeft) 199 ssize_t frameLeft)
200{ 200{
@@ -217,8 +217,9 @@ static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
217 used = count*2; 217 used = count*2;
218 while (count > 0) { 218 while (count > 0) {
219 u_short data; 219 u_short data;
220 if (get_user(data, ((u_short *)userPtr)++)) 220 if (get_user(data, (u_short __user *)userPtr))
221 return -EFAULT; 221 return -EFAULT;
222 userPtr += 2;
222 *p++ = data ^ 0x8080; 223 *p++ = data ^ 0x8080;
223 count--; 224 count--;
224 } 225 }
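The recurring change in these dmasound hunks replaces ((u_short *)userPtr)++ with a read through a properly typed pointer plus an explicit advance of userPtr. In standard C a cast yields an rvalue, and the GCC cast-as-lvalue extension that allowed incrementing it was removed in GCC 4; the rewrite also lets the __user annotation be checked. A user-space reduction of the idiom (illustrative; kernel code must still use get_user() on __user pointers, the plain dereference here is only for the demo):

#include <stdio.h>

typedef unsigned char u_char;
typedef unsigned short u_short;

int main(void)
{
	u_char buf[4] = { 0x12, 0x34, 0x56, 0x78 };
	const u_char *userPtr = buf;
	u_short data;

	/* old idiom, a GCC extension rejected by modern compilers:
	 *	data = *((const u_short *)userPtr)++;
	 */
	data = *(const u_short *)userPtr;	/* read through a typed pointer */
	userPtr += 2;				/* ...then step the original pointer */

	printf("data=0x%04x, consumed=%d bytes\n", data, (int)(userPtr - buf));
	return 0;
}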
@@ -228,7 +229,7 @@ static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
228} 229}
229 230
230 231
231static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount, 232static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
232 u_char frame[], ssize_t *frameUsed, 233 u_char frame[], ssize_t *frameUsed,
233 ssize_t frameLeft) 234 ssize_t frameLeft)
234{ 235{
@@ -240,8 +241,9 @@ static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
240 used = count*2; 241 used = count*2;
241 while (count > 0) { 242 while (count > 0) {
242 u_short data; 243 u_short data;
243 if (get_user(data, ((u_short *)userPtr)++)) 244 if (get_user(data, (u_short __user *)userPtr))
244 return -EFAULT; 245 return -EFAULT;
246 userPtr += 2;
245 *p++ = data; 247 *p++ = data;
246 *p++ = data; 248 *p++ = data;
247 count--; 249 count--;
@@ -259,7 +261,7 @@ static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
259} 261}
260 262
261 263
262static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount, 264static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
263 u_char frame[], ssize_t *frameUsed, 265 u_char frame[], ssize_t *frameUsed,
264 ssize_t frameLeft) 266 ssize_t frameLeft)
265{ 267{
@@ -271,8 +273,9 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
271 used = count*2; 273 used = count*2;
272 while (count > 0) { 274 while (count > 0) {
273 u_short data; 275 u_short data;
274 if (get_user(data, ((u_short *)userPtr)++)) 276 if (get_user(data, (u_short __user *)userPtr))
275 return -EFAULT; 277 return -EFAULT;
278 userPtr += 2;
276 data ^= 0x8000; 279 data ^= 0x8000;
277 *p++ = data; 280 *p++ = data;
278 *p++ = data; 281 *p++ = data;
@@ -284,9 +287,10 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
284 count = min_t(unsigned long, userCount, frameLeft)>>2; 287 count = min_t(unsigned long, userCount, frameLeft)>>2;
285 used = count*4; 288 used = count*4;
286 while (count > 0) { 289 while (count > 0) {
287 u_long data; 290 u_int data;
288 if (get_user(data, ((u_int *)userPtr)++)) 291 if (get_user(data, (u_int __user *)userPtr))
289 return -EFAULT; 292 return -EFAULT;
293 userPtr += 4;
290 *p++ = data ^ 0x80008000; 294 *p++ = data ^ 0x80008000;
291 count--; 295 count--;
292 } 296 }
@@ -296,7 +300,7 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
296} 300}
297 301
298 302
299static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount, 303static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
300 u_char frame[], ssize_t *frameUsed, 304 u_char frame[], ssize_t *frameUsed,
301 ssize_t frameLeft) 305 ssize_t frameLeft)
302{ 306{
@@ -309,8 +313,9 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
309 used = count*2; 313 used = count*2;
310 while (count > 0) { 314 while (count > 0) {
311 u_short data; 315 u_short data;
312 if (get_user(data, ((u_short *)userPtr)++)) 316 if (get_user(data, (u_short __user *)userPtr))
313 return -EFAULT; 317 return -EFAULT;
318 userPtr += 2;
314 data = le2be16(data); 319 data = le2be16(data);
315 *p++ = data; 320 *p++ = data;
316 *p++ = data; 321 *p++ = data;
@@ -323,8 +328,9 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
323 used = count*4; 328 used = count*4;
324 while (count > 0) { 329 while (count > 0) {
325 u_long data; 330 u_long data;
326 if (get_user(data, ((u_int *)userPtr)++)) 331 if (get_user(data, (u_int __user *)userPtr))
327 return -EFAULT; 332 return -EFAULT;
333 userPtr += 4;
328 data = le2be16dbl(data); 334 data = le2be16dbl(data);
329 *p++ = data; 335 *p++ = data;
330 count--; 336 count--;
@@ -335,7 +341,7 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
335} 341}
336 342
337 343
338static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount, 344static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
339 u_char frame[], ssize_t *frameUsed, 345 u_char frame[], ssize_t *frameUsed,
340 ssize_t frameLeft) 346 ssize_t frameLeft)
341{ 347{
@@ -348,8 +354,9 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
348 used = count*2; 354 used = count*2;
349 while (count > 0) { 355 while (count > 0) {
350 u_short data; 356 u_short data;
351 if (get_user(data, ((u_short *)userPtr)++)) 357 if (get_user(data, (u_short __user *)userPtr))
352 return -EFAULT; 358 return -EFAULT;
359 userPtr += 2;
353 data = le2be16(data) ^ 0x8000; 360 data = le2be16(data) ^ 0x8000;
354 *p++ = data; 361 *p++ = data;
355 *p++ = data; 362 *p++ = data;
@@ -361,8 +368,9 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
361 used = count; 368 used = count;
362 while (count > 0) { 369 while (count > 0) {
363 u_long data; 370 u_long data;
364 if (get_user(data, ((u_int *)userPtr)++)) 371 if (get_user(data, (u_int __user *)userPtr))
365 return -EFAULT; 372 return -EFAULT;
373 userPtr += 4;
366 data = le2be16dbl(data) ^ 0x80008000; 374 data = le2be16dbl(data) ^ 0x80008000;
367 *p++ = data; 375 *p++ = data;
368 count--; 376 count--;
@@ -373,7 +381,7 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
373} 381}
374 382
375 383
376static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount, 384static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
377 u_char frame[], ssize_t *frameUsed, 385 u_char frame[], ssize_t *frameUsed,
378 ssize_t frameLeft) 386 ssize_t frameLeft)
379{ 387{
@@ -435,7 +443,7 @@ static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount,
435} 443}
436 444
437 445
438static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount, 446static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
439 u_char frame[], ssize_t *frameUsed, 447 u_char frame[], ssize_t *frameUsed,
440 ssize_t frameLeft) 448 ssize_t frameLeft)
441{ 449{
@@ -470,8 +478,9 @@ static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
470 if (bal < 0) { 478 if (bal < 0) {
471 if (userCount < 2) 479 if (userCount < 2)
472 break; 480 break;
473 if (get_user(data, ((u_short *)userPtr)++)) 481 if (get_user(data, (u_short __user *)userPtr))
474 return -EFAULT; 482 return -EFAULT;
483 userPtr += 2;
475 userCount -= 2; 484 userCount -= 2;
476 bal += hSpeed; 485 bal += hSpeed;
477 } 486 }
@@ -488,7 +497,7 @@ static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
488} 497}
489 498
490 499
491static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount, 500static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
492 u_char frame[], ssize_t *frameUsed, 501 u_char frame[], ssize_t *frameUsed,
493 ssize_t frameLeft) 502 ssize_t frameLeft)
494{ 503{
@@ -524,8 +533,9 @@ static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
524 if (bal < 0) { 533 if (bal < 0) {
525 if (userCount < 2) 534 if (userCount < 2)
526 break; 535 break;
527 if (get_user(data, ((u_short *)userPtr)++)) 536 if (get_user(data, (u_short __user *)userPtr))
528 return -EFAULT; 537 return -EFAULT;
538 userPtr += 2;
529 data ^= 0x8080; 539 data ^= 0x8080;
530 userCount -= 2; 540 userCount -= 2;
531 bal += hSpeed; 541 bal += hSpeed;
@@ -543,7 +553,7 @@ static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
543} 553}
544 554
545 555
546static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount, 556static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
547 u_char frame[], ssize_t *frameUsed, 557 u_char frame[], ssize_t *frameUsed,
548 ssize_t frameLeft) 558 ssize_t frameLeft)
549{ 559{
@@ -561,8 +571,9 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
561 if (bal < 0) { 571 if (bal < 0) {
562 if (userCount < 2) 572 if (userCount < 2)
563 break; 573 break;
564 if (get_user(data, ((u_short *)userPtr)++)) 574 if (get_user(data, (u_short __user *)userPtr))
565 return -EFAULT; 575 return -EFAULT;
576 userPtr += 2;
566 userCount -= 2; 577 userCount -= 2;
567 bal += hSpeed; 578 bal += hSpeed;
568 } 579 }
@@ -579,8 +590,9 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
579 if (bal < 0) { 590 if (bal < 0) {
580 if (userCount < 4) 591 if (userCount < 4)
581 break; 592 break;
582 if (get_user(data, ((u_int *)userPtr)++)) 593 if (get_user(data, (u_int __user *)userPtr))
583 return -EFAULT; 594 return -EFAULT;
595 userPtr += 4;
584 userCount -= 4; 596 userCount -= 4;
585 bal += hSpeed; 597 bal += hSpeed;
586 } 598 }
@@ -597,7 +609,7 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
597} 609}
598 610
599 611
600static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount, 612static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
601 u_char frame[], ssize_t *frameUsed, 613 u_char frame[], ssize_t *frameUsed,
602 ssize_t frameLeft) 614 ssize_t frameLeft)
603{ 615{
@@ -615,8 +627,9 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
615 if (bal < 0) { 627 if (bal < 0) {
616 if (userCount < 2) 628 if (userCount < 2)
617 break; 629 break;
618 if (get_user(data, ((u_short *)userPtr)++)) 630 if (get_user(data, (u_short __user *)userPtr))
619 return -EFAULT; 631 return -EFAULT;
632 userPtr += 2;
620 data ^= 0x8000; 633 data ^= 0x8000;
621 userCount -= 2; 634 userCount -= 2;
622 bal += hSpeed; 635 bal += hSpeed;
@@ -634,8 +647,9 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
634 if (bal < 0) { 647 if (bal < 0) {
635 if (userCount < 4) 648 if (userCount < 4)
636 break; 649 break;
637 if (get_user(data, ((u_int *)userPtr)++)) 650 if (get_user(data, (u_int __user *)userPtr))
638 return -EFAULT; 651 return -EFAULT;
652 userPtr += 4;
639 data ^= 0x80008000; 653 data ^= 0x80008000;
640 userCount -= 4; 654 userCount -= 4;
641 bal += hSpeed; 655 bal += hSpeed;
@@ -653,7 +667,7 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
653} 667}
654 668
655 669
656static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount, 670static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
657 u_char frame[], ssize_t *frameUsed, 671 u_char frame[], ssize_t *frameUsed,
658 ssize_t frameLeft) 672 ssize_t frameLeft)
659{ 673{
@@ -671,8 +685,9 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
671 if (bal < 0) { 685 if (bal < 0) {
672 if (userCount < 2) 686 if (userCount < 2)
673 break; 687 break;
674 if (get_user(data, ((u_short *)userPtr)++)) 688 if (get_user(data, (u_short __user *)userPtr))
675 return -EFAULT; 689 return -EFAULT;
690 userPtr += 2;
676 data = le2be16(data); 691 data = le2be16(data);
677 userCount -= 2; 692 userCount -= 2;
678 bal += hSpeed; 693 bal += hSpeed;
@@ -690,8 +705,9 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
690 if (bal < 0) { 705 if (bal < 0) {
691 if (userCount < 4) 706 if (userCount < 4)
692 break; 707 break;
693 if (get_user(data, ((u_int *)userPtr)++)) 708 if (get_user(data, (u_int __user *)userPtr))
694 return -EFAULT; 709 return -EFAULT;
710 userPtr += 4;
695 data = le2be16dbl(data); 711 data = le2be16dbl(data);
696 userCount -= 4; 712 userCount -= 4;
697 bal += hSpeed; 713 bal += hSpeed;
@@ -709,7 +725,7 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
709} 725}
710 726
711 727
712static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount, 728static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
713 u_char frame[], ssize_t *frameUsed, 729 u_char frame[], ssize_t *frameUsed,
714 ssize_t frameLeft) 730 ssize_t frameLeft)
715{ 731{
@@ -727,8 +743,9 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
727 if (bal < 0) { 743 if (bal < 0) {
728 if (userCount < 2) 744 if (userCount < 2)
729 break; 745 break;
730 if (get_user(data, ((u_short *)userPtr)++)) 746 if (get_user(data, (u_short __user *)userPtr))
731 return -EFAULT; 747 return -EFAULT;
748 userPtr += 2;
732 data = le2be16(data) ^ 0x8000; 749 data = le2be16(data) ^ 0x8000;
733 userCount -= 2; 750 userCount -= 2;
734 bal += hSpeed; 751 bal += hSpeed;
@@ -746,8 +763,9 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
746 if (bal < 0) { 763 if (bal < 0) {
747 if (userCount < 4) 764 if (userCount < 4)
748 break; 765 break;
749 if (get_user(data, ((u_int *)userPtr)++)) 766 if (get_user(data, (u_int __user *)userPtr))
750 return -EFAULT; 767 return -EFAULT;
768 userPtr += 4;
751 data = le2be16dbl(data) ^ 0x80008000; 769 data = le2be16dbl(data) ^ 0x80008000;
752 userCount -= 4; 770 userCount -= 4;
753 bal += hSpeed; 771 bal += hSpeed;
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c
index d59f60b26410..494070a3f870 100644
--- a/sound/oss/dmasound/dmasound_paula.c
+++ b/sound/oss/dmasound/dmasound_paula.c
@@ -34,6 +34,7 @@
34#define DMASOUND_PAULA_REVISION 0 34#define DMASOUND_PAULA_REVISION 0
35#define DMASOUND_PAULA_EDITION 4 35#define DMASOUND_PAULA_EDITION 4
36 36
37#define custom amiga_custom
37 /* 38 /*
38 * The minimum period for audio depends on htotal (for OCS/ECS/AGA) 39 * The minimum period for audio depends on htotal (for OCS/ECS/AGA)
39 * (Imported from arch/m68k/amiga/amisound.c) 40 * (Imported from arch/m68k/amiga/amisound.c)
@@ -156,7 +157,7 @@ static int AmiStateInfo(char *buffer, size_t space);
156 * Native format 157 * Native format
157 */ 158 */
158 159
159static ssize_t ami_ct_s8(const u_char *userPtr, size_t userCount, 160static ssize_t ami_ct_s8(const u_char __user *userPtr, size_t userCount,
160 u_char frame[], ssize_t *frameUsed, ssize_t frameLeft) 161 u_char frame[], ssize_t *frameUsed, ssize_t frameLeft)
161{ 162{
162 ssize_t count, used; 163 ssize_t count, used;
@@ -189,7 +190,7 @@ static ssize_t ami_ct_s8(const u_char *userPtr, size_t userCount,
189 */ 190 */
190 191
191#define GENERATE_AMI_CT8(funcname, convsample) \ 192#define GENERATE_AMI_CT8(funcname, convsample) \
192static ssize_t funcname(const u_char *userPtr, size_t userCount, \ 193static ssize_t funcname(const u_char __user *userPtr, size_t userCount, \
193 u_char frame[], ssize_t *frameUsed, \ 194 u_char frame[], ssize_t *frameUsed, \
194 ssize_t frameLeft) \ 195 ssize_t frameLeft) \
195{ \ 196{ \
@@ -240,10 +241,11 @@ GENERATE_AMI_CT8(ami_ct_u8, AMI_CT_U8)
  */
 
 #define GENERATE_AMI_CT_16(funcname, convsample) \
-static ssize_t funcname(const u_char *userPtr, size_t userCount, \
+static ssize_t funcname(const u_char __user *userPtr, size_t userCount, \
 			u_char frame[], ssize_t *frameUsed, \
 			ssize_t frameLeft) \
 { \
+	const u_short __user *ptr = (const u_short __user *)userPtr; \
 	ssize_t count, used; \
 	u_short data; \
 \
@@ -253,7 +255,7 @@ static ssize_t funcname(const u_char *userPtr, size_t userCount, \
 		count = min_t(size_t, userCount, frameLeft)>>1 & ~1; \
 		used = count*2; \
 		while (count > 0) { \
-			if (get_user(data, ((u_short *)userPtr)++)) \
+			if (get_user(data, ptr++)) \
 				return -EFAULT; \
 			data = convsample(data); \
 			*high++ = data>>8; \
@@ -268,12 +270,12 @@ static ssize_t funcname(const u_char *userPtr, size_t userCount, \
 		count = min_t(size_t, userCount, frameLeft)>>2 & ~1; \
 		used = count*4; \
 		while (count > 0) { \
-			if (get_user(data, ((u_short *)userPtr)++)) \
+			if (get_user(data, ptr++)) \
 				return -EFAULT; \
 			data = convsample(data); \
 			*lefth++ = data>>8; \
 			*leftl++ = (data>>2) & 0x3f; \
-			if (get_user(data, ((u_short *)userPtr)++)) \
+			if (get_user(data, ptr++)) \
 				return -EFAULT; \
 			data = convsample(data); \
 			*righth++ = data>>8; \
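Inside the GENERATE_AMI_CT_16() macro the same lvalue-cast problem is solved slightly differently: rather than re-casting userPtr at every get_user(), the patch hoists one correctly typed local (ptr) and post-increments that. A standalone sketch of the shape; the function name and reduced parameter list are illustrative, not the macro's expansion:

#include <linux/types.h>
#include <asm/uaccess.h>

/* Illustrative copy-in loop: split each 16-bit sample into a high
 * byte and 6 low bits, the way the Paula 14-bit output trick does. */
static ssize_t copy_words(const u_char __user *userPtr, size_t count,
			  u_char *high, u_char *low)
{
	const u_short __user *ptr = (const u_short __user *)userPtr;
	ssize_t used = count * 2;
	u_short data;

	while (count > 0) {
		if (get_user(data, ptr++))	/* typed pointer, no cast */
			return -EFAULT;
		*high++ = data >> 8;
		*low++ = (data >> 2) & 0x3f;
		count--;
	}
	return used;
}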
diff --git a/sound/oss/dmasound/dmasound_q40.c b/sound/oss/dmasound/dmasound_q40.c
index 1ddaa6284b08..e2081f32b0c4 100644
--- a/sound/oss/dmasound/dmasound_q40.c
+++ b/sound/oss/dmasound/dmasound_q40.c
@@ -58,7 +58,7 @@ static void Q40Interrupt(void);
 
 
 /* userCount, frameUsed, frameLeft == byte counts */
-static ssize_t q40_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_law(const u_char __user *userPtr, size_t userCount,
 			  u_char frame[], ssize_t *frameUsed,
 			  ssize_t frameLeft)
 {
@@ -79,7 +79,7 @@ static ssize_t q40_ct_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_s8(const u_char __user *userPtr, size_t userCount,
 			 u_char frame[], ssize_t *frameUsed,
 			 ssize_t frameLeft)
 {
@@ -98,7 +98,7 @@ static ssize_t q40_ct_s8(const u_char *userPtr, size_t userCount,
 	return used;
 }
 
-static ssize_t q40_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_u8(const u_char __user *userPtr, size_t userCount,
 			 u_char frame[], ssize_t *frameUsed,
 			 ssize_t frameLeft)
 {
@@ -114,7 +114,7 @@ static ssize_t q40_ct_u8(const u_char *userPtr, size_t userCount,
 
 
 /* a bit too complicated to optimise right now ..*/
-static ssize_t q40_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_law(const u_char __user *userPtr, size_t userCount,
 			   u_char frame[], ssize_t *frameUsed,
 			   ssize_t frameLeft)
 {
@@ -152,7 +152,7 @@ static ssize_t q40_ctx_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_s8(const u_char __user *userPtr, size_t userCount,
 			  u_char frame[], ssize_t *frameUsed,
 			  ssize_t frameLeft)
 {
@@ -189,7 +189,7 @@ static ssize_t q40_ctx_s8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_u8(const u_char __user *userPtr, size_t userCount,
 			  u_char frame[], ssize_t *frameUsed,
 			  ssize_t frameLeft)
 {
@@ -224,7 +224,7 @@ static ssize_t q40_ctx_u8(const u_char *userPtr, size_t userCount,
 }
 
 /* compressing versions */
-static ssize_t q40_ctc_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_law(const u_char __user *userPtr, size_t userCount,
 			   u_char frame[], ssize_t *frameUsed,
 			   ssize_t frameLeft)
 {
@@ -265,7 +265,7 @@ static ssize_t q40_ctc_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctc_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_s8(const u_char __user *userPtr, size_t userCount,
 			  u_char frame[], ssize_t *frameUsed,
 			  ssize_t frameLeft)
 {
@@ -304,7 +304,7 @@ static ssize_t q40_ctc_s8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctc_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_u8(const u_char __user *userPtr, size_t userCount,
 			  u_char frame[], ssize_t *frameUsed,
 			  ssize_t frameLeft)
 {
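Every dmasound_q40.c hunk is signature-only: these conversion helpers reach the buffer through get_user()/copy_from_user() rather than dereferencing it directly, so the fix is just propagating the __user annotation, which lets sparse (run via make C=1) type-check the user/kernel boundary. A minimal sketch of a helper of the same shape; the body is an assumption about the simplest (unsigned 8-bit, no conversion) case, not the driver's exact code:

#include <linux/kernel.h>	/* min_t() */
#include <linux/types.h>
#include <asm/uaccess.h>	/* copy_from_user() */

static ssize_t ct_u8_sketch(const u_char __user *userPtr, size_t userCount,
			    u_char frame[], ssize_t *frameUsed,
			    ssize_t frameLeft)
{
	ssize_t count = min_t(size_t, userCount, frameLeft);

	/* Dereferencing userPtr here would now draw a sparse warning;
	 * copy_from_user() is the checked way across the boundary. */
	if (copy_from_user(&frame[*frameUsed], userPtr, count))
		return -EFAULT;
	*frameUsed += count;
	return count;
}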
diff --git a/sound/oss/dmasound/trans_16.c b/sound/oss/dmasound/trans_16.c
index 23562e947806..ca973ac2a30a 100644
--- a/sound/oss/dmasound/trans_16.c
+++ b/sound/oss/dmasound/trans_16.c
@@ -17,6 +17,7 @@
 #include <asm/uaccess.h>
 #include "dmasound.h"
 
+extern int expand_bal;	/* Balance factor for expanding (not volume!) */
 static short dmasound_alaw2dma16[] ;
 static short dmasound_ulaw2dma16[] ;
 
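The trans_16.c hunk adds a declaration, not a definition: expand_bal is presumably owned by the dmasound core, and the extern gives this translation unit a type-checked reference to it instead of an implicit one. A two-line sketch of that split, assuming the definition lives in dmasound_core.c:

/* dmasound_core.c (assumed): the one definition that allocates storage */
int expand_bal;

/* trans_16.c: a declaration only -- no storage, just type information */
extern int expand_bal;	/* Balance factor for expanding (not volume!) */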