-rw-r--r--  arch/alpha/kernel/entry.S | 3
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 2
-rw-r--r--  arch/ia64/ia32/ia32_entry.S | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/mips/kernel/linux32.c | 34
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 2
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 2
-rw-r--r--  arch/sparc/kernel/smp_64.c | 4
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 2
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 22
-rw-r--r--  arch/x86/include/asm/ia32.h | 7
-rw-r--r--  arch/x86/include/asm/sys_ia32.h | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 21
-rw-r--r--  drivers/firewire/fw-card.c | 149
-rw-r--r--  drivers/firewire/fw-cdev.c | 1044
-rw-r--r--  drivers/firewire/fw-device.c | 203
-rw-r--r--  drivers/firewire/fw-device.h | 23
-rw-r--r--  drivers/firewire/fw-iso.c | 227
-rw-r--r--  drivers/firewire/fw-ohci.c | 260
-rw-r--r--  drivers/firewire/fw-sbp2.c | 57
-rw-r--r--  drivers/firewire/fw-topology.c | 29
-rw-r--r--  drivers/firewire/fw-topology.h | 19
-rw-r--r--  drivers/firewire/fw-transaction.c | 185
-rw-r--r--  drivers/firewire/fw-transaction.h | 138
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 235
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 12
-rw-r--r--  drivers/gpu/drm/drm_info.c | 328
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 721
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 15
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 116
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 898
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c | 257
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c | 334
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 406
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 148
-rw-r--r--  drivers/ieee1394/csr.c | 8
-rw-r--r--  drivers/ieee1394/dv1394.c | 2
-rw-r--r--  drivers/ieee1394/eth1394.c | 4
-rw-r--r--  drivers/ieee1394/highlevel.c | 2
-rw-r--r--  drivers/ieee1394/nodemgr.c | 4
-rw-r--r--  drivers/ieee1394/nodemgr.h | 2
-rw-r--r--  drivers/ieee1394/raw1394.c | 14
-rw-r--r--  drivers/ieee1394/sbp2.c | 9
-rw-r--r--  drivers/ieee1394/video1394.c | 2
-rw-r--r--  drivers/media/dvb/firewire/firedtv-avc.c | 10
-rw-r--r--  drivers/mtd/mtdsuper.c | 7
-rw-r--r--  fs/9p/v9fs_vfs.h | 4
-rw-r--r--  fs/9p/vfs_dentry.c | 4
-rw-r--r--  fs/9p/vfs_super.c | 5
-rw-r--r--  fs/Kconfig | 56
-rw-r--r--  fs/Makefile | 6
-rw-r--r--  fs/adfs/adfs.h | 2
-rw-r--r--  fs/adfs/dir.c | 2
-rw-r--r--  fs/affs/affs.h | 3
-rw-r--r--  fs/affs/amigaffs.c | 8
-rw-r--r--  fs/affs/namei.c | 4
-rw-r--r--  fs/afs/dir.c | 2
-rw-r--r--  fs/anon_inodes.c | 2
-rw-r--r--  fs/attr.c | 3
-rw-r--r--  fs/autofs/root.c | 2
-rw-r--r--  fs/autofs4/inode.c | 2
-rw-r--r--  fs/autofs4/root.c | 4
-rw-r--r--  fs/block_dev.c | 146
-rw-r--r--  fs/buffer.c | 145
-rw-r--r--  fs/cifs/cifsfs.c | 3
-rw-r--r--  fs/cifs/cifsfs.h | 4
-rw-r--r--  fs/cifs/dir.c | 4
-rw-r--r--  fs/coda/dir.c | 2
-rw-r--r--  fs/compat.c | 28
-rw-r--r--  fs/configfs/dir.c | 2
-rw-r--r--  fs/dcache.c | 48
-rw-r--r--  fs/devpts/inode.c | 188
-rw-r--r--  fs/dlm/dir.c | 18
-rw-r--r--  fs/dlm/dlm_internal.h | 2
-rw-r--r--  fs/dlm/lock.c | 60
-rw-r--r--  fs/dlm/lockspace.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 181
-rw-r--r--  fs/dlm/user.c | 24
-rw-r--r--  fs/drop_caches.c | 2
-rw-r--r--  fs/ecryptfs/dentry.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 2
-rw-r--r--  fs/ext2/balloc.c | 8
-rw-r--r--  fs/ext2/ialloc.c | 10
-rw-r--r--  fs/ext2/inode.c | 2
-rw-r--r--  fs/ext2/super.c | 1
-rw-r--r--  fs/ext2/xattr.c | 8
-rw-r--r--  fs/ext3/balloc.c | 8
-rw-r--r--  fs/ext3/ialloc.c | 12
-rw-r--r--  fs/ext3/inode.c | 6
-rw-r--r--  fs/ext3/namei.c | 6
-rw-r--r--  fs/ext3/super.c | 48
-rw-r--r--  fs/ext3/xattr.c | 6
-rw-r--r--  fs/ext4/balloc.c | 2
-rw-r--r--  fs/ext4/ext4.h | 2
-rw-r--r--  fs/ext4/ialloc.c | 12
-rw-r--r--  fs/ext4/inode.c | 40
-rw-r--r--  fs/ext4/mballoc.c | 46
-rw-r--r--  fs/ext4/namei.c | 6
-rw-r--r--  fs/ext4/super.c | 54
-rw-r--r--  fs/ext4/xattr.c | 6
-rw-r--r--  fs/fat/namei_msdos.c | 2
-rw-r--r--  fs/fat/namei_vfat.c | 4
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/fuse/fuse_i.h | 2
-rw-r--r--  fs/gfs2/ops_dentry.c | 2
-rw-r--r--  fs/gfs2/super.h | 2
-rw-r--r--  fs/hfs/hfs_fs.h | 2
-rw-r--r--  fs/hfs/sysdep.c | 2
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 2
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hostfs/hostfs_kern.c | 4
-rw-r--r--  fs/hpfs/dentry.c | 2
-rw-r--r--  fs/inode.c | 6
-rw-r--r--  fs/isofs/inode.c | 2
-rw-r--r--  fs/jfs/acl.c | 2
-rw-r--r--  fs/jfs/inode.c | 6
-rw-r--r--  fs/jfs/jfs_dtree.c | 18
-rw-r--r--  fs/jfs/jfs_extent.c | 10
-rw-r--r--  fs/jfs/jfs_inode.c | 4
-rw-r--r--  fs/jfs/jfs_inode.h | 2
-rw-r--r--  fs/jfs/jfs_xtree.c | 14
-rw-r--r--  fs/jfs/namei.c | 10
-rw-r--r--  fs/jfs/xattr.c | 12
-rw-r--r--  fs/libfs.c | 5
-rw-r--r--  fs/namei.c | 48
-rw-r--r--  fs/namespace.c | 3
-rw-r--r--  fs/ncpfs/dir.c | 4
-rw-r--r--  fs/nfs/dir.c | 4
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfsd/vfs.c | 4
-rw-r--r--  fs/notify/inotify/inotify.c | 16
-rw-r--r--  fs/ocfs2/dcache.c | 2
-rw-r--r--  fs/ocfs2/dcache.h | 2
-rw-r--r--  fs/open.c | 2
-rw-r--r--  fs/pipe.c | 7
-rw-r--r--  fs/proc/base.c | 6
-rw-r--r--  fs/proc/generic.c | 2
-rw-r--r--  fs/proc/proc_sysctl.c | 4
-rw-r--r--  fs/proc/root.c | 3
-rw-r--r--  fs/quota/Kconfig | 59
-rw-r--r--  fs/quota/Makefile | 14
-rw-r--r--  fs/quota/dquot.c (renamed from fs/dquot.c) | 572
-rw-r--r--  fs/quota/quota.c (renamed from fs/quota.c) | 37
-rw-r--r--  fs/quota/quota_tree.c (renamed from fs/quota_tree.c) | 132
-rw-r--r--  fs/quota/quota_tree.h (renamed from fs/quota_tree.h) | 0
-rw-r--r--  fs/quota/quota_v1.c (renamed from fs/quota_v1.c) | 48
-rw-r--r--  fs/quota/quota_v2.c (renamed from fs/quota_v2.c) | 3
-rw-r--r--  fs/quota/quotaio_v1.h (renamed from fs/quotaio_v1.h) | 0
-rw-r--r--  fs/quota/quotaio_v2.h (renamed from fs/quotaio_v2.h) | 0
-rw-r--r--  fs/ramfs/file-nommu.c | 6
-rw-r--r--  fs/reiserfs/bitmap.c | 14
-rw-r--r--  fs/reiserfs/inode.c | 10
-rw-r--r--  fs/reiserfs/namei.c | 6
-rw-r--r--  fs/reiserfs/stree.c | 14
-rw-r--r--  fs/reiserfs/super.c | 60
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/smbfs/dir.c | 4
-rw-r--r--  fs/super.c | 17
-rw-r--r--  fs/sync.c | 2
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/sysv/namei.c | 2
-rw-r--r--  fs/sysv/sysv.h | 2
-rw-r--r--  fs/ubifs/super.c | 3
-rw-r--r--  fs/udf/balloc.c | 14
-rw-r--r--  fs/udf/ialloc.c | 8
-rw-r--r--  fs/ufs/balloc.c | 12
-rw-r--r--  fs/ufs/ialloc.c | 8
-rw-r--r--  fs/ufs/inode.c | 39
-rw-r--r--  fs/ufs/namei.c | 2
-rw-r--r--  fs/ufs/super.c | 11
-rw-r--r--  fs/ufs/ufs.h | 2
-rw-r--r--  include/drm/drmP.h | 77
-rw-r--r--  include/drm/drm_pciids.h | 2
-rw-r--r--  include/linux/buffer_head.h | 7
-rw-r--r--  include/linux/compat.h | 8
-rw-r--r--  include/linux/dcache.h | 2
-rw-r--r--  include/linux/firewire-cdev.h | 218
-rw-r--r--  include/linux/fs.h | 220
-rw-r--r--  include/linux/ncp_fs.h | 2
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/quota.h | 11
-rw-r--r--  include/linux/quotaops.h | 119
-rw-r--r--  kernel/cgroup.c | 5
-rw-r--r--  net/socket.c | 2
-rw-r--r--  net/sunrpc/rpc_pipe.c | 2
199 files changed, 5798 insertions, 3615 deletions
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index e4a54b615894..b45d913a51c3 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -903,8 +903,9 @@ sys_alpha_pipe:
 	stq	$26, 0($sp)
 	.prologue 0
 
+	mov	$31, $17
 	lda	$16, 8($sp)
-	jsr	$26, do_pipe
+	jsr	$26, do_pipe_flags
 
 	ldq	$26, 0($sp)
 	bne	$0, 1f
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ae41f097864b..42ee05981e71 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -46,8 +46,6 @@
 #include <asm/hwrpb.h>
 #include <asm/processor.h>
 
-extern int do_pipe(int *);
-
 /*
  * Brk needs to return an error.  Still support Linux's brk(0) query idiom,
  * which OSF programs just shouldn't be doing.  We're still not quite
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f8395e9a5..af9405cd70e5 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
 	data8 sys_ni_syscall
 	data8 sys_umask		/* 60 */
 	data8 sys_chroot
-	data8 sys_ustat
+	data8 compat_sys_ustat
 	data8 sys_dup2
 	data8 sys_getppid
 	data8 sys_getpgrp	/* 65 */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e499757309b..5c0f408cfd71 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
 	return 1;
 }
 
-static struct dentry_operations pfmfs_dentry_operations = {
+static const struct dentry_operations pfmfs_dentry_operations = {
 	.d_delete = pfmfs_delete_dentry,
 };
 
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 49aac6e17df9..2a472713de8e 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -355,40 +355,6 @@ SYSCALL_DEFINE1(32_personality, unsigned long, personality)
 	return ret;
 }
 
-/* ustat compatibility */
-struct ustat32 {
-	compat_daddr_t	f_tfree;
-	compat_ino_t	f_tinode;
-	char		f_fname[6];
-	char		f_fpack[6];
-};
-
-extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
-
-SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
-{
-	int err;
-	struct ustat tmp;
-	struct ustat32 tmp32;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	err = sys_ustat(dev, (struct ustat __user *)&tmp);
-	set_fs(old_fs);
-
-	if (err)
-		goto out;
-
-	memset(&tmp32, 0, sizeof(struct ustat32));
-	tmp32.f_tfree = tmp.f_tfree;
-	tmp32.f_tinode = tmp.f_tinode;
-
-	err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
-
-out:
-	return err;
-}
-
 SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
 	compat_off_t __user *, offset, s32, count)
 {
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 7438e92f8a01..f61d6b0e5731 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -253,7 +253,7 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_utime		/* 6130 */
 	PTR	sys_mknod
 	PTR	sys_32_personality
-	PTR	sys_32_ustat
+	PTR	compat_sys_ustat
 	PTR	compat_sys_statfs
 	PTR	compat_sys_fstatfs		/* 6135 */
 	PTR	sys_sysfs
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b0fef4ff9827..60997f1f69d4 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -265,7 +265,7 @@ sys_call_table:
 	PTR	sys_olduname
 	PTR	sys_umask			/* 4060 */
 	PTR	sys_chroot
-	PTR	sys_32_ustat
+	PTR	compat_sys_ustat
 	PTR	sys_dup2
 	PTR	sys_getppid
 	PTR	sys_getpgrp			/* 4065 */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 303d2b647e41..03b9a01bc16c 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -130,7 +130,7 @@
 	ENTRY_OURS(newuname)
 	ENTRY_SAME(umask)		/* 60 */
 	ENTRY_SAME(chroot)
-	ENTRY_SAME(ustat)
+	ENTRY_COMP(ustat)
 	ENTRY_SAME(dup2)
 	ENTRY_SAME(getppid)
 	ENTRY_SAME(getpgrp)		/* 65 */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 72353f6070a4..fe166491e9dc 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -65,7 +65,7 @@ SYSCALL(ni_syscall)
 SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
 COMPAT_SYS_SPU(umask)
 SYSCALL_SPU(chroot)
-SYSCALL(ustat)
+COMPAT_SYS(ustat)
 SYSCALL_SPU(dup2)
 SYSCALL_SPU(getppid)
 SYSCALL_SPU(getpgrp)
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 62c706eb0de6..87cf5a79a351 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -252,7 +252,7 @@ sys32_chroot_wrapper:
 sys32_ustat_wrapper:
 	llgfr	%r2,%r2			# dev_t
 	llgtr	%r3,%r3			# struct ustat *
-	jg	sys_ustat
+	jg	compat_sys_ustat
 
 	.globl	sys32_dup2_wrapper
 sys32_dup2_wrapper:
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 6cd1a5b65067..79457f682b5a 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
  * If the address space is non-shared (ie. mm->count == 1) we avoid
  * cross calls when we want to flush the currently running process's
  * tlb state.  This is done by clearing all cpu bits except the current
- * processor's in current->active_mm->cpu_vm_mask and performing the
+ * processor's in current->mm->cpu_vm_mask and performing the
  * flush locally only.  This will force any subsequent cpus which run
  * this task to flush the context from the local tlb if the process
  * migrates to another cpu (again).
@@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
 	u32 ctx = CTX_HWBITS(mm->context);
 	int cpu = get_cpu();
 
-	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
+	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
 	else
 		smp_cross_call_masked(&xcall_flush_tlb_pending,
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f93c42a2b522..a8000b1cda74 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -51,7 +51,7 @@ sys_call_table32:
 /*150*/	.word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
 	.word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
 /*160*/	.word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
-	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
+	.word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr
 /*170*/	.word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
 	.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
 /*180*/	.word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 5a0d76dc56a4..8ef8876666b2 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -557,7 +557,7 @@ ia32_sys_call_table:
 	.quad sys32_olduname
 	.quad sys_umask			/* 60 */
 	.quad sys_chroot
-	.quad sys32_ustat
+	.quad compat_sys_ustat
 	.quad sys_dup2
 	.quad sys_getppid
 	.quad sys_getpgrp		/* 65 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 6c0d7f6231af..efac92fd1efb 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -638,28 +638,6 @@ long sys32_uname(struct old_utsname __user *name)
 	return err ? -EFAULT : 0;
 }
 
-long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
-{
-	struct ustat u;
-	mm_segment_t seg;
-	int ret;
-
-	seg = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_ustat(dev, (struct ustat __user *)&u);
-	set_fs(seg);
-	if (ret < 0)
-		return ret;
-
-	if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
-	    __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
-	    __put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
-	    __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
-	    __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
-		ret = -EFAULT;
-	return ret;
-}
-
 asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
 			     compat_uptr_t __user *envp, struct pt_regs *regs)
 {
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 50ca486fd88c..1f7e62517284 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -129,13 +129,6 @@ typedef struct compat_siginfo {
 	} _sifields;
 } compat_siginfo_t;
 
-struct ustat32 {
-	__u32		f_tfree;
-	compat_ino_t	f_tinode;
-	char		f_fname[6];
-	char		f_fpack[6];
-};
-
 #define IA32_STACK_TOP IA32_PAGE_OFFSET
 
 #ifdef __KERNEL__
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index ffb08be2a530..72a6dcd1299b 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -70,8 +70,6 @@ struct old_utsname;
 asmlinkage long sys32_olduname(struct oldold_utsname __user *);
 long sys32_uname(struct old_utsname __user *);
 
-long sys32_ustat(unsigned, struct ustat32 __user *);
-
 asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *,
 			     compat_uptr_t __user *, struct pt_regs *);
 asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);
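
All of the per-architecture ustat wrappers removed above are funneled into a single generic
handler in fs/compat.c (the 28-line change listed in the diffstat; that hunk is not part of
this excerpt). A minimal sketch of roughly what such a handler looks like, assuming the
user_get_super() and vfs_statfs() helpers available at the time -- an illustration, not the
verbatim hunk:

	/* Sketch only -- not a hunk from this patch. */
	asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
	{
		struct super_block *sb;
		struct compat_ustat tmp;
		struct kstatfs sbuf;
		int err;

		sb = user_get_super(new_decode_dev(dev));	/* look up the mounted fs by dev_t */
		if (!sb)
			return -EINVAL;
		err = vfs_statfs(sb->s_root, &sbuf);		/* fills free block/inode counts */
		drop_super(sb);
		if (err)
			return err;

		memset(&tmp, 0, sizeof(struct compat_ustat));
		tmp.f_tfree = sbuf.f_bfree;
		tmp.f_tinode = sbuf.f_ffree;
		if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
			return -EFAULT;
		return 0;
	}

Unlike the removed sys32_ustat()/sys_32_ustat() wrappers, this needs no set_fs(KERNEL_DS)
address-limit switching, which is the point of the conversion.
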
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4373adb2119a..9d9490e22e07 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -26,6 +26,10 @@
 #define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
 #define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
 #define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
+#define PCI_DEVICE_ID_INTEL_IGDGM_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_IGDGM_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_IGDG_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_IGDG_IG         0xA001
 #define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
 #define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
 #define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
@@ -60,7 +64,12 @@
 
 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+
+#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
 
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
 			size = 512;
 		}
 		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33) {
+	} else if (IS_G33 && !IS_IGD) {
 		/* G33's GTT size defined in gmch_ctrl */
 		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
 		case G33_PGETBL_SIZE_1M:
@@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
 			size = 512;
 		}
 		size += 4;
-	} else if (IS_G4X) {
+	} else if (IS_G4X || IS_IGD) {
 		/* On 4 series hardware, GTT stolen is separate from graphics
 		 * stolen, ignore it in stolen gtt entries counting.  However,
 		 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -2161,6 +2170,10 @@ static const struct intel_driver_description {
 	  NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 	  NULL, &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+	  NULL, &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+	  NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
 	  "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_82945G_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
+	ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
+	ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
 	ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
 	ID(PCI_DEVICE_ID_INTEL_82G35_HB),
 	ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index a5dd7a665aa8..8b8c8c22f0fc 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -63,8 +63,7 @@ static int descriptor_count;
 #define BIB_CMC		((1) << 30)
 #define BIB_IMC		((1) << 31)
 
-static u32 *
-generate_config_rom(struct fw_card *card, size_t *config_rom_length)
+static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
 {
 	struct fw_descriptor *desc;
 	static u32 config_rom[256];
@@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
 	return config_rom;
 }
 
-static void
-update_config_roms(void)
+static void update_config_roms(void)
 {
 	struct fw_card *card;
 	u32 *config_rom;
@@ -141,8 +139,7 @@ update_config_roms(void)
 	}
 }
 
-int
-fw_core_add_descriptor(struct fw_descriptor *desc)
+int fw_core_add_descriptor(struct fw_descriptor *desc)
 {
 	size_t i;
 
@@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
 	return 0;
 }
 
-void
-fw_core_remove_descriptor(struct fw_descriptor *desc)
+void fw_core_remove_descriptor(struct fw_descriptor *desc)
 {
 	mutex_lock(&card_mutex);
 
@@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
 	mutex_unlock(&card_mutex);
 }
 
+static int set_broadcast_channel(struct device *dev, void *data)
+{
+	fw_device_set_broadcast_channel(fw_device(dev), (long)data);
+	return 0;
+}
+
+static void allocate_broadcast_channel(struct fw_card *card, int generation)
+{
+	int channel, bandwidth = 0;
+
+	fw_iso_resource_manage(card, generation, 1ULL << 31,
+			       &channel, &bandwidth, true);
+	if (channel == 31) {
+		card->broadcast_channel_allocated = true;
+		device_for_each_child(card->device, (void *)(long)generation,
+				      set_broadcast_channel);
+	}
+}
+
 static const char gap_count_table[] = {
 	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
 };
 
-void
-fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
+void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 {
 	int scheduled;
 
@@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
 	fw_card_put(card);
 }
 
-static void
-fw_card_bm_work(struct work_struct *work)
+static void fw_card_bm_work(struct work_struct *work)
 {
 	struct fw_card *card = container_of(work, struct fw_card, work.work);
 	struct fw_device *root_device;
-	struct fw_node *root_node, *local_node;
+	struct fw_node *root_node;
 	unsigned long flags;
-	int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
+	int root_id, new_root_id, irm_id, local_id;
+	int gap_count, generation, grace, rcode;
 	bool do_reset = false;
 	bool root_device_is_running;
 	bool root_device_is_cmc;
 	__be32 lock_data[2];
 
 	spin_lock_irqsave(&card->lock, flags);
-	local_node = card->local_node;
-	root_node = card->root_node;
 
-	if (local_node == NULL) {
+	if (card->local_node == NULL) {
 		spin_unlock_irqrestore(&card->lock, flags);
 		goto out_put_card;
 	}
-	fw_node_get(local_node);
-	fw_node_get(root_node);
 
 	generation = card->generation;
+	root_node = card->root_node;
+	fw_node_get(root_node);
 	root_device = root_node->data;
 	root_device_is_running = root_device &&
 			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
 	root_device_is_cmc = root_device && root_device->cmc;
 	root_id = root_node->node_id;
-	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
+	irm_id = card->irm_node->node_id;
+	local_id = card->local_node->node_id;
+
+	grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
 
 	if (is_next_generation(generation, card->bm_generation) ||
 	    (card->bm_generation != generation && grace)) {
@@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work)
 	 * next generation.
 	 */
 
-		irm_id = card->irm_node->node_id;
 		if (!card->irm_node->link_on) {
-			new_root_id = local_node->node_id;
+			new_root_id = local_id;
 			fw_notify("IRM has link off, making local node (%02x) root.\n",
 				  new_root_id);
 			goto pick_me;
 		}
 
 		lock_data[0] = cpu_to_be32(0x3f);
-		lock_data[1] = cpu_to_be32(local_node->node_id);
+		lock_data[1] = cpu_to_be32(local_id);
 
 		spin_unlock_irqrestore(&card->lock, flags);
 
@@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work)
 		goto out;
 
 	if (rcode == RCODE_COMPLETE &&
-	    lock_data[0] != cpu_to_be32(0x3f))
-		/* Somebody else is BM, let them do the work. */
+	    lock_data[0] != cpu_to_be32(0x3f)) {
+
+		/* Somebody else is BM.  Only act as IRM. */
+		if (local_id == irm_id)
+			allocate_broadcast_channel(card, generation);
+
 		goto out;
+	}
 
 	spin_lock_irqsave(&card->lock, flags);
 
@@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work)
 			 * do a bus reset and pick the local node as
 			 * root, and thus, IRM.
 			 */
-			new_root_id = local_node->node_id;
+			new_root_id = local_id;
 			fw_notify("BM lock failed, making local node (%02x) root.\n",
 				  new_root_id);
 			goto pick_me;
 		}
 	} else if (card->bm_generation != generation) {
 		/*
-		 * OK, we weren't BM in the last generation, and it's
-		 * less than 100ms since last bus reset. Reschedule
-		 * this task 100ms from now.
+		 * We weren't BM in the last generation, and the last
+		 * bus reset is less than 125ms ago.  Reschedule this job.
 		 */
 		spin_unlock_irqrestore(&card->lock, flags);
-		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
+		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
 		goto out;
 	}
 
@@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work)
 		 * Either link_on is false, or we failed to read the
 		 * config rom.  In either case, pick another root.
 		 */
-		new_root_id = local_node->node_id;
+		new_root_id = local_id;
 	} else if (!root_device_is_running) {
 		/*
 		 * If we haven't probed this device yet, bail out now
@@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work)
 		 * successfully read the config rom, but it's not
 		 * cycle master capable.
 		 */
-		new_root_id = local_node->node_id;
+		new_root_id = local_id;
 	}
 
  pick_me:
@@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work)
 			  card->index, new_root_id, gap_count);
 		fw_send_phy_config(card, new_root_id, generation, gap_count);
 		fw_core_initiate_bus_reset(card, 1);
+		/* Will allocate broadcast channel after the reset. */
+	} else {
+		if (local_id == irm_id)
+			allocate_broadcast_channel(card, generation);
 	}
+
  out:
 	fw_node_put(root_node);
-	fw_node_put(local_node);
  out_put_card:
 	fw_card_put(card);
 }
 
-static void
-flush_timer_callback(unsigned long data)
+static void flush_timer_callback(unsigned long data)
 {
 	struct fw_card *card = (struct fw_card *)data;
 
 	fw_flush_transactions(card);
 }
 
-void
-fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
-		   struct device *device)
+void fw_card_initialize(struct fw_card *card,
+			const struct fw_card_driver *driver,
+			struct device *device)
 {
 	static atomic_t index = ATOMIC_INIT(-1);
 
@@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
 }
 EXPORT_SYMBOL(fw_card_initialize);
 
-int
-fw_card_add(struct fw_card *card,
-	    u32 max_receive, u32 link_speed, u64 guid)
+int fw_card_add(struct fw_card *card,
+		u32 max_receive, u32 link_speed, u64 guid)
 {
 	u32 *config_rom;
 	size_t length;
-	int err;
+	int ret;
 
 	card->max_receive = max_receive;
 	card->link_speed = link_speed;
@@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card,
 	list_add_tail(&card->link, &card_list);
 	mutex_unlock(&card_mutex);
 
-	err = card->driver->enable(card, config_rom, length);
-	if (err < 0) {
+	ret = card->driver->enable(card, config_rom, length);
+	if (ret < 0) {
 		mutex_lock(&card_mutex);
 		list_del(&card->link);
 		mutex_unlock(&card_mutex);
 	}
-	return err;
+
+	return ret;
 }
 EXPORT_SYMBOL(fw_card_add);
 
@@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add);
  * dummy driver just fails all IO.
  */
 
-static int
-dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
+static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
 {
 	BUG();
 	return -1;
 }
 
-static int
-dummy_update_phy_reg(struct fw_card *card, int address,
-		     int clear_bits, int set_bits)
+static int dummy_update_phy_reg(struct fw_card *card, int address,
+				int clear_bits, int set_bits)
 {
 	return -ENODEV;
 }
 
-static int
-dummy_set_config_rom(struct fw_card *card,
-		     u32 *config_rom, size_t length)
+static int dummy_set_config_rom(struct fw_card *card,
+				u32 *config_rom, size_t length)
 {
 	/*
 	 * We take the card out of card_list before setting the dummy
@@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card,
 	return -1;
 }
 
-static void
-dummy_send_request(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
 {
 	packet->callback(packet, card, -ENODEV);
 }
 
-static void
-dummy_send_response(struct fw_card *card, struct fw_packet *packet)
+static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
 {
 	packet->callback(packet, card, -ENODEV);
 }
 
-static int
-dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
 {
 	return -ENOENT;
 }
 
-static int
-dummy_enable_phys_dma(struct fw_card *card,
-		      int node_id, int generation)
+static int dummy_enable_phys_dma(struct fw_card *card,
+				 int node_id, int generation)
 {
 	return -ENODEV;
 }
@@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = {
 	.enable_phys_dma = dummy_enable_phys_dma,
 };
 
-void
-fw_card_release(struct kref *kref)
+void fw_card_release(struct kref *kref)
 {
 	struct fw_card *card = container_of(kref, struct fw_card, kref);
 
 	complete(&card->done);
 }
 
-void
-fw_core_remove_card(struct fw_card *card)
+void fw_core_remove_card(struct fw_card *card)
 {
 	card->driver->update_phy_reg(card, 4,
 				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@@ -536,8 +548,7 @@ fw_core_remove_card(struct fw_card *card)
 }
 EXPORT_SYMBOL(fw_core_remove_card);
 
-int
-fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
+int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
 {
 	int reg = short_reset ? 5 : 1;
 	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
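
allocate_broadcast_channel() above passes the bus generation through device_for_each_child()'s
opaque void * argument by casting it to (void *)(long). A minimal standalone sketch of that
pattern; apply_generation() and apply_to_children() are hypothetical names, only
device_for_each_child() is the real driver-core API:

	#include <linux/device.h>

	/* Hypothetical callback: unpack the scalar smuggled through 'data'. */
	static int apply_generation(struct device *child, void *data)
	{
		int generation = (long)data;	/* round-trips safely: long is pointer-sized */

		/* ... per-child work keyed on 'generation' ... */
		return 0;			/* returning nonzero stops the iteration */
	}

	/* Hypothetical caller: pack the scalar instead of passing a pointer to it. */
	static void apply_to_children(struct device *parent, int generation)
	{
		device_for_each_child(parent, (void *)(long)generation, apply_generation);
	}

Packing the value into the pointer avoids allocating a context struct for a single int.
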
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index ed03234cbea8..7eb6594cc3e5 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -18,87 +18,162 @@
  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
 #include <linux/device.h>
-#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/firewire-cdev.h>
+#include <linux/idr.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/preempt.h>
+#include <linux/spinlock.h>
 #include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/idr.h>
-#include <linux/compat.h>
-#include <linux/firewire-cdev.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
 #include <asm/system.h>
 #include <asm/uaccess.h>
-#include "fw-transaction.h"
-#include "fw-topology.h"
+
 #include "fw-device.h"
+#include "fw-topology.h"
+#include "fw-transaction.h"
+
+struct client {
+	u32 version;
+	struct fw_device *device;
+
+	spinlock_t lock;
+	bool in_shutdown;
+	struct idr resource_idr;
+	struct list_head event_list;
+	wait_queue_head_t wait;
+	u64 bus_reset_closure;
+
+	struct fw_iso_context *iso_context;
+	u64 iso_closure;
+	struct fw_iso_buffer buffer;
+	unsigned long vm_start;
 
-struct client;
-struct client_resource {
 	struct list_head link;
-	void (*release)(struct client *client, struct client_resource *r);
-	u32 handle;
+	struct kref kref;
 };
 
+static inline void client_get(struct client *client)
+{
+	kref_get(&client->kref);
+}
+
+static void client_release(struct kref *kref)
+{
+	struct client *client = container_of(kref, struct client, kref);
+
+	fw_device_put(client->device);
+	kfree(client);
+}
+
+static void client_put(struct client *client)
+{
+	kref_put(&client->kref, client_release);
+}
+
+struct client_resource;
+typedef void (*client_resource_release_fn_t)(struct client *,
+					     struct client_resource *);
+struct client_resource {
+	client_resource_release_fn_t release;
+	int handle;
+};
+
+struct address_handler_resource {
+	struct client_resource resource;
+	struct fw_address_handler handler;
+	__u64 closure;
+	struct client *client;
+};
+
+struct outbound_transaction_resource {
+	struct client_resource resource;
+	struct fw_transaction transaction;
+};
+
+struct inbound_transaction_resource {
+	struct client_resource resource;
+	struct fw_request *request;
+	void *data;
+	size_t length;
+};
+
+struct descriptor_resource {
+	struct client_resource resource;
+	struct fw_descriptor descriptor;
+	u32 data[0];
+};
+
+struct iso_resource {
+	struct client_resource resource;
+	struct client *client;
+	/* Schedule work and access todo only with client->lock held. */
+	struct delayed_work work;
+	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
+	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
+	int generation;
+	u64 channels;
+	s32 bandwidth;
+	struct iso_resource_event *e_alloc, *e_dealloc;
+};
+
+static void schedule_iso_resource(struct iso_resource *);
+static void release_iso_resource(struct client *, struct client_resource *);
+
 /*
  * dequeue_event() just kfree()'s the event, so the event has to be
- * the first field in the struct.
+ * the first field in a struct XYZ_event.
  */
-
 struct event {
 	struct { void *data; size_t size; } v[2];
 	struct list_head link;
 };
 
-struct bus_reset {
+struct bus_reset_event {
 	struct event event;
 	struct fw_cdev_event_bus_reset reset;
 };
 
-struct response {
+struct outbound_transaction_event {
 	struct event event;
-	struct fw_transaction transaction;
 	struct client *client;
-	struct client_resource resource;
+	struct outbound_transaction_resource r;
 	struct fw_cdev_event_response response;
 };
 
-struct iso_interrupt {
+struct inbound_transaction_event {
 	struct event event;
-	struct fw_cdev_event_iso_interrupt interrupt;
+	struct fw_cdev_event_request request;
 };
 
-struct client {
-	u32 version;
-	struct fw_device *device;
-	spinlock_t lock;
-	u32 resource_handle;
-	struct list_head resource_list;
-	struct list_head event_list;
-	wait_queue_head_t wait;
-	u64 bus_reset_closure;
-
-	struct fw_iso_context *iso_context;
-	u64 iso_closure;
-	struct fw_iso_buffer buffer;
-	unsigned long vm_start;
+struct iso_interrupt_event {
+	struct event event;
+	struct fw_cdev_event_iso_interrupt interrupt;
+};
 
-	struct list_head link;
+struct iso_resource_event {
+	struct event event;
+	struct fw_cdev_event_iso_resource resource;
 };
 
-static inline void __user *
-u64_to_uptr(__u64 value)
+static inline void __user *u64_to_uptr(__u64 value)
 {
 	return (void __user *)(unsigned long)value;
 }
 
-static inline __u64
-uptr_to_u64(void __user *ptr)
+static inline __u64 uptr_to_u64(void __user *ptr)
 {
 	return (__u64)(unsigned long)ptr;
 }
@@ -107,7 +182,6 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 {
 	struct fw_device *device;
 	struct client *client;
-	unsigned long flags;
 
 	device = fw_device_get_by_devt(inode->i_rdev);
 	if (device == NULL)
@@ -125,16 +199,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
 	}
 
 	client->device = device;
-	INIT_LIST_HEAD(&client->event_list);
-	INIT_LIST_HEAD(&client->resource_list);
 	spin_lock_init(&client->lock);
+	idr_init(&client->resource_idr);
+	INIT_LIST_HEAD(&client->event_list);
 	init_waitqueue_head(&client->wait);
+	kref_init(&client->kref);
 
 	file->private_data = client;
 
-	spin_lock_irqsave(&device->card->lock, flags);
+	mutex_lock(&device->client_list_mutex);
 	list_add_tail(&client->link, &device->client_list);
-	spin_unlock_irqrestore(&device->card->lock, flags);
+	mutex_unlock(&device->client_list_mutex);
 
 	return 0;
 }
@@ -150,68 +225,69 @@ static void queue_event(struct client *client, struct event *event,
 	event->v[1].size = size1;
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_add_tail(&event->link, &client->event_list);
+	if (client->in_shutdown)
+		kfree(event);
+	else
+		list_add_tail(&event->link, &client->event_list);
 	spin_unlock_irqrestore(&client->lock, flags);
 
 	wake_up_interruptible(&client->wait);
 }
 
-static int
-dequeue_event(struct client *client, char __user *buffer, size_t count)
+static int dequeue_event(struct client *client,
+			 char __user *buffer, size_t count)
 {
-	unsigned long flags;
 	struct event *event;
 	size_t size, total;
-	int i, retval;
+	int i, ret;
 
-	retval = wait_event_interruptible(client->wait,
-					  !list_empty(&client->event_list) ||
-					  fw_device_is_shutdown(client->device));
-	if (retval < 0)
-		return retval;
+	ret = wait_event_interruptible(client->wait,
+			!list_empty(&client->event_list) ||
+			fw_device_is_shutdown(client->device));
+	if (ret < 0)
+		return ret;
 
 	if (list_empty(&client->event_list) &&
 	    fw_device_is_shutdown(client->device))
 		return -ENODEV;
 
-	spin_lock_irqsave(&client->lock, flags);
-	event = container_of(client->event_list.next, struct event, link);
+	spin_lock_irq(&client->lock);
+	event = list_first_entry(&client->event_list, struct event, link);
 	list_del(&event->link);
-	spin_unlock_irqrestore(&client->lock, flags);
+	spin_unlock_irq(&client->lock);
 
 	total = 0;
 	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
 		size = min(event->v[i].size, count - total);
 		if (copy_to_user(buffer + total, event->v[i].data, size)) {
-			retval = -EFAULT;
+			ret = -EFAULT;
 			goto out;
 		}
 		total += size;
 	}
-	retval = total;
+	ret = total;
 
  out:
 	kfree(event);
 
-	return retval;
+	return ret;
 }
 
-static ssize_t
-fw_device_op_read(struct file *file,
-		  char __user *buffer, size_t count, loff_t *offset)
+static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
+				 size_t count, loff_t *offset)
 {
 	struct client *client = file->private_data;
 
 	return dequeue_event(client, buffer, count);
 }
 
-/* caller must hold card->lock so that node pointers can be dereferenced here */
-static void
-fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
-		     struct client *client)
+static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
+				 struct client *client)
 {
 	struct fw_card *card = client->device->card;
 
+	spin_lock_irq(&card->lock);
+
 	event->closure = client->bus_reset_closure;
 	event->type = FW_CDEV_EVENT_BUS_RESET;
 	event->generation = client->device->generation;
@@ -220,39 +296,49 @@ fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
 	event->bm_node_id = 0; /* FIXME: We don't track the BM. */
 	event->irm_node_id = card->irm_node->node_id;
 	event->root_node_id = card->root_node->node_id;
+
+	spin_unlock_irq(&card->lock);
 }
 
-static void
-for_each_client(struct fw_device *device,
-		void (*callback)(struct client *client))
+static void for_each_client(struct fw_device *device,
+			    void (*callback)(struct client *client))
 {
-	struct fw_card *card = device->card;
 	struct client *c;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->lock, flags);
 
+	mutex_lock(&device->client_list_mutex);
 	list_for_each_entry(c, &device->client_list, link)
 		callback(c);
+	mutex_unlock(&device->client_list_mutex);
+}
+
+static int schedule_reallocations(int id, void *p, void *data)
+{
+	struct client_resource *r = p;
 
-	spin_unlock_irqrestore(&card->lock, flags);
+	if (r->release == release_iso_resource)
+		schedule_iso_resource(container_of(r,
+					struct iso_resource, resource));
+	return 0;
 }
 
-static void
-queue_bus_reset_event(struct client *client)
+static void queue_bus_reset_event(struct client *client)
 {
-	struct bus_reset *bus_reset;
+	struct bus_reset_event *e;
 
-	bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
-	if (bus_reset == NULL) {
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (e == NULL) {
 		fw_notify("Out of memory when allocating bus reset event\n");
 		return;
 	}
 
-	fill_bus_reset_event(&bus_reset->reset, client);
+	fill_bus_reset_event(&e->reset, client);
+
+	queue_event(client, &e->event,
+		    &e->reset, sizeof(e->reset), NULL, 0);
 
-	queue_event(client, &bus_reset->event,
-		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
+	spin_lock_irq(&client->lock);
+	idr_for_each(&client->resource_idr, schedule_reallocations, client);
+	spin_unlock_irq(&client->lock);
 }
 
 void fw_device_cdev_update(struct fw_device *device)
@@ -274,11 +360,11 @@ static int ioctl_get_info(struct client *client, void *buffer)
 {
 	struct fw_cdev_get_info *get_info = buffer;
 	struct fw_cdev_event_bus_reset bus_reset;
-	struct fw_card *card = client->device->card;
 	unsigned long ret = 0;
 
 	client->version = get_info->version;
 	get_info->version = FW_CDEV_VERSION;
+	get_info->card = client->device->card->index;
 
 	down_read(&fw_device_rwsem);
 
@@ -300,49 +386,61 @@ static int ioctl_get_info(struct client *client, void *buffer)
 	client->bus_reset_closure = get_info->bus_reset_closure;
 	if (get_info->bus_reset != 0) {
 		void __user *uptr = u64_to_uptr(get_info->bus_reset);
-		unsigned long flags;
 
-		spin_lock_irqsave(&card->lock, flags);
 		fill_bus_reset_event(&bus_reset, client);
-		spin_unlock_irqrestore(&card->lock, flags);
-
 		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
 			return -EFAULT;
 	}
 
-	get_info->card = card->index;
-
 	return 0;
 }
 
-static void
-add_client_resource(struct client *client, struct client_resource *resource)
+static int add_client_resource(struct client *client,
+			       struct client_resource *resource, gfp_t gfp_mask)
 {
 	unsigned long flags;
+	int ret;
+
+ retry:
+	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
+		return -ENOMEM;
 
 	spin_lock_irqsave(&client->lock, flags);
-	list_add_tail(&resource->link, &client->resource_list);
-	resource->handle = client->resource_handle++;
+	if (client->in_shutdown)
+		ret = -ECANCELED;
+	else
+		ret = idr_get_new(&client->resource_idr, resource,
+				  &resource->handle);
+	if (ret >= 0) {
+		client_get(client);
+		if (resource->release == release_iso_resource)
+			schedule_iso_resource(container_of(resource,
+					struct iso_resource, resource));
+	}
 	spin_unlock_irqrestore(&client->lock, flags);
+
+	if (ret == -EAGAIN)
+		goto retry;
+
+	return ret < 0 ? ret : 0;
 }
 
-static int
-release_client_resource(struct client *client, u32 handle,
-			struct client_resource **resource)
+static int release_client_resource(struct client *client, u32 handle,
+				   client_resource_release_fn_t release,
+				   struct client_resource **resource)
 {
 	struct client_resource *r;
-	unsigned long flags;
 
-	spin_lock_irqsave(&client->lock, flags);
-	list_for_each_entry(r, &client->resource_list, link) {
-		if (r->handle == handle) {
-			list_del(&r->link);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&client->lock, flags);
+	spin_lock_irq(&client->lock);
+	if (client->in_shutdown)
+		r = NULL;
+	else
+		r = idr_find(&client->resource_idr, handle);
+	if (r && r->release == release)
+		idr_remove(&client->resource_idr, handle);
+	spin_unlock_irq(&client->lock);
 
-	if (&r->link == &client->resource_list)
+	if (!(r && r->release == release))
 		return -EINVAL;
 
 	if (resource)
@@ -350,203 +448,239 @@ release_client_resource(struct client *client, u32 handle,
350 else 448 else
351 r->release(client, r); 449 r->release(client, r);
352 450
451 client_put(client);
452
353 return 0; 453 return 0;
354} 454}
355 455
356static void 456static void release_transaction(struct client *client,
357release_transaction(struct client *client, struct client_resource *resource) 457 struct client_resource *resource)
358{ 458{
359 struct response *response = 459 struct outbound_transaction_resource *r = container_of(resource,
360 container_of(resource, struct response, resource); 460 struct outbound_transaction_resource, resource);
361 461
362 fw_cancel_transaction(client->device->card, &response->transaction); 462 fw_cancel_transaction(client->device->card, &r->transaction);
363} 463}
364 464
365static void 465static void complete_transaction(struct fw_card *card, int rcode,
366complete_transaction(struct fw_card *card, int rcode, 466 void *payload, size_t length, void *data)
367 void *payload, size_t length, void *data)
368{ 467{
369 struct response *response = data; 468 struct outbound_transaction_event *e = data;
370 struct client *client = response->client; 469 struct fw_cdev_event_response *rsp = &e->response;
470 struct client *client = e->client;
371 unsigned long flags; 471 unsigned long flags;
372 struct fw_cdev_event_response *r = &response->response;
373 472
374 if (length < r->length) 473 if (length < rsp->length)
375 r->length = length; 474 rsp->length = length;
376 if (rcode == RCODE_COMPLETE) 475 if (rcode == RCODE_COMPLETE)
377 memcpy(r->data, payload, r->length); 476 memcpy(rsp->data, payload, rsp->length);
378 477
379 spin_lock_irqsave(&client->lock, flags); 478 spin_lock_irqsave(&client->lock, flags);
380 list_del(&response->resource.link); 479 /*
480 * 1. If called while in shutdown, the idr tree must be left untouched.
481 * The idr handle will be removed and the client reference will be
482 * dropped later.
483 * 2. If the call chain was release_client_resource ->
484 * release_transaction -> complete_transaction (instead of a normal
485 * conclusion of the transaction), i.e. if this resource was already
486 * unregistered from the idr, the client reference will be dropped
487 * by release_client_resource and we must not drop it here.
488 */
489 if (!client->in_shutdown &&
490 idr_find(&client->resource_idr, e->r.resource.handle)) {
491 idr_remove(&client->resource_idr, e->r.resource.handle);
492 /* Drop the idr's reference */
493 client_put(client);
494 }
381 spin_unlock_irqrestore(&client->lock, flags); 495 spin_unlock_irqrestore(&client->lock, flags);
382 496
383 r->type = FW_CDEV_EVENT_RESPONSE; 497 rsp->type = FW_CDEV_EVENT_RESPONSE;
384 r->rcode = rcode; 498 rsp->rcode = rcode;
385 499
386 /* 500 /*
387 * In the case that sizeof(*r) doesn't align with the position of the 501 * In the case that sizeof(*rsp) doesn't align with the position of the
388 * data, and the read is short, preserve an extra copy of the data 502 * data, and the read is short, preserve an extra copy of the data
389 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless 503 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
390 * for short reads and some apps depended on it, this is both safe 504 * for short reads and some apps depended on it, this is both safe
391 * and prudent for compatibility. 505 * and prudent for compatibility.
392 */ 506 */
393 if (r->length <= sizeof(*r) - offsetof(typeof(*r), data)) 507 if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
394 queue_event(client, &response->event, r, sizeof(*r), 508 queue_event(client, &e->event, rsp, sizeof(*rsp),
395 r->data, r->length); 509 rsp->data, rsp->length);
396 else 510 else
397 queue_event(client, &response->event, r, sizeof(*r) + r->length, 511 queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
398 NULL, 0); 512 NULL, 0);
513
514 /* Drop the transaction callback's reference */
515 client_put(client);
399} 516}
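
Together with init_request() below, each outbound transaction pins the client twice. A schematic of who drops which reference, assuming client_get()/client_put() manage a plain reference count:

	/*
	 * Reference pairing per outbound transaction (schematic):
	 *
	 *   idr entry ref:  taken in add_client_resource(); dropped here
	 *                   when complete_transaction() removes the entry
	 *                   itself, or else by the path that already
	 *                   removed it (release_client_resource()/shutdown).
	 *
	 *   callback ref:   taken in init_request(); always dropped at the
	 *                   end of complete_transaction(), including for
	 *                   cancelled transactions (RCODE_CANCELLED).
	 */
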
400 517
401static int ioctl_send_request(struct client *client, void *buffer) 518static int init_request(struct client *client,
519 struct fw_cdev_send_request *request,
520 int destination_id, int speed)
402{ 521{
403 struct fw_device *device = client->device; 522 struct outbound_transaction_event *e;
404 struct fw_cdev_send_request *request = buffer; 523 int ret;
405 struct response *response;
406 524
407 /* What is the biggest size we'll accept, really? */ 525 if (request->tcode != TCODE_STREAM_DATA &&
408 if (request->length > 4096) 526 (request->length > 4096 || request->length > 512 << speed))
409 return -EINVAL; 527 return -EIO;
410 528
411 response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL); 529 e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
412 if (response == NULL) 530 if (e == NULL)
413 return -ENOMEM; 531 return -ENOMEM;
414 532
415 response->client = client; 533 e->client = client;
416 response->response.length = request->length; 534 e->response.length = request->length;
417 response->response.closure = request->closure; 535 e->response.closure = request->closure;
418 536
419 if (request->data && 537 if (request->data &&
420 copy_from_user(response->response.data, 538 copy_from_user(e->response.data,
421 u64_to_uptr(request->data), request->length)) { 539 u64_to_uptr(request->data), request->length)) {
422 kfree(response); 540 ret = -EFAULT;
423 return -EFAULT; 541 goto failed;
424 } 542 }
425 543
426 response->resource.release = release_transaction; 544 e->r.resource.release = release_transaction;
427 add_client_resource(client, &response->resource); 545 ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
546 if (ret < 0)
547 goto failed;
428 548
429 fw_send_request(device->card, &response->transaction, 549 /* Get a reference for the transaction callback */
430 request->tcode & 0x1f, 550 client_get(client);
431 device->node->node_id,
432 request->generation,
433 device->max_speed,
434 request->offset,
435 response->response.data, request->length,
436 complete_transaction, response);
437 551
438 if (request->data) 552 fw_send_request(client->device->card, &e->r.transaction,
439 return sizeof(request) + request->length; 553 request->tcode, destination_id, request->generation,
440 else 554 speed, request->offset, e->response.data,
441 return sizeof(request); 555 request->length, complete_transaction, e);
556 return 0;
557
558 failed:
559 kfree(e);
560
561 return ret;
442} 562}
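
init_request() now caps non-stream payloads at both 4096 bytes and the speed-dependent IEEE 1394 maximum of 512 << speed bytes, i.e. 512 bytes at S100, doubling with each speed code. A self-contained illustration of that bound (hypothetical helper, not part of the driver):

	#include <stdio.h>

	/* Maximum asynchronous payload: 512 bytes at S100 (speed code 0),
	 * doubling with each speed code. */
	static int max_async_payload(int speed_code)
	{
	        return 512 << speed_code;
	}

	int main(void)
	{
	        int s;

	        for (s = 0; s <= 3; s++)        /* S100 .. S800 */
	                printf("speed code %d: up to %d bytes\n",
	                       s, max_async_payload(s));
	        return 0;
	}
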
443 563
444struct address_handler { 564static int ioctl_send_request(struct client *client, void *buffer)
445 struct fw_address_handler handler; 565{
446 __u64 closure; 566 struct fw_cdev_send_request *request = buffer;
447 struct client *client;
448 struct client_resource resource;
449};
450 567
451struct request { 568 switch (request->tcode) {
452 struct fw_request *request; 569 case TCODE_WRITE_QUADLET_REQUEST:
453 void *data; 570 case TCODE_WRITE_BLOCK_REQUEST:
454 size_t length; 571 case TCODE_READ_QUADLET_REQUEST:
455 struct client_resource resource; 572 case TCODE_READ_BLOCK_REQUEST:
456}; 573 case TCODE_LOCK_MASK_SWAP:
574 case TCODE_LOCK_COMPARE_SWAP:
575 case TCODE_LOCK_FETCH_ADD:
576 case TCODE_LOCK_LITTLE_ADD:
577 case TCODE_LOCK_BOUNDED_ADD:
578 case TCODE_LOCK_WRAP_ADD:
579 case TCODE_LOCK_VENDOR_DEPENDENT:
580 break;
581 default:
582 return -EINVAL;
583 }
457 584
458struct request_event { 585 return init_request(client, request, client->device->node_id,
459 struct event event; 586 client->device->max_speed);
460 struct fw_cdev_event_request request; 587}
461};
462 588
463static void 589static void release_request(struct client *client,
464release_request(struct client *client, struct client_resource *resource) 590 struct client_resource *resource)
465{ 591{
466 struct request *request = 592 struct inbound_transaction_resource *r = container_of(resource,
467 container_of(resource, struct request, resource); 593 struct inbound_transaction_resource, resource);
468 594
469 fw_send_response(client->device->card, request->request, 595 fw_send_response(client->device->card, r->request,
470 RCODE_CONFLICT_ERROR); 596 RCODE_CONFLICT_ERROR);
471 kfree(request); 597 kfree(r);
472} 598}
473 599
474static void 600static void handle_request(struct fw_card *card, struct fw_request *request,
475handle_request(struct fw_card *card, struct fw_request *r, 601 int tcode, int destination, int source,
476 int tcode, int destination, int source, 602 int generation, int speed,
477 int generation, int speed, 603 unsigned long long offset,
478 unsigned long long offset, 604 void *payload, size_t length, void *callback_data)
479 void *payload, size_t length, void *callback_data)
480{ 605{
481 struct address_handler *handler = callback_data; 606 struct address_handler_resource *handler = callback_data;
482 struct request *request; 607 struct inbound_transaction_resource *r;
483 struct request_event *e; 608 struct inbound_transaction_event *e;
484 struct client *client = handler->client; 609 int ret;
485 610
486 request = kmalloc(sizeof(*request), GFP_ATOMIC); 611 r = kmalloc(sizeof(*r), GFP_ATOMIC);
487 e = kmalloc(sizeof(*e), GFP_ATOMIC); 612 e = kmalloc(sizeof(*e), GFP_ATOMIC);
488 if (request == NULL || e == NULL) { 613 if (r == NULL || e == NULL)
489 kfree(request); 614 goto failed;
490 kfree(e);
491 fw_send_response(card, r, RCODE_CONFLICT_ERROR);
492 return;
493 }
494 615
495 request->request = r; 616 r->request = request;
496 request->data = payload; 617 r->data = payload;
497 request->length = length; 618 r->length = length;
498 619
499 request->resource.release = release_request; 620 r->resource.release = release_request;
500 add_client_resource(client, &request->resource); 621 ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
622 if (ret < 0)
623 goto failed;
501 624
502 e->request.type = FW_CDEV_EVENT_REQUEST; 625 e->request.type = FW_CDEV_EVENT_REQUEST;
503 e->request.tcode = tcode; 626 e->request.tcode = tcode;
504 e->request.offset = offset; 627 e->request.offset = offset;
505 e->request.length = length; 628 e->request.length = length;
506 e->request.handle = request->resource.handle; 629 e->request.handle = r->resource.handle;
507 e->request.closure = handler->closure; 630 e->request.closure = handler->closure;
508 631
509 queue_event(client, &e->event, 632 queue_event(handler->client, &e->event,
510 &e->request, sizeof(e->request), payload, length); 633 &e->request, sizeof(e->request), payload, length);
634 return;
635
636 failed:
637 kfree(r);
638 kfree(e);
639 fw_send_response(card, request, RCODE_CONFLICT_ERROR);
511} 640}
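
Both allocations above use GFP_ATOMIC because handle_request() is invoked from the card driver's interrupt/tasklet path; that is also why add_client_resource() grew a gfp_mask parameter in this patch. As a descriptive note in code form:

	/* Context note, derived from the gfp flags: this callback must not
	 * sleep, so it allocates with GFP_ATOMIC and passes GFP_ATOMIC on
	 * to add_client_resource(), whose idr_pre_get() honours the mask. */
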
512 641
513static void 642static void release_address_handler(struct client *client,
514release_address_handler(struct client *client, 643 struct client_resource *resource)
515 struct client_resource *resource)
516{ 644{
517 struct address_handler *handler = 645 struct address_handler_resource *r =
518 container_of(resource, struct address_handler, resource); 646 container_of(resource, struct address_handler_resource, resource);
519 647
520 fw_core_remove_address_handler(&handler->handler); 648 fw_core_remove_address_handler(&r->handler);
521 kfree(handler); 649 kfree(r);
522} 650}
523 651
524static int ioctl_allocate(struct client *client, void *buffer) 652static int ioctl_allocate(struct client *client, void *buffer)
525{ 653{
526 struct fw_cdev_allocate *request = buffer; 654 struct fw_cdev_allocate *request = buffer;
527 struct address_handler *handler; 655 struct address_handler_resource *r;
528 struct fw_address_region region; 656 struct fw_address_region region;
657 int ret;
529 658
530 handler = kmalloc(sizeof(*handler), GFP_KERNEL); 659 r = kmalloc(sizeof(*r), GFP_KERNEL);
531 if (handler == NULL) 660 if (r == NULL)
532 return -ENOMEM; 661 return -ENOMEM;
533 662
534 region.start = request->offset; 663 region.start = request->offset;
535 region.end = request->offset + request->length; 664 region.end = request->offset + request->length;
536 handler->handler.length = request->length; 665 r->handler.length = request->length;
537 handler->handler.address_callback = handle_request; 666 r->handler.address_callback = handle_request;
538 handler->handler.callback_data = handler; 667 r->handler.callback_data = r;
539 handler->closure = request->closure; 668 r->closure = request->closure;
540 handler->client = client; 669 r->client = client;
541 670
542 if (fw_core_add_address_handler(&handler->handler, &region) < 0) { 671 ret = fw_core_add_address_handler(&r->handler, &region);
543 kfree(handler); 672 if (ret < 0) {
544 return -EBUSY; 673 kfree(r);
674 return ret;
545 } 675 }
546 676
547 handler->resource.release = release_address_handler; 677 r->resource.release = release_address_handler;
548 add_client_resource(client, &handler->resource); 678 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
549 request->handle = handler->resource.handle; 679 if (ret < 0) {
680 release_address_handler(client, &r->resource);
681 return ret;
682 }
683 request->handle = r->resource.handle;
550 684
551 return 0; 685 return 0;
552} 686}
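
Seen from user space, ioctl_allocate() backs the FW_CDEV_IOC_ALLOCATE command: the client claims a CSR address range, and incoming requests to it arrive as FW_CDEV_EVENT_REQUEST events carrying the closure set here. A hedged usage sketch (struct fw_cdev_allocate field names as in linux/firewire-cdev.h of this era; error handling elided):

	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	/* fd is an open /dev/fw* character device. */
	static int sketch_allocate_range(int fd, __u64 offset, __u32 length)
	{
	        struct fw_cdev_allocate a = {
	                .offset  = offset,      /* e.g. somewhere in units space */
	                .length  = length,
	                .closure = 0,           /* echoed back in request events */
	        };

	        if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a) < 0)
	                return -1;

	        return a.handle;        /* pass to FW_CDEV_IOC_DEALLOCATE later */
	}
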
@@ -555,18 +689,22 @@ static int ioctl_deallocate(struct client *client, void *buffer)
555{ 689{
556 struct fw_cdev_deallocate *request = buffer; 690 struct fw_cdev_deallocate *request = buffer;
557 691
558 return release_client_resource(client, request->handle, NULL); 692 return release_client_resource(client, request->handle,
693 release_address_handler, NULL);
559} 694}
560 695
561static int ioctl_send_response(struct client *client, void *buffer) 696static int ioctl_send_response(struct client *client, void *buffer)
562{ 697{
563 struct fw_cdev_send_response *request = buffer; 698 struct fw_cdev_send_response *request = buffer;
564 struct client_resource *resource; 699 struct client_resource *resource;
565 struct request *r; 700 struct inbound_transaction_resource *r;
566 701
567 if (release_client_resource(client, request->handle, &resource) < 0) 702 if (release_client_resource(client, request->handle,
703 release_request, &resource) < 0)
568 return -EINVAL; 704 return -EINVAL;
569 r = container_of(resource, struct request, resource); 705
706 r = container_of(resource, struct inbound_transaction_resource,
707 resource);
570 if (request->length < r->length) 708 if (request->length < r->length)
571 r->length = request->length; 709 r->length = request->length;
572 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) 710 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
@@ -588,85 +726,92 @@ static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
588 return fw_core_initiate_bus_reset(client->device->card, short_reset); 726 return fw_core_initiate_bus_reset(client->device->card, short_reset);
589} 727}
590 728
591struct descriptor {
592 struct fw_descriptor d;
593 struct client_resource resource;
594 u32 data[0];
595};
596
597static void release_descriptor(struct client *client, 729static void release_descriptor(struct client *client,
598 struct client_resource *resource) 730 struct client_resource *resource)
599{ 731{
600 struct descriptor *descriptor = 732 struct descriptor_resource *r =
601 container_of(resource, struct descriptor, resource); 733 container_of(resource, struct descriptor_resource, resource);
602 734
603 fw_core_remove_descriptor(&descriptor->d); 735 fw_core_remove_descriptor(&r->descriptor);
604 kfree(descriptor); 736 kfree(r);
605} 737}
606 738
607static int ioctl_add_descriptor(struct client *client, void *buffer) 739static int ioctl_add_descriptor(struct client *client, void *buffer)
608{ 740{
609 struct fw_cdev_add_descriptor *request = buffer; 741 struct fw_cdev_add_descriptor *request = buffer;
610 struct descriptor *descriptor; 742 struct fw_card *card = client->device->card;
611 int retval; 743 struct descriptor_resource *r;
744 int ret;
745
746 /* Access policy: Allow this ioctl only on local nodes' device files. */
747 spin_lock_irq(&card->lock);
748 ret = client->device->node_id != card->local_node->node_id;
749 spin_unlock_irq(&card->lock);
750 if (ret)
751 return -ENOSYS;
612 752
613 if (request->length > 256) 753 if (request->length > 256)
614 return -EINVAL; 754 return -EINVAL;
615 755
616 descriptor = 756 r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
617 kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL); 757 if (r == NULL)
618 if (descriptor == NULL)
619 return -ENOMEM; 758 return -ENOMEM;
620 759
621 if (copy_from_user(descriptor->data, 760 if (copy_from_user(r->data,
622 u64_to_uptr(request->data), request->length * 4)) { 761 u64_to_uptr(request->data), request->length * 4)) {
623 kfree(descriptor); 762 ret = -EFAULT;
624 return -EFAULT; 763 goto failed;
625 } 764 }
626 765
627 descriptor->d.length = request->length; 766 r->descriptor.length = request->length;
628 descriptor->d.immediate = request->immediate; 767 r->descriptor.immediate = request->immediate;
629 descriptor->d.key = request->key; 768 r->descriptor.key = request->key;
630 descriptor->d.data = descriptor->data; 769 r->descriptor.data = r->data;
631 770
632 retval = fw_core_add_descriptor(&descriptor->d); 771 ret = fw_core_add_descriptor(&r->descriptor);
633 if (retval < 0) { 772 if (ret < 0)
634 kfree(descriptor); 773 goto failed;
635 return retval;
636 }
637 774
638 descriptor->resource.release = release_descriptor; 775 r->resource.release = release_descriptor;
639 add_client_resource(client, &descriptor->resource); 776 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
640 request->handle = descriptor->resource.handle; 777 if (ret < 0) {
778 fw_core_remove_descriptor(&r->descriptor);
779 goto failed;
780 }
781 request->handle = r->resource.handle;
641 782
642 return 0; 783 return 0;
784 failed:
785 kfree(r);
786
787 return ret;
643} 788}
644 789
645static int ioctl_remove_descriptor(struct client *client, void *buffer) 790static int ioctl_remove_descriptor(struct client *client, void *buffer)
646{ 791{
647 struct fw_cdev_remove_descriptor *request = buffer; 792 struct fw_cdev_remove_descriptor *request = buffer;
648 793
649 return release_client_resource(client, request->handle, NULL); 794 return release_client_resource(client, request->handle,
795 release_descriptor, NULL);
650} 796}
651 797
652static void 798static void iso_callback(struct fw_iso_context *context, u32 cycle,
653iso_callback(struct fw_iso_context *context, u32 cycle, 799 size_t header_length, void *header, void *data)
654 size_t header_length, void *header, void *data)
655{ 800{
656 struct client *client = data; 801 struct client *client = data;
657 struct iso_interrupt *irq; 802 struct iso_interrupt_event *e;
658 803
659 irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC); 804 e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
660 if (irq == NULL) 805 if (e == NULL)
661 return; 806 return;
662 807
663 irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; 808 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
664 irq->interrupt.closure = client->iso_closure; 809 e->interrupt.closure = client->iso_closure;
665 irq->interrupt.cycle = cycle; 810 e->interrupt.cycle = cycle;
666 irq->interrupt.header_length = header_length; 811 e->interrupt.header_length = header_length;
667 memcpy(irq->interrupt.header, header, header_length); 812 memcpy(e->interrupt.header, header, header_length);
668 queue_event(client, &irq->event, &irq->interrupt, 813 queue_event(client, &e->event, &e->interrupt,
669 sizeof(irq->interrupt) + header_length, NULL, 0); 814 sizeof(e->interrupt) + header_length, NULL, 0);
670} 815}
671 816
672static int ioctl_create_iso_context(struct client *client, void *buffer) 817static int ioctl_create_iso_context(struct client *client, void *buffer)
@@ -871,6 +1016,261 @@ static int ioctl_get_cycle_timer(struct client *client, void *buffer)
871 return 0; 1016 return 0;
872} 1017}
873 1018
1019static void iso_resource_work(struct work_struct *work)
1020{
1021 struct iso_resource_event *e;
1022 struct iso_resource *r =
1023 container_of(work, struct iso_resource, work.work);
1024 struct client *client = r->client;
1025 int generation, channel, bandwidth, todo;
1026 bool skip, free, success;
1027
1028 spin_lock_irq(&client->lock);
1029 generation = client->device->generation;
1030 todo = r->todo;
1031 /* Allow a 1000ms grace period for other reallocations. */
1032 if (todo == ISO_RES_ALLOC &&
1033 time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
1034 if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
1035 client_get(client);
1036 skip = true;
1037 } else {
1038 /* We could be called twice within the same generation. */
1039 skip = todo == ISO_RES_REALLOC &&
1040 r->generation == generation;
1041 }
1042 free = todo == ISO_RES_DEALLOC ||
1043 todo == ISO_RES_ALLOC_ONCE ||
1044 todo == ISO_RES_DEALLOC_ONCE;
1045 r->generation = generation;
1046 spin_unlock_irq(&client->lock);
1047
1048 if (skip)
1049 goto out;
1050
1051 bandwidth = r->bandwidth;
1052
1053 fw_iso_resource_manage(client->device->card, generation,
1054 r->channels, &channel, &bandwidth,
1055 todo == ISO_RES_ALLOC ||
1056 todo == ISO_RES_REALLOC ||
1057 todo == ISO_RES_ALLOC_ONCE);
1058 /*
1059 * Is this generation outdated already? As long as this resource sticks
1060 * in the idr, it will be scheduled again for a newer generation or at
1061 * shutdown.
1062 */
1063 if (channel == -EAGAIN &&
1064 (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1065 goto out;
1066
1067 success = channel >= 0 || bandwidth > 0;
1068
1069 spin_lock_irq(&client->lock);
1070 /*
1071 * Transit from allocation to reallocation, except if the client
1072 * requested deallocation in the meantime.
1073 */
1074 if (r->todo == ISO_RES_ALLOC)
1075 r->todo = ISO_RES_REALLOC;
1076 /*
1077 * Allocation or reallocation failure? Pull this resource out of the
1078 * idr and prepare for deletion, unless the client is shutting down.
1079 */
1080 if (r->todo == ISO_RES_REALLOC && !success &&
1081 !client->in_shutdown &&
1082 idr_find(&client->resource_idr, r->resource.handle)) {
1083 idr_remove(&client->resource_idr, r->resource.handle);
1084 client_put(client);
1085 free = true;
1086 }
1087 spin_unlock_irq(&client->lock);
1088
1089 if (todo == ISO_RES_ALLOC && channel >= 0)
1090 r->channels = 1ULL << channel;
1091
1092 if (todo == ISO_RES_REALLOC && success)
1093 goto out;
1094
1095 if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1096 e = r->e_alloc;
1097 r->e_alloc = NULL;
1098 } else {
1099 e = r->e_dealloc;
1100 r->e_dealloc = NULL;
1101 }
1102 e->resource.handle = r->resource.handle;
1103 e->resource.channel = channel;
1104 e->resource.bandwidth = bandwidth;
1105
1106 queue_event(client, &e->event,
1107 &e->resource, sizeof(e->resource), NULL, 0);
1108
1109 if (free) {
1110 cancel_delayed_work(&r->work);
1111 kfree(r->e_alloc);
1112 kfree(r->e_dealloc);
1113 kfree(r);
1114 }
1115 out:
1116 client_put(client);
1117}
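
The todo values steering this worker are defined earlier in the file, outside this excerpt; from the way iso_resource_work() uses them, their roles are presumably:

	/*
	 * Presumed roles of the iso_resource todo states:
	 *
	 *   ISO_RES_ALLOC         first allocation; on success the worker
	 *                         switches the resource to ISO_RES_REALLOC
	 *   ISO_RES_REALLOC       re-allocate the same resources after each
	 *                         bus reset, as long as the handle lives
	 *   ISO_RES_DEALLOC       free the resources, then delete the record
	 *   ISO_RES_ALLOC_ONCE    one-shot allocation, no automatic realloc
	 *   ISO_RES_DEALLOC_ONCE  one-shot deallocation
	 */
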
1118
1119static void schedule_iso_resource(struct iso_resource *r)
1120{
1121 client_get(r->client);
1122 if (!schedule_delayed_work(&r->work, 0))
1123 client_put(r->client);
1124}
1125
1126static void release_iso_resource(struct client *client,
1127 struct client_resource *resource)
1128{
1129 struct iso_resource *r =
1130 container_of(resource, struct iso_resource, resource);
1131
1132 spin_lock_irq(&client->lock);
1133 r->todo = ISO_RES_DEALLOC;
1134 schedule_iso_resource(r);
1135 spin_unlock_irq(&client->lock);
1136}
1137
1138static int init_iso_resource(struct client *client,
1139 struct fw_cdev_allocate_iso_resource *request, int todo)
1140{
1141 struct iso_resource_event *e1, *e2;
1142 struct iso_resource *r;
1143 int ret;
1144
1145 if ((request->channels == 0 && request->bandwidth == 0) ||
1146 request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
1147 request->bandwidth < 0)
1148 return -EINVAL;
1149
1150 r = kmalloc(sizeof(*r), GFP_KERNEL);
1151 e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1152 e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1153 if (r == NULL || e1 == NULL || e2 == NULL) {
1154 ret = -ENOMEM;
1155 goto fail;
1156 }
1157
1158 INIT_DELAYED_WORK(&r->work, iso_resource_work);
1159 r->client = client;
1160 r->todo = todo;
1161 r->generation = -1;
1162 r->channels = request->channels;
1163 r->bandwidth = request->bandwidth;
1164 r->e_alloc = e1;
1165 r->e_dealloc = e2;
1166
1167 e1->resource.closure = request->closure;
1168 e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1169 e2->resource.closure = request->closure;
1170 e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1171
1172 if (todo == ISO_RES_ALLOC) {
1173 r->resource.release = release_iso_resource;
1174 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1175 if (ret < 0)
1176 goto fail;
1177 } else {
1178 r->resource.release = NULL;
1179 r->resource.handle = -1;
1180 schedule_iso_resource(r);
1181 }
1182 request->handle = r->resource.handle;
1183
1184 return 0;
1185 fail:
1186 kfree(r);
1187 kfree(e1);
1188 kfree(e2);
1189
1190 return ret;
1191}
1192
1193static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
1194{
1195 struct fw_cdev_allocate_iso_resource *request = buffer;
1196
1197 return init_iso_resource(client, request, ISO_RES_ALLOC);
1198}
1199
1200static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
1201{
1202 struct fw_cdev_deallocate *request = buffer;
1203
1204 return release_client_resource(client, request->handle,
1205 release_iso_resource, NULL);
1206}
1207
1208static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
1209{
1210 struct fw_cdev_allocate_iso_resource *request = buffer;
1211
1212 return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
1213}
1214
1215static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
1216{
1217 struct fw_cdev_allocate_iso_resource *request = buffer;
1218
1219 return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
1220}
1221
1222/*
1223 * Returns a speed code: Maximum speed to or from this device,
1224 * limited by the device's link speed, the local node's link speed,
1225 * and all PHY port speeds between the two links.
1226 */
1227static int ioctl_get_speed(struct client *client, void *buffer)
1228{
1229 return client->device->max_speed;
1230}
1231
1232static int ioctl_send_broadcast_request(struct client *client, void *buffer)
1233{
1234 struct fw_cdev_send_request *request = buffer;
1235
1236 switch (request->tcode) {
1237 case TCODE_WRITE_QUADLET_REQUEST:
1238 case TCODE_WRITE_BLOCK_REQUEST:
1239 break;
1240 default:
1241 return -EINVAL;
1242 }
1243
1244 /* Security policy: Only allow accesses to Units Space. */
1245 if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1246 return -EACCES;
1247
1248 return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
1249}
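
The destination LOCAL_BUS | 0x3f is the broadcast node ID on the local bus, and S100 is used presumably because a broadcast must be receivable by every node:

	/* Assuming #define LOCAL_BUS 0xffc0 (fw-topology.h), and with 63
	 * the reserved broadcast node number:
	 *   LOCAL_BUS | 0x3f == 0xffc0 | 0x003f == 0xffff  (broadcast ID) */
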
1250
1251static int ioctl_send_stream_packet(struct client *client, void *buffer)
1252{
1253 struct fw_cdev_send_stream_packet *p = buffer;
1254 struct fw_cdev_send_request request;
1255 int dest;
1256
1257 if (p->speed > client->device->card->link_speed ||
1258 p->length > 1024 << p->speed)
1259 return -EIO;
1260
1261 if (p->tag > 3 || p->channel > 63 || p->sy > 15)
1262 return -EINVAL;
1263
1264 dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
1265 request.tcode = TCODE_STREAM_DATA;
1266 request.length = p->length;
1267 request.closure = p->closure;
1268 request.data = p->data;
1269 request.generation = p->generation;
1270
1271 return init_request(client, &request, dest, p->speed);
1272}
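
fw_stream_packet_destination_id() presumably packs tag, channel, and sy into the 16-bit destination field of the stream packet header; a sketch of a packing consistent with the range checks above (tag <= 3, channel <= 63, sy <= 15), not taken from the patch:

	/* Sketch: tag in bits 15:14, channel in bits 13:8, sy in bits 3:0. */
	static inline int sketch_stream_dest_id(int tag, int channel, int sy)
	{
	        return tag << 14 | channel << 8 | sy;
	}
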
1273
874static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { 1274static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
875 ioctl_get_info, 1275 ioctl_get_info,
876 ioctl_send_request, 1276 ioctl_send_request,
@@ -885,13 +1285,20 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
885 ioctl_start_iso, 1285 ioctl_start_iso,
886 ioctl_stop_iso, 1286 ioctl_stop_iso,
887 ioctl_get_cycle_timer, 1287 ioctl_get_cycle_timer,
1288 ioctl_allocate_iso_resource,
1289 ioctl_deallocate_iso_resource,
1290 ioctl_allocate_iso_resource_once,
1291 ioctl_deallocate_iso_resource_once,
1292 ioctl_get_speed,
1293 ioctl_send_broadcast_request,
1294 ioctl_send_stream_packet,
888}; 1295};
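
dispatch_ioctl() below indexes this array with _IOC_NR(cmd), so each handler's position is user-space ABI and new commands can only ever be appended. For instance, the first entry matches this command number from linux/firewire-cdev.h:

	/* _IOC_NR() of this command is 0x00, the index of ioctl_get_info: */
	#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
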
889 1296
890static int 1297static int dispatch_ioctl(struct client *client,
891dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) 1298 unsigned int cmd, void __user *arg)
892{ 1299{
893 char buffer[256]; 1300 char buffer[256];
894 int retval; 1301 int ret;
895 1302
896 if (_IOC_TYPE(cmd) != '#' || 1303 if (_IOC_TYPE(cmd) != '#' ||
897 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) 1304 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
@@ -903,9 +1310,9 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
903 return -EFAULT; 1310 return -EFAULT;
904 } 1311 }
905 1312
906 retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer); 1313 ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
907 if (retval < 0) 1314 if (ret < 0)
908 return retval; 1315 return ret;
909 1316
910 if (_IOC_DIR(cmd) & _IOC_READ) { 1317 if (_IOC_DIR(cmd) & _IOC_READ) {
911 if (_IOC_SIZE(cmd) > sizeof(buffer) || 1318 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
@@ -913,12 +1320,11 @@ dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
913 return -EFAULT; 1320 return -EFAULT;
914 } 1321 }
915 1322
916 return retval; 1323 return ret;
917} 1324}
918 1325
919static long 1326static long fw_device_op_ioctl(struct file *file,
920fw_device_op_ioctl(struct file *file, 1327 unsigned int cmd, unsigned long arg)
921 unsigned int cmd, unsigned long arg)
922{ 1328{
923 struct client *client = file->private_data; 1329 struct client *client = file->private_data;
924 1330
@@ -929,9 +1335,8 @@ fw_device_op_ioctl(struct file *file,
929} 1335}
930 1336
931#ifdef CONFIG_COMPAT 1337#ifdef CONFIG_COMPAT
932static long 1338static long fw_device_op_compat_ioctl(struct file *file,
933fw_device_op_compat_ioctl(struct file *file, 1339 unsigned int cmd, unsigned long arg)
934 unsigned int cmd, unsigned long arg)
935{ 1340{
936 struct client *client = file->private_data; 1341 struct client *client = file->private_data;
937 1342
@@ -947,7 +1352,7 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
947 struct client *client = file->private_data; 1352 struct client *client = file->private_data;
948 enum dma_data_direction direction; 1353 enum dma_data_direction direction;
949 unsigned long size; 1354 unsigned long size;
950 int page_count, retval; 1355 int page_count, ret;
951 1356
952 if (fw_device_is_shutdown(client->device)) 1357 if (fw_device_is_shutdown(client->device))
953 return -ENODEV; 1358 return -ENODEV;
@@ -973,48 +1378,57 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
973 else 1378 else
974 direction = DMA_FROM_DEVICE; 1379 direction = DMA_FROM_DEVICE;
975 1380
976 retval = fw_iso_buffer_init(&client->buffer, client->device->card, 1381 ret = fw_iso_buffer_init(&client->buffer, client->device->card,
977 page_count, direction); 1382 page_count, direction);
978 if (retval < 0) 1383 if (ret < 0)
979 return retval; 1384 return ret;
980 1385
981 retval = fw_iso_buffer_map(&client->buffer, vma); 1386 ret = fw_iso_buffer_map(&client->buffer, vma);
982 if (retval < 0) 1387 if (ret < 0)
983 fw_iso_buffer_destroy(&client->buffer, client->device->card); 1388 fw_iso_buffer_destroy(&client->buffer, client->device->card);
984 1389
985 return retval; 1390 return ret;
1391}
1392
1393static int shutdown_resource(int id, void *p, void *data)
1394{
1395 struct client_resource *r = p;
1396 struct client *client = data;
1397
1398 r->release(client, r);
1399 client_put(client);
1400
1401 return 0;
986} 1402}
987 1403
988static int fw_device_op_release(struct inode *inode, struct file *file) 1404static int fw_device_op_release(struct inode *inode, struct file *file)
989{ 1405{
990 struct client *client = file->private_data; 1406 struct client *client = file->private_data;
991 struct event *e, *next_e; 1407 struct event *e, *next_e;
992 struct client_resource *r, *next_r;
993 unsigned long flags;
994 1408
995 if (client->buffer.pages) 1409 mutex_lock(&client->device->client_list_mutex);
996 fw_iso_buffer_destroy(&client->buffer, client->device->card); 1410 list_del(&client->link);
1411 mutex_unlock(&client->device->client_list_mutex);
997 1412
998 if (client->iso_context) 1413 if (client->iso_context)
999 fw_iso_context_destroy(client->iso_context); 1414 fw_iso_context_destroy(client->iso_context);
1000 1415
1001 list_for_each_entry_safe(r, next_r, &client->resource_list, link) 1416 if (client->buffer.pages)
1002 r->release(client, r); 1417 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1003 1418
1004 /* 1419 /* Freeze client->resource_idr and client->event_list */
1005 * FIXME: We should wait for the async tasklets to stop 1420 spin_lock_irq(&client->lock);
1006 * running before freeing the memory. 1421 client->in_shutdown = true;
1007 */ 1422 spin_unlock_irq(&client->lock);
1423
1424 idr_for_each(&client->resource_idr, shutdown_resource, client);
1425 idr_remove_all(&client->resource_idr);
1426 idr_destroy(&client->resource_idr);
1008 1427
1009 list_for_each_entry_safe(e, next_e, &client->event_list, link) 1428 list_for_each_entry_safe(e, next_e, &client->event_list, link)
1010 kfree(e); 1429 kfree(e);
1011 1430
1012 spin_lock_irqsave(&client->device->card->lock, flags); 1431 client_put(client);
1013 list_del(&client->link);
1014 spin_unlock_irqrestore(&client->device->card->lock, flags);
1015
1016 fw_device_put(client->device);
1017 kfree(client);
1018 1432
1019 return 0; 1433 return 0;
1020} 1434}
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index bf53acb45652..a47e2129d83d 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -18,22 +18,26 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/ctype.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include <linux/kthread.h>
25#include <linux/device.h>
26#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
27#include <linux/idr.h> 25#include <linux/idr.h>
28#include <linux/jiffies.h> 26#include <linux/jiffies.h>
29#include <linux/string.h> 27#include <linux/kobject.h>
28#include <linux/list.h>
29#include <linux/mutex.h>
30#include <linux/rwsem.h> 30#include <linux/rwsem.h>
31#include <linux/semaphore.h> 31#include <linux/semaphore.h>
32#include <linux/spinlock.h>
33#include <linux/string.h>
34#include <linux/workqueue.h>
35
32#include <asm/system.h> 36#include <asm/system.h>
33#include <linux/ctype.h> 37
34#include "fw-transaction.h"
35#include "fw-topology.h"
36#include "fw-device.h" 38#include "fw-device.h"
39#include "fw-topology.h"
40#include "fw-transaction.h"
37 41
38void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) 42void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
39{ 43{
@@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
132 vendor, model, specifier_id, version); 136 vendor, model, specifier_id, version);
133} 137}
134 138
135static int 139static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
136fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
137{ 140{
138 struct fw_unit *unit = fw_unit(dev); 141 struct fw_unit *unit = fw_unit(dev);
139 char modalias[64]; 142 char modalias[64];
@@ -152,27 +155,6 @@ struct bus_type fw_bus_type = {
152}; 155};
153EXPORT_SYMBOL(fw_bus_type); 156EXPORT_SYMBOL(fw_bus_type);
154 157
155static void fw_device_release(struct device *dev)
156{
157 struct fw_device *device = fw_device(dev);
158 struct fw_card *card = device->card;
159 unsigned long flags;
160
161 /*
162 * Take the card lock so we don't set this to NULL while a
163 * FW_NODE_UPDATED callback is being handled or while the
164 * bus manager work looks at this node.
165 */
166 spin_lock_irqsave(&card->lock, flags);
167 device->node->data = NULL;
168 spin_unlock_irqrestore(&card->lock, flags);
169
170 fw_node_put(device->node);
171 kfree(device->config_rom);
172 kfree(device);
173 fw_card_put(card);
174}
175
176int fw_device_enable_phys_dma(struct fw_device *device) 158int fw_device_enable_phys_dma(struct fw_device *device)
177{ 159{
178 int generation = device->generation; 160 int generation = device->generation;
@@ -191,8 +173,8 @@ struct config_rom_attribute {
191 u32 key; 173 u32 key;
192}; 174};
193 175
194static ssize_t 176static ssize_t show_immediate(struct device *dev,
195show_immediate(struct device *dev, struct device_attribute *dattr, char *buf) 177 struct device_attribute *dattr, char *buf)
196{ 178{
197 struct config_rom_attribute *attr = 179 struct config_rom_attribute *attr =
198 container_of(dattr, struct config_rom_attribute, attr); 180 container_of(dattr, struct config_rom_attribute, attr);
@@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
223#define IMMEDIATE_ATTR(name, key) \ 205#define IMMEDIATE_ATTR(name, key) \
224 { __ATTR(name, S_IRUGO, show_immediate, NULL), key } 206 { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
225 207
226static ssize_t 208static ssize_t show_text_leaf(struct device *dev,
227show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf) 209 struct device_attribute *dattr, char *buf)
228{ 210{
229 struct config_rom_attribute *attr = 211 struct config_rom_attribute *attr =
230 container_of(dattr, struct config_rom_attribute, attr); 212 container_of(dattr, struct config_rom_attribute, attr);
@@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = {
293 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION), 275 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
294}; 276};
295 277
296static void 278static void init_fw_attribute_group(struct device *dev,
297init_fw_attribute_group(struct device *dev, 279 struct device_attribute *attrs,
298 struct device_attribute *attrs, 280 struct fw_attribute_group *group)
299 struct fw_attribute_group *group)
300{ 281{
301 struct device_attribute *attr; 282 struct device_attribute *attr;
302 int i, j; 283 int i, j;
@@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev,
319 dev->groups = group->groups; 300 dev->groups = group->groups;
320} 301}
321 302
322static ssize_t 303static ssize_t modalias_show(struct device *dev,
323modalias_show(struct device *dev, 304 struct device_attribute *attr, char *buf)
324 struct device_attribute *attr, char *buf)
325{ 305{
326 struct fw_unit *unit = fw_unit(dev); 306 struct fw_unit *unit = fw_unit(dev);
327 int length; 307 int length;
@@ -332,9 +312,8 @@ modalias_show(struct device *dev,
332 return length + 1; 312 return length + 1;
333} 313}
334 314
335static ssize_t 315static ssize_t rom_index_show(struct device *dev,
336rom_index_show(struct device *dev, 316 struct device_attribute *attr, char *buf)
337 struct device_attribute *attr, char *buf)
338{ 317{
339 struct fw_device *device = fw_device(dev->parent); 318 struct fw_device *device = fw_device(dev->parent);
340 struct fw_unit *unit = fw_unit(dev); 319 struct fw_unit *unit = fw_unit(dev);
@@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = {
349 __ATTR_NULL, 328 __ATTR_NULL,
350}; 329};
351 330
352static ssize_t 331static ssize_t config_rom_show(struct device *dev,
353config_rom_show(struct device *dev, struct device_attribute *attr, char *buf) 332 struct device_attribute *attr, char *buf)
354{ 333{
355 struct fw_device *device = fw_device(dev); 334 struct fw_device *device = fw_device(dev);
356 size_t length; 335 size_t length;
@@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
363 return length; 342 return length;
364} 343}
365 344
366static ssize_t 345static ssize_t guid_show(struct device *dev,
367guid_show(struct device *dev, struct device_attribute *attr, char *buf) 346 struct device_attribute *attr, char *buf)
368{ 347{
369 struct fw_device *device = fw_device(dev); 348 struct fw_device *device = fw_device(dev);
370 int ret; 349 int ret;
@@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = {
383 __ATTR_NULL, 362 __ATTR_NULL,
384}; 363};
385 364
386static int 365static int read_rom(struct fw_device *device,
387read_rom(struct fw_device *device, int generation, int index, u32 *data) 366 int generation, int index, u32 *data)
388{ 367{
389 int rcode; 368 int rcode;
390 369
@@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
539 518
540 kfree(old_rom); 519 kfree(old_rom);
541 ret = 0; 520 ret = 0;
542 device->cmc = rom[2] & 1 << 30; 521 device->cmc = rom[2] >> 30 & 1;
543 out: 522 out:
544 kfree(rom); 523 kfree(rom);
545 524
@@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work)
679 fw_device_put(device); 658 fw_device_put(device);
680} 659}
681 660
661static void fw_device_release(struct device *dev)
662{
663 struct fw_device *device = fw_device(dev);
664 struct fw_card *card = device->card;
665 unsigned long flags;
666
667 /*
668 * Take the card lock so we don't set this to NULL while a
669 * FW_NODE_UPDATED callback is being handled or while the
670 * bus manager work looks at this node.
671 */
672 spin_lock_irqsave(&card->lock, flags);
673 device->node->data = NULL;
674 spin_unlock_irqrestore(&card->lock, flags);
675
676 fw_node_put(device->node);
677 kfree(device->config_rom);
678 kfree(device);
679 fw_card_put(card);
680}
681
682static struct device_type fw_device_type = { 682static struct device_type fw_device_type = {
683 .release = fw_device_release, 683 .release = fw_device_release,
684}; 684};
685 685
686static void fw_device_update(struct work_struct *work); 686static int update_unit(struct device *dev, void *data)
687{
688 struct fw_unit *unit = fw_unit(dev);
689 struct fw_driver *driver = (struct fw_driver *)dev->driver;
690
691 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
692 down(&dev->sem);
693 driver->update(unit);
694 up(&dev->sem);
695 }
696
697 return 0;
698}
699
700static void fw_device_update(struct work_struct *work)
701{
702 struct fw_device *device =
703 container_of(work, struct fw_device, work.work);
704
705 fw_device_cdev_update(device);
706 device_for_each_child(&device->device, NULL, update_unit);
707}
687 708
688/* 709/*
689 * If a device was pending for deletion because its node went away but its 710 * If a device was pending for deletion because its node went away but its
@@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data)
735 return match; 756 return match;
736} 757}
737 758
759enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
760
761void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
762{
763 struct fw_card *card = device->card;
764 __be32 data;
765 int rcode;
766
767 if (!card->broadcast_channel_allocated)
768 return;
769
770 if (device->bc_implemented == BC_UNKNOWN) {
771 rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
772 device->node_id, generation, device->max_speed,
773 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
774 &data, 4);
775 switch (rcode) {
776 case RCODE_COMPLETE:
777 if (data & cpu_to_be32(1 << 31)) {
778 device->bc_implemented = BC_IMPLEMENTED;
779 break;
780 }
781 /* else fall through to the address-error case */
782 case RCODE_ADDRESS_ERROR:
783 device->bc_implemented = BC_UNIMPLEMENTED;
784 }
785 }
786
787 if (device->bc_implemented == BC_IMPLEMENTED) {
788 data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
789 BROADCAST_CHANNEL_VALID);
790 fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
791 device->node_id, generation, device->max_speed,
792 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
793 &data, 4);
794 }
795}
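
The BROADCAST_CHANNEL register probed here reads back with its most significant bit set on nodes that implement it, which is what the cpu_to_be32(1 << 31) test checks; the write then sets the "valid" bit on top of the initial value. The constants are defined outside this excerpt, presumably along these lines:

	/* Presumed definitions (fw-transaction.h of this series):
	 *
	 *   #define BROADCAST_CHANNEL_INITIAL  (1 << 31 | 31)
	 *   #define BROADCAST_CHANNEL_VALID    (1 << 30)
	 *
	 * Bit 31: register implemented; bit 30: channel valid; low bits:
	 * the broadcast channel number, 31 by convention. */
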
796
738static void fw_device_init(struct work_struct *work) 797static void fw_device_init(struct work_struct *work)
739{ 798{
740 struct fw_device *device = 799 struct fw_device *device =
741 container_of(work, struct fw_device, work.work); 800 container_of(work, struct fw_device, work.work);
742 struct device *revived_dev; 801 struct device *revived_dev;
743 int minor, err; 802 int minor, ret;
744 803
745 /* 804 /*
746 * All failure paths here set node->data to NULL, so that we 805 * All failure paths here set node->data to NULL, so that we
@@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work)
776 835
777 fw_device_get(device); 836 fw_device_get(device);
778 down_write(&fw_device_rwsem); 837 down_write(&fw_device_rwsem);
779 err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ? 838 ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
780 idr_get_new(&fw_device_idr, device, &minor) : 839 idr_get_new(&fw_device_idr, device, &minor) :
781 -ENOMEM; 840 -ENOMEM;
782 up_write(&fw_device_rwsem); 841 up_write(&fw_device_rwsem);
783 842
784 if (err < 0) 843 if (ret < 0)
785 goto error; 844 goto error;
786 845
787 device->device.bus = &fw_bus_type; 846 device->device.bus = &fw_bus_type;
@@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work)
828 device->config_rom[3], device->config_rom[4], 887 device->config_rom[3], device->config_rom[4],
829 1 << device->max_speed); 888 1 << device->max_speed);
830 device->config_rom_retries = 0; 889 device->config_rom_retries = 0;
890
891 fw_device_set_broadcast_channel(device, device->generation);
831 } 892 }
832 893
833 /* 894 /*
@@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work)
851 put_device(&device->device); /* our reference */ 912 put_device(&device->device); /* our reference */
852} 913}
853 914
854static int update_unit(struct device *dev, void *data)
855{
856 struct fw_unit *unit = fw_unit(dev);
857 struct fw_driver *driver = (struct fw_driver *)dev->driver;
858
859 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
860 down(&dev->sem);
861 driver->update(unit);
862 up(&dev->sem);
863 }
864
865 return 0;
866}
867
868static void fw_device_update(struct work_struct *work)
869{
870 struct fw_device *device =
871 container_of(work, struct fw_device, work.work);
872
873 fw_device_cdev_update(device);
874 device_for_each_child(&device->device, NULL, update_unit);
875}
876
877enum { 915enum {
878 REREAD_BIB_ERROR, 916 REREAD_BIB_ERROR,
879 REREAD_BIB_GONE, 917 REREAD_BIB_GONE,
@@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation)
894 if (i == 0 && q == 0) 932 if (i == 0 && q == 0)
895 return REREAD_BIB_GONE; 933 return REREAD_BIB_GONE;
896 934
897 if (i > device->config_rom_length || q != device->config_rom[i]) 935 if (q != device->config_rom[i])
898 return REREAD_BIB_CHANGED; 936 return REREAD_BIB_CHANGED;
899 } 937 }
900 938
@@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1004 device->node = fw_node_get(node); 1042 device->node = fw_node_get(node);
1005 device->node_id = node->node_id; 1043 device->node_id = node->node_id;
1006 device->generation = card->generation; 1044 device->generation = card->generation;
1045 mutex_init(&device->client_list_mutex);
1007 INIT_LIST_HEAD(&device->client_list); 1046 INIT_LIST_HEAD(&device->client_list);
1008 1047
1009 /* 1048 /*
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 8ef6ec2ca21c..97588937c018 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -19,10 +19,17 @@
19#ifndef __fw_device_h 19#ifndef __fw_device_h
20#define __fw_device_h 20#define __fw_device_h
21 21
22#include <linux/device.h>
22#include <linux/fs.h> 23#include <linux/fs.h>
23#include <linux/cdev.h>
24#include <linux/idr.h> 24#include <linux/idr.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
25#include <linux/rwsem.h> 28#include <linux/rwsem.h>
29#include <linux/sysfs.h>
30#include <linux/types.h>
31#include <linux/workqueue.h>
32
26#include <asm/atomic.h> 33#include <asm/atomic.h>
27 34
28enum fw_device_state { 35enum fw_device_state {
@@ -38,6 +45,9 @@ struct fw_attribute_group {
38 struct attribute *attrs[11]; 45 struct attribute *attrs[11];
39}; 46};
40 47
48struct fw_node;
49struct fw_card;
50
41/* 51/*
42 * Note, fw_device.generation always has to be read before fw_device.node_id. 52 * Note, fw_device.generation always has to be read before fw_device.node_id.
43 * Use SMP memory barriers to ensure this. Otherwise requests will be sent 53 * Use SMP memory barriers to ensure this. Otherwise requests will be sent
@@ -61,13 +71,18 @@ struct fw_device {
61 int node_id; 71 int node_id;
62 int generation; 72 int generation;
63 unsigned max_speed; 73 unsigned max_speed;
64 bool cmc;
65 struct fw_card *card; 74 struct fw_card *card;
66 struct device device; 75 struct device device;
76
77 struct mutex client_list_mutex;
67 struct list_head client_list; 78 struct list_head client_list;
79
68 u32 *config_rom; 80 u32 *config_rom;
69 size_t config_rom_length; 81 size_t config_rom_length;
70 int config_rom_retries; 82 int config_rom_retries;
83 unsigned cmc:1;
84 unsigned bc_implemented:2;
85
71 struct delayed_work work; 86 struct delayed_work work;
72 struct fw_attribute_group attribute_group; 87 struct fw_attribute_group attribute_group;
73}; 88};
@@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device)
96 111
97struct fw_device *fw_device_get_by_devt(dev_t devt); 112struct fw_device *fw_device_get_by_devt(dev_t devt);
98int fw_device_enable_phys_dma(struct fw_device *device); 113int fw_device_enable_phys_dma(struct fw_device *device);
114void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
99 115
100void fw_device_cdev_update(struct fw_device *device); 116void fw_device_cdev_update(struct fw_device *device);
101void fw_device_cdev_remove(struct fw_device *device); 117void fw_device_cdev_remove(struct fw_device *device);
@@ -176,8 +192,7 @@ struct fw_driver {
176 const struct fw_device_id *id_table; 192 const struct fw_device_id *id_table;
177}; 193};
178 194
179static inline struct fw_driver * 195static inline struct fw_driver *fw_driver(struct device_driver *drv)
180fw_driver(struct device_driver *drv)
181{ 196{
182 return container_of(drv, struct fw_driver, driver); 197 return container_of(drv, struct fw_driver, driver);
183} 198}
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index e14c03dc0065..2baf1007253e 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Isochronous IO functionality 2 * Isochronous I/O functionality:
3 * - Isochronous DMA context management
4 * - Isochronous bus resource management (channels, bandwidth), client side
3 * 5 *
4 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net> 6 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
5 * 7 *
@@ -18,21 +20,25 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 20 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 21 */
20 22
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
24#include <linux/vmalloc.h> 24#include <linux/errno.h>
25#include <linux/firewire-constants.h>
26#include <linux/kernel.h>
25#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/spinlock.h>
29#include <linux/vmalloc.h>
26 30
27#include "fw-transaction.h"
28#include "fw-topology.h" 31#include "fw-topology.h"
29#include "fw-device.h" 32#include "fw-transaction.h"
30 33
31int 34/*
32fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, 35 * Isochronous DMA context management
33 int page_count, enum dma_data_direction direction) 36 */
37
38int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
39 int page_count, enum dma_data_direction direction)
34{ 40{
35 int i, j, retval = -ENOMEM; 41 int i, j;
36 dma_addr_t address; 42 dma_addr_t address;
37 43
38 buffer->page_count = page_count; 44 buffer->page_count = page_count;
@@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
69 kfree(buffer->pages); 75 kfree(buffer->pages);
70 out: 76 out:
71 buffer->pages = NULL; 77 buffer->pages = NULL;
72 return retval; 78
79 return -ENOMEM;
73} 80}
74 81
75int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma) 82int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
76{ 83{
77 unsigned long uaddr; 84 unsigned long uaddr;
78 int i, retval; 85 int i, err;
79 86
80 uaddr = vma->vm_start; 87 uaddr = vma->vm_start;
81 for (i = 0; i < buffer->page_count; i++) { 88 for (i = 0; i < buffer->page_count; i++) {
82 retval = vm_insert_page(vma, uaddr, buffer->pages[i]); 89 err = vm_insert_page(vma, uaddr, buffer->pages[i]);
83 if (retval) 90 if (err)
84 return retval; 91 return err;
92
85 uaddr += PAGE_SIZE; 93 uaddr += PAGE_SIZE;
86 } 94 }
87 95
@@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
105 buffer->pages = NULL; 113 buffer->pages = NULL;
106} 114}
107 115
108struct fw_iso_context * 116struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
109fw_iso_context_create(struct fw_card *card, int type, 117 int type, int channel, int speed, size_t header_size,
110 int channel, int speed, size_t header_size, 118 fw_iso_callback_t callback, void *callback_data)
111 fw_iso_callback_t callback, void *callback_data)
112{ 119{
113 struct fw_iso_context *ctx; 120 struct fw_iso_context *ctx;
114 121
115 ctx = card->driver->allocate_iso_context(card, type, header_size); 122 ctx = card->driver->allocate_iso_context(card,
123 type, channel, header_size);
116 if (IS_ERR(ctx)) 124 if (IS_ERR(ctx))
117 return ctx; 125 return ctx;
118 126
@@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
134 card->driver->free_iso_context(ctx); 142 card->driver->free_iso_context(ctx);
135} 143}
136 144
137int 145int fw_iso_context_start(struct fw_iso_context *ctx,
138fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags) 146 int cycle, int sync, int tags)
139{ 147{
140 return ctx->card->driver->start_iso(ctx, cycle, sync, tags); 148 return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
141} 149}
142 150
143int 151int fw_iso_context_queue(struct fw_iso_context *ctx,
144fw_iso_context_queue(struct fw_iso_context *ctx, 152 struct fw_iso_packet *packet,
145 struct fw_iso_packet *packet, 153 struct fw_iso_buffer *buffer,
146 struct fw_iso_buffer *buffer, 154 unsigned long payload)
147 unsigned long payload)
148{ 155{
149 struct fw_card *card = ctx->card; 156 struct fw_card *card = ctx->card;
150 157
151 return card->driver->queue_iso(ctx, packet, buffer, payload); 158 return card->driver->queue_iso(ctx, packet, buffer, payload);
152} 159}
153 160
154int 161int fw_iso_context_stop(struct fw_iso_context *ctx)
155fw_iso_context_stop(struct fw_iso_context *ctx)
156{ 162{
157 return ctx->card->driver->stop_iso(ctx); 163 return ctx->card->driver->stop_iso(ctx);
158} 164}
165
166/*
167 * Isochronous bus resource management (channels, bandwidth), client side
168 */
169
170static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
171 int bandwidth, bool allocate)
172{
173 __be32 data[2];
174 int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
175
176 /*
177 * On a 1394a IRM with low contention, try < 1 is enough.
178 * On a 1394-1995 IRM, we need at least try < 2.
179 * Let's just do try < 5.
180 */
181 for (try = 0; try < 5; try++) {
182 new = allocate ? old - bandwidth : old + bandwidth;
183 if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
184 break;
185
186 data[0] = cpu_to_be32(old);
187 data[1] = cpu_to_be32(new);
188 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
189 irm_id, generation, SCODE_100,
190 CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
191 data, sizeof(data))) {
192 case RCODE_GENERATION:
193 /* A generation change frees all bandwidth. */
194 return allocate ? -EAGAIN : bandwidth;
195
196 case RCODE_COMPLETE:
197 if (be32_to_cpup(data) == old)
198 return bandwidth;
199
200 old = be32_to_cpup(data);
201 /* Fall through. */
202 }
203 }
204
205 return -EIO;
206}
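
manage_bandwidth() speaks the IRM's compare-swap protocol on BANDWIDTH_AVAILABLE, which starts at BANDWIDTH_AVAILABLE_INITIAL = 4915 allocation units. A worked allocation round, assuming one other node races us:

	/*
	 * Worked example: allocate 1000 units.
	 *
	 *   try 0: old = 4915, new = 3915
	 *          LOCK_COMPARE_SWAP returns 4115 -> mismatch, another
	 *          node allocated 800 units first; retry with old = 4115
	 *   try 1: old = 4115, new = 3115
	 *          LOCK_COMPARE_SWAP returns 4115 -> matches old, the swap
	 *          took effect; return the 1000 units as allocated
	 */
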
207
208static int manage_channel(struct fw_card *card, int irm_id, int generation,
209 u32 channels_mask, u64 offset, bool allocate)
210{
211 __be32 data[2], c, all, old;
212 int i, retry = 5;
213
214 old = all = allocate ? cpu_to_be32(~0) : 0;
215
216 for (i = 0; i < 32; i++) {
217 if (!(channels_mask & 1 << i))
218 continue;
219
220 c = cpu_to_be32(1 << (31 - i));
221 if ((old & c) != (all & c))
222 continue;
223
224 data[0] = old;
225 data[1] = old ^ c;
226 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
227 irm_id, generation, SCODE_100,
228 offset, data, sizeof(data))) {
229 case RCODE_GENERATION:
230 /* A generation change frees all channels. */
231 return allocate ? -EAGAIN : i;
232
233 case RCODE_COMPLETE:
234 if (data[0] == old)
235 return i;
236
237 old = data[0];
238
239 /* Is the IRM 1394a-2000 compliant? */
240 if ((data[0] & c) == (data[1] & c))
241 continue;
242
243 /* 1394-1995 IRM, fall through to retry. */
244 default:
245 if (retry--)
246 i--;
247 }
248 }
249
250 return -EIO;
251}
252
253static void deallocate_channel(struct fw_card *card, int irm_id,
254 int generation, int channel)
255{
256 u32 mask;
257 u64 offset;
258
259 mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
260 offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
261 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
262
263 manage_channel(card, irm_id, generation, mask, offset, false);
264}
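
[Review note: two opposite bit orders meet in these helpers. The caller-side masks put the lowest channel in the LSB, while the IRM's CHANNELS_AVAILABLE registers follow the IEEE 1212 MSB-first convention. A concrete mapping, derived from the code above, for illustration:]

	/*
	 * channel  5: channels_hi mask bit 1 << 5,
	 *             locked in CHANNELS_AVAILABLE_HI as 1 << (31 - 5)
	 * channel 40: channels_lo mask bit 1 << (40 - 32),
	 *             locked in CHANNELS_AVAILABLE_LO as 1 << (31 - 8)
	 */
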
265
266/**
267 * fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
268 *
269 * In parameters: card, generation, channels_mask, bandwidth, allocate
270 * Out parameters: channel, bandwidth
271 * This function blocks (sleeps) during communication with the IRM.
272 *
273 * Allocates or deallocates at most one channel out of channels_mask.
274 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
275 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
276 * channel 0 and LSB for channel 63.)
277 * Allocates or deallocates as many bandwidth allocation units as specified.
278 *
279 * Returns channel < 0 if no channel was allocated or deallocated.
280 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
281 *
282 * If generation is stale, deallocations succeed but allocations fail with
283 * channel = -EAGAIN.
284 *
285 * If channel allocation fails, no bandwidth will be allocated either.
286 * If bandwidth allocation fails, no channel will be allocated either.
287 * But deallocations of channel and bandwidth are tried independently
288 * of each other's success.
289 */
290void fw_iso_resource_manage(struct fw_card *card, int generation,
291 u64 channels_mask, int *channel, int *bandwidth,
292 bool allocate)
293{
294 u32 channels_hi = channels_mask; /* channels 31...0 */
295 u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
296 int irm_id, ret, c = -EINVAL;
297
298 spin_lock_irq(&card->lock);
299 irm_id = card->irm_node->node_id;
300 spin_unlock_irq(&card->lock);
301
302 if (channels_hi)
303 c = manage_channel(card, irm_id, generation, channels_hi,
304 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
305 if (channels_lo && c < 0) {
306 c = manage_channel(card, irm_id, generation, channels_lo,
307 CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
308 if (c >= 0)
309 c += 32;
310 }
311 *channel = c;
312
313 if (allocate && channels_mask != 0 && c < 0)
314 *bandwidth = 0;
315
316 if (*bandwidth == 0)
317 return;
318
319 ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
320 if (ret < 0)
321 *bandwidth = 0;
322
323 if (allocate && ret < 0 && c >= 0) {
324 deallocate_channel(card, irm_id, generation, c);
325 *channel = ret;
326 }
327}
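
[Review note: a sketch of a caller, hypothetical names, asking for any free channel plus some bandwidth; the real in-tree user is the cdev layer.]

	/* Sketch only; 800 bandwidth allocation units are an arbitrary pick. */
	static void example_allocate_resources(struct fw_card *card,
					       int generation)
	{
		int channel, bandwidth = 800;

		fw_iso_resource_manage(card, generation, ~0ULL /* any channel */,
				       &channel, &bandwidth, true);
		/*
		 * Success: channel >= 0 and bandwidth > 0.  Failure:
		 * channel < 0 (-EAGAIN if the generation was stale) and
		 * bandwidth == 0; the function has already rolled back
		 * whichever half did succeed.
		 */
	}
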
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 6d19828a93a5..1180d0be0bb4 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -205,6 +205,7 @@ struct fw_ohci {
205 205
206 u32 it_context_mask; 206 u32 it_context_mask;
207 struct iso_context *it_context_list; 207 struct iso_context *it_context_list;
208 u64 ir_context_channels;
208 u32 ir_context_mask; 209 u32 ir_context_mask;
209 struct iso_context *ir_context_list; 210 struct iso_context *ir_context_list;
210}; 211};
@@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci)
441 reg_read(ohci, OHCI1394_Version); 442 reg_read(ohci, OHCI1394_Version);
442} 443}
443 444
444static int 445static int ohci_update_phy_reg(struct fw_card *card, int addr,
445ohci_update_phy_reg(struct fw_card *card, int addr, 446 int clear_bits, int set_bits)
446 int clear_bits, int set_bits)
447{ 447{
448 struct fw_ohci *ohci = fw_ohci(card); 448 struct fw_ohci *ohci = fw_ohci(card);
449 u32 val, old; 449 u32 val, old;
@@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data)
658 } 658 }
659} 659}
660 660
661static int 661static int ar_context_init(struct ar_context *ctx,
662ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs) 662 struct fw_ohci *ohci, u32 regs)
663{ 663{
664 struct ar_buffer ab; 664 struct ar_buffer ab;
665 665
@@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx)
690 flush_writes(ctx->ohci); 690 flush_writes(ctx->ohci);
691} 691}
692 692
693static struct descriptor * 693static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
694find_branch_descriptor(struct descriptor *d, int z)
695{ 694{
696 int b, key; 695 int b, key;
697 696
@@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data)
751 * Allocate a new buffer and add it to the list of free buffers for this 750 * Allocate a new buffer and add it to the list of free buffers for this
752 * context. Must be called with ohci->lock held. 751 * context. Must be called with ohci->lock held.
753 */ 752 */
754static int 753static int context_add_buffer(struct context *ctx)
755context_add_buffer(struct context *ctx)
756{ 754{
757 struct descriptor_buffer *desc; 755 struct descriptor_buffer *desc;
758 dma_addr_t uninitialized_var(bus_addr); 756 dma_addr_t uninitialized_var(bus_addr);
@@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
781 return 0; 779 return 0;
782} 780}
783 781
784static int 782static int context_init(struct context *ctx, struct fw_ohci *ohci,
785context_init(struct context *ctx, struct fw_ohci *ohci, 783 u32 regs, descriptor_callback_t callback)
786 u32 regs, descriptor_callback_t callback)
787{ 784{
788 ctx->ohci = ohci; 785 ctx->ohci = ohci;
789 ctx->regs = regs; 786 ctx->regs = regs;
@@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
814 return 0; 811 return 0;
815} 812}
816 813
817static void 814static void context_release(struct context *ctx)
818context_release(struct context *ctx)
819{ 815{
820 struct fw_card *card = &ctx->ohci->card; 816 struct fw_card *card = &ctx->ohci->card;
821 struct descriptor_buffer *desc, *tmp; 817 struct descriptor_buffer *desc, *tmp;
@@ -827,8 +823,8 @@ context_release(struct context *ctx)
827} 823}
828 824
829/* Must be called with ohci->lock held */ 825/* Must be called with ohci->lock held */
830static struct descriptor * 826static struct descriptor *context_get_descriptors(struct context *ctx,
831context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus) 827 int z, dma_addr_t *d_bus)
832{ 828{
833 struct descriptor *d = NULL; 829 struct descriptor *d = NULL;
834 struct descriptor_buffer *desc = ctx->buffer_tail; 830 struct descriptor_buffer *desc = ctx->buffer_tail;
@@ -912,8 +908,8 @@ struct driver_data {
 912 * Must always be called with the ohci->lock held to ensure proper 908
913 * generation handling and locking around packet queue manipulation. 909 * generation handling and locking around packet queue manipulation.
914 */ 910 */
915static int 911static int at_context_queue_packet(struct context *ctx,
916at_context_queue_packet(struct context *ctx, struct fw_packet *packet) 912 struct fw_packet *packet)
917{ 913{
918 struct fw_ohci *ohci = ctx->ohci; 914 struct fw_ohci *ohci = ctx->ohci;
919 dma_addr_t d_bus, uninitialized_var(payload_bus); 915 dma_addr_t d_bus, uninitialized_var(payload_bus);
@@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
940 */ 936 */
941 937
942 header = (__le32 *) &d[1]; 938 header = (__le32 *) &d[1];
943 if (packet->header_length > 8) { 939 switch (packet->header_length) {
940 case 16:
941 case 12:
944 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 942 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
945 (packet->speed << 16)); 943 (packet->speed << 16));
946 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 944 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
@@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
954 header[3] = (__force __le32) packet->header[3]; 952 header[3] = (__force __le32) packet->header[3];
955 953
956 d[0].req_count = cpu_to_le16(packet->header_length); 954 d[0].req_count = cpu_to_le16(packet->header_length);
957 } else { 955 break;
956
957 case 8:
958 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 958 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
959 (packet->speed << 16)); 959 (packet->speed << 16));
960 header[1] = cpu_to_le32(packet->header[0]); 960 header[1] = cpu_to_le32(packet->header[0]);
961 header[2] = cpu_to_le32(packet->header[1]); 961 header[2] = cpu_to_le32(packet->header[1]);
962 d[0].req_count = cpu_to_le16(12); 962 d[0].req_count = cpu_to_le16(12);
963 break;
964
965 case 4:
966 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
967 (packet->speed << 16));
968 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
969 d[0].req_count = cpu_to_le16(8);
970 break;
971
972 default:
973 /* BUG(); */
974 packet->ack = RCODE_SEND_ERROR;
975 return -1;
963 } 976 }
964 977
965 driver_data = (struct driver_data *) &d[3]; 978 driver_data = (struct driver_data *) &d[3];
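
[Review note: the new case 4 carries the single-quadlet header of asynchronous stream packets. For reference, assuming the standard stream packet layout, the quadlet built by fw_fill_request() in the fw-transaction.c hunk below is:]

	/*
	 * packet->header[0] = data_length << 16 | tag << 14 | channel << 8
	 *                   | TCODE_STREAM_DATA << 4 | sy;
	 *
	 * case 4 above splits this into the two quadlets of the OHCI AT
	 * request format: the low half plus the speed code, then the length.
	 */
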
@@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context,
1095#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) 1108#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1096#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) 1109#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
1097 1110
1098static void 1111static void handle_local_rom(struct fw_ohci *ohci,
1099handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) 1112 struct fw_packet *packet, u32 csr)
1100{ 1113{
1101 struct fw_packet response; 1114 struct fw_packet response;
1102 int tcode, length, i; 1115 int tcode, length, i;
@@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1122 fw_core_handle_response(&ohci->card, &response); 1135 fw_core_handle_response(&ohci->card, &response);
1123} 1136}
1124 1137
1125static void 1138static void handle_local_lock(struct fw_ohci *ohci,
1126handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) 1139 struct fw_packet *packet, u32 csr)
1127{ 1140{
1128 struct fw_packet response; 1141 struct fw_packet response;
1129 int tcode, length, ext_tcode, sel; 1142 int tcode, length, ext_tcode, sel;
@@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
1164 fw_core_handle_response(&ohci->card, &response); 1177 fw_core_handle_response(&ohci->card, &response);
1165} 1178}
1166 1179
1167static void 1180static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1168handle_local_request(struct context *ctx, struct fw_packet *packet)
1169{ 1181{
1170 u64 offset; 1182 u64 offset;
1171 u32 csr; 1183 u32 csr;
@@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
1205 } 1217 }
1206} 1218}
1207 1219
1208static void 1220static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1209at_context_transmit(struct context *ctx, struct fw_packet *packet)
1210{ 1221{
1211 unsigned long flags; 1222 unsigned long flags;
1212 int retval; 1223 int ret;
1213 1224
1214 spin_lock_irqsave(&ctx->ohci->lock, flags); 1225 spin_lock_irqsave(&ctx->ohci->lock, flags);
1215 1226
@@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
1220 return; 1231 return;
1221 } 1232 }
1222 1233
1223 retval = at_context_queue_packet(ctx, packet); 1234 ret = at_context_queue_packet(ctx, packet);
1224 spin_unlock_irqrestore(&ctx->ohci->lock, flags); 1235 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1225 1236
1226 if (retval < 0) 1237 if (ret < 0)
1227 packet->callback(packet, &ctx->ohci->card, packet->ack); 1238 packet->callback(packet, &ctx->ohci->card, packet->ack);
1228 1239
1229} 1240}
@@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1590 return 0; 1601 return 0;
1591} 1602}
1592 1603
1593static int 1604static int ohci_set_config_rom(struct fw_card *card,
1594ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) 1605 u32 *config_rom, size_t length)
1595{ 1606{
1596 struct fw_ohci *ohci; 1607 struct fw_ohci *ohci;
1597 unsigned long flags; 1608 unsigned long flags;
1598 int retval = -EBUSY; 1609 int ret = -EBUSY;
1599 __be32 *next_config_rom; 1610 __be32 *next_config_rom;
1600 dma_addr_t uninitialized_var(next_config_rom_bus); 1611 dma_addr_t uninitialized_var(next_config_rom_bus);
1601 1612
@@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1649 1660
1650 reg_write(ohci, OHCI1394_ConfigROMmap, 1661 reg_write(ohci, OHCI1394_ConfigROMmap,
1651 ohci->next_config_rom_bus); 1662 ohci->next_config_rom_bus);
1652 retval = 0; 1663 ret = 0;
1653 } 1664 }
1654 1665
1655 spin_unlock_irqrestore(&ohci->lock, flags); 1666 spin_unlock_irqrestore(&ohci->lock, flags);
@@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1661 * controller could need to access it before the bus reset 1672 * controller could need to access it before the bus reset
1662 * takes effect. 1673 * takes effect.
1663 */ 1674 */
1664 if (retval == 0) 1675 if (ret == 0)
1665 fw_core_initiate_bus_reset(&ohci->card, 1); 1676 fw_core_initiate_bus_reset(&ohci->card, 1);
1666 else 1677 else
1667 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 1678 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1668 next_config_rom, next_config_rom_bus); 1679 next_config_rom, next_config_rom_bus);
1669 1680
1670 return retval; 1681 return ret;
1671} 1682}
1672 1683
1673static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 1684static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1689 struct fw_ohci *ohci = fw_ohci(card); 1700 struct fw_ohci *ohci = fw_ohci(card);
1690 struct context *ctx = &ohci->at_request_ctx; 1701 struct context *ctx = &ohci->at_request_ctx;
1691 struct driver_data *driver_data = packet->driver_data; 1702 struct driver_data *driver_data = packet->driver_data;
1692 int retval = -ENOENT; 1703 int ret = -ENOENT;
1693 1704
1694 tasklet_disable(&ctx->tasklet); 1705 tasklet_disable(&ctx->tasklet);
1695 1706
@@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1704 driver_data->packet = NULL; 1715 driver_data->packet = NULL;
1705 packet->ack = RCODE_CANCELLED; 1716 packet->ack = RCODE_CANCELLED;
1706 packet->callback(packet, &ohci->card, packet->ack); 1717 packet->callback(packet, &ohci->card, packet->ack);
1707 retval = 0; 1718 ret = 0;
1708
1709 out: 1719 out:
1710 tasklet_enable(&ctx->tasklet); 1720 tasklet_enable(&ctx->tasklet);
1711 1721
1712 return retval; 1722 return ret;
1713} 1723}
1714 1724
1715static int 1725static int ohci_enable_phys_dma(struct fw_card *card,
1716ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation) 1726 int node_id, int generation)
1717{ 1727{
1718#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA 1728#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1719 return 0; 1729 return 0;
1720#else 1730#else
1721 struct fw_ohci *ohci = fw_ohci(card); 1731 struct fw_ohci *ohci = fw_ohci(card);
1722 unsigned long flags; 1732 unsigned long flags;
1723 int n, retval = 0; 1733 int n, ret = 0;
1724 1734
1725 /* 1735 /*
1726 * FIXME: Make sure this bitmask is cleared when we clear the busReset 1736 * FIXME: Make sure this bitmask is cleared when we clear the busReset
@@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1730 spin_lock_irqsave(&ohci->lock, flags); 1740 spin_lock_irqsave(&ohci->lock, flags);
1731 1741
1732 if (ohci->generation != generation) { 1742 if (ohci->generation != generation) {
1733 retval = -ESTALE; 1743 ret = -ESTALE;
1734 goto out; 1744 goto out;
1735 } 1745 }
1736 1746
@@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1748 flush_writes(ohci); 1758 flush_writes(ohci);
1749 out: 1759 out:
1750 spin_unlock_irqrestore(&ohci->lock, flags); 1760 spin_unlock_irqrestore(&ohci->lock, flags);
1751 return retval; 1761
1762 return ret;
1752#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ 1763#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
1753} 1764}
1754 1765
1755static u64 1766static u64 ohci_get_bus_time(struct fw_card *card)
1756ohci_get_bus_time(struct fw_card *card)
1757{ 1767{
1758 struct fw_ohci *ohci = fw_ohci(card); 1768 struct fw_ohci *ohci = fw_ohci(card);
1759 u32 cycle_time; 1769 u32 cycle_time;
@@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card)
1765 return bus_time; 1775 return bus_time;
1766} 1776}
1767 1777
1778static void copy_iso_headers(struct iso_context *ctx, void *p)
1779{
1780 int i = ctx->header_length;
1781
1782 if (i + ctx->base.header_size > PAGE_SIZE)
1783 return;
1784
1785 /*
1786 * The iso header is byteswapped to little endian by
1787 * the controller, but the remaining header quadlets
1788 * are big endian. We want to present all the headers
1789 * as big endian, so we have to swap the first quadlet.
1790 */
1791 if (ctx->base.header_size > 0)
1792 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1793 if (ctx->base.header_size > 4)
1794 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
1795 if (ctx->base.header_size > 8)
1796 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
1797 ctx->header_length += ctx->base.header_size;
1798}
1799
1768static int handle_ir_dualbuffer_packet(struct context *context, 1800static int handle_ir_dualbuffer_packet(struct context *context,
1769 struct descriptor *d, 1801 struct descriptor *d,
1770 struct descriptor *last) 1802 struct descriptor *last)
@@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
1775 __le32 *ir_header; 1807 __le32 *ir_header;
1776 size_t header_length; 1808 size_t header_length;
1777 void *p, *end; 1809 void *p, *end;
1778 int i;
1779 1810
1780 if (db->first_res_count != 0 && db->second_res_count != 0) { 1811 if (db->first_res_count != 0 && db->second_res_count != 0) {
1781 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { 1812 if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context,
1788 header_length = le16_to_cpu(db->first_req_count) - 1819 header_length = le16_to_cpu(db->first_req_count) -
1789 le16_to_cpu(db->first_res_count); 1820 le16_to_cpu(db->first_res_count);
1790 1821
1791 i = ctx->header_length;
1792 p = db + 1; 1822 p = db + 1;
1793 end = p + header_length; 1823 end = p + header_length;
1794 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) { 1824 while (p < end) {
1795 /* 1825 copy_iso_headers(ctx, p);
1796 * The iso header is byteswapped to little endian by
1797 * the controller, but the remaining header quadlets
1798 * are big endian. We want to present all the headers
1799 * as big endian, so we have to swap the first
1800 * quadlet.
1801 */
1802 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1803 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1804 i += ctx->base.header_size;
1805 ctx->excess_bytes += 1826 ctx->excess_bytes +=
1806 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; 1827 (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
1807 p += ctx->base.header_size + 4; 1828 p += max(ctx->base.header_size, (size_t)8);
1808 } 1829 }
1809 ctx->header_length = i;
1810 1830
1811 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - 1831 ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
1812 le16_to_cpu(db->second_res_count); 1832 le16_to_cpu(db->second_res_count);
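
[Review note: since the controller always deposits the 4-byte isochronous header plus the 4-byte trailer, the per-packet slot in the header area becomes max(header_size, 8) rather than header_size + 4. Illustrative strides:]

	/*
	 * header_size  4 ->  8-byte slot per packet (was  8 under the old rule)
	 * header_size  8 ->  8-byte slot per packet (was 12 under the old rule)
	 * header_size 16 -> 16-byte slot per packet (was 20 under the old rule)
	 */
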
@@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
1832 struct descriptor *pd; 1852 struct descriptor *pd;
1833 __le32 *ir_header; 1853 __le32 *ir_header;
1834 void *p; 1854 void *p;
1835 int i;
1836 1855
1837 for (pd = d; pd <= last; pd++) { 1856 for (pd = d; pd <= last; pd++) {
1838 if (pd->transfer_status) 1857 if (pd->transfer_status)
@@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
1842 /* Descriptor(s) not done yet, stop iteration */ 1861 /* Descriptor(s) not done yet, stop iteration */
1843 return 0; 1862 return 0;
1844 1863
1845 i = ctx->header_length; 1864 p = last + 1;
1846 p = last + 1; 1865 copy_iso_headers(ctx, p);
1847
1848 if (ctx->base.header_size > 0 &&
1849 i + ctx->base.header_size <= PAGE_SIZE) {
1850 /*
1851 * The iso header is byteswapped to little endian by
1852 * the controller, but the remaining header quadlets
1853 * are big endian. We want to present all the headers
1854 * as big endian, so we have to swap the first quadlet.
1855 */
1856 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1857 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1858 ctx->header_length += ctx->base.header_size;
1859 }
1860 1866
1861 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) { 1867 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1862 ir_header = (__le32 *) p; 1868 ir_header = (__le32 *) p;
@@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context,
1888 return 1; 1894 return 1;
1889} 1895}
1890 1896
1891static struct fw_iso_context * 1897static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
1892ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size) 1898 int type, int channel, size_t header_size)
1893{ 1899{
1894 struct fw_ohci *ohci = fw_ohci(card); 1900 struct fw_ohci *ohci = fw_ohci(card);
1895 struct iso_context *ctx, *list; 1901 struct iso_context *ctx, *list;
1896 descriptor_callback_t callback; 1902 descriptor_callback_t callback;
1903 u64 *channels, dont_care = ~0ULL;
1897 u32 *mask, regs; 1904 u32 *mask, regs;
1898 unsigned long flags; 1905 unsigned long flags;
1899 int index, retval = -ENOMEM; 1906 int index, ret = -ENOMEM;
1900 1907
1901 if (type == FW_ISO_CONTEXT_TRANSMIT) { 1908 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1909 channels = &dont_care;
1902 mask = &ohci->it_context_mask; 1910 mask = &ohci->it_context_mask;
1903 list = ohci->it_context_list; 1911 list = ohci->it_context_list;
1904 callback = handle_it_packet; 1912 callback = handle_it_packet;
1905 } else { 1913 } else {
1914 channels = &ohci->ir_context_channels;
1906 mask = &ohci->ir_context_mask; 1915 mask = &ohci->ir_context_mask;
1907 list = ohci->ir_context_list; 1916 list = ohci->ir_context_list;
1908 if (ohci->use_dualbuffer) 1917 if (ohci->use_dualbuffer)
@@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1912 } 1921 }
1913 1922
1914 spin_lock_irqsave(&ohci->lock, flags); 1923 spin_lock_irqsave(&ohci->lock, flags);
1915 index = ffs(*mask) - 1; 1924 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
1916 if (index >= 0) 1925 if (index >= 0) {
1926 *channels &= ~(1ULL << channel);
1917 *mask &= ~(1 << index); 1927 *mask &= ~(1 << index);
1928 }
1918 spin_unlock_irqrestore(&ohci->lock, flags); 1929 spin_unlock_irqrestore(&ohci->lock, flags);
1919 1930
1920 if (index < 0) 1931 if (index < 0)
@@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1932 if (ctx->header == NULL) 1943 if (ctx->header == NULL)
1933 goto out; 1944 goto out;
1934 1945
1935 retval = context_init(&ctx->context, ohci, regs, callback); 1946 ret = context_init(&ctx->context, ohci, regs, callback);
1936 if (retval < 0) 1947 if (ret < 0)
1937 goto out_with_header; 1948 goto out_with_header;
1938 1949
1939 return &ctx->base; 1950 return &ctx->base;
@@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1945 *mask |= 1 << index; 1956 *mask |= 1 << index;
1946 spin_unlock_irqrestore(&ohci->lock, flags); 1957 spin_unlock_irqrestore(&ohci->lock, flags);
1947 1958
1948 return ERR_PTR(retval); 1959 return ERR_PTR(ret);
1949} 1960}
1950 1961
1951static int ohci_start_iso(struct fw_iso_context *base, 1962static int ohci_start_iso(struct fw_iso_context *base,
@@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
2024 } else { 2035 } else {
2025 index = ctx - ohci->ir_context_list; 2036 index = ctx - ohci->ir_context_list;
2026 ohci->ir_context_mask |= 1 << index; 2037 ohci->ir_context_mask |= 1 << index;
2038 ohci->ir_context_channels |= 1ULL << base->channel;
2027 } 2039 }
2028 2040
2029 spin_unlock_irqrestore(&ohci->lock, flags); 2041 spin_unlock_irqrestore(&ohci->lock, flags);
2030} 2042}
2031 2043
2032static int 2044static int ohci_queue_iso_transmit(struct fw_iso_context *base,
2033ohci_queue_iso_transmit(struct fw_iso_context *base, 2045 struct fw_iso_packet *packet,
2034 struct fw_iso_packet *packet, 2046 struct fw_iso_buffer *buffer,
2035 struct fw_iso_buffer *buffer, 2047 unsigned long payload)
2036 unsigned long payload)
2037{ 2048{
2038 struct iso_context *ctx = container_of(base, struct iso_context, base); 2049 struct iso_context *ctx = container_of(base, struct iso_context, base);
2039 struct descriptor *d, *last, *pd; 2050 struct descriptor *d, *last, *pd;
@@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
2128 return 0; 2139 return 0;
2129} 2140}
2130 2141
2131static int 2142static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2132ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, 2143 struct fw_iso_packet *packet,
2133 struct fw_iso_packet *packet, 2144 struct fw_iso_buffer *buffer,
2134 struct fw_iso_buffer *buffer, 2145 unsigned long payload)
2135 unsigned long payload)
2136{ 2146{
2137 struct iso_context *ctx = container_of(base, struct iso_context, base); 2147 struct iso_context *ctx = container_of(base, struct iso_context, base);
2138 struct db_descriptor *db = NULL; 2148 struct db_descriptor *db = NULL;
@@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2151 z = 2; 2161 z = 2;
2152 2162
2153 /* 2163 /*
2154 * The OHCI controller puts the status word in the header 2164 * The OHCI controller puts the isochronous header and trailer in the
2155 * buffer too, so we need 4 extra bytes per packet. 2165 * buffer, so we need at least 8 bytes.
2156 */ 2166 */
2157 packet_count = p->header_length / ctx->base.header_size; 2167 packet_count = p->header_length / ctx->base.header_size;
2158 header_size = packet_count * (ctx->base.header_size + 4); 2168 header_size = packet_count * max(ctx->base.header_size, (size_t)8);
2159 2169
2160 /* Get header size in number of descriptors. */ 2170 /* Get header size in number of descriptors. */
2161 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2171 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2173 db = (struct db_descriptor *) d; 2183 db = (struct db_descriptor *) d;
2174 db->control = cpu_to_le16(DESCRIPTOR_STATUS | 2184 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
2175 DESCRIPTOR_BRANCH_ALWAYS); 2185 DESCRIPTOR_BRANCH_ALWAYS);
2176 db->first_size = cpu_to_le16(ctx->base.header_size + 4); 2186 db->first_size =
2187 cpu_to_le16(max(ctx->base.header_size, (size_t)8));
2177 if (p->skip && rest == p->payload_length) { 2188 if (p->skip && rest == p->payload_length) {
2178 db->control |= cpu_to_le16(DESCRIPTOR_WAIT); 2189 db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
2179 db->first_req_count = db->first_size; 2190 db->first_req_count = db->first_size;
@@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2208 return 0; 2219 return 0;
2209} 2220}
2210 2221
2211static int 2222static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2212ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, 2223 struct fw_iso_packet *packet,
2213 struct fw_iso_packet *packet, 2224 struct fw_iso_buffer *buffer,
2214 struct fw_iso_buffer *buffer, 2225 unsigned long payload)
2215 unsigned long payload)
2216{ 2226{
2217 struct iso_context *ctx = container_of(base, struct iso_context, base); 2227 struct iso_context *ctx = container_of(base, struct iso_context, base);
2218 struct descriptor *d = NULL, *pd = NULL; 2228 struct descriptor *d = NULL, *pd = NULL;
@@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2223 int page, offset, packet_count, header_size, payload_per_buffer; 2233 int page, offset, packet_count, header_size, payload_per_buffer;
2224 2234
2225 /* 2235 /*
2226 * The OHCI controller puts the status word in the 2236 * The OHCI controller puts the isochronous header and trailer in the
2227 * buffer too, so we need 4 extra bytes per packet. 2237 * buffer, so we need at least 8 bytes.
2228 */ 2238 */
2229 packet_count = p->header_length / ctx->base.header_size; 2239 packet_count = p->header_length / ctx->base.header_size;
2230 header_size = ctx->base.header_size + 4; 2240 header_size = max(ctx->base.header_size, (size_t)8);
2231 2241
2232 /* Get header size in number of descriptors. */ 2242 /* Get header size in number of descriptors. */
2233 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 2243 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2286 return 0; 2296 return 0;
2287} 2297}
2288 2298
2289static int 2299static int ohci_queue_iso(struct fw_iso_context *base,
2290ohci_queue_iso(struct fw_iso_context *base, 2300 struct fw_iso_packet *packet,
2291 struct fw_iso_packet *packet, 2301 struct fw_iso_buffer *buffer,
2292 struct fw_iso_buffer *buffer, 2302 unsigned long payload)
2293 unsigned long payload)
2294{ 2303{
2295 struct iso_context *ctx = container_of(base, struct iso_context, base); 2304 struct iso_context *ctx = container_of(base, struct iso_context, base);
2296 unsigned long flags; 2305 unsigned long flags;
2297 int retval; 2306 int ret;
2298 2307
2299 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2308 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2300 if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2309 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2301 retval = ohci_queue_iso_transmit(base, packet, buffer, payload); 2310 ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
2302 else if (ctx->context.ohci->use_dualbuffer) 2311 else if (ctx->context.ohci->use_dualbuffer)
2303 retval = ohci_queue_iso_receive_dualbuffer(base, packet, 2312 ret = ohci_queue_iso_receive_dualbuffer(base, packet,
2304 buffer, payload); 2313 buffer, payload);
2305 else 2314 else
2306 retval = ohci_queue_iso_receive_packet_per_buffer(base, packet, 2315 ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
2307 buffer, 2316 buffer, payload);
2308 payload);
2309 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 2317 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
2310 2318
2311 return retval; 2319 return ret;
2312} 2320}
2313 2321
2314static const struct fw_card_driver ohci_driver = { 2322static const struct fw_card_driver ohci_driver = {
@@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev)
2357#define ohci_pmac_off(dev) 2365#define ohci_pmac_off(dev)
2358#endif /* CONFIG_PPC_PMAC */ 2366#endif /* CONFIG_PPC_PMAC */
2359 2367
2360static int __devinit 2368static int __devinit pci_probe(struct pci_dev *dev,
2361pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) 2369 const struct pci_device_id *ent)
2362{ 2370{
2363 struct fw_ohci *ohci; 2371 struct fw_ohci *ohci;
2364 u32 bus_options, max_receive, link_speed, version; 2372 u32 bus_options, max_receive, link_speed, version;
@@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2440 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 2448 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
2441 2449
2442 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 2450 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
2451 ohci->ir_context_channels = ~0ULL;
2443 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); 2452 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
2444 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 2453 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
2445 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); 2454 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
@@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2467 reg_read(ohci, OHCI1394_GUIDLo); 2476 reg_read(ohci, OHCI1394_GUIDLo);
2468 2477
2469 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); 2478 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
2470 if (err < 0) 2479 if (err)
2471 goto fail_self_id; 2480 goto fail_self_id;
2472 2481
2473 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2482 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2474 dev_name(&dev->dev), version >> 16, version & 0xff); 2483 dev_name(&dev->dev), version >> 16, version & 0xff);
2484
2475 return 0; 2485 return 0;
2476 2486
2477 fail_self_id: 2487 fail_self_id:
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index c71c4419d9e8..2bcf51557c72 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -392,20 +392,18 @@ static const struct {
392 } 392 }
393}; 393};
394 394
395static void 395static void free_orb(struct kref *kref)
396free_orb(struct kref *kref)
397{ 396{
398 struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref); 397 struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
399 398
400 kfree(orb); 399 kfree(orb);
401} 400}
402 401
403static void 402static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
404sbp2_status_write(struct fw_card *card, struct fw_request *request, 403 int tcode, int destination, int source,
405 int tcode, int destination, int source, 404 int generation, int speed,
406 int generation, int speed, 405 unsigned long long offset,
407 unsigned long long offset, 406 void *payload, size_t length, void *callback_data)
408 void *payload, size_t length, void *callback_data)
409{ 407{
410 struct sbp2_logical_unit *lu = callback_data; 408 struct sbp2_logical_unit *lu = callback_data;
411 struct sbp2_orb *orb; 409 struct sbp2_orb *orb;
@@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
451 fw_send_response(card, request, RCODE_COMPLETE); 449 fw_send_response(card, request, RCODE_COMPLETE);
452} 450}
453 451
454static void 452static void complete_transaction(struct fw_card *card, int rcode,
455complete_transaction(struct fw_card *card, int rcode, 453 void *payload, size_t length, void *data)
456 void *payload, size_t length, void *data)
457{ 454{
458 struct sbp2_orb *orb = data; 455 struct sbp2_orb *orb = data;
459 unsigned long flags; 456 unsigned long flags;
@@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode,
482 kref_put(&orb->kref, free_orb); 479 kref_put(&orb->kref, free_orb);
483} 480}
484 481
485static void 482static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
486sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, 483 int node_id, int generation, u64 offset)
487 int node_id, int generation, u64 offset)
488{ 484{
489 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 485 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
490 unsigned long flags; 486 unsigned long flags;
@@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
531 return retval; 527 return retval;
532} 528}
533 529
534static void 530static void complete_management_orb(struct sbp2_orb *base_orb,
535complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 531 struct sbp2_status *status)
536{ 532{
537 struct sbp2_management_orb *orb = 533 struct sbp2_management_orb *orb =
538 container_of(base_orb, struct sbp2_management_orb, base); 534 container_of(base_orb, struct sbp2_management_orb, base);
@@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
542 complete(&orb->done); 538 complete(&orb->done);
543} 539}
544 540
545static int 541static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
546sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, 542 int generation, int function,
547 int generation, int function, int lun_or_login_id, 543 int lun_or_login_id, void *response)
548 void *response)
549{ 544{
550 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 545 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
551 struct sbp2_management_orb *orb; 546 struct sbp2_management_orb *orb;
@@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
652 &d, sizeof(d)); 647 &d, sizeof(d));
653} 648}
654 649
655static void 650static void complete_agent_reset_write_no_wait(struct fw_card *card,
656complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, 651 int rcode, void *payload, size_t length, void *data)
657 void *payload, size_t length, void *data)
658{ 652{
659 kfree(data); 653 kfree(data);
660} 654}
@@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
1299 sizeof(orb->page_table), DMA_TO_DEVICE); 1293 sizeof(orb->page_table), DMA_TO_DEVICE);
1300} 1294}
1301 1295
1302static unsigned int 1296static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1303sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1304{ 1297{
1305 int sam_status; 1298 int sam_status;
1306 1299
@@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
1337 } 1330 }
1338} 1331}
1339 1332
1340static void 1333static void complete_command_orb(struct sbp2_orb *base_orb,
1341complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) 1334 struct sbp2_status *status)
1342{ 1335{
1343 struct sbp2_command_orb *orb = 1336 struct sbp2_command_orb *orb =
1344 container_of(base_orb, struct sbp2_command_orb, base); 1337 container_of(base_orb, struct sbp2_command_orb, base);
@@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
1384 orb->done(orb->cmd); 1377 orb->done(orb->cmd);
1385} 1378}
1386 1379
1387static int 1380static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
1388sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device, 1381 struct fw_device *device, struct sbp2_logical_unit *lu)
1389 struct sbp2_logical_unit *lu)
1390{ 1382{
1391 struct scatterlist *sg = scsi_sglist(orb->cmd); 1383 struct scatterlist *sg = scsi_sglist(orb->cmd);
1392 int i, n; 1384 int i, n;
@@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1584 * This is the concatenation of target port identifier and logical unit 1576 * This is the concatenation of target port identifier and logical unit
1585 * identifier as per SAM-2...SAM-4 annex A. 1577 * identifier as per SAM-2...SAM-4 annex A.
1586 */ 1578 */
1587static ssize_t 1579static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
1588sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, 1580 struct device_attribute *attr, char *buf)
1589 char *buf)
1590{ 1581{
1591 struct scsi_device *sdev = to_scsi_device(dev); 1582 struct scsi_device *sdev = to_scsi_device(dev);
1592 struct sbp2_logical_unit *lu; 1583 struct sbp2_logical_unit *lu;
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 8dd6703b55cd..d0deecc4de93 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card,
314 struct fw_node * node, 314 struct fw_node * node,
315 struct fw_node * parent); 315 struct fw_node * parent);
316 316
317static void 317static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
318for_each_fw_node(struct fw_card *card, struct fw_node *root, 318 fw_node_callback_t callback)
319 fw_node_callback_t callback)
320{ 319{
321 struct list_head list; 320 struct list_head list;
322 struct fw_node *node, *next, *child, *parent; 321 struct fw_node *node, *next, *child, *parent;
@@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
349 fw_node_put(node); 348 fw_node_put(node);
350} 349}
351 350
352static void 351static void report_lost_node(struct fw_card *card,
353report_lost_node(struct fw_card *card, 352 struct fw_node *node, struct fw_node *parent)
354 struct fw_node *node, struct fw_node *parent)
355{ 353{
356 fw_node_event(card, node, FW_NODE_DESTROYED); 354 fw_node_event(card, node, FW_NODE_DESTROYED);
357 fw_node_put(node); 355 fw_node_put(node);
@@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
360 card->bm_retries = 0; 358 card->bm_retries = 0;
361} 359}
362 360
363static void 361static void report_found_node(struct fw_card *card,
364report_found_node(struct fw_card *card, 362 struct fw_node *node, struct fw_node *parent)
365 struct fw_node *node, struct fw_node *parent)
366{ 363{
367 int b_path = (node->phy_speed == SCODE_BETA); 364 int b_path = (node->phy_speed == SCODE_BETA);
368 365
@@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
415 * found, lost or updated. Update the nodes in the card topology tree 412 * found, lost or updated. Update the nodes in the card topology tree
416 * as we go. 413 * as we go.
417 */ 414 */
418static void 415static void update_tree(struct fw_card *card, struct fw_node *root)
419update_tree(struct fw_card *card, struct fw_node *root)
420{ 416{
421 struct list_head list0, list1; 417 struct list_head list0, list1;
422 struct fw_node *node0, *node1, *next1; 418 struct fw_node *node0, *node1, *next1;
@@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
497 } 493 }
498} 494}
499 495
500static void 496static void update_topology_map(struct fw_card *card,
501update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count) 497 u32 *self_ids, int self_id_count)
502{ 498{
503 int node_count; 499 int node_count;
504 500
@@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
510 fw_compute_block_crc(card->topology_map); 506 fw_compute_block_crc(card->topology_map);
511} 507}
512 508
513void 509void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
514fw_core_handle_bus_reset(struct fw_card *card, 510 int self_id_count, u32 *self_ids)
515 int node_id, int generation,
516 int self_id_count, u32 * self_ids)
517{ 511{
518 struct fw_node *local_node; 512 struct fw_node *local_node;
519 unsigned long flags; 513 unsigned long flags;
@@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card,
532 526
533 spin_lock_irqsave(&card->lock, flags); 527 spin_lock_irqsave(&card->lock, flags);
534 528
529 card->broadcast_channel_allocated = false;
535 card->node_id = node_id; 530 card->node_id = node_id;
536 /* 531 /*
537 * Update node_id before generation to prevent anybody from using 532 * Update node_id before generation to prevent anybody from using
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
index addb9f8ea776..3c497bb4fae4 100644
--- a/drivers/firewire/fw-topology.h
+++ b/drivers/firewire/fw-topology.h
@@ -19,6 +19,11 @@
19#ifndef __fw_topology_h 19#ifndef __fw_topology_h
20#define __fw_topology_h 20#define __fw_topology_h
21 21
22#include <linux/list.h>
23#include <linux/slab.h>
24
25#include <asm/atomic.h>
26
22enum { 27enum {
23 FW_NODE_CREATED, 28 FW_NODE_CREATED,
24 FW_NODE_UPDATED, 29 FW_NODE_UPDATED,
@@ -51,26 +56,22 @@ struct fw_node {
51 struct fw_node *ports[0]; 56 struct fw_node *ports[0];
52}; 57};
53 58
54static inline struct fw_node * 59static inline struct fw_node *fw_node_get(struct fw_node *node)
55fw_node_get(struct fw_node *node)
56{ 60{
57 atomic_inc(&node->ref_count); 61 atomic_inc(&node->ref_count);
58 62
59 return node; 63 return node;
60} 64}
61 65
62static inline void 66static inline void fw_node_put(struct fw_node *node)
63fw_node_put(struct fw_node *node)
64{ 67{
65 if (atomic_dec_and_test(&node->ref_count)) 68 if (atomic_dec_and_test(&node->ref_count))
66 kfree(node); 69 kfree(node);
67} 70}
68 71
69void 72struct fw_card;
70fw_destroy_nodes(struct fw_card *card); 73void fw_destroy_nodes(struct fw_card *card);
71
72int
73fw_compute_block_crc(u32 *block);
74 74
75int fw_compute_block_crc(u32 *block);
75 76
76#endif /* __fw_topology_h */ 77#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 699ac041f39a..283dac6d327d 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -64,10 +64,8 @@
64#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) 64#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
65#define PHY_IDENTIFIER(id) ((id) << 30) 65#define PHY_IDENTIFIER(id) ((id) << 30)
66 66
67static int 67static int close_transaction(struct fw_transaction *transaction,
68close_transaction(struct fw_transaction *transaction, 68 struct fw_card *card, int rcode)
69 struct fw_card *card, int rcode,
70 u32 *payload, size_t length)
71{ 69{
72 struct fw_transaction *t; 70 struct fw_transaction *t;
73 unsigned long flags; 71 unsigned long flags;
@@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction,
83 spin_unlock_irqrestore(&card->lock, flags); 81 spin_unlock_irqrestore(&card->lock, flags);
84 82
85 if (&t->link != &card->transaction_list) { 83 if (&t->link != &card->transaction_list) {
86 t->callback(card, rcode, payload, length, t->callback_data); 84 t->callback(card, rcode, NULL, 0, t->callback_data);
87 return 0; 85 return 0;
88 } 86 }
89 87
@@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction,
94 * Only valid for transactions that are potentially pending (ie have 92 * Only valid for transactions that are potentially pending (ie have
95 * been sent). 93 * been sent).
96 */ 94 */
97int 95int fw_cancel_transaction(struct fw_card *card,
98fw_cancel_transaction(struct fw_card *card, 96 struct fw_transaction *transaction)
99 struct fw_transaction *transaction)
100{ 97{
101 /* 98 /*
102 * Cancel the packet transmission if it's still queued. That 99 * Cancel the packet transmission if it's still queued. That
@@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card,
112 * if the transaction is still pending and remove it in that case. 109 * if the transaction is still pending and remove it in that case.
113 */ 110 */
114 111
115 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0); 112 return close_transaction(transaction, card, RCODE_CANCELLED);
116} 113}
117EXPORT_SYMBOL(fw_cancel_transaction); 114EXPORT_SYMBOL(fw_cancel_transaction);
118 115
119static void 116static void transmit_complete_callback(struct fw_packet *packet,
120transmit_complete_callback(struct fw_packet *packet, 117 struct fw_card *card, int status)
121 struct fw_card *card, int status)
122{ 118{
123 struct fw_transaction *t = 119 struct fw_transaction *t =
124 container_of(packet, struct fw_transaction, packet); 120 container_of(packet, struct fw_transaction, packet);
125 121
126 switch (status) { 122 switch (status) {
127 case ACK_COMPLETE: 123 case ACK_COMPLETE:
128 close_transaction(t, card, RCODE_COMPLETE, NULL, 0); 124 close_transaction(t, card, RCODE_COMPLETE);
129 break; 125 break;
130 case ACK_PENDING: 126 case ACK_PENDING:
131 t->timestamp = packet->timestamp; 127 t->timestamp = packet->timestamp;
@@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet,
133 case ACK_BUSY_X: 129 case ACK_BUSY_X:
134 case ACK_BUSY_A: 130 case ACK_BUSY_A:
135 case ACK_BUSY_B: 131 case ACK_BUSY_B:
136 close_transaction(t, card, RCODE_BUSY, NULL, 0); 132 close_transaction(t, card, RCODE_BUSY);
137 break; 133 break;
138 case ACK_DATA_ERROR: 134 case ACK_DATA_ERROR:
139 close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0); 135 close_transaction(t, card, RCODE_DATA_ERROR);
140 break; 136 break;
141 case ACK_TYPE_ERROR: 137 case ACK_TYPE_ERROR:
142 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0); 138 close_transaction(t, card, RCODE_TYPE_ERROR);
143 break; 139 break;
144 default: 140 default:
145 /* 141 /*
146 * In this case the ack is really a juju specific 142 * In this case the ack is really a juju specific
147 * rcode, so just forward that to the callback. 143 * rcode, so just forward that to the callback.
148 */ 144 */
149 close_transaction(t, card, status, NULL, 0); 145 close_transaction(t, card, status);
150 break; 146 break;
151 } 147 }
152} 148}
153 149
154static void 150static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
155fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
156 int destination_id, int source_id, int generation, int speed, 151 int destination_id, int source_id, int generation, int speed,
157 unsigned long long offset, void *payload, size_t length) 152 unsigned long long offset, void *payload, size_t length)
158{ 153{
159 int ext_tcode; 154 int ext_tcode;
160 155
156 if (tcode == TCODE_STREAM_DATA) {
157 packet->header[0] =
158 HEADER_DATA_LENGTH(length) |
159 destination_id |
160 HEADER_TCODE(TCODE_STREAM_DATA);
161 packet->header_length = 4;
162 packet->payload = payload;
163 packet->payload_length = length;
164
165 goto common;
166 }
167
161 if (tcode > 0x10) { 168 if (tcode > 0x10) {
162 ext_tcode = tcode & ~0x10; 169 ext_tcode = tcode & ~0x10;
163 tcode = TCODE_LOCK_REQUEST; 170 tcode = TCODE_LOCK_REQUEST;
@@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
204 packet->payload_length = 0; 211 packet->payload_length = 0;
205 break; 212 break;
206 } 213 }
207 214 common:
208 packet->speed = speed; 215 packet->speed = speed;
209 packet->generation = generation; 216 packet->generation = generation;
210 packet->ack = 0; 217 packet->ack = 0;
@@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
246 * @param callback function to be called when the transaction is completed 253 * @param callback function to be called when the transaction is completed
247 * @param callback_data pointer to arbitrary data, which will be 254 * @param callback_data pointer to arbitrary data, which will be
248 * passed to the callback 255 * passed to the callback
256 *
257 * In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
258 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
249 */ 259 */
250void 260void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
251fw_send_request(struct fw_card *card, struct fw_transaction *t, 261 int destination_id, int generation, int speed,
252 int tcode, int destination_id, int generation, int speed, 262 unsigned long long offset, void *payload, size_t length,
253 unsigned long long offset, 263 fw_transaction_callback_t callback, void *callback_data)
254 void *payload, size_t length,
255 fw_transaction_callback_t callback, void *callback_data)
256{ 264{
257 unsigned long flags; 265 unsigned long flags;
258 int tlabel; 266 int tlabel;
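
[Review note: as the new kernel-doc says, stream packets need a synthesized destination_id. A minimal sketch, assuming fw_stream_packet_destination_id() from fw-transaction.h takes (tag, channel, sy):]

	/* Sketch: queue an async stream packet on channel 13, tag 1, sy 0. */
	static void example_send_stream(struct fw_card *card,
					struct fw_transaction *t, int generation,
					void *payload, size_t length,
					fw_transaction_callback_t done,
					void *data)
	{
		int destination_id = fw_stream_packet_destination_id(1, 13, 0);

		fw_send_request(card, t, TCODE_STREAM_DATA, destination_id,
				generation, SCODE_100, 0 /* offset unused */,
				payload, length, done, data);
	}
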
@@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode,
322 * Returns the RCODE. 330 * Returns the RCODE.
323 */ 331 */
324int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, 332int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
325 int generation, int speed, unsigned long long offset, 333 int generation, int speed, unsigned long long offset,
326 void *data, size_t length) 334 void *payload, size_t length)
327{ 335{
328 struct transaction_callback_data d; 336 struct transaction_callback_data d;
329 struct fw_transaction t; 337 struct fw_transaction t;
330 338
331 init_completion(&d.done); 339 init_completion(&d.done);
332 d.payload = data; 340 d.payload = payload;
333 fw_send_request(card, &t, tcode, destination_id, generation, speed, 341 fw_send_request(card, &t, tcode, destination_id, generation, speed,
334 offset, data, length, transaction_callback, &d); 342 offset, payload, length, transaction_callback, &d);
335 wait_for_completion(&d.done); 343 wait_for_completion(&d.done);
336 344
337 return d.rcode; 345 return d.rcode;
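
[Review note: fw_run_transaction() is the blocking convenience wrapper around fw_send_request(); manage_bandwidth() above already uses it for lock requests. A minimal quadlet read, sketch only:]

	/* Sketch: synchronously read one quadlet from a remote node. */
	static int example_read_quadlet(struct fw_card *card, int node_id,
					int generation, int speed,
					unsigned long long offset,
					__be32 *quadlet)
	{
		int rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
					       node_id, generation, speed,
					       offset, quadlet, 4);

		return rcode == RCODE_COMPLETE ? 0 : -EIO;
	}
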
@@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card)
399 } 407 }
400} 408}
401 409
402static struct fw_address_handler * 410static struct fw_address_handler *lookup_overlapping_address_handler(
403lookup_overlapping_address_handler(struct list_head *list, 411 struct list_head *list, unsigned long long offset, size_t length)
404 unsigned long long offset, size_t length)
405{ 412{
406 struct fw_address_handler *handler; 413 struct fw_address_handler *handler;
407 414
@@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list,
414 return NULL; 421 return NULL;
415} 422}
416 423
417static struct fw_address_handler * 424static struct fw_address_handler *lookup_enclosing_address_handler(
418lookup_enclosing_address_handler(struct list_head *list, 425 struct list_head *list, unsigned long long offset, size_t length)
419 unsigned long long offset, size_t length)
420{ 426{
421 struct fw_address_handler *handler; 427 struct fw_address_handler *handler;
422 428
@@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region =
449#endif /* 0 */ 455#endif /* 0 */
450 456
451/** 457/**
452 * Allocate a range of addresses in the node space of the OHCI 458 * fw_core_add_address_handler - register for incoming requests
453 * controller. When a request is received that falls within the 459 * @handler: callback
454 * specified address range, the specified callback is invoked. The 460 * @region: region in the IEEE 1212 node space address range
455 * parameters passed to the callback give the details of the 461 *
456 * particular request. 462 * region->start, ->end, and handler->length have to be quadlet-aligned.
463 *
464 * When a request is received that falls within the specified address range,
465 * the specified callback is invoked. The parameters passed to the callback
466 * give the details of the particular request.
457 * 467 *
458 * Return value: 0 on success, non-zero otherwise. 468 * Return value: 0 on success, non-zero otherwise.
459 * The start offset of the handler's address region is determined by 469 * The start offset of the handler's address region is determined by
460 * fw_core_add_address_handler() and is returned in handler->offset. 470 * fw_core_add_address_handler() and is returned in handler->offset.
461 * The offset is quadlet-aligned.
462 */ 471 */
463int 472int fw_core_add_address_handler(struct fw_address_handler *handler,
464fw_core_add_address_handler(struct fw_address_handler *handler, 473 const struct fw_address_region *region)
465 const struct fw_address_region *region)
466{ 474{
467 struct fw_address_handler *other; 475 struct fw_address_handler *other;
468 unsigned long flags; 476 unsigned long flags;
469 int ret = -EBUSY; 477 int ret = -EBUSY;
470 478
479 if (region->start & 0xffff000000000003ULL ||
480 region->end & 0xffff000000000003ULL ||
481 region->start >= region->end ||
482 handler->length & 3 ||
483 handler->length == 0)
484 return -EINVAL;
485
 	spin_lock_irqsave(&address_handler_lock, flags);
 
-	handler->offset = roundup(region->start, 4);
+	handler->offset = region->start;
 	while (handler->offset + handler->length <= region->end) {
 		other =
 		    lookup_overlapping_address_handler(&address_handler_list,
 						       handler->offset,
 						       handler->length);
 		if (other != NULL) {
-			handler->offset =
-			    roundup(other->offset + other->length, 4);
+			handler->offset += other->length;
 		} else {
 			list_add_tail(&handler->link, &address_handler_list);
 			ret = 0;
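
[Review note: a registration sketch with hypothetical names; the callback signature matches fw_address_callback_t as used by sbp2_status_write() above. Note the newly enforced requirement that region bounds and handler->length be quadlet-aligned and nonzero.]

	static void example_request_callback(struct fw_card *card,
			struct fw_request *request, int tcode, int destination,
			int source, int generation, int speed,
			unsigned long long offset, void *payload, size_t length,
			void *callback_data)
	{
		fw_send_response(card, request, RCODE_COMPLETE);
	}

	static struct fw_address_handler example_handler = {
		.length           = 0x100,
		.address_callback = example_request_callback,
	};

	static int example_register(void)
	{
		static const struct fw_address_region example_region = {
			.start	= 0xffffe0000000ULL,
			.end	= 0xffffe0000400ULL,
		};

		/* start, end, and length are quadlet-aligned, as required */
		return fw_core_add_address_handler(&example_handler,
						   &example_region);
	}
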
@@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
493EXPORT_SYMBOL(fw_core_add_address_handler); 507EXPORT_SYMBOL(fw_core_add_address_handler);
494 508
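The new sanity check at the top of fw_core_add_address_handler() rejects regions that fall outside the 48-bit CSR address space (the upper 16 bits of the 0xffff000000000003ULL mask) or that are not quadlet-aligned (the low two bits); that is what lets the allocation loop drop its roundup() calls. As an illustration only, a driver-side registration might look like the sketch below. The region bounds, handler length, and callback body are assumptions, not part of this patch; the callback signature follows fw_address_callback_t as used by handle_topology_map() elsewhere in this file.

    /* Hypothetical example: register a handler for a quadlet-aligned
     * region and complete incoming requests.  All names and values
     * here are illustrative. */
    static void example_callback(struct fw_card *card,
                                 struct fw_request *request,
                                 int tcode, int destination, int source,
                                 int generation, int speed,
                                 unsigned long long offset,
                                 void *payload, size_t length,
                                 void *callback_data)
    {
            /* Inspect tcode/offset/payload here, then reply. */
            fw_send_response(card, request, RCODE_COMPLETE);
    }

    static const struct fw_address_region example_region = {
            .start = 0x000100000000ULL,  /* inside 48-bit space, aligned */
            .end   = 0x000100000800ULL,
    };

    static struct fw_address_handler example_handler = {
            .length           = 0x100,   /* non-zero, multiple of 4 */
            .address_callback = example_callback,
    };

    static int example_register(void)
    {
            /* On success, the core picks a free spot inside the region
             * and returns it in example_handler.offset. */
            return fw_core_add_address_handler(&example_handler,
                                               &example_region);
    }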
495/** 509/**
496 * Deallocate a range of addresses allocated with fw_allocate. This 510 * fw_core_remove_address_handler - unregister an address handler
 497 * will call the associated callback one last time with the special
498 * tcode TCODE_DEALLOCATE, to let the client destroy the registered
499 * callback data. For convenience, the callback parameters offset and
500 * length are set to the start and the length respectively for the
501 * deallocated region, payload is set to NULL.
502 */ 511 */
503void fw_core_remove_address_handler(struct fw_address_handler *handler) 512void fw_core_remove_address_handler(struct fw_address_handler *handler)
504{ 513{
@@ -518,9 +527,8 @@ struct fw_request {
518 u32 data[0]; 527 u32 data[0];
519}; 528};
520 529
521static void 530static void free_response_callback(struct fw_packet *packet,
522free_response_callback(struct fw_packet *packet, 531 struct fw_card *card, int status)
523 struct fw_card *card, int status)
524{ 532{
525 struct fw_request *request; 533 struct fw_request *request;
526 534
@@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet,
528 kfree(request); 536 kfree(request);
529} 537}
530 538
531void 539void fw_fill_response(struct fw_packet *response, u32 *request_header,
532fw_fill_response(struct fw_packet *response, u32 *request_header, 540 int rcode, void *payload, size_t length)
533 int rcode, void *payload, size_t length)
534{ 541{
535 int tcode, tlabel, extended_tcode, source, destination; 542 int tcode, tlabel, extended_tcode, source, destination;
536 543
@@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
588} 595}
589EXPORT_SYMBOL(fw_fill_response); 596EXPORT_SYMBOL(fw_fill_response);
590 597
591static struct fw_request * 598static struct fw_request *allocate_request(struct fw_packet *p)
592allocate_request(struct fw_packet *p)
593{ 599{
594 struct fw_request *request; 600 struct fw_request *request;
595 u32 *data, length; 601 u32 *data, length;
@@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p)
649 return request; 655 return request;
650} 656}
651 657
652void 658void fw_send_response(struct fw_card *card,
653fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) 659 struct fw_request *request, int rcode)
654{ 660{
655 /* unified transaction or broadcast transaction: don't respond */ 661 /* unified transaction or broadcast transaction: don't respond */
656 if (request->ack != ACK_PENDING || 662 if (request->ack != ACK_PENDING ||
@@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
670} 676}
671EXPORT_SYMBOL(fw_send_response); 677EXPORT_SYMBOL(fw_send_response);
672 678
673void 679void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
674fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
675{ 680{
676 struct fw_address_handler *handler; 681 struct fw_address_handler *handler;
677 struct fw_request *request; 682 struct fw_request *request;
@@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
719} 724}
720EXPORT_SYMBOL(fw_core_handle_request); 725EXPORT_SYMBOL(fw_core_handle_request);
721 726
722void 727void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
723fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
724{ 728{
725 struct fw_transaction *t; 729 struct fw_transaction *t;
726 unsigned long flags; 730 unsigned long flags;
@@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region =
793 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP, 797 { .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
794 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, }; 798 .end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
795 799
796static void 800static void handle_topology_map(struct fw_card *card, struct fw_request *request,
797handle_topology_map(struct fw_card *card, struct fw_request *request, 801 int tcode, int destination, int source, int generation,
798 int tcode, int destination, int source, 802 int speed, unsigned long long offset,
799 int generation, int speed, 803 void *payload, size_t length, void *callback_data)
800 unsigned long long offset,
801 void *payload, size_t length, void *callback_data)
802{ 804{
803 int i, start, end; 805 int i, start, end;
804 __be32 *map; 806 __be32 *map;
@@ -832,12 +834,10 @@ static const struct fw_address_region registers_region =
832 { .start = CSR_REGISTER_BASE, 834 { .start = CSR_REGISTER_BASE,
833 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, }; 835 .end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
834 836
835static void 837static void handle_registers(struct fw_card *card, struct fw_request *request,
836handle_registers(struct fw_card *card, struct fw_request *request, 838 int tcode, int destination, int source, int generation,
837 int tcode, int destination, int source, 839 int speed, unsigned long long offset,
838 int generation, int speed, 840 void *payload, size_t length, void *callback_data)
839 unsigned long long offset,
840 void *payload, size_t length, void *callback_data)
841{ 841{
842 int reg = offset & ~CSR_REGISTER_BASE; 842 int reg = offset & ~CSR_REGISTER_BASE;
843 unsigned long long bus_time; 843 unsigned long long bus_time;
@@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = {
939 939
940static int __init fw_core_init(void) 940static int __init fw_core_init(void)
941{ 941{
942 int retval; 942 int ret;
943 943
944 retval = bus_register(&fw_bus_type); 944 ret = bus_register(&fw_bus_type);
945 if (retval < 0) 945 if (ret < 0)
946 return retval; 946 return ret;
947 947
948 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops); 948 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
949 if (fw_cdev_major < 0) { 949 if (fw_cdev_major < 0) {
@@ -951,19 +951,10 @@ static int __init fw_core_init(void)
951 return fw_cdev_major; 951 return fw_cdev_major;
952 } 952 }
953 953
954 retval = fw_core_add_address_handler(&topology_map, 954 fw_core_add_address_handler(&topology_map, &topology_map_region);
955 &topology_map_region); 955 fw_core_add_address_handler(&registers, &registers_region);
956 BUG_ON(retval < 0); 956 fw_core_add_descriptor(&vendor_id_descriptor);
957 957 fw_core_add_descriptor(&model_id_descriptor);
958 retval = fw_core_add_address_handler(&registers,
959 &registers_region);
960 BUG_ON(retval < 0);
961
962 /* Add the vendor textual descriptor. */
963 retval = fw_core_add_descriptor(&vendor_id_descriptor);
964 BUG_ON(retval < 0);
965 retval = fw_core_add_descriptor(&model_id_descriptor);
966 BUG_ON(retval < 0);
967 958
968 return 0; 959 return 0;
969} 960}
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index 1d78e9cc5940..dfa799068f89 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -82,14 +82,14 @@
82#define CSR_SPEED_MAP 0x2000 82#define CSR_SPEED_MAP 0x2000
83#define CSR_SPEED_MAP_END 0x3000 83#define CSR_SPEED_MAP_END 0x3000
84 84
85#define BANDWIDTH_AVAILABLE_INITIAL 4915
85#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 86#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
86#define BROADCAST_CHANNEL_VALID (1 << 30) 87#define BROADCAST_CHANNEL_VALID (1 << 30)
87 88
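For context on the new BANDWIDTH_AVAILABLE_INITIAL constant: per IEEE 1394, each 125 us isochronous cycle provides 6144 bandwidth allocation units, and the BANDWIDTH_AVAILABLE CSR starts out at 4915, keeping roughly a fifth of every cycle free for asynchronous traffic:

    /* 4915 / 6144 ~= 0.80: isochronous streams may claim at most ~80%
     * of each 125 us cycle; the rest stays reserved for async requests. */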
88#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) 89#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
89#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) 90#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
90 91
91static inline void 92static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
92fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
93{ 93{
94 u32 *dst = _dst; 94 u32 *dst = _dst;
95 __be32 *src = _src; 95 __be32 *src = _src;
@@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
99 dst[i] = be32_to_cpu(src[i]); 99 dst[i] = be32_to_cpu(src[i]);
100} 100}
101 101
102static inline void 102static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
103fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
104{ 103{
105 fw_memcpy_from_be32(_dst, _src, size); 104 fw_memcpy_from_be32(_dst, _src, size);
106} 105}
@@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
125 struct fw_card *card, int status); 124 struct fw_card *card, int status);
126 125
127typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, 126typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
128 void *data, 127 void *data, size_t length,
129 size_t length,
130 void *callback_data); 128 void *callback_data);
131 129
132/* 130/*
@@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card,
141 void *data, size_t length, 139 void *data, size_t length,
142 void *callback_data); 140 void *callback_data);
143 141
144typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
145 int node_id, int generation,
146 u32 *self_ids,
147 int self_id_count,
148 void *callback_data);
149
150struct fw_packet { 142struct fw_packet {
151 int speed; 143 int speed;
152 int generation; 144 int generation;
@@ -187,12 +179,6 @@ struct fw_transaction {
187 void *callback_data; 179 void *callback_data;
188}; 180};
189 181
190static inline struct fw_packet *
191fw_packet(struct list_head *l)
192{
193 return list_entry(l, struct fw_packet, link);
194}
195
196struct fw_address_handler { 182struct fw_address_handler {
197 u64 offset; 183 u64 offset;
198 size_t length; 184 size_t length;
@@ -201,7 +187,6 @@ struct fw_address_handler {
201 struct list_head link; 187 struct list_head link;
202}; 188};
203 189
204
205struct fw_address_region { 190struct fw_address_region {
206 u64 start; 191 u64 start;
207 u64 end; 192 u64 end;
@@ -255,6 +240,7 @@ struct fw_card {
255 int bm_retries; 240 int bm_retries;
256 int bm_generation; 241 int bm_generation;
257 242
243 bool broadcast_channel_allocated;
258 u32 broadcast_channel; 244 u32 broadcast_channel;
259 u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; 245 u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
260}; 246};
@@ -315,10 +301,8 @@ struct fw_iso_packet {
315struct fw_iso_context; 301struct fw_iso_context;
316 302
317typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, 303typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
318 u32 cycle, 304 u32 cycle, size_t header_length,
319 size_t header_length, 305 void *header, void *data);
320 void *header,
321 void *data);
322 306
323/* 307/*
324 * An iso buffer is just a set of pages mapped for DMA in the 308 * An iso buffer is just a set of pages mapped for DMA in the
@@ -344,36 +328,25 @@ struct fw_iso_context {
344 void *callback_data; 328 void *callback_data;
345}; 329};
346 330
347int 331int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
348fw_iso_buffer_init(struct fw_iso_buffer *buffer, 332 int page_count, enum dma_data_direction direction);
349 struct fw_card *card, 333int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
350 int page_count, 334void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
351 enum dma_data_direction direction); 335
352int 336struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
353fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); 337 int type, int channel, int speed, size_t header_size,
354void 338 fw_iso_callback_t callback, void *callback_data);
355fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); 339int fw_iso_context_queue(struct fw_iso_context *ctx,
356 340 struct fw_iso_packet *packet,
357struct fw_iso_context * 341 struct fw_iso_buffer *buffer,
358fw_iso_context_create(struct fw_card *card, int type, 342 unsigned long payload);
359 int channel, int speed, size_t header_size, 343int fw_iso_context_start(struct fw_iso_context *ctx,
360 fw_iso_callback_t callback, void *callback_data); 344 int cycle, int sync, int tags);
361 345int fw_iso_context_stop(struct fw_iso_context *ctx);
362void 346void fw_iso_context_destroy(struct fw_iso_context *ctx);
363fw_iso_context_destroy(struct fw_iso_context *ctx); 347
364 348void fw_iso_resource_manage(struct fw_card *card, int generation,
365int 349 u64 channels_mask, int *channel, int *bandwidth, bool allocate);
366fw_iso_context_queue(struct fw_iso_context *ctx,
367 struct fw_iso_packet *packet,
368 struct fw_iso_buffer *buffer,
369 unsigned long payload);
370
371int
372fw_iso_context_start(struct fw_iso_context *ctx,
373 int cycle, int sync, int tags);
374
375int
376fw_iso_context_stop(struct fw_iso_context *ctx);
377 350
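The reflowed declarations above spell out the isochronous API's lifecycle: initialize a DMA buffer, create a context, queue packets, then start the context. A rough transmit-side sketch follows; the context-type and speed constants are taken from the firewire core headers, while the packet field names and all values are assumptions for illustration:

    /* Hypothetical transmit setup; error unwinding is elided. */
    static int example_iso_start(struct fw_card *card,
                                 struct fw_iso_buffer *buffer,
                                 fw_iso_callback_t callback, void *data)
    {
            struct fw_iso_context *ctx;
            struct fw_iso_packet packet = {
                    .payload_length = 1024,  /* assumed field names */
                    .interrupt      = 1,
            };
            int ret;

            ret = fw_iso_buffer_init(buffer, card, 16, DMA_TO_DEVICE);
            if (ret < 0)
                    return ret;

            ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_TRANSMIT,
                                        1 /* channel */, SCODE_400,
                                        0 /* header_size */,
                                        callback, data);
            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);

            ret = fw_iso_context_queue(ctx, &packet, buffer, 0);
            if (ret == 0)
                    ret = fw_iso_context_start(ctx, -1, 0, 0);
            return ret;
    }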
378struct fw_card_driver { 351struct fw_card_driver {
379 /* 352 /*
@@ -415,7 +388,7 @@ struct fw_card_driver {
415 388
416 struct fw_iso_context * 389 struct fw_iso_context *
417 (*allocate_iso_context)(struct fw_card *card, 390 (*allocate_iso_context)(struct fw_card *card,
418 int type, size_t header_size); 391 int type, int channel, size_t header_size);
419 void (*free_iso_context)(struct fw_iso_context *ctx); 392 void (*free_iso_context)(struct fw_iso_context *ctx);
420 393
421 int (*start_iso)(struct fw_iso_context *ctx, 394 int (*start_iso)(struct fw_iso_context *ctx,
@@ -429,54 +402,45 @@ struct fw_card_driver {
429 int (*stop_iso)(struct fw_iso_context *ctx); 402 int (*stop_iso)(struct fw_iso_context *ctx);
430}; 403};
431 404
432int 405int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
433fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
434 406
435void 407void fw_send_request(struct fw_card *card, struct fw_transaction *t,
436fw_send_request(struct fw_card *card, struct fw_transaction *t,
437 int tcode, int destination_id, int generation, int speed, 408 int tcode, int destination_id, int generation, int speed,
438 unsigned long long offset, void *data, size_t length, 409 unsigned long long offset, void *payload, size_t length,
439 fw_transaction_callback_t callback, void *callback_data); 410 fw_transaction_callback_t callback, void *callback_data);
440
441int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
442 int generation, int speed, unsigned long long offset,
443 void *data, size_t length);
444
445int fw_cancel_transaction(struct fw_card *card, 411int fw_cancel_transaction(struct fw_card *card,
446 struct fw_transaction *transaction); 412 struct fw_transaction *transaction);
447
448void fw_flush_transactions(struct fw_card *card); 413void fw_flush_transactions(struct fw_card *card);
449 414int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
415 int generation, int speed, unsigned long long offset,
416 void *payload, size_t length);
450void fw_send_phy_config(struct fw_card *card, 417void fw_send_phy_config(struct fw_card *card,
451 int node_id, int generation, int gap_count); 418 int node_id, int generation, int gap_count);
452 419
420static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
421{
422 return tag << 14 | channel << 8 | sy;
423}
424
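A quick worked value for the new helper (assuming the usual stream-packet field widths, a 2-bit tag and 6-bit channel, with sy in the low bits):

    /* fw_stream_packet_destination_id(1, 31, 0)
     *     = 1 << 14 | 31 << 8 | 0
     *     = 0x4000 | 0x1f00
     *     = 0x5f00 */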
453/* 425/*
454 * Called by the topology code to inform the device code of node 426 * Called by the topology code to inform the device code of node
455 * activity; found, lost, or updated nodes. 427 * activity; found, lost, or updated nodes.
456 */ 428 */
457void 429void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
458fw_node_event(struct fw_card *card, struct fw_node *node, int event);
459 430
460/* API used by card level drivers */ 431/* API used by card level drivers */
461 432
462void 433void fw_card_initialize(struct fw_card *card,
463fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, 434 const struct fw_card_driver *driver, struct device *device);
464 struct device *device); 435int fw_card_add(struct fw_card *card,
465int 436 u32 max_receive, u32 link_speed, u64 guid);
466fw_card_add(struct fw_card *card, 437void fw_core_remove_card(struct fw_card *card);
467 u32 max_receive, u32 link_speed, u64 guid); 438void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
468 439 int generation, int self_id_count, u32 *self_ids);
469void 440void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
470fw_core_remove_card(struct fw_card *card); 441void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
471 442
472void 443extern int fw_irm_set_broadcast_channel_register(struct device *dev,
473fw_core_handle_bus_reset(struct fw_card *card, 444 void *data);
474 int node_id, int generation,
475 int self_id_count, u32 *self_ids);
476void
477fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
478
479void
480fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
481 445
482#endif /* __fw_transaction_h */ 446#endif /* __fw_transaction_h */
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 30022c4a5c12..4ec5061fa584 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ 12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
13 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o 13 drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o
14 15
15drm-$(CONFIG_COMPAT) += drm_ioc32.o 16drm-$(CONFIG_COMPAT) += drm_ioc32.o
16 17
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
new file mode 100644
index 000000000000..c77c6c6d9d2c
--- /dev/null
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -0,0 +1,235 @@
1/**
2 * \file drm_debugfs.c
3 * debugfs support for DRM
4 *
5 * \author Ben Gamari <bgamari@gmail.com>
6 */
7
8/*
9 * Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
10 *
11 * Copyright 2008 Ben Gamari <bgamari@gmail.com>
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice (including the next
21 * paragraph) shall be included in all copies or substantial portions of the
22 * Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
28 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
29 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
30 * OTHER DEALINGS IN THE SOFTWARE.
31 */
32
33#include <linux/debugfs.h>
34#include <linux/seq_file.h>
35#include "drmP.h"
36
37#if defined(CONFIG_DEBUG_FS)
38
39/***************************************************
40 * Initialization, etc.
41 **************************************************/
42
43static struct drm_info_list drm_debugfs_list[] = {
44 {"name", drm_name_info, 0},
45 {"vm", drm_vm_info, 0},
46 {"clients", drm_clients_info, 0},
47 {"queues", drm_queues_info, 0},
48 {"bufs", drm_bufs_info, 0},
49 {"gem_names", drm_gem_name_info, DRIVER_GEM},
50 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
51#if DRM_DEBUG_CODE
52 {"vma", drm_vma_info, 0},
53#endif
54};
55#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
56
57
58static int drm_debugfs_open(struct inode *inode, struct file *file)
59{
60 struct drm_info_node *node = inode->i_private;
61
62 return single_open(file, node->info_ent->show, node);
63}
64
65
66static const struct file_operations drm_debugfs_fops = {
67 .owner = THIS_MODULE,
68 .open = drm_debugfs_open,
69 .read = seq_read,
70 .llseek = seq_lseek,
71 .release = single_release,
72};
73
74
75/**
76 * Initialize a given set of debugfs files for a device
77 *
78 * \param files The array of files to create
79 * \param count The number of files given
80 * \param root DRI debugfs dir entry.
81 * \param minor device minor number
82 * \return Zero on success, non-zero on failure
83 *
84 * Create a given set of debugfs files represented by an array of
 85 * drm_info_list entries in the given root directory.
86 */
87int drm_debugfs_create_files(struct drm_info_list *files, int count,
88 struct dentry *root, struct drm_minor *minor)
89{
90 struct drm_device *dev = minor->dev;
91 struct dentry *ent;
92 struct drm_info_node *tmp;
93 char name[64];
94 int i, ret;
95
96 for (i = 0; i < count; i++) {
97 u32 features = files[i].driver_features;
98
99 if (features != 0 &&
100 (dev->driver->driver_features & features) != features)
101 continue;
102
103 tmp = drm_alloc(sizeof(struct drm_info_node),
104 _DRM_DRIVER);
105 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
106 root, tmp, &drm_debugfs_fops);
107 if (!ent) {
108 DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
109 name, files[i].name);
110 drm_free(tmp, sizeof(struct drm_info_node),
111 _DRM_DRIVER);
112 ret = -1;
113 goto fail;
114 }
115
116 tmp->minor = minor;
117 tmp->dent = ent;
118 tmp->info_ent = &files[i];
119 list_add(&(tmp->list), &(minor->debugfs_nodes.list));
120 }
121 return 0;
122
123fail:
124 drm_debugfs_remove_files(files, count, minor);
125 return ret;
126}
127EXPORT_SYMBOL(drm_debugfs_create_files);
128
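For a sense of the intended use: a driver supplies its own drm_info_list array and registers it from the debugfs_init hook that drm_debugfs_init() invokes below. The names here are hypothetical; the i915 patch later in this series follows the same shape.

    /* Hypothetical driver-side debugfs file backed by a seq_file show. */
    static int example_status(struct seq_file *m, void *data)
    {
            struct drm_info_node *node = m->private;

            seq_printf(m, "minor %d\n", node->minor->index);
            return 0;
    }

    static struct drm_info_list example_debugfs_list[] = {
            {"example_status", example_status, 0},
    };

    static int example_debugfs_init(struct drm_minor *minor)
    {
            /* Called via dev->driver->debugfs_init from drm_debugfs_init(). */
            return drm_debugfs_create_files(example_debugfs_list,
                                            ARRAY_SIZE(example_debugfs_list),
                                            minor->debugfs_root, minor);
    }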
129/**
130 * Initialize the DRI debugfs filesystem for a device
131 *
132 * \param dev DRM device
133 * \param minor device minor number
134 * \param root DRI debugfs dir entry.
135 *
136 * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
137 * "/debugfs/dri/%minor%/", and each entry in debugfs_list as
138 * "/debugfs/dri/%minor%/%name%".
139 */
140int drm_debugfs_init(struct drm_minor *minor, int minor_id,
141 struct dentry *root)
142{
143 struct drm_device *dev = minor->dev;
144 char name[64];
145 int ret;
146
147 INIT_LIST_HEAD(&minor->debugfs_nodes.list);
148 sprintf(name, "%d", minor_id);
149 minor->debugfs_root = debugfs_create_dir(name, root);
150 if (!minor->debugfs_root) {
151 DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
152 return -1;
153 }
154
155 ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
156 minor->debugfs_root, minor);
157 if (ret) {
158 debugfs_remove(minor->debugfs_root);
159 minor->debugfs_root = NULL;
160 DRM_ERROR("Failed to create core drm debugfs files\n");
161 return ret;
162 }
163
164 if (dev->driver->debugfs_init) {
165 ret = dev->driver->debugfs_init(minor);
166 if (ret) {
167 DRM_ERROR("DRM: Driver failed to initialize "
168 "/debugfs/dri.\n");
169 return ret;
170 }
171 }
172 return 0;
173}
174
175
176/**
177 * Remove a list of debugfs files
178 *
179 * \param files The list of files
180 * \param count The number of files
181 * \param minor The minor of which we should remove the files
182 * \return always zero.
183 *
184 * Remove all debugfs entries created by debugfs_init().
185 */
186int drm_debugfs_remove_files(struct drm_info_list *files, int count,
187 struct drm_minor *minor)
188{
189 struct list_head *pos, *q;
190 struct drm_info_node *tmp;
191 int i;
192
193 for (i = 0; i < count; i++) {
194 list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
195 tmp = list_entry(pos, struct drm_info_node, list);
196 if (tmp->info_ent == &files[i]) {
197 debugfs_remove(tmp->dent);
198 list_del(pos);
199 drm_free(tmp, sizeof(struct drm_info_node),
200 _DRM_DRIVER);
201 }
202 }
203 }
204 return 0;
205}
206EXPORT_SYMBOL(drm_debugfs_remove_files);
207
208/**
209 * Cleanup the debugfs filesystem resources.
210 *
211 * \param minor device minor number.
212 * \return always zero.
213 *
214 * Remove all debugfs entries created by debugfs_init().
215 */
216int drm_debugfs_cleanup(struct drm_minor *minor)
217{
218 struct drm_device *dev = minor->dev;
219
220 if (!minor->debugfs_root)
221 return 0;
222
223 if (dev->driver->debugfs_cleanup)
224 dev->driver->debugfs_cleanup(minor);
225
226 drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
227
228 debugfs_remove(minor->debugfs_root);
229 minor->debugfs_root = NULL;
230
231 return 0;
232}
233
234#endif /* CONFIG_DEBUG_FS */
235
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 14c7a23dc157..ed32edb17166 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,9 +46,11 @@
46 * OTHER DEALINGS IN THE SOFTWARE. 46 * OTHER DEALINGS IN THE SOFTWARE.
47 */ 47 */
48 48
49#include <linux/debugfs.h>
49#include "drmP.h" 50#include "drmP.h"
50#include "drm_core.h" 51#include "drm_core.h"
51 52
53
52static int drm_version(struct drm_device *dev, void *data, 54static int drm_version(struct drm_device *dev, void *data,
53 struct drm_file *file_priv); 55 struct drm_file *file_priv);
54 56
@@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev)
178 180
179 /* Clear AGP information */ 181 /* Clear AGP information */
180 if (drm_core_has_AGP(dev) && dev->agp && 182 if (drm_core_has_AGP(dev) && dev->agp &&
181 !drm_core_check_feature(dev, DRIVER_MODESET)) { 183 !drm_core_check_feature(dev, DRIVER_MODESET)) {
182 struct drm_agp_mem *entry, *tempe; 184 struct drm_agp_mem *entry, *tempe;
183 185
184 /* Remove AGP resources, but leave dev->agp 186 /* Remove AGP resources, but leave dev->agp
@@ -382,6 +384,13 @@ static int __init drm_core_init(void)
382 goto err_p3; 384 goto err_p3;
383 } 385 }
384 386
387 drm_debugfs_root = debugfs_create_dir("dri", NULL);
388 if (!drm_debugfs_root) {
389 DRM_ERROR("Cannot create /debugfs/dri\n");
390 ret = -1;
391 goto err_p3;
392 }
393
385 drm_mem_init(); 394 drm_mem_init();
386 395
387 DRM_INFO("Initialized %s %d.%d.%d %s\n", 396 DRM_INFO("Initialized %s %d.%d.%d %s\n",
@@ -400,6 +409,7 @@ err_p1:
400static void __exit drm_core_exit(void) 409static void __exit drm_core_exit(void)
401{ 410{
402 remove_proc_entry("dri", NULL); 411 remove_proc_entry("dri", NULL);
412 debugfs_remove(drm_debugfs_root);
403 drm_sysfs_destroy(); 413 drm_sysfs_destroy();
404 414
405 unregister_chrdev(DRM_MAJOR, "drm"); 415 unregister_chrdev(DRM_MAJOR, "drm");
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
new file mode 100644
index 000000000000..fc98952b9033
--- /dev/null
+++ b/drivers/gpu/drm/drm_info.c
@@ -0,0 +1,328 @@
1/**
2 * \file drm_info.c
3 * DRM info file implementations
4 *
5 * \author Ben Gamari <bgamari@gmail.com>
6 */
7
8/*
9 * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
10 *
11 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
12 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
13 * Copyright 2008 Ben Gamari <bgamari@gmail.com>
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/seq_file.h>
37#include "drmP.h"
38
39/**
40 * Called when "/proc/dri/.../name" is read.
41 *
42 * Prints the device name together with the bus id if available.
43 */
44int drm_name_info(struct seq_file *m, void *data)
45{
46 struct drm_info_node *node = (struct drm_info_node *) m->private;
47 struct drm_minor *minor = node->minor;
48 struct drm_device *dev = minor->dev;
49 struct drm_master *master = minor->master;
50
51 if (!master)
52 return 0;
53
54 if (master->unique) {
55 seq_printf(m, "%s %s %s\n",
56 dev->driver->pci_driver.name,
57 pci_name(dev->pdev), master->unique);
58 } else {
59 seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
60 pci_name(dev->pdev));
61 }
62
63 return 0;
64}
65
66/**
67 * Called when "/proc/dri/.../vm" is read.
68 *
69 * Prints information about all mappings in drm_device::maplist.
70 */
71int drm_vm_info(struct seq_file *m, void *data)
72{
73 struct drm_info_node *node = (struct drm_info_node *) m->private;
74 struct drm_device *dev = node->minor->dev;
75 struct drm_map *map;
76 struct drm_map_list *r_list;
77
78 /* Hardcoded from _DRM_FRAME_BUFFER,
79 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
80 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
81 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
82 const char *type;
83 int i;
84
85 mutex_lock(&dev->struct_mutex);
86 seq_printf(m, "slot offset size type flags address mtrr\n\n");
87 i = 0;
88 list_for_each_entry(r_list, &dev->maplist, head) {
89 map = r_list->map;
90 if (!map)
91 continue;
92 if (map->type < 0 || map->type > 5)
93 type = "??";
94 else
95 type = types[map->type];
96
97 seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
98 i,
99 map->offset,
100 map->size, type, map->flags,
101 (unsigned long) r_list->user_token);
102 if (map->mtrr < 0)
103 seq_printf(m, "none\n");
104 else
105 seq_printf(m, "%4d\n", map->mtrr);
106 i++;
107 }
108 mutex_unlock(&dev->struct_mutex);
109 return 0;
110}
111
112/**
113 * Called when "/proc/dri/.../queues" is read.
114 */
115int drm_queues_info(struct seq_file *m, void *data)
116{
117 struct drm_info_node *node = (struct drm_info_node *) m->private;
118 struct drm_device *dev = node->minor->dev;
119 int i;
120 struct drm_queue *q;
121
122 mutex_lock(&dev->struct_mutex);
123 seq_printf(m, " ctx/flags use fin"
124 " blk/rw/rwf wait flushed queued"
125 " locks\n\n");
126 for (i = 0; i < dev->queue_count; i++) {
127 q = dev->queuelist[i];
128 atomic_inc(&q->use_count);
129 seq_printf(m, "%5d/0x%03x %5d %5d"
130 " %5d/%c%c/%c%c%c %5Zd\n",
131 i,
132 q->flags,
133 atomic_read(&q->use_count),
134 atomic_read(&q->finalization),
135 atomic_read(&q->block_count),
136 atomic_read(&q->block_read) ? 'r' : '-',
137 atomic_read(&q->block_write) ? 'w' : '-',
138 waitqueue_active(&q->read_queue) ? 'r' : '-',
139 waitqueue_active(&q->write_queue) ? 'w' : '-',
140 waitqueue_active(&q->flush_queue) ? 'f' : '-',
141 DRM_BUFCOUNT(&q->waitlist));
142 atomic_dec(&q->use_count);
143 }
144 mutex_unlock(&dev->struct_mutex);
145 return 0;
146}
147
148/**
149 * Called when "/proc/dri/.../bufs" is read.
150 */
151int drm_bufs_info(struct seq_file *m, void *data)
152{
153 struct drm_info_node *node = (struct drm_info_node *) m->private;
154 struct drm_device *dev = node->minor->dev;
155 struct drm_device_dma *dma;
156 int i, seg_pages;
157
158 mutex_lock(&dev->struct_mutex);
159 dma = dev->dma;
160 if (!dma) {
161 mutex_unlock(&dev->struct_mutex);
162 return 0;
163 }
164
165 seq_printf(m, " o size count free segs pages kB\n\n");
166 for (i = 0; i <= DRM_MAX_ORDER; i++) {
167 if (dma->bufs[i].buf_count) {
168 seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
169 seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
170 i,
171 dma->bufs[i].buf_size,
172 dma->bufs[i].buf_count,
173 atomic_read(&dma->bufs[i].freelist.count),
174 dma->bufs[i].seg_count,
175 seg_pages,
176 seg_pages * PAGE_SIZE / 1024);
177 }
178 }
179 seq_printf(m, "\n");
180 for (i = 0; i < dma->buf_count; i++) {
181 if (i && !(i % 32))
182 seq_printf(m, "\n");
183 seq_printf(m, " %d", dma->buflist[i]->list);
184 }
185 seq_printf(m, "\n");
186 mutex_unlock(&dev->struct_mutex);
187 return 0;
188}
189
190/**
191 * Called when "/proc/dri/.../vblank" is read.
192 */
193int drm_vblank_info(struct seq_file *m, void *data)
194{
195 struct drm_info_node *node = (struct drm_info_node *) m->private;
196 struct drm_device *dev = node->minor->dev;
197 int crtc;
198
199 mutex_lock(&dev->struct_mutex);
200 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
201 seq_printf(m, "CRTC %d enable: %d\n",
202 crtc, atomic_read(&dev->vblank_refcount[crtc]));
203 seq_printf(m, "CRTC %d counter: %d\n",
204 crtc, drm_vblank_count(dev, crtc));
205 seq_printf(m, "CRTC %d last wait: %d\n",
206 crtc, dev->last_vblank_wait[crtc]);
207 seq_printf(m, "CRTC %d in modeset: %d\n",
208 crtc, dev->vblank_inmodeset[crtc]);
209 }
210 mutex_unlock(&dev->struct_mutex);
211 return 0;
212}
213
214/**
215 * Called when "/proc/dri/.../clients" is read.
216 *
217 */
218int drm_clients_info(struct seq_file *m, void *data)
219{
220 struct drm_info_node *node = (struct drm_info_node *) m->private;
221 struct drm_device *dev = node->minor->dev;
222 struct drm_file *priv;
223
224 mutex_lock(&dev->struct_mutex);
225 seq_printf(m, "a dev pid uid magic ioctls\n\n");
226 list_for_each_entry(priv, &dev->filelist, lhead) {
227 seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
228 priv->authenticated ? 'y' : 'n',
229 priv->minor->index,
230 priv->pid,
231 priv->uid, priv->magic, priv->ioctl_count);
232 }
233 mutex_unlock(&dev->struct_mutex);
234 return 0;
235}
236
237
238int drm_gem_one_name_info(int id, void *ptr, void *data)
239{
240 struct drm_gem_object *obj = ptr;
241 struct seq_file *m = data;
242
243 seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
244
245 seq_printf(m, "%6d %8zd %7d %8d\n",
246 obj->name, obj->size,
247 atomic_read(&obj->handlecount.refcount),
248 atomic_read(&obj->refcount.refcount));
249 return 0;
250}
251
252int drm_gem_name_info(struct seq_file *m, void *data)
253{
254 struct drm_info_node *node = (struct drm_info_node *) m->private;
255 struct drm_device *dev = node->minor->dev;
256
257 seq_printf(m, " name size handles refcount\n");
258 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
259 return 0;
260}
261
262int drm_gem_object_info(struct seq_file *m, void* data)
263{
264 struct drm_info_node *node = (struct drm_info_node *) m->private;
265 struct drm_device *dev = node->minor->dev;
266
267 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
268 seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
269 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
270 seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
271 seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
272 seq_printf(m, "%d gtt total\n", dev->gtt_total);
273 return 0;
274}
275
276#if DRM_DEBUG_CODE
277
278int drm_vma_info(struct seq_file *m, void *data)
279{
280 struct drm_info_node *node = (struct drm_info_node *) m->private;
281 struct drm_device *dev = node->minor->dev;
282 struct drm_vma_entry *pt;
283 struct vm_area_struct *vma;
284#if defined(__i386__)
285 unsigned int pgprot;
286#endif
287
288 mutex_lock(&dev->struct_mutex);
289 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08lx\n",
290 atomic_read(&dev->vma_count),
291 high_memory, virt_to_phys(high_memory));
292
293 list_for_each_entry(pt, &dev->vmalist, head) {
294 vma = pt->vma;
295 if (!vma)
296 continue;
297 seq_printf(m,
298 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
299 pt->pid, vma->vm_start, vma->vm_end,
300 vma->vm_flags & VM_READ ? 'r' : '-',
301 vma->vm_flags & VM_WRITE ? 'w' : '-',
302 vma->vm_flags & VM_EXEC ? 'x' : '-',
303 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
304 vma->vm_flags & VM_LOCKED ? 'l' : '-',
305 vma->vm_flags & VM_IO ? 'i' : '-',
306 vma->vm_pgoff);
307
308#if defined(__i386__)
309 pgprot = pgprot_val(vma->vm_page_prot);
310 seq_printf(m, " %c%c%c%c%c%c%c%c%c",
311 pgprot & _PAGE_PRESENT ? 'p' : '-',
312 pgprot & _PAGE_RW ? 'w' : 'r',
313 pgprot & _PAGE_USER ? 'u' : 's',
314 pgprot & _PAGE_PWT ? 't' : 'b',
315 pgprot & _PAGE_PCD ? 'u' : 'c',
316 pgprot & _PAGE_ACCESSED ? 'a' : '-',
317 pgprot & _PAGE_DIRTY ? 'd' : '-',
318 pgprot & _PAGE_PSE ? 'm' : 'k',
319 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
320#endif
321 seq_printf(m, "\n");
322 }
323 mutex_unlock(&dev->struct_mutex);
324 return 0;
325}
326
327#endif
328
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 8df849f66830..9b3c5af61e98 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -37,697 +37,196 @@
37 * OTHER DEALINGS IN THE SOFTWARE. 37 * OTHER DEALINGS IN THE SOFTWARE.
38 */ 38 */
39 39
40#include <linux/seq_file.h>
40#include "drmP.h" 41#include "drmP.h"
41 42
42static int drm_name_info(char *buf, char **start, off_t offset, 43
43 int request, int *eof, void *data); 44/***************************************************
44static int drm_vm_info(char *buf, char **start, off_t offset, 45 * Initialization, etc.
45 int request, int *eof, void *data); 46 **************************************************/
46static int drm_clients_info(char *buf, char **start, off_t offset,
47 int request, int *eof, void *data);
48static int drm_queues_info(char *buf, char **start, off_t offset,
49 int request, int *eof, void *data);
50static int drm_bufs_info(char *buf, char **start, off_t offset,
51 int request, int *eof, void *data);
52static int drm_vblank_info(char *buf, char **start, off_t offset,
53 int request, int *eof, void *data);
54static int drm_gem_name_info(char *buf, char **start, off_t offset,
55 int request, int *eof, void *data);
56static int drm_gem_object_info(char *buf, char **start, off_t offset,
57 int request, int *eof, void *data);
58#if DRM_DEBUG_CODE
59static int drm_vma_info(char *buf, char **start, off_t offset,
60 int request, int *eof, void *data);
61#endif
62 47
63/** 48/**
64 * Proc file list. 49 * Proc file list.
65 */ 50 */
66static struct drm_proc_list { 51static struct drm_info_list drm_proc_list[] = {
67 const char *name; /**< file name */
68 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
69 u32 driver_features; /**< Required driver features for this entry */
70} drm_proc_list[] = {
71 {"name", drm_name_info, 0}, 52 {"name", drm_name_info, 0},
72 {"mem", drm_mem_info, 0},
73 {"vm", drm_vm_info, 0}, 53 {"vm", drm_vm_info, 0},
74 {"clients", drm_clients_info, 0}, 54 {"clients", drm_clients_info, 0},
75 {"queues", drm_queues_info, 0}, 55 {"queues", drm_queues_info, 0},
76 {"bufs", drm_bufs_info, 0}, 56 {"bufs", drm_bufs_info, 0},
77 {"vblank", drm_vblank_info, 0},
78 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 57 {"gem_names", drm_gem_name_info, DRIVER_GEM},
79 {"gem_objects", drm_gem_object_info, DRIVER_GEM}, 58 {"gem_objects", drm_gem_object_info, DRIVER_GEM},
80#if DRM_DEBUG_CODE 59#if DRM_DEBUG_CODE
81 {"vma", drm_vma_info}, 60 {"vma", drm_vma_info, 0},
82#endif 61#endif
83}; 62};
84
85#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) 63#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
86 64
65static int drm_proc_open(struct inode *inode, struct file *file)
66{
67 struct drm_info_node* node = PDE(inode)->data;
68
69 return single_open(file, node->info_ent->show, node);
70}
71
72static const struct file_operations drm_proc_fops = {
73 .owner = THIS_MODULE,
74 .open = drm_proc_open,
75 .read = seq_read,
76 .llseek = seq_lseek,
77 .release = single_release,
78};
79
80
87/** 81/**
88 * Initialize the DRI proc filesystem for a device. 82 * Initialize a given set of proc files for a device
89 * 83 *
90 * \param dev DRM device. 84 * \param files The array of files to create
91 * \param minor device minor number. 85 * \param count The number of files given
92 * \param root DRI proc dir entry. 86 * \param root DRI proc dir entry.
93 * \param dev_root resulting DRI device proc dir entry. 87 * \param minor device minor number
94 * \return root entry pointer on success, or NULL on failure. 88 * \return Zero on success, non-zero on failure
95 * 89 *
96 * Create the DRI proc root entry "/proc/dri", the device proc root entry 90 * Create a given set of proc files represented by an array of
97 * "/proc/dri/%minor%/", and each entry in proc_list as 91 * gdm_proc_lists in the given root directory.
98 * "/proc/dri/%minor%/%name%".
99 */ 92 */
100int drm_proc_init(struct drm_minor *minor, int minor_id, 93int drm_proc_create_files(struct drm_info_list *files, int count,
101 struct proc_dir_entry *root) 94 struct proc_dir_entry *root, struct drm_minor *minor)
102{ 95{
103 struct drm_device *dev = minor->dev; 96 struct drm_device *dev = minor->dev;
104 struct proc_dir_entry *ent; 97 struct proc_dir_entry *ent;
105 int i, j, ret; 98 struct drm_info_node *tmp;
106 char name[64]; 99 char name[64];
100 int i, ret;
107 101
108 sprintf(name, "%d", minor_id); 102 for (i = 0; i < count; i++) {
109 minor->dev_root = proc_mkdir(name, root); 103 u32 features = files[i].driver_features;
110 if (!minor->dev_root) {
111 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
112 return -1;
113 }
114
115 for (i = 0; i < DRM_PROC_ENTRIES; i++) {
116 u32 features = drm_proc_list[i].driver_features;
117 104
118 if (features != 0 && 105 if (features != 0 &&
119 (dev->driver->driver_features & features) != features) 106 (dev->driver->driver_features & features) != features)
120 continue; 107 continue;
121 108
122 ent = create_proc_entry(drm_proc_list[i].name, 109 tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
123 S_IFREG | S_IRUGO, minor->dev_root); 110 ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
124 if (!ent) { 111 if (!ent) {
125 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 112 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
126 name, drm_proc_list[i].name); 113 name, files[i].name);
114 drm_free(tmp, sizeof(struct drm_info_node),
115 _DRM_DRIVER);
127 ret = -1; 116 ret = -1;
128 goto fail; 117 goto fail;
129 } 118 }
130 ent->read_proc = drm_proc_list[i].f;
131 ent->data = minor;
132 }
133 119
134 if (dev->driver->proc_init) { 120 ent->proc_fops = &drm_proc_fops;
135 ret = dev->driver->proc_init(minor); 121 ent->data = tmp;
136 if (ret) { 122 tmp->minor = minor;
137 DRM_ERROR("DRM: Driver failed to initialize " 123 tmp->info_ent = &files[i];
138 "/proc/dri.\n"); 124 list_add(&(tmp->list), &(minor->proc_nodes.list));
139 goto fail;
140 }
141 } 125 }
142
143 return 0; 126 return 0;
144 fail:
145 127
146 for (j = 0; j < i; j++) 128fail:
147 remove_proc_entry(drm_proc_list[i].name, 129 for (i = 0; i < count; i++)
148 minor->dev_root); 130 remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
149 remove_proc_entry(name, root);
150 minor->dev_root = NULL;
151 return ret; 131 return ret;
152} 132}
153 133
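The proc side mirrors the debugfs side one-to-one: the show functions are shared with drm_info.c, and a driver's proc_init hook can add its own files the same way. Names are hypothetical, reusing the show function from the debugfs sketch above:

    static struct drm_info_list example_proc_list[] = {
            {"example_status", example_status, 0},  /* show fn as above */
    };

    /* Hypothetical driver hook, symmetric to the debugfs example. */
    static int example_proc_init(struct drm_minor *minor)
    {
            return drm_proc_create_files(example_proc_list,
                                         ARRAY_SIZE(example_proc_list),
                                         minor->proc_root, minor);
    }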
154/** 134/**
155 * Cleanup the proc filesystem resources. 135 * Initialize the DRI proc filesystem for a device
156 * 136 *
157 * \param minor device minor number. 137 * \param dev DRM device
138 * \param minor device minor number
158 * \param root DRI proc dir entry. 139 * \param root DRI proc dir entry.
159 * \param dev_root DRI device proc dir entry. 140 * \param dev_root resulting DRI device proc dir entry.
160 * \return always zero. 141 * \return root entry pointer on success, or NULL on failure.
161 * 142 *
162 * Remove all proc entries created by proc_init(). 143 * Create the DRI proc root entry "/proc/dri", the device proc root entry
144 * "/proc/dri/%minor%/", and each entry in proc_list as
145 * "/proc/dri/%minor%/%name%".
163 */ 146 */
164int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) 147int drm_proc_init(struct drm_minor *minor, int minor_id,
148 struct proc_dir_entry *root)
165{ 149{
166 struct drm_device *dev = minor->dev; 150 struct drm_device *dev = minor->dev;
167 int i;
168 char name[64]; 151 char name[64];
152 int ret;
169 153
170 if (!root || !minor->dev_root) 154 INIT_LIST_HEAD(&minor->proc_nodes.list);
171 return 0; 155 sprintf(name, "%d", minor_id);
172 156 minor->proc_root = proc_mkdir(name, root);
173 if (dev->driver->proc_cleanup) 157 if (!minor->proc_root) {
174 dev->driver->proc_cleanup(minor); 158 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
175 159 return -1;
176 for (i = 0; i < DRM_PROC_ENTRIES; i++)
177 remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
178 sprintf(name, "%d", minor->index);
179 remove_proc_entry(name, root);
180
181 return 0;
182}
183
184/**
185 * Called when "/proc/dri/.../name" is read.
186 *
187 * \param buf output buffer.
188 * \param start start of output data.
189 * \param offset requested start offset.
190 * \param request requested number of bytes.
191 * \param eof whether there is no more data to return.
192 * \param data private data.
193 * \return number of written bytes.
194 *
195 * Prints the device name together with the bus id if available.
196 */
197static int drm_name_info(char *buf, char **start, off_t offset, int request,
198 int *eof, void *data)
199{
200 struct drm_minor *minor = (struct drm_minor *) data;
201 struct drm_master *master = minor->master;
202 struct drm_device *dev = minor->dev;
203 int len = 0;
204
205 if (offset > DRM_PROC_LIMIT) {
206 *eof = 1;
207 return 0;
208 } 160 }
209 161
210 if (!master) 162 ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
211 return 0; 163 minor->proc_root, minor);
212 164 if (ret) {
213 *start = &buf[offset]; 165 remove_proc_entry(name, root);
214 *eof = 0; 166 minor->proc_root = NULL;
215 167 DRM_ERROR("Failed to create core drm proc files\n");
216 if (master->unique) { 168 return ret;
217 DRM_PROC_PRINT("%s %s %s\n",
218 dev->driver->pci_driver.name,
219 pci_name(dev->pdev), master->unique);
220 } else {
221 DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
222 pci_name(dev->pdev));
223 } 169 }
224 170
225 if (len > request + offset) 171 if (dev->driver->proc_init) {
226 return request; 172 ret = dev->driver->proc_init(minor);
227 *eof = 1; 173 if (ret) {
228 return len - offset; 174 DRM_ERROR("DRM: Driver failed to initialize "
229} 175 "/proc/dri.\n");
230 176 return ret;
231/**
232 * Called when "/proc/dri/.../vm" is read.
233 *
234 * \param buf output buffer.
235 * \param start start of output data.
236 * \param offset requested start offset.
237 * \param request requested number of bytes.
238 * \param eof whether there is no more data to return.
239 * \param data private data.
240 * \return number of written bytes.
241 *
242 * Prints information about all mappings in drm_device::maplist.
243 */
244static int drm__vm_info(char *buf, char **start, off_t offset, int request,
245 int *eof, void *data)
246{
247 struct drm_minor *minor = (struct drm_minor *) data;
248 struct drm_device *dev = minor->dev;
249 int len = 0;
250 struct drm_map *map;
251 struct drm_map_list *r_list;
252
253 /* Hardcoded from _DRM_FRAME_BUFFER,
254 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
255 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
256 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
257 const char *type;
258 int i;
259
260 if (offset > DRM_PROC_LIMIT) {
261 *eof = 1;
262 return 0;
263 }
264
265 *start = &buf[offset];
266 *eof = 0;
267
268 DRM_PROC_PRINT("slot offset size type flags "
269 "address mtrr\n\n");
270 i = 0;
271 list_for_each_entry(r_list, &dev->maplist, head) {
272 map = r_list->map;
273 if (!map)
274 continue;
275 if (map->type < 0 || map->type > 5)
276 type = "??";
277 else
278 type = types[map->type];
279 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
280 i,
281 map->offset,
282 map->size, type, map->flags,
283 (unsigned long) r_list->user_token);
284 if (map->mtrr < 0) {
285 DRM_PROC_PRINT("none\n");
286 } else {
287 DRM_PROC_PRINT("%4d\n", map->mtrr);
288 } 177 }
289 i++;
290 }
291
292 if (len > request + offset)
293 return request;
294 *eof = 1;
295 return len - offset;
296}
297
298/**
299 * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
300 */
301static int drm_vm_info(char *buf, char **start, off_t offset, int request,
302 int *eof, void *data)
303{
304 struct drm_minor *minor = (struct drm_minor *) data;
305 struct drm_device *dev = minor->dev;
306 int ret;
307
308 mutex_lock(&dev->struct_mutex);
309 ret = drm__vm_info(buf, start, offset, request, eof, data);
310 mutex_unlock(&dev->struct_mutex);
311 return ret;
312}
313
314/**
315 * Called when "/proc/dri/.../queues" is read.
316 *
317 * \param buf output buffer.
318 * \param start start of output data.
319 * \param offset requested start offset.
320 * \param request requested number of bytes.
321 * \param eof whether there is no more data to return.
322 * \param data private data.
323 * \return number of written bytes.
324 */
325static int drm__queues_info(char *buf, char **start, off_t offset,
326 int request, int *eof, void *data)
327{
328 struct drm_minor *minor = (struct drm_minor *) data;
329 struct drm_device *dev = minor->dev;
330 int len = 0;
331 int i;
332 struct drm_queue *q;
333
334 if (offset > DRM_PROC_LIMIT) {
335 *eof = 1;
336 return 0;
337 } 178 }
338 179 return 0;
339 *start = &buf[offset];
340 *eof = 0;
341
342 DRM_PROC_PRINT(" ctx/flags use fin"
343 " blk/rw/rwf wait flushed queued"
344 " locks\n\n");
345 for (i = 0; i < dev->queue_count; i++) {
346 q = dev->queuelist[i];
347 atomic_inc(&q->use_count);
348 DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
349 "%5d/0x%03x %5d %5d"
350 " %5d/%c%c/%c%c%c %5Zd\n",
351 i,
352 q->flags,
353 atomic_read(&q->use_count),
354 atomic_read(&q->finalization),
355 atomic_read(&q->block_count),
356 atomic_read(&q->block_read) ? 'r' : '-',
357 atomic_read(&q->block_write) ? 'w' : '-',
358 waitqueue_active(&q->read_queue) ? 'r' : '-',
359 waitqueue_active(&q->
360 write_queue) ? 'w' : '-',
361 waitqueue_active(&q->
362 flush_queue) ? 'f' : '-',
363 DRM_BUFCOUNT(&q->waitlist));
364 atomic_dec(&q->use_count);
365 }
366
367 if (len > request + offset)
368 return request;
369 *eof = 1;
370 return len - offset;
371}
372
373/**
374 * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
375 */
376static int drm_queues_info(char *buf, char **start, off_t offset, int request,
377 int *eof, void *data)
378{
379 struct drm_minor *minor = (struct drm_minor *) data;
380 struct drm_device *dev = minor->dev;
381 int ret;
382
383 mutex_lock(&dev->struct_mutex);
384 ret = drm__queues_info(buf, start, offset, request, eof, data);
385 mutex_unlock(&dev->struct_mutex);
386 return ret;
387} 180}
388 181
389/** 182int drm_proc_remove_files(struct drm_info_list *files, int count,
390 * Called when "/proc/dri/.../bufs" is read. 183 struct drm_minor *minor)
391 *
392 * \param buf output buffer.
393 * \param start start of output data.
394 * \param offset requested start offset.
395 * \param request requested number of bytes.
396 * \param eof whether there is no more data to return.
397 * \param data private data.
398 * \return number of written bytes.
399 */
400static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
401 int *eof, void *data)
402{ 184{
403 struct drm_minor *minor = (struct drm_minor *) data; 185 struct list_head *pos, *q;
404 struct drm_device *dev = minor->dev; 186 struct drm_info_node *tmp;
405 int len = 0;
406 struct drm_device_dma *dma = dev->dma;
407 int i; 187 int i;
408 188
409 if (!dma || offset > DRM_PROC_LIMIT) { 189 for (i = 0; i < count; i++) {
410 *eof = 1; 190 list_for_each_safe(pos, q, &minor->proc_nodes.list) {
411 return 0; 191 tmp = list_entry(pos, struct drm_info_node, list);
412 } 192 if (tmp->info_ent == &files[i]) {
413 193 remove_proc_entry(files[i].name,
414 *start = &buf[offset]; 194 minor->proc_root);
415 *eof = 0; 195 list_del(pos);
416 196 drm_free(tmp, sizeof(struct drm_info_node),
417 DRM_PROC_PRINT(" o size count free segs pages kB\n\n"); 197 _DRM_DRIVER);
418 for (i = 0; i <= DRM_MAX_ORDER; i++) { 198 }
419 if (dma->bufs[i].buf_count) 199 }
420 DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
421 i,
422 dma->bufs[i].buf_size,
423 dma->bufs[i].buf_count,
424 atomic_read(&dma->bufs[i]
425 .freelist.count),
426 dma->bufs[i].seg_count,
427 dma->bufs[i].seg_count
428 * (1 << dma->bufs[i].page_order),
429 (dma->bufs[i].seg_count
430 * (1 << dma->bufs[i].page_order))
431 * PAGE_SIZE / 1024);
432 }
433 DRM_PROC_PRINT("\n");
434 for (i = 0; i < dma->buf_count; i++) {
435 if (i && !(i % 32))
436 DRM_PROC_PRINT("\n");
437 DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
438 } 200 }
439 DRM_PROC_PRINT("\n"); 201 return 0;
440
441 if (len > request + offset)
442 return request;
443 *eof = 1;
444 return len - offset;
445}
446
447/**
448 * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
449 */
450static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
451 int *eof, void *data)
452{
453 struct drm_minor *minor = (struct drm_minor *) data;
454 struct drm_device *dev = minor->dev;
455 int ret;
456
457 mutex_lock(&dev->struct_mutex);
458 ret = drm__bufs_info(buf, start, offset, request, eof, data);
459 mutex_unlock(&dev->struct_mutex);
460 return ret;
461} 202}
462 203
463/** 204/**
464 * Called when "/proc/dri/.../vblank" is read. 205 * Cleanup the proc filesystem resources.
465 * 206 *
466 * \param buf output buffer. 207 * \param minor device minor number.
467 * \param start start of output data. 208 * \param root DRI proc dir entry.
468 * \param offset requested start offset. 209 * \param dev_root DRI device proc dir entry.
469 * \param request requested number of bytes. 210 * \return always zero.
470 * \param eof whether there is no more data to return.
471 * \param data private data.
472 * \return number of written bytes.
473 */
474static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
475 int *eof, void *data)
476{
477 struct drm_minor *minor = (struct drm_minor *) data;
478 struct drm_device *dev = minor->dev;
479 int len = 0;
480 int crtc;
481
482 if (offset > DRM_PROC_LIMIT) {
483 *eof = 1;
484 return 0;
485 }
486
487 *start = &buf[offset];
488 *eof = 0;
489
490 for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
491 DRM_PROC_PRINT("CRTC %d enable: %d\n",
492 crtc, atomic_read(&dev->vblank_refcount[crtc]));
493 DRM_PROC_PRINT("CRTC %d counter: %d\n",
494 crtc, drm_vblank_count(dev, crtc));
495 DRM_PROC_PRINT("CRTC %d last wait: %d\n",
496 crtc, dev->last_vblank_wait[crtc]);
497 DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
498 crtc, dev->vblank_inmodeset[crtc]);
499 }
500
501 if (len > request + offset)
502 return request;
503 *eof = 1;
504 return len - offset;
505}
506
507/**
508 * Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
509 */
510static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
511 int *eof, void *data)
512{
513 struct drm_minor *minor = (struct drm_minor *) data;
514 struct drm_device *dev = minor->dev;
515 int ret;
516
517 mutex_lock(&dev->struct_mutex);
518 ret = drm__vblank_info(buf, start, offset, request, eof, data);
519 mutex_unlock(&dev->struct_mutex);
520 return ret;
521}
522
523/**
524 * Called when "/proc/dri/.../clients" is read.
525 * 211 *
526 * \param buf output buffer. 212 * Remove all proc entries created by proc_init().
527 * \param start start of output data.
528 * \param offset requested start offset.
529 * \param request requested number of bytes.
530 * \param eof whether there is no more data to return.
531 * \param data private data.
532 * \return number of written bytes.
533 */ 213 */
534static int drm__clients_info(char *buf, char **start, off_t offset, 214int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
535 int request, int *eof, void *data)
536{ 215{
537 struct drm_minor *minor = (struct drm_minor *) data;
538 struct drm_device *dev = minor->dev; 216 struct drm_device *dev = minor->dev;
539 int len = 0; 217 char name[64];
540 struct drm_file *priv;
541 218
542 if (offset > DRM_PROC_LIMIT) { 219 if (!root || !minor->proc_root)
543 *eof = 1;
544 return 0; 220 return 0;
545 }
546
547 *start = &buf[offset];
548 *eof = 0;
549
550 DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
551 list_for_each_entry(priv, &dev->filelist, lhead) {
552 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
553 priv->authenticated ? 'y' : 'n',
554 priv->minor->index,
555 priv->pid,
556 priv->uid, priv->magic, priv->ioctl_count);
557 }
558 221
559 if (len > request + offset) 222 if (dev->driver->proc_cleanup)
560 return request; 223 dev->driver->proc_cleanup(minor);
561 *eof = 1;
562 return len - offset;
563}
564
565/**
566 * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
567 */
568static int drm_clients_info(char *buf, char **start, off_t offset,
569 int request, int *eof, void *data)
570{
571 struct drm_minor *minor = (struct drm_minor *) data;
572 struct drm_device *dev = minor->dev;
573 int ret;
574
575 mutex_lock(&dev->struct_mutex);
576 ret = drm__clients_info(buf, start, offset, request, eof, data);
577 mutex_unlock(&dev->struct_mutex);
578 return ret;
579}
580
581struct drm_gem_name_info_data {
582 int len;
583 char *buf;
584 int eof;
585};
586 224
587static int drm_gem_one_name_info(int id, void *ptr, void *data) 225 drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
588{
589 struct drm_gem_object *obj = ptr;
590 struct drm_gem_name_info_data *nid = data;
591 226
592 DRM_INFO("name %d size %zd\n", obj->name, obj->size); 227 sprintf(name, "%d", minor->index);
593 if (nid->eof) 228 remove_proc_entry(name, root);
594 return 0;
595 229
596 nid->len += sprintf(&nid->buf[nid->len],
597 "%6d %8zd %7d %8d\n",
598 obj->name, obj->size,
599 atomic_read(&obj->handlecount.refcount),
600 atomic_read(&obj->refcount.refcount));
601 if (nid->len > DRM_PROC_LIMIT) {
602 nid->eof = 1;
603 return 0;
604 }
605 return 0; 230 return 0;
606} 231}
607 232
608static int drm_gem_name_info(char *buf, char **start, off_t offset,
609 int request, int *eof, void *data)
610{
611 struct drm_minor *minor = (struct drm_minor *) data;
612 struct drm_device *dev = minor->dev;
613 struct drm_gem_name_info_data nid;
614
615 if (offset > DRM_PROC_LIMIT) {
616 *eof = 1;
617 return 0;
618 }
619
620 nid.len = sprintf(buf, " name size handles refcount\n");
621 nid.buf = buf;
622 nid.eof = 0;
623 idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
624
625 *start = &buf[offset];
626 *eof = 0;
627 if (nid.len > request + offset)
628 return request;
629 *eof = 1;
630 return nid.len - offset;
631}
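
drm_gem_name_info() above walks the device's object_name_idr with idr_for_each(), threading a small cursor struct through the callback. The same iterate-with-context pattern in isolation, as a hedged sketch (struct dump_ctx and both function names are invented):

    #include <linux/idr.h>

    /* Hedged sketch of the idr_for_each() accumulation pattern. */
    struct dump_ctx {
            int count;              /* entries visited so far */
    };

    static int dump_one(int id, void *ptr, void *data)
    {
            struct dump_ctx *ctx = data;

            ctx->count++;
            /* A real callback would format "ptr" into an output buffer;
             * returning nonzero aborts the walk early.
             */
            return 0;
    }

    static void dump_all(struct idr *idr)
    {
            struct dump_ctx ctx = { .count = 0 };

            idr_for_each(idr, dump_one, &ctx);
    }
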
632
633static int drm_gem_object_info(char *buf, char **start, off_t offset,
634 int request, int *eof, void *data)
635{
636 struct drm_minor *minor = (struct drm_minor *) data;
637 struct drm_device *dev = minor->dev;
638 int len = 0;
639
640 if (offset > DRM_PROC_LIMIT) {
641 *eof = 1;
642 return 0;
643 }
644
645 *start = &buf[offset];
646 *eof = 0;
647 DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
648 DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
649 DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
650 DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
651 DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
652 DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
653 if (len > request + offset)
654 return request;
655 *eof = 1;
656 return len - offset;
657}
658
659#if DRM_DEBUG_CODE
660
661static int drm__vma_info(char *buf, char **start, off_t offset, int request,
662 int *eof, void *data)
663{
664 struct drm_minor *minor = (struct drm_minor *) data;
665 struct drm_device *dev = minor->dev;
666 int len = 0;
667 struct drm_vma_entry *pt;
668 struct vm_area_struct *vma;
669#if defined(__i386__)
670 unsigned int pgprot;
671#endif
672
673 if (offset > DRM_PROC_LIMIT) {
674 *eof = 1;
675 return 0;
676 }
677
678 *start = &buf[offset];
679 *eof = 0;
680
681 DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
682 atomic_read(&dev->vma_count),
683 high_memory, virt_to_phys(high_memory));
684 list_for_each_entry(pt, &dev->vmalist, head) {
685 if (!(vma = pt->vma))
686 continue;
687 DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
688 pt->pid,
689 vma->vm_start,
690 vma->vm_end,
691 vma->vm_flags & VM_READ ? 'r' : '-',
692 vma->vm_flags & VM_WRITE ? 'w' : '-',
693 vma->vm_flags & VM_EXEC ? 'x' : '-',
694 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
695 vma->vm_flags & VM_LOCKED ? 'l' : '-',
696 vma->vm_flags & VM_IO ? 'i' : '-',
697 vma->vm_pgoff);
698
699#if defined(__i386__)
700 pgprot = pgprot_val(vma->vm_page_prot);
701 DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
702 pgprot & _PAGE_PRESENT ? 'p' : '-',
703 pgprot & _PAGE_RW ? 'w' : 'r',
704 pgprot & _PAGE_USER ? 'u' : 's',
705 pgprot & _PAGE_PWT ? 't' : 'b',
706 pgprot & _PAGE_PCD ? 'u' : 'c',
707 pgprot & _PAGE_ACCESSED ? 'a' : '-',
708 pgprot & _PAGE_DIRTY ? 'd' : '-',
709 pgprot & _PAGE_PSE ? 'm' : 'k',
710 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
711#endif
712 DRM_PROC_PRINT("\n");
713 }
714
715 if (len > request + offset)
716 return request;
717 *eof = 1;
718 return len - offset;
719}
720
721static int drm_vma_info(char *buf, char **start, off_t offset, int request,
722 int *eof, void *data)
723{
724 struct drm_minor *minor = (struct drm_minor *) data;
725 struct drm_device *dev = minor->dev;
726 int ret;
727
728 mutex_lock(&dev->struct_mutex);
729 ret = drm__vma_info(buf, start, offset, request, eof, data);
730 mutex_unlock(&dev->struct_mutex);
731 return ret;
732}
733#endif
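
drm__vma_info() renders each mapping's vm_flags as an ls-style permission string, one character per flag. That flag-to-character technique stands on its own; here is a hedged userspace rendition with invented flag values (not the kernel's VM_* bits):

    #include <stdio.h>

    #define EX_READ  0x1
    #define EX_WRITE 0x2
    #define EX_EXEC  0x4
    #define EX_SHARE 0x8

    /* Build an "rwxs/p"-style string, as drm__vma_info() does. */
    static void render_flags(unsigned long flags, char out[5])
    {
            out[0] = flags & EX_READ  ? 'r' : '-';
            out[1] = flags & EX_WRITE ? 'w' : '-';
            out[2] = flags & EX_EXEC  ? 'x' : '-';
            out[3] = flags & EX_SHARE ? 's' : 'p';
            out[4] = '\0';
    }

    int main(void)
    {
            char buf[5];

            render_flags(EX_READ | EX_WRITE | EX_SHARE, buf);
            printf("%s\n", buf);        /* prints "rw-s" */
            return 0;
    }
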
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 7c8b15b22bf2..48f33be8fd0f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -50,6 +50,7 @@ struct idr drm_minors_idr;
50 50
51struct class *drm_class; 51struct class *drm_class;
52struct proc_dir_entry *drm_proc_root; 52struct proc_dir_entry *drm_proc_root;
53struct dentry *drm_debugfs_root;
53 54
54static int drm_minor_get_id(struct drm_device *dev, int type) 55static int drm_minor_get_id(struct drm_device *dev, int type)
55{ 56{
@@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
313 goto err_mem; 314 goto err_mem;
314 } 315 }
315 } else 316 } else
316 new_minor->dev_root = NULL; 317 new_minor->proc_root = NULL;
318
319#if defined(CONFIG_DEBUG_FS)
320 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
321 if (ret) {
322 DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
323 goto err_g2;
324 }
325#endif
317 326
318 ret = drm_sysfs_device_add(new_minor); 327 ret = drm_sysfs_device_add(new_minor);
319 if (ret) { 328 if (ret) {
@@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p)
451 460
452 if (minor->type == DRM_MINOR_LEGACY) 461 if (minor->type == DRM_MINOR_LEGACY)
453 drm_proc_cleanup(minor, drm_proc_root); 462 drm_proc_cleanup(minor, drm_proc_root);
463#if defined(CONFIG_DEBUG_FS)
464 drm_debugfs_cleanup(minor);
465#endif
466
454 drm_sysfs_device_remove(minor); 467 drm_sysfs_device_remove(minor);
455 468
456 idr_remove(&drm_minors_idr, minor->index); 469 idr_remove(&drm_minors_idr, minor->index);
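
The drm_stub.c hunks pair drm_debugfs_init() with drm_debugfs_cleanup() and guard both behind CONFIG_DEBUG_FS so non-debugfs kernels still build. The general shape of that pairing, as a hedged module sketch (the "example" directory and the exported u32 are made up):

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static struct dentry *example_root;
    static u32 example_value;

    static int __init example_init(void)
    {
            example_root = debugfs_create_dir("example", NULL);
            if (!example_root)
                    return -ENOMEM;
            debugfs_create_u32("value", 0444, example_root, &example_value);
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* Removes "value" too, much as drm_debugfs_cleanup() tears
             * down everything under a minor's directory.
             */
            debugfs_remove_recursive(example_root);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
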
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 793cba39d832..51c5a050aa73 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
7 i915_suspend.o \ 7 i915_suspend.o \
8 i915_gem.o \ 8 i915_gem.o \
9 i915_gem_debug.o \ 9 i915_gem_debug.o \
10 i915_gem_proc.o \ 10 i915_gem_debugfs.o \
11 i915_gem_tiling.o \ 11 i915_gem_tiling.o \
12 intel_display.o \ 12 intel_display.o \
13 intel_crt.o \ 13 intel_crt.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6d21b9e48b89..a818b377e1f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -41,7 +41,6 @@
41int i915_wait_ring(struct drm_device * dev, int n, const char *caller) 41int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
42{ 42{
43 drm_i915_private_t *dev_priv = dev->dev_private; 43 drm_i915_private_t *dev_priv = dev->dev_private;
44 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
45 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 44 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
46 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 45 u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
47 u32 last_acthd = I915_READ(acthd_reg); 46 u32 last_acthd = I915_READ(acthd_reg);
@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
58 if (ring->space >= n) 57 if (ring->space >= n)
59 return 0; 58 return 0;
60 59
61 if (master_priv->sarea_priv) 60 if (dev->primary->master) {
62 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 61 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
62 if (master_priv->sarea_priv)
63 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
64 }
65
63 66
64 if (ring->head != last_head) 67 if (ring->head != last_head)
65 i = 0; 68 i = 0;
@@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
356 return ret; 359 return ret;
357} 360}
358 361
359static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) 362static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
360{ 363{
361 drm_i915_private_t *dev_priv = dev->dev_private; 364 drm_i915_private_t *dev_priv = dev->dev_private;
362 int i; 365 int i;
@@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
370 for (i = 0; i < dwords;) { 373 for (i = 0; i < dwords;) {
371 int cmd, sz; 374 int cmd, sz;
372 375
373 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) 376 cmd = buffer[i];
374 return -EINVAL;
375 377
376 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) 378 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
377 return -EINVAL; 379 return -EINVAL;
@@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
379 OUT_RING(cmd); 381 OUT_RING(cmd);
380 382
381 while (++i, --sz) { 383 while (++i, --sz) {
382 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], 384 OUT_RING(buffer[i]);
383 sizeof(cmd))) {
384 return -EINVAL;
385 }
386 OUT_RING(cmd);
387 } 385 }
388 } 386 }
389 387
@@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
397 395
398int 396int
399i915_emit_box(struct drm_device *dev, 397i915_emit_box(struct drm_device *dev,
400 struct drm_clip_rect __user *boxes, 398 struct drm_clip_rect *boxes,
401 int i, int DR1, int DR4) 399 int i, int DR1, int DR4)
402{ 400{
403 drm_i915_private_t *dev_priv = dev->dev_private; 401 drm_i915_private_t *dev_priv = dev->dev_private;
404 struct drm_clip_rect box; 402 struct drm_clip_rect box = boxes[i];
405 RING_LOCALS; 403 RING_LOCALS;
406 404
407 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
408 return -EFAULT;
409 }
410
411 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 405 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
412 DRM_ERROR("Bad box %d,%d..%d,%d\n", 406 DRM_ERROR("Bad box %d,%d..%d,%d\n",
413 box.x1, box.y1, box.x2, box.y2); 407 box.x1, box.y1, box.x2, box.y2);
@@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
460} 454}
461 455
462static int i915_dispatch_cmdbuffer(struct drm_device * dev, 456static int i915_dispatch_cmdbuffer(struct drm_device * dev,
463 drm_i915_cmdbuffer_t * cmd) 457 drm_i915_cmdbuffer_t *cmd,
458 struct drm_clip_rect *cliprects,
459 void *cmdbuf)
464{ 460{
465 int nbox = cmd->num_cliprects; 461 int nbox = cmd->num_cliprects;
466 int i = 0, count, ret; 462 int i = 0, count, ret;
@@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
476 472
477 for (i = 0; i < count; i++) { 473 for (i = 0; i < count; i++) {
478 if (i < nbox) { 474 if (i < nbox) {
479 ret = i915_emit_box(dev, cmd->cliprects, i, 475 ret = i915_emit_box(dev, cliprects, i,
480 cmd->DR1, cmd->DR4); 476 cmd->DR1, cmd->DR4);
481 if (ret) 477 if (ret)
482 return ret; 478 return ret;
483 } 479 }
484 480
485 ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4); 481 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
486 if (ret) 482 if (ret)
487 return ret; 483 return ret;
488 } 484 }
@@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
492} 488}
493 489
494static int i915_dispatch_batchbuffer(struct drm_device * dev, 490static int i915_dispatch_batchbuffer(struct drm_device * dev,
495 drm_i915_batchbuffer_t * batch) 491 drm_i915_batchbuffer_t * batch,
492 struct drm_clip_rect *cliprects)
496{ 493{
497 drm_i915_private_t *dev_priv = dev->dev_private; 494 drm_i915_private_t *dev_priv = dev->dev_private;
498 struct drm_clip_rect __user *boxes = batch->cliprects;
499 int nbox = batch->num_cliprects; 495 int nbox = batch->num_cliprects;
500 int i = 0, count; 496 int i = 0, count;
501 RING_LOCALS; 497 RING_LOCALS;
@@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
511 507
512 for (i = 0; i < count; i++) { 508 for (i = 0; i < count; i++) {
513 if (i < nbox) { 509 if (i < nbox) {
514 int ret = i915_emit_box(dev, boxes, i, 510 int ret = i915_emit_box(dev, cliprects, i,
515 batch->DR1, batch->DR4); 511 batch->DR1, batch->DR4);
516 if (ret) 512 if (ret)
517 return ret; 513 return ret;
@@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
626 master_priv->sarea_priv; 622 master_priv->sarea_priv;
627 drm_i915_batchbuffer_t *batch = data; 623 drm_i915_batchbuffer_t *batch = data;
628 int ret; 624 int ret;
625 struct drm_clip_rect *cliprects = NULL;
629 626
630 if (!dev_priv->allow_batchbuffer) { 627 if (!dev_priv->allow_batchbuffer) {
631 DRM_ERROR("Batchbuffer ioctl disabled\n"); 628 DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
637 634
638 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 635 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
639 636
640 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 637 if (batch->num_cliprects < 0)
641 batch->num_cliprects * 638 return -EINVAL;
642 sizeof(struct drm_clip_rect))) 639
643 return -EFAULT; 640 if (batch->num_cliprects) {
641 cliprects = drm_calloc(batch->num_cliprects,
642 sizeof(struct drm_clip_rect),
643 DRM_MEM_DRIVER);
644 if (cliprects == NULL)
645 return -ENOMEM;
646
647 ret = copy_from_user(cliprects, batch->cliprects,
648 batch->num_cliprects *
649 sizeof(struct drm_clip_rect));
650 if (ret != 0)
651 goto fail_free;
652 }
644 653
645 mutex_lock(&dev->struct_mutex); 654 mutex_lock(&dev->struct_mutex);
646 ret = i915_dispatch_batchbuffer(dev, batch); 655 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
647 mutex_unlock(&dev->struct_mutex); 656 mutex_unlock(&dev->struct_mutex);
648 657
649 if (sarea_priv) 658 if (sarea_priv)
650 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 659 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
660
661fail_free:
662 drm_free(cliprects,
663 batch->num_cliprects * sizeof(struct drm_clip_rect),
664 DRM_MEM_DRIVER);
665
651 return ret; 666 return ret;
652} 667}
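
The batchbuffer rework above replaces "verify the user range, then read it piecemeal under the lock" with "snapshot the user data into kernel memory first, then take struct_mutex". Copying while no lock is held is what removes the fault-under-lock hazard. A hedged standalone sketch of that ordering (all names invented):

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    struct example_dev {
            struct mutex lock;      /* assumed initialized elsewhere */
    };

    static int example_submit(struct example_dev *dev,
                              const void __user *uptr, size_t count,
                              size_t elem_size)
    {
            void *copy;
            int ret = 0;

            if (count == 0)
                    return -EINVAL;

            /* Copy while we may still fault: no locks held yet. */
            copy = kcalloc(count, elem_size, GFP_KERNEL);
            if (!copy)
                    return -ENOMEM;
            if (copy_from_user(copy, uptr, count * elem_size)) {
                    ret = -EFAULT;          /* set ret before the jump */
                    goto out_free;
            }

            /* Only now take the lock; everything below touches kernel
             * memory and cannot fault.
             */
            mutex_lock(&dev->lock);
            /* ... dispatch from "copy" ... */
            mutex_unlock(&dev->lock);

    out_free:
            kfree(copy);
            return ret;
    }
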
653 668
@@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
659 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) 674 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
660 master_priv->sarea_priv; 675 master_priv->sarea_priv;
661 drm_i915_cmdbuffer_t *cmdbuf = data; 676 drm_i915_cmdbuffer_t *cmdbuf = data;
677 struct drm_clip_rect *cliprects = NULL;
678 void *batch_data;
662 int ret; 679 int ret;
663 680
664 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 681 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
666 683
667 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 684 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
668 685
669 if (cmdbuf->num_cliprects && 686 if (cmdbuf->num_cliprects < 0)
670 DRM_VERIFYAREA_READ(cmdbuf->cliprects, 687 return -EINVAL;
671 cmdbuf->num_cliprects * 688
672 sizeof(struct drm_clip_rect))) { 689 batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
673 DRM_ERROR("Fault accessing cliprects\n"); 690 if (batch_data == NULL)
674 return -EFAULT; 691 return -ENOMEM;
692
693 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
694 if (ret != 0)
695 goto fail_batch_free;
696
697 if (cmdbuf->num_cliprects) {
698 cliprects = drm_calloc(cmdbuf->num_cliprects,
699 sizeof(struct drm_clip_rect),
700 DRM_MEM_DRIVER);
701 if (cliprects == NULL)
702 goto fail_batch_free;
703
704 ret = copy_from_user(cliprects, cmdbuf->cliprects,
705 cmdbuf->num_cliprects *
706 sizeof(struct drm_clip_rect));
707 if (ret != 0)
708 goto fail_clip_free;
675 } 709 }
676 710
677 mutex_lock(&dev->struct_mutex); 711 mutex_lock(&dev->struct_mutex);
678 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 712 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
679 mutex_unlock(&dev->struct_mutex); 713 mutex_unlock(&dev->struct_mutex);
680 if (ret) { 714 if (ret) {
681 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 715 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
682 return ret; 716 goto fail_batch_free;
683 } 717 }
684 718
685 if (sarea_priv) 719 if (sarea_priv)
686 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 720 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
687 return 0; 721
722fail_batch_free:
723 drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
724fail_clip_free:
725 drm_free(cliprects,
726 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
727 DRM_MEM_DRIVER);
728
729 return ret;
688} 730}
689 731
690static int i915_flip_bufs(struct drm_device *dev, void *data, 732static int i915_flip_bufs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b293ef0bae71..dcb91f5df6e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,8 +150,10 @@ static struct drm_driver driver = {
150 .get_reg_ofs = drm_core_get_reg_ofs, 150 .get_reg_ofs = drm_core_get_reg_ofs,
151 .master_create = i915_master_create, 151 .master_create = i915_master_create,
152 .master_destroy = i915_master_destroy, 152 .master_destroy = i915_master_destroy,
153 .proc_init = i915_gem_proc_init, 153#if defined(CONFIG_DEBUG_FS)
154 .proc_cleanup = i915_gem_proc_cleanup, 154 .debugfs_init = i915_gem_debugfs_init,
155 .debugfs_cleanup = i915_gem_debugfs_cleanup,
156#endif
155 .gem_init_object = i915_gem_init_object, 157 .gem_init_object = i915_gem_init_object,
156 .gem_free_object = i915_gem_free_object, 158 .gem_free_object = i915_gem_free_object,
157 .gem_vm_ops = &i915_gem_vm_ops, 159 .gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6cc9861e0a1..c1685d0c704f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
404 /** AGP memory structure for our GTT binding. */ 404 /** AGP memory structure for our GTT binding. */
405 DRM_AGP_MEM *agp_mem; 405 DRM_AGP_MEM *agp_mem;
406 406
407 struct page **page_list; 407 struct page **pages;
408 int pages_refcount;
408 409
409 /** 410 /**
410 * Current offset of the object in GTT space. 411 * Current offset of the object in GTT space.
@@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
519extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 520extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
520 unsigned long arg); 521 unsigned long arg);
521extern int i915_emit_box(struct drm_device *dev, 522extern int i915_emit_box(struct drm_device *dev,
522 struct drm_clip_rect __user *boxes, 523 struct drm_clip_rect *boxes,
523 int i, int DR1, int DR4); 524 int i, int DR1, int DR4);
524 525
525/* i915_irq.c */ 526/* i915_irq.c */
@@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
604int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 605int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
605 struct drm_file *file_priv); 606 struct drm_file *file_priv);
606void i915_gem_load(struct drm_device *dev); 607void i915_gem_load(struct drm_device *dev);
607int i915_gem_proc_init(struct drm_minor *minor);
608void i915_gem_proc_cleanup(struct drm_minor *minor);
609int i915_gem_init_object(struct drm_gem_object *obj); 608int i915_gem_init_object(struct drm_gem_object *obj);
610void i915_gem_free_object(struct drm_gem_object *obj); 609void i915_gem_free_object(struct drm_gem_object *obj);
611int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 610int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
649 const char *where, uint32_t mark); 648 const char *where, uint32_t mark);
650void i915_dump_lru(struct drm_device *dev, const char *where); 649void i915_dump_lru(struct drm_device *dev, const char *where);
651 650
651/* i915_debugfs.c */
652int i915_gem_debugfs_init(struct drm_minor *minor);
653void i915_gem_debugfs_cleanup(struct drm_minor *minor);
654
652/* i915_suspend.c */ 655/* i915_suspend.c */
653extern int i915_save_state(struct drm_device *dev); 656extern int i915_save_state(struct drm_device *dev);
654extern int i915_restore_state(struct drm_device *dev); 657extern int i915_restore_state(struct drm_device *dev);
@@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
784 (dev)->pci_device == 0x2E22 || \ 787 (dev)->pci_device == 0x2E22 || \
785 IS_GM45(dev)) 788 IS_GM45(dev))
786 789
790#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
791#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
792#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
793
787#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ 794#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
788 (dev)->pci_device == 0x29B2 || \ 795 (dev)->pci_device == 0x29B2 || \
789 (dev)->pci_device == 0x29D2) 796 (dev)->pci_device == 0x29D2 || \
797 (IS_IGD(dev)))
790 798
791#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ 799#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
792 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 800 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
793 801
794#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 802#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
795 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 803 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
804 IS_IGD(dev))
796 805
797#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 806#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
798/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 807/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
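
The IS_IGDG/IS_IGDGM additions above illustrate how the driver layers device detection: leaf macros match exact PCI device IDs, and family macros OR leaves together so broader checks (IS_G33, IS_MOBILE) pick new parts up automatically. A hedged userspace illustration with invented IDs:

    #include <stdio.h>

    struct dev { unsigned pci_device; };

    #define IS_FOO_A(d)  ((d)->pci_device == 0x1001)
    #define IS_FOO_B(d)  ((d)->pci_device == 0x1011)
    #define IS_FOO(d)    (IS_FOO_A(d) || IS_FOO_B(d))
    /* Family-level checks inherit new leaves automatically. */
    #define IS_MOBILE(d) (IS_FOO_B(d) || (d)->pci_device == 0x2002)

    int main(void)
    {
            struct dev d = { .pci_device = 0x1011 };

            printf("foo=%d mobile=%d\n", IS_FOO(&d), IS_MOBILE(&d));
            return 0;
    }
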
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37427e4016cb..b52cba0f16d2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset, 43 uint64_t offset,
44 uint64_t size); 44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 46static int i915_gem_object_get_pages(struct drm_gem_object *obj);
47static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 47static void i915_gem_object_put_pages(struct drm_gem_object *obj);
48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50 unsigned alignment); 50 unsigned alignment);
@@ -136,6 +136,224 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
136 return 0; 136 return 0;
137} 137}
138 138
139static inline int
140fast_shmem_read(struct page **pages,
141 loff_t page_base, int page_offset,
142 char __user *data,
143 int length)
144{
145 char __iomem *vaddr;
146 int ret;
147
148 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
149 if (vaddr == NULL)
150 return -ENOMEM;
151 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
152 kunmap_atomic(vaddr, KM_USER0);
153
154 return ret;
155}
156
157static inline int
158slow_shmem_copy(struct page *dst_page,
159 int dst_offset,
160 struct page *src_page,
161 int src_offset,
162 int length)
163{
164 char *dst_vaddr, *src_vaddr;
165
166 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
167 if (dst_vaddr == NULL)
168 return -ENOMEM;
169
170 src_vaddr = kmap_atomic(src_page, KM_USER1);
171 if (src_vaddr == NULL) {
172 kunmap_atomic(dst_vaddr, KM_USER0);
173 return -ENOMEM;
174 }
175
176 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
177
178 kunmap_atomic(src_vaddr, KM_USER1);
179 kunmap_atomic(dst_vaddr, KM_USER0);
180
181 return 0;
182}
183
184/**
185 * This is the fast shmem pread path, which attempts to copy_to_user directly
186 * from the backing pages of the object to the user's address space. On a
187 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
188 */
189static int
190i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
191 struct drm_i915_gem_pread *args,
192 struct drm_file *file_priv)
193{
194 struct drm_i915_gem_object *obj_priv = obj->driver_private;
195 ssize_t remain;
196 loff_t offset, page_base;
197 char __user *user_data;
198 int page_offset, page_length;
199 int ret;
200
201 user_data = (char __user *) (uintptr_t) args->data_ptr;
202 remain = args->size;
203
204 mutex_lock(&dev->struct_mutex);
205
206 ret = i915_gem_object_get_pages(obj);
207 if (ret != 0)
208 goto fail_unlock;
209
210 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
211 args->size);
212 if (ret != 0)
213 goto fail_put_pages;
214
215 obj_priv = obj->driver_private;
216 offset = args->offset;
217
218 while (remain > 0) {
219 /* Operation in this page
220 *
221 * page_base = page offset within aperture
222 * page_offset = offset within page
223 * page_length = bytes to copy for this page
224 */
225 page_base = (offset & ~(PAGE_SIZE-1));
226 page_offset = offset & (PAGE_SIZE-1);
227 page_length = remain;
228 if ((page_offset + remain) > PAGE_SIZE)
229 page_length = PAGE_SIZE - page_offset;
230
231 ret = fast_shmem_read(obj_priv->pages,
232 page_base, page_offset,
233 user_data, page_length);
234 if (ret)
235 goto fail_put_pages;
236
237 remain -= page_length;
238 user_data += page_length;
239 offset += page_length;
240 }
241
242fail_put_pages:
243 i915_gem_object_put_pages(obj);
244fail_unlock:
245 mutex_unlock(&dev->struct_mutex);
246
247 return ret;
248}
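
The fast pread path above leans on one property: __copy_to_user_inatomic() refuses to take page faults, so inside a kmap_atomic section (where sleeping is forbidden) a non-resident user page shows up as a nonzero "bytes left" return instead of a deadlock, and the caller unwinds and retries via the slow path. That inner step in isolation, as a hedged sketch against the 2009-era kmap_atomic API used in this file:

    #include <linux/highmem.h>
    #include <linux/uaccess.h>

    static int copy_page_to_user_atomic(struct page *page, int offset,
                                        char __user *dst, int len)
    {
            char *vaddr;
            unsigned long uncopied;

            vaddr = kmap_atomic(page, KM_USER0);    /* no sleeping from here */
            uncopied = __copy_to_user_inatomic(dst, vaddr + offset, len);
            kunmap_atomic(vaddr, KM_USER0);

            /* Nonzero means the user page was not resident; report it so
             * the caller can fall back to a path that may fault.
             */
            return uncopied ? -EFAULT : 0;
    }
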
249
250/**
251 * This is the fallback shmem pread path, which uses get_user_pages to
252 * pin the user pages and kmap_atomic to copy into them, so we can read
253 * out of the object's backing pages while holding the struct mutex and
254 * not take page faults.
255 */
256static int
257i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
258 struct drm_i915_gem_pread *args,
259 struct drm_file *file_priv)
260{
261 struct drm_i915_gem_object *obj_priv = obj->driver_private;
262 struct mm_struct *mm = current->mm;
263 struct page **user_pages;
264 ssize_t remain;
265 loff_t offset, pinned_pages, i;
266 loff_t first_data_page, last_data_page, num_pages;
267 int shmem_page_index, shmem_page_offset;
268 int data_page_index, data_page_offset;
269 int page_length;
270 int ret;
271 uint64_t data_ptr = args->data_ptr;
272
273 remain = args->size;
274
275 /* Pin the user pages containing the data. We can't fault while
276 * holding the struct mutex, yet we want to hold it while
277 * dereferencing the user data.
278 */
279 first_data_page = data_ptr / PAGE_SIZE;
280 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
281 num_pages = last_data_page - first_data_page + 1;
282
283 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
284 if (user_pages == NULL)
285 return -ENOMEM;
286
287 down_read(&mm->mmap_sem);
288 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
289 num_pages, 0, 0, user_pages, NULL);
290 up_read(&mm->mmap_sem);
291 if (pinned_pages < num_pages) {
292 ret = -EFAULT;
293 goto fail_put_user_pages;
294 }
295
296 mutex_lock(&dev->struct_mutex);
297
298 ret = i915_gem_object_get_pages(obj);
299 if (ret != 0)
300 goto fail_unlock;
301
302 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
303 args->size);
304 if (ret != 0)
305 goto fail_put_pages;
306
307 obj_priv = obj->driver_private;
308 offset = args->offset;
309
310 while (remain > 0) {
311 /* Operation in this page
312 *
313 * shmem_page_index = page number within shmem file
314 * shmem_page_offset = offset within page in shmem file
315 * data_page_index = page number in get_user_pages return
316 * data_page_offset = offset within data_page_index page.
317 * page_length = bytes to copy for this page
318 */
319 shmem_page_index = offset / PAGE_SIZE;
320 shmem_page_offset = offset & ~PAGE_MASK;
321 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
322 data_page_offset = data_ptr & ~PAGE_MASK;
323
324 page_length = remain;
325 if ((shmem_page_offset + page_length) > PAGE_SIZE)
326 page_length = PAGE_SIZE - shmem_page_offset;
327 if ((data_page_offset + page_length) > PAGE_SIZE)
328 page_length = PAGE_SIZE - data_page_offset;
329
330 ret = slow_shmem_copy(user_pages[data_page_index],
331 data_page_offset,
332 obj_priv->pages[shmem_page_index],
333 shmem_page_offset,
334 page_length);
335 if (ret)
336 goto fail_put_pages;
337
338 remain -= page_length;
339 data_ptr += page_length;
340 offset += page_length;
341 }
342
343fail_put_pages:
344 i915_gem_object_put_pages(obj);
345fail_unlock:
346 mutex_unlock(&dev->struct_mutex);
347fail_put_user_pages:
348 for (i = 0; i < pinned_pages; i++) {
349 SetPageDirty(user_pages[i]);
350 page_cache_release(user_pages[i]);
351 }
352 kfree(user_pages);
353
354 return ret;
355}
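
The slow pread path pins its destination with get_user_pages() before taking struct_mutex, so every later copy touches only pinned memory and cannot fault under the lock. The pin/use/release skeleton, hedged against the eight-argument 2009-era get_user_pages() used above (the helper name is invented):

    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    static int pin_user_buffer(unsigned long uaddr, int num_pages, int write,
                               struct page ***pagesp)
    {
            struct page **pages;
            int pinned;

            pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            down_read(&current->mm->mmap_sem);
            pinned = get_user_pages(current, current->mm, uaddr, num_pages,
                                    write, 0 /* force */, pages, NULL);
            up_read(&current->mm->mmap_sem);

            if (pinned < num_pages) {
                    while (pinned-- > 0)
                            page_cache_release(pages[pinned]);
                    kfree(pages);
                    return -EFAULT;
            }
            *pagesp = pages;        /* caller releases each page when done */
            return 0;
    }
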
356
139/** 357/**
140 * Reads data from the object referenced by handle. 358 * Reads data from the object referenced by handle.
141 * 359 *
@@ -148,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
148 struct drm_i915_gem_pread *args = data; 366 struct drm_i915_gem_pread *args = data;
149 struct drm_gem_object *obj; 367 struct drm_gem_object *obj;
150 struct drm_i915_gem_object *obj_priv; 368 struct drm_i915_gem_object *obj_priv;
151 ssize_t read;
152 loff_t offset;
153 int ret; 369 int ret;
154 370
155 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 371 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -167,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
167 return -EINVAL; 383 return -EINVAL;
168 } 384 }
169 385
170 mutex_lock(&dev->struct_mutex); 386 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
171 387 if (ret != 0)
172 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, 388 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
173 args->size);
174 if (ret != 0) {
175 drm_gem_object_unreference(obj);
176 mutex_unlock(&dev->struct_mutex);
177 return ret;
178 }
179
180 offset = args->offset;
181
182 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
183 args->size, &offset);
184 if (read != args->size) {
185 drm_gem_object_unreference(obj);
186 mutex_unlock(&dev->struct_mutex);
187 if (read < 0)
188 return read;
189 else
190 return -EINVAL;
191 }
192 389
193 drm_gem_object_unreference(obj); 390 drm_gem_object_unreference(obj);
194 mutex_unlock(&dev->struct_mutex);
195 391
196 return 0; 392 return ret;
197} 393}
198 394
199/* This is the fast write path which cannot handle 395/* This is the fast write path which cannot handle
@@ -223,29 +419,51 @@ fast_user_write(struct io_mapping *mapping,
223 */ 419 */
224 420
225static inline int 421static inline int
226slow_user_write(struct io_mapping *mapping, 422slow_kernel_write(struct io_mapping *mapping,
227 loff_t page_base, int page_offset, 423 loff_t gtt_base, int gtt_offset,
228 char __user *user_data, 424 struct page *user_page, int user_offset,
229 int length) 425 int length)
230{ 426{
231 char __iomem *vaddr; 427 char *src_vaddr, *dst_vaddr;
232 unsigned long unwritten; 428 unsigned long unwritten;
233 429
234 vaddr = io_mapping_map_wc(mapping, page_base); 430 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
235 if (vaddr == NULL) 431 src_vaddr = kmap_atomic(user_page, KM_USER1);
236 return -EFAULT; 432 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
237 unwritten = __copy_from_user(vaddr + page_offset, 433 src_vaddr + user_offset,
238 user_data, length); 434 length);
239 io_mapping_unmap(vaddr); 435 kunmap_atomic(src_vaddr, KM_USER1);
436 io_mapping_unmap_atomic(dst_vaddr);
240 if (unwritten) 437 if (unwritten)
241 return -EFAULT; 438 return -EFAULT;
242 return 0; 439 return 0;
243} 440}
244 441
442static inline int
443fast_shmem_write(struct page **pages,
444 loff_t page_base, int page_offset,
445 char __user *data,
446 int length)
447{
448 char __iomem *vaddr;
449
450 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
451 if (vaddr == NULL)
452 return -ENOMEM;
453 __copy_from_user_inatomic(vaddr + page_offset, data, length);
454 kunmap_atomic(vaddr, KM_USER0);
455
456 return 0;
457}
458
459/**
460 * This is the fast pwrite path, where we copy the data directly from the
461 * user into the GTT, uncached.
462 */
245static int 463static int
246i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 464i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
247 struct drm_i915_gem_pwrite *args, 465 struct drm_i915_gem_pwrite *args,
248 struct drm_file *file_priv) 466 struct drm_file *file_priv)
249{ 467{
250 struct drm_i915_gem_object *obj_priv = obj->driver_private; 468 struct drm_i915_gem_object *obj_priv = obj->driver_private;
251 drm_i915_private_t *dev_priv = dev->dev_private; 469 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -273,7 +491,6 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
273 491
274 obj_priv = obj->driver_private; 492 obj_priv = obj->driver_private;
275 offset = obj_priv->gtt_offset + args->offset; 493 offset = obj_priv->gtt_offset + args->offset;
276 obj_priv->dirty = 1;
277 494
278 while (remain > 0) { 495 while (remain > 0) {
279 /* Operation in this page 496 /* Operation in this page
@@ -292,16 +509,11 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
292 page_offset, user_data, page_length); 509 page_offset, user_data, page_length);
293 510
294 /* If we get a fault while copying data, then (presumably) our 511 /* If we get a fault while copying data, then (presumably) our
295 * source page isn't available. In this case, use the 512 * source page isn't available. Return the error and we'll
296 * non-atomic function 513 * retry in the slow path.
297 */ 514 */
298 if (ret) { 515 if (ret)
299 ret = slow_user_write (dev_priv->mm.gtt_mapping, 516 goto fail;
300 page_base, page_offset,
301 user_data, page_length);
302 if (ret)
303 goto fail;
304 }
305 517
306 remain -= page_length; 518 remain -= page_length;
307 user_data += page_length; 519 user_data += page_length;
@@ -315,39 +527,284 @@ fail:
315 return ret; 527 return ret;
316} 528}
317 529
530/**
531 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
532 * the memory and maps it using kmap_atomic for copying.
533 *
534 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
535 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
536 */
318static int 537static int
319i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 538i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
320 struct drm_i915_gem_pwrite *args, 539 struct drm_i915_gem_pwrite *args,
321 struct drm_file *file_priv) 540 struct drm_file *file_priv)
322{ 541{
542 struct drm_i915_gem_object *obj_priv = obj->driver_private;
543 drm_i915_private_t *dev_priv = dev->dev_private;
544 ssize_t remain;
545 loff_t gtt_page_base, offset;
546 loff_t first_data_page, last_data_page, num_pages;
547 loff_t pinned_pages, i;
548 struct page **user_pages;
549 struct mm_struct *mm = current->mm;
550 int gtt_page_offset, data_page_offset, data_page_index, page_length;
323 int ret; 551 int ret;
324 loff_t offset; 552 uint64_t data_ptr = args->data_ptr;
325 ssize_t written; 553
554 remain = args->size;
555
556 /* Pin the user pages containing the data. We can't fault while
557 * holding the struct mutex, and all of the pwrite implementations
558 * want to hold it while dereferencing the user data.
559 */
560 first_data_page = data_ptr / PAGE_SIZE;
561 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
562 num_pages = last_data_page - first_data_page + 1;
563
564 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
565 if (user_pages == NULL)
566 return -ENOMEM;
567
568 down_read(&mm->mmap_sem);
569 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
570 num_pages, 0, 0, user_pages, NULL);
571 up_read(&mm->mmap_sem);
572 if (pinned_pages < num_pages) {
573 ret = -EFAULT;
574 goto out_unpin_pages;
575 }
326 576
327 mutex_lock(&dev->struct_mutex); 577 mutex_lock(&dev->struct_mutex);
578 ret = i915_gem_object_pin(obj, 0);
579 if (ret)
580 goto out_unlock;
581
582 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
583 if (ret)
584 goto out_unpin_object;
585
586 obj_priv = obj->driver_private;
587 offset = obj_priv->gtt_offset + args->offset;
588
589 while (remain > 0) {
590 /* Operation in this page
591 *
592 * gtt_page_base = page offset within aperture
593 * gtt_page_offset = offset within page in aperture
594 * data_page_index = page number in get_user_pages return
595 * data_page_offset = offset within data_page_index page.
596 * page_length = bytes to copy for this page
597 */
598 gtt_page_base = offset & PAGE_MASK;
599 gtt_page_offset = offset & ~PAGE_MASK;
600 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
601 data_page_offset = data_ptr & ~PAGE_MASK;
602
603 page_length = remain;
604 if ((gtt_page_offset + page_length) > PAGE_SIZE)
605 page_length = PAGE_SIZE - gtt_page_offset;
606 if ((data_page_offset + page_length) > PAGE_SIZE)
607 page_length = PAGE_SIZE - data_page_offset;
608
609 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
610 gtt_page_base, gtt_page_offset,
611 user_pages[data_page_index],
612 data_page_offset,
613 page_length);
614
615 /* Both ends are pinned kernel mappings here, so a failed copy is
616 * a genuine error rather than a retryable fault: return it to
617 * the caller.
618 */
619 if (ret)
620 goto out_unpin_object;
621
622 remain -= page_length;
623 offset += page_length;
624 data_ptr += page_length;
625 }
626
627out_unpin_object:
628 i915_gem_object_unpin(obj);
629out_unlock:
630 mutex_unlock(&dev->struct_mutex);
631out_unpin_pages:
632 for (i = 0; i < pinned_pages; i++)
633 page_cache_release(user_pages[i]);
634 kfree(user_pages);
635
636 return ret;
637}
638
639/**
640 * This is the fast shmem pwrite path, which attempts to directly
641 * copy_from_user into the kmapped pages backing the object.
642 */
643static int
644i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
645 struct drm_i915_gem_pwrite *args,
646 struct drm_file *file_priv)
647{
648 struct drm_i915_gem_object *obj_priv = obj->driver_private;
649 ssize_t remain;
650 loff_t offset, page_base;
651 char __user *user_data;
652 int page_offset, page_length;
653 int ret;
654
655 user_data = (char __user *) (uintptr_t) args->data_ptr;
656 remain = args->size;
657
658 mutex_lock(&dev->struct_mutex);
659
660 ret = i915_gem_object_get_pages(obj);
661 if (ret != 0)
662 goto fail_unlock;
328 663
329 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 664 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
330 if (ret) { 665 if (ret != 0)
331 mutex_unlock(&dev->struct_mutex); 666 goto fail_put_pages;
332 return ret; 667
668 obj_priv = obj->driver_private;
669 offset = args->offset;
670 obj_priv->dirty = 1;
671
672 while (remain > 0) {
673 /* Operation in this page
674 *
675 * page_base = page offset within aperture
676 * page_offset = offset within page
677 * page_length = bytes to copy for this page
678 */
679 page_base = (offset & ~(PAGE_SIZE-1));
680 page_offset = offset & (PAGE_SIZE-1);
681 page_length = remain;
682 if ((page_offset + remain) > PAGE_SIZE)
683 page_length = PAGE_SIZE - page_offset;
684
685 ret = fast_shmem_write(obj_priv->pages,
686 page_base, page_offset,
687 user_data, page_length);
688 if (ret)
689 goto fail_put_pages;
690
691 remain -= page_length;
692 user_data += page_length;
693 offset += page_length;
333 } 694 }
334 695
696fail_put_pages:
697 i915_gem_object_put_pages(obj);
698fail_unlock:
699 mutex_unlock(&dev->struct_mutex);
700
701 return ret;
702}
703
704/**
705 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
706 * the memory and maps it using kmap_atomic for copying.
707 *
708 * This avoids taking mmap_sem for faulting on the user's address while the
709 * struct_mutex is held.
710 */
711static int
712i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
713 struct drm_i915_gem_pwrite *args,
714 struct drm_file *file_priv)
715{
716 struct drm_i915_gem_object *obj_priv = obj->driver_private;
717 struct mm_struct *mm = current->mm;
718 struct page **user_pages;
719 ssize_t remain;
720 loff_t offset, pinned_pages, i;
721 loff_t first_data_page, last_data_page, num_pages;
722 int shmem_page_index, shmem_page_offset;
723 int data_page_index, data_page_offset;
724 int page_length;
725 int ret;
726 uint64_t data_ptr = args->data_ptr;
727
728 remain = args->size;
729
730 /* Pin the user pages containing the data. We can't fault while
731 * holding the struct mutex, and all of the pwrite implementations
732 * want to hold it while dereferencing the user data.
733 */
734 first_data_page = data_ptr / PAGE_SIZE;
735 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
736 num_pages = last_data_page - first_data_page + 1;
737
738 user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
739 if (user_pages == NULL)
740 return -ENOMEM;
741
742 down_read(&mm->mmap_sem);
743 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
744 num_pages, 0, 0, user_pages, NULL);
745 up_read(&mm->mmap_sem);
746 if (pinned_pages < num_pages) {
747 ret = -EFAULT;
748 goto fail_put_user_pages;
749 }
750
751 mutex_lock(&dev->struct_mutex);
752
753 ret = i915_gem_object_get_pages(obj);
754 if (ret != 0)
755 goto fail_unlock;
756
757 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
758 if (ret != 0)
759 goto fail_put_pages;
760
761 obj_priv = obj->driver_private;
335 offset = args->offset; 762 offset = args->offset;
763 obj_priv->dirty = 1;
336 764
337 written = vfs_write(obj->filp, 765 while (remain > 0) {
338 (char __user *)(uintptr_t) args->data_ptr, 766 /* Operation in this page
339 args->size, &offset); 767 *
340 if (written != args->size) { 768 * shmem_page_index = page number within shmem file
341 mutex_unlock(&dev->struct_mutex); 769 * shmem_page_offset = offset within page in shmem file
342 if (written < 0) 770 * data_page_index = page number in get_user_pages return
343 return written; 771 * data_page_offset = offset within data_page_index page.
344 else 772 * page_length = bytes to copy for this page
345 return -EINVAL; 773 */
774 shmem_page_index = offset / PAGE_SIZE;
775 shmem_page_offset = offset & ~PAGE_MASK;
776 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
777 data_page_offset = data_ptr & ~PAGE_MASK;
778
779 page_length = remain;
780 if ((shmem_page_offset + page_length) > PAGE_SIZE)
781 page_length = PAGE_SIZE - shmem_page_offset;
782 if ((data_page_offset + page_length) > PAGE_SIZE)
783 page_length = PAGE_SIZE - data_page_offset;
784
785 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
786 shmem_page_offset,
787 user_pages[data_page_index],
788 data_page_offset,
789 page_length);
790 if (ret)
791 goto fail_put_pages;
792
793 remain -= page_length;
794 data_ptr += page_length;
795 offset += page_length;
346 } 796 }
347 797
798fail_put_pages:
799 i915_gem_object_put_pages(obj);
800fail_unlock:
348 mutex_unlock(&dev->struct_mutex); 801 mutex_unlock(&dev->struct_mutex);
802fail_put_user_pages:
803 for (i = 0; i < pinned_pages; i++)
804 page_cache_release(user_pages[i]);
805 kfree(user_pages);
349 806
350 return 0; 807 return ret;
351} 808}
352 809
353/** 810/**
@@ -388,10 +845,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
388 if (obj_priv->phys_obj) 845 if (obj_priv->phys_obj)
389 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); 846 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
390 else if (obj_priv->tiling_mode == I915_TILING_NONE && 847 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
391 dev->gtt_total != 0) 848 dev->gtt_total != 0) {
392 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); 849 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
393 else 850 if (ret == -EFAULT) {
394 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); 851 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
852 file_priv);
853 }
854 } else {
855 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
856 if (ret == -EFAULT) {
857 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
858 file_priv);
859 }
860 }
395 861
396#if WATCH_PWRITE 862#if WATCH_PWRITE
397 if (ret) 863 if (ret)
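
The pwrite dispatch above compresses the whole strategy into a few lines: try the atomic fast path, and only when it reports -EFAULT (user memory not resident) pay for the pinned slow path; any other error propagates unchanged. A hedged userspace distillation of that retry shape (both path functions are stand-ins):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-ins: the fast path "faults" on its first attempt purely to
     * demonstrate the retry.
     */
    static int example_fast(int attempt) { return attempt == 0 ? -EFAULT : 0; }
    static int example_slow(void)        { return 0; }

    static int example_write(int attempt)
    {
            int ret = example_fast(attempt);

            if (ret == -EFAULT)     /* only a fault warrants the retry */
                    ret = example_slow();
            return ret;
    }

    int main(void)
    {
            printf("%d\n", example_write(0));   /* fast faults, slow: 0 */
            return 0;
    }
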
@@ -816,29 +1282,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
816} 1282}
817 1283
818static void 1284static void
819i915_gem_object_free_page_list(struct drm_gem_object *obj) 1285i915_gem_object_put_pages(struct drm_gem_object *obj)
820{ 1286{
821 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1287 struct drm_i915_gem_object *obj_priv = obj->driver_private;
822 int page_count = obj->size / PAGE_SIZE; 1288 int page_count = obj->size / PAGE_SIZE;
823 int i; 1289 int i;
824 1290
825 if (obj_priv->page_list == NULL) 1291 BUG_ON(obj_priv->pages_refcount == 0);
826 return;
827 1292
1293 if (--obj_priv->pages_refcount != 0)
1294 return;
828 1295
829 for (i = 0; i < page_count; i++) 1296 for (i = 0; i < page_count; i++)
830 if (obj_priv->page_list[i] != NULL) { 1297 if (obj_priv->pages[i] != NULL) {
831 if (obj_priv->dirty) 1298 if (obj_priv->dirty)
832 set_page_dirty(obj_priv->page_list[i]); 1299 set_page_dirty(obj_priv->pages[i]);
833 mark_page_accessed(obj_priv->page_list[i]); 1300 mark_page_accessed(obj_priv->pages[i]);
834 page_cache_release(obj_priv->page_list[i]); 1301 page_cache_release(obj_priv->pages[i]);
835 } 1302 }
836 obj_priv->dirty = 0; 1303 obj_priv->dirty = 0;
837 1304
838 drm_free(obj_priv->page_list, 1305 drm_free(obj_priv->pages,
839 page_count * sizeof(struct page *), 1306 page_count * sizeof(struct page *),
840 DRM_MEM_DRIVER); 1307 DRM_MEM_DRIVER);
841 obj_priv->page_list = NULL; 1308 obj_priv->pages = NULL;
842} 1309}
843 1310
844static void 1311static void
@@ -1290,7 +1757,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1290 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 1757 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1291 i915_gem_clear_fence_reg(obj); 1758 i915_gem_clear_fence_reg(obj);
1292 1759
1293 i915_gem_object_free_page_list(obj); 1760 i915_gem_object_put_pages(obj);
1294 1761
1295 if (obj_priv->gtt_space) { 1762 if (obj_priv->gtt_space) {
1296 atomic_dec(&dev->gtt_count); 1763 atomic_dec(&dev->gtt_count);
@@ -1409,7 +1876,7 @@ i915_gem_evict_everything(struct drm_device *dev)
1409} 1876}
1410 1877
1411static int 1878static int
1412i915_gem_object_get_page_list(struct drm_gem_object *obj) 1879i915_gem_object_get_pages(struct drm_gem_object *obj)
1413{ 1880{
1414 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1881 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1415 int page_count, i; 1882 int page_count, i;
@@ -1418,18 +1885,19 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1418 struct page *page; 1885 struct page *page;
1419 int ret; 1886 int ret;
1420 1887
1421 if (obj_priv->page_list) 1888 if (obj_priv->pages_refcount++ != 0)
1422 return 0; 1889 return 0;
1423 1890
1424 /* Get the list of pages out of our struct file. They'll be pinned 1891 /* Get the list of pages out of our struct file. They'll be pinned
1425 * at this point until we release them. 1892 * at this point until we release them.
1426 */ 1893 */
1427 page_count = obj->size / PAGE_SIZE; 1894 page_count = obj->size / PAGE_SIZE;
1428 BUG_ON(obj_priv->page_list != NULL); 1895 BUG_ON(obj_priv->pages != NULL);
1429 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), 1896 obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
1430 DRM_MEM_DRIVER); 1897 DRM_MEM_DRIVER);
1431 if (obj_priv->page_list == NULL) { 1898 if (obj_priv->pages == NULL) {
1432 DRM_ERROR("Failed to allocate page list\n"); 1899 DRM_ERROR("Failed to allocate page list\n");
1900 obj_priv->pages_refcount--;
1433 return -ENOMEM; 1901 return -ENOMEM;
1434 } 1902 }
1435 1903
@@ -1440,10 +1908,10 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
1440 if (IS_ERR(page)) { 1908 if (IS_ERR(page)) {
1441 ret = PTR_ERR(page); 1909 ret = PTR_ERR(page);
1442 DRM_ERROR("read_mapping_page failed: %d\n", ret); 1910 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1443 i915_gem_object_free_page_list(obj); 1911 i915_gem_object_put_pages(obj);
1444 return ret; 1912 return ret;
1445 } 1913 }
1446 obj_priv->page_list[i] = page; 1914 obj_priv->pages[i] = page;
1447 } 1915 }
1448 return 0; 1916 return 0;
1449} 1917}
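
With pages_refcount, get_pages/put_pages above become a true get/put pair: nested users (a pread holding the pages while the object is also bound into the GTT) share one page array, and only the final put releases it. The counting discipline in miniature, as a hedged userspace sketch:

    #include <assert.h>
    #include <stdlib.h>

    struct obj {
            int   refcount;
            void *resource;         /* stands in for the page array */
    };

    static int obj_get(struct obj *o)
    {
            if (o->refcount++ != 0)         /* already acquired: share it */
                    return 0;
            o->resource = malloc(64);
            if (!o->resource) {
                    o->refcount--;          /* undo the count on failure */
                    return -1;
            }
            return 0;
    }

    static void obj_put(struct obj *o)
    {
            assert(o->refcount > 0);
            if (--o->refcount != 0)         /* someone still needs it */
                    return;
            free(o->resource);
            o->resource = NULL;
    }

    int main(void)
    {
            struct obj o = { 0, 0 };

            obj_get(&o);
            obj_get(&o);    /* nested user shares the same resource */
            obj_put(&o);
            obj_put(&o);    /* last put frees */
            return 0;
    }
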
@@ -1766,7 +2234,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1766 DRM_INFO("Binding object of size %d at 0x%08x\n", 2234 DRM_INFO("Binding object of size %d at 0x%08x\n",
1767 obj->size, obj_priv->gtt_offset); 2235 obj->size, obj_priv->gtt_offset);
1768#endif 2236#endif
1769 ret = i915_gem_object_get_page_list(obj); 2237 ret = i915_gem_object_get_pages(obj);
1770 if (ret) { 2238 if (ret) {
1771 drm_mm_put_block(obj_priv->gtt_space); 2239 drm_mm_put_block(obj_priv->gtt_space);
1772 obj_priv->gtt_space = NULL; 2240 obj_priv->gtt_space = NULL;
@@ -1778,12 +2246,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1778 * into the GTT. 2246 * into the GTT.
1779 */ 2247 */
1780 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2248 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1781 obj_priv->page_list, 2249 obj_priv->pages,
1782 page_count, 2250 page_count,
1783 obj_priv->gtt_offset, 2251 obj_priv->gtt_offset,
1784 obj_priv->agp_type); 2252 obj_priv->agp_type);
1785 if (obj_priv->agp_mem == NULL) { 2253 if (obj_priv->agp_mem == NULL) {
1786 i915_gem_object_free_page_list(obj); 2254 i915_gem_object_put_pages(obj);
1787 drm_mm_put_block(obj_priv->gtt_space); 2255 drm_mm_put_block(obj_priv->gtt_space);
1788 obj_priv->gtt_space = NULL; 2256 obj_priv->gtt_space = NULL;
1789 return -ENOMEM; 2257 return -ENOMEM;
@@ -1810,10 +2278,10 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
1810 * to GPU, and we can ignore the cache flush because it'll happen 2278 * to GPU, and we can ignore the cache flush because it'll happen
1811 * again at bind time. 2279 * again at bind time.
1812 */ 2280 */
1813 if (obj_priv->page_list == NULL) 2281 if (obj_priv->pages == NULL)
1814 return; 2282 return;
1815 2283
1816 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 2284 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
1817} 2285}
1818 2286
1819/** Flushes any GPU write domain for the object if it's dirty. */ 2287/** Flushes any GPU write domain for the object if it's dirty. */
@@ -1913,7 +2381,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1913static int 2381static int
1914i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2382i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1915{ 2383{
1916 struct drm_device *dev = obj->dev;
1917 int ret; 2384 int ret;
1918 2385
1919 i915_gem_object_flush_gpu_write_domain(obj); 2386 i915_gem_object_flush_gpu_write_domain(obj);
@@ -1932,7 +2399,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1932 /* Flush the CPU cache if it's still invalid. */ 2399 /* Flush the CPU cache if it's still invalid. */
1933 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 2400 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1934 i915_gem_clflush_object(obj); 2401 i915_gem_clflush_object(obj);
1935 drm_agp_chipset_flush(dev);
1936 2402
1937 obj->read_domains |= I915_GEM_DOMAIN_CPU; 2403 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1938 } 2404 }
@@ -2144,7 +2610,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2144static void 2610static void
2145i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 2611i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2146{ 2612{
2147 struct drm_device *dev = obj->dev;
2148 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2613 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2149 2614
2150 if (!obj_priv->page_cpu_valid) 2615 if (!obj_priv->page_cpu_valid)
@@ -2158,9 +2623,8 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2158 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { 2623 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2159 if (obj_priv->page_cpu_valid[i]) 2624 if (obj_priv->page_cpu_valid[i])
2160 continue; 2625 continue;
2161 drm_clflush_pages(obj_priv->page_list + i, 1); 2626 drm_clflush_pages(obj_priv->pages + i, 1);
2162 } 2627 }
2163 drm_agp_chipset_flush(dev);
2164 } 2628 }
2165 2629
2166 /* Free the page_cpu_valid mappings which are now stale, whether 2630 /* Free the page_cpu_valid mappings which are now stale, whether
@@ -2224,7 +2688,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2224 if (obj_priv->page_cpu_valid[i]) 2688 if (obj_priv->page_cpu_valid[i])
2225 continue; 2689 continue;
2226 2690
2227 drm_clflush_pages(obj_priv->page_list + i, 1); 2691 drm_clflush_pages(obj_priv->pages + i, 1);
2228 2692
2229 obj_priv->page_cpu_valid[i] = 1; 2693 obj_priv->page_cpu_valid[i] = 1;
2230 } 2694 }
@@ -2245,12 +2709,11 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2245static int 2709static int
2246i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 2710i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2247 struct drm_file *file_priv, 2711 struct drm_file *file_priv,
2248 struct drm_i915_gem_exec_object *entry) 2712 struct drm_i915_gem_exec_object *entry,
2713 struct drm_i915_gem_relocation_entry *relocs)
2249{ 2714{
2250 struct drm_device *dev = obj->dev; 2715 struct drm_device *dev = obj->dev;
2251 drm_i915_private_t *dev_priv = dev->dev_private; 2716 drm_i915_private_t *dev_priv = dev->dev_private;
2252 struct drm_i915_gem_relocation_entry reloc;
2253 struct drm_i915_gem_relocation_entry __user *relocs;
2254 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2717 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2255 int i, ret; 2718 int i, ret;
2256 void __iomem *reloc_page; 2719 void __iomem *reloc_page;
@@ -2262,25 +2725,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2262 2725
2263 entry->offset = obj_priv->gtt_offset; 2726 entry->offset = obj_priv->gtt_offset;
2264 2727
2265 relocs = (struct drm_i915_gem_relocation_entry __user *)
2266 (uintptr_t) entry->relocs_ptr;
2267 /* Apply the relocations, using the GTT aperture to avoid cache 2728 /* Apply the relocations, using the GTT aperture to avoid cache
2268 * flushing requirements. 2729 * flushing requirements.
2269 */ 2730 */
2270 for (i = 0; i < entry->relocation_count; i++) { 2731 for (i = 0; i < entry->relocation_count; i++) {
2732 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
2271 struct drm_gem_object *target_obj; 2733 struct drm_gem_object *target_obj;
2272 struct drm_i915_gem_object *target_obj_priv; 2734 struct drm_i915_gem_object *target_obj_priv;
2273 uint32_t reloc_val, reloc_offset; 2735 uint32_t reloc_val, reloc_offset;
2274 uint32_t __iomem *reloc_entry; 2736 uint32_t __iomem *reloc_entry;
2275 2737
2276 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
2277 if (ret != 0) {
2278 i915_gem_object_unpin(obj);
2279 return ret;
2280 }
2281
2282 target_obj = drm_gem_object_lookup(obj->dev, file_priv, 2738 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2283 reloc.target_handle); 2739 reloc->target_handle);
2284 if (target_obj == NULL) { 2740 if (target_obj == NULL) {
2285 i915_gem_object_unpin(obj); 2741 i915_gem_object_unpin(obj);
2286 return -EBADF; 2742 return -EBADF;
@@ -2292,53 +2748,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2292 */ 2748 */
2293 if (target_obj_priv->gtt_space == NULL) { 2749 if (target_obj_priv->gtt_space == NULL) {
2294 DRM_ERROR("No GTT space found for object %d\n", 2750 DRM_ERROR("No GTT space found for object %d\n",
2295 reloc.target_handle); 2751 reloc->target_handle);
2296 drm_gem_object_unreference(target_obj); 2752 drm_gem_object_unreference(target_obj);
2297 i915_gem_object_unpin(obj); 2753 i915_gem_object_unpin(obj);
2298 return -EINVAL; 2754 return -EINVAL;
2299 } 2755 }
2300 2756
2301 if (reloc.offset > obj->size - 4) { 2757 if (reloc->offset > obj->size - 4) {
2302 DRM_ERROR("Relocation beyond object bounds: " 2758 DRM_ERROR("Relocation beyond object bounds: "
2303 "obj %p target %d offset %d size %d.\n", 2759 "obj %p target %d offset %d size %d.\n",
2304 obj, reloc.target_handle, 2760 obj, reloc->target_handle,
2305 (int) reloc.offset, (int) obj->size); 2761 (int) reloc->offset, (int) obj->size);
2306 drm_gem_object_unreference(target_obj); 2762 drm_gem_object_unreference(target_obj);
2307 i915_gem_object_unpin(obj); 2763 i915_gem_object_unpin(obj);
2308 return -EINVAL; 2764 return -EINVAL;
2309 } 2765 }
2310 if (reloc.offset & 3) { 2766 if (reloc->offset & 3) {
2311 DRM_ERROR("Relocation not 4-byte aligned: " 2767 DRM_ERROR("Relocation not 4-byte aligned: "
2312 "obj %p target %d offset %d.\n", 2768 "obj %p target %d offset %d.\n",
2313 obj, reloc.target_handle, 2769 obj, reloc->target_handle,
2314 (int) reloc.offset); 2770 (int) reloc->offset);
2315 drm_gem_object_unreference(target_obj); 2771 drm_gem_object_unreference(target_obj);
2316 i915_gem_object_unpin(obj); 2772 i915_gem_object_unpin(obj);
2317 return -EINVAL; 2773 return -EINVAL;
2318 } 2774 }
2319 2775
2320 if (reloc.write_domain & I915_GEM_DOMAIN_CPU || 2776 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2321 reloc.read_domains & I915_GEM_DOMAIN_CPU) { 2777 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
2322 DRM_ERROR("reloc with read/write CPU domains: " 2778 DRM_ERROR("reloc with read/write CPU domains: "
2323 "obj %p target %d offset %d " 2779 "obj %p target %d offset %d "
2324 "read %08x write %08x", 2780 "read %08x write %08x",
2325 obj, reloc.target_handle, 2781 obj, reloc->target_handle,
2326 (int) reloc.offset, 2782 (int) reloc->offset,
2327 reloc.read_domains, 2783 reloc->read_domains,
2328 reloc.write_domain); 2784 reloc->write_domain);
2329 drm_gem_object_unreference(target_obj); 2785 drm_gem_object_unreference(target_obj);
2330 i915_gem_object_unpin(obj); 2786 i915_gem_object_unpin(obj);
2331 return -EINVAL; 2787 return -EINVAL;
2332 } 2788 }
2333 2789
2334 if (reloc.write_domain && target_obj->pending_write_domain && 2790 if (reloc->write_domain && target_obj->pending_write_domain &&
2335 reloc.write_domain != target_obj->pending_write_domain) { 2791 reloc->write_domain != target_obj->pending_write_domain) {
2336 DRM_ERROR("Write domain conflict: " 2792 DRM_ERROR("Write domain conflict: "
2337 "obj %p target %d offset %d " 2793 "obj %p target %d offset %d "
2338 "new %08x old %08x\n", 2794 "new %08x old %08x\n",
2339 obj, reloc.target_handle, 2795 obj, reloc->target_handle,
2340 (int) reloc.offset, 2796 (int) reloc->offset,
2341 reloc.write_domain, 2797 reloc->write_domain,
2342 target_obj->pending_write_domain); 2798 target_obj->pending_write_domain);
2343 drm_gem_object_unreference(target_obj); 2799 drm_gem_object_unreference(target_obj);
2344 i915_gem_object_unpin(obj); 2800 i915_gem_object_unpin(obj);
@@ -2351,22 +2807,22 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2351 "presumed %08x delta %08x\n", 2807 "presumed %08x delta %08x\n",
2352 __func__, 2808 __func__,
2353 obj, 2809 obj,
2354 (int) reloc.offset, 2810 (int) reloc->offset,
2355 (int) reloc.target_handle, 2811 (int) reloc->target_handle,
2356 (int) reloc.read_domains, 2812 (int) reloc->read_domains,
2357 (int) reloc.write_domain, 2813 (int) reloc->write_domain,
2358 (int) target_obj_priv->gtt_offset, 2814 (int) target_obj_priv->gtt_offset,
2359 (int) reloc.presumed_offset, 2815 (int) reloc->presumed_offset,
2360 reloc.delta); 2816 reloc->delta);
2361#endif 2817#endif
2362 2818
2363 target_obj->pending_read_domains |= reloc.read_domains; 2819 target_obj->pending_read_domains |= reloc->read_domains;
2364 target_obj->pending_write_domain |= reloc.write_domain; 2820 target_obj->pending_write_domain |= reloc->write_domain;
2365 2821
2366 /* If the relocation already has the right value in it, no 2822 /* If the relocation already has the right value in it, no
2367 * more work needs to be done. 2823 * more work needs to be done.
2368 */ 2824 */
2369 if (target_obj_priv->gtt_offset == reloc.presumed_offset) { 2825 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
2370 drm_gem_object_unreference(target_obj); 2826 drm_gem_object_unreference(target_obj);
2371 continue; 2827 continue;
2372 } 2828 }
@@ -2381,32 +2837,26 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2381 /* Map the page containing the relocation we're going to 2837 /* Map the page containing the relocation we're going to
2382 * perform. 2838 * perform.
2383 */ 2839 */
2384 reloc_offset = obj_priv->gtt_offset + reloc.offset; 2840 reloc_offset = obj_priv->gtt_offset + reloc->offset;
2385 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 2841 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2386 (reloc_offset & 2842 (reloc_offset &
2387 ~(PAGE_SIZE - 1))); 2843 ~(PAGE_SIZE - 1)));
2388 reloc_entry = (uint32_t __iomem *)(reloc_page + 2844 reloc_entry = (uint32_t __iomem *)(reloc_page +
2389 (reloc_offset & (PAGE_SIZE - 1))); 2845 (reloc_offset & (PAGE_SIZE - 1)));
2390 reloc_val = target_obj_priv->gtt_offset + reloc.delta; 2846 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
2391 2847
2392#if WATCH_BUF 2848#if WATCH_BUF
2393 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", 2849 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2394 obj, (unsigned int) reloc.offset, 2850 obj, (unsigned int) reloc->offset,
2395 readl(reloc_entry), reloc_val); 2851 readl(reloc_entry), reloc_val);
2396#endif 2852#endif
2397 writel(reloc_val, reloc_entry); 2853 writel(reloc_val, reloc_entry);
2398 io_mapping_unmap_atomic(reloc_page); 2854 io_mapping_unmap_atomic(reloc_page);
2399 2855
2400 /* Write the updated presumed offset for this entry back out 2856 /* The updated presumed offset for this entry will be
2401 * to the user. 2857 * copied back out to the user.
2402 */ 2858 */
2403 reloc.presumed_offset = target_obj_priv->gtt_offset; 2859 reloc->presumed_offset = target_obj_priv->gtt_offset;
2404 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
2405 if (ret != 0) {
2406 drm_gem_object_unreference(target_obj);
2407 i915_gem_object_unpin(obj);
2408 return ret;
2409 }
2410 2860
2411 drm_gem_object_unreference(target_obj); 2861 drm_gem_object_unreference(target_obj);
2412 } 2862 }
@@ -2423,11 +2873,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2423static int 2873static int
2424i915_dispatch_gem_execbuffer(struct drm_device *dev, 2874i915_dispatch_gem_execbuffer(struct drm_device *dev,
2425 struct drm_i915_gem_execbuffer *exec, 2875 struct drm_i915_gem_execbuffer *exec,
2876 struct drm_clip_rect *cliprects,
2426 uint64_t exec_offset) 2877 uint64_t exec_offset)
2427{ 2878{
2428 drm_i915_private_t *dev_priv = dev->dev_private; 2879 drm_i915_private_t *dev_priv = dev->dev_private;
2429 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
2430 (uintptr_t) exec->cliprects_ptr;
2431 int nbox = exec->num_cliprects; 2880 int nbox = exec->num_cliprects;
2432 int i = 0, count; 2881 int i = 0, count;
2433 uint32_t exec_start, exec_len; 2882 uint32_t exec_start, exec_len;
@@ -2448,7 +2897,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
2448 2897
2449 for (i = 0; i < count; i++) { 2898 for (i = 0; i < count; i++) {
2450 if (i < nbox) { 2899 if (i < nbox) {
2451 int ret = i915_emit_box(dev, boxes, i, 2900 int ret = i915_emit_box(dev, cliprects, i,
2452 exec->DR1, exec->DR4); 2901 exec->DR1, exec->DR4);
2453 if (ret) 2902 if (ret)
2454 return ret; 2903 return ret;
@@ -2504,6 +2953,75 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
2504 return ret; 2953 return ret;
2505} 2954}
2506 2955
2956static int
2957i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
2958 uint32_t buffer_count,
2959 struct drm_i915_gem_relocation_entry **relocs)
2960{
2961 uint32_t reloc_count = 0, reloc_index = 0, i;
2962 int ret;
2963
2964 *relocs = NULL;
2965 for (i = 0; i < buffer_count; i++) {
2966 if (reloc_count + exec_list[i].relocation_count < reloc_count)
2967 return -EINVAL;
2968 reloc_count += exec_list[i].relocation_count;
2969 }
2970
2971 *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER);
2972 if (*relocs == NULL)
2973 return -ENOMEM;
2974
2975 for (i = 0; i < buffer_count; i++) {
2976 struct drm_i915_gem_relocation_entry __user *user_relocs;
2977
2978 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
2979
2980 ret = copy_from_user(&(*relocs)[reloc_index],
2981 user_relocs,
2982 exec_list[i].relocation_count *
2983 sizeof(**relocs));
2984 if (ret != 0) {
2985 drm_free(*relocs, reloc_count * sizeof(**relocs),
2986 DRM_MEM_DRIVER);
2987 *relocs = NULL;
2988 return ret;
2989 }
2990
2991 reloc_index += exec_list[i].relocation_count;
2992 }
2993
 2994 return 0;
2995}
2996
2997static int
2998i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
2999 uint32_t buffer_count,
3000 struct drm_i915_gem_relocation_entry *relocs)
3001{
3002 uint32_t reloc_count = 0, i;
3003 int ret;
3004
3005 for (i = 0; i < buffer_count; i++) {
3006 struct drm_i915_gem_relocation_entry __user *user_relocs;
3007
3008 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3009
3010 if (ret == 0) {
3011 ret = copy_to_user(user_relocs,
3012 &relocs[reloc_count],
3013 exec_list[i].relocation_count *
3014 sizeof(*relocs));
3015 }
3016
3017 reloc_count += exec_list[i].relocation_count;
3018 }
3019
3020 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
3021
3022 return ret;
3023}
3024
2507int 3025int
2508i915_gem_execbuffer(struct drm_device *dev, void *data, 3026i915_gem_execbuffer(struct drm_device *dev, void *data,
2509 struct drm_file *file_priv) 3027 struct drm_file *file_priv)
@@ -2515,9 +3033,11 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2515 struct drm_gem_object **object_list = NULL; 3033 struct drm_gem_object **object_list = NULL;
2516 struct drm_gem_object *batch_obj; 3034 struct drm_gem_object *batch_obj;
2517 struct drm_i915_gem_object *obj_priv; 3035 struct drm_i915_gem_object *obj_priv;
2518 int ret, i, pinned = 0; 3036 struct drm_clip_rect *cliprects = NULL;
3037 struct drm_i915_gem_relocation_entry *relocs;
3038 int ret, ret2, i, pinned = 0;
2519 uint64_t exec_offset; 3039 uint64_t exec_offset;
2520 uint32_t seqno, flush_domains; 3040 uint32_t seqno, flush_domains, reloc_index;
2521 int pin_tries; 3041 int pin_tries;
2522 3042
2523#if WATCH_EXEC 3043#if WATCH_EXEC
@@ -2551,6 +3071,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2551 goto pre_mutex_err; 3071 goto pre_mutex_err;
2552 } 3072 }
2553 3073
3074 if (args->num_cliprects != 0) {
3075 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3076 DRM_MEM_DRIVER);
3077 if (cliprects == NULL)
3078 goto pre_mutex_err;
3079
3080 ret = copy_from_user(cliprects,
3081 (struct drm_clip_rect __user *)
3082 (uintptr_t) args->cliprects_ptr,
3083 sizeof(*cliprects) * args->num_cliprects);
3084 if (ret != 0) {
3085 DRM_ERROR("copy %d cliprects failed: %d\n",
3086 args->num_cliprects, ret);
3087 goto pre_mutex_err;
3088 }
3089 }
3090
3091 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3092 &relocs);
3093 if (ret != 0)
3094 goto pre_mutex_err;
3095
2554 mutex_lock(&dev->struct_mutex); 3096 mutex_lock(&dev->struct_mutex);
2555 3097
2556 i915_verify_inactive(dev, __FILE__, __LINE__); 3098 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2593,15 +3135,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2593 /* Pin and relocate */ 3135 /* Pin and relocate */
2594 for (pin_tries = 0; ; pin_tries++) { 3136 for (pin_tries = 0; ; pin_tries++) {
2595 ret = 0; 3137 ret = 0;
3138 reloc_index = 0;
3139
2596 for (i = 0; i < args->buffer_count; i++) { 3140 for (i = 0; i < args->buffer_count; i++) {
2597 object_list[i]->pending_read_domains = 0; 3141 object_list[i]->pending_read_domains = 0;
2598 object_list[i]->pending_write_domain = 0; 3142 object_list[i]->pending_write_domain = 0;
2599 ret = i915_gem_object_pin_and_relocate(object_list[i], 3143 ret = i915_gem_object_pin_and_relocate(object_list[i],
2600 file_priv, 3144 file_priv,
2601 &exec_list[i]); 3145 &exec_list[i],
3146 &relocs[reloc_index]);
2602 if (ret) 3147 if (ret)
2603 break; 3148 break;
2604 pinned = i + 1; 3149 pinned = i + 1;
3150 reloc_index += exec_list[i].relocation_count;
2605 } 3151 }
2606 /* success */ 3152 /* success */
2607 if (ret == 0) 3153 if (ret == 0)
@@ -2687,7 +3233,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2687#endif 3233#endif
2688 3234
2689 /* Exec the batchbuffer */ 3235 /* Exec the batchbuffer */
2690 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 3236 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
2691 if (ret) { 3237 if (ret) {
2692 DRM_ERROR("dispatch failed %d\n", ret); 3238 DRM_ERROR("dispatch failed %d\n", ret);
2693 goto err; 3239 goto err;
@@ -2751,11 +3297,27 @@ err:
2751 args->buffer_count, ret); 3297 args->buffer_count, ret);
2752 } 3298 }
2753 3299
3300 /* Copy the updated relocations out regardless of current error
3301 * state. Failure to update the relocs would mean that the next
3302 * time userland calls execbuf, it would do so with presumed offset
3303 * state that didn't match the actual object state.
3304 */
3305 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3306 relocs);
3307 if (ret2 != 0) {
3308 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3309
3310 if (ret == 0)
3311 ret = ret2;
3312 }
3313
2754pre_mutex_err: 3314pre_mutex_err:
2755 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 3315 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2756 DRM_MEM_DRIVER); 3316 DRM_MEM_DRIVER);
2757 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, 3317 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2758 DRM_MEM_DRIVER); 3318 DRM_MEM_DRIVER);
3319 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3320 DRM_MEM_DRIVER);
2759 3321
2760 return ret; 3322 return ret;
2761} 3323}
@@ -3192,7 +3754,7 @@ i915_gem_init_hws(struct drm_device *dev)
3192 3754
3193 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 3755 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3194 3756
3195 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); 3757 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
3196 if (dev_priv->hw_status_page == NULL) { 3758 if (dev_priv->hw_status_page == NULL) {
3197 DRM_ERROR("Failed to map status page.\n"); 3759 DRM_ERROR("Failed to map status page.\n");
3198 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 3760 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -3222,7 +3784,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
3222 obj = dev_priv->hws_obj; 3784 obj = dev_priv->hws_obj;
3223 obj_priv = obj->driver_private; 3785 obj_priv = obj->driver_private;
3224 3786
3225 kunmap(obj_priv->page_list[0]); 3787 kunmap(obj_priv->pages[0]);
3226 i915_gem_object_unpin(obj); 3788 i915_gem_object_unpin(obj);
3227 drm_gem_object_unreference(obj); 3789 drm_gem_object_unreference(obj);
3228 dev_priv->hws_obj = NULL; 3790 dev_priv->hws_obj = NULL;
@@ -3525,20 +4087,20 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
3525 if (!obj_priv->phys_obj) 4087 if (!obj_priv->phys_obj)
3526 return; 4088 return;
3527 4089
3528 ret = i915_gem_object_get_page_list(obj); 4090 ret = i915_gem_object_get_pages(obj);
3529 if (ret) 4091 if (ret)
3530 goto out; 4092 goto out;
3531 4093
3532 page_count = obj->size / PAGE_SIZE; 4094 page_count = obj->size / PAGE_SIZE;
3533 4095
3534 for (i = 0; i < page_count; i++) { 4096 for (i = 0; i < page_count; i++) {
3535 char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4097 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
3536 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4098 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3537 4099
3538 memcpy(dst, src, PAGE_SIZE); 4100 memcpy(dst, src, PAGE_SIZE);
3539 kunmap_atomic(dst, KM_USER0); 4101 kunmap_atomic(dst, KM_USER0);
3540 } 4102 }
3541 drm_clflush_pages(obj_priv->page_list, page_count); 4103 drm_clflush_pages(obj_priv->pages, page_count);
3542 drm_agp_chipset_flush(dev); 4104 drm_agp_chipset_flush(dev);
3543out: 4105out:
3544 obj_priv->phys_obj->cur_obj = NULL; 4106 obj_priv->phys_obj->cur_obj = NULL;
@@ -3581,7 +4143,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3581 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 4143 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3582 obj_priv->phys_obj->cur_obj = obj; 4144 obj_priv->phys_obj->cur_obj = obj;
3583 4145
3584 ret = i915_gem_object_get_page_list(obj); 4146 ret = i915_gem_object_get_pages(obj);
3585 if (ret) { 4147 if (ret) {
3586 DRM_ERROR("failed to get page list\n"); 4148 DRM_ERROR("failed to get page list\n");
3587 goto out; 4149 goto out;
@@ -3590,7 +4152,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
3590 page_count = obj->size / PAGE_SIZE; 4152 page_count = obj->size / PAGE_SIZE;
3591 4153
3592 for (i = 0; i < page_count; i++) { 4154 for (i = 0; i < page_count; i++) {
3593 char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0); 4155 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
3594 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 4156 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3595 4157
3596 memcpy(dst, src, PAGE_SIZE); 4158 memcpy(dst, src, PAGE_SIZE);
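
The i915_gem.c changes above replace the per-entry copy_from_user()/copy_to_user() calls inside the relocation loop with one bulk copy in each direction: all relocation entries are gathered into a single kernel allocation before struct_mutex is taken, and written back once on the way out. A minimal userspace sketch of the gather half of that pattern follows; the struct layouts and names are invented stand-ins, with calloc()/memcpy() standing in for drm_calloc()/copy_from_user():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the DRM execbuffer structures. */
struct reloc_entry { uint64_t offset, presumed_offset; };
struct exec_object { const struct reloc_entry *relocs; uint32_t relocation_count; };

/* Gather every buffer's relocations into one contiguous array so the
 * pin/relocate loop never has to touch user memory itself.
 * Returns 0 on success, -1 on counter wrap or allocation failure. */
static int gather_relocs(const struct exec_object *list, uint32_t nbuf,
                         struct reloc_entry **out, uint32_t *total)
{
	uint32_t count = 0, index = 0, i;
	struct reloc_entry *relocs;

	for (i = 0; i < nbuf; i++) {
		if (count + list[i].relocation_count < count)	/* wrap check */
			return -1;
		count += list[i].relocation_count;
	}

	relocs = calloc(count ? count : 1, sizeof(*relocs));
	if (relocs == NULL)
		return -1;

	for (i = 0; i < nbuf; i++) {
		memcpy(&relocs[index], list[i].relocs,
		       list[i].relocation_count * sizeof(*relocs));
		index += list[i].relocation_count;
	}

	*out = relocs;
	*total = count;
	return 0;
}

The up-front overflow check matters because the per-buffer counts come from userspace; summing untrusted 32-bit values can wrap, so a sum that decreases is rejected before it can undersize the allocation.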
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
new file mode 100644
index 000000000000..455ec970b385
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include <linux/seq_file.h>
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34
35#define DRM_I915_RING_DEBUG 1
36
37
38#if defined(CONFIG_DEBUG_FS)
39
40#define ACTIVE_LIST 1
41#define FLUSHING_LIST 2
42#define INACTIVE_LIST 3
43
44static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
45{
46 if (obj_priv->user_pin_count > 0)
47 return "P";
48 else if (obj_priv->pin_count > 0)
49 return "p";
50 else
51 return " ";
52}
53
54static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
55{
56 switch (obj_priv->tiling_mode) {
57 default:
58 case I915_TILING_NONE: return " ";
59 case I915_TILING_X: return "X";
60 case I915_TILING_Y: return "Y";
61 }
62}
63
64static int i915_gem_object_list_info(struct seq_file *m, void *data)
65{
66 struct drm_info_node *node = (struct drm_info_node *) m->private;
67 uintptr_t list = (uintptr_t) node->info_ent->data;
68 struct list_head *head;
69 struct drm_device *dev = node->minor->dev;
70 drm_i915_private_t *dev_priv = dev->dev_private;
71 struct drm_i915_gem_object *obj_priv;
72
73 switch (list) {
74 case ACTIVE_LIST:
75 seq_printf(m, "Active:\n");
76 head = &dev_priv->mm.active_list;
77 break;
78 case INACTIVE_LIST:
 79 seq_printf(m, "Inactive:\n");
80 head = &dev_priv->mm.inactive_list;
81 break;
82 case FLUSHING_LIST:
83 seq_printf(m, "Flushing:\n");
84 head = &dev_priv->mm.flushing_list;
85 break;
86 default:
 87 DRM_INFO("Oops, unexpected list\n");
88 return 0;
89 }
90
91 list_for_each_entry(obj_priv, head, list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94
95 seq_printf(m, " %p: %s %08x %08x %d",
96 obj,
97 get_pin_flag(obj_priv),
98 obj->read_domains, obj->write_domain,
99 obj_priv->last_rendering_seqno);
100
101 if (obj->name)
102 seq_printf(m, " (name: %d)", obj->name);
103 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
 104 seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
105 seq_printf(m, "\n");
106 }
107 return 0;
108}
109
110static int i915_gem_request_info(struct seq_file *m, void *data)
111{
112 struct drm_info_node *node = (struct drm_info_node *) m->private;
113 struct drm_device *dev = node->minor->dev;
114 drm_i915_private_t *dev_priv = dev->dev_private;
115 struct drm_i915_gem_request *gem_request;
116
117 seq_printf(m, "Request:\n");
118 list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
119 seq_printf(m, " %d @ %d\n",
120 gem_request->seqno,
121 (int) (jiffies - gem_request->emitted_jiffies));
122 }
123 return 0;
124}
125
126static int i915_gem_seqno_info(struct seq_file *m, void *data)
127{
128 struct drm_info_node *node = (struct drm_info_node *) m->private;
129 struct drm_device *dev = node->minor->dev;
130 drm_i915_private_t *dev_priv = dev->dev_private;
131
132 if (dev_priv->hw_status_page != NULL) {
133 seq_printf(m, "Current sequence: %d\n",
134 i915_get_gem_seqno(dev));
135 } else {
136 seq_printf(m, "Current sequence: hws uninitialized\n");
137 }
138 seq_printf(m, "Waiter sequence: %d\n",
139 dev_priv->mm.waiting_gem_seqno);
140 seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
141 return 0;
142}
143
144
145static int i915_interrupt_info(struct seq_file *m, void *data)
146{
147 struct drm_info_node *node = (struct drm_info_node *) m->private;
148 struct drm_device *dev = node->minor->dev;
149 drm_i915_private_t *dev_priv = dev->dev_private;
150
151 seq_printf(m, "Interrupt enable: %08x\n",
152 I915_READ(IER));
153 seq_printf(m, "Interrupt identity: %08x\n",
154 I915_READ(IIR));
155 seq_printf(m, "Interrupt mask: %08x\n",
156 I915_READ(IMR));
157 seq_printf(m, "Pipe A stat: %08x\n",
158 I915_READ(PIPEASTAT));
159 seq_printf(m, "Pipe B stat: %08x\n",
160 I915_READ(PIPEBSTAT));
161 seq_printf(m, "Interrupts received: %d\n",
162 atomic_read(&dev_priv->irq_received));
163 if (dev_priv->hw_status_page != NULL) {
164 seq_printf(m, "Current sequence: %d\n",
165 i915_get_gem_seqno(dev));
166 } else {
167 seq_printf(m, "Current sequence: hws uninitialized\n");
168 }
169 seq_printf(m, "Waiter sequence: %d\n",
170 dev_priv->mm.waiting_gem_seqno);
171 seq_printf(m, "IRQ sequence: %d\n",
172 dev_priv->mm.irq_gem_seqno);
173 return 0;
174}
175
176static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
177{
178 struct drm_info_node *node = (struct drm_info_node *) m->private;
179 struct drm_device *dev = node->minor->dev;
180 drm_i915_private_t *dev_priv = dev->dev_private;
181 int i;
182
183 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
184 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
185 for (i = 0; i < dev_priv->num_fence_regs; i++) {
186 struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
187
188 if (obj == NULL) {
189 seq_printf(m, "Fenced object[%2d] = unused\n", i);
190 } else {
191 struct drm_i915_gem_object *obj_priv;
192
193 obj_priv = obj->driver_private;
194 seq_printf(m, "Fenced object[%2d] = %p: %s "
195 "%08x %08zx %08x %s %08x %08x %d",
196 i, obj, get_pin_flag(obj_priv),
197 obj_priv->gtt_offset,
198 obj->size, obj_priv->stride,
199 get_tiling_flag(obj_priv),
200 obj->read_domains, obj->write_domain,
201 obj_priv->last_rendering_seqno);
202 if (obj->name)
203 seq_printf(m, " (name: %d)", obj->name);
204 seq_printf(m, "\n");
205 }
206 }
207
208 return 0;
209}
210
211static int i915_hws_info(struct seq_file *m, void *data)
212{
213 struct drm_info_node *node = (struct drm_info_node *) m->private;
214 struct drm_device *dev = node->minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int i;
217 volatile u32 *hws;
218
219 hws = (volatile u32 *)dev_priv->hw_status_page;
220 if (hws == NULL)
221 return 0;
222
223 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
224 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
225 i * 4,
226 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
227 }
228 return 0;
229}
230
231static struct drm_info_list i915_gem_debugfs_list[] = {
232 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
233 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
234 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
235 {"i915_gem_request", i915_gem_request_info, 0},
236 {"i915_gem_seqno", i915_gem_seqno_info, 0},
237 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
238 {"i915_gem_interrupt", i915_interrupt_info, 0},
239 {"i915_gem_hws", i915_hws_info, 0},
240};
241#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
242
243int i915_gem_debugfs_init(struct drm_minor *minor)
244{
245 return drm_debugfs_create_files(i915_gem_debugfs_list,
246 I915_GEM_DEBUGFS_ENTRIES,
247 minor->debugfs_root, minor);
248}
249
250void i915_gem_debugfs_cleanup(struct drm_minor *minor)
251{
252 drm_debugfs_remove_files(i915_gem_debugfs_list,
253 I915_GEM_DEBUGFS_ENTRIES, minor);
254}
255
256#endif /* CONFIG_DEBUG_FS */
257
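
drm_debugfs_create_files() used above is a thin wrapper over the kernel's generic debugfs and seq_file machinery. For orientation, here is a hedged sketch of the same pattern without the DRM layer; the "demo" names and the output line are invented, but the calls are the stock debugfs/seq_file interface:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *unused)
{
	seq_printf(m, "hello from debugfs\n");	/* one-shot dump */
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner	 = THIS_MODULE,
	.open	 = demo_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("state", 0444, demo_dir, NULL, &demo_fops);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Each debugfs entry reduces to one show() callback; seq_file takes care of buffering, offsets, and EOF, which is exactly the bookkeeping the proc version deleted below had to do by hand.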
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
deleted file mode 100644
index 4d1b9de0cd8b..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ /dev/null
@@ -1,334 +0,0 @@
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Keith Packard <keithp@keithp.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34static int i915_gem_active_info(char *buf, char **start, off_t offset,
35 int request, int *eof, void *data)
36{
37 struct drm_minor *minor = (struct drm_minor *) data;
38 struct drm_device *dev = minor->dev;
39 drm_i915_private_t *dev_priv = dev->dev_private;
40 struct drm_i915_gem_object *obj_priv;
41 int len = 0;
42
43 if (offset > DRM_PROC_LIMIT) {
44 *eof = 1;
45 return 0;
46 }
47
48 *start = &buf[offset];
49 *eof = 0;
50 DRM_PROC_PRINT("Active:\n");
51 list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
52 list)
53 {
54 struct drm_gem_object *obj = obj_priv->obj;
55 if (obj->name) {
56 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
57 obj, obj->name,
58 obj->read_domains, obj->write_domain,
59 obj_priv->last_rendering_seqno);
60 } else {
61 DRM_PROC_PRINT(" %p: %08x %08x %d\n",
62 obj,
63 obj->read_domains, obj->write_domain,
64 obj_priv->last_rendering_seqno);
65 }
66 }
67 if (len > request + offset)
68 return request;
69 *eof = 1;
70 return len - offset;
71}
72
73static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
74 int request, int *eof, void *data)
75{
76 struct drm_minor *minor = (struct drm_minor *) data;
77 struct drm_device *dev = minor->dev;
78 drm_i915_private_t *dev_priv = dev->dev_private;
79 struct drm_i915_gem_object *obj_priv;
80 int len = 0;
81
82 if (offset > DRM_PROC_LIMIT) {
83 *eof = 1;
84 return 0;
85 }
86
87 *start = &buf[offset];
88 *eof = 0;
89 DRM_PROC_PRINT("Flushing:\n");
90 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
91 list)
92 {
93 struct drm_gem_object *obj = obj_priv->obj;
94 if (obj->name) {
95 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
96 obj, obj->name,
97 obj->read_domains, obj->write_domain,
98 obj_priv->last_rendering_seqno);
99 } else {
100 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
101 obj->read_domains, obj->write_domain,
102 obj_priv->last_rendering_seqno);
103 }
104 }
105 if (len > request + offset)
106 return request;
107 *eof = 1;
108 return len - offset;
109}
110
111static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
112 int request, int *eof, void *data)
113{
114 struct drm_minor *minor = (struct drm_minor *) data;
115 struct drm_device *dev = minor->dev;
116 drm_i915_private_t *dev_priv = dev->dev_private;
117 struct drm_i915_gem_object *obj_priv;
118 int len = 0;
119
120 if (offset > DRM_PROC_LIMIT) {
121 *eof = 1;
122 return 0;
123 }
124
125 *start = &buf[offset];
126 *eof = 0;
127 DRM_PROC_PRINT("Inactive:\n");
128 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
129 list)
130 {
131 struct drm_gem_object *obj = obj_priv->obj;
132 if (obj->name) {
133 DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
134 obj, obj->name,
135 obj->read_domains, obj->write_domain,
136 obj_priv->last_rendering_seqno);
137 } else {
138 DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
139 obj->read_domains, obj->write_domain,
140 obj_priv->last_rendering_seqno);
141 }
142 }
143 if (len > request + offset)
144 return request;
145 *eof = 1;
146 return len - offset;
147}
148
149static int i915_gem_request_info(char *buf, char **start, off_t offset,
150 int request, int *eof, void *data)
151{
152 struct drm_minor *minor = (struct drm_minor *) data;
153 struct drm_device *dev = minor->dev;
154 drm_i915_private_t *dev_priv = dev->dev_private;
155 struct drm_i915_gem_request *gem_request;
156 int len = 0;
157
158 if (offset > DRM_PROC_LIMIT) {
159 *eof = 1;
160 return 0;
161 }
162
163 *start = &buf[offset];
164 *eof = 0;
165 DRM_PROC_PRINT("Request:\n");
166 list_for_each_entry(gem_request, &dev_priv->mm.request_list,
167 list)
168 {
169 DRM_PROC_PRINT(" %d @ %d\n",
170 gem_request->seqno,
171 (int) (jiffies - gem_request->emitted_jiffies));
172 }
173 if (len > request + offset)
174 return request;
175 *eof = 1;
176 return len - offset;
177}
178
179static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
180 int request, int *eof, void *data)
181{
182 struct drm_minor *minor = (struct drm_minor *) data;
183 struct drm_device *dev = minor->dev;
184 drm_i915_private_t *dev_priv = dev->dev_private;
185 int len = 0;
186
187 if (offset > DRM_PROC_LIMIT) {
188 *eof = 1;
189 return 0;
190 }
191
192 *start = &buf[offset];
193 *eof = 0;
194 if (dev_priv->hw_status_page != NULL) {
195 DRM_PROC_PRINT("Current sequence: %d\n",
196 i915_get_gem_seqno(dev));
197 } else {
198 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
199 }
200 DRM_PROC_PRINT("Waiter sequence: %d\n",
201 dev_priv->mm.waiting_gem_seqno);
202 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
203 if (len > request + offset)
204 return request;
205 *eof = 1;
206 return len - offset;
207}
208
209
210static int i915_interrupt_info(char *buf, char **start, off_t offset,
211 int request, int *eof, void *data)
212{
213 struct drm_minor *minor = (struct drm_minor *) data;
214 struct drm_device *dev = minor->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private;
216 int len = 0;
217
218 if (offset > DRM_PROC_LIMIT) {
219 *eof = 1;
220 return 0;
221 }
222
223 *start = &buf[offset];
224 *eof = 0;
225 DRM_PROC_PRINT("Interrupt enable: %08x\n",
226 I915_READ(IER));
227 DRM_PROC_PRINT("Interrupt identity: %08x\n",
228 I915_READ(IIR));
229 DRM_PROC_PRINT("Interrupt mask: %08x\n",
230 I915_READ(IMR));
231 DRM_PROC_PRINT("Pipe A stat: %08x\n",
232 I915_READ(PIPEASTAT));
233 DRM_PROC_PRINT("Pipe B stat: %08x\n",
234 I915_READ(PIPEBSTAT));
235 DRM_PROC_PRINT("Interrupts received: %d\n",
236 atomic_read(&dev_priv->irq_received));
237 if (dev_priv->hw_status_page != NULL) {
238 DRM_PROC_PRINT("Current sequence: %d\n",
239 i915_get_gem_seqno(dev));
240 } else {
241 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
242 }
243 DRM_PROC_PRINT("Waiter sequence: %d\n",
244 dev_priv->mm.waiting_gem_seqno);
245 DRM_PROC_PRINT("IRQ sequence: %d\n",
246 dev_priv->mm.irq_gem_seqno);
247 if (len > request + offset)
248 return request;
249 *eof = 1;
250 return len - offset;
251}
252
253static int i915_hws_info(char *buf, char **start, off_t offset,
254 int request, int *eof, void *data)
255{
256 struct drm_minor *minor = (struct drm_minor *) data;
257 struct drm_device *dev = minor->dev;
258 drm_i915_private_t *dev_priv = dev->dev_private;
259 int len = 0, i;
260 volatile u32 *hws;
261
262 if (offset > DRM_PROC_LIMIT) {
263 *eof = 1;
264 return 0;
265 }
266
267 hws = (volatile u32 *)dev_priv->hw_status_page;
268 if (hws == NULL) {
269 *eof = 1;
270 return 0;
271 }
272
273 *start = &buf[offset];
274 *eof = 0;
275 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
276 DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
277 i * 4,
278 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
279 }
280 if (len > request + offset)
281 return request;
282 *eof = 1;
283 return len - offset;
284}
285
286static struct drm_proc_list {
287 /** file name */
288 const char *name;
289 /** proc callback*/
290 int (*f) (char *, char **, off_t, int, int *, void *);
291} i915_gem_proc_list[] = {
292 {"i915_gem_active", i915_gem_active_info},
293 {"i915_gem_flushing", i915_gem_flushing_info},
294 {"i915_gem_inactive", i915_gem_inactive_info},
295 {"i915_gem_request", i915_gem_request_info},
296 {"i915_gem_seqno", i915_gem_seqno_info},
297 {"i915_gem_interrupt", i915_interrupt_info},
298 {"i915_gem_hws", i915_hws_info},
299};
300
301#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
302
303int i915_gem_proc_init(struct drm_minor *minor)
304{
305 struct proc_dir_entry *ent;
306 int i, j;
307
308 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
309 ent = create_proc_entry(i915_gem_proc_list[i].name,
310 S_IFREG | S_IRUGO, minor->dev_root);
311 if (!ent) {
312 DRM_ERROR("Cannot create /proc/dri/.../%s\n",
313 i915_gem_proc_list[i].name);
314 for (j = 0; j < i; j++)
315 remove_proc_entry(i915_gem_proc_list[i].name,
316 minor->dev_root);
317 return -1;
318 }
319 ent->read_proc = i915_gem_proc_list[i].f;
320 ent->data = minor;
321 }
322 return 0;
323}
324
325void i915_gem_proc_cleanup(struct drm_minor *minor)
326{
327 int i;
328
329 if (!minor->dev_root)
330 return;
331
332 for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
333 remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
334}
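
The deleted file used the legacy create_proc_entry()/read_proc interface, whose hand-rolled offset and EOF bookkeeping shows up at the tail of every handler above. A hedged sketch of that contract in isolation (the payload string is hypothetical):

#include <linux/kernel.h>
#include <linux/mm.h>

/* Legacy read_proc contract, as used by the code above:
 *   buf     - one page the caller wants filled
 *   *start  - where the data for this read actually begins
 *   offset  - bytes already consumed by earlier reads
 *   request - bytes the caller can accept this time
 *   *eof    - set to 1 when nothing further will be produced
 * Return value: number of bytes available from *start onward. */
static int legacy_read(char *buf, char **start, off_t offset,
		       int request, int *eof, void *data)
{
	int len = snprintf(buf, PAGE_SIZE, "example payload\n");

	if (len <= offset) {		/* everything already delivered */
		*eof = 1;
		return 0;
	}
	*start = buf + offset;		/* hand back only the unread tail */
	if (len - offset > request)
		return request;
	*eof = 1;
	return len - offset;
}

Getting this arithmetic wrong silently truncates or duplicates output once it exceeds a page, which is a large part of why the replacement above moved to seq_file.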
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7fb4191ef934..4cce1aef438e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
96 */ 96 */
97 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 97 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
98 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 98 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
99 } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || 99 } else if (IS_MOBILE(dev)) {
100 IS_GM45(dev)) {
101 uint32_t dcc; 100 uint32_t dcc;
102 101
103 /* On 915-945 and GM965, channel interleave by the CPU is 102 /* On mobile 9xx chipsets, channel interleave by the CPU is
104 * determined by DCC. The CPU will alternate based on bit 6 103 * determined by DCC. For single-channel, neither the CPU
105 * in interleaved mode, and the GPU will then also alternate 104 * nor the GPU do swizzling. For dual channel interleaved,
106 * on bit 6, 9, and 10 for X, but the CPU may also optionally 105 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
107 * alternate based on bit 17 (XOR not disabled and XOR 106 * 9 for Y tiled. The CPU's interleave is independent, and
108 * bit == 17). 107 * can be based on either bit 11 (haven't seen this yet) or
108 * bit 17 (common).
109 */ 109 */
110 dcc = I915_READ(DCC); 110 dcc = I915_READ(DCC);
111 switch (dcc & DCC_ADDRESSING_MODE_MASK) { 111 switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
115 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 115 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
116 break; 116 break;
117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: 117 case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
118 if (IS_I915G(dev) || IS_I915GM(dev) || 118 if (dcc & DCC_CHANNEL_XOR_DISABLE) {
119 dcc & DCC_CHANNEL_XOR_DISABLE) { 119 /* This is the base swizzling by the GPU for
120 * tiled buffers.
121 */
120 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 122 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
121 swizzle_y = I915_BIT_6_SWIZZLE_9; 123 swizzle_y = I915_BIT_6_SWIZZLE_9;
122 } else if ((IS_I965GM(dev) || IS_GM45(dev)) && 124 } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
123 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { 125 /* Bit 11 swizzling by the CPU in addition. */
124 /* GM965/GM45 does either bit 11 or bit 17
125 * swizzling.
126 */
127 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 126 swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
128 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 127 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
129 } else { 128 } else {
130 /* Bit 17 or perhaps other swizzling */ 129 /* Bit 17 swizzling by the CPU in addition. */
131 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 130 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
132 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 131 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
133 } 132 }
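
The detection logic above only records which address bits the memory controller XORs into bit 6; anything that accesses tiled buffers through the CPU must then apply the same transform per access. A sketch of how a detected swizzle mode is typically applied to a linear offset, mirroring the well-known userspace detiling idiom (the enum names are illustrative, not the kernel's):

#include <stdint.h>

enum bit6_swizzle { SWIZZLE_NONE, SWIZZLE_9, SWIZZLE_9_10, SWIZZLE_9_10_11 };

/* XOR the selected high address bits into bit 6, which is what the
 * dual-channel interleave does behind the CPU's back. */
static uint32_t apply_bit6_swizzle(uint32_t offset, enum bit6_swizzle mode)
{
	uint32_t bit6 = 0;

	switch (mode) {
	case SWIZZLE_9:
		bit6 = (offset >> 9) & 1;
		break;
	case SWIZZLE_9_10:
		bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;
		break;
	case SWIZZLE_9_10_11:
		bit6 = ((offset >> 9) ^ (offset >> 10) ^ (offset >> 11)) & 1;
		break;
	default:
		break;
	}
	return offset ^ (bit6 << 6);
}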
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 90600d899413..377cc588f5e9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -359,6 +359,7 @@
359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 359#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 360#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 361#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
362#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
362 363
363#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 364#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
364#define I915_CRC_ERROR_ENABLE (1UL<<29) 365#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -435,6 +436,7 @@
435 */ 436 */
436#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 437#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
437#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 438#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
439#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
438/* i830, required in DVO non-gang */ 440/* i830, required in DVO non-gang */
439#define PLL_P2_DIVIDE_BY_4 (1 << 23) 441#define PLL_P2_DIVIDE_BY_4 (1 << 23)
440#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 442#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -501,10 +503,12 @@
501#define FPB0 0x06048 503#define FPB0 0x06048
502#define FPB1 0x0604c 504#define FPB1 0x0604c
503#define FP_N_DIV_MASK 0x003f0000 505#define FP_N_DIV_MASK 0x003f0000
506#define FP_N_IGD_DIV_MASK 0x00ff0000
504#define FP_N_DIV_SHIFT 16 507#define FP_N_DIV_SHIFT 16
505#define FP_M1_DIV_MASK 0x00003f00 508#define FP_M1_DIV_MASK 0x00003f00
506#define FP_M1_DIV_SHIFT 8 509#define FP_M1_DIV_SHIFT 8
507#define FP_M2_DIV_MASK 0x0000003f 510#define FP_M2_DIV_MASK 0x0000003f
511#define FP_M2_IGD_DIV_MASK 0x000000ff
508#define FP_M2_DIV_SHIFT 0 512#define FP_M2_DIV_SHIFT 0
509#define DPLL_TEST 0x606c 513#define DPLL_TEST 0x606c
510#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 514#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -629,6 +633,22 @@
629#define TV_HOTPLUG_INT_EN (1 << 18) 633#define TV_HOTPLUG_INT_EN (1 << 18)
630#define CRT_HOTPLUG_INT_EN (1 << 9) 634#define CRT_HOTPLUG_INT_EN (1 << 9)
631#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 635#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
636#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
637/* must use period 64 on GM45 according to docs */
638#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
639#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
640#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
641#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
642#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
643#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
644#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
645#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
646#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
647#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
648#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
649#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
650#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
651
632 652
633#define PORT_HOTPLUG_STAT 0x61114 653#define PORT_HOTPLUG_STAT 0x61114
634#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 654#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -856,7 +876,7 @@
856 */ 876 */
857# define TV_ENC_C0_FIX (1 << 10) 877# define TV_ENC_C0_FIX (1 << 10)
858/** Bits that must be preserved by software */ 878/** Bits that must be preserved by software */
859# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) 879# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
860# define TV_FUSE_STATE_MASK (3 << 4) 880# define TV_FUSE_STATE_MASK (3 << 4)
861/** Read-only state that reports all features enabled */ 881/** Read-only state that reports all features enabled */
862# define TV_FUSE_STATE_ENABLED (0 << 4) 882# define TV_FUSE_STATE_ENABLED (0 << 4)
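
The new *_IGD definitions follow the header's usual mask-plus-shift convention; IGD widens the N and M2 fields and moves the P1 post divider down one bit. A small sketch of decoding an FP register with these masks (the register value is made up):

#include <stdint.h>
#include <stdio.h>

#define FP_N_IGD_DIV_MASK	0x00ff0000
#define FP_N_DIV_SHIFT		16
#define FP_M2_IGD_DIV_MASK	0x000000ff
#define FP_M2_DIV_SHIFT		0

int main(void)
{
	uint32_t fp = 0x00230041;	/* hypothetical FPA0 readback */
	uint32_t n  = (fp & FP_N_IGD_DIV_MASK)  >> FP_N_DIV_SHIFT;
	uint32_t m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;

	printf("N=%u M2=%u\n", (unsigned) n, (unsigned) m2);	/* N=35 M2=65 */
	return 0;
}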
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 5ea715ace3a0..de621aad85b5 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -162,13 +162,13 @@ struct bdb_lvds_options {
162 u8 panel_type; 162 u8 panel_type;
163 u8 rsvd1; 163 u8 rsvd1;
164 /* LVDS capabilities, stored in a dword */ 164 /* LVDS capabilities, stored in a dword */
165 u8 rsvd2:1;
166 u8 lvds_edid:1;
167 u8 pixel_dither:1;
168 u8 pfit_ratio_auto:1;
169 u8 pfit_gfx_mode_enhanced:1;
170 u8 pfit_text_mode_enhanced:1;
171 u8 pfit_mode:2; 165 u8 pfit_mode:2;
166 u8 pfit_text_mode_enhanced:1;
167 u8 pfit_gfx_mode_enhanced:1;
168 u8 pfit_ratio_auto:1;
169 u8 pixel_dither:1;
170 u8 lvds_edid:1;
171 u8 rsvd2:1;
172 u8 rsvd4; 172 u8 rsvd4;
173} __attribute__((packed)); 173} __attribute__((packed));
174 174
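
The intel_bios.h hunk above reverses the declaration order of the LVDS capability bits. C leaves bitfield ordering within a storage unit implementation-defined, and with GCC on little-endian x86 the first-declared field occupies the least significant bits, so pfit_mode has to be declared first to land in bits 1:0 of the VBT byte. A small demonstration of the resulting layout (the raw byte value is invented):

#include <stdint.h>
#include <stdio.h>

struct lvds_caps {		/* little-endian GCC: first field = lowest bits */
	uint8_t pfit_mode:2;	/* bits 1:0 */
	uint8_t pfit_text_mode_enhanced:1;
	uint8_t pfit_gfx_mode_enhanced:1;
	uint8_t pfit_ratio_auto:1;
	uint8_t pixel_dither:1;
	uint8_t lvds_edid:1;
	uint8_t rsvd2:1;	/* bit 7 */
} __attribute__((packed));

int main(void)
{
	union { uint8_t raw; struct lvds_caps caps; } u = { .raw = 0x42 };

	/* 0x42 = 0b01000010 -> pfit_mode = 2, lvds_edid = 1 */
	printf("pfit_mode=%d lvds_edid=%d\n", u.caps.pfit_mode, u.caps.lvds_edid);
	return 0;
}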
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dcaed3466e83..2b6d44381c31 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
64static int intel_crt_mode_valid(struct drm_connector *connector, 64static int intel_crt_mode_valid(struct drm_connector *connector,
65 struct drm_display_mode *mode) 65 struct drm_display_mode *mode)
66{ 66{
67 struct drm_device *dev = connector->dev;
68
69 int max_clock = 0;
67 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 70 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
68 return MODE_NO_DBLESCAN; 71 return MODE_NO_DBLESCAN;
69 72
70 if (mode->clock > 400000 || mode->clock < 25000) 73 if (mode->clock < 25000)
71 return MODE_CLOCK_RANGE; 74 return MODE_CLOCK_LOW;
75
76 if (!IS_I9XX(dev))
77 max_clock = 350000;
78 else
79 max_clock = 400000;
80 if (mode->clock > max_clock)
81 return MODE_CLOCK_HIGH;
72 82
73 return MODE_OK; 83 return MODE_OK;
74} 84}
@@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 123 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
114 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 124 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
115 125
116 if (intel_crtc->pipe == 0) 126 if (intel_crtc->pipe == 0) {
117 adpa |= ADPA_PIPE_A_SELECT; 127 adpa |= ADPA_PIPE_A_SELECT;
118 else 128 I915_WRITE(BCLRPAT_A, 0);
129 } else {
119 adpa |= ADPA_PIPE_B_SELECT; 130 adpa |= ADPA_PIPE_B_SELECT;
131 I915_WRITE(BCLRPAT_B, 0);
132 }
120 133
121 I915_WRITE(ADPA, adpa); 134 I915_WRITE(ADPA, adpa);
122} 135}
@@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
133{ 146{
134 struct drm_device *dev = connector->dev; 147 struct drm_device *dev = connector->dev;
135 struct drm_i915_private *dev_priv = dev->dev_private; 148 struct drm_i915_private *dev_priv = dev->dev_private;
136 u32 temp; 149 u32 hotplug_en;
137 150 int i, tries = 0;
138 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 151 /*
 139 152 * On 4-series desktop parts, the CRT detect sequence needs to be done twice
140 temp = I915_READ(PORT_HOTPLUG_EN); 153 * to get a reliable result.
141 154 */
142 I915_WRITE(PORT_HOTPLUG_EN,
143 temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
144 155
145 do { 156 if (IS_G4X(dev) && !IS_GM45(dev))
146 if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) 157 tries = 2;
147 break; 158 else
148 msleep(1); 159 tries = 1;
149 } while (time_after(timeout, jiffies)); 160 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
161 hotplug_en &= ~(CRT_HOTPLUG_MASK);
162 hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
163
164 if (IS_GM45(dev))
165 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
166
167 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
168
 169 for (i = 0; i < tries; i++) {
170 unsigned long timeout;
171 /* turn on the FORCE_DETECT */
172 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
173 timeout = jiffies + msecs_to_jiffies(1000);
174 /* wait for FORCE_DETECT to go off */
175 do {
176 if (!(I915_READ(PORT_HOTPLUG_EN) &
177 CRT_HOTPLUG_FORCE_DETECT))
178 break;
179 msleep(1);
180 } while (time_after(timeout, jiffies));
181 }
150 182
151 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == 183 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
152 CRT_HOTPLUG_MONITOR_COLOR) 184 CRT_HOTPLUG_MONITOR_COLOR)
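
The rewritten hotplug probe is a force-then-poll sequence: software sets the self-clearing FORCE_DETECT trigger, polls until the hardware clears it or a deadline passes, and on 4-series desktop parts runs the whole sequence twice. The jiffies-based deadline idiom it relies on looks like this in isolation (read_reg() and the trigger argument are hypothetical stand-ins for I915_READ() and CRT_HOTPLUG_FORCE_DETECT):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

/* Poll until the hardware clears a self-clearing trigger bit or one
 * second elapses. Returns true if the bit cleared in time. */
static bool wait_trigger_clear(u32 (*read_reg)(void), u32 trigger)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	do {
		if (!(read_reg() & trigger))
			return true;
		msleep(1);		/* sleep between samples, don't spin */
	} while (time_after(timeout, jiffies));

	return false;
}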
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a2834276cb38..d9c50ff94d76 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -56,11 +56,13 @@ typedef struct {
56} intel_p2_t; 56} intel_p2_t;
57 57
58#define INTEL_P2_NUM 2 58#define INTEL_P2_NUM 2
59 59typedef struct intel_limit intel_limit_t;
60typedef struct { 60struct intel_limit {
61 intel_range_t dot, vco, n, m, m1, m2, p, p1; 61 intel_range_t dot, vco, n, m, m1, m2, p, p1;
62 intel_p2_t p2; 62 intel_p2_t p2;
63} intel_limit_t; 63 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
64 int, int, intel_clock_t *);
65};
64 66
65#define I8XX_DOT_MIN 25000 67#define I8XX_DOT_MIN 25000
66#define I8XX_DOT_MAX 350000 68#define I8XX_DOT_MAX 350000
@@ -90,18 +92,32 @@ typedef struct {
90#define I9XX_DOT_MAX 400000 92#define I9XX_DOT_MAX 400000
91#define I9XX_VCO_MIN 1400000 93#define I9XX_VCO_MIN 1400000
92#define I9XX_VCO_MAX 2800000 94#define I9XX_VCO_MAX 2800000
95#define IGD_VCO_MIN 1700000
96#define IGD_VCO_MAX 3500000
93#define I9XX_N_MIN 1 97#define I9XX_N_MIN 1
94#define I9XX_N_MAX 6 98#define I9XX_N_MAX 6
 99/* IGD's N counter is a ring counter */
100#define IGD_N_MIN 3
101#define IGD_N_MAX 6
95#define I9XX_M_MIN 70 102#define I9XX_M_MIN 70
96#define I9XX_M_MAX 120 103#define I9XX_M_MAX 120
104#define IGD_M_MIN 2
105#define IGD_M_MAX 256
97#define I9XX_M1_MIN 10 106#define I9XX_M1_MIN 10
98#define I9XX_M1_MAX 22 107#define I9XX_M1_MAX 22
99#define I9XX_M2_MIN 5 108#define I9XX_M2_MIN 5
100#define I9XX_M2_MAX 9 109#define I9XX_M2_MAX 9
110/* IGD M1 is reserved, and must be 0 */
111#define IGD_M1_MIN 0
112#define IGD_M1_MAX 0
113#define IGD_M2_MIN 0
114#define IGD_M2_MAX 254
101#define I9XX_P_SDVO_DAC_MIN 5 115#define I9XX_P_SDVO_DAC_MIN 5
102#define I9XX_P_SDVO_DAC_MAX 80 116#define I9XX_P_SDVO_DAC_MAX 80
103#define I9XX_P_LVDS_MIN 7 117#define I9XX_P_LVDS_MIN 7
104#define I9XX_P_LVDS_MAX 98 118#define I9XX_P_LVDS_MAX 98
119#define IGD_P_LVDS_MIN 7
120#define IGD_P_LVDS_MAX 112
105#define I9XX_P1_MIN 1 121#define I9XX_P1_MIN 1
106#define I9XX_P1_MAX 8 122#define I9XX_P1_MAX 8
107#define I9XX_P2_SDVO_DAC_SLOW 10 123#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -115,6 +131,97 @@ typedef struct {
115#define INTEL_LIMIT_I8XX_LVDS 1 131#define INTEL_LIMIT_I8XX_LVDS 1
116#define INTEL_LIMIT_I9XX_SDVO_DAC 2 132#define INTEL_LIMIT_I9XX_SDVO_DAC 2
117#define INTEL_LIMIT_I9XX_LVDS 3 133#define INTEL_LIMIT_I9XX_LVDS 3
134#define INTEL_LIMIT_G4X_SDVO 4
135#define INTEL_LIMIT_G4X_HDMI_DAC 5
136#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9
140
 141/* These parameters are for SDVO on the G4X platform */
142#define G4X_DOT_SDVO_MIN 25000
143#define G4X_DOT_SDVO_MAX 270000
144#define G4X_VCO_MIN 1750000
145#define G4X_VCO_MAX 3500000
146#define G4X_N_SDVO_MIN 1
147#define G4X_N_SDVO_MAX 4
148#define G4X_M_SDVO_MIN 104
149#define G4X_M_SDVO_MAX 138
150#define G4X_M1_SDVO_MIN 17
151#define G4X_M1_SDVO_MAX 23
152#define G4X_M2_SDVO_MIN 5
153#define G4X_M2_SDVO_MAX 11
154#define G4X_P_SDVO_MIN 10
155#define G4X_P_SDVO_MAX 30
156#define G4X_P1_SDVO_MIN 1
157#define G4X_P1_SDVO_MAX 3
158#define G4X_P2_SDVO_SLOW 10
159#define G4X_P2_SDVO_FAST 10
160#define G4X_P2_SDVO_LIMIT 270000
161
 162/* These parameters are for HDMI_DAC on the G4X platform */
163#define G4X_DOT_HDMI_DAC_MIN 22000
164#define G4X_DOT_HDMI_DAC_MAX 400000
165#define G4X_N_HDMI_DAC_MIN 1
166#define G4X_N_HDMI_DAC_MAX 4
167#define G4X_M_HDMI_DAC_MIN 104
168#define G4X_M_HDMI_DAC_MAX 138
169#define G4X_M1_HDMI_DAC_MIN 16
170#define G4X_M1_HDMI_DAC_MAX 23
171#define G4X_M2_HDMI_DAC_MIN 5
172#define G4X_M2_HDMI_DAC_MAX 11
173#define G4X_P_HDMI_DAC_MIN 5
174#define G4X_P_HDMI_DAC_MAX 80
175#define G4X_P1_HDMI_DAC_MIN 1
176#define G4X_P1_HDMI_DAC_MAX 8
177#define G4X_P2_HDMI_DAC_SLOW 10
178#define G4X_P2_HDMI_DAC_FAST 5
179#define G4X_P2_HDMI_DAC_LIMIT 165000
180
 181/* These parameters are for SINGLE_CHANNEL_LVDS on the G4X platform */
182#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
183#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
184#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
185#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
186#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
187#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
188#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
189#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
190#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
191#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
192#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
193#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
194#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
195#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
196#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
197#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
198#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
199
 200/* These parameters are for DUAL_CHANNEL_LVDS on the G4X platform */
201#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
202#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
203#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
204#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
205#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
206#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
207#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
208#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
209#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
210#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
211#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
212#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
213#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
214#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
215#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
216#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
218
219static bool
220intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
221 int target, int refclk, intel_clock_t *best_clock);
222static bool
223intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
224 int target, int refclk, intel_clock_t *best_clock);
118 225
119static const intel_limit_t intel_limits[] = { 226static const intel_limit_t intel_limits[] = {
120 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 227 { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
128 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX }, 235 .p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
129 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 236 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
130 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, 237 .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
238 .find_pll = intel_find_best_PLL,
131 }, 239 },
132 { /* INTEL_LIMIT_I8XX_LVDS */ 240 { /* INTEL_LIMIT_I8XX_LVDS */
133 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 241 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
140 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX }, 248 .p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
141 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, 249 .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
142 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, 250 .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
251 .find_pll = intel_find_best_PLL,
143 }, 252 },
144 { /* INTEL_LIMIT_I9XX_SDVO_DAC */ 253 { /* INTEL_LIMIT_I9XX_SDVO_DAC */
145 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 254 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
152 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 261 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
153 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 262 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
154 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 263 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
264 .find_pll = intel_find_best_PLL,
155 }, 265 },
156 { /* INTEL_LIMIT_I9XX_LVDS */ 266 { /* INTEL_LIMIT_I9XX_LVDS */
157 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 267 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
167 */ 277 */
168 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 278 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
169 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, 279 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
280 .find_pll = intel_find_best_PLL,
281 },
 282 /* Below parameters and functions are for the G4X chipset family */
283 { /* INTEL_LIMIT_G4X_SDVO */
284 .dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
285 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
286 .n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
287 .m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
288 .m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
289 .m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
290 .p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
291 .p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
292 .p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
293 .p2_slow = G4X_P2_SDVO_SLOW,
294 .p2_fast = G4X_P2_SDVO_FAST
295 },
296 .find_pll = intel_g4x_find_best_PLL,
297 },
298 { /* INTEL_LIMIT_G4X_HDMI_DAC */
299 .dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
300 .vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
301 .n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
302 .m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
303 .m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
304 .m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
305 .p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
306 .p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
307 .p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
308 .p2_slow = G4X_P2_HDMI_DAC_SLOW,
309 .p2_fast = G4X_P2_HDMI_DAC_FAST
310 },
311 .find_pll = intel_g4x_find_best_PLL,
312 },
313 { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
314 .dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
315 .max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
316 .vco = { .min = G4X_VCO_MIN,
317 .max = G4X_VCO_MAX },
318 .n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
319 .max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
320 .m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
321 .max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
322 .m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
323 .max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
324 .m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
325 .max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
326 .p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
327 .max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
328 .p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
329 .max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
330 .p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
331 .p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
332 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
333 },
334 .find_pll = intel_g4x_find_best_PLL,
335 },
336 { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
337 .dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
338 .max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
339 .vco = { .min = G4X_VCO_MIN,
340 .max = G4X_VCO_MAX },
341 .n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
342 .max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
343 .m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
344 .max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
345 .m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
346 .max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
347 .m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
348 .max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
349 .p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
350 .max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
351 .p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
352 .max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
353 .p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
354 .p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
355 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
356 },
357 .find_pll = intel_g4x_find_best_PLL,
358 },
359 { /* INTEL_LIMIT_IGD_SDVO */
360 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
361 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
362 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
363 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
364 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
365 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
366 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
170 }, 370 },
371 { /* INTEL_LIMIT_IGD_LVDS */
372 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
373 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
374 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
375 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
376 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
377 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
378 .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
379 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
380 /* IGD only supports single-channel mode. */
381 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
382 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
383 },
384
171}; 385};
172 386
387static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
388{
389 struct drm_device *dev = crtc->dev;
390 struct drm_i915_private *dev_priv = dev->dev_private;
391 const intel_limit_t *limit;
392
393 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
394 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
395 LVDS_CLKB_POWER_UP)
396 /* LVDS with dual channel */
397 limit = &intel_limits
398 [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
399 else
400 /* LVDS with single channel */
401 limit = &intel_limits
402 [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
403 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
404 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
405 limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
406 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
407 limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
408 } else /* default to the I9XX limits for any other output */
409 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
410
411 return limit;
412}
413
173static const intel_limit_t *intel_limit(struct drm_crtc *crtc) 414static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
174{ 415{
175 struct drm_device *dev = crtc->dev; 416 struct drm_device *dev = crtc->dev;
176 const intel_limit_t *limit; 417 const intel_limit_t *limit;
177 418
178 if (IS_I9XX(dev)) { 419 if (IS_G4X(dev)) {
420 limit = intel_g4x_limit(crtc);
421 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
179 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 422 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
180 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS]; 423 limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
181 else 424 else
182 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; 425 limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
426 } else if (IS_IGD(dev)) {
427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
428 limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
429 else
430 limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
183 } else { 431 } else {
184 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 432 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
185 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS]; 433 limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
189 return limit; 437 return limit;
190} 438}
191 439
192static void intel_clock(int refclk, intel_clock_t *clock) 440/* m1 is reserved as 0 in IGD, n is a ring counter */
441static void igd_clock(int refclk, intel_clock_t *clock)
193{ 442{
443 clock->m = clock->m2 + 2;
444 clock->p = clock->p1 * clock->p2;
445 clock->vco = refclk * clock->m / clock->n;
446 clock->dot = clock->vco / clock->p;
447}
448
449static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
450{
451 if (IS_IGD(dev)) {
452 igd_clock(refclk, clock);
453 return;
454 }
194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 455 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
195 clock->p = clock->p1 * clock->p2; 456 clock->p = clock->p1 * clock->p2;
196 clock->vco = refclk * clock->m / (clock->n + 2); 457 clock->vco = refclk * clock->m / (clock->n + 2);
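
For reference, the two divisor equations can be exercised outside the driver. Below is a minimal standalone sketch; the n/m1/m2/p1/p2 values and the 96 MHz refclk in main() are illustrative assumptions, not values taken from this patch or from the hardware limit tables.

#include <stdio.h>

struct clk { int n, m1, m2, p1, p2, m, p, vco, dot; };

/* i9xx-style equation: refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2 */
static void i9xx_clock(int refclk, struct clk *c)
{
	c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
	c->p = c->p1 * c->p2;
	c->vco = refclk * c->m / (c->n + 2);
	c->dot = c->vco / c->p;
}

/* IGD-style equation: m1 is reserved as 0, m = m2 + 2, n divides directly */
static void igd_clock(int refclk, struct clk *c)
{
	c->m = c->m2 + 2;
	c->p = c->p1 * c->p2;
	c->vco = refclk * c->m / c->n;
	c->dot = c->vco / c->p;
}

int main(void)
{
	struct clk c = { .n = 4, .m1 = 10, .m2 = 8, .p1 = 2, .p2 = 5 };

	i9xx_clock(96000, &c);	/* refclk in kHz */
	printf("i9xx: m=%d vco=%d dot=%d kHz\n", c.m, c.vco, c.dot);
	igd_clock(96000, &c);
	printf("igd:  m=%d vco=%d dot=%d kHz\n", c.m, c.vco, c.dot);
	return 0;
}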
@@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
226static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) 487static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
227{ 488{
228 const intel_limit_t *limit = intel_limit (crtc); 489 const intel_limit_t *limit = intel_limit (crtc);
490 struct drm_device *dev = crtc->dev;
229 491
230 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 492 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
231 INTELPllInvalid ("p1 out of range\n"); 493 INTELPllInvalid ("p1 out of range\n");
@@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
235 INTELPllInvalid ("m2 out of range\n"); 497 INTELPllInvalid ("m2 out of range\n");
236 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 498 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
237 INTELPllInvalid ("m1 out of range\n"); 499 INTELPllInvalid ("m1 out of range\n");
238 if (clock->m1 <= clock->m2) 500 if (clock->m1 <= clock->m2 && !IS_IGD(dev))
239 INTELPllInvalid ("m1 <= m2\n"); 501 INTELPllInvalid ("m1 <= m2\n");
240 if (clock->m < limit->m.min || limit->m.max < clock->m) 502 if (clock->m < limit->m.min || limit->m.max < clock->m)
241 INTELPllInvalid ("m out of range\n"); 503 INTELPllInvalid ("m out of range\n");
@@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
252 return true; 514 return true;
253} 515}
254 516
255/**
256 * Returns a set of divisors for the desired target clock with the given
257 * refclk, or FALSE. The returned values represent the clock equation:
258 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
259 */
260static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
261 int refclk, intel_clock_t *best_clock)
517static bool
518intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
519 int target, int refclk, intel_clock_t *best_clock)
520
262{ 521{
263 struct drm_device *dev = crtc->dev; 522 struct drm_device *dev = crtc->dev;
264 struct drm_i915_private *dev_priv = dev->dev_private; 523 struct drm_i915_private *dev_priv = dev->dev_private;
265 intel_clock_t clock; 524 intel_clock_t clock;
266 const intel_limit_t *limit = intel_limit(crtc);
267 int err = target; 525 int err = target;
268 526
269 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 527 if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
289 memset (best_clock, 0, sizeof (*best_clock)); 547 memset (best_clock, 0, sizeof (*best_clock));
290 548
291 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 549 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
292 for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && 550 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
293 clock.m2 <= limit->m2.max; clock.m2++) { 551 /* m1 is always 0 in IGD */
552 if (clock.m2 >= clock.m1 && !IS_IGD(dev))
553 break;
294 for (clock.n = limit->n.min; clock.n <= limit->n.max; 554 for (clock.n = limit->n.min; clock.n <= limit->n.max;
295 clock.n++) { 555 clock.n++) {
296 for (clock.p1 = limit->p1.min; 556 for (clock.p1 = limit->p1.min;
297 clock.p1 <= limit->p1.max; clock.p1++) { 557 clock.p1 <= limit->p1.max; clock.p1++) {
298 int this_err; 558 int this_err;
299 559
300 intel_clock(refclk, &clock); 560 intel_clock(dev, refclk, &clock);
301 561
302 if (!intel_PLL_is_valid(crtc, &clock)) 562 if (!intel_PLL_is_valid(crtc, &clock))
303 continue; 563 continue;
@@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
315 return (err != target); 575 return (err != target);
316} 576}
317 577
578static bool
579intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
580 int target, int refclk, intel_clock_t *best_clock)
581{
582 struct drm_device *dev = crtc->dev;
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 intel_clock_t clock;
585 int max_n;
586 bool found;
587 /* approximately equals target * 0.00488 */
588 int err_most = (target >> 8) + (target >> 10);
589 found = false;
590
591 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
592 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
593 LVDS_CLKB_POWER_UP)
594 clock.p2 = limit->p2.p2_fast;
595 else
596 clock.p2 = limit->p2.p2_slow;
597 } else {
598 if (target < limit->p2.dot_limit)
599 clock.p2 = limit->p2.p2_slow;
600 else
601 clock.p2 = limit->p2.p2_fast;
602 }
603
604 memset(best_clock, 0, sizeof(*best_clock));
605 max_n = limit->n.max;
606 /* based on hardware requirement, prefer smaller n for precision */
607 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
608 /* based on hardware requirement, prefer larger m1, m2, p1 */
609 for (clock.m1 = limit->m1.max;
610 clock.m1 >= limit->m1.min; clock.m1--) {
611 for (clock.m2 = limit->m2.max;
612 clock.m2 >= limit->m2.min; clock.m2--) {
613 for (clock.p1 = limit->p1.max;
614 clock.p1 >= limit->p1.min; clock.p1--) {
615 int this_err;
616
617 intel_clock(dev, refclk, &clock);
618 if (!intel_PLL_is_valid(crtc, &clock))
619 continue;
620 this_err = abs(clock.dot - target);
621 if (this_err < err_most) {
622 *best_clock = clock;
623 err_most = this_err;
624 max_n = clock.n;
625 found = true;
626 }
627 }
628 }
629 }
630 }
631
632 return found;
633}
634
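
Two things are worth spelling out about the G4X search above. First, the error budget: (target >> 8) + (target >> 10) equals target * 5 / 1024, which is the "approximately target * 0.00488" the comment refers to. Second, the pruning: once a candidate lands within err_most, max_n is lowered to that candidate's n, so the outer loop never revisits larger (less precise) n values. A standalone check of the arithmetic, with a made-up 148.5 MHz target:

#include <stdio.h>

int main(void)
{
	int target = 148500;	/* kHz; assumed example, not from the patch */
	int err_most = (target >> 8) + (target >> 10);

	/* 580 + 145 = 725 kHz, and 148500 * 5 / 1024 = 725 as well */
	printf("err_most = %d kHz (%.3f%% of target)\n",
	       err_most, err_most * 100.0 / target);
	return 0;
}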
318void 635void
319intel_wait_for_vblank(struct drm_device *dev) 636intel_wait_for_vblank(struct drm_device *dev)
320{ 637{
@@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
634 return 400000; 951 return 400000;
635 else if (IS_I915G(dev)) 952 else if (IS_I915G(dev))
636 return 333000; 953 return 333000;
637 else if (IS_I945GM(dev) || IS_845G(dev)) 954 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
638 return 200000; 955 return 200000;
639 else if (IS_I915GM(dev)) { 956 else if (IS_I915GM(dev)) {
640 u16 gcfgc = 0; 957 u16 gcfgc = 0;
@@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
733 bool is_crt = false, is_lvds = false, is_tv = false; 1050 bool is_crt = false, is_lvds = false, is_tv = false;
734 struct drm_mode_config *mode_config = &dev->mode_config; 1051 struct drm_mode_config *mode_config = &dev->mode_config;
735 struct drm_connector *connector; 1052 struct drm_connector *connector;
1053 const intel_limit_t *limit;
736 int ret; 1054 int ret;
737 1055
738 drm_vblank_pre_modeset(dev, pipe); 1056 drm_vblank_pre_modeset(dev, pipe);
@@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
776 refclk = 48000; 1094 refclk = 48000;
777 } 1095 }
778 1096
779 ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); 1097 /*
1098 * Returns a set of divisors for the desired target clock with the given
1099 * refclk, or FALSE. The returned values represent the clock equation:
1100 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
1101 */
1102 limit = intel_limit(crtc);
1103 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
780 if (!ok) { 1104 if (!ok) {
781 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 1105 DRM_ERROR("Couldn't find PLL settings for mode!\n");
782 return -EINVAL; 1106 return -EINVAL;
783 } 1107 }
784 1108
785 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 1109 if (IS_IGD(dev))
1110 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
1111 else
1112 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
786 1113
787 dpll = DPLL_VGA_MODE_DIS; 1114 dpll = DPLL_VGA_MODE_DIS;
788 if (IS_I9XX(dev)) { 1115 if (IS_I9XX(dev)) {
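
The IGD fp encoding above is the counterpart of the decode added to intel_crtc_clock_get() further down: n is stored one-hot, as (1 << n) << 16, and recovered with ffs() - 1. A tiny round-trip sketch; using a bare fp >> 16 instead of the driver's FP_N_IGD_DIV_MASK is a simplifying assumption, valid here only because no other bits are set:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int n = 3;	/* made-up divisor value */
	unsigned int fp = (1u << n) << 16;	/* encode: one-hot N field */
	int n_out = ffs(fp >> 16) - 1;		/* decode, as in intel_crtc_clock_get() */

	printf("n=%d -> fp=0x%08x -> n=%d\n", n, fp, n_out);
	return 0;
}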
@@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
799 } 1126 }
800 1127
801 /* compute bitmask from p1 value */ 1128 /* compute bitmask from p1 value */
802 dpll |= (1 << (clock.p1 - 1)) << 16; 1129 if (IS_IGD(dev))
1130 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
1131 else
1132 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
803 switch (clock.p2) { 1133 switch (clock.p2) {
804 case 5: 1134 case 5:
805 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 1135 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1279 fp = I915_READ((pipe == 0) ? FPA1 : FPB1); 1609 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
1280 1610
1281 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 1611 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
1282 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 1612 if (IS_IGD(dev)) {
1283 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 1613 clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
1614 clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
1615 } else {
1616 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
1617 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
1618 }
1619
1284 if (IS_I9XX(dev)) { 1620 if (IS_I9XX(dev)) {
1285 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 1621 if (IS_IGD(dev))
1622 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
1623 DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
1624 else
1625 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
1286 DPLL_FPA01_P1_POST_DIV_SHIFT); 1626 DPLL_FPA01_P1_POST_DIV_SHIFT);
1287 1627
1288 switch (dpll & DPLL_MODE_MASK) { 1628 switch (dpll & DPLL_MODE_MASK) {
@@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1301 } 1641 }
1302 1642
1303 /* XXX: Handle the 100Mhz refclk */ 1643 /* XXX: Handle the 100Mhz refclk */
1304 intel_clock(96000, &clock); 1644 intel_clock(dev, 96000, &clock);
1305 } else { 1645 } else {
1306 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 1646 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
1307 1647
@@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1313 if ((dpll & PLL_REF_INPUT_MASK) == 1653 if ((dpll & PLL_REF_INPUT_MASK) ==
1314 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1654 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1315 /* XXX: might not be 66MHz */ 1655 /* XXX: might not be 66MHz */
1316 intel_clock(66000, &clock); 1656 intel_clock(dev, 66000, &clock);
1317 } else 1657 } else
1318 intel_clock(48000, &clock); 1658 intel_clock(dev, 48000, &clock);
1319 } else { 1659 } else {
1320 if (dpll & PLL_P1_DIVIDE_BY_TWO) 1660 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1321 clock.p1 = 2; 1661 clock.p1 = 2;
@@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1328 else 1668 else
1329 clock.p2 = 2; 1669 clock.p2 = 2;
1330 1670
1331 intel_clock(48000, &clock); 1671 intel_clock(dev, 48000, &clock);
1332 } 1672 }
1333 } 1673 }
1334 1674
@@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
1474 1814
1475 if (IS_I9XX(dev)) { 1815 if (IS_I9XX(dev)) {
1476 int found; 1816 int found;
1817 u32 reg;
1477 1818
1478 if (I915_READ(SDVOB) & SDVO_DETECTED) { 1819 if (I915_READ(SDVOB) & SDVO_DETECTED) {
1479 found = intel_sdvo_init(dev, SDVOB); 1820 found = intel_sdvo_init(dev, SDVOB);
1480 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1821 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1481 intel_hdmi_init(dev, SDVOB); 1822 intel_hdmi_init(dev, SDVOB);
1482 } 1823 }
1483 if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { 1824
1825 /* Before G4X, SDVOC doesn't have its own detect register, so check SDVOB's */
1826 if (IS_G4X(dev))
1827 reg = SDVOC;
1828 else
1829 reg = SDVOB;
1830
1831 if (I915_READ(reg) & SDVO_DETECTED) {
1484 found = intel_sdvo_init(dev, SDVOC); 1832 found = intel_sdvo_init(dev, SDVOC);
1485 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1833 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1486 intel_hdmi_init(dev, SDVOC); 1834 intel_hdmi_init(dev, SDVOC);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0d211af98854..6619f26e46a5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
265 pfit_control = 0; 265 pfit_control = 0;
266 266
267 if (!IS_I965G(dev)) { 267 if (!IS_I965G(dev)) {
268 if (dev_priv->panel_wants_dither) 268 if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
269 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 269 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
270 } 270 }
271 else 271 else
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 56485d67369b..ceca9471a75a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -217,8 +217,8 @@ static const u32 filter_table[] = {
217 */ 217 */
218static const struct color_conversion ntsc_m_csc_composite = { 218static const struct color_conversion ntsc_m_csc_composite = {
219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 219 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 220 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 221 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
222}; 222};
223 223
224static const struct video_levels ntsc_m_levels_composite = { 224static const struct video_levels ntsc_m_levels_composite = {
@@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
226}; 226};
227 227
228static const struct color_conversion ntsc_m_csc_svideo = { 228static const struct color_conversion ntsc_m_csc_svideo = {
229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 229 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 230 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 231 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
232}; 232};
233 233
234static const struct video_levels ntsc_m_levels_svideo = { 234static const struct video_levels ntsc_m_levels_svideo = {
@@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
237 237
238static const struct color_conversion ntsc_j_csc_composite = { 238static const struct color_conversion ntsc_j_csc_composite = {
239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119, 239 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00, 240 .ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00, 241 .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
242}; 242};
243 243
244static const struct video_levels ntsc_j_levels_composite = { 244static const struct video_levels ntsc_j_levels_composite = {
@@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
247 247
248static const struct color_conversion ntsc_j_csc_svideo = { 248static const struct color_conversion ntsc_j_csc_svideo = {
249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c, 249 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00, 250 .ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00, 251 .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
252}; 252};
253 253
254static const struct video_levels ntsc_j_levels_svideo = { 254static const struct video_levels ntsc_j_levels_svideo = {
@@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
257 257
258static const struct color_conversion pal_csc_composite = { 258static const struct color_conversion pal_csc_composite = {
259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113, 259 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00, 260 .ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00, 261 .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
262}; 262};
263 263
264static const struct video_levels pal_levels_composite = { 264static const struct video_levels pal_levels_composite = {
@@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
267 267
268static const struct color_conversion pal_csc_svideo = { 268static const struct color_conversion pal_csc_svideo = {
269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145, 269 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00, 270 .ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00, 271 .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
272}; 272};
273 273
274static const struct video_levels pal_levels_svideo = { 274static const struct video_levels pal_levels_svideo = {
@@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
277 277
278static const struct color_conversion pal_m_csc_composite = { 278static const struct color_conversion pal_m_csc_composite = {
279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 279 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 280 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 281 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
282}; 282};
283 283
284static const struct video_levels pal_m_levels_composite = { 284static const struct video_levels pal_m_levels_composite = {
@@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
286}; 286};
287 287
288static const struct color_conversion pal_m_csc_svideo = { 288static const struct color_conversion pal_m_csc_svideo = {
289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 289 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 290 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 291 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
292}; 292};
293 293
294static const struct video_levels pal_m_levels_svideo = { 294static const struct video_levels pal_m_levels_svideo = {
@@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
297 297
298static const struct color_conversion pal_n_csc_composite = { 298static const struct color_conversion pal_n_csc_composite = {
299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104, 299 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00, 300 .ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00, 301 .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
302}; 302};
303 303
304static const struct video_levels pal_n_levels_composite = { 304static const struct video_levels pal_n_levels_composite = {
@@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
306}; 306};
307 307
308static const struct color_conversion pal_n_csc_svideo = { 308static const struct color_conversion pal_n_csc_svideo = {
309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134, 309 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00, 310 .ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00, 311 .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
312}; 312};
313 313
314static const struct video_levels pal_n_levels_svideo = { 314static const struct video_levels pal_n_levels_svideo = {
@@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
319 * Component connections 319 * Component connections
320 */ 320 */
321static const struct color_conversion sdtv_csc_yprpb = { 321static const struct color_conversion sdtv_csc_yprpb = {
322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146, 322 .ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00, 323 .ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00, 324 .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
325}; 325};
326 326
327static const struct color_conversion sdtv_csc_rgb = { 327static const struct color_conversion sdtv_csc_rgb = {
@@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
331}; 331};
332 332
333static const struct color_conversion hdtv_csc_yprpb = { 333static const struct color_conversion hdtv_csc_yprpb = {
334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146, 334 .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00, 335 .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00, 336 .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
337}; 337};
338 338
339static const struct color_conversion hdtv_csc_rgb = { 339static const struct color_conversion hdtv_csc_rgb = {
@@ -414,7 +414,7 @@ struct tv_mode {
414static const struct tv_mode tv_modes[] = { 414static const struct tv_mode tv_modes[] = {
415 { 415 {
416 .name = "NTSC-M", 416 .name = "NTSC-M",
417 .clock = 107520, 417 .clock = 108000,
418 .refresh = 29970, 418 .refresh = 29970,
419 .oversample = TV_OVERSAMPLE_8X, 419 .oversample = TV_OVERSAMPLE_8X,
420 .component_only = 0, 420 .component_only = 0,
@@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
442 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 442 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
443 443
444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 444 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
445 .dda1_inc = 136, 445 .dda1_inc = 135,
446 .dda2_inc = 7624, .dda2_size = 20013, 446 .dda2_inc = 20800, .dda2_size = 27456,
447 .dda3_inc = 0, .dda3_size = 0, 447 .dda3_inc = 0, .dda3_size = 0,
448 .sc_reset = TV_SC_RESET_EVERY_4, 448 .sc_reset = TV_SC_RESET_EVERY_4,
449 .pal_burst = false, 449 .pal_burst = false,
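
The new NTSC-M DDA values can be sanity-checked. Assuming the subcarrier DDA adds dda1_inc + dda2_inc/dda2_size to a 12-bit phase accumulator per pixel clock (an assumption about the hardware, but one that both the old and the new value sets satisfy), the programmed subcarrier works out to:

#include <stdio.h>

int main(void)
{
	/* new values at the 108 MHz pixel clock */
	double sc_new = 108000.0 * (135 + 20800.0 / 27456) / 4096;
	/* old values at the 107.52 MHz clock, for comparison */
	double sc_old = 107520.0 * (136 + 7624.0 / 20013) / 4096;

	printf("new: %.4f kHz\n", sc_new);	/* ~3579.5458, the NTSC-M subcarrier */
	printf("old: %.4f kHz\n", sc_old);	/* ~3580.0000, slightly off nominal */
	return 0;
}

In other words, the change moves the generated burst from a rounded 3.58 MHz onto the nominal 3.579545 MHz subcarrier.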
@@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
457 }, 457 },
458 { 458 {
459 .name = "NTSC-443", 459 .name = "NTSC-443",
460 .clock = 107520, 460 .clock = 108000,
461 .refresh = 29970, 461 .refresh = 29970,
462 .oversample = TV_OVERSAMPLE_8X, 462 .oversample = TV_OVERSAMPLE_8X,
463 .component_only = 0, 463 .component_only = 0,
@@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
485 485
486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 486 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
487 .dda1_inc = 168, 487 .dda1_inc = 168,
488 .dda2_inc = 18557, .dda2_size = 20625, 488 .dda2_inc = 4093, .dda2_size = 27456,
489 .dda3_inc = 0, .dda3_size = 0, 489 .dda3_inc = 310, .dda3_size = 525,
490 .sc_reset = TV_SC_RESET_EVERY_8, 490 .sc_reset = TV_SC_RESET_NEVER,
491 .pal_burst = true, 491 .pal_burst = false,
492 492
493 .composite_levels = &ntsc_m_levels_composite, 493 .composite_levels = &ntsc_m_levels_composite,
494 .composite_color = &ntsc_m_csc_composite, 494 .composite_color = &ntsc_m_csc_composite,
@@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
499 }, 499 },
500 { 500 {
501 .name = "NTSC-J", 501 .name = "NTSC-J",
502 .clock = 107520, 502 .clock = 108000,
503 .refresh = 29970, 503 .refresh = 29970,
504 .oversample = TV_OVERSAMPLE_8X, 504 .oversample = TV_OVERSAMPLE_8X,
505 .component_only = 0, 505 .component_only = 0,
@@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
527 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 527 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
528 528
529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 529 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
530 .dda1_inc = 136, 530 .dda1_inc = 135,
531 .dda2_inc = 7624, .dda2_size = 20013, 531 .dda2_inc = 20800, .dda2_size = 27456,
532 .dda3_inc = 0, .dda3_size = 0, 532 .dda3_inc = 0, .dda3_size = 0,
533 .sc_reset = TV_SC_RESET_EVERY_4, 533 .sc_reset = TV_SC_RESET_EVERY_4,
534 .pal_burst = false, 534 .pal_burst = false,
@@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
542 }, 542 },
543 { 543 {
544 .name = "PAL-M", 544 .name = "PAL-M",
545 .clock = 107520, 545 .clock = 108000,
546 .refresh = 29970, 546 .refresh = 29970,
547 .oversample = TV_OVERSAMPLE_8X, 547 .oversample = TV_OVERSAMPLE_8X,
548 .component_only = 0, 548 .component_only = 0,
@@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
570 .vburst_start_f4 = 10, .vburst_end_f4 = 240, 570 .vburst_start_f4 = 10, .vburst_end_f4 = 240,
571 571
572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */ 572 /* desired 3.5800000 actual 3.5800000 clock 107.52 */
573 .dda1_inc = 136, 573 .dda1_inc = 135,
574 .dda2_inc = 7624, .dda2_size = 20013, 574 .dda2_inc = 16704, .dda2_size = 27456,
575 .dda3_inc = 0, .dda3_size = 0, 575 .dda3_inc = 0, .dda3_size = 0,
576 .sc_reset = TV_SC_RESET_EVERY_4, 576 .sc_reset = TV_SC_RESET_EVERY_8,
577 .pal_burst = false, 577 .pal_burst = true,
578 578
579 .composite_levels = &pal_m_levels_composite, 579 .composite_levels = &pal_m_levels_composite,
580 .composite_color = &pal_m_csc_composite, 580 .composite_color = &pal_m_csc_composite,
@@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
586 { 586 {
587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 587 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
588 .name = "PAL-N", 588 .name = "PAL-N",
589 .clock = 107520, 589 .clock = 108000,
590 .refresh = 25000, 590 .refresh = 25000,
591 .oversample = TV_OVERSAMPLE_8X, 591 .oversample = TV_OVERSAMPLE_8X,
592 .component_only = 0, 592 .component_only = 0,
@@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
615 615
616 616
617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 617 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
618 .dda1_inc = 168, 618 .dda1_inc = 135,
619 .dda2_inc = 18557, .dda2_size = 20625, 619 .dda2_inc = 23578, .dda2_size = 27648,
620 .dda3_inc = 0, .dda3_size = 0, 620 .dda3_inc = 134, .dda3_size = 625,
621 .sc_reset = TV_SC_RESET_EVERY_8, 621 .sc_reset = TV_SC_RESET_EVERY_8,
622 .pal_burst = true, 622 .pal_burst = true,
623 623
@@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
631 { 631 {
632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */ 632 /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
633 .name = "PAL", 633 .name = "PAL",
634 .clock = 107520, 634 .clock = 108000,
635 .refresh = 25000, 635 .refresh = 25000,
636 .oversample = TV_OVERSAMPLE_8X, 636 .oversample = TV_OVERSAMPLE_8X,
637 .component_only = 0, 637 .component_only = 0,
638 638
639 .hsync_end = 64, .hblank_end = 128, 639 .hsync_end = 64, .hblank_end = 142,
640 .hblank_start = 844, .htotal = 863, 640 .hblank_start = 844, .htotal = 863,
641 641
642 .progressive = false, .trilevel_sync = false, 642 .progressive = false, .trilevel_sync = false,
@@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
659 659
660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */ 660 /* desired 4.4336180 actual 4.4336180 clock 107.52 */
661 .dda1_inc = 168, 661 .dda1_inc = 168,
662 .dda2_inc = 18557, .dda2_size = 20625, 662 .dda2_inc = 4122, .dda2_size = 27648,
663 .dda3_inc = 0, .dda3_size = 0, 663 .dda3_inc = 67, .dda3_size = 625,
664 .sc_reset = TV_SC_RESET_EVERY_8, 664 .sc_reset = TV_SC_RESET_EVERY_8,
665 .pal_burst = true, 665 .pal_burst = true,
666 666
@@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
689 .veq_ena = false, 689 .veq_ena = false,
690 690
691 .vi_end_f1 = 44, .vi_end_f2 = 44, 691 .vi_end_f1 = 44, .vi_end_f2 = 44,
692 .nbr_end = 496, 692 .nbr_end = 479,
693 693
694 .burst_ena = false, 694 .burst_ena = false,
695 695
@@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
713 .veq_ena = false, 713 .veq_ena = false,
714 714
715 .vi_end_f1 = 44, .vi_end_f2 = 44, 715 .vi_end_f1 = 44, .vi_end_f2 = 44,
716 .nbr_end = 496, 716 .nbr_end = 479,
717 717
718 .burst_ena = false, 718 .burst_ena = false,
719 719
@@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
876 .component_only = 1, 876 .component_only = 1,
877 877
878 .hsync_end = 88, .hblank_end = 235, 878 .hsync_end = 88, .hblank_end = 235,
879 .hblank_start = 2155, .htotal = 2200, 879 .hblank_start = 2155, .htotal = 2201,
880 880
881 .progressive = false, .trilevel_sync = true, 881 .progressive = false, .trilevel_sync = true,
882 882
@@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
1086 return MODE_OK; 1086 return MODE_OK;
1087 return MODE_CLOCK_RANGE; 1087 return MODE_CLOCK_RANGE;
1088} 1088}
@@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1135 if (!tv_mode) 1135 if (!tv_mode)
1136 return; /* can't happen (mode_prepare prevents this) */ 1136 return; /* can't happen (mode_prepare prevents this) */
1137 1137
1138 tv_ctl = 0; 1138 tv_ctl = I915_READ(TV_CTL);
1139 tv_ctl &= TV_CTL_SAVE;
1139 1140
1140 switch (tv_priv->type) { 1141 switch (tv_priv->type) {
1141 default: 1142 default:
@@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1215 /* dda1 implies valid video levels */ 1216 /* dda1 implies valid video levels */
1216 if (tv_mode->dda1_inc) { 1217 if (tv_mode->dda1_inc) {
1217 scctl1 |= TV_SC_DDA1_EN; 1218 scctl1 |= TV_SC_DDA1_EN;
1218 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1219 } 1219 }
1220 1220
1221 if (tv_mode->dda2_inc) 1221 if (tv_mode->dda2_inc)
@@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1225 scctl1 |= TV_SC_DDA3_EN; 1225 scctl1 |= TV_SC_DDA3_EN;
1226 1226
1227 scctl1 |= tv_mode->sc_reset; 1227 scctl1 |= tv_mode->sc_reset;
1228 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1228 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; 1229 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
1229 1230
1230 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | 1231 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1266 color_conversion->av); 1267 color_conversion->av);
1267 } 1268 }
1268 1269
1269 I915_WRITE(TV_CLR_KNOBS, 0x00606000); 1270 if (IS_I965G(dev))
1271 I915_WRITE(TV_CLR_KNOBS, 0x00404000);
1272 else
1273 I915_WRITE(TV_CLR_KNOBS, 0x00606000);
1274
1270 if (video_levels) 1275 if (video_levels)
1271 I915_WRITE(TV_CLR_LEVEL, 1276 I915_WRITE(TV_CLR_LEVEL,
1272 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | 1277 ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1401 tv_dac = I915_READ(TV_DAC); 1406 tv_dac = I915_READ(TV_DAC);
1402 I915_WRITE(TV_DAC, save_tv_dac); 1407 I915_WRITE(TV_DAC, save_tv_dac);
1403 I915_WRITE(TV_CTL, save_tv_ctl); 1408 I915_WRITE(TV_CTL, save_tv_ctl);
1409 intel_wait_for_vblank(dev);
1404 } 1410 }
1405 /* 1411 /*
1406 * A B C 1412 * A B C
@@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
1451 mode = reported_modes[0]; 1457 mode = reported_modes[0];
1452 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1458 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1453 1459
1454 if (encoder->crtc) { 1460 if (encoder->crtc && encoder->crtc->enabled) {
1455 type = intel_tv_detect_type(encoder->crtc, intel_output); 1461 type = intel_tv_detect_type(encoder->crtc, intel_output);
1456 } else { 1462 } else {
1457 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1463 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
1462 type = -1; 1468 type = -1;
1463 } 1469 }
1464 1470
1471 tv_priv->type = type;
1472
1465 if (type < 0) 1473 if (type < 0)
1466 return connector_status_disconnected; 1474 return connector_status_disconnected;
1467 1475
@@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
1495 struct drm_display_mode *mode_ptr; 1503 struct drm_display_mode *mode_ptr;
1496 struct intel_output *intel_output = to_intel_output(connector); 1504 struct intel_output *intel_output = to_intel_output(connector);
1497 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1505 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
1498 int j; 1506 int j, count = 0;
1507 u64 tmp;
1499 1508
1500 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]); 1509 for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
1501 j++) { 1510 j++) {
@@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
1510 && !tv_mode->component_only)) 1519 && !tv_mode->component_only))
1511 continue; 1520 continue;
1512 1521
1513 mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode), 1522 mode_ptr = drm_mode_create(connector->dev);
1514 DRM_MEM_DRIVER); 1523 if (!mode_ptr)
1524 continue;
1515 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); 1525 strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
1516 1526
1517 mode_ptr->hdisplay = hactive_s; 1527 mode_ptr->hdisplay = hactive_s;
@@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
1528 mode_ptr->vsync_end = mode_ptr->vsync_start + 1; 1538 mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
1529 mode_ptr->vtotal = vactive_s + 33; 1539 mode_ptr->vtotal = vactive_s + 33;
1530 1540
1531 mode_ptr->clock = (int) (tv_mode->refresh * 1541 tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
1532 mode_ptr->vtotal * 1542 tmp *= mode_ptr->htotal;
1533 mode_ptr->htotal / 1000) / 1000; 1543 tmp = div_u64(tmp, 1000000);
1544 mode_ptr->clock = (int) tmp;
1534 1545
1535 mode_ptr->type = DRM_MODE_TYPE_DRIVER; 1546 mode_ptr->type = DRM_MODE_TYPE_DRIVER;
1536 drm_mode_probed_add(connector, mode_ptr); 1547 drm_mode_probed_add(connector, mode_ptr);
1548 count++;
1537 } 1549 }
1538 1550
1539 return 0; 1551 return count;
1540} 1552}
1541 1553
1542static void 1554static void
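
The div_u64() conversion in intel_tv_get_modes() above is not cosmetic: refresh is kept in millihertz in tv_modes, so refresh * vtotal * htotal overflows 32 bits for ordinary SDTV timings. A quick check with assumed NTSC-ish numbers (525 total lines, 858-pixel htotal):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t refresh = 29970, vtotal = 525, htotal = 858;	/* assumed values */
	uint64_t tmp = refresh * vtotal * htotal;

	/* 13,499,986,500 — more than 3x UINT32_MAX, so 32-bit math would wrap */
	printf("tmp = %llu\n", (unsigned long long)tmp);
	printf("clock = %llu kHz\n", (unsigned long long)(tmp / 1000000));
	return 0;
}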
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 31400c8ae051..d696f69ebce5 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = {
68 .host_reset = host_reset, 68 .host_reset = host_reset,
69}; 69};
70 70
71const static struct hpsb_address_ops map_ops = { 71static const struct hpsb_address_ops map_ops = {
72 .read = read_maps, 72 .read = read_maps,
73}; 73};
74 74
75const static struct hpsb_address_ops fcp_ops = { 75static const struct hpsb_address_ops fcp_ops = {
76 .write = write_fcp, 76 .write = write_fcp,
77}; 77};
78 78
79const static struct hpsb_address_ops reg_ops = { 79static const struct hpsb_address_ops reg_ops = {
80 .read = read_regs, 80 .read = read_regs,
81 .write = write_regs, 81 .write = write_regs,
82 .lock = lock_regs, 82 .lock = lock_regs,
83 .lock64 = lock64_regs, 83 .lock64 = lock64_regs,
84}; 84};
85 85
86const static struct hpsb_address_ops config_rom_ops = { 86static const struct hpsb_address_ops config_rom_ops = {
87 .read = read_config_rom, 87 .read = read_config_rom,
88}; 88};
89 89
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cb15bfa38d70..823a6297a1af 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops=
2171 * Export information about protocols/devices supported by this driver. 2171 * Export information about protocols/devices supported by this driver.
2172 */ 2172 */
2173#ifdef MODULE 2173#ifdef MODULE
2174static struct ieee1394_device_id dv1394_id_table[] = { 2174static const struct ieee1394_device_id dv1394_id_table[] = {
2175 { 2175 {
2176 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 2176 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2177 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, 2177 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 1a919df809f8..4ca103577c0a 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host);
181static void ether1394_host_reset(struct hpsb_host *host); 181static void ether1394_host_reset(struct hpsb_host *host);
182 182
183/* Function for incoming 1394 packets */ 183/* Function for incoming 1394 packets */
184const static struct hpsb_address_ops addr_ops = { 184static const struct hpsb_address_ops addr_ops = {
185 .write = ether1394_write, 185 .write = ether1394_write,
186}; 186};
187 187
@@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud)
438 return eth1394_new_node(hi, ud); 438 return eth1394_new_node(hi, ud);
439} 439}
440 440
441static struct ieee1394_device_id eth1394_id_table[] = { 441static const struct ieee1394_device_id eth1394_id_table[] = {
442 { 442 {
443 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID | 443 .match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
444 IEEE1394_MATCH_VERSION), 444 IEEE1394_MATCH_VERSION),
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 600e391c8fe7..4bc443546e04 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
478 return retval; 478 return retval;
479} 479}
480 480
481const static struct hpsb_address_ops dummy_ops; 481static const struct hpsb_address_ops dummy_ops;
482 482
483/* dummy address spaces as lower and upper bounds of the host's a.s. list */ 483/* dummy address spaces as lower and upper bounds of the host's a.s. list */
484static void init_hpsb_highlevel(struct hpsb_host *host) 484static void init_hpsb_highlevel(struct hpsb_host *host)
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 53aada5bbe1e..a6d55bebe61a 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = {
484static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf) 484static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
485{ 485{
486 struct hpsb_protocol_driver *driver; 486 struct hpsb_protocol_driver *driver;
487 struct ieee1394_device_id *id; 487 const struct ieee1394_device_id *id;
488 int length = 0; 488 int length = 0;
489 char *scratch = buf; 489 char *scratch = buf;
490 490
@@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
658{ 658{
659 struct hpsb_protocol_driver *driver; 659 struct hpsb_protocol_driver *driver;
660 struct unit_directory *ud; 660 struct unit_directory *ud;
661 struct ieee1394_device_id *id; 661 const struct ieee1394_device_id *id;
662 662
663 /* We only match unit directories */ 663 /* We only match unit directories */
664 if (dev->platform_data != &nodemgr_ud_platform_data) 664 if (dev->platform_data != &nodemgr_ud_platform_data)
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index ee5acdbd114a..749b271d3107 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -125,7 +125,7 @@ struct hpsb_protocol_driver {
125 * probe function below can implement further protocol 125 * probe function below can implement further protocol
126 * dependent or vendor dependent checking. 126 * dependent or vendor dependent checking.
127 */ 127 */
128 struct ieee1394_device_id *id_table; 128 const struct ieee1394_device_id *id_table;
129 129
130 /* 130 /*
131 * The update function is called when the node has just 131 * The update function is called when the node has just
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index bad66c65b0d6..da5f8829b503 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
90static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, 90static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
91 u64 addr, octlet_t data, octlet_t arg, int ext_tcode, 91 u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
92 u16 flags); 92 u16 flags);
93const static struct hpsb_address_ops arm_ops = { 93static const struct hpsb_address_ops arm_ops = {
94 .read = arm_read, 94 .read = arm_read,
95 .write = arm_write, 95 .write = arm_write,
96 .lock = arm_lock, 96 .lock = arm_lock,
@@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf)
369{ 369{
370 struct compat_raw1394_req __user *cr = (typeof(cr)) buf; 370 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
371 struct raw1394_request __user *r; 371 struct raw1394_request __user *r;
372
372 r = compat_alloc_user_space(sizeof(struct raw1394_request)); 373 r = compat_alloc_user_space(sizeof(struct raw1394_request));
373 374
374#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x)) 375#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
@@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf)
378 C(tag) || 379 C(tag) ||
379 C(sendb) || 380 C(sendb) ||
380 C(recvb)) 381 C(recvb))
381 return ERR_PTR(-EFAULT); 382 return (__force const char __user *)ERR_PTR(-EFAULT);
383
382 return (const char __user *)r; 384 return (const char __user *)r;
383} 385}
384#undef C 386#undef C
@@ -389,6 +391,7 @@ static int
389raw1394_compat_read(const char __user *buf, struct raw1394_request *r) 391raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
390{ 392{
391 struct compat_raw1394_req __user *cr = (typeof(cr)) buf; 393 struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
394
392 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) || 395 if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
393 P(type) || 396 P(type) ||
394 P(error) || 397 P(error) ||
@@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
400 P(sendb) || 403 P(sendb) ||
401 P(recvb)) 404 P(recvb))
402 return -EFAULT; 405 return -EFAULT;
406
403 return sizeof(struct compat_raw1394_req); 407 return sizeof(struct compat_raw1394_req);
404} 408}
405#undef P 409#undef P
@@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
2249 sizeof(struct compat_raw1394_req) != 2253 sizeof(struct compat_raw1394_req) !=
2250 sizeof(struct raw1394_request)) { 2254 sizeof(struct raw1394_request)) {
2251 buffer = raw1394_compat_write(buffer); 2255 buffer = raw1394_compat_write(buffer);
2252 if (IS_ERR(buffer)) 2256 if (IS_ERR((__force void *)buffer))
2253 return PTR_ERR(buffer); 2257 return PTR_ERR((__force void *)buffer);
2254 } else 2258 } else
2255#endif 2259#endif
2256 if (count != sizeof(struct raw1394_request)) { 2260 if (count != sizeof(struct raw1394_request)) {
@@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
2978 * Export information about protocols/devices supported by this driver. 2982 * Export information about protocols/devices supported by this driver.
2979 */ 2983 */
2980#ifdef MODULE 2984#ifdef MODULE
2981static struct ieee1394_device_id raw1394_id_table[] = { 2985static const struct ieee1394_device_id raw1394_id_table[] = {
2982 { 2986 {
2983 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 2987 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
2984 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, 2988 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f3fd8657ce4b..a51ab233342d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = {
265 .host_reset = sbp2_host_reset, 265 .host_reset = sbp2_host_reset,
266}; 266};
267 267
268const static struct hpsb_address_ops sbp2_ops = { 268static const struct hpsb_address_ops sbp2_ops = {
269 .write = sbp2_handle_status_write 269 .write = sbp2_handle_status_write
270}; 270};
271 271
@@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
275static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64, 275static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
276 size_t, u16); 276 size_t, u16);
277 277
278const static struct hpsb_address_ops sbp2_physdma_ops = { 278static const struct hpsb_address_ops sbp2_physdma_ops = {
279 .read = sbp2_handle_physdma_read, 279 .read = sbp2_handle_physdma_read,
280 .write = sbp2_handle_physdma_write, 280 .write = sbp2_handle_physdma_write,
281}; 281};
@@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = {
285/* 285/*
286 * Interface to driver core and IEEE 1394 core 286 * Interface to driver core and IEEE 1394 core
287 */ 287 */
288static struct ieee1394_device_id sbp2_id_table[] = { 288static const struct ieee1394_device_id sbp2_id_table[] = {
289 { 289 {
290 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 290 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
291 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, 291 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
@@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1413 "(firmware_revision 0x%06x, vendor_id 0x%06x," 1413 "(firmware_revision 0x%06x, vendor_id 0x%06x,"
1414 " model_id 0x%06x)", 1414 " model_id 0x%06x)",
1415 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), 1415 NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
1416 workarounds, firmware_revision, 1416 workarounds, firmware_revision, ud->vendor_id,
1417 ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
1418 model); 1417 model);
1419 1418
1420 /* We would need one SCSI host template for each target to adjust 1419 /* We would need one SCSI host template for each target to adjust
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 679a918a5cc7..d287ba79821d 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops=
1294 * Export information about protocols/devices supported by this driver. 1294 * Export information about protocols/devices supported by this driver.
1295 */ 1295 */
1296#ifdef MODULE 1296#ifdef MODULE
1297static struct ieee1394_device_id video1394_id_table[] = { 1297static const struct ieee1394_device_id video1394_id_table[] = {
1298 { 1298 {
1299 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, 1299 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
1300 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, 1300 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index b55d9ccaf33e..32526f103b59 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype)
115} 115}
116 116
117static const char *debug_fcp_opcode(unsigned int opcode, 117static const char *debug_fcp_opcode(unsigned int opcode,
118 const u8 *data, size_t length) 118 const u8 *data, int length)
119{ 119{
120 switch (opcode) { 120 switch (opcode) {
121 case AVC_OPCODE_VENDOR: break; 121 case AVC_OPCODE_VENDOR: break;
@@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode,
135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC"; 135 case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl"; 136 case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK"; 137 case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
138 case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2";
138 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA"; 139 case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
139 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host"; 140 case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
140 } 141 }
141 return "Vendor"; 142 return "Vendor";
142} 143}
143 144
144static void debug_fcp(const u8 *data, size_t length) 145static void debug_fcp(const u8 *data, int length)
145{ 146{
146 unsigned int subunit_type, subunit_id, op; 147 unsigned int subunit_type, subunit_id, op;
147 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> "; 148 const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
@@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
266 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0; 267 c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
267 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1; 268 c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
268 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2; 269 c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
269 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK; 270 if (fdtv->type == FIREDTV_DVB_S2)
271 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2;
272 else
273 c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
270 274
271 c->operand[4] = (params->frequency >> 24) & 0xff; 275 c->operand[4] = (params->frequency >> 24) & 0xff;
272 c->operand[5] = (params->frequency >> 16) & 0xff; 276 c->operand[5] = (params->frequency >> 16) & 0xff;
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 00d46e137b2a..92285d0089c2 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -81,13 +81,16 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
81 81
82 /* go */ 82 /* go */
83 sb->s_flags |= MS_ACTIVE; 83 sb->s_flags |= MS_ACTIVE;
84 return simple_set_mnt(mnt, sb); 84 simple_set_mnt(mnt, sb);
85
86 return 0;
85 87
86 /* new mountpoint for an already mounted superblock */ 88 /* new mountpoint for an already mounted superblock */
87already_mounted: 89already_mounted:
88 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", 90 DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
89 mtd->index, mtd->name); 91 mtd->index, mtd->name);
90 ret = simple_set_mnt(mnt, sb); 92 simple_set_mnt(mnt, sb);
93 ret = 0;
91 goto out_put; 94 goto out_put;
92 95
93out_error: 96out_error:
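
This hunk, like the 9p, cifs, and devpts hunks later on, stops returning simple_set_mnt()'s value and returns 0 explicitly, which suggests simple_set_mnt() now returns void. A sketch of that caller-migration pattern under exactly that assumption, with illustrative names:

#include <stdio.h>

struct mnt {
	int attached;
};

static void simple_set(struct mnt *m)  /* was: static int simple_set(...) */
{
	m->attached = 1;
}

static int get_sb(struct mnt *m)
{
	/* before: return simple_set(m); */
	simple_set(m);
	return 0;
}

int main(void)
{
	struct mnt m = { 0 };
	int ret = get_sb(&m);

	printf("ret=%d attached=%d\n", ret, m.attached);
	return 0;
}
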
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index c295ba786edd..f0c7de78e205 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -41,8 +41,8 @@ extern struct file_system_type v9fs_fs_type;
41extern const struct address_space_operations v9fs_addr_operations; 41extern const struct address_space_operations v9fs_addr_operations;
42extern const struct file_operations v9fs_file_operations; 42extern const struct file_operations v9fs_file_operations;
43extern const struct file_operations v9fs_dir_operations; 43extern const struct file_operations v9fs_dir_operations;
44extern struct dentry_operations v9fs_dentry_operations; 44extern const struct dentry_operations v9fs_dentry_operations;
45extern struct dentry_operations v9fs_cached_dentry_operations; 45extern const struct dentry_operations v9fs_cached_dentry_operations;
46 46
47struct inode *v9fs_get_inode(struct super_block *sb, int mode); 47struct inode *v9fs_get_inode(struct super_block *sb, int mode);
48ino_t v9fs_qid2ino(struct p9_qid *qid); 48ino_t v9fs_qid2ino(struct p9_qid *qid);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 06dcc7c4f234..d74325295b1e 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -104,12 +104,12 @@ void v9fs_dentry_release(struct dentry *dentry)
104 } 104 }
105} 105}
106 106
107struct dentry_operations v9fs_cached_dentry_operations = { 107const struct dentry_operations v9fs_cached_dentry_operations = {
108 .d_delete = v9fs_cached_dentry_delete, 108 .d_delete = v9fs_cached_dentry_delete,
109 .d_release = v9fs_dentry_release, 109 .d_release = v9fs_dentry_release,
110}; 110};
111 111
112struct dentry_operations v9fs_dentry_operations = { 112const struct dentry_operations v9fs_dentry_operations = {
113 .d_delete = v9fs_dentry_delete, 113 .d_delete = v9fs_dentry_delete,
114 .d_release = v9fs_dentry_release, 114 .d_release = v9fs_dentry_release,
115}; 115};
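
Constifying an operations table, as this and the many similar hunks below do, lets the compiler reject any attempt to retarget the function pointers and lets the linker place the table in read-only data. A self-contained sketch of the pattern (illustrative names, not the kernel's struct):

#include <stdio.h>

struct dentry_ops {
	int (*d_compare)(const char *, const char *);
};

static int toy_compare(const char *a, const char *b)
{
	return (a[0] | 0x20) - (b[0] | 0x20);  /* toy case-insensitive compare */
}

static const struct dentry_ops example_ops = {
	.d_compare = toy_compare,
};

int main(void)
{
	/* example_ops.d_compare = NULL; -- would now be a compile-time error */
	printf("%d\n", example_ops.d_compare("Alpha", "alpine"));
	return 0;
}
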
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 93212e40221a..5f8ab8adb5f5 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -168,8 +168,9 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
168 p9stat_free(st); 168 p9stat_free(st);
169 kfree(st); 169 kfree(st);
170 170
171P9_DPRINTK(P9_DEBUG_VFS, " return simple set mount\n"); 171P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
172 return simple_set_mnt(mnt, sb); 172 simple_set_mnt(mnt, sb);
173 return 0;
173 174
174release_sb: 175release_sb:
175 if (sb) { 176 if (sb) {
diff --git a/fs/Kconfig b/fs/Kconfig
index 93945dd0b1ae..cef8b18ceaa3 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -56,61 +56,7 @@ endif # BLOCK
56 56
57source "fs/notify/Kconfig" 57source "fs/notify/Kconfig"
58 58
59config QUOTA 59source "fs/quota/Kconfig"
60 bool "Quota support"
61 help
62 If you say Y here, you will be able to set per user limits for disk
63 usage (also called disk quotas). Currently, it works for the
64 ext2, ext3, and reiserfs file system. ext3 also supports journalled
65 quotas for which you don't need to run quotacheck(8) after an unclean
66 shutdown.
67 For further details, read the Quota mini-HOWTO, available from
68 <http://www.tldp.org/docs.html#howto>, or the documentation provided
69 with the quota tools. Probably the quota support is only useful for
70 multi user systems. If unsure, say N.
71
72config QUOTA_NETLINK_INTERFACE
73 bool "Report quota messages through netlink interface"
74 depends on QUOTA && NET
75 help
76 If you say Y here, quota warnings (about exceeding softlimit, reaching
77 hardlimit, etc.) will be reported through netlink interface. If unsure,
78 say Y.
79
80config PRINT_QUOTA_WARNING
81 bool "Print quota warnings to console (OBSOLETE)"
82 depends on QUOTA
83 default y
84 help
85 If you say Y here, quota warnings (about exceeding softlimit, reaching
86 hardlimit, etc.) will be printed to the process' controlling terminal.
87 Note that this behavior is currently deprecated and may go away in
88 future. Please use notification via netlink socket instead.
89
 90# Generic support for tree structured quota files. Selected when needed.
91config QUOTA_TREE
92 tristate
93
94config QFMT_V1
95 tristate "Old quota format support"
96 depends on QUOTA
97 help
98 This quota format was (is) used by kernels earlier than 2.4.22. If
99 you have quota working and you don't want to convert to new quota
100 format say Y here.
101
102config QFMT_V2
103 tristate "Quota format v2 support"
104 depends on QUOTA
105 select QUOTA_TREE
106 help
107 This quota format allows using quotas with 32-bit UIDs/GIDs. If you
108 need this functionality say Y here.
109
110config QUOTACTL
111 bool
112 depends on XFS_QUOTA || QUOTA
113 default y
114 60
115source "fs/autofs/Kconfig" 61source "fs/autofs/Kconfig"
116source "fs/autofs4/Kconfig" 62source "fs/autofs4/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dc20db348679..6e82a307bcd4 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -51,11 +51,7 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o xattr_acl.o
51obj-$(CONFIG_NFS_COMMON) += nfs_common/ 51obj-$(CONFIG_NFS_COMMON) += nfs_common/
52obj-$(CONFIG_GENERIC_ACL) += generic_acl.o 52obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
53 53
54obj-$(CONFIG_QUOTA) += dquot.o 54obj-y += quota/
55obj-$(CONFIG_QFMT_V1) += quota_v1.o
56obj-$(CONFIG_QFMT_V2) += quota_v2.o
57obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
58obj-$(CONFIG_QUOTACTL) += quota.o
59 55
60obj-$(CONFIG_PROC_FS) += proc/ 56obj-$(CONFIG_PROC_FS) += proc/
61obj-y += partitions/ 57obj-y += partitions/
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 831157502d5a..e0a85dbeeb88 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -86,7 +86,7 @@ void __adfs_error(struct super_block *sb, const char *function,
86/* dir_*.c */ 86/* dir_*.c */
87extern const struct inode_operations adfs_dir_inode_operations; 87extern const struct inode_operations adfs_dir_inode_operations;
88extern const struct file_operations adfs_dir_operations; 88extern const struct file_operations adfs_dir_operations;
89extern struct dentry_operations adfs_dentry_operations; 89extern const struct dentry_operations adfs_dentry_operations;
90extern struct adfs_dir_ops adfs_f_dir_ops; 90extern struct adfs_dir_ops adfs_f_dir_ops;
91extern struct adfs_dir_ops adfs_fplus_dir_ops; 91extern struct adfs_dir_ops adfs_fplus_dir_ops;
92 92
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 85a30e929800..e867ccf37246 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -263,7 +263,7 @@ adfs_compare(struct dentry *parent, struct qstr *entry, struct qstr *name)
263 return 0; 263 return 0;
264} 264}
265 265
266struct dentry_operations adfs_dentry_operations = { 266const struct dentry_operations adfs_dentry_operations = {
267 .d_hash = adfs_hash, 267 .d_hash = adfs_hash,
268 .d_compare = adfs_compare, 268 .d_compare = adfs_compare,
269}; 269};
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e9ec915f7553..1a2d5e3c7f4e 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -199,8 +199,7 @@ extern const struct address_space_operations affs_symlink_aops;
199extern const struct address_space_operations affs_aops; 199extern const struct address_space_operations affs_aops;
200extern const struct address_space_operations affs_aops_ofs; 200extern const struct address_space_operations affs_aops_ofs;
201 201
202extern struct dentry_operations affs_dentry_operations; 202extern const struct dentry_operations affs_dentry_operations;
203extern struct dentry_operations affs_dentry_operations_intl;
204 203
205static inline void 204static inline void
206affs_set_blocksize(struct super_block *sb, int size) 205affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 805573005de6..7d0f0a30f7a3 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -179,14 +179,18 @@ affs_remove_link(struct dentry *dentry)
179 affs_lock_dir(dir); 179 affs_lock_dir(dir);
180 affs_fix_dcache(dentry, link_ino); 180 affs_fix_dcache(dentry, link_ino);
181 retval = affs_remove_hash(dir, link_bh); 181 retval = affs_remove_hash(dir, link_bh);
182 if (retval) 182 if (retval) {
183 affs_unlock_dir(dir);
183 goto done; 184 goto done;
185 }
184 mark_buffer_dirty_inode(link_bh, inode); 186 mark_buffer_dirty_inode(link_bh, inode);
185 187
186 memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32); 188 memcpy(AFFS_TAIL(sb, bh)->name, AFFS_TAIL(sb, link_bh)->name, 32);
187 retval = affs_insert_hash(dir, bh); 189 retval = affs_insert_hash(dir, bh);
188 if (retval) 190 if (retval) {
191 affs_unlock_dir(dir);
189 goto done; 192 goto done;
193 }
190 mark_buffer_dirty_inode(bh, inode); 194 mark_buffer_dirty_inode(bh, inode);
191 195
192 affs_unlock_dir(dir); 196 affs_unlock_dir(dir);
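
The fix above drops the directory lock on the two early-exit paths that previously jumped to done with the lock still held. A user-space pthread sketch of the same unlock-before-bailing pattern (illustrative names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

static int remove_hash(int fail)
{
	return fail ? -1 : 0;
}

static int remove_link(int fail)
{
	int retval;

	pthread_mutex_lock(&dir_lock);
	retval = remove_hash(fail);
	if (retval) {
		pthread_mutex_unlock(&dir_lock);  /* the fix: drop the lock first */
		goto done;
	}
	pthread_mutex_unlock(&dir_lock);
done:
	return retval;
}

int main(void)
{
	printf("ok=%d err=%d\n", remove_link(0), remove_link(1));
	return 0;
}
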
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index cfcf1b6cf82b..960d336ec694 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -19,12 +19,12 @@ static int affs_intl_toupper(int ch);
19static int affs_intl_hash_dentry(struct dentry *, struct qstr *); 19static int affs_intl_hash_dentry(struct dentry *, struct qstr *);
20static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 20static int affs_intl_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
21 21
22struct dentry_operations affs_dentry_operations = { 22const struct dentry_operations affs_dentry_operations = {
23 .d_hash = affs_hash_dentry, 23 .d_hash = affs_hash_dentry,
24 .d_compare = affs_compare_dentry, 24 .d_compare = affs_compare_dentry,
25}; 25};
26 26
27static struct dentry_operations affs_intl_dentry_operations = { 27static const struct dentry_operations affs_intl_dentry_operations = {
28 .d_hash = affs_intl_hash_dentry, 28 .d_hash = affs_intl_hash_dentry,
29 .d_compare = affs_intl_compare_dentry, 29 .d_compare = affs_intl_compare_dentry,
30}; 30};
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 99cf390641f7..9bd757774c9e 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -62,7 +62,7 @@ const struct inode_operations afs_dir_inode_operations = {
62 .setattr = afs_setattr, 62 .setattr = afs_setattr,
63}; 63};
64 64
65static struct dentry_operations afs_fs_dentry_operations = { 65static const struct dentry_operations afs_fs_dentry_operations = {
66 .d_revalidate = afs_d_revalidate, 66 .d_revalidate = afs_d_revalidate,
67 .d_delete = afs_d_delete, 67 .d_delete = afs_d_delete,
68 .d_release = afs_d_release, 68 .d_release = afs_d_release,
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 3bbdb9d02376..1dd96d4406c0 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -48,7 +48,7 @@ static struct file_system_type anon_inode_fs_type = {
48 .get_sb = anon_inodefs_get_sb, 48 .get_sb = anon_inodefs_get_sb,
49 .kill_sb = kill_anon_super, 49 .kill_sb = kill_anon_super,
50}; 50};
51static struct dentry_operations anon_inodefs_dentry_operations = { 51static const struct dentry_operations anon_inodefs_dentry_operations = {
52 .d_delete = anon_inodefs_delete_dentry, 52 .d_delete = anon_inodefs_delete_dentry,
53}; 53};
54 54
diff --git a/fs/attr.c b/fs/attr.c
index f4360192a938..9fe1b1bd30a8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -173,7 +173,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
173 if (!error) { 173 if (!error) {
174 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 174 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
175 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) 175 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
176 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 176 error = vfs_dq_transfer(inode, attr) ?
177 -EDQUOT : 0;
177 if (!error) 178 if (!error)
178 error = inode_setattr(inode, attr); 179 error = inode_setattr(inode, attr);
179 } 180 }
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8aacade56956..4a1401cea0a1 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -192,7 +192,7 @@ static int autofs_revalidate(struct dentry * dentry, struct nameidata *nd)
192 return 1; 192 return 1;
193} 193}
194 194
195static struct dentry_operations autofs_dentry_operations = { 195static const struct dentry_operations autofs_dentry_operations = {
196 .d_revalidate = autofs_revalidate, 196 .d_revalidate = autofs_revalidate,
197}; 197};
198 198
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 716e12b627b2..69c8142da838 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -310,7 +310,7 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi)
310 return ino; 310 return ino;
311} 311}
312 312
313static struct dentry_operations autofs4_sb_dentry_operations = { 313static const struct dentry_operations autofs4_sb_dentry_operations = {
314 .d_release = autofs4_dentry_release, 314 .d_release = autofs4_dentry_release,
315}; 315};
316 316
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2a41c2a7fc52..74b1469a9504 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -349,13 +349,13 @@ void autofs4_dentry_release(struct dentry *de)
349} 349}
350 350
351/* For dentries of directories in the root dir */ 351/* For dentries of directories in the root dir */
352static struct dentry_operations autofs4_root_dentry_operations = { 352static const struct dentry_operations autofs4_root_dentry_operations = {
353 .d_revalidate = autofs4_revalidate, 353 .d_revalidate = autofs4_revalidate,
354 .d_release = autofs4_dentry_release, 354 .d_release = autofs4_dentry_release,
355}; 355};
356 356
357/* For other dentries */ 357/* For other dentries */
358static struct dentry_operations autofs4_dentry_operations = { 358static const struct dentry_operations autofs4_dentry_operations = {
359 .d_revalidate = autofs4_revalidate, 359 .d_revalidate = autofs4_revalidate,
360 .d_release = autofs4_dentry_release, 360 .d_release = autofs4_dentry_release,
361}; 361};
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b3c1efff5e1d..8c3c6899ccf3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/blkpg.h> 19#include <linux/blkpg.h>
20#include <linux/buffer_head.h> 20#include <linux/buffer_head.h>
21#include <linux/pagevec.h>
21#include <linux/writeback.h> 22#include <linux/writeback.h>
22#include <linux/mpage.h> 23#include <linux/mpage.h>
23#include <linux/mount.h> 24#include <linux/mount.h>
@@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
174 iov, offset, nr_segs, blkdev_get_blocks, NULL); 175 iov, offset, nr_segs, blkdev_get_blocks, NULL);
175} 176}
176 177
178/*
179 * Write out and wait upon all the dirty data associated with a block
180 * device via its mapping. Does not take the superblock lock.
181 */
182int sync_blockdev(struct block_device *bdev)
183{
184 int ret = 0;
185
186 if (bdev)
187 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
188 return ret;
189}
190EXPORT_SYMBOL(sync_blockdev);
191
192/*
193 * Write out and wait upon all dirty data associated with this
194 * device. Filesystem data as well as the underlying block
195 * device. Takes the superblock lock.
196 */
197int fsync_bdev(struct block_device *bdev)
198{
199 struct super_block *sb = get_super(bdev);
200 if (sb) {
201 int res = fsync_super(sb);
202 drop_super(sb);
203 return res;
204 }
205 return sync_blockdev(bdev);
206}
207
208/**
209 * freeze_bdev -- lock a filesystem and force it into a consistent state
210 * @bdev: blockdevice to lock
211 *
212 * This takes the block device bd_mount_sem to make sure no new mounts
213 * happen on bdev until thaw_bdev() is called.
214 * If a superblock is found on this device, we take the s_umount semaphore
215 * on it to make sure nobody unmounts until the snapshot creation is done.
216 * The reference counter (bd_fsfreeze_count) guarantees that only the last
217 * unfreeze process can unfreeze the frozen filesystem actually when multiple
218 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
219 * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
220 * actually.
221 */
222struct super_block *freeze_bdev(struct block_device *bdev)
223{
224 struct super_block *sb;
225 int error = 0;
226
227 mutex_lock(&bdev->bd_fsfreeze_mutex);
228 if (bdev->bd_fsfreeze_count > 0) {
229 bdev->bd_fsfreeze_count++;
230 sb = get_super(bdev);
231 mutex_unlock(&bdev->bd_fsfreeze_mutex);
232 return sb;
233 }
234 bdev->bd_fsfreeze_count++;
235
236 down(&bdev->bd_mount_sem);
237 sb = get_super(bdev);
238 if (sb && !(sb->s_flags & MS_RDONLY)) {
239 sb->s_frozen = SB_FREEZE_WRITE;
240 smp_wmb();
241
242 __fsync_super(sb);
243
244 sb->s_frozen = SB_FREEZE_TRANS;
245 smp_wmb();
246
247 sync_blockdev(sb->s_bdev);
248
249 if (sb->s_op->freeze_fs) {
250 error = sb->s_op->freeze_fs(sb);
251 if (error) {
252 printk(KERN_ERR
253 "VFS:Filesystem freeze failed\n");
254 sb->s_frozen = SB_UNFROZEN;
255 drop_super(sb);
256 up(&bdev->bd_mount_sem);
257 bdev->bd_fsfreeze_count--;
258 mutex_unlock(&bdev->bd_fsfreeze_mutex);
259 return ERR_PTR(error);
260 }
261 }
262 }
263
264 sync_blockdev(bdev);
265 mutex_unlock(&bdev->bd_fsfreeze_mutex);
266
267 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
268}
269EXPORT_SYMBOL(freeze_bdev);
270
271/**
272 * thaw_bdev -- unlock filesystem
273 * @bdev: blockdevice to unlock
274 * @sb: associated superblock
275 *
276 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
277 */
278int thaw_bdev(struct block_device *bdev, struct super_block *sb)
279{
280 int error = 0;
281
282 mutex_lock(&bdev->bd_fsfreeze_mutex);
283 if (!bdev->bd_fsfreeze_count) {
284 mutex_unlock(&bdev->bd_fsfreeze_mutex);
285 return -EINVAL;
286 }
287
288 bdev->bd_fsfreeze_count--;
289 if (bdev->bd_fsfreeze_count > 0) {
290 if (sb)
291 drop_super(sb);
292 mutex_unlock(&bdev->bd_fsfreeze_mutex);
293 return 0;
294 }
295
296 if (sb) {
297 BUG_ON(sb->s_bdev != bdev);
298 if (!(sb->s_flags & MS_RDONLY)) {
299 if (sb->s_op->unfreeze_fs) {
300 error = sb->s_op->unfreeze_fs(sb);
301 if (error) {
302 printk(KERN_ERR
303 "VFS:Filesystem thaw failed\n");
304 sb->s_frozen = SB_FREEZE_TRANS;
305 bdev->bd_fsfreeze_count++;
306 mutex_unlock(&bdev->bd_fsfreeze_mutex);
307 return error;
308 }
309 }
310 sb->s_frozen = SB_UNFROZEN;
311 smp_wmb();
312 wake_up(&sb->s_wait_unfrozen);
313 }
314 drop_super(sb);
315 }
316
317 up(&bdev->bd_mount_sem);
318 mutex_unlock(&bdev->bd_fsfreeze_mutex);
319 return 0;
320}
321EXPORT_SYMBOL(thaw_bdev);
322
177static int blkdev_writepage(struct page *page, struct writeback_control *wbc) 323static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
178{ 324{
179 return block_write_full_page(page, blkdev_get_block, wbc); 325 return block_write_full_page(page, blkdev_get_block, wbc);
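
The moved freeze/thaw code is, at heart, a mutex-serialized reference counter: only the first freeze_bdev() does the real work and only the last thaw_bdev() undoes it. A user-space analogue of just that counting logic (illustrative names; the real code also handles superblocks and error unwinding):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t freeze_mutex = PTHREAD_MUTEX_INITIALIZER;
static int freeze_count;

static void do_freeze(void)
{
	printf("filesystem frozen\n");
}

static void do_unfreeze(void)
{
	printf("filesystem thawed\n");
}

static void freeze(void)
{
	pthread_mutex_lock(&freeze_mutex);
	if (freeze_count++ == 0)           /* first freezer does the real work */
		do_freeze();
	pthread_mutex_unlock(&freeze_mutex);
}

static int thaw(void)
{
	int ret = 0;

	pthread_mutex_lock(&freeze_mutex);
	if (freeze_count == 0)
		ret = -1;                  /* -EINVAL in the kernel version */
	else if (--freeze_count == 0)      /* last thawer undoes it */
		do_unfreeze();
	pthread_mutex_unlock(&freeze_mutex);
	return ret;
}

int main(void)
{
	freeze();                           /* freezes */
	freeze();                           /* nested: count only */
	thaw();                             /* 2 -> 1, still frozen */
	thaw();                             /* 1 -> 0, thaws */
	printf("extra thaw: %d\n", thaw()); /* fails: nothing is frozen */
	return 0;
}
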
diff --git a/fs/buffer.c b/fs/buffer.c
index 891e1c78e4f1..a2fd743d97cb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -166,151 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
166} 166}
167 167
168/* 168/*
169 * Write out and wait upon all the dirty data associated with a block
170 * device via its mapping. Does not take the superblock lock.
171 */
172int sync_blockdev(struct block_device *bdev)
173{
174 int ret = 0;
175
176 if (bdev)
177 ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
178 return ret;
179}
180EXPORT_SYMBOL(sync_blockdev);
181
182/*
183 * Write out and wait upon all dirty data associated with this
184 * device. Filesystem data as well as the underlying block
185 * device. Takes the superblock lock.
186 */
187int fsync_bdev(struct block_device *bdev)
188{
189 struct super_block *sb = get_super(bdev);
190 if (sb) {
191 int res = fsync_super(sb);
192 drop_super(sb);
193 return res;
194 }
195 return sync_blockdev(bdev);
196}
197
198/**
199 * freeze_bdev -- lock a filesystem and force it into a consistent state
200 * @bdev: blockdevice to lock
201 *
202 * This takes the block device bd_mount_sem to make sure no new mounts
203 * happen on bdev until thaw_bdev() is called.
204 * If a superblock is found on this device, we take the s_umount semaphore
205 * on it to make sure nobody unmounts until the snapshot creation is done.
206 * The reference counter (bd_fsfreeze_count) guarantees that only the last
207 * unfreeze process can unfreeze the frozen filesystem actually when multiple
208 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
209 * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
210 * actually.
211 */
212struct super_block *freeze_bdev(struct block_device *bdev)
213{
214 struct super_block *sb;
215 int error = 0;
216
217 mutex_lock(&bdev->bd_fsfreeze_mutex);
218 if (bdev->bd_fsfreeze_count > 0) {
219 bdev->bd_fsfreeze_count++;
220 sb = get_super(bdev);
221 mutex_unlock(&bdev->bd_fsfreeze_mutex);
222 return sb;
223 }
224 bdev->bd_fsfreeze_count++;
225
226 down(&bdev->bd_mount_sem);
227 sb = get_super(bdev);
228 if (sb && !(sb->s_flags & MS_RDONLY)) {
229 sb->s_frozen = SB_FREEZE_WRITE;
230 smp_wmb();
231
232 __fsync_super(sb);
233
234 sb->s_frozen = SB_FREEZE_TRANS;
235 smp_wmb();
236
237 sync_blockdev(sb->s_bdev);
238
239 if (sb->s_op->freeze_fs) {
240 error = sb->s_op->freeze_fs(sb);
241 if (error) {
242 printk(KERN_ERR
243 "VFS:Filesystem freeze failed\n");
244 sb->s_frozen = SB_UNFROZEN;
245 drop_super(sb);
246 up(&bdev->bd_mount_sem);
247 bdev->bd_fsfreeze_count--;
248 mutex_unlock(&bdev->bd_fsfreeze_mutex);
249 return ERR_PTR(error);
250 }
251 }
252 }
253
254 sync_blockdev(bdev);
255 mutex_unlock(&bdev->bd_fsfreeze_mutex);
256
257 return sb; /* thaw_bdev releases s->s_umount and bd_mount_sem */
258}
259EXPORT_SYMBOL(freeze_bdev);
260
261/**
262 * thaw_bdev -- unlock filesystem
263 * @bdev: blockdevice to unlock
264 * @sb: associated superblock
265 *
266 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
267 */
268int thaw_bdev(struct block_device *bdev, struct super_block *sb)
269{
270 int error = 0;
271
272 mutex_lock(&bdev->bd_fsfreeze_mutex);
273 if (!bdev->bd_fsfreeze_count) {
274 mutex_unlock(&bdev->bd_fsfreeze_mutex);
275 return -EINVAL;
276 }
277
278 bdev->bd_fsfreeze_count--;
279 if (bdev->bd_fsfreeze_count > 0) {
280 if (sb)
281 drop_super(sb);
282 mutex_unlock(&bdev->bd_fsfreeze_mutex);
283 return 0;
284 }
285
286 if (sb) {
287 BUG_ON(sb->s_bdev != bdev);
288 if (!(sb->s_flags & MS_RDONLY)) {
289 if (sb->s_op->unfreeze_fs) {
290 error = sb->s_op->unfreeze_fs(sb);
291 if (error) {
292 printk(KERN_ERR
293 "VFS:Filesystem thaw failed\n");
294 sb->s_frozen = SB_FREEZE_TRANS;
295 bdev->bd_fsfreeze_count++;
296 mutex_unlock(&bdev->bd_fsfreeze_mutex);
297 return error;
298 }
299 }
300 sb->s_frozen = SB_UNFROZEN;
301 smp_wmb();
302 wake_up(&sb->s_wait_unfrozen);
303 }
304 drop_super(sb);
305 }
306
307 up(&bdev->bd_mount_sem);
308 mutex_unlock(&bdev->bd_fsfreeze_mutex);
309 return 0;
310}
311EXPORT_SYMBOL(thaw_bdev);
312
313/*
314 * Various filesystems appear to want __find_get_block to be non-blocking. 169 * Various filesystems appear to want __find_get_block to be non-blocking.
315 * But it's the page lock which protects the buffers. To get around this, 170 * But it's the page lock which protects the buffers. To get around this,
316 * we get exclusion from try_to_free_buffers with the blockdev mapping's 171 * we get exclusion from try_to_free_buffers with the blockdev mapping's
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 13ea53251dcf..38491fd3871d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -606,7 +606,8 @@ cifs_get_sb(struct file_system_type *fs_type,
606 return rc; 606 return rc;
607 } 607 }
608 sb->s_flags |= MS_ACTIVE; 608 sb->s_flags |= MS_ACTIVE;
609 return simple_set_mnt(mnt, sb); 609 simple_set_mnt(mnt, sb);
610 return 0;
610} 611}
611 612
612static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 613static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 2b1d28a9ee28..77e190dc2883 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -78,8 +78,8 @@ extern int cifs_dir_open(struct inode *inode, struct file *file);
78extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); 78extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
79 79
80/* Functions related to dir entries */ 80/* Functions related to dir entries */
81extern struct dentry_operations cifs_dentry_ops; 81extern const struct dentry_operations cifs_dentry_ops;
82extern struct dentry_operations cifs_ci_dentry_ops; 82extern const struct dentry_operations cifs_ci_dentry_ops;
83 83
84/* Functions related to symlinks */ 84/* Functions related to symlinks */
85extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd); 85extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f9b6f68be976..2f35cccfcd8d 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -701,7 +701,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
701 return rc; 701 return rc;
702} */ 702} */
703 703
704struct dentry_operations cifs_dentry_ops = { 704const struct dentry_operations cifs_dentry_ops = {
705 .d_revalidate = cifs_d_revalidate, 705 .d_revalidate = cifs_d_revalidate,
706/* d_delete: cifs_d_delete, */ /* not needed except for debugging */ 706/* d_delete: cifs_d_delete, */ /* not needed except for debugging */
707}; 707};
@@ -739,7 +739,7 @@ static int cifs_ci_compare(struct dentry *dentry, struct qstr *a,
739 return 1; 739 return 1;
740} 740}
741 741
742struct dentry_operations cifs_ci_dentry_ops = { 742const struct dentry_operations cifs_ci_dentry_ops = {
743 .d_revalidate = cifs_d_revalidate, 743 .d_revalidate = cifs_d_revalidate,
744 .d_hash = cifs_ci_hash, 744 .d_hash = cifs_ci_hash,
745 .d_compare = cifs_ci_compare, 745 .d_compare = cifs_ci_compare,
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 75b1fa90b2cb..4bb9d0a5decc 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -59,7 +59,7 @@ static int coda_return_EIO(void)
59} 59}
60#define CODA_EIO_ERROR ((void *) (coda_return_EIO)) 60#define CODA_EIO_ERROR ((void *) (coda_return_EIO))
61 61
62static struct dentry_operations coda_dentry_operations = 62static const struct dentry_operations coda_dentry_operations =
63{ 63{
64 .d_revalidate = coda_dentry_revalidate, 64 .d_revalidate = coda_dentry_revalidate,
65 .d_delete = coda_dentry_delete, 65 .d_delete = coda_dentry_delete,
diff --git a/fs/compat.c b/fs/compat.c
index 0949b43794a4..5e374aad33f7 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -378,6 +378,34 @@ out:
378 return error; 378 return error;
379} 379}
380 380
381/*
382 * This is a copy of sys_ustat, just dealing with a structure layout.
 383 * Given how simple this syscall is, that approach is more maintainable
384 * than the various conversion hacks.
385 */
386asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
387{
388 struct super_block *sb;
389 struct compat_ustat tmp;
390 struct kstatfs sbuf;
391 int err;
392
393 sb = user_get_super(new_decode_dev(dev));
394 if (!sb)
395 return -EINVAL;
396 err = vfs_statfs(sb->s_root, &sbuf);
397 drop_super(sb);
398 if (err)
399 return err;
400
401 memset(&tmp, 0, sizeof(struct compat_ustat));
402 tmp.f_tfree = sbuf.f_bfree;
403 tmp.f_tinode = sbuf.f_ffree;
404 if (copy_to_user(u, &tmp, sizeof(struct compat_ustat)))
405 return -EFAULT;
406 return 0;
407}
408
381static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl) 409static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
382{ 410{
383 if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) || 411 if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
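
compat_sys_ustat() follows the usual compat-shim recipe: build the 32-bit layout in a zeroed temporary, then copy it out in one operation so no uninitialized padding leaks to user space. A user-space sketch of the recipe with stand-ins for copy_to_user() (illustrative names and layouts):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct native_stat { uint64_t free_blocks, free_inodes; };
struct compat_stat { uint32_t free_blocks, free_inodes; };  /* 32-bit ABI layout */

static int fill_compat(struct compat_stat *out, const struct native_stat *in)
{
	struct compat_stat tmp;

	memset(&tmp, 0, sizeof(tmp));      /* no uninitialized padding leaks */
	tmp.free_blocks = (uint32_t)in->free_blocks;
	tmp.free_inodes = (uint32_t)in->free_inodes;
	memcpy(out, &tmp, sizeof(tmp));    /* copy_to_user() in the kernel */
	return 0;
}

int main(void)
{
	struct native_stat ns = { 123456, 789 };
	struct compat_stat cs;

	fill_compat(&cs, &ns);
	printf("blocks=%u inodes=%u\n", cs.free_blocks, cs.free_inodes);
	return 0;
}
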
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e93341f3e82..05373db21a4e 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@ static int configfs_d_delete(struct dentry *dentry)
72 return 1; 72 return 1;
73} 73}
74 74
75static struct dentry_operations configfs_dentry_ops = { 75static const struct dentry_operations configfs_dentry_ops = {
76 .d_iput = configfs_d_iput, 76 .d_iput = configfs_d_iput,
77 /* simple_delete_dentry() isn't exported */ 77 /* simple_delete_dentry() isn't exported */
78 .d_delete = configfs_d_delete, 78 .d_delete = configfs_d_delete,
diff --git a/fs/dcache.c b/fs/dcache.c
index 07e2d4a44bda..90bbd7e1b116 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1247,15 +1247,18 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1247 struct dentry *found; 1247 struct dentry *found;
1248 struct dentry *new; 1248 struct dentry *new;
1249 1249
1250 /* Does a dentry matching the name exist already? */ 1250 /*
1251 * First check if a dentry matching the name already exists,
1252 * if not go ahead and create it now.
1253 */
1251 found = d_hash_and_lookup(dentry->d_parent, name); 1254 found = d_hash_and_lookup(dentry->d_parent, name);
1252 /* If not, create it now and return */
1253 if (!found) { 1255 if (!found) {
1254 new = d_alloc(dentry->d_parent, name); 1256 new = d_alloc(dentry->d_parent, name);
1255 if (!new) { 1257 if (!new) {
1256 error = -ENOMEM; 1258 error = -ENOMEM;
1257 goto err_out; 1259 goto err_out;
1258 } 1260 }
1261
1259 found = d_splice_alias(inode, new); 1262 found = d_splice_alias(inode, new);
1260 if (found) { 1263 if (found) {
1261 dput(new); 1264 dput(new);
@@ -1263,61 +1266,46 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1263 } 1266 }
1264 return new; 1267 return new;
1265 } 1268 }
1266 /* Matching dentry exists, check if it is negative. */ 1269
1270 /*
 1271 * If a matching dentry exists and it's not negative, use it.
1272 *
1273 * Decrement the reference count to balance the iget() done
1274 * earlier on.
1275 */
1267 if (found->d_inode) { 1276 if (found->d_inode) {
1268 if (unlikely(found->d_inode != inode)) { 1277 if (unlikely(found->d_inode != inode)) {
1269 /* This can't happen because bad inodes are unhashed. */ 1278 /* This can't happen because bad inodes are unhashed. */
1270 BUG_ON(!is_bad_inode(inode)); 1279 BUG_ON(!is_bad_inode(inode));
1271 BUG_ON(!is_bad_inode(found->d_inode)); 1280 BUG_ON(!is_bad_inode(found->d_inode));
1272 } 1281 }
1273 /*
1274 * Already have the inode and the dentry attached, decrement
1275 * the reference count to balance the iget() done
1276 * earlier on. We found the dentry using d_lookup() so it
1277 * cannot be disconnected and thus we do not need to worry
1278 * about any NFS/disconnectedness issues here.
1279 */
1280 iput(inode); 1282 iput(inode);
1281 return found; 1283 return found;
1282 } 1284 }
1285
1283 /* 1286 /*
1284 * Negative dentry: instantiate it unless the inode is a directory and 1287 * Negative dentry: instantiate it unless the inode is a directory and
1285 * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED), 1288 * already has a dentry.
1286 * in which case d_move() that in place of the found dentry.
1287 */ 1289 */
1288 if (!S_ISDIR(inode->i_mode)) {
1289 /* Not a directory; everything is easy. */
1290 d_instantiate(found, inode);
1291 return found;
1292 }
1293 spin_lock(&dcache_lock); 1290 spin_lock(&dcache_lock);
1294 if (list_empty(&inode->i_dentry)) { 1291 if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
1295 /*
1296 * Directory without a 'disconnected' dentry; we need to do
1297 * d_instantiate() by hand because it takes dcache_lock which
1298 * we already hold.
1299 */
1300 __d_instantiate(found, inode); 1292 __d_instantiate(found, inode);
1301 spin_unlock(&dcache_lock); 1293 spin_unlock(&dcache_lock);
1302 security_d_instantiate(found, inode); 1294 security_d_instantiate(found, inode);
1303 return found; 1295 return found;
1304 } 1296 }
1297
1305 /* 1298 /*
 1306 * Directory with a 'disconnected' dentry; get a reference to the 1299 * In case a directory already has a (disconnected) entry, grab a
1307 * 'disconnected' dentry. 1300 * reference to it, move it in place and use it.
1308 */ 1301 */
1309 new = list_entry(inode->i_dentry.next, struct dentry, d_alias); 1302 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
1310 dget_locked(new); 1303 dget_locked(new);
1311 spin_unlock(&dcache_lock); 1304 spin_unlock(&dcache_lock);
 1312 /* Do security voodoo. */
1313 security_d_instantiate(found, inode); 1305 security_d_instantiate(found, inode);
1314 /* Move new in place of found. */
1315 d_move(new, found); 1306 d_move(new, found);
1316 /* Balance the iget() we did above. */
1317 iput(inode); 1307 iput(inode);
1318 /* Throw away found. */
1319 dput(found); 1308 dput(found);
1320 /* Use new as the actual dentry. */
1321 return new; 1309 return new;
1322 1310
1323err_out: 1311err_out:
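
The rewritten d_add_ci() boils down to find-or-create with reference balancing: create and attach on a miss, reuse a populated entry and drop the caller's extra reference, or instantiate a negative entry. A toy cache showing only that control flow (illustrative; it ignores the directory-alias and locking subtleties):

#include <stdio.h>
#include <string.h>

struct obj {
	int refs;
};

struct entry {
	char name[16];
	struct obj *obj;              /* NULL models a negative dentry */
};

static struct entry cache[8];
static int used;

static struct entry *lookup(const char *name)
{
	int i;

	for (i = 0; i < used; i++)
		if (!strcmp(cache[i].name, name))
			return &cache[i];
	return NULL;
}

/* Caller passes an object carrying one extra reference, as d_add_ci()
 * receives an inode from a prior iget(). */
static struct entry *add_ci(const char *name, struct obj *o)
{
	struct entry *e = lookup(name);

	if (!e) {                     /* miss: create and attach */
		e = &cache[used++];
		snprintf(e->name, sizeof(e->name), "%s", name);
		e->obj = o;
		return e;
	}
	if (e->obj) {                 /* populated entry: reuse it ... */
		o->refs--;            /* ... and balance the extra ref */
		return e;
	}
	e->obj = o;                   /* negative entry: instantiate */
	return e;
}

int main(void)
{
	struct obj o = { 0 };

	o.refs++;
	add_ci("File", &o);           /* created; entry now owns the ref */
	o.refs++;
	add_ci("File", &o);           /* reused; extra ref dropped again */
	printf("refs=%d entries=%d\n", o.refs, used);  /* refs=1 entries=1 */
	return 0;
}
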
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index bff4052b05e7..63a4a59e4148 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -322,177 +322,81 @@ static int compare_init_pts_sb(struct super_block *s, void *p)
322} 322}
323 323
324/* 324/*
325 * Safely parse the mount options in @data and update @opts. 325 * devpts_get_sb()
326 * 326 *
327 * devpts ends up parsing options two times during mount, due to the 327 * If the '-o newinstance' mount option was specified, mount a new
328 * two modes of operation it supports. The first parse occurs in 328 * (private) instance of devpts. PTYs created in this instance are
329 * devpts_get_sb() when determining the mode (single-instance or 329 * independent of the PTYs in other devpts instances.
330 * multi-instance mode). The second parse happens in devpts_remount()
331 * or new_pts_mount() depending on the mode.
332 * 330 *
333 * Parsing of options modifies the @data making subsequent parsing 331 * If the '-o newinstance' option was not specified, mount/remount the
334 * incorrect. So make a local copy of @data and parse it. 332 * initial kernel mount of devpts. This type of mount gives the
333 * legacy, single-instance semantics.
335 * 334 *
336 * Return: 0 On success, -errno on error 335 * The 'newinstance' option is needed to support multiple namespace
337 */ 336 * semantics in devpts while preserving backward compatibility of the
 338static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts) 337 * current 'single-namespace' semantics, i.e. all mounts of devpts
339{ 338 * without the 'newinstance' mount option should bind to the initial
340 int rc; 339 * kernel mount, like get_sb_single().
341 void *datacp;
342
343 if (!data)
344 return 0;
345
346 /* Use kstrdup() ? */
347 datacp = kmalloc(PAGE_SIZE, GFP_KERNEL);
348 if (!datacp)
349 return -ENOMEM;
350
351 memcpy(datacp, data, PAGE_SIZE);
352 rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts);
353 kfree(datacp);
354
355 return rc;
356}
357
358/*
359 * Mount a new (private) instance of devpts. PTYs created in this
360 * instance are independent of the PTYs in other devpts instances.
361 */
362static int new_pts_mount(struct file_system_type *fs_type, int flags,
363 void *data, struct vfsmount *mnt)
364{
365 int err;
366 struct pts_fs_info *fsi;
367 struct pts_mount_opts *opts;
368
369 err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt);
370 if (err)
371 return err;
372
373 fsi = DEVPTS_SB(mnt->mnt_sb);
374 opts = &fsi->mount_opts;
375
376 err = parse_mount_options(data, PARSE_MOUNT, opts);
377 if (err)
378 goto fail;
379
380 err = mknod_ptmx(mnt->mnt_sb);
381 if (err)
382 goto fail;
383
384 return 0;
385
386fail:
387 dput(mnt->mnt_sb->s_root);
388 deactivate_super(mnt->mnt_sb);
389 return err;
390}
391
392/*
393 * Check if 'newinstance' mount option was specified in @data.
394 * 340 *
395 * Return: -errno on error (eg: invalid mount options specified) 341 * Mounts with 'newinstance' option create a new, private namespace.
396 * : 1 if 'newinstance' mount option was specified
397 * : 0 if 'newinstance' mount option was NOT specified
398 */
399static int is_new_instance_mount(void *data)
400{
401 int rc;
402 struct pts_mount_opts opts;
403
404 if (!data)
405 return 0;
406
407 rc = safe_parse_mount_options(data, &opts);
408 if (!rc)
409 rc = opts.newinstance;
410
411 return rc;
412}
413
414/*
415 * get_init_pts_sb()
416 *
417 * This interface is needed to support multiple namespace semantics in
418 * devpts while preserving backward compatibility of the current 'single-
419 * namespace' semantics. i.e all mounts of devpts without the 'newinstance'
420 * mount option should bind to the initial kernel mount, like
421 * get_sb_single().
422 * 342 *
423 * Mounts with 'newinstance' option create a new private namespace. 343 * NOTE:
424 * 344 *
425 * But for single-mount semantics, devpts cannot use get_sb_single(), 345 * For single-mount semantics, devpts cannot use get_sb_single(),
426 * because get_sb_single()/sget() find and use the super-block from 346 * because get_sb_single()/sget() find and use the super-block from
427 * the most recent mount of devpts. But that recent mount may be a 347 * the most recent mount of devpts. But that recent mount may be a
428 * 'newinstance' mount and get_sb_single() would pick the newinstance 348 * 'newinstance' mount and get_sb_single() would pick the newinstance
429 * super-block instead of the initial super-block. 349 * super-block instead of the initial super-block.
430 *
431 * This interface is identical to get_sb_single() except that it
432 * consistently selects the 'single-namespace' superblock even in the
433 * presence of the private namespace (i.e 'newinstance') super-blocks.
434 */ 350 */
435static int get_init_pts_sb(struct file_system_type *fs_type, int flags, 351static int devpts_get_sb(struct file_system_type *fs_type,
436 void *data, struct vfsmount *mnt) 352 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
437{ 353{
438 struct super_block *s;
439 int error; 354 int error;
355 struct pts_mount_opts opts;
356 struct super_block *s;
357
358 memset(&opts, 0, sizeof(opts));
359 if (data) {
360 error = parse_mount_options(data, PARSE_MOUNT, &opts);
361 if (error)
362 return error;
363 }
364
365 if (opts.newinstance)
366 s = sget(fs_type, NULL, set_anon_super, NULL);
367 else
368 s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
440 369
441 s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
442 if (IS_ERR(s)) 370 if (IS_ERR(s))
443 return PTR_ERR(s); 371 return PTR_ERR(s);
444 372
445 if (!s->s_root) { 373 if (!s->s_root) {
446 s->s_flags = flags; 374 s->s_flags = flags;
447 error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); 375 error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
448 if (error) { 376 if (error)
449 up_write(&s->s_umount); 377 goto out_undo_sget;
450 deactivate_super(s);
451 return error;
452 }
453 s->s_flags |= MS_ACTIVE; 378 s->s_flags |= MS_ACTIVE;
454 } 379 }
455 do_remount_sb(s, flags, data, 0);
456 return simple_set_mnt(mnt, s);
457}
458 380
459/* 381 simple_set_mnt(mnt, s);
460 * Mount or remount the initial kernel mount of devpts. This type of
461 * mount maintains the legacy, single-instance semantics, while the
462 * kernel still allows multiple-instances.
463 */
464static int init_pts_mount(struct file_system_type *fs_type, int flags,
465 void *data, struct vfsmount *mnt)
466{
467 int err;
468 382
469 err = get_init_pts_sb(fs_type, flags, data, mnt); 383 memcpy(&(DEVPTS_SB(s))->mount_opts, &opts, sizeof(opts));
470 if (err)
471 return err;
472 384
473 err = mknod_ptmx(mnt->mnt_sb); 385 error = mknod_ptmx(s);
474 if (err) { 386 if (error)
475 dput(mnt->mnt_sb->s_root); 387 goto out_dput;
476 deactivate_super(mnt->mnt_sb);
477 }
478 388
479 return err; 389 return 0;
480}
481
482static int devpts_get_sb(struct file_system_type *fs_type,
483 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
484{
485 int new;
486
487 new = is_new_instance_mount(data);
488 if (new < 0)
489 return new;
490 390
491 if (new) 391out_dput:
492 return new_pts_mount(fs_type, flags, data, mnt); 392 dput(s->s_root);
493 393
494 return init_pts_mount(fs_type, flags, data, mnt); 394out_undo_sget:
395 up_write(&s->s_umount);
396 deactivate_super(s);
397 return error;
495} 398}
399
496#else 400#else
497/* 401/*
498 * This supports only the legacy single-instance semantics (no 402 * This supports only the legacy single-instance semantics (no
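
The restructured devpts_get_sb() parses the mount options exactly once and then decides how to obtain the superblock: a fresh anonymous one for 'newinstance', otherwise the single initial instance via the comparison callback handed to sget(). A user-space sketch of that parse-once-then-select shape (illustrative names):

#include <stdio.h>
#include <string.h>

struct opts {
	int newinstance;
};

static int parse_opts(const char *data, struct opts *o)
{
	memset(o, 0, sizeof(*o));
	if (data && strstr(data, "newinstance"))
		o->newinstance = 1;
	return 0;
}

static const char *get_sb(const char *data)
{
	struct opts o;

	if (parse_opts(data, &o))
		return NULL;
	/* newinstance: always a fresh private instance; otherwise bind
	 * to the single initial instance, as the comparison callback
	 * passed to sget() arranges in the patch above. */
	return o.newinstance ? "new private sb" : "initial sb";
}

int main(void)
{
	printf("%s\n", get_sb("mode=0620,newinstance"));
	printf("%s\n", get_sb("mode=0620"));
	return 0;
}
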
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 92969f879a17..858fba14aaa6 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -156,7 +156,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
156 156
157 bucket = dir_hash(ls, name, namelen); 157 bucket = dir_hash(ls, name, namelen);
158 158
159 write_lock(&ls->ls_dirtbl[bucket].lock); 159 spin_lock(&ls->ls_dirtbl[bucket].lock);
160 160
161 de = search_bucket(ls, name, namelen, bucket); 161 de = search_bucket(ls, name, namelen, bucket);
162 162
@@ -173,7 +173,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
173 list_del(&de->list); 173 list_del(&de->list);
174 kfree(de); 174 kfree(de);
175 out: 175 out:
176 write_unlock(&ls->ls_dirtbl[bucket].lock); 176 spin_unlock(&ls->ls_dirtbl[bucket].lock);
177} 177}
178 178
179void dlm_dir_clear(struct dlm_ls *ls) 179void dlm_dir_clear(struct dlm_ls *ls)
@@ -185,14 +185,14 @@ void dlm_dir_clear(struct dlm_ls *ls)
185 DLM_ASSERT(list_empty(&ls->ls_recover_list), ); 185 DLM_ASSERT(list_empty(&ls->ls_recover_list), );
186 186
187 for (i = 0; i < ls->ls_dirtbl_size; i++) { 187 for (i = 0; i < ls->ls_dirtbl_size; i++) {
188 write_lock(&ls->ls_dirtbl[i].lock); 188 spin_lock(&ls->ls_dirtbl[i].lock);
189 head = &ls->ls_dirtbl[i].list; 189 head = &ls->ls_dirtbl[i].list;
190 while (!list_empty(head)) { 190 while (!list_empty(head)) {
191 de = list_entry(head->next, struct dlm_direntry, list); 191 de = list_entry(head->next, struct dlm_direntry, list);
192 list_del(&de->list); 192 list_del(&de->list);
193 put_free_de(ls, de); 193 put_free_de(ls, de);
194 } 194 }
195 write_unlock(&ls->ls_dirtbl[i].lock); 195 spin_unlock(&ls->ls_dirtbl[i].lock);
196 } 196 }
197} 197}
198 198
@@ -307,17 +307,17 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
307 307
308 bucket = dir_hash(ls, name, namelen); 308 bucket = dir_hash(ls, name, namelen);
309 309
310 write_lock(&ls->ls_dirtbl[bucket].lock); 310 spin_lock(&ls->ls_dirtbl[bucket].lock);
311 de = search_bucket(ls, name, namelen, bucket); 311 de = search_bucket(ls, name, namelen, bucket);
312 if (de) { 312 if (de) {
313 *r_nodeid = de->master_nodeid; 313 *r_nodeid = de->master_nodeid;
314 write_unlock(&ls->ls_dirtbl[bucket].lock); 314 spin_unlock(&ls->ls_dirtbl[bucket].lock);
315 if (*r_nodeid == nodeid) 315 if (*r_nodeid == nodeid)
316 return -EEXIST; 316 return -EEXIST;
317 return 0; 317 return 0;
318 } 318 }
319 319
320 write_unlock(&ls->ls_dirtbl[bucket].lock); 320 spin_unlock(&ls->ls_dirtbl[bucket].lock);
321 321
322 if (namelen > DLM_RESNAME_MAXLEN) 322 if (namelen > DLM_RESNAME_MAXLEN)
323 return -EINVAL; 323 return -EINVAL;
@@ -330,7 +330,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
330 de->length = namelen; 330 de->length = namelen;
331 memcpy(de->name, name, namelen); 331 memcpy(de->name, name, namelen);
332 332
333 write_lock(&ls->ls_dirtbl[bucket].lock); 333 spin_lock(&ls->ls_dirtbl[bucket].lock);
334 tmp = search_bucket(ls, name, namelen, bucket); 334 tmp = search_bucket(ls, name, namelen, bucket);
335 if (tmp) { 335 if (tmp) {
336 kfree(de); 336 kfree(de);
@@ -339,7 +339,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
339 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list); 339 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
340 } 340 }
341 *r_nodeid = de->master_nodeid; 341 *r_nodeid = de->master_nodeid;
342 write_unlock(&ls->ls_dirtbl[bucket].lock); 342 spin_unlock(&ls->ls_dirtbl[bucket].lock);
343 return 0; 343 return 0;
344} 344}
345 345
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 076e86f38bc8..d01ca0a711db 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -99,7 +99,7 @@ struct dlm_direntry {
99 99
100struct dlm_dirtable { 100struct dlm_dirtable {
101 struct list_head list; 101 struct list_head list;
102 rwlock_t lock; 102 spinlock_t lock;
103}; 103};
104 104
105struct dlm_rsbtable { 105struct dlm_rsbtable {
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 01e7d39c5fba..205ec95b347e 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -835,7 +835,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
835 lkb->lkb_wait_count++; 835 lkb->lkb_wait_count++;
836 hold_lkb(lkb); 836 hold_lkb(lkb);
837 837
838 log_debug(ls, "add overlap %x cur %d new %d count %d flags %x", 838 log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
839 lkb->lkb_id, lkb->lkb_wait_type, mstype, 839 lkb->lkb_id, lkb->lkb_wait_type, mstype,
840 lkb->lkb_wait_count, lkb->lkb_flags); 840 lkb->lkb_wait_count, lkb->lkb_flags);
841 goto out; 841 goto out;
@@ -851,7 +851,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
851 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); 851 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
852 out: 852 out:
853 if (error) 853 if (error)
854 log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s", 854 log_error(ls, "addwait error %x %d flags %x %d %d %s",
855 lkb->lkb_id, error, lkb->lkb_flags, mstype, 855 lkb->lkb_id, error, lkb->lkb_flags, mstype,
856 lkb->lkb_wait_type, lkb->lkb_resource->res_name); 856 lkb->lkb_wait_type, lkb->lkb_resource->res_name);
857 mutex_unlock(&ls->ls_waiters_mutex); 857 mutex_unlock(&ls->ls_waiters_mutex);
@@ -863,23 +863,55 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
863 request reply on the requestqueue) between dlm_recover_waiters_pre() which 863 request reply on the requestqueue) between dlm_recover_waiters_pre() which
864 set RESEND and dlm_recover_waiters_post() */ 864 set RESEND and dlm_recover_waiters_post() */
865 865
866static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype) 866static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
867 struct dlm_message *ms)
867{ 868{
868 struct dlm_ls *ls = lkb->lkb_resource->res_ls; 869 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
869 int overlap_done = 0; 870 int overlap_done = 0;
870 871
871 if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) { 872 if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
873 log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
872 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; 874 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
873 overlap_done = 1; 875 overlap_done = 1;
874 goto out_del; 876 goto out_del;
875 } 877 }
876 878
877 if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) { 879 if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
880 log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
878 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; 881 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
879 overlap_done = 1; 882 overlap_done = 1;
880 goto out_del; 883 goto out_del;
881 } 884 }
882 885
886 /* Cancel state was preemptively cleared by a successful convert,
887 see next comment, nothing to do. */
888
889 if ((mstype == DLM_MSG_CANCEL_REPLY) &&
890 (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
891 log_debug(ls, "remwait %x cancel_reply wait_type %d",
892 lkb->lkb_id, lkb->lkb_wait_type);
893 return -1;
894 }
895
 896 /* Remove for the convert reply, and preemptively remove for the
897 cancel reply. A convert has been granted while there's still
898 an outstanding cancel on it (the cancel is moot and the result
899 in the cancel reply should be 0). We preempt the cancel reply
900 because the app gets the convert result and then can follow up
901 with another op, like convert. This subsequent op would see the
902 lingering state of the cancel and fail with -EBUSY. */
903
904 if ((mstype == DLM_MSG_CONVERT_REPLY) &&
905 (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
906 is_overlap_cancel(lkb) && ms && !ms->m_result) {
907 log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
908 lkb->lkb_id);
909 lkb->lkb_wait_type = 0;
910 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
911 lkb->lkb_wait_count--;
912 goto out_del;
913 }
914
883 /* N.B. type of reply may not always correspond to type of original 915 /* N.B. type of reply may not always correspond to type of original
884 msg due to lookup->request optimization, verify others? */ 916 msg due to lookup->request optimization, verify others? */
885 917
@@ -888,8 +920,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
888 goto out_del; 920 goto out_del;
889 } 921 }
890 922
891 log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d", 923 log_error(ls, "remwait error %x reply %d flags %x no wait_type",
892 lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type); 924 lkb->lkb_id, mstype, lkb->lkb_flags);
893 return -1; 925 return -1;
894 926
895 out_del: 927 out_del:
@@ -899,7 +931,7 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
899 this would happen */ 931 this would happen */
900 932
901 if (overlap_done && lkb->lkb_wait_type) { 933 if (overlap_done && lkb->lkb_wait_type) {
902 log_error(ls, "remove_from_waiters %x reply %d give up on %d", 934 log_error(ls, "remwait error %x reply %d wait_type %d overlap",
903 lkb->lkb_id, mstype, lkb->lkb_wait_type); 935 lkb->lkb_id, mstype, lkb->lkb_wait_type);
904 lkb->lkb_wait_count--; 936 lkb->lkb_wait_count--;
905 lkb->lkb_wait_type = 0; 937 lkb->lkb_wait_type = 0;
@@ -921,7 +953,7 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
921 int error; 953 int error;
922 954
923 mutex_lock(&ls->ls_waiters_mutex); 955 mutex_lock(&ls->ls_waiters_mutex);
924 error = _remove_from_waiters(lkb, mstype); 956 error = _remove_from_waiters(lkb, mstype, NULL);
925 mutex_unlock(&ls->ls_waiters_mutex); 957 mutex_unlock(&ls->ls_waiters_mutex);
926 return error; 958 return error;
927} 959}
@@ -936,7 +968,7 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
936 968
937 if (ms != &ls->ls_stub_ms) 969 if (ms != &ls->ls_stub_ms)
938 mutex_lock(&ls->ls_waiters_mutex); 970 mutex_lock(&ls->ls_waiters_mutex);
939 error = _remove_from_waiters(lkb, ms->m_type); 971 error = _remove_from_waiters(lkb, ms->m_type, ms);
940 if (ms != &ls->ls_stub_ms) 972 if (ms != &ls->ls_stub_ms)
941 mutex_unlock(&ls->ls_waiters_mutex); 973 mutex_unlock(&ls->ls_waiters_mutex);
942 return error; 974 return error;
@@ -2083,6 +2115,11 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2083 lkb->lkb_timeout_cs = args->timeout; 2115 lkb->lkb_timeout_cs = args->timeout;
2084 rv = 0; 2116 rv = 0;
2085 out: 2117 out:
2118 if (rv)
2119 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2120 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2121 lkb->lkb_status, lkb->lkb_wait_type,
2122 lkb->lkb_resource->res_name);
2086 return rv; 2123 return rv;
2087} 2124}
2088 2125
@@ -2149,6 +2186,13 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2149 goto out; 2186 goto out;
2150 } 2187 }
2151 2188
2189 /* there's nothing to cancel */
2190 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2191 !lkb->lkb_wait_type) {
2192 rv = -EBUSY;
2193 goto out;
2194 }
2195
2152 switch (lkb->lkb_wait_type) { 2196 switch (lkb->lkb_wait_type) {
2153 case DLM_MSG_LOOKUP: 2197 case DLM_MSG_LOOKUP:
2154 case DLM_MSG_REQUEST: 2198 case DLM_MSG_REQUEST:
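
The new validate_unlock_args() check makes a cancel on a granted lock with nothing in flight fail fast with -EBUSY rather than queueing a moot cancel. A compact user-space rendering of just that predicate (illustrative types):

#include <errno.h>
#include <stdio.h>

enum status { STS_WAITING, STS_GRANTED };

struct lkb {
	enum status status;
	int wait_type;        /* 0 means no operation in flight */
};

static int validate_cancel(const struct lkb *lkb)
{
	if (lkb->status == STS_GRANTED && !lkb->wait_type)
		return -EBUSY;  /* there's nothing to cancel */
	return 0;
}

int main(void)
{
	struct lkb granted = { STS_GRANTED, 0 };
	struct lkb converting = { STS_GRANTED, 1 /* e.g. a pending convert */ };

	printf("granted, idle:     %d\n", validate_cancel(&granted));
	printf("convert in flight: %d\n", validate_cancel(&converting));
	return 0;
}
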
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index aa32e5f02493..cd8e2df3c295 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -487,7 +487,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
487 goto out_lkbfree; 487 goto out_lkbfree;
488 for (i = 0; i < size; i++) { 488 for (i = 0; i < size; i++) {
489 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list); 489 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
490 rwlock_init(&ls->ls_dirtbl[i].lock); 490 spin_lock_init(&ls->ls_dirtbl[i].lock);
491 } 491 }
492 492
493 INIT_LIST_HEAD(&ls->ls_waiters); 493 INIT_LIST_HEAD(&ls->ls_waiters);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 103a5ebd1371..609108a83267 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -2,7 +2,7 @@
2******************************************************************************* 2*******************************************************************************
3** 3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
6** 6**
7** This copyrighted material is made available to anyone wishing to use, 7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions 8** modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,7 @@
21 * 21 *
22 * Cluster nodes are referred to by their nodeids. nodeids are 22 * Cluster nodes are referred to by their nodeids. nodeids are
23 * simply 32 bit numbers to the locking module - if they need to 23 * simply 32 bit numbers to the locking module - if they need to
24 * be expanded for the cluster infrastructure then that is it's 24 * be expanded for the cluster infrastructure then that is its
25 * responsibility. It is this layer's 25 * responsibility. It is this layer's
26 * responsibility to resolve these into IP address or 26 * responsibility to resolve these into IP address or
27 * whatever it needs for inter-node communication. 27 * whatever it needs for inter-node communication.
@@ -36,9 +36,9 @@
36 * of high load. Also, this way, the sending thread can collect together 36 * of high load. Also, this way, the sending thread can collect together
37 * messages bound for one node and send them in one block. 37 * messages bound for one node and send them in one block.
38 * 38 *
39 * lowcomms will choose to use wither TCP or SCTP as its transport layer 39 * lowcomms will choose to use either TCP or SCTP as its transport layer
40 * depending on the configuration variable 'protocol'. This should be set 40 * depending on the configuration variable 'protocol'. This should be set
41 * to 0 (default) for TCP or 1 for SCTP. It shouldbe configured using a 41 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
42 * cluster-wide mechanism as it must be the same on all nodes of the cluster 42 * cluster-wide mechanism as it must be the same on all nodes of the cluster
43 * for the DLM to function. 43 * for the DLM to function.
44 * 44 *
@@ -48,11 +48,11 @@
48#include <net/sock.h> 48#include <net/sock.h>
49#include <net/tcp.h> 49#include <net/tcp.h>
50#include <linux/pagemap.h> 50#include <linux/pagemap.h>
51#include <linux/idr.h>
52#include <linux/file.h> 51#include <linux/file.h>
53#include <linux/mutex.h> 52#include <linux/mutex.h>
54#include <linux/sctp.h> 53#include <linux/sctp.h>
55#include <net/sctp/user.h> 54#include <net/sctp/user.h>
55#include <net/ipv6.h>
56 56
57#include "dlm_internal.h" 57#include "dlm_internal.h"
58#include "lowcomms.h" 58#include "lowcomms.h"
@@ -60,6 +60,7 @@
60#include "config.h" 60#include "config.h"
61 61
62#define NEEDED_RMEM (4*1024*1024) 62#define NEEDED_RMEM (4*1024*1024)
63#define CONN_HASH_SIZE 32
63 64
64struct cbuf { 65struct cbuf {
65 unsigned int base; 66 unsigned int base;
@@ -114,6 +115,7 @@ struct connection {
114 int retries; 115 int retries;
115#define MAX_CONNECT_RETRIES 3 116#define MAX_CONNECT_RETRIES 3
116 int sctp_assoc; 117 int sctp_assoc;
118 struct hlist_node list;
117 struct connection *othercon; 119 struct connection *othercon;
118 struct work_struct rwork; /* Receive workqueue */ 120 struct work_struct rwork; /* Receive workqueue */
119 struct work_struct swork; /* Send workqueue */ 121 struct work_struct swork; /* Send workqueue */
@@ -138,14 +140,37 @@ static int dlm_local_count;
138static struct workqueue_struct *recv_workqueue; 140static struct workqueue_struct *recv_workqueue;
139static struct workqueue_struct *send_workqueue; 141static struct workqueue_struct *send_workqueue;
140 142
141static DEFINE_IDR(connections_idr); 143static struct hlist_head connection_hash[CONN_HASH_SIZE];
142static DEFINE_MUTEX(connections_lock); 144static DEFINE_MUTEX(connections_lock);
143static int max_nodeid;
144static struct kmem_cache *con_cache; 145static struct kmem_cache *con_cache;
145 146
146static void process_recv_sockets(struct work_struct *work); 147static void process_recv_sockets(struct work_struct *work);
147static void process_send_sockets(struct work_struct *work); 148static void process_send_sockets(struct work_struct *work);
148 149
150
151/* This is deliberately very simple because most clusters have simple
152 sequential nodeids, so we should be able to go straight to a connection
153 struct in the array */
154static inline int nodeid_hash(int nodeid)
155{
156 return nodeid & (CONN_HASH_SIZE-1);
157}
158
159static struct connection *__find_con(int nodeid)
160{
161 int r;
162 struct hlist_node *h;
163 struct connection *con;
164
165 r = nodeid_hash(nodeid);
166
167 hlist_for_each_entry(con, h, &connection_hash[r], list) {
168 if (con->nodeid == nodeid)
169 return con;
170 }
171 return NULL;
172}
173
149/* 174/*
150 * If 'allocation' is zero then we don't attempt to create a new 175 * If 'allocation' is zero then we don't attempt to create a new
151 * connection structure for this node. 176 * connection structure for this node.
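The two hunks above replace the global IDR (an ID-to-pointer radix tree) with a fixed 32-bucket hash table keyed on nodeid. CONN_HASH_SIZE is a power of two, so the hash reduces to a mask, and the small sequential nodeids typical of clusters land roughly one per bucket. A minimal user-space sketch of the same lookup pattern (plain singly linked buckets standing in for the kernel's hlist; all names hypothetical):

    #include <stddef.h>

    #define CONN_HASH_SIZE 32   /* must stay a power of two for the mask */

    struct conn {
        int nodeid;
        struct conn *next;      /* stand-in for the kernel's hlist_node */
    };

    static struct conn *buckets[CONN_HASH_SIZE];

    static inline int nodeid_hash(int nodeid)
    {
        return nodeid & (CONN_HASH_SIZE - 1);
    }

    static struct conn *find_conn(int nodeid)
    {
        struct conn *c;

        for (c = buckets[nodeid_hash(nodeid)]; c; c = c->next)
            if (c->nodeid == nodeid)
                return c;
        return NULL;            /* caller may then allocate and insert */
    }

Unlike the IDR path, insertion into an hlist cannot fail, which is why the idr_pre_get()/idr_get_new_above() error handling disappears from __nodeid2con() in the next hunk, along with the max_nodeid bookkeeping.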
@@ -154,31 +179,17 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
154{ 179{
155 struct connection *con = NULL; 180 struct connection *con = NULL;
156 int r; 181 int r;
157 int n;
158 182
159 con = idr_find(&connections_idr, nodeid); 183 con = __find_con(nodeid);
160 if (con || !alloc) 184 if (con || !alloc)
161 return con; 185 return con;
162 186
163 r = idr_pre_get(&connections_idr, alloc);
164 if (!r)
165 return NULL;
166
167 con = kmem_cache_zalloc(con_cache, alloc); 187 con = kmem_cache_zalloc(con_cache, alloc);
168 if (!con) 188 if (!con)
169 return NULL; 189 return NULL;
170 190
171 r = idr_get_new_above(&connections_idr, con, nodeid, &n); 191 r = nodeid_hash(nodeid);
172 if (r) { 192 hlist_add_head(&con->list, &connection_hash[r]);
173 kmem_cache_free(con_cache, con);
174 return NULL;
175 }
176
177 if (n != nodeid) {
178 idr_remove(&connections_idr, n);
179 kmem_cache_free(con_cache, con);
180 return NULL;
181 }
182 193
183 con->nodeid = nodeid; 194 con->nodeid = nodeid;
184 mutex_init(&con->sock_mutex); 195 mutex_init(&con->sock_mutex);
@@ -189,19 +200,30 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
189 200
190 /* Setup action pointers for child sockets */ 201 /* Setup action pointers for child sockets */
191 if (con->nodeid) { 202 if (con->nodeid) {
192 struct connection *zerocon = idr_find(&connections_idr, 0); 203 struct connection *zerocon = __find_con(0);
193 204
194 con->connect_action = zerocon->connect_action; 205 con->connect_action = zerocon->connect_action;
195 if (!con->rx_action) 206 if (!con->rx_action)
196 con->rx_action = zerocon->rx_action; 207 con->rx_action = zerocon->rx_action;
197 } 208 }
198 209
199 if (nodeid > max_nodeid)
200 max_nodeid = nodeid;
201
202 return con; 210 return con;
203} 211}
204 212
213/* Loop round all connections */
214static void foreach_conn(void (*conn_func)(struct connection *c))
215{
216 int i;
217 struct hlist_node *h, *n;
218 struct connection *con;
219
220 for (i = 0; i < CONN_HASH_SIZE; i++) {
221 hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list) {
222 conn_func(con);
223 }
224 }
225}
226
205static struct connection *nodeid2con(int nodeid, gfp_t allocation) 227static struct connection *nodeid2con(int nodeid, gfp_t allocation)
206{ 228{
207 struct connection *con; 229 struct connection *con;
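foreach_conn() hides the "visit every connection" double loop behind a function pointer; the later hunks turn sctp_init_failed(), clean_writequeues() and dlm_lowcomms_stop() into small callbacks handed to it. It deliberately walks with hlist_for_each_entry_safe(), so a callback may unlink and free the very entry it was given. Schematic use, with one hypothetical destructive callback:

    /* Hypothetical callback in the style this patch introduces. */
    static void drop_conn(struct connection *con)
    {
        hlist_del(&con->list);            /* legal only because foreach_conn()
                                             uses the _safe iterator */
        kmem_cache_free(con_cache, con);
    }

    /* Callers serialize against each other exactly as the old loops did: */
    mutex_lock(&connections_lock);
    foreach_conn(drop_conn);
    mutex_unlock(&connections_lock);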
@@ -217,14 +239,17 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
217static struct connection *assoc2con(int assoc_id) 239static struct connection *assoc2con(int assoc_id)
218{ 240{
219 int i; 241 int i;
242 struct hlist_node *h;
220 struct connection *con; 243 struct connection *con;
221 244
222 mutex_lock(&connections_lock); 245 mutex_lock(&connections_lock);
223 for (i=0; i<=max_nodeid; i++) { 246
224 con = __nodeid2con(i, 0); 247 for (i = 0; i < CONN_HASH_SIZE; i++) {
225 if (con && con->sctp_assoc == assoc_id) { 248 hlist_for_each_entry(con, h, &connection_hash[i], list) {
226 mutex_unlock(&connections_lock); 249 if (con && con->sctp_assoc == assoc_id) {
227 return con; 250 mutex_unlock(&connections_lock);
251 return con;
252 }
228 } 253 }
229 } 254 }
230 mutex_unlock(&connections_lock); 255 mutex_unlock(&connections_lock);
@@ -250,8 +275,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
250 } else { 275 } else {
251 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; 276 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
252 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr; 277 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
253 memcpy(&ret6->sin6_addr, &in6->sin6_addr, 278 ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
254 sizeof(in6->sin6_addr));
255 } 279 }
256 280
257 return 0; 281 return 0;
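ipv6_addr_copy() is the networking core's helper for this copy, hence the new <net/ipv6.h> include at the top of the file. In kernels of this vintage it was essentially the memcpy() it replaces, paraphrased:

    /* Paraphrased from include/net/ipv6.h of the same era. */
    static inline void ipv6_addr_copy(struct in6_addr *a1,
                                      const struct in6_addr *a2)
    {
        memcpy(a1, a2, sizeof(struct in6_addr));
    }

so the change is about using the canonical helper, not about behaviour.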
@@ -376,25 +400,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd)
376 log_print("send EOF to node failed: %d", ret); 400 log_print("send EOF to node failed: %d", ret);
377} 401}
378 402
403static void sctp_init_failed_foreach(struct connection *con)
404{
405 con->sctp_assoc = 0;
406 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
407 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
408 queue_work(send_workqueue, &con->swork);
409 }
410}
411
379/* INIT failed but we don't know which node... 412/* INIT failed but we don't know which node...
380 restart INIT on all pending nodes */ 413 restart INIT on all pending nodes */
381static void sctp_init_failed(void) 414static void sctp_init_failed(void)
382{ 415{
383 int i;
384 struct connection *con;
385
386 mutex_lock(&connections_lock); 416 mutex_lock(&connections_lock);
387 for (i=1; i<=max_nodeid; i++) { 417
388 con = __nodeid2con(i, 0); 418 foreach_conn(sctp_init_failed_foreach);
389 if (!con) 419
390 continue;
391 con->sctp_assoc = 0;
392 if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
393 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
394 queue_work(send_workqueue, &con->swork);
395 }
396 }
397 }
398 mutex_unlock(&connections_lock); 420 mutex_unlock(&connections_lock);
399} 421}
400 422
@@ -1313,13 +1335,10 @@ out_connect:
1313 1335
1314static void clean_one_writequeue(struct connection *con) 1336static void clean_one_writequeue(struct connection *con)
1315{ 1337{
1316 struct list_head *list; 1338 struct writequeue_entry *e, *safe;
1317 struct list_head *temp;
1318 1339
1319 spin_lock(&con->writequeue_lock); 1340 spin_lock(&con->writequeue_lock);
1320 list_for_each_safe(list, temp, &con->writequeue) { 1341 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1321 struct writequeue_entry *e =
1322 list_entry(list, struct writequeue_entry, list);
1323 list_del(&e->list); 1342 list_del(&e->list);
1324 free_entry(e); 1343 free_entry(e);
1325 } 1344 }
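clean_one_writequeue() is converted from list_for_each_safe() plus a hand-rolled list_entry() to list_for_each_entry_safe(), which folds the container lookup into the iterator. Both variants cache the successor before the loop body runs, which is what makes freeing the current entry legal. A self-contained illustration of why the cached successor matters:

    #include <stdlib.h>

    struct node { struct node *next; };

    /* Freeing during a naive walk would read n->next after free(n),
     * a use-after-free.  The _safe idiom reads the successor first: */
    static void free_all(struct node *head)
    {
        struct node *n = head, *next;

        while (n) {
            next = n->next;   /* cache successor before destroying n */
            free(n);
            n = next;
        }
    }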
@@ -1369,14 +1388,7 @@ static void process_send_sockets(struct work_struct *work)
1369/* Discard all entries on the write queues */ 1388/* Discard all entries on the write queues */
1370static void clean_writequeues(void) 1389static void clean_writequeues(void)
1371{ 1390{
1372 int nodeid; 1391 foreach_conn(clean_one_writequeue);
1373
1374 for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
1375 struct connection *con = __nodeid2con(nodeid, 0);
1376
1377 if (con)
1378 clean_one_writequeue(con);
1379 }
1380} 1392}
1381 1393
1382static void work_stop(void) 1394static void work_stop(void)
@@ -1406,23 +1418,29 @@ static int work_start(void)
1406 return 0; 1418 return 0;
1407} 1419}
1408 1420
1409void dlm_lowcomms_stop(void) 1421static void stop_conn(struct connection *con)
1410{ 1422{
1411 int i; 1423 con->flags |= 0x0F;
1412 struct connection *con; 1424 if (con->sock)
1425 con->sock->sk->sk_user_data = NULL;
1426}
1413 1427
1428static void free_conn(struct connection *con)
1429{
1430 close_connection(con, true);
1431 if (con->othercon)
1432 kmem_cache_free(con_cache, con->othercon);
1433 hlist_del(&con->list);
1434 kmem_cache_free(con_cache, con);
1435}
1436
1437void dlm_lowcomms_stop(void)
1438{
1414 /* Set all the flags to prevent any 1439 /* Set all the flags to prevent any
1415 socket activity. 1440 socket activity.
1416 */ 1441 */
1417 mutex_lock(&connections_lock); 1442 mutex_lock(&connections_lock);
1418 for (i = 0; i <= max_nodeid; i++) { 1443 foreach_conn(stop_conn);
1419 con = __nodeid2con(i, 0);
1420 if (con) {
1421 con->flags |= 0x0F;
1422 if (con->sock)
1423 con->sock->sk->sk_user_data = NULL;
1424 }
1425 }
1426 mutex_unlock(&connections_lock); 1444 mutex_unlock(&connections_lock);
1427 1445
1428 work_stop(); 1446 work_stop();
@@ -1430,25 +1448,20 @@ void dlm_lowcomms_stop(void)
1430 mutex_lock(&connections_lock); 1448 mutex_lock(&connections_lock);
1431 clean_writequeues(); 1449 clean_writequeues();
1432 1450
1433 for (i = 0; i <= max_nodeid; i++) { 1451 foreach_conn(free_conn);
1434 con = __nodeid2con(i, 0); 1452
1435 if (con) {
1436 close_connection(con, true);
1437 if (con->othercon)
1438 kmem_cache_free(con_cache, con->othercon);
1439 kmem_cache_free(con_cache, con);
1440 }
1441 }
1442 max_nodeid = 0;
1443 mutex_unlock(&connections_lock); 1453 mutex_unlock(&connections_lock);
1444 kmem_cache_destroy(con_cache); 1454 kmem_cache_destroy(con_cache);
1445 idr_init(&connections_idr);
1446} 1455}
1447 1456
1448int dlm_lowcomms_start(void) 1457int dlm_lowcomms_start(void)
1449{ 1458{
1450 int error = -EINVAL; 1459 int error = -EINVAL;
1451 struct connection *con; 1460 struct connection *con;
1461 int i;
1462
1463 for (i = 0; i < CONN_HASH_SIZE; i++)
1464 INIT_HLIST_HEAD(&connection_hash[i]);
1452 1465
1453 init_local(); 1466 init_local();
1454 if (!dlm_local_count) { 1467 if (!dlm_local_count) {
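With the iterator in place, dlm_lowcomms_stop() becomes a few foreach_conn() passes that keep the old ordering: silence the sockets, drain the workqueues, then tear the connections down. In outline (all helpers are from this file):

    mutex_lock(&connections_lock);
    foreach_conn(stop_conn);        /* set the CF_* bits, detach sk_user_data */
    mutex_unlock(&connections_lock);

    work_stop();                    /* destroy send/recv workqueues */

    mutex_lock(&connections_lock);
    clean_writequeues();            /* foreach_conn(clean_one_writequeue) */
    foreach_conn(free_conn);        /* close, hlist_del(), kmem_cache_free() */
    mutex_unlock(&connections_lock);
    kmem_cache_destroy(con_cache);

There is no longer an IDR to reset at the end, so dlm_lowcomms_start() gains the matching INIT_HLIST_HEAD() loop over the buckets instead.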
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 065149e84f42..ebce994ab0b7 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. 2 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
3 * 3 *
4 * This copyrighted material is made available to anyone wishing to use, 4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions 5 * modify, copy, or redistribute it subject to the terms and conditions
@@ -84,7 +84,7 @@ struct dlm_lock_result32 {
84 84
85static void compat_input(struct dlm_write_request *kb, 85static void compat_input(struct dlm_write_request *kb,
86 struct dlm_write_request32 *kb32, 86 struct dlm_write_request32 *kb32,
87 size_t count) 87 int namelen)
88{ 88{
89 kb->version[0] = kb32->version[0]; 89 kb->version[0] = kb32->version[0];
90 kb->version[1] = kb32->version[1]; 90 kb->version[1] = kb32->version[1];
@@ -96,8 +96,7 @@ static void compat_input(struct dlm_write_request *kb,
96 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { 96 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
97 kb->i.lspace.flags = kb32->i.lspace.flags; 97 kb->i.lspace.flags = kb32->i.lspace.flags;
98 kb->i.lspace.minor = kb32->i.lspace.minor; 98 kb->i.lspace.minor = kb32->i.lspace.minor;
99 memcpy(kb->i.lspace.name, kb32->i.lspace.name, count - 99 memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
100 offsetof(struct dlm_write_request32, i.lspace.name));
101 } else if (kb->cmd == DLM_USER_PURGE) { 100 } else if (kb->cmd == DLM_USER_PURGE) {
102 kb->i.purge.nodeid = kb32->i.purge.nodeid; 101 kb->i.purge.nodeid = kb32->i.purge.nodeid;
103 kb->i.purge.pid = kb32->i.purge.pid; 102 kb->i.purge.pid = kb32->i.purge.pid;
@@ -115,8 +114,7 @@ static void compat_input(struct dlm_write_request *kb,
115 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; 114 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
116 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; 115 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
117 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); 116 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
118 memcpy(kb->i.lock.name, kb32->i.lock.name, count - 117 memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
119 offsetof(struct dlm_write_request32, i.lock.name));
120 } 118 }
121} 119}
122 120
@@ -539,9 +537,16 @@ static ssize_t device_write(struct file *file, const char __user *buf,
539#ifdef CONFIG_COMPAT 537#ifdef CONFIG_COMPAT
540 if (!kbuf->is64bit) { 538 if (!kbuf->is64bit) {
541 struct dlm_write_request32 *k32buf; 539 struct dlm_write_request32 *k32buf;
540 int namelen = 0;
541
542 if (count > sizeof(struct dlm_write_request32))
543 namelen = count - sizeof(struct dlm_write_request32);
544
542 k32buf = (struct dlm_write_request32 *)kbuf; 545 k32buf = (struct dlm_write_request32 *)kbuf;
543 kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) - 546
544 sizeof(struct dlm_write_request32)), GFP_KERNEL); 547 /* add 1 after namelen so that the name string is terminated */
548 kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
549 GFP_KERNEL);
545 if (!kbuf) { 550 if (!kbuf) {
546 kfree(k32buf); 551 kfree(k32buf);
547 return -ENOMEM; 552 return -ENOMEM;
@@ -549,7 +554,8 @@ static ssize_t device_write(struct file *file, const char __user *buf,
549 554
550 if (proc) 555 if (proc)
551 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); 556 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
552 compat_input(kbuf, k32buf, count + 1); 557
558 compat_input(kbuf, k32buf, namelen);
553 kfree(k32buf); 559 kfree(k32buf);
554 } 560 }
555#endif 561#endif
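The user.c hunks repair how much lockspace/lock name data the 32-bit compat path copies. Previously device_write() passed the raw byte count (plus one) down and compat_input() subtracted a per-command offsetof() from it, tying the copy size to the caller's arithmetic; a write barely larger than the header made that subtraction unreliable. Now the name length is derived once from the fixed 32-bit header size, the native buffer is kzalloc()ed with one spare byte so the name is always NUL-terminated, and compat_input() receives the length explicitly. The sizing logic, condensed:

    int namelen = 0;

    if (count > sizeof(struct dlm_write_request32))
        namelen = count - sizeof(struct dlm_write_request32);

    /* kzalloc() plus one spare byte guarantee a terminated name string */
    kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1, GFP_KERNEL);
    if (!kbuf)
        return -ENOMEM;          /* (the real code also frees k32buf here) */

    compat_input(kbuf, k32buf, namelen);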
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 3e5637fc3779..44d725f612cf 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb)
18 18
19 spin_lock(&inode_lock); 19 spin_lock(&inode_lock);
20 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 20 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
21 if (inode->i_state & (I_FREEING|I_WILL_FREE)) 21 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
22 continue; 22 continue;
23 if (inode->i_mapping->nrpages == 0) 23 if (inode->i_mapping->nrpages == 0)
24 continue; 24 continue;
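The drop_caches change adds I_NEW to the skip mask: an inode in I_NEW is already visible on the superblock list but its setup is not finished, so drop_pagecache_sb() now treats it like one being freed rather than touching its mapping:

    /* All three are transient states where the inode must not be used: */
    if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW))
        continue;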
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 5e596583946c..2dda5ade75bc 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -89,7 +89,7 @@ static void ecryptfs_d_release(struct dentry *dentry)
89 return; 89 return;
90} 90}
91 91
92struct dentry_operations ecryptfs_dops = { 92const struct dentry_operations ecryptfs_dops = {
93 .d_revalidate = ecryptfs_d_revalidate, 93 .d_revalidate = ecryptfs_d_revalidate,
94 .d_release = ecryptfs_d_release, 94 .d_release = ecryptfs_d_release,
95}; 95};
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index ac749d4d644f..064c5820e4e5 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -580,7 +580,7 @@ extern const struct inode_operations ecryptfs_main_iops;
580extern const struct inode_operations ecryptfs_dir_iops; 580extern const struct inode_operations ecryptfs_dir_iops;
581extern const struct inode_operations ecryptfs_symlink_iops; 581extern const struct inode_operations ecryptfs_symlink_iops;
582extern const struct super_operations ecryptfs_sops; 582extern const struct super_operations ecryptfs_sops;
583extern struct dentry_operations ecryptfs_dops; 583extern const struct dentry_operations ecryptfs_dops;
584extern struct address_space_operations ecryptfs_aops; 584extern struct address_space_operations ecryptfs_aops;
585extern int ecryptfs_verbosity; 585extern int ecryptfs_verbosity;
586extern unsigned int ecryptfs_message_buf_len; 586extern unsigned int ecryptfs_message_buf_len;
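The ecryptfs hunks are one instance of a tree-wide constification of struct dentry_operations (the same change appears below for msdos, vfat, fuse, gfs2, hfs and hfsplus): the method table is never modified at run time, so making it const lets it live in read-only memory. The pattern for any filesystem, using the 2.6.29-era d_revalidate signature:

    static int examplefs_d_revalidate(struct dentry *dentry,
                                      struct nameidata *nd)
    {
        return 1;   /* hypothetical filesystem: dentries never go stale */
    }

    const struct dentry_operations examplefs_dops = {
        .d_revalidate = examplefs_d_revalidate,
    };

Note that definition and extern declaration must change together, as in ecryptfs_kernel.h above, or the build fails on the qualifier mismatch.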
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 4a29d6376081..7f8d2e5a7ea6 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -570,7 +570,7 @@ do_more:
570error_return: 570error_return:
571 brelse(bitmap_bh); 571 brelse(bitmap_bh);
572 release_blocks(sb, freed); 572 release_blocks(sb, freed);
573 DQUOT_FREE_BLOCK(inode, freed); 573 vfs_dq_free_block(inode, freed);
574} 574}
575 575
576/** 576/**
@@ -1247,7 +1247,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
1247 /* 1247 /*
1248 * Check quota for allocation of this block. 1248 * Check quota for allocation of this block.
1249 */ 1249 */
1250 if (DQUOT_ALLOC_BLOCK(inode, num)) { 1250 if (vfs_dq_alloc_block(inode, num)) {
1251 *errp = -EDQUOT; 1251 *errp = -EDQUOT;
1252 return 0; 1252 return 0;
1253 } 1253 }
@@ -1409,7 +1409,7 @@ allocated:
1409 1409
1410 *errp = 0; 1410 *errp = 0;
1411 brelse(bitmap_bh); 1411 brelse(bitmap_bh);
1412 DQUOT_FREE_BLOCK(inode, *count-num); 1412 vfs_dq_free_block(inode, *count-num);
1413 *count = num; 1413 *count = num;
1414 return ret_block; 1414 return ret_block;
1415 1415
@@ -1420,7 +1420,7 @@ out:
1420 * Undo the block allocation 1420 * Undo the block allocation
1421 */ 1421 */
1422 if (!performed_allocation) 1422 if (!performed_allocation)
1423 DQUOT_FREE_BLOCK(inode, *count); 1423 vfs_dq_free_block(inode, *count);
1424 brelse(bitmap_bh); 1424 brelse(bitmap_bh);
1425 return 0; 1425 return 0;
1426} 1426}
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 66321a877e74..15387c9c17d8 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -121,8 +121,8 @@ void ext2_free_inode (struct inode * inode)
121 if (!is_bad_inode(inode)) { 121 if (!is_bad_inode(inode)) {
122 /* Quota is already initialized in iput() */ 122 /* Quota is already initialized in iput() */
123 ext2_xattr_delete_inode(inode); 123 ext2_xattr_delete_inode(inode);
124 DQUOT_FREE_INODE(inode); 124 vfs_dq_free_inode(inode);
125 DQUOT_DROP(inode); 125 vfs_dq_drop(inode);
126 } 126 }
127 127
128 es = EXT2_SB(sb)->s_es; 128 es = EXT2_SB(sb)->s_es;
@@ -586,7 +586,7 @@ got:
586 goto fail_drop; 586 goto fail_drop;
587 } 587 }
588 588
589 if (DQUOT_ALLOC_INODE(inode)) { 589 if (vfs_dq_alloc_inode(inode)) {
590 err = -EDQUOT; 590 err = -EDQUOT;
591 goto fail_drop; 591 goto fail_drop;
592 } 592 }
@@ -605,10 +605,10 @@ got:
605 return inode; 605 return inode;
606 606
607fail_free_drop: 607fail_free_drop:
608 DQUOT_FREE_INODE(inode); 608 vfs_dq_free_inode(inode);
609 609
610fail_drop: 610fail_drop:
611 DQUOT_DROP(inode); 611 vfs_dq_drop(inode);
612 inode->i_flags |= S_NOQUOTA; 612 inode->i_flags |= S_NOQUOTA;
613 inode->i_nlink = 0; 613 inode->i_nlink = 0;
614 unlock_new_inode(inode); 614 unlock_new_inode(inode);
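Every ext2 quota hunk in this patch is the same mechanical substitution: the upper-case DQUOT_*() macros from <linux/quotaops.h> become vfs_dq_*() functions with identical arguments and return conventions. The full mapping used throughout this diff:

    /* Old macro                      New helper (same arguments)   */
    /* DQUOT_INIT(i)             ->   vfs_dq_init(i)                */
    /* DQUOT_DROP(i)             ->   vfs_dq_drop(i)                */
    /* DQUOT_ALLOC_BLOCK(i, n)   ->   vfs_dq_alloc_block(i, n)      */
    /* DQUOT_FREE_BLOCK(i, n)    ->   vfs_dq_free_block(i, n)       */
    /* DQUOT_ALLOC_INODE(i)      ->   vfs_dq_alloc_inode(i)         */
    /* DQUOT_FREE_INODE(i)       ->   vfs_dq_free_inode(i)          */
    /* DQUOT_TRANSFER(i, iattr)  ->   vfs_dq_transfer(i, iattr)     */

The ext3 and ext4 hunks below apply the same renames.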
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 23fff2f87783..b43b95563663 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1444,7 +1444,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
1444 return error; 1444 return error;
1445 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 1445 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
1446 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 1446 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
1447 error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0; 1447 error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
1448 if (error) 1448 if (error)
1449 return error; 1449 return error;
1450 } 1450 }
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7c6e3606f0ec..f983225266dc 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1331,6 +1331,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
1331 sb->s_blocksize - offset : toread; 1331 sb->s_blocksize - offset : toread;
1332 1332
1333 tmp_bh.b_state = 0; 1333 tmp_bh.b_state = 0;
1334 tmp_bh.b_size = sb->s_blocksize;
1334 err = ext2_get_block(inode, blk, &tmp_bh, 0); 1335 err = ext2_get_block(inode, blk, &tmp_bh, 0);
1335 if (err < 0) 1336 if (err < 0)
1336 return err; 1337 return err;
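ext2_quota_read() probes the quota file one block at a time through a buffer_head built on the stack, and the added line initializes its b_size field before calling ext2_get_block(): get_block-style callbacks take b_size as the size of the requested mapping, and without the assignment it held whatever happened to be on the stack. The per-block setup now reads, condensed:

    struct buffer_head tmp_bh;

    tmp_bh.b_state = 0;
    tmp_bh.b_size = sb->s_blocksize;   /* request exactly one fs block */
    err = ext2_get_block(inode, blk, &tmp_bh, 0);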
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 987a5261cc2e..7913531ec6d5 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -642,7 +642,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
642 ea_bdebug(new_bh, "reusing block"); 642 ea_bdebug(new_bh, "reusing block");
643 643
644 error = -EDQUOT; 644 error = -EDQUOT;
645 if (DQUOT_ALLOC_BLOCK(inode, 1)) { 645 if (vfs_dq_alloc_block(inode, 1)) {
646 unlock_buffer(new_bh); 646 unlock_buffer(new_bh);
647 goto cleanup; 647 goto cleanup;
648 } 648 }
@@ -699,7 +699,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
699 * as if nothing happened and cleanup the unused block */ 699 * as if nothing happened and cleanup the unused block */
700 if (error && error != -ENOSPC) { 700 if (error && error != -ENOSPC) {
701 if (new_bh && new_bh != old_bh) 701 if (new_bh && new_bh != old_bh)
702 DQUOT_FREE_BLOCK(inode, 1); 702 vfs_dq_free_block(inode, 1);
703 goto cleanup; 703 goto cleanup;
704 } 704 }
705 } else 705 } else
@@ -731,7 +731,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
731 le32_add_cpu(&HDR(old_bh)->h_refcount, -1); 731 le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
732 if (ce) 732 if (ce)
733 mb_cache_entry_release(ce); 733 mb_cache_entry_release(ce);
734 DQUOT_FREE_BLOCK(inode, 1); 734 vfs_dq_free_block(inode, 1);
735 mark_buffer_dirty(old_bh); 735 mark_buffer_dirty(old_bh);
736 ea_bdebug(old_bh, "refcount now=%d", 736 ea_bdebug(old_bh, "refcount now=%d",
737 le32_to_cpu(HDR(old_bh)->h_refcount)); 737 le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -794,7 +794,7 @@ ext2_xattr_delete_inode(struct inode *inode)
794 mark_buffer_dirty(bh); 794 mark_buffer_dirty(bh);
795 if (IS_SYNC(inode)) 795 if (IS_SYNC(inode))
796 sync_dirty_buffer(bh); 796 sync_dirty_buffer(bh);
797 DQUOT_FREE_BLOCK(inode, 1); 797 vfs_dq_free_block(inode, 1);
798 } 798 }
799 EXT2_I(inode)->i_file_acl = 0; 799 EXT2_I(inode)->i_file_acl = 0;
800 800
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 0dbf1c048475..225202db8974 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
676 } 676 }
677 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks); 677 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
678 if (dquot_freed_blocks) 678 if (dquot_freed_blocks)
679 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); 679 vfs_dq_free_block(inode, dquot_freed_blocks);
680 return; 680 return;
681} 681}
682 682
@@ -1502,7 +1502,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1502 /* 1502 /*
1503 * Check quota for allocation of this block. 1503 * Check quota for allocation of this block.
1504 */ 1504 */
1505 if (DQUOT_ALLOC_BLOCK(inode, num)) { 1505 if (vfs_dq_alloc_block(inode, num)) {
1506 *errp = -EDQUOT; 1506 *errp = -EDQUOT;
1507 return 0; 1507 return 0;
1508 } 1508 }
@@ -1714,7 +1714,7 @@ allocated:
1714 1714
1715 *errp = 0; 1715 *errp = 0;
1716 brelse(bitmap_bh); 1716 brelse(bitmap_bh);
1717 DQUOT_FREE_BLOCK(inode, *count-num); 1717 vfs_dq_free_block(inode, *count-num);
1718 *count = num; 1718 *count = num;
1719 return ret_block; 1719 return ret_block;
1720 1720
@@ -1729,7 +1729,7 @@ out:
1729 * Undo the block allocation 1729 * Undo the block allocation
1730 */ 1730 */
1731 if (!performed_allocation) 1731 if (!performed_allocation)
1732 DQUOT_FREE_BLOCK(inode, *count); 1732 vfs_dq_free_block(inode, *count);
1733 brelse(bitmap_bh); 1733 brelse(bitmap_bh);
1734 return 0; 1734 return 0;
1735} 1735}
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 8de6c720e510..dd13d60d524b 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -123,10 +123,10 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
123 * Note: we must free any quota before locking the superblock, 123 * Note: we must free any quota before locking the superblock,
124 * as writing the quota to disk may need the lock as well. 124 * as writing the quota to disk may need the lock as well.
125 */ 125 */
126 DQUOT_INIT(inode); 126 vfs_dq_init(inode);
127 ext3_xattr_delete_inode(handle, inode); 127 ext3_xattr_delete_inode(handle, inode);
128 DQUOT_FREE_INODE(inode); 128 vfs_dq_free_inode(inode);
129 DQUOT_DROP(inode); 129 vfs_dq_drop(inode);
130 130
131 is_directory = S_ISDIR(inode->i_mode); 131 is_directory = S_ISDIR(inode->i_mode);
132 132
@@ -589,7 +589,7 @@ got:
589 sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0; 589 sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
590 590
591 ret = inode; 591 ret = inode;
592 if(DQUOT_ALLOC_INODE(inode)) { 592 if (vfs_dq_alloc_inode(inode)) {
593 err = -EDQUOT; 593 err = -EDQUOT;
594 goto fail_drop; 594 goto fail_drop;
595 } 595 }
@@ -620,10 +620,10 @@ really_out:
620 return ret; 620 return ret;
621 621
622fail_free_drop: 622fail_free_drop:
623 DQUOT_FREE_INODE(inode); 623 vfs_dq_free_inode(inode);
624 624
625fail_drop: 625fail_drop:
626 DQUOT_DROP(inode); 626 vfs_dq_drop(inode);
627 inode->i_flags |= S_NOQUOTA; 627 inode->i_flags |= S_NOQUOTA;
628 inode->i_nlink = 0; 628 inode->i_nlink = 0;
629 unlock_new_inode(inode); 629 unlock_new_inode(inode);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 05e5c2e5c0d7..4a09ff169870 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3063,7 +3063,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3063 error = PTR_ERR(handle); 3063 error = PTR_ERR(handle);
3064 goto err_out; 3064 goto err_out;
3065 } 3065 }
3066 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 3066 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
3067 if (error) { 3067 if (error) {
3068 ext3_journal_stop(handle); 3068 ext3_journal_stop(handle);
3069 return error; 3069 return error;
@@ -3154,7 +3154,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
3154 ret = 2 * (bpp + indirects) + 2; 3154 ret = 2 * (bpp + indirects) + 2;
3155 3155
3156#ifdef CONFIG_QUOTA 3156#ifdef CONFIG_QUOTA
3157 /* We know that structure was already allocated during DQUOT_INIT so 3157 /* We know that structure was already allocated during vfs_dq_init so
3158 * we will be updating only the data blocks + inodes */ 3158 * we will be updating only the data blocks + inodes */
3159 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); 3159 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
3160#endif 3160#endif
@@ -3245,7 +3245,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3245 * i_size has been changed by generic_commit_write() and we thus need 3245 * i_size has been changed by generic_commit_write() and we thus need
3246 * to include the updated inode in the current transaction. 3246 * to include the updated inode in the current transaction.
3247 * 3247 *
3248 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 3248 * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
3249 * are allocated to the file. 3249 * are allocated to the file.
3250 * 3250 *
3251 * If the inode is marked synchronous, we don't honour that here - doing 3251 * If the inode is marked synchronous, we don't honour that here - doing
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 4db4ffa1edad..e2fc63cbba8b 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2049,7 +2049,7 @@ static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
2049 2049
2050 /* Initialize quotas before so that eventual writes go in 2050 /* Initialize quotas before so that eventual writes go in
2051 * separate transaction */ 2051 * separate transaction */
2052 DQUOT_INIT(dentry->d_inode); 2052 vfs_dq_init(dentry->d_inode);
2053 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); 2053 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
2054 if (IS_ERR(handle)) 2054 if (IS_ERR(handle))
2055 return PTR_ERR(handle); 2055 return PTR_ERR(handle);
@@ -2108,7 +2108,7 @@ static int ext3_unlink(struct inode * dir, struct dentry *dentry)
2108 2108
2109 /* Initialize quotas before so that eventual writes go 2109 /* Initialize quotas before so that eventual writes go
2110 * in separate transaction */ 2110 * in separate transaction */
2111 DQUOT_INIT(dentry->d_inode); 2111 vfs_dq_init(dentry->d_inode);
2112 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb)); 2112 handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
2113 if (IS_ERR(handle)) 2113 if (IS_ERR(handle))
2114 return PTR_ERR(handle); 2114 return PTR_ERR(handle);
@@ -2272,7 +2272,7 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
2272 /* Initialize quotas before so that eventual writes go 2272 /* Initialize quotas before so that eventual writes go
2273 * in separate transaction */ 2273 * in separate transaction */
2274 if (new_dentry->d_inode) 2274 if (new_dentry->d_inode)
2275 DQUOT_INIT(new_dentry->d_inode); 2275 vfs_dq_init(new_dentry->d_inode);
2276 handle = ext3_journal_start(old_dir, 2 * 2276 handle = ext3_journal_start(old_dir, 2 *
2277 EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) + 2277 EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
2278 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2); 2278 EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4a970411a458..9e5b8e387e1e 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
707#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") 707#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
708#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 708#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
709 709
710static int ext3_dquot_initialize(struct inode *inode, int type);
711static int ext3_dquot_drop(struct inode *inode);
712static int ext3_write_dquot(struct dquot *dquot); 710static int ext3_write_dquot(struct dquot *dquot);
713static int ext3_acquire_dquot(struct dquot *dquot); 711static int ext3_acquire_dquot(struct dquot *dquot);
714static int ext3_release_dquot(struct dquot *dquot); 712static int ext3_release_dquot(struct dquot *dquot);
@@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
723 const char *data, size_t len, loff_t off); 721 const char *data, size_t len, loff_t off);
724 722
725static struct dquot_operations ext3_quota_operations = { 723static struct dquot_operations ext3_quota_operations = {
726 .initialize = ext3_dquot_initialize, 724 .initialize = dquot_initialize,
727 .drop = ext3_dquot_drop, 725 .drop = dquot_drop,
728 .alloc_space = dquot_alloc_space, 726 .alloc_space = dquot_alloc_space,
729 .alloc_inode = dquot_alloc_inode, 727 .alloc_inode = dquot_alloc_inode,
730 .free_space = dquot_free_space, 728 .free_space = dquot_free_space,
@@ -1438,7 +1436,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1438 } 1436 }
1439 1437
1440 list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); 1438 list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
1441 DQUOT_INIT(inode); 1439 vfs_dq_init(inode);
1442 if (inode->i_nlink) { 1440 if (inode->i_nlink) {
1443 printk(KERN_DEBUG 1441 printk(KERN_DEBUG
1444 "%s: truncating inode %lu to %Ld bytes\n", 1442 "%s: truncating inode %lu to %Ld bytes\n",
@@ -2702,7 +2700,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
2702 * Process 1 Process 2 2700 * Process 1 Process 2
2703 * ext3_create() quota_sync() 2701 * ext3_create() quota_sync()
2704 * journal_start() write_dquot() 2702 * journal_start() write_dquot()
2705 * DQUOT_INIT() down(dqio_mutex) 2703 * vfs_dq_init() down(dqio_mutex)
2706 * down(dqio_mutex) journal_start() 2704 * down(dqio_mutex) journal_start()
2707 * 2705 *
2708 */ 2706 */
@@ -2714,44 +2712,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
2714 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 2712 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
2715} 2713}
2716 2714
2717static int ext3_dquot_initialize(struct inode *inode, int type)
2718{
2719 handle_t *handle;
2720 int ret, err;
2721
2722 /* We may create quota structure so we need to reserve enough blocks */
2723 handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
2724 if (IS_ERR(handle))
2725 return PTR_ERR(handle);
2726 ret = dquot_initialize(inode, type);
2727 err = ext3_journal_stop(handle);
2728 if (!ret)
2729 ret = err;
2730 return ret;
2731}
2732
2733static int ext3_dquot_drop(struct inode *inode)
2734{
2735 handle_t *handle;
2736 int ret, err;
2737
2738 /* We may delete quota structure so we need to reserve enough blocks */
2739 handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
2740 if (IS_ERR(handle)) {
2741 /*
2742 * We call dquot_drop() anyway to at least release references
2743 * to quota structures so that umount does not hang.
2744 */
2745 dquot_drop(inode);
2746 return PTR_ERR(handle);
2747 }
2748 ret = dquot_drop(inode);
2749 err = ext3_journal_stop(handle);
2750 if (!ret)
2751 ret = err;
2752 return ret;
2753}
2754
2755static int ext3_write_dquot(struct dquot *dquot) 2715static int ext3_write_dquot(struct dquot *dquot)
2756{ 2716{
2757 int ret, err; 2717 int ret, err;
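ext3_dquot_initialize() and ext3_dquot_drop() existed only to wrap the generic dquot_initialize()/dquot_drop() in a journal handle; this patch deletes them and points the .initialize/.drop operations at the generic helpers directly (ext4 gets the identical treatment further down). The implication, hedged since the quota-core side is not part of this diff, is that these paths no longer allocate or delete on-disk quota structures themselves, with the disk-touching work staying in the still-journalled acquire/release callbacks:

    static struct dquot_operations ext3_quota_operations = {
        .initialize    = dquot_initialize,    /* generic, no handle needed */
        .drop          = dquot_drop,
        .acquire_dquot = ext3_acquire_dquot,  /* still wraps a journal handle */
        .release_dquot = ext3_release_dquot,
        /* remaining callbacks unchanged by this patch */
    };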
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 175414ac2210..83b7be849bd5 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -498,7 +498,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
498 error = ext3_journal_dirty_metadata(handle, bh); 498 error = ext3_journal_dirty_metadata(handle, bh);
499 if (IS_SYNC(inode)) 499 if (IS_SYNC(inode))
500 handle->h_sync = 1; 500 handle->h_sync = 1;
501 DQUOT_FREE_BLOCK(inode, 1); 501 vfs_dq_free_block(inode, 1);
502 ea_bdebug(bh, "refcount now=%d; releasing", 502 ea_bdebug(bh, "refcount now=%d; releasing",
503 le32_to_cpu(BHDR(bh)->h_refcount)); 503 le32_to_cpu(BHDR(bh)->h_refcount));
504 if (ce) 504 if (ce)
@@ -774,7 +774,7 @@ inserted:
774 /* The old block is released after updating 774 /* The old block is released after updating
775 the inode. */ 775 the inode. */
776 error = -EDQUOT; 776 error = -EDQUOT;
777 if (DQUOT_ALLOC_BLOCK(inode, 1)) 777 if (vfs_dq_alloc_block(inode, 1))
778 goto cleanup; 778 goto cleanup;
779 error = ext3_journal_get_write_access(handle, 779 error = ext3_journal_get_write_access(handle,
780 new_bh); 780 new_bh);
@@ -848,7 +848,7 @@ cleanup:
848 return error; 848 return error;
849 849
850cleanup_dquot: 850cleanup_dquot:
851 DQUOT_FREE_BLOCK(inode, 1); 851 vfs_dq_free_block(inode, 1);
852 goto cleanup; 852 goto cleanup;
853 853
854bad_block: 854bad_block:
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index de9459b4cb94..38f40d55899c 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -536,7 +536,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
536 ext4_mb_free_blocks(handle, inode, block, count, 536 ext4_mb_free_blocks(handle, inode, block, count,
537 metadata, &dquot_freed_blocks); 537 metadata, &dquot_freed_blocks);
538 if (dquot_freed_blocks) 538 if (dquot_freed_blocks)
539 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks); 539 vfs_dq_free_block(inode, dquot_freed_blocks);
540 return; 540 return;
541} 541}
542 542
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b0c87dce66a3..6083bb38057b 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -20,6 +20,7 @@
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/magic.h> 21#include <linux/magic.h>
22#include <linux/jbd2.h> 22#include <linux/jbd2.h>
23#include <linux/quota.h>
23#include "ext4_i.h" 24#include "ext4_i.h"
24 25
25/* 26/*
@@ -1098,6 +1099,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
1098extern int ext4_block_truncate_page(handle_t *handle, 1099extern int ext4_block_truncate_page(handle_t *handle,
1099 struct address_space *mapping, loff_t from); 1100 struct address_space *mapping, loff_t from);
1100extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page); 1101extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
1102extern qsize_t ext4_get_reserved_space(struct inode *inode);
1101 1103
1102/* ioctl.c */ 1104/* ioctl.c */
1103extern long ext4_ioctl(struct file *, unsigned int, unsigned long); 1105extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2d2b3585ee91..fb51b40e3e8f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -220,10 +220,10 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
220 * Note: we must free any quota before locking the superblock, 220 * Note: we must free any quota before locking the superblock,
221 * as writing the quota to disk may need the lock as well. 221 * as writing the quota to disk may need the lock as well.
222 */ 222 */
223 DQUOT_INIT(inode); 223 vfs_dq_init(inode);
224 ext4_xattr_delete_inode(handle, inode); 224 ext4_xattr_delete_inode(handle, inode);
225 DQUOT_FREE_INODE(inode); 225 vfs_dq_free_inode(inode);
226 DQUOT_DROP(inode); 226 vfs_dq_drop(inode);
227 227
228 is_directory = S_ISDIR(inode->i_mode); 228 is_directory = S_ISDIR(inode->i_mode);
229 229
@@ -915,7 +915,7 @@ got:
915 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; 915 ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
916 916
917 ret = inode; 917 ret = inode;
918 if (DQUOT_ALLOC_INODE(inode)) { 918 if (vfs_dq_alloc_inode(inode)) {
919 err = -EDQUOT; 919 err = -EDQUOT;
920 goto fail_drop; 920 goto fail_drop;
921 } 921 }
@@ -956,10 +956,10 @@ really_out:
956 return ret; 956 return ret;
957 957
958fail_free_drop: 958fail_free_drop:
959 DQUOT_FREE_INODE(inode); 959 vfs_dq_free_inode(inode);
960 960
961fail_drop: 961fail_drop:
962 DQUOT_DROP(inode); 962 vfs_dq_drop(inode);
963 inode->i_flags |= S_NOQUOTA; 963 inode->i_flags |= S_NOQUOTA;
964 inode->i_nlink = 0; 964 inode->i_nlink = 0;
965 unlock_new_inode(inode); 965 unlock_new_inode(inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c7fed5b18745..71d3ecd5db79 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -975,6 +975,17 @@ out:
975 return err; 975 return err;
976} 976}
977 977
978qsize_t ext4_get_reserved_space(struct inode *inode)
979{
980 unsigned long long total;
981
982 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
983 total = EXT4_I(inode)->i_reserved_data_blocks +
984 EXT4_I(inode)->i_reserved_meta_blocks;
985 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
986
987 return total;
988}
978/* 989/*
979 * Calculate the number of metadata blocks need to reserve 990 * Calculate the number of metadata blocks need to reserve
980 * to allocate @blocks for non extent file based file 991 * to allocate @blocks for non extent file based file
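ext4_get_reserved_space() gives the quota layer a consistent snapshot, taken under i_block_reservation_lock, of how many blocks this inode currently holds in delayed-allocation reservations (data plus metadata). It is exported through the new .get_reserved_space member of ext4_quota_operations later in this patch; a hypothetical consumer on the quota side would look like:

    /* Hypothetical caller: fold per-inode reservations into quota totals. */
    qsize_t reserved = 0;

    if (inode->i_sb->dq_op->get_reserved_space)
        reserved = inode->i_sb->dq_op->get_reserved_space(inode);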
@@ -1036,8 +1047,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
1036 /* update per-inode reservations */ 1047 /* update per-inode reservations */
1037 BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); 1048 BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
1038 EXT4_I(inode)->i_reserved_data_blocks -= used; 1049 EXT4_I(inode)->i_reserved_data_blocks -= used;
1039
1040 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1050 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1051
1052 /*
1053 * free those over-booking quota for metadata blocks
1054 */
1055
1056 if (mdb_free)
1057 vfs_dq_release_reservation_block(inode, mdb_free);
1041} 1058}
1042 1059
1043/* 1060/*
@@ -1553,8 +1570,8 @@ static int ext4_journalled_write_end(struct file *file,
1553static int ext4_da_reserve_space(struct inode *inode, int nrblocks) 1570static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1554{ 1571{
1555 int retries = 0; 1572 int retries = 0;
1556 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1573 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1557 unsigned long md_needed, mdblocks, total = 0; 1574 unsigned long md_needed, mdblocks, total = 0;
1558 1575
1559 /* 1576 /*
1560 * recalculate the amount of metadata blocks to reserve 1577 * recalculate the amount of metadata blocks to reserve
@@ -1570,12 +1587,23 @@ repeat:
1570 md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; 1587 md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1571 total = md_needed + nrblocks; 1588 total = md_needed + nrblocks;
1572 1589
1590 /*
1591 * Make quota reservation here to prevent quota overflow
1592 * later. Real quota accounting is done at pages writeout
1593 * time.
1594 */
1595 if (vfs_dq_reserve_block(inode, total)) {
1596 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1597 return -EDQUOT;
1598 }
1599
1573 if (ext4_claim_free_blocks(sbi, total)) { 1600 if (ext4_claim_free_blocks(sbi, total)) {
1574 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1601 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1575 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1602 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1576 yield(); 1603 yield();
1577 goto repeat; 1604 goto repeat;
1578 } 1605 }
1606 vfs_dq_release_reservation_block(inode, total);
1579 return -ENOSPC; 1607 return -ENOSPC;
1580 } 1608 }
1581 EXT4_I(inode)->i_reserved_data_blocks += nrblocks; 1609 EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1629,6 +1657,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
1629 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1657 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1630 EXT4_I(inode)->i_reserved_meta_blocks = mdb; 1658 EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1631 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1659 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1660
1661 vfs_dq_release_reservation_block(inode, release);
1632} 1662}
1633 1663
1634static void ext4_da_page_release_reservation(struct page *page, 1664static void ext4_da_page_release_reservation(struct page *page,
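Taken together, the inode.c hunks give delayed allocation a three-stage quota protocol that mirrors what ext4 already does for its own free-block counters: reserve when data enters the pagecache, claim when real blocks are allocated at writeout, release whatever was reserved but never used. In outline:

    /* 1. ext4_da_reserve_space(), at buffered-write time: */
    if (vfs_dq_reserve_block(inode, total))
        return -EDQUOT;                    /* fail before the ENOSPC check */

    /* 2a. writeout allocates real blocks (see the mballoc.c hunk below): */
    vfs_dq_claim_block(inode, allocated);  /* reservation becomes real usage */

    /* 2b. ...or the reservation is dropped (truncate, error unwind): */
    vfs_dq_release_reservation_block(inode, unused);

Note the error path added to ext4_da_reserve_space(): if the filesystem-level ext4_claim_free_blocks() fails after the quota reservation succeeded, the reservation is handed back before returning -ENOSPC.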
@@ -4612,7 +4642,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4612 error = PTR_ERR(handle); 4642 error = PTR_ERR(handle);
4613 goto err_out; 4643 goto err_out;
4614 } 4644 }
4615 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 4645 error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
4616 if (error) { 4646 if (error) {
4617 ext4_journal_stop(handle); 4647 ext4_journal_stop(handle);
4618 return error; 4648 return error;
@@ -4991,7 +5021,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4991 * i_size has been changed by generic_commit_write() and we thus need 5021 * i_size has been changed by generic_commit_write() and we thus need
4992 * to include the updated inode in the current transaction. 5022 * to include the updated inode in the current transaction.
4993 * 5023 *
4994 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 5024 * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
4995 * are allocated to the file. 5025 * are allocated to the file.
4996 * 5026 *
4997 * If the inode is marked synchronous, we don't honour that here - doing 5027 * If the inode is marked synchronous, we don't honour that here - doing
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9f61e62f435f..b038188bd039 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3086 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 3086 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3087 /* release all the reserved blocks if non delalloc */ 3087 /* release all the reserved blocks if non delalloc */
3088 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); 3088 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
3089 else 3089 else {
3090 percpu_counter_sub(&sbi->s_dirtyblocks_counter, 3090 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
3091 ac->ac_b_ex.fe_len); 3091 ac->ac_b_ex.fe_len);
3092 /* convert reserved quota blocks to real quota blocks */
3093 vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
3094 }
3092 3095
3093 if (sbi->s_log_groups_per_flex) { 3096 if (sbi->s_log_groups_per_flex) {
3094 ext4_group_t flex_group = ext4_flex_group(sbi, 3097 ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -4544,7 +4547,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4544 struct ext4_sb_info *sbi; 4547 struct ext4_sb_info *sbi;
4545 struct super_block *sb; 4548 struct super_block *sb;
4546 ext4_fsblk_t block = 0; 4549 ext4_fsblk_t block = 0;
4547 unsigned int inquota; 4550 unsigned int inquota = 0;
4548 unsigned int reserv_blks = 0; 4551 unsigned int reserv_blks = 0;
4549 4552
4550 sb = ar->inode->i_sb; 4553 sb = ar->inode->i_sb;
@@ -4562,9 +4565,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4562 (unsigned long long) ar->pleft, 4565 (unsigned long long) ar->pleft,
4563 (unsigned long long) ar->pright); 4566 (unsigned long long) ar->pright);
4564 4567
4565 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) { 4568 /*
4566 /* 4569 * For delayed allocation, we could skip the ENOSPC and
4567 * With delalloc we already reserved the blocks 4570 * EDQUOT check, as blocks and quotas have been already
4571 * reserved when data is being copied into pagecache.
4572 */
4573 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4574 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4575 else {
4576 /* Without delayed allocation we need to verify
4577 * there are enough free blocks to do block allocation
4578 * and verify allocation doesn't exceed the quota limits.
4568 */ 4579 */
4569 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { 4580 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4570 /* let others to free the space */ 4581 /* let others to free the space */
@@ -4576,19 +4587,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4576 return 0; 4587 return 0;
4577 } 4588 }
4578 reserv_blks = ar->len; 4589 reserv_blks = ar->len;
4590 while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
4591 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4592 ar->len--;
4593 }
4594 inquota = ar->len;
4595 if (ar->len == 0) {
4596 *errp = -EDQUOT;
4597 goto out3;
4598 }
4579 } 4599 }
4580 while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4581 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4582 ar->len--;
4583 }
4584 if (ar->len == 0) {
4585 *errp = -EDQUOT;
4586 goto out3;
4587 }
4588 inquota = ar->len;
4589
4590 if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4591 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4592 4600
4593 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); 4601 ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4594 if (!ac) { 4602 if (!ac) {
@@ -4654,8 +4662,8 @@ repeat:
4654out2: 4662out2:
4655 kmem_cache_free(ext4_ac_cachep, ac); 4663 kmem_cache_free(ext4_ac_cachep, ac);
4656out1: 4664out1:
4657 if (ar->len < inquota) 4665 if (inquota && ar->len < inquota)
4658 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); 4666 vfs_dq_free_block(ar->inode, inquota - ar->len);
4659out3: 4667out3:
4660 if (!ar->len) { 4668 if (!ar->len) {
4661 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) 4669 if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
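ext4_mb_new_blocks() now splits the two allocation modes explicitly. Delalloc requests just get tagged EXT4_MB_DELALLOC_RESERVED, since both blocks and quota were reserved earlier; non-delalloc requests keep the old up-front charge, shrinking the request until quota accepts it. Initializing inquota to 0 is what lets the shared cleanup refund quota only when it was actually charged:

    /* Non-delalloc path: charge quota up front, shrinking as needed. */
    while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
        ar->flags |= EXT4_MB_HINT_NOPREALLOC;
        ar->len--;
    }
    inquota = ar->len;     /* stays 0 on the delalloc path */

    /* Shared cleanup (out1): refund only a real, partially used charge. */
    if (inquota && ar->len < inquota)
        vfs_dq_free_block(ar->inode, inquota - ar->len);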
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ba702bd7910d..83410244d3ee 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2092,7 +2092,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
2092 2092
2093 /* Initialize quotas before so that eventual writes go in 2093 /* Initialize quotas before so that eventual writes go in
2094 * separate transaction */ 2094 * separate transaction */
2095 DQUOT_INIT(dentry->d_inode); 2095 vfs_dq_init(dentry->d_inode);
2096 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); 2096 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
2097 if (IS_ERR(handle)) 2097 if (IS_ERR(handle))
2098 return PTR_ERR(handle); 2098 return PTR_ERR(handle);
@@ -2151,7 +2151,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2151 2151
2152 /* Initialize quotas before so that eventual writes go 2152 /* Initialize quotas before so that eventual writes go
2153 * in separate transaction */ 2153 * in separate transaction */
2154 DQUOT_INIT(dentry->d_inode); 2154 vfs_dq_init(dentry->d_inode);
2155 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb)); 2155 handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
2156 if (IS_ERR(handle)) 2156 if (IS_ERR(handle))
2157 return PTR_ERR(handle); 2157 return PTR_ERR(handle);
@@ -2318,7 +2318,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2318 /* Initialize quotas before so that eventual writes go 2318 /* Initialize quotas before so that eventual writes go
2319 * in separate transaction */ 2319 * in separate transaction */
2320 if (new_dentry->d_inode) 2320 if (new_dentry->d_inode)
2321 DQUOT_INIT(new_dentry->d_inode); 2321 vfs_dq_init(new_dentry->d_inode);
2322 handle = ext4_journal_start(old_dir, 2 * 2322 handle = ext4_journal_start(old_dir, 2 *
2323 EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) + 2323 EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
2324 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2); 2324 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 39d1993cfa13..f7371a6a923d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_
926#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group") 926#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
927#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 927#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
928 928
929static int ext4_dquot_initialize(struct inode *inode, int type);
930static int ext4_dquot_drop(struct inode *inode);
931static int ext4_write_dquot(struct dquot *dquot); 929static int ext4_write_dquot(struct dquot *dquot);
932static int ext4_acquire_dquot(struct dquot *dquot); 930static int ext4_acquire_dquot(struct dquot *dquot);
933static int ext4_release_dquot(struct dquot *dquot); 931static int ext4_release_dquot(struct dquot *dquot);
@@ -942,9 +940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
942 const char *data, size_t len, loff_t off); 940 const char *data, size_t len, loff_t off);
943 941
944static struct dquot_operations ext4_quota_operations = { 942static struct dquot_operations ext4_quota_operations = {
945 .initialize = ext4_dquot_initialize, 943 .initialize = dquot_initialize,
946 .drop = ext4_dquot_drop, 944 .drop = dquot_drop,
947 .alloc_space = dquot_alloc_space, 945 .alloc_space = dquot_alloc_space,
946 .reserve_space = dquot_reserve_space,
947 .claim_space = dquot_claim_space,
948 .release_rsv = dquot_release_reserved_space,
949 .get_reserved_space = ext4_get_reserved_space,
948 .alloc_inode = dquot_alloc_inode, 950 .alloc_inode = dquot_alloc_inode,
949 .free_space = dquot_free_space, 951 .free_space = dquot_free_space,
950 .free_inode = dquot_free_inode, 952 .free_inode = dquot_free_inode,
@@ -1802,7 +1804,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
1802 } 1804 }
1803 1805
1804 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan); 1806 list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
1805 DQUOT_INIT(inode); 1807 vfs_dq_init(inode);
1806 if (inode->i_nlink) { 1808 if (inode->i_nlink) {
1807 printk(KERN_DEBUG 1809 printk(KERN_DEBUG
1808 "%s: truncating inode %lu to %lld bytes\n", 1810 "%s: truncating inode %lu to %lld bytes\n",
@@ -3367,8 +3369,8 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
3367 * is locked for write. Otherwise the are possible deadlocks: 3369 * is locked for write. Otherwise the are possible deadlocks:
3368 * Process 1 Process 2 3370 * Process 1 Process 2
3369 * ext4_create() quota_sync() 3371 * ext4_create() quota_sync()
3370 * jbd2_journal_start() write_dquot() 3372 * jbd2_journal_start() write_dquot()
3371 * DQUOT_INIT() down(dqio_mutex) 3373 * vfs_dq_init() down(dqio_mutex)
3372 * down(dqio_mutex) jbd2_journal_start() 3374 * down(dqio_mutex) jbd2_journal_start()
3373 * 3375 *
3374 */ 3376 */
@@ -3380,44 +3382,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
3380 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type]; 3382 return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
3381} 3383}
3382 3384
3383static int ext4_dquot_initialize(struct inode *inode, int type)
3384{
3385 handle_t *handle;
3386 int ret, err;
3387
3388 /* We may create quota structure so we need to reserve enough blocks */
3389 handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
3390 if (IS_ERR(handle))
3391 return PTR_ERR(handle);
3392 ret = dquot_initialize(inode, type);
3393 err = ext4_journal_stop(handle);
3394 if (!ret)
3395 ret = err;
3396 return ret;
3397}
3398
3399static int ext4_dquot_drop(struct inode *inode)
3400{
3401 handle_t *handle;
3402 int ret, err;
3403
3404 /* We may delete quota structure so we need to reserve enough blocks */
3405 handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
3406 if (IS_ERR(handle)) {
3407 /*
3408 * We call dquot_drop() anyway to at least release references
3409 * to quota structures so that umount does not hang.
3410 */
3411 dquot_drop(inode);
3412 return PTR_ERR(handle);
3413 }
3414 ret = dquot_drop(inode);
3415 err = ext4_journal_stop(handle);
3416 if (!ret)
3417 ret = err;
3418 return ret;
3419}
3420
3421static int ext4_write_dquot(struct dquot *dquot) 3385static int ext4_write_dquot(struct dquot *dquot)
3422{ 3386{
3423 int ret, err; 3387 int ret, err;
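The super.c hunk wires the new machinery into the quota core: three generic helpers for the reserve/claim/release stages plus the ext4-specific reservation reporter, alongside the same .initialize/.drop simplification as ext3. The fields, as added by this patch:

    static struct dquot_operations ext4_quota_operations = {
        .initialize         = dquot_initialize,
        .drop               = dquot_drop,
        .alloc_space        = dquot_alloc_space,
        .reserve_space      = dquot_reserve_space,            /* new */
        .claim_space        = dquot_claim_space,              /* new */
        .release_rsv        = dquot_release_reserved_space,   /* new */
        .get_reserved_space = ext4_get_reserved_space,        /* new */
        /* alloc_inode, free_space, free_inode, ... unchanged */
    };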
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 157ce6589c54..62b31c246994 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -490,7 +490,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
490 error = ext4_handle_dirty_metadata(handle, inode, bh); 490 error = ext4_handle_dirty_metadata(handle, inode, bh);
491 if (IS_SYNC(inode)) 491 if (IS_SYNC(inode))
492 ext4_handle_sync(handle); 492 ext4_handle_sync(handle);
493 DQUOT_FREE_BLOCK(inode, 1); 493 vfs_dq_free_block(inode, 1);
494 ea_bdebug(bh, "refcount now=%d; releasing", 494 ea_bdebug(bh, "refcount now=%d; releasing",
495 le32_to_cpu(BHDR(bh)->h_refcount)); 495 le32_to_cpu(BHDR(bh)->h_refcount));
496 if (ce) 496 if (ce)
@@ -784,7 +784,7 @@ inserted:
784 /* The old block is released after updating 784 /* The old block is released after updating
785 the inode. */ 785 the inode. */
786 error = -EDQUOT; 786 error = -EDQUOT;
787 if (DQUOT_ALLOC_BLOCK(inode, 1)) 787 if (vfs_dq_alloc_block(inode, 1))
788 goto cleanup; 788 goto cleanup;
789 error = ext4_journal_get_write_access(handle, 789 error = ext4_journal_get_write_access(handle,
790 new_bh); 790 new_bh);
@@ -860,7 +860,7 @@ cleanup:
860 return error; 860 return error;
861 861
862cleanup_dquot: 862cleanup_dquot:
863 DQUOT_FREE_BLOCK(inode, 1); 863 vfs_dq_free_block(inode, 1);
864 goto cleanup; 864 goto cleanup;
865 865
866bad_block: 866bad_block:
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 7ba03a4acbe0..da3f361a37dd 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -188,7 +188,7 @@ old_compare:
188 goto out; 188 goto out;
189} 189}
190 190
191static struct dentry_operations msdos_dentry_operations = { 191static const struct dentry_operations msdos_dentry_operations = {
192 .d_hash = msdos_hash, 192 .d_hash = msdos_hash,
193 .d_compare = msdos_cmp, 193 .d_compare = msdos_cmp,
194}; 194};
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 8ae32e37673c..a0e00e3a46e9 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -166,13 +166,13 @@ static int vfat_cmp(struct dentry *dentry, struct qstr *a, struct qstr *b)
166 return 1; 166 return 1;
167} 167}
168 168
169static struct dentry_operations vfat_ci_dentry_ops = { 169static const struct dentry_operations vfat_ci_dentry_ops = {
170 .d_revalidate = vfat_revalidate_ci, 170 .d_revalidate = vfat_revalidate_ci,
171 .d_hash = vfat_hashi, 171 .d_hash = vfat_hashi,
172 .d_compare = vfat_cmpi, 172 .d_compare = vfat_cmpi,
173}; 173};
174 174
175static struct dentry_operations vfat_dentry_ops = { 175static const struct dentry_operations vfat_dentry_ops = {
176 .d_revalidate = vfat_revalidate, 176 .d_revalidate = vfat_revalidate,
177 .d_hash = vfat_hash, 177 .d_hash = vfat_hash,
178 .d_compare = vfat_cmp, 178 .d_compare = vfat_cmp,
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fdff346e96fd..06da05261e04 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -224,7 +224,7 @@ static int invalid_nodeid(u64 nodeid)
224 return !nodeid || nodeid == FUSE_ROOT_ID; 224 return !nodeid || nodeid == FUSE_ROOT_ID;
225} 225}
226 226
227struct dentry_operations fuse_dentry_operations = { 227const struct dentry_operations fuse_dentry_operations = {
228 .d_revalidate = fuse_dentry_revalidate, 228 .d_revalidate = fuse_dentry_revalidate,
229}; 229};
230 230
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5e64b815a5a1..6fc5aedaa0d5 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -493,7 +493,7 @@ static inline u64 get_node_id(struct inode *inode)
 /** Device operations */
 extern const struct file_operations fuse_dev_operations;
 
-extern struct dentry_operations fuse_dentry_operations;
+extern const struct dentry_operations fuse_dentry_operations;
 
 /**
  * Get a filled in inode
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 5eb57b044382..022c66cd5606 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -107,7 +107,7 @@ static int gfs2_dhash(struct dentry *dentry, struct qstr *str)
 	return 0;
 }
 
-struct dentry_operations gfs2_dops = {
+const struct dentry_operations gfs2_dops = {
 	.d_revalidate = gfs2_drevalidate,
 	.d_hash = gfs2_dhash,
 };
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 91abdbedcc86..b56413e3e40d 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -49,7 +49,7 @@ extern struct file_system_type gfs2_fs_type;
 extern struct file_system_type gfs2meta_fs_type;
 extern const struct export_operations gfs2_export_ops;
 extern const struct super_operations gfs2_super_ops;
-extern struct dentry_operations gfs2_dops;
+extern const struct dentry_operations gfs2_dops;
 
 #endif /* __SUPER_DOT_H__ */
 
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 9955232fdf8c..052387e11671 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -213,7 +213,7 @@ extern void hfs_mdb_put(struct super_block *);
 extern int hfs_part_find(struct super_block *, sector_t *, sector_t *);
 
 /* string.c */
-extern struct dentry_operations hfs_dentry_operations;
+extern const struct dentry_operations hfs_dentry_operations;
 
 extern int hfs_hash_dentry(struct dentry *, struct qstr *);
 extern int hfs_strcmp(const unsigned char *, unsigned int,
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 5bf89ec01cd4..7478f5c219aa 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -31,7 +31,7 @@ static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
 	return 1;
 }
 
-struct dentry_operations hfs_dentry_operations =
+const struct dentry_operations hfs_dentry_operations =
 {
 	.d_revalidate = hfs_revalidate_dentry,
 	.d_hash = hfs_hash_dentry,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index f027a905225f..5c10d803d9df 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -327,7 +327,7 @@ void hfsplus_file_truncate(struct inode *);
 /* inode.c */
 extern const struct address_space_operations hfsplus_aops;
 extern const struct address_space_operations hfsplus_btree_aops;
-extern struct dentry_operations hfsplus_dentry_operations;
+extern const struct dentry_operations hfsplus_dentry_operations;
 
 void hfsplus_inode_read_fork(struct inode *, struct hfsplus_fork_raw *);
 void hfsplus_inode_write_fork(struct inode *, struct hfsplus_fork_raw *);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index f105ee9e1cc4..1bcf597c0562 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -137,7 +137,7 @@ const struct address_space_operations hfsplus_aops = {
 	.writepages = hfsplus_writepages,
 };
 
-struct dentry_operations hfsplus_dentry_operations = {
+const struct dentry_operations hfsplus_dentry_operations = {
 	.d_hash       = hfsplus_hash_dentry,
 	.d_compare    = hfsplus_compare_dentry,
 };
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5c538e0ec14b..fe02ad4740e7 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -31,12 +31,12 @@ static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
 
 #define FILE_HOSTFS_I(file) HOSTFS_I((file)->f_path.dentry->d_inode)
 
-int hostfs_d_delete(struct dentry *dentry)
+static int hostfs_d_delete(struct dentry *dentry)
 {
 	return 1;
 }
 
-struct dentry_operations hostfs_dentry_ops = {
+static const struct dentry_operations hostfs_dentry_ops = {
 	.d_delete	= hostfs_d_delete,
 };
 
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 08319126b2af..940d6d150bee 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -49,7 +49,7 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
 	return 0;
 }
 
-static struct dentry_operations hpfs_dentry_operations = {
+static const struct dentry_operations hpfs_dentry_operations = {
 	.d_hash		= hpfs_hash_dentry,
 	.d_compare	= hpfs_compare_dentry,
 };
diff --git a/fs/inode.c b/fs/inode.c
index 643ac43e5a5c..d06d6d268de9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -294,7 +294,7 @@ void clear_inode(struct inode *inode)
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
 	inode_sync_wait(inode);
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	if (inode->i_sb->s_op->clear_inode)
 		inode->i_sb->s_op->clear_inode(inode);
 	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
@@ -366,6 +366,8 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 		if (tmp == head)
 			break;
 		inode = list_entry(tmp, struct inode, i_sb_list);
+		if (inode->i_state & I_NEW)
+			continue;
 		invalidate_inode_buffers(inode);
 		if (!atomic_read(&inode->i_count)) {
 			list_move(&inode->i_list, dispose);
@@ -1168,7 +1170,7 @@ void generic_delete_inode(struct inode *inode)
 	if (op->delete_inode) {
 		void (*delete)(struct inode *) = op->delete_inode;
 		if (!is_bad_inode(inode))
-			DQUOT_INIT(inode);
+			vfs_dq_init(inode);
 		/* Filesystems implementing their own
 		 * s_op->delete_inode are required to call
 		 * truncate_inode_pages and clear_inode()
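The invalidate_list() change above adds a guard for inodes still marked I_NEW, which are only partially constructed and must not have their buffers touched. A minimal sketch (not the kernel function itself) of the fixed loop shape:

	static void sketch_invalidate(struct list_head *head)
	{
		struct list_head *tmp;

		list_for_each(tmp, head) {
			struct inode *inode = list_entry(tmp, struct inode, i_sb_list);

			if (inode->i_state & I_NEW)
				continue;	/* not fully set up yet; skip it */
			invalidate_inode_buffers(inode);
		}
	}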
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 6147ec3643a0..13d2eddd0692 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -114,7 +114,7 @@ static const struct super_operations isofs_sops = {
 };
 
 
-static struct dentry_operations isofs_dentry_ops[] = {
+static const struct dentry_operations isofs_dentry_ops[] = {
 	{
 		.d_hash		= isofs_hash,
 		.d_compare	= isofs_dentry_cmp,
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index d3e5c33665de..a166c1669e82 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -233,7 +233,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
 
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
 	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-		if (DQUOT_TRANSFER(inode, iattr))
+		if (vfs_dq_transfer(inode, iattr))
 			return -EDQUOT;
 	}
 
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index b00ee9f05a06..b2ae190a77ba 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -158,9 +158,9 @@ void jfs_delete_inode(struct inode *inode)
 		/*
 		 * Free the inode from the quota allocation.
 		 */
-		DQUOT_INIT(inode);
-		DQUOT_FREE_INODE(inode);
-		DQUOT_DROP(inode);
+		vfs_dq_init(inode);
+		vfs_dq_free_inode(inode);
+		vfs_dq_drop(inode);
 	}
 
 	clear_inode(inode);
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 4dcc05819998..925871e9887b 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
 		 * It's time to move the inline table to an external
 		 * page and begin to build the xtree
 		 */
-		if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
+		if (vfs_dq_alloc_block(ip, sbi->nbperpage))
 			goto clean_up;
 		if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
-			DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+			vfs_dq_free_block(ip, sbi->nbperpage);
 			goto clean_up;
 		}
 
@@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
 			memcpy(&jfs_ip->i_dirtable, temp_table,
 			       sizeof (temp_table));
 			dbFree(ip, xaddr, sbi->nbperpage);
-			DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+			vfs_dq_free_block(ip, sbi->nbperpage);
 			goto clean_up;
 		}
 		ip->i_size = PSIZE;
@@ -1027,7 +1027,7 @@ static int dtSplitUp(tid_t tid,
 		n = xlen;
 
 		/* Allocate blocks to quota. */
-		if (DQUOT_ALLOC_BLOCK(ip, n)) {
+		if (vfs_dq_alloc_block(ip, n)) {
 			rc = -EDQUOT;
 			goto extendOut;
 		}
@@ -1308,7 +1308,7 @@ static int dtSplitUp(tid_t tid,
 
 	/* Rollback quota allocation */
 	if (rc && quota_allocation)
-		DQUOT_FREE_BLOCK(ip, quota_allocation);
+		vfs_dq_free_block(ip, quota_allocation);
 
       dtSplitUp_Exit:
 
@@ -1369,7 +1369,7 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
 		return -EIO;
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		release_metapage(rmp);
 		return -EDQUOT;
 	}
@@ -1916,7 +1916,7 @@ static int dtSplitRoot(tid_t tid,
 	rp = rmp->data;
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		release_metapage(rmp);
 		return -EDQUOT;
 	}
@@ -2287,7 +2287,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
 		xlen = lengthPXD(&fp->header.self);
 
 		/* Free quota allocation. */
-		DQUOT_FREE_BLOCK(ip, xlen);
+		vfs_dq_free_block(ip, xlen);
 
 		/* free/invalidate its buffer page */
 		discard_metapage(fmp);
@@ -2363,7 +2363,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
 			xlen = lengthPXD(&p->header.self);
 
 			/* Free quota allocation */
-			DQUOT_FREE_BLOCK(ip, xlen);
+			vfs_dq_free_block(ip, xlen);
 
 			/* free/invalidate its buffer page */
 			discard_metapage(mp);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 7ae1e3281de9..169802ea07f9 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -141,7 +141,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
 	}
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+	if (vfs_dq_alloc_block(ip, nxlen)) {
 		dbFree(ip, nxaddr, (s64) nxlen);
 		mutex_unlock(&JFS_IP(ip)->commit_mutex);
 		return -EDQUOT;
@@ -164,7 +164,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
 	 */
 	if (rc) {
 		dbFree(ip, nxaddr, nxlen);
-		DQUOT_FREE_BLOCK(ip, nxlen);
+		vfs_dq_free_block(ip, nxlen);
 		mutex_unlock(&JFS_IP(ip)->commit_mutex);
 		return (rc);
 	}
@@ -256,7 +256,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
 		goto exit;
 
 	/* Allocat blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, nxlen)) {
+	if (vfs_dq_alloc_block(ip, nxlen)) {
 		dbFree(ip, nxaddr, (s64) nxlen);
 		mutex_unlock(&JFS_IP(ip)->commit_mutex);
 		return -EDQUOT;
@@ -297,7 +297,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
 		/* extend the extent */
 		if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
 			dbFree(ip, xaddr + xlen, delta);
-			DQUOT_FREE_BLOCK(ip, nxlen);
+			vfs_dq_free_block(ip, nxlen);
 			goto exit;
 		}
 	} else {
@@ -308,7 +308,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
 		 */
 		if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
 			dbFree(ip, nxaddr, nxlen);
-			DQUOT_FREE_BLOCK(ip, nxlen);
+			vfs_dq_free_block(ip, nxlen);
 			goto exit;
 		}
 	}
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index d4d142c2edd4..dc0e02159ac9 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -116,7 +116,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 	/*
 	 * Allocate inode to quota.
 	 */
-	if (DQUOT_ALLOC_INODE(inode)) {
+	if (vfs_dq_alloc_inode(inode)) {
 		rc = -EDQUOT;
 		goto fail_drop;
 	}
@@ -162,7 +162,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 	return inode;
 
 fail_drop:
-	DQUOT_DROP(inode);
+	vfs_dq_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 fail_unlock:
 	inode->i_nlink = 0;
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index adb2fafcc544..1eff7db34d63 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -47,5 +47,5 @@ extern const struct file_operations jfs_dir_operations;
 extern const struct inode_operations jfs_file_inode_operations;
 extern const struct file_operations jfs_file_operations;
 extern const struct inode_operations jfs_symlink_inode_operations;
-extern struct dentry_operations jfs_ci_dentry_operations;
+extern const struct dentry_operations jfs_ci_dentry_operations;
 #endif /* _H_JFS_INODE */
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index ae3acafb447b..a27e26c90568 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -846,10 +846,10 @@ int xtInsert(tid_t tid, /* transaction id */
 			hint = addressXAD(xad) + lengthXAD(xad) - 1;
 		} else
 			hint = 0;
-		if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
+		if ((rc = vfs_dq_alloc_block(ip, xlen)))
 			goto out;
 		if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
-			DQUOT_FREE_BLOCK(ip, xlen);
+			vfs_dq_free_block(ip, xlen);
 			goto out;
 		}
 	}
@@ -878,7 +878,7 @@ int xtInsert(tid_t tid, /* transaction id */
 			/* undo data extent allocation */
 			if (*xaddrp == 0) {
 				dbFree(ip, xaddr, (s64) xlen);
-				DQUOT_FREE_BLOCK(ip, xlen);
+				vfs_dq_free_block(ip, xlen);
 			}
 			return rc;
 		}
@@ -1246,7 +1246,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
 	rbn = addressPXD(pxd);
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		rc = -EDQUOT;
 		goto clean_up;
 	}
@@ -1456,7 +1456,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
 
 	/* Rollback quota allocation. */
 	if (quota_allocation)
-		DQUOT_FREE_BLOCK(ip, quota_allocation);
+		vfs_dq_free_block(ip, quota_allocation);
 
 	return (rc);
 }
@@ -1513,7 +1513,7 @@ xtSplitRoot(tid_t tid,
 		return -EIO;
 
 	/* Allocate blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+	if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
 		release_metapage(rmp);
 		return -EDQUOT;
 	}
@@ -3941,7 +3941,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
 	ip->i_size = newsize;
 
 	/* update quota allocation to reflect freed blocks */
-	DQUOT_FREE_BLOCK(ip, nfreed);
+	vfs_dq_free_block(ip, nfreed);
 
 	/*
 	 * free tlock of invalidated pages
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b4de56b851e4..514ee2edb92a 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -35,7 +35,7 @@
 /*
  * forward references
  */
-struct dentry_operations jfs_ci_dentry_operations;
+const struct dentry_operations jfs_ci_dentry_operations;
 
 static s64 commitZeroLink(tid_t, struct inode *);
 
@@ -356,7 +356,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
 	jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
 
 	/* Init inode for quota operations. */
-	DQUOT_INIT(ip);
+	vfs_dq_init(ip);
 
 	/* directory must be empty to be removed */
 	if (!dtEmpty(ip)) {
@@ -483,7 +483,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
 	jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
 
 	/* Init inode for quota operations. */
-	DQUOT_INIT(ip);
+	vfs_dq_init(ip);
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
@@ -1136,7 +1136,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	} else if (new_ip) {
 		IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
 		/* Init inode for quota operations. */
-		DQUOT_INIT(new_ip);
+		vfs_dq_init(new_ip);
 	}
 
 	/*
@@ -1595,7 +1595,7 @@ out:
 	return result;
 }
 
-struct dentry_operations jfs_ci_dentry_operations =
+const struct dentry_operations jfs_ci_dentry_operations =
 {
 	.d_hash = jfs_ci_hash,
 	.d_compare = jfs_ci_compare,
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 9b7f2cdaae0a..61dfa8173ebc 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
 	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
 
 	/* Allocate new blocks to quota. */
-	if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
+	if (vfs_dq_alloc_block(ip, nblocks)) {
 		return -EDQUOT;
 	}
 
 	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
 	if (rc) {
 		/*Rollback quota allocation. */
-		DQUOT_FREE_BLOCK(ip, nblocks);
+		vfs_dq_free_block(ip, nblocks);
 		return rc;
 	}
 
@@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
 
       failed:
 	/* Rollback quota allocation. */
-	DQUOT_FREE_BLOCK(ip, nblocks);
+	vfs_dq_free_block(ip, nblocks);
 
 	dbFree(ip, blkno, nblocks);
 	return rc;
@@ -538,7 +538,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
 
 	if (blocks_needed > current_blocks) {
 		/* Allocate new blocks to quota. */
-		if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
+		if (vfs_dq_alloc_block(inode, blocks_needed))
 			return -EDQUOT;
 
 		quota_allocation = blocks_needed;
@@ -602,7 +602,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
       clean_up:
 	/* Rollback quota allocation */
 	if (quota_allocation)
-		DQUOT_FREE_BLOCK(inode, quota_allocation);
+		vfs_dq_free_block(inode, quota_allocation);
 
 	return (rc);
 }
@@ -677,7 +677,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
 
 	/* If old blocks exist, they must be removed from quota allocation. */
 	if (old_blocks)
-		DQUOT_FREE_BLOCK(inode, old_blocks);
+		vfs_dq_free_block(inode, old_blocks);
 
 	inode->i_ctime = CURRENT_TIME;
 
diff --git a/fs/libfs.c b/fs/libfs.c
index 49b44099dabb..4910a36f516e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -44,7 +44,7 @@ static int simple_delete_dentry(struct dentry *dentry)
  */
 struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 {
-	static struct dentry_operations simple_dentry_operations = {
+	static const struct dentry_operations simple_dentry_operations = {
 		.d_delete = simple_delete_dentry,
 	};
 
@@ -242,7 +242,8 @@ int get_sb_pseudo(struct file_system_type *fs_type, char *name,
 	d_instantiate(dentry, root);
 	s->s_root = dentry;
 	s->s_flags |= MS_ACTIVE;
-	return simple_set_mnt(mnt, s);
+	simple_set_mnt(mnt, s);
+	return 0;
 
 Enomem:
 	up_write(&s->s_umount);
diff --git a/fs/namei.c b/fs/namei.c
index 199317642ad6..d040ce11785d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1473,7 +1473,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
 	error = security_inode_create(dir, dentry, mode);
 	if (error)
 		return error;
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->create(dir, dentry, mode, nd);
 	if (!error)
 		fsnotify_create(dir, dentry);
@@ -1489,24 +1489,22 @@ int may_open(struct path *path, int acc_mode, int flag)
 	if (!inode)
 		return -ENOENT;
 
-	if (S_ISLNK(inode->i_mode))
+	switch (inode->i_mode & S_IFMT) {
+	case S_IFLNK:
 		return -ELOOP;
-
-	if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
-		return -EISDIR;
-
-	/*
-	 * FIFO's, sockets and device files are special: they don't
-	 * actually live on the filesystem itself, and as such you
-	 * can write to them even if the filesystem is read-only.
-	 */
-	if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
-		flag &= ~O_TRUNC;
-	} else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+	case S_IFDIR:
+		if (acc_mode & MAY_WRITE)
+			return -EISDIR;
+		break;
+	case S_IFBLK:
+	case S_IFCHR:
 		if (path->mnt->mnt_flags & MNT_NODEV)
 			return -EACCES;
-
+		/*FALLTHRU*/
+	case S_IFIFO:
+	case S_IFSOCK:
 		flag &= ~O_TRUNC;
+		break;
 	}
 
 	error = inode_permission(inode, acc_mode);
@@ -1552,7 +1550,7 @@ int may_open(struct path *path, int acc_mode, int flag)
 		error = security_path_truncate(path, 0,
 					ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
 		if (!error) {
-			DQUOT_INIT(inode);
+			vfs_dq_init(inode);
 
 			error = do_truncate(dentry, 0,
 					    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -1563,7 +1561,7 @@ int may_open(struct path *path, int acc_mode, int flag)
 		return error;
 	} else
 		if (flag & FMODE_WRITE)
-			DQUOT_INIT(inode);
+			vfs_dq_init(inode);
 
 	return 0;
 }
@@ -1946,7 +1944,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 	if (error)
 		return error;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->mknod(dir, dentry, mode, dev);
 	if (!error)
 		fsnotify_create(dir, dentry);
@@ -2045,7 +2043,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	if (error)
 		return error;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->mkdir(dir, dentry, mode);
 	if (!error)
 		fsnotify_mkdir(dir, dentry);
@@ -2131,7 +2129,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 	if (!dir->i_op->rmdir)
 		return -EPERM;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 
 	mutex_lock(&dentry->d_inode->i_mutex);
 	dentry_unhash(dentry);
@@ -2218,7 +2216,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
 	if (!dir->i_op->unlink)
 		return -EPERM;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 
 	mutex_lock(&dentry->d_inode->i_mutex);
 	if (d_mountpoint(dentry))
@@ -2329,7 +2327,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
 	if (error)
 		return error;
 
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->symlink(dir, dentry, oldname);
 	if (!error)
 		fsnotify_create(dir, dentry);
@@ -2413,7 +2411,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
 		return error;
 
 	mutex_lock(&inode->i_mutex);
-	DQUOT_INIT(dir);
+	vfs_dq_init(dir);
 	error = dir->i_op->link(old_dentry, dir, new_dentry);
 	mutex_unlock(&inode->i_mutex);
 	if (!error)
@@ -2612,8 +2610,8 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (!old_dir->i_op->rename)
 		return -EPERM;
 
-	DQUOT_INIT(old_dir);
-	DQUOT_INIT(new_dir);
+	vfs_dq_init(old_dir);
+	vfs_dq_init(new_dir);
 
 	old_name = fsnotify_oldname_init(old_dentry->d_name.name);
 
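The may_open() rewrite above replaces a chain of S_IS*() tests with one switch on the file-type bits. The same dispatch works in userspace with the S_IFMT mask; a small illustrative classifier (names are mine, not from the patch):

	#include <sys/stat.h>

	static int classify(mode_t mode)
	{
		switch (mode & S_IFMT) {
		case S_IFLNK:
			return 1;	/* symlink */
		case S_IFDIR:
			return 2;	/* directory */
		case S_IFBLK:
		case S_IFCHR:
		case S_IFIFO:
		case S_IFSOCK:
			return 3;	/* special file */
		default:
			return 0;	/* regular file */
		}
	}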
diff --git a/fs/namespace.c b/fs/namespace.c
index f0e753097353..0a42e0e96027 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,11 +397,10 @@ static void __mnt_unmake_readonly(struct vfsmount *mnt)
 	spin_unlock(&vfsmount_lock);
 }
 
-int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
+void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
 {
 	mnt->mnt_sb = sb;
 	mnt->mnt_root = dget(sb->s_root);
-	return 0;
 }
 
 EXPORT_SYMBOL(simple_set_mnt);
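Since simple_set_mnt() can no longer fail, its get_sb callers (see fs/libfs.c above and fs/proc/root.c below) now call it and return 0 themselves. A hedged sketch of the new caller convention, with an illustrative helper name:

	static int example_get_sb_finish(struct vfsmount *mnt, struct super_block *sb)
	{
		simple_set_mnt(mnt, sb);	/* void now; cannot fail */
		return 0;
	}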
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 07e9715b8658..9c590722d87e 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -79,7 +79,7 @@ static int ncp_hash_dentry(struct dentry *, struct qstr *);
 static int ncp_compare_dentry (struct dentry *, struct qstr *, struct qstr *);
 static int ncp_delete_dentry(struct dentry *);
 
-static struct dentry_operations ncp_dentry_operations =
+static const struct dentry_operations ncp_dentry_operations =
 {
 	.d_revalidate	= ncp_lookup_validate,
 	.d_hash		= ncp_hash_dentry,
@@ -87,7 +87,7 @@ static struct dentry_operations ncp_dentry_operations =
 	.d_delete	= ncp_delete_dentry,
 };
 
-struct dentry_operations ncp_root_dentry_operations =
+const struct dentry_operations ncp_root_dentry_operations =
 {
 	.d_hash		= ncp_hash_dentry,
 	.d_compare	= ncp_compare_dentry,
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 672368f865ca..78bf72fc1db3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -899,7 +899,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
 	iput(inode);
 }
 
-struct dentry_operations nfs_dentry_operations = {
+const struct dentry_operations nfs_dentry_operations = {
 	.d_revalidate	= nfs_lookup_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
@@ -967,7 +967,7 @@ out:
 #ifdef CONFIG_NFS_V4
 static int nfs_open_revalidate(struct dentry *, struct nameidata *);
 
-struct dentry_operations nfs4_dentry_operations = {
+const struct dentry_operations nfs4_dentry_operations = {
 	.d_revalidate	= nfs_open_revalidate,
 	.d_delete	= nfs_dentry_delete,
 	.d_iput		= nfs_dentry_iput,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4e4d33204376..84345deab26f 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -179,7 +179,7 @@ struct nfs4_state_recovery_ops {
 	int (*recover_lock)(struct nfs4_state *, struct file_lock *);
 };
 
-extern struct dentry_operations nfs4_dentry_operations;
+extern const struct dentry_operations nfs4_dentry_operations;
 extern const struct inode_operations nfs4_dir_inode_operations;
 
 /* inode.c */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c165a6403df0..78376b6c0236 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -356,7 +356,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 			put_write_access(inode);
 			goto out_nfserr;
 		}
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 	}
 
 	/* sanitize the mode change */
@@ -723,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
 		else
 			flags = O_WRONLY|O_LARGEFILE;
 
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 	}
 	*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
 			    flags, cred);
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 331f2e88e284..220c13f0d73d 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -380,6 +380,14 @@ void inotify_unmount_inodes(struct list_head *list)
 		struct list_head *watches;
 
 		/*
+		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
+		 * I_WILL_FREE, or I_NEW which is fine because by that point
+		 * the inode cannot have any associated watches.
+		 */
+		if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+			continue;
+
+		/*
 		 * If i_count is zero, the inode cannot have any watches and
 		 * doing an __iget/iput with MS_ACTIVE clear would actually
 		 * evict all inodes with zero i_count from icache which is
@@ -388,14 +396,6 @@ void inotify_unmount_inodes(struct list_head *list)
 		if (!atomic_read(&inode->i_count))
 			continue;
 
-		/*
-		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
-		 * I_WILL_FREE which is fine because by that point the inode
-		 * cannot have any associated watches.
-		 */
-		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
-			continue;
-
 		need_iput_tmp = need_iput;
 		need_iput = NULL;
 		/* In case inotify_remove_watch_locked() drops a reference. */
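The inotify hunks move the inode-state test ahead of the i_count test and extend it with I_NEW, so a half-constructed inode is never __iget()ed. A compact sketch of the resulting guard order (illustrative helper, not a function from the patch):

	static int sketch_may_iget(struct inode *inode)
	{
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE | I_NEW))
			return 0;	/* dying or half-built: hands off */
		return atomic_read(&inode->i_count) != 0;
	}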
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index e9d7c2038c0f..7d604480557a 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -455,7 +455,7 @@ out_move:
 	d_move(dentry, target);
 }
 
-struct dentry_operations ocfs2_dentry_ops = {
+const struct dentry_operations ocfs2_dentry_ops = {
 	.d_revalidate		= ocfs2_dentry_revalidate,
 	.d_iput			= ocfs2_dentry_iput,
 };
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index d06e16c06640..faa12e75f98d 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -26,7 +26,7 @@
 #ifndef OCFS2_DCACHE_H
 #define OCFS2_DCACHE_H
 
-extern struct dentry_operations ocfs2_dentry_ops;
+extern const struct dentry_operations ocfs2_dentry_ops;
 
 struct ocfs2_dentry_lock {
 	/* Use count of dentry lock */
diff --git a/fs/open.c b/fs/open.c
index a3a78ceb2a2b..75b61677daaf 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -273,7 +273,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
 	if (!error)
 		error = security_path_truncate(&path, length, 0);
 	if (!error) {
-		DQUOT_INIT(inode);
+		vfs_dq_init(inode);
 		error = do_truncate(path.dentry, length, 0, NULL);
 	}
 
diff --git a/fs/pipe.c b/fs/pipe.c
index 94ad15967cf9..4af7aa521813 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -860,7 +860,7 @@ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
 				dentry->d_inode->i_ino);
 }
 
-static struct dentry_operations pipefs_dentry_operations = {
+static const struct dentry_operations pipefs_dentry_operations = {
 	.d_delete	= pipefs_delete_dentry,
 	.d_dname	= pipefs_dname,
 };
@@ -1024,11 +1024,6 @@ int do_pipe_flags(int *fd, int flags)
 	return error;
 }
 
-int do_pipe(int *fd)
-{
-	return do_pipe_flags(fd, 0);
-}
-
 /*
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way Unix traditionally does this, though.
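With do_pipe() gone, any remaining in-kernel caller spells the same operation through the flags variant, exactly as the deleted wrapper did. A hedged one-line equivalent (illustrative wrapper; the patch itself only deletes code):

	static int make_plain_pipe(int *fd)
	{
		return do_pipe_flags(fd, 0);	/* flags == 0 reproduces old do_pipe() */
	}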
diff --git a/fs/proc/base.c b/fs/proc/base.c
index beaa0ce3b82e..aef6d55b7de6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1545,7 +1545,7 @@ static int pid_delete_dentry(struct dentry * dentry)
 	return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
 }
 
-static struct dentry_operations pid_dentry_operations =
+static const struct dentry_operations pid_dentry_operations =
 {
 	.d_revalidate	= pid_revalidate,
 	.d_delete	= pid_delete_dentry,
@@ -1717,7 +1717,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
 	return 0;
 }
 
-static struct dentry_operations tid_fd_dentry_operations =
+static const struct dentry_operations tid_fd_dentry_operations =
 {
 	.d_revalidate	= tid_fd_revalidate,
 	.d_delete	= pid_delete_dentry,
@@ -2339,7 +2339,7 @@ static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
 	return 0;
 }
 
-static struct dentry_operations proc_base_dentry_operations =
+static const struct dentry_operations proc_base_dentry_operations =
 {
 	.d_revalidate	= proc_base_revalidate,
 	.d_delete	= pid_delete_dentry,
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index db7fa5cab988..5d2989e9dcc1 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -363,7 +363,7 @@ static int proc_delete_dentry(struct dentry * dentry)
 	return 1;
 }
 
-static struct dentry_operations proc_dentry_operations =
+static const struct dentry_operations proc_dentry_operations =
 {
 	.d_delete	= proc_delete_dentry,
 };
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 94fcfff6863a..9b1e4e9a16bf 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -7,7 +7,7 @@
 #include <linux/security.h>
 #include "internal.h"
 
-static struct dentry_operations proc_sys_dentry_operations;
+static const struct dentry_operations proc_sys_dentry_operations;
 static const struct file_operations proc_sys_file_operations;
 static const struct inode_operations proc_sys_inode_operations;
 static const struct file_operations proc_sys_dir_file_operations;
@@ -396,7 +396,7 @@ static int proc_sys_compare(struct dentry *dir, struct qstr *qstr,
 	return !sysctl_is_seen(PROC_I(dentry->d_inode)->sysctl);
 }
 
-static struct dentry_operations proc_sys_dentry_operations = {
+static const struct dentry_operations proc_sys_dentry_operations = {
 	.d_revalidate	= proc_sys_revalidate,
 	.d_delete	= proc_sys_delete,
 	.d_compare	= proc_sys_compare,
diff --git a/fs/proc/root.c b/fs/proc/root.c
index f6299a25594e..1e15a2b176e8 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -83,7 +83,8 @@ static int proc_get_sb(struct file_system_type *fs_type,
 		ns->proc_mnt = mnt;
 	}
 
-	return simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+	return 0;
 }
 
 static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 000000000000..8047e01ef46b
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
+#
+# Quota configuration
+#
+
+config QUOTA
+	bool "Quota support"
+	help
+	  If you say Y here, you will be able to set per user limits for disk
+	  usage (also called disk quotas). Currently, it works for the
+	  ext2, ext3, and reiserfs file system. ext3 also supports journalled
+	  quotas for which you don't need to run quotacheck(8) after an unclean
+	  shutdown.
+	  For further details, read the Quota mini-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>, or the documentation provided
+	  with the quota tools. Probably the quota support is only useful for
+	  multi user systems. If unsure, say N.
+
+config QUOTA_NETLINK_INTERFACE
+	bool "Report quota messages through netlink interface"
+	depends on QUOTA && NET
+	help
+	  If you say Y here, quota warnings (about exceeding softlimit, reaching
+	  hardlimit, etc.) will be reported through netlink interface. If unsure,
+	  say Y.
+
+config PRINT_QUOTA_WARNING
+	bool "Print quota warnings to console (OBSOLETE)"
+	depends on QUOTA
+	default y
+	help
+	  If you say Y here, quota warnings (about exceeding softlimit, reaching
+	  hardlimit, etc.) will be printed to the process' controlling terminal.
+	  Note that this behavior is currently deprecated and may go away in
+	  future. Please use notification via netlink socket instead.
+
+# Generic support for tree structured quota files. Selected when needed.
+config QUOTA_TREE
+	tristate
+
+config QFMT_V1
+	tristate "Old quota format support"
+	depends on QUOTA
+	help
+	  This quota format was (is) used by kernels earlier than 2.4.22. If
+	  you have quota working and you don't want to convert to new quota
+	  format say Y here.
+
+config QFMT_V2
+	tristate "Quota format v2 support"
+	depends on QUOTA
+	select QUOTA_TREE
+	help
+	  This quota format allows using quotas with 32-bit UIDs/GIDs. If you
+	  need this functionality say Y here.
+
+config QUOTACTL
+	bool
+	depends on XFS_QUOTA || QUOTA
+	default y
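For reference, a .config fragment (illustrative, derived only from the options defined above) that enables the relocated quota code with the v2 format and netlink warnings:

	CONFIG_QUOTA=y
	CONFIG_QUOTA_NETLINK_INTERFACE=y
	CONFIG_QFMT_V2=m
	CONFIG_QUOTA_TREE=m
	CONFIG_QUOTACTL=y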
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 000000000000..385a0831cc99
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the Linux filesystems.
+#
+# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-y :=
+
+obj-$(CONFIG_QUOTA)		+= dquot.o
+obj-$(CONFIG_QFMT_V1)		+= quota_v1.o
+obj-$(CONFIG_QFMT_V2)		+= quota_v2.o
+obj-$(CONFIG_QUOTA_TREE)	+= quota_tree.o
+obj-$(CONFIG_QUOTACTL)		+= quota.o
diff --git a/fs/dquot.c b/fs/quota/dquot.c
index d6add0bf5ad3..2ca967a5ef77 100644
--- a/fs/dquot.c
+++ b/fs/quota/dquot.c
@@ -129,9 +129,10 @@
  * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
-static DEFINE_SPINLOCK(dq_list_lock);
-static DEFINE_SPINLOCK(dq_state_lock);
-DEFINE_SPINLOCK(dq_data_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
 
 static char *quotatypes[] = INITQFNAMES;
 static struct quota_format_type *quota_formats;	/* List of registered formats */
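__cacheline_aligned_in_smp pads each annotated object out to its own cache line on SMP builds, so the three hot quota locks above stop false-sharing a line; on UP kernels it is a no-op. The idiom in isolation (illustrative lock name):

	#include <linux/cache.h>
	#include <linux/spinlock.h>

	/* each lock gets a private cache line on SMP; no-op on UP */
	static __cacheline_aligned_in_smp DEFINE_SPINLOCK(example_lock);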
@@ -148,35 +149,46 @@ int register_quota_format(struct quota_format_type *fmt)
 	spin_unlock(&dq_list_lock);
 	return 0;
 }
+EXPORT_SYMBOL(register_quota_format);
 
 void unregister_quota_format(struct quota_format_type *fmt)
 {
 	struct quota_format_type **actqf;
 
 	spin_lock(&dq_list_lock);
-	for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
+	for (actqf = &quota_formats; *actqf && *actqf != fmt;
+	     actqf = &(*actqf)->qf_next)
+		;
 	if (*actqf)
 		*actqf = (*actqf)->qf_next;
 	spin_unlock(&dq_list_lock);
 }
+EXPORT_SYMBOL(unregister_quota_format);
 
 static struct quota_format_type *find_quota_format(int id)
 {
 	struct quota_format_type *actqf;
 
 	spin_lock(&dq_list_lock);
-	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+	     actqf = actqf->qf_next)
+		;
 	if (!actqf || !try_module_get(actqf->qf_owner)) {
 		int qm;
 
 		spin_unlock(&dq_list_lock);
 
-		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
-		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
+		for (qm = 0; module_names[qm].qm_fmt_id &&
+			     module_names[qm].qm_fmt_id != id; qm++)
+			;
+		if (!module_names[qm].qm_fmt_id ||
+		    request_module(module_names[qm].qm_mod_name))
 			return NULL;
 
 		spin_lock(&dq_list_lock);
-		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
+		     actqf = actqf->qf_next)
+			;
 		if (actqf && !try_module_get(actqf->qf_owner))
 			actqf = NULL;
 	}
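The rewrapped search loops adopt the kernel style for intentionally empty loop bodies: the lone semicolon goes on its own line so it cannot be mistaken for a stray typo. The shape, reduced to a toy list search (hypothetical struct node, not from the patch):

	struct node { int id; struct node *next; };

	static struct node *find(struct node *list, int id)
	{
		struct node *p;

		for (p = list; p && p->id != id; p = p->next)
			;	/* empty body, made explicit */
		return p;
	}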
@@ -215,6 +227,7 @@ static unsigned int dq_hash_bits, dq_hash_mask;
 static struct hlist_head *dquot_hash;
 
 struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
 
 static inline unsigned int
 hashfn(const struct super_block *sb, unsigned int id, int type)
@@ -230,7 +243,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
  */
 static inline void insert_dquot_hash(struct dquot *dquot)
 {
-	struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
+	struct hlist_head *head;
+	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
 	hlist_add_head(&dquot->dq_hash, head);
 }
 
@@ -239,17 +253,19 @@ static inline void remove_dquot_hash(struct dquot *dquot)
 	hlist_del_init(&dquot->dq_hash);
 }
 
-static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
+static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
+				unsigned int id, int type)
 {
 	struct hlist_node *node;
 	struct dquot *dquot;
 
 	hlist_for_each (node, dquot_hash+hashent) {
 		dquot = hlist_entry(node, struct dquot, dq_hash);
-		if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
+		if (dquot->dq_sb == sb && dquot->dq_id == id &&
+		    dquot->dq_type == type)
 			return dquot;
 	}
-	return NODQUOT;
+	return NULL;
 }
 
 /* Add a dquot to the tail of the free list */
@@ -309,6 +325,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
 	spin_unlock(&dq_list_lock);
 	return 0;
 }
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
 
 /* This function needs dq_list_lock */
 static inline int clear_dquot_dirty(struct dquot *dquot)
@@ -345,8 +362,10 @@ int dquot_acquire(struct dquot *dquot)
 	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
 		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
 		/* Write the info if needed */
-		if (info_dirty(&dqopt->info[dquot->dq_type]))
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (info_dirty(&dqopt->info[dquot->dq_type])) {
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+						dquot->dq_sb, dquot->dq_type);
+		}
 		if (ret < 0)
 			goto out_iolock;
 		if (ret2 < 0) {
@@ -360,6 +379,7 @@ out_iolock:
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_acquire);
 
 /*
  * Write dquot to disk
@@ -380,8 +400,10 @@ int dquot_commit(struct dquot *dquot)
 	 * => we have better not writing it */
 	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
-		if (info_dirty(&dqopt->info[dquot->dq_type]))
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (info_dirty(&dqopt->info[dquot->dq_type])) {
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+						dquot->dq_sb, dquot->dq_type);
+		}
 		if (ret >= 0)
 			ret = ret2;
 	}
@@ -389,6 +411,7 @@ out_sem:
 	mutex_unlock(&dqopt->dqio_mutex);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_commit);
 
 /*
  * Release dquot
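The EXPORT_SYMBOL() lines added throughout dquot.c follow the usual convention of exporting each public entry point right after its definition, which the move to fs/quota/ makes necessary for modular filesystem users. The idiom in isolation (illustrative function, not from the patch):

	#include <linux/module.h>

	int example_api(void)
	{
		return 0;
	}
	EXPORT_SYMBOL(example_api);	/* visible to modules */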
@@ -406,8 +429,10 @@ int dquot_release(struct dquot *dquot)
 	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
 		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
 		/* Write the info */
-		if (info_dirty(&dqopt->info[dquot->dq_type]))
-			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
+		if (info_dirty(&dqopt->info[dquot->dq_type])) {
+			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
+						dquot->dq_sb, dquot->dq_type);
+		}
 		if (ret >= 0)
 			ret = ret2;
 	}
@@ -417,6 +442,7 @@ out_dqlock:
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_release);
 
 void dquot_destroy(struct dquot *dquot)
 {
@@ -516,6 +542,7 @@ out:
 	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
 	return ret;
 }
+EXPORT_SYMBOL(dquot_scan_active);
 
 int vfs_quota_sync(struct super_block *sb, int type)
 {
@@ -533,7 +560,8 @@ int vfs_quota_sync(struct super_block *sb, int type)
 		spin_lock(&dq_list_lock);
 		dirty = &dqopt->info[cnt].dqi_dirty_list;
 		while (!list_empty(dirty)) {
-			dquot = list_first_entry(dirty, struct dquot, dq_dirty);
+			dquot = list_first_entry(dirty, struct dquot,
+						 dq_dirty);
 			/* Dirty and inactive can be only bad dquot... */
 			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
 				clear_dquot_dirty(dquot);
@@ -563,6 +591,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
 
 	return 0;
 }
+EXPORT_SYMBOL(vfs_quota_sync);
 
 /* Free unused dquots from cache */
 static void prune_dqcache(int count)
@@ -672,6 +701,7 @@ we_slept:
 	put_dquot_last(dquot);
 	spin_unlock(&dq_list_lock);
 }
+EXPORT_SYMBOL(dqput);
 
 struct dquot *dquot_alloc(struct super_block *sb, int type)
 {
@@ -685,7 +715,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 
 	dquot = sb->dq_op->alloc_dquot(sb, type);
 	if(!dquot)
-		return NODQUOT;
+		return NULL;
 
 	mutex_init(&dquot->dq_lock);
 	INIT_LIST_HEAD(&dquot->dq_free);
@@ -711,10 +741,10 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
711struct dquot *dqget(struct super_block *sb, unsigned int id, int type) 741struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
712{ 742{
713 unsigned int hashent = hashfn(sb, id, type); 743 unsigned int hashent = hashfn(sb, id, type);
714 struct dquot *dquot = NODQUOT, *empty = NODQUOT; 744 struct dquot *dquot = NULL, *empty = NULL;
715 745
716 if (!sb_has_quota_active(sb, type)) 746 if (!sb_has_quota_active(sb, type))
717 return NODQUOT; 747 return NULL;
718we_slept: 748we_slept:
719 spin_lock(&dq_list_lock); 749 spin_lock(&dq_list_lock);
720 spin_lock(&dq_state_lock); 750 spin_lock(&dq_state_lock);
@@ -725,15 +755,17 @@ we_slept:
725 } 755 }
726 spin_unlock(&dq_state_lock); 756 spin_unlock(&dq_state_lock);
727 757
728 if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { 758 dquot = find_dquot(hashent, sb, id, type);
729 if (empty == NODQUOT) { 759 if (!dquot) {
760 if (!empty) {
730 spin_unlock(&dq_list_lock); 761 spin_unlock(&dq_list_lock);
731 if ((empty = get_empty_dquot(sb, type)) == NODQUOT) 762 empty = get_empty_dquot(sb, type);
763 if (!empty)
732 schedule(); /* Try to wait for a moment... */ 764 schedule(); /* Try to wait for a moment... */
733 goto we_slept; 765 goto we_slept;
734 } 766 }
735 dquot = empty; 767 dquot = empty;
736 empty = NODQUOT; 768 empty = NULL;
737 dquot->dq_id = id; 769 dquot->dq_id = id;
738 /* all dquots go on the inuse_list */ 770 /* all dquots go on the inuse_list */
739 put_inuse(dquot); 771 put_inuse(dquot);
@@ -749,13 +781,14 @@ we_slept:
749 dqstats.lookups++; 781 dqstats.lookups++;
750 spin_unlock(&dq_list_lock); 782 spin_unlock(&dq_list_lock);
751 } 783 }
752 /* Wait for dq_lock - after this we know that either dquot_release() is already 784 /* Wait for dq_lock - after this we know that either dquot_release() is
753 * finished or it will be canceled due to dq_count > 1 test */ 785 * already finished or it will be canceled due to dq_count > 1 test */
754 wait_on_dquot(dquot); 786 wait_on_dquot(dquot);
755 /* Read the dquot and instantiate it (everything done only if needed) */ 787 /* Read the dquot / allocate space in quota file */
756 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { 788 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
789 sb->dq_op->acquire_dquot(dquot) < 0) {
757 dqput(dquot); 790 dqput(dquot);
758 dquot = NODQUOT; 791 dquot = NULL;
759 goto out; 792 goto out;
760 } 793 }
761#ifdef __DQUOT_PARANOIA 794#ifdef __DQUOT_PARANOIA
@@ -767,6 +800,7 @@ out:
767 800
768 return dquot; 801 return dquot;
769} 802}
803EXPORT_SYMBOL(dqget);
770 804
771static int dqinit_needed(struct inode *inode, int type) 805static int dqinit_needed(struct inode *inode, int type)
772{ 806{
@@ -775,9 +809,9 @@ static int dqinit_needed(struct inode *inode, int type)
775 if (IS_NOQUOTA(inode)) 809 if (IS_NOQUOTA(inode))
776 return 0; 810 return 0;
777 if (type != -1) 811 if (type != -1)
778 return inode->i_dquot[type] == NODQUOT; 812 return !inode->i_dquot[type];
779 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 813 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
780 if (inode->i_dquot[cnt] == NODQUOT) 814 if (!inode->i_dquot[cnt])
781 return 1; 815 return 1;
782 return 0; 816 return 0;
783} 817}
@@ -789,12 +823,12 @@ static void add_dquot_ref(struct super_block *sb, int type)
789 823
790 spin_lock(&inode_lock); 824 spin_lock(&inode_lock);
791 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 825 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
826 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
827 continue;
792 if (!atomic_read(&inode->i_writecount)) 828 if (!atomic_read(&inode->i_writecount))
793 continue; 829 continue;
794 if (!dqinit_needed(inode, type)) 830 if (!dqinit_needed(inode, type))
795 continue; 831 continue;
796 if (inode->i_state & (I_FREEING|I_WILL_FREE))
797 continue;
798 832
799 __iget(inode); 833 __iget(inode);
800 spin_unlock(&inode_lock); 834 spin_unlock(&inode_lock);
@@ -813,7 +847,10 @@ static void add_dquot_ref(struct super_block *sb, int type)
813 iput(old_inode); 847 iput(old_inode);
814} 848}
815 849
816/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ 850/*
851 * Return 0 if dqput() won't block.
852 * (note that 1 doesn't necessarily mean blocking)
853 */
817static inline int dqput_blocks(struct dquot *dquot) 854static inline int dqput_blocks(struct dquot *dquot)
818{ 855{
819 if (atomic_read(&dquot->dq_count) <= 1) 856 if (atomic_read(&dquot->dq_count) <= 1)
@@ -821,22 +858,27 @@ static inline int dqput_blocks(struct dquot *dquot)
821 return 0; 858 return 0;
822} 859}
823 860
824/* Remove references to dquots from inode - add dquot to list for freeing if needed */ 861/*
825/* We can't race with anybody because we hold dqptr_sem for writing... */ 862 * Remove references to dquots from inode and add dquot to list for freeing
863 * if we have the last reference to dquot
864 * We can't race with anybody because we hold dqptr_sem for writing...
865 */
826static int remove_inode_dquot_ref(struct inode *inode, int type, 866static int remove_inode_dquot_ref(struct inode *inode, int type,
827 struct list_head *tofree_head) 867 struct list_head *tofree_head)
828{ 868{
829 struct dquot *dquot = inode->i_dquot[type]; 869 struct dquot *dquot = inode->i_dquot[type];
830 870
831 inode->i_dquot[type] = NODQUOT; 871 inode->i_dquot[type] = NULL;
832 if (dquot != NODQUOT) { 872 if (dquot) {
833 if (dqput_blocks(dquot)) { 873 if (dqput_blocks(dquot)) {
834#ifdef __DQUOT_PARANOIA 874#ifdef __DQUOT_PARANOIA
835 if (atomic_read(&dquot->dq_count) != 1) 875 if (atomic_read(&dquot->dq_count) != 1)
836 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); 876 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
837#endif 877#endif
838 spin_lock(&dq_list_lock); 878 spin_lock(&dq_list_lock);
839 list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ 879 /* As the dquot currently has users it can't be on
880 * the free list... */
881 list_add(&dquot->dq_free, tofree_head);
840 spin_unlock(&dq_list_lock); 882 spin_unlock(&dq_list_lock);
841 return 1; 883 return 1;
842 } 884 }
@@ -846,19 +888,22 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
846 return 0; 888 return 0;
847} 889}
848 890
849/* Free list of dquots - called from inode.c */ 891/*
850/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */ 892 * Free list of dquots
893 * Dquots are removed from inodes and no new references can be taken so we
894 * are the only ones holding a reference
895 */
851static void put_dquot_list(struct list_head *tofree_head) 896static void put_dquot_list(struct list_head *tofree_head)
852{ 897{
853 struct list_head *act_head; 898 struct list_head *act_head;
854 struct dquot *dquot; 899 struct dquot *dquot;
855 900
856 act_head = tofree_head->next; 901 act_head = tofree_head->next;
857 /* So now we have dquots on the list... Just free them */
858 while (act_head != tofree_head) { 902 while (act_head != tofree_head) {
859 dquot = list_entry(act_head, struct dquot, dq_free); 903 dquot = list_entry(act_head, struct dquot, dq_free);
860 act_head = act_head->next; 904 act_head = act_head->next;
861 list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */ 905 /* Remove dquot from the list so we won't have problems... */
906 list_del_init(&dquot->dq_free);
862 dqput(dquot); 907 dqput(dquot);
863 } 908 }
864} 909}
@@ -870,6 +915,12 @@ static void remove_dquot_ref(struct super_block *sb, int type,
870 915
871 spin_lock(&inode_lock); 916 spin_lock(&inode_lock);
872 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 917 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
918 /*
919 * We have to scan also I_NEW inodes because they can already
920 * have quota pointer initialized. Luckily, we need to touch
921 * only quota pointers and these have separate locking
922 * (dqptr_sem).
923 */
873 if (!IS_NOQUOTA(inode)) 924 if (!IS_NOQUOTA(inode))
874 remove_inode_dquot_ref(inode, type, tofree_head); 925 remove_inode_dquot_ref(inode, type, tofree_head);
875 } 926 }
@@ -899,7 +950,29 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
899 dquot->dq_dqb.dqb_curspace += number; 950 dquot->dq_dqb.dqb_curspace += number;
900} 951}
901 952
902static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) 953static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
954{
955 dquot->dq_dqb.dqb_rsvspace += number;
956}
957
958/*
959 * Claim reserved quota space
960 */
961static void dquot_claim_reserved_space(struct dquot *dquot,
962 qsize_t number)
963{
964 WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
965 dquot->dq_dqb.dqb_curspace += number;
966 dquot->dq_dqb.dqb_rsvspace -= number;
967}
968
969static inline
970void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
971{
972 dquot->dq_dqb.dqb_rsvspace -= number;
973}
974
975static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
903{ 976{
904 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || 977 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
905 dquot->dq_dqb.dqb_curinodes >= number) 978 dquot->dq_dqb.dqb_curinodes >= number)
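The three helpers added above (dquot_resv_space(), dquot_claim_reserved_space() and dquot_free_reserved_space()) carry the whole reservation bookkeeping: reserve adds to dqb_rsvspace, claim moves bytes from dqb_rsvspace to dqb_curspace, and release gives them back. The following toy model (plain userspace C for illustration, not kernel code) walks those transitions; note that check_bdq() below charges curspace + rsvspace + the new request against the limits, so reserved bytes count immediately.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t qsize_t;

struct dqb_model {
	qsize_t curspace;	/* dqb_curspace: bytes actually allocated */
	qsize_t rsvspace;	/* dqb_rsvspace: bytes only reserved so far */
};

static void resv_space(struct dqb_model *d, qsize_t n)
{
	d->rsvspace += n;		/* mirrors dquot_resv_space() */
}

static void claim_reserved_space(struct dqb_model *d, qsize_t n)
{
	assert(d->rsvspace >= n);	/* the WARN_ON in the patch */
	d->curspace += n;		/* mirrors dquot_claim_reserved_space() */
	d->rsvspace -= n;
}

static void free_reserved_space(struct dqb_model *d, qsize_t n)
{
	d->rsvspace -= n;		/* mirrors dquot_free_reserved_space() */
}

int main(void)
{
	struct dqb_model d = { 0, 0 };

	resv_space(&d, 8192);		/* two 4k blocks reserved at write() */
	claim_reserved_space(&d, 4096);	/* one block was really allocated */
	free_reserved_space(&d, 4096);	/* the other write got dropped */
	printf("cur=%llu rsv=%llu\n",	/* prints "cur=4096 rsv=0" */
	       (unsigned long long)d.curspace,
	       (unsigned long long)d.rsvspace);
	return 0;
}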
@@ -911,7 +984,7 @@ static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
911 clear_bit(DQ_INODES_B, &dquot->dq_flags); 984 clear_bit(DQ_INODES_B, &dquot->dq_flags);
912} 985}
913 986
914static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) 987static void dquot_decr_space(struct dquot *dquot, qsize_t number)
915{ 988{
916 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || 989 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
917 dquot->dq_dqb.dqb_curspace >= number) 990 dquot->dq_dqb.dqb_curspace >= number)
@@ -938,7 +1011,7 @@ static int warning_issued(struct dquot *dquot, const int warntype)
938#ifdef CONFIG_PRINT_QUOTA_WARNING 1011#ifdef CONFIG_PRINT_QUOTA_WARNING
939static int flag_print_warnings = 1; 1012static int flag_print_warnings = 1;
940 1013
941static inline int need_print_warning(struct dquot *dquot) 1014static int need_print_warning(struct dquot *dquot)
942{ 1015{
943 if (!flag_print_warnings) 1016 if (!flag_print_warnings)
944 return 0; 1017 return 0;
@@ -1065,13 +1138,17 @@ err_out:
1065 kfree_skb(skb); 1138 kfree_skb(skb);
1066} 1139}
1067#endif 1140#endif
1068 1141/*
1069static inline void flush_warnings(struct dquot * const *dquots, char *warntype) 1142 * Write warnings to the console and send warning messages over netlink.
1143 *
1144 * Note that this function can sleep.
1145 */
1146static void flush_warnings(struct dquot *const *dquots, char *warntype)
1070{ 1147{
1071 int i; 1148 int i;
1072 1149
1073 for (i = 0; i < MAXQUOTAS; i++) 1150 for (i = 0; i < MAXQUOTAS; i++)
1074 if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && 1151 if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
1075 !warning_issued(dquots[i], warntype[i])) { 1152 !warning_issued(dquots[i], warntype[i])) {
1076#ifdef CONFIG_PRINT_QUOTA_WARNING 1153#ifdef CONFIG_PRINT_QUOTA_WARNING
1077 print_warning(dquots[i], warntype[i]); 1154 print_warning(dquots[i], warntype[i]);
@@ -1082,42 +1159,47 @@ static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
1082 } 1159 }
1083} 1160}
1084 1161
1085static inline char ignore_hardlimit(struct dquot *dquot) 1162static int ignore_hardlimit(struct dquot *dquot)
1086{ 1163{
1087 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; 1164 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
1088 1165
1089 return capable(CAP_SYS_RESOURCE) && 1166 return capable(CAP_SYS_RESOURCE) &&
1090 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); 1167 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1168 !(info->dqi_flags & V1_DQF_RSQUASH));
1091} 1169}
1092 1170
1093/* needs dq_data_lock */ 1171/* needs dq_data_lock */
1094static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) 1172static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1095{ 1173{
1174 qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1175
1096 *warntype = QUOTA_NL_NOWARN; 1176 *warntype = QUOTA_NL_NOWARN;
1097 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1177 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
1098 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1178 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1099 return QUOTA_OK; 1179 return QUOTA_OK;
1100 1180
1101 if (dquot->dq_dqb.dqb_ihardlimit && 1181 if (dquot->dq_dqb.dqb_ihardlimit &&
1102 (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && 1182 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1103 !ignore_hardlimit(dquot)) { 1183 !ignore_hardlimit(dquot)) {
1104 *warntype = QUOTA_NL_IHARDWARN; 1184 *warntype = QUOTA_NL_IHARDWARN;
1105 return NO_QUOTA; 1185 return NO_QUOTA;
1106 } 1186 }
1107 1187
1108 if (dquot->dq_dqb.dqb_isoftlimit && 1188 if (dquot->dq_dqb.dqb_isoftlimit &&
1109 (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && 1189 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1110 dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && 1190 dquot->dq_dqb.dqb_itime &&
1191 get_seconds() >= dquot->dq_dqb.dqb_itime &&
1111 !ignore_hardlimit(dquot)) { 1192 !ignore_hardlimit(dquot)) {
1112 *warntype = QUOTA_NL_ISOFTLONGWARN; 1193 *warntype = QUOTA_NL_ISOFTLONGWARN;
1113 return NO_QUOTA; 1194 return NO_QUOTA;
1114 } 1195 }
1115 1196
1116 if (dquot->dq_dqb.dqb_isoftlimit && 1197 if (dquot->dq_dqb.dqb_isoftlimit &&
1117 (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && 1198 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1118 dquot->dq_dqb.dqb_itime == 0) { 1199 dquot->dq_dqb.dqb_itime == 0) {
1119 *warntype = QUOTA_NL_ISOFTWARN; 1200 *warntype = QUOTA_NL_ISOFTWARN;
1120 dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; 1201 dquot->dq_dqb.dqb_itime = get_seconds() +
1202 sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
1121 } 1203 }
1122 1204
1123 return QUOTA_OK; 1205 return QUOTA_OK;
@@ -1126,13 +1208,19 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
1126/* needs dq_data_lock */ 1208/* needs dq_data_lock */
1127static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) 1209static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
1128{ 1210{
1211 qsize_t tspace;
1212 struct super_block *sb = dquot->dq_sb;
1213
1129 *warntype = QUOTA_NL_NOWARN; 1214 *warntype = QUOTA_NL_NOWARN;
1130 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1215 if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
1131 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1216 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1132 return QUOTA_OK; 1217 return QUOTA_OK;
1133 1218
1219 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1220 + space;
1221
1134 if (dquot->dq_dqb.dqb_bhardlimit && 1222 if (dquot->dq_dqb.dqb_bhardlimit &&
1135 dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit && 1223 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1136 !ignore_hardlimit(dquot)) { 1224 !ignore_hardlimit(dquot)) {
1137 if (!prealloc) 1225 if (!prealloc)
1138 *warntype = QUOTA_NL_BHARDWARN; 1226 *warntype = QUOTA_NL_BHARDWARN;
@@ -1140,8 +1228,9 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1140 } 1228 }
1141 1229
1142 if (dquot->dq_dqb.dqb_bsoftlimit && 1230 if (dquot->dq_dqb.dqb_bsoftlimit &&
1143 dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && 1231 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1144 dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && 1232 dquot->dq_dqb.dqb_btime &&
1233 get_seconds() >= dquot->dq_dqb.dqb_btime &&
1145 !ignore_hardlimit(dquot)) { 1234 !ignore_hardlimit(dquot)) {
1146 if (!prealloc) 1235 if (!prealloc)
1147 *warntype = QUOTA_NL_BSOFTLONGWARN; 1236 *warntype = QUOTA_NL_BSOFTLONGWARN;
@@ -1149,11 +1238,12 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1149 } 1238 }
1150 1239
1151 if (dquot->dq_dqb.dqb_bsoftlimit && 1240 if (dquot->dq_dqb.dqb_bsoftlimit &&
1152 dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit && 1241 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1153 dquot->dq_dqb.dqb_btime == 0) { 1242 dquot->dq_dqb.dqb_btime == 0) {
1154 if (!prealloc) { 1243 if (!prealloc) {
1155 *warntype = QUOTA_NL_BSOFTWARN; 1244 *warntype = QUOTA_NL_BSOFTWARN;
1156 dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; 1245 dquot->dq_dqb.dqb_btime = get_seconds() +
1246 sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
1157 } 1247 }
1158 else 1248 else
1159 /* 1249 /*
@@ -1168,15 +1258,18 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
1168 1258
1169static int info_idq_free(struct dquot *dquot, qsize_t inodes) 1259static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1170{ 1260{
1261 qsize_t newinodes;
1262
1171 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || 1263 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1172 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || 1264 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1173 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) 1265 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
1174 return QUOTA_NL_NOWARN; 1266 return QUOTA_NL_NOWARN;
1175 1267
1176 if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) 1268 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1269 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1177 return QUOTA_NL_ISOFTBELOW; 1270 return QUOTA_NL_ISOFTBELOW;
1178 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && 1271 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1179 dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) 1272 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1180 return QUOTA_NL_IHARDBELOW; 1273 return QUOTA_NL_IHARDBELOW;
1181 return QUOTA_NL_NOWARN; 1274 return QUOTA_NL_NOWARN;
1182} 1275}
@@ -1203,7 +1296,7 @@ int dquot_initialize(struct inode *inode, int type)
1203{ 1296{
1204 unsigned int id = 0; 1297 unsigned int id = 0;
1205 int cnt, ret = 0; 1298 int cnt, ret = 0;
1206 struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; 1299 struct dquot *got[MAXQUOTAS] = { NULL, NULL };
1207 struct super_block *sb = inode->i_sb; 1300 struct super_block *sb = inode->i_sb;
1208 1301
1209 /* First test before acquiring mutex - solves deadlocks when we 1302 /* First test before acquiring mutex - solves deadlocks when we
@@ -1236,9 +1329,9 @@ int dquot_initialize(struct inode *inode, int type)
1236 /* Avoid races with quotaoff() */ 1329 /* Avoid races with quotaoff() */
1237 if (!sb_has_quota_active(sb, cnt)) 1330 if (!sb_has_quota_active(sb, cnt))
1238 continue; 1331 continue;
1239 if (inode->i_dquot[cnt] == NODQUOT) { 1332 if (!inode->i_dquot[cnt]) {
1240 inode->i_dquot[cnt] = got[cnt]; 1333 inode->i_dquot[cnt] = got[cnt];
1241 got[cnt] = NODQUOT; 1334 got[cnt] = NULL;
1242 } 1335 }
1243 } 1336 }
1244out_err: 1337out_err:
@@ -1248,6 +1341,7 @@ out_err:
1248 dqput(got[cnt]); 1341 dqput(got[cnt]);
1249 return ret; 1342 return ret;
1250} 1343}
1344EXPORT_SYMBOL(dquot_initialize);
1251 1345
1252/* 1346/*
1253 * Release all quotas referenced by inode 1347 * Release all quotas referenced by inode
@@ -1260,7 +1354,7 @@ int dquot_drop(struct inode *inode)
1260 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1354 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1261 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1355 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1262 put[cnt] = inode->i_dquot[cnt]; 1356 put[cnt] = inode->i_dquot[cnt];
1263 inode->i_dquot[cnt] = NODQUOT; 1357 inode->i_dquot[cnt] = NULL;
1264 } 1358 }
1265 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1359 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1266 1360
@@ -1268,6 +1362,7 @@ int dquot_drop(struct inode *inode)
1268 dqput(put[cnt]); 1362 dqput(put[cnt]);
1269 return 0; 1363 return 0;
1270} 1364}
1365EXPORT_SYMBOL(dquot_drop);
1271 1366
1272/* Wrapper to remove references to quota structures from inode */ 1367/* Wrapper to remove references to quota structures from inode */
1273void vfs_dq_drop(struct inode *inode) 1368void vfs_dq_drop(struct inode *inode)
@@ -1284,12 +1379,13 @@ void vfs_dq_drop(struct inode *inode)
1284 * must assure that nobody can come after the DQUOT_DROP and 1379 * must assure that nobody can come after the DQUOT_DROP and
1285 * add quota pointers back anyway */ 1380 * add quota pointers back anyway */
1286 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1381 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1287 if (inode->i_dquot[cnt] != NODQUOT) 1382 if (inode->i_dquot[cnt])
1288 break; 1383 break;
1289 if (cnt < MAXQUOTAS) 1384 if (cnt < MAXQUOTAS)
1290 inode->i_sb->dq_op->drop(inode); 1385 inode->i_sb->dq_op->drop(inode);
1291 } 1386 }
1292} 1387}
1388EXPORT_SYMBOL(vfs_dq_drop);
1293 1389
1294/* 1390/*
1295 * Following four functions update i_blocks+i_bytes fields and 1391 * Following four functions update i_blocks+i_bytes fields and
@@ -1303,51 +1399,93 @@ void vfs_dq_drop(struct inode *inode)
1303/* 1399/*
1304 * This operation can block, but only after everything is updated 1400 * This operation can block, but only after everything is updated
1305 */ 1401 */
1306int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) 1402int __dquot_alloc_space(struct inode *inode, qsize_t number,
1403 int warn, int reserve)
1307{ 1404{
1308 int cnt, ret = NO_QUOTA; 1405 int cnt, ret = QUOTA_OK;
1309 char warntype[MAXQUOTAS]; 1406 char warntype[MAXQUOTAS];
1310 1407
1311 /* First test before acquiring mutex - solves deadlocks when we
1312 * re-enter the quota code and are already holding the mutex */
1313 if (IS_NOQUOTA(inode)) {
1314out_add:
1315 inode_add_bytes(inode, number);
1316 return QUOTA_OK;
1317 }
1318 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1408 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1319 warntype[cnt] = QUOTA_NL_NOWARN; 1409 warntype[cnt] = QUOTA_NL_NOWARN;
1320 1410
1321 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1322 if (IS_NOQUOTA(inode)) { /* Now we can do reliable test... */
1323 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1324 goto out_add;
1325 }
1326 spin_lock(&dq_data_lock); 1411 spin_lock(&dq_data_lock);
1327 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1412 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1328 if (inode->i_dquot[cnt] == NODQUOT) 1413 if (!inode->i_dquot[cnt])
1329 continue; 1414 continue;
1330 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA) 1415 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
1331 goto warn_put_all; 1416 == NO_QUOTA) {
1417 ret = NO_QUOTA;
1418 goto out_unlock;
1419 }
1332 } 1420 }
1333 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1421 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1334 if (inode->i_dquot[cnt] == NODQUOT) 1422 if (!inode->i_dquot[cnt])
1335 continue; 1423 continue;
1336 dquot_incr_space(inode->i_dquot[cnt], number); 1424 if (reserve)
1425 dquot_resv_space(inode->i_dquot[cnt], number);
1426 else
1427 dquot_incr_space(inode->i_dquot[cnt], number);
1337 } 1428 }
1338 inode_add_bytes(inode, number); 1429 if (!reserve)
1339 ret = QUOTA_OK; 1430 inode_add_bytes(inode, number);
1340warn_put_all: 1431out_unlock:
1341 spin_unlock(&dq_data_lock); 1432 spin_unlock(&dq_data_lock);
1342 if (ret == QUOTA_OK)
1343 /* Dirtify all the dquots - this can block when journalling */
1344 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1345 if (inode->i_dquot[cnt])
1346 mark_dquot_dirty(inode->i_dquot[cnt]);
1347 flush_warnings(inode->i_dquot, warntype); 1433 flush_warnings(inode->i_dquot, warntype);
1434 return ret;
1435}
1436
1437int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
1438{
1439 int cnt, ret = QUOTA_OK;
1440
1441 /*
1442 * First test before acquiring mutex - solves deadlocks when we
1443 * re-enter the quota code and are already holding the mutex
1444 */
1445 if (IS_NOQUOTA(inode)) {
1446 inode_add_bytes(inode, number);
1447 goto out;
1448 }
1449
1450 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1451 if (IS_NOQUOTA(inode)) {
1452 inode_add_bytes(inode, number);
1453 goto out_unlock;
1454 }
1455
1456 ret = __dquot_alloc_space(inode, number, warn, 0);
1457 if (ret == NO_QUOTA)
1458 goto out_unlock;
1459
1460 /* Dirtify all the dquots - this can block when journalling */
1461 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1462 if (inode->i_dquot[cnt])
1463 mark_dquot_dirty(inode->i_dquot[cnt]);
1464out_unlock:
1348 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1465 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1466out:
1349 return ret; 1467 return ret;
1350} 1468}
1469EXPORT_SYMBOL(dquot_alloc_space);
1470
1471int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
1472{
1473 int ret = QUOTA_OK;
1474
1475 if (IS_NOQUOTA(inode))
1476 goto out;
1477
1478 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1479 if (IS_NOQUOTA(inode))
1480 goto out_unlock;
1481
1482 ret = __dquot_alloc_space(inode, number, warn, 1);
1483out_unlock:
1484 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1485out:
1486 return ret;
1487}
1488EXPORT_SYMBOL(dquot_reserve_space);
1351 1489
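With the split above, dquot_alloc_space() and the new dquot_reserve_space() are thin wrappers around __dquot_alloc_space(); the only difference is whether the bytes land in dqb_curspace (plus i_bytes) or in dqb_rsvspace. A minimal sketch of a caller on a delayed-allocation write path; myfs_reserve_blocks() is invented for illustration, and it assumes the declaration is exported through <linux/quotaops.h> like the other dquot_* entry points.

#include <linux/fs.h>
#include <linux/quotaops.h>

/* Hypothetical: reserve quota for nr blocks at write() time, allocate later */
static int myfs_reserve_blocks(struct inode *inode, unsigned int nr, int warn)
{
	qsize_t bytes = (qsize_t)nr << inode->i_blkbits;

	/* Bytes are charged to dqb_rsvspace; the limit checks already see
	 * them. The warn flag is simply forwarded to check_bdq(). */
	if (dquot_reserve_space(inode, bytes, warn) == NO_QUOTA)
		return -EDQUOT;
	return 0;
}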
1352/* 1490/*
1353 * This operation can block, but only after everything is updated 1491 * This operation can block, but only after everything is updated
@@ -1370,14 +1508,15 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
1370 } 1508 }
1371 spin_lock(&dq_data_lock); 1509 spin_lock(&dq_data_lock);
1372 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1510 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1373 if (inode->i_dquot[cnt] == NODQUOT) 1511 if (!inode->i_dquot[cnt])
1374 continue; 1512 continue;
1375 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) 1513 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
1514 == NO_QUOTA)
1376 goto warn_put_all; 1515 goto warn_put_all;
1377 } 1516 }
1378 1517
1379 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1518 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1380 if (inode->i_dquot[cnt] == NODQUOT) 1519 if (!inode->i_dquot[cnt])
1381 continue; 1520 continue;
1382 dquot_incr_inodes(inode->i_dquot[cnt], number); 1521 dquot_incr_inodes(inode->i_dquot[cnt], number);
1383 } 1522 }
@@ -1393,6 +1532,73 @@ warn_put_all:
1393 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1532 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1394 return ret; 1533 return ret;
1395} 1534}
1535EXPORT_SYMBOL(dquot_alloc_inode);
1536
1537int dquot_claim_space(struct inode *inode, qsize_t number)
1538{
1539 int cnt;
1540 int ret = QUOTA_OK;
1541
1542 if (IS_NOQUOTA(inode)) {
1543 inode_add_bytes(inode, number);
1544 goto out;
1545 }
1546
1547 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1548 if (IS_NOQUOTA(inode)) {
1549 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1550 inode_add_bytes(inode, number);
1551 goto out;
1552 }
1553
1554 spin_lock(&dq_data_lock);
1555 /* Claim reserved quotas to allocated quotas */
1556 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1557 if (inode->i_dquot[cnt])
1558 dquot_claim_reserved_space(inode->i_dquot[cnt],
1559 number);
1560 }
1561 /* Update inode bytes */
1562 inode_add_bytes(inode, number);
1563 spin_unlock(&dq_data_lock);
1564 /* Dirtify all the dquots - this can block when journalling */
1565 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1566 if (inode->i_dquot[cnt])
1567 mark_dquot_dirty(inode->i_dquot[cnt]);
1568 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1569out:
1570 return ret;
1571}
1572EXPORT_SYMBOL(dquot_claim_space);
1573
1574/*
1575 * Release reserved quota space
1576 */
1577void dquot_release_reserved_space(struct inode *inode, qsize_t number)
1578{
1579 int cnt;
1580
1581 if (IS_NOQUOTA(inode))
1582 goto out;
1583
1584 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1585 if (IS_NOQUOTA(inode))
1586 goto out_unlock;
1587
1588 spin_lock(&dq_data_lock);
1589 /* Release reserved dquots */
1590 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1591 if (inode->i_dquot[cnt])
1592 dquot_free_reserved_space(inode->i_dquot[cnt], number);
1593 }
1594 spin_unlock(&dq_data_lock);
1595
1596out_unlock:
1597 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1598out:
1599 return;
1600}
1601EXPORT_SYMBOL(dquot_release_reserved_space);
1396 1602
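dquot_claim_space() and dquot_release_reserved_space() are the two possible exits from a reservation: the first converts reserved bytes into real usage (and only then bumps i_bytes), the second drops the reservation when nothing was written after all. A sketch of the filesystem side at writeback time; myfs_end_delalloc() is invented for illustration.

#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/types.h>

/* Hypothetical writeback hook: settle a reservation made earlier */
static void myfs_end_delalloc(struct inode *inode, qsize_t bytes,
			      bool allocated)
{
	if (allocated)
		/* dqb_rsvspace -> dqb_curspace, i_bytes updated */
		dquot_claim_space(inode, bytes);
	else
		/* Write was aborted, give the reservation back */
		dquot_release_reserved_space(inode, bytes);
}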
1397/* 1603/*
1398 * This operation can block, but only after everything is updated 1604 * This operation can block, but only after everything is updated
@@ -1418,7 +1624,7 @@ out_sub:
1418 } 1624 }
1419 spin_lock(&dq_data_lock); 1625 spin_lock(&dq_data_lock);
1420 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1626 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1421 if (inode->i_dquot[cnt] == NODQUOT) 1627 if (!inode->i_dquot[cnt])
1422 continue; 1628 continue;
1423 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); 1629 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
1424 dquot_decr_space(inode->i_dquot[cnt], number); 1630 dquot_decr_space(inode->i_dquot[cnt], number);
@@ -1433,6 +1639,7 @@ out_sub:
1433 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1639 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1434 return QUOTA_OK; 1640 return QUOTA_OK;
1435} 1641}
1642EXPORT_SYMBOL(dquot_free_space);
1436 1643
1437/* 1644/*
1438 * This operation can block, but only after everything is updated 1645 * This operation can block, but only after everything is updated
@@ -1455,7 +1662,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
1455 } 1662 }
1456 spin_lock(&dq_data_lock); 1663 spin_lock(&dq_data_lock);
1457 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1664 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1458 if (inode->i_dquot[cnt] == NODQUOT) 1665 if (!inode->i_dquot[cnt])
1459 continue; 1666 continue;
1460 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); 1667 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
1461 dquot_decr_inodes(inode->i_dquot[cnt], number); 1668 dquot_decr_inodes(inode->i_dquot[cnt], number);
@@ -1469,6 +1676,20 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
1469 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1676 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1470 return QUOTA_OK; 1677 return QUOTA_OK;
1471} 1678}
1679EXPORT_SYMBOL(dquot_free_inode);
1680
1681/*
1682 * Callback function: get reserved quota space from the underlying fs
1683 */
1684qsize_t dquot_get_reserved_space(struct inode *inode)
1685{
1686 qsize_t reserved_space = 0;
1687
1688 if (sb_any_quota_active(inode->i_sb) &&
1689 inode->i_sb->dq_op->get_reserved_space)
1690 reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
1691 return reserved_space;
1692}
1472 1693
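dquot_get_reserved_space() is only a dispatcher: the filesystem itself has to track per-inode reserved bytes and expose them through the new ->get_reserved_space method. A sketch of that side, assuming an invented myfs inode layout (only the method pointer in struct dquot_operations is real, per the code above).

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/quota.h>

struct myfs_inode_info {
	qsize_t i_reserved_bytes;	/* maintained by myfs, assumption */
	struct inode vfs_inode;
};

static qsize_t myfs_get_reserved_space(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info,
			    vfs_inode)->i_reserved_bytes;
}

static struct dquot_operations myfs_quota_ops = {
	/* ... the usual initialize/drop/alloc_space/... operations ... */
	.get_reserved_space	= myfs_get_reserved_space,
};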
1473/* 1694/*
1474 * Transfer the number of inodes and blocks from one diskquota to another. 1695 * Transfer the number of inodes and blocks from one diskquota to another.
@@ -1478,7 +1699,8 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
1478 */ 1699 */
1479int dquot_transfer(struct inode *inode, struct iattr *iattr) 1700int dquot_transfer(struct inode *inode, struct iattr *iattr)
1480{ 1701{
1481 qsize_t space; 1702 qsize_t space, cur_space;
1703 qsize_t rsv_space = 0;
1482 struct dquot *transfer_from[MAXQUOTAS]; 1704 struct dquot *transfer_from[MAXQUOTAS];
1483 struct dquot *transfer_to[MAXQUOTAS]; 1705 struct dquot *transfer_to[MAXQUOTAS];
1484 int cnt, ret = QUOTA_OK; 1706 int cnt, ret = QUOTA_OK;
@@ -1493,22 +1715,16 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1493 return QUOTA_OK; 1715 return QUOTA_OK;
1494 /* Initialize the arrays */ 1716 /* Initialize the arrays */
1495 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1717 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1496 transfer_from[cnt] = NODQUOT; 1718 transfer_from[cnt] = NULL;
1497 transfer_to[cnt] = NODQUOT; 1719 transfer_to[cnt] = NULL;
1498 warntype_to[cnt] = QUOTA_NL_NOWARN; 1720 warntype_to[cnt] = QUOTA_NL_NOWARN;
1499 switch (cnt) {
1500 case USRQUOTA:
1501 if (!chuid)
1502 continue;
1503 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
1504 break;
1505 case GRPQUOTA:
1506 if (!chgid)
1507 continue;
1508 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
1509 break;
1510 }
1511 } 1721 }
1722 if (chuid)
1723 transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
1724 USRQUOTA);
1725 if (chgid)
1726 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
1727 GRPQUOTA);
1512 1728
1513 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1729 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1514 /* Now recheck reliably when holding dqptr_sem */ 1730 /* Now recheck reliably when holding dqptr_sem */
@@ -1517,10 +1733,12 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1517 goto put_all; 1733 goto put_all;
1518 } 1734 }
1519 spin_lock(&dq_data_lock); 1735 spin_lock(&dq_data_lock);
1520 space = inode_get_bytes(inode); 1736 cur_space = inode_get_bytes(inode);
1737 rsv_space = dquot_get_reserved_space(inode);
1738 space = cur_space + rsv_space;
1521 /* Build the transfer_from list and check the limits */ 1739 /* Build the transfer_from list and check the limits */
1522 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1740 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1523 if (transfer_to[cnt] == NODQUOT) 1741 if (!transfer_to[cnt])
1524 continue; 1742 continue;
1525 transfer_from[cnt] = inode->i_dquot[cnt]; 1743 transfer_from[cnt] = inode->i_dquot[cnt];
1526 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == 1744 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
@@ -1536,7 +1754,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1536 /* 1754 /*
1537 * Skip changes for same uid or gid or for turned off quota-type. 1755 * Skip changes for same uid or gid or for turned off quota-type.
1538 */ 1756 */
1539 if (transfer_to[cnt] == NODQUOT) 1757 if (!transfer_to[cnt])
1540 continue; 1758 continue;
1541 1759
1542 /* Due to IO error we might not have transfer_from[] structure */ 1760 /* Due to IO error we might not have transfer_from[] structure */
@@ -1546,11 +1764,14 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1546 warntype_from_space[cnt] = 1764 warntype_from_space[cnt] =
1547 info_bdq_free(transfer_from[cnt], space); 1765 info_bdq_free(transfer_from[cnt], space);
1548 dquot_decr_inodes(transfer_from[cnt], 1); 1766 dquot_decr_inodes(transfer_from[cnt], 1);
1549 dquot_decr_space(transfer_from[cnt], space); 1767 dquot_decr_space(transfer_from[cnt], cur_space);
1768 dquot_free_reserved_space(transfer_from[cnt],
1769 rsv_space);
1550 } 1770 }
1551 1771
1552 dquot_incr_inodes(transfer_to[cnt], 1); 1772 dquot_incr_inodes(transfer_to[cnt], 1);
1553 dquot_incr_space(transfer_to[cnt], space); 1773 dquot_incr_space(transfer_to[cnt], cur_space);
1774 dquot_resv_space(transfer_to[cnt], rsv_space);
1554 1775
1555 inode->i_dquot[cnt] = transfer_to[cnt]; 1776 inode->i_dquot[cnt] = transfer_to[cnt];
1556 } 1777 }
@@ -1564,7 +1785,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1564 if (transfer_to[cnt]) { 1785 if (transfer_to[cnt]) {
1565 mark_dquot_dirty(transfer_to[cnt]); 1786 mark_dquot_dirty(transfer_to[cnt]);
1566 /* The reference we got is transferred to the inode */ 1787 /* The reference we got is transferred to the inode */
1567 transfer_to[cnt] = NODQUOT; 1788 transfer_to[cnt] = NULL;
1568 } 1789 }
1569 } 1790 }
1570warn_put_all: 1791warn_put_all:
@@ -1582,10 +1803,11 @@ over_quota:
1582 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1803 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1583 /* Clear dquot pointers we don't want to dqput() */ 1804 /* Clear dquot pointers we don't want to dqput() */
1584 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1805 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1585 transfer_from[cnt] = NODQUOT; 1806 transfer_from[cnt] = NULL;
1586 ret = NO_QUOTA; 1807 ret = NO_QUOTA;
1587 goto warn_put_all; 1808 goto warn_put_all;
1588} 1809}
1810EXPORT_SYMBOL(dquot_transfer);
1589 1811
1590/* Wrapper for transferring ownership of an inode */ 1812/* Wrapper for transferring ownership of an inode */
1591int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) 1813int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
@@ -1597,7 +1819,7 @@ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
1597 } 1819 }
1598 return 0; 1820 return 0;
1599} 1821}
1600 1822EXPORT_SYMBOL(vfs_dq_transfer);
1601 1823
1602/* 1824/*
1603 * Write info of quota file to disk 1825 * Write info of quota file to disk
@@ -1612,6 +1834,7 @@ int dquot_commit_info(struct super_block *sb, int type)
1612 mutex_unlock(&dqopt->dqio_mutex); 1834 mutex_unlock(&dqopt->dqio_mutex);
1613 return ret; 1835 return ret;
1614} 1836}
1837EXPORT_SYMBOL(dquot_commit_info);
1615 1838
1616/* 1839/*
1617 * Definitions of diskquota operations. 1840 * Definitions of diskquota operations.
@@ -1697,8 +1920,8 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
1697 drop_dquot_ref(sb, cnt); 1920 drop_dquot_ref(sb, cnt);
1698 invalidate_dquots(sb, cnt); 1921 invalidate_dquots(sb, cnt);
1699 /* 1922 /*
1700 * Now all dquots should be invalidated, all writes done so we should be only 1923 * Now all dquots should be invalidated, all writes done so we
1701 * users of the info. No locks needed. 1924 * should be the only users of the info. No locks needed.
1702 */ 1925 */
1703 if (info_dirty(&dqopt->info[cnt])) 1926 if (info_dirty(&dqopt->info[cnt]))
1704 sb->dq_op->write_info(sb, cnt); 1927 sb->dq_op->write_info(sb, cnt);
@@ -1736,10 +1959,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
1736 /* If quota was reenabled in the meantime, we have 1959 /* If quota was reenabled in the meantime, we have
1737 * nothing to do */ 1960 * nothing to do */
1738 if (!sb_has_quota_loaded(sb, cnt)) { 1961 if (!sb_has_quota_loaded(sb, cnt)) {
1739 mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); 1962 mutex_lock_nested(&toputinode[cnt]->i_mutex,
1963 I_MUTEX_QUOTA);
1740 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | 1964 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
1741 S_NOATIME | S_NOQUOTA); 1965 S_NOATIME | S_NOQUOTA);
1742 truncate_inode_pages(&toputinode[cnt]->i_data, 0); 1966 truncate_inode_pages(&toputinode[cnt]->i_data,
1967 0);
1743 mutex_unlock(&toputinode[cnt]->i_mutex); 1968 mutex_unlock(&toputinode[cnt]->i_mutex);
1744 mark_inode_dirty(toputinode[cnt]); 1969 mark_inode_dirty(toputinode[cnt]);
1745 } 1970 }
@@ -1764,13 +1989,14 @@ put_inodes:
1764 } 1989 }
1765 return ret; 1990 return ret;
1766} 1991}
1992EXPORT_SYMBOL(vfs_quota_disable);
1767 1993
1768int vfs_quota_off(struct super_block *sb, int type, int remount) 1994int vfs_quota_off(struct super_block *sb, int type, int remount)
1769{ 1995{
1770 return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : 1996 return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
1771 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); 1997 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
1772} 1998}
1773 1999EXPORT_SYMBOL(vfs_quota_off);
1774/* 2000/*
1775 * Turn quotas on on a device 2001 * Turn quotas on on a device
1776 */ 2002 */
@@ -1828,7 +2054,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
1828 * possible) Also nobody should write to the file - we use 2054 * possible) Also nobody should write to the file - we use
1829 * special IO operations which ignore the immutable bit. */ 2055 * special IO operations which ignore the immutable bit. */
1830 down_write(&dqopt->dqptr_sem); 2056 down_write(&dqopt->dqptr_sem);
1831 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); 2057 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2058 S_NOQUOTA);
1832 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; 2059 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
1833 up_write(&dqopt->dqptr_sem); 2060 up_write(&dqopt->dqptr_sem);
1834 sb->dq_op->drop(inode); 2061 sb->dq_op->drop(inode);
@@ -1847,7 +2074,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
1847 dqopt->info[type].dqi_fmt_id = format_id; 2074 dqopt->info[type].dqi_fmt_id = format_id;
1848 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); 2075 INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
1849 mutex_lock(&dqopt->dqio_mutex); 2076 mutex_lock(&dqopt->dqio_mutex);
1850 if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { 2077 error = dqopt->ops[type]->read_file_info(sb, type);
2078 if (error < 0) {
1851 mutex_unlock(&dqopt->dqio_mutex); 2079 mutex_unlock(&dqopt->dqio_mutex);
1852 goto out_file_init; 2080 goto out_file_init;
1853 } 2081 }
@@ -1927,6 +2155,7 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
1927 DQUOT_LIMITS_ENABLED); 2155 DQUOT_LIMITS_ENABLED);
1928 return error; 2156 return error;
1929} 2157}
2158EXPORT_SYMBOL(vfs_quota_on_path);
1930 2159
1931int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, 2160int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
1932 int remount) 2161 int remount)
@@ -1944,6 +2173,7 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
1944 } 2173 }
1945 return error; 2174 return error;
1946} 2175}
2176EXPORT_SYMBOL(vfs_quota_on);
1947 2177
1948/* 2178/*
1949 * More powerful function for turning on quotas allowing setting 2179 * More powerful function for turning on quotas allowing setting
@@ -1990,6 +2220,7 @@ out_lock:
1990load_quota: 2220load_quota:
1991 return vfs_load_quota_inode(inode, type, format_id, flags); 2221 return vfs_load_quota_inode(inode, type, format_id, flags);
1992} 2222}
2223EXPORT_SYMBOL(vfs_quota_enable);
1993 2224
1994/* 2225/*
1995 * This function is used when filesystem needs to initialize quotas 2226 * This function is used when filesystem needs to initialize quotas
@@ -2019,6 +2250,7 @@ out:
2019 dput(dentry); 2250 dput(dentry);
2020 return error; 2251 return error;
2021} 2252}
2253EXPORT_SYMBOL(vfs_quota_on_mount);
2022 2254
2023/* Wrapper to turn on quotas when remounting rw */ 2255/* Wrapper to turn on quotas when remounting rw */
2024int vfs_dq_quota_on_remount(struct super_block *sb) 2256int vfs_dq_quota_on_remount(struct super_block *sb)
@@ -2035,6 +2267,7 @@ int vfs_dq_quota_on_remount(struct super_block *sb)
2035 } 2267 }
2036 return ret; 2268 return ret;
2037} 2269}
2270EXPORT_SYMBOL(vfs_dq_quota_on_remount);
2038 2271
2039static inline qsize_t qbtos(qsize_t blocks) 2272static inline qsize_t qbtos(qsize_t blocks)
2040{ 2273{
@@ -2054,7 +2287,7 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
2054 spin_lock(&dq_data_lock); 2287 spin_lock(&dq_data_lock);
2055 di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); 2288 di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
2056 di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); 2289 di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
2057 di->dqb_curspace = dm->dqb_curspace; 2290 di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
2058 di->dqb_ihardlimit = dm->dqb_ihardlimit; 2291 di->dqb_ihardlimit = dm->dqb_ihardlimit;
2059 di->dqb_isoftlimit = dm->dqb_isoftlimit; 2292 di->dqb_isoftlimit = dm->dqb_isoftlimit;
2060 di->dqb_curinodes = dm->dqb_curinodes; 2293 di->dqb_curinodes = dm->dqb_curinodes;
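The do_get_dqblk() change is user visible: Q_GETQUOTA now reports allocated plus reserved bytes in dqb_curspace, so delayed-allocation writes show up in quota listings before writeback. A small userspace check; the device path and uid are placeholders.

#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
	struct dqblk dq;

	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", 1000,
		     (caddr_t)&dq) != 0) {
		perror("quotactl");
		return 1;
	}
	/* dqb_curspace includes dqb_rsvspace since this patch */
	printf("bytes charged: %llu\n",
	       (unsigned long long)dq.dqb_curspace);
	return 0;
}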
@@ -2064,18 +2297,20 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
2064 spin_unlock(&dq_data_lock); 2297 spin_unlock(&dq_data_lock);
2065} 2298}
2066 2299
2067int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) 2300int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
2301 struct if_dqblk *di)
2068{ 2302{
2069 struct dquot *dquot; 2303 struct dquot *dquot;
2070 2304
2071 dquot = dqget(sb, id, type); 2305 dquot = dqget(sb, id, type);
2072 if (dquot == NODQUOT) 2306 if (!dquot)
2073 return -ESRCH; 2307 return -ESRCH;
2074 do_get_dqblk(dquot, di); 2308 do_get_dqblk(dquot, di);
2075 dqput(dquot); 2309 dqput(dquot);
2076 2310
2077 return 0; 2311 return 0;
2078} 2312}
2313EXPORT_SYMBOL(vfs_get_dqblk);
2079 2314
2080/* Generic routine for setting common part of quota structure */ 2315/* Generic routine for setting common part of quota structure */
2081static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) 2316static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
@@ -2094,7 +2329,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2094 2329
2095 spin_lock(&dq_data_lock); 2330 spin_lock(&dq_data_lock);
2096 if (di->dqb_valid & QIF_SPACE) { 2331 if (di->dqb_valid & QIF_SPACE) {
2097 dm->dqb_curspace = di->dqb_curspace; 2332 dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
2098 check_blim = 1; 2333 check_blim = 1;
2099 __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); 2334 __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
2100 } 2335 }
@@ -2127,22 +2362,25 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2127 } 2362 }
2128 2363
2129 if (check_blim) { 2364 if (check_blim) {
2130 if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { 2365 if (!dm->dqb_bsoftlimit ||
2366 dm->dqb_curspace < dm->dqb_bsoftlimit) {
2131 dm->dqb_btime = 0; 2367 dm->dqb_btime = 0;
2132 clear_bit(DQ_BLKS_B, &dquot->dq_flags); 2368 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
2133 } 2369 } else if (!(di->dqb_valid & QIF_BTIME))
2134 else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ 2370 /* Set grace only if user hasn't provided his own... */
2135 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; 2371 dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
2136 } 2372 }
2137 if (check_ilim) { 2373 if (check_ilim) {
2138 if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { 2374 if (!dm->dqb_isoftlimit ||
2375 dm->dqb_curinodes < dm->dqb_isoftlimit) {
2139 dm->dqb_itime = 0; 2376 dm->dqb_itime = 0;
2140 clear_bit(DQ_INODES_B, &dquot->dq_flags); 2377 clear_bit(DQ_INODES_B, &dquot->dq_flags);
2141 } 2378 } else if (!(di->dqb_valid & QIF_ITIME))
2142 else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ 2379 /* Set grace only if user hasn't provided his own... */
2143 dm->dqb_itime = get_seconds() + dqi->dqi_igrace; 2380 dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
2144 } 2381 }
2145 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) 2382 if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
2383 dm->dqb_isoftlimit)
2146 clear_bit(DQ_FAKE_B, &dquot->dq_flags); 2384 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
2147 else 2385 else
2148 set_bit(DQ_FAKE_B, &dquot->dq_flags); 2386 set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -2152,7 +2390,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
2152 return 0; 2390 return 0;
2153} 2391}
2154 2392
2155int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) 2393int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
2394 struct if_dqblk *di)
2156{ 2395{
2157 struct dquot *dquot; 2396 struct dquot *dquot;
2158 int rc; 2397 int rc;
@@ -2167,6 +2406,7 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
2167out: 2406out:
2168 return rc; 2407 return rc;
2169} 2408}
2409EXPORT_SYMBOL(vfs_set_dqblk);
2170 2410
2171/* Generic routine for getting common part of quota file information */ 2411/* Generic routine for getting common part of quota file information */
2172int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) 2412int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2188,6 +2428,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2188 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 2428 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2189 return 0; 2429 return 0;
2190} 2430}
2431EXPORT_SYMBOL(vfs_get_dqinfo);
2191 2432
2192/* Generic routine for setting common part of quota file information */ 2433/* Generic routine for setting common part of quota file information */
2193int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) 2434int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2207,7 +2448,8 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2207 if (ii->dqi_valid & IIF_IGRACE) 2448 if (ii->dqi_valid & IIF_IGRACE)
2208 mi->dqi_igrace = ii->dqi_igrace; 2449 mi->dqi_igrace = ii->dqi_igrace;
2209 if (ii->dqi_valid & IIF_FLAGS) 2450 if (ii->dqi_valid & IIF_FLAGS)
2210 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); 2451 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
2452 (ii->dqi_flags & DQF_MASK);
2211 spin_unlock(&dq_data_lock); 2453 spin_unlock(&dq_data_lock);
2212 mark_info_dirty(sb, type); 2454 mark_info_dirty(sb, type);
2213 /* Force write to disk */ 2455 /* Force write to disk */
@@ -2216,6 +2458,7 @@ out:
2216 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 2458 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2217 return err; 2459 return err;
2218} 2460}
2461EXPORT_SYMBOL(vfs_set_dqinfo);
2219 2462
2220struct quotactl_ops vfs_quotactl_ops = { 2463struct quotactl_ops vfs_quotactl_ops = {
2221 .quota_on = vfs_quota_on, 2464 .quota_on = vfs_quota_on,
@@ -2365,43 +2608,10 @@ static int __init dquot_init(void)
2365 2608
2366#ifdef CONFIG_QUOTA_NETLINK_INTERFACE 2609#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2367 if (genl_register_family(&quota_genl_family) != 0) 2610 if (genl_register_family(&quota_genl_family) != 0)
2368 printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); 2611 printk(KERN_ERR
2612 "VFS: Failed to create quota netlink interface.\n");
2369#endif 2613#endif
2370 2614
2371 return 0; 2615 return 0;
2372} 2616}
2373module_init(dquot_init); 2617module_init(dquot_init);
2374
2375EXPORT_SYMBOL(register_quota_format);
2376EXPORT_SYMBOL(unregister_quota_format);
2377EXPORT_SYMBOL(dqstats);
2378EXPORT_SYMBOL(dq_data_lock);
2379EXPORT_SYMBOL(vfs_quota_enable);
2380EXPORT_SYMBOL(vfs_quota_on);
2381EXPORT_SYMBOL(vfs_quota_on_path);
2382EXPORT_SYMBOL(vfs_quota_on_mount);
2383EXPORT_SYMBOL(vfs_quota_disable);
2384EXPORT_SYMBOL(vfs_quota_off);
2385EXPORT_SYMBOL(dquot_scan_active);
2386EXPORT_SYMBOL(vfs_quota_sync);
2387EXPORT_SYMBOL(vfs_get_dqinfo);
2388EXPORT_SYMBOL(vfs_set_dqinfo);
2389EXPORT_SYMBOL(vfs_get_dqblk);
2390EXPORT_SYMBOL(vfs_set_dqblk);
2391EXPORT_SYMBOL(dquot_commit);
2392EXPORT_SYMBOL(dquot_commit_info);
2393EXPORT_SYMBOL(dquot_acquire);
2394EXPORT_SYMBOL(dquot_release);
2395EXPORT_SYMBOL(dquot_mark_dquot_dirty);
2396EXPORT_SYMBOL(dquot_initialize);
2397EXPORT_SYMBOL(dquot_drop);
2398EXPORT_SYMBOL(vfs_dq_drop);
2399EXPORT_SYMBOL(dqget);
2400EXPORT_SYMBOL(dqput);
2401EXPORT_SYMBOL(dquot_alloc_space);
2402EXPORT_SYMBOL(dquot_alloc_inode);
2403EXPORT_SYMBOL(dquot_free_space);
2404EXPORT_SYMBOL(dquot_free_inode);
2405EXPORT_SYMBOL(dquot_transfer);
2406EXPORT_SYMBOL(vfs_dq_transfer);
2407EXPORT_SYMBOL(vfs_dq_quota_on_remount);
diff --git a/fs/quota.c b/fs/quota/quota.c
index d76ada914f98..b7f5a468f076 100644
--- a/fs/quota.c
+++ b/fs/quota/quota.c
@@ -20,7 +20,8 @@
20#include <linux/types.h> 20#include <linux/types.h>
21 21
22/* Check validity of generic quotactl commands */ 22/* Check validity of generic quotactl commands */
23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) 23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
24 qid_t id)
24{ 25{
25 if (type >= MAXQUOTAS) 26 if (type >= MAXQUOTAS)
26 return -EINVAL; 27 return -EINVAL;
@@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
72 case Q_SETINFO: 73 case Q_SETINFO:
73 case Q_SETQUOTA: 74 case Q_SETQUOTA:
74 case Q_GETQUOTA: 75 case Q_GETQUOTA:
75 /* This is just informative test so we are satisfied without a lock */ 76 /* This is just an informative test so we are satisfied
77 * without the lock */
76 if (!sb_has_quota_active(sb, type)) 78 if (!sb_has_quota_active(sb, type))
77 return -ESRCH; 79 return -ESRCH;
78 } 80 }
@@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
92} 94}
93 95
94/* Check validity of XFS Quota Manager commands */ 96/* Check validity of XFS Quota Manager commands */
95static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) 97static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
98 qid_t id)
96{ 99{
97 if (type >= XQM_MAXQUOTAS) 100 if (type >= XQM_MAXQUOTAS)
98 return -EINVAL; 101 return -EINVAL;
@@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i
142 return 0; 145 return 0;
143} 146}
144 147
145static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) 148static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
149 qid_t id)
146{ 150{
147 int error; 151 int error;
148 152
@@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type)
180 continue; 184 continue;
181 if (!sb_has_quota_active(sb, cnt)) 185 if (!sb_has_quota_active(sb, cnt))
182 continue; 186 continue;
183 mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); 187 mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
188 I_MUTEX_QUOTA);
184 truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); 189 truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
185 mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); 190 mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
186 } 191 }
@@ -200,14 +205,15 @@ void sync_dquots(struct super_block *sb, int type)
200 spin_lock(&sb_lock); 205 spin_lock(&sb_lock);
201restart: 206restart:
202 list_for_each_entry(sb, &super_blocks, s_list) { 207 list_for_each_entry(sb, &super_blocks, s_list) {
203 /* This test just improves performance so it needn't be reliable... */ 208 /* This test just improves performance so it needn't be
209 * reliable... */
204 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 210 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
205 if (type != -1 && type != cnt) 211 if (type != -1 && type != cnt)
206 continue; 212 continue;
207 if (!sb_has_quota_active(sb, cnt)) 213 if (!sb_has_quota_active(sb, cnt))
208 continue; 214 continue;
209 if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && 215 if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
210 list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) 216 list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
211 continue; 217 continue;
212 break; 218 break;
213 } 219 }
@@ -227,7 +233,8 @@ restart:
227} 233}
228 234
229/* Copy parameters and call proper function */ 235/* Copy parameters and call proper function */
230static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) 236static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
237 void __user *addr)
231{ 238{
232 int ret; 239 int ret;
233 240
@@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
235 case Q_QUOTAON: { 242 case Q_QUOTAON: {
236 char *pathname; 243 char *pathname;
237 244
238 if (IS_ERR(pathname = getname(addr))) 245 pathname = getname(addr);
246 if (IS_ERR(pathname))
239 return PTR_ERR(pathname); 247 return PTR_ERR(pathname);
240 ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); 248 ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
241 putname(pathname); 249 putname(pathname);
@@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
261 case Q_GETINFO: { 269 case Q_GETINFO: {
262 struct if_dqinfo info; 270 struct if_dqinfo info;
263 271
264 if ((ret = sb->s_qcop->get_info(sb, type, &info))) 272 ret = sb->s_qcop->get_info(sb, type, &info);
273 if (ret)
265 return ret; 274 return ret;
266 if (copy_to_user(addr, &info, sizeof(info))) 275 if (copy_to_user(addr, &info, sizeof(info)))
267 return -EFAULT; 276 return -EFAULT;
@@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
277 case Q_GETQUOTA: { 286 case Q_GETQUOTA: {
278 struct if_dqblk idq; 287 struct if_dqblk idq;
279 288
280 if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) 289 ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
290 if (ret)
281 return ret; 291 return ret;
282 if (copy_to_user(addr, &idq, sizeof(idq))) 292 if (copy_to_user(addr, &idq, sizeof(idq)))
283 return -EFAULT; 293 return -EFAULT;
@@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
322 case Q_XGETQUOTA: { 332 case Q_XGETQUOTA: {
323 struct fs_disk_quota fdq; 333 struct fs_disk_quota fdq;
324 334
325 if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) 335 ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
336 if (ret)
326 return ret; 337 return ret;
327 if (copy_to_user(addr, &fdq, sizeof(fdq))) 338 if (copy_to_user(addr, &fdq, sizeof(fdq)))
328 return -EFAULT; 339 return -EFAULT;
@@ -341,7 +352,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
341 * look up a superblock on which quota ops will be performed 352 * look up a superblock on which quota ops will be performed
342 * - use the name of a block device to find the superblock thereon 353 * - use the name of a block device to find the superblock thereon
343 */ 354 */
344static inline struct super_block *quotactl_block(const char __user *special) 355static struct super_block *quotactl_block(const char __user *special)
345{ 356{
346#ifdef CONFIG_BLOCK 357#ifdef CONFIG_BLOCK
347 struct block_device *bdev; 358 struct block_device *bdev;
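
The fs/quota.c hunks above all apply the same checkpatch-driven cleanup: assignments buried inside a condition, such as if ((ret = sb->s_qcop->get_info(sb, type, &info))) or if (IS_ERR(pathname = getname(addr))), are split into a plain assignment followed by the test, and over-long lines are wrapped at 80 columns. A minimal standalone C sketch of the assignment split; get_info() here is a hypothetical stand-in for the quotactl callback, not the kernel function:

#include <stdio.h>

/* Hypothetical stand-in for sb->s_qcop->get_info(): 0 on success. */
static int get_info(int *out)
{
        *out = 42;
        return 0;
}

static int old_style(void)
{
        int info, ret;

        if ((ret = get_info(&info)))    /* assignment hidden in the test */
                return ret;
        return info;
}

static int new_style(void)
{
        int info, ret;

        ret = get_info(&info);          /* assignment on its own line */
        if (ret)
                return ret;
        return info;
}

int main(void)
{
        printf("%d %d\n", old_style(), new_style());
        return 0;
}

Both forms compile to the same object code; the rewrite only keeps the side effect visible on its own line.
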
diff --git a/fs/quota_tree.c b/fs/quota/quota_tree.c
index 953404c95b17..f81f4bcfb178 100644
--- a/fs/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -22,8 +22,6 @@ MODULE_LICENSE("GPL");
22 22
23#define __QUOTA_QT_PARANOIA 23#define __QUOTA_QT_PARANOIA
24 24
25typedef char *dqbuf_t;
26
27static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) 25static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
28{ 26{
29 unsigned int epb = info->dqi_usable_bs >> 2; 27 unsigned int epb = info->dqi_usable_bs >> 2;
@@ -35,46 +33,42 @@ static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
35} 33}
36 34
37/* Number of entries in one blocks */ 35/* Number of entries in one blocks */
38static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) 36static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
39{ 37{
40 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) 38 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
41 / info->dqi_entry_size; 39 / info->dqi_entry_size;
42} 40}
43 41
44static dqbuf_t getdqbuf(size_t size) 42static char *getdqbuf(size_t size)
45{ 43{
46 dqbuf_t buf = kmalloc(size, GFP_NOFS); 44 char *buf = kmalloc(size, GFP_NOFS);
47 if (!buf) 45 if (!buf)
48 printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); 46 printk(KERN_WARNING
47 "VFS: Not enough memory for quota buffers.\n");
49 return buf; 48 return buf;
50} 49}
51 50
52static inline void freedqbuf(dqbuf_t buf) 51static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
53{
54 kfree(buf);
55}
56
57static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf)
58{ 52{
59 struct super_block *sb = info->dqi_sb; 53 struct super_block *sb = info->dqi_sb;
60 54
61 memset(buf, 0, info->dqi_usable_bs); 55 memset(buf, 0, info->dqi_usable_bs);
62 return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, 56 return sb->s_op->quota_read(sb, info->dqi_type, buf,
63 info->dqi_usable_bs, blk << info->dqi_blocksize_bits); 57 info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
64} 58}
65 59
66static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) 60static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
67{ 61{
68 struct super_block *sb = info->dqi_sb; 62 struct super_block *sb = info->dqi_sb;
69 63
70 return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, 64 return sb->s_op->quota_write(sb, info->dqi_type, buf,
71 info->dqi_usable_bs, blk << info->dqi_blocksize_bits); 65 info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
72} 66}
73 67
74/* Remove empty block from list and return it */ 68/* Remove empty block from list and return it */
75static int get_free_dqblk(struct qtree_mem_dqinfo *info) 69static int get_free_dqblk(struct qtree_mem_dqinfo *info)
76{ 70{
77 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 71 char *buf = getdqbuf(info->dqi_usable_bs);
78 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 72 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
79 int ret, blk; 73 int ret, blk;
80 74
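
The hunk above removes the dqbuf_t typedef along with the one-line freedqbuf() wrapper: the typedef only disguised a plain char *, and the wrapper only forwarded to kfree(), so every user is converted to the underlying type and call. A standalone sketch of the removed indirection, with malloc()/free() standing in for kmalloc()/kfree():

#include <stdlib.h>

/* Old shape, as removed above: a pointer typedef plus a one-line wrapper. */
typedef char *dqbuf_t;

static void freedqbuf(dqbuf_t buf)
{
        free(buf);
}

int main(void)
{
        dqbuf_t old = malloc(64);       /* reads like an opaque handle... */
        char *new_buf = malloc(64);     /* ...but is exactly this */

        freedqbuf(old);                 /* same operation... */
        free(new_buf);                  /* ...as this */
        return 0;
}

Dropping the typedef is also what lets read_blk() and write_blk() pass the buffer to quota_read()/quota_write() without the (char *) casts deleted above.
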
@@ -98,12 +92,12 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
98 mark_info_dirty(info->dqi_sb, info->dqi_type); 92 mark_info_dirty(info->dqi_sb, info->dqi_type);
99 ret = blk; 93 ret = blk;
100out_buf: 94out_buf:
101 freedqbuf(buf); 95 kfree(buf);
102 return ret; 96 return ret;
103} 97}
104 98
105/* Insert empty block to the list */ 99/* Insert empty block to the list */
106static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) 100static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
107{ 101{
108 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 102 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
109 int err; 103 int err;
@@ -120,9 +114,10 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk)
120} 114}
121 115
122/* Remove given block from the list of blocks with free entries */ 116/* Remove given block from the list of blocks with free entries */
123static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) 117static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
118 uint blk)
124{ 119{
125 dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); 120 char *tmpbuf = getdqbuf(info->dqi_usable_bs);
126 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 121 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
127 uint nextblk = le32_to_cpu(dh->dqdh_next_free); 122 uint nextblk = le32_to_cpu(dh->dqdh_next_free);
128 uint prevblk = le32_to_cpu(dh->dqdh_prev_free); 123 uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
@@ -153,21 +148,24 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
153 info->dqi_free_entry = nextblk; 148 info->dqi_free_entry = nextblk;
154 mark_info_dirty(info->dqi_sb, info->dqi_type); 149 mark_info_dirty(info->dqi_sb, info->dqi_type);
155 } 150 }
156 freedqbuf(tmpbuf); 151 kfree(tmpbuf);
157 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); 152 dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
158 /* No matter whether write succeeds block is out of list */ 153 /* No matter whether write succeeds block is out of list */
159 if (write_blk(info, blk, buf) < 0) 154 if (write_blk(info, blk, buf) < 0)
160 printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); 155 printk(KERN_ERR
156 "VFS: Can't write block (%u) with free entries.\n",
157 blk);
161 return 0; 158 return 0;
162out_buf: 159out_buf:
163 freedqbuf(tmpbuf); 160 kfree(tmpbuf);
164 return err; 161 return err;
165} 162}
166 163
167/* Insert given block to the beginning of list with free entries */ 164/* Insert given block to the beginning of list with free entries */
168static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) 165static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
166 uint blk)
169{ 167{
170 dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); 168 char *tmpbuf = getdqbuf(info->dqi_usable_bs);
171 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 169 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
172 int err; 170 int err;
173 171
@@ -188,12 +186,12 @@ static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint
188 if (err < 0) 186 if (err < 0)
189 goto out_buf; 187 goto out_buf;
190 } 188 }
191 freedqbuf(tmpbuf); 189 kfree(tmpbuf);
192 info->dqi_free_entry = blk; 190 info->dqi_free_entry = blk;
193 mark_info_dirty(info->dqi_sb, info->dqi_type); 191 mark_info_dirty(info->dqi_sb, info->dqi_type);
194 return 0; 192 return 0;
195out_buf: 193out_buf:
196 freedqbuf(tmpbuf); 194 kfree(tmpbuf);
197 return err; 195 return err;
198} 196}
199 197
@@ -215,7 +213,7 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
215{ 213{
216 uint blk, i; 214 uint blk, i;
217 struct qt_disk_dqdbheader *dh; 215 struct qt_disk_dqdbheader *dh;
218 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 216 char *buf = getdqbuf(info->dqi_usable_bs);
219 char *ddquot; 217 char *ddquot;
220 218
221 *err = 0; 219 *err = 0;
@@ -233,11 +231,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
233 blk = get_free_dqblk(info); 231 blk = get_free_dqblk(info);
234 if ((int)blk < 0) { 232 if ((int)blk < 0) {
235 *err = blk; 233 *err = blk;
236 freedqbuf(buf); 234 kfree(buf);
237 return 0; 235 return 0;
238 } 236 }
239 memset(buf, 0, info->dqi_usable_bs); 237 memset(buf, 0, info->dqi_usable_bs);
240 /* This is enough as block is already zeroed and entry list is empty... */ 238 /* This is enough as the block is already zeroed and the entry
239 * list is empty... */
241 info->dqi_free_entry = blk; 240 info->dqi_free_entry = blk;
242 mark_info_dirty(dquot->dq_sb, dquot->dq_type); 241 mark_info_dirty(dquot->dq_sb, dquot->dq_type);
243 } 242 }
@@ -253,9 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
253 } 252 }
254 le16_add_cpu(&dh->dqdh_entries, 1); 253 le16_add_cpu(&dh->dqdh_entries, 1);
255 /* Find free structure in block */ 254 /* Find free structure in block */
256 for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); 255 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
257 i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); 256 for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
258 i++, ddquot += info->dqi_entry_size); 257 if (qtree_entry_unused(info, ddquot))
258 break;
259 ddquot += info->dqi_entry_size;
260 }
259#ifdef __QUOTA_QT_PARANOIA 261#ifdef __QUOTA_QT_PARANOIA
260 if (i == qtree_dqstr_in_blk(info)) { 262 if (i == qtree_dqstr_in_blk(info)) {
261 printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " 263 printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
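
Here find_free_dqentry(), and find_block_dqentry() in a later hunk, replace an empty-body for() that packed the initialization, two exit conditions, and two increments into its header with an explicit body that exits via break. A runnable sketch of the transformation; entry_unused(), NENTRIES, and ENTRY_SIZE are stand-ins for the qtree helpers and sizes:

#include <stdio.h>
#include <string.h>

#define NENTRIES        8
#define ENTRY_SIZE      16

/* Stand-in for qtree_entry_unused(): a zero first byte means free. */
static int entry_unused(const char *e)
{
        return e[0] == 0;
}

int main(void)
{
        char blk[NENTRIES * ENTRY_SIZE];
        char *p;
        int i;

        memset(blk, 0, sizeof(blk));
        memset(blk, 1, 3 * ENTRY_SIZE); /* first three entries in use */

        /* Old: everything in the for() header, body empty. */
        for (i = 0, p = blk;
             i < NENTRIES && !entry_unused(p);
             i++, p += ENTRY_SIZE);

        /* New: one step per line, early exit via break. */
        p = blk;
        for (i = 0; i < NENTRIES; i++) {
                if (entry_unused(p))
                        break;
                p += ENTRY_SIZE;
        }
        printf("first free entry: %d\n", i);    /* prints 3 */
        return 0;
}

After either loop, i indexes the first free entry, or equals NENTRIES when the block is full, which is exactly what the paranoia check above tests.
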
@@ -273,10 +275,10 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
273 dquot->dq_off = (blk << info->dqi_blocksize_bits) + 275 dquot->dq_off = (blk << info->dqi_blocksize_bits) +
274 sizeof(struct qt_disk_dqdbheader) + 276 sizeof(struct qt_disk_dqdbheader) +
275 i * info->dqi_entry_size; 277 i * info->dqi_entry_size;
276 freedqbuf(buf); 278 kfree(buf);
277 return blk; 279 return blk;
278out_buf: 280out_buf:
279 freedqbuf(buf); 281 kfree(buf);
280 return 0; 282 return 0;
281} 283}
282 284
@@ -284,7 +286,7 @@ out_buf:
284static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 286static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
285 uint *treeblk, int depth) 287 uint *treeblk, int depth)
286{ 288{
287 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 289 char *buf = getdqbuf(info->dqi_usable_bs);
288 int ret = 0, newson = 0, newact = 0; 290 int ret = 0, newson = 0, newact = 0;
289 __le32 *ref; 291 __le32 *ref;
290 uint newblk; 292 uint newblk;
@@ -333,7 +335,7 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
333 put_free_dqblk(info, buf, *treeblk); 335 put_free_dqblk(info, buf, *treeblk);
334 } 336 }
335out_buf: 337out_buf:
336 freedqbuf(buf); 338 kfree(buf);
337 return ret; 339 return ret;
338} 340}
339 341
@@ -346,14 +348,15 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
346} 348}
347 349
348/* 350/*
349 * We don't have to be afraid of deadlocks as we never have quotas on quota files... 351 * We don't have to be afraid of deadlocks as we never have quotas on quota
352 * files...
350 */ 353 */
351int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 354int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
352{ 355{
353 int type = dquot->dq_type; 356 int type = dquot->dq_type;
354 struct super_block *sb = dquot->dq_sb; 357 struct super_block *sb = dquot->dq_sb;
355 ssize_t ret; 358 ssize_t ret;
356 dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); 359 char *ddquot = getdqbuf(info->dqi_entry_size);
357 360
358 if (!ddquot) 361 if (!ddquot)
359 return -ENOMEM; 362 return -ENOMEM;
@@ -364,15 +367,15 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
364 if (ret < 0) { 367 if (ret < 0) {
365 printk(KERN_ERR "VFS: Error %zd occurred while " 368 printk(KERN_ERR "VFS: Error %zd occurred while "
366 "creating quota.\n", ret); 369 "creating quota.\n", ret);
367 freedqbuf(ddquot); 370 kfree(ddquot);
368 return ret; 371 return ret;
369 } 372 }
370 } 373 }
371 spin_lock(&dq_data_lock); 374 spin_lock(&dq_data_lock);
372 info->dqi_ops->mem2disk_dqblk(ddquot, dquot); 375 info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
373 spin_unlock(&dq_data_lock); 376 spin_unlock(&dq_data_lock);
374 ret = sb->s_op->quota_write(sb, type, (char *)ddquot, 377 ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
375 info->dqi_entry_size, dquot->dq_off); 378 dquot->dq_off);
376 if (ret != info->dqi_entry_size) { 379 if (ret != info->dqi_entry_size) {
377 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", 380 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
378 sb->s_id); 381 sb->s_id);
@@ -382,7 +385,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
382 ret = 0; 385 ret = 0;
383 } 386 }
384 dqstats.writes++; 387 dqstats.writes++;
385 freedqbuf(ddquot); 388 kfree(ddquot);
386 389
387 return ret; 390 return ret;
388} 391}
@@ -393,7 +396,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
393 uint blk) 396 uint blk)
394{ 397{
395 struct qt_disk_dqdbheader *dh; 398 struct qt_disk_dqdbheader *dh;
396 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 399 char *buf = getdqbuf(info->dqi_usable_bs);
397 int ret = 0; 400 int ret = 0;
398 401
399 if (!buf) 402 if (!buf)
@@ -444,7 +447,7 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
444 } 447 }
445 dquot->dq_off = 0; /* Quota is now unattached */ 448 dquot->dq_off = 0; /* Quota is now unattached */
446out_buf: 449out_buf:
447 freedqbuf(buf); 450 kfree(buf);
448 return ret; 451 return ret;
449} 452}
450 453
@@ -452,7 +455,7 @@ out_buf:
452static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 455static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
453 uint *blk, int depth) 456 uint *blk, int depth)
454{ 457{
455 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 458 char *buf = getdqbuf(info->dqi_usable_bs);
456 int ret = 0; 459 int ret = 0;
457 uint newblk; 460 uint newblk;
458 __le32 *ref = (__le32 *)buf; 461 __le32 *ref = (__le32 *)buf;
@@ -475,9 +478,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
475 int i; 478 int i;
476 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); 479 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
477 /* Block got empty? */ 480 /* Block got empty? */
478 for (i = 0; 481 for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
479 i < (info->dqi_usable_bs >> 2) && !ref[i]; 482 ;
480 i++);
481 /* Don't put the root block into the free block list */ 483 /* Don't put the root block into the free block list */
482 if (i == (info->dqi_usable_bs >> 2) 484 if (i == (info->dqi_usable_bs >> 2)
483 && *blk != QT_TREEOFF) { 485 && *blk != QT_TREEOFF) {
@@ -491,7 +493,7 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
491 } 493 }
492 } 494 }
493out_buf: 495out_buf:
494 freedqbuf(buf); 496 kfree(buf);
495 return ret; 497 return ret;
496} 498}
497 499
@@ -510,7 +512,7 @@ EXPORT_SYMBOL(qtree_delete_dquot);
510static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, 512static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
511 struct dquot *dquot, uint blk) 513 struct dquot *dquot, uint blk)
512{ 514{
513 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 515 char *buf = getdqbuf(info->dqi_usable_bs);
514 loff_t ret = 0; 516 loff_t ret = 0;
515 int i; 517 int i;
516 char *ddquot; 518 char *ddquot;
@@ -522,9 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
522 printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); 524 printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
523 goto out_buf; 525 goto out_buf;
524 } 526 }
525 for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); 527 ddquot = buf + sizeof(struct qt_disk_dqdbheader);
526 i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); 528 for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
527 i++, ddquot += info->dqi_entry_size); 529 if (info->dqi_ops->is_id(ddquot, dquot))
530 break;
531 ddquot += info->dqi_entry_size;
532 }
528 if (i == qtree_dqstr_in_blk(info)) { 533 if (i == qtree_dqstr_in_blk(info)) {
529 printk(KERN_ERR "VFS: Quota for id %u referenced " 534 printk(KERN_ERR "VFS: Quota for id %u referenced "
530 "but not present.\n", dquot->dq_id); 535 "but not present.\n", dquot->dq_id);
@@ -535,7 +540,7 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
535 qt_disk_dqdbheader) + i * info->dqi_entry_size; 540 qt_disk_dqdbheader) + i * info->dqi_entry_size;
536 } 541 }
537out_buf: 542out_buf:
538 freedqbuf(buf); 543 kfree(buf);
539 return ret; 544 return ret;
540} 545}
541 546
@@ -543,7 +548,7 @@ out_buf:
543static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, 548static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
544 struct dquot *dquot, uint blk, int depth) 549 struct dquot *dquot, uint blk, int depth)
545{ 550{
546 dqbuf_t buf = getdqbuf(info->dqi_usable_bs); 551 char *buf = getdqbuf(info->dqi_usable_bs);
547 loff_t ret = 0; 552 loff_t ret = 0;
548 __le32 *ref = (__le32 *)buf; 553 __le32 *ref = (__le32 *)buf;
549 554
@@ -563,7 +568,7 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
563 else 568 else
564 ret = find_block_dqentry(info, dquot, blk); 569 ret = find_block_dqentry(info, dquot, blk);
565out_buf: 570out_buf:
566 freedqbuf(buf); 571 kfree(buf);
567 return ret; 572 return ret;
568} 573}
569 574
@@ -579,7 +584,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
579 int type = dquot->dq_type; 584 int type = dquot->dq_type;
580 struct super_block *sb = dquot->dq_sb; 585 struct super_block *sb = dquot->dq_sb;
581 loff_t offset; 586 loff_t offset;
582 dqbuf_t ddquot; 587 char *ddquot;
583 int ret = 0; 588 int ret = 0;
584 589
585#ifdef __QUOTA_QT_PARANOIA 590#ifdef __QUOTA_QT_PARANOIA
@@ -607,8 +612,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
607 ddquot = getdqbuf(info->dqi_entry_size); 612 ddquot = getdqbuf(info->dqi_entry_size);
608 if (!ddquot) 613 if (!ddquot)
609 return -ENOMEM; 614 return -ENOMEM;
610 ret = sb->s_op->quota_read(sb, type, (char *)ddquot, 615 ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
611 info->dqi_entry_size, dquot->dq_off); 616 dquot->dq_off);
612 if (ret != info->dqi_entry_size) { 617 if (ret != info->dqi_entry_size) {
613 if (ret >= 0) 618 if (ret >= 0)
614 ret = -EIO; 619 ret = -EIO;
@@ -616,7 +621,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
616 "structure for id %u.\n", dquot->dq_id); 621 "structure for id %u.\n", dquot->dq_id);
617 set_bit(DQ_FAKE_B, &dquot->dq_flags); 622 set_bit(DQ_FAKE_B, &dquot->dq_flags);
618 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); 623 memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
619 freedqbuf(ddquot); 624 kfree(ddquot);
620 goto out; 625 goto out;
621 } 626 }
622 spin_lock(&dq_data_lock); 627 spin_lock(&dq_data_lock);
@@ -627,7 +632,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
627 !dquot->dq_dqb.dqb_isoftlimit) 632 !dquot->dq_dqb.dqb_isoftlimit)
628 set_bit(DQ_FAKE_B, &dquot->dq_flags); 633 set_bit(DQ_FAKE_B, &dquot->dq_flags);
629 spin_unlock(&dq_data_lock); 634 spin_unlock(&dq_data_lock);
630 freedqbuf(ddquot); 635 kfree(ddquot);
631out: 636out:
632 dqstats.reads++; 637 dqstats.reads++;
633 return ret; 638 return ret;
@@ -638,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot);
638 * the only one operating on dquot (thanks to dq_lock) */ 643 * the only one operating on dquot (thanks to dq_lock) */
639int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 644int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
640{ 645{
641 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) 646 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
647 !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
642 return qtree_delete_dquot(info, dquot); 648 return qtree_delete_dquot(info, dquot);
643 return 0; 649 return 0;
644} 650}
diff --git a/fs/quota_tree.h b/fs/quota/quota_tree.h
index a1ab8db81a51..a1ab8db81a51 100644
--- a/fs/quota_tree.h
+++ b/fs/quota/quota_tree.h
diff --git a/fs/quota_v1.c b/fs/quota/quota_v1.c
index b4af1c69ad16..0edcf42b1778 100644
--- a/fs/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -62,11 +62,14 @@ static int v1_read_dqblk(struct dquot *dquot)
62 62
63 /* Set structure to 0s in case read fails/is after end of file */ 63 /* Set structure to 0s in case read fails/is after end of file */
64 memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); 64 memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
65 dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); 65 dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
66 sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));
66 67
67 v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); 68 v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
68 if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && 69 if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
69 dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) 70 dquot->dq_dqb.dqb_bsoftlimit == 0 &&
71 dquot->dq_dqb.dqb_ihardlimit == 0 &&
72 dquot->dq_dqb.dqb_isoftlimit == 0)
70 set_bit(DQ_FAKE_B, &dquot->dq_flags); 73 set_bit(DQ_FAKE_B, &dquot->dq_flags);
71 dqstats.reads++; 74 dqstats.reads++;
72 75
@@ -81,13 +84,16 @@ static int v1_commit_dqblk(struct dquot *dquot)
81 84
82 v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); 85 v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
83 if (dquot->dq_id == 0) { 86 if (dquot->dq_id == 0) {
84 dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; 87 dqblk.dqb_btime =
85 dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; 88 sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
89 dqblk.dqb_itime =
90 sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
86 } 91 }
87 ret = 0; 92 ret = 0;
88 if (sb_dqopt(dquot->dq_sb)->files[type]) 93 if (sb_dqopt(dquot->dq_sb)->files[type])
89 ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, 94 ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
90 sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); 95 (char *)&dqblk, sizeof(struct v1_disk_dqblk),
96 v1_dqoff(dquot->dq_id));
91 if (ret != sizeof(struct v1_disk_dqblk)) { 97 if (ret != sizeof(struct v1_disk_dqblk)) {
92 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", 98 printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
93 dquot->dq_sb->s_id); 99 dquot->dq_sb->s_id);
@@ -130,15 +136,20 @@ static int v1_check_quota_file(struct super_block *sb, int type)
130 return 0; 136 return 0;
131 blocks = isize >> BLOCK_SIZE_BITS; 137 blocks = isize >> BLOCK_SIZE_BITS;
132 off = isize & (BLOCK_SIZE - 1); 138 off = isize & (BLOCK_SIZE - 1);
133 if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) 139 if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
140 sizeof(struct v1_disk_dqblk))
134 return 0; 141 return 0;
135 /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */ 142 /* Doublecheck whether we didn't get file with new format - with old
136 size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); 143 * quotactl() this could happen */
144 size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
145 sizeof(struct v2_disk_dqheader), 0);
137 if (size != sizeof(struct v2_disk_dqheader)) 146 if (size != sizeof(struct v2_disk_dqheader))
138 return 1; /* Probably not new format */ 147 return 1; /* Probably not new format */
139 if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) 148 if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
140 return 1; /* Definitely not new format */ 149 return 1; /* Definitely not new format */
141 printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); 150 printk(KERN_INFO
151 "VFS: %s: Refusing to turn on old quota format on given file."
152 " It probably contains newer quota format.\n", sb->s_id);
142 return 0; /* Seems like a new format file -> refuse it */ 153 return 0; /* Seems like a new format file -> refuse it */
143} 154}
144 155
@@ -148,7 +159,9 @@ static int v1_read_file_info(struct super_block *sb, int type)
148 struct v1_disk_dqblk dqblk; 159 struct v1_disk_dqblk dqblk;
149 int ret; 160 int ret;
150 161
151 if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { 162 ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
163 sizeof(struct v1_disk_dqblk), v1_dqoff(0));
164 if (ret != sizeof(struct v1_disk_dqblk)) {
152 if (ret >= 0) 165 if (ret >= 0)
153 ret = -EIO; 166 ret = -EIO;
154 goto out; 167 goto out;
@@ -157,8 +170,10 @@ static int v1_read_file_info(struct super_block *sb, int type)
157 /* limits are stored as unsigned 32-bit data */ 170 /* limits are stored as unsigned 32-bit data */
158 dqopt->info[type].dqi_maxblimit = 0xffffffff; 171 dqopt->info[type].dqi_maxblimit = 0xffffffff;
159 dqopt->info[type].dqi_maxilimit = 0xffffffff; 172 dqopt->info[type].dqi_maxilimit = 0xffffffff;
160 dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; 173 dqopt->info[type].dqi_igrace =
161 dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; 174 dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
175 dqopt->info[type].dqi_bgrace =
176 dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
162out: 177out:
163 return ret; 178 return ret;
164} 179}
@@ -170,8 +185,9 @@ static int v1_write_file_info(struct super_block *sb, int type)
170 int ret; 185 int ret;
171 186
172 dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; 187 dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
173 if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, 188 ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
174 sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { 189 sizeof(struct v1_disk_dqblk), v1_dqoff(0));
190 if (ret != sizeof(struct v1_disk_dqblk)) {
175 if (ret >= 0) 191 if (ret >= 0)
176 ret = -EIO; 192 ret = -EIO;
177 goto out; 193 goto out;
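
v1_check_quota_file(), wrapped above, decides whether a file can plausibly hold old-format records by splitting the size into whole blocks plus a remainder and testing divisibility by the record size. A standalone sketch of that arithmetic; the 44-byte record size is an assumed example value, not taken from quotaio_v1.h:

#include <stdio.h>

#define BLOCK_SIZE_BITS 10
#define BLOCK_SIZE      (1 << BLOCK_SIZE_BITS)
#define DQBLK_SIZE      44      /* assumed record size, for illustration */

/* Mirrors the wrapped check: the size must decompose into whole records. */
static int v1_size_ok(long long isize)
{
        unsigned long blocks = isize >> BLOCK_SIZE_BITS;
        unsigned long off = isize & (BLOCK_SIZE - 1);

        return ((blocks % DQBLK_SIZE * BLOCK_SIZE + off) % DQBLK_SIZE) == 0;
}

int main(void)
{
        printf("%d %d\n", v1_size_ok(44 * 100), v1_size_ok(44 * 100 + 7));
        return 0;
}
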
diff --git a/fs/quota_v2.c b/fs/quota/quota_v2.c
index b618b563635c..a5475fb1ae44 100644
--- a/fs/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type)
54 static const uint quota_magics[] = V2_INITQMAGICS; 54 static const uint quota_magics[] = V2_INITQMAGICS;
55 static const uint quota_versions[] = V2_INITQVERSIONS; 55 static const uint quota_versions[] = V2_INITQVERSIONS;
56 56
57 size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); 57 size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
58 sizeof(struct v2_disk_dqheader), 0);
58 if (size != sizeof(struct v2_disk_dqheader)) { 59 if (size != sizeof(struct v2_disk_dqheader)) {
59 printk("quota_v2: failed read expected=%zd got=%zd\n", 60 printk("quota_v2: failed read expected=%zd got=%zd\n",
60 sizeof(struct v2_disk_dqheader), size); 61 sizeof(struct v2_disk_dqheader), size);
diff --git a/fs/quotaio_v1.h b/fs/quota/quotaio_v1.h
index 746654b5de70..746654b5de70 100644
--- a/fs/quotaio_v1.h
+++ b/fs/quota/quotaio_v1.h
diff --git a/fs/quotaio_v2.h b/fs/quota/quotaio_v2.h
index 530fe580685c..530fe580685c 100644
--- a/fs/quotaio_v2.h
+++ b/fs/quota/quotaio_v2.h
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 5d7c7ececa64..995ef1d6686c 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -18,7 +18,6 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/backing-dev.h> 19#include <linux/backing-dev.h>
20#include <linux/ramfs.h> 20#include <linux/ramfs.h>
21#include <linux/quotaops.h>
22#include <linux/pagevec.h> 21#include <linux/pagevec.h>
23#include <linux/mman.h> 22#include <linux/mman.h>
24 23
@@ -205,11 +204,6 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
205 if (ret) 204 if (ret)
206 return ret; 205 return ret;
207 206
208 /* by providing our own setattr() method, we skip this quotaism */
209 if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
210 (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
211 ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
212
213 /* pick out size-changing events */ 207 /* pick out size-changing events */
214 if (ia->ia_valid & ATTR_SIZE) { 208 if (ia->ia_valid & ATTR_SIZE) {
215 loff_t size = i_size_read(inode); 209 loff_t size = i_size_read(inode);
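
The ramfs hunk deletes the quota-transfer special case from ramfs_nommu_setattr() together with the now-unneeded quotaops.h include, leaving the method to handle only size changes. The removed ownership-change predicate, rewritten as a standalone sketch with simplified stand-in types:

#include <stdio.h>

#define ATTR_UID 0x1
#define ATTR_GID 0x2

struct iattr { unsigned int ia_valid, ia_uid, ia_gid; };
struct inode { unsigned int i_uid, i_gid; };

/* The deleted test: transfer only when a chown actually changes an owner. */
static int needs_transfer(const struct inode *i, const struct iattr *ia)
{
        return (ia->ia_valid & ATTR_UID && ia->ia_uid != i->i_uid) ||
               (ia->ia_valid & ATTR_GID && ia->ia_gid != i->i_gid);
}

int main(void)
{
        struct inode ino = { 1000, 1000 };
        struct iattr chown_req = { ATTR_UID, 0, 1000 };

        printf("%d\n", needs_transfer(&ino, &chown_req));       /* 1 */
        return 0;
}
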
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4646caa60455..f32d1425cc9f 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -430,7 +430,7 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
430 430
431 journal_mark_dirty(th, s, sbh); 431 journal_mark_dirty(th, s, sbh);
432 if (for_unformatted) 432 if (for_unformatted)
433 DQUOT_FREE_BLOCK_NODIRTY(inode, 1); 433 vfs_dq_free_block_nodirty(inode, 1);
434} 434}
435 435
436void reiserfs_free_block(struct reiserfs_transaction_handle *th, 436void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1055,7 +1055,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1055 amount_needed, hint->inode->i_uid); 1055 amount_needed, hint->inode->i_uid);
1056#endif 1056#endif
1057 quota_ret = 1057 quota_ret =
1058 DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed); 1058 vfs_dq_alloc_block_nodirty(hint->inode, amount_needed);
1059 if (quota_ret) /* Quota exceeded? */ 1059 if (quota_ret) /* Quota exceeded? */
1060 return QUOTA_EXCEEDED; 1060 return QUOTA_EXCEEDED;
1061 if (hint->preallocate && hint->prealloc_size) { 1061 if (hint->preallocate && hint->prealloc_size) {
@@ -1064,8 +1064,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1064 "reiserquota: allocating (prealloc) %d blocks id=%u", 1064 "reiserquota: allocating (prealloc) %d blocks id=%u",
1065 hint->prealloc_size, hint->inode->i_uid); 1065 hint->prealloc_size, hint->inode->i_uid);
1066#endif 1066#endif
1067 quota_ret = 1067 quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode,
1068 DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
1069 hint->prealloc_size); 1068 hint->prealloc_size);
1070 if (quota_ret) 1069 if (quota_ret)
1071 hint->preallocate = hint->prealloc_size = 0; 1070 hint->preallocate = hint->prealloc_size = 0;
@@ -1098,7 +1097,10 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1098 nr_allocated, 1097 nr_allocated,
1099 hint->inode->i_uid); 1098 hint->inode->i_uid);
1100#endif 1099#endif
1101 DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */ 1100 /* Free not allocated blocks */
1101 vfs_dq_free_block_nodirty(hint->inode,
1102 amount_needed + hint->prealloc_size -
1103 nr_allocated);
1102 } 1104 }
1103 while (nr_allocated--) 1105 while (nr_allocated--)
1104 reiserfs_free_block(hint->th, hint->inode, 1106 reiserfs_free_block(hint->th, hint->inode,
@@ -1129,7 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
1129 REISERFS_I(hint->inode)->i_prealloc_count, 1131 REISERFS_I(hint->inode)->i_prealloc_count,
1130 hint->inode->i_uid); 1132 hint->inode->i_uid);
1131#endif 1133#endif
1132 DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + 1134 vfs_dq_free_block_nodirty(hint->inode, amount_needed +
1133 hint->prealloc_size - nr_allocated - 1135 hint->prealloc_size - nr_allocated -
1134 REISERFS_I(hint->inode)-> 1136 REISERFS_I(hint->inode)->
1135 i_prealloc_count); 1137 i_prealloc_count);
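
From reiserfs/bitmap.c onward the patch's second big rename shows up: the uppercase DQUOT_* macros become lowercase vfs_dq_* calls. A sketch of what such a macro-to-function conversion generally looks like; the helper and its signature are illustrative, not copied from the kernel headers:

#include <stdio.h>

struct inode { int dummy; };

/* Illustrative low-level helper that both forms forward to. */
static int dquot_alloc_block_nodirty(struct inode *inode, int nr)
{
        (void)inode;
        (void)nr;
        return 0;
}

/* Old style: a macro, no type checking of its own. */
#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
        dquot_alloc_block_nodirty(inode, nr)

/* New style: a static inline with a real prototype. */
static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, int nr)
{
        return dquot_alloc_block_nodirty(inode, nr);
}

int main(void)
{
        struct inode ino = { 0 };

        printf("%d %d\n", DQUOT_ALLOC_BLOCK_NODIRTY(&ino, 1),
               vfs_dq_alloc_block_nodirty(&ino, 1));
        return 0;
}

A real prototype gives the compiler argument type checking and makes the call read like the ordinary function it is.
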
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 55fce92cdf18..823227a7662a 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -53,7 +53,7 @@ void reiserfs_delete_inode(struct inode *inode)
53 * after delete_object so that quota updates go into the same transaction as 53 * after delete_object so that quota updates go into the same transaction as
54 * stat data deletion */ 54 * stat data deletion */
55 if (!err) 55 if (!err)
56 DQUOT_FREE_INODE(inode); 56 vfs_dq_free_inode(inode);
57 57
58 if (journal_end(&th, inode->i_sb, jbegin_count)) 58 if (journal_end(&th, inode->i_sb, jbegin_count))
59 goto out; 59 goto out;
@@ -1763,7 +1763,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1763 1763
1764 BUG_ON(!th->t_trans_id); 1764 BUG_ON(!th->t_trans_id);
1765 1765
1766 if (DQUOT_ALLOC_INODE(inode)) { 1766 if (vfs_dq_alloc_inode(inode)) {
1767 err = -EDQUOT; 1767 err = -EDQUOT;
1768 goto out_end_trans; 1768 goto out_end_trans;
1769 } 1769 }
@@ -1947,12 +1947,12 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1947 INODE_PKEY(inode)->k_objectid = 0; 1947 INODE_PKEY(inode)->k_objectid = 0;
1948 1948
1949 /* Quota change must be inside a transaction for journaling */ 1949 /* Quota change must be inside a transaction for journaling */
1950 DQUOT_FREE_INODE(inode); 1950 vfs_dq_free_inode(inode);
1951 1951
1952 out_end_trans: 1952 out_end_trans:
1953 journal_end(th, th->t_super, th->t_blocks_allocated); 1953 journal_end(th, th->t_super, th->t_blocks_allocated);
1954 /* Drop can be outside and it needs more credits so it's better to have it outside */ 1954 /* Drop can be outside and it needs more credits so it's better to have it outside */
1955 DQUOT_DROP(inode); 1955 vfs_dq_drop(inode);
1956 inode->i_flags |= S_NOQUOTA; 1956 inode->i_flags |= S_NOQUOTA;
1957 make_bad_inode(inode); 1957 make_bad_inode(inode);
1958 1958
@@ -3119,7 +3119,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
3119 if (error) 3119 if (error)
3120 goto out; 3120 goto out;
3121 error = 3121 error =
3122 DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 3122 vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
3123 if (error) { 3123 if (error) {
3124 journal_end(&th, inode->i_sb, 3124 journal_end(&th, inode->i_sb,
3125 jbegin_count); 3125 jbegin_count);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 738967f6c8ee..639d635d9d4b 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -555,7 +555,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
555*/ 555*/
556static int drop_new_inode(struct inode *inode) 556static int drop_new_inode(struct inode *inode)
557{ 557{
558 DQUOT_DROP(inode); 558 vfs_dq_drop(inode);
559 make_bad_inode(inode); 559 make_bad_inode(inode);
560 inode->i_flags |= S_NOQUOTA; 560 inode->i_flags |= S_NOQUOTA;
561 iput(inode); 561 iput(inode);
@@ -563,7 +563,7 @@ static int drop_new_inode(struct inode *inode)
563} 563}
564 564
565/* utility function that does setup for reiserfs_new_inode. 565/* utility function that does setup for reiserfs_new_inode.
566** DQUOT_INIT needs lots of credits so it's better to have it 566** vfs_dq_init needs lots of credits so it's better to have it
567** outside of a transaction, so we had to pull some bits of 567** outside of a transaction, so we had to pull some bits of
568** reiserfs_new_inode out into this func. 568** reiserfs_new_inode out into this func.
569*/ 569*/
@@ -586,7 +586,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
586 } else { 586 } else {
587 inode->i_gid = current_fsgid(); 587 inode->i_gid = current_fsgid();
588 } 588 }
589 DQUOT_INIT(inode); 589 vfs_dq_init(inode);
590 return 0; 590 return 0;
591} 591}
592 592
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index abbc64dcc8d4..73aaa33f6735 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1297,7 +1297,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath
1297 "reiserquota delete_item(): freeing %u, id=%u type=%c", 1297 "reiserquota delete_item(): freeing %u, id=%u type=%c",
1298 quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih)); 1298 quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
1299#endif 1299#endif
1300 DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); 1300 vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
1301 1301
1302 /* Return deleted body length */ 1302 /* Return deleted body length */
1303 return n_ret_value; 1303 return n_ret_value;
@@ -1383,7 +1383,7 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
1383 quota_cut_bytes, inode->i_uid, 1383 quota_cut_bytes, inode->i_uid,
1384 key2type(key)); 1384 key2type(key));
1385#endif 1385#endif
1386 DQUOT_FREE_SPACE_NODIRTY(inode, 1386 vfs_dq_free_space_nodirty(inode,
1387 quota_cut_bytes); 1387 quota_cut_bytes);
1388 } 1388 }
1389 break; 1389 break;
@@ -1734,7 +1734,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
1734 "reiserquota cut_from_item(): freeing %u id=%u type=%c", 1734 "reiserquota cut_from_item(): freeing %u id=%u type=%c",
1735 quota_cut_bytes, p_s_inode->i_uid, '?'); 1735 quota_cut_bytes, p_s_inode->i_uid, '?');
1736#endif 1736#endif
1737 DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes); 1737 vfs_dq_free_space_nodirty(p_s_inode, quota_cut_bytes);
1738 return n_ret_value; 1738 return n_ret_value;
1739} 1739}
1740 1740
@@ -1971,7 +1971,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
1971 key2type(&(p_s_key->on_disk_key))); 1971 key2type(&(p_s_key->on_disk_key)));
1972#endif 1972#endif
1973 1973
1974 if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) { 1974 if (vfs_dq_alloc_space_nodirty(inode, n_pasted_size)) {
1975 pathrelse(p_s_search_path); 1975 pathrelse(p_s_search_path);
1976 return -EDQUOT; 1976 return -EDQUOT;
1977 } 1977 }
@@ -2027,7 +2027,7 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
2027 n_pasted_size, inode->i_uid, 2027 n_pasted_size, inode->i_uid,
2028 key2type(&(p_s_key->on_disk_key))); 2028 key2type(&(p_s_key->on_disk_key)));
2029#endif 2029#endif
2030 DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size); 2030 vfs_dq_free_space_nodirty(inode, n_pasted_size);
2031 return retval; 2031 return retval;
2032} 2032}
2033 2033
@@ -2060,7 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
2060#endif 2060#endif
2061 /* We can't dirty inode here. It would be immediately written but 2061 /* We can't dirty inode here. It would be immediately written but
2062 * appropriate stat item isn't inserted yet... */ 2062 * appropriate stat item isn't inserted yet... */
2063 if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) { 2063 if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) {
2064 pathrelse(p_s_path); 2064 pathrelse(p_s_path);
2065 return -EDQUOT; 2065 return -EDQUOT;
2066 } 2066 }
@@ -2112,6 +2112,6 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath
2112 quota_bytes, inode->i_uid, head2type(p_s_ih)); 2112 quota_bytes, inode->i_uid, head2type(p_s_ih));
2113#endif 2113#endif
2114 if (inode) 2114 if (inode)
2115 DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes); 2115 vfs_dq_free_space_nodirty(inode, quota_bytes);
2116 return retval; 2116 return retval;
2117} 2117}
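
The stree.c hunks consistently use the _nodirty quota variants: the charge must not dirty the inode because, as the comment in reiserfs_insert_item() notes, the matching stat item is not inserted yet. A standalone sketch of that charge-or-bail pattern; the stubs stand in for the kernel helpers:

#include <stdio.h>

static int space_left = 100;

static int vfs_dq_alloc_space_nodirty(int bytes)
{
        if (space_left < bytes)
                return -1;              /* over quota */
        space_left -= bytes;            /* charge, but do NOT dirty inode */
        return 0;
}

static void pathrelse(void)
{
        /* drop tree-path references before bailing out */
}

static int paste_into_item(int bytes)
{
        if (vfs_dq_alloc_space_nodirty(bytes)) {
                pathrelse();
                return -1;              /* -EDQUOT in the kernel */
        }
        return 0;
}

int main(void)
{
        int a = paste_into_item(60);
        int b = paste_into_item(60);

        printf("%d %d\n", a, b);        /* 0 -1: second charge is refused */
        return 0;
}
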
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f3c820b75829..5dbafb739401 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -250,7 +250,7 @@ static int finish_unfinished(struct super_block *s)
250 retval = remove_save_link_only(s, &save_link_key, 0); 250 retval = remove_save_link_only(s, &save_link_key, 0);
251 continue; 251 continue;
252 } 252 }
253 DQUOT_INIT(inode); 253 vfs_dq_init(inode);
254 254
255 if (truncate && S_ISDIR(inode->i_mode)) { 255 if (truncate && S_ISDIR(inode->i_mode)) {
256 /* We got a truncate request for a dir which is impossible. 256 /* We got a truncate request for a dir which is impossible.
@@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = {
629#ifdef CONFIG_QUOTA 629#ifdef CONFIG_QUOTA
630#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") 630#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
631 631
632static int reiserfs_dquot_initialize(struct inode *, int);
633static int reiserfs_dquot_drop(struct inode *);
634static int reiserfs_write_dquot(struct dquot *); 632static int reiserfs_write_dquot(struct dquot *);
635static int reiserfs_acquire_dquot(struct dquot *); 633static int reiserfs_acquire_dquot(struct dquot *);
636static int reiserfs_release_dquot(struct dquot *); 634static int reiserfs_release_dquot(struct dquot *);
@@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int);
639static int reiserfs_quota_on(struct super_block *, int, int, char *, int); 637static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
640 638
641static struct dquot_operations reiserfs_quota_operations = { 639static struct dquot_operations reiserfs_quota_operations = {
642 .initialize = reiserfs_dquot_initialize, 640 .initialize = dquot_initialize,
643 .drop = reiserfs_dquot_drop, 641 .drop = dquot_drop,
644 .alloc_space = dquot_alloc_space, 642 .alloc_space = dquot_alloc_space,
645 .alloc_inode = dquot_alloc_inode, 643 .alloc_inode = dquot_alloc_inode,
646 .free_space = dquot_free_space, 644 .free_space = dquot_free_space,
@@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1896} 1894}
1897 1895
1898#ifdef CONFIG_QUOTA 1896#ifdef CONFIG_QUOTA
1899static int reiserfs_dquot_initialize(struct inode *inode, int type)
1900{
1901 struct reiserfs_transaction_handle th;
1902 int ret, err;
1903
1904 /* We may create quota structure so we need to reserve enough blocks */
1905 reiserfs_write_lock(inode->i_sb);
1906 ret =
1907 journal_begin(&th, inode->i_sb,
1908 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
1909 if (ret)
1910 goto out;
1911 ret = dquot_initialize(inode, type);
1912 err =
1913 journal_end(&th, inode->i_sb,
1914 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
1915 if (!ret && err)
1916 ret = err;
1917 out:
1918 reiserfs_write_unlock(inode->i_sb);
1919 return ret;
1920}
1921
1922static int reiserfs_dquot_drop(struct inode *inode)
1923{
1924 struct reiserfs_transaction_handle th;
1925 int ret, err;
1926
1927 /* We may delete quota structure so we need to reserve enough blocks */
1928 reiserfs_write_lock(inode->i_sb);
1929 ret =
1930 journal_begin(&th, inode->i_sb,
1931 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
1932 if (ret) {
1933 /*
1934 * We call dquot_drop() anyway to at least release references
1935 * to quota structures so that umount does not hang.
1936 */
1937 dquot_drop(inode);
1938 goto out;
1939 }
1940 ret = dquot_drop(inode);
1941 err =
1942 journal_end(&th, inode->i_sb,
1943 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
1944 if (!ret && err)
1945 ret = err;
1946 out:
1947 reiserfs_write_unlock(inode->i_sb);
1948 return ret;
1949}
1950
1951static int reiserfs_write_dquot(struct dquot *dquot) 1897static int reiserfs_write_dquot(struct dquot *dquot)
1952{ 1898{
1953 struct reiserfs_transaction_handle th; 1899 struct reiserfs_transaction_handle th;
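
The super.c hunks delete reiserfs_dquot_initialize() and reiserfs_dquot_drop(), whose only job was to wrap the generic helpers in a journal transaction, and point the .initialize and .drop operations at dquot_initialize and dquot_drop directly. A standalone sketch of the simplified ops-table shape, with stand-in types:

#include <stdio.h>

struct dquot_ops {
        int (*initialize)(void);
        int (*drop)(void);
};

/* Generic helpers, previously hidden behind journaled wrappers. */
static int dquot_initialize(void) { return 0; }
static int dquot_drop(void)       { return 0; }

static const struct dquot_ops quota_operations = {
        .initialize = dquot_initialize, /* was reiserfs_dquot_initialize */
        .drop       = dquot_drop,       /* was reiserfs_dquot_drop */
};

int main(void)
{
        printf("%d\n", quota_operations.initialize() |
                       quota_operations.drop());
        return 0;
}
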
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ad92461cbfc3..ae881ccd2f03 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -1136,7 +1136,7 @@ xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
1136 return 1; 1136 return 1;
1137} 1137}
1138 1138
1139static struct dentry_operations xattr_lookup_poison_ops = { 1139static const struct dentry_operations xattr_lookup_poison_ops = {
1140 .d_compare = xattr_lookup_poison, 1140 .d_compare = xattr_lookup_poison,
1141}; 1141};
1142 1142
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index e7ddd0328ddc..3e4803b4427e 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -277,7 +277,7 @@ static int smb_hash_dentry(struct dentry *, struct qstr *);
277static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 277static int smb_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
278static int smb_delete_dentry(struct dentry *); 278static int smb_delete_dentry(struct dentry *);
279 279
280static struct dentry_operations smbfs_dentry_operations = 280static const struct dentry_operations smbfs_dentry_operations =
281{ 281{
282 .d_revalidate = smb_lookup_validate, 282 .d_revalidate = smb_lookup_validate,
283 .d_hash = smb_hash_dentry, 283 .d_hash = smb_hash_dentry,
@@ -285,7 +285,7 @@ static struct dentry_operations smbfs_dentry_operations =
285 .d_delete = smb_delete_dentry, 285 .d_delete = smb_delete_dentry,
286}; 286};
287 287
288static struct dentry_operations smbfs_dentry_operations_case = 288static const struct dentry_operations smbfs_dentry_operations_case =
289{ 289{
290 .d_revalidate = smb_lookup_validate, 290 .d_revalidate = smb_lookup_validate,
291 .d_delete = smb_delete_dentry, 291 .d_delete = smb_delete_dentry,
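
The xattr.c and smbfs hunks, and the sysfs and sysv ones below, all make one change: dentry_operations tables become const. A const ops table lets the compiler reject stray writes and lets the table live in read-only data. A minimal sketch with a stand-in ops struct:

#include <stdio.h>

struct d_ops {
        int (*d_hash)(const char *name);
};

static int my_hash(const char *name)
{
        return name[0];
}

static const struct d_ops my_dentry_ops = {
        .d_hash = my_hash,
};

int main(void)
{
        /* my_dentry_ops.d_hash = NULL; would now fail to compile */
        printf("%d\n", my_dentry_ops.d_hash("x"));
        return 0;
}
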
diff --git a/fs/super.c b/fs/super.c
index dd4acb158b5e..2ba481518ba7 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -197,7 +197,7 @@ void deactivate_super(struct super_block *s)
197 if (atomic_dec_and_lock(&s->s_active, &sb_lock)) { 197 if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
198 s->s_count -= S_BIAS-1; 198 s->s_count -= S_BIAS-1;
199 spin_unlock(&sb_lock); 199 spin_unlock(&sb_lock);
200 DQUOT_OFF(s, 0); 200 vfs_dq_off(s, 0);
201 down_write(&s->s_umount); 201 down_write(&s->s_umount);
202 fs->kill_sb(s); 202 fs->kill_sb(s);
203 put_filesystem(fs); 203 put_filesystem(fs);
@@ -266,7 +266,7 @@ EXPORT_SYMBOL(unlock_super);
266void __fsync_super(struct super_block *sb) 266void __fsync_super(struct super_block *sb)
267{ 267{
268 sync_inodes_sb(sb, 0); 268 sync_inodes_sb(sb, 0);
269 DQUOT_SYNC(sb); 269 vfs_dq_sync(sb);
270 lock_super(sb); 270 lock_super(sb);
271 if (sb->s_dirt && sb->s_op->write_super) 271 if (sb->s_dirt && sb->s_op->write_super)
272 sb->s_op->write_super(sb); 272 sb->s_op->write_super(sb);
@@ -655,7 +655,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
655 mark_files_ro(sb); 655 mark_files_ro(sb);
656 else if (!fs_may_remount_ro(sb)) 656 else if (!fs_may_remount_ro(sb))
657 return -EBUSY; 657 return -EBUSY;
658 retval = DQUOT_OFF(sb, 1); 658 retval = vfs_dq_off(sb, 1);
659 if (retval < 0 && retval != -ENOSYS) 659 if (retval < 0 && retval != -ENOSYS)
660 return -EBUSY; 660 return -EBUSY;
661 } 661 }
@@ -670,7 +670,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
670 } 670 }
671 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); 671 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
672 if (remount_rw) 672 if (remount_rw)
673 DQUOT_ON_REMOUNT(sb); 673 vfs_dq_quota_on_remount(sb);
674 return 0; 674 return 0;
675} 675}
676 676
@@ -838,7 +838,8 @@ int get_sb_bdev(struct file_system_type *fs_type,
838 bdev->bd_super = s; 838 bdev->bd_super = s;
839 } 839 }
840 840
841 return simple_set_mnt(mnt, s); 841 simple_set_mnt(mnt, s);
842 return 0;
842 843
843error_s: 844error_s:
844 error = PTR_ERR(s); 845 error = PTR_ERR(s);
@@ -884,7 +885,8 @@ int get_sb_nodev(struct file_system_type *fs_type,
884 return error; 885 return error;
885 } 886 }
886 s->s_flags |= MS_ACTIVE; 887 s->s_flags |= MS_ACTIVE;
887 return simple_set_mnt(mnt, s); 888 simple_set_mnt(mnt, s);
889 return 0;
888} 890}
889 891
890EXPORT_SYMBOL(get_sb_nodev); 892EXPORT_SYMBOL(get_sb_nodev);
@@ -916,7 +918,8 @@ int get_sb_single(struct file_system_type *fs_type,
916 s->s_flags |= MS_ACTIVE; 918 s->s_flags |= MS_ACTIVE;
917 } 919 }
918 do_remount_sb(s, flags, data, 0); 920 do_remount_sb(s, flags, data, 0);
919 return simple_set_mnt(mnt, s); 921 simple_set_mnt(mnt, s);
922 return 0;
920} 923}
921 924
922EXPORT_SYMBOL(get_sb_single); 925EXPORT_SYMBOL(get_sb_single);
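
fs/super.c, and the ubifs hunk below, also adapt to simple_set_mnt() becoming a helper that returns nothing: since it could not fail, callers now invoke it and return 0 themselves. A sketch of the calling-convention change with stand-in names:

#include <stdio.h>

struct vfsmount { int set; };

static void set_mnt(struct vfsmount *mnt)       /* cannot fail: now void */
{
        mnt->set = 1;
}

static int get_sb(struct vfsmount *mnt)
{
        set_mnt(mnt);           /* was: return set_mnt(mnt); */
        return 0;
}

int main(void)
{
        struct vfsmount mnt = { 0 };
        int ret = get_sb(&mnt);

        printf("%d %d\n", ret, mnt.set);        /* 0 1 */
        return 0;
}
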
diff --git a/fs/sync.c b/fs/sync.c
index ec95a69d17aa..7abc65fbf21d 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -25,7 +25,7 @@ static void do_sync(unsigned long wait)
25{ 25{
26 wakeup_pdflush(0); 26 wakeup_pdflush(0);
27 sync_inodes(0); /* All mappings, inodes and their blockdevs */ 27 sync_inodes(0); /* All mappings, inodes and their blockdevs */
28 DQUOT_SYNC(NULL); 28 vfs_dq_sync(NULL);
29 sync_supers(); /* Write the superblocks */ 29 sync_supers(); /* Write the superblocks */
30 sync_filesystems(0); /* Start syncing the filesystems */ 30 sync_filesystems(0); /* Start syncing the filesystems */
31 sync_filesystems(wait); /* Waitingly sync the filesystems */ 31 sync_filesystems(wait); /* Waitingly sync the filesystems */
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 66aeb4fff0c3..d88d0fac9fa5 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -302,7 +302,7 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
302 iput(inode); 302 iput(inode);
303} 303}
304 304
305static struct dentry_operations sysfs_dentry_ops = { 305static const struct dentry_operations sysfs_dentry_ops = {
306 .d_iput = sysfs_d_iput, 306 .d_iput = sysfs_d_iput,
307}; 307};
308 308
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index a1f1ef33e81c..33e047b59b8d 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -38,7 +38,7 @@ static int sysv_hash(struct dentry *dentry, struct qstr *qstr)
38 return 0; 38 return 0;
39} 39}
40 40
41struct dentry_operations sysv_dentry_operations = { 41const struct dentry_operations sysv_dentry_operations = {
42 .d_hash = sysv_hash, 42 .d_hash = sysv_hash,
43}; 43};
44 44
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 38ebe3f85b3d..5784a318c883 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -170,7 +170,7 @@ extern const struct file_operations sysv_file_operations;
170extern const struct file_operations sysv_dir_operations; 170extern const struct file_operations sysv_dir_operations;
171extern const struct address_space_operations sysv_aops; 171extern const struct address_space_operations sysv_aops;
172extern const struct super_operations sysv_sops; 172extern const struct super_operations sysv_sops;
173extern struct dentry_operations sysv_dentry_operations; 173extern const struct dentry_operations sysv_dentry_operations;
174 174
175 175
176enum { 176enum {
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1182b66a5491..c5c98355459a 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2034,7 +2034,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
2034 /* 'fill_super()' opens ubi again so we must close it here */ 2034 /* 'fill_super()' opens ubi again so we must close it here */
2035 ubi_close_volume(ubi); 2035 ubi_close_volume(ubi);
2036 2036
2037 return simple_set_mnt(mnt, sb); 2037 simple_set_mnt(mnt, sb);
2038 return 0;
2038 2039
2039out_deact: 2040out_deact:
2040 up_write(&sb->s_umount); 2041 up_write(&sb->s_umount);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b809bd494bd..2bb788a2acb1 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -206,7 +206,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
206 ((char *)bh->b_data)[(bit + i) >> 3]); 206 ((char *)bh->b_data)[(bit + i) >> 3]);
207 } else { 207 } else {
208 if (inode) 208 if (inode)
209 DQUOT_FREE_BLOCK(inode, 1); 209 vfs_dq_free_block(inode, 1);
210 udf_add_free_space(sbi, sbi->s_partition, 1); 210 udf_add_free_space(sbi, sbi->s_partition, 1);
211 } 211 }
212 } 212 }
@@ -261,11 +261,11 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
261 while (bit < (sb->s_blocksize << 3) && block_count > 0) { 261 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
262 if (!udf_test_bit(bit, bh->b_data)) 262 if (!udf_test_bit(bit, bh->b_data))
263 goto out; 263 goto out;
264 else if (DQUOT_PREALLOC_BLOCK(inode, 1)) 264 else if (vfs_dq_prealloc_block(inode, 1))
265 goto out; 265 goto out;
266 else if (!udf_clear_bit(bit, bh->b_data)) { 266 else if (!udf_clear_bit(bit, bh->b_data)) {
267 udf_debug("bit already cleared for block %d\n", bit); 267 udf_debug("bit already cleared for block %d\n", bit);
268 DQUOT_FREE_BLOCK(inode, 1); 268 vfs_dq_free_block(inode, 1);
269 goto out; 269 goto out;
270 } 270 }
271 block_count--; 271 block_count--;
@@ -393,7 +393,7 @@ got_block:
393 /* 393 /*
394 * Check quota for allocation of this block. 394 * Check quota for allocation of this block.
395 */ 395 */
396 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { 396 if (inode && vfs_dq_alloc_block(inode, 1)) {
397 mutex_unlock(&sbi->s_alloc_mutex); 397 mutex_unlock(&sbi->s_alloc_mutex);
398 *err = -EDQUOT; 398 *err = -EDQUOT;
399 return 0; 399 return 0;
@@ -452,7 +452,7 @@ static void udf_table_free_blocks(struct super_block *sb,
452 /* We do this up front - There are some error conditions that 452 /* We do this up front - There are some error conditions that
453 could occure, but.. oh well */ 453 could occure, but.. oh well */
454 if (inode) 454 if (inode)
455 DQUOT_FREE_BLOCK(inode, count); 455 vfs_dq_free_block(inode, count);
456 if (udf_add_free_space(sbi, sbi->s_partition, count)) 456 if (udf_add_free_space(sbi, sbi->s_partition, count))
457 mark_buffer_dirty(sbi->s_lvid_bh); 457 mark_buffer_dirty(sbi->s_lvid_bh);
458 458
@@ -700,7 +700,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
700 epos.offset -= adsize; 700 epos.offset -= adsize;
701 701
702 alloc_count = (elen >> sb->s_blocksize_bits); 702 alloc_count = (elen >> sb->s_blocksize_bits);
703 if (inode && DQUOT_PREALLOC_BLOCK(inode, 703 if (inode && vfs_dq_prealloc_block(inode,
704 alloc_count > block_count ? block_count : alloc_count)) 704 alloc_count > block_count ? block_count : alloc_count))
705 alloc_count = 0; 705 alloc_count = 0;
706 else if (alloc_count > block_count) { 706 else if (alloc_count > block_count) {
@@ -806,7 +806,7 @@ static int udf_table_new_block(struct super_block *sb,
806 goal_eloc.logicalBlockNum++; 806 goal_eloc.logicalBlockNum++;
807 goal_elen -= sb->s_blocksize; 807 goal_elen -= sb->s_blocksize;
808 808
809 if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { 809 if (inode && vfs_dq_alloc_block(inode, 1)) {
810 brelse(goal_epos.bh); 810 brelse(goal_epos.bh);
811 mutex_unlock(&sbi->s_alloc_mutex); 811 mutex_unlock(&sbi->s_alloc_mutex);
812 *err = -EDQUOT; 812 *err = -EDQUOT;
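
The udf (and later ufs) allocator hunks show the charge-then-roll-back discipline around the renamed helpers: quota is charged before the allocation commits and freed again on every failure path. A runnable sketch with counting stubs in place of the vfs_dq_* helpers:

#include <stdio.h>

static int quota_left = 2;

static int vfs_dq_alloc_block(int nr)           /* stub: charge quota */
{
        if (quota_left < nr)
                return -1;
        quota_left -= nr;
        return 0;
}

static void vfs_dq_free_block(int nr)           /* stub: undo the charge */
{
        quota_left += nr;
}

static int alloc_block(int fails_after_charge)
{
        if (vfs_dq_alloc_block(1))
                return -1;                      /* -EDQUOT in the kernel */
        if (fails_after_charge) {
                vfs_dq_free_block(1);           /* roll the charge back */
                return -1;
        }
        return 0;
}

int main(void)
{
        int a = alloc_block(0);
        int b = alloc_block(1);

        printf("%d %d %d\n", a, b, quota_left); /* 0 -1 1 */
        return 0;
}
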
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 31fc84297ddb..47dbe5613f90 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -36,8 +36,8 @@ void udf_free_inode(struct inode *inode)
36 * Note: we must free any quota before locking the superblock, 36 * Note: we must free any quota before locking the superblock,
37 * as writing the quota to disk may need the lock as well. 37 * as writing the quota to disk may need the lock as well.
38 */ 38 */
39 DQUOT_FREE_INODE(inode); 39 vfs_dq_free_inode(inode);
40 DQUOT_DROP(inode); 40 vfs_dq_drop(inode);
41 41
42 clear_inode(inode); 42 clear_inode(inode);
43 43
@@ -154,8 +154,8 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
154 insert_inode_hash(inode); 154 insert_inode_hash(inode);
155 mark_inode_dirty(inode); 155 mark_inode_dirty(inode);
156 156
157 if (DQUOT_ALLOC_INODE(inode)) { 157 if (vfs_dq_alloc_inode(inode)) {
158 DQUOT_DROP(inode); 158 vfs_dq_drop(inode);
159 inode->i_flags |= S_NOQUOTA; 159 inode->i_flags |= S_NOQUOTA;
160 inode->i_nlink = 0; 160 inode->i_nlink = 0;
161 iput(inode); 161 iput(inode);
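
udf_new_inode() above, like the ufs hunks below, uses the standard new-inode failure sequence when the quota charge is refused: drop the inode's quota references, mark it S_NOQUOTA so teardown skips quota, clear the link count, and release it. A standalone sketch; every name here is a stub, not the kernel API:

#include <stdio.h>

#define S_NOQUOTA 0x1

struct inode { int i_flags; int i_nlink; };

static int vfs_dq_alloc_inode(struct inode *inode)      /* stub: refuse */
{
        (void)inode;
        return -1;
}

static void vfs_dq_drop(struct inode *inode) { (void)inode; }
static void iput(struct inode *inode)        { (void)inode; }

static int charge_new_inode(struct inode *inode)
{
        if (vfs_dq_alloc_inode(inode)) {
                vfs_dq_drop(inode);             /* release quota refs */
                inode->i_flags |= S_NOQUOTA;    /* teardown skips quota */
                inode->i_nlink = 0;
                iput(inode);
                return -1;                      /* -EDQUOT in the kernel */
        }
        return 0;
}

int main(void)
{
        struct inode ino = { 0, 1 };

        printf("%d\n", charge_new_inode(&ino));
        return 0;
}
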
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0d9ada173739..54c16ec95dff 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -85,7 +85,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
85 "bit already cleared for fragment %u", i); 85 "bit already cleared for fragment %u", i);
86 } 86 }
87 87
88 DQUOT_FREE_BLOCK (inode, count); 88 vfs_dq_free_block(inode, count);
89 89
90 90
91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
@@ -195,7 +195,7 @@ do_more:
195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
197 ufs_clusteracct (sb, ucpi, blkno, 1); 197 ufs_clusteracct (sb, ucpi, blkno, 1);
198 DQUOT_FREE_BLOCK(inode, uspi->s_fpb); 198 vfs_dq_free_block(inode, uspi->s_fpb);
199 199
200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
201 uspi->cs_total.cs_nbfree++; 201 uspi->cs_total.cs_nbfree++;
@@ -556,7 +556,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
556 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); 556 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
557 for (i = oldcount; i < newcount; i++) 557 for (i = oldcount; i < newcount; i++)
558 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); 558 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
559 if(DQUOT_ALLOC_BLOCK(inode, count)) { 559 if (vfs_dq_alloc_block(inode, count)) {
560 *err = -EDQUOT; 560 *err = -EDQUOT;
561 return 0; 561 return 0;
562 } 562 }
@@ -664,7 +664,7 @@ cg_found:
664 for (i = count; i < uspi->s_fpb; i++) 664 for (i = count; i < uspi->s_fpb; i++)
665 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 665 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
666 i = uspi->s_fpb - count; 666 i = uspi->s_fpb - count;
667 DQUOT_FREE_BLOCK(inode, i); 667 vfs_dq_free_block(inode, i);
668 668
669 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 669 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
670 uspi->cs_total.cs_nffree += i; 670 uspi->cs_total.cs_nffree += i;
@@ -676,7 +676,7 @@ cg_found:
676 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 676 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
677 if (result == INVBLOCK) 677 if (result == INVBLOCK)
678 return 0; 678 return 0;
679 if(DQUOT_ALLOC_BLOCK(inode, count)) { 679 if (vfs_dq_alloc_block(inode, count)) {
680 *err = -EDQUOT; 680 *err = -EDQUOT;
681 return 0; 681 return 0;
682 } 682 }
@@ -747,7 +747,7 @@ gotit:
747 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 747 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
748 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 748 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
749 ufs_clusteracct (sb, ucpi, blkno, -1); 749 ufs_clusteracct (sb, ucpi, blkno, -1);
750 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) { 750 if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
751 *err = -EDQUOT; 751 *err = -EDQUOT;
752 return INVBLOCK; 752 return INVBLOCK;
753 } 753 }
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 6f5dcf006096..3527c00fef0d 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -95,8 +95,8 @@ void ufs_free_inode (struct inode * inode)
95 95
96 is_directory = S_ISDIR(inode->i_mode); 96 is_directory = S_ISDIR(inode->i_mode);
97 97
98 DQUOT_FREE_INODE(inode); 98 vfs_dq_free_inode(inode);
99 DQUOT_DROP(inode); 99 vfs_dq_drop(inode);
100 100
101 clear_inode (inode); 101 clear_inode (inode);
102 102
@@ -355,8 +355,8 @@ cg_found:
355 355
356 unlock_super (sb); 356 unlock_super (sb);
357 357
358 if (DQUOT_ALLOC_INODE(inode)) { 358 if (vfs_dq_alloc_inode(inode)) {
359 DQUOT_DROP(inode); 359 vfs_dq_drop(inode);
360 err = -EDQUOT; 360 err = -EDQUOT;
361 goto fail_without_unlock; 361 goto fail_without_unlock;
362 } 362 }
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 39f877898565..3d2512c21f05 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -622,7 +622,6 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
622 struct ufs_inode_info *ufsi = UFS_I(inode); 622 struct ufs_inode_info *ufsi = UFS_I(inode);
623 struct super_block *sb = inode->i_sb; 623 struct super_block *sb = inode->i_sb;
624 mode_t mode; 624 mode_t mode;
625 unsigned i;
626 625
627 /* 626 /*
628 * Copy data to the in-core inode. 627 * Copy data to the in-core inode.
@@ -655,11 +654,12 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
655 654
656 655
657 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 656 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
658 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 657 memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
659 ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i]; 658 sizeof(ufs_inode->ui_u2.ui_addr));
660 } else { 659 } else {
661 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 660 memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
662 ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i]; 661 sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
662 ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
663 } 663 }
664 return 0; 664 return 0;
665} 665}
@@ -669,7 +669,6 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
669 struct ufs_inode_info *ufsi = UFS_I(inode); 669 struct ufs_inode_info *ufsi = UFS_I(inode);
670 struct super_block *sb = inode->i_sb; 670 struct super_block *sb = inode->i_sb;
671 mode_t mode; 671 mode_t mode;
672 unsigned i;
673 672
674 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino); 673 UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
675 /* 674 /*
@@ -704,12 +703,12 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
704 */ 703 */
705 704
706 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 705 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
707 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 706 memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
708 ufsi->i_u1.u2_i_data[i] = 707 sizeof(ufs2_inode->ui_u2.ui_addr));
709 ufs2_inode->ui_u2.ui_addr.ui_db[i];
710 } else { 708 } else {
711 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 709 memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
712 ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i]; 710 sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
711 ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
713 } 712 }
714 return 0; 713 return 0;
715} 714}
@@ -781,7 +780,6 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
781{ 780{
782 struct super_block *sb = inode->i_sb; 781 struct super_block *sb = inode->i_sb;
783 struct ufs_inode_info *ufsi = UFS_I(inode); 782 struct ufs_inode_info *ufsi = UFS_I(inode);
784 unsigned i;
785 783
786 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 784 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
787 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink); 785 ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
@@ -809,12 +807,12 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
809 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ 807 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
810 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0]; 808 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
811 } else if (inode->i_blocks) { 809 } else if (inode->i_blocks) {
812 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 810 memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
813 ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i]; 811 sizeof(ufs_inode->ui_u2.ui_addr));
814 } 812 }
815 else { 813 else {
816 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 814 memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
817 ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; 815 sizeof(ufs_inode->ui_u2.ui_symlink));
818 } 816 }
819 817
820 if (!inode->i_nlink) 818 if (!inode->i_nlink)
@@ -825,7 +823,6 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
825{ 823{
826 struct super_block *sb = inode->i_sb; 824 struct super_block *sb = inode->i_sb;
827 struct ufs_inode_info *ufsi = UFS_I(inode); 825 struct ufs_inode_info *ufsi = UFS_I(inode);
828 unsigned i;
829 826
830 UFSD("ENTER\n"); 827 UFSD("ENTER\n");
831 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode); 828 ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
@@ -850,11 +847,11 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
850 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */ 847 /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
851 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0]; 848 ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
852 } else if (inode->i_blocks) { 849 } else if (inode->i_blocks) {
853 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++) 850 memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
854 ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i]; 851 sizeof(ufs_inode->ui_u2.ui_addr));
855 } else { 852 } else {
856 for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++) 853 memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
857 ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i]; 854 sizeof(ufs_inode->ui_u2.ui_symlink));
858 } 855 }
859 856
860 if (!inode->i_nlink) 857 if (!inode->i_nlink)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index e3a9b1fac75a..23119fe7ad62 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -147,7 +147,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
147 } else { 147 } else {
148 /* fast symlink */ 148 /* fast symlink */
149 inode->i_op = &ufs_fast_symlink_inode_operations; 149 inode->i_op = &ufs_fast_symlink_inode_operations;
150 memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l); 150 memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l);
151 inode->i_size = l-1; 151 inode->i_size = l-1;
152 } 152 }
153 mark_inode_dirty(inode); 153 mark_inode_dirty(inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 261a1c2f22dd..e1c1fc5ee239 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -636,6 +636,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
636 unsigned block_size, super_block_size; 636 unsigned block_size, super_block_size;
637 unsigned flags; 637 unsigned flags;
638 unsigned super_block_offset; 638 unsigned super_block_offset;
639 unsigned maxsymlen;
639 int ret = -EINVAL; 640 int ret = -EINVAL;
640 641
641 uspi = NULL; 642 uspi = NULL;
@@ -1069,6 +1070,16 @@ magic_found:
1069 uspi->s_maxsymlinklen = 1070 uspi->s_maxsymlinklen =
1070 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); 1071 fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
1071 1072
1073 if (uspi->fs_magic == UFS2_MAGIC)
1074 maxsymlen = 2 * 4 * (UFS_NDADDR + UFS_NINDIR);
1075 else
1076 maxsymlen = 4 * (UFS_NDADDR + UFS_NINDIR);
1077 if (uspi->s_maxsymlinklen > maxsymlen) {
1078 ufs_warning(sb, __func__, "ufs_read_super: excessive maximum "
1079 "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
1080 uspi->s_maxsymlinklen = maxsymlen;
1081 }
1082
1072 inode = ufs_iget(sb, UFS_ROOTINO); 1083 inode = ufs_iget(sb, UFS_ROOTINO);
1073 if (IS_ERR(inode)) { 1084 if (IS_ERR(inode)) {
1074 ret = PTR_ERR(inode); 1085 ret = PTR_ERR(inode);
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 11c035168ea6..69b3427d7885 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -23,7 +23,7 @@ struct ufs_sb_info {
23struct ufs_inode_info { 23struct ufs_inode_info {
24 union { 24 union {
25 __fs32 i_data[15]; 25 __fs32 i_data[15];
26 __u8 i_symlink[4*15]; 26 __u8 i_symlink[2 * 4 * 15];
27 __fs64 u2_i_data[15]; 27 __fs64 u2_i_data[15];
28 } i_u1; 28 } i_u1;
29 __u32 i_flags; 29 __u32 i_flags;
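
The union above doubles i_symlink because UFS2 keeps its block pointers as 64-bit __fs64 values, so the on-disk area a fast symlink can reuse is twice as large as on UFS1; the new superblock check clamps s_maxsymlinklen to exactly these bounds. A standalone sketch (not part of the patch, kernel constants inlined) of how the two limits are derived:

	/* UFS_NDADDR direct + UFS_NINDIR indirect block pointers share the
	 * inode area that fast symlinks reuse; UFS1 stores them as 32-bit
	 * words, UFS2 as 64-bit words, so the symlink capacity doubles. */
	#include <stdio.h>

	#define UFS_NDADDR 12
	#define UFS_NINDIR 3

	int main(void)
	{
		unsigned ufs1_max = 4 * (UFS_NDADDR + UFS_NINDIR);     /* 60 */
		unsigned ufs2_max = 2 * 4 * (UFS_NDADDR + UFS_NINDIR); /* 120 */

		printf("UFS1 fast symlink max: %u bytes\n", ufs1_max);
		printf("UFS2 fast symlink max: %u bytes\n", ufs2_max);
		return 0;
	}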
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index e5f4ae989abf..c19a93c3be85 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -758,6 +758,8 @@ struct drm_driver {
758 758
759 int (*proc_init)(struct drm_minor *minor); 759 int (*proc_init)(struct drm_minor *minor);
760 void (*proc_cleanup)(struct drm_minor *minor); 760 void (*proc_cleanup)(struct drm_minor *minor);
761 int (*debugfs_init)(struct drm_minor *minor);
762 void (*debugfs_cleanup)(struct drm_minor *minor);
761 763
762 /** 764 /**
763 * Driver-specific constructor for drm_gem_objects, to set up 765 * Driver-specific constructor for drm_gem_objects, to set up
@@ -793,6 +795,48 @@ struct drm_driver {
793#define DRM_MINOR_CONTROL 2 795#define DRM_MINOR_CONTROL 2
794#define DRM_MINOR_RENDER 3 796#define DRM_MINOR_RENDER 3
795 797
798
799/**
800 * debugfs node list. This structure represents a debugfs file to
801 * be created by the drm core
802 */
803struct drm_debugfs_list {
804 const char *name; /** file name */
805 int (*show)(struct seq_file*, void*); /** show callback */
806 u32 driver_features; /**< Required driver features for this entry */
807};
808
809/**
810 * debugfs node structure. This structure represents a debugfs file.
811 */
812struct drm_debugfs_node {
813 struct list_head list;
814 struct drm_minor *minor;
815 struct drm_debugfs_list *debugfs_ent;
816 struct dentry *dent;
817};
818
819/**
820 * Info file list entry. This structure represents a debugfs or proc file to
821 * be created by the drm core
822 */
823struct drm_info_list {
824 const char *name; /** file name */
825 int (*show)(struct seq_file*, void*); /** show callback */
826 u32 driver_features; /**< Required driver features for this entry */
827 void *data;
828};
829
830/**
 831 * Info file node structure. This structure represents a debugfs or proc file.
832 */
833struct drm_info_node {
834 struct list_head list;
835 struct drm_minor *minor;
836 struct drm_info_list *info_ent;
837 struct dentry *dent;
838};
839
796/** 840/**
797 * DRM minor structure. This structure represents a drm minor number. 841 * DRM minor structure. This structure represents a drm minor number.
798 */ 842 */
@@ -802,7 +846,12 @@ struct drm_minor {
802 dev_t device; /**< Device number for mknod */ 846 dev_t device; /**< Device number for mknod */
803 struct device kdev; /**< Linux device */ 847 struct device kdev; /**< Linux device */
804 struct drm_device *dev; 848 struct drm_device *dev;
805 struct proc_dir_entry *dev_root; /**< proc directory entry */ 849
850 struct proc_dir_entry *proc_root; /**< proc directory entry */
851 struct drm_info_node proc_nodes;
852 struct dentry *debugfs_root;
853 struct drm_info_node debugfs_nodes;
854
806 struct drm_master *master; /* currently active master for this node */ 855 struct drm_master *master; /* currently active master for this node */
807 struct list_head master_list; 856 struct list_head master_list;
808 struct drm_mode_group mode_group; 857 struct drm_mode_group mode_group;
@@ -1258,6 +1307,7 @@ extern unsigned int drm_debug;
1258 1307
1259extern struct class *drm_class; 1308extern struct class *drm_class;
1260extern struct proc_dir_entry *drm_proc_root; 1309extern struct proc_dir_entry *drm_proc_root;
1310extern struct dentry *drm_debugfs_root;
1261 1311
1262extern struct idr drm_minors_idr; 1312extern struct idr drm_minors_idr;
1263 1313
@@ -1268,6 +1318,31 @@ extern int drm_proc_init(struct drm_minor *minor, int minor_id,
1268 struct proc_dir_entry *root); 1318 struct proc_dir_entry *root);
1269extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); 1319extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
1270 1320
1321 /* Debugfs support */
1322#if defined(CONFIG_DEBUG_FS)
1323extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
1324 struct dentry *root);
1325extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
1326 struct dentry *root, struct drm_minor *minor);
1327extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
1328 struct drm_minor *minor);
1329extern int drm_debugfs_cleanup(struct drm_minor *minor);
1330#endif
1331
1332 /* Info file support */
1333extern int drm_name_info(struct seq_file *m, void *data);
1334extern int drm_vm_info(struct seq_file *m, void *data);
1335extern int drm_queues_info(struct seq_file *m, void *data);
1336extern int drm_bufs_info(struct seq_file *m, void *data);
1337extern int drm_vblank_info(struct seq_file *m, void *data);
1338extern int drm_clients_info(struct seq_file *m, void* data);
1339extern int drm_gem_name_info(struct seq_file *m, void *data);
1340extern int drm_gem_object_info(struct seq_file *m, void* data);
1341
1342#if DRM_DEBUG_CODE
1343extern int drm_vma_info(struct seq_file *m, void *data);
1344#endif
1345
1271 /* Scatter Gather Support (drm_scatter.h) */ 1346 /* Scatter Gather Support (drm_scatter.h) */
1272extern void drm_sg_cleanup(struct drm_sg_mem * entry); 1347extern void drm_sg_cleanup(struct drm_sg_mem * entry);
1273extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, 1348extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
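
The new debugfs_init/debugfs_cleanup hooks mirror the proc pair above them. A sketch of how a driver could wire them up, assuming the drm core hands each file's &struct drm_info_node to the seq_file as its private data (as the i915 debugfs code in this merge does); the foo_* names are hypothetical:

	static int foo_info_show(struct seq_file *m, void *data)
	{
		struct drm_info_node *node = m->private;

		/* node->minor->dev is the owning drm_device */
		seq_printf(m, "drm_device %p\n", node->minor->dev);
		return 0;
	}

	static struct drm_info_list foo_debugfs_list[] = {
		{ "foo_info", foo_info_show, 0, NULL },
	};

	static int foo_debugfs_init(struct drm_minor *minor)
	{
		return drm_debugfs_create_files(foo_debugfs_list,
						ARRAY_SIZE(foo_debugfs_list),
						minor->debugfs_root, minor);
	}

	static void foo_debugfs_cleanup(struct drm_minor *minor)
	{
		drm_debugfs_remove_files(foo_debugfs_list,
					 ARRAY_SIZE(foo_debugfs_list), minor);
	}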
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 5165f240aa68..76c4c8243038 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -418,4 +418,6 @@
418 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 418 {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
419 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 419 {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
420 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ 420 {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
421 {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
422 {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
421 {0, 0, 0} 423 {0, 0, 0}
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bd7ac793be19..f19fd9045ea0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
165void unmap_underlying_metadata(struct block_device *bdev, sector_t block); 165void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
166 166
167void mark_buffer_async_write(struct buffer_head *bh); 167void mark_buffer_async_write(struct buffer_head *bh);
168void invalidate_bdev(struct block_device *);
169int sync_blockdev(struct block_device *bdev);
170void __wait_on_buffer(struct buffer_head *); 168void __wait_on_buffer(struct buffer_head *);
171wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); 169wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
172int fsync_bdev(struct block_device *);
173struct super_block *freeze_bdev(struct block_device *);
174int thaw_bdev(struct block_device *, struct super_block *);
175int fsync_super(struct super_block *);
176int fsync_no_super(struct block_device *);
177struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, 170struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
178 unsigned size); 171 unsigned size);
179struct buffer_head *__getblk(struct block_device *bdev, sector_t block, 172struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 3fd2194ff573..b880864672de 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -125,6 +125,13 @@ struct compat_dirent {
125 char d_name[256]; 125 char d_name[256];
126}; 126};
127 127
128struct compat_ustat {
129 compat_daddr_t f_tfree;
130 compat_ino_t f_tinode;
131 char f_fname[6];
132 char f_fpack[6];
133};
134
128typedef union compat_sigval { 135typedef union compat_sigval {
129 compat_int_t sival_int; 136 compat_int_t sival_int;
130 compat_uptr_t sival_ptr; 137 compat_uptr_t sival_ptr;
@@ -178,6 +185,7 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
178 unsigned nsems, const struct compat_timespec __user *timeout); 185 unsigned nsems, const struct compat_timespec __user *timeout);
179asmlinkage long compat_sys_keyctl(u32 option, 186asmlinkage long compat_sys_keyctl(u32 option,
180 u32 arg2, u32 arg3, u32 arg4, u32 arg5); 187 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
188asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
181 189
182asmlinkage ssize_t compat_sys_readv(unsigned long fd, 190asmlinkage ssize_t compat_sys_readv(unsigned long fd,
183 const struct compat_iovec __user *vec, unsigned long vlen); 191 const struct compat_iovec __user *vec, unsigned long vlen);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c66d22487bf8..15156364d196 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -112,7 +112,7 @@ struct dentry {
112 struct list_head d_subdirs; /* our children */ 112 struct list_head d_subdirs; /* our children */
113 struct list_head d_alias; /* inode alias list */ 113 struct list_head d_alias; /* inode alias list */
114 unsigned long d_time; /* used by d_revalidate */ 114 unsigned long d_time; /* used by d_revalidate */
115 struct dentry_operations *d_op; 115 const struct dentry_operations *d_op;
116 struct super_block *d_sb; /* The root of the dentry tree */ 116 struct super_block *d_sb; /* The root of the dentry tree */
117 void *d_fsdata; /* fs-specific data */ 117 void *d_fsdata; /* fs-specific data */
118 118
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 4d078e99c017..c6b3ca3af6df 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -25,10 +25,12 @@
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/firewire-constants.h> 26#include <linux/firewire-constants.h>
27 27
28#define FW_CDEV_EVENT_BUS_RESET 0x00 28#define FW_CDEV_EVENT_BUS_RESET 0x00
29#define FW_CDEV_EVENT_RESPONSE 0x01 29#define FW_CDEV_EVENT_RESPONSE 0x01
30#define FW_CDEV_EVENT_REQUEST 0x02 30#define FW_CDEV_EVENT_REQUEST 0x02
31#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 31#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
32#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
33#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
32 34
33/** 35/**
34 * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types 36 * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
@@ -136,7 +138,24 @@ struct fw_cdev_event_request {
136 * This event is sent when the controller has completed an &fw_cdev_iso_packet 138 * This event is sent when the controller has completed an &fw_cdev_iso_packet
137 * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers 139 * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
138 * stripped of all packets up until and including the interrupt packet are 140 * stripped of all packets up until and including the interrupt packet are
139 * returned in the @header field. 141 * returned in the @header field. The amount of header data per packet is as
142 * specified at iso context creation by &fw_cdev_create_iso_context.header_size.
143 *
144 * In version 1 of this ABI, header data consisted of the 1394 isochronous
145 * packet header, followed by quadlets from the packet payload if
146 * &fw_cdev_create_iso_context.header_size > 4.
147 *
148 * In version 2 of this ABI, header data consist of the 1394 isochronous
149 * packet header, followed by a timestamp quadlet if
150 * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
151 * packet payload if &fw_cdev_create_iso_context.header_size > 8.
152 *
153 * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
154 *
155 * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
156 * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
157 * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
158 * order.
140 */ 159 */
141struct fw_cdev_event_iso_interrupt { 160struct fw_cdev_event_iso_interrupt {
142 __u64 closure; 161 __u64 closure;
@@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt {
147}; 166};
148 167
149/** 168/**
169 * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
170 * @closure: See &fw_cdev_event_common;
171 * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl
172 * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
173 * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
174 * @handle: Reference by which an allocated resource can be deallocated
175 * @channel: Isochronous channel which was (de)allocated, if any
176 * @bandwidth: Bandwidth allocation units which were (de)allocated, if any
177 *
178 * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous
179 * resource was allocated at the IRM. The client has to check @channel and
180 * @bandwidth for whether the allocation actually succeeded.
181 *
182 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous
183 * resource was deallocated at the IRM. It is also sent when automatic
184 * reallocation after a bus reset failed.
185 *
186 * @channel is <0 if no channel was (de)allocated or if reallocation failed.
187 * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed.
188 */
189struct fw_cdev_event_iso_resource {
190 __u64 closure;
191 __u32 type;
192 __u32 handle;
193 __s32 channel;
194 __s32 bandwidth;
195};
196
197/**
150 * union fw_cdev_event - Convenience union of fw_cdev_event_ types 198 * union fw_cdev_event - Convenience union of fw_cdev_event_ types
151 * @common: Valid for all types 199 * @common: Valid for all types
152 * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET 200 * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
153 * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE 201 * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
154 * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST 202 * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
155 * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT 203 * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
204 * @iso_resource: Valid if @common.type ==
205 * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
206 * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
156 * 207 *
157 * Convenience union for userspace use. Events could be read(2) into an 208 * Convenience union for userspace use. Events could be read(2) into an
158 * appropriately aligned char buffer and then cast to this union for further 209 * appropriately aligned char buffer and then cast to this union for further
@@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt {
163 * not fit will be discarded so that the next read(2) will return a new event. 214 * not fit will be discarded so that the next read(2) will return a new event.
164 */ 215 */
165union fw_cdev_event { 216union fw_cdev_event {
166 struct fw_cdev_event_common common; 217 struct fw_cdev_event_common common;
167 struct fw_cdev_event_bus_reset bus_reset; 218 struct fw_cdev_event_bus_reset bus_reset;
168 struct fw_cdev_event_response response; 219 struct fw_cdev_event_response response;
169 struct fw_cdev_event_request request; 220 struct fw_cdev_event_request request;
170 struct fw_cdev_event_iso_interrupt iso_interrupt; 221 struct fw_cdev_event_iso_interrupt iso_interrupt;
222 struct fw_cdev_event_iso_resource iso_resource;
171}; 223};
172 224
173#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) 225/* available since kernel version 2.6.22 */
174#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) 226#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
175#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) 227#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request)
176#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) 228#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate)
177#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) 229#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate)
178#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) 230#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response)
179#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) 231#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset)
180#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) 232#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor)
233#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor)
234#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context)
235#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso)
236#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
237#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
181 238
182#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) 239/* available since kernel version 2.6.24 */
183#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) 240#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
184#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso)
185#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso)
186#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer)
187 241
188/* FW_CDEV_VERSION History 242/* available since kernel version 2.6.30 */
189 * 243#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource)
190 * 1 Feb 18, 2007: Initial version. 244#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate)
245#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource)
246#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource)
247#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */
248#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request)
249#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet)
250
251/*
252 * FW_CDEV_VERSION History
253 * 1 (2.6.22) - initial version
254 * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
255 * &fw_cdev_create_iso_context.header_size is 8 or more
191 */ 256 */
192#define FW_CDEV_VERSION 1 257#define FW_CDEV_VERSION 2
193 258
194/** 259/**
195 * struct fw_cdev_get_info - General purpose information ioctl 260 * struct fw_cdev_get_info - General purpose information ioctl
@@ -201,7 +266,7 @@ union fw_cdev_event {
201 * case, @rom_length is updated with the actual length of the 266 * case, @rom_length is updated with the actual length of the
202 * configuration ROM. 267 * configuration ROM.
203 * @rom: If non-zero, address of a buffer to be filled by a copy of the 268 * @rom: If non-zero, address of a buffer to be filled by a copy of the
204 * local node's configuration ROM 269 * device's configuration ROM
205 * @bus_reset: If non-zero, address of a buffer to be filled by a 270 * @bus_reset: If non-zero, address of a buffer to be filled by a
206 * &struct fw_cdev_event_bus_reset with the current state 271 * &struct fw_cdev_event_bus_reset with the current state
207 * of the bus. This does not cause a bus reset to happen. 272 * of the bus. This does not cause a bus reset to happen.
@@ -229,7 +294,7 @@ struct fw_cdev_get_info {
229 * Send a request to the device. This ioctl implements all outgoing requests. 294 * Send a request to the device. This ioctl implements all outgoing requests.
230 * Both quadlet and block request specify the payload as a pointer to the data 295 * Both quadlet and block request specify the payload as a pointer to the data
231 * in the @data field. Once the transaction completes, the kernel writes an 296 * in the @data field. Once the transaction completes, the kernel writes an
232 * &fw_cdev_event_request event back. The @closure field is passed back to 297 * &fw_cdev_event_response event back. The @closure field is passed back to
233 * user space in the response event. 298 * user space in the response event.
234 */ 299 */
235struct fw_cdev_send_request { 300struct fw_cdev_send_request {
@@ -284,9 +349,9 @@ struct fw_cdev_allocate {
284}; 349};
285 350
286/** 351/**
287 * struct fw_cdev_deallocate - Free an address range allocation 352 * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource
288 * @handle: Handle to the address range, as returned by the kernel when the 353 * @handle: Handle to the address range or iso resource, as returned by the
289 * range was allocated 354 * kernel when the range or resource was allocated
290 */ 355 */
291struct fw_cdev_deallocate { 356struct fw_cdev_deallocate {
292 __u32 handle; 357 __u32 handle;
@@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset {
329 * If successful, the kernel adds the descriptor and writes back a handle to the 394 * If successful, the kernel adds the descriptor and writes back a handle to the
330 * kernel-side object to be used for later removal of the descriptor block and 395 * kernel-side object to be used for later removal of the descriptor block and
331 * immediate key. 396 * immediate key.
397 *
398 * This ioctl affects the configuration ROMs of all local nodes.
399 * The ioctl only succeeds on device files which represent a local node.
332 */ 400 */
333struct fw_cdev_add_descriptor { 401struct fw_cdev_add_descriptor {
334 __u32 immediate; 402 __u32 immediate;
@@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor {
344 * descriptor was added 412 * descriptor was added
345 * 413 *
346 * Remove a descriptor block and accompanying immediate key from the local 414 * Remove a descriptor block and accompanying immediate key from the local
347 * node's configuration ROM. 415 * nodes' configuration ROMs.
348 */ 416 */
349struct fw_cdev_remove_descriptor { 417struct fw_cdev_remove_descriptor {
350 __u32 handle; 418 __u32 handle;
@@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor {
370 * 438 *
371 * If a context was successfully created, the kernel writes back a handle to the 439 * If a context was successfully created, the kernel writes back a handle to the
372 * context, which must be passed in for subsequent operations on that context. 440 * context, which must be passed in for subsequent operations on that context.
441 *
442 * Note that the effect of a @header_size > 4 depends on
443 * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
373 */ 444 */
374struct fw_cdev_create_iso_context { 445struct fw_cdev_create_iso_context {
375 __u32 type; 446 __u32 type;
@@ -473,10 +544,91 @@ struct fw_cdev_stop_iso {
473 * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer 544 * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
474 * and also the system clock. This allows to express the receive time of an 545 * and also the system clock. This allows to express the receive time of an
475 * isochronous packet as a system time with microsecond accuracy. 546 * isochronous packet as a system time with microsecond accuracy.
547 *
548 * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
549 * 12 bits cycleOffset, in host byte order.
476 */ 550 */
477struct fw_cdev_get_cycle_timer { 551struct fw_cdev_get_cycle_timer {
478 __u64 local_time; 552 __u64 local_time;
479 __u32 cycle_timer; 553 __u32 cycle_timer;
480}; 554};
481 555
556/**
557 * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth
 558 * @closure: Passed back to userspace in corresponding iso resource events
559 * @channels: Isochronous channels of which one is to be (de)allocated
560 * @bandwidth: Isochronous bandwidth units to be (de)allocated
561 * @handle: Handle to the allocation, written by the kernel (only valid in
562 * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls)
563 *
564 * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an
565 * isochronous channel and/or of isochronous bandwidth at the isochronous
566 * resource manager (IRM). Only one of the channels specified in @channels is
567 * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after
568 * communication with the IRM, indicating success or failure in the event data.
569 * The kernel will automatically reallocate the resources after bus resets.
570 * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event
571 * will be sent. The kernel will also automatically deallocate the resources
572 * when the file descriptor is closed.
573 *
574 * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate
575 * deallocation of resources which were allocated as described above.
576 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
577 *
578 * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation
579 * without automatic re- or deallocation.
580 * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation,
581 * indicating success or failure in its data.
582 *
583 * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like
584 * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed
585 * instead of allocated.
586 * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
587 *
 588 * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
589 * for the lifetime of the fd or handle.
590 * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
591 * for the duration of a bus generation.
592 *
593 * @channels is a host-endian bitfield with the least significant bit
594 * representing channel 0 and the most significant bit representing channel 63:
595 * 1ULL << c for each channel c that is a candidate for (de)allocation.
596 *
597 * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send
598 * one quadlet of data (payload or header data) at speed S1600.
599 */
600struct fw_cdev_allocate_iso_resource {
601 __u64 closure;
602 __u64 channels;
603 __u32 bandwidth;
604 __u32 handle;
605};
606
607/**
608 * struct fw_cdev_send_stream_packet - send an asynchronous stream packet
609 * @length: Length of outgoing payload, in bytes
610 * @tag: Data format tag
611 * @channel: Isochronous channel to transmit to
612 * @sy: Synchronization code
613 * @closure: Passed back to userspace in the response event
614 * @data: Userspace pointer to payload
615 * @generation: The bus generation where packet is valid
616 * @speed: Speed to transmit at
617 *
618 * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
619 * to every device which is listening to the specified channel. The kernel
620 * writes an &fw_cdev_event_response event which indicates success or failure of
621 * the transmission.
622 */
623struct fw_cdev_send_stream_packet {
624 __u32 length;
625 __u32 tag;
626 __u32 channel;
627 __u32 sy;
628 __u64 closure;
629 __u64 data;
630 __u32 generation;
631 __u32 speed;
632};
633
482#endif /* _LINUX_FIREWIRE_CDEV_H */ 634#endif /* _LINUX_FIREWIRE_CDEV_H */
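
The resource (de)allocation ioctls are asynchronous: the ioctl itself only files the request, and the outcome arrives later as an &fw_cdev_event_iso_resource. A minimal userspace sketch, assuming a /dev/fw0 device node and skipping the FW_CDEV_IOC_GET_INFO version handshake a real client would do first:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	int main(void)
	{
		struct fw_cdev_allocate_iso_resource req;
		union fw_cdev_event event;
		int fd = open("/dev/fw0", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&req, 0, sizeof(req));
		req.channels  = 1ULL << 5 | 1ULL << 6; /* try channel 5 or 6 */
		req.bandwidth = 0;                     /* channel only */

		if (ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &req) < 0)
			return 1;

		/* The IRM result is delivered as an event via read(2). */
		if (read(fd, &event, sizeof(event)) > 0 &&
		    event.common.type == FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED &&
		    event.iso_resource.channel >= 0)
			printf("got channel %d, handle %u\n",
			       event.iso_resource.channel,
			       event.iso_resource.handle);

		close(fd);
		return 0;
	}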
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1cd44f727dac..42436ae42f70 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1064,34 +1064,147 @@ extern int lease_modify(struct file_lock **, int);
1064extern int lock_may_read(struct inode *, loff_t start, unsigned long count); 1064extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
1065extern int lock_may_write(struct inode *, loff_t start, unsigned long count); 1065extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
1066#else /* !CONFIG_FILE_LOCKING */ 1066#else /* !CONFIG_FILE_LOCKING */
1067#define fcntl_getlk(a, b) ({ -EINVAL; }) 1067static inline int fcntl_getlk(struct file *file, struct flock __user *user)
1068#define fcntl_setlk(a, b, c, d) ({ -EACCES; }) 1068{
1069 return -EINVAL;
1070}
1071
1072static inline int fcntl_setlk(unsigned int fd, struct file *file,
1073 unsigned int cmd, struct flock __user *user)
1074{
1075 return -EACCES;
1076}
1077
1069#if BITS_PER_LONG == 32 1078#if BITS_PER_LONG == 32
1070#define fcntl_getlk64(a, b) ({ -EINVAL; }) 1079static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user)
1071#define fcntl_setlk64(a, b, c, d) ({ -EACCES; }) 1080{
1081 return -EINVAL;
1082}
1083
1084static inline int fcntl_setlk64(unsigned int fd, struct file *file,
1085 unsigned int cmd, struct flock64 __user *user)
1086{
1087 return -EACCES;
1088}
1072#endif 1089#endif
1073#define fcntl_setlease(a, b, c) ({ 0; }) 1090static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1074#define fcntl_getlease(a) ({ 0; }) 1091{
1075#define locks_init_lock(a) ({ }) 1092 return 0;
1076#define __locks_copy_lock(a, b) ({ }) 1093}
1077#define locks_copy_lock(a, b) ({ }) 1094
1078#define locks_remove_posix(a, b) ({ }) 1095static inline int fcntl_getlease(struct file *filp)
1079#define locks_remove_flock(a) ({ }) 1096{
1080#define posix_test_lock(a, b) ({ 0; }) 1097 return 0;
1081#define posix_lock_file(a, b, c) ({ -ENOLCK; }) 1098}
1082#define posix_lock_file_wait(a, b) ({ -ENOLCK; }) 1099
1083#define posix_unblock_lock(a, b) (-ENOENT) 1100static inline void locks_init_lock(struct file_lock *fl)
1084#define vfs_test_lock(a, b) ({ 0; }) 1101{
1085#define vfs_lock_file(a, b, c, d) (-ENOLCK) 1102 return;
1086#define vfs_cancel_lock(a, b) ({ 0; }) 1103}
1087#define flock_lock_file_wait(a, b) ({ -ENOLCK; }) 1104
1088#define __break_lease(a, b) ({ 0; }) 1105static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
1089#define lease_get_mtime(a, b) ({ }) 1106{
1090#define generic_setlease(a, b, c) ({ -EINVAL; }) 1107 return;
1091#define vfs_setlease(a, b, c) ({ -EINVAL; }) 1108}
1092#define lease_modify(a, b) ({ -EINVAL; }) 1109
1093#define lock_may_read(a, b, c) ({ 1; }) 1110static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
1094#define lock_may_write(a, b, c) ({ 1; }) 1111{
1112 return;
1113}
1114
1115static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
1116{
1117 return;
1118}
1119
1120static inline void locks_remove_flock(struct file *filp)
1121{
1122 return;
1123}
1124
1125static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
1126{
1127 return;
1128}
1129
1130static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
1131 struct file_lock *conflock)
1132{
1133 return -ENOLCK;
1134}
1135
1136static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1137{
1138 return -ENOLCK;
1139}
1140
1141static inline int posix_unblock_lock(struct file *filp,
1142 struct file_lock *waiter)
1143{
1144 return -ENOENT;
1145}
1146
1147static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
1148{
1149 return 0;
1150}
1151
1152static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
1153 struct file_lock *fl, struct file_lock *conf)
1154{
1155 return -ENOLCK;
1156}
1157
1158static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
1159{
1160 return 0;
1161}
1162
1163static inline int flock_lock_file_wait(struct file *filp,
1164 struct file_lock *request)
1165{
1166 return -ENOLCK;
1167}
1168
1169static inline int __break_lease(struct inode *inode, unsigned int mode)
1170{
1171 return 0;
1172}
1173
1174static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
1175{
1176 return;
1177}
1178
1179static inline int generic_setlease(struct file *filp, long arg,
1180 struct file_lock **flp)
1181{
1182 return -EINVAL;
1183}
1184
1185static inline int vfs_setlease(struct file *filp, long arg,
1186 struct file_lock **lease)
1187{
1188 return -EINVAL;
1189}
1190
1191static inline int lease_modify(struct file_lock **before, int arg)
1192{
1193 return -EINVAL;
1194}
1195
1196static inline int lock_may_read(struct inode *inode, loff_t start,
1197 unsigned long len)
1198{
1199 return 1;
1200}
1201
1202static inline int lock_may_write(struct inode *inode, loff_t start,
1203 unsigned long len)
1204{
1205 return 1;
1206}
1207
1095#endif /* !CONFIG_FILE_LOCKING */ 1208#endif /* !CONFIG_FILE_LOCKING */
1096 1209
1097 1210
@@ -1607,7 +1720,7 @@ struct super_block *sget(struct file_system_type *type,
1607extern int get_sb_pseudo(struct file_system_type *, char *, 1720extern int get_sb_pseudo(struct file_system_type *, char *,
1608 const struct super_operations *ops, unsigned long, 1721 const struct super_operations *ops, unsigned long,
1609 struct vfsmount *mnt); 1722 struct vfsmount *mnt);
1610extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); 1723extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
1611int __put_super_and_need_restart(struct super_block *sb); 1724int __put_super_and_need_restart(struct super_block *sb);
1612 1725
1613/* Alas, no aliases. Too much hassle with bringing module.h everywhere */ 1726/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
@@ -1688,13 +1801,44 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1688 return 0; 1801 return 0;
1689} 1802}
1690#else /* !CONFIG_FILE_LOCKING */ 1803#else /* !CONFIG_FILE_LOCKING */
1691#define locks_mandatory_locked(a) ({ 0; }) 1804static inline int locks_mandatory_locked(struct inode *inode)
1692#define locks_mandatory_area(a, b, c, d, e) ({ 0; }) 1805{
1693#define __mandatory_lock(a) ({ 0; }) 1806 return 0;
1694#define mandatory_lock(a) ({ 0; }) 1807}
1695#define locks_verify_locked(a) ({ 0; }) 1808
1696#define locks_verify_truncate(a, b, c) ({ 0; }) 1809static inline int locks_mandatory_area(int rw, struct inode *inode,
1697#define break_lease(a, b) ({ 0; }) 1810 struct file *filp, loff_t offset,
1811 size_t count)
1812{
1813 return 0;
1814}
1815
1816static inline int __mandatory_lock(struct inode *inode)
1817{
1818 return 0;
1819}
1820
1821static inline int mandatory_lock(struct inode *inode)
1822{
1823 return 0;
1824}
1825
1826static inline int locks_verify_locked(struct inode *inode)
1827{
1828 return 0;
1829}
1830
1831static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
1832 size_t size)
1833{
1834 return 0;
1835}
1836
1837static inline int break_lease(struct inode *inode, unsigned int mode)
1838{
1839 return 0;
1840}
1841
1698#endif /* CONFIG_FILE_LOCKING */ 1842#endif /* CONFIG_FILE_LOCKING */
1699 1843
1700/* fs/open.c */ 1844/* fs/open.c */
@@ -1731,6 +1875,13 @@ extern void bd_set_size(struct block_device *, loff_t size);
1731extern void bd_forget(struct inode *inode); 1875extern void bd_forget(struct inode *inode);
1732extern void bdput(struct block_device *); 1876extern void bdput(struct block_device *);
1733extern struct block_device *open_by_devnum(dev_t, fmode_t); 1877extern struct block_device *open_by_devnum(dev_t, fmode_t);
1878extern void invalidate_bdev(struct block_device *);
1879extern int sync_blockdev(struct block_device *bdev);
1880extern struct super_block *freeze_bdev(struct block_device *);
1881extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
1882extern int fsync_bdev(struct block_device *);
1883extern int fsync_super(struct super_block *);
1884extern int fsync_no_super(struct block_device *);
1734#else 1885#else
1735static inline void bd_forget(struct inode *inode) {} 1886static inline void bd_forget(struct inode *inode) {}
1736#endif 1887#endif
@@ -1882,7 +2033,6 @@ static inline void allow_write_access(struct file *file)
1882 if (file) 2033 if (file)
1883 atomic_inc(&file->f_path.dentry->d_inode->i_writecount); 2034 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
1884} 2035}
1885extern int do_pipe(int *);
1886extern int do_pipe_flags(int *, int); 2036extern int do_pipe_flags(int *, int);
1887extern struct file *create_read_pipe(struct file *f, int flags); 2037extern struct file *create_read_pipe(struct file *f, int flags);
1888extern struct file *create_write_pipe(int flags); 2038extern struct file *create_write_pipe(int flags);
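
Converting the ({ ... }) statement-expression macros to static inlines means the stubs keep real prototypes, so callers are type-checked even in configurations where CONFIG_FILE_LOCKING is off, not just in the ones where it is on. A standalone sketch of the difference (names are illustrative, not kernel API):

	#include <stdio.h>

	struct file;                     /* stand-in for the kernel type */

	/* macro stub: argument is never type-checked */
	#define getlease_old(a) ({ 0; })

	/* inline stub: prototype is enforced */
	static inline int getlease_new(struct file *filp)
	{
		return 0;
	}

	int main(void)
	{
		int a = getlease_old("not a file");     /* compiles silently */
		int b = getlease_new((struct file *)0); /* argument checked */

		printf("%d %d\n", a, b);
		return 0;
	}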
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index f69e66d151cc..30b06c893944 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,7 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
204/* linux/fs/ncpfs/dir.c */ 204/* linux/fs/ncpfs/dir.c */
205extern const struct inode_operations ncp_dir_inode_operations; 205extern const struct inode_operations ncp_dir_inode_operations;
206extern const struct file_operations ncp_dir_operations; 206extern const struct file_operations ncp_dir_operations;
207extern struct dentry_operations ncp_root_dentry_operations; 207extern const struct dentry_operations ncp_root_dentry_operations;
208int ncp_conn_logged_in(struct super_block *); 208int ncp_conn_logged_in(struct super_block *);
209int ncp_date_dos2unix(__le16 time, __le16 date); 209int ncp_date_dos2unix(__le16 time, __le16 date);
210void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); 210void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index db867b04ac3c..8cc8807f77d6 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -415,7 +415,7 @@ extern const struct inode_operations nfs_dir_inode_operations;
415extern const struct inode_operations nfs3_dir_inode_operations; 415extern const struct inode_operations nfs3_dir_inode_operations;
416#endif /* CONFIG_NFS_V3 */ 416#endif /* CONFIG_NFS_V3 */
417extern const struct file_operations nfs_dir_operations; 417extern const struct file_operations nfs_dir_operations;
418extern struct dentry_operations nfs_dentry_operations; 418extern const struct dentry_operations nfs_dentry_operations;
419 419
420extern void nfs_force_lookup_revalidate(struct inode *dir); 420extern void nfs_force_lookup_revalidate(struct inode *dir);
421extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); 421extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2e5f00066afd..43a713fce11c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -785,7 +785,7 @@ struct nfs_access_entry;
785 */ 785 */
786struct nfs_rpc_ops { 786struct nfs_rpc_ops {
787 u32 version; /* Protocol version */ 787 u32 version; /* Protocol version */
788 struct dentry_operations *dentry_ops; 788 const struct dentry_operations *dentry_ops;
789 const struct inode_operations *dir_inode_ops; 789 const struct inode_operations *dir_inode_ops;
790 const struct inode_operations *file_inode_ops; 790 const struct inode_operations *file_inode_ops;
791 791
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d72d5d84fde5..78c48895b12a 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -198,6 +198,7 @@ struct mem_dqblk {
198 qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ 198 qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
199 qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ 199 qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */
200 qsize_t dqb_curspace; /* current used space */ 200 qsize_t dqb_curspace; /* current used space */
 201 qsize_t dqb_rsvspace; /* current reserved space for delalloc */
201 qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ 202 qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
202 qsize_t dqb_isoftlimit; /* preferred inode limit */ 203 qsize_t dqb_isoftlimit; /* preferred inode limit */
203 qsize_t dqb_curinodes; /* current # allocated inodes */ 204 qsize_t dqb_curinodes; /* current # allocated inodes */
@@ -276,8 +277,6 @@ struct dquot {
276 struct mem_dqblk dq_dqb; /* Diskquota usage */ 277 struct mem_dqblk dq_dqb; /* Diskquota usage */
277}; 278};
278 279
279#define NODQUOT (struct dquot *)NULL
280
281#define QUOTA_OK 0 280#define QUOTA_OK 0
282#define NO_QUOTA 1 281#define NO_QUOTA 1
283 282
@@ -308,6 +307,14 @@ struct dquot_operations {
308 int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ 307 int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
309 int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ 308 int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
310 int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ 309 int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
310 /* reserve quota for delayed block allocation */
311 int (*reserve_space) (struct inode *, qsize_t, int);
312 /* claim reserved quota for delayed alloc */
313 int (*claim_space) (struct inode *, qsize_t);
 314 /* release reserved quota for delayed alloc */
315 void (*release_rsv) (struct inode *, qsize_t);
316 /* get reserved quota for delayed alloc */
317 qsize_t (*get_reserved_space) (struct inode *);
311}; 318};
312 319
313/* Operations handling requests from userspace */ 320/* Operations handling requests from userspace */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 0b35b3a1be05..36353d95c8db 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -35,6 +35,11 @@ void dquot_destroy(struct dquot *dquot);
35int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); 35int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
36int dquot_alloc_inode(const struct inode *inode, qsize_t number); 36int dquot_alloc_inode(const struct inode *inode, qsize_t number);
37 37
38int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
39int dquot_claim_space(struct inode *inode, qsize_t number);
40void dquot_release_reserved_space(struct inode *inode, qsize_t number);
41qsize_t dquot_get_reserved_space(struct inode *inode);
42
38int dquot_free_space(struct inode *inode, qsize_t number); 43int dquot_free_space(struct inode *inode, qsize_t number);
39int dquot_free_inode(const struct inode *inode, qsize_t number); 44int dquot_free_inode(const struct inode *inode, qsize_t number);
40 45
@@ -183,6 +188,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
183 return ret; 188 return ret;
184} 189}
185 190
191static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
192{
193 if (sb_any_quota_active(inode->i_sb)) {
194 /* Used space is updated in alloc_space() */
195 if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
196 return 1;
197 }
198 return 0;
199}
200
186static inline int vfs_dq_alloc_inode(struct inode *inode) 201static inline int vfs_dq_alloc_inode(struct inode *inode)
187{ 202{
188 if (sb_any_quota_active(inode->i_sb)) { 203 if (sb_any_quota_active(inode->i_sb)) {
@@ -193,6 +208,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode)
193 return 0; 208 return 0;
194} 209}
195 210
211/*
212 * Convert in-memory reserved quotas to real consumed quotas
213 */
214static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
215{
216 if (sb_any_quota_active(inode->i_sb)) {
217 if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
218 return 1;
219 } else
220 inode_add_bytes(inode, nr);
221
222 mark_inode_dirty(inode);
223 return 0;
224}
225
226/*
227 * Release reserved (in-memory) quotas
228 */
229static inline
230void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
231{
232 if (sb_any_quota_active(inode->i_sb))
233 inode->i_sb->dq_op->release_rsv(inode, nr);
234}
235
196static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) 236static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
197{ 237{
198 if (sb_any_quota_active(inode->i_sb)) 238 if (sb_any_quota_active(inode->i_sb))
@@ -339,6 +379,22 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
339 return 0; 379 return 0;
340} 380}
341 381
382static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
383{
384 return 0;
385}
386
387static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
388{
389 return vfs_dq_alloc_space(inode, nr);
390}
391
 392static inline
 393void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
 394{
 395 return;
 396}
397
342static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) 398static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
343{ 399{
344 inode_sub_bytes(inode, nr); 400 inode_sub_bytes(inode, nr);
@@ -354,67 +410,48 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
354 410
355static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr) 411static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
356{ 412{
357 return vfs_dq_prealloc_space_nodirty(inode, 413 return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
358 nr << inode->i_sb->s_blocksize_bits);
359} 414}
360 415
361static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr) 416static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
362{ 417{
363 return vfs_dq_prealloc_space(inode, 418 return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
364 nr << inode->i_sb->s_blocksize_bits);
365} 419}
366 420
367static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr) 421static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
368{ 422{
369 return vfs_dq_alloc_space_nodirty(inode, 423 return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
370 nr << inode->i_sb->s_blocksize_bits);
371} 424}
372 425
373static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr) 426static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
374{ 427{
375 return vfs_dq_alloc_space(inode, 428 return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
376 nr << inode->i_sb->s_blocksize_bits); 429}
430
431static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
432{
433 return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
434}
435
436static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
437{
438 return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
439}
440
441static inline
442void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
443{
444 vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
377} 445}
378 446
379static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) 447static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
380{ 448{
381 vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); 449 vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
382} 450}
383 451
384static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr) 452static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
385{ 453{
386 vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits); 454 vfs_dq_free_space(inode, nr << inode->i_blkbits);
387} 455}
388 456
389/*
390 * Define uppercase equivalents for compatibility with old function names
391 * Can go away when we think all users have been converted (15/04/2008)
392 */
393#define DQUOT_INIT(inode) vfs_dq_init(inode)
394#define DQUOT_DROP(inode) vfs_dq_drop(inode)
395#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
396 vfs_dq_prealloc_space_nodirty(inode, nr)
397#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
398#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
399 vfs_dq_alloc_space_nodirty(inode, nr)
400#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
401#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
402 vfs_dq_prealloc_block_nodirty(inode, nr)
403#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
404#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
405 vfs_dq_alloc_block_nodirty(inode, nr)
406#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
407#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
408#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
409 vfs_dq_free_space_nodirty(inode, nr)
410#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
411#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
412 vfs_dq_free_block_nodirty(inode, nr)
413#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
414#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
415#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
416#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
417#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
418#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
419
420#endif /* _LINUX_QUOTAOPS_ */ 457#endif /* _LINUX_QUOTAOPS_ */
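The reservation helpers added above are meant to be used in pairs by filesystems that do delayed allocation: quota is reserved when data is buffered, then either claimed once blocks are actually written or handed back if the allocation never happens. A minimal sketch of that calling pattern follows; it assumes a hypothetical filesystem, and the myfs_* names, error values, and surrounding write path are illustrative only, not part of this patch.

/* Illustrative sketch only -- all myfs_* names are hypothetical. */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/quotaops.h>

/* Hypothetical placeholder: queue 'blocks' for delayed allocation. */
static int myfs_schedule_allocation(struct inode *inode, qsize_t blocks)
{
        return 0;       /* pretend the blocks were queued successfully */
}

static int myfs_buffered_write(struct inode *inode, qsize_t blocks)
{
        /* Reserve quota up front; nothing is charged as used yet. */
        if (vfs_dq_reserve_block(inode, blocks))
                return -EDQUOT;

        if (myfs_schedule_allocation(inode, blocks) < 0) {
                /* Allocation abandoned: hand the reservation back. */
                vfs_dq_release_reservation_block(inode, blocks);
                return -ENOSPC;
        }
        return 0;
}

static int myfs_blocks_allocated(struct inode *inode, qsize_t blocks)
{
        /* Blocks now exist on disk: convert the reservation to usage. */
        if (vfs_dq_claim_block(inode, blocks))
                return -EDQUOT;
        return 0;
}

Note that the same hunks also switch the block-to-byte conversion in these helpers from inode->i_sb->s_blocksize_bits to the equivalent inode->i_blkbits, which saves a dereference through the superblock.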
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9edb5c4b79b4..c500ca7239b2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1071,7 +1071,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
                 mutex_unlock(&cgroup_mutex);
         }
 
-        return simple_set_mnt(mnt, sb);
+        simple_set_mnt(mnt, sb);
+        return 0;
 
  free_cg_links:
         free_cg_links(&tmp_cg_links);
@@ -1627,7 +1628,7 @@ static struct inode_operations cgroup_dir_inode_operations = {
 static int cgroup_create_file(struct dentry *dentry, int mode,
                               struct super_block *sb)
 {
-        static struct dentry_operations cgroup_dops = {
+        static const struct dentry_operations cgroup_dops = {
                 .d_iput = cgroup_diput,
         };
 
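The first cgroup hunk tracks a VFS interface change: simple_set_mnt() cannot fail and now returns void, so callers attach the superblock to the mount and report success themselves. A sketch of the resulting calling convention in a get_sb() implementation; myfs_get_sb is hypothetical, and the sget() arguments shown are only one plausible choice.

/* Illustrative sketch only -- myfs_get_sb is hypothetical. */
#include <linux/err.h>
#include <linux/fs.h>

static int myfs_get_sb(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data,
                       struct vfsmount *mnt)
{
        struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

        if (IS_ERR(sb))
                return PTR_ERR(sb);

        /* simple_set_mnt() now returns void; success is reported here. */
        simple_set_mnt(mnt, sb);
        return 0;
}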
diff --git a/net/socket.c b/net/socket.c
index af0205ff56f2..0b14b79c03af 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -328,7 +328,7 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
                                 dentry->d_inode->i_ino);
 }
 
-static struct dentry_operations sockfs_dentry_operations = {
+static const struct dentry_operations sockfs_dentry_operations = {
         .d_delete = sockfs_delete_dentry,
         .d_dname = sockfs_dname,
 };
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 577385a4a5dc..9ced0628d69c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -480,7 +480,7 @@ static int rpc_delete_dentry(struct dentry *dentry)
         return 1;
 }
 
-static struct dentry_operations rpc_dentry_operations = {
+static const struct dentry_operations rpc_dentry_operations = {
         .d_delete = rpc_delete_dentry,
 };
 
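The cgroup, sockfs, and rpc_pipe hunks all apply the same tree-wide change: dentry_operations tables are never written after initialization, so declaring them const lets them live in read-only data and documents that intent. The pattern, sketched for a hypothetical filesystem; the myfs_* names are made up, and at this point in the kernel's history the table was attached by assigning d_op directly.

/* Illustrative sketch only -- all myfs_* names are hypothetical. */
#include <linux/dcache.h>

/* Never keep unused dentries cached for this filesystem. */
static int myfs_delete_dentry(struct dentry *dentry)
{
        return 1;
}

static const struct dentry_operations myfs_dentry_operations = {
        .d_delete = myfs_delete_dentry,
};

static void myfs_attach_ops(struct dentry *dentry)
{
        /* d_op was constified along with these tables, so this compiles. */
        dentry->d_op = &myfs_dentry_operations;
}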